Compare commits

...

22 Commits

Author SHA1 Message Date
Roberto Jimenez Sanchez c236119c06 Merge remote-tracking branch 'origin/main' into provisioning/dual-writer-auth-fixes 2025-12-15 11:47:28 +01:00
Jean-Philippe Quéméner a716549f36 fix(dashboards): return right token for version api (#115313) 2025-12-15 11:23:18 +01:00
Tobias Skarhed e5c1de390d Scopes: Update ScopeNavigation type (#115312)
Update scope types
2025-12-15 11:13:35 +01:00
Marc M. 20f17d72c3 DynamicDashboards: Add background (#115273) 2025-12-15 11:05:56 +01:00
Marc M. a3d7bd8dca DynamicDashboards: In view mode, hide config button when panel has not been configured (#115261) 2025-12-15 11:05:36 +01:00
Robert Horvath 074e8ce128 Chore: fix grafana 12 release and support dates (#115235)
fix release and support dates of grafana 12.3.x and 12.4.x
2025-12-15 10:59:24 +01:00
Joe Elliott 4149767391 Tempo: Correctly escape/unescape tag when looking for tag values (#114275)
* Correctly escape/unescape tag

Signed-off-by: Joe Elliott <number101010@gmail.com>

* changelog

Signed-off-by: Joe Elliott <number101010@gmail.com>

* Revert "changelog"

This reverts commit e0cde18994c67fbdd601514d2f930798b0ae76c6.

---------

Signed-off-by: Joe Elliott <number101010@gmail.com>
2025-12-15 10:41:24 +01:00
Gonzalo Trigueros Manzanas 0c49337205 Provisioning: add warning column to JobSummary UI. (#115220) 2025-12-15 08:22:33 +00:00
grafana-pr-automation[bot] c5345498b1 I18n: Download translations from Crowdin (#115291)
New Crowdin translations by GitHub Action

Co-authored-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com>
2025-12-13 00:42:48 +00:00
Isabel Matwawana 1bcccd5e61 Docs: Update export as JSON task (#115288) 2025-12-12 22:26:28 +00:00
Oscar Kilhed 12b38d1b7a Dashboards: Never allow rows with hidden header to be collapsed (#115284)
Never allow rows with hidden header to be collapsed
2025-12-12 22:14:48 +00:00
Paul Marbach 359d097154 Table: Remove hardcoded assumption of __nestedFrames field name (#115117)
* Table: Remove hardcoded assumption of __nestedFrames field name

* E2E for nested tables

* Apply suggestion from @fastfrwrd
2025-12-12 21:57:47 +00:00
Haris Rozajac cfc5d96c34 Dashboard Schema V2: Fix panel query tab (#115276)
fix panel query tab for v2 schema
2025-12-12 14:39:43 -07:00
Larissa Wandzura 3459c67bfb DOCS: Overhaul Azure Monitor data source docs (#115121)
* continued edits

* authentication updates

* added more info to configure doc

* started work on query editor

* reviewed the configure doc, consolidated sections

* fixed issue with headings

* fixed errors

* updates to the template variables doc

* created initial troubleshooting doc

* removed gerunds and fixed heading issues

* new annotations doc added

* more updates to query editor

* fixed spelling

* fixed some linter issues

* fixed flow for the intro doc

* updates to the intro doc

* fixed transformation links

* added review date to front matter

* ran prettier

* added a new alerting doc

* linter updates

* some final edits

* ran prettier again

* Update docs/sources/datasources/azure-monitor/configure/index.md

Co-authored-by: Andreas Christou <andreas.christou@grafana.com>

* Update docs/sources/datasources/azure-monitor/configure/index.md

Co-authored-by: Andreas Christou <andreas.christou@grafana.com>

* Update docs/sources/datasources/azure-monitor/troubleshooting/index.md

Co-authored-by: Andreas Christou <andreas.christou@grafana.com>

* edits based on feedback

* removed all relative reference links

* ran prettier

---------

Co-authored-by: Andreas Christou <andreas.christou@grafana.com>
2025-12-12 21:10:03 +00:00
Larissa Wandzura 37ccd8bc3d Docs: Added troubleshooting guide for the InfluxDB data source (#115191)
* Docs: Add troubleshooting guide for InfluxDB data source

* linter fixes, updates based on feedback
2025-12-12 20:21:50 +00:00
colin-stuart 5156177079 SCIM: show error if SCIM-provisioned user attempts login with non-SAML auth module (#115271) 2025-12-12 13:51:37 -06:00
Paul Marbach 4817ecf6a3 Sparkline: Guess decimals rather than going with 0 (#115246)
* Sparkline: Guess decimals rather than going with 0

* Update packages/grafana-ui/src/components/Sparkline/utils.test.ts
2025-12-12 13:59:54 -05:00
Renato Costa c73cab8eef chore: add cleanup task for duplicated provisioned dashboards (#115103)
* chore: add cleanup task for duplicated provisioned dashboards
2025-12-12 13:56:47 -05:00
Adela Almasan a37ebf609e VizSuggestions: Fix unique key warning (#115112) 2025-12-12 12:25:03 -06:00
Kristina Demeshchik b29e8ccb45 Dashboards: Generate default tab title when converting rows with empty titles to tabs (#115256)
Generate default title for empty row titles
2025-12-12 13:14:56 -05:00
Matias Chomicki 644f7b7001 Infinite scroll: Fix interaction with client-side filter (#115243)
Infinite scroll: fix interaction with client-side filter
2025-12-12 18:59:49 +01:00
Charandas Batra 7fa972e914 fix: validate authz for delete folder and create folder 2025-11-19 17:52:12 -08:00
68 changed files with 3818 additions and 492 deletions
+2 -1
View File
@@ -520,7 +520,7 @@ i18next.config.ts @grafana/grafana-frontend-platform
/e2e-playwright/various-suite/solo-route.spec.ts @grafana/dashboards-squad
/e2e-playwright/various-suite/trace-view-scrolling.spec.ts @grafana/observability-traces-and-profiling
/e2e-playwright/various-suite/verify-i18n.spec.ts @grafana/grafana-frontend-platform
/e2e-playwright/various-suite/visualization-suggestions.spec.ts @grafana/dashboards-squad
/e2e-playwright/various-suite/visualization-suggestions.spec.ts @grafana/dataviz-squad
/e2e-playwright/various-suite/perf-test.spec.ts @grafana/grafana-frontend-platform
# Packages
@@ -956,6 +956,7 @@ playwright.storybook.config.ts @grafana/grafana-frontend-platform
/public/app/features/notifications/ @grafana/grafana-search-navigate-organise
/public/app/features/org/ @grafana/grafana-search-navigate-organise
/public/app/features/panel/ @grafana/dashboards-squad
/public/app/features/panel/components/VizTypePicker/VisualizationSuggestions.tsx @grafana/dataviz-squad
/public/app/features/panel/suggestions/ @grafana/dataviz-squad
/public/app/features/playlist/ @grafana/dashboards-squad
/public/app/features/plugins/ @grafana/plugins-platform-frontend
@@ -211,6 +211,12 @@ type ScopeNavigationSpec struct {
Scope string `json:"scope"`
// Used to navigate to a sub-scope of the main scope. URL will not be used if this is set.
SubScope string `json:"subScope,omitempty"`
// Preload the subscope children, as soon as the ScopeNavigation is loaded.
PreLoadSubScopeChildren bool `json:"preLoadSubScopeChildren,omitempty"`
// Expands to display the subscope children when the ScopeNavigation is loaded.
ExpandOnLoad bool `json:"expandOnLoad,omitempty"`
// Makes the subscope not selectable, only serving as a way to build the tree.
DisableSubScopeSelection bool `json:"disableSubScopeSelection,omitempty"`
}
// Type of the item.
@@ -642,6 +642,27 @@ func schema_pkg_apis_scope_v0alpha1_ScopeNavigationSpec(ref common.ReferenceCall
Format: "",
},
},
"preLoadSubScopeChildren": {
SchemaProps: spec.SchemaProps{
Description: "Preload the subscope children, as soon as the ScopeNavigation is loaded.",
Type: []string{"boolean"},
Format: "",
},
},
"expandOnLoad": {
SchemaProps: spec.SchemaProps{
Description: "Expands to display the subscope children when the ScopeNavigation is loaded.",
Type: []string{"boolean"},
Format: "",
},
},
"disableSubScopeSelection": {
SchemaProps: spec.SchemaProps{
Description: "Makes the subscope not selectable, only serving as a way to build the tree.",
Type: []string{"boolean"},
Format: "",
},
},
},
Required: []string{"url", "scope"},
},
+78 -270
View File
@@ -3,7 +3,6 @@ aliases:
- ../data-sources/azure-monitor/
- ../features/datasources/azuremonitor/
- azuremonitor/
- azuremonitor/deprecated-application-insights/
description: Guide for using Azure Monitor in Grafana
keywords:
- grafana
@@ -23,6 +22,7 @@ labels:
menuTitle: Azure Monitor
title: Azure Monitor data source
weight: 300
last_reviewed: 2025-12-04
refs:
configure-grafana-feature-toggles:
- pattern: /docs/grafana/
@@ -49,6 +49,11 @@ refs:
destination: /docs/grafana/<GRAFANA_VERSION>/dashboards/build-dashboards/
- pattern: /docs/grafana-cloud/
destination: /docs/grafana/<GRAFANA_VERSION>/dashboards/build-dashboards/
transform-data:
- pattern: /docs/grafana/
destination: /docs/grafana/<GRAFANA_VERSION>/panels-visualizations/query-transform-data/transform-data/
- pattern: /docs/grafana-cloud/
destination: /docs/grafana/<GRAFANA_VERSION>/panels-visualizations/query-transform-data/transform-data/
configure-grafana-azure:
- pattern: /docs/grafana/
destination: /docs/grafana/<GRAFANA_VERSION>/setup-grafana/configure-grafana/#azure
@@ -63,295 +68,98 @@ refs:
- pattern: /docs/grafana/
destination: /docs/grafana/<GRAFANA_VERSION>/setup-grafana/configure-access/configure-authentication/azuread/#enable-azure-ad-oauth-in-grafana
- pattern: /docs/grafana-cloud/
destination: /docs/grafana/<GRAFANA_VERSION>/setup-grafana/configure-access/configure-authentication/azuread/#enable-azure-ad-oauth-in-grafana
destination: /docs/grafana/<GRAFANA_VERSION>/setup-grafana/configure-security/configure-authentication/azuread/#enable-azure-ad-oauth-in-grafana
configure-azure-monitor:
- pattern: /docs/grafana/
destination: /docs/grafana/<GRAFANA_VERSION>/datasources/azure-monitor/configure/
- pattern: /docs/grafana-cloud/
destination: /docs/grafana/<GRAFANA_VERSION>/datasources/azure-monitor/configure/
query-editor-azure-monitor:
- pattern: /docs/grafana/
destination: /docs/grafana/<GRAFANA_VERSION>/datasources/azure-monitor/query-editor/
- pattern: /docs/grafana-cloud/
destination: /docs/grafana/<GRAFANA_VERSION>/datasources/azure-monitor/query-editor/
template-variables-azure-monitor:
- pattern: /docs/grafana/
destination: /docs/grafana/<GRAFANA_VERSION>/datasources/azure-monitor/template-variables/
- pattern: /docs/grafana-cloud/
destination: /docs/grafana/<GRAFANA_VERSION>/datasources/azure-monitor/template-variables/
alerting-azure-monitor:
- pattern: /docs/grafana/
destination: /docs/grafana/<GRAFANA_VERSION>/datasources/azure-monitor/alerting/
- pattern: /docs/grafana-cloud/
destination: /docs/grafana/<GRAFANA_VERSION>/datasources/azure-monitor/alerting/
troubleshooting-azure-monitor:
- pattern: /docs/grafana/
destination: /docs/grafana/<GRAFANA_VERSION>/datasources/azure-monitor/troubleshooting/
- pattern: /docs/grafana-cloud/
destination: /docs/grafana/<GRAFANA_VERSION>/datasources/azure-monitor/troubleshooting/
annotations-azure-monitor:
- pattern: /docs/grafana/
destination: /docs/grafana/<GRAFANA_VERSION>/datasources/azure-monitor/annotations/
- pattern: /docs/grafana-cloud/
destination: /docs/grafana/<GRAFANA_VERSION>/datasources/azure-monitor/annotations/
---
# Azure Monitor data source
Grafana ships with built-in support for Azure Monitor, the Azure service to maximize the availability and performance of applications and services in the Azure Cloud.
This topic explains configuring and querying specific to the Azure Monitor data source.
The Azure Monitor data source plugin allows you to query and visualize data from Azure Monitor, the Azure service to maximize the availability and performance of applications and services in the Azure Cloud.
For instructions on how to add a data source to Grafana, refer to the [administration documentation](ref:data-source-management).
Only users with the organization administrator role can add data sources.
## Supported Azure clouds
Once you've added the Azure Monitor data source, you can [configure it](#configure-the-data-source) so that your Grafana instance's users can create queries in its [query editor](query-editor/) when they [build dashboards](ref:build-dashboards) and use [Explore](ref:explore).
The Azure Monitor data source supports the following Azure cloud environments:
The Azure Monitor data source supports visualizing data from four Azure services:
- **Azure** - Azure public cloud (default)
- **Azure US Government** - Azure Government cloud
- **Azure China** - Azure China cloud operated by 21Vianet
- **Azure Monitor Metrics:** Collect numeric data from resources in your Azure account.
- **Azure Monitor Logs:** Collect log and performance data from your Azure account, and query using the Kusto Query Language (KQL).
- **Azure Resource Graph:** Query your Azure resources across subscriptions.
- **Azure Monitor Application Insights:** Collect trace logging data and other application performance metrics.
## Supported Azure services
## Configure the data source
The Azure Monitor data source supports the following Azure services:
**To access the data source configuration page:**
| Service | Description |
| ------------------------------- | --------------------------------------------------------------------------------------------------------------------------- |
| **Azure Monitor Metrics** | Collect numeric data from resources in your Azure account. Supports dimensions, aggregations, and time grain configuration. |
| **Azure Monitor Logs** | Collect log and performance data from your Azure account using the Kusto Query Language (KQL). |
| **Azure Resource Graph** | Query your Azure resources across subscriptions using KQL. Useful for inventory, compliance, and resource management. |
| **Application Insights Traces** | Collect distributed trace data and correlate requests across your application components. |
1. Click **Connections** in the left-side menu.
1. Under Your connections, click **Data sources**.
1. Enter `Azure Monitor` in the search bar.
1. Click **Azure Monitor**.
## Get started
The **Settings** tab of the data source is displayed.
The following documents will help you get started with the Azure Monitor data source:
### Configure Azure Active Directory (AD) authentication
- [Configure the Azure Monitor data source](ref:configure-azure-monitor) - Set up authentication and connect to Azure
- [Azure Monitor query editor](ref:query-editor-azure-monitor) - Create and edit queries for Metrics, Logs, Traces, and Resource Graph
- [Template variables](ref:template-variables-azure-monitor) - Create dynamic dashboards with Azure Monitor variables
- [Alerting](ref:alerting-azure-monitor) - Create alert rules using Azure Monitor data
- [Troubleshooting](ref:troubleshooting-azure-monitor) - Solve common configuration and query errors
You must create an app registration and service principal in Azure AD to authenticate the data source.
For configuration details, refer to the [Azure documentation for service principals](https://docs.microsoft.com/en-us/azure/active-directory/develop/howto-create-service-principal-portal#get-tenant-and-app-id-values-for-signing-in).
## Additional features
The app registration you create must have the `Reader` role assigned on the subscription.
For more information, refer to [Azure documentation for role assignments](https://docs.microsoft.com/en-us/azure/role-based-access-control/role-assignments-portal?tabs=current).
After you have configured the Azure Monitor data source, you can:
If you host Grafana in Azure, such as in App Service or Azure Virtual Machines, you can configure the Azure Monitor data source to use Managed Identity for secure authentication without entering credentials into Grafana.
For details, refer to [Configuring using Managed Identity](#configuring-using-managed-identity).
- Add [Annotations](ref:annotations-azure-monitor) to overlay Azure log events on your graphs.
- Configure and use [Template variables](ref:template-variables-azure-monitor) for dynamic dashboards.
- Add [Transformations](ref:transform-data) to manipulate query results.
- Set up [Alerting](ref:alerting-azure-monitor) and recording rules using Metrics, Logs, Traces, and Resource Graph queries.
- Use [Explore](ref:explore) to investigate your Azure data without building a dashboard.
You can configure the Azure Monitor data source to use Workload Identity for secure authentication without entering credentials into Grafana if you host Grafana in a Kubernetes environment, such as AKS, and require access to Azure resources.
For details, refer to [Configuring using Workload Identity](#configuring-using-workload-identity).
## Pre-built dashboards
| Name | Description |
| --------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
| **Authentication** | Enables Managed Identity. Selecting Managed Identity hides many of the other fields. For details, see [Configuring using Managed Identity](#configuring-using-managed-identity). |
| **Azure Cloud** | Sets the national cloud for your Azure account. For most users, this is the default "Azure". For details, see the [Azure documentation](https://docs.microsoft.com/en-us/azure/active-directory/develop/authentication-national-cloud). |
| **Directory (tenant) ID** | Sets the directory/tenant ID for the Azure AD app registration to use for authentication. For details, see the [Azure tenant and app ID docs](https://docs.microsoft.com/en-us/azure/active-directory/develop/howto-create-service-principal-portal#get-tenant-and-app-id-values-for-signing-in). |
| **Application (client) ID** | Sets the application/client ID for the Azure AD app registration to use for authentication. |
| **Client secret** | Sets the application client secret for the Azure AD app registration to use for authentication. For details, see the [Azure application secret docs](https://docs.microsoft.com/en-us/azure/active-directory/develop/howto-create-service-principal-portal#option-2-create-a-new-application-secret). |
| **Default subscription** | _(Optional)_ Sets a default subscription for template variables to use. |
| **Enable Basic Logs** | Allows this data source to execute queries against [Basic Logs tables](https://learn.microsoft.com/en-us/azure/azure-monitor/logs/basic-logs-query?tabs=portal-1) in supported Log Analytics Workspaces. These queries may incur additional costs. |
The Azure Monitor plugin includes the following pre-built dashboards:
### Provision the data source
- **Azure Monitor Overview** - Displays key metrics across your Azure subscriptions and resources.
- **Azure Storage Account** - Shows storage account metrics including availability, latency, and transactions.
You can define and configure the data source in YAML files as part of Grafana's provisioning system.
For more information about provisioning, and for available configuration options, refer to [Provisioning Grafana](ref:provisioning-data-sources).
To import a pre-built dashboard:
#### Provisioning examples
1. Go to **Connections** > **Data sources**.
1. Select your Azure Monitor data source.
1. Click the **Dashboards** tab.
1. Click **Import** next to the dashboard you want to use.
**Azure AD App Registration (client secret):**
## Related resources
```yaml
apiVersion: 1 # config file version
datasources:
- name: Azure Monitor
type: grafana-azure-monitor-datasource
access: proxy
jsonData:
azureAuthType: clientsecret
cloudName: azuremonitor # See table below
tenantId: <tenant-id>
clientId: <client-id>
subscriptionId: <subscription-id> # Optional, default subscription
secureJsonData:
clientSecret: <client-secret>
version: 1
```
**Managed Identity:**
```yaml
apiVersion: 1 # config file version
datasources:
- name: Azure Monitor
type: grafana-azure-monitor-datasource
access: proxy
jsonData:
azureAuthType: msi
subscriptionId: <subscription-id> # Optional, default subscription
version: 1
```
**Workload Identity:**
```yaml
apiVersion: 1 # config file version
datasources:
- name: Azure Monitor
type: grafana-azure-monitor-datasource
access: proxy
jsonData:
azureAuthType: workloadidentity
subscriptionId: <subscription-id> # Optional, default subscription
version: 1
```
**Current User:**
{{< admonition type="note" >}}
The `oauthPassThru` property is required for current user authentication to function.
Additionally, `disableGrafanaCache` is necessary to prevent the data source returning cached responses for resources users don't have access to.
{{< /admonition >}}
```yaml
apiVersion: 1 # config file version
datasources:
- name: Azure Monitor
type: grafana-azure-monitor-datasource
access: proxy
jsonData:
azureAuthType: currentuser
oauthPassThru: true
disableGrafanaCache: true
subscriptionId: <subscription-id> # Optional, default subscription
version: 1
```
#### Supported cloud names
| Azure Cloud | `cloudName` Value |
| ------------------------------------ | -------------------------- |
| **Microsoft Azure public cloud** | `azuremonitor` (_Default_) |
| **Microsoft Chinese national cloud** | `chinaazuremonitor` |
| **US Government cloud** | `govazuremonitor` |
{{< admonition type="note" >}}
Cloud names for current user authentication differ to the `cloudName` values in the preceding table.
The public cloud name is `AzureCloud`, the Chinese national cloud name is `AzureChinaCloud`, and the US Government cloud name is `AzureUSGovernment`.
{{< /admonition >}}
### Configure Managed Identity
{{< admonition type="note" >}}
Managed Identity is available only in [Azure Managed Grafana](https://azure.microsoft.com/en-us/products/managed-grafana) or Grafana OSS/Enterprise when deployed in Azure. It is not available in Grafana Cloud.
{{< /admonition >}}
You can use managed identity to configure Azure Monitor in Grafana if you host Grafana in Azure (such as an App Service or with Azure Virtual Machines) and have managed identity enabled on your VM.
This lets you securely authenticate data sources without manually configuring credentials via Azure AD App Registrations.
For details on Azure managed identities, refer to the [Azure documentation](https://docs.microsoft.com/en-us/azure/active-directory/managed-identities-azure-resources/overview).
**To enable managed identity for Grafana:**
1. Set the `managed_identity_enabled` flag in the `[azure]` section of the [Grafana server configuration](ref:configure-grafana-azure).
```ini
[azure]
managed_identity_enabled = true
```
2. In the Azure Monitor data source configuration, set **Authentication** to **Managed Identity**.
This hides the directory ID, application ID, and client secret fields, and the data source uses managed identity to authenticate to Azure Monitor Metrics and Logs, and Azure Resource Graph.
{{< figure src="/media/docs/grafana/data-sources/screenshot-managed-identity-2.png" max-width="800px" class="docs-image--no-shadow" caption="Azure Monitor screenshot showing Managed Identity authentication" >}}
3. You can set the `managed_identity_client_id` field in the `[azure]` section of the [Grafana server configuration](ref:configure-grafana-azure) to allow a user-assigned managed identity to be used instead of the default system-assigned identity.
```ini
[azure]
managed_identity_enabled = true
managed_identity_client_id = USER_ASSIGNED_IDENTITY_CLIENT_ID
```
### Configure Workload Identity
You can use workload identity to configure Azure Monitor in Grafana if you host Grafana in a Kubernetes environment, such as AKS, in conjunction with managed identities.
This lets you securely authenticate data sources without manually configuring credentials via Azure AD App Registrations.
For details on workload identity, refer to the [Azure workload identity documentation](https://azure.github.io/azure-workload-identity/docs/).
**To enable workload identity for Grafana:**
1. Set the `workload_identity_enabled` flag in the `[azure]` section of the [Grafana server configuration](ref:configure-grafana-azure).
```ini
[azure]
workload_identity_enabled = true
```
2. In the Azure Monitor data source configuration, set **Authentication** to **Workload Identity**.
This hides the directory ID, application ID, and client secret fields, and the data source uses workload identity to authenticate to Azure Monitor Metrics and Logs, and Azure Resource Graph.
{{< figure src="/media/docs/grafana/data-sources/screenshot-workload-identity.png" max-width="800px" class="docs-image--no-shadow" caption="Azure Monitor screenshot showing Workload Identity authentication" >}}
3. There are additional configuration variables that can control the authentication method.`workload_identity_tenant_id` represents the Azure AD tenant that contains the managed identity, `workload_identity_client_id` represents the client ID of the managed identity if it differs from the default client ID, `workload_identity_token_file` represents the path to the token file. Refer to the [documentation](https://azure.github.io/azure-workload-identity/docs/) for more information on what values these variables should use, if any.
```ini
[azure]
workload_identity_enabled = true
workload_identity_tenant_id = IDENTITY_TENANT_ID
workload_identity_client_id = IDENTITY_CLIENT_ID
workload_identity_token_file = TOKEN_FILE_PATH
```
### Configure Current User authentication
{{< admonition type="note" >}}
Current user authentication is an [experimental feature](/docs/release-life-cycle). Engineering and on-call support is not available. Documentation is either limited or not provided outside of code comments. No SLA is provided. Contact Grafana Support to enable this feature in Grafana Cloud. Aspects of Grafana may not work as expected when using this authentication method.
{{< /admonition >}}
If your Grafana instance is configured with Azure Entra (formerly Active Directory) authentication for login, this authentication method can be used to forward the currently logged in user's credentials to the data source. The users credentials will then be used when requesting data from the data source. For details on how to configure your Grafana instance using Azure Entra refer to the [documentation](ref:configure-grafana-azure-auth).
{{< admonition type="note" >}}
Additional configuration is required to ensure that the App Registration used to login a user via Azure provides an access token with the permissions required by the data source.
The App Registration must be configured to issue both **Access Tokens** and **ID Tokens**.
1. In the Azure Portal, open the App Registration that requires configuration.
2. Select **Authentication** in the side menu.
3. Under **Implicit grant and hybrid flows** check both the **Access tokens** and **ID tokens** boxes.
4. Save the changes to ensure the App Registration is updated.
The App Registration must also be configured with additional **API Permissions** to provide authenticated users with access to the APIs utilised by the data source.
1. In the Azure Portal, open the App Registration that requires configuration.
1. Select **API Permissions** in the side menu.
1. Ensure the `openid`, `profile`, `email`, and `offline_access` permissions are present under the **Microsoft Graph** section. If not, they must be added.
1. Select **Add a permission** and choose the following permissions. They must be added individually. Refer to the [Azure documentation](https://learn.microsoft.com/en-us/entra/identity-platform/quickstart-configure-app-access-web-apis) for more information.
- Select **Azure Service Management** > **Delegated permissions** > `user_impersonation` > **Add permissions**
- Select **APIs my organization uses** > Search for **Log Analytics API** and select it > **Delegated permissions** > `Date.Read` > **Add permissions**
Once all permissions have been added, the Azure authentication section in Grafana must be updated. The `scopes` section must be updated to include the `.default` scope to ensure that a token with access to all APIs declared on the App Registration is requested by Grafana. Once updated the scopes value should equal: `.default openid email profile`.
{{< /admonition >}}
This method of authentication doesn't inherently support all backend functionality as a user's credentials won't be in scope.
Affected functionality includes alerting, reporting, and recorded queries.
In order to support backend queries when using a data source configured with current user authentication, you can configure service credentials.
Also, note that query and resource caching is disabled by default for data sources using current user authentication.
{{< admonition type="note" >}}
To configure fallback service credentials the [feature toggle](ref:configure-grafana-feature-toggles) `idForwarding` must be set to `true` and `user_identity_fallback_credentials_enabled` must be enabled in the [Azure configuration section](ref:configure-grafana-azure) (enabled by default when `user_identity_enabled` is set to `true`).
{{< /admonition >}}
Permissions for fallback credentials may need to be broad to appropriately support backend functionality.
For example, an alerting query created by a user is dependent on their permissions.
If a user tries to create an alert for a resource that the fallback credentials can't access, the alert will fail.
**To enable current user authentication for Grafana:**
1. Set the `user_identity_enabled` flag in the `[azure]` section of the [Grafana server configuration](ref:configure-grafana-azure).
By default this will also enable fallback service credentials.
If you want to disable service credentials at the instance level set `user_identity_fallback_credentials_enabled` to false.
```ini
[azure]
user_identity_enabled = true
```
1. In the Azure Monitor data source configuration, set **Authentication** to **Current User**.
If fallback service credentials are enabled at the instance level, an additional configuration section is visible that you can use to enable or disable using service credentials for this data source.
{{< figure src="/media/docs/grafana/data-sources/screenshot-current-user.png" max-width="800px" class="docs-image--no-shadow" caption="Azure Monitor screenshot showing Current User authentication" >}}
1. If you want backend functionality to work with this data source, enable service credentials and configure the data source using the most applicable credentials for your circumstances.
## Query the data source
The Azure Monitor data source can query data from Azure Monitor Metrics and Logs, the Azure Resource Graph, and Application Insights Traces. Each source has its own specialized query editor.
For details, see the [query editor documentation](query-editor/).
## Use template variables
Instead of hard-coding details such as server, application, and sensor names in metric queries, you can use variables.
Grafana lists these variables in dropdown select boxes at the top of the dashboard to help you change the data displayed in your dashboard.
Grafana refers to such variables as template variables.
For details, see the [template variables documentation](template-variables/).
## Application Insights and Insights Analytics (removed)
Until Grafana v8.0, you could query the same Azure Application Insights data using Application Insights and Insights Analytics.
These queries were deprecated in Grafana v7.5. In Grafana v8.0, Application Insights and Insights Analytics were made read-only in favor of querying this data through Metrics and Logs. These query methods were completely removed in Grafana v9.0.
If you're upgrading from a Grafana version prior to v9.0 and relied on Application Insights and Analytics queries, refer to the [Grafana v9.0 documentation](/docs/grafana/v9.0/datasources/azuremonitor/deprecated-application-insights/) for help migrating these queries to Metrics and Logs queries.
- [Azure Monitor documentation](https://docs.microsoft.com/en-us/azure/azure-monitor/)
- [Kusto Query Language (KQL) reference](https://docs.microsoft.com/en-us/azure/data-explorer/kusto/query/)
- [Grafana community forum](https://community.grafana.com/)
@@ -0,0 +1,262 @@
---
aliases:
- ../../data-sources/azure-monitor/alerting/
description: Set up alerts using Azure Monitor data in Grafana
keywords:
- grafana
- azure
- monitor
- alerting
- alerts
- metrics
- logs
labels:
products:
- cloud
- enterprise
- oss
menuTitle: Alerting
title: Azure Monitor alerting
weight: 500
refs:
alerting:
- pattern: /docs/grafana/
destination: /docs/grafana/<GRAFANA_VERSION>/alerting/
- pattern: /docs/grafana-cloud/
destination: /docs/grafana/<GRAFANA_VERSION>/alerting/
alerting-fundamentals:
- pattern: /docs/grafana/
destination: /docs/grafana/<GRAFANA_VERSION>/alerting/fundamentals/
- pattern: /docs/grafana-cloud/
destination: /docs/grafana/<GRAFANA_VERSION>/alerting/fundamentals/
create-alert-rule:
- pattern: /docs/grafana/
destination: /docs/grafana/<GRAFANA_VERSION>/alerting/alerting-rules/create-grafana-managed-rule/
- pattern: /docs/grafana-cloud/
destination: /docs/grafana/<GRAFANA_VERSION>/alerting/alerting-rules/create-grafana-managed-rule/
grafana-managed-recording-rules:
- pattern: /docs/grafana/
destination: /docs/grafana/<GRAFANA_VERSION>/alerting/alerting-rules/create-recording-rules/create-grafana-managed-recording-rules/
- pattern: /docs/grafana-cloud/
destination: /docs/grafana/<GRAFANA_VERSION>/alerting/alerting-rules/create-recording-rules/create-grafana-managed-recording-rules/
configure-azure-monitor:
- pattern: /docs/grafana/
destination: /docs/grafana/<GRAFANA_VERSION>/datasources/azure-monitor/configure/
- pattern: /docs/grafana-cloud/
destination: /docs/grafana/<GRAFANA_VERSION>/datasources/azure-monitor/configure/
query-editor:
- pattern: /docs/grafana/
destination: /docs/grafana/<GRAFANA_VERSION>/datasources/azure-monitor/query-editor/
- pattern: /docs/grafana-cloud/
destination: /docs/grafana/<GRAFANA_VERSION>/datasources/azure-monitor/query-editor/
troubleshoot:
- pattern: /docs/grafana/
destination: /docs/grafana/<GRAFANA_VERSION>/datasources/azure-monitor/troubleshooting/
- pattern: /docs/grafana-cloud/
destination: /docs/grafana/<GRAFANA_VERSION>/datasources/azure-monitor/troubleshooting/
---
# Azure Monitor alerting
The Azure Monitor data source supports [Grafana Alerting](ref:alerting) and [Grafana-managed recording rules](ref:grafana-managed-recording-rules), allowing you to create alert rules based on Azure metrics, logs, traces, and resource data. You can monitor your Azure environment and receive notifications when specific conditions are met.
## Before you begin
- Ensure you have the appropriate permissions to create alert rules in Grafana.
- Verify your Azure Monitor data source is configured and working correctly.
- Familiarize yourself with [Grafana Alerting concepts](ref:alerting-fundamentals).
- **Important**: Verify your data source uses a supported authentication method. Refer to [Authentication requirements](#authentication-requirements).
## Supported query types for alerting
All Azure Monitor query types support alerting and recording rules:
| Query type | Use case | Notes |
| -------------------- | -------------------------------------------------- | -------------------------------------------------------- |
| Metrics | Threshold-based alerts on Azure resource metrics | Best suited for alerting; returns time-series data |
| Logs | Alert on log patterns, error counts, or thresholds | Use KQL to aggregate data into numeric values |
| Azure Resource Graph | Alert on resource state or configuration changes | Use count aggregations to return numeric data |
| Traces | Alert on trace data and application performance | Use aggregations to return numeric values for evaluation |
{{< admonition type="note" >}}
Alert queries must return numeric data that Grafana can evaluate against a threshold. Queries that return only text or non-numeric data cannot be used directly for alerting.
{{< /admonition >}}
## Authentication requirements
Alerting and recording rules run as background processes without a user context. This means they require service-level authentication and don't work with all authentication methods.
| Authentication method | Supported |
| -------------------------------- | ------------------------------------- |
| App Registration (client secret) | ✓ |
| Managed Identity | ✓ |
| Workload Identity | ✓ |
| Current User | ✓ (with fallback service credentials) |
{{< admonition type="note" >}}
If you use **Current User** authentication, you must configure **fallback service credentials** for alerting and recording rules to function. User credentials aren't available for background operations, so Grafana uses the fallback credentials instead. Refer to [configure the data source](ref:configure-azure-monitor) for details on setting up fallback credentials.
{{< /admonition >}}
## Create an alert rule
To create an alert rule using Azure Monitor data:
1. Go to **Alerting** > **Alert rules**.
1. Click **New alert rule**.
1. Enter a name for your alert rule.
1. In the **Define query and alert condition** section:
- Select your Azure Monitor data source.
- Configure your query (for example, a Metrics query for CPU usage or a Logs query using KQL).
- Add a **Reduce** expression if your query returns multiple series.
- Add a **Threshold** expression to define the alert condition.
1. Configure the **Set evaluation behavior**:
- Select or create a folder and evaluation group.
- Set the evaluation interval (how often the alert is checked).
- Set the pending period (how long the condition must be true before firing).
1. Add labels and annotations to provide context for notifications.
1. Click **Save rule**.
For detailed instructions, refer to [Create a Grafana-managed alert rule](ref:create-alert-rule).
## Example: VM CPU usage alert
This example creates an alert that fires when virtual machine CPU usage exceeds 80%:
1. Create a new alert rule.
1. Configure the query:
- **Service**: Metrics
- **Resource**: Select your virtual machine
- **Metric namespace**: `Microsoft.Compute/virtualMachines`
- **Metric**: `Percentage CPU`
- **Aggregation**: `Average`
1. Add expressions:
- **Reduce**: Last (to get the most recent data point)
- **Threshold**: Is above 80
1. Set evaluation to run every 1 minute with a 5-minute pending period.
1. Save the rule.
## Example: Error log count alert
This example alerts when error logs exceed a threshold using a KQL query:
1. Create a new alert rule.
1. Configure the query:
- **Service**: Logs
- **Resource**: Select your Log Analytics workspace
- **Query**:
```kusto
AppExceptions
| where TimeGenerated > ago(5m)
| summarize ErrorCount = count() by bin(TimeGenerated, 1m)
```
1. Add expressions:
- **Reduce**: Max (to get the highest count in the period)
- **Threshold**: Is above 10
1. Set evaluation to run every 5 minutes.
1. Save the rule.
## Example: Resource count alert
This example alerts when the number of running virtual machines drops below a threshold using Azure Resource Graph:
1. Create a new alert rule.
1. Configure the query:
- **Service**: Azure Resource Graph
- **Subscriptions**: Select your subscriptions
- **Query**:
```kusto
resources
| where type == "microsoft.compute/virtualmachines"
| where properties.extended.instanceView.powerState.displayStatus == "VM running"
| summarize RunningVMs = count()
```
1. Add expressions:
- **Reduce**: Last
- **Threshold**: Is below 3
1. Set evaluation to run every 5 minutes.
1. Save the rule.
## Best practices
Follow these recommendations to create reliable and efficient alerts with Azure Monitor data.
### Use appropriate query intervals
- Set the alert evaluation interval to be greater than or equal to the minimum data resolution from Azure Monitor.
- Azure Monitor Metrics typically have 1-minute granularity at minimum.
- Avoid very short intervals (less than 1 minute) as they may cause evaluation timeouts or miss data points.
### Reduce multiple series
When your Azure Monitor query returns multiple time series (for example, CPU usage across multiple VMs), use the **Reduce** expression to aggregate them:
- **Last**: Use the most recent value
- **Mean**: Average across all series
- **Max/Min**: Use the highest or lowest value
- **Sum**: Total across all series
### Optimize Log Analytics queries
For Logs queries used in alerting:
- Use `summarize` to aggregate data into numeric values.
- Include appropriate time filters using `ago()` or `TimeGenerated`.
- Avoid returning large result sets; aggregate data in the query.
- Test queries in Explore before using them in alert rules.
### Handle no data conditions
Configure what happens when no data is returned:
1. In the alert rule, find **Configure no data and error handling**.
1. Choose an appropriate action:
- **No Data**: Keep the alert in its current state
- **Alerting**: Treat no data as an alert condition
- **OK**: Treat no data as a healthy state
### Test queries before alerting
Always verify your query returns expected data before creating an alert:
1. Go to **Explore**.
1. Select your Azure Monitor data source.
1. Run the query you plan to use for alerting.
1. Confirm the data format and values are correct.
1. Verify the query returns numeric data suitable for threshold evaluation.
## Troubleshooting
If your Azure Monitor alerts aren't working as expected, use the following sections to diagnose and resolve common issues.
### Alerts not firing
- Verify the data source uses a supported authentication method. If using Current User authentication, ensure fallback service credentials are configured.
- Check that the query returns numeric data in Explore.
- Ensure the evaluation interval allows enough time for data to be available.
- Review the alert rule's health and any error messages in the Alerting UI.
### Authentication errors in alert evaluation
If you see authentication errors when alerts evaluate:
- Confirm the data source is configured with App Registration, Managed Identity, Workload Identity, or Current User with fallback service credentials.
- If using App Registration, verify the client secret hasn't expired.
- If using Current User, verify that fallback service credentials are configured and valid.
- Check that the service principal has appropriate permissions on Azure resources.
### Query timeout errors
- Simplify complex KQL queries.
- Reduce the time range in Log Analytics queries.
- Add more specific filters to narrow result sets.
For additional troubleshooting help, refer to [Troubleshoot Azure Monitor](ref:troubleshoot).
## Additional resources
- [Grafana Alerting documentation](ref:alerting)
- [Create alert rules](ref:create-alert-rule)
- [Azure Monitor query editor](ref:query-editor)
- [Grafana-managed recording rules](ref:grafana-managed-recording-rules)
@@ -0,0 +1,218 @@
---
aliases:
- ../../data-sources/azure-monitor/annotations/
description: Use annotations with the Azure Monitor data source in Grafana
keywords:
- grafana
- azure
- monitor
- annotations
- events
- logs
labels:
products:
- cloud
- enterprise
- oss
menuTitle: Annotations
title: Azure Monitor annotations
weight: 450
refs:
annotate-visualizations:
- pattern: /docs/grafana/
destination: /docs/grafana/<GRAFANA_VERSION>/dashboards/build-dashboards/annotate-visualizations/
- pattern: /docs/grafana-cloud/
destination: /docs/grafana/<GRAFANA_VERSION>/dashboards/build-dashboards/annotate-visualizations/
query-editor:
- pattern: /docs/grafana/
destination: /docs/grafana/<GRAFANA_VERSION>/datasources/azure-monitor/query-editor/
- pattern: /docs/grafana-cloud/
destination: /docs/grafana/<GRAFANA_VERSION>/datasources/azure-monitor/query-editor/
---
# Azure Monitor annotations
[Annotations](ref:annotate-visualizations) overlay rich event information on top of graphs. You can use Azure Monitor Log Analytics queries to create annotations that mark important events, deployments, alerts, or other significant occurrences on your dashboards.
## Before you begin
- Ensure you have configured the Azure Monitor data source.
- You need access to a Log Analytics workspace containing the data you want to use for annotations.
- Annotations use Log Analytics (KQL) queries only. Metrics, Traces, and Azure Resource Graph queries are not supported for annotations.
## Create an annotation query
To add an Azure Monitor annotation to a dashboard:
1. Open the dashboard where you want to add annotations.
1. Click **Dashboard settings** (gear icon) in the top navigation.
1. Select **Annotations** in the left menu.
1. Click **Add annotation query**.
1. Enter a **Name** for the annotation (for example, "Azure Activity" or "Deployments").
1. Select your **Azure Monitor** data source.
1. Choose the **Logs** service.
1. Select a **Resource** (Log Analytics workspace or Application Insights resource).
1. Write a KQL query that returns the annotation data.
1. Click **Apply** to save.
## Query requirements
Your KQL query should return columns that Grafana can use to create annotations:
| Column | Required | Description |
| ------------------ | ----------- | ------------------------------------------------------------------------------------------------ |
| `TimeGenerated` | Yes | The timestamp for the annotation. Grafana uses this to position the annotation on the time axis. |
| `Text` | Recommended | The annotation text displayed when you hover over or click the annotation. |
| Additional columns | Optional | Any other columns returned become annotation tags. |
{{< admonition type="note" >}}
Always include a time filter in your query to limit results to the dashboard's time range. Use the `$__timeFilter()` macro.
{{< /admonition >}}
## Annotation query examples
The following examples demonstrate common annotation use cases.
### Azure Activity Log events
Display Azure Activity Log events such as resource modifications, deployments, and administrative actions:
```kusto
AzureActivity
| where $__timeFilter(TimeGenerated)
| where Level == "Error" or Level == "Warning" or CategoryValue == "Administrative"
| project TimeGenerated, Text=OperationNameValue, Level, ResourceGroup, Caller
| order by TimeGenerated desc
| take 100
```
### Deployment events
Show deployment-related activity:
```kusto
AzureActivity
| where $__timeFilter(TimeGenerated)
| where OperationNameValue contains "deployments"
| project TimeGenerated, Text=strcat("Deployment: ", OperationNameValue), Status=ActivityStatusValue, ResourceGroup
| order by TimeGenerated desc
```
### Application Insights exceptions
Mark application exceptions as annotations:
```kusto
AppExceptions
| where $__timeFilter(TimeGenerated)
| project TimeGenerated, Text=strcat(ProblemId, ": ", OuterMessage), SeverityLevel, AppRoleName
| order by TimeGenerated desc
| take 50
```
### Custom events from Application Insights
Display custom events logged by your application:
```kusto
AppEvents
| where $__timeFilter(TimeGenerated)
| where Name == "DeploymentStarted" or Name == "DeploymentCompleted"
| project TimeGenerated, Text=Name, AppRoleName
| order by TimeGenerated desc
```
### Security alerts
Show security-related alerts:
```kusto
SecurityAlert
| where $__timeFilter(TimeGenerated)
| project TimeGenerated, Text=AlertName, Severity=AlertSeverity, Description
| order by TimeGenerated desc
| take 50
```
### Resource health events
Display resource health status changes:
```kusto
AzureActivity
| where $__timeFilter(TimeGenerated)
| where CategoryValue == "ResourceHealth"
| project TimeGenerated, Text=OperationNameValue, Status=ActivityStatusValue, ResourceId
| order by TimeGenerated desc
```
### VM start and stop events
Mark virtual machine state changes:
```kusto
AzureActivity
| where $__timeFilter(TimeGenerated)
| where OperationNameValue has_any ("start", "deallocate", "restart")
| where ResourceProviderValue == "MICROSOFT.COMPUTE"
| project TimeGenerated, Text=OperationNameValue, VM=Resource, Status=ActivityStatusValue
| order by TimeGenerated desc
```
### Autoscale events
Show autoscale operations:
```kusto
AzureActivity
| where $__timeFilter(TimeGenerated)
| where OperationNameValue contains "autoscale"
| project TimeGenerated, Text=strcat("Autoscale: ", OperationNameValue), Status=ActivityStatusValue, ResourceGroup
| order by TimeGenerated desc
```
## Customize annotation appearance
After creating an annotation query, you can customize its appearance:
| Setting | Description |
| ------------- | -------------------------------------------------------------------------------------------------------- |
| **Color** | Choose a color for the annotation markers. Use different colors to distinguish between annotation types. |
| **Show in** | Select which panels display the annotations. |
| **Filter by** | Add filters to limit when annotations appear. |
## Best practices
Follow these recommendations when creating annotations:
1. **Limit results**: Always use `take` or `limit` to restrict the number of annotations. Too many annotations can clutter your dashboard and impact performance.
2. **Use time filters**: Include `$__timeFilter()` to ensure queries only return data within the dashboard's time range.
3. **Create meaningful text**: Use `strcat()` or `project` to create descriptive annotation text that provides context at a glance.
4. **Add relevant tags**: Include columns like `ResourceGroup`, `Severity`, or `Status` that become clickable tags for filtering.
5. **Use descriptive names**: Name your annotations clearly (for example, "Production Deployments" or "Critical Alerts") so dashboard users understand what they represent.
## Troubleshoot annotations
If annotations aren't appearing as expected, try the following solutions.
### Annotations don't appear
- Verify the query returns data in the selected time range.
- Check that the query includes a `TimeGenerated` column.
- Test the query in the Azure Portal Log Analytics query editor.
- Ensure the annotation is enabled (toggle is on).
### Too many annotations
- Add more specific filters to your query.
- Use `take` to limit results.
- Narrow the time range.
### Annotations appear at wrong times
- Verify the `TimeGenerated` column contains the correct timestamp.
- Check your dashboard's timezone settings.
@@ -0,0 +1,605 @@
---
aliases:
- ../../data-sources/azure-monitor/configure/
description: Guide for configuring the Azure Monitor data source in Grafana.
keywords:
- grafana
- microsoft
- azure
- monitor
- application
- insights
- log
- analytics
- guide
labels:
products:
- cloud
- enterprise
- oss
menuTitle: Configure
title: Configure the Azure Monitor data source
weight: 200
last_reviewed: 2025-12-04
refs:
configure-grafana-feature-toggles:
- pattern: /docs/grafana/
destination: /docs/grafana/<GRAFANA_VERSION>/setup-grafana/configure-grafana/#feature_toggles
- pattern: /docs/grafana-cloud/
destination: /docs/grafana/<GRAFANA_VERSION>/setup-grafana/configure-grafana/#feature_toggles
provisioning-data-sources:
- pattern: /docs/grafana/
destination: /docs/grafana/<GRAFANA_VERSION>/administration/provisioning/#data-sources
- pattern: /docs/grafana-cloud/
destination: /docs/grafana/<GRAFANA_VERSION>/administration/provisioning/#data-sources
explore:
- pattern: /docs/grafana/
destination: /docs/grafana/<GRAFANA_VERSION>/explore/
- pattern: /docs/grafana-cloud/
destination: /docs/grafana/<GRAFANA_VERSION>/explore/
configure-grafana-azure-auth:
- pattern: /docs/grafana/
destination: /docs/grafana/<GRAFANA_VERSION>/setup-grafana/configure-security/configure-authentication/azuread/
- pattern: /docs/grafana-cloud/
destination: /docs/grafana/<GRAFANA_VERSION>/setup-grafana/configure-security/configure-authentication/azuread/
build-dashboards:
- pattern: /docs/grafana/
destination: /docs/grafana/<GRAFANA_VERSION>/dashboards/build-dashboards/
- pattern: /docs/grafana-cloud/
destination: /docs/grafana/<GRAFANA_VERSION>/dashboards/build-dashboards/
configure-grafana-azure:
- pattern: /docs/grafana/
destination: /docs/grafana/<GRAFANA_VERSION>/setup-grafana/configure-grafana/#azure
- pattern: /docs/grafana-cloud/
destination: /docs/grafana/<GRAFANA_VERSION>/setup-grafana/configure-grafana/#azure
data-source-management:
- pattern: /docs/grafana/
destination: /docs/grafana/<GRAFANA_VERSION>/administration/data-source-management/
- pattern: /docs/grafana-cloud/
destination: /docs/grafana/<GRAFANA_VERSION>/administration/data-source-management/
configure-grafana-azure-auth-scopes:
- pattern: /docs/grafana/
destination: /docs/grafana/<GRAFANA_VERSION>/setup-grafana/configure-security/configure-authentication/azuread/#enable-azure-ad-oauth-in-grafana
- pattern: /docs/grafana-cloud/
destination: /docs/grafana/<GRAFANA_VERSION>/setup-grafana/configure-security/configure-authentication/azuread/#enable-azure-ad-oauth-in-grafana
data-sources:
- pattern: /docs/grafana/
destination: /docs/grafana/<GRAFANA_VERSION>/datasources/
- pattern: /docs/grafana-cloud/
destination: /docs/grafana/<GRAFANA_VERSION>/datasources/
private-data-source-connect:
- pattern: /docs/grafana/
destination: docs/grafana-cloud/connect-externally-hosted/private-data-source-connect/
- pattern: /docs/grafana-cloud/
destination: docs/grafana-cloud/connect-externally-hosted/private-data-source-connect/
configure-pdc:
- pattern: /docs/grafana/
destination: /docs/grafana-cloud/connect-externally-hosted/private-data-source-connect/configure-pdc/#configure-grafana-private-data-source-connect-pdc
- pattern: /docs/grafana-cloud/
destination: /docs/grafana-cloud/connect-externally-hosted/private-data-source-connect/configure-pdc/#configure-grafana-private-data-source-connect-pdc
---
# Configure the Azure Monitor data source
This document explains how to configure the Azure Monitor data source and the available configuration options.
For general information about data sources, refer to [Grafana data sources](ref:data-sources) and [Data source management](ref:data-source-management).
## Before you begin
Before configuring the Azure Monitor data source, ensure you have the following:
- **Grafana permissions:** You must have the `Organization administrator` role to configure data sources.
Organization administrators can also [configure the data source via YAML](#provision-the-data-source) with the Grafana provisioning system or [using Terraform](#configure-with-terraform).
- **Azure prerequisites:** Depending on your chosen authentication method, you may need:
- A Microsoft Entra ID (formerly Azure AD) app registration with a service principal (for App Registration authentication)
- A Managed Identity enabled on your Azure VM or App Service (for Managed Identity authentication)
- Workload identity configured in your Kubernetes cluster (for Workload Identity authentication)
- Microsoft Entra ID authentication configured for Grafana login (for Current User authentication)
{{< admonition type="note" >}}
**Grafana Cloud users:** Managed Identity and Workload Identity authentication methods are not available in Grafana Cloud because they require Grafana to run on your Azure infrastructure. Use **App Registration** authentication instead.
{{< /admonition >}}
- **Azure RBAC permissions:** The identity used to authenticate must have the `Reader` role on the Azure subscription containing the resources you want to monitor.
For Log Analytics queries, the identity also needs appropriate permissions on the Log Analytics workspaces you want to query.
Refer to the [Azure documentation for role assignments](https://docs.microsoft.com/en-us/azure/role-based-access-control/role-assignments-portal?tabs=current).
{{< admonition type="note" >}}
The Azure Monitor data source plugin is built into Grafana. No additional installation is required.
{{< /admonition >}}
## Add the data source
To add the Azure Monitor data source:
1. Click **Connections** in the left-side menu.
1. Click **Add new connection**.
1. Type `Azure Monitor` in the search bar.
1. Select **Azure Monitor**.
1. Click **Add new data source** in the upper right.
You're taken to the **Settings** tab where you can configure the data source.
## Choose an authentication method
The Azure Monitor data source supports four authentication methods. Choose based on where Grafana is hosted and your security requirements:
| Authentication method | Best for | Requirements |
| --------------------- | ------------------------------------------ | -------------------------------------------------------------- |
| **App Registration** | Any Grafana deployment | Microsoft Entra ID app registration with client secret |
| **Managed Identity** | Grafana hosted in Azure (VMs, App Service) | Managed identity enabled on the Azure resource |
| **Workload Identity** | Grafana in Kubernetes (AKS) | Workload identity federation configured |
| **Current User** | User-level access control | Microsoft Entra ID authentication configured for Grafana login |
## Configure authentication
Select one of the following authentication methods and complete the configuration.
### App Registration
Use a Microsoft Entra ID app registration (service principal) to authenticate. This method works with any Grafana deployment.
#### App Registration prerequisites
1. Create an app registration in Microsoft Entra ID.
Refer to the [Azure documentation for creating a service principal](https://docs.microsoft.com/en-us/azure/active-directory/develop/howto-create-service-principal-portal#get-tenant-and-app-id-values-for-signing-in).
1. Create a client secret for the app registration.
Refer to the [Azure documentation for creating a client secret](https://docs.microsoft.com/en-us/azure/active-directory/develop/howto-create-service-principal-portal#option-2-create-a-new-application-secret).
1. Assign the `Reader` role to the app registration on the subscription or resources you want to monitor.
Refer to the [Azure documentation for role assignments](https://docs.microsoft.com/en-us/azure/role-based-access-control/role-assignments-portal?tabs=current).
#### App Registration UI configuration
| Setting | Description |
| --------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------ |
| **Authentication** | Select **App Registration**. |
| **Azure Cloud** | The Azure environment to connect to. Select **Azure** for the public cloud, or choose Azure Government or Azure China for national clouds. |
| **Directory (tenant) ID** | The GUID that identifies your Microsoft Entra ID tenant. |
| **Application (client) ID** | The GUID for the app registration you created. |
| **Client secret** | The secret key for the app registration. Keep this secure and rotate periodically. |
| **Default Subscription** | Click **Load Subscriptions** to populate available subscriptions, then select your default. |
#### Provision App Registration with YAML
```yaml
apiVersion: 1
datasources:
- name: Azure Monitor
type: grafana-azure-monitor-datasource
access: proxy
jsonData:
azureAuthType: clientsecret
cloudName: azuremonitor # See supported cloud names below
tenantId: <tenant-id>
clientId: <client-id>
subscriptionId: <subscription-id> # Optional, default subscription
secureJsonData:
clientSecret: <client-secret>
version: 1
```
### Managed Identity
Use Azure Managed Identity for secure, credential-free authentication when Grafana is hosted in Azure.
{{< admonition type="note" >}}
Managed Identity is available in [Azure Managed Grafana](https://azure.microsoft.com/en-us/products/managed-grafana) or self-hosted Grafana deployed in Azure. It is not available in Grafana Cloud.
{{< /admonition >}}
#### Managed Identity prerequisites
- Grafana must be hosted in Azure (App Service, Azure VMs, or Azure Managed Grafana).
- Managed identity must be enabled on the Azure resource hosting Grafana.
- The managed identity must have the `Reader` role on the subscription or resources you want to monitor.
For details on Azure managed identities, refer to the [Azure documentation](https://docs.microsoft.com/en-us/azure/active-directory/managed-identities-azure-resources/overview).
#### Managed Identity Grafana server configuration
Enable managed identity in the Grafana server configuration:
```ini
[azure]
managed_identity_enabled = true
```
To use a user-assigned managed identity instead of the system-assigned identity, also set:
```ini
[azure]
managed_identity_enabled = true
managed_identity_client_id = <USER_ASSIGNED_IDENTITY_CLIENT_ID>
```
Refer to [Grafana Azure configuration](ref:configure-grafana-azure) for more details.
#### Managed Identity UI configuration
| Setting | Description |
| ------------------------ | --------------------------------------------------------------------------------------------------- |
| **Authentication** | Select **Managed Identity**. The directory ID, application ID, and client secret fields are hidden. |
| **Default Subscription** | Click **Load Subscriptions** to populate available subscriptions, then select your default. |
{{< figure src="/media/docs/grafana/data-sources/screenshot-managed-identity-2.png" max-width="800px" class="docs-image--no-shadow" caption="Azure Monitor data source configured with Managed Identity" >}}
#### Provision Managed Identity with YAML
```yaml
apiVersion: 1
datasources:
- name: Azure Monitor
type: grafana-azure-monitor-datasource
access: proxy
jsonData:
azureAuthType: msi
subscriptionId: <subscription-id> # Optional, default subscription
version: 1
```
### Workload Identity
Use Azure Workload Identity for secure authentication in Kubernetes environments like AKS.
#### Workload Identity prerequisites
- Grafana must be running in a Kubernetes environment with workload identity federation configured.
- The workload identity must have the `Reader` role on the subscription or resources you want to monitor.
For details, refer to the [Azure workload identity documentation](https://azure.github.io/azure-workload-identity/docs/).
#### Workload Identity Grafana server configuration
Enable workload identity in the Grafana server configuration:
```ini
[azure]
workload_identity_enabled = true
```
Optional configuration variables:
```ini
[azure]
workload_identity_enabled = true
workload_identity_tenant_id = <IDENTITY_TENANT_ID> # Microsoft Entra ID tenant containing the managed identity
workload_identity_client_id = <IDENTITY_CLIENT_ID> # Client ID if different from default
workload_identity_token_file = <TOKEN_FILE_PATH> # Path to the token file
```
Refer to [Grafana Azure configuration](ref:configure-grafana-azure) and the [Azure workload identity documentation](https://azure.github.io/azure-workload-identity/docs/) for more details.
#### Workload Identity UI configuration
| Setting | Description |
| ------------------------ | ---------------------------------------------------------------------------------------------------- |
| **Authentication** | Select **Workload Identity**. The directory ID, application ID, and client secret fields are hidden. |
| **Default Subscription** | Click **Load Subscriptions** to populate available subscriptions, then select your default. |
{{< figure src="/media/docs/grafana/data-sources/screenshot-workload-identity.png" max-width="800px" class="docs-image--no-shadow" caption="Azure Monitor data source configured with Workload Identity" >}}
#### Provision Workload Identity with YAML
```yaml
apiVersion: 1
datasources:
- name: Azure Monitor
type: grafana-azure-monitor-datasource
access: proxy
jsonData:
azureAuthType: workloadidentity
subscriptionId: <subscription-id> # Optional, default subscription
version: 1
```
### Current User
Forward the logged-in Grafana user's Azure credentials to the data source for user-level access control.
{{< admonition type="warning" >}}
Current User authentication is an [experimental feature](/docs/release-life-cycle/). Engineering and on-call support is not available. Documentation is limited. No SLA is provided. Contact Grafana Support to enable this feature in Grafana Cloud.
{{< /admonition >}}
#### Current User prerequisites
Your Grafana instance must be configured with Microsoft Entra ID authentication. Refer to the [Microsoft Entra ID authentication documentation](ref:configure-grafana-azure-auth).
#### Configure your Azure App Registration
The App Registration used for Grafana login requires additional configuration:
**Enable token issuance:**
1. In the Azure Portal, open your App Registration.
1. Select **Authentication** in the side menu.
1. Under **Implicit grant and hybrid flows**, check both **Access tokens** and **ID tokens**.
1. Save your changes.
**Add API permissions:**
1. In the Azure Portal, open your App Registration.
1. Select **API Permissions** in the side menu.
1. Ensure these permissions are present under **Microsoft Graph**: `openid`, `profile`, `email`, and `offline_access`.
1. Add the following permissions:
- **Azure Service Management** > **Delegated permissions** > `user_impersonation`
- **APIs my organization uses** > Search for **Log Analytics API** > **Delegated permissions** > `Data.Read`
Refer to the [Azure documentation](https://learn.microsoft.com/en-us/entra/identity-platform/quickstart-configure-app-access-web-apis) for more information.
**Update Grafana scopes:**
Update the `scopes` section in your Grafana Azure authentication configuration to include the `.default` scope:
```
.default openid email profile
```
#### Current User Grafana server configuration
Enable current user authentication in the Grafana server configuration:
```ini
[azure]
user_identity_enabled = true
```
By default, this also enables fallback service credentials. To disable fallback credentials at the instance level:
```ini
[azure]
user_identity_enabled = true
user_identity_fallback_credentials_enabled = false
```
{{< admonition type="note" >}}
To use fallback service credentials, the [feature toggle](ref:configure-grafana-feature-toggles) `idForwarding` must be set to `true`.
{{< /admonition >}}
#### Limitations and fallback credentials
Current User authentication doesn't support backend functionality like alerting, reporting, and recorded queries because user credentials aren't available for background operations.
To support these features, configure **fallback service credentials**. When enabled, Grafana uses the fallback credentials for backend operations. Note that operations using fallback credentials are limited to the permissions of those credentials, not the user's permissions.
{{< admonition type="note" >}}
Query and resource caching is disabled by default for data sources using Current User authentication.
{{< /admonition >}}
#### Current User UI configuration
| Setting | Description |
| -------------------------------- | ------------------------------------------------------------------------------------------- |
| **Authentication** | Select **Current User**. |
| **Default Subscription** | Click **Load Subscriptions** to populate available subscriptions, then select your default. |
| **Fallback Service Credentials** | Enable and configure credentials for backend features like alerting. |
{{< figure src="/media/docs/grafana/data-sources/screenshot-current-user.png" max-width="800px" class="docs-image--no-shadow" caption="Azure Monitor data source configured with Current User authentication" >}}
#### Provision Current User with YAML
{{< admonition type="note" >}}
The `oauthPassThru` property is required for Current User authentication. The `disableGrafanaCache` property prevents returning cached responses for resources users don't have access to.
{{< /admonition >}}
```yaml
apiVersion: 1
datasources:
- name: Azure Monitor
type: grafana-azure-monitor-datasource
access: proxy
jsonData:
azureAuthType: currentuser
oauthPassThru: true
disableGrafanaCache: true
subscriptionId: <subscription-id> # Optional, default subscription
version: 1
```
## Additional configuration options
These settings apply to all authentication methods.
### General settings
| Setting | Description |
| ----------- | ------------------------------------------------------------------------------- |
| **Name** | The data source name used in panels and queries. Example: `azure-monitor-prod`. |
| **Default** | Toggle to make this the default data source for new panels. |
### Enable Basic Logs
Toggle **Enable Basic Logs** to allow queries against [Basic Logs tables](https://learn.microsoft.com/en-us/azure/azure-monitor/logs/basic-logs-query?tabs=portal-1) in supported Log Analytics Workspaces.
{{< admonition type="note" >}}
Querying Basic Logs tables incurs additional costs on a per-query basis.
{{< /admonition >}}
### Private data source connect (Grafana Cloud only)
If you're using Grafana Cloud and need to connect to Azure resources in a private network, use Private Data Source Connect (PDC).
1. Click the **Private data source connect** dropdown to select your PDC configuration.
1. Click **Manage private data source connect** to view your PDC connection details.
For more information, refer to [Private data source connect](ref:private-data-source-connect) and [Configure PDC](ref:configure-pdc).
## Supported cloud names
When provisioning the data source, use the following `cloudName` values:
| Azure Cloud | `cloudName` value |
| -------------------------------- | ------------------------ |
| Microsoft Azure public cloud | `azuremonitor` (default) |
| Microsoft Chinese national cloud | `chinaazuremonitor` |
| US Government cloud | `govazuremonitor` |
{{< admonition type="note" >}}
For Current User authentication, the cloud names differ: use `AzureCloud` for public cloud, `AzureChinaCloud` for the Chinese national cloud, and `AzureUSGovernment` for the US Government cloud.
{{< /admonition >}}
## Verify the connection
After configuring the data source, click **Save & test**. A successful connection displays a message confirming that the credentials are valid and have access to the configured default subscription.
If the test fails, verify:
- Your credentials are correct (tenant ID, client ID, client secret)
- The identity has the required Azure RBAC permissions
- For Managed Identity or Workload Identity, that the Grafana server configuration is correct
- Network connectivity to Azure endpoints
## Provision the data source
You can define and configure the Azure Monitor data source in YAML files as part of the Grafana provisioning system.
For more information about provisioning, refer to [Provisioning Grafana](ref:provisioning-data-sources).
### Provision quick reference
| Authentication method | `azureAuthType` value | Required fields |
| --------------------- | --------------------- | -------------------------------------------------- |
| App Registration | `clientsecret` | `tenantId`, `clientId`, `clientSecret` |
| Managed Identity | `msi` | None (uses VM identity) |
| Workload Identity | `workloadidentity` | None (uses pod identity) |
| Current User | `currentuser` | `oauthPassThru: true`, `disableGrafanaCache: true` |
All methods support the optional `subscriptionId` field to set a default subscription.
For complete YAML examples, see the [authentication method sections](#configure-authentication) above.
## Configure with Terraform
You can configure the Azure Monitor data source using the [Grafana Terraform provider](https://registry.terraform.io/providers/grafana/grafana/latest/docs). This approach enables infrastructure-as-code workflows and version control for your Grafana configuration.
### Terraform prerequisites
- [Terraform](https://www.terraform.io/downloads) installed
- Grafana Terraform provider configured with appropriate credentials
- For Grafana Cloud: A [Cloud Access Policy token](https://grafana.com/docs/grafana-cloud/account-management/authentication-and-permissions/access-policies/) with data source permissions
### Provider configuration
Configure the Grafana provider to connect to your Grafana instance:
```hcl
terraform {
required_providers {
grafana = {
source = "grafana/grafana"
version = ">= 2.0.0"
}
}
}
# For Grafana Cloud
provider "grafana" {
url = "<YOUR_GRAFANA_CLOUD_STACK_URL>"
auth = "<YOUR_SERVICE_ACCOUNT_TOKEN>"
}
# For self-hosted Grafana
# provider "grafana" {
# url = "http://localhost:3000"
# auth = "<API_KEY_OR_SERVICE_ACCOUNT_TOKEN>"
# }
```
### Terraform examples
The following examples show how to configure the Azure Monitor data source for each authentication method.
**App Registration (client secret):**
```hcl
resource "grafana_data_source" "azure_monitor" {
type = "grafana-azure-monitor-datasource"
name = "Azure Monitor"
json_data_encoded = jsonencode({
azureAuthType = "clientsecret"
cloudName = "azuremonitor"
tenantId = "<TENANT_ID>"
clientId = "<CLIENT_ID>"
subscriptionId = "<SUBSCRIPTION_ID>"
})
secure_json_data_encoded = jsonencode({
clientSecret = "<CLIENT_SECRET>"
})
}
```
**Managed Identity:**
```hcl
resource "grafana_data_source" "azure_monitor" {
type = "grafana-azure-monitor-datasource"
name = "Azure Monitor"
json_data_encoded = jsonencode({
azureAuthType = "msi"
subscriptionId = "<SUBSCRIPTION_ID>"
})
}
```
**Workload Identity:**
```hcl
resource "grafana_data_source" "azure_monitor" {
type = "grafana-azure-monitor-datasource"
name = "Azure Monitor"
json_data_encoded = jsonencode({
azureAuthType = "workloadidentity"
subscriptionId = "<SUBSCRIPTION_ID>"
})
}
```
**Current User:**
```hcl
resource "grafana_data_source" "azure_monitor" {
type = "grafana-azure-monitor-datasource"
name = "Azure Monitor"
json_data_encoded = jsonencode({
azureAuthType = "currentuser"
oauthPassThru = true
disableGrafanaCache = true
subscriptionId = "<SUBSCRIPTION_ID>"
})
}
```
**With Basic Logs enabled:**
Add `enableBasicLogs = true` to any of the above configurations:
```hcl
resource "grafana_data_source" "azure_monitor" {
type = "grafana-azure-monitor-datasource"
name = "Azure Monitor"
json_data_encoded = jsonencode({
azureAuthType = "clientsecret"
cloudName = "azuremonitor"
tenantId = "<TENANT_ID>"
clientId = "<CLIENT_ID>"
subscriptionId = "<SUBSCRIPTION_ID>"
enableBasicLogs = true
})
secure_json_data_encoded = jsonencode({
clientSecret = "<CLIENT_SECRET>"
})
}
```
For more information about the Grafana Terraform provider, refer to the [provider documentation](https://registry.terraform.io/providers/grafana/grafana/latest/docs) and the [grafana_data_source resource](https://registry.terraform.io/providers/grafana/grafana/latest/docs/resources/data_source).
@@ -21,6 +21,7 @@ labels:
menuTitle: Query editor
title: Azure Monitor query editor
weight: 300
last_reviewed: 2025-12-04
refs:
query-transform-data-query-options:
- pattern: /docs/grafana/
@@ -32,30 +33,85 @@ refs:
destination: /docs/grafana/<GRAFANA_VERSION>/panels-visualizations/query-transform-data/
- pattern: /docs/grafana-cloud/
destination: /docs/grafana/<GRAFANA_VERSION>/panels-visualizations/query-transform-data/
configure-azure-monitor:
- pattern: /docs/grafana/
destination: /docs/grafana/<GRAFANA_VERSION>/datasources/azure-monitor/configure/
- pattern: /docs/grafana-cloud/
destination: /docs/grafana/<GRAFANA_VERSION>/datasources/azure-monitor/configure/
explore:
- pattern: /docs/grafana/
destination: /docs/grafana/<GRAFANA_VERSION>/explore/
- pattern: /docs/grafana-cloud/
destination: /docs/grafana/<GRAFANA_VERSION>/explore/
troubleshoot-azure-monitor:
- pattern: /docs/grafana/
destination: /docs/grafana/<GRAFANA_VERSION>/datasources/azure-monitor/troubleshooting/
- pattern: /docs/grafana-cloud/
destination: /docs/grafana/<GRAFANA_VERSION>/datasources/azure-monitor/troubleshooting/
configure-grafana-feature-toggles:
- pattern: /docs/grafana/
destination: /docs/grafana/<GRAFANA_VERSION>/setup-grafana/configure-grafana/feature-toggles/
- pattern: /docs/grafana-cloud/
destination: /docs/grafana/<GRAFANA_VERSION>/setup-grafana/configure-grafana/feature-toggles/
template-variables:
- pattern: /docs/grafana/
destination: /docs/grafana/<GRAFANA_VERSION>/datasources/azure-monitor/template-variables/
- pattern: /docs/grafana-cloud/
destination: /docs/grafana/<GRAFANA_VERSION>/datasources/azure-monitor/template-variables/
alerting-azure-monitor:
- pattern: /docs/grafana/
destination: /docs/grafana/<GRAFANA_VERSION>/datasources/azure-monitor/alerting/
- pattern: /docs/grafana-cloud/
destination: /docs/grafana/<GRAFANA_VERSION>/datasources/azure-monitor/alerting/
annotations-azure-monitor:
- pattern: /docs/grafana/
destination: /docs/grafana/<GRAFANA_VERSION>/datasources/azure-monitor/annotations/
- pattern: /docs/grafana-cloud/
destination: /docs/grafana/<GRAFANA_VERSION>/datasources/azure-monitor/annotations/
---
# Azure Monitor query editor
This topic explains querying specific to the Azure Monitor data source.
For general documentation on querying data sources in Grafana, see [Query and transform data](ref:query-transform-data).
Grafana provides a query editor for the Azure Monitor data source, which is located on the [Explore page](ref:explore). You can also access the Azure Monitor query editor from a dashboard panel. Click the menu in the upper right of the panel and select **Edit**.
## Choose a query editing mode
This document explains querying specific to the Azure Monitor data source.
For general documentation on querying data sources in Grafana, refer to [Query and transform data](ref:query-transform-data).
The Azure Monitor data source's query editor has three modes depending on which Azure service you want to query:
The Azure Monitor data source can query data from Azure Monitor Metrics and Logs, the Azure Resource Graph, and Application Insights Traces. Each source has its own specialized query editor.
## Before you begin
- Ensure you have [configured the Azure Monitor data source](ref:configure-azure-monitor).
- Verify your credentials have appropriate permissions for the resources you want to query.
## Key concepts
If you're new to Azure Monitor, here are some key terms used throughout this documentation:
| Term | Description |
| ------------------------------ | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
| **KQL (Kusto Query Language)** | The query language used for Azure Monitor Logs and Azure Resource Graph. KQL uses a pipe-based syntax similar to Unix commands and is optimized for read-only data exploration. If you know SQL, the [SQL to Kusto cheat sheet](https://docs.microsoft.com/en-us/azure/data-explorer/kusto/query/sqlcheatsheet) can help you get started. |
| **Log Analytics workspace** | An Azure resource that collects and stores log data from your Azure resources, applications, and services. You query this data using KQL. |
| **Application Insights** | Azure's application performance monitoring (APM) service. It collects telemetry data like requests, exceptions, and traces from your applications. |
| **Metrics vs. Logs** | **Metrics** are lightweight numeric values collected at regular intervals (e.g., CPU percentage). **Logs** are detailed records of events with varying schemas (e.g., request logs, error messages). Metrics use a visual query builder; Logs require KQL. |
## Choose a query editor mode
The Azure Monitor data source's query editor has four modes depending on which Azure service you want to query:
- **Metrics** for [Azure Monitor Metrics](#query-azure-monitor-metrics)
- **Logs** for [Azure Monitor Logs](#query-azure-monitor-logs)
- [**Azure Resource Graph**](#query-azure-resource-graph)
- **Traces** for [Application Insights Traces](#query-application-insights-traces)
- **Azure Resource Graph** for [Azure Resource Graph](#query-azure-resource-graph)
## Query Azure Monitor Metrics
Azure Monitor Metrics collects numeric data from [supported resources](https://docs.microsoft.com/en-us/azure/azure-monitor/monitor-reference), and you can query them to investigate your resources' health and usage and maximize availability and performance.
Azure Monitor Metrics collects numeric data from [supported resources](https://docs.microsoft.com/en-us/azure/azure-monitor/monitor-reference), and you can query them to investigate your resources' health and usage and maximize availability and performance.
Monitor Metrics use a lightweight format that stores only numeric data in a specific structure and supports near real-time scenarios, making it useful for fast detection of issues.
In contrast, Azure Monitor Logs can store a variety of data types, each with their own structure.
{{< figure src="/static/img/docs/azure-monitor/query-editor-metrics.png" max-width="800px" class="docs-image--no-shadow" caption="Azure Logs Metrics sample query visualizing CPU percentage over time" >}}
{{< figure src="/static/img/docs/azure-monitor/query-editor-metrics.png" max-width="800px" class="docs-image--no-shadow" caption="Azure Monitor Metrics sample query visualizing CPU percentage over time" >}}
### Create a Metrics query
@@ -85,7 +141,7 @@ Optionally, you can apply further aggregations or filter by dimensions.
The available options change depending on what is relevant to the selected metric.
You can also augment queries by using [template variables](../template-variables/).
You can also augment queries by using [template variables](ref:template-variables).
### Format legend aliases
@@ -109,7 +165,7 @@ For example:
| `{{ dimensionname }}` | _(Legacy for backward compatibility)_ Replaced with the name of the first dimension. |
| `{{ dimensionvalue }}` | _(Legacy for backward compatibility)_ Replaced with the value of the first dimension. |
### Filter using dimensions
### Filter with dimensions
Some metrics also have dimensions, which associate additional metadata.
Dimensions are represented as key-value pairs assigned to each value of a metric.
@@ -121,7 +177,7 @@ For more information on multi-dimensional metrics, refer to the [Azure Monitor d
## Query Azure Monitor Logs
Azure Monitor Logs collects and organizes log and performance data from [supported resources](https://docs.microsoft.com/en-us/azure/azure-monitor/monitor-reference), and makes many sources of data available to query together with the [Kusto Query Language (KQL)](https://docs.microsoft.com/en-us/azure/data-explorer/kusto/query/).
Azure Monitor Logs collects and organizes log and performance data from [supported resources](https://docs.microsoft.com/en-us/azure/azure-monitor/monitor-reference), and makes many sources of data available to query together with the [Kusto Query Language (KQL)](https://docs.microsoft.com/en-us/azure/data-explorer/kusto/query/).
While Azure Monitor Metrics stores only simplified numerical data, Logs can store different data types, each with their own structure.
You can also perform complex analysis of Logs data by using KQL.
@@ -130,6 +186,32 @@ The Azure Monitor data source also supports querying of [Basic Logs](https://lea
{{< figure src="/static/img/docs/azure-monitor/query-editor-logs.png" max-width="800px" class="docs-image--no-shadow" caption="Azure Monitor Logs sample query comparing successful requests to failed requests" >}}
### Logs query builder (public preview)
{{< admonition type="note" >}}
The Logs query builder is a [public preview feature](/docs/release-life-cycle/). It may not be enabled in all Grafana environments.
{{< /admonition >}}
The Logs query builder provides a visual interface for building Azure Monitor Logs queries without writing KQL. This is helpful if you're new to KQL or want to quickly build simple queries.
**To enable the Logs query builder:**
1. Enable the `azureMonitorLogsBuilderEditor` [feature toggle](ref:configure-grafana-feature-toggles) in your Grafana configuration.
1. Restart Grafana for the change to take effect.
**To switch between Builder and Code modes:**
When the feature is enabled, a **Builder / Code** toggle appears in the Logs query editor:
- **Builder**: Use the visual interface to select tables, columns, filters, and aggregations. The builder generates the KQL query for you.
- **Code**: Write KQL queries directly. Use this mode for complex queries that require full KQL capabilities.
New queries default to Builder mode. Existing queries that were created with raw KQL remain in Code mode.
{{< admonition type="note" >}}
You can switch from Builder to Code mode at any time to view or edit the generated KQL. However, switching from Code to Builder mode may not preserve complex queries that can't be represented in the builder interface.
{{< /admonition >}}
### Create a Logs query
**To create a Logs query:**
@@ -140,13 +222,13 @@ The Azure Monitor data source also supports querying of [Basic Logs](https://lea
Alternatively, you can dynamically query all resources under a single resource group or subscription.
{{< admonition type="note" >}}
If a timespan is specified in the query, the overlap of the timespan between the query and the dashboard will be used as the query timespan. See the [API documentation for
If a time span is specified in the query, the overlap between the query time span and the dashboard time range will be used. See the [API documentation for
details.](https://learn.microsoft.com/en-us/rest/api/loganalytics/dataaccess/query/get?tabs=HTTP#uri-parameters)
{{< /admonition >}}
1. Enter your KQL query.
You can also augment queries by using [template variables](../template-variables/).
You can also augment queries by using [template variables](ref:template-variables).
**To create a Basic Logs query:**
@@ -161,7 +243,7 @@ You can also augment queries by using [template variables](../template-variables
{{< /admonition >}}
1. Enter your KQL query.
You can also augment queries by using [template variables](https://grafana.com/docs/grafana/<GRAFANA_VERSION>/datasources/azure-monitor/template-variables/).
You can also augment queries by using [template variables](ref:template-variables).
### Logs query examples
@@ -174,24 +256,28 @@ The Azure documentation includes resources to help you learn KQL:
- [Tutorial: Use Kusto queries in Azure Monitor](https://docs.microsoft.com/en-us/azure/data-explorer/kusto/query/tutorial?pivots=azuremonitor)
- [SQL to Kusto cheat sheet](https://docs.microsoft.com/en-us/azure/data-explorer/kusto/query/sqlcheatsheet)
> **Time-range:** The time-range that will be used for the query can be modified via the time-range switch. Selecting `Query` will only make use of time-ranges specified within the query.
> Specifying `Dashboard` will only make use of the Grafana time-range.
> If there are no time-ranges specified within the query, the default Log Analytics time-range will apply.
> For more details on this change, refer to the [Azure Monitor Logs API documentation](https://learn.microsoft.com/en-us/rest/api/loganalytics/dataaccess/query/get?tabs=HTTP#uri-parameters).
> If the `Intersection` option was previously chosen it will be migrated by default to `Dashboard`.
{{< admonition type="note" >}}
**Time-range:** The time-range used for the query can be modified via the time-range switch:
This example query returns a virtual machine's CPU performance, averaged over 5-minute time grains:
- Selecting **Query** uses only time-ranges specified within the query.
- Selecting **Dashboard** uses only the Grafana dashboard time-range.
- If no time-range is specified in the query, the default Log Analytics time-range applies.
For more details, refer to the [Azure Monitor Logs API documentation](https://learn.microsoft.com/en-us/rest/api/loganalytics/dataaccess/query/get?tabs=HTTP#uri-parameters). If you previously used the `Intersection` option, it has been migrated to `Dashboard`.
{{< /admonition >}}
This example query returns a virtual machine's CPU performance, averaged over 5-minute time grains:
```kusto
Perf
// $__timeFilter is a special Grafana macro that filters the results to the time span of the dashboard
// $__timeFilter is a special Grafana macro that filters the results to the time span of the dashboard
| where $__timeFilter(TimeGenerated)
| where CounterName == "% Processor Time"
| summarize avg(CounterValue) by bin(TimeGenerated, 5m), Computer
| order by TimeGenerated asc
```
Use time series queries for values that change over time, usually for graph visualizations such as the Time series panel.
Use time series queries for values that change over time, usually for graph visualizations such as the Time series panel.
Each query should return at least a datetime column and numeric value column.
The result must also be sorted in ascending order by the datetime column.
@@ -357,21 +443,33 @@ Application Insights stores trace data in an underlying Log Analytics workspace
This query type only supports Application Insights resources.
{{< /admonition >}}
Running a query of this kind will return all trace data within the timespan specified by the panel/dashboard.
1. (Optional) Specify an **Operation ID** value to filter traces.
1. (Optional) Specify **event types** to filter by.
1. (Optional) Specify **event properties** to filter by.
1. (Optional) Change the **Result format** to switch between tabular format and trace format.
Optionally, you can apply further filtering or select a specific Operation ID to query. The result format can also be switched between a tabular format or the trace format which will return the data in a format that can be used with the Trace visualization.
{{< admonition type="note" >}}
Selecting the trace format filters events to only the `trace` type. Use this format with the Trace visualization.
{{< /admonition >}}
{{< admonition type="note" >}}
Selecting the trace format will filter events with the `trace` type.
{{< /admonition >}}
Running a query returns all trace data within the time span specified by the panel or dashboard time range.
1. Specify an Operation ID value.
1. Specify event types to filter by.
1. Specify event properties to filter by.
You can also augment queries by using [template variables](ref:template-variables).
You can also augment queries by using [template variables](../template-variables/).
## Use queries for alerting and recording rules
## Working with large Azure resource data sets
All Azure Monitor query types (Metrics, Logs, Azure Resource Graph, and Traces) can be used with Grafana Alerting and recording rules.
For detailed information about creating alert rules, supported query types, authentication requirements, and examples, refer to [Azure Monitor alerting](ref:alerting-azure-monitor).
## Work with large Azure resource datasets
If a request exceeds the [maximum allowed value of records](https://docs.microsoft.com/en-us/azure/governance/resource-graph/concepts/work-with-data#paging-results), the result is paginated and only the first page of results are returned.
You can use filters to reduce the amount of records returned under that value.
## Next steps
- [Use template variables](../template-variables/) to create dynamic, reusable dashboards
- [Add annotations](ref:annotations-azure-monitor) to overlay events on your graphs
- [Set up alerting](ref:alerting-azure-monitor) to create alert rules based on Azure Monitor data
- [Troubleshoot](ref:troubleshoot-azure-monitor) common query and configuration issues
@@ -23,6 +23,7 @@ labels:
menuTitle: Template variables
title: Azure Monitor template variables
weight: 400
last_reviewed: 2025-12-04
refs:
variables:
- pattern: /docs/grafana/
@@ -34,6 +35,11 @@ refs:
destination: /docs/grafana/<GRAFANA_VERSION>/dashboards/variables/add-template-variables/
- pattern: /docs/grafana-cloud/
destination: /docs/grafana/<GRAFANA_VERSION>/dashboards/variables/add-template-variables/
configure-azure-monitor:
- pattern: /docs/grafana/
destination: /docs/grafana/<GRAFANA_VERSION>/datasources/azure-monitor/configure/
- pattern: /docs/grafana-cloud/
destination: /docs/grafana/<GRAFANA_VERSION>/datasources/azure-monitor/configure/
---
# Azure Monitor template variables
@@ -42,58 +48,173 @@ Instead of hard-coding details such as resource group or resource name values in
This helps you create more interactive, dynamic, and reusable dashboards.
Grafana refers to such variables as template variables.
For an introduction to templating and template variables, refer to the [Templating](ref:variables) and [Add and manage variables](ref:add-template-variables) documentation.
For an introduction to templating and template variables, refer to [Templating](ref:variables) and [Add and manage variables](ref:add-template-variables).
## Use query variables
## Before you begin
You can specify these Azure Monitor data source queries in the Variable edit view's **Query Type** field.
- Ensure you have [configured the Azure Monitor data source](ref:configure-azure-monitor).
- If you want template variables to auto-populate subscriptions, set a **Default Subscription** in the data source configuration.
| Name | Description |
| ----------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------- |
| **Subscriptions** | Returns subscriptions. |
| **Resource Groups** | Returns resource groups for a specified subscription. Supports multi-value. |
| **Namespaces** | Returns metric namespaces for the specified subscription. If a resource group is provided, only the namespaces within that group are returned. |
| **Regions**             | Returns regions for the specified subscription.                                                                                                |
| **Resource Names** | Returns a list of resource names for a specified subscription, resource group and namespace. Supports multi-value. |
| **Metric Names** | Returns a list of metric names for a resource. |
| **Workspaces** | Returns a list of workspaces for the specified subscription. |
| **Logs** | Use a KQL query to return values. |
| **Custom Namespaces** | Returns metric namespaces for the specified resource. |
| **Custom Metric Names** | Returns a list of custom metric names for the specified resource. |
## Create a template variable
To create a template variable for Azure Monitor:
1. Open the dashboard where you want to add the variable.
1. Click **Dashboard settings** (gear icon) in the top navigation.
1. Select **Variables** in the left menu.
1. Click **Add variable**.
1. Enter a **Name** for your variable (e.g., `subscription`, `resourceGroup`, `resource`).
1. In the **Type** dropdown, select **Query**.
1. In the **Data source** dropdown, select your Azure Monitor data source.
1. In the **Query Type** dropdown, select the appropriate query type (see [Available query types](#available-query-types)).
1. Configure any additional fields required by the selected query type.
1. Click **Run query** to preview the variable values.
1. Configure display options such as **Multi-value** or **Include All option** as needed.
1. Click **Apply** to save the variable.
## Available query types
The Azure Monitor data source provides the following query types for template variables:
| Query type | Description |
| ----------------------- | -------------------------------------------------------------------------------------------------------------------------------------- |
| **Subscriptions** | Returns a list of Azure subscriptions accessible to the configured credentials. |
| **Resource Groups** | Returns resource groups for a specified subscription. Supports multi-value selection. |
| **Namespaces** | Returns metric namespaces for the specified subscription. If a resource group is specified, returns only namespaces within that group. |
| **Regions** | Returns Azure regions available for the specified subscription. |
| **Resource Names** | Returns resource names for a specified subscription, resource group, and namespace. Supports multi-value selection. |
| **Metric Names** | Returns available metric names for a specified resource. |
| **Workspaces** | Returns Log Analytics workspaces for the specified subscription. |
| **Logs** | Executes a KQL query and returns the results as variable values. See [Create a Logs variable](#create-a-logs-variable). |
| **Custom Namespaces** | Returns custom metric namespaces for a specified resource. |
| **Custom Metric Names** | Returns custom metric names for a specified resource. |
{{< admonition type="note" >}}
Custom metrics cannot be emitted against a subscription or resource group. Select resources only when you need to retrieve custom metric namespaces or custom metric names associated with a specific resource.
Custom metrics cannot be emitted against a subscription or resource group. Select specific resources when retrieving custom metric namespaces or custom metric names.
{{< /admonition >}}
You can use any Log Analytics Kusto Query Language (KQL) query that returns a single list of values in the `Query` field.
For example:
## Create cascading variables
| Query | List of values returned |
| ----------------------------------------------------------------------------------------- | --------------------------------------- |
| `workspace("myWorkspace").Heartbeat \| distinct Computer` | Virtual machines |
| `workspace("$workspace").Heartbeat \| distinct Computer` | Virtual machines with template variable |
| `workspace("$workspace").Perf \| distinct ObjectName` | Objects from the Perf table |
| `workspace("$workspace").Perf \| where ObjectName == "$object"` `\| distinct CounterName` | Metric names from the Perf table |
Cascading variables (also called dependent or chained variables) allow you to create dropdown menus that filter based on previous selections. This is useful for drilling down from subscription to resource group to specific resource.
### Query variable example
### Example: Subscription → Resource Group → Resource Name
This time series query uses query variables:
**Step 1: Create a Subscription variable**
1. Create a variable named `subscription`.
1. Set **Query Type** to **Subscriptions**.
**Step 2: Create a Resource Group variable**
1. Create a variable named `resourceGroup`.
1. Set **Query Type** to **Resource Groups**.
1. In the **Subscription** field, select `$subscription`.
**Step 3: Create a Resource Name variable**
1. Create a variable named `resource`.
1. Set **Query Type** to **Resource Names**.
1. In the **Subscription** field, select `$subscription`.
1. In the **Resource Group** field, select `$resourceGroup`.
1. Select the appropriate **Namespace** for your resources (e.g., `Microsoft.Compute/virtualMachines`).
Now when you change the subscription, the resource group dropdown updates automatically, and when you change the resource group, the resource name dropdown updates.
## Create a Logs variable
The **Logs** query type lets you use a KQL query to populate variable values. The query must return a single column of values.
**To create a Logs variable:**
1. Create a new variable with **Query Type** set to **Logs**.
1. Select a **Resource** (Log Analytics workspace or Application Insights resource).
1. Enter a KQL query that returns a single column.
### Logs variable query examples
| Query | Returns |
| ----------------------------------------- | ------------------------------------- |
| `Heartbeat \| distinct Computer` | List of virtual machine names |
| `Perf \| distinct ObjectName` | List of performance object names |
| `AzureActivity \| distinct ResourceGroup` | List of resource groups with activity |
| `AppRequests \| distinct Name` | List of application request names |
You can reference other variables in your Logs query:
```kusto
workspace("$workspace").Heartbeat | distinct Computer
```
```kusto
workspace("$workspace").Perf
| where ObjectName == "$object"
| distinct CounterName
```
## Variable refresh options
Control when your variables refresh by setting the **Refresh** option:
| Option | Behavior |
| ------------------------ | ----------------------------------------------------------------------------------------- |
| **On dashboard load** | Variables refresh each time the dashboard loads. Best for data that changes infrequently. |
| **On time range change** | Variables refresh when the dashboard time range changes. Use for time-sensitive queries. |
For dashboards with many variables or complex queries, use **On dashboard load** to improve performance.
## Use variables in queries
After you create template variables, you can use them in your Azure Monitor queries by referencing them with the `$` prefix.
### Metrics query example
In a Metrics query, select your variables in the resource picker fields:
- **Subscription**: `$subscription`
- **Resource Group**: `$resourceGroup`
- **Resource Name**: `$resource`
### Logs query example
Reference variables directly in your KQL queries:
```kusto
Perf
| where ObjectName == "$object" and CounterName == "$metric"
| where TimeGenerated >= $__timeFrom() and TimeGenerated <= $__timeTo()
| where $__contains(Computer, $computer)
| summarize avg(CounterValue) by bin(TimeGenerated, $__interval), Computer
| order by TimeGenerated asc
```
### Multi-value variables
## Multi-value variables
It is possible to select multiple values for **Resource Groups** and **Resource Names** and use a single metrics query pointing to those values as long as they:
You can enable **Multi-value** selection for **Resource Groups** and **Resource Names** variables. When using multi-value variables in a Metrics query, all selected resources must:
- Belong to the same subscription.
- Are in the same region.
- Are of the same type (namespace).
- Belong to the same subscription
- Be in the same Azure region
- Be of the same resource type (namespace)
Also, note that if a template variable pointing to multiple resource groups or names is used in another template variable as a parameter (e.g. to retrieve metric names), only the first value will be used. This means that the combination of the first resource group and name selected should be valid.
{{< admonition type="note" >}}
When a multi-value variable is used as a parameter in another variable query (for example, to retrieve metric names), only the first selected value is used. Ensure the first resource group and resource name combination is valid.
{{< /admonition >}}
## Troubleshoot template variables
If you encounter issues with template variables, try the following solutions.
### Variable returns no values
- Verify the Azure Monitor data source is configured correctly and can connect to Azure.
- Check that the credentials have appropriate permissions to list the requested resources.
- For cascading variables, ensure parent variables have valid selections.
### Variable values are outdated
- Check the **Refresh** setting and adjust if needed.
- Click the refresh icon next to the variable dropdown to manually refresh.
### Multi-value selection not working in queries
- Ensure the resources meet the requirements (same subscription, region, and type).
- For Logs queries, use the `$__contains()` macro to handle multi-value variables properly.
@@ -0,0 +1,320 @@
---
aliases:
- ../../data-sources/azure-monitor/troubleshooting/
description: Troubleshooting guide for the Azure Monitor data source in Grafana
keywords:
- grafana
- azure
- monitor
- troubleshooting
- errors
- authentication
- query
labels:
products:
- cloud
- enterprise
- oss
menuTitle: Troubleshoot
title: Troubleshoot Azure Monitor data source issues
weight: 500
last_reviewed: 2025-12-04
refs:
configure-azure-monitor:
- pattern: /docs/grafana/
destination: /docs/grafana/<GRAFANA_VERSION>/datasources/azure-monitor/configure/
- pattern: /docs/grafana-cloud/
destination: /docs/grafana/<GRAFANA_VERSION>/datasources/azure-monitor/configure/
template-variables:
- pattern: /docs/grafana/
destination: /docs/grafana/<GRAFANA_VERSION>/datasources/azure-monitor/template-variables/
- pattern: /docs/grafana-cloud/
destination: /docs/grafana/<GRAFANA_VERSION>/datasources/azure-monitor/template-variables/
query-editor:
- pattern: /docs/grafana/
destination: /docs/grafana/<GRAFANA_VERSION>/datasources/azure-monitor/query-editor/
- pattern: /docs/grafana-cloud/
destination: /docs/grafana/<GRAFANA_VERSION>/datasources/azure-monitor/query-editor/
---
# Troubleshoot Azure Monitor data source issues
This document provides solutions to common issues you may encounter when configuring or using the Azure Monitor data source.
## Configuration and authentication errors
These errors typically occur when setting up the data source or when authentication credentials are invalid.
### "Authorization failed" or "Access denied"
**Symptoms:**
- Save & test fails with "Authorization failed"
- Queries return "Access denied" errors
- Subscriptions don't load when clicking **Load Subscriptions**
**Possible causes and solutions:**
| Cause | Solution |
| -------------------------------------------------- | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
| App registration doesn't have required permissions | Assign the `Reader` role to the app registration on the subscription or resource group you want to monitor. Refer to the [Azure documentation for role assignments](https://docs.microsoft.com/en-us/azure/role-based-access-control/role-assignments-portal?tabs=current). |
| Incorrect tenant ID, client ID, or client secret | Verify the credentials in the Azure Portal under **App registrations** > your app > **Overview** (for IDs) and **Certificates & secrets** (for secret). |
| Client secret has expired | Create a new client secret in Azure and update the data source configuration. |
| Managed Identity not enabled on the Azure resource | For VMs, enable managed identity in the Azure Portal under **Identity**. For App Service, enable it under **Identity** in the app settings. |
| Managed Identity not assigned the Reader role | Assign the `Reader` role to the managed identity on the target subscription or resources. |
### "Invalid client secret" or "Client secret not found"
**Symptoms:**
- Authentication fails immediately after configuration
- Error message references invalid credentials
**Solutions:**
1. Ensure you copied the client secret **value**, not the secret ID. In Azure Portal under **Certificates & secrets**, the secret value is only shown once when created. The secret ID is a different identifier and won't work for authentication.
2. Verify the client secret was copied correctly (no extra spaces or truncation).
3. Check if the secret has expired in Azure Portal under **App registrations** > your app > **Certificates & secrets**.
4. Create a new secret and update the data source configuration.
### "Tenant not found" or "Invalid tenant ID"
**Symptoms:**
- Data source test fails with tenant-related errors
- Unable to authenticate
**Solutions:**
1. Verify the Directory (tenant) ID in Azure Portal under **Microsoft Entra ID** > **Overview**.
2. Ensure you're using the correct Azure cloud setting (Azure, Azure Government, or Azure China).
3. Check that the tenant ID is a valid GUID format.
### Managed Identity not working
**Symptoms:**
- Managed Identity option is available but authentication fails
- Error: "Managed identity authentication is not available"
**Solutions:**
1. Verify `managed_identity_enabled = true` is set in the Grafana server configuration under `[azure]`.
2. Confirm the Azure resource hosting Grafana has managed identity enabled.
3. For user-assigned managed identity, ensure `managed_identity_client_id` is set correctly.
4. Verify the managed identity has the `Reader` role on the target resources.
5. Restart Grafana after changing server configuration.
### Workload Identity not working
**Symptoms:**
- Workload Identity authentication fails in Kubernetes/AKS environment
- Token file errors
**Solutions:**
1. Verify `workload_identity_enabled = true` is set in the Grafana server configuration.
2. Check that the service account is correctly annotated for workload identity.
3. Verify the federated credential is configured in Azure.
4. Ensure the token path is accessible to the Grafana pod.
5. Check the workload identity webhook is running in the cluster.
## Query errors
These errors occur when executing queries against Azure Monitor services.
### "No data" or empty results
**Symptoms:**
- Query executes without error but returns no data
- Charts show "No data" message
**Possible causes and solutions:**
| Cause | Solution |
| --------------------------------- | -------------------------------------------------------------------------------------------------------------------------------- |
| Time range doesn't contain data | Expand the dashboard time range or verify data exists in Azure Portal. |
| Wrong resource selected | Verify you've selected the correct subscription, resource group, and resource. |
| Metric not available for resource | Not all metrics are available for all resources. Check available metrics in Azure Portal under the resource's **Metrics** blade. |
| Metric has no values | Some metrics only populate under certain conditions (e.g., error counts when errors occur). |
| Permissions issue | Verify the identity has read access to the specific resource. |
### "Bad request" or "Invalid query"
**Symptoms:**
- Query fails with 400 error
- Error message indicates query syntax issues
**Solutions for Logs queries:**
1. Validate your KQL syntax in the Azure Portal Log Analytics query editor.
2. Check for typos in table names or column names.
3. Ensure referenced tables exist in the selected workspace.
4. Verify the time range is valid (not in the future, not too far in the past for data retention).
**Solutions for Metrics queries:**
1. Verify the metric name is valid for the selected resource type.
2. Check that dimension filters use valid dimension names and values.
3. Ensure the aggregation type is supported for the selected metric.
### "Resource not found"
**Symptoms:**
- Query fails with 404 error
- Resource picker shows resources that can't be queried
**Solutions:**
1. Verify the resource still exists in Azure (it may have been deleted or moved).
2. Check that the subscription is correct.
3. Refresh the resource picker by re-selecting the subscription.
4. Verify the identity has access to the resource's resource group.
### Logs query timeout
**Symptoms:**
- Query runs for a long time then fails
- Error mentions timeout or query limits
**Solutions:**
1. Narrow the time range to reduce data volume.
2. Add filters to reduce the result set.
3. Use `summarize` to aggregate data instead of returning raw rows.
4. Consider using Basic Logs for large datasets (if enabled).
5. Break complex queries into smaller parts.
### "Metrics not available" for a resource
**Symptoms:**
- Resource appears in picker but no metrics are listed
- Metric dropdown is empty
**Solutions:**
1. Verify the resource type supports Azure Monitor metrics.
2. Check if the resource is in a region that supports metrics.
3. Some resources require diagnostic settings to emit metrics—configure these in Azure Portal.
4. Try selecting a different namespace for the resource.
## Azure Resource Graph errors
These errors are specific to Azure Resource Graph (ARG) queries.
### "Query execution failed"
**Symptoms:**
- ARG query fails with execution errors
- Results don't match expected resources
**Solutions:**
1. Validate query syntax in Azure Portal Resource Graph Explorer.
2. Check that you have access to the subscriptions being queried.
3. Verify table names are correct (e.g., `Resources`, `ResourceContainers`).
4. Some ARG features require specific permissions; check the [ARG documentation](https://docs.microsoft.com/en-us/azure/governance/resource-graph/).
### Query returns incomplete results
**Symptoms:**
- Not all expected resources appear in results
- Results seem truncated
**Solutions:**
1. ARG queries are paginated. The data source handles pagination automatically, but very large result sets may be limited.
2. Add filters to reduce result set size.
3. Verify you have access to all subscriptions containing the resources.
## Application Insights Traces errors
These errors are specific to the Traces query type.
### "No traces found"
**Symptoms:**
- Trace query returns empty results
- Operation ID search finds nothing
**Solutions:**
1. Verify the Application Insights resource is collecting trace data.
2. Check that the time range includes when the traces were generated.
3. Ensure the Operation ID is correct (copy directly from another trace or log).
4. Verify the identity has access to the Application Insights resource.
## Template variable errors
For detailed troubleshooting of template variables, refer to the [template variables troubleshooting section](ref:template-variables).
### Variables return no values
**Solutions:**
1. Verify the data source connection is working (test it in the data source settings).
2. Check that parent variables (for cascading variables) have valid selections.
3. Verify the identity has permissions to list the requested resources.
4. For Logs variables, ensure the KQL query returns a single column.
### Variables are slow to load
**Solutions:**
1. Set variable refresh to **On dashboard load** instead of **On time range change**.
2. Reduce the scope of variable queries (e.g., filter by resource group instead of entire subscription).
3. For Logs variables, optimize the KQL query to return results faster.
## Connection and network errors
These errors indicate problems with network connectivity between Grafana and Azure services.
### "Connection refused" or timeout errors
**Symptoms:**
- Data source test fails with network errors
- Queries timeout without returning results
**Solutions:**
1. Verify network connectivity from Grafana to Azure endpoints.
2. Check firewall rules allow outbound HTTPS (port 443) to Azure services.
3. For private networks, ensure Private Link or VPN is configured correctly.
4. For Grafana Cloud, configure [Private Data Source Connect](ref:configure-azure-monitor) if accessing private resources.
### SSL/TLS certificate errors
**Symptoms:**
- Certificate validation failures
- SSL handshake errors
**Solutions:**
1. Ensure the system time is correct (certificate validation fails with incorrect time).
2. Verify corporate proxy isn't intercepting HTTPS traffic.
3. Check that required CA certificates are installed on the Grafana server.
## Get additional help
If you've tried the solutions above and still encounter issues:
1. Check the [Grafana community forums](https://community.grafana.com/) for similar issues.
1. Review the [Azure Monitor data source GitHub issues](https://github.com/grafana/grafana/issues) for known bugs.
1. Enable debug logging in Grafana to capture detailed error information.
1. Contact Grafana Support if you're an Enterprise, Cloud Pro or Cloud Contracted user.
1. When reporting issues, include:
- Grafana version
- Error messages (redact sensitive information)
- Steps to reproduce
- Relevant configuration (redact credentials)
@@ -52,6 +52,7 @@ The following documents will help you get started with the InfluxDB data source
- [Configure the InfluxDB data source](./configure-influxdb-data-source/)
- [InfluxDB query editor](./query-editor/)
- [InfluxDB templates and variables](./template-variables/)
- [Troubleshoot issues with the InfluxDB data source](./troubleshooting/)
Once you have configured the data source you can:
@@ -0,0 +1,291 @@
---
aliases:
- ../../data-sources/influxdb/troubleshooting/
description: Troubleshooting the InfluxDB data source in Grafana
keywords:
- grafana
- influxdb
- troubleshooting
- errors
- flux
- influxql
- sql
labels:
products:
- cloud
- enterprise
- oss
menuTitle: Troubleshooting
title: Troubleshoot issues with the InfluxDB data source
weight: 600
---
# Troubleshoot issues with the InfluxDB data source
This document provides troubleshooting information for common errors you may encounter when using the InfluxDB data source in Grafana.
## Connection errors
The following errors occur when Grafana cannot establish or maintain a connection to InfluxDB.
### Failed to connect to InfluxDB
**Error message:** "error performing influxQL query" or "error performing flux query" or "error performing sql query"
**Cause:** Grafana cannot establish a network connection to the InfluxDB server.
**Solution:**
1. Verify that the InfluxDB URL is correct in the data source configuration.
1. Check that InfluxDB is running and accessible from the Grafana server.
1. Ensure the URL includes the protocol (`http://` or `https://`).
1. Verify the port is correct (the InfluxDB default API port is `8086`).
1. Ensure there are no firewall rules blocking the connection.
1. For Grafana Cloud, ensure you have configured [Private data source connect](https://grafana.com/docs/grafana-cloud/connect-externally-hosted/private-data-source-connect/) if your InfluxDB instance is not publicly accessible.
### Request timed out
**Error message:** "context deadline exceeded" or "request timeout"
**Cause:** The connection to InfluxDB timed out before receiving a response.
**Solution:**
1. Check the network latency between Grafana and InfluxDB.
1. Verify that InfluxDB is not overloaded or experiencing performance issues.
1. Increase the timeout setting in the data source configuration under **Advanced HTTP Settings**.
1. Reduce the time range or complexity of your query.
## Authentication errors
The following errors occur when there are issues with authentication credentials or permissions.
### Unauthorized (401)
**Error message:** "401 Unauthorized" or "authorization failed"
**Cause:** The authentication credentials are invalid or missing.
**Solution:**
1. Verify that the token or password is correct in the data source configuration.
1. For Flux and SQL, ensure the token has not expired.
1. For InfluxQL with InfluxDB 2.x, verify the token is set as an `Authorization` header with the value `Token <your-token>`.
1. For InfluxDB 1.x, verify the username and password are correct.
1. Check that the token has the required permissions to access the specified bucket or database.
### Forbidden (403)
**Error message:** "403 Forbidden" or "access denied"
**Cause:** The authenticated user or token does not have permission to access the requested resource.
**Solution:**
1. Verify the token has read access to the specified bucket or database.
1. Check the token's permissions in the InfluxDB UI under **API Tokens**.
1. Ensure the organization ID is correct for Flux queries.
1. For InfluxQL with InfluxDB 2.x, verify the DBRP mapping is configured correctly.
## Configuration errors
The following errors occur when the data source is not configured correctly.
### Unknown influx version
**Error message:** "unknown influx version"
**Cause:** The query language is not properly configured in the data source settings.
**Solution:**
1. Open the data source configuration in Grafana.
1. Verify that a valid query language is selected: **Flux**, **InfluxQL**, or **SQL**.
1. Ensure the selected query language matches your InfluxDB version:
- Flux: InfluxDB 1.8+ and 2.x
- InfluxQL: InfluxDB 1.x and 2.x (with DBRP mapping)
- SQL: InfluxDB 3.x only
### Invalid data source info received
**Error message:** "invalid data source info received"
**Cause:** The data source configuration is incomplete or corrupted.
**Solution:**
1. Delete and recreate the data source.
1. Ensure all required fields are populated based on your query language:
- **Flux:** URL, Organization, Token, Default Bucket
- **InfluxQL:** URL, Database, User, Password
- **SQL:** URL, Database, Token
### DBRP mapping required
**Error message:** "database not found" or queries return no data with InfluxQL on InfluxDB 2.x
**Cause:** InfluxQL queries on InfluxDB 2.x require a Database and Retention Policy (DBRP) mapping.
**Solution:**
1. Create a DBRP mapping in InfluxDB using the CLI or API.
1. Refer to [Manage DBRP Mappings](https://docs.influxdata.com/influxdb/cloud/query-data/influxql/dbrp/) for guidance.
1. Verify the database name in Grafana matches the DBRP mapping.
## Query errors
The following errors occur when there are issues with query syntax or execution.
### Query syntax error
**Error message:** "error parsing query: found THING" or "failed to parse query: found WERE, expected ; at line 1, char 38"
**Cause:** The query contains invalid syntax.
**Solution:**
1. Check your query syntax for typos or invalid keywords.
1. For InfluxQL, verify the query follows the correct syntax:
```sql
SELECT <field> FROM <measurement> WHERE <condition>
```
1. For Flux, ensure proper pipe-forward syntax and function calls.
1. Use the InfluxDB UI or CLI to test your query directly.
### Query timeout limit exceeded
**Error message:** "query-timeout limit exceeded"
**Cause:** The query took longer than the configured timeout limit in InfluxDB.
**Solution:**
1. Reduce the time range of your query.
1. Add more specific filters to limit the data scanned.
1. Increase the query timeout setting in InfluxDB if you have admin access.
1. Optimize your query to reduce complexity.
### Too many series or data points
**Error message:** "max-series-per-database limit exceeded" or "A query returned too many data points and the results have been truncated"
**Cause:** The query is returning more data than the configured limits allow.
**Solution:**
1. Reduce the time range of your query.
1. Add filters to limit the number of series returned.
1. Increase the **Max series** setting in the data source configuration under **Advanced Database Settings**.
1. Use aggregation functions to reduce the number of data points.
1. For Flux, use `aggregateWindow()` to downsample data.
### No time column found
**Error message:** "no time column found"
**Cause:** The query result does not include a time column, which is required for time series visualization.
**Solution:**
1. Ensure your query includes a time field.
1. For Flux, verify the query includes `_time` in the output.
1. For SQL, ensure the query returns a timestamp column.
1. Check that the time field is not being filtered out or excluded.
## Health check errors
The following errors occur when testing the data source connection.
### Error getting flux query buckets
**Error message:** "error getting flux query buckets"
**Cause:** The health check query `buckets()` failed to return results.
**Solution:**
1. Verify the token has permission to list buckets.
1. Check that the organization ID is correct.
1. Ensure InfluxDB is running and accessible.
### Error connecting InfluxDB influxQL
**Error message:** "error connecting InfluxDB influxQL"
**Cause:** The health check query `SHOW MEASUREMENTS` failed.
**Solution:**
1. Verify the database name is correct.
1. Check that the user has permission to run `SHOW MEASUREMENTS`.
1. Ensure the database exists and contains measurements.
1. For InfluxDB 2.x, verify DBRP mapping is configured.
### 0 measurements found
**Error message:** "data source is working. 0 measurements found"
**Cause:** The connection is successful, but the database contains no measurements.
**Solution:**
1. Verify you are connecting to the correct database.
1. Check that data has been written to the database.
1. If the database is new, add some test data to verify the connection.
## Other common issues
The following issues don't produce specific error messages but are commonly encountered.
### Empty query results
**Cause:** The query returns no data.
**Solution:**
1. Verify the time range includes data in your database.
1. Check that the measurement and field names are correct.
1. Test the query directly in the InfluxDB UI or CLI.
1. Ensure filters are not excluding all data.
1. For InfluxQL, verify the retention policy contains data for the selected time range.
### Slow query performance
**Cause:** Queries take a long time to execute.
**Solution:**
1. Reduce the time range of your query.
1. Add more specific filters to limit the data scanned.
1. Increase the **Min time interval** setting to reduce the number of data points.
1. Check InfluxDB server performance and resource utilization.
1. For Flux, use `aggregateWindow()` to downsample data before visualization.
1. Consider using continuous queries or tasks to pre-aggregate data.
### Data appears delayed or missing recent points
**Cause:** The visualization doesn't show the most recent data.
**Solution:**
1. Check the dashboard time range and refresh settings.
1. Verify the **Min time interval** is not set too high.
1. Ensure InfluxDB has finished writing the data.
1. Check for clock synchronization issues between Grafana and InfluxDB.
## Get additional help
If you continue to experience issues after following this troubleshooting guide:
1. Check the [InfluxDB documentation](https://docs.influxdata.com/) for API-specific guidance.
1. Review the [Grafana community forums](https://community.grafana.com/) for similar issues.
1. Contact Grafana Support if you're an Enterprise, Cloud Pro or Cloud Contracted user.
1. When reporting issues, include:
- Grafana version
- InfluxDB version and product (OSS, Cloud, Enterprise)
- Query language (Flux, InfluxQL, or SQL)
- Error messages (redact sensitive information)
- Steps to reproduce
- Relevant configuration such as data source settings, HTTP method, and TLS settings (redact tokens, passwords, and other credentials)
@@ -107,8 +107,8 @@ Here is an overview of version support through 2026:
| 12.0.x | May 5, 2025 | February 5, 2026 | Patch Support |
| 12.1.x | July 22, 2025 | April 22, 2026 | Patch Support |
| 12.2.x | September 23, 2025 | June 23, 2026 | Patch Support |
| 12.3.x | November 18, 2025 | August 18, 2026 | Yet to be released |
| 12.4.x (Last minor of 12) | February 24, 2026 | November 24, 2026 | Yet to be released |
| 12.3.x | November 19, 2025 | August 19, 2026 | Patch Support |
| 12.4.x (Last minor of 12) | February 24, 2026 | May 24, 2027 | Yet to be released |
| 13.0.0 | TBD | TBD | Yet to be released |
## How are these versions supported?
@@ -223,17 +223,25 @@ To export a dashboard in its current state as a PDF, follow these steps:
1. Click the **X** at the top-right corner to close the share drawer.
### Export a dashboard as JSON
### Export a dashboard as code
Export a Grafana JSON file that contains everything you need, including layout, variables, styles, data sources, queries, and so on, so that you can later import the dashboard. To export a JSON file, follow these steps:
1. Click **Dashboards** in the main menu.
1. Open the dashboard you want to export.
1. Click the **Export** drop-down list in the top-right corner and select **Export as JSON**.
1. Click the **Export** drop-down list in the top-right corner and select **Export as code**.
The **Export dashboard JSON** drawer opens.
The **Export dashboard** drawer opens.
1. Select the dashboard JSON model that you want to export:
- **Classic** - Export dashboards created using the [current dashboard schema](https://grafana.com/docs/grafana/<GRAFANA_VERSION>/visualizations/dashboards/build-dashboards/view-dashboard-json-model/).
- **V1 Resource** - Export dashboards created using the [current dashboard schema](https://grafana.com/docs/grafana/<GRAFANA_VERSION>/visualizations/dashboards/build-dashboards/view-dashboard-json-model/) wrapped in the `spec` property of the [V1 Kubernetes-style resource](https://play.grafana.org/swagger?api=dashboard.grafana.app-v2alpha1). Choose between **JSON** and **YAML** format.
- **V2 Resource** - Export dashboards created using the [V2 Resource schema](https://play.grafana.org/swagger?api=dashboard.grafana.app-v2beta1). Choose between **JSON** and **YAML** format.
1. Do one of the following:
- Toggle the **Export for sharing externally** switch to generate the JSON with a different data source UID.
- Toggle the **Remove deployment details** switch to make the dashboard externally shareable.
1. Toggle the **Export the dashboard to use in another instance** switch to generate the JSON with a different data source UID.
1. Click **Download file** or **Copy to clipboard**.
1. Click the **X** at the top-right corner to close the share drawer.
@@ -343,6 +343,33 @@ test.describe('Panels test: Table - Kitchen Sink', { tag: ['@panels', '@table']
// TODO -- saving for another day.
});
test('Tests nested table expansion', async ({ gotoDashboardPage, selectors, page }) => {
const dashboardPage = await gotoDashboardPage({
uid: DASHBOARD_UID,
queryParams: new URLSearchParams({ editPanel: '4' }),
});
await expect(
dashboardPage.getByGrafanaSelector(selectors.components.Panels.Panel.title('Nested tables'))
).toBeVisible();
await waitForTableLoad(page);
await expect(page.locator('[role="row"]')).toHaveCount(3); // header + 2 rows
const firstRowExpander = dashboardPage
.getByGrafanaSelector(selectors.components.Panels.Visualization.TableNG.RowExpander)
.first();
await firstRowExpander.click();
await expect(page.locator('[role="row"]')).not.toHaveCount(3); // more rows are present now, it is dynamic tho.
// TODO: test sorting
await firstRowExpander.click();
await expect(page.locator('[role="row"]')).toHaveCount(3); // back to original state
});
test('Tests tooltip interactions', async ({ gotoDashboardPage, selectors }) => {
const dashboardPage = await gotoDashboardPage({
uid: DASHBOARD_UID,
-5
View File
@@ -804,11 +804,6 @@
"count": 2
}
},
"packages/grafana-ui/src/components/Table/TableNG/utils.ts": {
"@typescript-eslint/consistent-type-assertions": {
"count": 1
}
},
"packages/grafana-ui/src/components/Table/TableRT/Filter.tsx": {
"@typescript-eslint/no-explicit-any": {
"count": 1
@@ -499,6 +499,9 @@ export const versionedComponents = {
},
},
TableNG: {
RowExpander: {
'12.4.0': 'data-testid tableng row expander',
},
Filters: {
HeaderButton: {
'12.1.0': 'data-testid tableng header filter',
@@ -119,7 +119,14 @@ describe('Get y range', () => {
values: [2, 1.999999999999999, 2.000000000000001, 2, 2],
type: FieldType.number,
config: {},
state: { range: { min: 1.999999999999999, max: 2.000000000000001, delta: 0 } },
state: { range: { min: 1.9999999999999999999, max: 2.000000000000000001, delta: 0 } },
};
const decimalsNotCloseYField: Field = {
name: 'y',
values: [2, 0.0094, 0.0053, 0.0078, 0.0061],
type: FieldType.number,
config: {},
state: { range: { min: 0.0053, max: 0.0094, delta: 0.0041 } },
};
const xField: Field = {
name: 'x',
@@ -183,6 +190,11 @@ describe('Get y range', () => {
field: decimalsCloseYField,
expected: [2, 4],
},
{
description: 'decimal values which are not close to equal should not be rounded out',
field: decimalsNotCloseYField,
expected: [0.0053, 0.0094],
},
])(`should return correct range for $description`, ({ field, expected }) => {
const actual = getYRange(getAlignedFrame(field));
expect(actual).toEqual(expected);
@@ -8,6 +8,7 @@ import {
FieldType,
getFieldColorModeForField,
GrafanaTheme2,
guessDecimals,
isLikelyAscendingVector,
nullToValue,
roundDecimals,
@@ -76,8 +77,6 @@ export function getYRange(alignedFrame: DataFrame): Range.MinMax {
min = Math.min(min!, field.config.min ?? Infinity);
max = Math.max(max!, field.config.max ?? -Infinity);
// console.log({ min, max });
// if noValue is set, ensure that it is included in the range as well
const noValue = +field.config?.noValue!;
if (!Number.isNaN(noValue)) {
@@ -85,9 +84,11 @@ export function getYRange(alignedFrame: DataFrame): Range.MinMax {
max = Math.max(max, noValue);
}
const decimals = field.config.decimals ?? Math.max(guessDecimals(min), guessDecimals(max));
// call roundDecimals to mirror what is going to eventually happen in uplot
let roundedMin = roundDecimals(min, field.config.decimals ?? 0);
let roundedMax = roundDecimals(max, field.config.decimals ?? 0);
let roundedMin = roundDecimals(min, decimals);
let roundedMax = roundDecimals(max, decimals);
// if the rounded min and max are different,
// we can return the real min and max.
@@ -102,11 +103,9 @@ export function getYRange(alignedFrame: DataFrame): Range.MinMax {
roundedMax = 1;
} else if (roundedMin < 0) {
// both are negative
// max = 0;
roundedMin *= 2;
} else {
// both are positive
// min = 0;
roundedMax *= 2;
}
@@ -154,8 +154,18 @@ export function TableNG(props: TableNGProps) {
const resizeHandler = useColumnResize(onColumnResize);
const rows = useMemo(() => frameToRecords(data), [data]);
const hasNestedFrames = useMemo(() => getIsNestedTable(data.fields), [data]);
const nestedFramesFieldName = useMemo(() => {
if (!hasNestedFrames) {
return;
}
const firstNestedField = data.fields.find((f) => f.type === FieldType.nestedFrames);
if (!firstNestedField) {
return;
}
return getDisplayName(firstNestedField);
}, [data, hasNestedFrames]);
const rows = useMemo(() => frameToRecords(data, nestedFramesFieldName), [data, nestedFramesFieldName]);
const getTextColorForBackground = useMemo(() => memoize(_getTextColorForBackground, { maxSize: 1000 }), []);
const {
@@ -374,7 +384,11 @@ export function TableNG(props: TableNGProps) {
return null;
}
const expandedRecords = applySort(frameToRecords(nestedData), nestedData.fields, sortColumns);
const expandedRecords = applySort(
frameToRecords(nestedData, nestedFramesFieldName),
nestedData.fields,
sortColumns
);
if (!expandedRecords.length) {
return (
<div className={styles.noDataNested}>
@@ -398,7 +412,7 @@ export function TableNG(props: TableNGProps) {
width: COLUMN.EXPANDER_WIDTH,
minWidth: COLUMN.EXPANDER_WIDTH,
}),
[commonDataGridProps, data.fields.length, expandedRows, sortColumns, styles]
[commonDataGridProps, data.fields.length, expandedRows, sortColumns, styles, nestedFramesFieldName]
);
const fromFields = useCallback(
@@ -1,6 +1,7 @@
import { css } from '@emotion/css';
import { GrafanaTheme2 } from '@grafana/data';
import { selectors } from '@grafana/e2e-selectors';
import { t } from '@grafana/i18n';
import { useStyles2 } from '../../../../themes/ThemeContext';
@@ -16,13 +17,21 @@ export function RowExpander({ onCellExpand, isExpanded }: RowExpanderNGProps) {
}
}
return (
<div role="button" tabIndex={0} className={styles.expanderCell} onClick={onCellExpand} onKeyDown={handleKeyDown}>
<div
role="button"
tabIndex={0}
className={styles.expanderCell}
onClick={onCellExpand}
onKeyDown={handleKeyDown}
data-testid={selectors.components.Panels.Visualization.TableNG.RowExpander}
>
<Icon
aria-label={
isExpanded
? t('grafana-ui.row-expander-ng.aria-label-collapse', 'Collapse row')
: t('grafana-ui.row-expander.aria-label-expand', 'Expand row')
}
aria-expanded={isExpanded}
name={isExpanded ? 'angle-down' : 'angle-right'}
size="lg"
/>
@@ -79,7 +79,6 @@ export interface TableRow {
// Nested table properties
data?: DataFrame;
__nestedFrames?: DataFrame[];
__expanded?: boolean; // For row expansion state
// Generic typing for column values
@@ -262,7 +261,7 @@ export type TableCellStyles = (theme: GrafanaTheme2, options: TableCellStyleOpti
export type Comparator = (a: TableCellValue, b: TableCellValue) => number;
// Type for converting a DataFrame into an array of TableRows
export type FrameToRowsConverter = (frame: DataFrame) => TableRow[];
export type FrameToRowsConverter = (frame: DataFrame, nestedFramesFieldName?: string) => TableRow[];
// Type for mapping column names to their field types
export type ColumnTypes = Record<string, FieldType>;
@@ -675,10 +675,12 @@ export function applySort(
/**
* @internal
*/
export const frameToRecords = (frame: DataFrame): TableRow[] => {
export const frameToRecords = (frame: DataFrame, nestedFramesFieldName?: string): TableRow[] => {
const fnBody = `
const rows = Array(frame.length);
const values = frame.fields.map(f => f.values);
const hasNestedFrames = '${nestedFramesFieldName ?? ''}'.length > 0;
let rowCount = 0;
for (let i = 0; i < frame.length; i++) {
rows[rowCount] = {
@@ -686,11 +688,14 @@ export const frameToRecords = (frame: DataFrame): TableRow[] => {
__index: i,
${frame.fields.map((field, fieldIdx) => `${JSON.stringify(getDisplayName(field))}: values[${fieldIdx}][i]`).join(',')}
};
rowCount += 1;
if (rows[rowCount-1]['__nestedFrames']){
const childFrame = rows[rowCount-1]['__nestedFrames'];
rows[rowCount] = {__depth: 1, __index: i, data: childFrame[0]}
rowCount += 1;
rowCount++;
if (hasNestedFrames) {
const childFrame = rows[rowCount-1][${JSON.stringify(nestedFramesFieldName)}];
if (childFrame){
rows[rowCount] = {__depth: 1, __index: i, data: childFrame[0]}
rowCount++;
}
}
}
return rows;
@@ -698,8 +703,9 @@ export const frameToRecords = (frame: DataFrame): TableRow[] => {
// Creates a function that converts a DataFrame into an array of TableRows
// Uses new Function() for performance as it's faster than creating rows using loops
const convert = new Function('frame', fnBody) as FrameToRowsConverter;
return convert(frame);
// eslint-disable-next-line @typescript-eslint/consistent-type-assertions
const convert = new Function('frame', 'nestedFramesFieldName', fnBody) as FrameToRowsConverter;
return convert(frame, nestedFramesFieldName);
};
/* ----------------------------- Data grid comparator ---------------------------- */
@@ -90,6 +90,9 @@ func (r *DualReadWriter) Delete(ctx context.Context, opts DualWriteOptions) (*Pa
}
if safepath.IsDir(opts.Path) {
if err := r.authorizeDeleteFolder(ctx, opts.Path); err != nil {
return nil, err
}
return r.deleteFolder(ctx, opts)
}
@@ -527,26 +530,71 @@ func (r *DualReadWriter) authorize(ctx context.Context, parsed *ParsedResource,
return apierrors.NewForbidden(parsed.GVR.GroupResource(), parsed.Obj.GetName(), fmt.Errorf("could not determine identity type to check access"))
}
// only apply role based access if identity is not of type access policy
if idType == authlib.TypeAccessPolicy || id.GetOrgRole().Includes(identity.RoleEditor) {
if idType != authlib.TypeAnonymous {
return nil
}
return apierrors.NewForbidden(parsed.GVR.GroupResource(), parsed.Obj.GetName(),
fmt.Errorf("must be admin or editor to access files from provisioning"))
fmt.Errorf("must be logged in to access files from provisioning"))
}
func (r *DualReadWriter) authorizeCreateFolder(ctx context.Context, _ string) error {
func (r *DualReadWriter) authorizeCreateFolder(ctx context.Context, path string) error {
id, err := identity.GetRequester(ctx)
if err != nil {
return apierrors.NewUnauthorized(err.Error())
}
// Determine the parent folder where this folder will be created
parentFolderUID := ParentFolder(path, r.repo.Config())
rsp, err := r.access.Check(ctx, id, authlib.CheckRequest{
Group: FolderResource.Group,
Resource: FolderResource.Resource,
Namespace: id.GetNamespace(),
Name: "",
Verb: utils.VerbCreate,
}, parentFolderUID)
if err != nil || !rsp.Allowed {
return apierrors.NewForbidden(FolderResource.GroupResource(), "",
fmt.Errorf("no permission to create folder in parent folder %s", parentFolderUID))
}
return apierrors.NewForbidden(FolderResource.GroupResource(), "",
fmt.Errorf("must have permission to access folders with provisioning"))
}
func (r *DualReadWriter) authorizeDeleteFolder(ctx context.Context, path string) error {
id, err := identity.GetRequester(ctx)
if err != nil {
return apierrors.NewUnauthorized(err.Error())
}
// Parse the folder being deleted to get its UID
folderToDelete := ParseFolder(path, r.repo.Config().GetName())
// Determine the parent folder for hierarchical permission checking
parentFolderUID := ParentFolder(path, r.repo.Config())
rsp, err := r.access.Check(ctx, id, authlib.CheckRequest{
Group: FolderResource.Group,
Resource: FolderResource.Resource,
Namespace: id.GetNamespace(),
Name: folderToDelete.ID,
Verb: utils.VerbDelete,
}, parentFolderUID)
if err != nil || !rsp.Allowed {
return apierrors.NewForbidden(FolderResource.GroupResource(), folderToDelete.ID,
fmt.Errorf("no permission to delete folder %s", folderToDelete.ID))
}
// Simple role based access for now
if id.GetOrgRole().Includes(identity.RoleEditor) {
return nil
}
return apierrors.NewForbidden(FolderResource.GroupResource(), "",
return apierrors.NewForbidden(FolderResource.GroupResource(), folderToDelete.ID,
fmt.Errorf("must be admin or editor to access folders with provisioning"))
}
@@ -77,6 +77,10 @@ var (
"user.sync.user-externalUID-mismatch",
errutil.WithPublicMessage("User externalUID mismatch"),
)
errSCIMAuthModuleMismatch = errutil.Unauthorized(
"user.sync.scim-auth-module-mismatch",
errutil.WithPublicMessage("User was provisioned via SCIM and must login via SAML"),
)
)
var (
@@ -308,6 +312,21 @@ func (s *UserSync) SyncUserHook(ctx context.Context, id *authn.Identity, _ *auth
// just try to fetch the user one more to make the other request work.
if errors.Is(err, user.ErrUserAlreadyExists) {
usr, _, err = s.getUser(ctx, id)
// Check if this is a SCIM-provisioned user trying to login via an auth module that is not SAML or GCOM
if err == nil && usr != nil && usr.IsProvisioned && id.AuthenticatedBy != login.GrafanaComAuthModule {
_, authErr := s.authInfoService.GetAuthInfo(ctx, &login.GetAuthInfoQuery{
UserId: usr.ID,
AuthModule: id.AuthenticatedBy,
})
if errors.Is(authErr, user.ErrUserNotFound) {
s.log.FromContext(ctx).Error("SCIM-provisioned user attempted login via non-SAML auth module",
"user_id", usr.ID,
"attempted_module", id.AuthenticatedBy,
)
return errSCIMAuthModuleMismatch.Errorf("user was provisioned via SCIM but attempted login via %s", id.AuthenticatedBy)
}
}
}
if err != nil {
@@ -1926,3 +1926,100 @@ func TestUserSync_SCIMLoginUsageStatSet(t *testing.T) {
finalCount := finalStats["stats.features.scim.has_successful_login.count"].(int)
require.Equal(t, int(1), finalCount)
}
// TestUserSync_SyncUserHook_SCIMAuthModuleMismatch verifies that the sync
// hook rejects a login with errSCIMAuthModuleMismatch when the matched user
// is SCIM-provisioned (IsProvisioned: true) and no auth-info record exists
// for the auth module used to authenticate (here "oauth_azuread").
//
// Mock choreography — expectations are consumed in order, Once() each:
//  1. GetByEmail -> ErrUserNotFound: the initial lookup misses.
//  2. Create     -> ErrUserAlreadyExists: forces the hook onto its
//     retry-fetch path.
//  3. GetByEmail -> provisioned user: the retry fetch succeeds.
//  4. GetAuthInfo for "oauth_azuread" -> ErrUserNotFound: no auth link for
//     that module exists, so the login must be refused.
func TestUserSync_SyncUserHook_SCIMAuthModuleMismatch(t *testing.T) {
	userSrv := usertest.NewMockService(t)
	authInfoSrv := authinfotest.NewMockAuthInfoService(t)

	userSrv.On("GetByEmail", mock.Anything, mock.Anything).Return(nil, user.ErrUserNotFound).Once()
	userSrv.On("Create", mock.Anything, mock.Anything).Return(nil, user.ErrUserAlreadyExists).Once()
	userSrv.On("GetByEmail", mock.Anything, mock.Anything).Return(&user.User{
		ID:            1,
		Email:         "test@test.com",
		IsProvisioned: true,
	}, nil).Once()
	authInfoSrv.On("GetAuthInfo", mock.Anything, mock.MatchedBy(func(q *login.GetAuthInfoQuery) bool {
		return q.AuthModule == "oauth_azuread"
	})).Return(nil, user.ErrUserNotFound).Once()

	// Wire the sync service with the mocked collaborators and no-op
	// tracing/quota/feature dependencies.
	s := ProvideUserSync(
		userSrv,
		authinfoimpl.ProvideOSSUserProtectionService(),
		authInfoSrv,
		&quotatest.FakeQuotaService{},
		tracing.NewNoopTracerService(),
		featuremgmt.WithFeatures(),
		setting.NewCfg(),
		nil,
	)

	email := "test@test.com"
	err := s.SyncUserHook(context.Background(), &authn.Identity{
		AuthenticatedBy: "oauth_azuread",
		ClientParams: authn.ClientParams{
			SyncUser:    true,
			AllowSignUp: true,
			LookUpParams: login.UserLookupParams{
				Email: &email,
			},
		},
	}, nil)

	// The error must be the sentinel and must mention both SCIM and the
	// attempted auth module (matching the Errorf in the hook).
	require.Error(t, err)
	assert.ErrorIs(t, err, errSCIMAuthModuleMismatch)
	assert.Contains(t, err.Error(), "SCIM")
	assert.Contains(t, err.Error(), "oauth_azuread")
}
// TestUserSync_SyncUserHook_SCIMUserAllowsGCOMLogin verifies the allowed
// path: a SCIM-provisioned user authenticating via the grafana.com auth
// module passes the sync hook without error, even though no auth-info
// record exists for that module.
//
// NOTE(review): the identical GetAuthInfo expectation is registered twice
// with Once() — presumably the hook queries auth info twice on this path
// (once up front and once after the Create -> ErrUserAlreadyExists retry);
// confirm against SyncUserHook before simplifying.
func TestUserSync_SyncUserHook_SCIMUserAllowsGCOMLogin(t *testing.T) {
	userSrv := usertest.NewMockService(t)
	authInfoSrv := authinfotest.NewMockAuthInfoService(t)

	authInfoSrv.On("GetAuthInfo", mock.Anything, mock.MatchedBy(func(q *login.GetAuthInfoQuery) bool {
		return q.AuthModule == login.GrafanaComAuthModule && q.AuthId == "gcom-user-123"
	})).Return(nil, user.ErrUserNotFound).Once()
	userSrv.On("GetByEmail", mock.Anything, mock.Anything).Return(nil, user.ErrUserNotFound).Once()
	userSrv.On("Create", mock.Anything, mock.Anything).Return(nil, user.ErrUserAlreadyExists).Once()
	authInfoSrv.On("GetAuthInfo", mock.Anything, mock.MatchedBy(func(q *login.GetAuthInfoQuery) bool {
		return q.AuthModule == login.GrafanaComAuthModule && q.AuthId == "gcom-user-123"
	})).Return(nil, user.ErrUserNotFound).Once()
	userSrv.On("GetByEmail", mock.Anything, mock.Anything).Return(&user.User{
		ID:            1,
		Email:         "test@test.com",
		IsProvisioned: true,
	}, nil).Once()

	// Wire the sync service with the mocked collaborators and no-op
	// tracing/quota/feature dependencies.
	s := ProvideUserSync(
		userSrv,
		authinfoimpl.ProvideOSSUserProtectionService(),
		authInfoSrv,
		&quotatest.FakeQuotaService{},
		tracing.NewNoopTracerService(),
		featuremgmt.WithFeatures(),
		setting.NewCfg(),
		nil,
	)

	email := "test@test.com"
	err := s.SyncUserHook(context.Background(), &authn.Identity{
		AuthenticatedBy: login.GrafanaComAuthModule,
		AuthID:          "gcom-user-123",
		ClientParams: authn.ClientParams{
			SyncUser:    true,
			AllowSignUp: true,
			LookUpParams: login.UserLookupParams{
				Email: &email,
			},
		},
	}, nil)

	// GCOM logins must not be blocked by the SCIM auth-module guard.
	require.NoError(t, err)
}
+10 -1
View File
@@ -304,8 +304,15 @@ type DeleteDashboardCommand struct {
RemovePermissions bool
}
// ProvisioningConfig describes one file-provisioning reader configuration,
// used when reconciling (and cleaning up) provisioned dashboards.
type ProvisioningConfig struct {
	// Name is the provisioning reader name; it is matched against a
	// dashboard's manager identity during orphan/duplicate cleanup.
	Name string
	// OrgID is the organization the dashboards are provisioned into.
	OrgID int64
	// Folder is the title of the folder the dashboards are provisioned into.
	Folder string
	// AllowUIUpdates indicates whether the provisioned dashboards may be
	// edited from the UI; automated cleanup is skipped when set.
	AllowUIUpdates bool
}
// DeleteOrphanedProvisionedDashboardsCommand carries the current set of
// provisioning readers so that provisioned dashboards managed by a reader
// no longer present in the configuration can be removed.
type DeleteOrphanedProvisionedDashboardsCommand struct {
	// ReaderNames lists reader names only.
	// NOTE(review): appears superseded by Config — callers in this change
	// populate Config and the service derives names from it; confirm whether
	// ReaderNames is still read anywhere before removing.
	ReaderNames []string
	// Config holds the full provisioning configuration for each reader.
	Config []ProvisioningConfig
}
type DashboardProvisioningSearchResults struct {
@@ -405,6 +412,8 @@ type DashboardSearchProjection struct {
FolderTitle string
SortMeta int64
Tags []string
ManagedBy utils.ManagerKind
ManagerId string
Deleted *time.Time
}
@@ -877,24 +877,32 @@ func (dr *DashboardServiceImpl) waitForSearchQuery(ctx context.Context, query *d
}
func (dr *DashboardServiceImpl) DeleteOrphanedProvisionedDashboards(ctx context.Context, cmd *dashboards.DeleteOrphanedProvisionedDashboardsCommand) error {
// cleanup duplicate provisioned dashboards first (this will have the same name and external_id)
// note: only works in modes 1-3
if err := dr.DeleteDuplicateProvisionedDashboards(ctx); err != nil {
dr.log.Error("Failed to delete duplicate provisioned dashboards", "error", err)
}
// check each org for orphaned provisioned dashboards
orgs, err := dr.orgService.Search(ctx, &org.SearchOrgsQuery{})
if err != nil {
return err
}
orgIDs := make([]int64, 0, len(orgs))
for _, org := range orgs {
orgIDs = append(orgIDs, org.ID)
}
if err := dr.DeleteDuplicateProvisionedDashboards(ctx, orgIDs, cmd.Config); err != nil {
dr.log.Error("Failed to delete duplicate provisioned dashboards", "error", err)
}
currentNames := make([]string, 0, len(cmd.Config))
for _, cfg := range cmd.Config {
currentNames = append(currentNames, cfg.Name)
}
for _, org := range orgs {
ctx, _ := identity.WithServiceIdentity(ctx, org.ID)
// find all dashboards in the org that have a file repo set that is not in the given readers list
foundDashs, err := dr.searchProvisionedDashboardsThroughK8s(ctx, &dashboards.FindPersistedDashboardsQuery{
ManagedBy: utils.ManagerKindClassicFP, //nolint:staticcheck
ManagerIdentityNotIn: cmd.ReaderNames,
ManagerIdentityNotIn: currentNames,
OrgId: org.ID,
})
if err != nil {
@@ -921,7 +929,129 @@ func (dr *DashboardServiceImpl) DeleteOrphanedProvisionedDashboards(ctx context.
return nil
}
func (dr *DashboardServiceImpl) DeleteDuplicateProvisionedDashboards(ctx context.Context) error {
// searchExistingProvisionedData fetches provisioned data for the purposes of
// duplication cleanup. Returns the set of folder UIDs for top-level folders with
// the given title, and the set of resources contained in those folders.
// When no folder with the given title exists, all three return values are nil.
func (dr *DashboardServiceImpl) searchExistingProvisionedData(
	ctx context.Context, orgID int64, folderTitle string,
) ([]string, []dashboards.DashboardSearchProjection, error) {
	// Run the searches under the service identity for this org.
	ctx, user := identity.WithServiceIdentity(ctx, orgID)

	cmd := folder.SearchFoldersQuery{
		OrgID:           orgID,
		SignedInUser:    user,
		Title:           folderTitle,
		TitleExactMatch: true,
	}
	searchResults, err := dr.folderService.SearchFolders(ctx, cmd)
	if err != nil {
		return nil, nil, fmt.Errorf("checking if provisioning reset is required: %w", err)
	}

	var matchingFolders []string //nolint:prealloc
	for _, result := range searchResults {
		// Fetch the full folder so we can inspect its ParentUID.
		f, err := dr.folderService.Get(ctx, &folder.GetFolderQuery{
			OrgID:        orgID,
			UID:          &result.UID,
			SignedInUser: user,
		})
		if err != nil {
			return nil, nil, err
		}

		// We are only interested in folders at the top-level of the folder hierarchy.
		// Cleanup is not performed for provisioned folders that were moved to
		// a different location.
		if f.ParentUID != "" {
			continue
		}

		matchingFolders = append(matchingFolders, f.UID)
	}

	if len(matchingFolders) == 0 {
		// If there are no folders with the same title as the provisioned folder we
		// are looking for, there is nothing to be cleaned up.
		return nil, nil, nil
	}

	// Collect every resource located inside the matching folders.
	resources, err := dr.FindDashboards(ctx, &dashboards.FindPersistedDashboardsQuery{
		OrgId:        orgID,
		SignedInUser: user,
		FolderUIDs:   matchingFolders,
	})
	if err != nil {
		return nil, nil, err
	}

	return matchingFolders, resources, nil
}
// maybeResetProvisioning will check for duplicated provisioned dashboards in the database. These duplications
// happen when multiple provisioned dashboards of the same title are found, or multiple provisioned
// folders are found. In this case, provisioned resources are deleted, allowing the provisioning
// process to start from scratch after this function returns.
//
// The whole routine is best-effort: errors are logged and the next org is
// processed; nothing is returned to the caller.
func (dr *DashboardServiceImpl) maybeResetProvisioning(ctx context.Context, orgs []int64, configs []dashboards.ProvisioningConfig) {
	// Bail out unless the configuration is eligible for automated cleanup
	// (see canBeAutomaticallyCleanedUp for the exact conditions).
	if skipReason := canBeAutomaticallyCleanedUp(configs); skipReason != "" {
		dr.log.Info("not eligible for automated cleanup", "reason", skipReason)
		return
	}

	// Eligibility guarantees all configs share one non-empty folder title,
	// so the first config's folder is representative.
	folderTitle := configs[0].Folder
	provisionedNames := map[string]bool{}
	for _, c := range configs {
		provisionedNames[c.Name] = true
	}

	for _, orgID := range orgs {
		// Shadow ctx with the per-org service identity.
		ctx, user := identity.WithServiceIdentity(ctx, orgID)

		provFolders, resources, err := dr.searchExistingProvisionedData(ctx, orgID, folderTitle)
		if err != nil {
			dr.log.Error("failed to search for provisioned data for cleanup", "org", orgID, "error", err)
			continue
		}

		steps, err := cleanupSteps(provFolders, resources, provisionedNames)
		if err != nil {
			dr.log.Warn("not possible to perform automated duplicate cleanup", "org", orgID, "error", err)
			continue
		}

		// Execute the deletion steps in the order they were computed; each
		// step is either a dashboard or a folder delete.
		for _, step := range steps {
			var err error
			switch step.Type {
			case searchstore.TypeDashboard:
				err = dr.deleteDashboard(ctx, 0, step.UID, orgID, false)
			case searchstore.TypeFolder:
				err = dr.folderService.Delete(ctx, &folder.DeleteFolderCommand{
					OrgID:        orgID,
					SignedInUser: user,
					UID:          step.UID,
				})
			}
			// Per-step failures are logged but do not stop the remaining steps.
			if err == nil {
				dr.log.Info("deleted duplicated provisioned resource",
					"type", step.Type, "uid", step.UID,
				)
			} else {
				dr.log.Error("failed to delete duplicated provisioned resource",
					"type", step.Type, "uid", step.UID, "error", err,
				)
			}
		}
	}
}
func (dr *DashboardServiceImpl) DeleteDuplicateProvisionedDashboards(ctx context.Context, orgs []int64, configs []dashboards.ProvisioningConfig) error {
// Start from scratch if duplications that cannot be fixed by the logic
// below are found in the database.
dr.maybeResetProvisioning(ctx, orgs, configs)
// cleanup duplicate provisioned dashboards (i.e., with the same name and external_id).
// Note: only works in modes 1-3. This logic can be removed once mode5 is
// enabled everywhere.
duplicates, err := dr.dashboardStore.GetDuplicateProvisionedDashboards(ctx)
if err != nil {
return err
@@ -1511,6 +1641,8 @@ func (dr *DashboardServiceImpl) FindDashboards(ctx context.Context, query *dashb
FolderTitle: folderTitle,
FolderID: folderID,
FolderSlug: slugify.Slugify(folderTitle),
ManagedBy: hit.ManagedBy.Kind,
ManagerId: hit.ManagedBy.ID,
Tags: hit.Tags,
}
@@ -779,7 +779,7 @@ func TestDeleteOrphanedProvisionedDashboards(t *testing.T) {
}, nil).Twice()
err := service.DeleteOrphanedProvisionedDashboards(context.Background(), &dashboards.DeleteOrphanedProvisionedDashboardsCommand{
ReaderNames: []string{"test"},
Config: []dashboards.ProvisioningConfig{{Name: "test"}},
})
require.NoError(t, err)
k8sCliMock.AssertExpectations(t)
@@ -874,7 +874,7 @@ func TestDeleteOrphanedProvisionedDashboards(t *testing.T) {
}, nil).Once()
err := singleOrgService.DeleteOrphanedProvisionedDashboards(ctx, &dashboards.DeleteOrphanedProvisionedDashboardsCommand{
ReaderNames: []string{"test"},
Config: []dashboards.ProvisioningConfig{{Name: "test"}},
})
require.NoError(t, err)
k8sCliMock.AssertExpectations(t)
@@ -906,7 +906,7 @@ func TestDeleteOrphanedProvisionedDashboards(t *testing.T) {
}, nil)
err := singleOrgService.DeleteOrphanedProvisionedDashboards(ctx, &dashboards.DeleteOrphanedProvisionedDashboardsCommand{
ReaderNames: []string{"test"},
Config: []dashboards.ProvisioningConfig{{Name: "test"}},
})
require.NoError(t, err)
k8sCliMock.AssertExpectations(t)
@@ -0,0 +1,107 @@
package service
import (
"errors"
"fmt"
"github.com/grafana/grafana/pkg/apimachinery/utils"
"github.com/grafana/grafana/pkg/services/dashboards"
"github.com/grafana/grafana/pkg/services/sqlstore/searchstore"
)
// canBeAutomaticallyCleanedUp reports why automated duplicate cleanup must
// be skipped for the given provisioning configs, or "" when cleanup may
// proceed. Cleanup is only considered safe when every provisioned dashboard
// lives in the same, non-empty folder (by title) and none of the configs
// allow UI updates — otherwise resources the user may have touched could be
// deleted.
func canBeAutomaticallyCleanedUp(configs []dashboards.ProvisioningConfig) string {
	if len(configs) == 0 {
		return "no provisioned dashboards"
	}

	// The first config's folder title is the reference every other config
	// must match.
	reference := configs[0].Folder
	if reference == "" {
		return fmt.Sprintf("dashboard has no folder: %s", configs[0].Name)
	}

	for _, c := range configs {
		switch {
		case c.AllowUIUpdates:
			return "contains dashboards with allowUiUpdates"
		case c.Folder != reference:
			return "dashboards provisioned across multiple folders"
		}
	}

	// All conditions met: one folder, no UI-updatable dashboards.
	return ""
}
// deleteProvisionedResource identifies a single resource scheduled for
// deletion during duplicate-provisioning cleanup.
type deleteProvisionedResource struct {
	// Type is the search-store resource kind (searchstore.TypeDashboard or
	// searchstore.TypeFolder); it selects which deletion API is used.
	Type string
	// UID is the unique identifier of the dashboard or folder to delete.
	UID string
}
// cleanupSteps computes the ordered sequence of deletions required to reset
// provisioned resources when duplication is detected, allowing provisioning
// to start from scratch. Dashboards are always queued ahead of folders.
// A nil slice with a nil error means there is nothing to clean up; a non-nil
// error means cleanup cannot proceed safely.
func cleanupSteps(provFolders []string, resources []dashboards.DashboardSearchProjection, configDashboards map[string]bool) ([]deleteProvisionedResource, error) {
	duplicateSeen := false
	userCreatedSeen := false
	seenNames := map[string]struct{}{}
	var dashboardDeletes []deleteProvisionedResource //nolint:prealloc

	for _, res := range resources {
		// nolint:staticcheck
		if res.IsFolder || res.ManagedBy != utils.ManagerKindClassicFP {
			// Folders and anything not managed by classic file provisioning
			// count as user-created content.
			userCreatedSeen = true
			continue
		}

		// Only delete dashboards if they are included in the provisioning
		// configuration for this instance.
		if !configDashboards[res.ManagerId] {
			continue
		}

		// A manager id seen twice marks a duplicated provisioned dashboard.
		if _, dup := seenNames[res.ManagerId]; dup {
			duplicateSeen = true
		}
		seenNames[res.ManagerId] = struct{}{}

		dashboardDeletes = append(dashboardDeletes, deleteProvisionedResource{
			Type: searchstore.TypeDashboard,
			UID:  res.UID,
		})
	}

	switch len(provFolders) {
	case 0:
		// When there are no provisioned folders, there is nothing to do.
		return nil, nil
	case 1:
		// Keep the single folder; delete the provisioned dashboards only if
		// duplication was found among them.
		if duplicateSeen {
			return dashboardDeletes, nil
		}
		return nil, nil
	default:
		// Multiple folders *and* a user-created resource in one of them:
		// bail, as we wouldn't be able to delete one of the duplicated
		// folders without destroying user content.
		if userCreatedSeen {
			return nil, errors.New("multiple provisioning folders exist with at least one user-created resource")
		}

		// Delete provisioned dashboards first, and then the folders.
		steps := dashboardDeletes
		for _, folderUID := range provFolders {
			steps = append(steps, deleteProvisionedResource{
				Type: searchstore.TypeFolder,
				UID:  folderUID,
			})
		}
		return steps, nil
	}
}
@@ -0,0 +1,279 @@
package service
import (
"testing"
"github.com/grafana/grafana/pkg/apimachinery/utils"
"github.com/grafana/grafana/pkg/services/dashboards"
"github.com/grafana/grafana/pkg/services/sqlstore/searchstore"
"github.com/stretchr/testify/require"
)
// Test_canBeAutomaticallyCleanedUp enumerates the skip reasons returned by
// canBeAutomaticallyCleanedUp — empty config, missing folder title, mixed
// folder titles, allowUiUpdates — plus the eligible case, where the expected
// skip reason is the zero-value empty string.
func Test_canBeAutomaticallyCleanedUp(t *testing.T) {
	testCases := []struct {
		name    string
		configs []dashboards.ProvisioningConfig
		// expectedSkip is "" when cleanup is allowed.
		expectedSkip string
	}{
		{
			name:         "no dashboards defined in the configuration",
			configs:      []dashboards.ProvisioningConfig{},
			expectedSkip: "no provisioned dashboards",
		},
		{
			// Only the first config's empty folder yields the "no folder" reason.
			name: "first defined dashboard has no folder defined",
			configs: []dashboards.ProvisioningConfig{
				{Name: "1", Folder: ""},
				{Folder: "f1"},
			},
			expectedSkip: "dashboard has no folder: 1",
		},
		{
			// An empty folder on a non-first config differs from the reference
			// folder title, so it surfaces as the multiple-folders reason.
			name: "one of the provisioned dashboards has no folder defined",
			configs: []dashboards.ProvisioningConfig{
				{Name: "1", Folder: "f1"},
				{Name: "2", Folder: "f1"},
				{Name: "3", Folder: ""},
				{Name: "4", Folder: "f1"},
			},
			expectedSkip: "dashboards provisioned across multiple folders",
		},
		{
			name: "one of the provisioned dashboards allows UI updates",
			configs: []dashboards.ProvisioningConfig{
				{Name: "1", Folder: "f1"},
				{Name: "2", Folder: "f1", AllowUIUpdates: true},
				{Name: "3", Folder: "f1"},
				{Name: "4", Folder: "f1"},
			},
			expectedSkip: "contains dashboards with allowUiUpdates",
		},
		{
			name: "one of the provisioned dashboards is in a different folder",
			configs: []dashboards.ProvisioningConfig{
				{Name: "1", Folder: "f1"},
				{Name: "2", Folder: "f1"},
				{Name: "3", Folder: "f1"},
				{Name: "4", Folder: "different"},
			},
			expectedSkip: "dashboards provisioned across multiple folders",
		},
		{
			// Happy path: expectedSkip defaults to "" (cleanup allowed).
			name: "can be skipped when all conditions are met",
			configs: []dashboards.ProvisioningConfig{
				{Name: "1", Folder: "f1"},
				{Name: "2", Folder: "f1"},
				{Name: "3", Folder: "f1"},
				{Name: "4", Folder: "f1"},
			},
		},
	}

	for _, tc := range testCases {
		t.Run(tc.name, func(t *testing.T) {
			require.Equal(t, tc.expectedSkip, canBeAutomaticallyCleanedUp(tc.configs))
		})
	}
}
// Test_cleanupSteps verifies the deletion plan produced by cleanupSteps for
// various mixes of provisioned and user-created dashboards/folders. Each case
// feeds the function a set of provisioned folder UIDs, the search results for
// resources in those folders, and the set of dashboard names declared in the
// provisioning config, then checks the returned delete steps (or error).
func Test_cleanupSteps(t *testing.T) {
	// Readability aliases for the isFolder argument of the helpers below.
	isDashboard, isFolder := false, true
	// fromUser builds a search projection for a user-created resource
	// (no ManagedBy set).
	fromUser := func(uid, name string, isFolder bool) dashboards.DashboardSearchProjection {
		return dashboards.DashboardSearchProjection{
			UID:       uid,
			ManagerId: name,
			IsFolder:  isFolder,
		}
	}
	// provisioned builds a search projection marked as managed by classic
	// file provisioning.
	provisioned := func(uid, name string, isFolder bool) dashboards.DashboardSearchProjection {
		dashboard := fromUser(uid, name, isFolder)
		dashboard.ManagedBy = utils.ManagerKindClassicFP //nolint:staticcheck
		return dashboard
	}
	testCases := []struct {
		name                 string
		provisionedFolders   []string
		provisionedResources []dashboards.DashboardSearchProjection
		configDashboards     []string
		expectedSteps        []deleteProvisionedResource
		expectedErr          string
	}{
		{
			name:               "no provisioned folders, nothing to do",
			provisionedFolders: []string{},
			configDashboards:   []string{"Provisioned1", "Provisioned2", "Provisioned3"},
			provisionedResources: []dashboards.DashboardSearchProjection{
				provisioned("d1", "Provisioned1", isDashboard),
			},
		},
		{
			name:               "multiple folders, a user-created dashboard in one of them",
			provisionedFolders: []string{"folder1", "folder2"},
			configDashboards:   []string{"Provisioned1", "Provisioned2", "Provisioned3"},
			provisionedResources: []dashboards.DashboardSearchProjection{
				provisioned("d1", "Provisioned1", isDashboard),
				provisioned("d2", "Provisioned2", isDashboard),
				fromUser("d3", "User1", isDashboard),
				provisioned("d4", "Provisioned3", isDashboard),
			},
			expectedErr: "multiple provisioning folders exist with at least one user-created resource",
		},
		{
			name:               "multiple folders, a user-created folder in one of them",
			provisionedFolders: []string{"folder1", "folder2"},
			configDashboards:   []string{"Provisioned1", "Provisioned2", "Provisioned3", "Provisioned4"},
			provisionedResources: []dashboards.DashboardSearchProjection{
				provisioned("d1", "Provisioned1", isDashboard),
				provisioned("d2", "Provisioned2", isDashboard),
				provisioned("d3", "Provisioned3", isDashboard),
				fromUser("f1", "UserFolder1", isFolder),
			},
			expectedErr: "multiple provisioning folders exist with at least one user-created resource",
		},
		{
			name:               "single folder, some dashboards duplicated",
			provisionedFolders: []string{"folder1"},
			configDashboards:   []string{"Provisioned1", "Provisioned2", "Provisioned3", "Provisioned4"},
			provisionedResources: []dashboards.DashboardSearchProjection{
				// Provisioned1 is duplicated.
				provisioned("d1", "Provisioned1", isDashboard),
				provisioned("d2", "Provisioned2", isDashboard),
				provisioned("d3", "Provisioned1", isDashboard),
				provisioned("d4", "Provisioned3", isDashboard),
			},
			expectedSteps: []deleteProvisionedResource{
				{Type: searchstore.TypeDashboard, UID: "d1"},
				{Type: searchstore.TypeDashboard, UID: "d2"},
				{Type: searchstore.TypeDashboard, UID: "d3"},
				{Type: searchstore.TypeDashboard, UID: "d4"},
			},
		},
		{
			name:               "single folder, duplicated dashboards, user-created dashboards are ignored",
			provisionedFolders: []string{"folder1"},
			configDashboards:   []string{"Provisioned1", "Provisioned2", "Provisioned3", "Provisioned4"},
			provisionedResources: []dashboards.DashboardSearchProjection{
				// Provisioned1 is duplicated.
				provisioned("d1", "Provisioned1", isDashboard),
				provisioned("d2", "Provisioned2", isDashboard),
				fromUser("d3", "User1", isDashboard),
				provisioned("d4", "Provisioned3", isDashboard),
				provisioned("d5", "Provisioned1", isDashboard),
			},
			// User dashboard (d3) is not deleted.
			expectedSteps: []deleteProvisionedResource{
				{Type: searchstore.TypeDashboard, UID: "d1"},
				{Type: searchstore.TypeDashboard, UID: "d2"},
				{Type: searchstore.TypeDashboard, UID: "d4"},
				{Type: searchstore.TypeDashboard, UID: "d5"},
			},
		},
		{
			name:               "single folder, duplicated dashboards, user-created folders are ignored",
			provisionedFolders: []string{"folder1"},
			configDashboards:   []string{"Provisioned1", "Provisioned2", "Provisioned3"},
			provisionedResources: []dashboards.DashboardSearchProjection{
				// Provisioned1 is duplicated.
				provisioned("d1", "Provisioned1", isDashboard),
				provisioned("d2", "Provisioned2", isDashboard),
				provisioned("d3", "Provisioned3", isDashboard),
				provisioned("d4", "Provisioned1", isDashboard),
				fromUser("f1", "UserFolder1", isFolder),
			},
			// User folder (f1) is not deleted.
			expectedSteps: []deleteProvisionedResource{
				{Type: searchstore.TypeDashboard, UID: "d1"},
				{Type: searchstore.TypeDashboard, UID: "d2"},
				{Type: searchstore.TypeDashboard, UID: "d3"},
				{Type: searchstore.TypeDashboard, UID: "d4"},
			},
		},
		{
			name:               "multiple folders, only provisioned dashboards",
			provisionedFolders: []string{"folder1", "folder2"},
			configDashboards:   []string{"Provisioned1", "Provisioned2", "Provisioned3", "Provisioned4"},
			provisionedResources: []dashboards.DashboardSearchProjection{
				provisioned("d1", "Provisioned1", isDashboard),
				provisioned("d2", "Provisioned2", isDashboard),
				provisioned("d3", "Provisioned3", isDashboard),
				provisioned("d4", "Provisioned4", isDashboard),
			},
			// Delete all dashboards, then all folders.
			expectedSteps: []deleteProvisionedResource{
				{Type: searchstore.TypeDashboard, UID: "d1"},
				{Type: searchstore.TypeDashboard, UID: "d2"},
				{Type: searchstore.TypeDashboard, UID: "d3"},
				{Type: searchstore.TypeDashboard, UID: "d4"},
				{Type: searchstore.TypeFolder, UID: "folder1"},
				{Type: searchstore.TypeFolder, UID: "folder2"},
			},
		},
		{
			name:               "single folder, only deletes dashboards defined in the config file",
			provisionedFolders: []string{"folder1"},
			configDashboards:   []string{"Provisioned1", "Provisioned2"},
			provisionedResources: []dashboards.DashboardSearchProjection{
				provisioned("d1", "Provisioned1", isDashboard),
				provisioned("d2", "Provisioned2", isDashboard),
				provisioned("d3", "Provisioned1", isDashboard),
				provisioned("d4", "Provisioned4", isDashboard),
				provisioned("d5", "Provisioned4", isDashboard),
			},
			// Delete duplicated dashboards, but keep Provisioned4, since it's not in the config file.
			expectedSteps: []deleteProvisionedResource{
				{Type: searchstore.TypeDashboard, UID: "d1"},
				{Type: searchstore.TypeDashboard, UID: "d2"},
				{Type: searchstore.TypeDashboard, UID: "d3"},
			},
		},
		{
			name:               "single folder, no duplicated dashboards",
			provisionedFolders: []string{"folder1"},
			configDashboards:   []string{"Provisioned1", "Provisioned2", "Provisioned3", "Provisioned4"},
			provisionedResources: []dashboards.DashboardSearchProjection{
				provisioned("d1", "Provisioned1", isDashboard),
				provisioned("d2", "Provisioned2", isDashboard),
				provisioned("d3", "Provisioned3", isDashboard),
				provisioned("d4", "Provisioned4", isDashboard),
			},
			expectedSteps: nil, // no duplicates, nothing to do
		},
		{
			name:               "single folder, no duplicated dashboards, multiple user-created resources",
			provisionedFolders: []string{"folder1"},
			configDashboards:   []string{"Provisioned1", "Provisioned2", "Provisioned3", "Provisioned4"},
			provisionedResources: []dashboards.DashboardSearchProjection{
				provisioned("d1", "Provisioned1", isDashboard),
				provisioned("d2", "Provisioned2", isDashboard),
				fromUser("f1", "UserFolder1", isFolder),
				provisioned("d3", "Provisioned3", isDashboard),
				fromUser("d4", "User1", isDashboard),
				provisioned("d5", "Provisioned4", isDashboard),
				fromUser("d6", "User2", isDashboard),
				fromUser("f2", "UserFolder2", isFolder),
			},
			expectedSteps: nil, // no duplicates in the provisioned set, nothing to do
		},
	}
	for _, tc := range testCases {
		t.Run(tc.name, func(t *testing.T) {
			// cleanupSteps takes the config dashboard names as a set.
			provisionedSet := make(map[string]bool)
			for _, name := range tc.configDashboards {
				provisionedSet[name] = true
			}
			steps, err := cleanupSteps(tc.provisionedFolders, tc.provisionedResources, provisionedSet)
			if tc.expectedErr == "" {
				require.NoError(t, err)
				require.Equal(t, tc.expectedSteps, steps)
			} else {
				require.Error(t, err)
				require.Equal(t, tc.expectedErr, err.Error())
			}
		})
	}
}
@@ -274,6 +274,11 @@ func (s *Service) listDashboardVersionsThroughK8s(
continueToken = tempOut.GetContinue()
}
// Update the continue token on the response to reflect the actual position after all fetched items.
// Without this, the response would return the token from the first fetch, causing duplicate items
// on subsequent pages when multiple fetches were needed to fill the requested limit.
out.SetContinue(continueToken)
return out, nil
}
@@ -268,6 +268,58 @@ func TestListDashboardVersions(t *testing.T) {
}}}, res)
})
t.Run("List returns continue token when first fetch satisfies limit with more pages", func(t *testing.T) {
dashboardService := dashboards.NewFakeDashboardService(t)
dashboardVersionService := Service{dashSvc: dashboardService, features: featuremgmt.WithFeatures()}
mockCli := new(client.MockK8sHandler)
dashboardVersionService.k8sclient = mockCli
dashboardVersionService.features = featuremgmt.WithFeatures()
dashboardService.On("GetDashboardUIDByID", mock.Anything,
mock.AnythingOfType("*dashboards.GetDashboardRefByIDQuery")).
Return(&dashboards.DashboardRef{UID: "uid"}, nil)
query := dashver.ListDashboardVersionsQuery{DashboardID: 42, Limit: 2}
mockCli.On("GetUsersFromMeta", mock.Anything, mock.Anything).Return(map[string]*user.User{}, nil)
firstPage := &unstructured.UnstructuredList{
Items: []unstructured.Unstructured{
{Object: map[string]any{
"metadata": map[string]any{
"name": "uid",
"resourceVersion": "11",
"generation": int64(4),
"labels": map[string]any{
utils.LabelKeyDeprecatedInternalID: "42", // nolint:staticcheck
},
},
"spec": map[string]any{},
}},
{Object: map[string]any{
"metadata": map[string]any{
"name": "uid",
"resourceVersion": "12",
"generation": int64(5),
"labels": map[string]any{
utils.LabelKeyDeprecatedInternalID: "42", // nolint:staticcheck
},
},
"spec": map[string]any{},
}},
},
}
firstMeta, err := meta.ListAccessor(firstPage)
require.NoError(t, err)
firstMeta.SetContinue("t1") // More pages exist
mockCli.On("List", mock.Anything, mock.Anything, mock.Anything).Return(firstPage, nil).Once()
res, err := dashboardVersionService.List(context.Background(), &query)
require.Nil(t, err)
require.Equal(t, 2, len(res.Versions))
require.Equal(t, "t1", res.ContinueToken) // Token from first fetch when limit is satisfied
mockCli.AssertNumberOfCalls(t, "List", 1) // Only one fetch needed
})
t.Run("List returns correct continue token across multiple pages", func(t *testing.T) {
dashboardService := dashboards.NewFakeDashboardService(t)
dashboardVersionService := Service{dashSvc: dashboardService, features: featuremgmt.WithFeatures()}
@@ -333,7 +385,79 @@ func TestListDashboardVersions(t *testing.T) {
res, err := dashboardVersionService.List(context.Background(), &query)
require.Nil(t, err)
require.Equal(t, 3, len(res.Versions))
require.Equal(t, "t1", res.ContinueToken) // Implementation returns continue token from first page
require.Equal(t, "", res.ContinueToken) // Should return token from last fetch (empty = no more pages)
mockCli.AssertNumberOfCalls(t, "List", 2)
})
t.Run("List returns continue token from last fetch when more pages exist", func(t *testing.T) {
dashboardService := dashboards.NewFakeDashboardService(t)
dashboardVersionService := Service{dashSvc: dashboardService, features: featuremgmt.WithFeatures()}
mockCli := new(client.MockK8sHandler)
dashboardVersionService.k8sclient = mockCli
dashboardVersionService.features = featuremgmt.WithFeatures()
dashboardService.On("GetDashboardUIDByID", mock.Anything,
mock.AnythingOfType("*dashboards.GetDashboardRefByIDQuery")).
Return(&dashboards.DashboardRef{UID: "uid"}, nil)
query := dashver.ListDashboardVersionsQuery{DashboardID: 42, Limit: 3}
mockCli.On("GetUsersFromMeta", mock.Anything, mock.Anything).Return(map[string]*user.User{}, nil)
firstPage := &unstructured.UnstructuredList{
Items: []unstructured.Unstructured{
{Object: map[string]any{
"metadata": map[string]any{
"name": "uid",
"resourceVersion": "11",
"generation": int64(4),
"labels": map[string]any{
utils.LabelKeyDeprecatedInternalID: "42", // nolint:staticcheck
},
},
"spec": map[string]any{},
}},
{Object: map[string]any{
"metadata": map[string]any{
"name": "uid",
"resourceVersion": "12",
"generation": int64(5),
"labels": map[string]any{
utils.LabelKeyDeprecatedInternalID: "42", // nolint:staticcheck
},
},
"spec": map[string]any{},
}},
},
}
firstMeta, err := meta.ListAccessor(firstPage)
require.NoError(t, err)
firstMeta.SetContinue("t1")
secondPage := &unstructured.UnstructuredList{
Items: []unstructured.Unstructured{
{Object: map[string]any{
"metadata": map[string]any{
"name": "uid",
"resourceVersion": "13",
"generation": int64(6),
"labels": map[string]any{
utils.LabelKeyDeprecatedInternalID: "42", // nolint:staticcheck
},
},
"spec": map[string]any{},
}},
},
}
secondMeta, err := meta.ListAccessor(secondPage)
require.NoError(t, err)
secondMeta.SetContinue("t2") // More pages exist
mockCli.On("List", mock.Anything, mock.Anything, mock.Anything).Return(firstPage, nil).Once()
mockCli.On("List", mock.Anything, mock.Anything, mock.Anything).Return(secondPage, nil).Once()
res, err := dashboardVersionService.List(context.Background(), &query)
require.Nil(t, err)
require.Equal(t, 3, len(res.Versions))
require.Equal(t, "t2", res.ContinueToken) // Must return token from LAST fetch, not first
mockCli.AssertNumberOfCalls(t, "List", 2)
})
@@ -202,6 +202,11 @@ func (s *Service) searchFoldersFromApiServer(ctx context.Context, query folder.S
if query.Title != "" {
// allow wildcard search
request.Query = "*" + strings.ToLower(query.Title) + "*"
// or perform exact match if requested
if query.TitleExactMatch {
request.Query = query.Title
}
// if using query, you need to specify the fields you want
request.Fields = dashboardsearch.IncludeFields
}
+7 -6
View File
@@ -224,12 +224,13 @@ type GetFoldersQuery struct {
}
type SearchFoldersQuery struct {
OrgID int64
UIDs []string
IDs []int64
Title string
Limit int64
SignedInUser identity.Requester `json:"-"`
OrgID int64
UIDs []string
IDs []int64
Title string
TitleExactMatch bool
Limit int64
SignedInUser identity.Requester `json:"-"`
}
// GetParentsQuery captures the information required by the folder service to
@@ -153,13 +153,20 @@ func (provider *Provisioner) Provision(ctx context.Context) error {
// CleanUpOrphanedDashboards deletes provisioned dashboards missing a linked reader.
func (provider *Provisioner) CleanUpOrphanedDashboards(ctx context.Context) {
currentReaders := make([]string, len(provider.fileReaders))
configs := make([]dashboards.ProvisioningConfig, len(provider.fileReaders))
for index, reader := range provider.fileReaders {
currentReaders[index] = reader.Cfg.Name
configs[index] = dashboards.ProvisioningConfig{
Name: reader.Cfg.Name,
OrgID: reader.Cfg.OrgID,
Folder: reader.Cfg.Folder,
AllowUIUpdates: reader.Cfg.AllowUIUpdates,
}
}
if err := provider.provisioner.DeleteOrphanedProvisionedDashboards(ctx, &dashboards.DeleteOrphanedProvisionedDashboardsCommand{ReaderNames: currentReaders}); err != nil {
if err := provider.provisioner.DeleteOrphanedProvisionedDashboards(
ctx, &dashboards.DeleteOrphanedProvisionedDashboardsCommand{Config: configs},
); err != nil {
provider.log.Warn("Failed to delete orphaned provisioned dashboards", "err", err)
}
}
+9 -1
View File
@@ -280,7 +280,15 @@ func (s *Service) handleTagValues(rw http.ResponseWriter, req *http.Request) {
return
}
tempoPath := fmt.Sprintf("api/v2/search/tag/%s/values", encodedTag)
// escape tag
tag, err := url.PathUnescape(encodedTag)
if err != nil {
s.logger.Error("Failed to unescape", "error", err, "tag", encodedTag)
http.Error(rw, "Invalid 'tag' parameter", http.StatusBadRequest)
return
}
tempoPath := fmt.Sprintf("api/v2/search/tag/%s/values", tag)
s.proxyToTempo(rw, req, tempoPath)
}
@@ -25,10 +25,17 @@ import { DashboardDataDTO } from 'app/types/dashboard';
import { PanelInspectDrawer } from '../../inspect/PanelInspectDrawer';
import { PanelTimeRange, PanelTimeRangeState } from '../../scene/panel-timerange/PanelTimeRange';
import { DashboardLayoutManager } from '../../scene/types/DashboardLayoutManager';
import { transformSaveModelSchemaV2ToScene } from '../../serialization/transformSaveModelSchemaV2ToScene';
import { transformSaveModelToScene } from '../../serialization/transformSaveModelToScene';
import { findVizPanelByKey } from '../../utils/utils';
import { buildPanelEditScene } from '../PanelEditor';
import { testDashboard, panelWithTransformations, panelWithQueriesOnly } from '../testfiles/testDashboard';
import {
testDashboard,
panelWithTransformations,
panelWithQueriesOnly,
testDashboardV2,
} from '../testfiles/testDashboard';
import { PanelDataQueriesTab, PanelDataQueriesTabRendered } from './PanelDataQueriesTab';
@@ -824,6 +831,78 @@ describe('PanelDataQueriesTab', () => {
expect(queriesTab.state.dsSettings?.uid).toBe('gdev-testdata');
});
});
describe('V2 schema behavior - panel datasource undefined but queries have datasource', () => {
it('should load datasource from first query for V2 panel with prometheus datasource', async () => {
// panel-1 has a query with prometheus datasource
const { queriesTab } = await setupV2Scene('panel-1');
// V2 panels have undefined panel-level datasource for non-mixed panels
expect(queriesTab.queryRunner.state.datasource).toBeUndefined();
// But the query has its own datasource
expect(queriesTab.queryRunner.state.queries[0].datasource).toEqual({
type: 'grafana-prometheus-datasource',
uid: 'gdev-prometheus',
});
// Should load the datasource from the first query
expect(queriesTab.state.datasource?.uid).toBe('gdev-prometheus');
expect(queriesTab.state.dsSettings?.uid).toBe('gdev-prometheus');
});
it('should load datasource from first query for V2 panel with testdata datasource', async () => {
// panel-2 has a query with testdata datasource
const { queriesTab } = await setupV2Scene('panel-2');
// V2 panels have undefined panel-level datasource for non-mixed panels
expect(queriesTab.queryRunner.state.datasource).toBeUndefined();
// But the query has its own datasource
expect(queriesTab.queryRunner.state.queries[0].datasource).toEqual({
type: 'grafana-testdata-datasource',
uid: 'gdev-testdata',
});
// Should load the datasource from the first query
expect(queriesTab.state.datasource?.uid).toBe('gdev-testdata');
expect(queriesTab.state.dsSettings?.uid).toBe('gdev-testdata');
});
it('should fall back to last used datasource when V2 query has no explicit datasource', async () => {
store.exists.mockReturnValue(true);
store.getObject.mockImplementation((key: string, def: unknown) => {
if (key === PANEL_EDIT_LAST_USED_DATASOURCE) {
return {
dashboardUid: 'v2-dashboard-uid',
datasourceUid: 'gdev-testdata',
};
}
return def;
});
// panel-3 has a query with NO explicit datasource (datasource.name is undefined)
const { queriesTab } = await setupV2Scene('panel-3');
// V2 panel with no explicit datasource on query should fall back to last used
expect(queriesTab.state.datasource?.uid).toBe('gdev-testdata');
expect(queriesTab.state.dsSettings?.uid).toBe('gdev-testdata');
});
it('should use panel-level datasource when available (V1 behavior preserved)', async () => {
const { queriesTab } = await setupScene('panel-1');
// V1 panels have panel-level datasource set
expect(queriesTab.queryRunner.state.datasource).toEqual({
uid: 'gdev-testdata',
type: 'grafana-testdata-datasource',
});
// Should use the panel-level datasource
expect(queriesTab.state.datasource?.uid).toBe('gdev-testdata');
expect(queriesTab.state.dsSettings?.uid).toBe('gdev-testdata');
});
});
});
});
@@ -844,3 +923,24 @@ async function setupScene(panelId: string) {
return { panel, scene: dashboard, queriesTab };
}
// Builds a dashboard scene from the V2-schema fixture (testDashboardV2) and
// opens the panel editor for the panel identified by `panelKey`, activating
// the scene objects and returning the queries tab for assertions.
async function setupV2Scene(panelKey: string) {
  const scene = transformSaveModelSchemaV2ToScene(testDashboardV2);
  const layout = scene.state.body as DashboardLayoutManager;
  const panel = layout.getVizPanels().find((p) => p.state.key === panelKey)!;

  const editor = buildPanelEditScene(panel);
  scene.setState({ editPanel: editor });

  // Activate in the same order as setupScene: dashboard, editor, queries tab.
  deactivators.push(scene.activate());
  deactivators.push(editor.activate());

  const queriesTab = editor.state.dataPane!.state.tabs[0] as PanelDataQueriesTab;
  deactivators.push(queriesTab.activate());

  // Yield one microtask tick so queued async activation work can run.
  await Promise.resolve();

  return { panel, scene, queriesTab };
}
@@ -86,6 +86,17 @@ export class PanelDataQueriesTab extends SceneObjectBase<PanelDataQueriesTabStat
let datasource: DataSourceApi | undefined;
let dsSettings: DataSourceInstanceSettings | undefined;
// If no panel-level datasource (V2 schema non-mixed case), infer from first query
// This also improves the V1 behavior because it doesn't make sense to rely on last used
// if underlying queries have different datasources
if (!datasourceToLoad) {
const queries = this.queryRunner.state.queries;
const firstQueryDs = queries[0]?.datasource;
if (firstQueryDs) {
datasourceToLoad = firstQueryDs;
}
}
if (!datasourceToLoad) {
const dashboardScene = getDashboardSceneFor(this);
const dashboardUid = dashboardScene.state.uid ?? '';
@@ -1,3 +1,6 @@
import { Spec as DashboardV2Spec, defaultDataQueryKind } from '@grafana/schema/dist/esm/schema/dashboard/v2';
import { DashboardWithAccessInfo } from 'app/features/dashboard/api/types';
export const panelWithQueriesOnly = {
datasource: {
type: 'grafana-testdata-datasource',
@@ -751,3 +754,223 @@ export const testDashboard = {
version: 6,
weekStart: '',
};
// V2 Dashboard fixture - panels have queries with datasources but NO panel-level datasource
export const testDashboardV2: DashboardWithAccessInfo<DashboardV2Spec> = {
kind: 'DashboardWithAccessInfo',
metadata: {
name: 'v2-dashboard-uid',
namespace: 'default',
labels: {},
generation: 1,
resourceVersion: '1',
creationTimestamp: new Date().toISOString(),
},
spec: {
title: 'V2 Test Dashboard',
description: 'Test dashboard for V2 schema',
tags: [],
cursorSync: 'Off',
liveNow: false,
editable: true,
preload: false,
links: [],
variables: [],
annotations: [],
timeSettings: {
from: 'now-6h',
to: 'now',
autoRefresh: '',
autoRefreshIntervals: ['5s', '10s', '30s', '1m', '5m', '15m', '30m', '1h', '2h', '1d'],
fiscalYearStartMonth: 0,
hideTimepicker: false,
timezone: '',
weekStart: undefined,
quickRanges: [],
},
elements: {
'panel-1': {
kind: 'Panel',
spec: {
id: 1,
title: 'Panel with Prometheus datasource',
description: '',
links: [],
data: {
kind: 'QueryGroup',
spec: {
queries: [
{
kind: 'PanelQuery',
spec: {
refId: 'A',
hidden: false,
query: {
kind: 'DataQuery',
version: defaultDataQueryKind().version,
group: 'grafana-prometheus-datasource',
datasource: {
name: 'gdev-prometheus',
},
spec: {
expr: 'up',
},
},
},
},
],
transformations: [],
queryOptions: {},
},
},
vizConfig: {
kind: 'VizConfig',
group: 'timeseries',
version: '1.0.0',
spec: {
options: {},
fieldConfig: {
defaults: {},
overrides: [],
},
},
},
},
},
'panel-2': {
kind: 'Panel',
spec: {
id: 2,
title: 'Panel with TestData datasource',
description: '',
links: [],
data: {
kind: 'QueryGroup',
spec: {
queries: [
{
kind: 'PanelQuery',
spec: {
refId: 'A',
hidden: false,
query: {
kind: 'DataQuery',
version: defaultDataQueryKind().version,
group: 'grafana-testdata-datasource',
datasource: {
name: 'gdev-testdata',
},
spec: {
scenarioId: 'random_walk',
},
},
},
},
],
transformations: [],
queryOptions: {},
},
},
vizConfig: {
kind: 'VizConfig',
group: 'timeseries',
version: '1.0.0',
spec: {
options: {},
fieldConfig: {
defaults: {},
overrides: [],
},
},
},
},
},
'panel-3': {
kind: 'Panel',
spec: {
id: 3,
title: 'Panel with no datasource on query',
description: '',
links: [],
data: {
kind: 'QueryGroup',
spec: {
queries: [
{
kind: 'PanelQuery',
spec: {
refId: 'A',
hidden: false,
query: {
kind: 'DataQuery',
version: defaultDataQueryKind().version,
group: 'grafana-testdata-datasource',
// No datasource.name - simulates panel with no explicit datasource
spec: {},
},
},
},
],
transformations: [],
queryOptions: {},
},
},
vizConfig: {
kind: 'VizConfig',
group: 'timeseries',
version: '1.0.0',
spec: {
options: {},
fieldConfig: {
defaults: {},
overrides: [],
},
},
},
},
},
},
layout: {
kind: 'GridLayout',
spec: {
items: [
{
kind: 'GridLayoutItem',
spec: {
x: 0,
y: 0,
width: 12,
height: 8,
element: { kind: 'ElementReference', name: 'panel-1' },
},
},
{
kind: 'GridLayoutItem',
spec: {
x: 12,
y: 0,
width: 12,
height: 8,
element: { kind: 'ElementReference', name: 'panel-2' },
},
},
{
kind: 'GridLayoutItem',
spec: {
x: 0,
y: 8,
width: 12,
height: 8,
element: { kind: 'ElementReference', name: 'panel-3' },
},
},
],
},
},
},
access: {
url: '/d/v2-dashboard-uid',
slug: 'v2-test-dashboard',
},
apiVersion: 'v2',
};
@@ -5,7 +5,19 @@ import { CoreApp, GrafanaTheme2, PanelPlugin, PanelProps } from '@grafana/data';
import { Trans, t } from '@grafana/i18n';
import { config, locationService } from '@grafana/runtime';
import { sceneUtils } from '@grafana/scenes';
import { Box, Button, ButtonGroup, Dropdown, Icon, Menu, Stack, Text, usePanelContext, useStyles2 } from '@grafana/ui';
import {
Box,
Button,
ButtonGroup,
Dropdown,
EmptyState,
Icon,
Menu,
Stack,
Text,
usePanelContext,
useStyles2,
} from '@grafana/ui';
import { NEW_PANEL_TITLE } from '../../dashboard/utils/dashboard';
import { DashboardInteractions } from '../utils/interactions';
@@ -92,20 +104,30 @@ function UnconfiguredPanelComp(props: PanelProps) {
);
}
const { isEditing } = dashboard.state;
return (
<Stack direction={'row'} alignItems={'center'} height={'100%'} justifyContent={'center'}>
<Box paddingBottom={2}>
<ButtonGroup>
<Button icon="sliders-v-alt" onClick={onConfigure}>
<Trans i18nKey="dashboard.new-panel.configure-button">Configure</Trans>
</Button>
<Dropdown overlay={MenuActions} placement="bottom-end" onVisibleChange={onMenuClick}>
<Button
aria-label={t('dashboard.new-panel.configure-button-menu', 'Toggle menu')}
icon={isOpen ? 'angle-up' : 'angle-down'}
/>
</Dropdown>
</ButtonGroup>
{isEditing ? (
<ButtonGroup>
<Button icon="sliders-v-alt" onClick={onConfigure}>
<Trans i18nKey="dashboard.new-panel.configure-button">Configure</Trans>
</Button>
<Dropdown overlay={MenuActions} placement="bottom-end" onVisibleChange={onMenuClick}>
<Button
aria-label={t('dashboard.new-panel.configure-button-menu', 'Toggle menu')}
icon={isOpen ? 'angle-up' : 'angle-down'}
/>
</Dropdown>
</ButtonGroup>
) : (
<EmptyState
variant="call-to-action"
message={t('dashboard.new-panel.missing-config', 'Missing panel configuration')}
hideImage
/>
)}
</Box>
</Stack>
);
@@ -18,7 +18,8 @@ import { isDashboardLayoutGrid } from '../types/DashboardLayoutGrid';
import { RowItem } from './RowItem';
export function RowItemRenderer({ model }: SceneComponentProps<RowItem>) {
const { layout, collapse: isCollapsed, fillScreen, hideHeader: isHeaderHidden, isDropTarget, key } = model.useState();
const { layout, collapse, fillScreen, hideHeader: isHeaderHidden, isDropTarget, key } = model.useState();
const isCollapsed = collapse && !isHeaderHidden; // never allow a row without a header to be collapsed
const isClone = isRepeatCloneOrChildOf(model);
const { isEditing } = useDashboardState(model);
const [isConditionallyHidden, conditionalRenderingClass, conditionalRenderingOverlay] = useIsConditionallyHidden(
@@ -237,6 +238,7 @@ function getStyles(theme: GrafanaTheme2) {
}),
dragging: css({
cursor: 'move',
backgroundColor: theme.colors.background.canvas,
}),
wrapperGrow: css({
flexGrow: 1,
@@ -262,4 +262,57 @@ describe('TabsLayoutManager', () => {
expect(manager.getVizPanels().length).toBe(1);
});
});
describe('createFromLayout', () => {
it('should convert rows with titles to tabs', () => {
const rowsLayout = new RowsLayoutManager({
rows: [new RowItem({ title: 'Row 1' }), new RowItem({ title: 'Row 2' })],
});
const tabsManager = TabsLayoutManager.createFromLayout(rowsLayout);
expect(tabsManager.state.tabs).toHaveLength(2);
expect(tabsManager.state.tabs[0].state.title).toBe('Row 1');
expect(tabsManager.state.tabs[1].state.title).toBe('Row 2');
});
it('should use default title when row has empty title', () => {
const rowsLayout = new RowsLayoutManager({
rows: [new RowItem({ title: '' })],
});
const tabsManager = TabsLayoutManager.createFromLayout(rowsLayout);
expect(tabsManager.state.tabs).toHaveLength(1);
expect(tabsManager.state.tabs[0].state.title).toBe('New tab');
});
it('should generate unique titles for multiple rows with empty titles', () => {
const rowsLayout = new RowsLayoutManager({
rows: [new RowItem({ title: '' }), new RowItem({ title: '' }), new RowItem({ title: '' })],
});
const tabsManager = TabsLayoutManager.createFromLayout(rowsLayout);
expect(tabsManager.state.tabs).toHaveLength(3);
expect(tabsManager.state.tabs[0].state.title).toBe('New tab');
expect(tabsManager.state.tabs[1].state.title).toBe('New tab 1');
expect(tabsManager.state.tabs[2].state.title).toBe('New tab 2');
});
it('should generate unique titles when mixing empty and existing titles', () => {
const rowsLayout = new RowsLayoutManager({
rows: [
new RowItem({ title: 'New row' }), // existing title that matches default
new RowItem({ title: '' }), // empty, should get unique title
],
});
const tabsManager = TabsLayoutManager.createFromLayout(rowsLayout);
expect(tabsManager.state.tabs).toHaveLength(2);
expect(tabsManager.state.tabs[0].state.title).toBe('New row');
expect(tabsManager.state.tabs[1].state.title).toBe('New tab');
});
});
});
@@ -410,6 +410,10 @@ export class TabsLayoutManager extends SceneObjectBase<TabsLayoutManagerState> i
let tabs: TabItem[] = [];
if (layout instanceof RowsLayoutManager) {
const existingNames = new Set(
layout.state.rows.map((row) => row.state.title).filter((title): title is string => !!title)
);
for (const row of layout.state.rows) {
if (row.state.repeatSourceKey) {
continue;
@@ -420,10 +424,14 @@ export class TabsLayoutManager extends SceneObjectBase<TabsLayoutManagerState> i
// We need to clear the target since we don't want to point the original row anymore (if it was set)
conditionalRendering?.setTarget(undefined);
const newTitle =
row.state.title || generateUniqueTitle(t('dashboard.tabs-layout.tab.new', 'New tab'), existingNames);
existingNames.add(newTitle);
tabs.push(
new TabItem({
layout: row.state.layout.clone(),
title: row.state.title,
title: newTitle,
conditionalRendering,
repeatByVariable: row.state.repeatByVariable,
})
@@ -256,7 +256,11 @@ export const InfiniteScroll = ({
if (props.visibleStartIndex === 0) {
noScrollRef.current = scrollElement.scrollHeight <= scrollElement.clientHeight;
}
if (noScrollRef.current || infiniteLoaderState === 'loading' || infiniteLoaderState === 'out-of-bounds') {
if (noScrollRef.current) {
setInfiniteLoaderState('idle');
return;
}
if (infiniteLoaderState === 'loading' || infiniteLoaderState === 'out-of-bounds') {
return;
}
const lastLogIndex = logs.length - 1;
@@ -267,7 +271,7 @@ export const InfiniteScroll = ({
setInfiniteLoaderState('idle');
}
},
[infiniteLoaderState, logs.length, scrollElement]
[infiniteLoaderState, logs, scrollElement]
);
const getItemKey = useCallback((index: number) => (logs[index] ? logs[index].uid : index.toString()), [logs]);
@@ -1,5 +1,5 @@
import { css } from '@emotion/css';
import { useState, useEffect, useCallback, useMemo } from 'react';
import { Fragment, useState, useEffect, useCallback, useMemo } from 'react';
import { useAsync, useMeasure } from 'react-use';
import {
@@ -133,9 +133,9 @@ export function VisualizationSuggestions({ onChange, data, panel }: Props) {
return (
<div className={styles.grid}>
{isNewVizSuggestionsEnabled
? suggestionsByVizType.map(([vizType, vizTypeSuggestions]) => (
<>
<div className={styles.vizTypeHeader} key={vizType?.id || 'unknown-viz-type'}>
? suggestionsByVizType.map(([vizType, vizTypeSuggestions], groupIndex) => (
<Fragment key={vizType?.id || `unknown-viz-type-${groupIndex}`}>
<div className={styles.vizTypeHeader}>
<Text variant="body" weight="medium">
{vizType?.info && <img className={styles.vizTypeLogo} src={vizType.info.logos.small} alt="" />}
{vizType?.name || t('panel.visualization-suggestions.unknown-viz-type', 'Unknown visualization type')}
@@ -190,7 +190,7 @@ export function VisualizationSuggestions({ onChange, data, panel }: Props) {
</div>
);
})}
</>
</Fragment>
))
: suggestions?.map((suggestion, index) => (
<div key={suggestion.hash} className={styles.cardContainer} ref={index === 0 ? firstCardRef : undefined}>
@@ -33,6 +33,11 @@ const getSummaryColumns = () => [
header: 'Unchanged',
cell: ({ row: { original: item } }: SummaryCell) => item.noop?.toString() || '-',
},
{
id: 'warnings',
header: 'Warnings',
cell: ({ row: { original: item } }: SummaryCell) => item.warning?.toString() || '-',
},
{
id: 'errors',
header: 'Errors',
@@ -190,9 +190,7 @@ export default class TempoLanguageProvider extends LanguageProvider {
* @returns the encoded tag
*/
private encodeTag = (tag: string): string => {
// If we call `encodeURIComponent` only once, we still get an error when issuing a request to the backend
// Reference: https://stackoverflow.com/a/37456192
return encodeURIComponent(encodeURIComponent(tag));
return encodeURIComponent(tag);
};
generateQueryFromFilters({
@@ -911,7 +911,7 @@ const traceSubFrame = (
subFrame.add(transformSpanToTraceData(span, spanSet, trace));
});
return subFrame;
return toDataFrame(subFrame);
};
interface TraceTableData {
+17 -4
View File
@@ -3739,6 +3739,10 @@
"clear": "Vymazat vyhledávání a filtry",
"text": "Nebyly nalezeny žádné výsledky pro váš dotaz"
},
"recently-viewed": {
"empty": "",
"title": ""
},
"restore": {
"success": "",
"all-failed_one": "",
@@ -5992,13 +5996,25 @@
"title-error-loading-dashboard": "Chyba při načítání nástěnky"
},
"dashboard-scene": {
"modal": {
"cancel": "",
"discard": "",
"save": "",
"text": {
"save-changes-question": ""
},
"title": {
"unsaved-changes": ""
}
},
"text": {
"edit-panel": "Upravit panel",
"view-panel": "Zobrazit panel"
},
"title": {
"dashboard": "Nástěnka",
"discard-changes-to-dashboard": "Zahodit změny nástěnky?"
"discard-changes-to-dashboard": "Zahodit změny nástěnky?",
"unsaved-changes-question": ""
}
},
"dashboard-scene-page-state-manager": {
@@ -10798,7 +10814,6 @@
"title": "Nové"
},
"new-dashboard": {
"empty-title": "",
"title": "Nová nástěnka"
},
"new-folder": {
@@ -11958,7 +11973,6 @@
"title-setting-connection-could-cause-temporary-outage": "Nastavení tohoto připojení může způsobit dočasný výpadek"
},
"getting-started-page": {
"header": "Zajišťování",
"subtitle-provisioning-feature": "Zobrazujte a spravujte vazby zajištění"
},
"git": {
@@ -12730,7 +12744,6 @@
}
},
"dashboard-actions": {
"empty-dashboard": "",
"import": "Importovat",
"new": "Nové",
"new-dashboard": "Nová nástěnka",
+17 -4
View File
@@ -3707,6 +3707,10 @@
"clear": "Suche und Filter löschen",
"text": "Keine Ergebnisse für deine Abfrage gefunden"
},
"recently-viewed": {
"empty": "",
"title": ""
},
"restore": {
"success": "",
"all-failed_one": "",
@@ -5950,13 +5954,25 @@
"title-error-loading-dashboard": "Fehler beim Laden des Dashboards"
},
"dashboard-scene": {
"modal": {
"cancel": "",
"discard": "",
"save": "",
"text": {
"save-changes-question": ""
},
"title": {
"unsaved-changes": ""
}
},
"text": {
"edit-panel": "Panel bearbeiten",
"view-panel": "Panel anzeigen"
},
"title": {
"dashboard": "Dashboard",
"discard-changes-to-dashboard": "Änderungen am Dashboard verwerfen?"
"discard-changes-to-dashboard": "Änderungen am Dashboard verwerfen?",
"unsaved-changes-question": ""
}
},
"dashboard-scene-page-state-manager": {
@@ -10712,7 +10728,6 @@
"title": "Neu"
},
"new-dashboard": {
"empty-title": "",
"title": "Neues Dashboard"
},
"new-folder": {
@@ -11856,7 +11871,6 @@
"title-setting-connection-could-cause-temporary-outage": "Das Einrichten dieser Verbindung kann zu einem vorübergehenden Ausfall führen"
},
"getting-started-page": {
"header": "Bereitstellung",
"subtitle-provisioning-feature": "Sehen und verwalten Sie Ihre Bereitstellungsverbindungen"
},
"git": {
@@ -12622,7 +12636,6 @@
}
},
"dashboard-actions": {
"empty-dashboard": "",
"import": "Importieren",
"new": "Neu",
"new-dashboard": "Neues Dashboard",
+1
View File
@@ -5133,6 +5133,7 @@
"empty-state-message": "Run a query to visualize it here or go to all visualizations to add other panel types",
"menu-open-panel-editor": "Configure",
"menu-use-library-panel": "Use library panel",
"missing-config": "Missing panel configuration",
"suggestions": {
"empty-state-message": "Run a query to start seeing suggested visualizations"
}
+17 -4
View File
@@ -3707,6 +3707,10 @@
"clear": "Borrar la búsqueda y los filtros",
"text": "No se han encontrado resultados para tu consulta"
},
"recently-viewed": {
"empty": "",
"title": ""
},
"restore": {
"success": "",
"all-failed_one": "",
@@ -5950,13 +5954,25 @@
"title-error-loading-dashboard": "Error al cargar el panel de control"
},
"dashboard-scene": {
"modal": {
"cancel": "",
"discard": "",
"save": "",
"text": {
"save-changes-question": ""
},
"title": {
"unsaved-changes": ""
}
},
"text": {
"edit-panel": "Editar panel",
"view-panel": "Ver panel"
},
"title": {
"dashboard": "Panel de control",
"discard-changes-to-dashboard": "¿Descartar los cambios en el dashboard?"
"discard-changes-to-dashboard": "¿Descartar los cambios en el dashboard?",
"unsaved-changes-question": ""
}
},
"dashboard-scene-page-state-manager": {
@@ -10712,7 +10728,6 @@
"title": "Nuevo"
},
"new-dashboard": {
"empty-title": "",
"title": "Nuevo panel de control"
},
"new-folder": {
@@ -11856,7 +11871,6 @@
"title-setting-connection-could-cause-temporary-outage": "Configurar esta conexión podría causar una interrupción temporal"
},
"getting-started-page": {
"header": "Aprovisionamiento",
"subtitle-provisioning-feature": "Ver y gestionar tus conexiones de aprovisionamiento"
},
"git": {
@@ -12622,7 +12636,6 @@
}
},
"dashboard-actions": {
"empty-dashboard": "",
"import": "Importar",
"new": "Nuevo",
"new-dashboard": "Nuevo panel de control",
+17 -4
View File
@@ -3707,6 +3707,10 @@
"clear": "Effacer la recherche et les filtres",
"text": "Aucun résultat n'a été trouvé pour votre requête"
},
"recently-viewed": {
"empty": "",
"title": ""
},
"restore": {
"success": "",
"all-failed_one": "",
@@ -5950,13 +5954,25 @@
"title-error-loading-dashboard": "Erreur lors du chargement du tableau de bord"
},
"dashboard-scene": {
"modal": {
"cancel": "",
"discard": "",
"save": "",
"text": {
"save-changes-question": ""
},
"title": {
"unsaved-changes": ""
}
},
"text": {
"edit-panel": "Modifier le panneau",
"view-panel": "Afficher le panneau"
},
"title": {
"dashboard": "Tableau de bord",
"discard-changes-to-dashboard": "Abandonner les modifications apportées au tableau de bord ?"
"discard-changes-to-dashboard": "Abandonner les modifications apportées au tableau de bord ?",
"unsaved-changes-question": ""
}
},
"dashboard-scene-page-state-manager": {
@@ -10712,7 +10728,6 @@
"title": "Nouveau"
},
"new-dashboard": {
"empty-title": "",
"title": "Nouveau tableau de bord"
},
"new-folder": {
@@ -11856,7 +11871,6 @@
"title-setting-connection-could-cause-temporary-outage": "La configuration de cette connexion peut entraîner une interruption temporaire"
},
"getting-started-page": {
"header": "Mise en service",
"subtitle-provisioning-feature": "Afficher et gérer vos connexions de mise en service"
},
"git": {
@@ -12622,7 +12636,6 @@
}
},
"dashboard-actions": {
"empty-dashboard": "",
"import": "Importer",
"new": "Nouveau",
"new-dashboard": "Nouveau tableau de bord",
+17 -4
View File
@@ -3707,6 +3707,10 @@
"clear": "Keresés és szűrők törlése",
"text": "Nincs találat a lekérdezésre"
},
"recently-viewed": {
"empty": "",
"title": ""
},
"restore": {
"success": "",
"all-failed_one": "",
@@ -5950,13 +5954,25 @@
"title-error-loading-dashboard": "Hiba történt az irányítópult betöltésekor"
},
"dashboard-scene": {
"modal": {
"cancel": "",
"discard": "",
"save": "",
"text": {
"save-changes-question": ""
},
"title": {
"unsaved-changes": ""
}
},
"text": {
"edit-panel": "Panel szerkesztése",
"view-panel": "Panel megtekintése"
},
"title": {
"dashboard": "Irányítópult",
"discard-changes-to-dashboard": "Elveti az irányítópult módosításait?"
"discard-changes-to-dashboard": "Elveti az irányítópult módosításait?",
"unsaved-changes-question": ""
}
},
"dashboard-scene-page-state-manager": {
@@ -10712,7 +10728,6 @@
"title": "Új"
},
"new-dashboard": {
"empty-title": "",
"title": "Új irányítópult"
},
"new-folder": {
@@ -11856,7 +11871,6 @@
"title-setting-connection-could-cause-temporary-outage": "A kapcsolat létrehozása ideiglenes üzemszünetet okozhat"
},
"getting-started-page": {
"header": "Kiépítés",
"subtitle-provisioning-feature": "Kiépítési kapcsolatok megtekintése és kezelése"
},
"git": {
@@ -12622,7 +12636,6 @@
}
},
"dashboard-actions": {
"empty-dashboard": "",
"import": "Importálás",
"new": "Új",
"new-dashboard": "Új irányítópult",
+17 -4
View File
@@ -3691,6 +3691,10 @@
"clear": "Hapus pencarian dan filter",
"text": "Hasil untuk kueri Anda tidak ditemukan"
},
"recently-viewed": {
"empty": "",
"title": ""
},
"restore": {
"success": "",
"all-failed_other": "",
@@ -5929,13 +5933,25 @@
"title-error-loading-dashboard": "Kesalahan saat memuat dasbor"
},
"dashboard-scene": {
"modal": {
"cancel": "",
"discard": "",
"save": "",
"text": {
"save-changes-question": ""
},
"title": {
"unsaved-changes": ""
}
},
"text": {
"edit-panel": "Edit panel",
"view-panel": "Lihat panel"
},
"title": {
"dashboard": "Dasbor",
"discard-changes-to-dashboard": "Batalkan perubahan ke dasbor?"
"discard-changes-to-dashboard": "Batalkan perubahan ke dasbor?",
"unsaved-changes-question": ""
}
},
"dashboard-scene-page-state-manager": {
@@ -10669,7 +10685,6 @@
"title": "Baru"
},
"new-dashboard": {
"empty-title": "",
"title": "Dasbor baru"
},
"new-folder": {
@@ -11805,7 +11820,6 @@
"title-setting-connection-could-cause-temporary-outage": "Mengatur koneksi ini dapat menyebabkan pemadaman sementara"
},
"getting-started-page": {
"header": "Penyediaan",
"subtitle-provisioning-feature": "Lihat dan kelola koneksi penyediaan Anda"
},
"git": {
@@ -12568,7 +12582,6 @@
}
},
"dashboard-actions": {
"empty-dashboard": "",
"import": "Impor",
"new": "Baru",
"new-dashboard": "Dasbor baru",
+17 -4
View File
@@ -3707,6 +3707,10 @@
"clear": "Cancella ricerca e filtri",
"text": "Nessun risultato trovato per la ricerca"
},
"recently-viewed": {
"empty": "",
"title": ""
},
"restore": {
"success": "",
"all-failed_one": "",
@@ -5950,13 +5954,25 @@
"title-error-loading-dashboard": "Errore durante il caricamento del dashboard"
},
"dashboard-scene": {
"modal": {
"cancel": "",
"discard": "",
"save": "",
"text": {
"save-changes-question": ""
},
"title": {
"unsaved-changes": ""
}
},
"text": {
"edit-panel": "Modifica pannello",
"view-panel": "Visualizza pannello"
},
"title": {
"dashboard": "Dashboard",
"discard-changes-to-dashboard": "Annullare le modifiche alla dashboard?"
"discard-changes-to-dashboard": "Annullare le modifiche alla dashboard?",
"unsaved-changes-question": ""
}
},
"dashboard-scene-page-state-manager": {
@@ -10712,7 +10728,6 @@
"title": "Nuovo"
},
"new-dashboard": {
"empty-title": "",
"title": "Nuovo dashboard"
},
"new-folder": {
@@ -11856,7 +11871,6 @@
"title-setting-connection-could-cause-temporary-outage": "La configurazione di questa connessione potrebbe causare un'interruzione temporanea"
},
"getting-started-page": {
"header": "Provisioning",
"subtitle-provisioning-feature": "Visualizza e gestisci le connessioni di provisioning"
},
"git": {
@@ -12622,7 +12636,6 @@
}
},
"dashboard-actions": {
"empty-dashboard": "",
"import": "Importa",
"new": "Nuovo",
"new-dashboard": "Nuovo dashboard",
+17 -4
View File
@@ -3691,6 +3691,10 @@
"clear": "検索とフィルタをクリア",
"text": "クエリに一致する結果が見つかりませんでした。"
},
"recently-viewed": {
"empty": "",
"title": ""
},
"restore": {
"success": "",
"all-failed_other": "",
@@ -5929,13 +5933,25 @@
"title-error-loading-dashboard": "ダッシュボードの読み込み中にエラーが発生しました"
},
"dashboard-scene": {
"modal": {
"cancel": "",
"discard": "",
"save": "",
"text": {
"save-changes-question": ""
},
"title": {
"unsaved-changes": ""
}
},
"text": {
"edit-panel": "パネルを編集",
"view-panel": "パネルを表示"
},
"title": {
"dashboard": "ダッシュボード",
"discard-changes-to-dashboard": "ダッシュボードへの変更を破棄しますか?"
"discard-changes-to-dashboard": "ダッシュボードへの変更を破棄しますか?",
"unsaved-changes-question": ""
}
},
"dashboard-scene-page-state-manager": {
@@ -10669,7 +10685,6 @@
"title": "新規"
},
"new-dashboard": {
"empty-title": "",
"title": "新しいダッシュボード"
},
"new-folder": {
@@ -11805,7 +11820,6 @@
"title-setting-connection-could-cause-temporary-outage": "この接続設定を行うことで、一時的に停止する可能性があります"
},
"getting-started-page": {
"header": "プロビジョニング",
"subtitle-provisioning-feature": "プロビジョニング接続を表示・管理"
},
"git": {
@@ -12568,7 +12582,6 @@
}
},
"dashboard-actions": {
"empty-dashboard": "",
"import": "インポート",
"new": "新規",
"new-dashboard": "新しいダッシュボード",
+17 -4
View File
@@ -3691,6 +3691,10 @@
"clear": "검색 및 필터 초기화",
"text": "쿼리에 대해 찾은 결과 없음"
},
"recently-viewed": {
"empty": "",
"title": ""
},
"restore": {
"success": "",
"all-failed_other": "",
@@ -5929,13 +5933,25 @@
"title-error-loading-dashboard": "대시보드 로딩 중 오류 발생"
},
"dashboard-scene": {
"modal": {
"cancel": "",
"discard": "",
"save": "",
"text": {
"save-changes-question": ""
},
"title": {
"unsaved-changes": ""
}
},
"text": {
"edit-panel": "패널 편집",
"view-panel": "패널 보기"
},
"title": {
"dashboard": "대시보드",
"discard-changes-to-dashboard": "대시보드 변경 사항을 취소하시겠어요?"
"discard-changes-to-dashboard": "대시보드 변경 사항을 취소하시겠어요?",
"unsaved-changes-question": ""
}
},
"dashboard-scene-page-state-manager": {
@@ -10669,7 +10685,6 @@
"title": "신규"
},
"new-dashboard": {
"empty-title": "",
"title": "새 대시보드"
},
"new-folder": {
@@ -11805,7 +11820,6 @@
"title-setting-connection-could-cause-temporary-outage": "이 연결을 설정하면 일시적인 중단이 발생할 수 있습니다"
},
"getting-started-page": {
"header": "프로비저닝",
"subtitle-provisioning-feature": "프로비저닝 연결 보기 및 관리"
},
"git": {
@@ -12568,7 +12582,6 @@
}
},
"dashboard-actions": {
"empty-dashboard": "",
"import": "가져오기",
"new": "신규",
"new-dashboard": "새 대시보드",
+17 -4
View File
@@ -3707,6 +3707,10 @@
"clear": "Zoekopdracht en filters wissen",
"text": "Geen resultaten gevonden voor je zoekopdracht"
},
"recently-viewed": {
"empty": "",
"title": ""
},
"restore": {
"success": "",
"all-failed_one": "",
@@ -5950,13 +5954,25 @@
"title-error-loading-dashboard": "Er is een fout opgetreden bij het laden van het dashboard"
},
"dashboard-scene": {
"modal": {
"cancel": "",
"discard": "",
"save": "",
"text": {
"save-changes-question": ""
},
"title": {
"unsaved-changes": ""
}
},
"text": {
"edit-panel": "Paneel bewerken",
"view-panel": "Paneel bekijken"
},
"title": {
"dashboard": "Dashboard",
"discard-changes-to-dashboard": "Wijzigingen in dashboard verwerpen?"
"discard-changes-to-dashboard": "Wijzigingen in dashboard verwerpen?",
"unsaved-changes-question": ""
}
},
"dashboard-scene-page-state-manager": {
@@ -10712,7 +10728,6 @@
"title": "Nieuw"
},
"new-dashboard": {
"empty-title": "",
"title": "Nieuw dashboard"
},
"new-folder": {
@@ -11856,7 +11871,6 @@
"title-setting-connection-could-cause-temporary-outage": "Het opzetten van deze verbinding kan een tijdelijke storing veroorzaken"
},
"getting-started-page": {
"header": "Provisioning",
"subtitle-provisioning-feature": "Je provisioningverbindingen bekijken en beheren"
},
"git": {
@@ -12622,7 +12636,6 @@
}
},
"dashboard-actions": {
"empty-dashboard": "",
"import": "Importeren",
"new": "Nieuw",
"new-dashboard": "Nieuw dashboard",
+17 -4
View File
@@ -3739,6 +3739,10 @@
"clear": "Wyczyść wyszukiwanie i filtry",
"text": "Nie znaleziono wyników dla tego zapytania"
},
"recently-viewed": {
"empty": "",
"title": ""
},
"restore": {
"success": "",
"all-failed_one": "",
@@ -5992,13 +5996,25 @@
"title-error-loading-dashboard": "Błąd wczytywania pulpitu"
},
"dashboard-scene": {
"modal": {
"cancel": "",
"discard": "",
"save": "",
"text": {
"save-changes-question": ""
},
"title": {
"unsaved-changes": ""
}
},
"text": {
"edit-panel": "Edytuj panel",
"view-panel": "Wyświetl panel"
},
"title": {
"dashboard": "Pulpit",
"discard-changes-to-dashboard": "Odrzucić zmiany dotyczące pulpitu?"
"discard-changes-to-dashboard": "Odrzucić zmiany dotyczące pulpitu?",
"unsaved-changes-question": ""
}
},
"dashboard-scene-page-state-manager": {
@@ -10798,7 +10814,6 @@
"title": "Nowy"
},
"new-dashboard": {
"empty-title": "",
"title": "Nowy pulpit"
},
"new-folder": {
@@ -11958,7 +11973,6 @@
"title-setting-connection-could-cause-temporary-outage": "Skonfigurowanie tego połączenia może spowodować tymczasową niedostępność"
},
"getting-started-page": {
"header": "Konfiguracja",
"subtitle-provisioning-feature": "Wyświetlaj połączenia aprowizacyjne i nimi zarządzaj"
},
"git": {
@@ -12730,7 +12744,6 @@
}
},
"dashboard-actions": {
"empty-dashboard": "",
"import": "Importuj",
"new": "Nowy",
"new-dashboard": "Nowy pulpit",
+17 -4
View File
@@ -3707,6 +3707,10 @@
"clear": "Limpar busca e filtros",
"text": "Nenhum resultado encontrado para sua consulta"
},
"recently-viewed": {
"empty": "",
"title": ""
},
"restore": {
"success": "",
"all-failed_one": "",
@@ -5950,13 +5954,25 @@
"title-error-loading-dashboard": "Erro ao carregar o painel de controle"
},
"dashboard-scene": {
"modal": {
"cancel": "",
"discard": "",
"save": "",
"text": {
"save-changes-question": ""
},
"title": {
"unsaved-changes": ""
}
},
"text": {
"edit-panel": "Editar painel",
"view-panel": "Visualizar painel"
},
"title": {
"dashboard": "Painel de controle",
"discard-changes-to-dashboard": "Deseja descartar as alterações no painel?"
"discard-changes-to-dashboard": "Deseja descartar as alterações no painel?",
"unsaved-changes-question": ""
}
},
"dashboard-scene-page-state-manager": {
@@ -10712,7 +10728,6 @@
"title": "Novo"
},
"new-dashboard": {
"empty-title": "",
"title": "Novo painel de controle"
},
"new-folder": {
@@ -11856,7 +11871,6 @@
"title-setting-connection-could-cause-temporary-outage": "Estabelecer esta conexão pode causar uma interrupção temporária"
},
"getting-started-page": {
"header": "Aprovisionamento",
"subtitle-provisioning-feature": "Visualize e gerencie suas conexões de provisionamento"
},
"git": {
@@ -12622,7 +12636,6 @@
}
},
"dashboard-actions": {
"empty-dashboard": "",
"import": "Importar",
"new": "Novo",
"new-dashboard": "Novo painel de controle",
+17 -4
View File
@@ -3707,6 +3707,10 @@
"clear": "Limpar a pesquisa e os filtros",
"text": "Não foram encontrados resultados para a sua consulta"
},
"recently-viewed": {
"empty": "",
"title": ""
},
"restore": {
"success": "",
"all-failed_one": "",
@@ -5950,13 +5954,25 @@
"title-error-loading-dashboard": "Erro ao carregar o painel de controlo"
},
"dashboard-scene": {
"modal": {
"cancel": "",
"discard": "",
"save": "",
"text": {
"save-changes-question": ""
},
"title": {
"unsaved-changes": ""
}
},
"text": {
"edit-panel": "Editar painel",
"view-panel": "Visualizar painel"
},
"title": {
"dashboard": "Painel de controlo",
"discard-changes-to-dashboard": "Rejeitar alterações no painel de controlo?"
"discard-changes-to-dashboard": "Rejeitar alterações no painel de controlo?",
"unsaved-changes-question": ""
}
},
"dashboard-scene-page-state-manager": {
@@ -10712,7 +10728,6 @@
"title": "Novo"
},
"new-dashboard": {
"empty-title": "",
"title": "Novo painel de controlo"
},
"new-folder": {
@@ -11856,7 +11871,6 @@
"title-setting-connection-could-cause-temporary-outage": "Configurar esta ligação pode causar uma interrupção temporária"
},
"getting-started-page": {
"header": "Provisionamento",
"subtitle-provisioning-feature": "Ver e gerir as suas ligações de provisionamento"
},
"git": {
@@ -12622,7 +12636,6 @@
}
},
"dashboard-actions": {
"empty-dashboard": "",
"import": "Importar",
"new": "Novo",
"new-dashboard": "Novo painel de controlo",
+17 -4
View File
@@ -3739,6 +3739,10 @@
"clear": "Очистить поиск и фильтры",
"text": "По вашему запросу ничего не найдено"
},
"recently-viewed": {
"empty": "",
"title": ""
},
"restore": {
"success": "",
"all-failed_one": "",
@@ -5992,13 +5996,25 @@
"title-error-loading-dashboard": "Ошибка при загрузке дашборда"
},
"dashboard-scene": {
"modal": {
"cancel": "",
"discard": "",
"save": "",
"text": {
"save-changes-question": ""
},
"title": {
"unsaved-changes": ""
}
},
"text": {
"edit-panel": "Редактировать панель",
"view-panel": "Просмотр панели"
},
"title": {
"dashboard": "Дашборд",
"discard-changes-to-dashboard": "Отменить изменения на дашборде?"
"discard-changes-to-dashboard": "Отменить изменения на дашборде?",
"unsaved-changes-question": ""
}
},
"dashboard-scene-page-state-manager": {
@@ -10798,7 +10814,6 @@
"title": "Новые элементы"
},
"new-dashboard": {
"empty-title": "",
"title": "Новый дашборд"
},
"new-folder": {
@@ -11958,7 +11973,6 @@
"title-setting-connection-could-cause-temporary-outage": "Настройка этого подключения может привести к временному сбою"
},
"getting-started-page": {
"header": "Подготовка к работе",
"subtitle-provisioning-feature": "Просмотр подключений для подготовки и управлением ими"
},
"git": {
@@ -12730,7 +12744,6 @@
}
},
"dashboard-actions": {
"empty-dashboard": "",
"import": "Импорт",
"new": "Новые элементы",
"new-dashboard": "Новый дашборд",
+17 -4
View File
@@ -3707,6 +3707,10 @@
"clear": "Rensa sökning och filter",
"text": "Inga resultat hittades för din fråga"
},
"recently-viewed": {
"empty": "",
"title": ""
},
"restore": {
"success": "",
"all-failed_one": "",
@@ -5950,13 +5954,25 @@
"title-error-loading-dashboard": "Fel vid laddning av instrumentpanel"
},
"dashboard-scene": {
"modal": {
"cancel": "",
"discard": "",
"save": "",
"text": {
"save-changes-question": ""
},
"title": {
"unsaved-changes": ""
}
},
"text": {
"edit-panel": "Redigera panel",
"view-panel": "Visa panel"
},
"title": {
"dashboard": "Instrumentpanel",
"discard-changes-to-dashboard": "Kassera ändringar i instrumentpanelen?"
"discard-changes-to-dashboard": "Kassera ändringar i instrumentpanelen?",
"unsaved-changes-question": ""
}
},
"dashboard-scene-page-state-manager": {
@@ -10712,7 +10728,6 @@
"title": "Nyhet"
},
"new-dashboard": {
"empty-title": "",
"title": "Ny instrumentpanel"
},
"new-folder": {
@@ -11856,7 +11871,6 @@
"title-setting-connection-could-cause-temporary-outage": "Konfiguration av den här anslutningen kan orsaka ett tillfälligt avbrott"
},
"getting-started-page": {
"header": "Provisionering",
"subtitle-provisioning-feature": "Visa och hantera dina provisioneringsanslutningar"
},
"git": {
@@ -12622,7 +12636,6 @@
}
},
"dashboard-actions": {
"empty-dashboard": "",
"import": "Importera",
"new": "Nyhet",
"new-dashboard": "Ny instrumentpanel",
+17 -4
View File
@@ -3707,6 +3707,10 @@
"clear": "Aramayı ve filtreleri temizle",
"text": "Sorgunuz için sonuç bulunamadı"
},
"recently-viewed": {
"empty": "",
"title": ""
},
"restore": {
"success": "",
"all-failed_one": "",
@@ -5950,13 +5954,25 @@
"title-error-loading-dashboard": "Pano yüklenirken hata oluştu"
},
"dashboard-scene": {
"modal": {
"cancel": "",
"discard": "",
"save": "",
"text": {
"save-changes-question": ""
},
"title": {
"unsaved-changes": ""
}
},
"text": {
"edit-panel": "Paneli düzenle",
"view-panel": "Paneli görüntüle"
},
"title": {
"dashboard": "Pano",
"discard-changes-to-dashboard": "Panodaki değişiklikler silinsin mi?"
"discard-changes-to-dashboard": "Panodaki değişiklikler silinsin mi?",
"unsaved-changes-question": ""
}
},
"dashboard-scene-page-state-manager": {
@@ -10712,7 +10728,6 @@
"title": "Yeni"
},
"new-dashboard": {
"empty-title": "",
"title": "Yeni pano"
},
"new-folder": {
@@ -11856,7 +11871,6 @@
"title-setting-connection-could-cause-temporary-outage": "Bu bağlantıyı kurmak geçici bir kesintiye neden olabilir"
},
"getting-started-page": {
"header": "Sağlama",
"subtitle-provisioning-feature": "Sağlama bağlantılarınızı görüntüleyin ve yönetin"
},
"git": {
@@ -12622,7 +12636,6 @@
}
},
"dashboard-actions": {
"empty-dashboard": "",
"import": "İçe aktar",
"new": "Yeni",
"new-dashboard": "Yeni pano",
+17 -4
View File
@@ -3691,6 +3691,10 @@
"clear": "清除搜索和筛选条件",
"text": "未找到与您的查询相关的结果"
},
"recently-viewed": {
"empty": "",
"title": ""
},
"restore": {
"success": "",
"all-failed_other": "",
@@ -5929,13 +5933,25 @@
"title-error-loading-dashboard": "加载数据面板时出错"
},
"dashboard-scene": {
"modal": {
"cancel": "",
"discard": "",
"save": "",
"text": {
"save-changes-question": ""
},
"title": {
"unsaved-changes": ""
}
},
"text": {
"edit-panel": "编辑面板",
"view-panel": "查看面板"
},
"title": {
"dashboard": "仪表板",
"discard-changes-to-dashboard": "放弃对数据面板的更改?"
"discard-changes-to-dashboard": "放弃对数据面板的更改?",
"unsaved-changes-question": ""
}
},
"dashboard-scene-page-state-manager": {
@@ -10669,7 +10685,6 @@
"title": "新建"
},
"new-dashboard": {
"empty-title": "",
"title": "新建仪表板"
},
"new-folder": {
@@ -11805,7 +11820,6 @@
"title-setting-connection-could-cause-temporary-outage": "设置此连接可能会导致暂时中断"
},
"getting-started-page": {
"header": "配置",
"subtitle-provisioning-feature": "查看和管理您的预配连接"
},
"git": {
@@ -12568,7 +12582,6 @@
}
},
"dashboard-actions": {
"empty-dashboard": "",
"import": "导入",
"new": "新建",
"new-dashboard": "新建仪表板",
+17 -4
View File
@@ -3691,6 +3691,10 @@
"clear": "清除搜尋和篩選條件",
"text": "未找到您的查詢結果"
},
"recently-viewed": {
"empty": "",
"title": ""
},
"restore": {
"success": "",
"all-failed_other": "",
@@ -5929,13 +5933,25 @@
"title-error-loading-dashboard": "載入控制面板發生錯誤"
},
"dashboard-scene": {
"modal": {
"cancel": "",
"discard": "",
"save": "",
"text": {
"save-changes-question": ""
},
"title": {
"unsaved-changes": ""
}
},
"text": {
"edit-panel": "編輯面板",
"view-panel": "檢視面板"
},
"title": {
"dashboard": "儀表板",
"discard-changes-to-dashboard": "要捨棄儀表板的變更嗎?"
"discard-changes-to-dashboard": "要捨棄儀表板的變更嗎?",
"unsaved-changes-question": ""
}
},
"dashboard-scene-page-state-manager": {
@@ -10669,7 +10685,6 @@
"title": "新"
},
"new-dashboard": {
"empty-title": "",
"title": "新儀表板"
},
"new-folder": {
@@ -11805,7 +11820,6 @@
"title-setting-connection-could-cause-temporary-outage": "設定此連線可能會導致暫時中斷"
},
"getting-started-page": {
"header": "佈建",
"subtitle-provisioning-feature": "檢視及管理您的佈建連線"
},
"git": {
@@ -12568,7 +12582,6 @@
}
},
"dashboard-actions": {
"empty-dashboard": "",
"import": "匯入",
"new": "新",
"new-dashboard": "新儀表板",