diff --git a/pages/cockpit/how-to/assets/scaleway-advanced-options.webp b/pages/cockpit/how-to/assets/scaleway-advanced-options.webp new file mode 100644 index 0000000000..533d76992e Binary files /dev/null and b/pages/cockpit/how-to/assets/scaleway-advanced-options.webp differ diff --git a/pages/cockpit/how-to/assets/scaleway-alert-firing.webp b/pages/cockpit/how-to/assets/scaleway-alert-firing.webp new file mode 100644 index 0000000000..e8005cd255 Binary files /dev/null and b/pages/cockpit/how-to/assets/scaleway-alert-firing.webp differ diff --git a/pages/cockpit/how-to/assets/scaleway-alerts-firing.webp b/pages/cockpit/how-to/assets/scaleway-alerts-firing.webp deleted file mode 100644 index f714c6c705..0000000000 Binary files a/pages/cockpit/how-to/assets/scaleway-alerts-firing.webp and /dev/null differ diff --git a/pages/cockpit/how-to/assets/scaleway-alerts-via-grafana-ui.webp b/pages/cockpit/how-to/assets/scaleway-alerts-via-grafana-ui.webp new file mode 100644 index 0000000000..9c690b0daf Binary files /dev/null and b/pages/cockpit/how-to/assets/scaleway-alerts-via-grafana-ui.webp differ diff --git a/pages/cockpit/how-to/assets/scaleway-datasource-managed.webp b/pages/cockpit/how-to/assets/scaleway-datasource-managed.webp new file mode 100644 index 0000000000..95939673fe Binary files /dev/null and b/pages/cockpit/how-to/assets/scaleway-datasource-managed.webp differ diff --git a/pages/cockpit/how-to/assets/scaleway-firing-alert-tabs.webp b/pages/cockpit/how-to/assets/scaleway-firing-alert-tabs.webp new file mode 100644 index 0000000000..bb25b1b308 Binary files /dev/null and b/pages/cockpit/how-to/assets/scaleway-firing-alert-tabs.webp differ diff --git a/pages/cockpit/how-to/assets/scaleway-metric-selection.webp b/pages/cockpit/how-to/assets/scaleway-metric-selection.webp deleted file mode 100644 index be358502aa..0000000000 Binary files a/pages/cockpit/how-to/assets/scaleway-metric-selection.webp and /dev/null differ diff --git a/pages/cockpit/how-to/assets/scaleway-metrics-displayed.webp b/pages/cockpit/how-to/assets/scaleway-metrics-displayed.webp deleted file mode 100644 index 5ea9a88d5b..0000000000 Binary files a/pages/cockpit/how-to/assets/scaleway-metrics-displayed.webp and /dev/null differ diff --git a/pages/cockpit/how-to/assets/scaleway-metrics-explorer.webp b/pages/cockpit/how-to/assets/scaleway-metrics-explorer.webp new file mode 100644 index 0000000000..9a04e03b02 Binary files /dev/null and b/pages/cockpit/how-to/assets/scaleway-metrics-explorer.webp differ diff --git a/pages/cockpit/how-to/configure-alerts-for-scw-resources.mdx b/pages/cockpit/how-to/configure-alerts-for-scw-resources.mdx index 02a9078417..03ee424e93 100644 --- a/pages/cockpit/how-to/configure-alerts-for-scw-resources.mdx +++ b/pages/cockpit/how-to/configure-alerts-for-scw-resources.mdx @@ -2,25 +2,19 @@ title: How to configure alerts for Scaleway resources in Grafana description: Learn how to configure alerts for Scaleway resources in Grafana. Follow the steps to create alert rules, define conditions, and set up notifications for your monitored resources. 
categories: - - observability cockpit + - observability dates: - validation: 2025-05-12 + validation: 2025-07-18 posted: 2023-11-06 --- import Requirements from '@macros/iam/requirements.mdx' -import image from './assets/scaleway-switch-to-managed-alerts-button.webp' -import image2 from './assets/scaleway-metrics-browser.webp' -import image3 from './assets/scaleway-metrics-displayed.webp' -import image4 from './assets/scaleway-metric-selection.webp' -import image5 from './assets/scaleway-metrics-browser.webp' -import image6 from './assets/scaleway-metrics-displayed.webp' -import image7 from './assets/scaleway-metrics-browser.webp' -import image8 from './assets/scaleway-metrics-displayed.webp' -import image9 from './assets/scaleway-metrics-browser.webp' -import image10 from './assets/scaleway-metrics-displayed.webp' -import image11 from './assets/scaleway-alerts-firing.webp' - +import AdvancedOptionsGrafana from './assets/scaleway-advanced-options.webp' +import DataSourceManaged from './assets/scaleway-datasource-managed.webp' +import MetricsExplorer from './assets/scaleway-metrics-explorer.webp' +import FiringAlertTabs from './assets/scaleway-firing-alert-tabs.webp' +import AlertsFiringGrafana from './assets/scaleway-alert-firing.webp' +import AlertsViaGrafanaUI from './assets/scaleway-alerts-via-grafana-ui.webp' Cockpit does not support Grafana-managed alerting. It integrates with Grafana to visualize metrics, but alerts are managed through the Scaleway alert manager. You should use Grafana only to define alert rules, not to evaluate or receive alert notifications. Once the conditions of your alert rule are met, the Scaleway alert manager evaluates the rule and sends a notification to the contact points you have configured in the Scaleway console or in Grafana. @@ -36,15 +30,24 @@ This page shows you how to create alert rules in Grafana for monitoring Scaleway - [Enabled](/cockpit/how-to/enable-alert-manager/) the Scaleway alert manager - [Created](/cockpit/how-to/add-contact-points/) a contact point in the Scaleway console or in Grafana (with the `Scaleway Alerting` alert manager of the same region as your `Scaleway Metrics` data source), otherwise alerts will not be delivered -## Switch to data source managed alert rules +## Switch to the data source-managed tab Data source managed alert rules allow you to configure alerts managed by the data source of your choice, instead of using Grafana's managed alerting system which is not supported by Cockpit. 1. [Log in to Grafana](/cockpit/how-to/access-grafana-and-managed-dashboards/) using your credentials. -2. Click the **Toggle menu** then click **Alerting**. -3. Click **Alert rules** and **+ New alert rule**. -4. In the **Define query and alert condition** section, scroll to the **Grafana-managed alert rule** information banner and click **Switch to data source-managed alert rule**. This step is **mandatory** because Cockpit does not support Grafana’s built-in alerting system, but only alerts configured and evaluated by the data source itself. You are redirected to the alert creation process. - +2. Click the Grafana icon in the top left side of your screen to open the menu. +3. Click the arrow next to **Alerting** on the left-side menu, then click **Alert rules**. +4. Click **+ New alert rule**. +5. Enter a name for your alert. +6. In the **Define query and alert condition** section, toggle **Advanced options**. + +7. Select the **Scaleway Metrics** data source in the drop-down. +8. 
In the **Rule type** subsection, click the **Data source-managed** tab. + + + Data source managed alert rules allow you to configure alerts managed by the data source of your choice, instead of using Grafana's managed alerting system which is not supported by Cockpit. + This step is **mandatory** because Cockpit does not support Grafana’s built-in alerting system, but only alerts configured and evaluated by the data source itself. + ## Define your metric and alert conditions @@ -54,129 +57,106 @@ Switch between the tabs below to create alerts for a Scaleway Instance, an Objec The steps below explain how to create the metric selection and configure an alert condition that triggers when **your Instance consumes more than 10% of a single CPU core over the past 5 minutes.** - 1. Type a name for your alert. For example, `alert-for-high-cpu-usage`. - 2. Select the **Scaleway Metrics** data source. - 3. Click the **Metrics browser** drop-down. - - - 4. Select the metric you want to configure an alert for. For example, `instance_server_cpu_seconds_total`. + 1. In the query field next to the **Loading metrics... >** button, paste the following query. Make sure that the values for the labels you have selected (for example, `resource_id`) correspond to those of the target resource. + ```bash + rate(instance_server_cpu_seconds_total{resource_id="xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx"}[5m]) > 0.1 + ``` The `instance_server_cpu_seconds_total` metric records how many seconds of CPU time your Instance has used in total. It is helpful to detect unexpected CPU usage spikes. - 5. Select the appropriate labels to filter your metric and target specific resources. - 6. Choose values for your selected labels. The **Resulting selector** field displays your final query selector. - - 7. Click **Use query** to validate your metric selection. - 8. In the query field next to the **Metrics browser** button, paste the following query. Make sure that the values for the labels you have selected (for example, `resource_id` and `resource_name`) correspond to those of the target resource. - ```bash - rate(instance_server_cpu_seconds_total{resource_id="xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx",resource_name="name-of-your-resource"}[5m]) > 0.1 - ``` - 9. In the **Set alert evaluation behavior** field, specify how long the condition must be true before triggering the alert. - 10. Enter a name in the **Namespace** and **Group** fields to categorize and manage your alert, and optionally, add annotations. - 11. Enter a label in the **Labels** field and a name in the **Value** field. You can skip this step if you want your alerts to be sent to the contact points you may already have created in the Scaleway console. + 2. In the **Set alert evaluation behavior** section, specify how long the condition must be true before triggering the alert. + 3. Enter a name in the **Namespace** and **Group** fields to categorize and manage your alert, and optionally, add annotations. + 4. In the **Configure labels and notifications** section, click **+ Add labels**. A pop-up appears. + 5. Enter a label and value name and click **Save**. You can skip this step if you want your alerts to be sent to the contact points you may already have created in the Scaleway console. In Grafana, notifications are sent by matching alerts to notification policies based on labels. This step is about deciding how alerts will reach you or your team (Slack, email, etc.) based on labels you attach to them. 
Then, you can set up rules that define who receives notifications in the **Notification policies** page. For example, if an alert has the label `team = instances-team`, you are telling Grafana to send a notification to the Instances team when your alert `alert-for-high-cpu-usage` gets triggered. Find out how to [configure notification policies in Grafana](/tutorials/configure-slack-alerting/#configuring-a-notification-policy). - 12. Click **Save rule** in the top right corner of your screen to save and activate your alert. - 13. Optionally, check that your configuration works by temporarily lowering the threshold. This will trigger the alert and notify your [contact point](/cockpit/concepts/#contact-points). + 6. Click **Save rule and exit** in the top right corner of your screen to save and activate your alert. + 7. Optionally, check that your configuration works by temporarily lowering the threshold. This will trigger the alert and notify your [contact point](/cockpit/concepts/#contact-points). The steps below explain how to create the metric selection and configure an alert condition that triggers when **the object count in your bucket exceeds a specific threshold**. - 1. Type a name for your alert. - 2. Select the **Scaleway Metrics** data source. - 3. Click the **Metrics browser** drop-down. - - - 4. Select the metric you want to configure an alert for. For example, `object_storage_bucket_objects_total`. - - The `object_storage_bucket_objects_total` metric indicates the total number of objects stored in a given Object Storage bucket. It is useful to monitor and control object growth in your bucket and avoid hitting thresholds. - - 5. Select the appropriate labels to filter your metric and target specific resources. - 6. Choose values for your selected labels. The **Resulting selector** field displays your final query selector. - 7. Click **Use query** to validate your metric selection. Your selection displays in the query field next to the **Metrics browser** button. This prepares it for use in the alert condition, which we will define in the next steps. - 8. In the query field, paste the following query. Make sure that the values for the labels you have selected (for example, `resource_id` and `region`) correspond to those of the target resource. + 1. In the query field next to the **Loading metrics... >** button, paste the following query. Make sure that the values for the labels you have selected (for example, `resource_id` and `region`) correspond to those of the target resource. ```bash object_storage_bucket_objects_total{region="fr-par", resource_id="my-bucket"} > 2000 ``` - 9. In the **Set alert evaluation behavior** field, specify how long the condition must be true before triggering the alert. - 10. Enter a name in the **Namespace** and **Group** fields to categorize and manage your alert, and optionally, add annotations. - 11. Enter a label in the **Labels** field and a name in the **Value** field. You can skip this step if you want your alerts to be sent to the contact points you may already have created in the Scaleway console. + + The `object_storage_bucket_objects_total` metric indicates the total number of objects stored in a given Object Storage bucket. It is useful to monitor and control object growth in your bucket and avoid hitting thresholds. + + 2. In the **Set alert evaluation behavior** section, specify how long the condition must be true before triggering the alert. + 3. 
Enter a name in the **Namespace** and **Group** fields to categorize and manage your alert, and optionally, add annotations. + 4. In the **Configure labels and notifications** section, click **+ Add labels**. A pop-up appears. + 5. Enter a label and value name and click **Save**. You can skip this step if you want your alerts to be sent to the contact points you may already have created in the Scaleway console. In Grafana, notifications are sent by matching alerts to notification policies based on labels. This step is about deciding how alerts will reach you or your team (Slack, email, etc.) based on labels you attach to them. Then, you can set up rules that define who receives notifications in the **Notification policies** page. For example, if an alert has the label `team = object-storage-team`, you are telling Grafana to send a notification to the Object Storage team when your alert is firing. Find out how to [configure notification policies in Grafana](/tutorials/configure-slack-alerting/#configuring-a-notification-policy). - 12. Click **Save rule** in the top right corner of your screen to save and activate your alert. - 13. Optionally, check that your configuration works by temporarily lowering the threshold. This will trigger the alert and notify your [contact point](/cockpit/concepts/#contact-points). + 6. Click **Save rule and exit** in the top right corner of your screen to save and activate your alert. + 7. Optionally, check that your configuration works by temporarily lowering the threshold. This will trigger the alert and notify your [contact point](/cockpit/concepts/#contact-points). The steps below explain how to create the metric selection and configure an alert condition that triggers when **no new pod activity occurs, which could mean your cluster is stuck or unresponsive.** - 1. Type a name for your alert. - 2. Select the **Scaleway Metrics** data source. - 3. Click the **Metrics browser** drop-down. - - - 4. Select the metric you want to configure an alert for. For example, `kubernetes_cluster_k8s_shoot_nodes_pods_usage_total`. - - The `kubernetes_cluster_k8s_shoot_nodes_pods_usage_total` metric represents the total number of pods currently running across all nodes in your Kubernetes cluster. It is helpful to monitor current pod consumption per node pool or cluster, and help track resource saturation or unexpected workload spikes. - - 5. Select the appropriate labels to filter your metric and target specific resources. - 6. Choose values for your selected labels. The **Resulting selector** field displays your final query selector. - 7. Click **Use query** to validate your metric selection. Your selection displays in the query field next to the **Metrics browser** button. This prepares it for use in the alert condition, which we will define in the next steps. - 8. In the query field, paste the following query. Make sure that the values for the labels you have selected (for example, `resource_name`) correspond to those of the target resource. + 1. In the query field next to the **Loading metrics... >** button, paste the following query. Make sure that the values for the labels you have selected (for example, `resource_name`) correspond to those of the target resource. ```bash rate(kubernetes_cluster_k8s_shoot_nodes_pods_usage_total{resource_name="k8s-par-quizzical-chatelet"}[15m]) == 0 ``` - 9. In the **Set alert evaluation behavior** field, specify how long the condition must be true before triggering the alert. - 10. 
Enter a name in the **Namespace** and **Group** fields to categorize and manage your alert, and optionally, add annotations. - 11. Enter a label in the **Labels** field and a name in the **Value** field. You can skip this step if you want your alerts to be sent to the contact points you may already have created in the Scaleway console. + + The `kubernetes_cluster_k8s_shoot_nodes_pods_usage_total` metric represents the total number of pods currently running across all nodes in your Kubernetes cluster. It is helpful to monitor current pod consumption per node pool or cluster, and help track resource saturation or unexpected workload spikes. + + 2. In the **Set alert evaluation behavior** field, specify how long the condition must be true before triggering the alert. + 3. Enter a name in the **Namespace** and **Group** fields to categorize and manage your alert, and optionally, add annotations. + 4. In the **Configure labels and notifications** section, click **+ Add labels**. A pop-up appears. + 5. Enter a label and value name and click **Save**. You can skip this step if you want your alerts to be sent to the contact points you may already have created in the Scaleway console. In Grafana, notifications are sent by matching alerts to notification policies based on labels. This step is about deciding how alerts will reach you or your team (Slack, email, etc.) based on labels you attach to them. Then, you can set up rules that define who receives notifications in the **Notification policies** page. For example, if an alert has the label `team = kubernetes-team`, you are telling Grafana to send a notification to the Kubernetes team when your alert is firing. Find out how to [configure notification policies in Grafana](/tutorials/configure-slack-alerting/#configuring-a-notification-policy). - 12. Click **Save rule** in the top right corner of your screen to save and activate your alert. - 13. Optionally, check that your configuration works by temporarily lowering the threshold. This will trigger the alert and notify your [contact point](/cockpit/concepts/#contact-points). + 6. Click **Save rule and exit** in the top right corner of your screen to save and activate your alert. + 7. Optionally, check that your configuration works by temporarily lowering the threshold. This will trigger the alert and notify your [contact point](/cockpit/concepts/#contact-points). The steps below explain how to create the metric selection and configure an alert condition that triggers when **no logs are stored for 5 minutes, which may indicate your app or system is broken**. - 1. Type a name for your alert. - 2. Select the **Scaleway Metrics** data source. - 3. Click the **Metrics browser** drop-down. - - - 4. Select the metric you want to configure an alert for. For example, `observability_cockpit_loki_chunk_store_stored_chunks_total:increase5m`. - - The `observability_cockpit_loki_chunk_store_stored_chunks_total:increase5m` metric represents the number of chunks (log storage blocks) that have been written over the last 5 minutes for a specific resource. It is useful to monitor log ingestion activity and detect issues such as a crash of the logging agent, or your application not producing logs. - - 5. Select the appropriate labels to filter your metric and target specific resources. - 6. Choose values for your selected labels. The **Resulting selector** field displays your final query selector. - 7. Click **Use query** to validate your metric selection. 
Your selection displays in the query field next to the **Metrics browser** button. This prepares it for use in the alert condition, which we will define in the next steps. - 8. In the query field, paste the following query. Make sure that the values for the labels you have selected (for example, `resource_name`) correspond to those of the target resource. + 1. In the query field next to the **Loading metrics... >** button, paste the following query. Make sure that the values for the labels you have selected (for example, `resource_name`) correspond to those of the target resource. ```bash observability_cockpit_loki_chunk_store_stored_chunks_total:increase5m{resource_id="xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx"} == 0 ``` - 9. In the **Set alert evaluation behavior** field, specify how long the condition must be true before triggering the alert. - 10. Enter a name in the **Namespace** and **Group** fields to categorize and manage your alert, and optionally, add annotations. - 11. Enter a label in the **Labels** field and a name in the **Value** field. You can skip this step if you want your alerts to be sent to the contact points you may already have created in the Scaleway console. + + The `observability_cockpit_loki_chunk_store_stored_chunks_total:increase5m` metric represents the number of chunks (log storage blocks) that have been written over the last 5 minutes for a specific resource. It is useful to monitor log ingestion activity and detect issues such as a crash of the logging agent, or your application not producing logs. + + 2. In the **Set alert evaluation behavior** field, specify how long the condition must be true before triggering the alert. + 3. Enter a name in the **Namespace** and **Group** fields to categorize and manage your alert, and optionally, add annotations. + 4. In the **Configure labels and notifications** section, click **+ Add labels**. A pop-up appears. + 5. Enter a label and value name and click **Save**. You can skip this step if you want your alerts to be sent to the contact points you may already have created in the Scaleway console. In Grafana, notifications are sent by matching alerts to notification policies based on labels. This step is about deciding how alerts will reach you or your team (Slack, email, etc.) based on labels you attach to them. Then, you can set up rules that define who receives notifications in the **Notification policies** page. For example, if an alert has the label `team = cockpit-team`, you are telling Grafana to send a notification to the Cockpit team when your alert is firing. Find out how to [configure notification policies in Grafana](/tutorials/configure-slack-alerting/#configuring-a-notification-policy). - 12. Click **Save rule** in the top right corner of your screen to save and activate your alert. - 13. Optionally, check that your configuration works by temporarily lowering the threshold. This will trigger the alert and notify your [contact point](/cockpit/concepts/#contact-points). + 6. Click **Save rule and exit** in the top right corner of your screen to save and activate your alert. + 7. Optionally, check that your configuration works by temporarily lowering the threshold. This will trigger the alert and notify your [contact point](/cockpit/concepts/#contact-points). -You can view your firing alerts in the **Alert rules** section of your Grafana (**Home** > **Alerting** > **Alert rules**). + **You can configure up to a maximum of 10 alerts** for the `Scaleway Metrics` data source. 
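+
+    For reference, a data source-managed rule such as the one created in the **Instances** tab above corresponds, roughly, to a standard Prometheus rule group like the sketch below. The group name, `for` duration, and label are illustrative values reused from this page, not settings required by Cockpit.
+    ```yaml
+    groups:
+      - name: instances-alerts            # the "Group" you set in Grafana
+        rules:
+          - alert: alert-for-high-cpu-usage
+            expr: 'rate(instance_server_cpu_seconds_total{resource_id="xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx"}[5m]) > 0.1'
+            for: 5m                       # how long the condition must hold before the alert fires
+            labels:
+              team: instances-team        # matched by notification policies to route the alert
+    ```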
+ + + You can also build your alerts from the Grafana-managed tab, with the metrics explorer (book icon) and **+ Operations** button. However, remember to click the **Data source-managed** tab once you are done building them to receive alert notifications. + + - +## View firing alerts - - You can configure up to a **maximum of 10 alerts** for the `Scaleway Metrics` data source. - + 1. [Log in to Grafana](/cockpit/how-to/access-grafana-and-managed-dashboards/) using your credentials. + 2. Click the Grafana icon in the top left side of your screen to open the menu. + 3. Click the arrow next to **Alerting** on the left-side menu, then click **Alert rules**. + 4. Click the **Firing** tab under the **State** section to filter for firing rules. + 5. Click the **Alert** tab under the **Rule type** section to filter for alerting rules. + + Your firing alerts should display. + Find out how to send Cockpit's alert notifications to Slack using a webhook URL in our [dedicated documentation](/tutorials/configure-slack-alerting/). diff --git a/tutorials/configure-slack-alerting/index.mdx b/tutorials/configure-slack-alerting/index.mdx index 933978ac87..1ec08b9ec9 100644 --- a/tutorials/configure-slack-alerting/index.mdx +++ b/tutorials/configure-slack-alerting/index.mdx @@ -5,10 +5,10 @@ categories: - cockpit tags: cockpit alerts monitoring notifications slack dates: - validation: 2025-02-18 + validation: 2025-07-18 posted: 2025-02-18 --- -import image from './assets/scaleway-slack-alerts-firing.webp' +import SlackNotification from './assets/scaleway-slack-alerts-firing.webp' import Requirements from '@macros/iam/requirements.mdx' @@ -52,11 +52,11 @@ As **we do not support Grafana managed alerts**, this documentation only shows y ## Creating your Slack integration in Grafana using the Scaleway Alerting alert manager -1. [Access](/cockpit/how-to/access-grafana-and-managed-dashboards/) your Grafana dashboard. -2. Click the **Toggle menu** icon next to **Home** in the top left corner of your screen. -3. Click **Alerting**, then click **Contact points**. +1. [Log in to Grafana](/cockpit/how-to/access-grafana-and-managed-dashboards/) using your credentials. +2. Click the Grafana icon in the top left side of your screen to open the menu. +3. Click the arrow next to **Alerting** on the left-side menu, then click **Contact points**. 4. Choose the **Scaleway Alerting** alert manager in the drop-down next to the **Choose Alertmanager** field. -5. Click **+ Add contact point**. +5. Click **+ Create contact point**. 6. Enter a contact point name. For example, `Slack alerts`. 7. Select **Slack** from the **Integration** drop-down. 8. In the **Channel** field, paste the name of the Slack channel or user to send notifications to. @@ -64,22 +64,20 @@ As **we do not support Grafana managed alerts**, this documentation only shows y 10. Click **Save contact point**. - If you have already created contact points, you need to [configure a nested notification policy](#configuring-a-notification-policy) to define which contact point should receive which alert. + If you have already created contact points, you need to [configure a child notification policy](#configuring-a-notification-policy) to define which contact point should receive which alert. ## Configuring a notification policy If you have created multiple contact points in Grafana, the default contact point will receive alerts by default. To make sure your alerts are sent to the desired contact point, you need to define notification policies. -1. 
[Access](/cockpit/how-to/access-grafana-and-managed-dashboards/) your Grafana dashboard. -2. Click the **Toggle menu** icon next to **Home** in the top left corner of your screen. -3. Click **Alerting**, then click **Notification policies**. -4. Click **+ New nested policy**. +1. Click **Alerting**, then click **Notification policies**. +4. Click **+ New child policy**. 5. In the **Label** field, enter the label `alertname`. 6. In the **Operator** field, select **Equals**. 7. In the **Value** field, enter the name of an existing alert you want your contact point to be notified for. For example, `alert-for-high-cpu-usage`. 8. Optionally, click **+Add matcher** to add more labels. -9. In the **Contact point** field, select the contact point you have configured for Slack. +9. In the **Contact point** drop-down, select the contact point you have configured for Slack. 10. Click **Save policy**. Your nested policy displays. You should now get notified on Slack. - \ No newline at end of file + \ No newline at end of file diff --git a/tutorials/silence-grafana-alerts/index.mdx b/tutorials/silence-grafana-alerts/index.mdx index 1c4a59c5a1..6f617c9e86 100644 --- a/tutorials/silence-grafana-alerts/index.mdx +++ b/tutorials/silence-grafana-alerts/index.mdx @@ -5,7 +5,7 @@ tags: cockpit alerts mute silence monitoring notifications categories: - cockpit dates: - validation: 2025-06-16 + validation: 2025-07-18 posted: 2025-06-16 --- import image from './assets/scaleway-silenced-alert.webp' @@ -27,8 +27,8 @@ This page shows you how to temporarily mute (silence) alerts without disabling t 1. [Access](/cockpit/how-to/access-grafana-and-managed-dashboards/) your Grafana dashboard. -2. Click the **Toggle menu** icon next to **Home** in the top left corner of your screen. -3. Click **Alerting**, then **Silences**. Make sure that **Scaleway Alerting** is selected in the **Choose Alertmanager** drop-down. +2. Click the **Grafana** icon in the top left corner of your screen to open the menu. +3. Click the arrow next to **Alerting** then **Silences**. Make sure that **Scaleway Alerting** is selected in the **Choose Alertmanager** drop-down. 4. Click **Create silence**. 5. Click the drop-down in the **Silence start and end** section. 6. Click the **Calendar** icon and select a time range during which you want to silence alerts.
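+
+If you prefer to manage silences from the command line, the same operation can, in principle, be performed with Alertmanager's `amtool`, provided your alert manager endpoint is reachable with standard Alertmanager tooling and the appropriate authentication. The URL below is a placeholder, not a documented Cockpit endpoint, and the matcher reuses the example alert name from this tutorial series.
+
+```bash
+amtool silence add alertname="alert-for-high-cpu-usage" \
+  --alertmanager.url="https://<your-alert-manager-endpoint>" \
+  --duration="2h" \
+  --comment="Planned maintenance, muting CPU alerts" \
+  --author="your-name"
+```
+
+If this endpoint is the same Scaleway Alerting alert manager that Grafana uses, silences created this way should also appear in the **Silences** page.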