services_catchall.tf
# During implementation, it can be useful to use the catch-all rules at the
# bottom of the rulesets to create suppressed alerts (or incidents) for events
# not yet handled by event rules.
#
# These suppressed alerts should likely be info severity and reviewed regularly.
# If alerts are chosen not to be suppressed and incidents are created, they
# should probably not be assigned to a real escalation policy.
# A sketch of such a catch-all ruleset rule is included at the bottom of this file.
variable "catch_all_services" {
  type = map(object({
    name        = string
    description = string
  }))
  default = {
    "datadog" = { name = "Datadog Catch-all", description = "Catch-all service for triage of events from Datadog not currently matched with event rules." },
  }
}
# Catch All - note PagerDuty actively drops emails to the example.com domain or domains ending .invalid
resource "pagerduty_user" "_noone_nowhere" {
  name      = "No One Nowhere"
  email     = "[email protected]"
  role      = "limited_user"
  job_title = "Dummy User for Catch All purposes"
}
resource "pagerduty_escalation_policy" "catchall" {
name = "Catchall Escalation Policy"
num_loops = 2
teams = [pagerduty_team.teams["cloud"].id]
rule {
escalation_delay_in_minutes = 30
target {
id = pagerduty_user._noone_nowhere.id
type = "user_reference"
}
}
}
resource "pagerduty_service" "catch_all" {
for_each = var.catch_all_services
name = each.value.name
description = each.value.description
escalation_policy = pagerduty_escalation_policy.catchall.id
alert_grouping_parameters {
type = "intelligent"
config {}
}
incident_urgency_rule {
type = "constant"
urgency = "severity_based"
}
alert_creation = "create_alerts_and_incidents"
auto_resolve_timeout = "null"
acknowledgement_timeout = "null"
}
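
# The header comment above refers to catch-all rules at the bottom of rulesets.
# Below is a minimal sketch of such a rule; it is illustrative only. The
# "Datadog Event Rules" ruleset and the resource names datadog_rules and
# datadog_catch_all are assumptions, not defined elsewhere in this repository.
# The catch-all rule always matches, routes otherwise-unhandled events to the
# Datadog catch-all service, sets them to info severity, and suppresses them so
# that no incident or notification is generated.
resource "pagerduty_ruleset" "datadog_rules" {
  name = "Datadog Event Rules" # assumed ruleset name for illustration
}

resource "pagerduty_ruleset_rule" "datadog_catch_all" {
  ruleset   = pagerduty_ruleset.datadog_rules.id
  catch_all = true # the rule at the bottom of the ruleset; matches every remaining event

  actions {
    route {
      value = pagerduty_service.catch_all["datadog"].id
    }
    severity {
      value = "info"
    }
    suppress {
      value = true
    }
  }
}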