From 343897e95bc38adf9c367570523f5a0b5ecf3215 Mon Sep 17 00:00:00 2001 From: Madhvi Date: Fri, 5 Jul 2024 18:52:40 +0530 Subject: [PATCH] Auto Discovery Feature Added And Doc Update --- README.md | 2 + examples/datasource/datasource.tf | 31 ++- examples/datasource/resource.tf | 31 ++- examples/website/datasource.tf | 1 + examples/website/resource.tf | 1 + .../auto_discovery_configuration_schema.go | 123 +++++++++++ .../schemata/auto_discovery_filter_schema.go | 69 ++++++ .../schemata/auto_discovery_method_schema.go | 42 ++++ logicmonitor/schemata/datasource_schema.go | 57 ++++- ...ipt_e_r_i_discovery_attribute_v2_schema.go | 96 +++++++++ logicmonitor/utils/helper_functions.go | 25 ++- models/auto_discovery_configuration.go | 200 ++++++++++++++++++ models/auto_discovery_filter.go | 94 ++++++++ models/auto_discovery_method.go | 71 +++++++ models/datasource.go | 90 +++++++- models/script_e_r_i_discovery_attribute_v2.go | 89 ++++++++ website/docs/index.markdown | 43 +++- website/docs/r/alert_rule.markdown | 16 +- website/docs/r/collector.markdown | 48 +++-- website/docs/r/collector_group.markdown | 11 +- website/docs/r/dashboard.markdown | 17 +- website/docs/r/dashboard_group.markdown | 9 +- website/docs/r/datasource.markdown | 55 ++++- website/docs/r/device.markdown | 27 +-- website/docs/r/device_group.markdown | 19 +- website/docs/r/escalation_chain.markdown | 12 +- website/docs/r/website.markdown | 50 +++-- website/docs/r/website_group.markdown | 11 +- 28 files changed, 1216 insertions(+), 124 deletions(-) create mode 100644 logicmonitor/schemata/auto_discovery_configuration_schema.go create mode 100644 logicmonitor/schemata/auto_discovery_filter_schema.go create mode 100644 logicmonitor/schemata/auto_discovery_method_schema.go create mode 100644 logicmonitor/schemata/script_e_r_i_discovery_attribute_v2_schema.go create mode 100644 models/auto_discovery_configuration.go create mode 100644 models/auto_discovery_filter.go create mode 100644 
models/auto_discovery_method.go create mode 100644 models/script_e_r_i_discovery_attribute_v2.go diff --git a/README.md b/README.md index 9deed2eb..c5dcf815 100644 --- a/README.md +++ b/README.md @@ -74,6 +74,8 @@ provider "logicmonitor" { api_id = var.logicmonitor_api_id api_key = var.logicmonitor_api_key company = var.logicmonitor_company + bulk_resource = true/false + "When working with bulk resources, this feature is optional to handle the API's rate limit" } ``` Example usage can be found in the /terraform-integration/examples directory. diff --git a/examples/datasource/datasource.tf b/examples/datasource/datasource.tf index 2661fbe2..ddebf6d9 100644 --- a/examples/datasource/datasource.tf +++ b/examples/datasource/datasource.tf @@ -1,11 +1,40 @@ resource "logicmonitor_datasource" "my_datasource"{ collect_interval = 100 + has_multi_instances = true applies_to = "system.deviceId == \"22\"" description = "test" collect_method = "script" - eri_discovery_interval = 1 + eri_discovery_interval = 15 + enable_auto_discovery = true enable_eri_discovery = true + eri_discovery_config { + name = "ad_script" + win_script = "string" + groovy_script = "string" + type = "string" + linux_cmdline = "string" + linux_script = "string" + win_cmdline = "string" + } name = "Amazon Website test" + auto_discovery_config { + persistent_instance = false + schedule_interval = 0 + delete_inactive_instance = true + method { + name = "ad_script" + } + instance_auto_group_method = "none" + instance_auto_group_method_params = "" + filters = [ + { + comment = "test" + value = "test" + operation = "string" + attribute = "string" + }] + disable_instance = true + } data_points = [{ name = "CallCountTotal_mean8" description = "test" diff --git a/examples/datasource/resource.tf b/examples/datasource/resource.tf index d4f16204..4856cd0c 100644 --- a/examples/datasource/resource.tf +++ b/examples/datasource/resource.tf @@ -2,12 +2,41 @@ resource "logicmonitor_datasource" "my_datasource"{ 
collect_interval = 100 + has_multi_instances = true applies_to = "system.deviceId == \"22\"" description = "test" collect_method = "script" - eri_discovery_interval = 1 + eri_discovery_interval = 15 + enable_auto_discovery = true enable_eri_discovery = true + eri_discovery_config { + name = "ad_script" + win_script = "string" + groovy_script = "string" + type = "string" + linux_cmdline = "string" + linux_script = "string" + win_cmdline = "string" + } name = "Amazon Website test" + auto_discovery_config { + persistent_instance = false + schedule_interval = 0 + delete_inactive_instance = true + method { + name = "ad_script" + } + instance_auto_group_method = "none" + instance_auto_group_method_params = "" + filters = [ + { + comment = "test" + value = "test" + operation = "string" + attribute = "string" + }] + disable_instance = true + } data_points = [{ name = "CallCountTotal_mean8" description = "test" diff --git a/examples/website/datasource.tf b/examples/website/datasource.tf index 5b049475..125c00b7 100644 --- a/examples/website/datasource.tf +++ b/examples/website/datasource.tf @@ -6,6 +6,7 @@ resource "logicmonitor_website" "my_website"{ description = "website test" disable_alerting = true stop_monitoring = true + schema = "http" user_permission = "string" test_location = [ { diff --git a/examples/website/resource.tf b/examples/website/resource.tf index 71f810cd..94c3d401 100644 --- a/examples/website/resource.tf +++ b/examples/website/resource.tf @@ -6,6 +6,7 @@ resource "logicmonitor_website" "my_website"{ description = "website test" disable_alerting = true stop_monitoring = true + schema = "http" user_permission = "string" test_location = [ { diff --git a/logicmonitor/schemata/auto_discovery_configuration_schema.go b/logicmonitor/schemata/auto_discovery_configuration_schema.go new file mode 100644 index 00000000..d26553d4 --- /dev/null +++ b/logicmonitor/schemata/auto_discovery_configuration_schema.go @@ -0,0 +1,123 @@ +package schemata + +import ( + 
"terraform-provider-logicmonitor/logicmonitor/utils" + "terraform-provider-logicmonitor/models" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" +) + +func AutoDiscoveryConfigurationSchema() map[string]*schema.Schema { + return map[string]*schema.Schema{ + "data_source_name": { + Type: schema.TypeString, + Computed: true, + }, + + "delete_inactive_instance": { + Type: schema.TypeBool, + Optional: true, + }, + + "disable_instance": { + Type: schema.TypeBool, + Optional: true, + }, + + "filters": { + Type: schema.TypeList, //GoType: []*AutoDiscoveryFilter + Elem: &schema.Resource{ + Schema: AutoDiscoveryFilterSchema(), + }, + ConfigMode: schema.SchemaConfigModeAttr, + Optional: true, + }, + + "instance_auto_group_method": { + Type: schema.TypeString, + Optional: true, + }, + + "instance_auto_group_method_params": { + Type: schema.TypeString, + Optional: true, + }, + + "method": { + Type: schema.TypeList, //GoType: AutoDiscoveryMethod + Elem: &schema.Resource{ + Schema: AutoDiscoveryMethodSchema(), + }, + Required: true, + }, + + "persistent_instance": { + Type: schema.TypeBool, + Optional: true, + }, + + "schedule_interval": { + Type: schema.TypeInt, + Optional: true, + }, + + } +} + +func SetAutoDiscoveryConfigurationSubResourceData(m []*models.AutoDiscoveryConfiguration) (d []*map[string]interface{}) { + for _, autoDiscoveryConfiguration := range m { + if autoDiscoveryConfiguration != nil { + properties := make(map[string]interface{}) + properties["data_source_name"] = autoDiscoveryConfiguration.DataSourceName + properties["delete_inactive_instance"] = autoDiscoveryConfiguration.DeleteInactiveInstance + properties["disable_instance"] = autoDiscoveryConfiguration.DisableInstance + properties["filters"] = SetAutoDiscoveryFilterSubResourceData(autoDiscoveryConfiguration.Filters) + properties["instance_auto_group_method"] = autoDiscoveryConfiguration.InstanceAutoGroupMethod + properties["instance_auto_group_method_params"] = 
autoDiscoveryConfiguration.InstanceAutoGroupMethodParams + properties["method"] = SetAutoDiscoveryMethodSubResourceData([]*models.AutoDiscoveryMethod{autoDiscoveryConfiguration.Method}) + properties["persistent_instance"] = autoDiscoveryConfiguration.PersistentInstance + properties["schedule_interval"] = autoDiscoveryConfiguration.ScheduleInterval + d = append(d, &properties) + } + } + return +} + +func AutoDiscoveryConfigurationModel(d map[string]interface{}) *models.AutoDiscoveryConfiguration { + // assume that the incoming map only contains the relevant resource data + deleteInactiveInstance := d["delete_inactive_instance"].(bool) + disableInstance := d["disable_instance"].(bool) + filters := utils.GetFilters(d["filters"].([]interface{})) + instanceAutoGroupMethod := d["instance_auto_group_method"].(string) + instanceAutoGroupMethodParams := d["instance_auto_group_method_params"].(string) + var method *models.AutoDiscoveryMethod = nil + methodList := d["method"].([]interface{}) + if len(methodList) > 0 { // len(nil) = 0 + method = AutoDiscoveryMethodModel(methodList[0].(map[string]interface{})) + } + persistentInstance := d["persistent_instance"].(bool) + scheduleInterval := int32(d["schedule_interval"].(int)) + + return &models.AutoDiscoveryConfiguration { + DeleteInactiveInstance: deleteInactiveInstance, + DisableInstance: disableInstance, + Filters: filters, + InstanceAutoGroupMethod: instanceAutoGroupMethod, + InstanceAutoGroupMethodParams: instanceAutoGroupMethodParams, + Method: method, + PersistentInstance: persistentInstance, + ScheduleInterval: scheduleInterval, + } +} + +func GetAutoDiscoveryConfigurationPropertyFields() (t []string) { + return []string{ + "delete_inactive_instance", + "disable_instance", + "filters", + "instance_auto_group_method", + "instance_auto_group_method_params", + "method", + "persistent_instance", + "schedule_interval", + } +} \ No newline at end of file diff --git a/logicmonitor/schemata/auto_discovery_filter_schema.go 
b/logicmonitor/schemata/auto_discovery_filter_schema.go new file mode 100644 index 00000000..ce6f269e --- /dev/null +++ b/logicmonitor/schemata/auto_discovery_filter_schema.go @@ -0,0 +1,69 @@ +package schemata + +import ( + "terraform-provider-logicmonitor/models" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" +) + +func AutoDiscoveryFilterSchema() map[string]*schema.Schema { + return map[string]*schema.Schema{ + "attribute": { + Type: schema.TypeString, + Required: true, + }, + + "comment": { + Type: schema.TypeString, + Optional: true, + }, + + "operation": { + Type: schema.TypeString, + Required: true, + }, + + "value": { + Type: schema.TypeString, + Optional: true, + }, + + } +} + +func SetAutoDiscoveryFilterSubResourceData(m []*models.AutoDiscoveryFilter) (d []*map[string]interface{}) { + for _, autoDiscoveryFilter := range m { + if autoDiscoveryFilter != nil { + properties := make(map[string]interface{}) + properties["attribute"] = autoDiscoveryFilter.Attribute + properties["comment"] = autoDiscoveryFilter.Comment + properties["operation"] = autoDiscoveryFilter.Operation + properties["value"] = autoDiscoveryFilter.Value + d = append(d, &properties) + } + } + return +} + +func AutoDiscoveryFilterModel(d map[string]interface{}) *models.AutoDiscoveryFilter { + // assume that the incoming map only contains the relevant resource data + attribute := d["attribute"].(string) + comment := d["comment"].(string) + operation := d["operation"].(string) + value := d["value"].(string) + + return &models.AutoDiscoveryFilter { + Attribute: &attribute, + Comment: comment, + Operation: &operation, + Value: value, + } +} + +func GetAutoDiscoveryFilterPropertyFields() (t []string) { + return []string{ + "attribute", + "comment", + "operation", + "value", + } +} \ No newline at end of file diff --git a/logicmonitor/schemata/auto_discovery_method_schema.go b/logicmonitor/schemata/auto_discovery_method_schema.go new file mode 100644 index 00000000..4bc123df --- 
/dev/null +++ b/logicmonitor/schemata/auto_discovery_method_schema.go @@ -0,0 +1,42 @@ +package schemata + +import ( + "terraform-provider-logicmonitor/models" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" +) + +func AutoDiscoveryMethodSchema() map[string]*schema.Schema { + return map[string]*schema.Schema{ + "name": { + Type: schema.TypeString, + Required: true, + }, + + } +} + +func SetAutoDiscoveryMethodSubResourceData(m []*models.AutoDiscoveryMethod) (d []*map[string]interface{}) { + for _, autoDiscoveryMethod := range m { + if autoDiscoveryMethod != nil { + properties := make(map[string]interface{}) + properties["name"] = autoDiscoveryMethod.Name + d = append(d, &properties) + } + } + return +} + +func AutoDiscoveryMethodModel(d map[string]interface{}) *models.AutoDiscoveryMethod { + // assume that the incoming map only contains the relevant resource data + name := d["name"].(string) + + return &models.AutoDiscoveryMethod { + Name: &name, + } +} + +func GetAutoDiscoveryMethodPropertyFields() (t []string) { + return []string{ + "name", + } +} \ No newline at end of file diff --git a/logicmonitor/schemata/datasource_schema.go b/logicmonitor/schemata/datasource_schema.go index 32406136..943fbd95 100644 --- a/logicmonitor/schemata/datasource_schema.go +++ b/logicmonitor/schemata/datasource_schema.go @@ -19,6 +19,14 @@ func DatasourceSchema() map[string]*schema.Schema { Computed: true, }, + "auto_discovery_config": { + Type: schema.TypeList, //GoType: AutoDiscoveryConfiguration + Elem: &schema.Resource{ + Schema: AutoDiscoveryConfigurationSchema(), + }, + Optional: true, + }, + "checksum": { Type: schema.TypeString, Computed: true, @@ -72,6 +80,14 @@ func DatasourceSchema() map[string]*schema.Schema { Optional: true, }, + "eri_discovery_config": { + Type: schema.TypeList, //GoType: ScriptERIDiscoveryAttributeV2 + Elem: &schema.Resource{ + Schema: ScriptERIDiscoveryAttributeV2Schema(), + }, + Optional: true, + }, + "eri_discovery_interval": { Type: 
schema.TypeInt, Optional: true, @@ -84,7 +100,7 @@ func DatasourceSchema() map[string]*schema.Schema { "has_multi_instances": { Type: schema.TypeBool, - Computed: true, + Optional: true, }, "id": { @@ -145,6 +161,14 @@ func DataSourceDatasourceSchema() map[string]*schema.Schema { Optional: true, }, + "auto_discovery_config": { + Type: schema.TypeList, //GoType: AutoDiscoveryConfiguration + Elem: &schema.Resource{ + Schema: AutoDiscoveryConfigurationSchema(), + }, + Optional: true, + }, + "checksum": { Type: schema.TypeString, Optional: true, @@ -197,6 +221,14 @@ func DataSourceDatasourceSchema() map[string]*schema.Schema { Optional: true, }, + "eri_discovery_config": { + Type: schema.TypeList, //GoType: ScriptERIDiscoveryAttributeV2 + Elem: &schema.Resource{ + Schema: ScriptERIDiscoveryAttributeV2Schema(), + }, + Optional: true, + }, + "eri_discovery_interval": { Type: schema.TypeInt, Optional: true, @@ -263,6 +295,7 @@ func DataSourceDatasourceSchema() map[string]*schema.Schema { func SetDatasourceResourceData(d *schema.ResourceData, m *models.Datasource) { d.Set("applies_to", m.AppliesTo) d.Set("audit_version", m.AuditVersion) + d.Set("auto_discovery_config", SetAutoDiscoveryConfigurationSubResourceData([]*models.AutoDiscoveryConfiguration{m.AutoDiscoveryConfig})) d.Set("checksum", m.Checksum) d.Set("collect_interval", m.CollectInterval) d.Set("collect_method", m.CollectMethod) @@ -272,6 +305,7 @@ func SetDatasourceResourceData(d *schema.ResourceData, m *models.Datasource) { d.Set("display_name", m.DisplayName) d.Set("enable_auto_discovery", m.EnableAutoDiscovery) d.Set("enable_eri_discovery", m.EnableEriDiscovery) + d.Set("eri_discovery_config", SetScriptERIDiscoveryAttributeV2SubResourceData([]*models.ScriptERIDiscoveryAttributeV2{m.EriDiscoveryConfig})) d.Set("eri_discovery_interval", m.EriDiscoveryInterval) d.Set("group", m.Group) d.Set("has_multi_instances", m.HasMultiInstances) @@ -291,6 +325,7 @@ func SetDatasourceSubResourceData(m []*models.Datasource) (d 
[]*map[string]inter properties := make(map[string]interface{}) properties["applies_to"] = datasource.AppliesTo properties["audit_version"] = datasource.AuditVersion + properties["auto_discovery_config"] = SetAutoDiscoveryConfigurationSubResourceData([]*models.AutoDiscoveryConfiguration{datasource.AutoDiscoveryConfig}) properties["checksum"] = datasource.Checksum properties["collect_interval"] = datasource.CollectInterval properties["collect_method"] = datasource.CollectMethod @@ -300,6 +335,7 @@ func SetDatasourceSubResourceData(m []*models.Datasource) (d []*map[string]inter properties["display_name"] = datasource.DisplayName properties["enable_auto_discovery"] = datasource.EnableAutoDiscovery properties["enable_eri_discovery"] = datasource.EnableEriDiscovery + properties["eri_discovery_config"] = SetScriptERIDiscoveryAttributeV2SubResourceData([]*models.ScriptERIDiscoveryAttributeV2{datasource.EriDiscoveryConfig}) properties["eri_discovery_interval"] = datasource.EriDiscoveryInterval properties["group"] = datasource.Group properties["has_multi_instances"] = datasource.HasMultiInstances @@ -319,6 +355,12 @@ func SetDatasourceSubResourceData(m []*models.Datasource) (d []*map[string]inter func DatasourceModel(d *schema.ResourceData) *models.Datasource { appliesTo := d.Get("applies_to").(string) + var autoDiscoveryConfig *models.AutoDiscoveryConfiguration = nil + autoDiscoveryConfigInterface, autoDiscoveryConfigIsSet := d.GetOk("auto_discovery_config") + if autoDiscoveryConfigIsSet { + autoDiscoveryConfigMap := autoDiscoveryConfigInterface.([]interface{})[0].(map[string]interface{}) + autoDiscoveryConfig = AutoDiscoveryConfigurationModel(autoDiscoveryConfigMap) + } collectInterval := int32(d.Get("collect_interval").(int)) collectMethod := d.Get("collect_method").(string) var collectorAttribute *models.CollectorAttribute = nil @@ -332,8 +374,15 @@ func DatasourceModel(d *schema.ResourceData) *models.Datasource { displayName := d.Get("display_name").(string) 
enableAutoDiscovery := d.Get("enable_auto_discovery").(bool) enableEriDiscovery := d.Get("enable_eri_discovery").(bool) + var eriDiscoveryConfig *models.ScriptERIDiscoveryAttributeV2 = nil + eriDiscoveryConfigInterface, eriDiscoveryConfigIsSet := d.GetOk("eri_discovery_config") + if eriDiscoveryConfigIsSet { + eriDiscoveryConfigMap := eriDiscoveryConfigInterface.([]interface{})[0].(map[string]interface{}) + eriDiscoveryConfig = ScriptERIDiscoveryAttributeV2Model(eriDiscoveryConfigMap) + } eriDiscoveryInterval := int32(d.Get("eri_discovery_interval").(int)) group := d.Get("group").(string) + hasMultiInstances := d.Get("has_multi_instances").(bool) id, _ := strconv.Atoi(d.Get("id").(string)) name := d.Get("name").(string) tags := d.Get("tags").(string) @@ -341,6 +390,7 @@ func DatasourceModel(d *schema.ResourceData) *models.Datasource { return &models.Datasource { AppliesTo: appliesTo, + AutoDiscoveryConfig: autoDiscoveryConfig, CollectInterval: &collectInterval, CollectMethod: &collectMethod, CollectorAttribute: collectorAttribute, @@ -349,8 +399,10 @@ func DatasourceModel(d *schema.ResourceData) *models.Datasource { DisplayName: displayName, EnableAutoDiscovery: enableAutoDiscovery, EnableEriDiscovery: enableEriDiscovery, + EriDiscoveryConfig: eriDiscoveryConfig, EriDiscoveryInterval: eriDiscoveryInterval, Group: group, + HasMultiInstances: hasMultiInstances, ID: int32(id), Name: &name, Tags: tags, @@ -360,6 +412,7 @@ func DatasourceModel(d *schema.ResourceData) *models.Datasource { func GetDatasourcePropertyFields() (t []string) { return []string{ "applies_to", + "auto_discovery_config", "collect_interval", "collect_method", "collector_attribute", @@ -368,8 +421,10 @@ func GetDatasourcePropertyFields() (t []string) { "display_name", "enable_auto_discovery", "enable_eri_discovery", + "eri_discovery_config", "eri_discovery_interval", "group", + "has_multi_instances", "id", "name", "tags", diff --git 
a/logicmonitor/schemata/script_e_r_i_discovery_attribute_v2_schema.go b/logicmonitor/schemata/script_e_r_i_discovery_attribute_v2_schema.go new file mode 100644 index 00000000..f35f64eb --- /dev/null +++ b/logicmonitor/schemata/script_e_r_i_discovery_attribute_v2_schema.go @@ -0,0 +1,96 @@ +package schemata + +import ( + "terraform-provider-logicmonitor/models" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" +) + +func ScriptERIDiscoveryAttributeV2Schema() map[string]*schema.Schema { + return map[string]*schema.Schema{ + "groovy_script": { + Type: schema.TypeString, + Optional: true, + }, + + "linux_cmdline": { + Type: schema.TypeString, + Optional: true, + }, + + "linux_script": { + Type: schema.TypeString, + Optional: true, + }, + + "name": { + Type: schema.TypeString, + Required: true, + }, + + "type": { + Type: schema.TypeString, + Optional: true, + }, + + "win_cmdline": { + Type: schema.TypeString, + Optional: true, + }, + + "win_script": { + Type: schema.TypeString, + Optional: true, + }, + + } +} + +func SetScriptERIDiscoveryAttributeV2SubResourceData(m []*models.ScriptERIDiscoveryAttributeV2) (d []*map[string]interface{}) { + for _, scriptERIDiscoveryAttributeV2 := range m { + if scriptERIDiscoveryAttributeV2 != nil { + properties := make(map[string]interface{}) + properties["groovy_script"] = scriptERIDiscoveryAttributeV2.GroovyScript + properties["linux_cmdline"] = scriptERIDiscoveryAttributeV2.LinuxCmdline + properties["linux_script"] = scriptERIDiscoveryAttributeV2.LinuxScript + properties["name"] = scriptERIDiscoveryAttributeV2.Name + properties["type"] = scriptERIDiscoveryAttributeV2.Type + properties["win_cmdline"] = scriptERIDiscoveryAttributeV2.WinCmdline + properties["win_script"] = scriptERIDiscoveryAttributeV2.WinScript + d = append(d, &properties) + } + } + return +} + +func ScriptERIDiscoveryAttributeV2Model(d map[string]interface{}) *models.ScriptERIDiscoveryAttributeV2 { + // assume that the incoming map only contains the 
relevant resource data + groovyScript := d["groovy_script"].(string) + linuxCmdline := d["linux_cmdline"].(string) + linuxScript := d["linux_script"].(string) + name := d["name"].(string) + typeVar := d["type"].(string) + winCmdline := d["win_cmdline"].(string) + winScript := d["win_script"].(string) + + return &models.ScriptERIDiscoveryAttributeV2 { + GroovyScript: groovyScript, + LinuxCmdline: linuxCmdline, + LinuxScript: linuxScript, + Name: &name, + Type: typeVar, + WinCmdline: winCmdline, + WinScript: winScript, + } +} + +func GetScriptERIDiscoveryAttributeV2PropertyFields() (t []string) { + return []string{ + "groovy_script", + "linux_cmdline", + "linux_script", + "name", + "type", + "win_cmdline", + "win_script", + } +} \ No newline at end of file diff --git a/logicmonitor/utils/helper_functions.go b/logicmonitor/utils/helper_functions.go index 1abd2bd6..01ec03d5 100644 --- a/logicmonitor/utils/helper_functions.go +++ b/logicmonitor/utils/helper_functions.go @@ -462,6 +462,7 @@ func getPropFromDPInterface(r interface{}) (t []*models.DataPoint ) { var alertExpr = m["alert_expr"].(string) var alertExprNote = m["alert_expr_note"].(string) var typee = int32(m["type"].(int)) + var rawDataFieldName = m["raw_data_field_name"].(string) model := &models.DataPoint{ Name: &name, Description: description, @@ -479,6 +480,7 @@ func getPropFromDPInterface(r interface{}) (t []*models.DataPoint ) { AlertExpr: alertExpr, AlertExprNote: alertExprNote, Type: typee, + RawDataFieldName: rawDataFieldName, } t = append(t, model) } @@ -506,4 +508,25 @@ func getPropFromLocInterface(r interface{}) (t *models.WebsiteLocation) { } } return -} \ No newline at end of file +} +func GetFilters(d []interface{}) []*models.AutoDiscoveryFilter { + var filters []*models.AutoDiscoveryFilter + for _, i := range d { + if m, ok := i.(map[string]interface{}); ok { + attribute := m["attribute"].(string) + comment := m["comment"].(string) + operation := m["operation"].(string) + value := 
m["value"].(string) + + model := &models.AutoDiscoveryFilter{ + Attribute: &attribute, + Comment: comment, + Operation: &operation, + Value: value, + } + filters = append(filters, model) + } + } + return filters +} + diff --git a/models/auto_discovery_configuration.go b/models/auto_discovery_configuration.go new file mode 100644 index 00000000..9e6384ff --- /dev/null +++ b/models/auto_discovery_configuration.go @@ -0,0 +1,200 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package models + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "strconv" + + "github.com/go-openapi/errors" + "github.com/go-openapi/strfmt" + "github.com/go-openapi/swag" + "github.com/go-openapi/validate" +) + +// AutoDiscoveryConfiguration auto discovery configuration +// +// swagger:model AutoDiscoveryConfiguration +type AutoDiscoveryConfiguration struct { + + // data source name + // Read Only: true + DataSourceName string `json:"dataSourceName,omitempty"` + + // delete inactive instance + DeleteInactiveInstance bool `json:"deleteInactiveInstance,omitempty"` + + // disable instance + DisableInstance bool `json:"disableInstance,omitempty"` + + // filters + Filters []*AutoDiscoveryFilter `json:"filters"` + + // instance auto group method + InstanceAutoGroupMethod string `json:"instanceAutoGroupMethod,omitempty"` + + // instance auto group method params + InstanceAutoGroupMethodParams string `json:"instanceAutoGroupMethodParams,omitempty"` + + // method + // Required: true + Method *AutoDiscoveryMethod `json:"method"` + + // persistent instance + PersistentInstance bool `json:"persistentInstance,omitempty"` + + // schedule interval + ScheduleInterval int32 `json:"scheduleInterval,omitempty"` +} + +// Validate validates this auto discovery configuration +func (m *AutoDiscoveryConfiguration) Validate(formats strfmt.Registry) error { + var res []error + + if err := 
m.validateFilters(formats); err != nil { + res = append(res, err) + } + + if err := m.validateMethod(formats); err != nil { + res = append(res, err) + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} + +func (m *AutoDiscoveryConfiguration) validateFilters(formats strfmt.Registry) error { + if swag.IsZero(m.Filters) { // not required + return nil + } + + for i := 0; i < len(m.Filters); i++ { + if swag.IsZero(m.Filters[i]) { // not required + continue + } + + if m.Filters[i] != nil { + if err := m.Filters[i].Validate(formats); err != nil { + if ve, ok := err.(*errors.Validation); ok { + return ve.ValidateName("filters" + "." + strconv.Itoa(i)) + } else if ce, ok := err.(*errors.CompositeError); ok { + return ce.ValidateName("filters" + "." + strconv.Itoa(i)) + } + return err + } + } + + } + + return nil +} + +func (m *AutoDiscoveryConfiguration) validateMethod(formats strfmt.Registry) error { + + if err := validate.Required("method", "body", m.Method); err != nil { + return err + } + + if m.Method != nil { + if err := m.Method.Validate(formats); err != nil { + if ve, ok := err.(*errors.Validation); ok { + return ve.ValidateName("method") + } else if ce, ok := err.(*errors.CompositeError); ok { + return ce.ValidateName("method") + } + return err + } + } + + return nil +} + +// ContextValidate validate this auto discovery configuration based on the context it is used +func (m *AutoDiscoveryConfiguration) ContextValidate(ctx context.Context, formats strfmt.Registry) error { + var res []error + + if err := m.contextValidateDataSourceName(ctx, formats); err != nil { + res = append(res, err) + } + + if err := m.contextValidateFilters(ctx, formats); err != nil { + res = append(res, err) + } + + if err := m.contextValidateMethod(ctx, formats); err != nil { + res = append(res, err) + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) 
+ } + return nil +} + +func (m *AutoDiscoveryConfiguration) contextValidateDataSourceName(ctx context.Context, formats strfmt.Registry) error { + + if err := validate.ReadOnly(ctx, "dataSourceName", "body", string(m.DataSourceName)); err != nil { + return err + } + + return nil +} + +func (m *AutoDiscoveryConfiguration) contextValidateFilters(ctx context.Context, formats strfmt.Registry) error { + + for i := 0; i < len(m.Filters); i++ { + + if m.Filters[i] != nil { + if err := m.Filters[i].ContextValidate(ctx, formats); err != nil { + if ve, ok := err.(*errors.Validation); ok { + return ve.ValidateName("filters" + "." + strconv.Itoa(i)) + } else if ce, ok := err.(*errors.CompositeError); ok { + return ce.ValidateName("filters" + "." + strconv.Itoa(i)) + } + return err + } + } + + } + + return nil +} + +func (m *AutoDiscoveryConfiguration) contextValidateMethod(ctx context.Context, formats strfmt.Registry) error { + + if m.Method != nil { + if err := m.Method.ContextValidate(ctx, formats); err != nil { + if ve, ok := err.(*errors.Validation); ok { + return ve.ValidateName("method") + } else if ce, ok := err.(*errors.CompositeError); ok { + return ce.ValidateName("method") + } + return err + } + } + + return nil +} + +// MarshalBinary interface implementation +func (m *AutoDiscoveryConfiguration) MarshalBinary() ([]byte, error) { + if m == nil { + return nil, nil + } + return swag.WriteJSON(m) +} + +// UnmarshalBinary interface implementation +func (m *AutoDiscoveryConfiguration) UnmarshalBinary(b []byte) error { + var res AutoDiscoveryConfiguration + if err := swag.ReadJSON(b, &res); err != nil { + return err + } + *m = res + return nil +} diff --git a/models/auto_discovery_filter.go b/models/auto_discovery_filter.go new file mode 100644 index 00000000..57f0493c --- /dev/null +++ b/models/auto_discovery_filter.go @@ -0,0 +1,94 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package models + +// This file was generated by the swagger tool. 
+// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + + "github.com/go-openapi/errors" + "github.com/go-openapi/strfmt" + "github.com/go-openapi/swag" + "github.com/go-openapi/validate" +) + +// AutoDiscoveryFilter auto discovery filter +// +// swagger:model AutoDiscoveryFilter +type AutoDiscoveryFilter struct { + + // attribute + // Required: true + Attribute *string `json:"attribute"` + + // comment + Comment string `json:"comment,omitempty"` + + // operation + // Required: true + Operation *string `json:"operation"` + + // value + Value string `json:"value,omitempty"` +} + +// Validate validates this auto discovery filter +func (m *AutoDiscoveryFilter) Validate(formats strfmt.Registry) error { + var res []error + + if err := m.validateAttribute(formats); err != nil { + res = append(res, err) + } + + if err := m.validateOperation(formats); err != nil { + res = append(res, err) + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) 
+ } + return nil +} + +func (m *AutoDiscoveryFilter) validateAttribute(formats strfmt.Registry) error { + + if err := validate.Required("attribute", "body", m.Attribute); err != nil { + return err + } + + return nil +} + +func (m *AutoDiscoveryFilter) validateOperation(formats strfmt.Registry) error { + + if err := validate.Required("operation", "body", m.Operation); err != nil { + return err + } + + return nil +} + +// ContextValidate validates this auto discovery filter based on context it is used +func (m *AutoDiscoveryFilter) ContextValidate(ctx context.Context, formats strfmt.Registry) error { + return nil +} + +// MarshalBinary interface implementation +func (m *AutoDiscoveryFilter) MarshalBinary() ([]byte, error) { + if m == nil { + return nil, nil + } + return swag.WriteJSON(m) +} + +// UnmarshalBinary interface implementation +func (m *AutoDiscoveryFilter) UnmarshalBinary(b []byte) error { + var res AutoDiscoveryFilter + if err := swag.ReadJSON(b, &res); err != nil { + return err + } + *m = res + return nil +} diff --git a/models/auto_discovery_method.go b/models/auto_discovery_method.go new file mode 100644 index 00000000..b636c816 --- /dev/null +++ b/models/auto_discovery_method.go @@ -0,0 +1,71 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package models + +// This file was generated by the swagger tool. 
+// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + + "github.com/go-openapi/errors" + "github.com/go-openapi/strfmt" + "github.com/go-openapi/swag" + "github.com/go-openapi/validate" +) + +// AutoDiscoveryMethod auto discovery method +// +// swagger:model AutoDiscoveryMethod +type AutoDiscoveryMethod struct { + + // name + // Required: true + Name *string `json:"name"` +} + +// Validate validates this auto discovery method +func (m *AutoDiscoveryMethod) Validate(formats strfmt.Registry) error { + var res []error + + if err := m.validateName(formats); err != nil { + res = append(res, err) + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} + +func (m *AutoDiscoveryMethod) validateName(formats strfmt.Registry) error { + + if err := validate.Required("name", "body", m.Name); err != nil { + return err + } + + return nil +} + +// ContextValidate validates this auto discovery method based on context it is used +func (m *AutoDiscoveryMethod) ContextValidate(ctx context.Context, formats strfmt.Registry) error { + return nil +} + +// MarshalBinary interface implementation +func (m *AutoDiscoveryMethod) MarshalBinary() ([]byte, error) { + if m == nil { + return nil, nil + } + return swag.WriteJSON(m) +} + +// UnmarshalBinary interface implementation +func (m *AutoDiscoveryMethod) UnmarshalBinary(b []byte) error { + var res AutoDiscoveryMethod + if err := swag.ReadJSON(b, &res); err != nil { + return err + } + *m = res + return nil +} diff --git a/models/datasource.go b/models/datasource.go index 96a5a740..0db5886f 100644 --- a/models/datasource.go +++ b/models/datasource.go @@ -29,6 +29,9 @@ type Datasource struct { // Read Only: true AuditVersion int64 `json:"auditVersion,omitempty"` + // auto discovery config + AutoDiscoveryConfig *AutoDiscoveryConfiguration `json:"autoDiscoveryConfig,omitempty"` + // The metadata checksum for the LMModule content // Read Only: true 
Checksum string `json:"checksum,omitempty"` @@ -67,6 +70,9 @@ type Datasource struct { // Example: false EnableEriDiscovery bool `json:"enableEriDiscovery,omitempty"` + // eri discovery config + EriDiscoveryConfig *ScriptERIDiscoveryAttributeV2 `json:"eriDiscoveryConfig,omitempty"` + // The DataSource data collect interval // Example: 10 EriDiscoveryInterval int32 `json:"eriDiscoveryInterval,omitempty"` @@ -76,8 +82,7 @@ type Datasource struct { Group string `json:"group,omitempty"` // If the DataSource has multi instance: true|false - // Read Only: true - HasMultiInstances *bool `json:"hasMultiInstances,omitempty"` + HasMultiInstances bool `json:"hasMultiInstances,omitempty"` // The ID of the LMModule // Read Only: true @@ -117,6 +122,10 @@ type Datasource struct { func (m *Datasource) Validate(formats strfmt.Registry) error { var res []error + if err := m.validateAutoDiscoveryConfig(formats); err != nil { + res = append(res, err) + } + if err := m.validateCollectInterval(formats); err != nil { res = append(res, err) } @@ -133,6 +142,10 @@ func (m *Datasource) Validate(formats strfmt.Registry) error { res = append(res, err) } + if err := m.validateEriDiscoveryConfig(formats); err != nil { + res = append(res, err) + } + if err := m.validateName(formats); err != nil { res = append(res, err) } @@ -143,6 +156,25 @@ func (m *Datasource) Validate(formats strfmt.Registry) error { return nil } +func (m *Datasource) validateAutoDiscoveryConfig(formats strfmt.Registry) error { + if swag.IsZero(m.AutoDiscoveryConfig) { // not required + return nil + } + + if m.AutoDiscoveryConfig != nil { + if err := m.AutoDiscoveryConfig.Validate(formats); err != nil { + if ve, ok := err.(*errors.Validation); ok { + return ve.ValidateName("autoDiscoveryConfig") + } else if ce, ok := err.(*errors.CompositeError); ok { + return ce.ValidateName("autoDiscoveryConfig") + } + return err + } + } + + return nil +} + func (m *Datasource) validateCollectInterval(formats strfmt.Registry) error { if 
err := validate.Required("collectInterval", "body", m.CollectInterval); err != nil { @@ -207,6 +239,25 @@ func (m *Datasource) validateDataPoints(formats strfmt.Registry) error { return nil } +func (m *Datasource) validateEriDiscoveryConfig(formats strfmt.Registry) error { + if swag.IsZero(m.EriDiscoveryConfig) { // not required + return nil + } + + if m.EriDiscoveryConfig != nil { + if err := m.EriDiscoveryConfig.Validate(formats); err != nil { + if ve, ok := err.(*errors.Validation); ok { + return ve.ValidateName("eriDiscoveryConfig") + } else if ce, ok := err.(*errors.CompositeError); ok { + return ce.ValidateName("eriDiscoveryConfig") + } + return err + } + } + + return nil +} + func (m *Datasource) validateName(formats strfmt.Registry) error { if err := validate.Required("name", "body", m.Name); err != nil { @@ -224,6 +275,10 @@ func (m *Datasource) ContextValidate(ctx context.Context, formats strfmt.Registr res = append(res, err) } + if err := m.contextValidateAutoDiscoveryConfig(ctx, formats); err != nil { + res = append(res, err) + } + if err := m.contextValidateChecksum(ctx, formats); err != nil { res = append(res, err) } @@ -236,7 +291,7 @@ func (m *Datasource) ContextValidate(ctx context.Context, formats strfmt.Registr res = append(res, err) } - if err := m.contextValidateHasMultiInstances(ctx, formats); err != nil { + if err := m.contextValidateEriDiscoveryConfig(ctx, formats); err != nil { res = append(res, err) } @@ -275,6 +330,22 @@ func (m *Datasource) contextValidateAuditVersion(ctx context.Context, formats st return nil } +func (m *Datasource) contextValidateAutoDiscoveryConfig(ctx context.Context, formats strfmt.Registry) error { + + if m.AutoDiscoveryConfig != nil { + if err := m.AutoDiscoveryConfig.ContextValidate(ctx, formats); err != nil { + if ve, ok := err.(*errors.Validation); ok { + return ve.ValidateName("autoDiscoveryConfig") + } else if ce, ok := err.(*errors.CompositeError); ok { + return ce.ValidateName("autoDiscoveryConfig") + } + 
return err + } + } + + return nil +} + func (m *Datasource) contextValidateChecksum(ctx context.Context, formats strfmt.Registry) error { if err := validate.ReadOnly(ctx, "checksum", "body", string(m.Checksum)); err != nil { @@ -320,10 +391,17 @@ func (m *Datasource) contextValidateDataPoints(ctx context.Context, formats strf return nil } -func (m *Datasource) contextValidateHasMultiInstances(ctx context.Context, formats strfmt.Registry) error { +func (m *Datasource) contextValidateEriDiscoveryConfig(ctx context.Context, formats strfmt.Registry) error { - if err := validate.ReadOnly(ctx, "hasMultiInstances", "body", m.HasMultiInstances); err != nil { - return err + if m.EriDiscoveryConfig != nil { + if err := m.EriDiscoveryConfig.ContextValidate(ctx, formats); err != nil { + if ve, ok := err.(*errors.Validation); ok { + return ve.ValidateName("eriDiscoveryConfig") + } else if ce, ok := err.(*errors.CompositeError); ok { + return ce.ValidateName("eriDiscoveryConfig") + } + return err + } } return nil diff --git a/models/script_e_r_i_discovery_attribute_v2.go b/models/script_e_r_i_discovery_attribute_v2.go new file mode 100644 index 00000000..1df7c3f6 --- /dev/null +++ b/models/script_e_r_i_discovery_attribute_v2.go @@ -0,0 +1,89 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package models + +// This file was generated by the swagger tool. 
+// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + + "github.com/go-openapi/errors" + "github.com/go-openapi/strfmt" + "github.com/go-openapi/swag" + "github.com/go-openapi/validate" +) + +// ScriptERIDiscoveryAttributeV2 script e r i discovery attribute v2 +// +// swagger:model ScriptERIDiscoveryAttributeV2 +type ScriptERIDiscoveryAttributeV2 struct { + + // groovy script + GroovyScript string `json:"groovyScript,omitempty"` + + // linux cmdline + LinuxCmdline string `json:"linuxCmdline,omitempty"` + + // linux script + LinuxScript string `json:"linuxScript,omitempty"` + + // name + // Required: true + Name *string `json:"name"` + + // type + Type string `json:"type,omitempty"` + + // win cmdline + WinCmdline string `json:"winCmdline,omitempty"` + + // win script + WinScript string `json:"winScript,omitempty"` +} + +// Validate validates this script e r i discovery attribute v2 +func (m *ScriptERIDiscoveryAttributeV2) Validate(formats strfmt.Registry) error { + var res []error + + if err := m.validateName(formats); err != nil { + res = append(res, err) + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) 
+ } + return nil +} + +func (m *ScriptERIDiscoveryAttributeV2) validateName(formats strfmt.Registry) error { + + if err := validate.Required("name", "body", m.Name); err != nil { + return err + } + + return nil +} + +// ContextValidate validates this script e r i discovery attribute v2 based on context it is used +func (m *ScriptERIDiscoveryAttributeV2) ContextValidate(ctx context.Context, formats strfmt.Registry) error { + return nil +} + +// MarshalBinary interface implementation +func (m *ScriptERIDiscoveryAttributeV2) MarshalBinary() ([]byte, error) { + if m == nil { + return nil, nil + } + return swag.WriteJSON(m) +} + +// UnmarshalBinary interface implementation +func (m *ScriptERIDiscoveryAttributeV2) UnmarshalBinary(b []byte) error { + var res ScriptERIDiscoveryAttributeV2 + if err := swag.ReadJSON(b, &res); err != nil { + return err + } + *m = res + return nil +} diff --git a/website/docs/index.markdown b/website/docs/index.markdown index ceae5afb..157a53d2 100644 --- a/website/docs/index.markdown +++ b/website/docs/index.markdown @@ -114,13 +114,42 @@ resource "logicmonitor_data_resource_aws_external_id" "my_data_resource_aws_exte # create a new LogicMonitor datasource resource "logicmonitor_datasource" "my_datasource" { collect_interval = 100 + has_multi_instances = true applies_to = "system.deviceId == \"22\"" description = "test" collect_method = "script" - eri_discovery_interval = 1 + eri_discovery_interval = 15 + enable_auto_discovery = true enable_eri_discovery = true + eri_discovery_config { + name = "ad_script" + win_script = "string" + groovy_script = "string" + type = "string" + linux_cmdline = "string" + linux_script = "string" + win_cmdline = "string" + } name = "Amazon Website test" - data_points = [{ + auto_discovery_config { + persistent_instance = false + schedule_interval = 0 + delete_inactive_instance = true + method { + name = "ad_script" + } + instance_auto_group_method = "none" + instance_auto_group_method_params = "" + filters = [ + 
{ + comment = "test" + value = "test" + operation = "string" + attribute = "string" + }] + disable_instance = true + } + data_points = [{ name = "CallCountTotal_mean8" description = "test" alert_for_no_data = 1 @@ -138,11 +167,11 @@ resource "logicmonitor_datasource" "my_datasource" { raw_data_field_name = "string" post_processor_method = "aggregation" post_processor_param = "{\"version\":\"1.0\",\"expression\":{\"funcName\":\"mean\",\"dataSourceName\":\"AWS_Cognito_GlobalAPICallStats\",\"dataPointName\":\"CallCountTotal\"},\"dataLack\":\"ignore\"}" - }] - display_name = "Test demo" - collector_attribute { - name = "script" - } + }] + display_name = "Testdemo" + collector_attribute { + name = "script" + } } ``` diff --git a/website/docs/r/alert_rule.markdown b/website/docs/r/alert_rule.markdown index 2a27c6c9..2d66ab76 100644 --- a/website/docs/r/alert_rule.markdown +++ b/website/docs/r/alert_rule.markdown @@ -37,19 +37,27 @@ resource "logicmonitor_alert_rule" "my_alert_rule" { The following arguments are **required**: * `datapoint` - The datapoint the alert rule is configured to match + (string) * `datasource` - The datasource the alert rule is configured to match + (string) * `device_groups` - The device groups and service groups the alert rule is configured to match + ([]string) * `devices` - The device names and service names the alert rule is configured to match + ([]string) * `escalating_chain_id` - The id of the escalation chain associated with the alert rule + (int32) * `instance` - The instance the alert rule is configured to match + (string) * `name` - The name of the alert rule + (string) * `priority` - The priority associated with the alert rule + (int32) The following arguments are **optional**: -* `escalation_interval` - The escalation interval associated with the alert rule, in minutes -* `level_str` - The alert severity levels the alert rule is configured to match. 
Acceptable values are: All, Warn, Error, Critical -* `suppress_alert_ack_sdt` - Whether or not status notifications for acknowledgements and SDTs should be sent to the alert rule -* `suppress_alert_clear` - Whether or not alert clear notifications should be sent to the alert rule +* `escalation_interval` - The escalation interval associated with the alert rule, in minutes (int32) +* `level_str` - The alert severity levels the alert rule is configured to match. Acceptable values are: All, Warn, Error, Critical (string) +* `suppress_alert_ack_sdt` - Whether or not status notifications for acknowledgements and SDTs should be sent to the alert rule (bool) +* `suppress_alert_clear` - Whether or not alert clear notifications should be sent to the alert rule (bool) ## Import diff --git a/website/docs/r/collector.markdown b/website/docs/r/collector.markdown index 70b9d55e..06b114a3 100644 --- a/website/docs/r/collector.markdown +++ b/website/docs/r/collector.markdown @@ -79,46 +79,50 @@ output "myInstallerURL" { * The argument `company` (i.e. the user's portal name) is required if the environment variable `LM_COMPANY` is not set. 
The following arguments are **optional**: -* `arch` - The collector architecture (Windows | Linux platform followed by 32 | 64 bit), the default value is linux64 -* `automatic_upgrade_info` - The details of the Collector's automatic upgrade schedule, if one exists +* `arch` - The collector architecture (Windows | Linux platform followed by 32 | 64 bit) (string), the default value is linux64 +* `automatic_upgrade_info` - The details of the Collector's automatic upgrade schedule, if one exists (AutomaticUpgradeInfo) + `dayOfWeek` (required) - Options include `SUN`, `MON`, `TUE`, `WED`, `THU`, `FRI`, `SAT` + `description` + `hour` (required) + `minute` (required) + `occurrence` (required) - Options inlcude `FIRST`, `SECOND`, `THIRD`, `FOURTH`, `ANY` + `timezone` - + `version` (required) - Options include `ED`, `GD`, `MGD` -* `backup_agent_id` - The Id of the backup Collector assigned to the Collector -* `build` - The Collector version -* `collector_group_id` - The Id of the group the Collector is in -* `collector_size` - The size of the collector, the default value is medium -* `company` - The user's company (portal) name, this field is required if the environment variable LM_COMPANY is not set -* `custom_properties` - The custom properties defined for the Collector + + `version` (required) - Options include `EA`, `GD`, `MGD` + Early Access (EA) – EA releases are often the first to debut new functionality. We sometimes release a major feature in batches through EA release. So, EA is not recommended for your entire infrastructure. They occur 9-10 times per year. If there are major bug fixes, we patch EA, and it is referred as EA patch release. A stable EA version is designated as an optional general release (GD). + Optional General Releases (GD) – GD releases are stable collector updates that may have new features. However, it is not mandatory to update collectors with GD releases. They occur twice a year. 
If there are major bug fixes, we patch GD, and it is referred as GD patch release. A stable GD version is designated as Required General Release (MGD). + Required General Releases (MGD) – An MGD is released once a year. When we designate a GD as an MGD, we schedule and announce a date to auto-upgrade collectors to the MGD version. To let customers upgrade collectors as per their convenience, we send communication at least 30 days before the scheduled auto-upgrade date. On the auto-upgrade date, we upgrade only those collectors which are still below the MGD version. Thus, going forward, the MGD becomes the minimum required version. If there are major bug fixes, we patch MGD, and it is referred as MGD patch release. + For more details please refer: https://www.logicmonitor.com/support/collectors/collector-overview/collector-versions +* `backup_agent_id` - The Id of the backup Collector assigned to the Collector (int32) +* `build` - The Collector version (string) +* `collector_group_id` - The Id of the group the Collector is in (int32) +* `collector_size` - The size of the collector (string), the default value is medium +* `company` - The user's company (portal) name, this field is required if the environment variable LM_COMPANY is not set (string) +* `custom_properties` - The custom properties defined for the Collector ([]*NameAndValue) + `name` - The name of a property (required) + `value` - The value of a property (required) -* `description` - The Collector's description -* `ea` - Whether the collector is in EA version -* `enable_fail_back` - Whether or not automatic failback is enabled for the Collector, the default value is true -* `enable_fail_over_on_collector_device` - Whether or not the device the Collector is installed on is enabled for fail over -* `escalating_chain_id` - The Id of the escalation chain associated with this Collector -* `monitor_others` - Check if we shall monitor using local account (for windows), the default value is true -* 
`need_auto_create_collector_device` - Whether to create a collector device when instance collector, the default value is true -* `number_of_instances` - The number of instances that are monitored by this collector -* `onetime_downgrade_info` - The details of the Collector's automatic downgrade schedule, if one exists +* `description` - The Collector's description (string) +* `ea` - Whether the collector is in EA version (bool) +* `enable_fail_back` - Whether or not automatic failback is enabled for the Collector, the default value is true (bool) +* `enable_fail_over_on_collector_device` - Whether or not the device the Collector is installed on is enabled for fail over (bool) +* `escalating_chain_id` - The Id of the escalation chain associated with this Collector (int32) +* `monitor_others` - Check if we shall monitor using local account (for windows) (bool), the default value is true +* `need_auto_create_collector_device` - Whether to create a collector device when instance collector, the default value is true (bool) +* `number_of_instances` - The number of instances that are monitored by this collector (int32) +* `onetime_downgrade_info` - The details of the Collector's automatic downgrade schedule, if one exists (OnetimeUpgradeInfo) + `description` + `major_version` (required) + `minor_version` (required) + `start_epoch` (required) + `timezone` -* `onetime_upgrade_info` - The details of the Collector's one time upgrade, if one has been scheduled +* `onetime_upgrade_info` - The details of the Collector's one time upgrade, if one has been scheduled (OnetimeUpgradeInfo) + `description` + `major_version` (required) + `minor_version` (required) + `start_epoch` (required) + `timezone` -* `resend_ival` - The interval, in minutes, after which alert notifications for the Collector will be resent -* `specified_collector_device_group_id` - The collector device group id assigned when creating a new collector device -* `suppress_alert_clear` - Whether alert clear 
notifications are suppressed for the Collector +* `resend_ival` - The interval, in minutes, after which alert notifications for the Collector will be resent (int32) +* `specified_collector_device_group_id` - The collector device group id assigned when creating a new collector device (int32) +* `suppress_alert_clear` - Whether alert clear notifications are suppressed for the Collector (bool) ## Import diff --git a/website/docs/r/collector_group.markdown b/website/docs/r/collector_group.markdown index fb69de3c..6b2c9d09 100644 --- a/website/docs/r/collector_group.markdown +++ b/website/docs/r/collector_group.markdown @@ -36,15 +36,16 @@ resource "logicmonitor_collector_group" "my_collector_group" { The following arguments are **required**: * `name` - The name of the Collector Group + (string) The following arguments are **optional**: -* `auto_balance` - Denotes whether or not the collector group should be auto balanced -* `auto_balance_instance_count_threshold` - Threshold for instance count strategy to check if a collector has high load -* `auto_balance_strategy` - The auto balance strategy. Typically left blank or set to 'none'. -* `custom_properties` - The custom properties defined for the Collector group +* `auto_balance` - Denotes whether or not the collector group should be auto balanced (bool) +* `auto_balance_instance_count_threshold` - Threshold for instance count strategy to check if a collector has high load (int32) +* `auto_balance_strategy` - The auto balance strategy. Typically left blank or set to 'none'. 
(string) +* `custom_properties` - The custom properties defined for the Collector group ([]*NameAndValue) + `name` - The name of a property (required) + `value` - The value of a property (required) -* `description` - The description of the Collector Group +* `description` - The description of the Collector Group (string) ## Import diff --git a/website/docs/r/dashboard.markdown b/website/docs/r/dashboard.markdown index 51217640..be28597b 100644 --- a/website/docs/r/dashboard.markdown +++ b/website/docs/r/dashboard.markdown @@ -29,20 +29,21 @@ resource "logicmonitor_dashboard" "my_dashboard" { The following arguments are **required**: * `name` - The name of the dashboard + (string) The following arguments are **optional**: -* `description` - The description of the dashboard -* `group_id` - The id of the group the dashboard belongs to -* `group_name` - The name of group where created dashboard will reside -* `owner` - This field will be empty unless the dashboard is a private dashboard, in which case the owner will be listed -* `sharable` - Whether or not the dashboard is sharable. This value will always be true unless the dashboard is a private dashboard -* `template` - The template which is used for import dashboard -* `widget_tokens` - If useDynamicWidget=true, this field must at least contain tokens defaultDeviceGroup and defaultServiceGroup +* `description` - The description of the dashboard (string) +* `group_id` - The id of the group the dashboard belongs to (int32) +* `group_name` - The name of group where created dashboard will reside (string) +* `owner` - This field will be empty unless the dashboard is a private dashboard, in which case the owner will be listed (string) +* `sharable` - Whether or not the dashboard is sharable. 
This value will always be true unless the dashboard is a private dashboard (bool) +* `template` - The template which is used for import dashboard (interface{}) +* `widget_tokens` - If useDynamicWidget=true, this field must at least contain tokens defaultDeviceGroup and defaultServiceGroup ([]*WidgetToken) + `name` (required) + `value` (required) + `type` (required) - Need to pass 'null' value + `inherit_list` (required) - Need to pass 'null' value -* `widgets_config` - Information about widget configuration used by the UI, this field can remain empty for terraform. +* `widgets_config` - Information about widget configuration used by the UI (interface{}), this field can remain empty for terraform. ## Import diff --git a/website/docs/r/dashboard_group.markdown b/website/docs/r/dashboard_group.markdown index ceeb5486..e0d5f58b 100644 --- a/website/docs/r/dashboard_group.markdown +++ b/website/docs/r/dashboard_group.markdown @@ -25,12 +25,13 @@ resource "logicmonitor_dashboard_group" "my_dashboard_group" { The following arguments are **required**: * `name` - The name of the dashboard group + (string) The following arguments are **optional**: -* `description` - This is a description of the dashboard group -* `parent_id` - The Id of the parent dashboard group -* `template` - The template which is used for import dashboard group -* `widget_tokens` - The tokens assigned at the group level +* `description` - This is a description of the dashboard group (string) +* `parent_id` - The Id of the parent dashboard group (int32) +* `template` - The template which is used for import dashboard group (interface{}) +* `widget_tokens` - The tokens assigned at the group level ([]*WidgetToken) + `name` (required) + `value` (required) + `type` (required) - Need to pass 'null' value diff --git a/website/docs/r/datasource.markdown b/website/docs/r/datasource.markdown index f92c8513..016b8f94 100644 --- a/website/docs/r/datasource.markdown +++ b/website/docs/r/datasource.markdown @@ -15,6 
+15,24 @@ Provides a LogicMonitor datasource resource. This can be used to create and mana # Create a LogicMonitor datasource resource "logicmonitor_datasource" "my_datasource" { applies_to = "system.deviceId == \"22\"" + auto_discovery_config { + persistent_instance = false + schedule_interval = 0 + delete_inactive_instance = true + method { + name = "ad_script" + } + instance_auto_group_method = "none" + instance_auto_group_method_params = "" + filters = [ + { + comment = "test" + value = "test" + operation = "string" + attribute = "string" + }] + disable_instance = true + } collect_interval = 100 collect_method = "script" collector_attribute { @@ -46,8 +64,18 @@ resource "logicmonitor_datasource" "my_datasource" { display_name = "test" enable_auto_discovery = false enable_eri_discovery = false + eri_discovery_config { + name = "ad_script" + win_script = "string" + groovy_script = "string" + type = "string" + linux_cmdline = "string" + linux_script = "string" + win_cmdline = "string" + } eri_discovery_interval = 10 group = "string" + has_multi_instances = name = "datasource test" tags = "string" technology = "string" @@ -58,15 +86,20 @@ resource "logicmonitor_datasource" "my_datasource" { The following arguments are **required**: * `collect_interval` - The DataSource data collect interval + (int32) * `collect_method` - The method to collect data. The values can be snmp|ping|exs|webpage|wmi|cim|datadump|dns|ipmi|jdbb|script|udp|tcp|xen + (string) * `collector_attribute` - Data collector's attributes to collector data. e.g. a ping data source has a ping collector attribute. PingCollectorAttributeV1 has two fields. 
the ip to ping, the data size send to ping + (CollectorAttribute) + `name` - The data collectors name * `name` - The data source name + (string) The following arguments are **optional**: -* `applies_to` - The Applies To for the LMModule -* `data_points` - The data point list +* `applies_to` - The Applies To for the LMModule (string) +* `auto_discovery_config` - (AutoDiscoveryConfiguration) +* `data_points` - The data point list ([]*DataPoint) + `alertForNoData` - The triggered alert level if we cannot collect data for this datapoint. The values can be 0-4 (0:unused alert, 1:alert ok, 2:warn alert, 2:error alert, 4:critical alert) + `postProcessorParam` - The post processor parameter, e.g. dataPoint1*2 + `postProcessorMethod` - The post processor method for the data value. Currently support complex expression and groovy. @@ -84,14 +117,16 @@ The following arguments are **optional**: + `alertExprNote` - alert expression note + `name` - The datapoint name + `alertExpr` - The alert threshold define for the datapoint. e.g. 
60 80 90 mean it will: trigger warn alert if value 60 trigger error alert if value 80 trigger critical alert if value 90 -* `description` - The description for the LMModule -* `display_name` - The data source display name -* `enable_auto_discovery` - Enable Auto Discovery or not when this data source has multi instance: false|true -* `enable_eri_discovery` - Enable ERI Discovery or not: false|true -* `eri_discovery_interval` - The DataSource data collect interval -* `group` - The group the LMModule is in -* `tags` - The Tags for the LMModule -* `technology` - The Technical Notes for the LMModule +* `description` - The description for the LMModule (string) +* `display_name` - The data source display name (string) +* `enable_auto_discovery` - Enable Auto Discovery or not when this data source has multi instance: false|true (bool) +* `enable_eri_discovery` - Enable ERI Discovery or not: false|true (bool) +* `eri_discovery_config` - (ScriptERIDiscoveryAttributeV2) +* `eri_discovery_interval` - The DataSource data collect interval (int32) +* `group` - The group the LMModule is in (string) +* `has_multi_instances` - If the DataSource has multi instance: true|false (bool) +* `tags` - The Tags for the LMModule (string) +* `technology` - The Technical Notes for the LMModule (string) ## Import diff --git a/website/docs/r/device.markdown b/website/docs/r/device.markdown index 72cf8840..92c508c6 100644 --- a/website/docs/r/device.markdown +++ b/website/docs/r/device.markdown @@ -44,24 +44,27 @@ resource "logicmonitor_device" "my_device" { The following arguments are **required**: * `display_name` - The display name of the device + (string) * `name` - The host name or IP address of the device + (string) * `preferred_collector_id` - The Id of the preferred collector assigned to monitor the device + (int32) The following arguments are **optional**: -* `auto_balanced_collector_group_id` - The Auto Balanced Collector Group id. 
0 means not monitored by ABCG -* `current_collector_id` - The id of the collector currently monitoring the device and discovering instances -* `custom_properties` - Any non-system properties (aside from system.categories) defined for this device +* `auto_balanced_collector_group_id` - The Auto Balanced Collector Group id. 0 means not monitored by ABCG (int32) +* `current_collector_id` - The id of the collector currently monitoring the device and discovering instances (int32) +* `custom_properties` - Any non-system properties (aside from system.categories) defined for this device ([]*NameAndValue) + `name` - The name of a property (required) + `value` - The value of a property (required) -* `description` - The device description -* `device_type` - The type of device: 0 indicates a regular device, 2 indicates an AWS device, 4 indicates an Azure device -* `disable_alerting` - Indicates whether alerting is disabled (true) or enabled (false) for this device -* `enable_netflow` - Indicates whether Netflow is enabled (true) or disabled (false) for the device -* `host_group_ids` - The Id(s) of the groups the device is in, where multiple group ids are comma separated -* `link` - The URL link associated with the device -* `netflow_collector_id` - The Id of the netflow collector associated with the device -* `preferred_collector_group_id` - The id of the Collector Group associated with the device's preferred collector, It can be 0 for auto balanced collector group . -* `related_device_id` - The Id of the AWS EC2 instance related to this device, if one exists in the LogicMonitor account. 
This value defaults to -1, which indicates that there are no related devices +* `description` - The device description (string) +* `device_type` - The type of device: 0 indicates a regular device, 2 indicates an AWS device, 4 indicates an Azure device (int32) +* `disable_alerting` - Indicates whether alerting is disabled (true) or enabled (false) for this device (bool) +* `enable_netflow` - Indicates whether Netflow is enabled (true) or disabled (false) for the device (bool) +* `host_group_ids` - The Id(s) of the groups the device is in, where multiple group ids are comma separated (string) +* `link` - The URL link associated with the device (string) +* `netflow_collector_id` - The Id of the netflow collector associated with the device (int32) +* `preferred_collector_group_id` - The id of the Collector Group associated with the device's preferred collector (int32), It can be 0 for auto balanced collector group . +* `related_device_id` - The Id of the AWS EC2 instance related to this device, if one exists in the LogicMonitor account. 
This value defaults to -1, which indicates that there are no related devices (int32) ## Import diff --git a/website/docs/r/device_group.markdown b/website/docs/r/device_group.markdown index 1b92cbb8..1be42a77 100644 --- a/website/docs/r/device_group.markdown +++ b/website/docs/r/device_group.markdown @@ -154,17 +154,18 @@ resource "logicmonitor_device_group" "my_aws_device_group" { The following arguments are **required**: * `name` - The name of the device group + (string) The following arguments are **optional**: -* `applies_to` - The Applies to custom query for this group (only for dynamic groups) -* `custom_properties` - The properties associated with this device group +* `applies_to` - The Applies to custom query for this group (only for dynamic groups) (string) +* `custom_properties` - The properties associated with this device group ([]*NameAndValue) + `name` - The name of a property (required) + `value` - The value of a property (required) -* `default_collector_id` - The Id of the default collector assigned to the device group -* `description` - The description of the device group -* `disable_alerting` - Indicates whether alerting is disabled (true) or enabled (false) for this device group -* `enable_netflow` - Indicates whether Netflow is enabled (true) or disabled (false) for the device group, the default value is true -* `extra` - The extra setting for cloud group +* `default_collector_id` - The Id of the default collector assigned to the device group (int32) +* `description` - The description of the device group (string) +* `disable_alerting` - Indicates whether alerting is disabled (true) or enabled (false) for this device group (bool) +* `enable_netflow` - Indicates whether Netflow is enabled (true) or disabled (false) for the device group, the default value is true (bool) +* `extra` - The extra setting for cloud group (CloudAccountExtra) + `account` - cloud account information (currently only supports AWS) + `accountId` - LogicMonitor's Account ID + 
`assumedRoleArn` - ARN of the role created for LogicMonitor to use while monitoring AWS (required) @@ -199,8 +200,8 @@ The following arguments are **optional**: + `deviceType` - Cloud device type (2 for AWS Device) (int) + `requiredProps` - Required device properties (array of strings) + `services` - Cloud account services to monitor. This is an object with keys for each AWS service that have the same arguments as `default`. See example above (all service keys alternate letters and underscores, but not numbers; SQS=s_q_s, SAGEMAKER=s_a_g_e_m_a_k_e_r, EC2=e_c2 etc.) -* `group_type` - The type of device group: normal and dynamic device groups will have groupType=Normal, and AWS groups will have a groupType value of AWS/SERVICE (e.g. AWS/AwsRoot, AWS/S3, etc.) -* `parent_id` - The id of the parent group for this device group (the root device group has an Id of 1) +* `group_type` - The type of device group: normal and dynamic device groups will have groupType=Normal, and AWS groups will have a groupType value of AWS/SERVICE (e.g. AWS/AwsRoot, AWS/S3, etc.) 
(string) +* `parent_id` - The id of the parent group for this device group (the root device group has an Id of 1) (int32) ## Import diff --git a/website/docs/r/escalation_chain.markdown b/website/docs/r/escalation_chain.markdown index 74a87b11..15eb5767 100644 --- a/website/docs/r/escalation_chain.markdown +++ b/website/docs/r/escalation_chain.markdown @@ -66,6 +66,7 @@ resource "logicmonitor_escalation_chain" "my_escalation_chain" { The following arguments are **required**: * `destinations` - + ([]*Chain) + `period` - + `weekDays` - the list of week day of this period (required) + `timezone` - the timezone for this period (required) @@ -78,17 +79,18 @@ The following arguments are **required**: + `type` (required) - email | sms | voice, where type must be email if method = arbitrary + `type` - single (required) * `name` - the chain name + (string) The following arguments are **optional**: -* `cc_destinations` - +* `cc_destinations` - ([]*Recipient) + `addr` - the user name if method = admin, or the email address if method = arbitrary + `contact` - contact details, email address or phone number + `method` (required) - Admin | Arbitrary, where Admin = a user, and Arbitrary = an arbitrary email + `type` (required) - email | sms | voice, where type must be email if method = arbitrary -* `description` - -* `enable_throttling` - if throttle needs to be enabled then true if not then false. -* `throttling_alerts` - max number of alert can send during a throttle period -* `throttling_period` - the throttle period +* `description` - (string) +* `enable_throttling` - if throttle needs to be enabled then true if not then false. 
(bool) +* `throttling_alerts` - the maximum number of alerts that can be sent during a throttle period (int32) +* `throttling_period` - the throttle period (int32) ## Import diff --git a/website/docs/r/website.markdown b/website/docs/r/website.markdown index aad26ae9..6116a7b3 100644 --- a/website/docs/r/website.markdown +++ b/website/docs/r/website.markdown @@ -28,6 +28,7 @@ resource "logicmonitor_website" "my_website" { name = "Ebay" overall_alert_level = "warn" polling_interval = 5 + schema = "https" steps = [ { schema = "https" @@ -88,34 +89,37 @@ resource "logicmonitor_website" "my_website" { The following arguments are **required**: * `name` - The name of the website + (string) * `type` - The type of the website. Acceptable values are: pingcheck, webcheck + (string) The following arguments are **optional**: -* `alert_expr` - The threshold (in days) for triggering SSL certification alerts -* `description` - The description of the website +* `alert_expr` - The threshold (in days) for triggering SSL certification alerts (string) +* `description` - The description of the website (string) * `disable_alerting` - true: alerting is disabled for the website false: alerting is enabled for the website -If stopMonitoring=true, then alerting will also by default be disabled for the website -* `domain` - Required for type=webcheck , The domain of the service. This is the base URL of the service +If stopMonitoring=true, then alerting will also by default be disabled for the website (bool) +* `domain` - Required for type=webcheck. The domain of the service. This is the base URL of the service (string) * `global_sm_alert_cond` - The number of test locations that checks must fail at to trigger an alert, where the alert triggered will be consistent with the value of overallAlertLevel. 
Possible values and corresponding number of Site Monitor locations are 0 : all 1 : half 2 : more than one -3 : any -* `group_id` - The id of the group the website is in +3 : any (int32) +* `group_id` - The id of the group the website is in (int32) * `host` - The URL to check, without the scheme or protocol (e.g http or https) -E.g. if the URL is "http://www.google.com, then the host="www.google.com" -* `ignore_s_s_l` - Whether or not SSL should be ignored, the default value is true +E.g. if the URL is "http://www.google.com", then the host="www.google.com" (string) +* `ignore_s_s_l` - Whether or not SSL should be ignored, the default value is true (bool) * `individual_alert_level` - warn | error | critical -The level of alert to trigger if the website fails a check from an individual test location +The level of alert to trigger if the website fails a check from an individual test location (string) * `individual_sm_alert_enable` - true: an alert will be triggered if a check fails from an individual test location -false: an alert will not be triggered if a check fails from an individual test location -* `is_internal` - Whether or not the website is internal +false: an alert will not be triggered if a check fails from an individual test location (bool) +* `is_internal` - Whether or not the website is internal (bool) * `overall_alert_level` - warn | error | critical -The level of alert to trigger if the website fails the number of checks specified by transition from the test locations specified by globalSmAlertCond +The level of alert to trigger if the website fails the number of checks specified by transition from the test locations specified by globalSmAlertCond (string) * `polling_interval` - 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 | 10 -The polling interval for the website, in units of minutes. This value indicates how often the website is checked. 
The minimum is 1 minute, and the maximum is 10 minutes -* `steps` - Required for type=webcheck , An object comprising one or more steps, see the table below for the properties included in each step +The polling interval for the website, in units of minutes. This value indicates how often the website is checked. The minimum is 1 minute, and the maximum is 10 minutes (int32) +* `schema` - The scheme or protocol associated with the URL to check. Acceptable values are: http, https (string) +* `steps` - Required for type=webcheck. An object comprising one or more steps, see the table below for the properties included in each step ([]*WebCheckStep) + `schema` - HTTP schema + `matchType` - Body match type(plain | JSON | XML | Glob Expression | Multi-line key-value pairs) + `description` - The description of the Step @@ -149,28 +153,28 @@ The polling interval for the website, in units of minutes. This value indicates + `statusCode` - The expected status code * `stop_monitoring` - true: monitoring is disabled for the website false: monitoring is enabled for the website -If stopMonitoring=true, then alerting will also by default be disabled for the website -* `template` - The website template +If stopMonitoring=true, then alerting will also by default be disabled for the website (bool) +* `template` - The website template (interface{}) * `test_location` - The locations from which the website is monitored. If the website is internal, this field should include Collectors. 
If Non-Internal, possible test locations are: 1 : US - LA 2 : US - DC 3 : US - SF 4 : Europe - Dublin 5 : Asia - Singapore -6 : Australia - Sydney +6 : Australia - Sydney (WebsiteLocation) + `all` - (true | false) Indicates that the service will be monitored from all checkpoint locations + `collectorIds` - indicates that the service will be monitored from checkpoint locations 1, 2 and 3 + `collectors` - Need to pass 'null' value + `smgIds` - indicates that the service will be monitored by Collectors 85 and 90 * `transition` - 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 | 10 | 30 | 60 -The number of checks that must fail before an alert is triggered -* `trigger_s_s_l_expiration_alert` - Whether or not SSL expiration alerts should be triggered -* `trigger_s_s_l_status_alert` - Whether or not SSL status alerts should be triggered +The number of checks that must fail before an alert is triggered (int32) +* `trigger_s_s_l_expiration_alert` - Whether or not SSL expiration alerts should be triggered (bool) +* `trigger_s_s_l_status_alert` - Whether or not SSL status alerts should be triggered (bool) * `use_default_alert_setting` - true: The alert settings configured in the website Default Settings will be used -false: Service Default Settings will not be used, and you will need to specify individualSMAlertEnable, individualAlertLevel, globalSmAlertConf, overallAlertLevel and pollingInterval +false: Service Default Settings will not be used, and you will need to specify individualSMAlertEnable, individualAlertLevel, globalSmAlertConf, overallAlertLevel and pollingInterval (bool) * `use_default_location_setting` - true: The checkpoint locations configured in the website Default Settings will be used -false: The checkpoint locations specified in the testLocation will be used -* `user_permission` - write | read | ack. 
The permission level of the user that made the API request +false: The checkpoint locations specified in the testLocation will be used (bool) +* `user_permission` - write | read | ack. The permission level of the user that made the API request (string) ## Import diff --git a/website/docs/r/website_group.markdown b/website/docs/r/website_group.markdown index 094e55b7..e00b2194 100644 --- a/website/docs/r/website_group.markdown +++ b/website/docs/r/website_group.markdown @@ -32,19 +32,20 @@ resource "logicmonitor_website_group" "my_website_group" { The following arguments are **required**: * `name` - The name of the group + (string) The following arguments are **optional**: -* `description` - The description of the group +* `description` - The description of the group (string) * `disable_alerting` - true: alerting is disabled for the websites in the group false: alerting is enabled for the websites in the group -If stopMonitoring=true, then alerting will also by default be disabled for the websites in the group -* `parent_id` - The Id of the parent group. If parentId=1 then the group exists under the root group -* `properties` - +If stopMonitoring=true, then alerting will also by default be disabled for the websites in the group (bool) +* `parent_id` - The Id of the parent group. If parentId=1 then the group exists under the root group (int32) +* `properties` - ([]*NameAndValue) + `name` - The name of a property (required) + `value` - The value of a property (required) * `stop_monitoring` - true: monitoring is disabled for the websites in the group false: monitoring is enabled for the websites in the group -If stopMonitoring=true, then alerting will also by default be disabled for the websites in the group +If stopMonitoring=true, then alerting will also by default be disabled for the websites in the group (bool) ## Import