From 5769366dd31da43a95ff94947878baeaed540e2b Mon Sep 17 00:00:00 2001 From: Claus Matzinger Date: Tue, 27 Aug 2024 14:33:16 +0200 Subject: [PATCH 01/12] feat: terraform to use coraglogix sdk --- coralogix/clientset/actions-client.go | 81 --------------------- coralogix/clientset/apikeys-client.go | 80 -------------------- coralogix/clientset/clientset.go | 18 ----- coralogix/data_source_coralogix_action.go | 19 +++-- coralogix/data_source_coralogix_api_key.go | 25 +++---- coralogix/data_source_coralogix_group.go | 32 +++++--- coralogix/resource_coralogix_action.go | 60 +++++++-------- coralogix/resource_coralogix_api_key.go | 85 ++++++++++------------ coralogix/resource_coralogix_group.go | 34 ++++----- go.mod | 19 +++-- go.sum | 38 +++++----- 11 files changed, 154 insertions(+), 337 deletions(-) delete mode 100644 coralogix/clientset/actions-client.go delete mode 100644 coralogix/clientset/apikeys-client.go diff --git a/coralogix/clientset/actions-client.go b/coralogix/clientset/actions-client.go deleted file mode 100644 index d11382a3..00000000 --- a/coralogix/clientset/actions-client.go +++ /dev/null @@ -1,81 +0,0 @@ -// Copyright 2024 Coralogix Ltd. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// https://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package clientset - -import ( - "context" - - actions "terraform-provider-coralogix/coralogix/clientset/grpc/actions/v2" -) - -type ActionsClient struct { - callPropertiesCreator *CallPropertiesCreator -} - -func (a ActionsClient) CreateAction(ctx context.Context, req *actions.CreateActionRequest) (*actions.CreateActionResponse, error) { - callProperties, err := a.callPropertiesCreator.GetCallProperties(ctx) - if err != nil { - return nil, err - } - - conn := callProperties.Connection - defer conn.Close() - client := actions.NewActionsServiceClient(conn) - - return client.CreateAction(callProperties.Ctx, req, callProperties.CallOptions...) -} - -func (a ActionsClient) GetAction(ctx context.Context, req *actions.GetActionRequest) (*actions.GetActionResponse, error) { - callProperties, err := a.callPropertiesCreator.GetCallProperties(ctx) - if err != nil { - return nil, err - } - - conn := callProperties.Connection - defer conn.Close() - client := actions.NewActionsServiceClient(conn) - - return client.GetAction(callProperties.Ctx, req, callProperties.CallOptions...) -} - -func (a ActionsClient) UpdateAction(ctx context.Context, req *actions.ReplaceActionRequest) (*actions.ReplaceActionResponse, error) { - callProperties, err := a.callPropertiesCreator.GetCallProperties(ctx) - if err != nil { - return nil, err - } - - conn := callProperties.Connection - defer conn.Close() - client := actions.NewActionsServiceClient(conn) - - return client.ReplaceAction(callProperties.Ctx, req, callProperties.CallOptions...) 
-} - -func (a ActionsClient) DeleteAction(ctx context.Context, req *actions.DeleteActionRequest) (*actions.DeleteActionResponse, error) { - callProperties, err := a.callPropertiesCreator.GetCallProperties(ctx) - if err != nil { - return nil, err - } - - conn := callProperties.Connection - defer conn.Close() - client := actions.NewActionsServiceClient(conn) - - return client.DeleteAction(callProperties.Ctx, req, callProperties.CallOptions...) -} - -func NewActionsClient(c *CallPropertiesCreator) *ActionsClient { - return &ActionsClient{callPropertiesCreator: c} -} diff --git a/coralogix/clientset/apikeys-client.go b/coralogix/clientset/apikeys-client.go deleted file mode 100644 index 6da0a8f3..00000000 --- a/coralogix/clientset/apikeys-client.go +++ /dev/null @@ -1,80 +0,0 @@ -// Copyright 2024 Coralogix Ltd. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// https://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package clientset - -import ( - "context" - apikeys "terraform-provider-coralogix/coralogix/clientset/grpc/apikeys" -) - -type ApikeysClient struct { - callPropertiesCreator *CallPropertiesCreator -} - -func (t ApikeysClient) CreateApiKey(ctx context.Context, req *apikeys.CreateApiKeyRequest) (*apikeys.CreateApiKeyResponse, error) { - callProperties, err := t.callPropertiesCreator.GetCallProperties(ctx) - if err != nil { - return nil, err - } - - conn := callProperties.Connection - defer conn.Close() - client := apikeys.NewApiKeysServiceClient(conn) - - return client.CreateApiKey(callProperties.Ctx, req, callProperties.CallOptions...) -} - -func (t ApikeysClient) GetApiKey(ctx context.Context, req *apikeys.GetApiKeyRequest) (*apikeys.GetApiKeyResponse, error) { - callProperties, err := t.callPropertiesCreator.GetCallProperties(ctx) - if err != nil { - return nil, err - } - - conn := callProperties.Connection - defer conn.Close() - client := apikeys.NewApiKeysServiceClient(conn) - - return client.GetApiKey(callProperties.Ctx, req, callProperties.CallOptions...) -} - -func (t ApikeysClient) UpdateApiKey(ctx context.Context, req *apikeys.UpdateApiKeyRequest) (*apikeys.UpdateApiKeyResponse, error) { - callProperties, err := t.callPropertiesCreator.GetCallProperties(ctx) - if err != nil { - return nil, err - } - - conn := callProperties.Connection - defer conn.Close() - client := apikeys.NewApiKeysServiceClient(conn) - - return client.UpdateApiKey(callProperties.Ctx, req, callProperties.CallOptions...) -} - -func (t ApikeysClient) DeleteApiKey(ctx context.Context, req *apikeys.DeleteApiKeyRequest) (*apikeys.DeleteApiKeyResponse, error) { - callProperties, err := t.callPropertiesCreator.GetCallProperties(ctx) - if err != nil { - return nil, err - } - - conn := callProperties.Connection - defer conn.Close() - client := apikeys.NewApiKeysServiceClient(conn) - - return client.DeleteApiKey(callProperties.Ctx, req, callProperties.CallOptions...) 
-} - -func NewApiKeysClient(c *CallPropertiesCreator) *ApikeysClient { - return &ApikeysClient{callPropertiesCreator: c} -} diff --git a/coralogix/clientset/clientset.go b/coralogix/clientset/clientset.go index 4f86e2d1..98ae9a90 100644 --- a/coralogix/clientset/clientset.go +++ b/coralogix/clientset/clientset.go @@ -21,12 +21,10 @@ type ClientSet struct { dataSet *DataSetClient dashboards *DashboardsClient grafana *GrafanaClient - actions *ActionsClient recordingRuleGroups *RecordingRulesGroupsSetsClient tcoPolicies *TCOPoliciesClient webhooks *WebhooksClient events2Metrics *Events2MetricsClient - slis *SLIClient archiveRetentions *ArchiveRetentionsClient archiveMetrics *ArchiveMetricsClient archiveLogs *ArchiveLogsClient @@ -34,7 +32,6 @@ type ClientSet struct { teams *TeamsClient slos *SLOsClient dahboardsFolders *DashboardsFoldersClient - apiKeys *ApikeysClient groups *GroupsClient users *UsersClient customRole *RolesClient @@ -66,10 +63,6 @@ func (c *ClientSet) Grafana() *GrafanaClient { return c.grafana } -func (c *ClientSet) Actions() *ActionsClient { - return c.actions -} - func (c *ClientSet) RecordingRuleGroupsSets() *RecordingRulesGroupsSetsClient { return c.recordingRuleGroups } @@ -86,10 +79,6 @@ func (c *ClientSet) Events2Metrics() *Events2MetricsClient { return c.events2Metrics } -func (c *ClientSet) SLIs() *SLIClient { - return c.slis -} - func (c *ClientSet) ArchiveRetentions() *ArchiveRetentionsClient { return c.archiveRetentions } @@ -110,10 +99,6 @@ func (c *ClientSet) Teams() *TeamsClient { return c.teams } -func (c *ClientSet) ApiKeys() *ApikeysClient { - return c.apiKeys -} - func (c *ClientSet) CustomRoles() *RolesClient { return c.customRole } @@ -153,11 +138,9 @@ func NewClientSet(targetUrl, apiKey string) *ClientSet { dataSet: NewDataSetClient(apikeyCPC), dashboards: NewDashboardsClient(apikeyCPC), grafana: NewGrafanaClient(apikeyCPC), - actions: NewActionsClient(apikeyCPC), recordingRuleGroups: NewRecordingRuleGroupsClient(apikeyCPC), tcoPolicies: NewTCOPoliciesClient(apikeyCPC), webhooks: NewWebhooksClient(apikeyCPC), - slis: NewSLIsClient(apikeyCPC), archiveRetentions: NewArchiveRetentionsClient(apikeyCPC), archiveMetrics: NewArchiveMetricsClient(apikeyCPC), archiveLogs: NewArchiveLogsClient(apikeyCPC), @@ -165,7 +148,6 @@ func NewClientSet(targetUrl, apiKey string) *ClientSet { teams: NewTeamsClient(apikeyCPC), slos: NewSLOsClient(apikeyCPC), dahboardsFolders: NewDashboardsFoldersClient(apikeyCPC), - apiKeys: NewApiKeysClient(apikeyCPC), groups: NewGroupsClient(apikeyCPC), users: NewUsersClient(apikeyCPC), customRole: NewRolesClient(apikeyCPC), diff --git a/coralogix/data_source_coralogix_action.go b/coralogix/data_source_coralogix_action.go index 4e836553..b1673ad8 100644 --- a/coralogix/data_source_coralogix_action.go +++ b/coralogix/data_source_coralogix_action.go @@ -1,11 +1,11 @@ // Copyright 2024 Coralogix Ltd. -// +// // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at -// +// // https://www.apache.org/licenses/LICENSE-2.0 -// +// // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
@@ -21,8 +21,7 @@ import ( "google.golang.org/protobuf/encoding/protojson" - "terraform-provider-coralogix/coralogix/clientset" - actions "terraform-provider-coralogix/coralogix/clientset/grpc/actions/v2" + cxsdk "github.com/coralogix/coralogix-management-sdk/go" "github.com/hashicorp/terraform-plugin-framework/datasource" "github.com/hashicorp/terraform-plugin-framework/resource" @@ -39,7 +38,7 @@ func NewActionDataSource() datasource.DataSource { } type ActionDataSource struct { - client *clientset.ActionsClient + client *cxsdk.ActionsClient } func (d *ActionDataSource) Metadata(_ context.Context, req datasource.MetadataRequest, resp *datasource.MetadataResponse) { @@ -51,7 +50,7 @@ func (d *ActionDataSource) Configure(_ context.Context, req datasource.Configure return } - clientSet, ok := req.ProviderData.(*clientset.ClientSet) + clientSet, ok := req.ProviderData.(*cxsdk.ClientSet) if !ok { resp.Diagnostics.AddError( "Unexpected Resource Configure Type", @@ -81,8 +80,8 @@ func (d *ActionDataSource) Read(ctx context.Context, req datasource.ReadRequest, //Get refreshed Action value from Coralogix id := data.ID.ValueString() log.Printf("[INFO] Reading Action: %s", id) - getActionReq := &actions.GetActionRequest{Id: wrapperspb.String(id)} - getActionResp, err := d.client.GetAction(ctx, getActionReq) + getActionReq := &cxsdk.GetActionRequest{Id: wrapperspb.String(id)} + getActionResp, err := d.client.Get(ctx, getActionReq) if err != nil { log.Printf("[ERROR] Received error: %s", err.Error()) if status.Code(err) == codes.NotFound { @@ -91,7 +90,7 @@ func (d *ActionDataSource) Read(ctx context.Context, req datasource.ReadRequest, } else { resp.Diagnostics.AddError( "Error reading Action", - formatRpcErrors(err, getActionURL, protojson.Format(getActionReq)), + formatRpcErrors(err, cxsdk.GetActionRpc, protojson.Format(getActionReq)), ) } return diff --git a/coralogix/data_source_coralogix_api_key.go b/coralogix/data_source_coralogix_api_key.go index c4c54506..b12aa5e3 100644 --- a/coralogix/data_source_coralogix_api_key.go +++ b/coralogix/data_source_coralogix_api_key.go @@ -1,11 +1,11 @@ // Copyright 2024 Coralogix Ltd. -// +// // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at -// +// // https://www.apache.org/licenses/LICENSE-2.0 -// +// // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
@@ -19,12 +19,9 @@ import ( "fmt" "log" - apikeys "terraform-provider-coralogix/coralogix/clientset/grpc/apikeys" - + cxsdk "github.com/coralogix/coralogix-management-sdk/go" "google.golang.org/protobuf/encoding/protojson" - "terraform-provider-coralogix/coralogix/clientset" - "github.com/hashicorp/terraform-plugin-framework/datasource" "github.com/hashicorp/terraform-plugin-framework/resource" "google.golang.org/grpc/codes" @@ -38,7 +35,7 @@ func NewApiKeyDataSource() datasource.DataSource { } type ApiKeyDataSource struct { - client *clientset.ApikeysClient + client *cxsdk.ApikeysClient } func (d *ApiKeyDataSource) Metadata(_ context.Context, req datasource.MetadataRequest, resp *datasource.MetadataResponse) { @@ -50,16 +47,16 @@ func (d *ApiKeyDataSource) Configure(_ context.Context, req datasource.Configure return } - clientSet, ok := req.ProviderData.(*clientset.ClientSet) + clientSet, ok := req.ProviderData.(*cxsdk.ClientSet) if !ok { resp.Diagnostics.AddError( "Unexpected Resource Configure Type", - fmt.Sprintf("Expected *clientset.ClientSet, got: %T. Please report this issue to the provider developers.", req.ProviderData), + fmt.Sprintf("Expected *cxsdk.ClientSet, got: %T. Please report this issue to the provider developers.", req.ProviderData), ) return } - d.client = clientSet.ApiKeys() + d.client = clientSet.APIKeys() } func (d *ApiKeyDataSource) Schema(ctx context.Context, _ datasource.SchemaRequest, resp *datasource.SchemaResponse) { @@ -82,11 +79,11 @@ func (d *ApiKeyDataSource) Read(ctx context.Context, req datasource.ReadRequest, //Get refreshed API Keys value from Coralogix id := data.ID.ValueString() log.Printf("[INFO] Reading ApiKey: %s", id) - getApiKey := &apikeys.GetApiKeyRequest{ + getApiKey := &cxsdk.GetAPIKeyRequest{ KeyId: id, } - getApiKeyResponse, err := d.client.GetApiKey(ctx, getApiKey) + getApiKeyResponse, err := d.client.Get(ctx, getApiKey) if err != nil { log.Printf("[ERROR] Received error: %s", err.Error()) if status.Code(err) == codes.NotFound { @@ -96,7 +93,7 @@ func (d *ApiKeyDataSource) Read(ctx context.Context, req datasource.ReadRequest, } else { resp.Diagnostics.AddError( "Error reading API Keys", - formatRpcErrors(err, getApiKeyPath, protojson.Format(getApiKey)), + formatRpcErrors(err, cxsdk.GetAPIKeyRpc, protojson.Format(getApiKey)), ) } return diff --git a/coralogix/data_source_coralogix_group.go b/coralogix/data_source_coralogix_group.go index ff3104ab..58c78e62 100644 --- a/coralogix/data_source_coralogix_group.go +++ b/coralogix/data_source_coralogix_group.go @@ -1,11 +1,11 @@ // Copyright 2024 Coralogix Ltd. -// +// // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at -// +// // https://www.apache.org/licenses/LICENSE-2.0 -// +// // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
@@ -19,12 +19,14 @@ import ( "encoding/json" "fmt" "log" + "strconv" + cxsdk "github.com/coralogix/coralogix-management-sdk/go" "github.com/hashicorp/terraform-plugin-framework/datasource" "github.com/hashicorp/terraform-plugin-framework/resource" "google.golang.org/grpc/codes" "google.golang.org/grpc/status" - "terraform-provider-coralogix/coralogix/clientset" + "google.golang.org/protobuf/encoding/protojson" ) var _ datasource.DataSourceWithConfigure = &GroupDataSource{} @@ -34,7 +36,7 @@ func NewGroupDataSource() datasource.DataSource { } type GroupDataSource struct { - client *clientset.GroupsClient + client *cxsdk.GroupsClient } func (d *GroupDataSource) Metadata(_ context.Context, req datasource.MetadataRequest, resp *datasource.MetadataResponse) { @@ -46,11 +48,11 @@ func (d *GroupDataSource) Configure(_ context.Context, req datasource.ConfigureR return } - clientSet, ok := req.ProviderData.(*clientset.ClientSet) + clientSet, ok := req.ProviderData.(*cxsdk.ClientSet) if !ok { resp.Diagnostics.AddError( "Unexpected Resource Configure Type", - fmt.Sprintf("Expected *clientset.ClientSet, got: %T. Please report this issue to the provider developers.", req.ProviderData), + fmt.Sprintf("Expected *cxsdk.ClientSet, got: %T. Please report this issue to the provider developers.", req.ProviderData), ) return } @@ -74,9 +76,16 @@ func (d *GroupDataSource) Read(ctx context.Context, req datasource.ReadRequest, return } //Get refreshed Group value from Coralogix - id := data.ID.ValueString() + id, _ := strconv.ParseUint(data.ID.ValueString(), 10, 32) + + request := cxsdk.GetTeamGroupRequest{ + GroupId: &cxsdk.GroupsTeamGroupID{ + Id: id, + }, + } + log.Printf("[INFO] Reading Group: %s", id) - getGroupResp, err := d.client.GetGroup(ctx, id) + getGroupResp, err := d.client.Get(ctx, request) if err != nil { log.Printf("[ERROR] Received error: %s", err.Error()) if status.Code(err) == codes.NotFound { @@ -85,9 +94,10 @@ func (d *GroupDataSource) Read(ctx context.Context, req datasource.ReadRequest, fmt.Sprintf("Group %q is in state, but no longer exists in Coralogix backend", id), ) } else { + resp.Diagnostics.AddError( - "Error reading Group", - formatRpcErrors(err, fmt.Sprintf("%s/%s", d.client.TargetUrl, id), ""), + "Error reading API Keys", + formatRpcErrors(err, cxsdk.GetTeamGroupRpc, protojson.Format(request)), ) } return diff --git a/coralogix/resource_coralogix_action.go b/coralogix/resource_coralogix_action.go index c9c8a5a0..bca34015 100644 --- a/coralogix/resource_coralogix_action.go +++ b/coralogix/resource_coralogix_action.go @@ -1,11 +1,11 @@ // Copyright 2024 Coralogix Ltd. -// +// // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at -// +// // https://www.apache.org/licenses/LICENSE-2.0 -// +// // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
@@ -19,9 +19,7 @@ import ( "fmt" "log" - "terraform-provider-coralogix/coralogix/clientset" - actions "terraform-provider-coralogix/coralogix/clientset/grpc/actions/v2" - + cxsdk "github.com/coralogix/coralogix-management-sdk/go" "google.golang.org/protobuf/encoding/protojson" "github.com/hashicorp/terraform-plugin-framework-validators/setvalidator" @@ -43,16 +41,12 @@ import ( var ( _ resource.ResourceWithConfigure = &ActionResource{} _ resource.ResourceWithImportState = &ActionResource{} - actionSchemaSourceTypeToProtoSourceType = map[string]actions.SourceType{ - "Log": actions.SourceType_SOURCE_TYPE_LOG, - "DataMap": actions.SourceType_SOURCE_TYPE_DATA_MAP, + actionSchemaSourceTypeToProtoSourceType = map[string]cxsdk.SourceType{ + "Log": cxsdk.SourceTypeSourceTypeLog, + "DataMap": cxsdk.SourceTypeSourceTypeDataMap, } actionProtoSourceTypeToSchemaSourceType = ReverseMap(actionSchemaSourceTypeToProtoSourceType) actionValidSourceTypes = GetKeys(actionSchemaSourceTypeToProtoSourceType) - createActionURL = "com.coralogixapis.actions.v2.ActionsService/CreateAction" - updateActionURL = "com.coralogixapis.actions.v2.ActionsService/ReplaceAction" - getActionURL = "com.coralogixapis.actions.v2.ActionsService/GetAction" - deleteActionURL = "com.coralogixapis.actions.v2.ActionsService/DeleteAction" ) func NewActionResource() resource.Resource { @@ -60,7 +54,7 @@ func NewActionResource() resource.Resource { } type ActionResource struct { - client *clientset.ActionsClient + client *cxsdk.ActionsClient } func (r *ActionResource) Metadata(_ context.Context, req resource.MetadataRequest, resp *resource.MetadataResponse) { @@ -72,7 +66,7 @@ func (r *ActionResource) Configure(_ context.Context, req resource.ConfigureRequ return } - clientSet, ok := req.ProviderData.(*clientset.ClientSet) + clientSet, ok := req.ProviderData.(*cxsdk.ClientSet) if !ok { resp.Diagnostics.AddError( "Unexpected Resource Configure Type", @@ -175,11 +169,11 @@ func (r *ActionResource) Create(ctx context.Context, req resource.CreateRequest, } actionStr := protojson.Format(createActionRequest) log.Printf("[INFO] Creating new action: %s", actionStr) - createResp, err := r.client.CreateAction(ctx, createActionRequest) + createResp, err := r.client.Create(ctx, createActionRequest) if err != nil { log.Printf("[ERROR] Received error: %s", err) resp.Diagnostics.AddError("Error creating Action", - formatRpcErrors(err, createActionURL, actionStr), + formatRpcErrors(err, cxsdk.CreateActionRpc, actionStr), ) return } @@ -193,7 +187,7 @@ func (r *ActionResource) Create(ctx context.Context, req resource.CreateRequest, resp.Diagnostics.Append(diags...) 
} -func flattenAction(action *actions.Action) ActionResourceModel { +func flattenAction(action *cxsdk.Action) ActionResourceModel { return ActionResourceModel{ ID: types.StringValue(action.GetId().GetValue()), Name: types.StringValue(action.GetName().GetValue()), @@ -218,8 +212,8 @@ func (r *ActionResource) Read(ctx context.Context, req resource.ReadRequest, res //Get refreshed Action value from Coralogix id := state.ID.ValueString() log.Printf("[INFO] Reading Action: %s", id) - getActionReq := &actions.GetActionRequest{Id: wrapperspb.String(id)} - getActionResp, err := r.client.GetAction(ctx, getActionReq) + getActionReq := &cxsdk.GetActionRequest{Id: wrapperspb.String(id)} + getActionResp, err := r.client.Get(ctx, getActionReq) if err != nil { log.Printf("[ERROR] Received error: %s", err.Error()) if status.Code(err) == codes.NotFound { @@ -231,7 +225,7 @@ func (r *ActionResource) Read(ctx context.Context, req resource.ReadRequest, res } else { resp.Diagnostics.AddError( "Error reading Action", - formatRpcErrors(err, getActionURL, protojson.Format(getActionReq)), + formatRpcErrors(err, cxsdk.GetActionRpc, protojson.Format(getActionReq)), ) } return @@ -260,12 +254,12 @@ func (r ActionResource) Update(ctx context.Context, req resource.UpdateRequest, return } log.Printf("[INFO] Updating Action: %s", protojson.Format(actionUpdateReq)) - actionUpdateResp, err := r.client.UpdateAction(ctx, actionUpdateReq) + actionUpdateResp, err := r.client.Replace(ctx, actionUpdateReq) if err != nil { log.Printf("[ERROR] Received error: %s", err.Error()) resp.Diagnostics.AddError( "Error updating Action", - formatRpcErrors(err, updateActionURL, protojson.Format(actionUpdateReq)), + formatRpcErrors(err, cxsdk.ReplaceActionRpc, protojson.Format(actionUpdateReq)), ) return } @@ -273,8 +267,8 @@ func (r ActionResource) Update(ctx context.Context, req resource.UpdateRequest, // Get refreshed Action value from Coralogix id := plan.ID.ValueString() - getActionReq := &actions.GetActionRequest{Id: wrapperspb.String(id)} - getActionResp, err := r.client.GetAction(ctx, getActionReq) + getActionReq := &cxsdk.GetActionRequest{Id: wrapperspb.String(id)} + getActionResp, err := r.client.Get(ctx, getActionReq) if err != nil { log.Printf("[ERROR] Received error: %s", err.Error()) if status.Code(err) == codes.NotFound { @@ -310,11 +304,11 @@ func (r ActionResource) Delete(ctx context.Context, req resource.DeleteRequest, id := state.ID.ValueString() log.Printf("[INFO] Deleting Action %s", id) - deleteReq := &actions.DeleteActionRequest{Id: wrapperspb.String(id)} - if _, err := r.client.DeleteAction(ctx, deleteReq); err != nil { + deleteReq := &cxsdk.DeleteActionRequest{Id: wrapperspb.String(id)} + if _, err := r.client.Delete(ctx, deleteReq); err != nil { resp.Diagnostics.AddError( fmt.Sprintf("Error Deleting Action %s", id), - formatRpcErrors(err, deleteActionURL, protojson.Format(deleteReq)), + formatRpcErrors(err, cxsdk.DeleteActionRpc, protojson.Format(deleteReq)), ) return } @@ -333,7 +327,7 @@ type ActionResourceModel struct { IsHidden types.Bool `tfsdk:"is_hidden"` } -func extractCreateAction(ctx context.Context, plan ActionResourceModel) (*actions.CreateActionRequest, diag.Diagnostics) { +func extractCreateAction(ctx context.Context, plan ActionResourceModel) (*cxsdk.CreateActionRequest, diag.Diagnostics) { var diags diag.Diagnostics name := typeStringToWrapperspbString(plan.Name) url := typeStringToWrapperspbString(plan.URL) @@ -344,7 +338,7 @@ func extractCreateAction(ctx context.Context, plan ActionResourceModel) 
(*action subsystemNames, dgs := typeStringSliceToWrappedStringSlice(ctx, plan.Subsystems.Elements()) diags = append(diags, dgs...) - return &actions.CreateActionRequest{ + return &cxsdk.CreateActionRequest{ Name: name, Url: url, IsPrivate: isPrivate, @@ -354,7 +348,7 @@ func extractCreateAction(ctx context.Context, plan ActionResourceModel) (*action }, diags } -func extractUpdateAction(ctx context.Context, plan ActionResourceModel) (*actions.ReplaceActionRequest, diag.Diagnostics) { +func extractUpdateAction(ctx context.Context, plan ActionResourceModel) (*cxsdk.ReplaceActionRequest, diag.Diagnostics) { var diags diag.Diagnostics id := wrapperspb.String(plan.ID.ValueString()) name := typeStringToWrapperspbString(plan.Name) @@ -371,8 +365,8 @@ func extractUpdateAction(ctx context.Context, plan ActionResourceModel) (*action } isHidden := wrapperspb.Bool(plan.IsHidden.ValueBool()) - return &actions.ReplaceActionRequest{ - Action: &actions.Action{ + return &cxsdk.ReplaceActionRequest{ + Action: &cxsdk.Action{ Id: id, Name: name, Url: url, diff --git a/coralogix/resource_coralogix_api_key.go b/coralogix/resource_coralogix_api_key.go index de749a2f..28c9ea09 100644 --- a/coralogix/resource_coralogix_api_key.go +++ b/coralogix/resource_coralogix_api_key.go @@ -1,11 +1,11 @@ // Copyright 2024 Coralogix Ltd. -// +// // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at -// +// // https://www.apache.org/licenses/LICENSE-2.0 -// +// // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. @@ -22,9 +22,7 @@ import ( "strconv" "strings" - "terraform-provider-coralogix/coralogix/clientset" - apikeys "terraform-provider-coralogix/coralogix/clientset/grpc/apikeys" - + cxsdk "github.com/coralogix/coralogix-management-sdk/go" "github.com/hashicorp/terraform-plugin-framework-validators/setvalidator" "github.com/hashicorp/terraform-plugin-framework-validators/stringvalidator" "github.com/hashicorp/terraform-plugin-framework/attr" @@ -43,19 +41,12 @@ import ( "google.golang.org/protobuf/encoding/protojson" ) -var ( - getApiKeyPath = apikeys.ApiKeysService_GetApiKey_FullMethodName - createApiKeyPath = apikeys.ApiKeysService_CreateApiKey_FullMethodName - deleteApiKeyPath = apikeys.ApiKeysService_DeleteApiKey_FullMethodName - updateApiKeyPath = apikeys.ApiKeysService_UpdateApiKey_FullMethodName -) - func NewApiKeyResource() resource.Resource { return &ApiKeyResource{} } type ApiKeyResource struct { - client *clientset.ApikeysClient + client *cxsdk.ApikeysClient } func (r *ApiKeyResource) Metadata(_ context.Context, req resource.MetadataRequest, resp *resource.MetadataResponse) { @@ -68,16 +59,16 @@ func (r *ApiKeyResource) Configure(_ context.Context, req resource.ConfigureRequ return } - clientSet, ok := req.ProviderData.(*clientset.ClientSet) + clientSet, ok := req.ProviderData.(*cxsdk.ClientSet) if !ok { resp.Diagnostics.AddError( "Unexpected Resource Configure Type", - fmt.Sprintf("Expected *clientset.ClientSet, got: %T. Please report this issue to the provider developers.", req.ProviderData), + fmt.Sprintf("Expected *cxsdk.ClientSet, got: %T. 
Please report this issue to the provider developers.", req.ProviderData), ) return } - r.client = clientSet.ApiKeys() + r.client = clientSet.APIKeys() } func (r *ApiKeyResource) ImportState(ctx context.Context, req resource.ImportStateRequest, resp *resource.ImportStateResponse) { @@ -281,12 +272,12 @@ func (r *ApiKeyResource) Create(ctx context.Context, req resource.CreateRequest, return } log.Printf("[INFO] Creating new ApiKey: %s", protojson.Format(createApiKeyRequest)) - createApiKeyResp, err := r.client.CreateApiKey(ctx, createApiKeyRequest) + createApiKeyResp, err := r.client.Create(ctx, createApiKeyRequest) if err != nil { log.Printf("[ERROR] Received error: %s", err.Error()) resp.Diagnostics.AddError( "Error creating Api Key", - formatRpcErrors(err, createApiKeyPath, protojson.Format(createApiKeyRequest)), + formatRpcErrors(err, cxsdk.CreateAPIKeyRpc, protojson.Format(createApiKeyRequest)), ) return } @@ -337,7 +328,7 @@ func (r *ApiKeyResource) Update(ctx context.Context, req resource.UpdateRequest, } id := currentState.ID.ValueString() - var updateApiKeyRequest = apikeys.UpdateApiKeyRequest{ + var updateApiKeyRequest = cxsdk.UpdateAPIKeyRequest{ KeyId: id, } if currentState.Name.ValueString() != desiredState.Name.ValueString() { @@ -350,7 +341,7 @@ func (r *ApiKeyResource) Update(ctx context.Context, req resource.UpdateRequest, resp.Diagnostics.Append(diags...) return } - updateApiKeyRequest.Permissions = &apikeys.UpdateApiKeyRequest_Permissions{ + updateApiKeyRequest.Permissions = &cxsdk.APIKeyPermissionsUpdate{ Permissions: permissions, } } @@ -361,7 +352,7 @@ func (r *ApiKeyResource) Update(ctx context.Context, req resource.UpdateRequest, resp.Diagnostics.Append(diags...) return } - updateApiKeyRequest.Presets = &apikeys.UpdateApiKeyRequest_Presets{ + updateApiKeyRequest.Presets = &cxsdk.APIKeyPresetsUpdate{ Presets: presets, } } @@ -379,12 +370,12 @@ func (r *ApiKeyResource) Update(ctx context.Context, req resource.UpdateRequest, } log.Printf("[INFO] Updating ApiKey %s to %s", id, protojson.Format(&updateApiKeyRequest)) - _, err := r.client.UpdateApiKey(ctx, &updateApiKeyRequest) + _, err := r.client.Update(ctx, &updateApiKeyRequest) if err != nil { log.Printf("[ERROR] Received error: %s", err.Error()) resp.Diagnostics.AddError( "Error updating Api Key", - formatRpcErrors(err, updateApiKeyPath, protojson.Format(&updateApiKeyRequest)), + formatRpcErrors(err, cxsdk.UpdateAPIKeyRpc, protojson.Format(&updateApiKeyRequest)), ) return } @@ -411,13 +402,13 @@ func (r *ApiKeyResource) Delete(ctx context.Context, req resource.DeleteRequest, resp.Diagnostics.Append(diags...) 
return } - _, err := r.client.DeleteApiKey(ctx, deleteApiKeyRequest) + _, err := r.client.Delete(ctx, deleteApiKeyRequest) if err != nil { log.Printf("[ERROR] Received error: %s", err.Error()) resp.Diagnostics.AddError( "Error getting Api Key", - formatRpcErrors(err, deleteApiKeyPath, protojson.Format(deleteApiKeyRequest)), + formatRpcErrors(err, cxsdk.DeleteAPIKeyRpc, protojson.Format(deleteApiKeyRequest)), ) return } @@ -431,7 +422,7 @@ func (r *ApiKeyResource) getKeyInfo(ctx context.Context, id *string, keyValue *s return nil, diags } log.Printf("[INFO] Get api key with ID: %s", getApiKeyRequest) - getApiKeyResponse, err := r.client.GetApiKey(ctx, getApiKeyRequest) + getApiKeyResponse, err := r.client.Get(ctx, getApiKeyRequest) if err != nil { log.Printf("[ERROR] Received error: %s", err.Error()) @@ -443,7 +434,7 @@ func (r *ApiKeyResource) getKeyInfo(ctx context.Context, id *string, keyValue *s } else { diags.AddError( "Error getting Api Key", - formatRpcErrors(err, getApiKeyPath, protojson.Format(getApiKeyRequest)), + formatRpcErrors(err, cxsdk.GetAPIKeyRpc, protojson.Format(getApiKeyRequest)), ) } return nil, diags @@ -456,19 +447,19 @@ func (r *ApiKeyResource) getKeyInfo(ctx context.Context, id *string, keyValue *s return key, nil } -func makeGetApiKeyRequest(apiKeyId *string) (*apikeys.GetApiKeyRequest, diag.Diagnostics) { - return &apikeys.GetApiKeyRequest{ +func makeGetApiKeyRequest(apiKeyId *string) (*cxsdk.GetAPIKeyRequest, diag.Diagnostics) { + return &cxsdk.GetAPIKeyRequest{ KeyId: *apiKeyId, }, nil } -func makeDeleteApi(apiKeyId *string) (*apikeys.DeleteApiKeyRequest, diag.Diagnostics) { - return &apikeys.DeleteApiKeyRequest{ +func makeDeleteApi(apiKeyId *string) (*cxsdk.DeleteAPIKeyRequest, diag.Diagnostics) { + return &cxsdk.DeleteAPIKeyRequest{ KeyId: *apiKeyId, }, nil } -func flattenGetApiKeyResponse(ctx context.Context, apiKeyId *string, response *apikeys.GetApiKeyResponse, keyValue *string) (*ApiKeyModel, diag.Diagnostics) { +func flattenGetApiKeyResponse(ctx context.Context, apiKeyId *string, response *cxsdk.GetAPIKeyResponse, keyValue *string) (*ApiKeyModel, diag.Diagnostics) { var diags diag.Diagnostics permissions := stringSliceToTypeStringSet(response.KeyInfo.KeyPermissions.Permissions) @@ -508,7 +499,7 @@ func flattenGetApiKeyResponse(ctx context.Context, apiKeyId *string, response *a }, nil } -func makeCreateApiKeyRequest(ctx context.Context, apiKeyModel *ApiKeyModel) (*apikeys.CreateApiKeyRequest, diag.Diagnostics) { +func makeCreateApiKeyRequest(ctx context.Context, apiKeyModel *ApiKeyModel) (*cxsdk.CreateAPIKeyRequest, diag.Diagnostics) { permissions, diags := typeStringSliceToStringSlice(ctx, apiKeyModel.Permissions.Elements()) if diags.HasError() { return nil, diags @@ -524,10 +515,10 @@ func makeCreateApiKeyRequest(ctx context.Context, apiKeyModel *ApiKeyModel) (*ap return nil, diags } - return &apikeys.CreateApiKeyRequest{ + return &cxsdk.CreateAPIKeyRequest{ Name: apiKeyModel.Name.ValueString(), Owner: &owner, - KeyPermissions: &apikeys.CreateApiKeyRequest_KeyPermissions{ + KeyPermissions: &cxsdk.APIKeyPermissionsCreate{ Presets: presets, Permissions: permissions, }, @@ -535,18 +526,18 @@ func makeCreateApiKeyRequest(ctx context.Context, apiKeyModel *ApiKeyModel) (*ap }, nil } -func extractOwner(keyModel *ApiKeyModel) (apikeys.Owner, diag.Diagnostics) { +func extractOwner(keyModel *ApiKeyModel) (cxsdk.Owner, diag.Diagnostics) { var diags diag.Diagnostics if keyModel.Owner.UserId.ValueString() != "" { - return apikeys.Owner{ - Owner: 
&apikeys.Owner_UserId{ + return cxsdk.Owner{ + Owner: &cxsdk.OwnerUserID{ UserId: keyModel.Owner.UserId.ValueString(), }, }, diags } else { if keyModel.Owner.OrganisationId.ValueString() != "" { - return apikeys.Owner{ - Owner: &apikeys.Owner_OrganisationId{ + return cxsdk.Owner{ + Owner: &cxsdk.OwnerOrganisationID{ OrganisationId: keyModel.Owner.OrganisationId.ValueString(), }, }, diags @@ -555,8 +546,8 @@ func extractOwner(keyModel *ApiKeyModel) (apikeys.Owner, diag.Diagnostics) { if err != nil { diags.AddError("Invalid team id", "Team id must be a int") } - return apikeys.Owner{ - Owner: &apikeys.Owner_TeamId{ + return cxsdk.Owner{ + Owner: &cxsdk.OwnerTeamID{ TeamId: uint32(teamId), }, }, diags @@ -564,17 +555,17 @@ func extractOwner(keyModel *ApiKeyModel) (apikeys.Owner, diag.Diagnostics) { } } -func flattenOwner(owner *apikeys.Owner) Owner { +func flattenOwner(owner *cxsdk.Owner) Owner { switch owner.Owner.(type) { - case *apikeys.Owner_TeamId: + case *cxsdk.OwnerTeamID: return Owner{ TeamId: types.StringValue(strconv.Itoa(int(owner.GetTeamId()))), } - case *apikeys.Owner_UserId: + case *cxsdk.OwnerUserID: return Owner{ UserId: types.StringValue(owner.GetUserId()), } - case *apikeys.Owner_OrganisationId: + case *cxsdk.OwnerOrganisationID: return Owner{ OrganisationId: types.StringValue(owner.GetOrganisationId()), } diff --git a/coralogix/resource_coralogix_group.go b/coralogix/resource_coralogix_group.go index 49138f96..9b1e1d7f 100644 --- a/coralogix/resource_coralogix_group.go +++ b/coralogix/resource_coralogix_group.go @@ -1,11 +1,11 @@ // Copyright 2024 Coralogix Ltd. -// +// // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at -// +// // https://www.apache.org/licenses/LICENSE-2.0 -// +// // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. @@ -20,8 +20,7 @@ import ( "fmt" "log" - "terraform-provider-coralogix/coralogix/clientset" - + cxsdk "github.com/coralogix/coralogix-management-sdk/go" "github.com/hashicorp/terraform-plugin-framework/attr" "github.com/hashicorp/terraform-plugin-framework-validators/stringvalidator" @@ -35,6 +34,7 @@ import ( "github.com/hashicorp/terraform-plugin-framework/types" "google.golang.org/grpc/codes" "google.golang.org/grpc/status" + "google.golang.org/protobuf/encoding/protojson" ) func NewGroupResource() resource.Resource { @@ -42,7 +42,7 @@ func NewGroupResource() resource.Resource { } type GroupResource struct { - client *clientset.GroupsClient + client *cxsdk.GroupsClient } func (r *GroupResource) Metadata(_ context.Context, req resource.MetadataRequest, resp *resource.MetadataResponse) { @@ -54,11 +54,11 @@ func (r *GroupResource) Configure(_ context.Context, req resource.ConfigureReque return } - clientSet, ok := req.ProviderData.(*clientset.ClientSet) + clientSet, ok := req.ProviderData.(*cxsdk.ClientSet) if !ok { resp.Diagnostics.AddError( "Unexpected Resource Configure Type", - fmt.Sprintf("Expected *clientset.ClientSet, got: %T. Please report this issue to the provider developers.", req.ProviderData), + fmt.Sprintf("Expected *cxsdk.ClientSet, got: %T. 
Please report this issue to the provider developers.", req.ProviderData), ) return } @@ -119,12 +119,12 @@ func (r *GroupResource) Create(ctx context.Context, req resource.CreateRequest, } groupStr, _ := json.Marshal(createGroupRequest) log.Printf("[INFO] Creating new group: %s", string(groupStr)) - createResp, err := r.client.CreateGroup(ctx, createGroupRequest) + createResp, err := r.client.Create(ctx, createGroupRequest) if err != nil { log.Printf("[ERROR] Received error: %s", err.Error()) resp.Diagnostics.AddError( "Error creating Group", - formatRpcErrors(err, r.client.TargetUrl, string(groupStr)), + formatRpcErrors(err, cxsdk.CreateTeamGroupRpc, protojson.Format(groupStr)), ) return } @@ -142,7 +142,7 @@ func (r *GroupResource) Create(ctx context.Context, req resource.CreateRequest, resp.Diagnostics.Append(diags...) } -func flattenSCIMGroup(group *clientset.SCIMGroup) (*GroupResourceModel, diag.Diagnostics) { +func flattenSCIMGroup(group *cxsdk.GroupsTeamGroup) (*GroupResourceModel, diag.Diagnostics) { members, diags := flattenSCIMGroupMembers(group.Members) if diags.HasError() { return nil, diags @@ -162,7 +162,7 @@ func flattenSCIMGroup(group *clientset.SCIMGroup) (*GroupResourceModel, diag.Dia }, nil } -func flattenSCIMGroupMembers(members []clientset.SCIMGroupMember) (types.Set, diag.Diagnostics) { +func flattenSCIMGroupMembers(members []cxsdk.SCIMGroupMember) (types.Set, diag.Diagnostics) { if len(members) == 0 { return types.SetNull(types.StringType), nil } @@ -311,13 +311,13 @@ type GroupResourceModel struct { ScopeID types.String `tfsdk:"scope_id"` } -func extractGroup(ctx context.Context, plan *GroupResourceModel) (*clientset.SCIMGroup, diag.Diagnostics) { +func extractGroup(ctx context.Context, plan *GroupResourceModel) (*cxsdk.SCIMGroup, diag.Diagnostics) { members, diags := extractGroupMembers(ctx, plan.Members) if diags.HasError() { return nil, diags } - return &clientset.SCIMGroup{ + return &cxsdk.SCIMGroup{ DisplayName: plan.DisplayName.ValueString(), Members: members, Role: plan.Role.ValueString(), @@ -325,9 +325,9 @@ func extractGroup(ctx context.Context, plan *GroupResourceModel) (*clientset.SCI }, nil } -func extractGroupMembers(ctx context.Context, members types.Set) ([]clientset.SCIMGroupMember, diag.Diagnostics) { +func extractGroupMembers(ctx context.Context, members types.Set) ([]cxsdk.SCIMGroupMember, diag.Diagnostics) { membersElements := members.Elements() - groupMembers := make([]clientset.SCIMGroupMember, 0, len(membersElements)) + groupMembers := make([]cxsdk.SCIMGroupMember, 0, len(membersElements)) var diags diag.Diagnostics for _, member := range membersElements { val, err := member.ToTerraformValue(ctx) @@ -341,7 +341,7 @@ func extractGroupMembers(ctx context.Context, members types.Set) ([]clientset.SC diags.AddError("Failed to convert value to string", err.Error()) continue } - groupMembers = append(groupMembers, clientset.SCIMGroupMember{Value: str}) + groupMembers = append(groupMembers, cxsdk.SCIMGroupMember{Value: str}) } if diags.HasError() { return nil, diags diff --git a/go.mod b/go.mod index dac9effc..3bdd6870 100644 --- a/go.mod +++ b/go.mod @@ -1,9 +1,12 @@ module terraform-provider-coralogix -go 1.21 +go 1.22.5 + +toolchain go1.23.0 require ( github.com/ahmetalpbalkan/go-linq v3.0.0+incompatible + github.com/coralogix/coralogix-management-sdk v0.2.1 github.com/google/uuid v1.6.0 github.com/grafana/grafana-api-golang-client v0.27.0 github.com/grpc-ecosystem/go-grpc-middleware v1.4.0 @@ -16,7 +19,7 @@ require ( 
github.com/hashicorp/terraform-plugin-testing v1.9.0 github.com/nsf/jsondiff v0.0.0-20230430225905-43f6cf3098c1 golang.org/x/exp v0.0.0-20231219180239-dc181d75b848 - google.golang.org/genproto/googleapis/api v0.0.0-20240528184218-531527333157 + google.golang.org/genproto/googleapis/api v0.0.0-20240823204242-4ba0660f739c google.golang.org/grpc v1.65.0 google.golang.org/protobuf v1.34.2 gopkg.in/yaml.v3 v3.0.1 @@ -78,15 +81,15 @@ require ( github.com/yuin/goldmark-meta v1.1.0 // indirect github.com/zclconf/go-cty v1.14.4 // indirect go.abhg.dev/goldmark/frontmatter v0.2.0 // indirect - golang.org/x/crypto v0.25.0 // indirect + golang.org/x/crypto v0.26.0 // indirect golang.org/x/mod v0.17.0 // indirect - golang.org/x/net v0.25.0 // indirect - golang.org/x/sync v0.7.0 // indirect - golang.org/x/sys v0.22.0 // indirect - golang.org/x/text v0.16.0 // indirect + golang.org/x/net v0.28.0 // indirect + golang.org/x/sync v0.8.0 // indirect + golang.org/x/sys v0.24.0 // indirect + golang.org/x/text v0.17.0 // indirect golang.org/x/tools v0.21.1-0.20240508182429-e35e4ccd0d2d // indirect google.golang.org/appengine v1.6.8 // indirect - google.golang.org/genproto/googleapis/rpc v0.0.0-20240528184218-531527333157 // indirect + google.golang.org/genproto/googleapis/rpc v0.0.0-20240823204242-4ba0660f739c // indirect gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c // indirect gopkg.in/yaml.v2 v2.3.0 // indirect ) diff --git a/go.sum b/go.sum index 98797dcc..2d84d2b7 100644 --- a/go.sum +++ b/go.sum @@ -38,6 +38,8 @@ github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDk github.com/cloudflare/circl v1.3.7 h1:qlCDlTPz2n9fu58M0Nh1J/JzcFpfgkFHHX3O35r5vcU= github.com/cloudflare/circl v1.3.7/go.mod h1:sRTcRWXGLrKw6yIGJ+l7amYJFfAXbZG0kBSc8r4zxgA= github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= +github.com/coralogix/coralogix-management-sdk v0.2.1 h1:5g5F37DGfZ3AL91S3J1vtmAI2YPU4zjMKymJe6dQ86A= +github.com/coralogix/coralogix-management-sdk v0.2.1/go.mod h1:1aa/coMEMe5M1NvnRymOrBF2iCdefaWR0CMaMjPu0oI= github.com/cyphar/filepath-securejoin v0.2.4 h1:Ugdm7cg7i6ZK6x3xDF1oEu1nfkyfH53EtKeQYTC3kyg= github.com/cyphar/filepath-securejoin v0.2.4/go.mod h1:aPGpWjXOXUn2NCNjFvBE6aRxGGx79pTxQpKOJNYHHl4= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= @@ -227,8 +229,8 @@ github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81P github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.7.2/go.mod h1:R6va5+xMeoiuVRoj+gSkQ7d3FALtqAAGI1FQKckRals= -github.com/stretchr/testify v1.8.4 h1:CcVxjf3Q8PM0mHUKJCdn+eZZtm5yQwehR5yeSVQQcUk= -github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= +github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg= +github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= github.com/vmihailenco/msgpack v3.3.3+incompatible/go.mod h1:fy3FlTQTDXWkZ7Bh6AcGMlsjHatGryHQYUTf1ShIgkk= github.com/vmihailenco/msgpack v4.0.4+incompatible h1:dSLoQfGFAo3F6OoNhwUmLwVgaUXK79GlxNBwueZn0xI= github.com/vmihailenco/msgpack v4.0.4+incompatible/go.mod h1:fy3FlTQTDXWkZ7Bh6AcGMlsjHatGryHQYUTf1ShIgkk= @@ -260,8 +262,8 @@ golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8U golang.org/x/crypto 
v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= golang.org/x/crypto v0.3.0/go.mod h1:hebNnKkNXi2UzZN1eVRvBB7co0a+JxK6XbPiWVs/3J4= -golang.org/x/crypto v0.25.0 h1:ypSNr+bnYL2YhwoMt2zPxHFmbAN1KZs/njMG3hxUp30= -golang.org/x/crypto v0.25.0/go.mod h1:T+wALwcMOSE0kXgUAnPAHqTLW+XHgcELELW8VaDgm/M= +golang.org/x/crypto v0.26.0 h1:RrRspgV4mU+YwB4FYnuBoKsUapNIL5cohGAmSH3azsw= +golang.org/x/crypto v0.26.0/go.mod h1:GY7jblb9wI+FOo5y8/S2oY4zWP07AkOJ4+jxCqdqn54= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20231219180239-dc181d75b848 h1:+iq7lrkxmFNBM7xx+Rae2W6uyPfhPeDWD+n+JgppptE= golang.org/x/exp v0.0.0-20231219180239-dc181d75b848/go.mod h1:iRJReGqOEeBhDZGkGbynYwcHlctCvnjTYIamk7uXpHI= @@ -285,8 +287,8 @@ golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwY golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= golang.org/x/net v0.2.0/go.mod h1:KqCZLdyyvdV855qA2rE3GC2aiw5xGR5TEjj8smXukLY= -golang.org/x/net v0.25.0 h1:d/OCCoBEUq33pjydKrGQhw7IlUPI2Oylr+8qLx49kac= -golang.org/x/net v0.25.0/go.mod h1:JkAGAh7GEvH74S6FOH42FLoXpXbE/aqXSrIQjXgsiwM= +golang.org/x/net v0.28.0 h1:a9JDOJc5GMUJ0+UDqmLT86WiEy7iWyIhz8gz8E4e5hE= +golang.org/x/net v0.28.0/go.mod h1:yqtgsTWOOnlGLG9GFRrK3++bGOUEkNBoHZc8MEDWPNg= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -294,8 +296,8 @@ golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJ golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.7.0 h1:YsImfSBoP9QPYL0xyKJPq0gcaJdG3rInoqxTWbfQu9M= -golang.org/x/sync v0.7.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= +golang.org/x/sync v0.8.0 h1:3NFvSEYkUoMifnESzZl15y791HH1qU2xm6eCJU5ZPXQ= +golang.org/x/sync v0.8.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -314,21 +316,21 @@ golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.2.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.22.0 h1:RI27ohtqKCnwULzJLqkv897zojh5/DwS/ENaMzUOaWI= -golang.org/x/sys v0.22.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.24.0 
h1:Twjiwq9dn6R1fQcyiK+wQyHWfaz/BJB+YIpzU/Cv3Xg= +golang.org/x/sys v0.24.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/term v0.2.0/go.mod h1:TVmDHMZPmdnySmBfhjOoOdhjzdE1h4u1VwSiw2l1Nuc= -golang.org/x/term v0.22.0 h1:BbsgPEJULsl2fV/AT3v15Mjva5yXKQDyKf+TbDz7QJk= -golang.org/x/term v0.22.0/go.mod h1:F3qCibpT5AMpCRfhfT53vVJwhLtIVHhB9XDjfFvnMI4= +golang.org/x/term v0.23.0 h1:F6D4vR+EHoL9/sWAWgAR1H2DcHr4PareCbAaCo1RpuU= +golang.org/x/term v0.23.0/go.mod h1:DgV24QBUrK6jhZXl+20l6UWznPlwAHm1Q1mGHtydmSk= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= golang.org/x/text v0.3.8/go.mod h1:E6s5w1FMmriuDzIBO73fBruAKo1PCIq6d2Q6DHfQ8WQ= golang.org/x/text v0.4.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= -golang.org/x/text v0.16.0 h1:a94ExnEXNtEwYLGJSIUxnWoxoRz/ZcCsV63ROupILh4= -golang.org/x/text v0.16.0/go.mod h1:GhwF1Be+LQoKShO3cGOHzqOgRrGaYc9AvblQOmPVHnI= +golang.org/x/text v0.17.0 h1:XtiM5bkSOt+ewxlOE/aE/AKEHibwj/6gvWMl9Rsh0Qc= +golang.org/x/text v0.17.0/go.mod h1:BuEKDfySbSR4drPmRPG/7iBdf8hvFMuRexcpahXilzY= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= @@ -352,10 +354,10 @@ google.golang.org/appengine v1.6.8/go.mod h1:1jJ3jBArFh5pcgW8gCtRJnepW8FzD1V44FJ google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= google.golang.org/genproto v0.0.0-20200423170343-7949de9c1215/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto/googleapis/api v0.0.0-20240528184218-531527333157 h1:7whR9kGa5LUwFtpLm2ArCEejtnxlGeLbAyjFY8sGNFw= -google.golang.org/genproto/googleapis/api v0.0.0-20240528184218-531527333157/go.mod h1:99sLkeliLXfdj2J75X3Ho+rrVCaJze0uwN7zDDkjPVU= -google.golang.org/genproto/googleapis/rpc v0.0.0-20240528184218-531527333157 h1:Zy9XzmMEflZ/MAaA7vNcoebnRAld7FsPW1EeBB7V0m8= -google.golang.org/genproto/googleapis/rpc v0.0.0-20240528184218-531527333157/go.mod h1:EfXuqaE1J41VCDicxHzUDm+8rk+7ZdXzHV0IhO/I6s0= +google.golang.org/genproto/googleapis/api v0.0.0-20240823204242-4ba0660f739c h1:e0zB268kOca6FbuJkYUGxfwG4DKFZG/8DLyv9Zv66cE= +google.golang.org/genproto/googleapis/api v0.0.0-20240823204242-4ba0660f739c/go.mod h1:fO8wJzT2zbQbAjbIoos1285VfEIYKDDY+Dt+WpTkh6g= +google.golang.org/genproto/googleapis/rpc v0.0.0-20240823204242-4ba0660f739c h1:Kqjm4WpoWvwhMPcrAczoTyMySQmYa9Wy2iL6Con4zn8= +google.golang.org/genproto/googleapis/rpc v0.0.0-20240823204242-4ba0660f739c/go.mod h1:UqMtugtsSgubUsoxbuAoiCXvqvErP7Gf0so0mK9tHxU= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= google.golang.org/grpc 
v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY= From d60b75970677bae7ebe9b93b8db0d38b94d7d723 Mon Sep 17 00:00:00 2001 From: Claus Matzinger Date: Tue, 3 Sep 2024 17:49:42 +0200 Subject: [PATCH 02/12] feat: alerts v3 schema (WIP) --- coralogix/data_source_coralogix_alertv3.go | 92 + .../data_source_coralogix_alertv3_test.go | 32 + coralogix/resource_coralogix_alert.go | 7479 ++++++++++------- coralogix/resource_coralogix_alert.go.old | 3517 ++++++++ coralogix/resource_coralogix_alert_test.go | 3689 +++++--- .../resource_coralogix_alert_test.go.old | 1286 +++ ...ource_coralogix_alerts_schedulerv3_test.go | 117 + coralogix/resource_coralogix_group.go | 2 +- coralogix/utils copy.go.nope | 787 ++ coralogix/utils.go | 15 +- go.mod | 4 +- go.sum | 2 + 12 files changed, 13125 insertions(+), 3897 deletions(-) create mode 100644 coralogix/data_source_coralogix_alertv3.go create mode 100644 coralogix/data_source_coralogix_alertv3_test.go create mode 100644 coralogix/resource_coralogix_alert.go.old create mode 100644 coralogix/resource_coralogix_alert_test.go.old create mode 100644 coralogix/resource_coralogix_alerts_schedulerv3_test.go create mode 100644 coralogix/utils copy.go.nope diff --git a/coralogix/data_source_coralogix_alertv3.go b/coralogix/data_source_coralogix_alertv3.go new file mode 100644 index 00000000..d604eb95 --- /dev/null +++ b/coralogix/data_source_coralogix_alertv3.go @@ -0,0 +1,92 @@ +package coralogix + +import ( + "context" + "fmt" + "log" + + "terraform-provider-coralogix/coralogix/clientset" + alerts "terraform-provider-coralogix/coralogix/clientset/grpc/alerts/v3" + + "github.com/hashicorp/terraform-plugin-framework/datasource" + "github.com/hashicorp/terraform-plugin-framework/resource" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" + "google.golang.org/protobuf/encoding/protojson" + "google.golang.org/protobuf/types/known/wrapperspb" +) + +var _ datasource.DataSourceWithConfigure = &AlertDataSource{} + +func NewAlertDataSource() datasource.DataSource { + return &AlertDataSource{} +} + +type AlertDataSource struct { + client *clientset.AlertsClient +} + +func (d *AlertDataSource) Metadata(_ context.Context, req datasource.MetadataRequest, resp *datasource.MetadataResponse) { + resp.TypeName = req.ProviderTypeName + "_alertv3" +} + +func (d *AlertDataSource) Configure(_ context.Context, req datasource.ConfigureRequest, resp *datasource.ConfigureResponse) { + if req.ProviderData == nil { + return + } + + clientSet, ok := req.ProviderData.(*clientset.ClientSet) + if !ok { + resp.Diagnostics.AddError( + "Unexpected Resource Configure Type", + fmt.Sprintf("Expected *clientset.ClientSet, got: %T. Please report this issue to the provider developers.", req.ProviderData), + ) + return + } + + d.client = clientSet.Alerts() +} + +func (d *AlertDataSource) Schema(ctx context.Context, _ datasource.SchemaRequest, resp *datasource.SchemaResponse) { + var r AlertV3Resource + var resourceResp resource.SchemaResponse + r.Schema(ctx, resource.SchemaRequest{}, &resourceResp) + + resp.Schema = frameworkDatasourceSchemaFromFrameworkResourceSchema(resourceResp.Schema) +} + +func (d *AlertDataSource) Read(ctx context.Context, req datasource.ReadRequest, resp *datasource.ReadResponse) { + var data *AlertV3ResourceModel + resp.Diagnostics.Append(req.Config.Get(ctx, &data)...) 
+ if resp.Diagnostics.HasError() { + return + } + + //Get refreshed Alert value from Coralogix + id := data.ID.ValueString() + log.Printf("[INFO] Reading Alert: %s", id) + getAlertReq := &alerts.GetAlertDefRequest{Id: wrapperspb.String(id)} + getAlertResp, err := d.client.GetAlert(ctx, getAlertReq) + if err != nil { + log.Printf("[ERROR] Received error: %s", err.Error()) + if status.Code(err) == codes.NotFound { + resp.Diagnostics.AddWarning(err.Error(), + fmt.Sprintf("Alert %q is in state, but no longer exists in Coralogix backend", id)) + } else { + resp.Diagnostics.AddError( + "Error reading Alert", + formatRpcErrors(err, getAlertURL, protojson.Format(getAlertReq)), + ) + } + return + } + log.Printf("[INFO] Received Alert: %s", protojson.Format(getAlertResp)) + + data, diags := flattenAlert(ctx, getAlertResp.GetAlertDef()) + if diags.HasError() { + resp.Diagnostics.Append(diags...) + return + } + // Save data into Terraform state + resp.Diagnostics.Append(resp.State.Set(ctx, &data)...) +} diff --git a/coralogix/data_source_coralogix_alertv3_test.go b/coralogix/data_source_coralogix_alertv3_test.go new file mode 100644 index 00000000..362290c5 --- /dev/null +++ b/coralogix/data_source_coralogix_alertv3_test.go @@ -0,0 +1,32 @@ +package coralogix + +import ( + "testing" + + "github.com/hashicorp/terraform-plugin-testing/helper/resource" +) + +var alertDataSourceName = "data." + alertResourceName + +func TestAccCoralogixDataSourceAlert(t *testing.T) { + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + ProtoV6ProviderFactories: testAccProtoV6ProviderFactories, + CheckDestroy: testAccCheckActionDestroy, + Steps: []resource.TestStep{ + { + Config: testAccCoralogixResourceAlertLogsImmediate() + + testAccCoralogixDataSourceAlert_read(), + Check: resource.ComposeAggregateTestCheckFunc( + resource.TestCheckResourceAttr(alertDataSourceName, "name", "logs immediate alert"), + ), + }, + }, + }) +} +func testAccCoralogixDataSourceAlert_read() string { + return `data "coralogix_alert" "test" { + id = coralogix_alert.test.id +} +` +} diff --git a/coralogix/resource_coralogix_alert.go b/coralogix/resource_coralogix_alert.go index 9e766044..141f65a0 100644 --- a/coralogix/resource_coralogix_alert.go +++ b/coralogix/resource_coralogix_alert.go @@ -1,3517 +1,5214 @@ -// Copyright 2024 Coralogix Ltd. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// https://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- package coralogix import ( "context" "fmt" "log" - "regexp" "strconv" - "strings" - "time" - - "terraform-provider-coralogix/coralogix/clientset" - alerts "terraform-provider-coralogix/coralogix/clientset/grpc/alerts/v2" + cxsdk "github.com/coralogix/coralogix-management-sdk/go" + + "github.com/hashicorp/terraform-plugin-framework-validators/boolvalidator" + "github.com/hashicorp/terraform-plugin-framework-validators/helpers/validatordiag" + "github.com/hashicorp/terraform-plugin-framework-validators/int64validator" + "github.com/hashicorp/terraform-plugin-framework-validators/listvalidator" + "github.com/hashicorp/terraform-plugin-framework-validators/objectvalidator" + "github.com/hashicorp/terraform-plugin-framework-validators/setvalidator" + "github.com/hashicorp/terraform-plugin-framework/attr" + "github.com/hashicorp/terraform-plugin-framework/diag" + "github.com/hashicorp/terraform-plugin-framework/resource/schema/booldefault" + "github.com/hashicorp/terraform-plugin-framework/resource/schema/int64default" + "github.com/hashicorp/terraform-plugin-framework/resource/schema/objectdefault" + "github.com/hashicorp/terraform-plugin-framework/resource/schema/objectplanmodifier" + "github.com/hashicorp/terraform-plugin-framework/resource/schema/stringdefault" + "github.com/hashicorp/terraform-plugin-framework/types/basetypes" "google.golang.org/protobuf/encoding/protojson" + "github.com/hashicorp/terraform-plugin-framework-validators/stringvalidator" + "github.com/hashicorp/terraform-plugin-framework/path" + "github.com/hashicorp/terraform-plugin-framework/resource" + "github.com/hashicorp/terraform-plugin-framework/resource/schema" + "github.com/hashicorp/terraform-plugin-framework/resource/schema/planmodifier" + "github.com/hashicorp/terraform-plugin-framework/resource/schema/stringplanmodifier" + "github.com/hashicorp/terraform-plugin-framework/schema/validator" + "github.com/hashicorp/terraform-plugin-framework/types" "google.golang.org/grpc/codes" "google.golang.org/grpc/status" - - . 
"github.com/ahmetalpbalkan/go-linq" - "github.com/hashicorp/terraform-plugin-sdk/v2/diag" - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" "google.golang.org/protobuf/types/known/wrapperspb" ) var ( - validAlertTypes = []string{ - "standard", "ratio", "new_value", "unique_count", "time_relative", "metric", "tracing", "flow"} - alertSchemaSeverityToProtoSeverity = map[string]string{ - "Info": "ALERT_SEVERITY_INFO_OR_UNSPECIFIED", - "Warning": "ALERT_SEVERITY_WARNING", - "Critical": "ALERT_SEVERITY_CRITICAL", - "Error": "ALERT_SEVERITY_ERROR", - } - alertProtoSeverityToSchemaSeverity = reverseMapStrings(alertSchemaSeverityToProtoSeverity) - alertValidSeverities = getKeysStrings(alertSchemaSeverityToProtoSeverity) - alertSchemaLogSeverityToProtoLogSeverity = map[string]string{ - "Debug": "LOG_SEVERITY_DEBUG_OR_UNSPECIFIED", - "Verbose": "LOG_SEVERITY_VERBOSE", - "Info": "LOG_SEVERITY_INFO", - "Warning": "LOG_SEVERITY_WARNING", - "Error": "LOG_SEVERITY_ERROR", - "Critical": "LOG_SEVERITY_CRITICAL", - } - alertProtoLogSeverityToSchemaLogSeverity = reverseMapStrings(alertSchemaLogSeverityToProtoLogSeverity) - alertValidLogSeverities = getKeysStrings(alertSchemaLogSeverityToProtoLogSeverity) - alertSchemaDayOfWeekToProtoDayOfWeek = map[string]string{ - "Monday": "DAY_OF_WEEK_MONDAY_OR_UNSPECIFIED", - "Tuesday": "DAY_OF_WEEK_TUESDAY", - "Wednesday": "DAY_OF_WEEK_WEDNESDAY", - "Thursday": "DAY_OF_WEEK_THURSDAY", - "Friday": "DAY_OF_WEEK_FRIDAY", - "Saturday": "DAY_OF_WEEK_SATURDAY", - "Sunday": "DAY_OF_WEEK_SUNDAY", - } - alertProtoDayOfWeekToSchemaDayOfWeek = reverseMapStrings(alertSchemaDayOfWeekToProtoDayOfWeek) - alertValidDaysOfWeek = getKeysStrings(alertSchemaDayOfWeekToProtoDayOfWeek) - alertSchemaTimeFrameToProtoTimeFrame = map[string]string{ - "5Min": "TIMEFRAME_5_MIN_OR_UNSPECIFIED", - "10Min": "TIMEFRAME_10_MIN", - "15Min": "TIMEFRAME_15_MIN", - "20Min": "TIMEFRAME_20_MIN", - "30Min": "TIMEFRAME_30_MIN", - "1H": "TIMEFRAME_1_H", - "2H": "TIMEFRAME_2_H", - "4H": "TIMEFRAME_4_H", - "6H": "TIMEFRAME_6_H", - "12H": "TIMEFRAME_12_H", - "24H": "TIMEFRAME_24_H", - "36H": "TIMEFRAME_36_H", - } - alertProtoTimeFrameToSchemaTimeFrame = reverseMapStrings(alertSchemaTimeFrameToProtoTimeFrame) - alertValidTimeFrames = getKeysStrings(alertSchemaTimeFrameToProtoTimeFrame) - alertSchemaUniqueCountTimeFrameToProtoTimeFrame = map[string]string{ - "1Min": "TIMEFRAME_1_MIN", - "5Min": "TIMEFRAME_5_MIN_OR_UNSPECIFIED", - "10Min": "TIMEFRAME_10_MIN", - "15Min": "TIMEFRAME_15_MIN", - "20Min": "TIMEFRAME_20_MIN", - "30Min": "TIMEFRAME_30_MIN", - "1H": "TIMEFRAME_1_H", - "2H": "TIMEFRAME_2_H", - "4H": "TIMEFRAME_4_H", - "6H": "TIMEFRAME_6_H", - "12H": "TIMEFRAME_12_H", - "24H": "TIMEFRAME_24_H", - } - alertProtoUniqueCountTimeFrameToSchemaTimeFrame = reverseMapStrings(alertSchemaUniqueCountTimeFrameToProtoTimeFrame) - alertValidUniqueCountTimeFrames = getKeysStrings(alertSchemaUniqueCountTimeFrameToProtoTimeFrame) - alertSchemaNewValueTimeFrameToProtoTimeFrame = map[string]string{ - "12H": "TIMEFRAME_12_H", - "24H": "TIMEFRAME_24_H", - "48H": "TIMEFRAME_48_H", - "72H": "TIMEFRAME_72_H", - "1W": "TIMEFRAME_1_W", - "1Month": "TIMEFRAME_1_M", - "2Month": "TIMEFRAME_2_M", - "3Month": "TIMEFRAME_3_M", - } - alertProtoNewValueTimeFrameToSchemaTimeFrame = reverseMapStrings(alertSchemaNewValueTimeFrameToProtoTimeFrame) - alertValidNewValueTimeFrames = getKeysStrings(alertSchemaNewValueTimeFrameToProtoTimeFrame) - 
alertSchemaRelativeTimeFrameToProtoTimeFrameAndRelativeTimeFrame = map[string]protoTimeFrameAndRelativeTimeFrame{ - "Previous_hour": {timeFrame: alerts.Timeframe_TIMEFRAME_1_H, relativeTimeFrame: alerts.RelativeTimeframe_RELATIVE_TIMEFRAME_HOUR_OR_UNSPECIFIED}, - "Same_hour_yesterday": {timeFrame: alerts.Timeframe_TIMEFRAME_1_H, relativeTimeFrame: alerts.RelativeTimeframe_RELATIVE_TIMEFRAME_DAY}, - "Same_hour_last_week": {timeFrame: alerts.Timeframe_TIMEFRAME_1_H, relativeTimeFrame: alerts.RelativeTimeframe_RELATIVE_TIMEFRAME_WEEK}, - "Yesterday": {timeFrame: alerts.Timeframe_TIMEFRAME_24_H, relativeTimeFrame: alerts.RelativeTimeframe_RELATIVE_TIMEFRAME_DAY}, - "Same_day_last_week": {timeFrame: alerts.Timeframe_TIMEFRAME_24_H, relativeTimeFrame: alerts.RelativeTimeframe_RELATIVE_TIMEFRAME_WEEK}, - "Same_day_last_month": {timeFrame: alerts.Timeframe_TIMEFRAME_24_H, relativeTimeFrame: alerts.RelativeTimeframe_RELATIVE_TIMEFRAME_MONTH}, - } - alertProtoTimeFrameAndRelativeTimeFrameToSchemaRelativeTimeFrame = reverseMapRelativeTimeFrame(alertSchemaRelativeTimeFrameToProtoTimeFrameAndRelativeTimeFrame) - alertValidRelativeTimeFrames = getKeysRelativeTimeFrame(alertSchemaRelativeTimeFrameToProtoTimeFrameAndRelativeTimeFrame) - alertSchemaArithmeticOperatorToProtoArithmetic = map[string]string{ - "Avg": "ARITHMETIC_OPERATOR_AVG_OR_UNSPECIFIED", - "Min": "ARITHMETIC_OPERATOR_MIN", - "Max": "ARITHMETIC_OPERATOR_MAX", - "Sum": "ARITHMETIC_OPERATOR_SUM", - "Count": "ARITHMETIC_OPERATOR_COUNT", - "Percentile": "ARITHMETIC_OPERATOR_PERCENTILE", - } - alertProtoArithmeticOperatorToSchemaArithmetic = reverseMapStrings(alertSchemaArithmeticOperatorToProtoArithmetic) - alertValidArithmeticOperators = getKeysStrings(alertSchemaArithmeticOperatorToProtoArithmetic) - alertValidFlowOperator = getKeysInt32(alerts.FlowOperator_value) - alertSchemaMetricTimeFrameToMetricProtoTimeFrame = map[string]string{ - "1Min": "TIMEFRAME_1_MIN", - "5Min": "TIMEFRAME_5_MIN_OR_UNSPECIFIED", - "10Min": "TIMEFRAME_10_MIN", - "15Min": "TIMEFRAME_15_MIN", - "20Min": "TIMEFRAME_20_MIN", - "30Min": "TIMEFRAME_30_MIN", - "1H": "TIMEFRAME_1_H", - "2H": "TIMEFRAME_2_H", - "4H": "TIMEFRAME_4_H", - "6H": "TIMEFRAME_6_H", - "12H": "TIMEFRAME_12_H", - "24H": "TIMEFRAME_24_H", - } - alertProtoMetricTimeFrameToMetricSchemaTimeFrame = reverseMapStrings(alertSchemaMetricTimeFrameToMetricProtoTimeFrame) - alertValidMetricTimeFrames = getKeysStrings(alertSchemaMetricTimeFrameToMetricProtoTimeFrame) - alertSchemaDeadmanRatiosToProtoDeadmanRatios = map[string]string{ - "Never": "CLEANUP_DEADMAN_DURATION_NEVER_OR_UNSPECIFIED", - "5Min": "CLEANUP_DEADMAN_DURATION_5MIN", - "10Min": "CLEANUP_DEADMAN_DURATION_10MIN", - "1H": "CLEANUP_DEADMAN_DURATION_1H", - "2H": "CLEANUP_DEADMAN_DURATION_2H", - "6H": "CLEANUP_DEADMAN_DURATION_6H", - "12H": "CLEANUP_DEADMAN_DURATION_12H", - "24H": "CLEANUP_DEADMAN_DURATION_24H", - } - alertProtoDeadmanRatiosToSchemaDeadmanRatios = reverseMapStrings(alertSchemaDeadmanRatiosToProtoDeadmanRatios) - alertValidDeadmanRatioValues = getKeysStrings(alertSchemaDeadmanRatiosToProtoDeadmanRatios) - validTimeZones = []string{"UTC-11", "UTC-10", "UTC-9", "UTC-8", "UTC-7", "UTC-6", "UTC-5", "UTC-4", "UTC-3", "UTC-2", "UTC-1", - "UTC+0", "UTC+1", "UTC+2", "UTC+3", "UTC+4", "UTC+5", "UTC+6", "UTC+7", "UTC+8", "UTC+9", "UTC+10", "UTC+11", "UTC+12", "UTC+13", "UTC+14"} - alertSchemaNotifyOnToProtoNotifyOn = map[string]alerts.NotifyOn{ - "Triggered_only": alerts.NotifyOn_TRIGGERED_ONLY, - "Triggered_and_resolved": 
alerts.NotifyOn_TRIGGERED_AND_RESOLVED, - } - alertProtoNotifyOnToSchemaNotifyOn = map[alerts.NotifyOn]string{ - alerts.NotifyOn_TRIGGERED_ONLY: "Triggered_only", - alerts.NotifyOn_TRIGGERED_AND_RESOLVED: "Triggered_and_resolved", - } - validNotifyOn = []string{"Triggered_only", "Triggered_and_resolved"} - alertSchemaToProtoEvaluationWindow = map[string]alerts.EvaluationWindow{ - "Rolling": alerts.EvaluationWindow_EVALUATION_WINDOW_ROLLING_OR_UNSPECIFIED, - "Dynamic": alerts.EvaluationWindow_EVALUATION_WINDOW_DYNAMIC, - } - alertProtoToSchemaEvaluationWindow = map[alerts.EvaluationWindow]string{ - alerts.EvaluationWindow_EVALUATION_WINDOW_ROLLING_OR_UNSPECIFIED: "Rolling", - alerts.EvaluationWindow_EVALUATION_WINDOW_DYNAMIC: "Dynamic", - } - validEvaluationWindow = []string{"Rolling", "Dynamic"} - createAlertURL = "com.coralogix.alerts.v2.AlertService/CreateAlert" - getAlertURL = "com.coralogix.alerts.v2.AlertService/GetAlertByUniqueId" - updateAlertURL = "com.coralogix.alerts.v2.AlertService/UpdateAlertByUniqueId" - deleteAlertURL = "com.coralogix.alerts.v2.AlertService/DeleteAlertByUniqueId" + _ resource.ResourceWithConfigure = &AlertResource{} + _ resource.ResourceWithImportState = &AlertResource{} + createAlertURL = cxsdk.CreateAlertDefRpc + updateAlertURL = cxsdk.ReplaceAlertDefRpc + getAlertURL = cxsdk.GetAlertDefRpc + deleteAlertURL = cxsdk.DeleteAlertDefRpc + + alertPriorityProtoToSchemaMap = map[cxsdk.AlertDefPriority]string{ + cxsdk.AlertDefPriorityP5OrUnspecified: "P5", + cxsdk.AlertDefPriorityP4: "P4", + cxsdk.AlertDefPriorityP3: "P3", + cxsdk.AlertDefPriorityP2: "P2", + cxsdk.AlertDefPriorityP1: "P1", + } + alertPrioritySchemaToProtoMap = ReverseMap(alertPriorityProtoToSchemaMap) + validAlertPriorities = GetKeys(alertPrioritySchemaToProtoMap) + + notifyOnProtoToSchemaMap = map[cxsdk.AlertNotifyOn]string{ + cxsdk.AlertNotifyOnTriggeredOnlyUnspecified: "Triggered Only", + cxsdk.AlertNotifyOnTriggeredAndResolved: "Triggered and Resolved", + } + notifyOnSchemaToProtoMap = ReverseMap(notifyOnProtoToSchemaMap) + validNotifyOn = GetKeys(notifyOnSchemaToProtoMap) + + daysOfWeekProtoToSchemaMap = map[cxsdk.AlertDayOfWeek]string{ + cxsdk.AlertDayOfWeekMonday: "Monday", + cxsdk.AlertDayOfWeekTuesday: "Tuesday", + cxsdk.AlertDayOfWeekWednesday: "Wednesday", + cxsdk.AlertDayOfWeekThursday: "Thursday", + cxsdk.AlertDayOfWeekFriday: "Friday", + cxsdk.AlertDayOfWeekSaturday: "Saturday", + cxsdk.AlertDayOfWeekSunday: "Sunday", + } + daysOfWeekSchemaToProtoMap = ReverseMap(daysOfWeekProtoToSchemaMap) + validDaysOfWeek = GetKeys(daysOfWeekSchemaToProtoMap) + + logFilterOperationTypeProtoToSchemaMap = map[cxsdk.LogFilterOperationType]string{ + cxsdk.LogFilterOperationIsOrUnspecified: "IS", + cxsdk.LogFilterOperationIncludes: "NOT", // includes? 
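+		// Note (clarifying the open question above): the SDK constant is named "Includes"
+		// while the schema surfaces it as "NOT"; whether these are meant to correspond still
+		// needs to be confirmed against the Alerts v3 API.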
+ cxsdk.LogFilterOperationEndsWith: "ENDS_WITH", + cxsdk.LogFilterOperationStartsWith: "STARTS_WITH", + } + logFilterOperationTypeSchemaToProtoMap = ReverseMap(logFilterOperationTypeProtoToSchemaMap) + validLogFilterOperationType = GetKeys(logFilterOperationTypeSchemaToProtoMap) + + logSeverityProtoToSchemaMap = map[cxsdk.LogSeverity]string{ + cxsdk.LogSeverityVerboseUnspecified: "Unspecified", + cxsdk.LogSeverityDebug: "Debug", + cxsdk.LogSeverityInfo: "Info", + cxsdk.LogSeverityWarning: "Warning", + cxsdk.LogSeverityError: "Error", + cxsdk.LogSeverityCritical: "Critical", + } + logSeveritySchemaToProtoMap = ReverseMap(logSeverityProtoToSchemaMap) + validLogSeverities = GetKeys(logSeveritySchemaToProtoMap) + + // DELETEME + // evaluationWindowTypeProtoToSchemaMap = map[cxsdk.AlertEvaluationWindow]string{ + // cxsdk.AlertEvaluationWindowRollingOrUnspecified: "Rolling", + // cxsdk.AlertEvaluationWindowDynamic: "Dynamic", + // } + // evaluationWindowTypeSchemaToProtoMap = ReverseMap(evaluationWindowTypeProtoToSchemaMap) + // validEvaluationWindowTypes = GetKeys(evaluationWindowTypeSchemaToProtoMap) + + logsTimeWindowValueProtoToSchemaMap = map[cxsdk.LogsTimeWindowValue]string{ + cxsdk.LogsTimeWindow5MinutesOrUnspecified: "5_MINUTES", + cxsdk.LogsTimeWindow10Minutes: "10_MINUTES", + cxsdk.LogsTimeWindow15Minutes: "15_MINUTES", + cxsdk.LogsTimeWindow30Minutes: "30_MINUTES", + cxsdk.LogsTimeWindow1Hour: "1_HOUR", + cxsdk.LogsTimeWindow2Hours: "2_HOURS", + cxsdk.LogsTimeWindow4Hours: "4_HOURS", + cxsdk.LogsTimeWindow6Hours: "6_HOURS", + cxsdk.LogsTimeWindow12Hours: "12_HOURS", + cxsdk.LogsTimeWindow24Hours: "24_HOURS", + cxsdk.LogsTimeWindow36Hours: "36_HOURS", + } + logsTimeWindowValueSchemaToProtoMap = ReverseMap(logsTimeWindowValueProtoToSchemaMap) + validLogsTimeWindowValues = GetKeys(logsTimeWindowValueSchemaToProtoMap) + + autoRetireTimeframeProtoToSchemaMap = map[cxsdk.AutoRetireTimeframe]string{ + cxsdk.AutoRetireTimeframeNeverOrUnspecified: "Never", + cxsdk.AutoRetireTimeframe5Minutes: "5_Minutes", + cxsdk.AutoRetireTimeframe10Minutes: "10_Minutes", + cxsdk.AutoRetireTimeframe1Hour: "1_Hour", + cxsdk.AutoRetireTimeframe2Hours: "2_Hours", + cxsdk.AutoRetireTimeframe6Hours: "6_Hours", + cxsdk.AutoRetireTimeframe12Hours: "12_Hours", + cxsdk.AutoRetireTimeframe24Hours: "24_Hours", + } + autoRetireTimeframeSchemaToProtoMap = ReverseMap(autoRetireTimeframeProtoToSchemaMap) + validAutoRetireTimeframes = GetKeys(autoRetireTimeframeSchemaToProtoMap) + + logsRatioTimeWindowValueProtoToSchemaMap = map[cxsdk.LogsRatioTimeWindowValue]string{ + cxsdk.LogsRatioTimeWindowValue5MinutesOrUnspecified: "5_MINUTES", + cxsdk.LogsRatioTimeWindowValue10Minutes: "10_MINUTES", + cxsdk.LogsRatioTimeWindowValue15Minutes: "15_MINUTES", + cxsdk.LogsRatioTimeWindowValue30Minutes: "30_MINUTES", + cxsdk.LogsRatioTimeWindowValue1Hour: "1_HOUR", + cxsdk.LogsRatioTimeWindowValue2Hours: "2_HOURS", + cxsdk.LogsRatioTimeWindowValue4Hours: "4_HOURS", + cxsdk.LogsRatioTimeWindowValue6Hours: "6_HOURS", + cxsdk.LogsRatioTimeWindowValue12Hours: "12_HOURS", + cxsdk.LogsRatioTimeWindowValue24Hours: "24_HOURS", + cxsdk.LogsRatioTimeWindowValue36Hours: "36_HOURS", + } + logsRatioTimeWindowValueSchemaToProtoMap = ReverseMap(logsRatioTimeWindowValueProtoToSchemaMap) + validLogsRatioTimeWindowValues = GetKeys(logsRatioTimeWindowValueSchemaToProtoMap) + + logsRatioGroupByForProtoToSchemaMap = map[cxsdk.LogsRatioGroupByFor]string{ + cxsdk.LogsRatioGroupByForBothOrUnspecified: "Both", + cxsdk.LogsRatioGroupByForNumeratorOnly: "Numerator 
Only", + cxsdk.LogsRatioGroupByForDenumeratorOnly: "Denominator Only", + } + logsRatioGroupByForSchemaToProtoMap = ReverseMap(logsRatioGroupByForProtoToSchemaMap) + validLogsRatioGroupByFor = GetKeys(logsRatioGroupByForSchemaToProtoMap) + + logsNewValueTimeWindowValueProtoToSchemaMap = map[cxsdk.LogsNewValueTimeWindowValue]string{ + cxsdk.LogsNewValueTimeWindowValue12HoursOrUnspecified: "12_HOURS", + cxsdk.LogsNewValueTimeWindowValue24Hours: "24_HOURS", + cxsdk.LogsNewValueTimeWindowValue48Hours: "48_HOURS", + cxsdk.LogsNewValueTimeWindowValue72Hours: "72_HOURS", + cxsdk.LogsNewValueTimeWindowValue1Week: "1_WEEK", + cxsdk.LogsNewValueTimeWindowValue1Month: "1_MONTH", + cxsdk.LogsNewValueTimeWindowValue2Months: "2_MONTHS", + cxsdk.LogsNewValueTimeWindowValue_3Months: "3_MONTHS", + } + logsNewValueTimeWindowValueSchemaToProtoMap = ReverseMap(logsNewValueTimeWindowValueProtoToSchemaMap) + validLogsNewValueTimeWindowValues = GetKeys(logsNewValueTimeWindowValueSchemaToProtoMap) + + logsUniqueCountTimeWindowValueProtoToSchemaMap = map[cxsdk.LogsUniqueValueTimeWindowValue]string{ + cxsdk.LogsUniqueValueTimeWindowValue1MinuteOrUnspecified: "1_MINUTE", + cxsdk.LogsUniqueValueTimeWindowValue15Minutes: "5_MINUTES", + cxsdk.LogsUniqueValueTimeWindowValue20Minutes: "20_MINUTES", + cxsdk.LogsUniqueValueTimeWindowValue30Minutes: "30_MINUTES", + cxsdk.LogsUniqueValueTimeWindowValue1Hour: "1_HOUR", + cxsdk.LogsUniqueValueTimeWindowValue2Hours: "2_HOURS", + cxsdk.LogsUniqueValueTimeWindowValue4Hours: "4_HOURS", + cxsdk.LogsUniqueValueTimeWindowValue6Hours: "6_HOURS", + cxsdk.LogsUniqueValueTimeWindowValue12Hours: "12_HOURS", + cxsdk.LogsUniqueValueTimeWindowValue24Hours: "24_HOURS", + } + logsUniqueCountTimeWindowValueSchemaToProtoMap = ReverseMap(logsUniqueCountTimeWindowValueProtoToSchemaMap) + validLogsUniqueCountTimeWindowValues = GetKeys(logsUniqueCountTimeWindowValueSchemaToProtoMap) + + logsTimeRelativeComparedToProtoToSchemaMap = map[cxsdk.LogsTimeRelativeComparedTo]string{ + cxsdk.LogsTimeRelativeComparedToPreviousHourOrUnspecified: "Previous Hour", + cxsdk.LogsTimeRelativeComparedToSameHourYesterday: "Same Hour Yesterday", + cxsdk.LogsTimeRelativeComparedToSameHourLastWeek: "Same Hour Last Week", + cxsdk.LogsTimeRelativeComparedToYesterday: "Yesterday", + cxsdk.LogsTimeRelativeComparedToSameDayLastWeek: "Same Day Last Week", + cxsdk.LogsTimeRelativeComparedToSameDayLastMonth: "Same Day Last Month", + } + logsTimeRelativeComparedToSchemaToProtoMap = ReverseMap(logsTimeRelativeComparedToProtoToSchemaMap) + validLogsTimeRelativeComparedTo = GetKeys(logsTimeRelativeComparedToSchemaToProtoMap) + + metricFilterOperationTypeProtoToSchemaMap = map[cxsdk.MetricTimeWindowValue]string{ + cxsdk.MetricTimeWindowValue1MinuteOrUnspecified: "1_MINUTE", + cxsdk.MetricTimeWindowValue5Minutes: "5_MINUTES", + cxsdk.MetricTimeWindowValue10Minutes: "10_MINUTES", + cxsdk.MetricTimeWindowValue15Minutes: "15_MINUTES", + cxsdk.MetricTimeWindowValue30Minutes: "30_MINUTES", + cxsdk.MetricTimeWindowValue1Hour: "1_HOUR", + cxsdk.MetricTimeWindowValue2Hours: "2_HOURS", + cxsdk.MetricTimeWindowValue4Hours: "4_HOURS", + cxsdk.MetricTimeWindowValue6Hours: "6_HOURS", + cxsdk.MetricTimeWindowValue12Hours: "12_HOURS", + cxsdk.MetricTimeWindowValue24Hours: "24_HOURS", + } + metricTimeWindowValueSchemaToProtoMap = ReverseMap(metricFilterOperationTypeProtoToSchemaMap) + validMetricTimeWindowValues = GetKeys(metricTimeWindowValueSchemaToProtoMap) + + tracingTimeWindowProtoToSchemaMap = map[cxsdk.TracingTimeWindowValue]string{ + 
cxsdk.TracingTimeWindowValue5MinutesOrUnspecified: "5_MINUTES", + cxsdk.TracingTimeWindowValue10Minutes: "10_MINUTES", + cxsdk.TracingTimeWindowValue15Minutes: "15_MINUTES", + cxsdk.TracingTimeWindowValue30Minutes: "30_MINUTES", + cxsdk.TracingTimeWindowValue1Hour: "1_HOUR", + cxsdk.TracingTimeWindowValue2Hours: "2_HOURS", + cxsdk.TracingTimeWindowValue4Hours: "4_HOURS", + cxsdk.TracingTimeWindowValue6Hours: "6_HOURS", + cxsdk.TracingTimeWindowValue12Hours: "12_HOURS", + cxsdk.TracingTimeWindowValue24Hours: "24_HOURS", + cxsdk.TracingTimeWindowValue36Hours: "36_HOURS", + } + tracingTimeWindowSchemaToProtoMap = ReverseMap(tracingTimeWindowProtoToSchemaMap) + validTracingTimeWindow = GetKeys(tracingTimeWindowSchemaToProtoMap) + + tracingFilterOperationProtoToSchemaMap = map[cxsdk.TracingFilterOperationType]string{ + cxsdk.TracingFilterOperationTypeIsOrUnspecified: "IS", + cxsdk.TracingFilterOperationTypeIncludes: "NOT", + cxsdk.TracingFilterOperationTypeEndsWith: "ENDS_WITH", + cxsdk.TracingFilterOperationTypeStartsWith: "STARTS_WITH", + } + tracingFilterOperationSchemaToProtoMap = ReverseMap(tracingFilterOperationProtoToSchemaMap) + validTracingFilterOperations = GetKeys(tracingFilterOperationSchemaToProtoMap) + flowStageTimeFrameTypeProtoToSchemaMap = map[cxsdk.TimeframeType]string{ + cxsdk.TimeframeTypeUnspecified: "Unspecified", + cxsdk.TimeframeTypeUpTo: "Up To", + } + flowStageTimeFrameTypeSchemaToProtoMap = ReverseMap(flowStageTimeFrameTypeProtoToSchemaMap) + validFlowStageTimeFrameTypes = GetKeys(flowStageTimeFrameTypeSchemaToProtoMap) + + flowStagesGroupNextOpProtoToSchemaMap = map[cxsdk.NextOp]string{ + cxsdk.NextOpAndOrUnspecified: "AND", + cxsdk.NextOpOr: "OR", + } + flowStagesGroupNextOpSchemaToProtoMap = ReverseMap(flowStagesGroupNextOpProtoToSchemaMap) + validFlowStagesGroupNextOps = GetKeys(flowStagesGroupNextOpSchemaToProtoMap) + + flowStagesGroupAlertsOpProtoToSchemaMap = map[cxsdk.AlertsOp]string{ + cxsdk.AlertsOpAndOrUnspecified: "AND", + cxsdk.AlertsOpOr: "OR", + } + flowStagesGroupAlertsOpSchemaToProtoMap = ReverseMap(flowStagesGroupAlertsOpProtoToSchemaMap) + validFlowStagesGroupAlertsOps = GetKeys(flowStagesGroupAlertsOpSchemaToProtoMap) + + logsThresholdConditionMap = map[cxsdk.LogsThresholdConditionType]string{ + cxsdk.LogsThresholdConditionTypeMoreThanOrUnspecified: "MORE_THAN", + cxsdk.LogsThresholdConditionTypeLessThan: "LESS_THAN", + } + logsThresholdConditionValues = GetValues(logsThresholdConditionMap) + + logsRatioConditionConditionMap = map[cxsdk.LogsRatioConditionType]string{ + cxsdk.LogsRatioConditionTypeMoreThanOrUnspecified: "MORE_THAN", + cxsdk.LogsRatioConditionTypeLessThan: "LESS_THAN", + } + logsRatioConditionConditionMapValues = GetValues(logsRatioConditionConditionMap) ) -type alertParams struct { - Condition *alerts.AlertCondition - Filters *alerts.AlertFilters +func NewAlertResource() resource.Resource { + return &AlertResource{} } -type protoTimeFrameAndRelativeTimeFrame struct { - timeFrame alerts.Timeframe - relativeTimeFrame alerts.RelativeTimeframe +type AlertResource struct { + client *cxsdk.AlertsClient } -func resourceCoralogixAlert() *schema.Resource { - return &schema.Resource{ - CreateContext: resourceCoralogixAlertCreate, - ReadContext: resourceCoralogixAlertRead, - UpdateContext: resourceCoralogixAlertUpdate, - DeleteContext: resourceCoralogixAlertDelete, +type AlertResourceModel struct { + ID types.String `tfsdk:"id"` + Name types.String `tfsdk:"name"` + Description types.String `tfsdk:"description"` + Enabled types.Bool 
`tfsdk:"enabled"` + Priority types.String `tfsdk:"priority"` + Schedule types.Object `tfsdk:"schedule"` // AlertScheduleModel + Type types.Object `tfsdk:"type"` // AlertTypeDefinitionModel - Importer: &schema.ResourceImporter{ - StateContext: schema.ImportStatePassthroughContext, - }, + GroupBy types.Set `tfsdk:"group_by"` // []types.String + IncidentsSettings types.Object `tfsdk:"incidents_settings"` // IncidentsSettingsModel + NotificationGroup types.Object `tfsdk:"notification_group"` // NotificationGroupModel + Labels types.Map `tfsdk:"labels"` // map[string]string +} - Timeouts: &schema.ResourceTimeout{ - Create: schema.DefaultTimeout(60 * time.Second), - Read: schema.DefaultTimeout(30 * time.Second), - Update: schema.DefaultTimeout(60 * time.Second), - Delete: schema.DefaultTimeout(30 * time.Second), - }, +type AlertScheduleModel struct { + ActiveOn types.Object `tfsdk:"active_on"` // ActiveOnModel +} - Schema: AlertSchema(), +type AlertTypeDefinitionModel struct { + LogsImmediate types.Object `tfsdk:"logs_immediate"` // LogsImmediateModel + LogsThreshold types.Object `tfsdk:"logs_threshold"` // LogsThresholdModel + LogsUnusual types.Object `tfsdk:"logs_unusual"` // LogsUnusualModel + LogsRatioThreshold types.Object `tfsdk:"logs_ratio_threshold"` // LogsRatioThresholdModel + LogsNewValue types.Object `tfsdk:"logs_new_value"` // LogsNewValueModel + LogsUniqueCount types.Object `tfsdk:"logs_unique_count"` // LogsUniqueCountModel + LogsTimeRelativeThreshold types.Object `tfsdk:"logs_time_relative_threshold"` // LogsTimeRelativeThresholdModel + LogsMetricThreshold types.Object `tfsdk:"logs_metric_threshold"` // LogsMetricThresholdModel + LogsMetricUnusual types.Object `tfsdk:"logs_metric_unusual"` // LogsMetricUnusualModel + LogsTracingImmediate types.Object `tfsdk:"logs_tracing_immediate"` // LogsTracingImmediateModel + LogsTracingThreshold types.Object `tfsdk:"logs_tracing_threshold"` // LogsTracingThresholdModel +} - Description: "Coralogix alert. More info: https://coralogix.com/docs/alerts-api/ .", - } +type IncidentsSettingsModel struct { + NotifyOn types.String `tfsdk:"notify_on"` + RetriggeringPeriod types.Object `tfsdk:"retriggering_period"` // RetriggeringPeriodModel } -func AlertSchema() map[string]*schema.Schema { - return map[string]*schema.Schema{ - "enabled": { - Type: schema.TypeBool, - Optional: true, - Default: true, - Description: "Determines whether the alert will be active. True by default.", - }, - "name": { - Type: schema.TypeString, - Required: true, - ValidateFunc: validation.StringIsNotEmpty, - Description: "Alert name.", - }, - "description": { - Type: schema.TypeString, - Optional: true, - Description: "Alert description.", - }, - "severity": { - Type: schema.TypeString, - Required: true, - ValidateFunc: validation.StringInSlice(alertValidSeverities, false), - Description: fmt.Sprintf("Determines the alert's severity. Can be one of %q", alertValidSeverities), - }, - "meta_labels": { - Type: schema.TypeMap, - Elem: &schema.Schema{ - Type: schema.TypeString, - }, - Optional: true, - Description: "Labels allow you to easily filter by alert type and create views. Insert a new label or use an existing one. 
You can nest a label using key:value.", - ValidateDiagFunc: validation.MapKeyMatch(regexp.MustCompile(`^[A-Za-z\d_-]*$`), "not valid key for meta_label"), - }, - "expiration_date": { - Type: schema.TypeList, - Optional: true, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "day": { - Type: schema.TypeInt, - Required: true, - ValidateFunc: validation.IntBetween(1, 31), - Description: `Day of a month. Must be from 1 to 31 and valid for the year and month.`, - }, - "month": { - Type: schema.TypeInt, - Required: true, - ValidateFunc: validation.IntBetween(1, 12), - Description: `Month of a year. Must be from 1 to 12.`, - }, - "year": { - Type: schema.TypeInt, - Required: true, - ValidateFunc: validation.IntBetween(1, 9999), - Description: `Year of the date. Must be from 1 to 9999.`, - }, - }, - }, - Description: "The expiration date of the alert (if declared).", - }, - "notifications_group": { - Type: schema.TypeSet, - Optional: true, - Computed: true, - Elem: notificationGroupSchema(), - Set: schema.HashResource(notificationGroupSchema()), - Description: "Defines notifications settings over list of group-by keys (or on empty list).", - }, - "payload_filters": { - Type: schema.TypeSet, - Optional: true, - Elem: &schema.Schema{ - Type: schema.TypeString, - }, - Description: "A list of log fields out of the log example which will be included with the alert notification.", - Set: schema.HashString, - }, - "incident_settings": { - Type: schema.TypeList, - MaxItems: 1, - Optional: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "retriggering_period_minutes": { - Type: schema.TypeInt, - Required: true, - ValidateFunc: validation.IntAtLeast(1), - }, - "notify_on": { - Type: schema.TypeString, - Optional: true, - Default: "Triggered_only", - ValidateFunc: validation.StringInSlice(validNotifyOn, false), - Description: fmt.Sprintf("Defines the alert's triggering logic. Can be one of %q. Triggered_and_resolved conflicts with new_value, unique_count and flow alerts, and with immediately and more_than_usual conditions", validNotifyOn), - }, - }, - }, - //AtLeastOneOf: []string{"notifications_group", "show_in_insights", "incident_settings"}, - }, - "scheduling": { - Type: schema.TypeList, - Optional: true, - Elem: &schema.Resource{ - Schema: schedulingSchema(), - }, - MaxItems: 1, - Description: "Limit the triggering of this alert to specific time frames. 
Active always by default.", - }, - "standard": { - Type: schema.TypeList, - Optional: true, - Elem: &schema.Resource{ - Schema: standardSchema(), - }, - MaxItems: 1, - ExactlyOneOf: validAlertTypes, - Description: "Alert based on number of log occurrences.", - }, - "ratio": { - Type: schema.TypeList, - Optional: true, - Elem: &schema.Resource{ - Schema: ratioSchema(), - }, - MaxItems: 1, - ExactlyOneOf: validAlertTypes, - Description: "Alert based on the ratio between queries.", - }, - "new_value": { - Type: schema.TypeList, - Optional: true, - Elem: &schema.Resource{ - Schema: newValueSchema(), - }, - MaxItems: 1, - ExactlyOneOf: validAlertTypes, - Description: "Alert on never before seen log value.", - }, - "unique_count": { - Type: schema.TypeList, - Optional: true, - Elem: &schema.Resource{ - Schema: uniqueCountSchema(), - }, - MaxItems: 1, - ExactlyOneOf: validAlertTypes, - Description: "Alert based on unique value count per key.", - }, - "time_relative": { - Type: schema.TypeList, - Optional: true, - Elem: &schema.Resource{ - Schema: timeRelativeSchema(), - }, - MaxItems: 1, - ExactlyOneOf: validAlertTypes, - Description: "Alert based on ratio between timeframes.", - }, - "metric": { - Type: schema.TypeList, - Optional: true, - Elem: &schema.Resource{ - Schema: metricSchema(), - }, - MaxItems: 1, - ExactlyOneOf: validAlertTypes, - Description: "Alert based on arithmetic operators for metrics.", - }, - "tracing": { - Type: schema.TypeList, - Optional: true, - Elem: &schema.Resource{ - Schema: tracingSchema(), - }, - MaxItems: 1, - ExactlyOneOf: validAlertTypes, - Description: "Alert based on tracing latency.", - }, - "flow": { - Type: schema.TypeList, - Optional: true, - Elem: &schema.Resource{ - Schema: flowSchema(), - }, - MaxItems: 1, - ExactlyOneOf: validAlertTypes, - Description: "Alert based on a combination of alerts in a specific timeframe.", - }, - } +type NotificationGroupModel struct { + GroupByFields types.List `tfsdk:"group_by_fields"` // []types.String + AdvancedTargetSettings types.Set `tfsdk:"advanced_target_settings"` // AdvancedTargetSettingsModel + SimpleTargetSettings types.Set `tfsdk:"simple_target_settings"` // SimpleTargetSettingsModel } -func notificationGroupSchema() *schema.Resource { - return &schema.Resource{ - Schema: map[string]*schema.Schema{ - "group_by_fields": { - Type: schema.TypeList, - Optional: true, - Elem: &schema.Schema{ - Type: schema.TypeString, - }, - Description: "List of group-by fields to apply the notification logic on (can be empty). Every notification should contain unique group_by_fields permutation (the order doesn't matter).", - }, - "notification": { - Type: schema.TypeSet, - Optional: true, - Elem: notificationSubgroupSchema(), - Set: schema.HashResource(notificationSubgroupSchema()), - Description: "Defines notification logic with optional recipients. 
Can contain single webhook or email recipients list.", - }, - }, - } +type AdvancedTargetSettingsModel struct { + RetriggeringPeriod types.Object `tfsdk:"retriggering_period"` // RetriggeringPeriodModel + NotifyOn types.String `tfsdk:"notify_on"` + IntegrationID types.String `tfsdk:"integration_id"` + Recipients types.Set `tfsdk:"recipients"` //[]types.String } -func notificationSubgroupSchema() *schema.Resource { - return &schema.Resource{ - Schema: map[string]*schema.Schema{ - "retriggering_period_minutes": { - Type: schema.TypeInt, - Optional: true, - ValidateFunc: validation.IntAtLeast(1), - Description: "By default, retriggering_period_minutes will be populated with min for immediate," + - " more_than and more_than_usual alerts. For less_than alert it will be populated with the chosen time" + - " frame for the less_than condition (in minutes). You may choose to change the suppress window so the " + - "alert will be suppressed for a longer period.", - ExactlyOneOf: []string{"incident_settings"}, - }, - "notify_on": { - Type: schema.TypeString, - Optional: true, - ValidateFunc: validation.StringInSlice(validNotifyOn, false), - Description: fmt.Sprintf("Defines the alert's triggering logic. Can be one of %q. Triggered_and_resolved conflicts with new_value, unique_count and flow alerts, and with immediately and more_than_usual conditions", validNotifyOn), - ExactlyOneOf: []string{"incident_settings"}, - }, - "integration_id": { - Type: schema.TypeString, - Optional: true, - ValidateFunc: validation.StringIsNotEmpty, - Description: "Conflicts with emails.", - }, - "email_recipients": { - Type: schema.TypeSet, - Optional: true, - Elem: &schema.Schema{ - Type: schema.TypeString, - //ValidateDiagFunc: mailValidationFunc(), - }, - Set: schema.HashString, - Description: "Conflicts with integration_id.", - }, - }, - } +type SimpleTargetSettingsModel struct { + IntegrationID types.String `tfsdk:"integration_id"` + Recipients types.Set `tfsdk:"recipients"` //[]types.String } -func schedulingSchema() map[string]*schema.Schema { - return map[string]*schema.Schema{ - "time_zone": { - Type: schema.TypeString, - Optional: true, - Default: "UTC+0", - ValidateFunc: validation.StringInSlice(validTimeZones, false), - Description: fmt.Sprintf("Specifies the time zone to be used in interpreting the schedule. Can be one of %q", validTimeZones), - }, - "time_frame": { - Type: schema.TypeSet, - MaxItems: 1, - Required: true, - Elem: timeFrames(), - Set: hashTimeFrames(), - Description: "time_frame is a set of days and hours when the alert will be active. ***Currently, supported only for one time_frame***", - }, - } +type ActiveOnModel struct { + DaysOfWeek types.List `tfsdk:"days_of_week"` // []types.String + StartTime types.Object `tfsdk:"start_time"` // TimeOfDayModel + EndTime types.Object `tfsdk:"end_time"` // TimeOfDayModel } -func timeFrames() *schema.Resource { - return &schema.Resource{ - Schema: map[string]*schema.Schema{ - "days_enabled": { - Type: schema.TypeSet, - Required: true, - Elem: &schema.Schema{ - Type: schema.TypeString, - ValidateFunc: validation.StringInSlice(alertValidDaysOfWeek, false), - }, - Description: fmt.Sprintf("Days of week. 
Can be one of %q", alertValidDaysOfWeek), - Set: schema.HashString, - }, - "start_time": timeInDaySchema(`Limit the triggering of this alert to start at specific hour.`), - "end_time": timeInDaySchema(`Limit the triggering of this alert to end at specific hour.`), - }, +type TimeOfDayModel struct { + Hours types.Int64 `tfsdk:"hours"` + Minutes types.Int64 `tfsdk:"minutes"` +} + +type RetriggeringPeriodModel struct { + Minutes types.Int64 `tfsdk:"minutes"` +} + +// Alert Types: + +type LogsImmediateModel struct { + LogsFilter types.Object `tfsdk:"logs_filter"` // AlertsLogsFilterModel + NotificationPayloadFilter types.Set `tfsdk:"notification_payload_filter"` // []types.String +} + +type LogsThresholdModel struct { + Rules types.List `tfsdk:"rules"` // []RuleModel + LogsFilter types.Object `tfsdk:"logs_filter"` // AlertsLogsFilterModel + NotificationPayloadFilter types.Set `tfsdk:"notification_payload_filter"` // []types.String + UndetectedValuesManagement types.Object `tfsdk:"undetected_values_management"` // UndetectedValuesManagementModel +} + +type LogsUnusualModel struct { + Rules types.List `tfsdk:"rules"` // []RuleModel + LogsFilter types.Object `tfsdk:"logs_filter"` // AlertsLogsFilterModel + NotificationPayloadFilter types.Set `tfsdk:"notification_payload_filter"` // []types.String +} + +type LogsRatioThresholdModel struct { + Rules types.List `tfsdk:"rules"` // []RuleModel + Numerator types.Object `tfsdk:"numerator"` // AlertsLogsFilterModel + NumeratorAlias types.String `tfsdk:"numerator_alias"` + Denominator types.Object `tfsdk:"denominator"` // AlertsLogsFilterModel + DenominatorAlias types.String `tfsdk:"denominator_alias"` + NotificationPayloadFilter types.Set `tfsdk:"notification_payload_filter"` // []types.String + GroupByFor types.String `tfsdk:"group_by_for"` +} + +type LogsNewValueModel struct { + Rules types.List `tfsdk:"rules"` // []NewValueRulesModel + LogsFilter types.Object `tfsdk:"logs_filter"` // AlertsLogsFilterModel + NotificationPayloadFilter types.Set `tfsdk:"notification_payload_filter"` // []types.String +} + +type LogsUniqueCountModel struct { + Rules types.List `tfsdk:"rules"` // []LogsUniqueCountRulesModel + LogsFilter types.Object `tfsdk:"logs_filter"` // AlertsLogsFilterModel + NotificationPayloadFilter types.Set `tfsdk:"notification_payload_filter"` // []types.String +} + +type LogsUniqueCountRulesModel struct { + MaxUniqueCountPerGroupByKey types.Int64 `tfsdk:"max_unique_count_per_group_by_key"` + MaxUniqueCount types.Int64 `tfsdk:"max_unique_count"` + TimeWindow types.Object `tfsdk:"time_window"` // LogsUniqueCountTimeWindowModel + UniqueCountKeypath types.String `tfsdk:"unique_count_keypath"` +} + +type LogsTimeRelativeThresholdModel struct { + Rules types.List `tfsdk:"rules"` // []RuleModel + LogsFilter types.Object `tfsdk:"logs_filter"` // AlertsLogsFilterModel + NotificationPayloadFilter types.Set `tfsdk:"notification_payload_filter"` // []types.String + UndetectedValuesManagement types.Object `tfsdk:"undetected_values_management"` // UndetectedValuesManagementModel +} + +type MetricThresholdModel struct { + Rules types.List `tfsdk:"rules"` // []MetricRule + MetricFilter types.Object `tfsdk:"metric_filter"` // MetricFilterModel + UndetectedValuesManagement types.Object `tfsdk:"undetected_values_management"` // UndetectedValuesManagementModel +} + +type MetricRule struct { + Threshold types.Float64 `tfsdk:"threshold"` + ForOverPct types.Int64 `tfsdk:"for_over_pct"` + OfTheLast types.Object `tfsdk:"of_the_last"` // MetricTimeWindowModel + Condition 
types.String `tfsdk:"condition"` + MinNonNullValuesPct types.Int32 `tfsdk:"min_non_null_values_pct"` + MissingValues types.Object `tfsdk:"missing_values"` // MetricMissingValuesModel +} + +// DELETEME +// type MetricLessThanModel struct { +// MetricFilter types.Object `tfsdk:"metric_filter"` // MetricFilterModel +// OfTheLast types.Object `tfsdk:"of_the_last"` // MetricTimeWindowModel +// MissingValues types.Object `tfsdk:"missing_values"` // MetricMissingValuesModel +// Threshold types.Float64 `tfsdk:"threshold"` +// ForOverPct types.Int64 `tfsdk:"for_over_pct"` +// } + +type MetricUnusualModel struct { + MetricFilter types.Object `tfsdk:"metric_filter"` // MetricFilterModel + Rules types.List `tfsdk:"rules"` // []MetricRule +} + +type MetricImmediateModel struct { + MetricFilter types.Object `tfsdk:"metric_filter"` // TracingFilterModel + NotificationPayloadFilter types.Set `tfsdk:"notification_payload_filter"` // []types.String +} + +type TracingImmediateModel struct { + TracingFilter types.Object `tfsdk:"tracing_filter"` // TracingFilterModel + NotificationPayloadFilter types.Set `tfsdk:"notification_payload_filter"` // []types.String +} + +type TracingThresholdModel struct { + TracingFilter types.Object `tfsdk:"tracing_filter"` // TracingFilterModel + NotificationPayloadFilter types.Set `tfsdk:"notification_payload_filter"` // []types.String + TimeWindow types.Object `tfsdk:"time_window"` // TracingTimeWindowModel + SpanAmount types.Int64 `tfsdk:"span_amount"` +} + +type FlowModel struct { + Stages types.List `tfsdk:"stages"` // FlowStageModel + EnforceSuppression types.Bool `tfsdk:"enforce_suppression"` +} + +type FlowStageModel struct { + FlowStagesGroups types.List `tfsdk:"flow_stages_groups"` // FlowStagesGroupModel + TimeframeMs types.Int64 `tfsdk:"timeframe_ms"` + TimeframeType types.String `tfsdk:"timeframe_type"` +} + +type FlowStagesGroupModel struct { + AlertDefs types.List `tfsdk:"alert_defs"` // FlowStagesGroupsAlertDefsModel + NextOp types.String `tfsdk:"next_op"` + AlertsOp types.String `tfsdk:"alerts_op"` +} + +type FlowStagesGroupsAlertDefsModel struct { + Id types.String `tfsdk:"id"` + Not types.Bool `tfsdk:"not"` +} + +// DELETEME +// type MetricLessThanUsualModel struct { +// MetricFilter types.Object `tfsdk:"metric_filter"` // MetricFilterModel +// OfTheLast types.Object `tfsdk:"of_the_last"` // MetricTimeWindowModel +// Threshold types.Int64 `tfsdk:"threshold"` +// ForOverPct types.Int64 `tfsdk:"for_over_pct"` +// MinNonNullValuesPct types.Int64 `tfsdk:"min_non_null_values_pct"` +// } + +// type MetricMoreThanOrEqualsModel struct { +// MetricFilter types.Object `tfsdk:"metric_filter"` // MetricFilterModel +// Threshold types.Float64 `tfsdk:"threshold"` +// ForOverPct types.Int64 `tfsdk:"for_over_pct"` +// OfTheLast types.Object `tfsdk:"of_the_last"` // MetricTimeWindowModel +// MissingValues types.Object `tfsdk:"missing_values"` // MetricMissingValuesModel +// } + +// type MetricLessThanOrEqualsModel struct { +// MetricFilter types.Object `tfsdk:"metric_filter"` // MetricFilterModel +// OfTheLast types.Object `tfsdk:"of_the_last"` // MetricTimeWindowModel +// MissingValues types.Object `tfsdk:"missing_values"` // MetricMissingValuesModel +// Threshold types.Float64 `tfsdk:"threshold"` +// ForOverPct types.Int64 `tfsdk:"for_over_pct"` // MetricMissingValuesModel +// UndetectedValuesManagement types.Object `tfsdk:"undetected_values_management"` // UndetectedValuesManagementModel +// } + +type AlertsLogsFilterModel struct { + SimpleFilter types.Object 
`tfsdk:"simple_filter"` // LuceneFilterModel +} + +type LogsTimeWindowModel struct { + SpecificValue types.String `tfsdk:"specific_value"` +} + +type SimpleFilterModel struct { + LuceneQuery types.String `tfsdk:"lucene_query"` + LabelFilters types.Object `tfsdk:"label_filters"` // LabelFiltersModel +} + +type LabelFiltersModel struct { + ApplicationName types.Set `tfsdk:"application_name"` // LabelFilterTypeModel + SubsystemName types.Set `tfsdk:"subsystem_name"` // LabelFilterTypeModel + Severities types.Set `tfsdk:"severities"` // []types.String +} + +type LabelFilterTypeModel struct { + Value types.String `tfsdk:"value"` + Operation types.String `tfsdk:"operation"` +} + +type NotificationPayloadFilterModel struct { + Filter types.String `tfsdk:"filter"` +} + +type UndetectedValuesManagementModel struct { + TriggerUndetectedValues types.Bool `tfsdk:"trigger_undetected_values"` + AutoRetireTimeframe types.String `tfsdk:"auto_retire_timeframe"` +} + +type LogsRatioTimeWindowModel struct { + SpecificValue types.String `tfsdk:"specific_value"` +} + +type LogsNewValueTimeWindowModel struct { + SpecificValue types.String `tfsdk:"specific_value"` +} + +type LogsUniqueCountTimeWindowModel struct { + SpecificValue types.String `tfsdk:"specific_value"` +} + +type MetricFilterModel struct { + Promql types.String `tfsdk:"promql"` +} + +type MetricTimeWindowModel struct { + SpecificValue types.String `tfsdk:"specific_value"` +} + +type MetricMissingValuesModel struct { + ReplaceWithZero types.Bool `tfsdk:"replace_with_zero"` + MinNonNullValuesPct types.Int64 `tfsdk:"min_non_null_values_pct"` +} + +type NewValueRulesModel struct { + TimeWindow types.Object `tfsdk:"time_window"` // LogsTimeWindowModel + KeypathToTrack types.String `tfsdk:"keypath_to_track"` +} + +type RuleModel struct { + CompareTo types.String `tfsdk:"compare_to"` + Condition types.String `tfsdk:"condition"` + Threshold types.Float64 `tfsdk:"threshold"` + TimeWindow types.Object `tfsdk:"time_window"` // LogsTimeWindowModel + IgnoreInfinity types.Bool `tfsdk:"ignore_infinity"` +} + +type TracingFilterModel struct { + LatencyThresholdMs types.Int64 `tfsdk:"latency_threshold_ms"` + TracingLabelFilters types.Object `tfsdk:"tracing_label_filters"` // TracingLabelFiltersModel +} + +type TracingLabelFiltersModel struct { + ApplicationName types.Set `tfsdk:"application_name"` // TracingFilterTypeModel + SubsystemName types.Set `tfsdk:"subsystem_name"` // TracingFilterTypeModel + ServiceName types.Set `tfsdk:"service_name"` // TracingFilterTypeModel + OperationName types.Set `tfsdk:"operation_name"` // TracingFilterTypeModel + SpanFields types.Set `tfsdk:"span_fields"` // TracingSpanFieldsFilterModel +} + +type TracingFilterTypeModel struct { + Values types.Set `tfsdk:"values"` // []types.String + Operation types.String `tfsdk:"operation"` +} + +type TracingSpanFieldsFilterModel struct { + Key types.String `tfsdk:"key"` + FilterType types.Object `tfsdk:"filter_type"` // TracingFilterTypeModel +} + +type TracingTimeWindowModel struct { + SpecificValue types.String `tfsdk:"specific_value"` +} + +func (r *AlertResource) Metadata(_ context.Context, req resource.MetadataRequest, resp *resource.MetadataResponse) { + resp.TypeName = req.ProviderTypeName + "_alert" +} + +func (r *AlertResource) Configure(_ context.Context, req resource.ConfigureRequest, resp *resource.ConfigureResponse) { + if req.ProviderData == nil { + return } + + clientSet, ok := req.ProviderData.(*cxsdk.ClientSet) + if !ok { + resp.Diagnostics.AddError( + "Unexpected Resource 
Configure Type", + fmt.Sprintf("Expected *cxsdk.ClientSet, got: %T. Please report this issue to the provider developers.", req.ProviderData), + ) + return + } + + r.client = clientSet.Alerts() } -func hashTimeFrames() schema.SchemaSetFunc { - return schema.HashResource(timeFrames()) +type advancedTargetSettingsPlanModifier struct{} + +func (a advancedTargetSettingsPlanModifier) Description(ctx context.Context) string { + return "Advanced target settings." } -func commonAlertSchema() map[string]*schema.Schema { - return map[string]*schema.Schema{ - "search_query": searchQuerySchema(), - "severities": { - Type: schema.TypeSet, - Optional: true, - Elem: &schema.Schema{ - Type: schema.TypeString, - ValidateFunc: validation.StringInSlice(alertValidLogSeverities, false), - }, - Description: fmt.Sprintf("An array of log severities that we interested in. Can be one of %q", alertValidLogSeverities), - Set: schema.HashString, - }, - "applications": { - Type: schema.TypeSet, - Optional: true, - Elem: &schema.Schema{ - Type: schema.TypeString, - }, - Description: "An array that contains log’s application names that we want to be alerted on." + - " Applications can be filtered by prefix, suffix, and contains using the next patterns - filter:startsWith:xxx, filter:endsWith:xxx, filter:contains:xxx", - Set: schema.HashString, - }, - "subsystems": { - Type: schema.TypeSet, - Optional: true, - Elem: &schema.Schema{ - Type: schema.TypeString, - }, - Description: "An array that contains log’s subsystem names that we want to be notified on. " + - "Subsystems can be filtered by prefix, suffix, and contains using the next patterns - filter:startsWith:xxx, filter:endsWith:xxx, filter:contains:xxx", - Set: schema.HashString, - }, - "categories": { - Type: schema.TypeSet, - Optional: true, - Elem: &schema.Schema{ - Type: schema.TypeString, - }, - Description: "An array that contains log’s categories that we want to be notified on.", - Set: schema.HashString, - }, - "computers": { - Type: schema.TypeSet, - Optional: true, - Elem: &schema.Schema{ - Type: schema.TypeString, - }, - Description: "An array that contains log’s computer names that we want to be notified on.", - Set: schema.HashString, - }, - "classes": { - Type: schema.TypeSet, - Optional: true, - Elem: &schema.Schema{ - Type: schema.TypeString, - }, - Description: "An array that contains log’s class names that we want to be notified on.", - Set: schema.HashString, - }, - "methods": { - Type: schema.TypeSet, - Optional: true, - Elem: &schema.Schema{ - Type: schema.TypeString, - }, - Description: "An array that contains log’s method names that we want to be notified on.", - Set: schema.HashString, - }, - "ip_addresses": { - Type: schema.TypeSet, - Optional: true, - Elem: &schema.Schema{ - Type: schema.TypeString, - }, - Description: "An array that contains log’s IP addresses that we want to be notified on.", - Set: schema.HashString, - }, +func (a advancedTargetSettingsPlanModifier) MarkdownDescription(ctx context.Context) string { + return "Advanced target settings." 
+} + +func (a advancedTargetSettingsPlanModifier) PlanModifyObject(ctx context.Context, request planmodifier.ObjectRequest, response *planmodifier.ObjectResponse) { + if !request.ConfigValue.IsUnknown() { + return } + + response.PlanValue = request.StateValue } -func searchQuerySchema() *schema.Schema { - return &schema.Schema{ - Type: schema.TypeString, - Optional: true, - Description: "The search_query that we wanted to be notified on.", +type requiredWhenGroupBySet struct { +} + +func (r requiredWhenGroupBySet) Description(ctx context.Context) string { + return "Required when group_by is set." +} + +func (r requiredWhenGroupBySet) MarkdownDescription(ctx context.Context) string { + return "Required when group_by is set." +} + +func (r requiredWhenGroupBySet) ValidateInt64(ctx context.Context, req validator.Int64Request, resp *validator.Int64Response) { + if !req.ConfigValue.IsNull() { + return + } + + var groupBy types.Set + diags := req.Config.GetAttribute(ctx, path.Root("group_by"), &groupBy) + if diags.HasError() { + resp.Diagnostics.Append(diags...) + return + } + + if !(groupBy.IsNull() || groupBy.IsUnknown()) { + resp.Diagnostics.Append(validatordiag.InvalidAttributeCombinationDiagnostic( + req.Path, + fmt.Sprintf("Attribute %q must be specified when %q is specified", req.Path, "group_by"), + )) } } -func standardSchema() map[string]*schema.Schema { - standardSchema := commonAlertSchema() - standardSchema["condition"] = &schema.Schema{ - Type: schema.TypeList, - Required: true, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "immediately": { - Type: schema.TypeBool, - Optional: true, - ExactlyOneOf: []string{"standard.0.condition.0.immediately", - "standard.0.condition.0.more_than", - "standard.0.condition.0.less_than", - "standard.0.condition.0.more_than_usual"}, - Description: "Determines the condition operator." + - " Must be one of - immediately, less_than, more_than or more_than_usual.", - }, - "less_than": { - Type: schema.TypeBool, - Optional: true, - ExactlyOneOf: []string{"standard.0.condition.0.immediately", - "standard.0.condition.0.more_than", - "standard.0.condition.0.less_than", - "standard.0.condition.0.more_than_usual"}, - Description: "Determines the condition operator." + - " Must be one of - immediately, less_than, more_than or more_than_usual.", - RequiredWith: []string{"standard.0.condition.0.time_window", "standard.0.condition.0.threshold"}, - }, - "more_than": { - Type: schema.TypeBool, - Optional: true, - ExactlyOneOf: []string{"standard.0.condition.0.immediately", - "standard.0.condition.0.more_than", - "standard.0.condition.0.less_than", - "standard.0.condition.0.more_than_usual"}, - RequiredWith: []string{"standard.0.condition.0.time_window", "standard.0.condition.0.threshold"}, - Description: "Determines the condition operator." 
+ - " Must be one of - immediately, less_than, more_than or more_than_usual.", +func (r *AlertResource) Schema(_ context.Context, _ resource.SchemaRequest, resp *resource.SchemaResponse) { + resp.Schema = schema.Schema{ + Version: 1, + Attributes: map[string]schema.Attribute{ + "id": schema.StringAttribute{ + Computed: true, + PlanModifiers: []planmodifier.String{ + stringplanmodifier.UseStateForUnknown(), }, - "more_than_usual": { - Type: schema.TypeBool, - Optional: true, - ExactlyOneOf: []string{"standard.0.condition.0.immediately", - "standard.0.condition.0.more_than", - "standard.0.condition.0.less_than", - "standard.0.condition.0.more_than_usual"}, - Description: "Determines the condition operator." + - " Must be one of - immediately, less_than, more_than or more_than_usual.", - }, - "threshold": { - Type: schema.TypeInt, - Optional: true, - ConflictsWith: []string{"standard.0.condition.0.immediately"}, - Description: "The number of log occurrences that is needed to trigger the alert.", - }, - "time_window": { - Type: schema.TypeString, - Optional: true, - ValidateFunc: validation.StringInSlice(alertValidTimeFrames, false), - ConflictsWith: []string{"standard.0.condition.0.immediately"}, - Description: fmt.Sprintf("The bounded time frame for the threshold to be occurred within, to trigger the alert. Can be one of %q", alertValidTimeFrames), - }, - "group_by": { - Type: schema.TypeList, - Optional: true, - Elem: &schema.Schema{ - Type: schema.TypeString, - }, - ConflictsWith: []string{"standard.0.condition.0.immediately"}, - Description: "The fields to 'group by' on. In case of immediately = true switch to group_by_key.", + MarkdownDescription: "Alert ID.", + }, + "name": schema.StringAttribute{ + Required: true, + Validators: []validator.String{ + stringvalidator.LengthAtLeast(1), }, - "group_by_key": { - Type: schema.TypeString, - Optional: true, - ConflictsWith: []string{"standard.0.condition.0.more_than", "standard.0.condition.0.less_than", "standard.0.condition.0.more_than_usual"}, - Description: "The key to 'group by' on. When immediately = true, 'group_by_key' (single string) can be set instead of 'group_by'.", + MarkdownDescription: "Alert name.", + }, + "description": schema.StringAttribute{ + Optional: true, + MarkdownDescription: "Alert description.", + }, + "enabled": schema.BoolAttribute{ + Optional: true, + Computed: true, + Default: booldefault.StaticBool(true), + MarkdownDescription: "Alert enabled status. True by default.", + }, + "priority": schema.StringAttribute{ + Required: true, + Validators: []validator.String{ + stringvalidator.OneOf(validAlertPriorities...), }, - "manage_undetected_values": { - Type: schema.TypeList, - Optional: true, - Computed: true, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "enable_triggering_on_undetected_values": { - Type: schema.TypeBool, + MarkdownDescription: fmt.Sprintf("Alert priority. Valid values: %q.", validAlertPriorities), + }, + "schedule": schema.SingleNestedAttribute{ + Optional: true, + Attributes: map[string]schema.Attribute{ + "active_on": schema.SingleNestedAttribute{ + Required: true, + Attributes: map[string]schema.Attribute{ + "days_of_week": schema.ListAttribute{ Required: true, - Description: "Determines whether the deadman-option is enabled. 
When set to true, auto_retire_ratio is required otherwise auto_retire_ratio should be omitted.", - }, - "auto_retire_ratio": { - Type: schema.TypeString, - Optional: true, - ValidateFunc: validation.StringInSlice(alertValidDeadmanRatioValues, false), - Description: fmt.Sprintf("Defines the triggering auto-retire ratio. Can be one of %q", alertValidDeadmanRatioValues), + ElementType: types.StringType, + Validators: []validator.List{ + listvalidator.ValueStringsAre( + stringvalidator.OneOf(validDaysOfWeek...), + ), + }, + MarkdownDescription: fmt.Sprintf("Days of the week. Valid values: %q.", validDaysOfWeek), }, + "start_time": timeOfDaySchema(), + "end_time": timeOfDaySchema(), }, }, - RequiredWith: []string{"standard.0.condition.0.less_than", "standard.0.condition.0.group_by"}, - Description: "Manage your logs undetected values - when relevant, enable/disable triggering on undetected values and change the auto retire interval. By default (when relevant), triggering is enabled with retire-ratio=NEVER.", - }, - "evaluation_window": { - Type: schema.TypeString, - Optional: true, - Computed: true, - ValidateFunc: validation.StringInSlice(validEvaluationWindow, false), - RequiredWith: []string{"standard.0.condition.0.more_than"}, - Description: fmt.Sprintf("Defines the evaluation-window logic to determine if the threshold has been crossed. Relevant only for more_than condition. Can be one of %q.", validEvaluationWindow), }, + MarkdownDescription: "Alert schedule. Will be activated all the time if not specified.", }, - }, - Description: "Defines the conditions for triggering and notify by the alert", - } - return standardSchema -} - -func ratioSchema() map[string]*schema.Schema { - query1Schema := commonAlertSchema() - query1Schema["alias"] = &schema.Schema{ - Type: schema.TypeString, - Optional: true, - Default: "Query 1", - Description: "Query1 alias.", - } - - return map[string]*schema.Schema{ - "query_1": { - Type: schema.TypeList, - Required: true, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: query1Schema, - }, - }, - "query_2": { - Type: schema.TypeList, - Required: true, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "alias": { - Type: schema.TypeString, - Optional: true, - Default: "Query 2", - Description: "Query2 alias.", - }, - "search_query": searchQuerySchema(), - "severities": { - Type: schema.TypeSet, + "type_definition": schema.SingleNestedAttribute{ + Required: true, + Attributes: map[string]schema.Attribute{ + "logs_immediate": schema.SingleNestedAttribute{ Optional: true, - Elem: &schema.Schema{ - Type: schema.TypeString, - ValidateFunc: validation.StringInSlice(alertValidLogSeverities, false), + Attributes: map[string]schema.Attribute{ + "logs_filter": logsFilterSchema(), + "notification_payload_filter": notificationPayloadFilterSchema(), }, - Description: fmt.Sprintf("An array of log severities that we interested in. 
Can be one of %q", alertValidLogSeverities), - Set: schema.HashString, - }, - "applications": { - Type: schema.TypeSet, - Optional: true, - Elem: &schema.Schema{ - Type: schema.TypeString, + Validators: []validator.Object{ + objectvalidator.ExactlyOneOf( + path.MatchRelative().AtParent().AtName("logs_threshold"), + path.MatchRelative().AtParent().AtName("logs_unusual"), + path.MatchRelative().AtParent().AtName("logs_ratio_threshold"), + path.MatchRelative().AtParent().AtName("logs_new_value"), + path.MatchRelative().AtParent().AtName("logs_unique_count"), + path.MatchRelative().AtParent().AtName("logs_time_relative_threshold"), + path.MatchRelative().AtParent().AtName("metric_threshold"), + path.MatchRelative().AtParent().AtName("metric_unusual"), + path.MatchRelative().AtParent().AtName("tracing_immediate"), + path.MatchRelative().AtParent().AtName("tracing_threshold"), + path.MatchRelative().AtParent().AtName("flow"), + ), }, - Description: "An array that contains log’s application names that we want to be alerted on." + - " Applications can be filtered by prefix, suffix, and contains using the next patterns - filter:startsWith:xxx, filter:endsWith:xxx, filter:contains:xxx", - Set: schema.HashString, }, - "subsystems": { - Type: schema.TypeSet, + "logs_threshold": schema.SingleNestedAttribute{ Optional: true, - Elem: &schema.Schema{ - Type: schema.TypeString, + Attributes: map[string]schema.Attribute{ + "rules": schema.ListNestedAttribute{ + Required: true, + Validators: []validator.List{listvalidator.LengthAtLeast(1)}, + Attributes: map[string]schema.Attribute{ + "threshold": schema.Float64Attribute{ + Required: true, + }, + "time_window": logsTimeWindowSchema(), + "condition": schema.StringAttribute{ + Required: true, + Validators: []validator.String{ + stringvalidator.OneOf(logsThresholdConditionValues...), + }, + MarkdownDescription: fmt.Sprintf("Condition to evaluate the threshold with. Valid values: %q.", logsThresholdConditionValues), + }, + }, + "notification_payload_filter": notificationPayloadFilterSchema(), + "logs_filter": logsFilterSchema(), + // "evaluation_window": schema.StringAttribute{ + // Optional: true, + // Computed: true, + // Default: stringdefault.StaticString("Rolling"), + // Validators: []validator.String{ + // stringvalidator.OneOf(validEvaluationWindowTypes...), + // }, + // MarkdownDescription: fmt.Sprintf("Evaluation window type. Valid values: %q.", validEvaluationWindowTypes), + // }, }, - Description: "An array that contains log’s subsystem names that we want to be notified on. " + - "Subsystems can be filtered by prefix, suffix, and contains using the next patterns - filter:startsWith:xxx, filter:endsWith:xxx, filter:contains:xxx", - Set: schema.HashString, }, - }, - }, - }, - "condition": { - Type: schema.TypeList, - Required: true, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "more_than": { - Type: schema.TypeBool, - Optional: true, - ExactlyOneOf: []string{"ratio.0.condition.0.more_than", "ratio.0.condition.0.less_than"}, - Description: "Determines the condition operator." 
+ - " Must be one of - less_than or more_than.", - }, - "less_than": { - Type: schema.TypeBool, - Optional: true, - ExactlyOneOf: []string{"ratio.0.condition.0.more_than", "ratio.0.condition.0.less_than"}, + // "logs_less_than": schema.SingleNestedAttribute{ + // Optional: true, + // Attributes: map[string]schema.Attribute{ + // "logs_filter": logsFilterSchema(), + // "notification_payload_filter": notificationPayloadFilterSchema(), + // "time_window": logsTimeWindowSchema(), + // "threshold": schema.Int64Attribute{ + // Required: true, + // }, + // "undetected_values_management": undetectedValuesManagementSchema(), + // }, + // }, + // "logs_more_than_usual": schema.SingleNestedAttribute{ + // Optional: true, + // Attributes: map[string]schema.Attribute{ + // "minimum_threshold": schema.Int64Attribute{ + // Required: true, + // }, + // "time_window": logsTimeWindowSchema(), + // "logs_filter": logsFilterSchema(), + // "notification_payload_filter": notificationPayloadFilterSchema(), + // }, + // }, + // "logs_ratio_more_than": schema.SingleNestedAttribute{ + // Optional: true, + // Attributes: map[string]schema.Attribute{ + // "numerator_logs_filter": logsFilterSchema(), + // "numerator_alias": schema.StringAttribute{ + // Required: true, + // }, + // "denominator_logs_filter": logsFilterSchema(), + // "denominator_alias": schema.StringAttribute{ + // Required: true, + // }, + // "threshold": schema.Int64Attribute{ + // Required: true, + // }, + // "time_window": logsRatioTimeWindowSchema(), + // "ignore_infinity": schema.BoolAttribute{ + // Optional: true, + // Computed: true, + // Default: booldefault.StaticBool(false), + // }, + // "notification_payload_filter": notificationPayloadFilterSchema(), + // "group_by_for": logsRatioGroupByForSchema(), + // }, + // }, + // "logs_ratio_less_than": schema.SingleNestedAttribute{ + // Optional: true, + // Attributes: map[string]schema.Attribute{ + // "numerator_logs_filter": logsFilterSchema(), + // "numerator_alias": schema.StringAttribute{ + // Required: true, + // }, + // "denominator_logs_filter": logsFilterSchema(), + // "denominator_alias": schema.StringAttribute{ + // Required: true, + // }, + // "threshold": schema.Int64Attribute{ + // Required: true, + // }, + // "time_window": logsRatioTimeWindowSchema(), + // "ignore_infinity": schema.BoolAttribute{ + // Optional: true, + // Computed: true, + // Default: booldefault.StaticBool(false), + // }, + // "notification_payload_filter": notificationPayloadFilterSchema(), + // "group_by_for": logsRatioGroupByForSchema(), + // "undetected_values_management": undetectedValuesManagementSchema(), + // }, + // }, + // "logs_new_value": schema.SingleNestedAttribute{ + // Optional: true, + // Attributes: map[string]schema.Attribute{ + // "logs_filter": logsFilterSchema(), + // "keypath_to_track": schema.StringAttribute{Required: true}, + // "time_window": logsNewValueTimeWindowSchema(), + // "notification_payload_filter": notificationPayloadFilterSchema(), + // }, + // Validators: []validator.Object{ + // objectvalidator.ConflictsWith(path.MatchRoot("group_by")), + // }, + // }, + // "logs_unique_count": schema.SingleNestedAttribute{ + // Optional: true, + // Attributes: map[string]schema.Attribute{ + // "logs_filter": logsFilterSchema(), + // "notification_payload_filter": notificationPayloadFilterSchema(), + // "time_window": logsUniqueCountTimeWindowSchema(), + // "unique_count_keypath": schema.StringAttribute{Required: true}, + // "max_unique_count": schema.Int64Attribute{Required: true}, + // 
"max_unique_count_per_group_by_key": schema.Int64Attribute{ + // Optional: true, + // Validators: []validator.Int64{ + // int64validator.AlsoRequires(path.MatchRoot("group_by")), + // requiredWhenGroupBySet{}, + // }, + // }, + // }, + // }, + // "logs_time_relative_more_than": schema.SingleNestedAttribute{ + // Optional: true, + // Attributes: map[string]schema.Attribute{ + // "logs_filter": logsFilterSchema(), + // "notification_payload_filter": notificationPayloadFilterSchema(), + // "threshold": schema.Int64Attribute{Required: true}, + // "compared_to": timeRelativeCompareTo(), + // "ignore_infinity": schema.BoolAttribute{ + // Optional: true, + // Computed: true, + // Default: booldefault.StaticBool(false), + // }, + // }, + // }, + // "logs_time_relative_less_than": schema.SingleNestedAttribute{ + // Optional: true, + // Attributes: map[string]schema.Attribute{ + // "logs_filter": logsFilterSchema(), + // "threshold": schema.Int64Attribute{Required: true}, + // "notification_payload_filter": notificationPayloadFilterSchema(), + // "compared_to": schema.StringAttribute{ + // Required: true, + // Validators: []validator.String{ + // stringvalidator.OneOf(validLogsTimeRelativeComparedTo...), + // }, + // MarkdownDescription: fmt.Sprintf("Compared to. Valid values: %q.", validLogsTimeRelativeComparedTo), + // }, + // "ignore_infinity": schema.BoolAttribute{ + // Optional: true, + // Computed: true, + // Default: booldefault.StaticBool(false), + // }, + // "undetected_values_management": undetectedValuesManagementSchema(), + // }, + // }, + // "metric_more_than": schema.SingleNestedAttribute{ + // Optional: true, + // Attributes: map[string]schema.Attribute{ + // "metric_filter": metricFilterSchema(), + // "threshold": schema.Float64Attribute{ + // Required: true, + // }, + // "for_over_pct": schema.Int64Attribute{ + // Required: true, + // }, + // "of_the_last": metricTimeWindowSchema(), + // "missing_values": missingValuesSchema(), + // }, + // }, + // "metric_less_than": schema.SingleNestedAttribute{ + // Optional: true, + // Attributes: map[string]schema.Attribute{ + // "metric_filter": metricFilterSchema(), + // "threshold": schema.Float64Attribute{ + // Required: true, + // }, + // "for_over_pct": schema.Int64Attribute{ + // Required: true, + // }, + // "of_the_last": metricTimeWindowSchema(), + // "missing_values": missingValuesSchema(), + // "undetected_values_management": undetectedValuesManagementSchema(), + // }, + // }, + // "metric_less_than_usual": schema.SingleNestedAttribute{ + // Optional: true, + // Attributes: map[string]schema.Attribute{ + // "metric_filter": metricFilterSchema(), + // "of_the_last": metricTimeWindowSchema(), + // "threshold": schema.Int64Attribute{ + // Required: true, + // }, + // "for_over_pct": schema.Int64Attribute{ + // Required: true, + // }, + // "min_non_null_values_pct": schema.Int64Attribute{ + // Required: true, + // }, + // }, + // }, + // "metric_more_than_usual": schema.SingleNestedAttribute{ + // Optional: true, + // Attributes: map[string]schema.Attribute{ + // "metric_filter": metricFilterSchema(), + // "of_the_last": metricTimeWindowSchema(), + // "threshold": schema.Int64Attribute{ + // Required: true, + // }, + // "for_over_pct": schema.Int64Attribute{ + // Required: true, + // }, + // "min_non_null_values_pct": schema.Int64Attribute{ + // Required: true, + // }, + // }, + // }, + // "metric_more_than_or_equals": schema.SingleNestedAttribute{ + // Optional: true, + // Attributes: map[string]schema.Attribute{ + // "metric_filter": 
metricFilterSchema(), + // "threshold": schema.Float64Attribute{ + // Required: true, + // }, + // "for_over_pct": schema.Int64Attribute{ + // Required: true, + // }, + // "of_the_last": metricTimeWindowSchema(), + // "missing_values": missingValuesSchema(), + // }, + // }, + // "metric_less_than_or_equals": schema.SingleNestedAttribute{ + // Optional: true, + // Attributes: map[string]schema.Attribute{ + // "metric_filter": metricFilterSchema(), + // "threshold": schema.Float64Attribute{ + // Required: true, + // }, + // "for_over_pct": schema.Int64Attribute{ + // Required: true, + // }, + // "of_the_last": metricTimeWindowSchema(), + // "missing_values": missingValuesSchema(), + // "undetected_values_management": undetectedValuesManagementSchema(), + // }, + // }, + // "tracing_immediate": schema.SingleNestedAttribute{ + // Optional: true, + // Attributes: map[string]schema.Attribute{ + // "tracing_filter": tracingQuerySchema(), + // "notification_payload_filter": notificationPayloadFilterSchema(), + // }, + // }, + // "tracing_more_than": schema.SingleNestedAttribute{ + // Optional: true, + // Attributes: map[string]schema.Attribute{ + // "tracing_filter": tracingQuerySchema(), + // "notification_payload_filter": notificationPayloadFilterSchema(), + // "time_window": tracingTimeWindowSchema(), + // "span_amount": schema.Int64Attribute{ + // Required: true, + // }, + // }, + // }, + "flow": schema.SingleNestedAttribute{ + Optional: true, + Attributes: map[string]schema.Attribute{ + "stages": schema.ListNestedAttribute{ + Required: true, + NestedObject: schema.NestedAttributeObject{ + Attributes: map[string]schema.Attribute{ + "flow_stages_groups": schema.ListNestedAttribute{ + Required: true, + NestedObject: schema.NestedAttributeObject{ + Attributes: map[string]schema.Attribute{ + "alert_defs": schema.ListNestedAttribute{ + Required: true, + NestedObject: schema.NestedAttributeObject{ + Attributes: map[string]schema.Attribute{ + "id": schema.StringAttribute{ + Required: true, + }, + "not": schema.BoolAttribute{ + Optional: true, + Computed: true, + Default: booldefault.StaticBool(false), + }, + }, + }, + }, + "next_op": schema.StringAttribute{ + Required: true, + Validators: []validator.String{ + stringvalidator.OneOf(validFlowStagesGroupNextOps...), + }, + MarkdownDescription: fmt.Sprintf("Next operation. Valid values: %q.", validFlowStagesGroupNextOps), + }, + "alerts_op": schema.StringAttribute{ + Required: true, + Validators: []validator.String{ + stringvalidator.OneOf(validFlowStagesGroupAlertsOps...), + }, + MarkdownDescription: fmt.Sprintf("Alerts operation. Valid values: %q.", validFlowStagesGroupAlertsOps), + }, + }, + }, + }, + "timeframe_ms": schema.Int64Attribute{ + Optional: true, + Computed: true, + Default: int64default.StaticInt64(0), + }, + "timeframe_type": schema.StringAttribute{ + Required: true, + Validators: []validator.String{ + stringvalidator.OneOf(validFlowStageTimeFrameTypes...), + }, + }, + }, + }, + }, + "enforce_suppression": schema.BoolAttribute{ + Optional: true, + Computed: true, + Default: booldefault.StaticBool(false), + }, + }, }, - "ratio_threshold": { - Type: schema.TypeFloat, - Required: true, - Description: "The ratio(between the queries) threshold that is needed to trigger the alert.", + }, + MarkdownDescription: "Alert type definition. 
Exactly one of the following must be specified: logs_immediate, logs_more_than, logs_less_than, logs_more_than_usual, logs_ratio_more_than, logs_ratio_less_than, logs_new_value, logs_unique_count, logs_time_relative_more_than, logs_time_relative_less_than, metric_more_than, metric_less_than, metric_more_than_usual, metric_less_than_usual, metric_less_than_or_equals, metric_more_than_or_equals, tracing_immediate, tracing_more_than, flow.", + }, + "group_by": schema.SetAttribute{ + Optional: true, + ElementType: types.StringType, + MarkdownDescription: "Group by fields.", + }, + "incidents_settings": schema.SingleNestedAttribute{ + Optional: true, + Computed: true, + PlanModifiers: []planmodifier.Object{ + objectplanmodifier.UseStateForUnknown(), + }, + Attributes: map[string]schema.Attribute{ + "notify_on": schema.StringAttribute{ + Required: true, + Validators: []validator.String{ + stringvalidator.OneOf(validNotifyOn...), + }, + MarkdownDescription: fmt.Sprintf("Notify on. Valid values: %q.", validNotifyOn), }, - "time_window": { - Type: schema.TypeString, - Required: true, - ValidateFunc: validation.StringInSlice(alertValidTimeFrames, false), - Description: fmt.Sprintf("The bounded time frame for the threshold to be occurred within, to trigger the alert. Can be one of %q", alertValidTimeFrames), + "retriggering_period": schema.SingleNestedAttribute{ + Required: true, + Attributes: map[string]schema.Attribute{ + "minutes": schema.Int64Attribute{ + Required: true, + }, + }, }, - "ignore_infinity": { - Type: schema.TypeBool, - Optional: true, - ConflictsWith: []string{"ratio.0.condition.0.less_than"}, - Description: "Not triggered when threshold is infinity (divided by zero).", + }, + }, + "notification_group": schema.SingleNestedAttribute{ + Optional: true, + Computed: true, + Default: objectdefault.StaticValue(types.ObjectValueMust(notificationGroupAttr(), map[string]attr.Value{ + "group_by_fields": types.ListNull(types.StringType), + "advanced_target_settings": types.SetNull(types.ObjectType{ + AttrTypes: advancedTargetSettingsAttr(), + }), + "simple_target_settings": types.SetNull(types.ObjectType{ + AttrTypes: simpleTargetSettingsAttr(), + }), + })), + Attributes: map[string]schema.Attribute{ + "group_by_fields": schema.ListAttribute{ + Optional: true, + ElementType: types.StringType, }, - "group_by": { - Type: schema.TypeList, + "advanced_target_settings": schema.SetNestedAttribute{ Optional: true, - Elem: &schema.Schema{ - Type: schema.TypeString, + NestedObject: schema.NestedAttributeObject{ + Attributes: map[string]schema.Attribute{ + "retriggering_period": schema.SingleNestedAttribute{ + Optional: true, + Computed: true, + Default: objectdefault.StaticValue(types.ObjectValueMust(retriggeringPeriodAttr(), map[string]attr.Value{ + "minutes": types.Int64Value(10), + })), + Attributes: map[string]schema.Attribute{ + "minutes": schema.Int64Attribute{ + Required: true, + }, + }, + MarkdownDescription: "Retriggering period in minutes. 10 minutes by default.", + }, + "notify_on": schema.StringAttribute{ + Optional: true, + Computed: true, + Default: stringdefault.StaticString("Triggered Only"), + Validators: []validator.String{ + stringvalidator.OneOf(validNotifyOn...), + }, + MarkdownDescription: fmt.Sprintf("Notify on. Valid values: %q. 
Triggered Only by default.", validNotifyOn), + }, + "integration_id": schema.StringAttribute{ + Optional: true, + Validators: []validator.String{ + stringvalidator.ExactlyOneOf(path.MatchRelative().AtParent().AtName("recipients")), + }, + }, + "recipients": schema.SetAttribute{ + Optional: true, + ElementType: types.StringType, + }, + }, + PlanModifiers: []planmodifier.Object{ + objectplanmodifier.UseStateForUnknown(), + }, + }, + Validators: []validator.Set{ + setvalidator.ExactlyOneOf( + path.MatchRelative().AtParent().AtName("simple_target_settings"), + ), }, - Description: "The fields to 'group by' on.", - }, - "group_by_q1": { - Type: schema.TypeBool, - Optional: true, - RequiredWith: []string{"ratio.0.condition.0.group_by"}, - ConflictsWith: []string{"ratio.0.condition.0.group_by_q2", - "ratio.0.condition.0.group_by_both"}, - }, - "group_by_q2": { - Type: schema.TypeBool, - Optional: true, - RequiredWith: []string{"ratio.0.condition.0.group_by"}, - ConflictsWith: []string{"ratio.0.condition.0.group_by_q1", - "ratio.0.condition.0.group_by_both"}, - }, - "group_by_both": { - Type: schema.TypeBool, - Optional: true, - RequiredWith: []string{"ratio.0.condition.0.group_by"}, - ConflictsWith: []string{"ratio.0.condition.0.group_by_q1", - "ratio.0.condition.0.group_by_q2"}, }, - "manage_undetected_values": { - Type: schema.TypeList, + "simple_target_settings": schema.SetNestedAttribute{ Optional: true, - Computed: true, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "enable_triggering_on_undetected_values": { - Type: schema.TypeBool, - Required: true, - Description: "Determines whether the deadman-option is enabled. When set to true, auto_retire_ratio is required otherwise auto_retire_ratio should be omitted.", + NestedObject: schema.NestedAttributeObject{ + Attributes: map[string]schema.Attribute{ + "integration_id": schema.StringAttribute{ + Optional: true, + Validators: []validator.String{ + stringvalidator.ExactlyOneOf(path.MatchRelative().AtParent().AtName("recipients")), + }, }, - "auto_retire_ratio": { - Type: schema.TypeString, - Optional: true, - ValidateFunc: validation.StringInSlice(alertValidDeadmanRatioValues, false), - Description: fmt.Sprintf("Defines the triggering auto-retire ratio. Can be one of %q", alertValidDeadmanRatioValues), + "recipients": schema.SetAttribute{ + Optional: true, + ElementType: types.StringType, }, }, }, - RequiredWith: []string{"ratio.0.condition.0.less_than", "ratio.0.condition.0.group_by"}, - Description: "Manage your logs undetected values - when relevant, enable/disable triggering on undetected values and change the auto retire interval. By default (when relevant), triggering is enabled with retire-ratio=NEVER.", }, }, }, - Description: "Defines the conditions for triggering and notify by the alert", + "labels": schema.MapAttribute{ + Optional: true, + ElementType: types.StringType, + }, }, + MarkdownDescription: "Coralogix Alert. 
For more info please review - https://coralogix.com/docs/getting-started-with-coralogix-alerts/.", } } -func newValueSchema() map[string]*schema.Schema { - newValueSchema := commonAlertSchema() - newValueSchema["condition"] = &schema.Schema{ - Type: schema.TypeList, +func timeRelativeCompareTo() schema.StringAttribute { + return schema.StringAttribute{ Required: true, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "key_to_track": { - Type: schema.TypeString, - Required: true, - ValidateFunc: validation.StringIsNotEmpty, - Description: "Select a key to track. Note, this key needs to have less than 50K unique values in" + - " the defined timeframe.", - }, - "time_window": { - Type: schema.TypeString, - Required: true, - ValidateFunc: validation.StringInSlice(alertValidNewValueTimeFrames, false), - Description: fmt.Sprintf("The bounded time frame for the threshold to be occurred within, to trigger the alert. Can be one of %q", alertValidNewValueTimeFrames), + Validators: []validator.String{ + stringvalidator.OneOf(validLogsTimeRelativeComparedTo...), + }, + MarkdownDescription: fmt.Sprintf("Compared to. Valid values: %q.", validLogsTimeRelativeComparedTo), + } +} + +func logsRatioGroupByForSchema() schema.StringAttribute { + return schema.StringAttribute{ + Optional: true, + Computed: true, + Default: stringdefault.StaticString("Both"), + Validators: []validator.String{ + stringvalidator.OneOf(validLogsRatioGroupByFor...), + stringvalidator.AlsoRequires(path.MatchRoot("group_by")), + }, + MarkdownDescription: fmt.Sprintf("Group by for. Valid values: %q. 'Both' by default.", validLogsRatioGroupByFor), + } +} + +func missingValuesSchema() schema.SingleNestedAttribute { + return schema.SingleNestedAttribute{ + Optional: true, + Computed: true, + PlanModifiers: []planmodifier.Object{ + objectplanmodifier.UseStateForUnknown(), + }, + Attributes: map[string]schema.Attribute{ + "replace_with_zero": schema.BoolAttribute{ + Optional: true, + Validators: []validator.Bool{ + boolvalidator.ExactlyOneOf(path.MatchRelative().AtParent().AtName("min_non_null_values_pct")), }, }, + "min_non_null_values_pct": schema.Int64Attribute{ + Optional: true, + }, }, - Description: "Defines the conditions for triggering and notify by the alert", } - return newValueSchema } -func uniqueCountSchema() map[string]*schema.Schema { - uniqueCountSchema := commonAlertSchema() - uniqueCountSchema["condition"] = &schema.Schema{ - Type: schema.TypeList, +func tracingQuerySchema() schema.SingleNestedAttribute { + return schema.SingleNestedAttribute{ Required: true, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "unique_count_key": { - Type: schema.TypeString, - Required: true, - ValidateFunc: validation.StringIsNotEmpty, - Description: "Defines the key to match to track its unique count.", + Attributes: map[string]schema.Attribute{ + "latency_threshold_ms": schema.Int64Attribute{ + Required: true, + }, + "tracing_label_filters": tracingLabelFiltersSchema(), + }, + } +} + +func tracingTimeWindowSchema() schema.SingleNestedAttribute { + return schema.SingleNestedAttribute{ + Required: true, + Attributes: map[string]schema.Attribute{ + "specific_value": schema.StringAttribute{ + Required: true, + Validators: []validator.String{ + stringvalidator.OneOf(validTracingTimeWindow...), }, - "max_unique_values": { - Type: schema.TypeInt, + MarkdownDescription: fmt.Sprintf("Specific value. 
Valid values: %q.", validTracingTimeWindow), + }, + }, + } +} + +func tracingLabelFiltersSchema() schema.SingleNestedAttribute { + return schema.SingleNestedAttribute{ + Required: true, + Attributes: map[string]schema.Attribute{ + "application_name": tracingFiltersTypeSchema(), + "subsystem_name": tracingFiltersTypeSchema(), + "service_name": tracingFiltersTypeSchema(), + "operation_name": tracingFiltersTypeSchema(), + "span_fields": tracingSpanFieldsFilterSchema(), + }, + } +} + +func tracingFiltersTypeSchema() schema.SetNestedAttribute { + return schema.SetNestedAttribute{ + Optional: true, + NestedObject: schema.NestedAttributeObject{ + Attributes: tracingFiltersTypeSchemaAttributes(), + }, + } +} + +func tracingFiltersTypeSchemaAttributes() map[string]schema.Attribute { + return map[string]schema.Attribute{ + "values": schema.SetAttribute{ + Required: true, + ElementType: types.StringType, + }, + "operation": schema.StringAttribute{ + Optional: true, + Computed: true, + Default: stringdefault.StaticString("IS"), + Validators: []validator.String{ + stringvalidator.OneOf(validTracingFilterOperations...), + }, + MarkdownDescription: fmt.Sprintf("Operation. Valid values: %q. 'IS' by default.", validTracingFilterOperations), + }, + } +} + +func tracingSpanFieldsFilterSchema() schema.SetNestedAttribute { + return schema.SetNestedAttribute{ + Optional: true, + NestedObject: schema.NestedAttributeObject{ + Attributes: map[string]schema.Attribute{ + "key": schema.StringAttribute{ Required: true, }, - "time_window": { - Type: schema.TypeString, - Required: true, - ValidateFunc: validation.StringInSlice(alertValidUniqueCountTimeFrames, false), - Description: fmt.Sprintf("The bounded time frame for the threshold to be occurred within, to trigger the alert. Can be one of %q", alertValidUniqueCountTimeFrames), - }, - "group_by_key": { - Type: schema.TypeString, - Optional: true, - RequiredWith: []string{"unique_count.0.condition.0.max_unique_values_for_group_by"}, - Description: "The key to 'group by' on.", - }, - "max_unique_values_for_group_by": { - Type: schema.TypeInt, - Optional: true, - RequiredWith: []string{"unique_count.0.condition.0.group_by_key"}, + "filter_type": schema.SingleNestedAttribute{ + Optional: true, + Attributes: tracingFiltersTypeSchemaAttributes(), }, }, }, - Description: "Defines the conditions for triggering and notify by the alert", } - return uniqueCountSchema } -func timeRelativeSchema() map[string]*schema.Schema { - timeRelativeSchema := commonAlertSchema() - timeRelativeSchema["condition"] = &schema.Schema{ - Type: schema.TypeList, +func metricFilterSchema() schema.Attribute { + return schema.SingleNestedAttribute{ Required: true, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "less_than": { - Type: schema.TypeBool, - Optional: true, - ExactlyOneOf: []string{"time_relative.0.condition.0.more_than", - "time_relative.0.condition.0.less_than"}, - Description: "Determines the condition operator." + - " Must be one of - less_than or more_than.", - }, - "more_than": { - Type: schema.TypeBool, - Optional: true, - ExactlyOneOf: []string{"time_relative.0.condition.0.more_than", - "time_relative.0.condition.0.less_than"}, - Description: "Determines the condition operator." 
+ - " Must be one of - less_than or more_than.", - }, - "ratio_threshold": { - Type: schema.TypeFloat, - Required: true, - Description: "The ratio threshold that is needed to trigger the alert.", - }, - "relative_time_window": { - Type: schema.TypeString, - Required: true, - ValidateFunc: validation.StringInSlice(alertValidRelativeTimeFrames, false), - Description: fmt.Sprintf("Time-window to compare with. Can be one of %q.", alertValidRelativeTimeFrames), + Attributes: map[string]schema.Attribute{ + "promql": schema.StringAttribute{ + Required: true, + }, + }, + } +} + +func metricTimeWindowSchema() schema.SingleNestedAttribute { + return schema.SingleNestedAttribute{ + Required: true, + Attributes: map[string]schema.Attribute{ + "specific_value": schema.StringAttribute{ + Required: true, + Validators: []validator.String{ + stringvalidator.OneOf(validMetricTimeWindowValues...), }, - "ignore_infinity": { - Type: schema.TypeBool, - Optional: true, - ConflictsWith: []string{"time_relative.0.condition.0.less_than"}, - Description: "Not triggered when threshold is infinity (divided by zero).", + MarkdownDescription: fmt.Sprintf("Specific value. Valid values: %q.", validMetricTimeWindowValues), + }, + }, + } +} + +func logsFilterSchema() schema.SingleNestedAttribute { + return schema.SingleNestedAttribute{ + Optional: true, + Computed: true, + PlanModifiers: []planmodifier.Object{ + objectplanmodifier.UseStateForUnknown(), + }, + Attributes: map[string]schema.Attribute{ + "lucene_filter": schema.SingleNestedAttribute{ + Optional: true, + Computed: true, + PlanModifiers: []planmodifier.Object{ + objectplanmodifier.UseStateForUnknown(), }, - "group_by": { - Type: schema.TypeList, - Optional: true, - Elem: &schema.Schema{ - Type: schema.TypeString, + Attributes: map[string]schema.Attribute{ + "lucene_query": schema.StringAttribute{ + Optional: true, }, - Description: "The fields to 'group by' on.", - }, - "manage_undetected_values": { - Type: schema.TypeList, - Optional: true, - Computed: true, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "enable_triggering_on_undetected_values": { - Type: schema.TypeBool, - Required: true, - Description: "Determines whether the deadman-option is enabled. When set to true, auto_retire_ratio is required otherwise auto_retire_ratio should be omitted.", - }, - "auto_retire_ratio": { - Type: schema.TypeString, - Optional: true, - ValidateFunc: validation.StringInSlice(alertValidDeadmanRatioValues, false), - Description: fmt.Sprintf("Defines the triggering auto-retire ratio. Can be one of %q", alertValidDeadmanRatioValues), + "label_filters": schema.SingleNestedAttribute{ + Optional: true, + Computed: true, + Default: objectdefault.StaticValue(types.ObjectValueMust(labelFiltersAttr(), map[string]attr.Value{ + "application_name": types.SetNull(types.ObjectType{AttrTypes: labelFilterTypesAttr()}), + "subsystem_name": types.SetNull(types.ObjectType{AttrTypes: labelFilterTypesAttr()}), + "severities": types.SetNull(types.StringType), + })), + Attributes: map[string]schema.Attribute{ + "application_name": logsAttributeFilterSchema(), + "subsystem_name": logsAttributeFilterSchema(), + "severities": schema.SetAttribute{ + Optional: true, + ElementType: types.StringType, + Validators: []validator.Set{ + setvalidator.ValueStringsAre( + stringvalidator.OneOf(validLogSeverities...), + ), + }, + MarkdownDescription: fmt.Sprintf("Severities. 
Valid values: %q.", validLogSeverities), }, }, }, - RequiredWith: []string{"time_relative.0.condition.0.less_than", "time_relative.0.condition.0.group_by"}, - Description: "Manage your logs undetected values - when relevant, enable/disable triggering on undetected values and change the auto retire interval. By default (when relevant), triggering is enabled with retire-ratio=NEVER.", }, }, }, - Description: "Defines the conditions for triggering and notify by the alert", } - return timeRelativeSchema } -func metricSchema() map[string]*schema.Schema { - return map[string]*schema.Schema{ - "lucene": { - Type: schema.TypeList, - MaxItems: 1, - Optional: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "search_query": { - Type: schema.TypeString, - Required: true, - Description: "Regular expiration. More info: https://coralogix.com/blog/regex-101/", - }, - "condition": { - Type: schema.TypeList, - Required: true, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "metric_field": { - Type: schema.TypeString, - Required: true, - Description: "The name of the metric field to alert on.", - }, - "arithmetic_operator": { - Type: schema.TypeString, - Required: true, - ValidateFunc: validation.StringInSlice(alertValidArithmeticOperators, false), - Description: fmt.Sprintf("The arithmetic operator to use on the alert. can be one of %q", alertValidArithmeticOperators), - }, - "arithmetic_operator_modifier": { - Type: schema.TypeInt, - Optional: true, - ValidateFunc: validation.IntBetween(0, 100), - Description: "When arithmetic_operator = \"Percentile\" you need to supply the value in this property, 0 < value < 100.", - }, - "less_than": { - Type: schema.TypeBool, - Optional: true, - ExactlyOneOf: []string{"metric.0.lucene.0.condition.0.less_than", - "metric.0.lucene.0.condition.0.more_than"}, - Description: "Determines the condition operator." + - " Must be one of - less_than or more_than.", - }, - "more_than": { - Type: schema.TypeBool, - Optional: true, - ExactlyOneOf: []string{"metric.0.lucene.0.condition.0.less_than", - "metric.0.lucene.0.condition.0.more_than"}, - Description: "Determines the condition operator." + - " Must be one of - less_than or more_than.", - }, - "threshold": { - Type: schema.TypeFloat, - Required: true, - Description: "The number of log threshold that is needed to trigger the alert.", - }, - "sample_threshold_percentage": { - Type: schema.TypeInt, - Required: true, - ValidateFunc: validation.All(validation.IntDivisibleBy(10), validation.IntBetween(0, 100)), - Description: "The metric value must cross the threshold within this percentage of the timeframe (sum and count arithmetic operators do not use this parameter since they aggregate over the entire requested timeframe), increments of 10, 0 <= value <= 100.", - }, - "time_window": { - Type: schema.TypeString, - Required: true, - ValidateFunc: validation.StringInSlice(alertValidMetricTimeFrames, false), - Description: fmt.Sprintf("The bounded time frame for the threshold to be occurred within, to trigger the alert. 
Can be one of %q", alertValidMetricTimeFrames), - }, - "group_by": { - Type: schema.TypeList, - Optional: true, - Elem: &schema.Schema{ - Type: schema.TypeString, - }, - Description: "The fields to 'group by' on.", - }, - "replace_missing_value_with_zero": { - Type: schema.TypeBool, - Optional: true, - ConflictsWith: []string{"metric.0.lucene.0.condition.0.min_non_null_values_percentage"}, - Description: "If set to true, missing data will be considered as 0, otherwise, it will not be considered at all.", - }, - "min_non_null_values_percentage": { - Type: schema.TypeInt, - Optional: true, - ValidateFunc: validation.All(validation.IntDivisibleBy(10), validation.IntBetween(0, 100)), - ConflictsWith: []string{"metric.0.lucene.0.condition.0.replace_missing_value_with_zero"}, - Description: "The minimum percentage of the timeframe that should have values for this alert to trigger", - }, - "manage_undetected_values": { - Type: schema.TypeList, - Optional: true, - Computed: true, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "enable_triggering_on_undetected_values": { - Type: schema.TypeBool, - Required: true, - Description: "Determines whether the deadman-option is enabled. When set to true, auto_retire_ratio is required otherwise auto_retire_ratio should be omitted.", - }, - "auto_retire_ratio": { - Type: schema.TypeString, - Optional: true, - ValidateFunc: validation.StringInSlice(alertValidDeadmanRatioValues, false), - Description: fmt.Sprintf("Defines the triggering auto-retire ratio. Can be one of %q", alertValidDeadmanRatioValues), - }, - }, - }, - RequiredWith: []string{"metric.0.lucene.0.condition.0.less_than", "metric.0.lucene.0.condition.0.group_by"}, - Description: "Manage your logs undetected values - when relevant, enable/disable triggering on undetected values and change the auto retire interval. By default (when relevant), triggering is enabled with retire-ratio=NEVER.", - }, - }, - }, - Description: "Defines the conditions for triggering and notify by the alert", +func logsAttributeFilterSchema() schema.SetNestedAttribute { + return schema.SetNestedAttribute{ + Optional: true, + NestedObject: schema.NestedAttributeObject{ + Attributes: map[string]schema.Attribute{ + "value": schema.StringAttribute{ + Required: true, + }, + "operation": schema.StringAttribute{ + Optional: true, + Computed: true, + Default: stringdefault.StaticString("IS"), + Validators: []validator.String{ + stringvalidator.OneOf(validLogFilterOperationType...), }, + MarkdownDescription: fmt.Sprintf("Operation. Valid values: %q.'IS' by default.", validLogFilterOperationType), }, }, - ExactlyOneOf: []string{"metric.0.lucene", "metric.0.promql"}, }, - "promql": { - Type: schema.TypeList, - MaxItems: 1, - Optional: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "search_query": { - Type: schema.TypeString, - Required: true, - Description: "Regular expiration. More info: https://coralogix.com/blog/regex-101/", - }, - "condition": { - Type: schema.TypeList, - Required: true, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "less_than": { - Type: schema.TypeBool, - Optional: true, - ExactlyOneOf: []string{ - "metric.0.promql.0.condition.0.more_than", - "metric.0.promql.0.condition.0.more_than_usual", - "metric.0.promql.0.condition.0.less_than_usual", - "metric.0.promql.0.condition.0.more_than_or_equal", - "metric.0.promql.0.condition.0.less_than_or_equal", - }, - Description: "Determines the condition operator." 
+ - " Must be one of - immediately, less_than, more_than, more_than_usual, less_than_usual, more_than_or_equal or less_than_or_equal.", - }, - "more_than": { - Type: schema.TypeBool, - Optional: true, - Description: "Determines the condition operator." + - " Must be one of - immediately, less_than, more_than, more_than_usual, less_than_usual, more_than_or_equal or less_than_or_equal.", - }, - "more_than_usual": { - Type: schema.TypeBool, - Optional: true, - Description: "Determines the condition operator." + - " Must be one of - immediately, less_than, more_than, more_than_usual, less_than_usual, more_than_or_equal or less_than_or_equal.", - }, - "less_than_usual": { - Type: schema.TypeBool, - Optional: true, - Description: "Determines the condition operator." + - " Must be one of - immediately, less_than, more_than, more_than_usual, less_than_usual, more_than_or_equal or less_than_or_equal.", - }, - "more_than_or_equal": { - Type: schema.TypeBool, - Optional: true, - Description: "Determines the condition operator." + - " Must be one of - immediately, less_than, more_than, more_than_usual, less_than_usual, more_than_or_equal or less_than_or_equal.", - }, - "less_than_or_equal": { - Type: schema.TypeBool, - Optional: true, - Description: "Determines the condition operator." + - " Must be one of - immediately, less_than, more_than, more_than_usual, less_than_usual, more_than_or_equal or less_than_or_equal.", - }, - "threshold": { - Type: schema.TypeFloat, - Required: true, - Description: "The threshold that is needed to trigger the alert.", - }, - "time_window": { - Type: schema.TypeString, - Required: true, - ValidateFunc: validation.StringInSlice(alertValidMetricTimeFrames, false), - Description: fmt.Sprintf("The bounded time frame for the threshold to be occurred within, to trigger the alert. Can be one of %q", alertValidMetricTimeFrames), - }, - "sample_threshold_percentage": { - Type: schema.TypeInt, - Required: true, - ValidateFunc: validation.All(validation.IntDivisibleBy(10), validation.IntBetween(0, 100)), - }, - "replace_missing_value_with_zero": { - Type: schema.TypeBool, - Optional: true, - ConflictsWith: []string{"metric.0.promql.0.condition.0.min_non_null_values_percentage", "metric.0.promql.0.condition.0.more_than_usual"}, - Description: "If set to true, missing data will be considered as 0, otherwise, it will not be considered at all.", - }, - "min_non_null_values_percentage": { - Type: schema.TypeInt, - Optional: true, - ConflictsWith: []string{"metric.0.promql.0.condition.0.replace_missing_value_with_zero"}, - ValidateFunc: validation.All(validation.IntDivisibleBy(10), validation.IntBetween(0, 100)), - }, - "manage_undetected_values": { - Type: schema.TypeList, - Optional: true, - Computed: true, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "enable_triggering_on_undetected_values": { - Type: schema.TypeBool, - Required: true, - Description: "Determines whether the deadman-option is enabled. When set to true, auto_retire_ratio is required otherwise auto_retire_ratio should be omitted.", - }, - "auto_retire_ratio": { - Type: schema.TypeString, - Optional: true, - ValidateFunc: validation.StringInSlice(alertValidDeadmanRatioValues, false), - Description: fmt.Sprintf("Defines the triggering auto-retire ratio. 
Can be one of %q", alertValidDeadmanRatioValues), - }, - }, - }, - ConflictsWith: []string{"metric.0.promql.0.condition.0.more_than", "metric.0.promql.0.condition.0.more_than_or_equal", "metric.0.promql.0.condition.0.more_than_usual", "metric.0.promql.0.condition.0.less_than_usual"}, - Description: "Manage your logs undetected values - when relevant, enable/disable triggering on undetected values and change the auto retire interval. By default (when relevant), triggering is enabled with retire-ratio=NEVER.", - }, - }, - }, - Description: "Defines the conditions for triggering and notify by the alert", - }, + } +} + +func notificationPayloadFilterSchema() schema.SetAttribute { + return schema.SetAttribute{ + Optional: true, + ElementType: types.StringType, + } +} + +func timeOfDaySchema() schema.SingleNestedAttribute { + return schema.SingleNestedAttribute{ + Required: true, + Attributes: map[string]schema.Attribute{ + "hours": schema.Int64Attribute{ + Required: true, + Validators: []validator.Int64{ + int64validator.Between(0, 23), + }, + }, + "minutes": schema.Int64Attribute{ + Required: true, + Validators: []validator.Int64{ + int64validator.Between(0, 59), }, }, - ExactlyOneOf: []string{"metric.0.lucene", "metric.0.promql"}, }, } } -func tracingSchema() map[string]*schema.Schema { - return map[string]*schema.Schema{ - "applications": { - Type: schema.TypeSet, - Optional: true, - Elem: &schema.Schema{ - Type: schema.TypeString, +func logsTimeWindowSchema() schema.SingleNestedAttribute { + return schema.SingleNestedAttribute{ + Required: true, + Attributes: map[string]schema.Attribute{ + "specific_value": schema.StringAttribute{ + Required: true, + Validators: []validator.String{ + stringvalidator.OneOf(validLogsTimeWindowValues...), + }, + MarkdownDescription: fmt.Sprintf("Time window value. Valid values: %q.", validLogsTimeWindowValues), }, - Description: "An array that contains log’s application names that we want to be alerted on." + - " Applications can be filtered by prefix, suffix, and contains using the next patterns - filter:notEquals:xxx, filter:startsWith:xxx, filter:endsWith:xxx, filter:contains:xxx", - Set: schema.HashString, }, - "subsystems": { - Type: schema.TypeSet, - Optional: true, - Elem: &schema.Schema{ - Type: schema.TypeString, + } +} + +func logsRatioTimeWindowSchema() schema.SingleNestedAttribute { + return schema.SingleNestedAttribute{ + Required: true, + Attributes: map[string]schema.Attribute{ + "specific_value": schema.StringAttribute{ + Required: true, + Validators: []validator.String{ + stringvalidator.OneOf(validLogsRatioTimeWindowValues...), + }, + MarkdownDescription: fmt.Sprintf("Time window value. Valid values: %q.", validLogsRatioTimeWindowValues), }, - Description: "An array that contains log’s subsystems names that we want to be alerted on." + - " Applications can be filtered by prefix, suffix, and contains using the next patterns - filter:notEquals:xxx, filter:startsWith:xxx, filter:endsWith:xxx, filter:contains:xxx", - Set: schema.HashString, }, - "services": { - Type: schema.TypeSet, - Optional: true, - Elem: &schema.Schema{ - Type: schema.TypeString, + } +} + +func logsNewValueTimeWindowSchema() schema.Attribute { + return schema.SingleNestedAttribute{ + Required: true, + Attributes: map[string]schema.Attribute{ + "specific_value": schema.StringAttribute{ + Required: true, + Validators: []validator.String{ + stringvalidator.OneOf(validLogsNewValueTimeWindowValues...), + }, + MarkdownDescription: fmt.Sprintf("Time window value. 
Valid values: %q.", validLogsNewValueTimeWindowValues), }, - Description: "An array that contains log’s services names that we want to be alerted on." + - " Applications can be filtered by prefix, suffix, and contains using the next patterns - filter:notEquals:xxx, filter:startsWith:xxx, filter:endsWith:xxx, filter:contains:xxx", - Set: schema.HashString, }, - "tag_filter": { - Type: schema.TypeSet, - Optional: true, - Elem: tagFilterSchema(), - Set: schema.HashResource(tagFilterSchema()), - }, - "latency_threshold_milliseconds": { - Type: schema.TypeFloat, - Optional: true, - ValidateFunc: validation.FloatAtLeast(0), - }, - "condition": { - Type: schema.TypeList, - Required: true, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "immediately": { - Type: schema.TypeBool, - Optional: true, - ExactlyOneOf: []string{"tracing.0.condition.0.immediately", "tracing.0.condition.0.more_than"}, - Description: "Determines the condition operator." + - " Must be one of - immediately or more_than.", - }, - "more_than": { - Type: schema.TypeBool, - Optional: true, - ExactlyOneOf: []string{"tracing.0.condition.0.immediately", "tracing.0.condition.0.more_than"}, - RequiredWith: []string{"tracing.0.condition.0.time_window"}, - Description: "Determines the condition operator." + - " Must be one of - immediately or more_than.", - }, - "threshold": { - Type: schema.TypeInt, - Optional: true, - ConflictsWith: []string{"tracing.0.condition.0.immediately"}, - Description: "The number of log occurrences that is needed to trigger the alert.", - }, - "time_window": { - Type: schema.TypeString, - Optional: true, - ForceNew: true, - ValidateFunc: validation.StringInSlice(alertValidTimeFrames, false), - ConflictsWith: []string{"tracing.0.condition.0.immediately"}, - RequiredWith: []string{"tracing.0.condition.0.more_than"}, - Description: fmt.Sprintf("The bounded time frame for the threshold to be occurred within, to trigger the alert. Can be one of %q", alertValidTimeFrames), - }, - "group_by": { - Type: schema.TypeList, - Optional: true, - Elem: &schema.Schema{ - Type: schema.TypeString, - }, - ConflictsWith: []string{"tracing.0.condition.0.immediately"}, - Description: "The fields to 'group by' on.", - }, + } +} + +func logsUniqueCountTimeWindowSchema() schema.SingleNestedAttribute { + return schema.SingleNestedAttribute{ + Required: true, + Attributes: map[string]schema.Attribute{ + "specific_value": schema.StringAttribute{ + Required: true, + Validators: []validator.String{ + stringvalidator.OneOf(validLogsUniqueCountTimeWindowValues...), + }, + MarkdownDescription: fmt.Sprintf("Time window value. Valid values: %q.", validLogsUniqueCountTimeWindowValues), + }, + }, + } +} + +func undetectedValuesManagementSchema() schema.SingleNestedAttribute { + return schema.SingleNestedAttribute{ + Optional: true, + Computed: true, + PlanModifiers: []planmodifier.Object{ + objectplanmodifier.UseStateForUnknown(), + }, + Attributes: map[string]schema.Attribute{ + "trigger_undetected_values": schema.BoolAttribute{ + Optional: true, + Computed: true, + Default: booldefault.StaticBool(true), + }, + "auto_retire_timeframe": schema.StringAttribute{ + Optional: true, + Validators: []validator.String{ + stringvalidator.OneOf(validAutoRetireTimeframes...), }, + MarkdownDescription: fmt.Sprintf("Auto retire timeframe. 
Valid values: %q.", validAutoRetireTimeframes), }, - Description: "Defines the conditions for triggering and notify by the alert", }, } } -func tagFilterSchema() *schema.Resource { - return &schema.Resource{ - Schema: map[string]*schema.Schema{ - "field": { - Type: schema.TypeString, - Required: true, - }, - "values": { - Type: schema.TypeSet, - Required: true, - Elem: &schema.Schema{ - Type: schema.TypeString, - }, - Set: schema.HashString, - Description: "Tag filter values can be filtered by prefix, suffix, and contains using the next patterns - filter:notEquals:xxx, filter:startsWith:xxx, filter:endsWith:xxx, filter:contains:xxx", - }, +func (r *AlertResource) ImportState(ctx context.Context, req resource.ImportStateRequest, resp *resource.ImportStateResponse) { + resource.ImportStatePassthroughID(ctx, path.Root("id"), req, resp) +} + +func (r *AlertResource) Create(ctx context.Context, req resource.CreateRequest, resp *resource.CreateResponse) { + var plan *AlertResourceModel + if diags := req.Plan.Get(ctx, &plan); diags.HasError() { + resp.Diagnostics.Append(diags...) + return + } + + alertProperties, diags := extractAlertProperties(ctx, plan) + if diags.HasError() { + resp.Diagnostics.Append(diags...) + return + } + createAlertRequest := &cxsdk.CreateAlertDefRequest{AlertDefProperties: alertProperties} + log.Printf("[INFO] Creating new Alert: %s", protojson.Format(createAlertRequest)) + createResp, err := r.client.Create(ctx, createAlertRequest) + if err != nil { + log.Printf("[ERROR] Received error: %s", err) + resp.Diagnostics.AddError("Error creating Alert", + formatRpcErrors(err, createAlertURL, protojson.Format(createAlertRequest)), + ) + return + } + alert := createResp.GetAlertDef() + log.Printf("[INFO] Submitted new alert: %s", protojson.Format(alert)) + + plan, diags = flattenAlert(ctx, alert) + if diags.HasError() { + resp.Diagnostics.Append(diags...) + return + } + // Set state to fully populated data + resp.Diagnostics.Append(resp.State.Set(ctx, plan)...) 
+} + +func extractAlertProperties(ctx context.Context, plan *AlertResourceModel) (*cxsdk.AlertDefProperties, diag.Diagnostics) { + groupBy, diags := typeStringSliceToWrappedStringSlice(ctx, plan.GroupBy.Elements()) + if diags.HasError() { + return nil, diags + } + incidentsSettings, diags := extractIncidentsSettings(ctx, plan.IncidentsSettings) + if diags.HasError() { + return nil, diags + } + notificationGroup, diags := extractNotificationGroup(ctx, plan.NotificationGroup) + if diags.HasError() { + return nil, diags + } + labels, diags := typeMapToStringMap(ctx, plan.Labels) + + if diags.HasError() { + return nil, diags + } + alertProperties := &cxsdk.AlertDefProperties{ + Name: typeStringToWrapperspbString(plan.Name), + Description: typeStringToWrapperspbString(plan.Description), + Enabled: typeBoolToWrapperspbBool(plan.Enabled), + Priority: alertPrioritySchemaToProtoMap[plan.Priority.ValueString()], + GroupBy: groupBy, + IncidentsSettings: incidentsSettings, + NotificationGroup: notificationGroup, + Labels: labels, + } + + alertProperties, diags = expandAlertsSchedule(ctx, alertProperties, plan.Schedule) + if diags.HasError() { + return nil, diags + } + + alertProperties, diags = expandAlertsTypeDefinition(ctx, alertProperties, plan.TypeDefinition) + if diags.HasError() { + return nil, diags + } + + return alertProperties, nil +} + +func extractIncidentsSettings(ctx context.Context, incidentsSettingsObject types.Object) (*cxsdk.AlertDefIncidentSettings, diag.Diagnostics) { + if incidentsSettingsObject.IsNull() || incidentsSettingsObject.IsUnknown() { + return nil, nil + } + + var incidentsSettingsModel IncidentsSettingsModel + if diags := incidentsSettingsObject.As(ctx, &incidentsSettingsModel, basetypes.ObjectAsOptions{}); diags.HasError() { + return nil, diags + } + + incidentsSettings := &cxsdk.AlertDefIncidentSettings{ + NotifyOn: notifyOnSchemaToProtoMap[incidentsSettingsModel.NotifyOn.ValueString()], + } + + incidentsSettings, diags := expandIncidentsSettingsByRetriggeringPeriod(ctx, incidentsSettings, incidentsSettingsModel.RetriggeringPeriod) + if diags.HasError() { + return nil, diags + } + + return incidentsSettings, nil +} + +func expandIncidentsSettingsByRetriggeringPeriod(ctx context.Context, incidentsSettings *cxsdk.AlertDefIncidentSettings, period types.Object) (*cxsdk.AlertDefIncidentSettings, diag.Diagnostics) { + if period.IsNull() || period.IsUnknown() { + return incidentsSettings, nil + } + + var periodModel RetriggeringPeriodModel + if diags := period.As(ctx, &periodModel, basetypes.ObjectAsOptions{}); diags.HasError() { + return nil, diags + } + + if !(periodModel.Minutes.IsNull() || periodModel.Minutes.IsUnknown()) { + incidentsSettings.RetriggeringPeriod = &cxsdk.AlertDefIncidentSettingsMinutes{ + Minutes: typeInt64ToWrappedUint32(periodModel.Minutes), + } + } + + return incidentsSettings, nil +} + +func extractNotificationGroup(ctx context.Context, notificationGroupObject types.Object) (*cxsdk.AlertDefNotificationGroup, diag.Diagnostics) { + if notificationGroupObject.IsNull() || notificationGroupObject.IsUnknown() { + return nil, nil + } + + var notificationGroupModel NotificationGroupModel + if diags := notificationGroupObject.As(ctx, ¬ificationGroupModel, basetypes.ObjectAsOptions{}); diags.HasError() { + return nil, diags + } + + groupByFields, diags := typeStringSliceToWrappedStringSlice(ctx, notificationGroupModel.GroupByFields.Elements()) + if diags.HasError() { + return nil, diags + } + + notificationGroup := &cxsdk.AlertDefNotificationGroup{ + 
GroupByFields: groupByFields, + } + notificationGroup, diags = expandNotificationTargetSettings(ctx, notificationGroupModel, notificationGroup) + if diags.HasError() { + return nil, diags + } + + return notificationGroup, nil +} + +func expandNotificationTargetSettings(ctx context.Context, notificationGroupModel NotificationGroupModel, notificationGroup *cxsdk.AlertDefNotificationGroup) (*cxsdk.AlertDefNotificationGroup, diag.Diagnostics) { + if advancedTargetSettings := notificationGroupModel.AdvancedTargetSettings; !(advancedTargetSettings.IsNull() || advancedTargetSettings.IsUnknown()) { + notifications, diags := extractAdvancedTargetSettings(ctx, advancedTargetSettings) + if diags.HasError() { + return nil, diags + } + notificationGroup.Targets = notifications + } else if simpleTargetSettings := notificationGroupModel.SimpleTargetSettings; !(simpleTargetSettings.IsNull() || simpleTargetSettings.IsUnknown()) { + notifications, diags := extractSimpleTargetSettings(ctx, simpleTargetSettings) + if diags.HasError() { + return nil, diags + } + notificationGroup.Targets = notifications + } + + return notificationGroup, nil +} + +func extractAdvancedTargetSettings(ctx context.Context, advancedTargetSettings types.Set) (*cxsdk.AlertDefNotificationGroupAdvanced, diag.Diagnostics) { + if advancedTargetSettings.IsNull() || advancedTargetSettings.IsUnknown() { + return nil, nil + } + + var advancedTargetSettingsObjects []types.Object + diags := advancedTargetSettings.ElementsAs(ctx, &advancedTargetSettingsObjects, true) + if diags.HasError() { + return nil, diags + } + var expandedAdvancedTargetSettings []*cxsdk.AlertDefAdvancedTargetSettings + for _, ao := range advancedTargetSettingsObjects { + var advancedTargetSettingsModel AdvancedTargetSettingsModel + if dg := ao.As(ctx, &advancedTargetSettingsModel, basetypes.ObjectAsOptions{}); dg.HasError() { + diags.Append(dg...) + continue + } + expandedAdvancedTargetSetting, expandDiags := extractAdvancedTargetSetting(ctx, advancedTargetSettingsModel) + if expandDiags.HasError() { + diags.Append(expandDiags...) 
+ continue + } + expandedAdvancedTargetSettings = append(expandedAdvancedTargetSettings, expandedAdvancedTargetSetting) + } + + if diags.HasError() { + return nil, diags + } + + return &cxsdk.AlertDefNotificationGroupAdvanced{ + Advanced: &cxsdk.AlertDefAdvancedTargets{ + AdvancedTargetsSettings: expandedAdvancedTargetSettings, + }, + }, nil +} + +func extractAdvancedTargetSetting(ctx context.Context, advancedTargetSettingsModel AdvancedTargetSettingsModel) (*cxsdk.AlertDefAdvancedTargetSettings, diag.Diagnostics) { + notifyOn := notifyOnSchemaToProtoMap[advancedTargetSettingsModel.NotifyOn.ValueString()] + advancedTargetSettings := &cxsdk.AlertDefAdvancedTargetSettings{ + NotifyOn: ¬ifyOn, + } + advancedTargetSettings, diags := expandAlertNotificationByRetriggeringPeriod(ctx, advancedTargetSettings, advancedTargetSettingsModel.RetriggeringPeriod) + if diags.HasError() { + return nil, diags + } + + if !advancedTargetSettingsModel.IntegrationID.IsNull() && !advancedTargetSettingsModel.IntegrationID.IsUnknown() { + integrationId, diag := typeStringToWrapperspbUint32(advancedTargetSettingsModel.IntegrationID) + if diag.HasError() { + return nil, diag + } + advancedTargetSettings.Integration = &cxsdk.AlertDefIntegrationType{ + IntegrationType: &cxsdk.AlertDefIntegrationTypeIntegrationID{ + IntegrationId: integrationId, + }, + } + } else if !advancedTargetSettingsModel.Recipients.IsNull() && !advancedTargetSettingsModel.Recipients.IsUnknown() { + emails, diags := typeStringSliceToWrappedStringSlice(ctx, advancedTargetSettingsModel.Recipients.Elements()) + if diags.HasError() { + return nil, diags + } + advancedTargetSettings.Integration = &cxsdk.AlertDefIntegrationType{ + IntegrationType: &cxsdk.AlertDefIntegrationTypeRecipients{ + Recipients: &cxsdk.AlertDefRecipients{ + Emails: emails, + }, + }, + } + } + + return advancedTargetSettings, nil +} + +func expandAlertNotificationByRetriggeringPeriod(ctx context.Context, alertNotification *cxsdk.AlertDefAdvancedTargetSettings, period types.Object) (*cxsdk.AlertDefAdvancedTargetSettings, diag.Diagnostics) { + if period.IsNull() || period.IsUnknown() { + return alertNotification, nil + } + + var periodModel RetriggeringPeriodModel + if diags := period.As(ctx, &periodModel, basetypes.ObjectAsOptions{}); diags.HasError() { + return nil, diags + } + + if !(periodModel.Minutes.IsNull() || periodModel.Minutes.IsUnknown()) { + alertNotification.RetriggeringPeriod = &cxsdk.AlertDefAdvancedTargetSettingsMinutes{ + Minutes: typeInt64ToWrappedUint32(periodModel.Minutes), + } + } + + return alertNotification, nil +} + +func extractSimpleTargetSettings(ctx context.Context, simpleTargetSettings types.Set) (*cxsdk.AlertDefNotificationGroupSimple, diag.Diagnostics) { + if simpleTargetSettings.IsNull() || simpleTargetSettings.IsUnknown() { + return nil, nil + } + + var simpleTargetSettingsObjects []types.Object + diags := simpleTargetSettings.ElementsAs(ctx, &simpleTargetSettingsObjects, true) + if diags.HasError() { + return nil, diags + } + var expandedSimpleTargetSettings []*cxsdk.AlertDefIntegrationType + for _, ao := range simpleTargetSettingsObjects { + var simpleTargetSettingsModel SimpleTargetSettingsModel + if dg := ao.As(ctx, &simpleTargetSettingsModel, basetypes.ObjectAsOptions{}); dg.HasError() { + diags.Append(dg...) + continue + } + expandedSimpleTargetSetting, expandDiags := extractSimpleTargetSetting(ctx, simpleTargetSettingsModel) + if expandDiags.HasError() { + diags.Append(expandDiags...) 
+ continue + } + expandedSimpleTargetSettings = append(expandedSimpleTargetSettings, expandedSimpleTargetSetting) + } + + if diags.HasError() { + return nil, diags + } + + return &cxsdk.AlertDefNotificationGroupSimple{ + Simple: &cxsdk.AlertDefTargetSimple{ + Integrations: expandedSimpleTargetSettings, + }, + }, nil + +} + +func extractSimpleTargetSetting(ctx context.Context, model SimpleTargetSettingsModel) (*cxsdk.AlertDefIntegrationType, diag.Diagnostics) { + if !model.IntegrationID.IsNull() && !model.IntegrationID.IsUnknown() { + integrationId, diag := typeStringToWrapperspbUint32(model.IntegrationID) + if diag.HasError() { + return nil, diag + } + return &cxsdk.AlertDefIntegrationType{ + IntegrationType: &cxsdk.AlertDefIntegrationTypeIntegrationID{ + IntegrationId: integrationId, + }, + }, nil + } else if !model.Recipients.IsNull() && !model.Recipients.IsUnknown() { + emails, diags := typeStringSliceToWrappedStringSlice(ctx, model.Recipients.Elements()) + if diags.HasError() { + return nil, diags + } + return &cxsdk.AlertDefIntegrationType{ + IntegrationType: &cxsdk.AlertDefIntegrationTypeRecipients{ + Recipients: &cxsdk.AlertDefRecipients{ + Emails: emails, + }, + }, + }, nil + } + return nil, diag.Diagnostics{diag.NewErrorDiagnostic("Integration ID is not set", "Integration ID is not set")} + +} + +func expandAlertsSchedule(ctx context.Context, alertProperties *cxsdk.AlertDefProperties, scheduleObject types.Object) (*cxsdk.AlertDefProperties, diag.Diagnostics) { + if scheduleObject.IsNull() || scheduleObject.IsUnknown() { + return alertProperties, nil + } + + var scheduleModel AlertScheduleModel + if diags := scheduleObject.As(ctx, &scheduleModel, basetypes.ObjectAsOptions{}); diags.HasError() { + return nil, diags + } + + var diags diag.Diagnostics + if activeOn := scheduleModel.ActiveOn; !(activeOn.IsNull() || activeOn.IsUnknown()) { + alertProperties.Schedule, diags = expandActiveOnSchedule(ctx, activeOn) + } else { + return nil, diag.Diagnostics{diag.NewErrorDiagnostic("Schedule object is not valid", "Schedule object is not valid")} + } + + if diags.HasError() { + return nil, diags + } + + return alertProperties, nil +} + +func expandActiveOnSchedule(ctx context.Context, activeOnObject types.Object) (*cxsdk.AlertDefPropertiesActiveOn, diag.Diagnostics) { + if activeOnObject.IsNull() || activeOnObject.IsUnknown() { + return nil, nil + } + + var activeOnModel ActiveOnModel + if diags := activeOnObject.As(ctx, &activeOnModel, basetypes.ObjectAsOptions{}); diags.HasError() { + return nil, diags + } + + daysOfWeek, diags := extractDaysOfWeek(ctx, activeOnModel.DaysOfWeek) + if diags.HasError() { + return nil, diags + } + + startTime, diags := extractTimeOfDay(ctx, activeOnModel.StartTime) + if diags.HasError() { + return nil, diags + } + + endTime, diags := extractTimeOfDay(ctx, activeOnModel.EndTime) + if diags.HasError() { + return nil, diags + } + + return &cxsdk.AlertDefScheduleActiveOn{ + ActiveOn: &cxsdk.AlertDefActivitySchedule{ + DayOfWeek: daysOfWeek, + StartTime: startTime, + EndTime: endTime, + }, + }, nil +} + +func extractTimeOfDay(ctx context.Context, timeObject types.Object) (*cxsdk.AlertTimeOfDay, diag.Diagnostics) { + if timeObject.IsNull() || timeObject.IsUnknown() { + return nil, nil + } + + var timeOfDayModel TimeOfDayModel + if diags := timeObject.As(ctx, &timeOfDayModel, basetypes.ObjectAsOptions{}); diags.HasError() { + return nil, diags + } + + return &cxsdk.AlertTimeOfDay{ + Hours: int32(timeOfDayModel.Hours.ValueInt64()), + Minutes: 
int32(timeOfDayModel.Minutes.ValueInt64()), + }, nil + +} + +func extractDaysOfWeek(ctx context.Context, daysOfWeek types.List) ([]cxsdk.AlertDayOfWeek, diag.Diagnostics) { + var diags diag.Diagnostics + daysOfWeekElements := daysOfWeek.Elements() + result := make([]cxsdk.AlertDayOfWeek, 0, len(daysOfWeekElements)) + for _, v := range daysOfWeekElements { + val, err := v.ToTerraformValue(ctx) + if err != nil { + diags.AddError("Failed to convert value to Terraform", err.Error()) + continue + } + var str string + + if err = val.As(&str); err != nil { + diags.AddError("Failed to convert value to string", err.Error()) + continue + } + result = append(result, daysOfWeekSchemaToProtoMap[str]) + } + return result, diags +} + +func expandAlertsTypeDefinition(ctx context.Context, alertProperties *cxsdk.AlertDefProperties, alertDefinition types.Object) (*cxsdk.AlertDefProperties, diag.Diagnostics) { + if alertDefinition.IsNull() || alertDefinition.IsUnknown() { + return alertProperties, nil + } + + var alertDefinitionModel AlertTypeDefinitionModel + if diags := alertDefinition.As(ctx, &alertDefinitionModel, basetypes.ObjectAsOptions{}); diags.HasError() { + return nil, diags + } + + var diags diag.Diagnostics + if logsImmediate := alertDefinitionModel.LogsImmediate; !(logsImmediate.IsNull() || logsImmediate.IsUnknown()) { + alertProperties, diags = expandLogsImmediateAlertTypeDefinition(ctx, alertProperties, logsImmediate) + } else if logsMoreThan := alertDefinitionModel.LogsMoreThan; !(logsMoreThan.IsNull() || logsMoreThan.IsUnknown()) { + alertProperties, diags = expandLogsMoreThanAlertTypeDefinition(ctx, alertProperties, logsMoreThan) + } else if logsLessThan := alertDefinitionModel.LogsLessThan; !(logsLessThan.IsNull() || logsLessThan.IsUnknown()) { + alertProperties, diags = expandLogsLessThanAlertTypeDefinition(ctx, alertProperties, logsLessThan) + } else if logsMoreThanUsual := alertDefinitionModel.LogsMoreThanUsual; !(logsMoreThanUsual.IsNull() || logsMoreThanUsual.IsUnknown()) { + alertProperties, diags = expandLogsMoreThanUsualAlertTypeDefinition(ctx, alertProperties, logsMoreThanUsual) + } else if logsRatioMoreThan := alertDefinitionModel.LogsRatioMoreThan; !(logsRatioMoreThan.IsNull() || logsRatioMoreThan.IsUnknown()) { + alertProperties, diags = expandLogsRatioMoreThanAlertTypeDefinition(ctx, alertProperties, logsRatioMoreThan) + } else if logsRatioLessThan := alertDefinitionModel.LogsRatioLessThan; !(logsRatioLessThan.IsNull() || logsRatioLessThan.IsUnknown()) { + alertProperties, diags = expandLogsRatioLessThanAlertTypeDefinition(ctx, alertProperties, logsRatioLessThan) + } else if logsNewValue := alertDefinitionModel.LogsNewValue; !(logsNewValue.IsNull() || logsNewValue.IsUnknown()) { + alertProperties, diags = expandLogsNewValueAlertTypeDefinition(ctx, alertProperties, logsNewValue) + } else if logsUniqueCount := alertDefinitionModel.LogsUniqueCount; !(logsUniqueCount.IsNull() || logsUniqueCount.IsUnknown()) { + alertProperties, diags = expandLogsUniqueCountAlertTypeDefinition(ctx, alertProperties, logsUniqueCount) + } else if logsTimeRelativeMoreThan := alertDefinitionModel.LogsTimeRelativeMoreThan; !(logsTimeRelativeMoreThan.IsNull() || logsTimeRelativeMoreThan.IsUnknown()) { + alertProperties, diags = expandLogsTimeRelativeMoreThanAlertTypeDefinition(ctx, alertProperties, logsTimeRelativeMoreThan) + } else if logsTimeRelativeLessThan := alertDefinitionModel.LogsTimeRelativeLessThan; !(logsTimeRelativeLessThan.IsNull() || logsTimeRelativeLessThan.IsUnknown()) { + alertProperties, 
diags = expandLogsTimeRelativeLessThanAlertTypeDefinition(ctx, alertProperties, logsTimeRelativeLessThan) + } else if metricMoreThan := alertDefinitionModel.MetricMoreThan; !(metricMoreThan.IsNull() || metricMoreThan.IsUnknown()) { + alertProperties, diags = expandMetricMoreThanAlertTypeDefinition(ctx, alertProperties, metricMoreThan) + } else if metricLessThan := alertDefinitionModel.MetricLessThan; !(metricLessThan.IsNull() || metricLessThan.IsUnknown()) { + alertProperties, diags = expandMetricLessThanAlertTypeDefinition(ctx, alertProperties, metricLessThan) + } else if metricMoreThanUsual := alertDefinitionModel.MetricMoreThanUsual; !(metricMoreThanUsual.IsNull() || metricMoreThanUsual.IsUnknown()) { + alertProperties, diags = expandMetricMoreThanUsualAlertTypeDefinition(ctx, alertProperties, metricMoreThanUsual) + } else if metricLessThanUsual := alertDefinitionModel.MetricLessThanUsual; !(metricLessThanUsual.IsNull() || metricLessThanUsual.IsUnknown()) { + alertProperties, diags = expandMetricLessThanUsualAlertTypeDefinition(ctx, alertProperties, metricLessThanUsual) + } else if metricMoreThanOrEquals := alertDefinitionModel.MetricMoreThanOrEquals; !(metricMoreThanOrEquals.IsNull() || metricMoreThanOrEquals.IsUnknown()) { + alertProperties, diags = expandMetricMoreThanOrEqualsAlertTypeDefinition(ctx, alertProperties, metricMoreThanOrEquals) + } else if metricLessThanOrEquals := alertDefinitionModel.MetricLessThanOrEquals; !(metricLessThanOrEquals.IsNull() || metricLessThanOrEquals.IsUnknown()) { + alertProperties, diags = expandMetricLessThanOrEqualsAlertTypeDefinition(ctx, alertProperties, metricLessThanOrEquals) + } else if tracingImmediate := alertDefinitionModel.TracingImmediate; !(tracingImmediate.IsNull() || tracingImmediate.IsUnknown()) { + alertProperties, diags = expandTracingImmediateAlertTypeDefinition(ctx, alertProperties, tracingImmediate) + } else if tracingMoreThan := alertDefinitionModel.TracingMoreThan; !(tracingMoreThan.IsNull() || tracingMoreThan.IsUnknown()) { + alertProperties, diags = expandTracingMoreThanAlertTypeDefinition(ctx, alertProperties, tracingMoreThan) + } else if flow := alertDefinitionModel.Flow; !(flow.IsNull() || flow.IsUnknown()) { + alertProperties, diags = expandFlowAlertTypeDefinition(ctx, alertProperties, flow) + } else { + return nil, diag.Diagnostics{diag.NewErrorDiagnostic("Invalid Alert Type Definition", "Alert Type Definition is not valid")} + } + + if diags.HasError() { + return nil, diags + } + + return alertProperties, nil +} + +func expandLogsImmediateAlertTypeDefinition(ctx context.Context, properties *cxsdk.AlertDefProperties, logsImmediateObject types.Object) (*cxsdk.AlertDefProperties, diag.Diagnostics) { + if logsImmediateObject.IsNull() || logsImmediateObject.IsUnknown() { + return properties, nil + } + + var immediateModel LogsImmediateModel + if diags := logsImmediateObject.As(ctx, &immediateModel, basetypes.ObjectAsOptions{}); diags.HasError() { + return nil, diags + } + + logsFilter, diags := extractLogsFilter(ctx, immediateModel.LogsFilter) + if diags.HasError() { + return nil, diags + } + + notificationPayloadFilter, diags := typeStringSliceToWrappedStringSlice(ctx, immediateModel.NotificationPayloadFilter.Elements()) + if diags.HasError() { + return nil, diags + } + + properties.TypeDefinition = &cxsdk.AlertDefPropertiesLogsImmediate{ + LogsImmediate: &cxsdk.LogsImmediateType{ + LogsFilter: logsFilter, + NotificationPayloadFilter: notificationPayloadFilter, + }, + } + properties.Type = 
cxsdk.AlertDefTypeLogsImmediateOrUnspecified + return properties, nil +} + +func extractLogsFilter(ctx context.Context, filter types.Object) (*cxsdk.LogsFilter, diag.Diagnostics) { + if filter.IsNull() || filter.IsUnknown() { + return nil, nil + } + + var filterModel AlertsLogsFilterModel + if diags := filter.As(ctx, &filterModel, basetypes.ObjectAsOptions{}); diags.HasError() { + return nil, diags + } + + logsFilter := &cxsdk.LogsFilter{} + var diags diag.Diagnostics + if !(filterModel.LuceneFilter.IsNull() || filterModel.LuceneFilter.IsUnknown()) { + logsFilter.FilterType, diags = extractLuceneFilter(ctx, filterModel.LuceneFilter) + } + + if diags.HasError() { + return nil, diags + } + + return logsFilter, nil +} + +func extractLuceneFilter(ctx context.Context, luceneFilter types.Object) (*cxsdk.LogsFilterLuceneFilter, diag.Diagnostics) { + if luceneFilter.IsNull() || luceneFilter.IsUnknown() { + return nil, nil + } + + var luceneFilterModel LuceneFilterModel + if diags := luceneFilter.As(ctx, &luceneFilterModel, basetypes.ObjectAsOptions{}); diags.HasError() { + return nil, diags + } + + labelFilters, diags := extractLabelFilters(ctx, luceneFilterModel.LabelFilters) + if diags.HasError() { + return nil, diags + } + + return &cxsdk.LogsFilterSimpleFilter{ + SimpleFilter: &cxsdk.SimpleFilter{ + LuceneQuery: typeStringToWrapperspbString(luceneFilterModel.LuceneQuery), + LabelFilters: labelFilters, + }, + }, nil +} + +func extractLabelFilters(ctx context.Context, filters types.Object) (*cxsdk.LabelFilters, diag.Diagnostics) { + if filters.IsNull() || filters.IsUnknown() { + return nil, nil + } + + var filtersModel LabelFiltersModel + if diags := filters.As(ctx, &filtersModel, basetypes.ObjectAsOptions{}); diags.HasError() { + return nil, diags + } + + applicationName, diags := extractLabelFilterTypes(ctx, filtersModel.ApplicationName) + if diags.HasError() { + return nil, diags + } + + subsystemName, diags := extractLabelFilterTypes(ctx, filtersModel.SubsystemName) + if diags.HasError() { + return nil, diags + } + + severities, diags := extractLogSeverities(ctx, filtersModel.Severities.Elements()) + if diags.HasError() { + return nil, diags + } + + return &cxsdk.LabelFilters{ + ApplicationName: applicationName, + SubsystemName: subsystemName, + Severities: severities, + }, nil +} + +func extractLabelFilterTypes(ctx context.Context, labelFilterTypes types.Set) ([]*cxsdk.LabelFilterType, diag.Diagnostics) { + var labelFilterTypesObjects []types.Object + diags := labelFilterTypes.ElementsAs(ctx, &labelFilterTypesObjects, true) + if diags.HasError() { + return nil, diags + } + var expandedLabelFilterTypes []*cxsdk.LabelFilterType + for _, lft := range labelFilterTypesObjects { + var labelFilterTypeModel LabelFilterTypeModel + if dg := lft.As(ctx, &labelFilterTypeModel, basetypes.ObjectAsOptions{}); dg.HasError() { + diags.Append(dg...) 
+ continue + } + expandedLabelFilterType := &cxsdk.LabelFilterType{ + Value: typeStringToWrapperspbString(labelFilterTypeModel.Value), + Operation: logFilterOperationTypeSchemaToProtoMap[labelFilterTypeModel.Operation.ValueString()], + } + expandedLabelFilterTypes = append(expandedLabelFilterTypes, expandedLabelFilterType) + } + + if diags.HasError() { + return nil, diags + } + + return expandedLabelFilterTypes, nil +} + +func extractLogSeverities(ctx context.Context, elements []attr.Value) ([]cxsdk.LogSeverity, diag.Diagnostics) { + var diags diag.Diagnostics + result := make([]cxsdk.LogSeverity, 0, len(elements)) + for _, v := range elements { + val, err := v.ToTerraformValue(ctx) + if err != nil { + diags.AddError("Failed to convert value to Terraform", err.Error()) + continue + } + var str string + + if err = val.As(&str); err != nil { + diags.AddError("Failed to convert value to string", err.Error()) + continue + } + result = append(result, logSeveritySchemaToProtoMap[str]) + } + return result, diags +} + +func expandLogsMoreThanAlertTypeDefinition(ctx context.Context, properties *cxsdk.AlertDefProperties, moreThanObject types.Object) (*cxsdk.AlertDefProperties, diag.Diagnostics) { + if moreThanObject.IsNull() || moreThanObject.IsUnknown() { + return properties, nil + } + + var moreThanModel LogsMoreThanModel + if diags := moreThanObject.As(ctx, &moreThanModel, basetypes.ObjectAsOptions{}); diags.HasError() { + return nil, diags + } + + logsFilter, diags := extractLogsFilter(ctx, moreThanModel.LogsFilter) + if diags.HasError() { + return nil, diags + } + + notificationPayloadFilter, diags := typeStringSliceToWrappedStringSlice(ctx, moreThanModel.NotificationPayloadFilter.Elements()) + if diags.HasError() { + return nil, diags + } + + timeWindow, diags := extractLogsTimeWindow(ctx, moreThanModel.TimeWindow) + if diags.HasError() { + return nil, diags + } + + properties.TypeDefinition = &cxsdk.AlertDefPropertiesLogsThreshold{ + LogsThreshold: &cxsdk.LogsThresholdType{ + LogsFilter: logsFilter, + Rules: []*cxsdk.LogsThresholdRule{ + {Condition: &cxsdk.LogsThresholdCondition{ + Threshold: typeFloat64ToWrapperspbDouble(moreThanModel.Threshold), + TimeWindow: timeWindow, + ConditionType: cxsdk.LogsThresholdConditionTypeMoreThanOrUnspecified, + // EvaluationWindow: evaluationWindowTypeSchemaToProtoMap[moreThanModel.EvaluationWindow.ValueString()], + }}, + }, + NotificationPayloadFilter: notificationPayloadFilter, + }, + } + properties.Type = cxsdk.AlertDefTypeLogsThreshold + return properties, nil +} + +func extractLogsTimeWindow(ctx context.Context, timeWindow types.Object) (*cxsdk.LogsTimeWindow, diag.Diagnostics) { + if timeWindow.IsNull() || timeWindow.IsUnknown() { + return nil, nil + } + + var timeWindowModel LogsTimeWindowModel + if diags := timeWindow.As(ctx, &timeWindowModel, basetypes.ObjectAsOptions{}); diags.HasError() { + return nil, diags + } + + if specificValue := timeWindowModel.SpecificValue; !(specificValue.IsNull() || specificValue.IsUnknown()) { + return &cxsdk.LogsTimeWindow{ + Type: &cxsdk.LogsTimeWindowSpecificValue{ + LogsTimeWindowSpecificValue: logsTimeWindowValueSchemaToProtoMap[specificValue.ValueString()], + }, + }, nil + } + + return nil, diag.Diagnostics{diag.NewErrorDiagnostic("Invalid Time Window", "Time Window is not valid")} +} + +func expandLogsLessThanAlertTypeDefinition(ctx context.Context, properties *cxsdk.AlertDefProperties, lessThan types.Object) (*cxsdk.AlertDefProperties, diag.Diagnostics) { + if lessThan.IsNull() || lessThan.IsUnknown() { + 
return properties, nil + } + + var lessThanModel LogsLessThanModel + if diags := lessThan.As(ctx, &lessThanModel, basetypes.ObjectAsOptions{}); diags.HasError() { + return nil, diags + } + + logsFilter, diags := extractLogsFilter(ctx, lessThanModel.LogsFilter) + if diags.HasError() { + return nil, diags + } + + notificationPayloadFilter, diags := typeStringSliceToWrappedStringSlice(ctx, lessThanModel.NotificationPayloadFilter.Elements()) + if diags.HasError() { + return nil, diags + } + + timeWindow, diags := extractLogsTimeWindow(ctx, lessThanModel.TimeWindow) + if diags.HasError() { + return nil, diags + } + + undetectedValuesManagement, diags := extractUndetectedValuesManagement(ctx, lessThanModel.UndetectedValuesManagement) + if diags.HasError() { + return nil, diags + } + + properties.TypeDefinition = &cxsdk.AlertDefProperties_LogsLessThan{ + LogsLessThan: &cxsdk.LogsLessThanTypeDefinition{ + LogsFilter: logsFilter, + Threshold: typeInt64ToWrappedUint32(lessThanModel.Threshold), + TimeWindow: timeWindow, + UndetectedValuesManagement: undetectedValuesManagement, + NotificationPayloadFilter: notificationPayloadFilter, + }, + } + properties.AlertDefType = cxsdk.AlertDefType_ALERT_DEF_TYPE_LOGS_LESS_THAN + return properties, nil +} + +func extractUndetectedValuesManagement(ctx context.Context, management types.Object) (*cxsdk.UndetectedValuesManagement, diag.Diagnostics) { + if management.IsNull() || management.IsUnknown() { + return nil, nil + } + + var managementModel UndetectedValuesManagementModel + if diags := management.As(ctx, &managementModel, basetypes.ObjectAsOptions{}); diags.HasError() { + return nil, diags + } + + var autoRetireTimeframe *cxsdk.AutoRetireTimeframe + if !(managementModel.AutoRetireTimeframe.IsNull() || managementModel.AutoRetireTimeframe.IsUnknown()) { + autoRetireTimeframe = new(cxsdk.AutoRetireTimeframe) + *autoRetireTimeframe = autoRetireTimeframeSchemaToProtoMap[managementModel.AutoRetireTimeframe.ValueString()] + } + + return &cxsdk.UndetectedValuesManagement{ + TriggerUndetectedValues: typeBoolToWrapperspbBool(managementModel.TriggerUndetectedValues), + AutoRetireTimeframe: autoRetireTimeframe, + }, nil +} + +func expandLogsMoreThanUsualAlertTypeDefinition(ctx context.Context, properties *cxsdk.AlertDefProperties, moreThanUsual types.Object) (*cxsdk.AlertDefProperties, diag.Diagnostics) { + if moreThanUsual.IsNull() || moreThanUsual.IsUnknown() { + return properties, nil + } + + var moreThanUsualModel LogsMoreThanUsualModel + if diags := moreThanUsual.As(ctx, &moreThanUsualModel, basetypes.ObjectAsOptions{}); diags.HasError() { + return nil, diags + } + + logsFilter, diags := extractLogsFilter(ctx, moreThanUsualModel.LogsFilter) + if diags.HasError() { + return nil, diags + } + + notificationPayloadFilter, diags := typeStringSliceToWrappedStringSlice(ctx, moreThanUsualModel.NotificationPayloadFilter.Elements()) + if diags.HasError() { + return nil, diags + } + + timeWindow, diags := extractLogsTimeWindow(ctx, moreThanUsualModel.TimeWindow) + if diags.HasError() { + return nil, diags + } + + properties.TypeDefinition = &cxsdk.AlertDefProperties_LogsMoreThanUsual{ + LogsMoreThanUsual: &cxsdk.LogsMoreThanUsualTypeDefinition{ + LogsFilter: logsFilter, + MinimumThreshold: typeInt64ToWrappedUint32(moreThanUsualModel.MinimumThreshold), + TimeWindow: timeWindow, + NotificationPayloadFilter: notificationPayloadFilter, + }, + } + properties.AlertDefType = cxsdk.AlertDefType_ALERT_DEF_TYPE_LOGS_MORE_THAN_USUAL + return properties, nil +} + +func 
expandLogsRatioMoreThanAlertTypeDefinition(ctx context.Context, properties *cxsdk.AlertDefProperties, moreThan types.Object) (*cxsdk.AlertDefProperties, diag.Diagnostics) { + if moreThan.IsNull() || moreThan.IsUnknown() { + return properties, nil + } + + var moreThanModel LogsRatioMoreThanModel + if diags := moreThan.As(ctx, &moreThanModel, basetypes.ObjectAsOptions{}); diags.HasError() { + return nil, diags + } + + numeratorLogsFilter, diags := extractLogsFilter(ctx, moreThanModel.NumeratorLogsFilter) + if diags.HasError() { + return nil, diags + } + + denominatorLogsFilter, diags := extractLogsFilter(ctx, moreThanModel.DenominatorLogsFilter) + if diags.HasError() { + return nil, diags + } + + timeWindow, diags := extractLogsRatioTimeWindow(ctx, moreThanModel.TimeWindow) + if diags.HasError() { + return nil, diags + } + + notificationPayloadFilter, diags := typeStringSliceToWrappedStringSlice(ctx, moreThanModel.NotificationPayloadFilter.Elements()) + if diags.HasError() { + return nil, diags + } + + properties.TypeDefinition = &cxsdk.AlertDefPropertiesLogsRatioThreshold{ + LogsRatioThreshold: &cxsdk.LogsRatioThresholdType{ + Numerator: numeratorLogsFilter, + NumeratorAlias: typeStringToWrapperspbString(moreThanModel.NumeratorAlias), + Denominator: denominatorLogsFilter, + DenominatorAlias: typeStringToWrapperspbString(moreThanModel.DenominatorAlias), + Rules: []*cxsdk.LogsRatioRules{ + { + Condition: &cxsdk.LogsRatioCondition{ + Threshold: typeFloat64ToWrapperspbDouble(moreThanModel.Threshold), + TimeWindow: timeWindow, + IgnoreInfinity: typeBoolToWrapperspbBool(moreThanModel.IgnoreInfinity), + ConditionType: cxsdk.LogsRatioConditionTypeMoreThanOrUnspecified, + }, + }, + }, + NotificationPayloadFilter: notificationPayloadFilter, + GroupByFor: logsRatioGroupByForSchemaToProtoMap[moreThanModel.GroupByFor.ValueString()], + }, + } + properties.Type = cxsdk.AlertDefTypeLogsRatioThreshold + return properties, nil +} + +func extractLogsRatioTimeWindow(ctx context.Context, window types.Object) (*cxsdk.LogsRatioTimeWindow, diag.Diagnostics) { + if window.IsNull() || window.IsUnknown() { + return nil, nil + } + + var windowModel LogsRatioTimeWindowModel + if diags := window.As(ctx, &windowModel, basetypes.ObjectAsOptions{}); diags.HasError() { + return nil, diags + } + + if specificValue := windowModel.SpecificValue; !(specificValue.IsNull() || specificValue.IsUnknown()) { + return &cxsdk.LogsRatioTimeWindow{ + Type: &cxsdk.LogsRatioTimeWindowSpecificValue{ + LogsRatioTimeWindowSpecificValue: logsRatioTimeWindowValueSchemaToProtoMap[specificValue.ValueString()], + }, + }, nil + } + + return nil, diag.Diagnostics{diag.NewErrorDiagnostic("Invalid Time Window", "Time Window is not valid")} +} + +func expandLogsRatioLessThanAlertTypeDefinition(ctx context.Context, properties *cxsdk.AlertDefProperties, ratioLessThan types.Object) (*cxsdk.AlertDefProperties, diag.Diagnostics) { + if ratioLessThan.IsNull() || ratioLessThan.IsUnknown() { + return properties, nil + } + + var ratioLessThanModel LogsRatioLessThanModel + if diags := ratioLessThan.As(ctx, &ratioLessThanModel, basetypes.ObjectAsOptions{}); diags.HasError() { + return nil, diags + } + + numeratorLogsFilter, diags := extractLogsFilter(ctx, ratioLessThanModel.NumeratorLogsFilter) + if diags.HasError() { + return nil, diags + } + + denominatorLogsFilter, diags := extractLogsFilter(ctx, ratioLessThanModel.DenominatorLogsFilter) + if diags.HasError() { + return nil, diags + } + + timeWindow, diags := extractLogsRatioTimeWindow(ctx, 
ratioLessThanModel.TimeWindow) + if diags.HasError() { + return nil, diags + } + + notificationPayloadFilter, diags := typeStringSliceToWrappedStringSlice(ctx, ratioLessThanModel.NotificationPayloadFilter.Elements()) + if diags.HasError() { + return nil, diags + } + + undetectedValuesManagement, diags := extractUndetectedValuesManagement(ctx, ratioLessThanModel.UndetectedValuesManagement) + if diags.HasError() { + return nil, diags + } + + properties.TypeDefinition = &cxsdk.AlertDefProperties_LogsRatioLessThan{ + LogsRatioLessThan: &cxsdk.LogsRatioLessThanTypeDefinition{ + NumeratorLogsFilter: numeratorLogsFilter, + NumeratorAlias: typeStringToWrapperspbString(ratioLessThanModel.NumeratorAlias), + DenominatorLogsFilter: denominatorLogsFilter, + DenominatorAlias: typeStringToWrapperspbString(ratioLessThanModel.DenominatorAlias), + Threshold: typeInt64ToWrappedUint32(ratioLessThanModel.Threshold), + TimeWindow: timeWindow, + IgnoreInfinity: typeBoolToWrapperspbBool(ratioLessThanModel.IgnoreInfinity), + NotificationPayloadFilter: notificationPayloadFilter, + GroupByFor: logsRatioGroupByForSchemaToProtoMap[ratioLessThanModel.GroupByFor.ValueString()], + UndetectedValuesManagement: undetectedValuesManagement, + }, + } + properties.AlertDefType = cxsdk.AlertDefType_ALERT_DEF_TYPE_LOGS_RATIO_LESS_THAN + return properties, nil +} + +func expandLogsNewValueAlertTypeDefinition(ctx context.Context, properties *cxsdk.AlertDefProperties, newValue types.Object) (*cxsdk.AlertDefProperties, diag.Diagnostics) { + if newValue.IsNull() || newValue.IsUnknown() { + return properties, nil + } + + var newValueModel LogsNewValueModel + if diags := newValue.As(ctx, &newValueModel, basetypes.ObjectAsOptions{}); diags.HasError() { + return nil, diags + } + + logsFilter, diags := extractLogsFilter(ctx, newValueModel.LogsFilter) + if diags.HasError() { + return nil, diags + } + + notificationPayloadFilter, diags := typeStringSliceToWrappedStringSlice(ctx, newValueModel.NotificationPayloadFilter.Elements()) + if diags.HasError() { + return nil, diags + } + + timeWindow, diags := extractLogsNewValueTimeWindow(ctx, newValueModel.TimeWindow) + if diags.HasError() { + return nil, diags + } + + properties.TypeDefinition = &cxsdk.AlertDefProperties_LogsNewValue{ + LogsNewValue: &cxsdk.LogsNewValueTypeDefinition{ + LogsFilter: logsFilter, + KeypathToTrack: typeStringToWrapperspbString(newValueModel.KeypathToTrack), + TimeWindow: timeWindow, + NotificationPayloadFilter: notificationPayloadFilter, + }, + } + properties.AlertDefType = cxsdk.AlertDefType_ALERT_DEF_TYPE_LOGS_NEW_VALUE + return properties, nil +} + +func extractLogsNewValueTimeWindow(ctx context.Context, window types.Object) (*cxsdk.LogsNewValueTimeWindow, diag.Diagnostics) { + if window.IsNull() || window.IsUnknown() { + return nil, nil + } + + var windowModel LogsNewValueTimeWindowModel + if diags := window.As(ctx, &windowModel, basetypes.ObjectAsOptions{}); diags.HasError() { + return nil, diags + } + + if specificValue := windowModel.SpecificValue; !(specificValue.IsNull() || specificValue.IsUnknown()) { + return &cxsdk.LogsNewValueTimeWindow{ + Type: &cxsdk.LogsNewValueTimeWindow_LogsNewValueTimeWindowSpecificValue{ + LogsNewValueTimeWindowSpecificValue: logsNewValueTimeWindowValueSchemaToProtoMap[specificValue.ValueString()], + }, + }, nil + } + + return nil, diag.Diagnostics{diag.NewErrorDiagnostic("Invalid Time Window", "Time Window is not valid")} + +} + +func expandLogsUniqueCountAlertTypeDefinition(ctx context.Context, properties 
*cxsdk.AlertDefProperties, uniqueCount types.Object) (*cxsdk.AlertDefProperties, diag.Diagnostics) { + if uniqueCount.IsNull() || uniqueCount.IsUnknown() { + return properties, nil + } + + var uniqueCountModel LogsUniqueCountModel + if diags := uniqueCount.As(ctx, &uniqueCountModel, basetypes.ObjectAsOptions{}); diags.HasError() { + return nil, diags + } + + logsFilter, diags := extractLogsFilter(ctx, uniqueCountModel.LogsFilter) + if diags.HasError() { + return nil, diags + } + + notificationPayloadFilter, diags := typeStringSliceToWrappedStringSlice(ctx, uniqueCountModel.NotificationPayloadFilter.Elements()) + if diags.HasError() { + return nil, diags + } + + timeWindow, diags := extractLogsUniqueCountTimeWindow(ctx, uniqueCountModel.TimeWindow) + if diags.HasError() { + return nil, diags + } + + properties.TypeDefinition = &cxsdk.AlertDefProperties_LogsUniqueCount{ + LogsUniqueCount: &cxsdk.LogsUniqueCountTypeDefinition{ + LogsFilter: logsFilter, + UniqueCountKeypath: typeStringToWrapperspbString(uniqueCountModel.UniqueCountKeypath), + MaxUniqueCount: typeInt64ToWrappedInt64(uniqueCountModel.MaxUniqueCount), + TimeWindow: timeWindow, + NotificationPayloadFilter: notificationPayloadFilter, + MaxUniqueCountPerGroupByKey: typeInt64ToWrappedInt64(uniqueCountModel.MaxUniqueCountPerGroupByKey), + }, + } + properties.AlertDefType = cxsdk.AlertDefType_ALERT_DEF_TYPE_LOGS_UNIQUE_COUNT + return properties, nil +} + +func extractLogsUniqueCountTimeWindow(ctx context.Context, window types.Object) (*cxsdk.LogsUniqueValueTimeWindow, diag.Diagnostics) { + if window.IsNull() || window.IsUnknown() { + return nil, nil + } + + var windowModel LogsUniqueCountTimeWindowModel + if diags := window.As(ctx, &windowModel, basetypes.ObjectAsOptions{}); diags.HasError() { + return nil, diags + } + + if specificValue := windowModel.SpecificValue; !(specificValue.IsNull() || specificValue.IsUnknown()) { + return &cxsdk.LogsUniqueValueTimeWindow{ + Type: &cxsdk.LogsUniqueValueTimeWindow_LogsUniqueValueTimeWindowSpecificValue{ + LogsUniqueValueTimeWindowSpecificValue: logsUniqueCountTimeWindowValueSchemaToProtoMap[specificValue.ValueString()], + }, + }, nil + } + + return nil, diag.Diagnostics{diag.NewErrorDiagnostic("Invalid Time Window", "Time Window is not valid")} + +} + +func expandLogsTimeRelativeMoreThanAlertTypeDefinition(ctx context.Context, properties *cxsdk.AlertDefProperties, relativeMoreThan types.Object) (*cxsdk.AlertDefProperties, diag.Diagnostics) { + if relativeMoreThan.IsNull() || relativeMoreThan.IsUnknown() { + return properties, nil + } + + var relativeMoreThanModel LogsTimeRelativeMoreThanModel + if diags := relativeMoreThan.As(ctx, &relativeMoreThanModel, basetypes.ObjectAsOptions{}); diags.HasError() { + return nil, diags + } + + logsFilter, diags := extractLogsFilter(ctx, relativeMoreThanModel.LogsFilter) + if diags.HasError() { + return nil, diags + } + + notificationPayloadFilter, diags := typeStringSliceToWrappedStringSlice(ctx, relativeMoreThanModel.NotificationPayloadFilter.Elements()) + if diags.HasError() { + return nil, diags + } + + properties.TypeDefinition = &cxsdk.AlertDefProperties_LogsTimeRelativeMoreThan{ + LogsTimeRelativeMoreThan: &cxsdk.LogsTimeRelativeMoreThanTypeDefinition{ + LogsFilter: logsFilter, + Threshold: typeInt64ToWrappedUint32(relativeMoreThanModel.Threshold), + ComparedTo: logsTimeRelativeComparedToSchemaToProtoMap[relativeMoreThanModel.ComparedTo.ValueString()], + IgnoreInfinity: typeBoolToWrapperspbBool(relativeMoreThanModel.IgnoreInfinity), + 
NotificationPayloadFilter: notificationPayloadFilter, + }, + } + properties.AlertDefType = cxsdk.AlertDefType_ALERT_DEF_TYPE_LOGS_TIME_RELATIVE_MORE_THAN + return properties, nil +} + +func expandLogsTimeRelativeLessThanAlertTypeDefinition(ctx context.Context, properties *cxsdk.AlertDefProperties, timeRelativeLessThan types.Object) (*cxsdk.AlertDefProperties, diag.Diagnostics) { + if timeRelativeLessThan.IsNull() || timeRelativeLessThan.IsUnknown() { + return properties, nil + } + + var timeRelativeLessThanModel LogsTimeRelativeLessThanModel + if diags := timeRelativeLessThan.As(ctx, &timeRelativeLessThanModel, basetypes.ObjectAsOptions{}); diags.HasError() { + return nil, diags + } + + logsFilter, diags := extractLogsFilter(ctx, timeRelativeLessThanModel.LogsFilter) + if diags.HasError() { + return nil, diags + } + + notificationPayloadFilter, diags := typeStringSliceToWrappedStringSlice(ctx, timeRelativeLessThanModel.NotificationPayloadFilter.Elements()) + if diags.HasError() { + return nil, diags + } + + undetectedValuesManagement, diags := extractUndetectedValuesManagement(ctx, timeRelativeLessThanModel.UndetectedValuesManagement) + if diags.HasError() { + return nil, diags + } + + properties.TypeDefinition = &cxsdk.AlertDefProperties_LogsTimeRelativeLessThan{ + LogsTimeRelativeLessThan: &cxsdk.LogsTimeRelativeLessThanTypeDefinition{ + LogsFilter: logsFilter, + Threshold: typeInt64ToWrappedUint32(timeRelativeLessThanModel.Threshold), + ComparedTo: logsTimeRelativeComparedToSchemaToProtoMap[timeRelativeLessThanModel.ComparedTo.ValueString()], + IgnoreInfinity: typeBoolToWrapperspbBool(timeRelativeLessThanModel.IgnoreInfinity), + UndetectedValuesManagement: undetectedValuesManagement, + NotificationPayloadFilter: notificationPayloadFilter, + }, + } + properties.AlertDefType = cxsdk.AlertDefType_ALERT_DEF_TYPE_LOGS_TIME_RELATIVE_LESS_THAN + return properties, nil +} + +func expandMetricMoreThanAlertTypeDefinition(ctx context.Context, properties *cxsdk.AlertDefProperties, metricMoreThan types.Object) (*cxsdk.AlertDefProperties, diag.Diagnostics) { + if metricMoreThan.IsNull() || metricMoreThan.IsUnknown() { + return properties, nil + } + + var metricMoreThanModel MetricMoreThanModel + if diags := metricMoreThan.As(ctx, &metricMoreThanModel, basetypes.ObjectAsOptions{}); diags.HasError() { + return nil, diags + } + + metricFilter, diags := extractMetricFilter(ctx, metricMoreThanModel.MetricFilter) + if diags.HasError() { + return nil, diags + } + + ofTheLast, diags := extractMetricTimeWindow(ctx, metricMoreThanModel.OfTheLast) + if diags.HasError() { + return nil, diags + } + + missingValues, diags := extractMissingValues(ctx, metricMoreThanModel.MissingValues) + if diags.HasError() { + return nil, diags + } + + properties.TypeDefinition = &cxsdk.AlertDefProperties_MetricMoreThan{ + MetricMoreThan: &cxsdk.MetricMoreThanTypeDefinition{ + MetricFilter: metricFilter, + Threshold: typeFloat64ToWrapperspbFloat(metricMoreThanModel.Threshold), + ForOverPct: typeInt64ToWrappedUint32(metricMoreThanModel.ForOverPct), + OfTheLast: ofTheLast, + MissingValues: missingValues, + }, + } + properties.AlertDefType = cxsdk.AlertDefType_ALERT_DEF_TYPE_METRIC_MORE_THAN + + return properties, nil +} + +func extractMetricFilter(ctx context.Context, filter types.Object) (*cxsdk.MetricFilter, diag.Diagnostics) { + if filter.IsNull() || filter.IsUnknown() { + return nil, nil + } + + var filterModel MetricFilterModel + if diags := filter.As(ctx, &filterModel, basetypes.ObjectAsOptions{}); diags.HasError() { + return 
nil, diags + } + + if promql := filterModel.Promql; !(promql.IsNull() || promql.IsUnknown()) { + return &cxsdk.MetricFilter{ + Type: &cxsdk.MetricFilter_Promql{ + Promql: typeStringToWrapperspbString(promql), + }, + }, nil + } + + return nil, diag.Diagnostics{diag.NewErrorDiagnostic("Invalid Metric Filter", "Metric Filter is not valid")} +} + +func extractMetricTimeWindow(ctx context.Context, timeWindow types.Object) (*cxsdk.MetricTimeWindow, diag.Diagnostics) { + if timeWindow.IsNull() || timeWindow.IsUnknown() { + return nil, nil + } + + var timeWindowModel MetricTimeWindowModel + if diags := timeWindow.As(ctx, &timeWindowModel, basetypes.ObjectAsOptions{}); diags.HasError() { + return nil, diags + } + + if specificValue := timeWindowModel.SpecificValue; !(specificValue.IsNull() || specificValue.IsUnknown()) { + return &cxsdk.MetricTimeWindow{ + Type: &cxsdk.MetricTimeWindow_MetricTimeWindowSpecificValue{ + MetricTimeWindowSpecificValue: metricTimeWindowValueSchemaToProtoMap[specificValue.ValueString()], + }, + }, nil + } + + return nil, diag.Diagnostics{diag.NewErrorDiagnostic("Invalid Time Window", "Time Window is not valid")} +} + +func extractMissingValues(ctx context.Context, missingValues types.Object) (*cxsdk.MetricMissingValues, diag.Diagnostics) { + if missingValues.IsNull() || missingValues.IsUnknown() { + return nil, nil + } + + var missingValuesModel MetricMissingValuesModel + if diags := missingValues.As(ctx, &missingValuesModel, basetypes.ObjectAsOptions{}); diags.HasError() { + return nil, diags + } + + metricMissingValues := &cxsdk.MetricMissingValues{} + if replaceWithZero := missingValuesModel.ReplaceWithZero; !(replaceWithZero.IsNull() || replaceWithZero.IsUnknown()) { + metricMissingValues.MissingValues = &cxsdk.MetricMissingValues_ReplaceWithZero{ + ReplaceWithZero: typeBoolToWrapperspbBool(replaceWithZero), + } + } else if minNonNullValuesPct := missingValuesModel.MinNonNullValuesPct; !(minNonNullValuesPct.IsNull() || minNonNullValuesPct.IsUnknown()) { + metricMissingValues.MissingValues = &cxsdk.MetricMissingValues_MinNonNullValuesPct{ + MinNonNullValuesPct: typeInt64ToWrappedUint32(minNonNullValuesPct), + } + } else { + return nil, diag.Diagnostics{diag.NewErrorDiagnostic("Invalid Missing Values", "Missing Values is not valid")} + } + + return metricMissingValues, nil +} + +func expandMetricLessThanAlertTypeDefinition(ctx context.Context, properties *cxsdk.AlertDefProperties, metricLessThan types.Object) (*cxsdk.AlertDefProperties, diag.Diagnostics) { + if metricLessThan.IsNull() || metricLessThan.IsUnknown() { + return properties, nil + } + + var metricLessThanModel MetricLessThanModel + if diags := metricLessThan.As(ctx, &metricLessThanModel, basetypes.ObjectAsOptions{}); diags.HasError() { + return nil, diags + } + + metricFilter, diags := extractMetricFilter(ctx, metricLessThanModel.MetricFilter) + if diags.HasError() { + return nil, diags + } + + ofTheLast, diags := extractMetricTimeWindow(ctx, metricLessThanModel.OfTheLast) + if diags.HasError() { + return nil, diags + } + + missingValues, diags := extractMissingValues(ctx, metricLessThanModel.MissingValues) + if diags.HasError() { + return nil, diags + } + + undetectedValuesManagement, diags := extractUndetectedValuesManagement(ctx, metricLessThanModel.UndetectedValuesManagement) + if diags.HasError() { + return nil, diags + } + + properties.TypeDefinition = &cxsdk.AlertDefProperties_MetricLessThan{ + MetricLessThan: &cxsdk.MetricLessThanTypeDefinition{ + MetricFilter: metricFilter, + Threshold: 
typeFloat64ToWrapperspbFloat(metricLessThanModel.Threshold), + ForOverPct: typeInt64ToWrappedUint32(metricLessThanModel.ForOverPct), + OfTheLast: ofTheLast, + MissingValues: missingValues, + UndetectedValuesManagement: undetectedValuesManagement, + }, + } + properties.AlertDefType = cxsdk.AlertDefType_ALERT_DEF_TYPE_METRIC_LESS_THAN + + return properties, nil +} + +func expandTracingMoreThanAlertTypeDefinition(ctx context.Context, properties *cxsdk.AlertDefProperties, tracingMoreThan types.Object) (*cxsdk.AlertDefProperties, diag.Diagnostics) { + if tracingMoreThan.IsNull() || tracingMoreThan.IsUnknown() { + return properties, nil + } + + var tracingMoreThanModel TracingMoreThanModel + if diags := tracingMoreThan.As(ctx, &tracingMoreThanModel, basetypes.ObjectAsOptions{}); diags.HasError() { + return nil, diags + } + + tracingQuery, diags := extractTracingFilter(ctx, tracingMoreThanModel.TracingFilter) + if diags.HasError() { + return nil, diags + } + + notificationPayloadFilter, diags := typeStringSliceToWrappedStringSlice(ctx, tracingMoreThanModel.NotificationPayloadFilter.Elements()) + if diags.HasError() { + return nil, diags + } + + timeWindow, diags := extractTracingTimeWindow(ctx, tracingMoreThanModel.TimeWindow) + if diags.HasError() { + return nil, diags + } + + properties.TypeDefinition = &cxsdk.AlertDefProperties_TracingMoreThan{ + TracingMoreThan: &cxsdk.TracingMoreThanTypeDefinition{ + TracingFilter: tracingQuery, + SpanAmount: typeInt64ToWrappedUint32(tracingMoreThanModel.SpanAmount), + TimeWindow: timeWindow, + NotificationPayloadFilter: notificationPayloadFilter, + }, + } + properties.AlertDefType = cxsdk.AlertDefType_ALERT_DEF_TYPE_TRACING_MORE_THAN + + return properties, nil +} + +func extractTracingFilter(ctx context.Context, query types.Object) (*cxsdk.TracingFilter, diag.Diagnostics) { + if query.IsNull() || query.IsUnknown() { + return nil, nil + } + + var queryModel TracingFilterModel + if diags := query.As(ctx, &queryModel, basetypes.ObjectAsOptions{}); diags.HasError() { + return nil, diags + } + + tracingQuery := &cxsdk.TracingFilter{ + LatencyThresholdMs: typeInt64ToWrappedUint32(queryModel.LatencyThresholdMs), + } + + tracingQuery, diags := expandTracingFilters(ctx, tracingQuery, &queryModel) + if diags.HasError() { + return nil, diags + } + + return tracingQuery, nil +} + +func expandTracingFilters(ctx context.Context, query *cxsdk.TracingFilter, tracingQueryModel *TracingFilterModel) (*cxsdk.TracingFilter, diag.Diagnostics) { + if tracingQueryModel == nil { + return query, nil + } + + var diags diag.Diagnostics + if tracingLabelFilters := tracingQueryModel.TracingLabelFilters; !(tracingLabelFilters.IsNull() || tracingLabelFilters.IsUnknown()) { + query, diags = expandTracingLabelFilters(ctx, query, tracingLabelFilters) + } else { + diags = diag.Diagnostics{diag.NewErrorDiagnostic("Invalid Tracing Label Filters", "Tracing Label Filters is not valid")} + } + + return query, diags +} + +func expandTracingLabelFilters(ctx context.Context, query *cxsdk.TracingFilter, tracingLabelFilters types.Object) (*cxsdk.TracingFilter, diag.Diagnostics) { + var filtersModel TracingLabelFiltersModel + if diags := tracingLabelFilters.As(ctx, &filtersModel, basetypes.ObjectAsOptions{}); diags.HasError() { + return nil, diags + } + + applicationName, diags := extractTracingLabelFilters(ctx, filtersModel.ApplicationName) + if diags.HasError() { + return nil, diags + } + + subsystemName, diags := extractTracingLabelFilters(ctx, filtersModel.SubsystemName) + if diags.HasError() { + 
return nil, diags + } + + operationName, diags := extractTracingLabelFilters(ctx, filtersModel.OperationName) + if diags.HasError() { + return nil, diags + } + + spanFields, diags := extractTracingSpanFieldsFilterType(ctx, filtersModel.SpanFields) + if diags.HasError() { + return nil, diags + } + + query.Filters = &cxsdk.TracingFilter_TracingLabelFilters{ + TracingLabelFilters: &cxsdk.TracingLabelFilters{ + ApplicationName: applicationName, + SubsystemName: subsystemName, + OperationName: operationName, + SpanFields: spanFields, + }, + } + + return query, nil +} + +func extractTracingLabelFilters(ctx context.Context, tracingLabelFilters types.Set) ([]*cxsdk.TracingFilterType, diag.Diagnostics) { + if tracingLabelFilters.IsNull() || tracingLabelFilters.IsUnknown() { + return nil, nil + } + + var filtersObjects []types.Object + diags := tracingLabelFilters.ElementsAs(ctx, &filtersObjects, true) + if diags.HasError() { + return nil, diags + } + var filters []*cxsdk.TracingFilterType + for _, filtersObject := range filtersObjects { + filter, diags := extractTracingLabelFilter(ctx, filtersObject) + if diags.HasError() { + return nil, diags + } + filters = append(filters, filter) + } + + return filters, nil +} + +func extractTracingLabelFilter(ctx context.Context, filterModelObject types.Object) (*cxsdk.TracingFilterType, diag.Diagnostics) { + var filterModel TracingFilterTypeModel + if diags := filterModelObject.As(ctx, &filterModel, basetypes.ObjectAsOptions{}); diags.HasError() { + return nil, diags + } + + values, diags := typeStringSliceToWrappedStringSlice(ctx, filterModel.Values.Elements()) + if diags.HasError() { + return nil, diags + } + + return &cxsdk.TracingFilterType{ + Values: values, + Operation: tracingFilterOperationSchemaToProtoMap[filterModel.Operation.ValueString()], + }, nil +} + +func extractTracingSpanFieldsFilterType(ctx context.Context, spanFields types.Set) ([]*cxsdk.TracingSpanFieldsFilterType, diag.Diagnostics) { + if spanFields.IsNull() || spanFields.IsUnknown() { + return nil, nil + } + + var spanFieldsObjects []types.Object + diags := spanFields.ElementsAs(ctx, &spanFieldsObjects, true) + var filters []*cxsdk.TracingSpanFieldsFilterType + for _, element := range spanFieldsObjects { + var filterModel TracingSpanFieldsFilterModel + if diags = element.As(ctx, &filterModel, basetypes.ObjectAsOptions{}); diags.HasError() { + return nil, diags + } + + filterType, diags := extractTracingLabelFilter(ctx, filterModel.FilterType) + if diags.HasError() { + return nil, diags + } + + filters = append(filters, &cxsdk.TracingSpanFieldsFilterType{ + Key: typeStringToWrapperspbString(filterModel.Key), + FilterType: filterType, + }) + } + + return filters, nil +} + +func extractTracingTimeWindow(ctx context.Context, window types.Object) (*cxsdk.TracingTimeWindow, diag.Diagnostics) { + if window.IsNull() || window.IsUnknown() { + return nil, nil + } + + var windowModel TracingTimeWindowModel + if diags := window.As(ctx, &windowModel, basetypes.ObjectAsOptions{}); diags.HasError() { + return nil, diags + } + + if specificValue := windowModel.SpecificValue; !(specificValue.IsNull() || specificValue.IsUnknown()) { + return &cxsdk.TracingTimeWindow{ + Type: &cxsdk.TracingTimeWindow_TracingTimeWindowValue{ + TracingTimeWindowValue: tracingTimeWindowSchemaToProtoMap[specificValue.ValueString()], + }, + }, nil + } + + return nil, diag.Diagnostics{diag.NewErrorDiagnostic("Invalid Time Window", "Time Window is not valid")} + +} + +func expandMetricMoreThanUsualAlertTypeDefinition(ctx 
context.Context, properties *cxsdk.AlertDefProperties, metricMoreThanUsual types.Object) (*cxsdk.AlertDefProperties, diag.Diagnostics) { + if metricMoreThanUsual.IsNull() || metricMoreThanUsual.IsUnknown() { + return properties, nil + } + + var metricMoreThanUsualModel MetricMoreThanUsualModel + if diags := metricMoreThanUsual.As(ctx, &metricMoreThanUsualModel, basetypes.ObjectAsOptions{}); diags.HasError() { + return nil, diags + } + + metricFilter, diags := extractMetricFilter(ctx, metricMoreThanUsualModel.MetricFilter) + if diags.HasError() { + return nil, diags + } + + ofTheLast, diags := extractMetricTimeWindow(ctx, metricMoreThanUsualModel.OfTheLast) + if diags.HasError() { + return nil, diags + } + + properties.TypeDefinition = &cxsdk.AlertDefProperties_MetricMoreThanUsual{ + MetricMoreThanUsual: &cxsdk.MetricMoreThanUsualTypeDefinition{ + MetricFilter: metricFilter, + Threshold: typeInt64ToWrappedUint32(metricMoreThanUsualModel.Threshold), + ForOverPct: typeInt64ToWrappedUint32(metricMoreThanUsualModel.ForOverPct), + OfTheLast: ofTheLast, + MinNonNullValuesPct: typeInt64ToWrappedUint32(metricMoreThanUsualModel.MinNonNullValuesPct), + }, + } + properties.AlertDefType = cxsdk.AlertDefType_ALERT_DEF_TYPE_METRIC_MORE_THAN_USUAL + + return properties, nil +} + +func expandMetricLessThanUsualAlertTypeDefinition(ctx context.Context, properties *cxsdk.AlertDefProperties, metricLessThanUsual types.Object) (*cxsdk.AlertDefProperties, diag.Diagnostics) { + if metricLessThanUsual.IsNull() || metricLessThanUsual.IsUnknown() { + return properties, nil + } + + var metricLessThanUsualModel MetricLessThanUsualModel + if diags := metricLessThanUsual.As(ctx, &metricLessThanUsualModel, basetypes.ObjectAsOptions{}); diags.HasError() { + return nil, diags + } + + metricFilter, diags := extractMetricFilter(ctx, metricLessThanUsualModel.MetricFilter) + if diags.HasError() { + return nil, diags + } + + ofTheLast, diags := extractMetricTimeWindow(ctx, metricLessThanUsualModel.OfTheLast) + if diags.HasError() { + return nil, diags + } + + properties.TypeDefinition = &cxsdk.AlertDefProperties_MetricLessThanUsual{ + MetricLessThanUsual: &cxsdk.MetricLessThanUsualTypeDefinition{ + MetricFilter: metricFilter, + Threshold: typeInt64ToWrappedUint32(metricLessThanUsualModel.Threshold), + ForOverPct: typeInt64ToWrappedUint32(metricLessThanUsualModel.ForOverPct), + OfTheLast: ofTheLast, + MinNonNullValuesPct: typeInt64ToWrappedUint32(metricLessThanUsualModel.MinNonNullValuesPct), + }, + } + properties.AlertDefType = cxsdk.AlertDefType_ALERT_DEF_TYPE_METRIC_LESS_THAN_USUAL + + return properties, nil +} + +func expandMetricMoreThanOrEqualsAlertTypeDefinition(ctx context.Context, properties *cxsdk.AlertDefProperties, metricMoreThanOrEquals types.Object) (*cxsdk.AlertDefProperties, diag.Diagnostics) { + if metricMoreThanOrEquals.IsNull() || metricMoreThanOrEquals.IsUnknown() { + return properties, nil + } + + var metricMoreThanOrEqualsModel MetricMoreThanOrEqualsModel + if diags := metricMoreThanOrEquals.As(ctx, &metricMoreThanOrEqualsModel, basetypes.ObjectAsOptions{}); diags.HasError() { + return nil, diags + } + + metricFilter, diags := extractMetricFilter(ctx, metricMoreThanOrEqualsModel.MetricFilter) + if diags.HasError() { + return nil, diags + } + + ofTheLast, diags := extractMetricTimeWindow(ctx, metricMoreThanOrEqualsModel.OfTheLast) + if diags.HasError() { + return nil, diags + } + + missingValues, diags := extractMissingValues(ctx, metricMoreThanOrEqualsModel.MissingValues) + if diags.HasError() { + return 
nil, diags + } + + properties.TypeDefinition = &cxsdk.AlertDefProperties_MetricMoreThanOrEquals{ + MetricMoreThanOrEquals: &cxsdk.MetricMoreThanOrEqualsTypeDefinition{ + MetricFilter: metricFilter, + Threshold: typeFloat64ToWrapperspbFloat(metricMoreThanOrEqualsModel.Threshold), + ForOverPct: typeInt64ToWrappedUint32(metricMoreThanOrEqualsModel.ForOverPct), + OfTheLast: ofTheLast, + MissingValues: missingValues, + }, + } + properties.AlertDefType = cxsdk.AlertDefType_ALERT_DEF_TYPE_METRIC_MORE_THAN_OR_EQUALS + return properties, nil +} + +func expandMetricLessThanOrEqualsAlertTypeDefinition(ctx context.Context, properties *cxsdk.AlertDefProperties, equals types.Object) (*cxsdk.AlertDefProperties, diag.Diagnostics) { + if equals.IsNull() || equals.IsUnknown() { + return properties, nil + } + + var equalsModel MetricLessThanOrEqualsModel + if diags := equals.As(ctx, &equalsModel, basetypes.ObjectAsOptions{}); diags.HasError() { + return nil, diags + } + + metricFilter, diags := extractMetricFilter(ctx, equalsModel.MetricFilter) + if diags.HasError() { + return nil, diags + } + + ofTheLast, diags := extractMetricTimeWindow(ctx, equalsModel.OfTheLast) + if diags.HasError() { + return nil, diags + } + + missingValues, diags := extractMissingValues(ctx, equalsModel.MissingValues) + if diags.HasError() { + return nil, diags + } + + undetectedValuesManagement, diags := extractUndetectedValuesManagement(ctx, equalsModel.UndetectedValuesManagement) + if diags.HasError() { + return nil, diags + } + + properties.TypeDefinition = &cxsdk.AlertDefProperties_MetricLessThanOrEquals{ + MetricLessThanOrEquals: &cxsdk.MetricLessThanOrEqualsTypeDefinition{ + MetricFilter: metricFilter, + Threshold: typeFloat64ToWrapperspbFloat(equalsModel.Threshold), + ForOverPct: typeInt64ToWrappedUint32(equalsModel.ForOverPct), + OfTheLast: ofTheLast, + MissingValues: missingValues, + UndetectedValuesManagement: undetectedValuesManagement, + }, + } + properties.AlertDefType = cxsdk.AlertDefType_ALERT_DEF_TYPE_METRIC_LESS_THAN_OR_EQUALS + return properties, nil +} + +func expandTracingImmediateAlertTypeDefinition(ctx context.Context, properties *cxsdk.AlertDefProperties, tracingImmediate types.Object) (*cxsdk.AlertDefProperties, diag.Diagnostics) { + if tracingImmediate.IsNull() || tracingImmediate.IsUnknown() { + return properties, nil + } + + var tracingImmediateModel TracingImmediateModel + if diags := tracingImmediate.As(ctx, &tracingImmediateModel, basetypes.ObjectAsOptions{}); diags.HasError() { + return nil, diags + } + + tracingQuery, diags := extractTracingFilter(ctx, tracingImmediateModel.TracingFilter) + if diags.HasError() { + return nil, diags + } + + notificationPayloadFilter, diags := typeStringSliceToWrappedStringSlice(ctx, tracingImmediateModel.NotificationPayloadFilter.Elements()) + if diags.HasError() { + return nil, diags + } + + properties.TypeDefinition = &cxsdk.AlertDefProperties_TracingImmediate{ + TracingImmediate: &cxsdk.TracingImmediateTypeDefinition{ + TracingFilter: tracingQuery, + NotificationPayloadFilter: notificationPayloadFilter, }, } + properties.AlertDefType = cxsdk.AlertDefType_ALERT_DEF_TYPE_TRACING_IMMEDIATE + + return properties, nil } -func flowSchema() map[string]*schema.Schema { - return map[string]*schema.Schema{ - "stage": { - Type: schema.TypeList, - Required: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "group": { - Type: schema.TypeList, - Required: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "sub_alerts": { - Type: 
schema.TypeList, - MaxItems: 1, - Required: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "operator": { - Type: schema.TypeString, - Required: true, - ValidateFunc: validation.StringInSlice(alertValidFlowOperator, false), - Description: fmt.Sprintf("The operator to use on the alert. can be one of %q", alertValidFlowOperator), - }, - "flow_alert": { - Type: schema.TypeList, - Required: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "not": { - Type: schema.TypeBool, - Optional: true, - Default: false, - }, - "user_alert_id": { - Type: schema.TypeString, - Required: true, - }, - }, - }, - }, - }, - }, - }, - "next_operator": { - Type: schema.TypeString, - Required: true, - ValidateFunc: validation.StringInSlice(alertValidFlowOperator, false), - Description: fmt.Sprintf("The operator to use on the alert. can be one of %q", alertValidFlowOperator), - }, - }, - }, - }, - "time_window": timeSchema("Timeframe for flow stage."), - }, - }, - }, - "group_by": { - Type: schema.TypeList, - Optional: true, - Elem: &schema.Schema{ - Type: schema.TypeString, - }, +func expandFlowAlertTypeDefinition(ctx context.Context, properties *cxsdk.AlertDefProperties, flow types.Object) (*cxsdk.AlertDefProperties, diag.Diagnostics) { + if flow.IsNull() || flow.IsUnknown() { + return properties, nil + } + + var flowModel FlowModel + if diags := flow.As(ctx, &flowModel, basetypes.ObjectAsOptions{}); diags.HasError() { + return nil, diags + } + + stages, diags := extractFlowStages(ctx, flowModel.Stages) + if diags.HasError() { + return nil, diags + } + + properties.TypeDefinition = &cxsdk.AlertDefProperties_Flow{ + Flow: &cxsdk.FlowTypeDefinition{ + Stages: stages, + EnforceSuppression: typeBoolToWrapperspbBool(flowModel.EnforceSuppression), }, } + properties.AlertDefType = cxsdk.AlertDefType_ALERT_DEF_TYPE_FLOW + return properties, nil } -func resourceCoralogixAlertCreate(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { - createAlertRequest, diags := extractCreateAlertRequest(d) - if len(diags) != 0 { - return diags +func extractFlowStages(ctx context.Context, stages types.List) ([]*cxsdk.FlowStages, diag.Diagnostics) { + if stages.IsNull() || stages.IsUnknown() { + return nil, nil } - createAlertStr := protojson.Format(createAlertRequest) - log.Printf("[INFO] Creating new alert: %s", createAlertStr) - AlertResp, err := meta.(*clientset.ClientSet).Alerts().CreateAlert(ctx, createAlertRequest) - - if err != nil { - log.Printf("[ERROR] Received error: %s", err.Error()) - return diag.Errorf(formatRpcErrors(err, createAlertURL, createAlertStr)) + var stagesObjects []types.Object + diags := stages.ElementsAs(ctx, &stagesObjects, true) + if diags.HasError() { + return nil, diags } - alert := AlertResp.GetAlert() - log.Printf("[INFO] Submitted new alert: %s", protojson.Format(alert)) - d.SetId(alert.GetUniqueIdentifier().GetValue()) + var flowStages []*cxsdk.FlowStages + for _, stageObject := range stagesObjects { + stage, diags := extractFlowStage(ctx, stageObject) + if diags.HasError() { + return nil, diags + } + flowStages = append(flowStages, stage) + } - return resourceCoralogixAlertRead(ctx, d, meta) + return flowStages, nil } -func resourceCoralogixAlertRead(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { - id := wrapperspb.String(d.Id()) - getAlertRequest := &alerts.GetAlertByUniqueIdRequest{ - Id: id, +func extractFlowStage(ctx context.Context, object types.Object) (*cxsdk.FlowStages, diag.Diagnostics) 
{ + var stageModel FlowStageModel + if diags := object.As(ctx, &stageModel, basetypes.ObjectAsOptions{}); diags.HasError() { + return nil, diags } - log.Printf("[INFO] Reading alert %s", id) - alertResp, err := meta.(*clientset.ClientSet).Alerts().GetAlert(ctx, getAlertRequest) - if err != nil { - log.Printf("[ERROR] Received error: %s", err.Error()) - if status.Code(err) == codes.NotFound { - d.SetId("") - return diag.Diagnostics{diag.Diagnostic{ - Severity: diag.Warning, - Summary: fmt.Sprintf("Alert %q is in state, but no longer exists in Coralogix backend", id), - Detail: fmt.Sprintf("%s will be recreated when you apply", id), - }} + flowStage := &cxsdk.FlowStages{ + TimeframeMs: typeInt64ToWrappedInt64(stageModel.TimeframeMs), + TimeframeType: flowStageTimeFrameTypeSchemaToProtoMap[stageModel.TimeframeType.ValueString()], + } + + if flowStagesGroups := stageModel.FlowStagesGroups; !(flowStagesGroups.IsNull() || flowStagesGroups.IsUnknown()) { + flowStages, diags := extractFlowStagesGroups(ctx, flowStagesGroups) + if diags.HasError() { + return nil, diags } - return diag.Errorf(formatRpcErrors(err, getAlertURL, protojson.Format(getAlertRequest))) + flowStage.FlowStages = flowStages } - alert := alertResp.GetAlert() - alertStr := protojson.Format(alert) - log.Printf("[INFO] Received alert: %s", alertStr) - return setAlert(d, alert) + return flowStage, nil } -func resourceCoralogixAlertUpdate(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { - req, diags := extractAlert(d) - if len(diags) != 0 { - return diags +func extractFlowStagesGroups(ctx context.Context, groups types.List) (*cxsdk.FlowStages_FlowStagesGroups, diag.Diagnostics) { + if groups.IsNull() || groups.IsUnknown() { + return nil, nil } - updateAlertRequest := &alerts.UpdateAlertByUniqueIdRequest{ - Alert: req, + var groupsObjects []types.Object + diags := groups.ElementsAs(ctx, &groupsObjects, true) + if diags.HasError() { + return nil, diags } - updateAlertStr := protojson.Format(updateAlertRequest) - log.Printf("[INFO] Updating alert %s", updateAlertStr) - alertResp, err := meta.(*clientset.ClientSet).Alerts().UpdateAlert(ctx, updateAlertRequest) - if err != nil { - log.Printf("[ERROR] Received error: %s", err.Error()) - return diag.Errorf(formatRpcErrors(err, updateAlertURL, updateAlertStr)) + + var flowStagesGroups []*cxsdk.FlowStagesGroup + for _, groupObject := range groupsObjects { + group, diags := extractFlowStagesGroup(ctx, groupObject) + if diags.HasError() { + return nil, diags + } + flowStagesGroups = append(flowStagesGroups, group) } - updateAlertStr = protojson.Format(alertResp) - log.Printf("[INFO] Submitted updated alert: %s", updateAlertStr) - d.SetId(alertResp.GetAlert().GetUniqueIdentifier().GetValue()) - return resourceCoralogixAlertRead(ctx, d, meta) + return &cxsdk.FlowStages_FlowStagesGroups{FlowStagesGroups: &cxsdk.FlowStagesGroups{ + Groups: flowStagesGroups, + }}, nil + } -func resourceCoralogixAlertDelete(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { - id := wrapperspb.String(d.Id()) - deleteAlertRequest := &alerts.DeleteAlertByUniqueIdRequest{ - Id: id, +func extractFlowStagesGroup(ctx context.Context, object types.Object) (*cxsdk.FlowStagesGroup, diag.Diagnostics) { + var groupModel FlowStagesGroupModel + if diags := object.As(ctx, &groupModel, basetypes.ObjectAsOptions{}); diags.HasError() { + return nil, diags } - log.Printf("[INFO] Deleting alert %s", id) - _, err := meta.(*clientset.ClientSet).Alerts().DeleteAlert(ctx, 
deleteAlertRequest) - if err != nil { - log.Printf("[ERROR] Received error: %s", err.Error()) - return diag.Errorf(formatRpcErrors(err, deleteAlertURL, protojson.Format(deleteAlertRequest))) + alertDefs, diags := extractAlertDefs(ctx, groupModel.AlertDefs) + if diags.HasError() { + return nil, diags } - log.Printf("[INFO] alert %s deleted", id) - d.SetId("") - return nil + return &cxsdk.FlowStagesGroup{ + AlertDefs: alertDefs, + NextOp: flowStagesGroupNextOpSchemaToProtoMap[groupModel.NextOp.ValueString()], + AlertsOp: flowStagesGroupAlertsOpSchemaToProtoMap[groupModel.AlertsOp.ValueString()], + }, nil + } -func extractCreateAlertRequest(d *schema.ResourceData) (*alerts.CreateAlertRequest, diag.Diagnostics) { - var diags diag.Diagnostics - enabled := wrapperspb.Bool(d.Get("enabled").(bool)) - name := wrapperspb.String(d.Get("name").(string)) - description := wrapperspb.String(d.Get("description").(string)) - severity := expandAlertSeverity(d.Get("severity").(string)) - metaLabels := extractMetaLabels(d.Get("meta_labels")) - expirationDate := expandExpirationDate(d.Get("expiration_date")) - incidentSettings := expandIncidentSettings(d.Get("incident_settings")) - notificationGroups, dgs := expandNotificationGroups(d.Get("notifications_group")) - diags = append(diags, dgs...) - if len(diags) != 0 { - return nil, diags - } - payloadFilters := expandPayloadFilters(d.Get("payload_filters")) - scheduling := expandActiveWhen(d.Get("scheduling")) - alertTypeParams, tracingAlert, dgs := expandAlertType(d) - diags = append(diags, dgs...) - if len(diags) != 0 { - return nil, diags - } - - return &alerts.CreateAlertRequest{ - Name: name, - Description: description, - IsActive: enabled, - Severity: severity, - MetaLabels: metaLabels, - Expiration: expirationDate, - NotificationGroups: notificationGroups, - IncidentSettings: incidentSettings, - NotificationPayloadFilters: payloadFilters, - ActiveWhen: scheduling, - Filters: alertTypeParams.Filters, - Condition: alertTypeParams.Condition, - TracingAlert: tracingAlert, - }, diags -} - -func extractAlert(d *schema.ResourceData) (*alerts.Alert, diag.Diagnostics) { - var diags diag.Diagnostics - id := wrapperspb.String(d.Id()) - enabled := wrapperspb.Bool(d.Get("enabled").(bool)) - name := wrapperspb.String(d.Get("name").(string)) - description := wrapperspb.String(d.Get("description").(string)) - severity := expandAlertSeverity(d.Get("severity").(string)) - metaLabels := extractMetaLabels(d.Get("meta_labels")) - expirationDate := expandExpirationDate(d.Get("expiration_date")) - incidentSettings := expandIncidentSettings(d.Get("incident_settings")) - notificationGroups, dgs := expandNotificationGroups(d.Get("notifications_group")) - diags = append(diags, dgs...) - payloadFilters := expandPayloadFilters(d.Get("payload_filters")) - scheduling := expandActiveWhen(d.Get("scheduling")) - alertTypeParams, tracingAlert, dgs := expandAlertType(d) - diags = append(diags, dgs...) 
- if len(diags) != 0 { +func extractAlertDefs(ctx context.Context, defs types.List) ([]*cxsdk.FlowStagesGroupsAlertDefs, diag.Diagnostics) { + if defs.IsNull() || defs.IsUnknown() { + return nil, nil + } + + var defsObjects []types.Object + diags := defs.ElementsAs(ctx, &defsObjects, true) + if diags.HasError() { return nil, diags } - return &alerts.Alert{ - UniqueIdentifier: id, - Name: name, - Description: description, - IsActive: enabled, - Severity: severity, - MetaLabels: metaLabels, - Expiration: expirationDate, - IncidentSettings: incidentSettings, - NotificationGroups: notificationGroups, - NotificationPayloadFilters: payloadFilters, - ActiveWhen: scheduling, - Filters: alertTypeParams.Filters, - Condition: alertTypeParams.Condition, - TracingAlert: tracingAlert, - }, diags + var alertDefs []*cxsdk.FlowStagesGroupsAlertDefs + for _, defObject := range defsObjects { + def, diags := extractAlertDef(ctx, defObject) + if diags.HasError() { + return nil, diags + } + alertDefs = append(alertDefs, def) + } + + return alertDefs, nil + } -func expandPayloadFilters(v interface{}) []*wrapperspb.StringValue { - return interfaceSliceToWrappedStringSlice(v.(*schema.Set).List()) +func extractAlertDef(ctx context.Context, def types.Object) (*cxsdk.FlowStagesGroupsAlertDefs, diag.Diagnostics) { + var defModel FlowStagesGroupsAlertDefsModel + if diags := def.As(ctx, &defModel, basetypes.ObjectAsOptions{}); diags.HasError() { + return nil, diags + } + + return &cxsdk.FlowStagesGroupsAlertDefs{ + Id: typeStringToWrapperspbString(defModel.Id), + Not: typeBoolToWrapperspbBool(defModel.Not), + }, nil + } -func setAlert(d *schema.ResourceData, alert *alerts.Alert) diag.Diagnostics { - if err := d.Set("name", alert.GetName().GetValue()); err != nil { - return diag.FromErr(err) +func (r *AlertResource) Read(ctx context.Context, req resource.ReadRequest, resp *resource.ReadResponse) { + var state *AlertResourceModel + diags := req.State.Get(ctx, &state) + resp.Diagnostics.Append(diags...) + if resp.Diagnostics.HasError() { + return } - if err := d.Set("description", alert.GetDescription().GetValue()); err != nil { - return diag.FromErr(err) + //Get refreshed Alert value from Coralogix + id := state.ID.ValueString() + log.Printf("[INFO] Reading Alert: %s", id) + getAlertReq := &cxsdk.GetAlertDefRequest{Id: wrapperspb.String(id)} + getAlertResp, err := r.client.Get(ctx, getAlertReq) + if err != nil { + log.Printf("[ERROR] Received error: %s", err.Error()) + if status.Code(err) == codes.NotFound { + resp.Diagnostics.AddWarning( + fmt.Sprintf("Alert %q is in state, but no longer exists in Coralogix backend", id), + fmt.Sprintf("%s will be recreated when you apply", id), + ) + resp.State.RemoveResource(ctx) + } else { + resp.Diagnostics.AddError( + "Error reading Alert", + formatRpcErrors(err, getAlertURL, protojson.Format(getAlertReq)), + ) + } + return } + alert := getAlertResp.GetAlertDef() + log.Printf("[INFO] Received Alert: %s", protojson.Format(alert)) - if err := d.Set("enabled", alert.GetIsActive().GetValue()); err != nil { - return diag.FromErr(err) + state, diags = flattenAlert(ctx, alert) + if diags.HasError() { + resp.Diagnostics.Append(diags...) + return } - if err := d.Set("severity", flattenAlertSeverity(alert.GetSeverity().String())); err != nil { - return diag.FromErr(err) + resp.Diagnostics.Append(resp.State.Set(ctx, &state)...) 
+} + +func flattenAlert(ctx context.Context, alert *cxsdk.AlertDef) (*AlertResourceModel, diag.Diagnostics) { + alertProperties := alert.GetAlertDefProperties() + alertSchedule, diags := flattenAlertSchedule(ctx, alertProperties) + if diags.HasError() { + return nil, diags } - if err := d.Set("meta_labels", flattenMetaLabels(alert.GetMetaLabels())); err != nil { - return diag.FromErr(err) + alertTypeDefinition, diags := flattenAlertTypeDefinition(ctx, alertProperties) + if diags.HasError() { + return nil, diags } - if err := d.Set("expiration_date", flattenExpirationDate(alert.GetExpiration())); err != nil { - return diag.FromErr(err) + incidentsSettings, diags := flattenIncidentsSettings(ctx, alertProperties.GetIncidentsSettings()) + if diags.HasError() { + return nil, diags } - incidentSettings := flattenIncidentSettings(alert.GetIncidentSettings()) - if err := d.Set("incident_settings", incidentSettings); err != nil { - return diag.FromErr(err) + notificationGroup, diags := flattenNotificationGroup(ctx, alertProperties.GetNotificationGroup()) + if diags.HasError() { + return nil, diags } - if err := d.Set("notifications_group", flattenNotificationGroups(alert.GetNotificationGroups(), incidentSettings != nil)); err != nil { - return diag.FromErr(err) + labels, diags := types.MapValueFrom(ctx, types.StringType, alertProperties.GetLabels()) + + return &AlertResourceModel{ + ID: wrapperspbStringToTypeString(alert.GetId()), + Name: wrapperspbStringToTypeString(alertProperties.GetName()), + Description: wrapperspbStringToTypeString(alertProperties.GetDescription()), + Enabled: wrapperspbBoolToTypeBool(alertProperties.GetEnabled()), + Priority: types.StringValue(alertPriorityProtoToSchemaMap[alertProperties.GetPriority()]), + Schedule: alertSchedule, + TypeDefinition: alertTypeDefinition, + GroupBy: wrappedStringSliceToTypeStringSet(alertProperties.GetGroupBy()), + IncidentsSettings: incidentsSettings, + NotificationGroup: notificationGroup, + Labels: labels, + }, nil +} + +func flattenNotificationGroup(ctx context.Context, notificationGroup *cxsdk.AlertDefNotificationGroup) (types.Object, diag.Diagnostics) { + if notificationGroup == nil { + return types.ObjectNull(notificationGroupAttr()), nil } - if err := d.Set("payload_filters", wrappedStringSliceToStringSlice(alert.GetNotificationPayloadFilters())); err != nil { - return diag.FromErr(err) + advancedTargetSettings, diags := flattenAdvancedTargetSettings(ctx, notificationGroup.GetAdvanced()) + if diags.HasError() { + return types.ObjectNull(notificationGroupAttr()), diags } - if err := d.Set("scheduling", flattenScheduling(d, alert.GetActiveWhen())); err != nil { - return diag.FromErr(err) + simpleTargetSettings, diags := flattenSimpleTargetSettings(ctx, notificationGroup.GetSimple()) + if diags.HasError() { + return types.ObjectNull(notificationGroupAttr()), diags } - alertType, alertTypeParams := flattenAlertType(alert) - if err := d.Set(alertType, alertTypeParams); err != nil { - return diag.FromErr(err) + notificationGroupModel := NotificationGroupModel{ + GroupByFields: wrappedStringSliceToTypeStringList(notificationGroup.GetGroupByFields()), + AdvancedTargetSettings: advancedTargetSettings, + SimpleTargetSettings: simpleTargetSettings, } - return nil + return types.ObjectValueFrom(ctx, notificationGroupAttr(), notificationGroupModel) } -func flattenIncidentSettings(settings *alerts.AlertIncidentSettings) interface{} { - if settings == nil { - return nil +func flattenAdvancedTargetSettings(ctx context.Context, advancedTargetSettings 
*cxsdk.AlertDefAdvancedTargets) (types.Set, diag.Diagnostics) {
+	if advancedTargetSettings == nil {
+		return types.SetNull(types.ObjectType{AttrTypes: advancedTargetSettingsAttr()}), nil
 	}
-	if !settings.GetUseAsNotificationSettings().GetValue() {
-		return nil
+
+	var notificationsModel []*AdvancedTargetSettingsModel
+	var diags diag.Diagnostics
+	for _, notification := range advancedTargetSettings.GetAdvancedTargetsSettings() {
+		retriggeringPeriod, dgs := flattenRetriggeringPeriod(ctx, notification)
+		if dgs.HasError() {
+			diags.Append(dgs...)
+			continue
+		}
+		notificationModel := AdvancedTargetSettingsModel{
+			NotifyOn:           types.StringValue(notifyOnProtoToSchemaMap[notification.GetNotifyOn()]),
+			RetriggeringPeriod: retriggeringPeriod,
+			IntegrationID:      types.StringNull(),
+			Recipients:         types.SetNull(types.StringType),
+		}
+		switch integrationType := notification.GetIntegration(); integrationType.GetIntegrationType().(type) {
+		case *cxsdk.IntegrationType_IntegrationId:
+			notificationModel.IntegrationID = types.StringValue(strconv.Itoa(int(integrationType.GetIntegrationId().GetValue())))
+		case *cxsdk.IntegrationType_Recipients:
+			notificationModel.Recipients = wrappedStringSliceToTypeStringSet(integrationType.GetRecipients().GetEmails())
+		}
+		notificationsModel = append(notificationsModel, &notificationModel)
 	}
-	return []interface{}{
-		map[string]interface{}{
-			"retriggering_period_minutes": int(settings.GetRetriggeringPeriodSeconds().GetValue() / 60),
-			"notify_on":                   alertProtoNotifyOnToSchemaNotifyOn[settings.GetNotifyOn()],
-		},
+
+	if diags.HasError() {
+		return types.SetNull(types.ObjectType{AttrTypes: advancedTargetSettingsAttr()}), diags
 	}
-}

-func flattenAlertSeverity(str string) string {
-	return alertProtoSeverityToSchemaSeverity[str]
+	return types.SetValueFrom(ctx, types.ObjectType{AttrTypes: advancedTargetSettingsAttr()}, notificationsModel)
 }

-func flattenMetaLabels(labels []*alerts.MetaLabel) interface{} {
-	result := make(map[string]interface{})
-	for _, l := range labels {
-		key := l.GetKey().GetValue()
-		val := l.GetValue().GetValue()
-		result[key] = val
+func flattenRetriggeringPeriod(ctx context.Context, notifications *cxsdk.AlertDefAdvancedTargetSettings) (types.Object, diag.Diagnostics) {
+	switch notificationPeriodType := notifications.RetriggeringPeriod.(type) {
+	case *cxsdk.AlertDefAdvancedTargetSettings_Minutes:
+		return types.ObjectValueFrom(ctx, retriggeringPeriodAttr(), RetriggeringPeriodModel{
+			Minutes: wrapperspbUint32ToTypeInt64(notificationPeriodType.Minutes),
+		})
+	case nil:
+		return types.ObjectNull(retriggeringPeriodAttr()), nil
+	default:
+		return types.ObjectNull(retriggeringPeriodAttr()), diag.Diagnostics{diag.NewErrorDiagnostic("Invalid Retriggering Period", fmt.Sprintf("Retriggering Period %v is not supported", notificationPeriodType))}
 	}
-	return result
 }

-func flattenNotificationGroups(notificationGroups []*alerts.AlertNotificationGroups, incidentSettingsConfigured bool) interface{} {
-	result := make([]interface{}, 0, len(notificationGroups))
-	for _, group := range notificationGroups {
-		notificationGroup := flattenNotificationGroup(group, incidentSettingsConfigured)
-		result = append(result, notificationGroup)
+func flattenSimpleTargetSettings(ctx context.Context, simpleTargetSettings *cxsdk.AlertDefTargetSimple) (types.Set, diag.Diagnostics) {
+	if simpleTargetSettings == nil {
+		return types.SetNull(types.ObjectType{AttrTypes: simpleTargetSettingsAttr()}), nil
 	}
-	return result
-}

-func flattenNotificationGroup(notificationGroup 
*alerts.AlertNotificationGroups, incidentSettingsConfigured bool) interface{} { - groupByFields := wrappedStringSliceToStringSlice(notificationGroup.GetGroupByFields()) - notifications := flattenNotifications(notificationGroup.GetNotifications(), incidentSettingsConfigured) - return map[string]interface{}{ - "group_by_fields": groupByFields, - "notification": notifications, + var notificationsModel []SimpleTargetSettingsModel + for _, notification := range simpleTargetSettings.GetIntegrations() { + notificationModel := SimpleTargetSettingsModel{ + IntegrationID: types.StringNull(), + Recipients: types.SetNull(types.StringType), + } + switch notification.GetIntegrationType().(type) { + case *cxsdk.IntegrationType_IntegrationId: + notificationModel.IntegrationID = types.StringValue(strconv.Itoa(int(notification.GetIntegrationId().GetValue()))) + case *cxsdk.IntegrationType_Recipients: + notificationModel.Recipients = wrappedStringSliceToTypeStringSet(notification.GetRecipients().GetEmails()) + } + notificationsModel = append(notificationsModel, notificationModel) } + return types.SetValueFrom(ctx, types.ObjectType{AttrTypes: simpleTargetSettingsAttr()}, notificationsModel) } -func flattenNotifications(notifications []*alerts.AlertNotification, incidentSettingsConfigured bool) interface{} { - result := make([]interface{}, 0, len(notifications)) - for _, n := range notifications { - notificationSubgroup := flattenNotificationSubgroup(n, incidentSettingsConfigured) - result = append(result, notificationSubgroup) +func flattenIncidentsSettings(ctx context.Context, incidentsSettings *cxsdk.AlertDefIncidentSettings) (types.Object, diag.Diagnostics) { + if incidentsSettings == nil { + return types.ObjectNull(incidentsSettingsAttr()), nil } - return result -} -func flattenNotificationSubgroup(notification *alerts.AlertNotification, incidentSettingsConfigured bool) interface{} { - notificationSchema := map[string]interface{}{} - if !incidentSettingsConfigured { - notificationSchema["retriggering_period_minutes"] = int(notification.GetRetriggeringPeriodSeconds().GetValue() / 60) - notificationSchema["notify_on"] = alertProtoNotifyOnToSchemaNotifyOn[notification.GetNotifyOn()] - } - switch integration := notification.GetIntegrationType().(type) { - case *alerts.AlertNotification_IntegrationId: - notificationSchema["integration_id"] = strconv.Itoa(int(integration.IntegrationId.GetValue())) - case *alerts.AlertNotification_Recipients: - notificationSchema["email_recipients"] = wrappedStringSliceToStringSlice(integration.Recipients.Emails) + retriggeringPeriod, diags := flattenIncidentsSettingsByRetriggeringPeriod(ctx, incidentsSettings) + if diags.HasError() { + return types.ObjectNull(incidentsSettingsAttr()), diags } - return notificationSchema + incidentsSettingsModel := IncidentsSettingsModel{ + NotifyOn: types.StringValue(notifyOnProtoToSchemaMap[incidentsSettings.GetNotifyOn()]), + RetriggeringPeriod: retriggeringPeriod, + } + return types.ObjectValueFrom(ctx, incidentsSettingsAttr(), incidentsSettingsModel) } -func flattenScheduling(d *schema.ResourceData, activeWhen *alerts.AlertActiveWhen) interface{} { - scheduling, ok := d.GetOk("scheduling") - if !ok || activeWhen == nil { - return nil +func flattenIncidentsSettingsByRetriggeringPeriod(ctx context.Context, settings *cxsdk.AlertDefIncidentSettings) (types.Object, diag.Diagnostics) { + if settings.RetriggeringPeriod == nil { + return types.ObjectNull(retriggeringPeriodAttr()), nil } - timeZone := 
scheduling.([]interface{})[0].(map[string]interface{})["time_zone"].(string) - - timeFrames := flattenTimeFrames(activeWhen, timeZone) + var periodModel RetriggeringPeriodModel + switch period := settings.RetriggeringPeriod.(type) { + case *cxsdk.AlertDefIncidentSettings_Minutes: + periodModel.Minutes = wrapperspbUint32ToTypeInt64(period.Minutes) + default: + return types.ObjectNull(retriggeringPeriodAttr()), diag.Diagnostics{diag.NewErrorDiagnostic("Invalid Retriggering Period", fmt.Sprintf("Retriggering Period %v is not supported", period))} + } + + return types.ObjectValueFrom(ctx, retriggeringPeriodAttr(), periodModel) +} + +func flattenAlertTypeDefinition(ctx context.Context, properties *cxsdk.AlertDefProperties) (types.Object, diag.Diagnostics) { + if properties.TypeDefinition == nil { + return types.ObjectNull(alertTypeDefinitionAttr()), nil + } + + alertTypeDefinitionModel := AlertTypeDefinitionModel{ + LogsImmediate: types.ObjectNull(logsImmediateAttr()), + LogsMoreThan: types.ObjectNull(logsMoreThanAttr()), + LogsLessThan: types.ObjectNull(logsLessThanAttr()), + LogsMoreThanUsual: types.ObjectNull(logsMoreThanUsualAttr()), + LogsRatioMoreThan: types.ObjectNull(logsRatioMoreThanAttr()), + LogsRatioLessThan: types.ObjectNull(logsRatioLessThanAttr()), + LogsNewValue: types.ObjectNull(logsNewValueAttr()), + LogsUniqueCount: types.ObjectNull(logsUniqueCountAttr()), + LogsTimeRelativeMoreThan: types.ObjectNull(logsTimeRelativeMoreThanAttr()), + LogsTimeRelativeLessThan: types.ObjectNull(logsTimeRelativeLessThanAttr()), + MetricMoreThan: types.ObjectNull(metricMoreThanAttr()), + MetricLessThan: types.ObjectNull(metricLessThanAttr()), + MetricMoreThanUsual: types.ObjectNull(metricMoreThanUsualAttr()), + MetricLessThanUsual: types.ObjectNull(metricLessThanUsualAttr()), + MetricLessThanOrEquals: types.ObjectNull(metricLessThanOrEqualsAttr()), + MetricMoreThanOrEquals: types.ObjectNull(metricMoreThanOrEqualsAttr()), + TracingImmediate: types.ObjectNull(tracingImmediateAttr()), + TracingMoreThan: types.ObjectNull(tracingMoreThanAttr()), + Flow: types.ObjectNull(flowAttr()), + } + var diags diag.Diagnostics + switch alertTypeDefinition := properties.TypeDefinition.(type) { + case *cxsdk.AlertDefProperties_LogsImmediate: + alertTypeDefinitionModel.LogsImmediate, diags = flattenLogsImmediate(ctx, alertTypeDefinition.LogsImmediate) + case *cxsdk.AlertDefProperties_LogsMoreThan: + alertTypeDefinitionModel.LogsMoreThan, diags = flattenLogsMoreThan(ctx, alertTypeDefinition.LogsMoreThan) + case *cxsdk.AlertDefProperties_LogsLessThan: + alertTypeDefinitionModel.LogsLessThan, diags = flattenLogsLessThan(ctx, alertTypeDefinition.LogsLessThan) + case *cxsdk.AlertDefProperties_LogsMoreThanUsual: + alertTypeDefinitionModel.LogsMoreThanUsual, diags = flattenLogsMoreThanUsual(ctx, alertTypeDefinition.LogsMoreThanUsual) + case *cxsdk.AlertDefProperties_LogsRatioMoreThan: + alertTypeDefinitionModel.LogsRatioMoreThan, diags = flattenLogsRatioMoreThan(ctx, alertTypeDefinition.LogsRatioMoreThan) + case *cxsdk.AlertDefProperties_LogsRatioLessThan: + alertTypeDefinitionModel.LogsRatioLessThan, diags = flattenLogsRatioLessThan(ctx, alertTypeDefinition.LogsRatioLessThan) + case *cxsdk.AlertDefProperties_LogsNewValue: + alertTypeDefinitionModel.LogsNewValue, diags = flattenLogsNewValue(ctx, alertTypeDefinition.LogsNewValue) + case *cxsdk.AlertDefProperties_LogsUniqueCount: + alertTypeDefinitionModel.LogsUniqueCount, diags = flattenLogsUniqueCount(ctx, alertTypeDefinition.LogsUniqueCount) + case 
*cxsdk.AlertDefProperties_LogsTimeRelativeMoreThan: + alertTypeDefinitionModel.LogsTimeRelativeMoreThan, diags = flattenLogsTimeRelativeMoreThan(ctx, alertTypeDefinition.LogsTimeRelativeMoreThan) + case *cxsdk.AlertDefProperties_LogsTimeRelativeLessThan: + alertTypeDefinitionModel.LogsTimeRelativeLessThan, diags = flattenLogsTimeRelativeLessThan(ctx, alertTypeDefinition.LogsTimeRelativeLessThan) + case *cxsdk.AlertDefProperties_MetricMoreThan: + alertTypeDefinitionModel.MetricMoreThan, diags = flattenMetricMoreThan(ctx, alertTypeDefinition.MetricMoreThan) + case *cxsdk.AlertDefProperties_MetricLessThan: + alertTypeDefinitionModel.MetricLessThan, diags = flattenMetricLessThan(ctx, alertTypeDefinition.MetricLessThan) + case *cxsdk.AlertDefProperties_MetricMoreThanUsual: + alertTypeDefinitionModel.MetricMoreThanUsual, diags = flattenMetricMoreThanUsual(ctx, alertTypeDefinition.MetricMoreThanUsual) + case *cxsdk.AlertDefProperties_MetricLessThanUsual: + alertTypeDefinitionModel.MetricLessThanUsual, diags = flattenMetricLessThanUsual(ctx, alertTypeDefinition.MetricLessThanUsual) + case *cxsdk.AlertDefProperties_MetricLessThanOrEquals: + alertTypeDefinitionModel.MetricLessThanOrEquals, diags = flattenMetricLessThanOrEquals(ctx, alertTypeDefinition.MetricLessThanOrEquals) + case *cxsdk.AlertDefProperties_MetricMoreThanOrEquals: + alertTypeDefinitionModel.MetricMoreThanOrEquals, diags = flattenMetricMoreThanOrEquals(ctx, alertTypeDefinition.MetricMoreThanOrEquals) + case *cxsdk.AlertDefProperties_TracingImmediate: + alertTypeDefinitionModel.TracingImmediate, diags = flattenTracingImmediate(ctx, alertTypeDefinition.TracingImmediate) + case *cxsdk.AlertDefProperties_TracingMoreThan: + alertTypeDefinitionModel.TracingMoreThan, diags = flattenTracingMoreThan(ctx, alertTypeDefinition.TracingMoreThan) + case *cxsdk.AlertDefProperties_Flow: + alertTypeDefinitionModel.Flow, diags = flattenFlow(ctx, alertTypeDefinition.Flow) + default: + return types.ObjectNull(alertTypeDefinitionAttr()), diag.Diagnostics{diag.NewErrorDiagnostic("Invalid Alert Type Definition", fmt.Sprintf("Alert Type %v Definition is not valid", alertTypeDefinition))} + } - return []interface{}{ - map[string]interface{}{ - "time_zone": timeZone, - "time_frame": timeFrames, - }, + if diags.HasError() { + return types.ObjectNull(alertTypeDefinitionAttr()), diags } + + return types.ObjectValueFrom(ctx, alertTypeDefinitionAttr(), alertTypeDefinitionModel) } -func flattenTimeFrames(activeWhen *alerts.AlertActiveWhen, timeZone string) interface{} { - timeFrames := activeWhen.GetTimeframes() - utc := flattenUtc(timeZone) - result := schema.NewSet(hashTimeFrames(), []interface{}{}) - for _, tf := range timeFrames { - m := flattenTimeFrame(tf, utc) - result.Add(m) +func flattenLogsImmediate(ctx context.Context, immediate *cxsdk.LogsImmediateTypeDefinition) (types.Object, diag.Diagnostics) { + if immediate == nil { + return types.ObjectNull(logsImmediateAttr()), nil } - return result -} -func flattenUtc(timeZone string) int32 { - utcStr := strings.Split(timeZone, "UTC")[1] - utc, _ := strconv.Atoi(utcStr) - return int32(utc) + logsFilter, diags := flattenAlertsLogsFilter(ctx, immediate.GetLogsFilter()) + if diags.HasError() { + return types.ObjectNull(logsImmediateAttr()), diags + } + + logsImmediateModel := LogsImmediateModel{ + LogsFilter: logsFilter, + NotificationPayloadFilter: wrappedStringSliceToTypeStringSet(immediate.GetNotificationPayloadFilter()), + } + return types.ObjectValueFrom(ctx, logsImmediateAttr(), logsImmediateModel) } -func 
flattenTimeFrame(tf *alerts.AlertActiveTimeframe, utc int32) map[string]interface{} { - tr := tf.GetRange() - activityStartGMT, activityEndGMT := tr.GetStart(), tr.GetEnd() - daysOffset := getDaysOffsetFromGMT(activityStartGMT, utc) - activityStartUTC := flattenTimeInDay(activityStartGMT, utc) - activityEndUTC := flattenTimeInDay(activityEndGMT, utc) - daysOfWeek := flattenDaysOfWeek(tf.GetDaysOfWeek(), daysOffset) +func flattenAlertsLogsFilter(ctx context.Context, filter *cxsdk.LogsFilter) (types.Object, diag.Diagnostics) { + if filter == nil { + return types.ObjectNull(logsFilterAttr()), nil + } - return map[string]interface{}{ - "days_enabled": daysOfWeek, - "start_time": activityStartUTC, - "end_time": activityEndUTC, + var diags diag.Diagnostics + var logsFilterModer AlertsLogsFilterModel + switch filterType := filter.FilterType.(type) { + case *cxsdk.LogsFilter_LuceneFilter: + logsFilterModer.LuceneFilter, diags = flattenLuceneFilter(ctx, filterType.LuceneFilter) + default: + return types.ObjectNull(logsFilterAttr()), diag.Diagnostics{diag.NewErrorDiagnostic("Invalid Logs Filter", fmt.Sprintf("Logs Filter %v is not supported", filterType))} } -} -func getDaysOffsetFromGMT(activityStartGMT *alerts.Time, utc int32) int32 { - daysOffset := int32(activityStartGMT.GetHours()+utc) / 24 - if daysOffset < 0 { - daysOffset += 7 + if diags.HasError() { + return types.ObjectNull(logsFilterAttr()), diags } - return daysOffset + return types.ObjectValueFrom(ctx, logsFilterAttr(), logsFilterModer) } -func flattenTimeInDay(t *alerts.Time, utc int32) string { - hours := convertGmtToUtc(t.GetHours(), utc) - hoursStr := toTwoDigitsFormat(hours) - minStr := toTwoDigitsFormat(t.GetMinutes()) - return fmt.Sprintf("%s:%s", hoursStr, minStr) -} +func flattenLuceneFilter(ctx context.Context, filter *cxsdk.LuceneFilter) (types.Object, diag.Diagnostics) { + if filter == nil { + return types.ObjectNull(luceneFilterAttr()), nil + } -func flattenDaysOfWeek(daysOfWeek []alerts.DayOfWeek, daysOffset int32) interface{} { - result := schema.NewSet(schema.HashString, []interface{}{}) - for _, d := range daysOfWeek { - dayConvertedFromGmtToUtc := alerts.DayOfWeek((int32(d) + daysOffset) % 7) - day := alertProtoDayOfWeekToSchemaDayOfWeek[dayConvertedFromGmtToUtc.String()] - result.Add(day) + labelFilters, diags := flattenLabelFilters(ctx, filter.GetLabelFilters()) + if diags.HasError() { + return types.ObjectNull(luceneFilterAttr()), diags } - return result + + return types.ObjectValueFrom(ctx, luceneFilterAttr(), LuceneFilterModel{ + LuceneQuery: wrapperspbStringToTypeString(filter.GetLuceneQuery()), + LabelFilters: labelFilters, + }) } -func flattenAlertType(a *alerts.Alert) (alertType string, alertSchema interface{}) { - filters := a.GetFilters() - condition := a.GetCondition().GetCondition() +func flattenLabelFilters(ctx context.Context, filters *cxsdk.LabelFilters) (types.Object, diag.Diagnostics) { + if filters == nil { + return types.ObjectNull(labelFiltersAttr()), nil + } - switch filters.GetFilterType() { - case alerts.AlertFilters_FILTER_TYPE_TEXT_OR_UNSPECIFIED: - if _, ok := condition.(*alerts.AlertCondition_NewValue); ok { - alertType = "new_value" - alertSchema = flattenNewValueAlert(filters, condition) - } else { - alertType = "standard" - alertSchema = flattenStandardAlert(filters, condition) - } - case alerts.AlertFilters_FILTER_TYPE_RATIO: - alertType = "ratio" - alertSchema = flattenRatioAlert(filters, condition) - case alerts.AlertFilters_FILTER_TYPE_UNIQUE_COUNT: - alertType = "unique_count" - 
alertSchema = flattenUniqueCountAlert(filters, condition) - case alerts.AlertFilters_FILTER_TYPE_TIME_RELATIVE: - alertType = "time_relative" - alertSchema = flattenTimeRelativeAlert(filters, condition) - case alerts.AlertFilters_FILTER_TYPE_METRIC: - alertType = "metric" - alertSchema = flattenMetricAlert(filters, condition) - case alerts.AlertFilters_FILTER_TYPE_TRACING: - alertType = "tracing" - alertSchema = flattenTracingAlert(condition, a.TracingAlert) - case alerts.AlertFilters_FILTER_TYPE_FLOW: - alertType = "flow" - alertSchema = flattenFlowAlert(condition) - } - - return -} - -func flattenNewValueAlert(filters *alerts.AlertFilters, condition interface{}) interface{} { - alertSchema := flattenCommonAlert(filters) - conditionMap := flattenNewValueCondition(condition) - alertSchema["condition"] = []interface{}{conditionMap} - return []interface{}{alertSchema} -} - -func flattenNewValueCondition(condition interface{}) interface{} { - conditionParams := condition.(*alerts.AlertCondition_NewValue).NewValue.GetParameters() - return map[string]interface{}{ - "time_window": alertProtoNewValueTimeFrameToSchemaTimeFrame[conditionParams.GetTimeframe().String()], - "key_to_track": conditionParams.GetGroupBy()[0].GetValue(), - } -} - -func flattenStandardAlert(filters *alerts.AlertFilters, condition interface{}) interface{} { - alertSchemaMap := flattenCommonAlert(filters) - conditionSchema := flattenStandardCondition(condition) - alertSchemaMap["condition"] = conditionSchema - return []interface{}{alertSchemaMap} -} - -func flattenStandardCondition(condition interface{}) (conditionSchema interface{}) { - var conditionParams *alerts.ConditionParameters - switch condition := condition.(type) { - case *alerts.AlertCondition_Immediate: - conditionSchema = []interface{}{ - map[string]interface{}{ - "immediately": true, - }, - } - case *alerts.AlertCondition_LessThan: - conditionParams = condition.LessThan.GetParameters() - groupBy := wrappedStringSliceToStringSlice(conditionParams.GroupBy) - m := map[string]interface{}{ - "less_than": true, - "threshold": int(conditionParams.GetThreshold().GetValue()), - "group_by": groupBy, - "time_window": alertProtoTimeFrameToSchemaTimeFrame[conditionParams.Timeframe.String()], - } + applicationName, diags := flattenLabelFilterTypes(ctx, filters.GetApplicationName()) + if diags.HasError() { + return types.ObjectNull(labelFiltersAttr()), diags + } - if len(groupBy) > 0 { - m["manage_undetected_values"] = flattenManageUndetectedValues(conditionParams.GetRelatedExtendedData()) - } + subsystemName, diags := flattenLabelFilterTypes(ctx, filters.GetSubsystemName()) + if diags.HasError() { + return types.ObjectNull(labelFiltersAttr()), diags + } - conditionSchema = []interface{}{m} - case *alerts.AlertCondition_MoreThan: - conditionParams = condition.MoreThan.GetParameters() - conditionSchema = []interface{}{ - map[string]interface{}{ - "more_than": true, - "threshold": int(conditionParams.GetThreshold().GetValue()), - "group_by": wrappedStringSliceToStringSlice(conditionParams.GroupBy), - "time_window": alertProtoTimeFrameToSchemaTimeFrame[conditionParams.Timeframe.String()], - "evaluation_window": alertProtoToSchemaEvaluationWindow[condition.MoreThan.GetEvaluationWindow()], - }, - } - case *alerts.AlertCondition_MoreThanUsual: - conditionParams = condition.MoreThanUsual.GetParameters() - conditionMap := map[string]interface{}{ - "more_than_usual": true, - "threshold": int(conditionParams.GetThreshold().GetValue()), - "time_window": 
alertProtoTimeFrameToSchemaTimeFrame[conditionParams.GetTimeframe().String()], - "group_by": wrappedStringSliceToStringSlice(conditionParams.GroupBy), - } - conditionSchema = []interface{}{ - conditionMap, - } + severities, diags := flattenLogSeverities(ctx, filters.GetSeverities()) + if diags.HasError() { + return types.ObjectNull(labelFiltersAttr()), diags } - return + return types.ObjectValueFrom(ctx, labelFiltersAttr(), LabelFiltersModel{ + ApplicationName: applicationName, + SubsystemName: subsystemName, + Severities: severities, + }) } -func flattenManageUndetectedValues(data *alerts.RelatedExtendedData) interface{} { - if data == nil { - return []map[string]interface{}{ - { - "enable_triggering_on_undetected_values": true, - "auto_retire_ratio": flattenDeadmanRatio(alerts.CleanupDeadmanDuration_CLEANUP_DEADMAN_DURATION_NEVER_OR_UNSPECIFIED), - }, - } - } else if data.GetShouldTriggerDeadman().GetValue() { - return []map[string]interface{}{ - { - "enable_triggering_on_undetected_values": true, - "auto_retire_ratio": flattenDeadmanRatio(data.GetCleanupDeadmanDuration()), - }, +func flattenLabelFilterTypes(ctx context.Context, name []*cxsdk.LabelFilterType) (types.Set, diag.Diagnostics) { + var labelFilterTypes []LabelFilterTypeModel + var diags diag.Diagnostics + for _, lft := range name { + labelFilterType := LabelFilterTypeModel{ + Value: wrapperspbStringToTypeString(lft.GetValue()), + Operation: types.StringValue(logFilterOperationTypeProtoToSchemaMap[lft.GetOperation()]), } + labelFilterTypes = append(labelFilterTypes, labelFilterType) } - - return []map[string]interface{}{ - { - "enable_triggering_on_undetected_values": false, - }, + if diags.HasError() { + return types.SetNull(types.ObjectType{AttrTypes: labelFilterTypesAttr()}), diags } + return types.SetValueFrom(ctx, types.ObjectType{AttrTypes: labelFilterTypesAttr()}, labelFilterTypes) + } -func flattenDeadmanRatio(cleanupDeadmanDuration alerts.CleanupDeadmanDuration) string { - deadmanRatioStr := alerts.CleanupDeadmanDuration_name[int32(cleanupDeadmanDuration)] - deadmanRatio := alertProtoDeadmanRatiosToSchemaDeadmanRatios[deadmanRatioStr] - return deadmanRatio +func flattenLogSeverities(ctx context.Context, severities []cxsdk.LogSeverity) (types.Set, diag.Diagnostics) { + var result []attr.Value + for _, severity := range severities { + result = append(result, types.StringValue(logSeverityProtoToSchemaMap[severity])) + } + return types.SetValueFrom(ctx, types.StringType, result) } -func flattenRatioAlert(filters *alerts.AlertFilters, condition interface{}) interface{} { - query1Map := flattenCommonAlert(filters) - query1Map["alias"] = filters.GetAlias().GetValue() - query2 := filters.GetRatioAlerts()[0] - query2Map := flattenQuery2ParamsMap(query2) - conditionMap := flattenRatioCondition(condition, query2) +func flattenLogsMoreThan(ctx context.Context, moreThan *cxsdk.LogsMoreThanTypeDefinition) (types.Object, diag.Diagnostics) { + if moreThan == nil { + return types.ObjectNull(logsMoreThanAttr()), nil + } + + logsFilter, diags := flattenAlertsLogsFilter(ctx, moreThan.GetLogsFilter()) + if diags.HasError() { + return types.ObjectNull(logsMoreThanAttr()), diags + } - return []interface{}{ - map[string]interface{}{ - "query_1": []interface{}{query1Map}, - "query_2": []interface{}{query2Map}, - "condition": []interface{}{conditionMap}, - }, + timeWindow, diags := flattenLogsTimeWindow(ctx, moreThan.GetTimeWindow()) + if diags.HasError() { + return types.ObjectNull(logsMoreThanAttr()), diags + } + + logsMoreThanModel := 
LogsMoreThanModel{ + LogsFilter: logsFilter, + Threshold: wrapperspbUint32ToTypeInt64(moreThan.GetThreshold()), + TimeWindow: timeWindow, + EvaluationWindow: types.StringValue(evaluationWindowTypeProtoToSchemaMap[moreThan.GetEvaluationWindow()]), + NotificationPayloadFilter: wrappedStringSliceToTypeStringSet(moreThan.GetNotificationPayloadFilter()), } + return types.ObjectValueFrom(ctx, logsMoreThanAttr(), logsMoreThanModel) } -func flattenRatioCondition(condition interface{}, query2 *alerts.AlertFilters_RatioAlert) interface{} { - var conditionParams *alerts.ConditionParameters - ratioParamsMap := make(map[string]interface{}) +func flattenLogsTimeWindow(ctx context.Context, timeWindow *cxsdk.LogsTimeWindow) (types.Object, diag.Diagnostics) { + if timeWindow == nil { + return types.ObjectNull(logsTimeWindowAttr()), nil + } - lessThan := false - switch condition := condition.(type) { - case *alerts.AlertCondition_LessThan: - conditionParams = condition.LessThan.GetParameters() - ratioParamsMap["less_than"] = true - lessThan = true - case *alerts.AlertCondition_MoreThan: - conditionParams = condition.MoreThan.GetParameters() - ratioParamsMap["more_than"] = true + switch timeWindowType := timeWindow.Type.(type) { + case *cxsdk.LogsTimeWindow_LogsTimeWindowSpecificValue: + return types.ObjectValueFrom(ctx, logsTimeWindowAttr(), LogsTimeWindowModel{ + SpecificValue: types.StringValue(logsTimeWindowValueProtoToSchemaMap[timeWindowType.LogsTimeWindowSpecificValue]), + }) default: - return nil + return types.ObjectNull(logsTimeWindowAttr()), diag.Diagnostics{diag.NewErrorDiagnostic("Invalid Time Window", fmt.Sprintf("Time Window %v is not supported", timeWindowType))} } - ratioParamsMap["ratio_threshold"] = conditionParams.GetThreshold().GetValue() - ratioParamsMap["time_window"] = alertProtoTimeFrameToSchemaTimeFrame[conditionParams.GetTimeframe().String()] - ratioParamsMap["ignore_infinity"] = conditionParams.GetIgnoreInfinity().GetValue() +} - groupByQ1 := conditionParams.GetGroupBy() - groupByQ2 := query2.GetGroupBy() - var groupBy []string - if len(groupByQ1) > 0 { - groupBy = wrappedStringSliceToStringSlice(groupByQ1) - if len(groupByQ2) > 0 { - ratioParamsMap["group_by_both"] = true - } else { - ratioParamsMap["group_by_q1"] = true - } - } else if len(groupByQ2) > 0 { - groupBy = wrappedStringSliceToStringSlice(groupByQ2) - ratioParamsMap["group_by_q1"] = true +func flattenLogsLessThan(ctx context.Context, lessThan *cxsdk.LogsLessThanTypeDefinition) (types.Object, diag.Diagnostics) { + if lessThan == nil { + return types.ObjectNull(logsLessThanAttr()), nil } - ratioParamsMap["group_by"] = groupBy - if len(groupBy) > 0 && lessThan { - ratioParamsMap["manage_undetected_values"] = flattenManageUndetectedValues(conditionParams.GetRelatedExtendedData()) + logsFilter, diags := flattenAlertsLogsFilter(ctx, lessThan.GetLogsFilter()) + if diags.HasError() { + return types.ObjectNull(logsLessThanAttr()), diags } - return ratioParamsMap -} + timeWindow, diags := flattenLogsTimeWindow(ctx, lessThan.GetTimeWindow()) + if diags.HasError() { + return types.ObjectNull(logsLessThanAttr()), diags + } -func flattenQuery2ParamsMap(query2 *alerts.AlertFilters_RatioAlert) interface{} { - return map[string]interface{}{ - "alias": query2.GetAlias().GetValue(), - "search_query": query2.GetText().GetValue(), - "severities": extractSeverities(query2.GetSeverities()), - "applications": wrappedStringSliceToStringSlice(query2.GetApplications()), - "subsystems": wrappedStringSliceToStringSlice(query2.GetSubsystems()), + 
undetectedValuesManagement, diags := flattenUndetectedValuesManagement(ctx, lessThan.GetUndetectedValuesManagement()) + if diags.HasError() { + return types.ObjectNull(logsLessThanAttr()), diags } -} -func flattenUniqueCountAlert(filters *alerts.AlertFilters, condition interface{}) interface{} { - alertSchema := flattenCommonAlert(filters) - conditionMap := flattenUniqueCountCondition(condition) - alertSchema["condition"] = []interface{}{conditionMap} - return []interface{}{alertSchema} + logsLessThanModel := LogsLessThanModel{ + LogsFilter: logsFilter, + Threshold: wrapperspbUint32ToTypeInt64(lessThan.GetThreshold()), + TimeWindow: timeWindow, + UndetectedValuesManagement: undetectedValuesManagement, + NotificationPayloadFilter: wrappedStringSliceToTypeStringSet(lessThan.GetNotificationPayloadFilter()), + } + return types.ObjectValueFrom(ctx, logsLessThanAttr(), logsLessThanModel) } -func flattenUniqueCountCondition(condition interface{}) interface{} { - conditionParams := condition.(*alerts.AlertCondition_UniqueCount).UniqueCount.GetParameters() - conditionMap := map[string]interface{}{ - "unique_count_key": conditionParams.GetCardinalityFields()[0].GetValue(), - "max_unique_values": conditionParams.GetThreshold().GetValue(), - "time_window": alertProtoUniqueCountTimeFrameToSchemaTimeFrame[conditionParams.GetTimeframe().String()], +func flattenUndetectedValuesManagement(ctx context.Context, undetectedValuesManagement *cxsdk.UndetectedValuesManagement) (types.Object, diag.Diagnostics) { + if undetectedValuesManagement == nil { + return types.ObjectNull(undetectedValuesManagementAttr()), nil } - if groupBy := conditionParams.GetGroupBy(); len(groupBy) > 0 { - conditionMap["group_by_key"] = conditionParams.GetGroupBy()[0].GetValue() - conditionMap["max_unique_values_for_group_by"] = conditionParams.GetMaxUniqueCountValuesForGroupByKey().GetValue() + undetectedValuesManagementModel := UndetectedValuesManagementModel{ + TriggerUndetectedValues: wrapperspbBoolToTypeBool(undetectedValuesManagement.GetTriggerUndetectedValues()), + AutoRetireTimeframe: types.StringValue(autoRetireTimeframeProtoToSchemaMap[undetectedValuesManagement.GetAutoRetireTimeframe()]), } - return conditionMap + return types.ObjectValueFrom(ctx, undetectedValuesManagementAttr(), undetectedValuesManagementModel) } -func flattenTimeRelativeAlert(filters *alerts.AlertFilters, condition interface{}) interface{} { - alertSchema := flattenCommonAlert(filters) - conditionMap := flattenTimeRelativeCondition(condition) - alertSchema["condition"] = []interface{}{conditionMap} - return []interface{}{alertSchema} -} +func flattenLogsMoreThanUsual(ctx context.Context, moreThanUsual *cxsdk.LogsMoreThanUsualTypeDefinition) (types.Object, diag.Diagnostics) { + if moreThanUsual == nil { + return types.ObjectNull(logsMoreThanUsualAttr()), nil + } -func flattenTimeRelativeCondition(condition interface{}) interface{} { - var conditionParams *alerts.ConditionParameters - timeRelativeCondition := make(map[string]interface{}) - switch condition := condition.(type) { - case *alerts.AlertCondition_LessThan: - conditionParams = condition.LessThan.GetParameters() - timeRelativeCondition["less_than"] = true - if len(conditionParams.GroupBy) > 0 { - timeRelativeCondition["manage_undetected_values"] = flattenManageUndetectedValues(conditionParams.GetRelatedExtendedData()) - } - case *alerts.AlertCondition_MoreThan: - conditionParams = condition.MoreThan.GetParameters() - timeRelativeCondition["more_than"] = true - default: - return nil + logsFilter, 
diags := flattenAlertsLogsFilter(ctx, moreThanUsual.GetLogsFilter()) + if diags.HasError() { + return types.ObjectNull(logsMoreThanUsualAttr()), diags } - timeRelativeCondition["ignore_infinity"] = conditionParams.GetIgnoreInfinity().GetValue() - timeRelativeCondition["ratio_threshold"] = conditionParams.GetThreshold().GetValue() - timeRelativeCondition["group_by"] = wrappedStringSliceToStringSlice(conditionParams.GroupBy) - timeFrame := conditionParams.GetTimeframe() - relativeTimeFrame := conditionParams.GetRelativeTimeframe() - timeRelativeCondition["relative_time_window"] = flattenRelativeTimeWindow(timeFrame, relativeTimeFrame) - - return timeRelativeCondition -} - -func flattenRelativeTimeWindow(timeFrame alerts.Timeframe, relativeTimeFrame alerts.RelativeTimeframe) string { - p := protoTimeFrameAndRelativeTimeFrame{timeFrame: timeFrame, relativeTimeFrame: relativeTimeFrame} - return alertProtoTimeFrameAndRelativeTimeFrameToSchemaRelativeTimeFrame[p] -} - -func flattenMetricAlert(filters *alerts.AlertFilters, condition interface{}) interface{} { - var conditionParams *alerts.ConditionParameters - var conditionStr string - switch condition := condition.(type) { - case *alerts.AlertCondition_LessThan: - conditionParams = condition.LessThan.GetParameters() - conditionStr = "less_than" - case *alerts.AlertCondition_MoreThan: - conditionParams = condition.MoreThan.GetParameters() - conditionStr = "more_than" - case *alerts.AlertCondition_MoreThanUsual: - conditionParams = condition.MoreThanUsual.GetParameters() - conditionStr = "more_than_usual" - case *alerts.AlertCondition_LessThanUsual: - conditionParams = condition.LessThanUsual.GetParameters() - conditionStr = "less_than_usual" - case *alerts.AlertCondition_MoreThanOrEqual: - conditionParams = condition.MoreThanOrEqual.GetParameters() - conditionStr = "more_than_or_equal" - case *alerts.AlertCondition_LessThanOrEqual: - conditionParams = condition.LessThanOrEqual.GetParameters() - conditionStr = "less_than_or_equal" - default: - return nil + timeWindow, diags := flattenLogsTimeWindow(ctx, moreThanUsual.GetTimeWindow()) + if diags.HasError() { + return types.ObjectNull(logsMoreThanUsualAttr()), diags } - var metricTypeStr string - var searchQuery string - var conditionMap map[string]interface{} - promqlParams := conditionParams.GetMetricAlertPromqlParameters() - if promqlParams != nil { - metricTypeStr = "promql" - searchQuery = promqlParams.GetPromqlText().GetValue() - conditionMap = flattenPromQLCondition(conditionParams) - } else { - metricTypeStr = "lucene" - searchQuery = filters.GetText().GetValue() - conditionMap = flattenLuceneCondition(conditionParams) + logsMoreThanUsualModel := LogsMoreThanUsualModel{ + LogsFilter: logsFilter, + MinimumThreshold: wrapperspbUint32ToTypeInt64(moreThanUsual.GetMinimumThreshold()), + TimeWindow: timeWindow, + NotificationPayloadFilter: wrappedStringSliceToTypeStringSet(moreThanUsual.GetNotificationPayloadFilter()), } - conditionMap[conditionStr] = true - if conditionStr == "less_than" || conditionStr == "less_than_or_equal" { - conditionMap["manage_undetected_values"] = flattenManageUndetectedValues(conditionParams.GetRelatedExtendedData()) + return types.ObjectValueFrom(ctx, logsMoreThanUsualAttr(), logsMoreThanUsualModel) +} + +func flattenLogsRatioMoreThan(ctx context.Context, ratioMoreThan *cxsdk.LogsRatioMoreThanTypeDefinition) (types.Object, diag.Diagnostics) { + if ratioMoreThan == nil { + return types.ObjectNull(logsRatioMoreThanAttr()), nil } - metricMap := map[string]interface{}{ - 
"search_query": searchQuery, - "condition": []interface{}{conditionMap}, + numeratorLogsFilter, diags := flattenAlertsLogsFilter(ctx, ratioMoreThan.GetNumeratorLogsFilter()) + if diags.HasError() { + return types.ObjectNull(logsRatioMoreThanAttr()), diags } - return []interface{}{ - map[string]interface{}{ - metricTypeStr: []interface{}{metricMap}, - }, + denominatorLogsFilter, diags := flattenAlertsLogsFilter(ctx, ratioMoreThan.GetDenominatorLogsFilter()) + if diags.HasError() { + return types.ObjectNull(logsRatioMoreThanAttr()), diags } -} -func flattenPromQLCondition(params *alerts.ConditionParameters) (promQLConditionMap map[string]interface{}) { - promqlParams := params.GetMetricAlertPromqlParameters() - promQLConditionMap = - map[string]interface{}{ - "threshold": params.GetThreshold().GetValue(), - "time_window": alertProtoMetricTimeFrameToMetricSchemaTimeFrame[params.GetTimeframe().String()], - "sample_threshold_percentage": promqlParams.GetSampleThresholdPercentage().GetValue(), - "replace_missing_value_with_zero": promqlParams.GetSwapNullValues().GetValue(), - "min_non_null_values_percentage": promqlParams.GetNonNullPercentage().GetValue(), - } - return -} + timeWindow, diags := flattenLogsRatioTimeWindow(ctx, ratioMoreThan.GetTimeWindow()) + if diags.HasError() { + return types.ObjectNull(logsRatioMoreThanAttr()), diags + } -func flattenLuceneCondition(params *alerts.ConditionParameters) map[string]interface{} { - metricParams := params.GetMetricAlertParameters() - return map[string]interface{}{ - "metric_field": metricParams.GetMetricField().GetValue(), - "arithmetic_operator": alertProtoArithmeticOperatorToSchemaArithmetic[metricParams.GetArithmeticOperator().String()], - "threshold": params.GetThreshold().GetValue(), - "arithmetic_operator_modifier": metricParams.GetArithmeticOperatorModifier().GetValue(), - "sample_threshold_percentage": metricParams.GetSampleThresholdPercentage().GetValue(), - "time_window": alertProtoMetricTimeFrameToMetricSchemaTimeFrame[params.GetTimeframe().String()], - "group_by": wrappedStringSliceToStringSlice(params.GetGroupBy()), - "replace_missing_value_with_zero": metricParams.GetSwapNullValues().GetValue(), - "min_non_null_values_percentage": metricParams.GetNonNullPercentage().GetValue(), + logsRatioMoreThanModel := LogsRatioMoreThanModel{ + NumeratorLogsFilter: numeratorLogsFilter, + NumeratorAlias: wrapperspbStringToTypeString(ratioMoreThan.GetNumeratorAlias()), + DenominatorLogsFilter: denominatorLogsFilter, + DenominatorAlias: wrapperspbStringToTypeString(ratioMoreThan.GetDenominatorAlias()), + Threshold: typeFloat64ToWrapperspbDouble(ratioMoreThan.GetThreshold()), + TimeWindow: timeWindow, + IgnoreInfinity: wrapperspbBoolToTypeBool(ratioMoreThan.GetIgnoreInfinity()), + NotificationPayloadFilter: wrappedStringSliceToTypeStringSet(ratioMoreThan.GetNotificationPayloadFilter()), + GroupByFor: types.StringValue(logsRatioGroupByForProtoToSchemaMap[ratioMoreThan.GetGroupByFor()]), } + return types.ObjectValueFrom(ctx, logsRatioMoreThanAttr(), logsRatioMoreThanModel) } -func flattenTracingAlert(condition interface{}, tracingAlert *alerts.TracingAlert) interface{} { - latencyThresholdMS := float64(tracingAlert.GetConditionLatency()) / float64(time.Millisecond.Microseconds()) - applications, subsystems, services := flattenTracingFilters(tracingAlert.GetFieldFilters()) - tagFilters := flattenTagFiltersData(tracingAlert.GetTagFilters()) - conditionSchema := flattenTracingCondition(condition) +func flattenLogsRatioTimeWindow(ctx context.Context, window 
*cxsdk.LogsRatioTimeWindow) (types.Object, diag.Diagnostics) { + if window == nil { + return types.ObjectNull(logsTimeWindowAttr()), nil + } - return []interface{}{ - map[string]interface{}{ - "latency_threshold_milliseconds": latencyThresholdMS, - "applications": applications, - "subsystems": subsystems, - "services": services, - "tag_filter": tagFilters, - "condition": conditionSchema, - }, + switch timeWindowType := window.Type.(type) { + case *cxsdk.LogsRatioTimeWindow_LogsRatioTimeWindowSpecificValue: + return types.ObjectValueFrom(ctx, logsTimeWindowAttr(), LogsRatioTimeWindowModel{ + SpecificValue: types.StringValue(logsRatioTimeWindowValueProtoToSchemaMap[timeWindowType.LogsRatioTimeWindowSpecificValue]), + }) + default: + return types.ObjectNull(logsTimeWindowAttr()), diag.Diagnostics{diag.NewErrorDiagnostic("Invalid Time Window", fmt.Sprintf("Time Window %v is not supported", timeWindowType))} } } -func flattenTracingFilters(tracingFilters []*alerts.FilterData) (applications, subsystems, services interface{}) { - filtersData := flattenFiltersData(tracingFilters) - applications = filtersData["applicationName"] - subsystems = filtersData["subsystemName"] - services = filtersData["serviceName"] - return -} +func flattenLogsRatioLessThan(ctx context.Context, ratioLessThan *cxsdk.LogsRatioLessThanTypeDefinition) (types.Object, diag.Diagnostics) { + if ratioLessThan == nil { + return types.ObjectNull(logsRatioLessThanAttr()), nil + } -func flattenFlowAlert(condition interface{}) interface{} { - return []interface{}{flattenFlowAlertsCondition(condition.(*alerts.AlertCondition_Flow))} -} + numeratorLogsFilter, diags := flattenAlertsLogsFilter(ctx, ratioLessThan.GetNumeratorLogsFilter()) + if diags.HasError() { + return types.ObjectNull(logsRatioLessThanAttr()), diags + } -func flattenFlowAlertsCondition(condition *alerts.AlertCondition_Flow) interface{} { - stages := flattenStages(condition.Flow.GetStages()) + denominatorLogsFilter, diags := flattenAlertsLogsFilter(ctx, ratioLessThan.GetDenominatorLogsFilter()) + if diags.HasError() { + return types.ObjectNull(logsRatioLessThanAttr()), diags + } - m := map[string]interface{}{ - "stage": stages, + timeWindow, diags := flattenLogsRatioTimeWindow(ctx, ratioLessThan.GetTimeWindow()) + if diags.HasError() { + return types.ObjectNull(logsRatioLessThanAttr()), diags } - if flowParams := condition.Flow.GetParameters(); flowParams != nil { - groupBy := wrappedStringSliceToStringSlice(flowParams.GetGroupBy()) - if len(groupBy) != 0 { - m["group_by"] = groupBy - } + undetectedValuesManagement, diags := flattenUndetectedValuesManagement(ctx, ratioLessThan.GetUndetectedValuesManagement()) + if diags.HasError() { + return types.ObjectNull(logsRatioLessThanAttr()), diags } - return m + logsRatioLessThanModel := LogsRatioLessThanModel{ + NumeratorLogsFilter: numeratorLogsFilter, + NumeratorAlias: wrapperspbStringToTypeString(ratioLessThan.GetNumeratorAlias()), + DenominatorLogsFilter: denominatorLogsFilter, + DenominatorAlias: wrapperspbStringToTypeString(ratioLessThan.GetDenominatorAlias()), + Threshold: wrapperspbUint32ToTypeInt64(ratioLessThan.GetThreshold()), + TimeWindow: timeWindow, + IgnoreInfinity: wrapperspbBoolToTypeBool(ratioLessThan.GetIgnoreInfinity()), + NotificationPayloadFilter: wrappedStringSliceToTypeStringSet(ratioLessThan.GetNotificationPayloadFilter()), + GroupByFor: types.StringValue(logsRatioGroupByForProtoToSchemaMap[ratioLessThan.GetGroupByFor()]), + UndetectedValuesManagement: undetectedValuesManagement, + } + return 
types.ObjectValueFrom(ctx, logsRatioLessThanAttr(), logsRatioLessThanModel) } -func flattenStages(stages []*alerts.FlowStage) []interface{} { - result := make([]interface{}, 0, len(stages)) - for _, stage := range stages { - result = append(result, flattenStage(stage)) +func flattenLogsUniqueCount(ctx context.Context, uniqueCount *cxsdk.LogsUniqueCountTypeDefinition) (types.Object, diag.Diagnostics) { + if uniqueCount == nil { + return types.ObjectNull(logsUniqueCountAttr()), nil } - return result -} -func flattenStage(stage *alerts.FlowStage) interface{} { - timeMS := int(stage.GetTimeframe().GetMs().GetValue()) - return map[string]interface{}{ - "group": flattenGroups(stage.GetGroups()), - "time_window": flattenTimeframe(timeMS), + logsFilter, diags := flattenAlertsLogsFilter(ctx, uniqueCount.GetLogsFilter()) + if diags.HasError() { + return types.ObjectNull(logsUniqueCountAttr()), diags } -} -func flattenGroups(groups []*alerts.FlowGroup) []interface{} { - result := make([]interface{}, 0, len(groups)) - for _, g := range groups { - result = append(result, flattenGroup(g)) + timeWindow, diags := flattenLogsUniqueCountTimeWindow(ctx, uniqueCount.GetTimeWindow()) + if diags.HasError() { + return types.ObjectNull(logsUniqueCountAttr()), diags } - return result -} -func flattenGroup(fg *alerts.FlowGroup) interface{} { - subAlerts := flattenSubAlerts(fg.GetAlerts()) - operator := fg.GetNextOp().String() - return map[string]interface{}{ - "sub_alerts": subAlerts, - "next_operator": operator, + logsUniqueCountModel := LogsUniqueCountModel{ + LogsFilter: logsFilter, + UniqueCountKeypath: wrapperspbStringToTypeString(uniqueCount.GetUniqueCountKeypath()), + MaxUniqueCount: wrapperspbInt64ToTypeInt64(uniqueCount.GetMaxUniqueCount()), + TimeWindow: timeWindow, + NotificationPayloadFilter: wrappedStringSliceToTypeStringSet(uniqueCount.GetNotificationPayloadFilter()), + MaxUniqueCountPerGroupByKey: wrapperspbInt64ToTypeInt64(uniqueCount.GetMaxUniqueCountPerGroupByKey()), } + return types.ObjectValueFrom(ctx, logsUniqueCountAttr(), logsUniqueCountModel) } -func flattenSubAlerts(subAlerts *alerts.FlowAlerts) interface{} { - operator := subAlerts.GetOp().String() - flowAlerts := make([]interface{}, 0, len(subAlerts.GetValues())) - for _, sa := range subAlerts.GetValues() { - flowAlerts = append(flowAlerts, flattenInnerFlowAlert(sa)) +func flattenLogsUniqueCountTimeWindow(ctx context.Context, timeWindow *cxsdk.LogsUniqueValueTimeWindow) (types.Object, diag.Diagnostics) { + if timeWindow == nil { + return types.ObjectNull(logsTimeWindowAttr()), nil } - return []interface{}{ - map[string]interface{}{ - "operator": operator, - "flow_alert": flowAlerts, - }, + switch timeWindowType := timeWindow.Type.(type) { + case *cxsdk.LogsUniqueValueTimeWindow_LogsUniqueValueTimeWindowSpecificValue: + return types.ObjectValueFrom(ctx, logsTimeWindowAttr(), LogsUniqueCountTimeWindowModel{ + SpecificValue: types.StringValue(logsUniqueCountTimeWindowValueProtoToSchemaMap[timeWindowType.LogsUniqueValueTimeWindowSpecificValue]), + }) + default: + return types.ObjectNull(logsTimeWindowAttr()), diag.Diagnostics{diag.NewErrorDiagnostic("Invalid Time Window", fmt.Sprintf("Time Window %v is not supported", timeWindowType))} } + } -func flattenInnerFlowAlert(subAlert *alerts.FlowAlert) interface{} { - return map[string]interface{}{ - "not": subAlert.GetNot().GetValue(), - "user_alert_id": subAlert.GetId().GetValue(), +func flattenLogsNewValue(ctx context.Context, newValue *cxsdk.LogsNewValueTypeDefinition) (types.Object, 
diag.Diagnostics) { + if newValue == nil { + return types.ObjectNull(logsNewValueAttr()), nil } -} -func flattenFiltersData(filtersData []*alerts.FilterData) map[string]interface{} { - result := make(map[string]interface{}, len(filtersData)) - for _, filter := range filtersData { - field := filter.GetField() - result[field] = flattenFilters(filter.GetFilters()) + logsFilter, diags := flattenAlertsLogsFilter(ctx, newValue.GetLogsFilter()) + if diags.HasError() { + return types.ObjectNull(logsNewValueAttr()), diags } - return result -} -func flattenTagFiltersData(filtersData []*alerts.FilterData) interface{} { - fieldToFilters := flattenFiltersData(filtersData) - result := make([]interface{}, 0, len(fieldToFilters)) - for field, filters := range fieldToFilters { - filterSchema := map[string]interface{}{ - "field": field, - "values": filters, - } - result = append(result, filterSchema) + timeWindow, diags := flattenLogsNewValueTimeWindow(ctx, newValue.GetTimeWindow()) + if diags.HasError() { + return types.ObjectNull(logsNewValueAttr()), diags } - return result -} -func flattenFilters(filters []*alerts.Filters) []string { - result := make([]string, 0) - for _, f := range filters { - values := f.GetValues() - switch operator := f.GetOperator(); operator { - case "notEquals", "contains", "startsWith", "endsWith": - for i, val := range values { - values[i] = fmt.Sprintf("filter:%s:%s", operator, val) - } - } - result = append(result, values...) + logsNewValueModel := LogsNewValueModel{ + LogsFilter: logsFilter, + KeypathToTrack: wrapperspbStringToTypeString(newValue.GetKeypathToTrack()), + TimeWindow: timeWindow, + NotificationPayloadFilter: wrappedStringSliceToTypeStringSet(newValue.GetNotificationPayloadFilter()), } - return result + return types.ObjectValueFrom(ctx, logsNewValueAttr(), logsNewValueModel) } -func flattenTracingCondition(condition interface{}) interface{} { - switch condition := condition.(type) { - case *alerts.AlertCondition_Immediate: - return []interface{}{ - map[string]interface{}{ - "immediately": true, - }, - } - case *alerts.AlertCondition_MoreThan: - conditionParams := condition.MoreThan.GetParameters() - return []interface{}{ - map[string]interface{}{ - "more_than": true, - "threshold": conditionParams.GetThreshold().GetValue(), - "time_window": alertProtoTimeFrameToSchemaTimeFrame[conditionParams.GetTimeframe().String()], - "group_by": wrappedStringSliceToStringSlice(conditionParams.GetGroupBy()), - }, - } +func flattenLogsNewValueTimeWindow(ctx context.Context, window *cxsdk.LogsNewValueTimeWindow) (types.Object, diag.Diagnostics) { + if window == nil { + return types.ObjectNull(logsTimeWindowAttr()), nil + } + + switch timeWindowType := window.Type.(type) { + case *cxsdk.LogsNewValueTimeWindow_LogsNewValueTimeWindowSpecificValue: + return types.ObjectValueFrom(ctx, logsTimeWindowAttr(), LogsNewValueTimeWindowModel{ + SpecificValue: types.StringValue(logsNewValueTimeWindowValueProtoToSchemaMap[timeWindowType.LogsNewValueTimeWindowSpecificValue]), + }) default: - return nil + return types.ObjectNull(logsTimeWindowAttr()), diag.Diagnostics{diag.NewErrorDiagnostic("Invalid Time Window", fmt.Sprintf("Time Window %v is not supported", timeWindowType))} } } -func flattenCommonAlert(filters *alerts.AlertFilters) map[string]interface{} { - metadata := filters.GetMetadata() - return map[string]interface{}{ - "search_query": filters.GetText().GetValue(), - "severities": extractSeverities(filters.GetSeverities()), - "applications": 
wrappedStringSliceToStringSlice(metadata.GetApplications()), - "subsystems": wrappedStringSliceToStringSlice(metadata.GetSubsystems()), - "categories": wrappedStringSliceToStringSlice(metadata.GetCategories()), - "computers": wrappedStringSliceToStringSlice(metadata.GetComputers()), - "classes": wrappedStringSliceToStringSlice(metadata.GetClasses()), - "methods": wrappedStringSliceToStringSlice(metadata.GetMethods()), - "ip_addresses": wrappedStringSliceToStringSlice(metadata.GetIpAddresses()), +func flattenAlertSchedule(ctx context.Context, alertProperties *cxsdk.AlertDefProperties) (types.Object, diag.Diagnostics) { + if alertProperties.Schedule == nil { + return types.ObjectNull(alertScheduleAttr()), nil + } + + var alertScheduleModel AlertScheduleModel + var diags diag.Diagnostics + switch alertScheduleType := alertProperties.Schedule.(type) { + case *cxsdk.AlertDefProperties_ActiveOn: + alertScheduleModel.ActiveOn, diags = flattenActiveOn(ctx, alertScheduleType.ActiveOn) + default: + return types.ObjectNull(alertScheduleAttr()), diag.Diagnostics{diag.NewErrorDiagnostic("Invalid Alert Schedule", fmt.Sprintf("Alert Schedule %v is not supported", alertScheduleType))} } -} -func extractSeverities(severities []alerts.AlertFilters_LogSeverity) []string { - result := make([]string, 0, len(severities)) - for _, s := range severities { - result = append(result, alertProtoLogSeverityToSchemaLogSeverity[s.String()]) + if diags.HasError() { + return types.ObjectNull(alertScheduleAttr()), diags } - return result + + return types.ObjectValueFrom(ctx, alertScheduleAttr(), alertScheduleModel) } -func flattenExpirationDate(expiration *alerts.Date) []map[string]int { - if expiration == nil { - return nil +func flattenActiveOn(ctx context.Context, activeOn *cxsdk.ActivitySchedule) (types.Object, diag.Diagnostics) { + if activeOn == nil { + return types.ObjectNull(alertScheduleActiveOnAttr()), nil + } + + daysOfWeek, diags := flattenDaysOfWeek(ctx, activeOn.GetDayOfWeek()) + if diags.HasError() { + return types.ObjectNull(alertScheduleActiveOnAttr()), diags + } + + startTime, diags := flattenTimeOfDay(ctx, activeOn.GetStartTime()) + if diags.HasError() { + return types.ObjectNull(alertScheduleActiveOnAttr()), diags + } + + endTime, diags := flattenTimeOfDay(ctx, activeOn.GetEndTime()) + if diags.HasError() { + return types.ObjectNull(alertScheduleActiveOnAttr()), diags } - m := map[string]int{ - "year": int(expiration.GetYear()), - "month": int(expiration.GetMonth()), - "day": int(expiration.GetDay()), + + activeOnModel := ActiveOnModel{ + DaysOfWeek: daysOfWeek, + StartTime: startTime, + EndTime: endTime, } + return types.ObjectValueFrom(ctx, alertScheduleActiveOnAttr(), activeOnModel) +} - return []map[string]int{m} +func flattenDaysOfWeek(ctx context.Context, daysOfWeek []cxsdk.DayOfWeek) (types.List, diag.Diagnostics) { + var daysOfWeekStrings []types.String + for _, dow := range daysOfWeek { + daysOfWeekStrings = append(daysOfWeekStrings, types.StringValue(daysOfWeekProtoToSchemaMap[dow])) + } + return types.ListValueFrom(ctx, types.StringType, daysOfWeekStrings) } -func expandAlertSeverity(severity string) alerts.AlertSeverity { - severityStr := alertSchemaSeverityToProtoSeverity[severity] - formatStandardVal := alerts.AlertSeverity_value[severityStr] - return alerts.AlertSeverity(formatStandardVal) +func flattenTimeOfDay(ctx context.Context, time *cxsdk.TimeOfDay) (types.Object, diag.Diagnostics) { + if time == nil { + return types.ObjectNull(timeOfDayAttr()), nil + } + return 
types.ObjectValueFrom(ctx, timeOfDayAttr(), TimeOfDayModel{ + Hours: types.Int64Value(int64(time.GetHours())), + Minutes: types.Int64Value(int64(time.GetMinutes())), + }) } -func expandExpirationDate(v interface{}) *alerts.Date { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil +func flattenLogsTimeRelativeMoreThan(ctx context.Context, logsTimeRelativeMoreThan *cxsdk.LogsTimeRelativeMoreThanTypeDefinition) (types.Object, diag.Diagnostics) { + if logsTimeRelativeMoreThan == nil { + return types.ObjectNull(logsTimeRelativeMoreThanAttr()), nil + } + + logsFilter, diags := flattenAlertsLogsFilter(ctx, logsTimeRelativeMoreThan.GetLogsFilter()) + if diags.HasError() { + return types.ObjectNull(logsTimeRelativeMoreThanAttr()), diags } - raw := l[0] - m := raw.(map[string]interface{}) - return &alerts.Date{ - Year: int32(m["year"].(int)), - Month: int32(m["month"].(int)), - Day: int32(m["day"].(int)), + + logsTimeRelativeMoreThanModel := LogsTimeRelativeMoreThanModel{ + LogsFilter: logsFilter, + NotificationPayloadFilter: wrappedStringSliceToTypeStringSet(logsTimeRelativeMoreThan.GetNotificationPayloadFilter()), + Threshold: wrapperspbUint32ToTypeInt64(logsTimeRelativeMoreThan.GetThreshold()), + ComparedTo: types.StringValue(logsTimeRelativeComparedToProtoToSchemaMap[logsTimeRelativeMoreThan.GetComparedTo()]), + IgnoreInfinity: wrapperspbBoolToTypeBool(logsTimeRelativeMoreThan.GetIgnoreInfinity()), } + + return types.ObjectValueFrom(ctx, logsTimeRelativeMoreThanAttr(), logsTimeRelativeMoreThanModel) } -func expandIncidentSettings(v interface{}) *alerts.AlertIncidentSettings { - l, ok := v.([]interface{}) - if !ok || len(l) == 0 || l[0] == nil { - return nil +func flattenMetricMoreThan(ctx context.Context, metricMoreThan *cxsdk.MetricMoreThanTypeDefinition) (types.Object, diag.Diagnostics) { + if metricMoreThan == nil { + return types.ObjectNull(metricMoreThanAttr()), nil + } + + metricFilter, diags := flattenMetricFilter(ctx, metricMoreThan.GetMetricFilter()) + if diags.HasError() { + return types.ObjectNull(metricMoreThanAttr()), diags } - raw := l[0] - m := raw.(map[string]interface{}) - retriggeringPeriodSeconds := wrapperspb.UInt32(uint32(m["retriggering_period_minutes"].(int)) * 60) - notifyOn := alertSchemaNotifyOnToProtoNotifyOn[m["notify_on"].(string)] + ofTheLast, diags := flattenMetricTimeWindow(ctx, metricMoreThan.GetOfTheLast()) + if diags.HasError() { + return types.ObjectNull(metricMoreThanAttr()), diags + } - return &alerts.AlertIncidentSettings{ - RetriggeringPeriodSeconds: retriggeringPeriodSeconds, - NotifyOn: notifyOn, - UseAsNotificationSettings: wrapperspb.Bool(true), + missingValues, diags := flattenMissingValues(ctx, metricMoreThan.GetMissingValues()) + if diags.HasError() { + return types.ObjectNull(metricMoreThanAttr()), diags } + metricMoreThanModel := MetricMoreThanModel{ + MetricFilter: metricFilter, + Threshold: wrapperspbFloat64ToTypeFloat64(metricMoreThan.GetThreshold()), + ForOverPct: wrapperspbUint32ToTypeInt64(metricMoreThan.GetForOverPct()), + OfTheLast: ofTheLast, + MissingValues: missingValues, + } + return types.ObjectValueFrom(ctx, metricMoreThanAttr(), metricMoreThanModel) } -func expandNotificationGroups(v interface{}) ([]*alerts.AlertNotificationGroups, diag.Diagnostics) { - v = v.(*schema.Set).List() - l := v.([]interface{}) - result := make([]*alerts.AlertNotificationGroups, 0, len(l)) - var diags diag.Diagnostics - for _, s := range l { - ml, dgs := expandNotificationGroup(s) - diags = append(diags, dgs...) 
- result = append(result, ml) +func flattenMetricFilter(ctx context.Context, filter *cxsdk.MetricFilter) (types.Object, diag.Diagnostics) { + if filter == nil { + return types.ObjectNull(metricFilterAttr()), nil + } + + switch filterType := filter.Type.(type) { + case *cxsdk.MetricFilter_Promql: + return types.ObjectValueFrom(ctx, metricFilterAttr(), MetricFilterModel{ + Promql: wrapperspbStringToTypeString(filterType.Promql), + }) + default: + return types.ObjectNull(metricFilterAttr()), diag.Diagnostics{diag.NewErrorDiagnostic("Invalid Metric Filter", fmt.Sprintf("Metric Filter %v is not supported", filterType))} + } +} + +func flattenMetricTimeWindow(ctx context.Context, last *cxsdk.MetricTimeWindow) (types.Object, diag.Diagnostics) { + if last == nil { + return types.ObjectNull(metricTimeWindowAttr()), nil + } + + switch timeWindowType := last.Type.(type) { + case *cxsdk.MetricTimeWindow_MetricTimeWindowSpecificValue: + return types.ObjectValueFrom(ctx, metricTimeWindowAttr(), MetricTimeWindowModel{ + SpecificValue: types.StringValue(metricFilterOperationTypeProtoToSchemaMap[timeWindowType.MetricTimeWindowSpecificValue]), + }) + default: + return types.ObjectNull(metricTimeWindowAttr()), diag.Diagnostics{diag.NewErrorDiagnostic("Invalid Time Window", fmt.Sprintf("Time Window %v is not supported", timeWindowType))} } - return result, diags } -func expandNotificationGroup(v interface{}) (*alerts.AlertNotificationGroups, diag.Diagnostics) { - if v == nil { - return nil, nil +func flattenMissingValues(ctx context.Context, missingValues *cxsdk.MetricMissingValues) (types.Object, diag.Diagnostics) { + if missingValues == nil { + return types.ObjectNull(metricMissingValuesAttr()), nil } - m := v.(map[string]interface{}) - groupByFields := interfaceSliceToWrappedStringSlice(m["group_by_fields"].([]interface{})) - notifications, diags := expandNotificationSubgroups(m["notification"]) - if len(diags) != 0 { - return nil, diags + metricMissingValuesModel := MetricMissingValuesModel{} + switch missingValuesType := missingValues.MissingValues.(type) { + case *cxsdk.MetricMissingValues_ReplaceWithZero: + metricMissingValuesModel.ReplaceWithZero = wrapperspbBoolToTypeBool(missingValuesType.ReplaceWithZero) + case *cxsdk.MetricMissingValues_MinNonNullValuesPct: + metricMissingValuesModel.MinNonNullValuesPct = wrapperspbUint32ToTypeInt64(missingValuesType.MinNonNullValuesPct) + default: + return types.ObjectNull(metricMissingValuesAttr()), diag.Diagnostics{diag.NewErrorDiagnostic("Invalid Missing Values", fmt.Sprintf("Missing Values %v is not supported", missingValuesType))} } - return &alerts.AlertNotificationGroups{ - GroupByFields: groupByFields, - Notifications: notifications, - }, nil + return types.ObjectValueFrom(ctx, metricMissingValuesAttr(), metricMissingValuesModel) } -func expandNotificationSubgroups(v interface{}) ([]*alerts.AlertNotification, diag.Diagnostics) { - v = v.(*schema.Set).List() - notifications := v.([]interface{}) - result := make([]*alerts.AlertNotification, 0, len(notifications)) - var diags diag.Diagnostics - for _, n := range notifications { - notification, err := expandNotificationSubgroup(n) - if err != nil { - diags = append(diags, diag.FromErr(err)...) 
- } - result = append(result, notification) +func flattenMetricLessThan(ctx context.Context, metricLessThan *cxsdk.MetricLessThanTypeDefinition) (types.Object, diag.Diagnostics) { + if metricLessThan == nil { + return types.ObjectNull(metricLessThanAttr()), nil } - return result, diags -} -func expandNotificationSubgroup(v interface{}) (*alerts.AlertNotification, error) { - if v == nil { - return nil, nil + metricFilter, diags := flattenMetricFilter(ctx, metricLessThan.GetMetricFilter()) + if diags.HasError() { + return types.ObjectNull(metricLessThanAttr()), diags } - m := v.(map[string]interface{}) - var notifyEverySec *wrapperspb.UInt32Value - if minutes, ok := m["retriggering_period_minutes"].(int); ok && minutes != 0 { - notifyEverySec = wrapperspb.UInt32(uint32(minutes) * 60) + ofTheLast, diags := flattenMetricTimeWindow(ctx, metricLessThan.GetOfTheLast()) + if diags.HasError() { + return types.ObjectNull(metricLessThanAttr()), diags } - var notifyOn *alerts.NotifyOn - if notifyOnStr, ok := m["notify_on"].(string); ok { - notifyOn = new(alerts.NotifyOn) - *notifyOn = alertSchemaNotifyOnToProtoNotifyOn[notifyOnStr] + missingValues, diags := flattenMissingValues(ctx, metricLessThan.GetMissingValues()) + if diags.HasError() { + return types.ObjectNull(metricLessThanAttr()), diags } - notification := &alerts.AlertNotification{ - RetriggeringPeriodSeconds: notifyEverySec, - NotifyOn: notifyOn, + undetectedValuesManagement, diags := flattenUndetectedValuesManagement(ctx, metricLessThan.GetUndetectedValuesManagement()) + if diags.HasError() { + return types.ObjectNull(metricLessThanAttr()), diags } - var isWebhookIdDefined bool - if webhookID, ok := m["integration_id"].(string); ok && webhookID != "" { - isWebhookIdDefined = true - id := parseNumUint32(webhookID) - notification.IntegrationType = &alerts.AlertNotification_IntegrationId{ - IntegrationId: wrapperspb.UInt32(id), - } + metricLessThanModel := MetricLessThanModel{ + MetricFilter: metricFilter, + Threshold: wrapperspbFloat64ToTypeFloat64(metricLessThan.GetThreshold()), + ForOverPct: wrapperspbUint32ToTypeInt64(metricLessThan.GetForOverPct()), + OfTheLast: ofTheLast, + MissingValues: missingValues, + UndetectedValuesManagement: undetectedValuesManagement, } + return types.ObjectValueFrom(ctx, metricLessThanAttr(), metricLessThanModel) +} - if emails := m["email_recipients"].(*schema.Set).List(); len(emails) != 0 { - if isWebhookIdDefined { - return nil, fmt.Errorf("required exactly on of 'integration_id' or 'email_recipients'") - } - - notification.IntegrationType = &alerts.AlertNotification_Recipients{ - Recipients: &alerts.Recipients{ - Emails: interfaceSliceToWrappedStringSlice(emails), - }, - } +func flattenLogsTimeRelativeLessThan(ctx context.Context, timeRelativeLessThan *cxsdk.LogsTimeRelativeLessThanTypeDefinition) (types.Object, diag.Diagnostics) { + if timeRelativeLessThan == nil { + return types.ObjectNull(logsTimeRelativeLessThanAttr()), nil } - return notification, nil -} + logsFilter, diags := flattenAlertsLogsFilter(ctx, timeRelativeLessThan.GetLogsFilter()) + if diags.HasError() { + return types.ObjectNull(logsTimeRelativeLessThanAttr()), diags + } -func extractMetaLabels(v interface{}) []*alerts.MetaLabel { - m := v.(map[string]interface{}) - result := make([]*alerts.MetaLabel, 0, len(m)) - for key, val := range m { - ml := &alerts.MetaLabel{ - Key: wrapperspb.String(key), - Value: wrapperspb.String(val.(string)), - } - result = append(result, ml) + undetectedValuesManagement, diags := 
flattenUndetectedValuesManagement(ctx, timeRelativeLessThan.GetUndetectedValuesManagement())
+ if diags.HasError() {
+ return types.ObjectNull(logsTimeRelativeLessThanAttr()), diags
 }
- return result
-}
-func expandActiveWhen(v interface{}) *alerts.AlertActiveWhen {
- l := v.([]interface{})
- if len(l) == 0 {
- return nil
+ logsTimeRelativeLessThanModel := LogsTimeRelativeLessThanModel{
+ LogsFilter: logsFilter,
+ NotificationPayloadFilter: wrappedStringSliceToTypeStringSet(timeRelativeLessThan.GetNotificationPayloadFilter()),
+ Threshold: wrapperspbUint32ToTypeInt64(timeRelativeLessThan.GetThreshold()),
+ ComparedTo: types.StringValue(logsTimeRelativeComparedToProtoToSchemaMap[timeRelativeLessThan.GetComparedTo()]),
+ IgnoreInfinity: wrapperspbBoolToTypeBool(timeRelativeLessThan.GetIgnoreInfinity()),
+ UndetectedValuesManagement: undetectedValuesManagement,
 }
- schedulingMap := l[0].(map[string]interface{})
- utc := flattenUtc(schedulingMap["time_zone"].(string))
- timeFrames := schedulingMap["time_frame"].(*schema.Set).List()
+ return types.ObjectValueFrom(ctx, logsTimeRelativeLessThanAttr(), logsTimeRelativeLessThanModel)
+}
- expandedTimeframes := expandActiveTimeframes(timeFrames, utc)
+func flattenTracingImmediate(ctx context.Context, tracingImmediate *cxsdk.TracingImmediateTypeDefinition) (types.Object, diag.Diagnostics) {
+ if tracingImmediate == nil {
+ return types.ObjectNull(tracingImmediateAttr()), nil
+ }
- return &alerts.AlertActiveWhen{
- Timeframes: expandedTimeframes,
+ tracingQuery, diags := flattenTracingFilter(ctx, tracingImmediate.GetTracingFilter())
+ if diags.HasError() {
+ return types.ObjectNull(tracingImmediateAttr()), diags
 }
-}
-func expandActiveTimeframes(timeFrames []interface{}, utc int32) []*alerts.AlertActiveTimeframe {
- result := make([]*alerts.AlertActiveTimeframe, 0, len(timeFrames))
- for _, tf := range timeFrames {
- alertActiveTimeframe := expandActiveTimeFrame(tf, utc)
- result = append(result, alertActiveTimeframe)
+ tracingImmediateModel := TracingImmediateModel{
+ TracingFilter: tracingQuery,
+ NotificationPayloadFilter: wrappedStringSliceToTypeStringSet(tracingImmediate.GetNotificationPayloadFilter()),
 }
- return result
-}
-func expandActiveTimeFrame(timeFrame interface{}, utc int32) *alerts.AlertActiveTimeframe {
- m := timeFrame.(map[string]interface{})
- daysOfWeek := expandDaysOfWeek(m["days_enabled"])
- frameRange := expandRange(m["start_time"], m["end_time"])
- frameRange, daysOfWeek = convertTimeFramesToGMT(frameRange, daysOfWeek, utc)
+ return types.ObjectValueFrom(ctx, tracingImmediateAttr(), tracingImmediateModel)
+}
- alertActiveTimeframe := &alerts.AlertActiveTimeframe{
- DaysOfWeek: daysOfWeek,
- Range: frameRange,
+func flattenTracingFilter(ctx context.Context, tracingQuery *cxsdk.TracingFilter) (types.Object, diag.Diagnostics) {
+ if tracingQuery == nil {
+ return types.ObjectNull(tracingQueryAttr()), nil
 }
- return alertActiveTimeframe
-}
-func convertTimeFramesToGMT(frameRange *alerts.TimeRange, daysOfWeek []alerts.DayOfWeek, utc int32) (*alerts.TimeRange, []alerts.DayOfWeek) {
- daysOfWeekOffset := daysOfWeekOffsetToGMT(frameRange, utc)
- frameRange.Start.Hours = convertUtcToGmt(frameRange.GetStart().GetHours(), utc)
- frameRange.End.Hours = convertUtcToGmt(frameRange.GetEnd().GetHours(), utc)
- if daysOfWeekOffset != 0 {
- for i, d := range daysOfWeek {
- daysOfWeek[i] = alerts.DayOfWeek((int32(d) + daysOfWeekOffset) % 7)
- }
+ tracingQueryModel := &TracingFilterModel{
+ LatencyThresholdMs: 
wrapperspbUint32ToTypeInt64(tracingQuery.GetLatencyThresholdMs()), + } + tracingQueryModel, diags := flattenTracingFilterFilters(ctx, tracingQueryModel, tracingQuery) + if diags.HasError() { + return types.ObjectNull(tracingQueryAttr()), diags } - return frameRange, daysOfWeek + return types.ObjectValueFrom(ctx, tracingQueryAttr(), tracingQueryModel) } -func daysOfWeekOffsetToGMT(frameRange *alerts.TimeRange, utc int32) int32 { - daysOfWeekOffset := int32(frameRange.Start.Hours-utc) / 24 - if daysOfWeekOffset < 0 { - daysOfWeekOffset += 7 +func flattenTracingFilterFilters(ctx context.Context, tracingQueryModel *TracingFilterModel, tracingQuery *cxsdk.TracingFilter) (*TracingFilterModel, diag.Diagnostics) { + if tracingQuery == nil || tracingQuery.Filters == nil { + return nil, nil } - return daysOfWeekOffset -} -func convertUtcToGmt(hours, utc int32) int32 { - hours -= utc - if hours < 0 { - hours += 24 - } else if hours >= 24 { - hours -= 24 + var diags diag.Diagnostics + switch filtersType := tracingQuery.Filters.(type) { + case *cxsdk.TracingFilter_TracingLabelFilters: + tracingQueryModel.TracingLabelFilters, diags = flattenTracingLabelFilters(ctx, filtersType.TracingLabelFilters) + default: + return nil, diag.Diagnostics{diag.NewErrorDiagnostic("Invalid Tracing Query Filters", fmt.Sprintf("Tracing Query Filters %v is not supported", filtersType))} } - return hours + return tracingQueryModel, diags } -func convertGmtToUtc(hours, utc int32) int32 { - hours += utc - if hours < 0 { - hours += 24 - } else if hours >= 24 { - hours -= 24 +func flattenTracingLabelFilters(ctx context.Context, filters *cxsdk.TracingLabelFilters) (types.Object, diag.Diagnostics) { + if filters == nil { + return types.ObjectNull(tracingLabelFiltersAttr()), nil } - return hours -} - -func expandDaysOfWeek(v interface{}) []alerts.DayOfWeek { - l := v.(*schema.Set).List() - result := make([]alerts.DayOfWeek, 0, len(l)) - for _, v := range l { - dayOfWeekStr := alertSchemaDayOfWeekToProtoDayOfWeek[v.(string)] - dayOfWeekVal := alerts.DayOfWeek_value[dayOfWeekStr] - result = append(result, alerts.DayOfWeek(dayOfWeekVal)) + applicationName, diags := flattenTracingFilterTypes(ctx, filters.GetApplicationName()) + if diags.HasError() { + return types.ObjectNull(tracingLabelFiltersAttr()), diags } - return result -} -func expandRange(activityStarts, activityEnds interface{}) *alerts.TimeRange { - start := expandTimeInDay(activityStarts) - end := expandTimeInDay(activityEnds) + subsystemName, diags := flattenTracingFilterTypes(ctx, filters.GetSubsystemName()) + if diags.HasError() { + return types.ObjectNull(tracingLabelFiltersAttr()), diags - return &alerts.TimeRange{ - Start: start, - End: end, } -} -func expandAlertType(d *schema.ResourceData) (alertTypeParams *alertParams, tracingAlert *alerts.TracingAlert, diags diag.Diagnostics) { - alertTypeStr := From(validAlertTypes).FirstWith(func(key interface{}) bool { - return len(d.Get(key.(string)).([]interface{})) > 0 - }).(string) + serviceName, diags := flattenTracingFilterTypes(ctx, filters.GetServiceName()) + if diags.HasError() { + return types.ObjectNull(tracingLabelFiltersAttr()), diags + } - alertType := d.Get(alertTypeStr).([]interface{})[0].(map[string]interface{}) + operationName, diags := flattenTracingFilterTypes(ctx, filters.GetOperationName()) + if diags.HasError() { + return types.ObjectNull(tracingLabelFiltersAttr()), diags + } - switch alertTypeStr { - case "standard": - alertTypeParams, diags = expandStandard(alertType) - case "ratio": - alertTypeParams, 
diags = expandRatio(alertType) - case "new_value": - alertTypeParams = expandNewValue(alertType) - case "unique_count": - alertTypeParams = expandUniqueCount(alertType) - case "time_relative": - alertTypeParams, diags = expandTimeRelative(alertType) - case "metric": - alertTypeParams, diags = expandMetric(alertType) - case "tracing": - alertTypeParams, tracingAlert = expandTracing(alertType) - case "flow": - alertTypeParams = expandFlow(alertType) + spanFields, diags := flattenTracingSpansFields(ctx, filters.GetSpanFields()) + if diags.HasError() { + return types.ObjectNull(tracingLabelFiltersAttr()), diags } - return + return types.ObjectValueFrom(ctx, tracingLabelFiltersAttr(), TracingLabelFiltersModel{ + ApplicationName: applicationName, + SubsystemName: subsystemName, + ServiceName: serviceName, + OperationName: operationName, + SpanFields: spanFields, + }) + } -func expandStandard(m map[string]interface{}) (*alertParams, diag.Diagnostics) { - conditionMap := extractConditionMap(m) - condition, err := expandStandardCondition(conditionMap) - if err != nil { - return nil, diag.FromErr(err) +func flattenTracingFilterTypes(ctx context.Context, TracingFilterType []*cxsdk.TracingFilterType) (types.Set, diag.Diagnostics) { + var tracingFilterTypes []*TracingFilterTypeModel + for _, tft := range TracingFilterType { + tracingFilterTypes = append(tracingFilterTypes, flattenTracingFilterType(tft)) } - filters := expandStandardFilter(m) - return &alertParams{ - Condition: condition, - Filters: filters, - }, nil + return types.SetValueFrom(ctx, types.ObjectType{AttrTypes: tracingFiltersTypeAttr()}, tracingFilterTypes) } -func expandStandardCondition(m map[string]interface{}) (*alerts.AlertCondition, error) { - if immediately := m["immediately"]; immediately != nil && immediately.(bool) { - return &alerts.AlertCondition{ - Condition: &alerts.AlertCondition_Immediate{}, - }, nil - } else if moreThenUsual := m["more_than_usual"]; moreThenUsual != nil && moreThenUsual.(bool) { - threshold := wrapperspb.Double(float64(m["threshold"].(int))) - groupBy := interfaceSliceToWrappedStringSlice(m["group_by"].([]interface{})) - parameters := &alerts.ConditionParameters{ - Threshold: threshold, - GroupBy: groupBy, - Timeframe: expandTimeFrame(m["time_window"].(string)), - } - return &alerts.AlertCondition{ - Condition: &alerts.AlertCondition_MoreThanUsual{ - MoreThanUsual: &alerts.MoreThanUsualCondition{Parameters: parameters}, - }, - }, nil - } else { - parameters, err := expandStandardConditionParameters(m) - if err != nil { - return nil, err - } - if lessThan := m["less_than"]; lessThan != nil && lessThan.(bool) { - return &alerts.AlertCondition{ - Condition: &alerts.AlertCondition_LessThan{ - LessThan: &alerts.LessThanCondition{Parameters: parameters}, - }, - }, nil - } else if moreThan := m["more_than"]; moreThan != nil && moreThan.(bool) { - evaluationWindow := expandEvaluationWindow(m) - return &alerts.AlertCondition{ - Condition: &alerts.AlertCondition_MoreThan{ - MoreThan: &alerts.MoreThanCondition{ - Parameters: parameters, - EvaluationWindow: evaluationWindow, - }, - }, - }, nil - } +func flattenTracingFilterType(tracingFilterType *cxsdk.TracingFilterType) *TracingFilterTypeModel { + if tracingFilterType == nil { + return nil } - return nil, fmt.Errorf("immediately, less_than, more_than or more_than_usual have to be true") + return &TracingFilterTypeModel{ + Values: wrappedStringSliceToTypeStringSet(tracingFilterType.GetValues()), + Operation: 
types.StringValue(tracingFilterOperationProtoToSchemaMap[tracingFilterType.GetOperation()]), + } } -func expandEvaluationWindow(m map[string]interface{}) *alerts.EvaluationWindow { - var evaluationWindow *alerts.EvaluationWindow - if evaluationWindowStr, ok := m["evaluation_window"].(string); ok && evaluationWindowStr != "" { - evaluationWindow = new(alerts.EvaluationWindow) - *evaluationWindow = alertSchemaToProtoEvaluationWindow[evaluationWindowStr] +func flattenTracingSpansFields(ctx context.Context, spanFields []*cxsdk.TracingSpanFieldsFilterType) (types.Set, diag.Diagnostics) { + var tracingSpanFields []*TracingSpanFieldsFilterModel + for _, field := range spanFields { + tracingSpanField, diags := flattenTracingSpanField(ctx, field) + if diags.HasError() { + return types.SetNull(types.ObjectType{AttrTypes: tracingSpanFieldsFilterAttr()}), diags + } + tracingSpanFields = append(tracingSpanFields, tracingSpanField) } - return evaluationWindow + return types.SetValueFrom(ctx, types.ObjectType{AttrTypes: tracingSpanFieldsFilterAttr()}, tracingSpanFields) } -func expandRelatedExtendedData(m map[string]interface{}) (*alerts.RelatedExtendedData, error) { - if v, ok := m["less_than"]; !(ok && v.(bool)) { +func flattenTracingSpanField(ctx context.Context, spanField *cxsdk.TracingSpanFieldsFilterType) (*TracingSpanFieldsFilterModel, diag.Diagnostics) { + if spanField == nil { return nil, nil } - if v, ok := m["manage_undetected_values"]; ok { - if manageUndetectedValues, ok := v.([]interface{}); ok && len(manageUndetectedValues) != 0 { - raw := manageUndetectedValues[0].(map[string]interface{}) - if enable, autoRetireRatio := raw["enable_triggering_on_undetected_values"], raw["auto_retire_ratio"]; enable.(bool) { - if autoRetireRatio == nil || autoRetireRatio.(string) == "" { - return nil, fmt.Errorf("auto_retire_ratio is required when enable_triggering_on_undetected_values = true") - } - cleanupDeadmanDurationStr := alertSchemaDeadmanRatiosToProtoDeadmanRatios[autoRetireRatio.(string)] - cleanupDeadmanDuration := alerts.CleanupDeadmanDuration(alerts.CleanupDeadmanDuration_value[cleanupDeadmanDurationStr]) - return &alerts.RelatedExtendedData{ - CleanupDeadmanDuration: &cleanupDeadmanDuration, - ShouldTriggerDeadman: wrapperspb.Bool(true), - }, nil - } else { - if autoRetireRatio != nil && autoRetireRatio.(string) != "" { - return nil, fmt.Errorf("auto_retire_ratio is not allowed when enable_triggering_on_undetected_values = false") - } - return &alerts.RelatedExtendedData{ - ShouldTriggerDeadman: wrapperspb.Bool(false), - }, nil - } - } + filterType, diags := types.ObjectValueFrom(ctx, tracingFiltersTypeAttr(), flattenTracingFilterType(spanField.GetFilterType())) + if diags.HasError() { + return nil, diags } - return nil, nil + return &TracingSpanFieldsFilterModel{ + Key: wrapperspbStringToTypeString(spanField.GetKey()), + FilterType: filterType, + }, nil } -func expandStandardConditionParameters(m map[string]interface{}) (*alerts.ConditionParameters, error) { - timeFrame := expandTimeFrame(m["time_window"].(string)) - groupBy := interfaceSliceToWrappedStringSlice(m["group_by"].([]interface{})) - threshold := wrapperspb.Double(float64(m["threshold"].(int))) - relatedExtendedData, err := expandRelatedExtendedData(m) - if err != nil { - return nil, err +func flattenTracingMoreThan(ctx context.Context, tracingMoreThan *cxsdk.TracingMoreThanTypeDefinition) (types.Object, diag.Diagnostics) { + if tracingMoreThan == nil { + return types.ObjectNull(tracingMoreThanAttr()), nil } - return 
&alerts.ConditionParameters{ - Threshold: threshold, - Timeframe: timeFrame, - GroupBy: groupBy, - RelatedExtendedData: relatedExtendedData, - }, nil -} - -func expandTracingConditionParameters(m map[string]interface{}) *alerts.ConditionParameters { - timeFrame := expandTimeFrame(m["time_window"].(string)) - groupBy := interfaceSliceToWrappedStringSlice(m["group_by"].([]interface{})) - threshold := wrapperspb.Double(float64(m["threshold"].(int))) + tracingQuery, diags := flattenTracingFilter(ctx, tracingMoreThan.GetTracingFilter()) + if diags.HasError() { + return types.ObjectNull(tracingMoreThanAttr()), diags + } - return &alerts.ConditionParameters{ - Threshold: threshold, - Timeframe: timeFrame, - GroupBy: groupBy, + timeWindow, diags := flattenTracingTimeWindow(ctx, tracingMoreThan.GetTimeWindow()) + if diags.HasError() { + return types.ObjectNull(tracingMoreThanAttr()), diags } -} -func expandStandardFilter(m map[string]interface{}) *alerts.AlertFilters { - filters := expandCommonAlertFilter(m) - filters.FilterType = alerts.AlertFilters_FILTER_TYPE_TEXT_OR_UNSPECIFIED - return filters + tracingMoreThanModel := TracingMoreThanModel{ + TracingFilter: tracingQuery, + NotificationPayloadFilter: wrappedStringSliceToTypeStringSet(tracingMoreThan.GetNotificationPayloadFilter()), + TimeWindow: timeWindow, + SpanAmount: wrapperspbUint32ToTypeInt64(tracingMoreThan.GetSpanAmount()), + } + return types.ObjectValueFrom(ctx, tracingMoreThanAttr(), tracingMoreThanModel) } -func expandRatio(m map[string]interface{}) (*alertParams, diag.Diagnostics) { - conditionMap := extractConditionMap(m) - groupBy := interfaceSliceToWrappedStringSlice(conditionMap["group_by"].([]interface{})) - var groupByQ1, groupByQ2 []*wrapperspb.StringValue - if len(groupBy) > 0 { - if conditionMap["group_by_q1"].(bool) { - groupByQ1 = groupBy - } else if conditionMap["group_by_q2"].(bool) { - groupByQ2 = groupBy - } else if conditionMap["group_by_both"].(bool) { - groupByQ1 = groupBy - groupByQ2 = groupBy - } else { - return nil, diag.Errorf("group_by is required with one of - group_by_q1/group_by_q1/group_by_both") - } +func flattenTracingTimeWindow(ctx context.Context, window *cxsdk.TracingTimeWindow) (types.Object, diag.Diagnostics) { + if window == nil { + return types.ObjectNull(logsTimeWindowAttr()), nil } - condition, err := expandRatioCondition(conditionMap, groupByQ1) - if err != nil { - return nil, diag.FromErr(err) + switch timeWindowType := window.Type.(type) { + case *cxsdk.TracingTimeWindow_TracingTimeWindowValue: + return types.ObjectValueFrom(ctx, logsTimeWindowAttr(), TracingTimeWindowModel{ + SpecificValue: types.StringValue(tracingTimeWindowProtoToSchemaMap[timeWindowType.TracingTimeWindowValue]), + }) + default: + return types.ObjectNull(logsTimeWindowAttr()), diag.Diagnostics{diag.NewErrorDiagnostic("Invalid Time Window", fmt.Sprintf("Time Window %v is not supported", timeWindowType))} } - filters := expandRatioFilters(m, groupByQ2) - - return &alertParams{ - Condition: condition, - Filters: filters, - }, nil -} -func expandRatioFilters(m map[string]interface{}, groupBy []*wrapperspb.StringValue) *alerts.AlertFilters { - query1 := m["query_1"].([]interface{})[0].(map[string]interface{}) - filters := expandCommonAlertFilter(query1) - filters.FilterType = alerts.AlertFilters_FILTER_TYPE_RATIO - filters.Alias = wrapperspb.String(query1["alias"].(string)) - query2 := expandQuery2(m["query_2"], groupBy) - filters.RatioAlerts = []*alerts.AlertFilters_RatioAlert{query2} - return filters } -func 
expandRatioCondition(m map[string]interface{}, groupBy []*wrapperspb.StringValue) (*alerts.AlertCondition, error) { - parameters, err := expandRatioParams(m, groupBy) - if err != nil { - return nil, err +func flattenMetricMoreThanUsual(ctx context.Context, metricMoreThanUsual *cxsdk.MetricMoreThanUsualTypeDefinition) (types.Object, diag.Diagnostics) { + if metricMoreThanUsual == nil { + return types.ObjectNull(metricMoreThanUsualAttr()), nil } - return expandLessThanOrMoreThanAlertCondition(m, parameters) -} + metricFilter, diags := flattenMetricFilter(ctx, metricMoreThanUsual.GetMetricFilter()) + if diags.HasError() { + return types.ObjectNull(metricMoreThanUsualAttr()), diags + } -func expandRatioParams(m map[string]interface{}, groupBy []*wrapperspb.StringValue) (*alerts.ConditionParameters, error) { - threshold := wrapperspb.Double(m["ratio_threshold"].(float64)) - timeFrame := expandTimeFrame(m["time_window"].(string)) - ignoreInfinity := wrapperspb.Bool(m["ignore_infinity"].(bool)) - relatedExtendedData, err := expandRelatedExtendedData(m) - if err != nil { - return nil, err + ofTheLast, diags := flattenMetricTimeWindow(ctx, metricMoreThanUsual.GetOfTheLast()) + if diags.HasError() { + return types.ObjectNull(metricMoreThanUsualAttr()), diags } - return &alerts.ConditionParameters{ - Threshold: threshold, - Timeframe: timeFrame, - GroupBy: groupBy, - IgnoreInfinity: ignoreInfinity, - RelatedExtendedData: relatedExtendedData, - }, nil + metricMoreThanUsualModel := MetricMoreThanUsualModel{ + MetricFilter: metricFilter, + OfTheLast: ofTheLast, + Threshold: wrapperspbUint32ToTypeInt64(metricMoreThanUsual.GetThreshold()), + ForOverPct: wrapperspbUint32ToTypeInt64(metricMoreThanUsual.GetForOverPct()), + MinNonNullValuesPct: wrapperspbUint32ToTypeInt64(metricMoreThanUsual.GetMinNonNullValuesPct()), + } + return types.ObjectValueFrom(ctx, metricMoreThanUsualAttr(), metricMoreThanUsualModel) } -func expandQuery2(v interface{}, groupBy []*wrapperspb.StringValue) *alerts.AlertFilters_RatioAlert { - m := v.([]interface{})[0].(map[string]interface{}) - alias := wrapperspb.String(m["alias"].(string)) - text := wrapperspb.String(m["search_query"].(string)) - severities := expandAlertFiltersSeverities(m["severities"].(*schema.Set).List()) - applications := interfaceSliceToWrappedStringSlice(m["applications"].(*schema.Set).List()) - subsystems := interfaceSliceToWrappedStringSlice(m["subsystems"].(*schema.Set).List()) - return &alerts.AlertFilters_RatioAlert{ - Alias: alias, - Text: text, - Severities: severities, - Applications: applications, - Subsystems: subsystems, - GroupBy: groupBy, +func flattenMetricLessThanUsual(ctx context.Context, metricLessThanUsual *cxsdk.MetricLessThanUsualTypeDefinition) (types.Object, diag.Diagnostics) { + if metricLessThanUsual == nil { + return types.ObjectNull(metricLessThanUsualAttr()), nil } -} -func expandNewValue(m map[string]interface{}) *alertParams { - conditionMap := extractConditionMap(m) - condition := expandNewValueCondition(conditionMap) - filters := expandNewValueFilters(m) + metricFilter, diags := flattenMetricFilter(ctx, metricLessThanUsual.GetMetricFilter()) + if diags.HasError() { + return types.ObjectNull(metricLessThanUsualAttr()), diags + } - return &alertParams{ - Condition: condition, - Filters: filters, + ofTheLast, diags := flattenMetricTimeWindow(ctx, metricLessThanUsual.GetOfTheLast()) + if diags.HasError() { + return types.ObjectNull(metricLessThanUsualAttr()), diags } -} -func expandNewValueCondition(m map[string]interface{}) 
*alerts.AlertCondition { - parameters := expandNewValueConditionParameters(m) - condition := &alerts.AlertCondition{ - Condition: &alerts.AlertCondition_NewValue{ - NewValue: &alerts.NewValueCondition{ - Parameters: parameters, - }, - }, + metricLessThanUsualModel := MetricLessThanUsualModel{ + MetricFilter: metricFilter, + OfTheLast: ofTheLast, + Threshold: wrapperspbUint32ToTypeInt64(metricLessThanUsual.GetThreshold()), + ForOverPct: wrapperspbUint32ToTypeInt64(metricLessThanUsual.GetForOverPct()), + MinNonNullValuesPct: wrapperspbUint32ToTypeInt64(metricLessThanUsual.GetMinNonNullValuesPct()), } - return condition + return types.ObjectValueFrom(ctx, metricLessThanUsualAttr(), metricLessThanUsualModel) } -func expandNewValueConditionParameters(m map[string]interface{}) *alerts.ConditionParameters { - timeFrame := expandNewValueTimeFrame(m["time_window"].(string)) - groupBy := []*wrapperspb.StringValue{wrapperspb.String(m["key_to_track"].(string))} - parameters := &alerts.ConditionParameters{ - Timeframe: timeFrame, - GroupBy: groupBy, +func flattenMetricMoreThanOrEquals(ctx context.Context, equals *cxsdk.MetricMoreThanOrEqualsTypeDefinition) (types.Object, diag.Diagnostics) { + if equals == nil { + return types.ObjectNull(metricMoreThanOrEqualsAttr()), nil } - return parameters -} -func expandNewValueFilters(m map[string]interface{}) *alerts.AlertFilters { - filters := expandCommonAlertFilter(m) - filters.FilterType = alerts.AlertFilters_FILTER_TYPE_TEXT_OR_UNSPECIFIED - return filters -} + metricFilter, diags := flattenMetricFilter(ctx, equals.GetMetricFilter()) + if diags.HasError() { + return types.ObjectNull(metricMoreThanOrEqualsAttr()), diags + } -func expandUniqueCount(m map[string]interface{}) *alertParams { - conditionMap := extractConditionMap(m) - condition := expandUniqueCountCondition(conditionMap) - filters := expandUniqueCountFilters(m) + ofTheLast, diags := flattenMetricTimeWindow(ctx, equals.GetOfTheLast()) + if diags.HasError() { + return types.ObjectNull(metricMoreThanOrEqualsAttr()), diags + } - return &alertParams{ - Condition: condition, - Filters: filters, + missingValues, diags := flattenMissingValues(ctx, equals.GetMissingValues()) + if diags.HasError() { + return types.ObjectNull(metricMoreThanOrEqualsAttr()), diags } -} -func expandUniqueCountCondition(m map[string]interface{}) *alerts.AlertCondition { - parameters := expandUniqueCountConditionParameters(m) - return &alerts.AlertCondition{ - Condition: &alerts.AlertCondition_UniqueCount{ - UniqueCount: &alerts.UniqueCountCondition{ - Parameters: parameters, - }, - }, + metricMoreThanOrEqualsModel := MetricMoreThanOrEqualsModel{ + MetricFilter: metricFilter, + Threshold: wrapperspbFloat64ToTypeFloat64(equals.GetThreshold()), + ForOverPct: wrapperspbUint32ToTypeInt64(equals.GetForOverPct()), + OfTheLast: ofTheLast, + MissingValues: missingValues, } + return types.ObjectValueFrom(ctx, metricMoreThanOrEqualsAttr(), metricMoreThanOrEqualsModel) } -func expandUniqueCountConditionParameters(m map[string]interface{}) *alerts.ConditionParameters { - uniqueCountKey := []*wrapperspb.StringValue{wrapperspb.String(m["unique_count_key"].(string))} - threshold := wrapperspb.Double(float64(m["max_unique_values"].(int))) - timeFrame := expandUniqueValueTimeFrame(m["time_window"].(string)) +func flattenMetricLessThanOrEquals(ctx context.Context, equals *cxsdk.MetricLessThanOrEqualsTypeDefinition) (types.Object, diag.Diagnostics) { + if equals == nil { + return types.ObjectNull(metricLessThanOrEqualsAttr()), nil + } - var 
groupByThreshold *wrapperspb.UInt32Value - var groupBy []*wrapperspb.StringValue - if groupByKey := m["group_by_key"]; groupByKey != nil && groupByKey.(string) != "" { - groupBy = []*wrapperspb.StringValue{wrapperspb.String(groupByKey.(string))} - groupByThreshold = wrapperspb.UInt32(uint32(m["max_unique_values_for_group_by"].(int))) + metricFilter, diags := flattenMetricFilter(ctx, equals.GetMetricFilter()) + if diags.HasError() { + return types.ObjectNull(metricLessThanOrEqualsAttr()), diags } - return &alerts.ConditionParameters{ - CardinalityFields: uniqueCountKey, - Threshold: threshold, - Timeframe: timeFrame, - GroupBy: groupBy, - MaxUniqueCountValuesForGroupByKey: groupByThreshold, + ofTheLast, diags := flattenMetricTimeWindow(ctx, equals.GetOfTheLast()) + if diags.HasError() { + return types.ObjectNull(metricLessThanOrEqualsAttr()), diags } -} -func expandUniqueCountFilters(m map[string]interface{}) *alerts.AlertFilters { - filters := expandCommonAlertFilter(m) - filters.FilterType = alerts.AlertFilters_FILTER_TYPE_UNIQUE_COUNT - return filters -} + missingValues, diags := flattenMissingValues(ctx, equals.GetMissingValues()) + if diags.HasError() { + return types.ObjectNull(metricLessThanOrEqualsAttr()), diags + } -func expandCommonAlertFilter(m map[string]interface{}) *alerts.AlertFilters { - severities := expandAlertFiltersSeverities(m["severities"].(*schema.Set).List()) - metadata := expandMetadata(m) - text := wrapperspb.String(m["search_query"].(string)) + undetectedValuesManagement, diags := flattenUndetectedValuesManagement(ctx, equals.GetUndetectedValuesManagement()) + if diags.HasError() { + return types.ObjectNull(metricLessThanOrEqualsAttr()), diags + } - return &alerts.AlertFilters{ - Severities: severities, - Metadata: metadata, - Text: text, + metricLessThanOrEqualsModel := MetricLessThanOrEqualsModel{ + MetricFilter: metricFilter, + Threshold: wrapperspbFloat64ToTypeFloat64(equals.GetThreshold()), + ForOverPct: wrapperspbUint32ToTypeInt64(equals.GetForOverPct()), + OfTheLast: ofTheLast, + MissingValues: missingValues, + UndetectedValuesManagement: undetectedValuesManagement, } + return types.ObjectValueFrom(ctx, metricLessThanOrEqualsAttr(), metricLessThanOrEqualsModel) } -func expandTimeRelative(m map[string]interface{}) (*alertParams, diag.Diagnostics) { - conditionMap := extractConditionMap(m) - condition, err := expandTimeRelativeCondition(conditionMap) - if err != nil { - return nil, diag.FromErr(err) +func flattenFlow(ctx context.Context, flow *cxsdk.FlowTypeDefinition) (types.Object, diag.Diagnostics) { + if flow == nil { + return types.ObjectNull(flowAttr()), nil } - filters := expandTimeRelativeFilters(m) - return &alertParams{ - Condition: condition, - Filters: filters, - }, nil + stages, diags := flattenFlowStages(ctx, flow.GetStages()) + if diags.HasError() { + return types.ObjectNull(flowAttr()), diags + } + + flowModel := FlowModel{ + Stages: stages, + EnforceSuppression: wrapperspbBoolToTypeBool(flow.GetEnforceSuppression()), + } + return types.ObjectValueFrom(ctx, flowAttr(), flowModel) } -func expandTimeRelativeCondition(m map[string]interface{}) (*alerts.AlertCondition, error) { - parameters, err := expandTimeRelativeConditionParameters(m) - if err != nil { - return nil, err +func flattenFlowStages(ctx context.Context, stages []*cxsdk.FlowStages) (types.List, diag.Diagnostics) { + var flowStages []*FlowStageModel + for _, stage := range stages { + flowStage, diags := flattenFlowStage(ctx, stage) + if diags.HasError() { + return 
types.ListNull(types.ObjectType{AttrTypes: flowStageAttr()}), diags + } + flowStages = append(flowStages, flowStage) } + return types.ListValueFrom(ctx, types.ObjectType{AttrTypes: flowStageAttr()}, flowStages) - return expandLessThanOrMoreThanAlertCondition(m, parameters) } -func expandLessThanOrMoreThanAlertCondition( - m map[string]interface{}, parameters *alerts.ConditionParameters) (*alerts.AlertCondition, error) { - lessThan, err := trueIfIsLessThanFalseIfMoreThanAndErrorOtherwise(m) - if err != nil { - return nil, err +func flattenFlowStage(ctx context.Context, stage *cxsdk.FlowStages) (*FlowStageModel, diag.Diagnostics) { + if stage == nil { + return nil, nil } - if lessThan { - return &alerts.AlertCondition{ - Condition: &alerts.AlertCondition_LessThan{ - LessThan: &alerts.LessThanCondition{Parameters: parameters}, - }, - }, nil + flowStagesGroups, diags := flattenFlowStagesGroups(ctx, stage) + if diags.HasError() { + return nil, diags } - return &alerts.AlertCondition{ - Condition: &alerts.AlertCondition_MoreThan{ - MoreThan: &alerts.MoreThanCondition{Parameters: parameters}, - }, - }, nil + flowStageModel := &FlowStageModel{ + FlowStagesGroups: flowStagesGroups, + TimeframeMs: wrapperspbInt64ToTypeInt64(stage.GetTimeframeMs()), + TimeframeType: types.StringValue(flowStageTimeFrameTypeProtoToSchemaMap[stage.GetTimeframeType()]), + } + return flowStageModel, nil + } -func trueIfIsLessThanFalseIfMoreThanAndErrorOtherwise(m map[string]interface{}) (bool, error) { - if lessThan := m["less_than"]; lessThan != nil && lessThan.(bool) { - return true, nil - } else if moreThan := m["more_than"]; moreThan != nil && moreThan.(bool) { - return false, nil +func flattenFlowStagesGroups(ctx context.Context, stage *cxsdk.FlowStages) (types.List, diag.Diagnostics) { + var flowStagesGroups []*FlowStagesGroupModel + for _, group := range stage.GetFlowStagesGroups().GetGroups() { + flowStageGroup, diags := flattenFlowStageGroup(ctx, group) + if diags.HasError() { + return types.ListNull(types.ObjectType{AttrTypes: flowStageGroupAttr()}), diags + } + flowStagesGroups = append(flowStagesGroups, flowStageGroup) } - return false, fmt.Errorf("less_than or more_than have to be true") + return types.ListValueFrom(ctx, types.ObjectType{AttrTypes: flowStageGroupAttr()}, flowStagesGroups) + } -func expandPromqlCondition(m map[string]interface{}, parameters *alerts.ConditionParameters) (*alerts.AlertCondition, error) { - conditionsStr, err := returnAlertConditionString(m) - if err != nil { - return nil, err +func flattenFlowStageGroup(ctx context.Context, group *cxsdk.FlowStagesGroup) (*FlowStagesGroupModel, diag.Diagnostics) { + if group == nil { + return nil, nil } - switch conditionsStr { - case "less_than": - return &alerts.AlertCondition{ - Condition: &alerts.AlertCondition_LessThan{ - LessThan: &alerts.LessThanCondition{Parameters: parameters}, - }, - }, nil - case "more_than": - return &alerts.AlertCondition{ - Condition: &alerts.AlertCondition_MoreThan{ - MoreThan: &alerts.MoreThanCondition{Parameters: parameters}, - }, - }, nil - case "more_than_usual": - return &alerts.AlertCondition{ - Condition: &alerts.AlertCondition_MoreThanUsual{ - MoreThanUsual: &alerts.MoreThanUsualCondition{Parameters: parameters}, - }, - }, nil - case "less_than_usual": - return &alerts.AlertCondition{ - Condition: &alerts.AlertCondition_LessThanUsual{ - LessThanUsual: &alerts.LessThanUsualCondition{Parameters: parameters}, - }, - }, nil - case "less_than_or_equal": - return &alerts.AlertCondition{ - Condition: 
&alerts.AlertCondition_LessThanOrEqual{ - LessThanOrEqual: &alerts.LessThanOrEqualCondition{Parameters: parameters}, - }, - }, nil - case "more_than_or_equal": - return &alerts.AlertCondition{ - Condition: &alerts.AlertCondition_MoreThanOrEqual{ - MoreThanOrEqual: &alerts.MoreThanOrEqualCondition{Parameters: parameters}, - }, - }, nil + alertDefs, diags := flattenAlertDefs(ctx, group.GetAlertDefs()) + if diags.HasError() { + return nil, diags } - return nil, fmt.Errorf("less_than, more_than, more_than_usual, less_than_usual, less_than_or_equal, or more_than_or_equal must be set to true") + flowStageGroupModel := &FlowStagesGroupModel{ + AlertDefs: alertDefs, + NextOp: types.StringValue(flowStagesGroupNextOpProtoToSchemaMap[group.GetNextOp()]), + AlertsOp: types.StringValue(flowStagesGroupAlertsOpProtoToSchemaMap[group.GetAlertsOp()]), + } + return flowStageGroupModel, nil } -func returnAlertConditionString(m map[string]interface{}) (string, error) { - if lessThan := m["less_than"]; lessThan != nil && lessThan.(bool) { - return "less_than", nil - } else if moreThan := m["more_than"]; moreThan != nil && moreThan.(bool) { - return "more_than", nil - } else if moreThanUsual := m["more_than_usual"]; moreThanUsual != nil && moreThanUsual.(bool) { - return "more_than_usual", nil - } else if lessThanUsual := m["less_than_usual"]; lessThanUsual != nil && lessThanUsual.(bool) { - return "less_than_usual", nil - } else if lessThanOrEqual := m["less_than_or_equal"]; lessThanOrEqual != nil && lessThanOrEqual.(bool) { - return "less_than_or_equal", nil - } else if moreThanOrEqual := m["more_than_or_equal"]; moreThanOrEqual != nil && moreThanOrEqual.(bool) { - return "more_than_or_equal", nil +func flattenAlertDefs(ctx context.Context, defs []*cxsdk.FlowStagesGroupsAlertDefs) (types.List, diag.Diagnostics) { + var alertDefs []*FlowStagesGroupsAlertDefsModel + for _, def := range defs { + alertDef := &FlowStagesGroupsAlertDefsModel{ + Id: wrapperspbStringToTypeString(def.GetId()), + Not: wrapperspbBoolToTypeBool(def.GetNot()), + } + alertDefs = append(alertDefs, alertDef) } - - return "", fmt.Errorf("less_than, more_than, more_than_usual, less_than_usual, less_than_or_equal, or more_than_or_equal must be set to true") + return types.ListValueFrom(ctx, types.ObjectType{AttrTypes: alertDefsAttr()}, alertDefs) } -func expandTimeRelativeConditionParameters(m map[string]interface{}) (*alerts.ConditionParameters, error) { - timeFrame, relativeTimeframe := expandTimeFrameAndRelativeTimeframe(m["relative_time_window"].(string)) - ignoreInfinity := wrapperspb.Bool(m["ignore_infinity"].(bool)) - groupBy := interfaceSliceToWrappedStringSlice(m["group_by"].([]interface{})) - threshold := wrapperspb.Double(m["ratio_threshold"].(float64)) - relatedExtendedData, err := expandRelatedExtendedData(m) - if err != nil { - return nil, err +func retriggeringPeriodAttr() map[string]attr.Type { + return map[string]attr.Type{ + "minutes": types.Int64Type, } - - return &alerts.ConditionParameters{ - Timeframe: timeFrame, - RelativeTimeframe: relativeTimeframe, - GroupBy: groupBy, - Threshold: threshold, - IgnoreInfinity: ignoreInfinity, - RelatedExtendedData: relatedExtendedData, - }, nil } -func expandTimeFrameAndRelativeTimeframe(relativeTimeframeStr string) (alerts.Timeframe, alerts.RelativeTimeframe) { - p := alertSchemaRelativeTimeFrameToProtoTimeFrameAndRelativeTimeFrame[relativeTimeframeStr] - return p.timeFrame, p.relativeTimeFrame +func incidentsSettingsAttr() map[string]attr.Type { + return map[string]attr.Type{ + 
"notify_on": types.StringType, + "retriggering_period": types.ObjectType{ + AttrTypes: retriggeringPeriodAttr(), + }, + } } -func expandTimeRelativeFilters(m map[string]interface{}) *alerts.AlertFilters { - filters := expandCommonAlertFilter(m) - filters.FilterType = alerts.AlertFilters_FILTER_TYPE_TIME_RELATIVE - return filters +func notificationGroupAttr() map[string]attr.Type { + return map[string]attr.Type{ + "group_by_fields": types.ListType{ + ElemType: types.StringType, + }, + "advanced_target_settings": types.SetType{ + ElemType: types.ObjectType{ + AttrTypes: advancedTargetSettingsAttr(), + }, + }, + "simple_target_settings": types.SetType{ + ElemType: types.ObjectType{ + AttrTypes: simpleTargetSettingsAttr(), + }, + }, + } } -func expandMetric(m map[string]interface{}) (*alertParams, diag.Diagnostics) { - condition, err := expandMetricCondition(m) - if err != nil { - return nil, diag.FromErr(err) +func advancedTargetSettingsAttr() map[string]attr.Type { + return map[string]attr.Type{ + "notify_on": types.StringType, + "retriggering_period": types.ObjectType{ + AttrTypes: retriggeringPeriodAttr(), + }, + "integration_id": types.StringType, + "recipients": types.SetType{ElemType: types.StringType}, } - filters := expandMetricFilters(m) - - return &alertParams{ - Condition: condition, - Filters: filters, - }, nil } -func expandMetricCondition(m map[string]interface{}) (*alerts.AlertCondition, error) { - isPromQL := len(m["promql"].([]interface{})) > 0 - var metricType string - if isPromQL { - metricType = "promql" - } else { - metricType = "lucene" - } - - metricMap := (m[metricType].([]interface{}))[0].(map[string]interface{}) - text := wrapperspb.String(metricMap["search_query"].(string)) - conditionMap := extractConditionMap(metricMap) - threshold := wrapperspb.Double(conditionMap["threshold"].(float64)) - sampleThresholdPercentage := wrapperspb.UInt32(uint32(conditionMap["sample_threshold_percentage"].(int))) - nonNullPercentage := wrapperspb.UInt32(uint32(conditionMap["min_non_null_values_percentage"].(int))) - swapNullValues := wrapperspb.Bool(conditionMap["replace_missing_value_with_zero"].(bool)) - timeFrame := expandMetricTimeFrame(conditionMap["time_window"].(string)) - relatedExtendedData, err := expandRelatedExtendedData(conditionMap) - if err != nil { - return nil, err +func simpleTargetSettingsAttr() map[string]attr.Type { + return map[string]attr.Type{ + "integration_id": types.StringType, + "recipients": types.SetType{ElemType: types.StringType}, } +} - parameters := &alerts.ConditionParameters{ - Threshold: threshold, - Timeframe: timeFrame, - RelatedExtendedData: relatedExtendedData, +func alertTypeDefinitionAttr() map[string]attr.Type { + return map[string]attr.Type{ + "logs_immediate": types.ObjectType{ + AttrTypes: logsImmediateAttr(), + }, + "logs_more_than": types.ObjectType{ + AttrTypes: logsMoreThanAttr(), + }, + "logs_less_than": types.ObjectType{ + AttrTypes: logsLessThanAttr(), + }, + "logs_more_than_usual": types.ObjectType{ + AttrTypes: logsMoreThanUsualAttr(), + }, + "logs_ratio_more_than": types.ObjectType{ + AttrTypes: logsRatioMoreThanAttr(), + }, + "logs_ratio_less_than": types.ObjectType{ + AttrTypes: logsRatioLessThanAttr(), + }, + "logs_new_value": types.ObjectType{ + AttrTypes: logsNewValueAttr(), + }, + "logs_unique_count": types.ObjectType{ + AttrTypes: logsUniqueCountAttr(), + }, + "logs_time_relative_more_than": types.ObjectType{ + AttrTypes: logsTimeRelativeMoreThanAttr(), + }, + "logs_time_relative_less_than": types.ObjectType{ + 
AttrTypes: logsTimeRelativeLessThanAttr(), + }, + "metric_more_than": types.ObjectType{ + AttrTypes: metricMoreThanAttr(), + }, + "metric_less_than": types.ObjectType{ + AttrTypes: metricLessThanAttr(), + }, + "metric_more_than_usual": types.ObjectType{ + AttrTypes: metricMoreThanUsualAttr(), + }, + "metric_less_than_usual": types.ObjectType{ + AttrTypes: metricLessThanUsualAttr(), + }, + "metric_more_than_or_equals": types.ObjectType{ + AttrTypes: metricMoreThanOrEqualsAttr(), + }, + "metric_less_than_or_equals": types.ObjectType{ + AttrTypes: metricLessThanOrEqualsAttr(), + }, + "tracing_immediate": types.ObjectType{ + AttrTypes: tracingImmediateAttr(), + }, + "tracing_more_than": types.ObjectType{ + AttrTypes: tracingMoreThanAttr(), + }, + "flow": types.ObjectType{ + AttrTypes: flowAttr(), + }, } +} - if isPromQL { - parameters.MetricAlertPromqlParameters = &alerts.MetricAlertPromqlConditionParameters{ - PromqlText: text, - SampleThresholdPercentage: sampleThresholdPercentage, - NonNullPercentage: nonNullPercentage, - SwapNullValues: swapNullValues, - } - } else { - metricField := wrapperspb.String(conditionMap["metric_field"].(string)) - arithmeticOperator := expandArithmeticOperator(conditionMap["arithmetic_operator"].(string)) - arithmeticOperatorModifier := wrapperspb.UInt32(uint32(conditionMap["arithmetic_operator_modifier"].(int))) - groupBy := interfaceSliceToWrappedStringSlice(conditionMap["group_by"].([]interface{})) - parameters.GroupBy = groupBy - parameters.MetricAlertParameters = &alerts.MetricAlertConditionParameters{ - MetricSource: alerts.MetricAlertConditionParameters_METRIC_SOURCE_LOGS2METRICS_OR_UNSPECIFIED, - MetricField: metricField, - ArithmeticOperator: arithmeticOperator, - ArithmeticOperatorModifier: arithmeticOperatorModifier, - SampleThresholdPercentage: sampleThresholdPercentage, - NonNullPercentage: nonNullPercentage, - SwapNullValues: swapNullValues, - } +func metricLessThanOrEqualsAttr() map[string]attr.Type { + return map[string]attr.Type{ + "metric_filter": types.ObjectType{ + AttrTypes: metricFilterAttr(), + }, + "threshold": types.Int64Type, + "for_over_pct": types.Int64Type, + "of_the_last": types.ObjectType{ + AttrTypes: metricTimeWindowAttr(), + }, + "missing_values": types.ObjectType{ + AttrTypes: metricMissingValuesAttr(), + }, + "undetected_values_management": types.ObjectType{ + AttrTypes: undetectedValuesManagementAttr(), + }, } +} - return expandPromqlCondition(conditionMap, parameters) +func metricMoreThanOrEqualsAttr() map[string]attr.Type { + return map[string]attr.Type{ + "metric_filter": types.ObjectType{ + AttrTypes: metricFilterAttr(), + }, + "threshold": types.Int64Type, + "for_over_pct": types.Int64Type, + "of_the_last": types.ObjectType{ + AttrTypes: metricTimeWindowAttr(), + }, + "missing_values": types.ObjectType{ + AttrTypes: metricMissingValuesAttr(), + }, + } } -func expandArithmeticOperator(s string) alerts.MetricAlertConditionParameters_ArithmeticOperator { - arithmeticStr := alertSchemaArithmeticOperatorToProtoArithmetic[s] - arithmeticValue := alerts.MetricAlertConditionParameters_ArithmeticOperator_value[arithmeticStr] - return alerts.MetricAlertConditionParameters_ArithmeticOperator(arithmeticValue) +func logsImmediateAttr() map[string]attr.Type { + return map[string]attr.Type{ + "logs_filter": types.ObjectType{ + AttrTypes: logsFilterAttr(), + }, + "notification_payload_filter": types.SetType{ + ElemType: types.StringType, + }, + } } -func expandMetricFilters(m map[string]interface{}) *alerts.AlertFilters { - var text 
*wrapperspb.StringValue - if len(m["promql"].([]interface{})) == 0 { - luceneArr := m["lucene"].([]interface{}) - lucene := luceneArr[0].(map[string]interface{}) - text = wrapperspb.String(lucene["search_query"].(string)) +func logsFilterAttr() map[string]attr.Type { + return map[string]attr.Type{ + "lucene_filter": types.ObjectType{ + AttrTypes: luceneFilterAttr(), + }, } +} - return &alerts.AlertFilters{ - FilterType: alerts.AlertFilters_FILTER_TYPE_METRIC, - Text: text, +func luceneFilterAttr() map[string]attr.Type { + return map[string]attr.Type{ + "lucene_query": types.StringType, + "label_filters": types.ObjectType{ + AttrTypes: labelFiltersAttr(), + }, } } -func expandFlow(m map[string]interface{}) *alertParams { - stages := expandFlowStages(m["stage"]) - parameters := expandFlowParameters(m["group_by"]) - return &alertParams{ - Condition: &alerts.AlertCondition{ - Condition: &alerts.AlertCondition_Flow{ - Flow: &alerts.FlowCondition{ - Stages: stages, - Parameters: parameters, - }, +func labelFiltersAttr() map[string]attr.Type { + return map[string]attr.Type{ + "application_name": types.SetType{ + ElemType: types.ObjectType{ + AttrTypes: labelFilterTypesAttr(), + }, + }, + "subsystem_name": types.SetType{ + ElemType: types.ObjectType{ + AttrTypes: labelFilterTypesAttr(), }, }, - Filters: &alerts.AlertFilters{ - FilterType: alerts.AlertFilters_FILTER_TYPE_FLOW, + "severities": types.SetType{ + ElemType: types.StringType, }, } } -func expandFlowParameters(i interface{}) *alerts.ConditionParameters { - if i == nil { - return nil - } - groupBy := interfaceSliceToWrappedStringSlice(i.([]interface{})) - if len(groupBy) == 0 { - return nil +func logsMoreThanAttr() map[string]attr.Type { + return map[string]attr.Type{ + "logs_filter": types.ObjectType{AttrTypes: logsFilterAttr()}, + "threshold": types.Int64Type, + "time_window": types.ObjectType{AttrTypes: logsTimeWindowAttr()}, + "evaluation_window": types.StringType, + "notification_payload_filter": types.SetType{ElemType: types.StringType}, } +} - return &alerts.ConditionParameters{ - GroupBy: groupBy, +func logsTimeWindowAttr() map[string]attr.Type { + return map[string]attr.Type{ + "specific_value": types.StringType, } } -func expandFlowStages(i interface{}) []*alerts.FlowStage { - l := i.([]interface{}) - result := make([]*alerts.FlowStage, 0, len(l)) - for _, v := range l { - stage := expandFlowStage(v) - result = append(result, stage) +func logsRatioMoreThanAttr() map[string]attr.Type { + return map[string]attr.Type{ + "numerator_logs_filter": types.ObjectType{AttrTypes: logsFilterAttr()}, + "numerator_alias": types.StringType, + "denominator_logs_filter": types.ObjectType{AttrTypes: logsFilterAttr()}, + "denominator_alias": types.StringType, + "threshold": types.Int64Type, + "time_window": types.ObjectType{AttrTypes: logsTimeWindowAttr()}, + "ignore_infinity": types.BoolType, + "notification_payload_filter": types.SetType{ + ElemType: types.StringType, + }, + "group_by_for": types.StringType, } - - return result } -func expandFlowStage(i interface{}) *alerts.FlowStage { - m := i.(map[string]interface{}) - groups := expandGroups(m["group"]) - timeFrame := expandFlowTimeFrame(m["time_window"]) - return &alerts.FlowStage{Groups: groups, Timeframe: timeFrame} +func logsRatioLessThanAttr() map[string]attr.Type { + return map[string]attr.Type{ + "numerator_logs_filter": types.ObjectType{AttrTypes: logsFilterAttr()}, + "numerator_alias": types.StringType, + "denominator_logs_filter": types.ObjectType{AttrTypes: logsFilterAttr()}, + 
"denominator_alias": types.StringType, + "threshold": types.Int64Type, + "time_window": types.ObjectType{AttrTypes: logsTimeWindowAttr()}, + "ignore_infinity": types.BoolType, + "notification_payload_filter": types.SetType{ + ElemType: types.StringType, + }, + "group_by_for": types.StringType, + "undetected_values_management": types.ObjectType{AttrTypes: undetectedValuesManagementAttr()}, + } } -func expandGroups(v interface{}) []*alerts.FlowGroup { - groups := v.([]interface{}) - result := make([]*alerts.FlowGroup, 0, len(groups)) - for _, g := range groups { - group := expandFlowGroup(g) - result = append(result, group) +func logsMoreThanUsualAttr() map[string]attr.Type { + return map[string]attr.Type{ + "logs_filter": types.ObjectType{AttrTypes: logsFilterAttr()}, + "minimum_threshold": types.Int64Type, + "time_window": types.ObjectType{AttrTypes: logsTimeWindowAttr()}, + "notification_payload_filter": types.SetType{ElemType: types.StringType}, } +} - return result +func logsLessThanAttr() map[string]attr.Type { + return map[string]attr.Type{ + "logs_filter": types.ObjectType{AttrTypes: logsFilterAttr()}, + "threshold": types.Int64Type, + "time_window": types.ObjectType{AttrTypes: logsTimeWindowAttr()}, + "undetected_values_management": types.ObjectType{AttrTypes: undetectedValuesManagementAttr()}, + "notification_payload_filter": types.SetType{ElemType: types.StringType}, + } } -func expandFlowGroup(v interface{}) *alerts.FlowGroup { - m := v.(map[string]interface{}) - subAlerts := expandSubAlerts(m["sub_alerts"]) - operator := expandOperator(m["next_operator"]) - return &alerts.FlowGroup{ - Alerts: subAlerts, - NextOp: operator, +func undetectedValuesManagementAttr() map[string]attr.Type { + return map[string]attr.Type{ + "trigger_undetected_values": types.BoolType, + "auto_retire_timeframe": types.StringType, } } -func expandSubAlerts(v interface{}) *alerts.FlowAlerts { - l := v.([]interface{}) - if len(l) == 0 || l[0] == nil { - return nil +func alertScheduleAttr() map[string]attr.Type { + return map[string]attr.Type{ + "active_on": types.ObjectType{ + AttrTypes: alertScheduleActiveOnAttr(), + }, } - raw := l[0] - m := raw.(map[string]interface{}) +} - operator := expandOperator(m["operator"]) - values := expandInnerFlowAlerts(m["flow_alert"]) +func alertScheduleActiveOnAttr() map[string]attr.Type { + return map[string]attr.Type{ + "days_of_week": types.ListType{ + ElemType: types.StringType, + }, + "start_time": types.ObjectType{ + AttrTypes: timeOfDayAttr(), + }, + "end_time": types.ObjectType{ + AttrTypes: timeOfDayAttr(), + }, + } +} - return &alerts.FlowAlerts{ - Op: operator, - Values: values, +func timeOfDayAttr() map[string]attr.Type { + return map[string]attr.Type{ + "hours": types.Int64Type, + "minutes": types.Int64Type, } } -func expandInnerFlowAlerts(v interface{}) []*alerts.FlowAlert { - flowAlerts := v.([]interface{}) - result := make([]*alerts.FlowAlert, 0, len(flowAlerts)) - for _, fa := range flowAlerts { - flowAlert := expandInnerFlowAlert(fa) - result = append(result, flowAlert) +func logsNewValueAttr() map[string]attr.Type { + return map[string]attr.Type{ + "logs_filter": types.ObjectType{AttrTypes: logsFilterAttr()}, + "keypath_to_track": types.StringType, + "time_window": types.ObjectType{AttrTypes: logsTimeWindowAttr()}, + "notification_payload_filter": types.SetType{ElemType: types.StringType}, } - return result } -func expandInnerFlowAlert(v interface{}) *alerts.FlowAlert { - m := v.(map[string]interface{}) - return &alerts.FlowAlert{ - Id: 
wrapperspb.String(m["user_alert_id"].(string)), - Not: wrapperspb.Bool(m["not"].(bool)), +func logsUniqueCountAttr() map[string]attr.Type { + return map[string]attr.Type{ + "logs_filter": types.ObjectType{AttrTypes: logsFilterAttr()}, + "unique_count_keypath": types.StringType, + "max_unique_count": types.Int64Type, + "time_window": types.ObjectType{AttrTypes: logsTimeWindowAttr()}, + "notification_payload_filter": types.SetType{ElemType: types.StringType}, + "max_unique_count_per_group_by_key": types.Int64Type, } } -func expandOperator(i interface{}) alerts.FlowOperator { - operatorStr := i.(string) - return alerts.FlowOperator(alerts.FlowOperator_value[operatorStr]) +func metricMoreThanAttr() map[string]attr.Type { + return map[string]attr.Type{ + "metric_filter": types.ObjectType{AttrTypes: metricFilterAttr()}, + "threshold": types.Float64Type, + "for_over_pct": types.Int64Type, + "of_the_last": types.ObjectType{AttrTypes: metricTimeWindowAttr()}, + "missing_values": types.ObjectType{AttrTypes: metricMissingValuesAttr()}, + } } -func expandFlowTimeFrame(i interface{}) *alerts.FlowTimeframe { - return &alerts.FlowTimeframe{ - Ms: wrapperspb.UInt32(uint32(expandTimeToMS(i))), +func metricFilterAttr() map[string]attr.Type { + return map[string]attr.Type{ + "promql": types.StringType, } } -func expandTracing(m map[string]interface{}) (*alertParams, *alerts.TracingAlert) { - tracingParams, _ := expandTracingParams(m) - tracingAlert := expandTracingAlert(m) +func metricTimeWindowAttr() map[string]attr.Type { + return map[string]attr.Type{ + "specific_value": types.StringType, + } +} - return tracingParams, tracingAlert +func metricMissingValuesAttr() map[string]attr.Type { + return map[string]attr.Type{ + "replace_with_zero": types.BoolType, + "min_non_null_values_pct": types.Int64Type, + } } -func expandTracingParams(m map[string]interface{}) (*alertParams, error) { - conditionMap := extractConditionMap(m) - condition, err := expandTracingCondition(conditionMap) - if err != nil { - return nil, err +func metricLessThanUsualAttr() map[string]attr.Type { + return map[string]attr.Type{ + "metric_filter": types.ObjectType{AttrTypes: metricFilterAttr()}, + "of_the_last": types.ObjectType{AttrTypes: metricTimeWindowAttr()}, + "threshold": types.Int64Type, + "for_over_pct": types.Int64Type, + "min_non_null_values_pct": types.Int64Type, } - filters := expandTracingFilter() - return &alertParams{ - Condition: condition, - Filters: filters, - }, nil } -func expandTracingCondition(m map[string]interface{}) (*alerts.AlertCondition, error) { - if immediately := m["immediately"]; immediately != nil && immediately.(bool) { - return &alerts.AlertCondition{ - Condition: &alerts.AlertCondition_Immediate{}, - }, nil - } else if moreThan := m["more_than"]; moreThan != nil && moreThan.(bool) { - parameters := expandTracingConditionParameters(m) - return &alerts.AlertCondition{ - Condition: &alerts.AlertCondition_MoreThan{ - MoreThan: &alerts.MoreThanCondition{Parameters: parameters}, +func flowAttr() map[string]attr.Type { + return map[string]attr.Type{ + "stages": types.ListType{ + ElemType: types.ObjectType{ + AttrTypes: flowStageAttr(), }, - }, nil + }, + "enforce_suppression": types.BoolType, } - - return nil, fmt.Errorf("immediately or more_than have to be true") } -func expandTracingFilter() *alerts.AlertFilters { - return &alerts.AlertFilters{ - FilterType: alerts.AlertFilters_FILTER_TYPE_TRACING, +func flowStageAttr() map[string]attr.Type { + return map[string]attr.Type{ + "flow_stages_groups": 
types.ListType{ + ElemType: types.ObjectType{ + AttrTypes: flowStageGroupAttr(), + }, + }, + "timeframe_ms": types.Int64Type, + "timeframe_type": types.StringType, } } -func expandTracingAlert(m map[string]interface{}) *alerts.TracingAlert { - conditionLatency := uint32(m["latency_threshold_milliseconds"].(float64) * (float64)(time.Millisecond.Microseconds())) - applications := m["applications"].(*schema.Set).List() - subsystems := m["subsystems"].(*schema.Set).List() - services := m["services"].(*schema.Set).List() - fieldFilters := expandFiltersData(applications, subsystems, services) - tagFilters := expandTagFilters(m["tag_filter"]) - return &alerts.TracingAlert{ - ConditionLatency: conditionLatency, - FieldFilters: fieldFilters, - TagFilters: tagFilters, +func flowStageGroupAttr() map[string]attr.Type { + return map[string]attr.Type{ + "alert_defs": types.ListType{ + ElemType: types.ObjectType{ + AttrTypes: alertDefsAttr(), + }, + }, + "next_op": types.StringType, + "alerts_op": types.StringType, } } -func expandFiltersData(applications, subsystems, services []interface{}) []*alerts.FilterData { - result := make([]*alerts.FilterData, 0) - if len(applications) != 0 { - result = append(result, expandSpecificFilter("applicationName", applications)) - } - if len(subsystems) != 0 { - result = append(result, expandSpecificFilter("subsystemName", subsystems)) - } - if len(services) != 0 { - result = append(result, expandSpecificFilter("serviceName", services)) +func alertDefsAttr() map[string]attr.Type { + return map[string]attr.Type{ + "id": types.StringType, + "not": types.BoolType, } +} - return result +func tracingMoreThanAttr() map[string]attr.Type { + return map[string]attr.Type{ + "tracing_filter": types.ObjectType{AttrTypes: tracingQueryAttr()}, + "notification_payload_filter": types.SetType{ElemType: types.StringType}, + "time_window": types.ObjectType{AttrTypes: logsTimeWindowAttr()}, + "span_amount": types.Int64Type, + } } -func expandTagFilters(i interface{}) []*alerts.FilterData { - if i == nil { - return nil +func tracingImmediateAttr() map[string]attr.Type { + return map[string]attr.Type{ + "tracing_filter": types.ObjectType{AttrTypes: tracingQueryAttr()}, + "notification_payload_filter": types.SetType{ElemType: types.StringType}, } - l := i.(*schema.Set).List() +} - result := make([]*alerts.FilterData, 0, len(l)) - for _, v := range l { - m := v.(map[string]interface{}) - field := m["field"].(string) - values := m["values"].(*schema.Set).List() - result = append(result, expandSpecificFilter(field, values)) +func metricMoreThanUsualAttr() map[string]attr.Type { + return map[string]attr.Type{ + "metric_filter": types.ObjectType{AttrTypes: metricFilterAttr()}, + "of_the_last": types.ObjectType{AttrTypes: metricTimeWindowAttr()}, + "threshold": types.Int64Type, + "for_over_pct": types.Int64Type, + "min_non_null_values_pct": types.Int64Type, } - return result } -func expandSpecificFilter(filterName string, values []interface{}) *alerts.FilterData { - operatorToFilterValues := make(map[string]*alerts.Filters) - for _, val := range values { - operator, filterValue := expandFilter(val.(string)) - if _, ok := operatorToFilterValues[operator]; !ok { - operatorToFilterValues[operator] = new(alerts.Filters) - operatorToFilterValues[operator].Operator = operator - operatorToFilterValues[operator].Values = make([]string, 0) - } - operatorToFilterValues[operator].Values = append(operatorToFilterValues[operator].Values, filterValue) +func metricLessThanAttr() map[string]attr.Type { + return 
map[string]attr.Type{ + "metric_filter": types.ObjectType{AttrTypes: metricFilterAttr()}, + "threshold": types.Float64Type, + "for_over_pct": types.Int64Type, + "of_the_last": types.ObjectType{AttrTypes: metricTimeWindowAttr()}, + "missing_values": types.ObjectType{AttrTypes: metricMissingValuesAttr()}, + "undetected_values_management": types.ObjectType{AttrTypes: undetectedValuesManagementAttr()}, } +} - filterResult := make([]*alerts.Filters, 0, len(operatorToFilterValues)) - for _, filters := range operatorToFilterValues { - filterResult = append(filterResult, filters) +func logsTimeRelativeLessThanAttr() map[string]attr.Type { + return map[string]attr.Type{ + "logs_filter": types.ObjectType{AttrTypes: logsFilterAttr()}, + "threshold": types.Int64Type, + "notification_payload_filter": types.SetType{ElemType: types.StringType}, + "compared_to": types.StringType, + "ignore_infinity": types.BoolType, + "undetected_values_management": types.ObjectType{AttrTypes: undetectedValuesManagementAttr()}, } +} - return &alerts.FilterData{ - Field: filterName, - Filters: filterResult, +func logsTimeRelativeMoreThanAttr() map[string]attr.Type { + return map[string]attr.Type{ + "logs_filter": types.ObjectType{AttrTypes: logsFilterAttr()}, + "notification_payload_filter": types.SetType{ElemType: types.StringType}, + "threshold": types.Int64Type, + "compared_to": types.StringType, + "ignore_infinity": types.BoolType, } } -func expandFilter(filterString string) (operator, filterValue string) { - operator, filterValue = "equals", filterString - if strings.HasPrefix(filterValue, "filter:") { - arr := strings.SplitN(filterValue, ":", 3) - operator, filterValue = arr[1], arr[2] +func tracingQueryAttr() map[string]attr.Type { + return map[string]attr.Type{ + "latency_threshold_ms": types.Int64Type, + "tracing_label_filters": types.ObjectType{AttrTypes: tracingLabelFiltersAttr()}, } +} - return +func labelFilterTypesAttr() map[string]attr.Type { + return map[string]attr.Type{ + "value": types.StringType, + "operation": types.StringType, + } } -func extractConditionMap(m map[string]interface{}) map[string]interface{} { - return m["condition"].([]interface{})[0].(map[string]interface{}) +func tracingLabelFiltersAttr() map[string]attr.Type { + return map[string]attr.Type{ + "application_name": types.SetType{ElemType: types.ObjectType{AttrTypes: tracingFiltersTypeAttr()}}, + "subsystem_name": types.SetType{ElemType: types.ObjectType{AttrTypes: tracingFiltersTypeAttr()}}, + "service_name": types.SetType{ElemType: types.ObjectType{AttrTypes: tracingFiltersTypeAttr()}}, + "operation_name": types.SetType{ElemType: types.ObjectType{AttrTypes: tracingFiltersTypeAttr()}}, + "span_fields": types.SetType{ElemType: types.ObjectType{AttrTypes: tracingSpanFieldsFilterAttr()}}, + } } -func expandTimeFrame(s string) alerts.Timeframe { - protoTimeFrame := alertSchemaTimeFrameToProtoTimeFrame[s] - return alerts.Timeframe(alerts.Timeframe_value[protoTimeFrame]) +func tracingFiltersTypeAttr() map[string]attr.Type { + return map[string]attr.Type{ + "operation": types.StringType, + "values": types.SetType{ElemType: types.StringType}, + } } -func expandMetricTimeFrame(s string) alerts.Timeframe { - protoTimeFrame := alertSchemaMetricTimeFrameToMetricProtoTimeFrame[s] - return alerts.Timeframe(alerts.Timeframe_value[protoTimeFrame]) +func tracingSpanFieldsFilterAttr() map[string]attr.Type { + return map[string]attr.Type{ + "key": types.StringType, + "filter_type": types.ObjectType{AttrTypes: tracingFiltersTypeAttr()}, + } } -func 
expandMetadata(m map[string]interface{}) *alerts.AlertFilters_MetadataFilters { - categories := interfaceSliceToWrappedStringSlice(m["categories"].(*schema.Set).List()) - applications := interfaceSliceToWrappedStringSlice(m["applications"].(*schema.Set).List()) - subsystems := interfaceSliceToWrappedStringSlice(m["subsystems"].(*schema.Set).List()) - computers := interfaceSliceToWrappedStringSlice(m["computers"].(*schema.Set).List()) - classes := interfaceSliceToWrappedStringSlice(m["classes"].(*schema.Set).List()) - methods := interfaceSliceToWrappedStringSlice(m["methods"].(*schema.Set).List()) - ipAddresses := interfaceSliceToWrappedStringSlice(m["ip_addresses"].(*schema.Set).List()) +func (r *AlertResource) Update(ctx context.Context, req resource.UpdateRequest, resp *resource.UpdateResponse) { + // Retrieve values from plan + var plan *AlertResourceModel + diags := req.Plan.Get(ctx, &plan) + resp.Diagnostics.Append(diags...) + if resp.Diagnostics.HasError() { + return + } - return &alerts.AlertFilters_MetadataFilters{ - Categories: categories, - Applications: applications, - Subsystems: subsystems, - Computers: computers, - Classes: classes, - Methods: methods, - IpAddresses: ipAddresses, + alertProperties, diags := extractAlertProperties(ctx, plan) + if diags.HasError() { + resp.Diagnostics.Append(diags...) + return } -} + updateAlertReq := &cxsdk.ReplaceAlertDefRequest{ + Id: typeStringToWrapperspbString(plan.ID), + AlertDefProperties: alertProperties, + } + log.Printf("[INFO] Updating Alert: %s", protojson.Format(updateAlertReq)) + alertUpdateResp, err := r.client.Replace(ctx, updateAlertReq) + if err != nil { + log.Printf("[ERROR] Received error: %s", err.Error()) + resp.Diagnostics.AddError( + "Error updating Alert", + formatRpcErrors(err, updateAlertURL, protojson.Format(updateAlertReq)), + ) + return + } + log.Printf("[INFO] Submitted updated Alert: %s", protojson.Format(alertUpdateResp)) -func expandAlertFiltersSeverities(v interface{}) []alerts.AlertFilters_LogSeverity { - s := interfaceSliceToStringSlice(v.([]interface{})) - result := make([]alerts.AlertFilters_LogSeverity, 0, len(s)) - for _, v := range s { - logSeverityStr := alertSchemaLogSeverityToProtoLogSeverity[v] - result = append(result, alerts.AlertFilters_LogSeverity( - alerts.AlertFilters_LogSeverity_value[logSeverityStr])) + // Get refreshed Alert value from Coralogix + getAlertReq := &cxsdk.GetAlertDefRequest{Id: typeStringToWrapperspbString(plan.ID)} + getAlertResp, err := r.client.Get(ctx, getAlertReq) + if err != nil { + log.Printf("[ERROR] Received error: %s", err.Error()) + if status.Code(err) == codes.NotFound { + resp.Diagnostics.AddWarning( + fmt.Sprintf("Alert %q is in state, but no longer exists in Coralogix backend", plan.ID.ValueString()), + fmt.Sprintf("%s will be recreated when you apply", plan.ID.ValueString()), + ) + resp.State.RemoveResource(ctx) + } else { + resp.Diagnostics.AddError( + "Error reading Alert", + formatRpcErrors(err, getAlertURL, protojson.Format(getAlertReq)), + ) + } + return } + log.Printf("[INFO] Received Alert: %s", protojson.Format(getAlertResp)) - return result -} + plan, diags = flattenAlert(ctx, getAlertResp.GetAlertDef()) + if diags.HasError() { + resp.Diagnostics.Append(diags...) 
+ return + } -func expandNewValueTimeFrame(s string) alerts.Timeframe { - protoTimeFrame := alertSchemaNewValueTimeFrameToProtoTimeFrame[s] - return alerts.Timeframe(alerts.Timeframe_value[protoTimeFrame]) + // Set state to fully populated data + resp.Diagnostics.Append(resp.State.Set(ctx, plan)...) } -func expandUniqueValueTimeFrame(s string) alerts.Timeframe { - protoTimeFrame := alertSchemaUniqueCountTimeFrameToProtoTimeFrame[s] - return alerts.Timeframe(alerts.Timeframe_value[protoTimeFrame]) -} +func (r *AlertResource) Delete(ctx context.Context, req resource.DeleteRequest, resp *resource.DeleteResponse) { + var state AlertResourceModel + diags := req.State.Get(ctx, &state) + resp.Diagnostics.Append(diags...) + if resp.Diagnostics.HasError() { + return + } -func expandTimeInDay(v interface{}) *alerts.Time { - timeArr := strings.Split(v.(string), ":") - hours := parseNumInt32(timeArr[0]) - minutes := parseNumInt32(timeArr[1]) - return &alerts.Time{ - Hours: hours, - Minutes: minutes, + id := state.ID.ValueString() + log.Printf("[INFO] Deleting Alert %s", id) + deleteReq := &cxsdk.DeleteAlertDefRequest{Id: wrapperspb.String(id)} + log.Printf("[INFO] Deleting Alert: %s", protojson.Format(deleteReq)) + if _, err := r.client.Delete(ctx, deleteReq); err != nil { + resp.Diagnostics.AddError( + fmt.Sprintf("Error Deleting Alert %s", id), + formatRpcErrors(err, deleteAlertURL, protojson.Format(deleteReq)), + ) + return } + log.Printf("[INFO] Alert %s deleted", id) } diff --git a/coralogix/resource_coralogix_alert.go.old b/coralogix/resource_coralogix_alert.go.old new file mode 100644 index 00000000..9e766044 --- /dev/null +++ b/coralogix/resource_coralogix_alert.go.old @@ -0,0 +1,3517 @@ +// Copyright 2024 Coralogix Ltd. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package coralogix + +import ( + "context" + "fmt" + "log" + "regexp" + "strconv" + "strings" + "time" + + "terraform-provider-coralogix/coralogix/clientset" + alerts "terraform-provider-coralogix/coralogix/clientset/grpc/alerts/v2" + + "google.golang.org/protobuf/encoding/protojson" + + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" + + . 
"github.com/ahmetalpbalkan/go-linq" + "github.com/hashicorp/terraform-plugin-sdk/v2/diag" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" + "google.golang.org/protobuf/types/known/wrapperspb" +) + +var ( + validAlertTypes = []string{ + "standard", "ratio", "new_value", "unique_count", "time_relative", "metric", "tracing", "flow"} + alertSchemaSeverityToProtoSeverity = map[string]string{ + "Info": "ALERT_SEVERITY_INFO_OR_UNSPECIFIED", + "Warning": "ALERT_SEVERITY_WARNING", + "Critical": "ALERT_SEVERITY_CRITICAL", + "Error": "ALERT_SEVERITY_ERROR", + } + alertProtoSeverityToSchemaSeverity = reverseMapStrings(alertSchemaSeverityToProtoSeverity) + alertValidSeverities = getKeysStrings(alertSchemaSeverityToProtoSeverity) + alertSchemaLogSeverityToProtoLogSeverity = map[string]string{ + "Debug": "LOG_SEVERITY_DEBUG_OR_UNSPECIFIED", + "Verbose": "LOG_SEVERITY_VERBOSE", + "Info": "LOG_SEVERITY_INFO", + "Warning": "LOG_SEVERITY_WARNING", + "Error": "LOG_SEVERITY_ERROR", + "Critical": "LOG_SEVERITY_CRITICAL", + } + alertProtoLogSeverityToSchemaLogSeverity = reverseMapStrings(alertSchemaLogSeverityToProtoLogSeverity) + alertValidLogSeverities = getKeysStrings(alertSchemaLogSeverityToProtoLogSeverity) + alertSchemaDayOfWeekToProtoDayOfWeek = map[string]string{ + "Monday": "DAY_OF_WEEK_MONDAY_OR_UNSPECIFIED", + "Tuesday": "DAY_OF_WEEK_TUESDAY", + "Wednesday": "DAY_OF_WEEK_WEDNESDAY", + "Thursday": "DAY_OF_WEEK_THURSDAY", + "Friday": "DAY_OF_WEEK_FRIDAY", + "Saturday": "DAY_OF_WEEK_SATURDAY", + "Sunday": "DAY_OF_WEEK_SUNDAY", + } + alertProtoDayOfWeekToSchemaDayOfWeek = reverseMapStrings(alertSchemaDayOfWeekToProtoDayOfWeek) + alertValidDaysOfWeek = getKeysStrings(alertSchemaDayOfWeekToProtoDayOfWeek) + alertSchemaTimeFrameToProtoTimeFrame = map[string]string{ + "5Min": "TIMEFRAME_5_MIN_OR_UNSPECIFIED", + "10Min": "TIMEFRAME_10_MIN", + "15Min": "TIMEFRAME_15_MIN", + "20Min": "TIMEFRAME_20_MIN", + "30Min": "TIMEFRAME_30_MIN", + "1H": "TIMEFRAME_1_H", + "2H": "TIMEFRAME_2_H", + "4H": "TIMEFRAME_4_H", + "6H": "TIMEFRAME_6_H", + "12H": "TIMEFRAME_12_H", + "24H": "TIMEFRAME_24_H", + "36H": "TIMEFRAME_36_H", + } + alertProtoTimeFrameToSchemaTimeFrame = reverseMapStrings(alertSchemaTimeFrameToProtoTimeFrame) + alertValidTimeFrames = getKeysStrings(alertSchemaTimeFrameToProtoTimeFrame) + alertSchemaUniqueCountTimeFrameToProtoTimeFrame = map[string]string{ + "1Min": "TIMEFRAME_1_MIN", + "5Min": "TIMEFRAME_5_MIN_OR_UNSPECIFIED", + "10Min": "TIMEFRAME_10_MIN", + "15Min": "TIMEFRAME_15_MIN", + "20Min": "TIMEFRAME_20_MIN", + "30Min": "TIMEFRAME_30_MIN", + "1H": "TIMEFRAME_1_H", + "2H": "TIMEFRAME_2_H", + "4H": "TIMEFRAME_4_H", + "6H": "TIMEFRAME_6_H", + "12H": "TIMEFRAME_12_H", + "24H": "TIMEFRAME_24_H", + } + alertProtoUniqueCountTimeFrameToSchemaTimeFrame = reverseMapStrings(alertSchemaUniqueCountTimeFrameToProtoTimeFrame) + alertValidUniqueCountTimeFrames = getKeysStrings(alertSchemaUniqueCountTimeFrameToProtoTimeFrame) + alertSchemaNewValueTimeFrameToProtoTimeFrame = map[string]string{ + "12H": "TIMEFRAME_12_H", + "24H": "TIMEFRAME_24_H", + "48H": "TIMEFRAME_48_H", + "72H": "TIMEFRAME_72_H", + "1W": "TIMEFRAME_1_W", + "1Month": "TIMEFRAME_1_M", + "2Month": "TIMEFRAME_2_M", + "3Month": "TIMEFRAME_3_M", + } + alertProtoNewValueTimeFrameToSchemaTimeFrame = reverseMapStrings(alertSchemaNewValueTimeFrameToProtoTimeFrame) + alertValidNewValueTimeFrames = getKeysStrings(alertSchemaNewValueTimeFrameToProtoTimeFrame) + 
alertSchemaRelativeTimeFrameToProtoTimeFrameAndRelativeTimeFrame = map[string]protoTimeFrameAndRelativeTimeFrame{ + "Previous_hour": {timeFrame: alerts.Timeframe_TIMEFRAME_1_H, relativeTimeFrame: alerts.RelativeTimeframe_RELATIVE_TIMEFRAME_HOUR_OR_UNSPECIFIED}, + "Same_hour_yesterday": {timeFrame: alerts.Timeframe_TIMEFRAME_1_H, relativeTimeFrame: alerts.RelativeTimeframe_RELATIVE_TIMEFRAME_DAY}, + "Same_hour_last_week": {timeFrame: alerts.Timeframe_TIMEFRAME_1_H, relativeTimeFrame: alerts.RelativeTimeframe_RELATIVE_TIMEFRAME_WEEK}, + "Yesterday": {timeFrame: alerts.Timeframe_TIMEFRAME_24_H, relativeTimeFrame: alerts.RelativeTimeframe_RELATIVE_TIMEFRAME_DAY}, + "Same_day_last_week": {timeFrame: alerts.Timeframe_TIMEFRAME_24_H, relativeTimeFrame: alerts.RelativeTimeframe_RELATIVE_TIMEFRAME_WEEK}, + "Same_day_last_month": {timeFrame: alerts.Timeframe_TIMEFRAME_24_H, relativeTimeFrame: alerts.RelativeTimeframe_RELATIVE_TIMEFRAME_MONTH}, + } + alertProtoTimeFrameAndRelativeTimeFrameToSchemaRelativeTimeFrame = reverseMapRelativeTimeFrame(alertSchemaRelativeTimeFrameToProtoTimeFrameAndRelativeTimeFrame) + alertValidRelativeTimeFrames = getKeysRelativeTimeFrame(alertSchemaRelativeTimeFrameToProtoTimeFrameAndRelativeTimeFrame) + alertSchemaArithmeticOperatorToProtoArithmetic = map[string]string{ + "Avg": "ARITHMETIC_OPERATOR_AVG_OR_UNSPECIFIED", + "Min": "ARITHMETIC_OPERATOR_MIN", + "Max": "ARITHMETIC_OPERATOR_MAX", + "Sum": "ARITHMETIC_OPERATOR_SUM", + "Count": "ARITHMETIC_OPERATOR_COUNT", + "Percentile": "ARITHMETIC_OPERATOR_PERCENTILE", + } + alertProtoArithmeticOperatorToSchemaArithmetic = reverseMapStrings(alertSchemaArithmeticOperatorToProtoArithmetic) + alertValidArithmeticOperators = getKeysStrings(alertSchemaArithmeticOperatorToProtoArithmetic) + alertValidFlowOperator = getKeysInt32(alerts.FlowOperator_value) + alertSchemaMetricTimeFrameToMetricProtoTimeFrame = map[string]string{ + "1Min": "TIMEFRAME_1_MIN", + "5Min": "TIMEFRAME_5_MIN_OR_UNSPECIFIED", + "10Min": "TIMEFRAME_10_MIN", + "15Min": "TIMEFRAME_15_MIN", + "20Min": "TIMEFRAME_20_MIN", + "30Min": "TIMEFRAME_30_MIN", + "1H": "TIMEFRAME_1_H", + "2H": "TIMEFRAME_2_H", + "4H": "TIMEFRAME_4_H", + "6H": "TIMEFRAME_6_H", + "12H": "TIMEFRAME_12_H", + "24H": "TIMEFRAME_24_H", + } + alertProtoMetricTimeFrameToMetricSchemaTimeFrame = reverseMapStrings(alertSchemaMetricTimeFrameToMetricProtoTimeFrame) + alertValidMetricTimeFrames = getKeysStrings(alertSchemaMetricTimeFrameToMetricProtoTimeFrame) + alertSchemaDeadmanRatiosToProtoDeadmanRatios = map[string]string{ + "Never": "CLEANUP_DEADMAN_DURATION_NEVER_OR_UNSPECIFIED", + "5Min": "CLEANUP_DEADMAN_DURATION_5MIN", + "10Min": "CLEANUP_DEADMAN_DURATION_10MIN", + "1H": "CLEANUP_DEADMAN_DURATION_1H", + "2H": "CLEANUP_DEADMAN_DURATION_2H", + "6H": "CLEANUP_DEADMAN_DURATION_6H", + "12H": "CLEANUP_DEADMAN_DURATION_12H", + "24H": "CLEANUP_DEADMAN_DURATION_24H", + } + alertProtoDeadmanRatiosToSchemaDeadmanRatios = reverseMapStrings(alertSchemaDeadmanRatiosToProtoDeadmanRatios) + alertValidDeadmanRatioValues = getKeysStrings(alertSchemaDeadmanRatiosToProtoDeadmanRatios) + validTimeZones = []string{"UTC-11", "UTC-10", "UTC-9", "UTC-8", "UTC-7", "UTC-6", "UTC-5", "UTC-4", "UTC-3", "UTC-2", "UTC-1", + "UTC+0", "UTC+1", "UTC+2", "UTC+3", "UTC+4", "UTC+5", "UTC+6", "UTC+7", "UTC+8", "UTC+9", "UTC+10", "UTC+11", "UTC+12", "UTC+13", "UTC+14"} + alertSchemaNotifyOnToProtoNotifyOn = map[string]alerts.NotifyOn{ + "Triggered_only": alerts.NotifyOn_TRIGGERED_ONLY, + "Triggered_and_resolved": 
alerts.NotifyOn_TRIGGERED_AND_RESOLVED, + } + alertProtoNotifyOnToSchemaNotifyOn = map[alerts.NotifyOn]string{ + alerts.NotifyOn_TRIGGERED_ONLY: "Triggered_only", + alerts.NotifyOn_TRIGGERED_AND_RESOLVED: "Triggered_and_resolved", + } + validNotifyOn = []string{"Triggered_only", "Triggered_and_resolved"} + alertSchemaToProtoEvaluationWindow = map[string]alerts.EvaluationWindow{ + "Rolling": alerts.EvaluationWindow_EVALUATION_WINDOW_ROLLING_OR_UNSPECIFIED, + "Dynamic": alerts.EvaluationWindow_EVALUATION_WINDOW_DYNAMIC, + } + alertProtoToSchemaEvaluationWindow = map[alerts.EvaluationWindow]string{ + alerts.EvaluationWindow_EVALUATION_WINDOW_ROLLING_OR_UNSPECIFIED: "Rolling", + alerts.EvaluationWindow_EVALUATION_WINDOW_DYNAMIC: "Dynamic", + } + validEvaluationWindow = []string{"Rolling", "Dynamic"} + createAlertURL = "com.coralogix.alerts.v2.AlertService/CreateAlert" + getAlertURL = "com.coralogix.alerts.v2.AlertService/GetAlertByUniqueId" + updateAlertURL = "com.coralogix.alerts.v2.AlertService/UpdateAlertByUniqueId" + deleteAlertURL = "com.coralogix.alerts.v2.AlertService/DeleteAlertByUniqueId" +) + +type alertParams struct { + Condition *alerts.AlertCondition + Filters *alerts.AlertFilters +} + +type protoTimeFrameAndRelativeTimeFrame struct { + timeFrame alerts.Timeframe + relativeTimeFrame alerts.RelativeTimeframe +} + +func resourceCoralogixAlert() *schema.Resource { + return &schema.Resource{ + CreateContext: resourceCoralogixAlertCreate, + ReadContext: resourceCoralogixAlertRead, + UpdateContext: resourceCoralogixAlertUpdate, + DeleteContext: resourceCoralogixAlertDelete, + + Importer: &schema.ResourceImporter{ + StateContext: schema.ImportStatePassthroughContext, + }, + + Timeouts: &schema.ResourceTimeout{ + Create: schema.DefaultTimeout(60 * time.Second), + Read: schema.DefaultTimeout(30 * time.Second), + Update: schema.DefaultTimeout(60 * time.Second), + Delete: schema.DefaultTimeout(30 * time.Second), + }, + + Schema: AlertSchema(), + + Description: "Coralogix alert. More info: https://coralogix.com/docs/alerts-api/ .", + } +} + +func AlertSchema() map[string]*schema.Schema { + return map[string]*schema.Schema{ + "enabled": { + Type: schema.TypeBool, + Optional: true, + Default: true, + Description: "Determines whether the alert will be active. True by default.", + }, + "name": { + Type: schema.TypeString, + Required: true, + ValidateFunc: validation.StringIsNotEmpty, + Description: "Alert name.", + }, + "description": { + Type: schema.TypeString, + Optional: true, + Description: "Alert description.", + }, + "severity": { + Type: schema.TypeString, + Required: true, + ValidateFunc: validation.StringInSlice(alertValidSeverities, false), + Description: fmt.Sprintf("Determines the alert's severity. Can be one of %q", alertValidSeverities), + }, + "meta_labels": { + Type: schema.TypeMap, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + Optional: true, + Description: "Labels allow you to easily filter by alert type and create views. Insert a new label or use an existing one. You can nest a label using key:value.", + ValidateDiagFunc: validation.MapKeyMatch(regexp.MustCompile(`^[A-Za-z\d_-]*$`), "not valid key for meta_label"), + }, + "expiration_date": { + Type: schema.TypeList, + Optional: true, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "day": { + Type: schema.TypeInt, + Required: true, + ValidateFunc: validation.IntBetween(1, 31), + Description: `Day of a month. 
Must be from 1 to 31 and valid for the year and month.`, + }, + "month": { + Type: schema.TypeInt, + Required: true, + ValidateFunc: validation.IntBetween(1, 12), + Description: `Month of a year. Must be from 1 to 12.`, + }, + "year": { + Type: schema.TypeInt, + Required: true, + ValidateFunc: validation.IntBetween(1, 9999), + Description: `Year of the date. Must be from 1 to 9999.`, + }, + }, + }, + Description: "The expiration date of the alert (if declared).", + }, + "notifications_group": { + Type: schema.TypeSet, + Optional: true, + Computed: true, + Elem: notificationGroupSchema(), + Set: schema.HashResource(notificationGroupSchema()), + Description: "Defines notifications settings over list of group-by keys (or on empty list).", + }, + "payload_filters": { + Type: schema.TypeSet, + Optional: true, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + Description: "A list of log fields out of the log example which will be included with the alert notification.", + Set: schema.HashString, + }, + "incident_settings": { + Type: schema.TypeList, + MaxItems: 1, + Optional: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "retriggering_period_minutes": { + Type: schema.TypeInt, + Required: true, + ValidateFunc: validation.IntAtLeast(1), + }, + "notify_on": { + Type: schema.TypeString, + Optional: true, + Default: "Triggered_only", + ValidateFunc: validation.StringInSlice(validNotifyOn, false), + Description: fmt.Sprintf("Defines the alert's triggering logic. Can be one of %q. Triggered_and_resolved conflicts with new_value, unique_count and flow alerts, and with immediately and more_than_usual conditions", validNotifyOn), + }, + }, + }, + //AtLeastOneOf: []string{"notifications_group", "show_in_insights", "incident_settings"}, + }, + "scheduling": { + Type: schema.TypeList, + Optional: true, + Elem: &schema.Resource{ + Schema: schedulingSchema(), + }, + MaxItems: 1, + Description: "Limit the triggering of this alert to specific time frames. 
Active always by default.", + }, + "standard": { + Type: schema.TypeList, + Optional: true, + Elem: &schema.Resource{ + Schema: standardSchema(), + }, + MaxItems: 1, + ExactlyOneOf: validAlertTypes, + Description: "Alert based on number of log occurrences.", + }, + "ratio": { + Type: schema.TypeList, + Optional: true, + Elem: &schema.Resource{ + Schema: ratioSchema(), + }, + MaxItems: 1, + ExactlyOneOf: validAlertTypes, + Description: "Alert based on the ratio between queries.", + }, + "new_value": { + Type: schema.TypeList, + Optional: true, + Elem: &schema.Resource{ + Schema: newValueSchema(), + }, + MaxItems: 1, + ExactlyOneOf: validAlertTypes, + Description: "Alert on never before seen log value.", + }, + "unique_count": { + Type: schema.TypeList, + Optional: true, + Elem: &schema.Resource{ + Schema: uniqueCountSchema(), + }, + MaxItems: 1, + ExactlyOneOf: validAlertTypes, + Description: "Alert based on unique value count per key.", + }, + "time_relative": { + Type: schema.TypeList, + Optional: true, + Elem: &schema.Resource{ + Schema: timeRelativeSchema(), + }, + MaxItems: 1, + ExactlyOneOf: validAlertTypes, + Description: "Alert based on ratio between timeframes.", + }, + "metric": { + Type: schema.TypeList, + Optional: true, + Elem: &schema.Resource{ + Schema: metricSchema(), + }, + MaxItems: 1, + ExactlyOneOf: validAlertTypes, + Description: "Alert based on arithmetic operators for metrics.", + }, + "tracing": { + Type: schema.TypeList, + Optional: true, + Elem: &schema.Resource{ + Schema: tracingSchema(), + }, + MaxItems: 1, + ExactlyOneOf: validAlertTypes, + Description: "Alert based on tracing latency.", + }, + "flow": { + Type: schema.TypeList, + Optional: true, + Elem: &schema.Resource{ + Schema: flowSchema(), + }, + MaxItems: 1, + ExactlyOneOf: validAlertTypes, + Description: "Alert based on a combination of alerts in a specific timeframe.", + }, + } +} + +func notificationGroupSchema() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "group_by_fields": { + Type: schema.TypeList, + Optional: true, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + Description: "List of group-by fields to apply the notification logic on (can be empty). Every notification should contain unique group_by_fields permutation (the order doesn't matter).", + }, + "notification": { + Type: schema.TypeSet, + Optional: true, + Elem: notificationSubgroupSchema(), + Set: schema.HashResource(notificationSubgroupSchema()), + Description: "Defines notification logic with optional recipients. Can contain single webhook or email recipients list.", + }, + }, + } +} + +func notificationSubgroupSchema() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "retriggering_period_minutes": { + Type: schema.TypeInt, + Optional: true, + ValidateFunc: validation.IntAtLeast(1), + Description: "By default, retriggering_period_minutes will be populated with min for immediate," + + " more_than and more_than_usual alerts. For less_than alert it will be populated with the chosen time" + + " frame for the less_than condition (in minutes). You may choose to change the suppress window so the " + + "alert will be suppressed for a longer period.", + ExactlyOneOf: []string{"incident_settings"}, + }, + "notify_on": { + Type: schema.TypeString, + Optional: true, + ValidateFunc: validation.StringInSlice(validNotifyOn, false), + Description: fmt.Sprintf("Defines the alert's triggering logic. Can be one of %q. 
Triggered_and_resolved conflicts with new_value, unique_count and flow alerts, and with immediately and more_than_usual conditions", validNotifyOn), + ExactlyOneOf: []string{"incident_settings"}, + }, + "integration_id": { + Type: schema.TypeString, + Optional: true, + ValidateFunc: validation.StringIsNotEmpty, + Description: "Conflicts with emails.", + }, + "email_recipients": { + Type: schema.TypeSet, + Optional: true, + Elem: &schema.Schema{ + Type: schema.TypeString, + //ValidateDiagFunc: mailValidationFunc(), + }, + Set: schema.HashString, + Description: "Conflicts with integration_id.", + }, + }, + } +} + +func schedulingSchema() map[string]*schema.Schema { + return map[string]*schema.Schema{ + "time_zone": { + Type: schema.TypeString, + Optional: true, + Default: "UTC+0", + ValidateFunc: validation.StringInSlice(validTimeZones, false), + Description: fmt.Sprintf("Specifies the time zone to be used in interpreting the schedule. Can be one of %q", validTimeZones), + }, + "time_frame": { + Type: schema.TypeSet, + MaxItems: 1, + Required: true, + Elem: timeFrames(), + Set: hashTimeFrames(), + Description: "time_frame is a set of days and hours when the alert will be active. ***Currently, supported only for one time_frame***", + }, + } +} + +func timeFrames() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "days_enabled": { + Type: schema.TypeSet, + Required: true, + Elem: &schema.Schema{ + Type: schema.TypeString, + ValidateFunc: validation.StringInSlice(alertValidDaysOfWeek, false), + }, + Description: fmt.Sprintf("Days of week. Can be one of %q", alertValidDaysOfWeek), + Set: schema.HashString, + }, + "start_time": timeInDaySchema(`Limit the triggering of this alert to start at specific hour.`), + "end_time": timeInDaySchema(`Limit the triggering of this alert to end at specific hour.`), + }, + } +} + +func hashTimeFrames() schema.SchemaSetFunc { + return schema.HashResource(timeFrames()) +} + +func commonAlertSchema() map[string]*schema.Schema { + return map[string]*schema.Schema{ + "search_query": searchQuerySchema(), + "severities": { + Type: schema.TypeSet, + Optional: true, + Elem: &schema.Schema{ + Type: schema.TypeString, + ValidateFunc: validation.StringInSlice(alertValidLogSeverities, false), + }, + Description: fmt.Sprintf("An array of log severities that we interested in. Can be one of %q", alertValidLogSeverities), + Set: schema.HashString, + }, + "applications": { + Type: schema.TypeSet, + Optional: true, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + Description: "An array that contains log’s application names that we want to be alerted on." + + " Applications can be filtered by prefix, suffix, and contains using the next patterns - filter:startsWith:xxx, filter:endsWith:xxx, filter:contains:xxx", + Set: schema.HashString, + }, + "subsystems": { + Type: schema.TypeSet, + Optional: true, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + Description: "An array that contains log’s subsystem names that we want to be notified on. 
" + + "Subsystems can be filtered by prefix, suffix, and contains using the next patterns - filter:startsWith:xxx, filter:endsWith:xxx, filter:contains:xxx", + Set: schema.HashString, + }, + "categories": { + Type: schema.TypeSet, + Optional: true, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + Description: "An array that contains log’s categories that we want to be notified on.", + Set: schema.HashString, + }, + "computers": { + Type: schema.TypeSet, + Optional: true, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + Description: "An array that contains log’s computer names that we want to be notified on.", + Set: schema.HashString, + }, + "classes": { + Type: schema.TypeSet, + Optional: true, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + Description: "An array that contains log’s class names that we want to be notified on.", + Set: schema.HashString, + }, + "methods": { + Type: schema.TypeSet, + Optional: true, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + Description: "An array that contains log’s method names that we want to be notified on.", + Set: schema.HashString, + }, + "ip_addresses": { + Type: schema.TypeSet, + Optional: true, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + Description: "An array that contains log’s IP addresses that we want to be notified on.", + Set: schema.HashString, + }, + } +} + +func searchQuerySchema() *schema.Schema { + return &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Description: "The search_query that we wanted to be notified on.", + } +} + +func standardSchema() map[string]*schema.Schema { + standardSchema := commonAlertSchema() + standardSchema["condition"] = &schema.Schema{ + Type: schema.TypeList, + Required: true, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "immediately": { + Type: schema.TypeBool, + Optional: true, + ExactlyOneOf: []string{"standard.0.condition.0.immediately", + "standard.0.condition.0.more_than", + "standard.0.condition.0.less_than", + "standard.0.condition.0.more_than_usual"}, + Description: "Determines the condition operator." + + " Must be one of - immediately, less_than, more_than or more_than_usual.", + }, + "less_than": { + Type: schema.TypeBool, + Optional: true, + ExactlyOneOf: []string{"standard.0.condition.0.immediately", + "standard.0.condition.0.more_than", + "standard.0.condition.0.less_than", + "standard.0.condition.0.more_than_usual"}, + Description: "Determines the condition operator." + + " Must be one of - immediately, less_than, more_than or more_than_usual.", + RequiredWith: []string{"standard.0.condition.0.time_window", "standard.0.condition.0.threshold"}, + }, + "more_than": { + Type: schema.TypeBool, + Optional: true, + ExactlyOneOf: []string{"standard.0.condition.0.immediately", + "standard.0.condition.0.more_than", + "standard.0.condition.0.less_than", + "standard.0.condition.0.more_than_usual"}, + RequiredWith: []string{"standard.0.condition.0.time_window", "standard.0.condition.0.threshold"}, + Description: "Determines the condition operator." + + " Must be one of - immediately, less_than, more_than or more_than_usual.", + }, + "more_than_usual": { + Type: schema.TypeBool, + Optional: true, + ExactlyOneOf: []string{"standard.0.condition.0.immediately", + "standard.0.condition.0.more_than", + "standard.0.condition.0.less_than", + "standard.0.condition.0.more_than_usual"}, + Description: "Determines the condition operator." 
+ + " Must be one of - immediately, less_than, more_than or more_than_usual.", + }, + "threshold": { + Type: schema.TypeInt, + Optional: true, + ConflictsWith: []string{"standard.0.condition.0.immediately"}, + Description: "The number of log occurrences that is needed to trigger the alert.", + }, + "time_window": { + Type: schema.TypeString, + Optional: true, + ValidateFunc: validation.StringInSlice(alertValidTimeFrames, false), + ConflictsWith: []string{"standard.0.condition.0.immediately"}, + Description: fmt.Sprintf("The bounded time frame for the threshold to be occurred within, to trigger the alert. Can be one of %q", alertValidTimeFrames), + }, + "group_by": { + Type: schema.TypeList, + Optional: true, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + ConflictsWith: []string{"standard.0.condition.0.immediately"}, + Description: "The fields to 'group by' on. In case of immediately = true switch to group_by_key.", + }, + "group_by_key": { + Type: schema.TypeString, + Optional: true, + ConflictsWith: []string{"standard.0.condition.0.more_than", "standard.0.condition.0.less_than", "standard.0.condition.0.more_than_usual"}, + Description: "The key to 'group by' on. When immediately = true, 'group_by_key' (single string) can be set instead of 'group_by'.", + }, + "manage_undetected_values": { + Type: schema.TypeList, + Optional: true, + Computed: true, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "enable_triggering_on_undetected_values": { + Type: schema.TypeBool, + Required: true, + Description: "Determines whether the deadman-option is enabled. When set to true, auto_retire_ratio is required otherwise auto_retire_ratio should be omitted.", + }, + "auto_retire_ratio": { + Type: schema.TypeString, + Optional: true, + ValidateFunc: validation.StringInSlice(alertValidDeadmanRatioValues, false), + Description: fmt.Sprintf("Defines the triggering auto-retire ratio. Can be one of %q", alertValidDeadmanRatioValues), + }, + }, + }, + RequiredWith: []string{"standard.0.condition.0.less_than", "standard.0.condition.0.group_by"}, + Description: "Manage your logs undetected values - when relevant, enable/disable triggering on undetected values and change the auto retire interval. By default (when relevant), triggering is enabled with retire-ratio=NEVER.", + }, + "evaluation_window": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ValidateFunc: validation.StringInSlice(validEvaluationWindow, false), + RequiredWith: []string{"standard.0.condition.0.more_than"}, + Description: fmt.Sprintf("Defines the evaluation-window logic to determine if the threshold has been crossed. Relevant only for more_than condition. 
Can be one of %q.", validEvaluationWindow), + }, + }, + }, + Description: "Defines the conditions for triggering and notify by the alert", + } + return standardSchema +} + +func ratioSchema() map[string]*schema.Schema { + query1Schema := commonAlertSchema() + query1Schema["alias"] = &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Default: "Query 1", + Description: "Query1 alias.", + } + + return map[string]*schema.Schema{ + "query_1": { + Type: schema.TypeList, + Required: true, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: query1Schema, + }, + }, + "query_2": { + Type: schema.TypeList, + Required: true, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "alias": { + Type: schema.TypeString, + Optional: true, + Default: "Query 2", + Description: "Query2 alias.", + }, + "search_query": searchQuerySchema(), + "severities": { + Type: schema.TypeSet, + Optional: true, + Elem: &schema.Schema{ + Type: schema.TypeString, + ValidateFunc: validation.StringInSlice(alertValidLogSeverities, false), + }, + Description: fmt.Sprintf("An array of log severities that we interested in. Can be one of %q", alertValidLogSeverities), + Set: schema.HashString, + }, + "applications": { + Type: schema.TypeSet, + Optional: true, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + Description: "An array that contains log’s application names that we want to be alerted on." + + " Applications can be filtered by prefix, suffix, and contains using the next patterns - filter:startsWith:xxx, filter:endsWith:xxx, filter:contains:xxx", + Set: schema.HashString, + }, + "subsystems": { + Type: schema.TypeSet, + Optional: true, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + Description: "An array that contains log’s subsystem names that we want to be notified on. " + + "Subsystems can be filtered by prefix, suffix, and contains using the next patterns - filter:startsWith:xxx, filter:endsWith:xxx, filter:contains:xxx", + Set: schema.HashString, + }, + }, + }, + }, + "condition": { + Type: schema.TypeList, + Required: true, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "more_than": { + Type: schema.TypeBool, + Optional: true, + ExactlyOneOf: []string{"ratio.0.condition.0.more_than", "ratio.0.condition.0.less_than"}, + Description: "Determines the condition operator." + + " Must be one of - less_than or more_than.", + }, + "less_than": { + Type: schema.TypeBool, + Optional: true, + ExactlyOneOf: []string{"ratio.0.condition.0.more_than", "ratio.0.condition.0.less_than"}, + }, + "ratio_threshold": { + Type: schema.TypeFloat, + Required: true, + Description: "The ratio(between the queries) threshold that is needed to trigger the alert.", + }, + "time_window": { + Type: schema.TypeString, + Required: true, + ValidateFunc: validation.StringInSlice(alertValidTimeFrames, false), + Description: fmt.Sprintf("The bounded time frame for the threshold to be occurred within, to trigger the alert. 
Can be one of %q", alertValidTimeFrames), + }, + "ignore_infinity": { + Type: schema.TypeBool, + Optional: true, + ConflictsWith: []string{"ratio.0.condition.0.less_than"}, + Description: "Not triggered when threshold is infinity (divided by zero).", + }, + "group_by": { + Type: schema.TypeList, + Optional: true, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + Description: "The fields to 'group by' on.", + }, + "group_by_q1": { + Type: schema.TypeBool, + Optional: true, + RequiredWith: []string{"ratio.0.condition.0.group_by"}, + ConflictsWith: []string{"ratio.0.condition.0.group_by_q2", + "ratio.0.condition.0.group_by_both"}, + }, + "group_by_q2": { + Type: schema.TypeBool, + Optional: true, + RequiredWith: []string{"ratio.0.condition.0.group_by"}, + ConflictsWith: []string{"ratio.0.condition.0.group_by_q1", + "ratio.0.condition.0.group_by_both"}, + }, + "group_by_both": { + Type: schema.TypeBool, + Optional: true, + RequiredWith: []string{"ratio.0.condition.0.group_by"}, + ConflictsWith: []string{"ratio.0.condition.0.group_by_q1", + "ratio.0.condition.0.group_by_q2"}, + }, + "manage_undetected_values": { + Type: schema.TypeList, + Optional: true, + Computed: true, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "enable_triggering_on_undetected_values": { + Type: schema.TypeBool, + Required: true, + Description: "Determines whether the deadman-option is enabled. When set to true, auto_retire_ratio is required otherwise auto_retire_ratio should be omitted.", + }, + "auto_retire_ratio": { + Type: schema.TypeString, + Optional: true, + ValidateFunc: validation.StringInSlice(alertValidDeadmanRatioValues, false), + Description: fmt.Sprintf("Defines the triggering auto-retire ratio. Can be one of %q", alertValidDeadmanRatioValues), + }, + }, + }, + RequiredWith: []string{"ratio.0.condition.0.less_than", "ratio.0.condition.0.group_by"}, + Description: "Manage your logs undetected values - when relevant, enable/disable triggering on undetected values and change the auto retire interval. By default (when relevant), triggering is enabled with retire-ratio=NEVER.", + }, + }, + }, + Description: "Defines the conditions for triggering and notify by the alert", + }, + } +} + +func newValueSchema() map[string]*schema.Schema { + newValueSchema := commonAlertSchema() + newValueSchema["condition"] = &schema.Schema{ + Type: schema.TypeList, + Required: true, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "key_to_track": { + Type: schema.TypeString, + Required: true, + ValidateFunc: validation.StringIsNotEmpty, + Description: "Select a key to track. Note, this key needs to have less than 50K unique values in" + + " the defined timeframe.", + }, + "time_window": { + Type: schema.TypeString, + Required: true, + ValidateFunc: validation.StringInSlice(alertValidNewValueTimeFrames, false), + Description: fmt.Sprintf("The bounded time frame for the threshold to be occurred within, to trigger the alert. 
Can be one of %q", alertValidNewValueTimeFrames), + }, + }, + }, + Description: "Defines the conditions for triggering and notify by the alert", + } + return newValueSchema +} + +func uniqueCountSchema() map[string]*schema.Schema { + uniqueCountSchema := commonAlertSchema() + uniqueCountSchema["condition"] = &schema.Schema{ + Type: schema.TypeList, + Required: true, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "unique_count_key": { + Type: schema.TypeString, + Required: true, + ValidateFunc: validation.StringIsNotEmpty, + Description: "Defines the key to match to track its unique count.", + }, + "max_unique_values": { + Type: schema.TypeInt, + Required: true, + }, + "time_window": { + Type: schema.TypeString, + Required: true, + ValidateFunc: validation.StringInSlice(alertValidUniqueCountTimeFrames, false), + Description: fmt.Sprintf("The bounded time frame for the threshold to be occurred within, to trigger the alert. Can be one of %q", alertValidUniqueCountTimeFrames), + }, + "group_by_key": { + Type: schema.TypeString, + Optional: true, + RequiredWith: []string{"unique_count.0.condition.0.max_unique_values_for_group_by"}, + Description: "The key to 'group by' on.", + }, + "max_unique_values_for_group_by": { + Type: schema.TypeInt, + Optional: true, + RequiredWith: []string{"unique_count.0.condition.0.group_by_key"}, + }, + }, + }, + Description: "Defines the conditions for triggering and notify by the alert", + } + return uniqueCountSchema +} + +func timeRelativeSchema() map[string]*schema.Schema { + timeRelativeSchema := commonAlertSchema() + timeRelativeSchema["condition"] = &schema.Schema{ + Type: schema.TypeList, + Required: true, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "less_than": { + Type: schema.TypeBool, + Optional: true, + ExactlyOneOf: []string{"time_relative.0.condition.0.more_than", + "time_relative.0.condition.0.less_than"}, + Description: "Determines the condition operator." + + " Must be one of - less_than or more_than.", + }, + "more_than": { + Type: schema.TypeBool, + Optional: true, + ExactlyOneOf: []string{"time_relative.0.condition.0.more_than", + "time_relative.0.condition.0.less_than"}, + Description: "Determines the condition operator." + + " Must be one of - less_than or more_than.", + }, + "ratio_threshold": { + Type: schema.TypeFloat, + Required: true, + Description: "The ratio threshold that is needed to trigger the alert.", + }, + "relative_time_window": { + Type: schema.TypeString, + Required: true, + ValidateFunc: validation.StringInSlice(alertValidRelativeTimeFrames, false), + Description: fmt.Sprintf("Time-window to compare with. Can be one of %q.", alertValidRelativeTimeFrames), + }, + "ignore_infinity": { + Type: schema.TypeBool, + Optional: true, + ConflictsWith: []string{"time_relative.0.condition.0.less_than"}, + Description: "Not triggered when threshold is infinity (divided by zero).", + }, + "group_by": { + Type: schema.TypeList, + Optional: true, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + Description: "The fields to 'group by' on.", + }, + "manage_undetected_values": { + Type: schema.TypeList, + Optional: true, + Computed: true, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "enable_triggering_on_undetected_values": { + Type: schema.TypeBool, + Required: true, + Description: "Determines whether the deadman-option is enabled. 
When set to true, auto_retire_ratio is required otherwise auto_retire_ratio should be omitted.", + }, + "auto_retire_ratio": { + Type: schema.TypeString, + Optional: true, + ValidateFunc: validation.StringInSlice(alertValidDeadmanRatioValues, false), + Description: fmt.Sprintf("Defines the triggering auto-retire ratio. Can be one of %q", alertValidDeadmanRatioValues), + }, + }, + }, + RequiredWith: []string{"time_relative.0.condition.0.less_than", "time_relative.0.condition.0.group_by"}, + Description: "Manage your logs undetected values - when relevant, enable/disable triggering on undetected values and change the auto retire interval. By default (when relevant), triggering is enabled with retire-ratio=NEVER.", + }, + }, + }, + Description: "Defines the conditions for triggering and notify by the alert", + } + return timeRelativeSchema +} + +func metricSchema() map[string]*schema.Schema { + return map[string]*schema.Schema{ + "lucene": { + Type: schema.TypeList, + MaxItems: 1, + Optional: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "search_query": { + Type: schema.TypeString, + Required: true, + Description: "Regular expiration. More info: https://coralogix.com/blog/regex-101/", + }, + "condition": { + Type: schema.TypeList, + Required: true, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "metric_field": { + Type: schema.TypeString, + Required: true, + Description: "The name of the metric field to alert on.", + }, + "arithmetic_operator": { + Type: schema.TypeString, + Required: true, + ValidateFunc: validation.StringInSlice(alertValidArithmeticOperators, false), + Description: fmt.Sprintf("The arithmetic operator to use on the alert. can be one of %q", alertValidArithmeticOperators), + }, + "arithmetic_operator_modifier": { + Type: schema.TypeInt, + Optional: true, + ValidateFunc: validation.IntBetween(0, 100), + Description: "When arithmetic_operator = \"Percentile\" you need to supply the value in this property, 0 < value < 100.", + }, + "less_than": { + Type: schema.TypeBool, + Optional: true, + ExactlyOneOf: []string{"metric.0.lucene.0.condition.0.less_than", + "metric.0.lucene.0.condition.0.more_than"}, + Description: "Determines the condition operator." + + " Must be one of - less_than or more_than.", + }, + "more_than": { + Type: schema.TypeBool, + Optional: true, + ExactlyOneOf: []string{"metric.0.lucene.0.condition.0.less_than", + "metric.0.lucene.0.condition.0.more_than"}, + Description: "Determines the condition operator." + + " Must be one of - less_than or more_than.", + }, + "threshold": { + Type: schema.TypeFloat, + Required: true, + Description: "The number of log threshold that is needed to trigger the alert.", + }, + "sample_threshold_percentage": { + Type: schema.TypeInt, + Required: true, + ValidateFunc: validation.All(validation.IntDivisibleBy(10), validation.IntBetween(0, 100)), + Description: "The metric value must cross the threshold within this percentage of the timeframe (sum and count arithmetic operators do not use this parameter since they aggregate over the entire requested timeframe), increments of 10, 0 <= value <= 100.", + }, + "time_window": { + Type: schema.TypeString, + Required: true, + ValidateFunc: validation.StringInSlice(alertValidMetricTimeFrames, false), + Description: fmt.Sprintf("The bounded time frame for the threshold to be occurred within, to trigger the alert. 
Can be one of %q", alertValidMetricTimeFrames), + }, + "group_by": { + Type: schema.TypeList, + Optional: true, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + Description: "The fields to 'group by' on.", + }, + "replace_missing_value_with_zero": { + Type: schema.TypeBool, + Optional: true, + ConflictsWith: []string{"metric.0.lucene.0.condition.0.min_non_null_values_percentage"}, + Description: "If set to true, missing data will be considered as 0, otherwise, it will not be considered at all.", + }, + "min_non_null_values_percentage": { + Type: schema.TypeInt, + Optional: true, + ValidateFunc: validation.All(validation.IntDivisibleBy(10), validation.IntBetween(0, 100)), + ConflictsWith: []string{"metric.0.lucene.0.condition.0.replace_missing_value_with_zero"}, + Description: "The minimum percentage of the timeframe that should have values for this alert to trigger", + }, + "manage_undetected_values": { + Type: schema.TypeList, + Optional: true, + Computed: true, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "enable_triggering_on_undetected_values": { + Type: schema.TypeBool, + Required: true, + Description: "Determines whether the deadman-option is enabled. When set to true, auto_retire_ratio is required otherwise auto_retire_ratio should be omitted.", + }, + "auto_retire_ratio": { + Type: schema.TypeString, + Optional: true, + ValidateFunc: validation.StringInSlice(alertValidDeadmanRatioValues, false), + Description: fmt.Sprintf("Defines the triggering auto-retire ratio. Can be one of %q", alertValidDeadmanRatioValues), + }, + }, + }, + RequiredWith: []string{"metric.0.lucene.0.condition.0.less_than", "metric.0.lucene.0.condition.0.group_by"}, + Description: "Manage your logs undetected values - when relevant, enable/disable triggering on undetected values and change the auto retire interval. By default (when relevant), triggering is enabled with retire-ratio=NEVER.", + }, + }, + }, + Description: "Defines the conditions for triggering and notify by the alert", + }, + }, + }, + ExactlyOneOf: []string{"metric.0.lucene", "metric.0.promql"}, + }, + "promql": { + Type: schema.TypeList, + MaxItems: 1, + Optional: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "search_query": { + Type: schema.TypeString, + Required: true, + Description: "Regular expiration. More info: https://coralogix.com/blog/regex-101/", + }, + "condition": { + Type: schema.TypeList, + Required: true, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "less_than": { + Type: schema.TypeBool, + Optional: true, + ExactlyOneOf: []string{ + "metric.0.promql.0.condition.0.more_than", + "metric.0.promql.0.condition.0.more_than_usual", + "metric.0.promql.0.condition.0.less_than_usual", + "metric.0.promql.0.condition.0.more_than_or_equal", + "metric.0.promql.0.condition.0.less_than_or_equal", + }, + Description: "Determines the condition operator." + + " Must be one of - immediately, less_than, more_than, more_than_usual, less_than_usual, more_than_or_equal or less_than_or_equal.", + }, + "more_than": { + Type: schema.TypeBool, + Optional: true, + Description: "Determines the condition operator." + + " Must be one of - immediately, less_than, more_than, more_than_usual, less_than_usual, more_than_or_equal or less_than_or_equal.", + }, + "more_than_usual": { + Type: schema.TypeBool, + Optional: true, + Description: "Determines the condition operator." 
+ + " Must be one of - immediately, less_than, more_than, more_than_usual, less_than_usual, more_than_or_equal or less_than_or_equal.", + }, + "less_than_usual": { + Type: schema.TypeBool, + Optional: true, + Description: "Determines the condition operator." + + " Must be one of - immediately, less_than, more_than, more_than_usual, less_than_usual, more_than_or_equal or less_than_or_equal.", + }, + "more_than_or_equal": { + Type: schema.TypeBool, + Optional: true, + Description: "Determines the condition operator." + + " Must be one of - immediately, less_than, more_than, more_than_usual, less_than_usual, more_than_or_equal or less_than_or_equal.", + }, + "less_than_or_equal": { + Type: schema.TypeBool, + Optional: true, + Description: "Determines the condition operator." + + " Must be one of - immediately, less_than, more_than, more_than_usual, less_than_usual, more_than_or_equal or less_than_or_equal.", + }, + "threshold": { + Type: schema.TypeFloat, + Required: true, + Description: "The threshold that is needed to trigger the alert.", + }, + "time_window": { + Type: schema.TypeString, + Required: true, + ValidateFunc: validation.StringInSlice(alertValidMetricTimeFrames, false), + Description: fmt.Sprintf("The bounded time frame for the threshold to be occurred within, to trigger the alert. Can be one of %q", alertValidMetricTimeFrames), + }, + "sample_threshold_percentage": { + Type: schema.TypeInt, + Required: true, + ValidateFunc: validation.All(validation.IntDivisibleBy(10), validation.IntBetween(0, 100)), + }, + "replace_missing_value_with_zero": { + Type: schema.TypeBool, + Optional: true, + ConflictsWith: []string{"metric.0.promql.0.condition.0.min_non_null_values_percentage", "metric.0.promql.0.condition.0.more_than_usual"}, + Description: "If set to true, missing data will be considered as 0, otherwise, it will not be considered at all.", + }, + "min_non_null_values_percentage": { + Type: schema.TypeInt, + Optional: true, + ConflictsWith: []string{"metric.0.promql.0.condition.0.replace_missing_value_with_zero"}, + ValidateFunc: validation.All(validation.IntDivisibleBy(10), validation.IntBetween(0, 100)), + }, + "manage_undetected_values": { + Type: schema.TypeList, + Optional: true, + Computed: true, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "enable_triggering_on_undetected_values": { + Type: schema.TypeBool, + Required: true, + Description: "Determines whether the deadman-option is enabled. When set to true, auto_retire_ratio is required otherwise auto_retire_ratio should be omitted.", + }, + "auto_retire_ratio": { + Type: schema.TypeString, + Optional: true, + ValidateFunc: validation.StringInSlice(alertValidDeadmanRatioValues, false), + Description: fmt.Sprintf("Defines the triggering auto-retire ratio. Can be one of %q", alertValidDeadmanRatioValues), + }, + }, + }, + ConflictsWith: []string{"metric.0.promql.0.condition.0.more_than", "metric.0.promql.0.condition.0.more_than_or_equal", "metric.0.promql.0.condition.0.more_than_usual", "metric.0.promql.0.condition.0.less_than_usual"}, + Description: "Manage your logs undetected values - when relevant, enable/disable triggering on undetected values and change the auto retire interval. 
By default (when relevant), triggering is enabled with retire-ratio=NEVER.", + }, + }, + }, + Description: "Defines the conditions for triggering and notify by the alert", + }, + }, + }, + ExactlyOneOf: []string{"metric.0.lucene", "metric.0.promql"}, + }, + } +} + +func tracingSchema() map[string]*schema.Schema { + return map[string]*schema.Schema{ + "applications": { + Type: schema.TypeSet, + Optional: true, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + Description: "An array that contains log’s application names that we want to be alerted on." + + " Applications can be filtered by prefix, suffix, and contains using the next patterns - filter:notEquals:xxx, filter:startsWith:xxx, filter:endsWith:xxx, filter:contains:xxx", + Set: schema.HashString, + }, + "subsystems": { + Type: schema.TypeSet, + Optional: true, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + Description: "An array that contains log’s subsystems names that we want to be alerted on." + + " Applications can be filtered by prefix, suffix, and contains using the next patterns - filter:notEquals:xxx, filter:startsWith:xxx, filter:endsWith:xxx, filter:contains:xxx", + Set: schema.HashString, + }, + "services": { + Type: schema.TypeSet, + Optional: true, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + Description: "An array that contains log’s services names that we want to be alerted on." + + " Applications can be filtered by prefix, suffix, and contains using the next patterns - filter:notEquals:xxx, filter:startsWith:xxx, filter:endsWith:xxx, filter:contains:xxx", + Set: schema.HashString, + }, + "tag_filter": { + Type: schema.TypeSet, + Optional: true, + Elem: tagFilterSchema(), + Set: schema.HashResource(tagFilterSchema()), + }, + "latency_threshold_milliseconds": { + Type: schema.TypeFloat, + Optional: true, + ValidateFunc: validation.FloatAtLeast(0), + }, + "condition": { + Type: schema.TypeList, + Required: true, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "immediately": { + Type: schema.TypeBool, + Optional: true, + ExactlyOneOf: []string{"tracing.0.condition.0.immediately", "tracing.0.condition.0.more_than"}, + Description: "Determines the condition operator." + + " Must be one of - immediately or more_than.", + }, + "more_than": { + Type: schema.TypeBool, + Optional: true, + ExactlyOneOf: []string{"tracing.0.condition.0.immediately", "tracing.0.condition.0.more_than"}, + RequiredWith: []string{"tracing.0.condition.0.time_window"}, + Description: "Determines the condition operator." + + " Must be one of - immediately or more_than.", + }, + "threshold": { + Type: schema.TypeInt, + Optional: true, + ConflictsWith: []string{"tracing.0.condition.0.immediately"}, + Description: "The number of log occurrences that is needed to trigger the alert.", + }, + "time_window": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + ValidateFunc: validation.StringInSlice(alertValidTimeFrames, false), + ConflictsWith: []string{"tracing.0.condition.0.immediately"}, + RequiredWith: []string{"tracing.0.condition.0.more_than"}, + Description: fmt.Sprintf("The bounded time frame for the threshold to be occurred within, to trigger the alert. 
Can be one of %q", alertValidTimeFrames), + }, + "group_by": { + Type: schema.TypeList, + Optional: true, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + ConflictsWith: []string{"tracing.0.condition.0.immediately"}, + Description: "The fields to 'group by' on.", + }, + }, + }, + Description: "Defines the conditions for triggering and notify by the alert", + }, + } +} + +func tagFilterSchema() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "field": { + Type: schema.TypeString, + Required: true, + }, + "values": { + Type: schema.TypeSet, + Required: true, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + Set: schema.HashString, + Description: "Tag filter values can be filtered by prefix, suffix, and contains using the next patterns - filter:notEquals:xxx, filter:startsWith:xxx, filter:endsWith:xxx, filter:contains:xxx", + }, + }, + } +} + +func flowSchema() map[string]*schema.Schema { + return map[string]*schema.Schema{ + "stage": { + Type: schema.TypeList, + Required: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "group": { + Type: schema.TypeList, + Required: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "sub_alerts": { + Type: schema.TypeList, + MaxItems: 1, + Required: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "operator": { + Type: schema.TypeString, + Required: true, + ValidateFunc: validation.StringInSlice(alertValidFlowOperator, false), + Description: fmt.Sprintf("The operator to use on the alert. can be one of %q", alertValidFlowOperator), + }, + "flow_alert": { + Type: schema.TypeList, + Required: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "not": { + Type: schema.TypeBool, + Optional: true, + Default: false, + }, + "user_alert_id": { + Type: schema.TypeString, + Required: true, + }, + }, + }, + }, + }, + }, + }, + "next_operator": { + Type: schema.TypeString, + Required: true, + ValidateFunc: validation.StringInSlice(alertValidFlowOperator, false), + Description: fmt.Sprintf("The operator to use on the alert. 
can be one of %q", alertValidFlowOperator), + }, + }, + }, + }, + "time_window": timeSchema("Timeframe for flow stage."), + }, + }, + }, + "group_by": { + Type: schema.TypeList, + Optional: true, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + } +} + +func resourceCoralogixAlertCreate(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { + createAlertRequest, diags := extractCreateAlertRequest(d) + if len(diags) != 0 { + return diags + } + + createAlertStr := protojson.Format(createAlertRequest) + log.Printf("[INFO] Creating new alert: %s", createAlertStr) + AlertResp, err := meta.(*clientset.ClientSet).Alerts().CreateAlert(ctx, createAlertRequest) + + if err != nil { + log.Printf("[ERROR] Received error: %s", err.Error()) + return diag.Errorf(formatRpcErrors(err, createAlertURL, createAlertStr)) + } + + alert := AlertResp.GetAlert() + log.Printf("[INFO] Submitted new alert: %s", protojson.Format(alert)) + d.SetId(alert.GetUniqueIdentifier().GetValue()) + + return resourceCoralogixAlertRead(ctx, d, meta) +} + +func resourceCoralogixAlertRead(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { + id := wrapperspb.String(d.Id()) + getAlertRequest := &alerts.GetAlertByUniqueIdRequest{ + Id: id, + } + + log.Printf("[INFO] Reading alert %s", id) + alertResp, err := meta.(*clientset.ClientSet).Alerts().GetAlert(ctx, getAlertRequest) + if err != nil { + log.Printf("[ERROR] Received error: %s", err.Error()) + if status.Code(err) == codes.NotFound { + d.SetId("") + return diag.Diagnostics{diag.Diagnostic{ + Severity: diag.Warning, + Summary: fmt.Sprintf("Alert %q is in state, but no longer exists in Coralogix backend", id), + Detail: fmt.Sprintf("%s will be recreated when you apply", id), + }} + } + return diag.Errorf(formatRpcErrors(err, getAlertURL, protojson.Format(getAlertRequest))) + } + alert := alertResp.GetAlert() + alertStr := protojson.Format(alert) + log.Printf("[INFO] Received alert: %s", alertStr) + + return setAlert(d, alert) +} + +func resourceCoralogixAlertUpdate(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { + req, diags := extractAlert(d) + if len(diags) != 0 { + return diags + } + + updateAlertRequest := &alerts.UpdateAlertByUniqueIdRequest{ + Alert: req, + } + updateAlertStr := protojson.Format(updateAlertRequest) + log.Printf("[INFO] Updating alert %s", updateAlertStr) + alertResp, err := meta.(*clientset.ClientSet).Alerts().UpdateAlert(ctx, updateAlertRequest) + if err != nil { + log.Printf("[ERROR] Received error: %s", err.Error()) + return diag.Errorf(formatRpcErrors(err, updateAlertURL, updateAlertStr)) + } + updateAlertStr = protojson.Format(alertResp) + log.Printf("[INFO] Submitted updated alert: %s", updateAlertStr) + d.SetId(alertResp.GetAlert().GetUniqueIdentifier().GetValue()) + + return resourceCoralogixAlertRead(ctx, d, meta) +} + +func resourceCoralogixAlertDelete(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { + id := wrapperspb.String(d.Id()) + deleteAlertRequest := &alerts.DeleteAlertByUniqueIdRequest{ + Id: id, + } + + log.Printf("[INFO] Deleting alert %s", id) + _, err := meta.(*clientset.ClientSet).Alerts().DeleteAlert(ctx, deleteAlertRequest) + if err != nil { + log.Printf("[ERROR] Received error: %s", err.Error()) + return diag.Errorf(formatRpcErrors(err, deleteAlertURL, protojson.Format(deleteAlertRequest))) + } + log.Printf("[INFO] alert %s deleted", id) + + d.SetId("") + return nil +} + +func 
extractCreateAlertRequest(d *schema.ResourceData) (*alerts.CreateAlertRequest, diag.Diagnostics) { + var diags diag.Diagnostics + enabled := wrapperspb.Bool(d.Get("enabled").(bool)) + name := wrapperspb.String(d.Get("name").(string)) + description := wrapperspb.String(d.Get("description").(string)) + severity := expandAlertSeverity(d.Get("severity").(string)) + metaLabels := extractMetaLabels(d.Get("meta_labels")) + expirationDate := expandExpirationDate(d.Get("expiration_date")) + incidentSettings := expandIncidentSettings(d.Get("incident_settings")) + notificationGroups, dgs := expandNotificationGroups(d.Get("notifications_group")) + diags = append(diags, dgs...) + if len(diags) != 0 { + return nil, diags + } + payloadFilters := expandPayloadFilters(d.Get("payload_filters")) + scheduling := expandActiveWhen(d.Get("scheduling")) + alertTypeParams, tracingAlert, dgs := expandAlertType(d) + diags = append(diags, dgs...) + if len(diags) != 0 { + return nil, diags + } + + return &alerts.CreateAlertRequest{ + Name: name, + Description: description, + IsActive: enabled, + Severity: severity, + MetaLabels: metaLabels, + Expiration: expirationDate, + NotificationGroups: notificationGroups, + IncidentSettings: incidentSettings, + NotificationPayloadFilters: payloadFilters, + ActiveWhen: scheduling, + Filters: alertTypeParams.Filters, + Condition: alertTypeParams.Condition, + TracingAlert: tracingAlert, + }, diags +} + +func extractAlert(d *schema.ResourceData) (*alerts.Alert, diag.Diagnostics) { + var diags diag.Diagnostics + id := wrapperspb.String(d.Id()) + enabled := wrapperspb.Bool(d.Get("enabled").(bool)) + name := wrapperspb.String(d.Get("name").(string)) + description := wrapperspb.String(d.Get("description").(string)) + severity := expandAlertSeverity(d.Get("severity").(string)) + metaLabels := extractMetaLabels(d.Get("meta_labels")) + expirationDate := expandExpirationDate(d.Get("expiration_date")) + incidentSettings := expandIncidentSettings(d.Get("incident_settings")) + notificationGroups, dgs := expandNotificationGroups(d.Get("notifications_group")) + diags = append(diags, dgs...) + payloadFilters := expandPayloadFilters(d.Get("payload_filters")) + scheduling := expandActiveWhen(d.Get("scheduling")) + alertTypeParams, tracingAlert, dgs := expandAlertType(d) + diags = append(diags, dgs...) 
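// Editorial sketch, not part of the original patch: extractAlert mirrors
// extractCreateAlertRequest above but also carries the resource ID, so the
// UpdateAlertByUniqueIdRequest built in resourceCoralogixAlertUpdate sends the
// complete alert definition. A minimal hand-assembled value would look roughly
// like this (the field values and the "Critical" severity string are
// hypothetical; real values come from the schema.ResourceData getters above):
//
//	a := &alerts.Alert{
//		UniqueIdentifier: wrapperspb.String("alert-id"),
//		Name:             wrapperspb.String("error-spike"),
//		IsActive:         wrapperspb.Bool(true),
//		Severity:         expandAlertSeverity("Critical"),
//	}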
+ if len(diags) != 0 { + return nil, diags + } + + return &alerts.Alert{ + UniqueIdentifier: id, + Name: name, + Description: description, + IsActive: enabled, + Severity: severity, + MetaLabels: metaLabels, + Expiration: expirationDate, + IncidentSettings: incidentSettings, + NotificationGroups: notificationGroups, + NotificationPayloadFilters: payloadFilters, + ActiveWhen: scheduling, + Filters: alertTypeParams.Filters, + Condition: alertTypeParams.Condition, + TracingAlert: tracingAlert, + }, diags +} + +func expandPayloadFilters(v interface{}) []*wrapperspb.StringValue { + return interfaceSliceToWrappedStringSlice(v.(*schema.Set).List()) +} + +func setAlert(d *schema.ResourceData, alert *alerts.Alert) diag.Diagnostics { + if err := d.Set("name", alert.GetName().GetValue()); err != nil { + return diag.FromErr(err) + } + + if err := d.Set("description", alert.GetDescription().GetValue()); err != nil { + return diag.FromErr(err) + } + + if err := d.Set("enabled", alert.GetIsActive().GetValue()); err != nil { + return diag.FromErr(err) + } + + if err := d.Set("severity", flattenAlertSeverity(alert.GetSeverity().String())); err != nil { + return diag.FromErr(err) + } + + if err := d.Set("meta_labels", flattenMetaLabels(alert.GetMetaLabels())); err != nil { + return diag.FromErr(err) + } + + if err := d.Set("expiration_date", flattenExpirationDate(alert.GetExpiration())); err != nil { + return diag.FromErr(err) + } + + incidentSettings := flattenIncidentSettings(alert.GetIncidentSettings()) + if err := d.Set("incident_settings", incidentSettings); err != nil { + return diag.FromErr(err) + } + + if err := d.Set("notifications_group", flattenNotificationGroups(alert.GetNotificationGroups(), incidentSettings != nil)); err != nil { + return diag.FromErr(err) + } + + if err := d.Set("payload_filters", wrappedStringSliceToStringSlice(alert.GetNotificationPayloadFilters())); err != nil { + return diag.FromErr(err) + } + + if err := d.Set("scheduling", flattenScheduling(d, alert.GetActiveWhen())); err != nil { + return diag.FromErr(err) + } + + alertType, alertTypeParams := flattenAlertType(alert) + if err := d.Set(alertType, alertTypeParams); err != nil { + return diag.FromErr(err) + } + + return nil +} + +func flattenIncidentSettings(settings *alerts.AlertIncidentSettings) interface{} { + if settings == nil { + return nil + } + if !settings.GetUseAsNotificationSettings().GetValue() { + return nil + } + return []interface{}{ + map[string]interface{}{ + "retriggering_period_minutes": int(settings.GetRetriggeringPeriodSeconds().GetValue() / 60), + "notify_on": alertProtoNotifyOnToSchemaNotifyOn[settings.GetNotifyOn()], + }, + } +} + +func flattenAlertSeverity(str string) string { + return alertProtoSeverityToSchemaSeverity[str] +} + +func flattenMetaLabels(labels []*alerts.MetaLabel) interface{} { + result := make(map[string]interface{}) + for _, l := range labels { + key := l.GetKey().GetValue() + val := l.GetValue().GetValue() + result[key] = val + } + return result +} + +func flattenNotificationGroups(notificationGroups []*alerts.AlertNotificationGroups, incidentSettingsConfigured bool) interface{} { + result := make([]interface{}, 0, len(notificationGroups)) + for _, group := range notificationGroups { + notificationGroup := flattenNotificationGroup(group, incidentSettingsConfigured) + result = append(result, notificationGroup) + } + return result +} + +func flattenNotificationGroup(notificationGroup *alerts.AlertNotificationGroups, incidentSettingsConfigured bool) interface{} { + groupByFields 
:= wrappedStringSliceToStringSlice(notificationGroup.GetGroupByFields()) + notifications := flattenNotifications(notificationGroup.GetNotifications(), incidentSettingsConfigured) + return map[string]interface{}{ + "group_by_fields": groupByFields, + "notification": notifications, + } +} + +func flattenNotifications(notifications []*alerts.AlertNotification, incidentSettingsConfigured bool) interface{} { + result := make([]interface{}, 0, len(notifications)) + for _, n := range notifications { + notificationSubgroup := flattenNotificationSubgroup(n, incidentSettingsConfigured) + result = append(result, notificationSubgroup) + } + return result +} + +func flattenNotificationSubgroup(notification *alerts.AlertNotification, incidentSettingsConfigured bool) interface{} { + notificationSchema := map[string]interface{}{} + if !incidentSettingsConfigured { + notificationSchema["retriggering_period_minutes"] = int(notification.GetRetriggeringPeriodSeconds().GetValue() / 60) + notificationSchema["notify_on"] = alertProtoNotifyOnToSchemaNotifyOn[notification.GetNotifyOn()] + } + switch integration := notification.GetIntegrationType().(type) { + case *alerts.AlertNotification_IntegrationId: + notificationSchema["integration_id"] = strconv.Itoa(int(integration.IntegrationId.GetValue())) + case *alerts.AlertNotification_Recipients: + notificationSchema["email_recipients"] = wrappedStringSliceToStringSlice(integration.Recipients.Emails) + } + + return notificationSchema +} + +func flattenScheduling(d *schema.ResourceData, activeWhen *alerts.AlertActiveWhen) interface{} { + scheduling, ok := d.GetOk("scheduling") + if !ok || activeWhen == nil { + return nil + } + + timeZone := scheduling.([]interface{})[0].(map[string]interface{})["time_zone"].(string) + + timeFrames := flattenTimeFrames(activeWhen, timeZone) + + return []interface{}{ + map[string]interface{}{ + "time_zone": timeZone, + "time_frame": timeFrames, + }, + } +} + +func flattenTimeFrames(activeWhen *alerts.AlertActiveWhen, timeZone string) interface{} { + timeFrames := activeWhen.GetTimeframes() + utc := flattenUtc(timeZone) + result := schema.NewSet(hashTimeFrames(), []interface{}{}) + for _, tf := range timeFrames { + m := flattenTimeFrame(tf, utc) + result.Add(m) + } + return result +} + +func flattenUtc(timeZone string) int32 { + utcStr := strings.Split(timeZone, "UTC")[1] + utc, _ := strconv.Atoi(utcStr) + return int32(utc) +} + +func flattenTimeFrame(tf *alerts.AlertActiveTimeframe, utc int32) map[string]interface{} { + tr := tf.GetRange() + activityStartGMT, activityEndGMT := tr.GetStart(), tr.GetEnd() + daysOffset := getDaysOffsetFromGMT(activityStartGMT, utc) + activityStartUTC := flattenTimeInDay(activityStartGMT, utc) + activityEndUTC := flattenTimeInDay(activityEndGMT, utc) + daysOfWeek := flattenDaysOfWeek(tf.GetDaysOfWeek(), daysOffset) + + return map[string]interface{}{ + "days_enabled": daysOfWeek, + "start_time": activityStartUTC, + "end_time": activityEndUTC, + } +} + +func getDaysOffsetFromGMT(activityStartGMT *alerts.Time, utc int32) int32 { + daysOffset := int32(activityStartGMT.GetHours()+utc) / 24 + if daysOffset < 0 { + daysOffset += 7 + } + + return daysOffset +} + +func flattenTimeInDay(t *alerts.Time, utc int32) string { + hours := convertGmtToUtc(t.GetHours(), utc) + hoursStr := toTwoDigitsFormat(hours) + minStr := toTwoDigitsFormat(t.GetMinutes()) + return fmt.Sprintf("%s:%s", hoursStr, minStr) +} + +func flattenDaysOfWeek(daysOfWeek []alerts.DayOfWeek, daysOffset int32) interface{} { + result := 
schema.NewSet(schema.HashString, []interface{}{}) + for _, d := range daysOfWeek { + dayConvertedFromGmtToUtc := alerts.DayOfWeek((int32(d) + daysOffset) % 7) + day := alertProtoDayOfWeekToSchemaDayOfWeek[dayConvertedFromGmtToUtc.String()] + result.Add(day) + } + return result +} + +func flattenAlertType(a *alerts.Alert) (alertType string, alertSchema interface{}) { + filters := a.GetFilters() + condition := a.GetCondition().GetCondition() + + switch filters.GetFilterType() { + case alerts.AlertFilters_FILTER_TYPE_TEXT_OR_UNSPECIFIED: + if _, ok := condition.(*alerts.AlertCondition_NewValue); ok { + alertType = "new_value" + alertSchema = flattenNewValueAlert(filters, condition) + } else { + alertType = "standard" + alertSchema = flattenStandardAlert(filters, condition) + } + case alerts.AlertFilters_FILTER_TYPE_RATIO: + alertType = "ratio" + alertSchema = flattenRatioAlert(filters, condition) + case alerts.AlertFilters_FILTER_TYPE_UNIQUE_COUNT: + alertType = "unique_count" + alertSchema = flattenUniqueCountAlert(filters, condition) + case alerts.AlertFilters_FILTER_TYPE_TIME_RELATIVE: + alertType = "time_relative" + alertSchema = flattenTimeRelativeAlert(filters, condition) + case alerts.AlertFilters_FILTER_TYPE_METRIC: + alertType = "metric" + alertSchema = flattenMetricAlert(filters, condition) + case alerts.AlertFilters_FILTER_TYPE_TRACING: + alertType = "tracing" + alertSchema = flattenTracingAlert(condition, a.TracingAlert) + case alerts.AlertFilters_FILTER_TYPE_FLOW: + alertType = "flow" + alertSchema = flattenFlowAlert(condition) + } + + return +} + +func flattenNewValueAlert(filters *alerts.AlertFilters, condition interface{}) interface{} { + alertSchema := flattenCommonAlert(filters) + conditionMap := flattenNewValueCondition(condition) + alertSchema["condition"] = []interface{}{conditionMap} + return []interface{}{alertSchema} +} + +func flattenNewValueCondition(condition interface{}) interface{} { + conditionParams := condition.(*alerts.AlertCondition_NewValue).NewValue.GetParameters() + return map[string]interface{}{ + "time_window": alertProtoNewValueTimeFrameToSchemaTimeFrame[conditionParams.GetTimeframe().String()], + "key_to_track": conditionParams.GetGroupBy()[0].GetValue(), + } +} + +func flattenStandardAlert(filters *alerts.AlertFilters, condition interface{}) interface{} { + alertSchemaMap := flattenCommonAlert(filters) + conditionSchema := flattenStandardCondition(condition) + alertSchemaMap["condition"] = conditionSchema + return []interface{}{alertSchemaMap} +} + +func flattenStandardCondition(condition interface{}) (conditionSchema interface{}) { + var conditionParams *alerts.ConditionParameters + switch condition := condition.(type) { + case *alerts.AlertCondition_Immediate: + conditionSchema = []interface{}{ + map[string]interface{}{ + "immediately": true, + }, + } + case *alerts.AlertCondition_LessThan: + conditionParams = condition.LessThan.GetParameters() + groupBy := wrappedStringSliceToStringSlice(conditionParams.GroupBy) + m := map[string]interface{}{ + "less_than": true, + "threshold": int(conditionParams.GetThreshold().GetValue()), + "group_by": groupBy, + "time_window": alertProtoTimeFrameToSchemaTimeFrame[conditionParams.Timeframe.String()], + } + + if len(groupBy) > 0 { + m["manage_undetected_values"] = flattenManageUndetectedValues(conditionParams.GetRelatedExtendedData()) + } + + conditionSchema = []interface{}{m} + case *alerts.AlertCondition_MoreThan: + conditionParams = condition.MoreThan.GetParameters() + conditionSchema = []interface{}{ + 
map[string]interface{}{ + "more_than": true, + "threshold": int(conditionParams.GetThreshold().GetValue()), + "group_by": wrappedStringSliceToStringSlice(conditionParams.GroupBy), + "time_window": alertProtoTimeFrameToSchemaTimeFrame[conditionParams.Timeframe.String()], + "evaluation_window": alertProtoToSchemaEvaluationWindow[condition.MoreThan.GetEvaluationWindow()], + }, + } + case *alerts.AlertCondition_MoreThanUsual: + conditionParams = condition.MoreThanUsual.GetParameters() + conditionMap := map[string]interface{}{ + "more_than_usual": true, + "threshold": int(conditionParams.GetThreshold().GetValue()), + "time_window": alertProtoTimeFrameToSchemaTimeFrame[conditionParams.GetTimeframe().String()], + "group_by": wrappedStringSliceToStringSlice(conditionParams.GroupBy), + } + conditionSchema = []interface{}{ + conditionMap, + } + } + + return +} + +func flattenManageUndetectedValues(data *alerts.RelatedExtendedData) interface{} { + if data == nil { + return []map[string]interface{}{ + { + "enable_triggering_on_undetected_values": true, + "auto_retire_ratio": flattenDeadmanRatio(alerts.CleanupDeadmanDuration_CLEANUP_DEADMAN_DURATION_NEVER_OR_UNSPECIFIED), + }, + } + } else if data.GetShouldTriggerDeadman().GetValue() { + return []map[string]interface{}{ + { + "enable_triggering_on_undetected_values": true, + "auto_retire_ratio": flattenDeadmanRatio(data.GetCleanupDeadmanDuration()), + }, + } + } + + return []map[string]interface{}{ + { + "enable_triggering_on_undetected_values": false, + }, + } +} + +func flattenDeadmanRatio(cleanupDeadmanDuration alerts.CleanupDeadmanDuration) string { + deadmanRatioStr := alerts.CleanupDeadmanDuration_name[int32(cleanupDeadmanDuration)] + deadmanRatio := alertProtoDeadmanRatiosToSchemaDeadmanRatios[deadmanRatioStr] + return deadmanRatio +} + +func flattenRatioAlert(filters *alerts.AlertFilters, condition interface{}) interface{} { + query1Map := flattenCommonAlert(filters) + query1Map["alias"] = filters.GetAlias().GetValue() + query2 := filters.GetRatioAlerts()[0] + query2Map := flattenQuery2ParamsMap(query2) + conditionMap := flattenRatioCondition(condition, query2) + + return []interface{}{ + map[string]interface{}{ + "query_1": []interface{}{query1Map}, + "query_2": []interface{}{query2Map}, + "condition": []interface{}{conditionMap}, + }, + } +} + +func flattenRatioCondition(condition interface{}, query2 *alerts.AlertFilters_RatioAlert) interface{} { + var conditionParams *alerts.ConditionParameters + ratioParamsMap := make(map[string]interface{}) + + lessThan := false + switch condition := condition.(type) { + case *alerts.AlertCondition_LessThan: + conditionParams = condition.LessThan.GetParameters() + ratioParamsMap["less_than"] = true + lessThan = true + case *alerts.AlertCondition_MoreThan: + conditionParams = condition.MoreThan.GetParameters() + ratioParamsMap["more_than"] = true + default: + return nil + } + + ratioParamsMap["ratio_threshold"] = conditionParams.GetThreshold().GetValue() + ratioParamsMap["time_window"] = alertProtoTimeFrameToSchemaTimeFrame[conditionParams.GetTimeframe().String()] + ratioParamsMap["ignore_infinity"] = conditionParams.GetIgnoreInfinity().GetValue() + + groupByQ1 := conditionParams.GetGroupBy() + groupByQ2 := query2.GetGroupBy() + var groupBy []string + if len(groupByQ1) > 0 { + groupBy = wrappedStringSliceToStringSlice(groupByQ1) + if len(groupByQ2) > 0 { + ratioParamsMap["group_by_both"] = true + } else { + ratioParamsMap["group_by_q1"] = true + } + } else if len(groupByQ2) > 0 { + groupBy = 
wrappedStringSliceToStringSlice(groupByQ2) + ratioParamsMap["group_by_q1"] = true + } + ratioParamsMap["group_by"] = groupBy + + if len(groupBy) > 0 && lessThan { + ratioParamsMap["manage_undetected_values"] = flattenManageUndetectedValues(conditionParams.GetRelatedExtendedData()) + } + + return ratioParamsMap +} + +func flattenQuery2ParamsMap(query2 *alerts.AlertFilters_RatioAlert) interface{} { + return map[string]interface{}{ + "alias": query2.GetAlias().GetValue(), + "search_query": query2.GetText().GetValue(), + "severities": extractSeverities(query2.GetSeverities()), + "applications": wrappedStringSliceToStringSlice(query2.GetApplications()), + "subsystems": wrappedStringSliceToStringSlice(query2.GetSubsystems()), + } +} + +func flattenUniqueCountAlert(filters *alerts.AlertFilters, condition interface{}) interface{} { + alertSchema := flattenCommonAlert(filters) + conditionMap := flattenUniqueCountCondition(condition) + alertSchema["condition"] = []interface{}{conditionMap} + return []interface{}{alertSchema} +} + +func flattenUniqueCountCondition(condition interface{}) interface{} { + conditionParams := condition.(*alerts.AlertCondition_UniqueCount).UniqueCount.GetParameters() + conditionMap := map[string]interface{}{ + "unique_count_key": conditionParams.GetCardinalityFields()[0].GetValue(), + "max_unique_values": conditionParams.GetThreshold().GetValue(), + "time_window": alertProtoUniqueCountTimeFrameToSchemaTimeFrame[conditionParams.GetTimeframe().String()], + } + + if groupBy := conditionParams.GetGroupBy(); len(groupBy) > 0 { + conditionMap["group_by_key"] = conditionParams.GetGroupBy()[0].GetValue() + conditionMap["max_unique_values_for_group_by"] = conditionParams.GetMaxUniqueCountValuesForGroupByKey().GetValue() + } + + return conditionMap +} + +func flattenTimeRelativeAlert(filters *alerts.AlertFilters, condition interface{}) interface{} { + alertSchema := flattenCommonAlert(filters) + conditionMap := flattenTimeRelativeCondition(condition) + alertSchema["condition"] = []interface{}{conditionMap} + return []interface{}{alertSchema} +} + +func flattenTimeRelativeCondition(condition interface{}) interface{} { + var conditionParams *alerts.ConditionParameters + timeRelativeCondition := make(map[string]interface{}) + switch condition := condition.(type) { + case *alerts.AlertCondition_LessThan: + conditionParams = condition.LessThan.GetParameters() + timeRelativeCondition["less_than"] = true + if len(conditionParams.GroupBy) > 0 { + timeRelativeCondition["manage_undetected_values"] = flattenManageUndetectedValues(conditionParams.GetRelatedExtendedData()) + } + case *alerts.AlertCondition_MoreThan: + conditionParams = condition.MoreThan.GetParameters() + timeRelativeCondition["more_than"] = true + default: + return nil + } + + timeRelativeCondition["ignore_infinity"] = conditionParams.GetIgnoreInfinity().GetValue() + timeRelativeCondition["ratio_threshold"] = conditionParams.GetThreshold().GetValue() + timeRelativeCondition["group_by"] = wrappedStringSliceToStringSlice(conditionParams.GroupBy) + timeFrame := conditionParams.GetTimeframe() + relativeTimeFrame := conditionParams.GetRelativeTimeframe() + timeRelativeCondition["relative_time_window"] = flattenRelativeTimeWindow(timeFrame, relativeTimeFrame) + + return timeRelativeCondition +} + +func flattenRelativeTimeWindow(timeFrame alerts.Timeframe, relativeTimeFrame alerts.RelativeTimeframe) string { + p := protoTimeFrameAndRelativeTimeFrame{timeFrame: timeFrame, relativeTimeFrame: relativeTimeFrame} + return 
alertProtoTimeFrameAndRelativeTimeFrameToSchemaRelativeTimeFrame[p] +} + +func flattenMetricAlert(filters *alerts.AlertFilters, condition interface{}) interface{} { + var conditionParams *alerts.ConditionParameters + var conditionStr string + switch condition := condition.(type) { + case *alerts.AlertCondition_LessThan: + conditionParams = condition.LessThan.GetParameters() + conditionStr = "less_than" + case *alerts.AlertCondition_MoreThan: + conditionParams = condition.MoreThan.GetParameters() + conditionStr = "more_than" + case *alerts.AlertCondition_MoreThanUsual: + conditionParams = condition.MoreThanUsual.GetParameters() + conditionStr = "more_than_usual" + case *alerts.AlertCondition_LessThanUsual: + conditionParams = condition.LessThanUsual.GetParameters() + conditionStr = "less_than_usual" + case *alerts.AlertCondition_MoreThanOrEqual: + conditionParams = condition.MoreThanOrEqual.GetParameters() + conditionStr = "more_than_or_equal" + case *alerts.AlertCondition_LessThanOrEqual: + conditionParams = condition.LessThanOrEqual.GetParameters() + conditionStr = "less_than_or_equal" + default: + return nil + } + + var metricTypeStr string + var searchQuery string + var conditionMap map[string]interface{} + promqlParams := conditionParams.GetMetricAlertPromqlParameters() + if promqlParams != nil { + metricTypeStr = "promql" + searchQuery = promqlParams.GetPromqlText().GetValue() + conditionMap = flattenPromQLCondition(conditionParams) + } else { + metricTypeStr = "lucene" + searchQuery = filters.GetText().GetValue() + conditionMap = flattenLuceneCondition(conditionParams) + } + conditionMap[conditionStr] = true + if conditionStr == "less_than" || conditionStr == "less_than_or_equal" { + conditionMap["manage_undetected_values"] = flattenManageUndetectedValues(conditionParams.GetRelatedExtendedData()) + } + + metricMap := map[string]interface{}{ + "search_query": searchQuery, + "condition": []interface{}{conditionMap}, + } + + return []interface{}{ + map[string]interface{}{ + metricTypeStr: []interface{}{metricMap}, + }, + } +} + +func flattenPromQLCondition(params *alerts.ConditionParameters) (promQLConditionMap map[string]interface{}) { + promqlParams := params.GetMetricAlertPromqlParameters() + promQLConditionMap = + map[string]interface{}{ + "threshold": params.GetThreshold().GetValue(), + "time_window": alertProtoMetricTimeFrameToMetricSchemaTimeFrame[params.GetTimeframe().String()], + "sample_threshold_percentage": promqlParams.GetSampleThresholdPercentage().GetValue(), + "replace_missing_value_with_zero": promqlParams.GetSwapNullValues().GetValue(), + "min_non_null_values_percentage": promqlParams.GetNonNullPercentage().GetValue(), + } + return +} + +func flattenLuceneCondition(params *alerts.ConditionParameters) map[string]interface{} { + metricParams := params.GetMetricAlertParameters() + return map[string]interface{}{ + "metric_field": metricParams.GetMetricField().GetValue(), + "arithmetic_operator": alertProtoArithmeticOperatorToSchemaArithmetic[metricParams.GetArithmeticOperator().String()], + "threshold": params.GetThreshold().GetValue(), + "arithmetic_operator_modifier": metricParams.GetArithmeticOperatorModifier().GetValue(), + "sample_threshold_percentage": metricParams.GetSampleThresholdPercentage().GetValue(), + "time_window": alertProtoMetricTimeFrameToMetricSchemaTimeFrame[params.GetTimeframe().String()], + "group_by": wrappedStringSliceToStringSlice(params.GetGroupBy()), + "replace_missing_value_with_zero": metricParams.GetSwapNullValues().GetValue(), + 
"min_non_null_values_percentage": metricParams.GetNonNullPercentage().GetValue(), + } +} + +func flattenTracingAlert(condition interface{}, tracingAlert *alerts.TracingAlert) interface{} { + latencyThresholdMS := float64(tracingAlert.GetConditionLatency()) / float64(time.Millisecond.Microseconds()) + applications, subsystems, services := flattenTracingFilters(tracingAlert.GetFieldFilters()) + tagFilters := flattenTagFiltersData(tracingAlert.GetTagFilters()) + conditionSchema := flattenTracingCondition(condition) + + return []interface{}{ + map[string]interface{}{ + "latency_threshold_milliseconds": latencyThresholdMS, + "applications": applications, + "subsystems": subsystems, + "services": services, + "tag_filter": tagFilters, + "condition": conditionSchema, + }, + } +} + +func flattenTracingFilters(tracingFilters []*alerts.FilterData) (applications, subsystems, services interface{}) { + filtersData := flattenFiltersData(tracingFilters) + applications = filtersData["applicationName"] + subsystems = filtersData["subsystemName"] + services = filtersData["serviceName"] + return +} + +func flattenFlowAlert(condition interface{}) interface{} { + return []interface{}{flattenFlowAlertsCondition(condition.(*alerts.AlertCondition_Flow))} +} + +func flattenFlowAlertsCondition(condition *alerts.AlertCondition_Flow) interface{} { + stages := flattenStages(condition.Flow.GetStages()) + + m := map[string]interface{}{ + "stage": stages, + } + + if flowParams := condition.Flow.GetParameters(); flowParams != nil { + groupBy := wrappedStringSliceToStringSlice(flowParams.GetGroupBy()) + if len(groupBy) != 0 { + m["group_by"] = groupBy + } + } + + return m +} + +func flattenStages(stages []*alerts.FlowStage) []interface{} { + result := make([]interface{}, 0, len(stages)) + for _, stage := range stages { + result = append(result, flattenStage(stage)) + } + return result +} + +func flattenStage(stage *alerts.FlowStage) interface{} { + timeMS := int(stage.GetTimeframe().GetMs().GetValue()) + return map[string]interface{}{ + "group": flattenGroups(stage.GetGroups()), + "time_window": flattenTimeframe(timeMS), + } +} + +func flattenGroups(groups []*alerts.FlowGroup) []interface{} { + result := make([]interface{}, 0, len(groups)) + for _, g := range groups { + result = append(result, flattenGroup(g)) + } + return result +} + +func flattenGroup(fg *alerts.FlowGroup) interface{} { + subAlerts := flattenSubAlerts(fg.GetAlerts()) + operator := fg.GetNextOp().String() + return map[string]interface{}{ + "sub_alerts": subAlerts, + "next_operator": operator, + } +} + +func flattenSubAlerts(subAlerts *alerts.FlowAlerts) interface{} { + operator := subAlerts.GetOp().String() + flowAlerts := make([]interface{}, 0, len(subAlerts.GetValues())) + for _, sa := range subAlerts.GetValues() { + flowAlerts = append(flowAlerts, flattenInnerFlowAlert(sa)) + } + + return []interface{}{ + map[string]interface{}{ + "operator": operator, + "flow_alert": flowAlerts, + }, + } +} + +func flattenInnerFlowAlert(subAlert *alerts.FlowAlert) interface{} { + return map[string]interface{}{ + "not": subAlert.GetNot().GetValue(), + "user_alert_id": subAlert.GetId().GetValue(), + } +} + +func flattenFiltersData(filtersData []*alerts.FilterData) map[string]interface{} { + result := make(map[string]interface{}, len(filtersData)) + for _, filter := range filtersData { + field := filter.GetField() + result[field] = flattenFilters(filter.GetFilters()) + } + return result +} + +func flattenTagFiltersData(filtersData []*alerts.FilterData) interface{} { + 
fieldToFilters := flattenFiltersData(filtersData) + result := make([]interface{}, 0, len(fieldToFilters)) + for field, filters := range fieldToFilters { + filterSchema := map[string]interface{}{ + "field": field, + "values": filters, + } + result = append(result, filterSchema) + } + return result +} + +func flattenFilters(filters []*alerts.Filters) []string { + result := make([]string, 0) + for _, f := range filters { + values := f.GetValues() + switch operator := f.GetOperator(); operator { + case "notEquals", "contains", "startsWith", "endsWith": + for i, val := range values { + values[i] = fmt.Sprintf("filter:%s:%s", operator, val) + } + } + result = append(result, values...) + } + return result +} + +func flattenTracingCondition(condition interface{}) interface{} { + switch condition := condition.(type) { + case *alerts.AlertCondition_Immediate: + return []interface{}{ + map[string]interface{}{ + "immediately": true, + }, + } + case *alerts.AlertCondition_MoreThan: + conditionParams := condition.MoreThan.GetParameters() + return []interface{}{ + map[string]interface{}{ + "more_than": true, + "threshold": conditionParams.GetThreshold().GetValue(), + "time_window": alertProtoTimeFrameToSchemaTimeFrame[conditionParams.GetTimeframe().String()], + "group_by": wrappedStringSliceToStringSlice(conditionParams.GetGroupBy()), + }, + } + default: + return nil + } +} + +func flattenCommonAlert(filters *alerts.AlertFilters) map[string]interface{} { + metadata := filters.GetMetadata() + return map[string]interface{}{ + "search_query": filters.GetText().GetValue(), + "severities": extractSeverities(filters.GetSeverities()), + "applications": wrappedStringSliceToStringSlice(metadata.GetApplications()), + "subsystems": wrappedStringSliceToStringSlice(metadata.GetSubsystems()), + "categories": wrappedStringSliceToStringSlice(metadata.GetCategories()), + "computers": wrappedStringSliceToStringSlice(metadata.GetComputers()), + "classes": wrappedStringSliceToStringSlice(metadata.GetClasses()), + "methods": wrappedStringSliceToStringSlice(metadata.GetMethods()), + "ip_addresses": wrappedStringSliceToStringSlice(metadata.GetIpAddresses()), + } +} + +func extractSeverities(severities []alerts.AlertFilters_LogSeverity) []string { + result := make([]string, 0, len(severities)) + for _, s := range severities { + result = append(result, alertProtoLogSeverityToSchemaLogSeverity[s.String()]) + } + return result +} + +func flattenExpirationDate(expiration *alerts.Date) []map[string]int { + if expiration == nil { + return nil + } + m := map[string]int{ + "year": int(expiration.GetYear()), + "month": int(expiration.GetMonth()), + "day": int(expiration.GetDay()), + } + + return []map[string]int{m} +} + +func expandAlertSeverity(severity string) alerts.AlertSeverity { + severityStr := alertSchemaSeverityToProtoSeverity[severity] + formatStandardVal := alerts.AlertSeverity_value[severityStr] + return alerts.AlertSeverity(formatStandardVal) +} + +func expandExpirationDate(v interface{}) *alerts.Date { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil + } + raw := l[0] + m := raw.(map[string]interface{}) + return &alerts.Date{ + Year: int32(m["year"].(int)), + Month: int32(m["month"].(int)), + Day: int32(m["day"].(int)), + } +} + +func expandIncidentSettings(v interface{}) *alerts.AlertIncidentSettings { + l, ok := v.([]interface{}) + if !ok || len(l) == 0 || l[0] == nil { + return nil + } + raw := l[0] + m := raw.(map[string]interface{}) + + retriggeringPeriodSeconds := 
wrapperspb.UInt32(uint32(m["retriggering_period_minutes"].(int)) * 60) + notifyOn := alertSchemaNotifyOnToProtoNotifyOn[m["notify_on"].(string)] + + return &alerts.AlertIncidentSettings{ + RetriggeringPeriodSeconds: retriggeringPeriodSeconds, + NotifyOn: notifyOn, + UseAsNotificationSettings: wrapperspb.Bool(true), + } + +} + +func expandNotificationGroups(v interface{}) ([]*alerts.AlertNotificationGroups, diag.Diagnostics) { + v = v.(*schema.Set).List() + l := v.([]interface{}) + result := make([]*alerts.AlertNotificationGroups, 0, len(l)) + var diags diag.Diagnostics + for _, s := range l { + ml, dgs := expandNotificationGroup(s) + diags = append(diags, dgs...) + result = append(result, ml) + } + return result, diags +} + +func expandNotificationGroup(v interface{}) (*alerts.AlertNotificationGroups, diag.Diagnostics) { + if v == nil { + return nil, nil + } + m := v.(map[string]interface{}) + + groupByFields := interfaceSliceToWrappedStringSlice(m["group_by_fields"].([]interface{})) + notifications, diags := expandNotificationSubgroups(m["notification"]) + if len(diags) != 0 { + return nil, diags + } + + return &alerts.AlertNotificationGroups{ + GroupByFields: groupByFields, + Notifications: notifications, + }, nil +} + +func expandNotificationSubgroups(v interface{}) ([]*alerts.AlertNotification, diag.Diagnostics) { + v = v.(*schema.Set).List() + notifications := v.([]interface{}) + result := make([]*alerts.AlertNotification, 0, len(notifications)) + var diags diag.Diagnostics + for _, n := range notifications { + notification, err := expandNotificationSubgroup(n) + if err != nil { + diags = append(diags, diag.FromErr(err)...) + } + result = append(result, notification) + } + return result, diags +} + +func expandNotificationSubgroup(v interface{}) (*alerts.AlertNotification, error) { + if v == nil { + return nil, nil + } + m := v.(map[string]interface{}) + + var notifyEverySec *wrapperspb.UInt32Value + if minutes, ok := m["retriggering_period_minutes"].(int); ok && minutes != 0 { + notifyEverySec = wrapperspb.UInt32(uint32(minutes) * 60) + } + + var notifyOn *alerts.NotifyOn + if notifyOnStr, ok := m["notify_on"].(string); ok { + notifyOn = new(alerts.NotifyOn) + *notifyOn = alertSchemaNotifyOnToProtoNotifyOn[notifyOnStr] + } + + notification := &alerts.AlertNotification{ + RetriggeringPeriodSeconds: notifyEverySec, + NotifyOn: notifyOn, + } + + var isWebhookIdDefined bool + if webhookID, ok := m["integration_id"].(string); ok && webhookID != "" { + isWebhookIdDefined = true + id := parseNumUint32(webhookID) + notification.IntegrationType = &alerts.AlertNotification_IntegrationId{ + IntegrationId: wrapperspb.UInt32(id), + } + } + + if emails := m["email_recipients"].(*schema.Set).List(); len(emails) != 0 { + if isWebhookIdDefined { + return nil, fmt.Errorf("exactly one of 'integration_id' or 'email_recipients' is required") + } + + notification.IntegrationType = &alerts.AlertNotification_Recipients{ + Recipients: &alerts.Recipients{ + Emails: interfaceSliceToWrappedStringSlice(emails), + }, + } + } + + return notification, nil +} + +func extractMetaLabels(v interface{}) []*alerts.MetaLabel { + m := v.(map[string]interface{}) + result := make([]*alerts.MetaLabel, 0, len(m)) + for key, val := range m { + ml := &alerts.MetaLabel{ + Key: wrapperspb.String(key), + Value: wrapperspb.String(val.(string)), + } + result = append(result, ml) + } + return result +} + +func expandActiveWhen(v interface{}) *alerts.AlertActiveWhen { + l := v.([]interface{}) + if len(l) == 0 { + return nil + } + + 
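// Editorial sketch, not part of the original patch: scheduling times are
// entered in the block's time_zone and converted to GMT before being sent.
// For a hypothetical time_zone = "UTC+2" with start_time = "01:00":
//
//	utc := flattenUtc("UTC+2")     // 2 (strconv.Atoi on the part after "UTC")
//	gmt := convertUtcToGmt(1, utc) // 23, i.e. 23:00 of the previous day in GMT
//
// convertTimeFramesToGMT below applies the same conversion to the end time and
// adjusts days_enabled through daysOfWeekOffsetToGMT.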
schedulingMap := l[0].(map[string]interface{}) + utc := flattenUtc(schedulingMap["time_zone"].(string)) + timeFrames := schedulingMap["time_frame"].(*schema.Set).List() + + expandedTimeframes := expandActiveTimeframes(timeFrames, utc) + + return &alerts.AlertActiveWhen{ + Timeframes: expandedTimeframes, + } +} + +func expandActiveTimeframes(timeFrames []interface{}, utc int32) []*alerts.AlertActiveTimeframe { + result := make([]*alerts.AlertActiveTimeframe, 0, len(timeFrames)) + for _, tf := range timeFrames { + alertActiveTimeframe := expandActiveTimeFrame(tf, utc) + result = append(result, alertActiveTimeframe) + } + return result +} + +func expandActiveTimeFrame(timeFrame interface{}, utc int32) *alerts.AlertActiveTimeframe { + m := timeFrame.(map[string]interface{}) + daysOfWeek := expandDaysOfWeek(m["days_enabled"]) + frameRange := expandRange(m["start_time"], m["end_time"]) + frameRange, daysOfWeek = convertTimeFramesToGMT(frameRange, daysOfWeek, utc) + + alertActiveTimeframe := &alerts.AlertActiveTimeframe{ + DaysOfWeek: daysOfWeek, + Range: frameRange, + } + return alertActiveTimeframe +} + +func convertTimeFramesToGMT(frameRange *alerts.TimeRange, daysOfWeek []alerts.DayOfWeek, utc int32) (*alerts.TimeRange, []alerts.DayOfWeek) { + daysOfWeekOffset := daysOfWeekOffsetToGMT(frameRange, utc) + frameRange.Start.Hours = convertUtcToGmt(frameRange.GetStart().GetHours(), utc) + frameRange.End.Hours = convertUtcToGmt(frameRange.GetEnd().GetHours(), utc) + if daysOfWeekOffset != 0 { + for i, d := range daysOfWeek { + daysOfWeek[i] = alerts.DayOfWeek((int32(d) + daysOfWeekOffset) % 7) + } + } + + return frameRange, daysOfWeek +} + +func daysOfWeekOffsetToGMT(frameRange *alerts.TimeRange, utc int32) int32 { + daysOfWeekOffset := int32(frameRange.Start.Hours-utc) / 24 + if daysOfWeekOffset < 0 { + daysOfWeekOffset += 7 + } + return daysOfWeekOffset +} + +func convertUtcToGmt(hours, utc int32) int32 { + hours -= utc + if hours < 0 { + hours += 24 + } else if hours >= 24 { + hours -= 24 + } + + return hours +} + +func convertGmtToUtc(hours, utc int32) int32 { + hours += utc + if hours < 0 { + hours += 24 + } else if hours >= 24 { + hours -= 24 + } + + return hours +} + +func expandDaysOfWeek(v interface{}) []alerts.DayOfWeek { + l := v.(*schema.Set).List() + result := make([]alerts.DayOfWeek, 0, len(l)) + for _, v := range l { + dayOfWeekStr := alertSchemaDayOfWeekToProtoDayOfWeek[v.(string)] + dayOfWeekVal := alerts.DayOfWeek_value[dayOfWeekStr] + result = append(result, alerts.DayOfWeek(dayOfWeekVal)) + } + return result +} + +func expandRange(activityStarts, activityEnds interface{}) *alerts.TimeRange { + start := expandTimeInDay(activityStarts) + end := expandTimeInDay(activityEnds) + + return &alerts.TimeRange{ + Start: start, + End: end, + } +} + +func expandAlertType(d *schema.ResourceData) (alertTypeParams *alertParams, tracingAlert *alerts.TracingAlert, diags diag.Diagnostics) { + alertTypeStr := From(validAlertTypes).FirstWith(func(key interface{}) bool { + return len(d.Get(key.(string)).([]interface{})) > 0 + }).(string) + + alertType := d.Get(alertTypeStr).([]interface{})[0].(map[string]interface{}) + + switch alertTypeStr { + case "standard": + alertTypeParams, diags = expandStandard(alertType) + case "ratio": + alertTypeParams, diags = expandRatio(alertType) + case "new_value": + alertTypeParams = expandNewValue(alertType) + case "unique_count": + alertTypeParams = expandUniqueCount(alertType) + case "time_relative": + alertTypeParams, diags = expandTimeRelative(alertType) + case 
"metric": + alertTypeParams, diags = expandMetric(alertType) + case "tracing": + alertTypeParams, tracingAlert = expandTracing(alertType) + case "flow": + alertTypeParams = expandFlow(alertType) + } + + return +} + +func expandStandard(m map[string]interface{}) (*alertParams, diag.Diagnostics) { + conditionMap := extractConditionMap(m) + condition, err := expandStandardCondition(conditionMap) + if err != nil { + return nil, diag.FromErr(err) + } + filters := expandStandardFilter(m) + return &alertParams{ + Condition: condition, + Filters: filters, + }, nil +} + +func expandStandardCondition(m map[string]interface{}) (*alerts.AlertCondition, error) { + if immediately := m["immediately"]; immediately != nil && immediately.(bool) { + return &alerts.AlertCondition{ + Condition: &alerts.AlertCondition_Immediate{}, + }, nil + } else if moreThenUsual := m["more_than_usual"]; moreThenUsual != nil && moreThenUsual.(bool) { + threshold := wrapperspb.Double(float64(m["threshold"].(int))) + groupBy := interfaceSliceToWrappedStringSlice(m["group_by"].([]interface{})) + parameters := &alerts.ConditionParameters{ + Threshold: threshold, + GroupBy: groupBy, + Timeframe: expandTimeFrame(m["time_window"].(string)), + } + return &alerts.AlertCondition{ + Condition: &alerts.AlertCondition_MoreThanUsual{ + MoreThanUsual: &alerts.MoreThanUsualCondition{Parameters: parameters}, + }, + }, nil + } else { + parameters, err := expandStandardConditionParameters(m) + if err != nil { + return nil, err + } + if lessThan := m["less_than"]; lessThan != nil && lessThan.(bool) { + return &alerts.AlertCondition{ + Condition: &alerts.AlertCondition_LessThan{ + LessThan: &alerts.LessThanCondition{Parameters: parameters}, + }, + }, nil + } else if moreThan := m["more_than"]; moreThan != nil && moreThan.(bool) { + evaluationWindow := expandEvaluationWindow(m) + return &alerts.AlertCondition{ + Condition: &alerts.AlertCondition_MoreThan{ + MoreThan: &alerts.MoreThanCondition{ + Parameters: parameters, + EvaluationWindow: evaluationWindow, + }, + }, + }, nil + } + } + + return nil, fmt.Errorf("immediately, less_than, more_than or more_than_usual have to be true") +} + +func expandEvaluationWindow(m map[string]interface{}) *alerts.EvaluationWindow { + var evaluationWindow *alerts.EvaluationWindow + if evaluationWindowStr, ok := m["evaluation_window"].(string); ok && evaluationWindowStr != "" { + evaluationWindow = new(alerts.EvaluationWindow) + *evaluationWindow = alertSchemaToProtoEvaluationWindow[evaluationWindowStr] + } + return evaluationWindow +} + +func expandRelatedExtendedData(m map[string]interface{}) (*alerts.RelatedExtendedData, error) { + if v, ok := m["less_than"]; !(ok && v.(bool)) { + return nil, nil + } + + if v, ok := m["manage_undetected_values"]; ok { + if manageUndetectedValues, ok := v.([]interface{}); ok && len(manageUndetectedValues) != 0 { + raw := manageUndetectedValues[0].(map[string]interface{}) + if enable, autoRetireRatio := raw["enable_triggering_on_undetected_values"], raw["auto_retire_ratio"]; enable.(bool) { + if autoRetireRatio == nil || autoRetireRatio.(string) == "" { + return nil, fmt.Errorf("auto_retire_ratio is required when enable_triggering_on_undetected_values = true") + } + cleanupDeadmanDurationStr := alertSchemaDeadmanRatiosToProtoDeadmanRatios[autoRetireRatio.(string)] + cleanupDeadmanDuration := alerts.CleanupDeadmanDuration(alerts.CleanupDeadmanDuration_value[cleanupDeadmanDurationStr]) + return &alerts.RelatedExtendedData{ + CleanupDeadmanDuration: &cleanupDeadmanDuration, + 
ShouldTriggerDeadman: wrapperspb.Bool(true), + }, nil + } else { + if autoRetireRatio != nil && autoRetireRatio.(string) != "" { + return nil, fmt.Errorf("auto_retire_ratio is not allowed when enable_triggering_on_undetected_values = false") + } + return &alerts.RelatedExtendedData{ + ShouldTriggerDeadman: wrapperspb.Bool(false), + }, nil + } + } + } + + return nil, nil +} + +func expandStandardConditionParameters(m map[string]interface{}) (*alerts.ConditionParameters, error) { + timeFrame := expandTimeFrame(m["time_window"].(string)) + groupBy := interfaceSliceToWrappedStringSlice(m["group_by"].([]interface{})) + threshold := wrapperspb.Double(float64(m["threshold"].(int))) + relatedExtendedData, err := expandRelatedExtendedData(m) + if err != nil { + return nil, err + } + + return &alerts.ConditionParameters{ + Threshold: threshold, + Timeframe: timeFrame, + GroupBy: groupBy, + RelatedExtendedData: relatedExtendedData, + }, nil +} + +func expandTracingConditionParameters(m map[string]interface{}) *alerts.ConditionParameters { + timeFrame := expandTimeFrame(m["time_window"].(string)) + groupBy := interfaceSliceToWrappedStringSlice(m["group_by"].([]interface{})) + threshold := wrapperspb.Double(float64(m["threshold"].(int))) + + return &alerts.ConditionParameters{ + Threshold: threshold, + Timeframe: timeFrame, + GroupBy: groupBy, + } +} + +func expandStandardFilter(m map[string]interface{}) *alerts.AlertFilters { + filters := expandCommonAlertFilter(m) + filters.FilterType = alerts.AlertFilters_FILTER_TYPE_TEXT_OR_UNSPECIFIED + return filters +} + +func expandRatio(m map[string]interface{}) (*alertParams, diag.Diagnostics) { + conditionMap := extractConditionMap(m) + groupBy := interfaceSliceToWrappedStringSlice(conditionMap["group_by"].([]interface{})) + var groupByQ1, groupByQ2 []*wrapperspb.StringValue + if len(groupBy) > 0 { + if conditionMap["group_by_q1"].(bool) { + groupByQ1 = groupBy + } else if conditionMap["group_by_q2"].(bool) { + groupByQ2 = groupBy + } else if conditionMap["group_by_both"].(bool) { + groupByQ1 = groupBy + groupByQ2 = groupBy + } else { + return nil, diag.Errorf("group_by is required with one of - group_by_q1/group_by_q2/group_by_both") + } + } + + condition, err := expandRatioCondition(conditionMap, groupByQ1) + if err != nil { + return nil, diag.FromErr(err) + } + filters := expandRatioFilters(m, groupByQ2) + + return &alertParams{ + Condition: condition, + Filters: filters, + }, nil +} + +func expandRatioFilters(m map[string]interface{}, groupBy []*wrapperspb.StringValue) *alerts.AlertFilters { + query1 := m["query_1"].([]interface{})[0].(map[string]interface{}) + filters := expandCommonAlertFilter(query1) + filters.FilterType = alerts.AlertFilters_FILTER_TYPE_RATIO + filters.Alias = wrapperspb.String(query1["alias"].(string)) + query2 := expandQuery2(m["query_2"], groupBy) + filters.RatioAlerts = []*alerts.AlertFilters_RatioAlert{query2} + return filters +} + +func expandRatioCondition(m map[string]interface{}, groupBy []*wrapperspb.StringValue) (*alerts.AlertCondition, error) { + parameters, err := expandRatioParams(m, groupBy) + if err != nil { + return nil, err + } + + return expandLessThanOrMoreThanAlertCondition(m, parameters) +} + +func expandRatioParams(m map[string]interface{}, groupBy []*wrapperspb.StringValue) (*alerts.ConditionParameters, error) { + threshold := wrapperspb.Double(m["ratio_threshold"].(float64)) + timeFrame := expandTimeFrame(m["time_window"].(string)) + ignoreInfinity := wrapperspb.Bool(m["ignore_infinity"].(bool)) + 
relatedExtendedData, err := expandRelatedExtendedData(m) + if err != nil { + return nil, err + } + + return &alerts.ConditionParameters{ + Threshold: threshold, + Timeframe: timeFrame, + GroupBy: groupBy, + IgnoreInfinity: ignoreInfinity, + RelatedExtendedData: relatedExtendedData, + }, nil +} + +func expandQuery2(v interface{}, groupBy []*wrapperspb.StringValue) *alerts.AlertFilters_RatioAlert { + m := v.([]interface{})[0].(map[string]interface{}) + alias := wrapperspb.String(m["alias"].(string)) + text := wrapperspb.String(m["search_query"].(string)) + severities := expandAlertFiltersSeverities(m["severities"].(*schema.Set).List()) + applications := interfaceSliceToWrappedStringSlice(m["applications"].(*schema.Set).List()) + subsystems := interfaceSliceToWrappedStringSlice(m["subsystems"].(*schema.Set).List()) + return &alerts.AlertFilters_RatioAlert{ + Alias: alias, + Text: text, + Severities: severities, + Applications: applications, + Subsystems: subsystems, + GroupBy: groupBy, + } +} + +func expandNewValue(m map[string]interface{}) *alertParams { + conditionMap := extractConditionMap(m) + condition := expandNewValueCondition(conditionMap) + filters := expandNewValueFilters(m) + + return &alertParams{ + Condition: condition, + Filters: filters, + } +} + +func expandNewValueCondition(m map[string]interface{}) *alerts.AlertCondition { + parameters := expandNewValueConditionParameters(m) + condition := &alerts.AlertCondition{ + Condition: &alerts.AlertCondition_NewValue{ + NewValue: &alerts.NewValueCondition{ + Parameters: parameters, + }, + }, + } + return condition +} + +func expandNewValueConditionParameters(m map[string]interface{}) *alerts.ConditionParameters { + timeFrame := expandNewValueTimeFrame(m["time_window"].(string)) + groupBy := []*wrapperspb.StringValue{wrapperspb.String(m["key_to_track"].(string))} + parameters := &alerts.ConditionParameters{ + Timeframe: timeFrame, + GroupBy: groupBy, + } + return parameters +} + +func expandNewValueFilters(m map[string]interface{}) *alerts.AlertFilters { + filters := expandCommonAlertFilter(m) + filters.FilterType = alerts.AlertFilters_FILTER_TYPE_TEXT_OR_UNSPECIFIED + return filters +} + +func expandUniqueCount(m map[string]interface{}) *alertParams { + conditionMap := extractConditionMap(m) + condition := expandUniqueCountCondition(conditionMap) + filters := expandUniqueCountFilters(m) + + return &alertParams{ + Condition: condition, + Filters: filters, + } +} + +func expandUniqueCountCondition(m map[string]interface{}) *alerts.AlertCondition { + parameters := expandUniqueCountConditionParameters(m) + return &alerts.AlertCondition{ + Condition: &alerts.AlertCondition_UniqueCount{ + UniqueCount: &alerts.UniqueCountCondition{ + Parameters: parameters, + }, + }, + } +} + +func expandUniqueCountConditionParameters(m map[string]interface{}) *alerts.ConditionParameters { + uniqueCountKey := []*wrapperspb.StringValue{wrapperspb.String(m["unique_count_key"].(string))} + threshold := wrapperspb.Double(float64(m["max_unique_values"].(int))) + timeFrame := expandUniqueValueTimeFrame(m["time_window"].(string)) + + var groupByThreshold *wrapperspb.UInt32Value + var groupBy []*wrapperspb.StringValue + if groupByKey := m["group_by_key"]; groupByKey != nil && groupByKey.(string) != "" { + groupBy = []*wrapperspb.StringValue{wrapperspb.String(groupByKey.(string))} + groupByThreshold = wrapperspb.UInt32(uint32(m["max_unique_values_for_group_by"].(int))) + } + + return &alerts.ConditionParameters{ + CardinalityFields: uniqueCountKey, + Threshold: 
threshold, + Timeframe: timeFrame, + GroupBy: groupBy, + MaxUniqueCountValuesForGroupByKey: groupByThreshold, + } +} + +func expandUniqueCountFilters(m map[string]interface{}) *alerts.AlertFilters { + filters := expandCommonAlertFilter(m) + filters.FilterType = alerts.AlertFilters_FILTER_TYPE_UNIQUE_COUNT + return filters +} + +func expandCommonAlertFilter(m map[string]interface{}) *alerts.AlertFilters { + severities := expandAlertFiltersSeverities(m["severities"].(*schema.Set).List()) + metadata := expandMetadata(m) + text := wrapperspb.String(m["search_query"].(string)) + + return &alerts.AlertFilters{ + Severities: severities, + Metadata: metadata, + Text: text, + } +} + +func expandTimeRelative(m map[string]interface{}) (*alertParams, diag.Diagnostics) { + conditionMap := extractConditionMap(m) + condition, err := expandTimeRelativeCondition(conditionMap) + if err != nil { + return nil, diag.FromErr(err) + } + filters := expandTimeRelativeFilters(m) + + return &alertParams{ + Condition: condition, + Filters: filters, + }, nil +} + +func expandTimeRelativeCondition(m map[string]interface{}) (*alerts.AlertCondition, error) { + parameters, err := expandTimeRelativeConditionParameters(m) + if err != nil { + return nil, err + } + + return expandLessThanOrMoreThanAlertCondition(m, parameters) +} + +func expandLessThanOrMoreThanAlertCondition( + m map[string]interface{}, parameters *alerts.ConditionParameters) (*alerts.AlertCondition, error) { + lessThan, err := trueIfIsLessThanFalseIfMoreThanAndErrorOtherwise(m) + if err != nil { + return nil, err + } + + if lessThan { + return &alerts.AlertCondition{ + Condition: &alerts.AlertCondition_LessThan{ + LessThan: &alerts.LessThanCondition{Parameters: parameters}, + }, + }, nil + } + + return &alerts.AlertCondition{ + Condition: &alerts.AlertCondition_MoreThan{ + MoreThan: &alerts.MoreThanCondition{Parameters: parameters}, + }, + }, nil +} + +func trueIfIsLessThanFalseIfMoreThanAndErrorOtherwise(m map[string]interface{}) (bool, error) { + if lessThan := m["less_than"]; lessThan != nil && lessThan.(bool) { + return true, nil + } else if moreThan := m["more_than"]; moreThan != nil && moreThan.(bool) { + return false, nil + } + return false, fmt.Errorf("less_than or more_than have to be true") +} + +func expandPromqlCondition(m map[string]interface{}, parameters *alerts.ConditionParameters) (*alerts.AlertCondition, error) { + conditionsStr, err := returnAlertConditionString(m) + if err != nil { + return nil, err + } + + switch conditionsStr { + case "less_than": + return &alerts.AlertCondition{ + Condition: &alerts.AlertCondition_LessThan{ + LessThan: &alerts.LessThanCondition{Parameters: parameters}, + }, + }, nil + case "more_than": + return &alerts.AlertCondition{ + Condition: &alerts.AlertCondition_MoreThan{ + MoreThan: &alerts.MoreThanCondition{Parameters: parameters}, + }, + }, nil + case "more_than_usual": + return &alerts.AlertCondition{ + Condition: &alerts.AlertCondition_MoreThanUsual{ + MoreThanUsual: &alerts.MoreThanUsualCondition{Parameters: parameters}, + }, + }, nil + case "less_than_usual": + return &alerts.AlertCondition{ + Condition: &alerts.AlertCondition_LessThanUsual{ + LessThanUsual: &alerts.LessThanUsualCondition{Parameters: parameters}, + }, + }, nil + case "less_than_or_equal": + return &alerts.AlertCondition{ + Condition: &alerts.AlertCondition_LessThanOrEqual{ + LessThanOrEqual: &alerts.LessThanOrEqualCondition{Parameters: parameters}, + }, + }, nil + case "more_than_or_equal": + return &alerts.AlertCondition{ + Condition: 
&alerts.AlertCondition_MoreThanOrEqual{ + MoreThanOrEqual: &alerts.MoreThanOrEqualCondition{Parameters: parameters}, + }, + }, nil + } + + return nil, fmt.Errorf("less_than, more_than, more_than_usual, less_than_usual, less_than_or_equal, or more_than_or_equal must be set to true") +} + +func returnAlertConditionString(m map[string]interface{}) (string, error) { + if lessThan := m["less_than"]; lessThan != nil && lessThan.(bool) { + return "less_than", nil + } else if moreThan := m["more_than"]; moreThan != nil && moreThan.(bool) { + return "more_than", nil + } else if moreThanUsual := m["more_than_usual"]; moreThanUsual != nil && moreThanUsual.(bool) { + return "more_than_usual", nil + } else if lessThanUsual := m["less_than_usual"]; lessThanUsual != nil && lessThanUsual.(bool) { + return "less_than_usual", nil + } else if lessThanOrEqual := m["less_than_or_equal"]; lessThanOrEqual != nil && lessThanOrEqual.(bool) { + return "less_than_or_equal", nil + } else if moreThanOrEqual := m["more_than_or_equal"]; moreThanOrEqual != nil && moreThanOrEqual.(bool) { + return "more_than_or_equal", nil + } + + return "", fmt.Errorf("less_than, more_than, more_than_usual, less_than_usual, less_than_or_equal, or more_than_or_equal must be set to true") +} + +func expandTimeRelativeConditionParameters(m map[string]interface{}) (*alerts.ConditionParameters, error) { + timeFrame, relativeTimeframe := expandTimeFrameAndRelativeTimeframe(m["relative_time_window"].(string)) + ignoreInfinity := wrapperspb.Bool(m["ignore_infinity"].(bool)) + groupBy := interfaceSliceToWrappedStringSlice(m["group_by"].([]interface{})) + threshold := wrapperspb.Double(m["ratio_threshold"].(float64)) + relatedExtendedData, err := expandRelatedExtendedData(m) + if err != nil { + return nil, err + } + + return &alerts.ConditionParameters{ + Timeframe: timeFrame, + RelativeTimeframe: relativeTimeframe, + GroupBy: groupBy, + Threshold: threshold, + IgnoreInfinity: ignoreInfinity, + RelatedExtendedData: relatedExtendedData, + }, nil +} + +func expandTimeFrameAndRelativeTimeframe(relativeTimeframeStr string) (alerts.Timeframe, alerts.RelativeTimeframe) { + p := alertSchemaRelativeTimeFrameToProtoTimeFrameAndRelativeTimeFrame[relativeTimeframeStr] + return p.timeFrame, p.relativeTimeFrame +} + +func expandTimeRelativeFilters(m map[string]interface{}) *alerts.AlertFilters { + filters := expandCommonAlertFilter(m) + filters.FilterType = alerts.AlertFilters_FILTER_TYPE_TIME_RELATIVE + return filters +} + +func expandMetric(m map[string]interface{}) (*alertParams, diag.Diagnostics) { + condition, err := expandMetricCondition(m) + if err != nil { + return nil, diag.FromErr(err) + } + filters := expandMetricFilters(m) + + return &alertParams{ + Condition: condition, + Filters: filters, + }, nil +} + +func expandMetricCondition(m map[string]interface{}) (*alerts.AlertCondition, error) { + isPromQL := len(m["promql"].([]interface{})) > 0 + var metricType string + if isPromQL { + metricType = "promql" + } else { + metricType = "lucene" + } + + metricMap := (m[metricType].([]interface{}))[0].(map[string]interface{}) + text := wrapperspb.String(metricMap["search_query"].(string)) + conditionMap := extractConditionMap(metricMap) + threshold := wrapperspb.Double(conditionMap["threshold"].(float64)) + sampleThresholdPercentage := wrapperspb.UInt32(uint32(conditionMap["sample_threshold_percentage"].(int))) + nonNullPercentage := wrapperspb.UInt32(uint32(conditionMap["min_non_null_values_percentage"].(int))) + swapNullValues := 
wrapperspb.Bool(conditionMap["replace_missing_value_with_zero"].(bool)) + timeFrame := expandMetricTimeFrame(conditionMap["time_window"].(string)) + relatedExtendedData, err := expandRelatedExtendedData(conditionMap) + if err != nil { + return nil, err + } + + parameters := &alerts.ConditionParameters{ + Threshold: threshold, + Timeframe: timeFrame, + RelatedExtendedData: relatedExtendedData, + } + + if isPromQL { + parameters.MetricAlertPromqlParameters = &alerts.MetricAlertPromqlConditionParameters{ + PromqlText: text, + SampleThresholdPercentage: sampleThresholdPercentage, + NonNullPercentage: nonNullPercentage, + SwapNullValues: swapNullValues, + } + } else { + metricField := wrapperspb.String(conditionMap["metric_field"].(string)) + arithmeticOperator := expandArithmeticOperator(conditionMap["arithmetic_operator"].(string)) + arithmeticOperatorModifier := wrapperspb.UInt32(uint32(conditionMap["arithmetic_operator_modifier"].(int))) + groupBy := interfaceSliceToWrappedStringSlice(conditionMap["group_by"].([]interface{})) + parameters.GroupBy = groupBy + parameters.MetricAlertParameters = &alerts.MetricAlertConditionParameters{ + MetricSource: alerts.MetricAlertConditionParameters_METRIC_SOURCE_LOGS2METRICS_OR_UNSPECIFIED, + MetricField: metricField, + ArithmeticOperator: arithmeticOperator, + ArithmeticOperatorModifier: arithmeticOperatorModifier, + SampleThresholdPercentage: sampleThresholdPercentage, + NonNullPercentage: nonNullPercentage, + SwapNullValues: swapNullValues, + } + } + + return expandPromqlCondition(conditionMap, parameters) +} + +func expandArithmeticOperator(s string) alerts.MetricAlertConditionParameters_ArithmeticOperator { + arithmeticStr := alertSchemaArithmeticOperatorToProtoArithmetic[s] + arithmeticValue := alerts.MetricAlertConditionParameters_ArithmeticOperator_value[arithmeticStr] + return alerts.MetricAlertConditionParameters_ArithmeticOperator(arithmeticValue) +} + +func expandMetricFilters(m map[string]interface{}) *alerts.AlertFilters { + var text *wrapperspb.StringValue + if len(m["promql"].([]interface{})) == 0 { + luceneArr := m["lucene"].([]interface{}) + lucene := luceneArr[0].(map[string]interface{}) + text = wrapperspb.String(lucene["search_query"].(string)) + } + + return &alerts.AlertFilters{ + FilterType: alerts.AlertFilters_FILTER_TYPE_METRIC, + Text: text, + } +} + +func expandFlow(m map[string]interface{}) *alertParams { + stages := expandFlowStages(m["stage"]) + parameters := expandFlowParameters(m["group_by"]) + return &alertParams{ + Condition: &alerts.AlertCondition{ + Condition: &alerts.AlertCondition_Flow{ + Flow: &alerts.FlowCondition{ + Stages: stages, + Parameters: parameters, + }, + }, + }, + Filters: &alerts.AlertFilters{ + FilterType: alerts.AlertFilters_FILTER_TYPE_FLOW, + }, + } +} + +func expandFlowParameters(i interface{}) *alerts.ConditionParameters { + if i == nil { + return nil + } + groupBy := interfaceSliceToWrappedStringSlice(i.([]interface{})) + if len(groupBy) == 0 { + return nil + } + + return &alerts.ConditionParameters{ + GroupBy: groupBy, + } +} + +func expandFlowStages(i interface{}) []*alerts.FlowStage { + l := i.([]interface{}) + result := make([]*alerts.FlowStage, 0, len(l)) + for _, v := range l { + stage := expandFlowStage(v) + result = append(result, stage) + } + + return result +} + +func expandFlowStage(i interface{}) *alerts.FlowStage { + m := i.(map[string]interface{}) + groups := expandGroups(m["group"]) + timeFrame := expandFlowTimeFrame(m["time_window"]) + return &alerts.FlowStage{Groups: groups, 
Timeframe: timeFrame} +} + +func expandGroups(v interface{}) []*alerts.FlowGroup { + groups := v.([]interface{}) + result := make([]*alerts.FlowGroup, 0, len(groups)) + for _, g := range groups { + group := expandFlowGroup(g) + result = append(result, group) + } + + return result +} + +func expandFlowGroup(v interface{}) *alerts.FlowGroup { + m := v.(map[string]interface{}) + subAlerts := expandSubAlerts(m["sub_alerts"]) + operator := expandOperator(m["next_operator"]) + return &alerts.FlowGroup{ + Alerts: subAlerts, + NextOp: operator, + } +} + +func expandSubAlerts(v interface{}) *alerts.FlowAlerts { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil + } + raw := l[0] + m := raw.(map[string]interface{}) + + operator := expandOperator(m["operator"]) + values := expandInnerFlowAlerts(m["flow_alert"]) + + return &alerts.FlowAlerts{ + Op: operator, + Values: values, + } +} + +func expandInnerFlowAlerts(v interface{}) []*alerts.FlowAlert { + flowAlerts := v.([]interface{}) + result := make([]*alerts.FlowAlert, 0, len(flowAlerts)) + for _, fa := range flowAlerts { + flowAlert := expandInnerFlowAlert(fa) + result = append(result, flowAlert) + } + return result +} + +func expandInnerFlowAlert(v interface{}) *alerts.FlowAlert { + m := v.(map[string]interface{}) + return &alerts.FlowAlert{ + Id: wrapperspb.String(m["user_alert_id"].(string)), + Not: wrapperspb.Bool(m["not"].(bool)), + } +} + +func expandOperator(i interface{}) alerts.FlowOperator { + operatorStr := i.(string) + return alerts.FlowOperator(alerts.FlowOperator_value[operatorStr]) +} + +func expandFlowTimeFrame(i interface{}) *alerts.FlowTimeframe { + return &alerts.FlowTimeframe{ + Ms: wrapperspb.UInt32(uint32(expandTimeToMS(i))), + } +} + +func expandTracing(m map[string]interface{}) (*alertParams, *alerts.TracingAlert) { + tracingParams, _ := expandTracingParams(m) + tracingAlert := expandTracingAlert(m) + + return tracingParams, tracingAlert +} + +func expandTracingParams(m map[string]interface{}) (*alertParams, error) { + conditionMap := extractConditionMap(m) + condition, err := expandTracingCondition(conditionMap) + if err != nil { + return nil, err + } + filters := expandTracingFilter() + return &alertParams{ + Condition: condition, + Filters: filters, + }, nil +} + +func expandTracingCondition(m map[string]interface{}) (*alerts.AlertCondition, error) { + if immediately := m["immediately"]; immediately != nil && immediately.(bool) { + return &alerts.AlertCondition{ + Condition: &alerts.AlertCondition_Immediate{}, + }, nil + } else if moreThan := m["more_than"]; moreThan != nil && moreThan.(bool) { + parameters := expandTracingConditionParameters(m) + return &alerts.AlertCondition{ + Condition: &alerts.AlertCondition_MoreThan{ + MoreThan: &alerts.MoreThanCondition{Parameters: parameters}, + }, + }, nil + } + + return nil, fmt.Errorf("immediately or more_than have to be true") +} + +func expandTracingFilter() *alerts.AlertFilters { + return &alerts.AlertFilters{ + FilterType: alerts.AlertFilters_FILTER_TYPE_TRACING, + } +} + +func expandTracingAlert(m map[string]interface{}) *alerts.TracingAlert { + conditionLatency := uint32(m["latency_threshold_milliseconds"].(float64) * (float64)(time.Millisecond.Microseconds())) + applications := m["applications"].(*schema.Set).List() + subsystems := m["subsystems"].(*schema.Set).List() + services := m["services"].(*schema.Set).List() + fieldFilters := expandFiltersData(applications, subsystems, services) + tagFilters := expandTagFilters(m["tag_filter"]) + return 
&alerts.TracingAlert{ + ConditionLatency: conditionLatency, + FieldFilters: fieldFilters, + TagFilters: tagFilters, + } +} + +func expandFiltersData(applications, subsystems, services []interface{}) []*alerts.FilterData { + result := make([]*alerts.FilterData, 0) + if len(applications) != 0 { + result = append(result, expandSpecificFilter("applicationName", applications)) + } + if len(subsystems) != 0 { + result = append(result, expandSpecificFilter("subsystemName", subsystems)) + } + if len(services) != 0 { + result = append(result, expandSpecificFilter("serviceName", services)) + } + + return result +} + +func expandTagFilters(i interface{}) []*alerts.FilterData { + if i == nil { + return nil + } + l := i.(*schema.Set).List() + + result := make([]*alerts.FilterData, 0, len(l)) + for _, v := range l { + m := v.(map[string]interface{}) + field := m["field"].(string) + values := m["values"].(*schema.Set).List() + result = append(result, expandSpecificFilter(field, values)) + } + return result +} + +func expandSpecificFilter(filterName string, values []interface{}) *alerts.FilterData { + operatorToFilterValues := make(map[string]*alerts.Filters) + for _, val := range values { + operator, filterValue := expandFilter(val.(string)) + if _, ok := operatorToFilterValues[operator]; !ok { + operatorToFilterValues[operator] = new(alerts.Filters) + operatorToFilterValues[operator].Operator = operator + operatorToFilterValues[operator].Values = make([]string, 0) + } + operatorToFilterValues[operator].Values = append(operatorToFilterValues[operator].Values, filterValue) + } + + filterResult := make([]*alerts.Filters, 0, len(operatorToFilterValues)) + for _, filters := range operatorToFilterValues { + filterResult = append(filterResult, filters) + } + + return &alerts.FilterData{ + Field: filterName, + Filters: filterResult, + } +} + +func expandFilter(filterString string) (operator, filterValue string) { + operator, filterValue = "equals", filterString + if strings.HasPrefix(filterValue, "filter:") { + arr := strings.SplitN(filterValue, ":", 3) + operator, filterValue = arr[1], arr[2] + } + + return +} + +func extractConditionMap(m map[string]interface{}) map[string]interface{} { + return m["condition"].([]interface{})[0].(map[string]interface{}) +} + +func expandTimeFrame(s string) alerts.Timeframe { + protoTimeFrame := alertSchemaTimeFrameToProtoTimeFrame[s] + return alerts.Timeframe(alerts.Timeframe_value[protoTimeFrame]) +} + +func expandMetricTimeFrame(s string) alerts.Timeframe { + protoTimeFrame := alertSchemaMetricTimeFrameToMetricProtoTimeFrame[s] + return alerts.Timeframe(alerts.Timeframe_value[protoTimeFrame]) +} + +func expandMetadata(m map[string]interface{}) *alerts.AlertFilters_MetadataFilters { + categories := interfaceSliceToWrappedStringSlice(m["categories"].(*schema.Set).List()) + applications := interfaceSliceToWrappedStringSlice(m["applications"].(*schema.Set).List()) + subsystems := interfaceSliceToWrappedStringSlice(m["subsystems"].(*schema.Set).List()) + computers := interfaceSliceToWrappedStringSlice(m["computers"].(*schema.Set).List()) + classes := interfaceSliceToWrappedStringSlice(m["classes"].(*schema.Set).List()) + methods := interfaceSliceToWrappedStringSlice(m["methods"].(*schema.Set).List()) + ipAddresses := interfaceSliceToWrappedStringSlice(m["ip_addresses"].(*schema.Set).List()) + + return &alerts.AlertFilters_MetadataFilters{ + Categories: categories, + Applications: applications, + Subsystems: subsystems, + Computers: computers, + Classes: classes, + Methods: 
methods, + IpAddresses: ipAddresses, + } +} + +func expandAlertFiltersSeverities(v interface{}) []alerts.AlertFilters_LogSeverity { + s := interfaceSliceToStringSlice(v.([]interface{})) + result := make([]alerts.AlertFilters_LogSeverity, 0, len(s)) + for _, v := range s { + logSeverityStr := alertSchemaLogSeverityToProtoLogSeverity[v] + result = append(result, alerts.AlertFilters_LogSeverity( + alerts.AlertFilters_LogSeverity_value[logSeverityStr])) + } + + return result +} + +func expandNewValueTimeFrame(s string) alerts.Timeframe { + protoTimeFrame := alertSchemaNewValueTimeFrameToProtoTimeFrame[s] + return alerts.Timeframe(alerts.Timeframe_value[protoTimeFrame]) +} + +func expandUniqueValueTimeFrame(s string) alerts.Timeframe { + protoTimeFrame := alertSchemaUniqueCountTimeFrameToProtoTimeFrame[s] + return alerts.Timeframe(alerts.Timeframe_value[protoTimeFrame]) +} + +func expandTimeInDay(v interface{}) *alerts.Time { + timeArr := strings.Split(v.(string), ":") + hours := parseNumInt32(timeArr[0]) + minutes := parseNumInt32(timeArr[1]) + return &alerts.Time{ + Hours: hours, + Minutes: minutes, + } +} diff --git a/coralogix/resource_coralogix_alert_test.go b/coralogix/resource_coralogix_alert_test.go index b0688f0b..3ef8db30 100644 --- a/coralogix/resource_coralogix_alert_test.go +++ b/coralogix/resource_coralogix_alert_test.go @@ -1,665 +1,1305 @@ -// Copyright 2024 Coralogix Ltd. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// https://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- package coralogix import ( "context" "fmt" - "math" - "strconv" "testing" - "terraform-provider-coralogix/coralogix/clientset" - alertsv1 "terraform-provider-coralogix/coralogix/clientset/grpc/alerts/v2" - - "github.com/hashicorp/terraform-plugin-testing/helper/acctest" "github.com/hashicorp/terraform-plugin-testing/helper/resource" "github.com/hashicorp/terraform-plugin-testing/terraform" "google.golang.org/protobuf/types/known/wrapperspb" + "terraform-provider-coralogix/coralogix/clientset" + alertsv3 "terraform-provider-coralogix/coralogix/clientset/grpc/alerts/v3" ) var alertResourceName = "coralogix_alert.test" -func TestAccCoralogixResourceAlert_standard(t *testing.T) { - alert := standardAlertTestParams{ - alertCommonTestParams: *getRandomAlert(), - groupBy: []string{"EventType"}, - occurrencesThreshold: acctest.RandIntRange(1, 1000), - timeWindow: selectRandomlyFromSlice(alertValidTimeFrames), - deadmanRatio: selectRandomlyFromSlice(alertValidDeadmanRatioValues), - } - checks := extractStandardAlertChecks(alert) - - updatedAlert := standardAlertTestParams{ - alertCommonTestParams: *getRandomAlert(), - groupBy: []string{"EventType"}, - occurrencesThreshold: acctest.RandIntRange(1, 1000), - timeWindow: selectRandomlyFromSlice(alertValidTimeFrames), - deadmanRatio: selectRandomlyFromSlice(alertValidDeadmanRatioValues), - } - updatedAlertChecks := extractStandardAlertChecks(updatedAlert) - +func TestAccCoralogixResourceAlert_logs_immediate(t *testing.T) { resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - ProviderFactories: testAccProviderFactories, - CheckDestroy: testAccCheckAlertDestroy, + PreCheck: func() { testAccPreCheck(t) }, + ProtoV6ProviderFactories: testAccProtoV6ProviderFactories, + CheckDestroy: testAccCheckAlertDestroy, Steps: []resource.TestStep{ { - Config: testAccCoralogixResourceAlertStandard(&alert), - Check: resource.ComposeAggregateTestCheckFunc(checks...), + Config: testAccCoralogixResourceAlertLogsImmediate(), + Check: resource.ComposeAggregateTestCheckFunc( + resource.TestCheckResourceAttr(alertResourceName, "name", "logs immediate alert"), + resource.TestCheckResourceAttr(alertResourceName, "description", "Example of logs immediate alert from terraform"), + resource.TestCheckResourceAttr(alertResourceName, "priority", "P1"), + resource.TestCheckResourceAttr(alertResourceName, "labels.alert_type", "security"), + resource.TestCheckResourceAttr(alertResourceName, "labels.security_severity", "high"), + resource.TestCheckTypeSetElemNestedAttrs(alertResourceName, "notification_group.simple_target_settings.*", + map[string]string{ + "recipients.#": "1", + "recipients.0": "example@coralogix.com", + }), + resource.TestCheckResourceAttr(alertResourceName, "incidents_settings.notify_on", "Triggered and Resolved"), + resource.TestCheckResourceAttr(alertResourceName, "incidents_settings.retriggering_period.minutes", "1"), + resource.TestCheckResourceAttr(alertResourceName, "schedule.active_on.days_of_week.#", "2"), + resource.TestCheckTypeSetElemAttr(alertResourceName, "schedule.active_on.days_of_week.*", "Wednesday"), + resource.TestCheckTypeSetElemAttr(alertResourceName, "schedule.active_on.days_of_week.*", "Thursday"), + resource.TestCheckResourceAttr(alertResourceName, "schedule.active_on.start_time.hours", "8"), + resource.TestCheckResourceAttr(alertResourceName, "schedule.active_on.start_time.minutes", "30"), + resource.TestCheckResourceAttr(alertResourceName, "schedule.active_on.end_time.hours", "20"), + 
resource.TestCheckResourceAttr(alertResourceName, "schedule.active_on.end_time.minutes", "30"), + resource.TestCheckResourceAttr(alertResourceName, "type_definition.logs_immediate.logs_filter.lucene_filter.lucene_query", "message:\"error\""), + ), }, { ResourceName: alertResourceName, ImportState: true, }, { - Config: testAccCoralogixResourceAlertStandard(&updatedAlert), - Check: resource.ComposeAggregateTestCheckFunc(updatedAlertChecks...), + Config: testAccCoralogixResourceAlertLogsImmediateUpdated(), + Check: resource.ComposeAggregateTestCheckFunc( + resource.TestCheckResourceAttr(alertResourceName, "name", "logs immediate alert updated"), + resource.TestCheckResourceAttr(alertResourceName, "description", "Example of logs immediate alert from terraform updated"), + resource.TestCheckResourceAttr(alertResourceName, "priority", "P2"), + resource.TestCheckTypeSetElemNestedAttrs(alertResourceName, "notification_group.advanced_target_settings.*", + map[string]string{ + "retriggering_period.minutes": "10", + "notify_on": "Triggered Only", + "recipients.#": "1", + "recipients.0": "example@coralogix.com", + }, + ), + resource.TestCheckResourceAttr(alertResourceName, "incidents_settings.notify_on", "Triggered and Resolved"), + resource.TestCheckResourceAttr(alertResourceName, "incidents_settings.retriggering_period.minutes", "10"), + resource.TestCheckResourceAttr(alertResourceName, "schedule.active_on.days_of_week.#", "2"), + resource.TestCheckTypeSetElemAttr(alertResourceName, "schedule.active_on.days_of_week.*", "Wednesday"), + resource.TestCheckTypeSetElemAttr(alertResourceName, "schedule.active_on.days_of_week.*", "Thursday"), + resource.TestCheckResourceAttr(alertResourceName, "schedule.active_on.start_time.hours", "9"), + resource.TestCheckResourceAttr(alertResourceName, "schedule.active_on.start_time.minutes", "30"), + resource.TestCheckResourceAttr(alertResourceName, "schedule.active_on.end_time.hours", "21"), + resource.TestCheckResourceAttr(alertResourceName, "schedule.active_on.end_time.minutes", "30"), + resource.TestCheckResourceAttr(alertResourceName, "type_definition.logs_immediate.logs_filter.lucene_filter.lucene_query", "message:\"error\""), + ), }, }, }) } -func TestAccCoralogixResourceAlert_ratio(t *testing.T) { - alert := ratioAlertTestParams{ - alertCommonTestParams: *getRandomAlert(), - q2Severities: selectManyRandomlyFromSlice(alertValidLogSeverities), - timeWindow: selectRandomlyFromSlice(alertValidTimeFrames), - ratio: randFloat(), - groupBy: []string{"EventType"}, - q2SearchQuery: "remote_addr_enriched:/.*/", - ignoreInfinity: randBool(), - } - checks := extractRatioAlertChecks(alert) - - updatedAlert := ratioAlertTestParams{ - alertCommonTestParams: *getRandomAlert(), - q2Severities: selectManyRandomlyFromSlice(alertValidLogSeverities), - timeWindow: selectRandomlyFromSlice(alertValidTimeFrames), - ratio: randFloat(), - groupBy: []string{"EventType"}, - q2SearchQuery: "remote_addr_enriched:/.*/", - ignoreInfinity: randBool(), - } - updatedAlertChecks := extractRatioAlertChecks(updatedAlert) - +func TestAccCoralogixResourceAlert_logs_more_than(t *testing.T) { resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - ProviderFactories: testAccProviderFactories, - CheckDestroy: testAccCheckAlertDestroy, + PreCheck: func() { testAccPreCheck(t) }, + ProtoV6ProviderFactories: testAccProtoV6ProviderFactories, + CheckDestroy: testAccCheckAlertDestroy, Steps: []resource.TestStep{ { - Config: testAccCoralogixResourceAlertRatio(&alert), - Check: 
resource.ComposeAggregateTestCheckFunc(checks...), + Config: testAccCoralogixResourceAlertLogsMoreThan(), + Check: resource.ComposeAggregateTestCheckFunc( + resource.TestCheckResourceAttr(alertResourceName, "name", "logs-more-than alert example"), + resource.TestCheckResourceAttr(alertResourceName, "description", "Example of logs-more-than alert example from terraform"), + resource.TestCheckResourceAttr(alertResourceName, "priority", "P2"), + resource.TestCheckResourceAttr(alertResourceName, "labels.alert_type", "security"), + resource.TestCheckResourceAttr(alertResourceName, "labels.security_severity", "high"), + resource.TestCheckTypeSetElemNestedAttrs(alertResourceName, "notification_group.simple_target_settings.*", + map[string]string{ + "integration_id": "17730", + }, + ), + resource.TestCheckTypeSetElemNestedAttrs(alertResourceName, "notification_group.simple_target_settings.*", + map[string]string{ + "recipients.#": "1", + "recipients.0": "example@coralogix.com", + }, + ), + resource.TestCheckResourceAttr(alertResourceName, "incidents_settings.notify_on", "Triggered and Resolved"), + resource.TestCheckResourceAttr(alertResourceName, "incidents_settings.retriggering_period.minutes", "1"), + resource.TestCheckResourceAttr(alertResourceName, "schedule.active_on.days_of_week.#", "2"), + resource.TestCheckTypeSetElemAttr(alertResourceName, "schedule.active_on.days_of_week.*", "Wednesday"), + resource.TestCheckTypeSetElemAttr(alertResourceName, "schedule.active_on.days_of_week.*", "Thursday"), + resource.TestCheckResourceAttr(alertResourceName, "schedule.active_on.start_time.hours", "8"), + resource.TestCheckResourceAttr(alertResourceName, "schedule.active_on.start_time.minutes", "30"), + resource.TestCheckResourceAttr(alertResourceName, "schedule.active_on.end_time.hours", "20"), + resource.TestCheckResourceAttr(alertResourceName, "schedule.active_on.end_time.minutes", "30"), + resource.TestCheckResourceAttr(alertResourceName, "type_definition.logs_more_than.threshold", "2"), + resource.TestCheckResourceAttr(alertResourceName, "type_definition.logs_more_than.time_window.specific_value", "10_MINUTES"), + resource.TestCheckResourceAttr(alertResourceName, "type_definition.logs_more_than.evaluation_window", "Dynamic"), + resource.TestCheckResourceAttr(alertResourceName, "type_definition.logs_more_than.logs_filter.lucene_filter.lucene_query", "message:\"error\""), + resource.TestCheckTypeSetElemNestedAttrs(alertResourceName, "type_definition.logs_more_than.logs_filter.lucene_filter.label_filters.application_name.*", + map[string]string{ + "operation": "IS", + "value": "nginx", + }, + ), + resource.TestCheckTypeSetElemNestedAttrs(alertResourceName, "type_definition.logs_more_than.logs_filter.lucene_filter.label_filters.subsystem_name.*", + map[string]string{ + "operation": "IS", + "value": "subsystem-name", + }, + ), + resource.TestCheckTypeSetElemAttr(alertResourceName, "type_definition.logs_more_than.logs_filter.lucene_filter.label_filters.severities.*", "Warning"), + ), }, { ResourceName: alertResourceName, ImportState: true, }, { - Config: testAccCoralogixResourceAlertRatio(&updatedAlert), - Check: resource.ComposeAggregateTestCheckFunc(updatedAlertChecks...), + Config: testAccCoralogixResourceAlertLogsMoreThanUpdated(), + Check: resource.ComposeAggregateTestCheckFunc( + resource.TestCheckResourceAttr(alertResourceName, "name", "logs-more_-than alert example updated"), + resource.TestCheckResourceAttr(alertResourceName, "description", "Example of standard alert from terraform updated"), + 
resource.TestCheckResourceAttr(alertResourceName, "priority", "P3"), + resource.TestCheckResourceAttr(alertResourceName, "labels.alert_type", "security"), + resource.TestCheckResourceAttr(alertResourceName, "labels.security_severity", "low"), + resource.TestCheckResourceAttr(alertResourceName, "notification_group.simple_target_settings.#", "1"), + resource.TestCheckTypeSetElemNestedAttrs(alertResourceName, "notification_group.simple_target_settings.*", + map[string]string{ + "integration_id": "17730", + }, + ), + resource.TestCheckResourceAttr(alertResourceName, "incidents_settings.notify_on", "Triggered Only"), + resource.TestCheckResourceAttr(alertResourceName, "incidents_settings.retriggering_period.minutes", "10"), + resource.TestCheckResourceAttr(alertResourceName, "schedule.active_on.days_of_week.#", "2"), + resource.TestCheckTypeSetElemAttr(alertResourceName, "schedule.active_on.days_of_week.*", "Monday"), + resource.TestCheckTypeSetElemAttr(alertResourceName, "schedule.active_on.days_of_week.*", "Thursday"), + resource.TestCheckResourceAttr(alertResourceName, "schedule.active_on.start_time.hours", "8"), + resource.TestCheckResourceAttr(alertResourceName, "schedule.active_on.start_time.minutes", "30"), + resource.TestCheckResourceAttr(alertResourceName, "schedule.active_on.end_time.hours", "20"), + resource.TestCheckResourceAttr(alertResourceName, "schedule.active_on.end_time.minutes", "30"), + resource.TestCheckResourceAttr(alertResourceName, "type_definition.logs_more_than.threshold", "20"), + resource.TestCheckResourceAttr(alertResourceName, "type_definition.logs_more_than.time_window.specific_value", "2_HOURS"), + resource.TestCheckResourceAttr(alertResourceName, "type_definition.logs_more_than.evaluation_window", "Rolling"), + resource.TestCheckResourceAttr(alertResourceName, "type_definition.logs_more_than.logs_filter.lucene_filter.lucene_query", "message:\"error\""), + resource.TestCheckTypeSetElemNestedAttrs(alertResourceName, "type_definition.logs_more_than.logs_filter.lucene_filter.label_filters.application_name.*", + map[string]string{ + "operation": "IS", + "value": "nginx", + }, + ), + resource.TestCheckTypeSetElemNestedAttrs(alertResourceName, "type_definition.logs_more_than.logs_filter.lucene_filter.label_filters.application_name.*", + map[string]string{ + "operation": "NOT", + "value": "application_name", + }, + ), + ), }, }, }) } -func TestAccCoralogixResourceAlert_newValue(t *testing.T) { - alert := newValueAlertTestParams{ - alertCommonTestParams: *getRandomAlert(), - keyToTrack: "EventType", - timeWindow: selectRandomlyFromSlice(alertValidNewValueTimeFrames), - } - alert.notifyOn = "Triggered_only" - checks := extractNewValueChecks(alert) - - updatedAlert := newValueAlertTestParams{ - alertCommonTestParams: *getRandomAlert(), - keyToTrack: "EventType", - timeWindow: selectRandomlyFromSlice(alertValidNewValueTimeFrames), - } - updatedAlert.notifyOn = "Triggered_only" - updatedAlertChecks := extractNewValueChecks(updatedAlert) - +func TestAccCoralogixResourceAlert_logs_less_than(t *testing.T) { resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - ProviderFactories: testAccProviderFactories, - CheckDestroy: testAccCheckAlertDestroy, + PreCheck: func() { testAccPreCheck(t) }, + ProtoV6ProviderFactories: testAccProtoV6ProviderFactories, + CheckDestroy: testAccCheckAlertDestroy, Steps: []resource.TestStep{ { - Config: testAccCoralogixResourceAlertNewValue(&alert), - Check: resource.ComposeAggregateTestCheckFunc(checks...), + Config: 
testAccCoralogixResourceAlertLogsLessThan(), + Check: resource.ComposeAggregateTestCheckFunc( + resource.TestCheckResourceAttr(alertResourceName, "name", "logs-less-than alert example"), + resource.TestCheckResourceAttr(alertResourceName, "description", "Example of logs-less-than alert example from terraform"), + resource.TestCheckResourceAttr(alertResourceName, "priority", "P2"), + resource.TestCheckResourceAttr(alertResourceName, "labels.alert_type", "security"), + resource.TestCheckResourceAttr(alertResourceName, "labels.security_severity", "high"), + resource.TestCheckResourceAttr(alertResourceName, "notification_group.simple_target_settings.#", "2"), + resource.TestCheckTypeSetElemNestedAttrs(alertResourceName, "notification_group.simple_target_settings.*", + map[string]string{ + "integration_id": "17730", + }, + ), + resource.TestCheckTypeSetElemNestedAttrs(alertResourceName, "notification_group.simple_target_settings.*", + map[string]string{ + "recipients.#": "1", + "recipients.0": "example@coralogix.com", + }, + ), + resource.TestCheckResourceAttr(alertResourceName, "incidents_settings.notify_on", "Triggered and Resolved"), + resource.TestCheckResourceAttr(alertResourceName, "incidents_settings.retriggering_period.minutes", "1"), + resource.TestCheckResourceAttr(alertResourceName, "schedule.active_on.days_of_week.#", "2"), + resource.TestCheckTypeSetElemAttr(alertResourceName, "schedule.active_on.days_of_week.*", "Wednesday"), + resource.TestCheckTypeSetElemAttr(alertResourceName, "schedule.active_on.days_of_week.*", "Thursday"), + resource.TestCheckResourceAttr(alertResourceName, "schedule.active_on.start_time.hours", "8"), + resource.TestCheckResourceAttr(alertResourceName, "schedule.active_on.start_time.minutes", "30"), + resource.TestCheckResourceAttr(alertResourceName, "schedule.active_on.end_time.hours", "20"), + resource.TestCheckResourceAttr(alertResourceName, "schedule.active_on.end_time.minutes", "30"), + resource.TestCheckResourceAttr(alertResourceName, "type_definition.logs_less_than.threshold", "2"), + resource.TestCheckResourceAttr(alertResourceName, "type_definition.logs_less_than.time_window.specific_value", "10_MINUTES"), + resource.TestCheckResourceAttr(alertResourceName, "type_definition.logs_less_than.logs_filter.lucene_filter.lucene_query", "message:\"error\""), + resource.TestCheckTypeSetElemNestedAttrs(alertResourceName, "type_definition.logs_less_than.logs_filter.lucene_filter.label_filters.application_name.*", + map[string]string{ + "operation": "IS", + "value": "nginx", + }, + ), + resource.TestCheckTypeSetElemNestedAttrs(alertResourceName, "type_definition.logs_less_than.logs_filter.lucene_filter.label_filters.subsystem_name.*", + map[string]string{ + "operation": "IS", + "value": "subsystem-name", + }, + ), + resource.TestCheckTypeSetElemAttr(alertResourceName, "type_definition.logs_less_than.logs_filter.lucene_filter.label_filters.severities.*", "Warning"), + ), }, { ResourceName: alertResourceName, ImportState: true, }, { - Config: testAccCoralogixResourceAlertNewValue(&updatedAlert), - Check: resource.ComposeAggregateTestCheckFunc(updatedAlertChecks...), + Config: testAccCoralogixResourceAlertLogsLessThanUpdated(), + Check: resource.ComposeAggregateTestCheckFunc( + resource.TestCheckResourceAttr(alertResourceName, "name", "logs-less-than alert example updated"), + resource.TestCheckResourceAttr(alertResourceName, "description", "Example of logs-less-than alert example from terraform updated"), + resource.TestCheckResourceAttr(alertResourceName, 
"priority", "P3"), + resource.TestCheckResourceAttr(alertResourceName, "labels.alert_type", "security"), + resource.TestCheckResourceAttr(alertResourceName, "labels.security_severity", "low"), + resource.TestCheckResourceAttr(alertResourceName, "notification_group.advanced_target_settings.#", "1"), + resource.TestCheckTypeSetElemNestedAttrs(alertResourceName, "notification_group.advanced_target_settings.*", + map[string]string{ + "integration_id": "17730", + "notify_on": "Triggered Only", + "retriggering_period.minutes": "10", + }, + ), + resource.TestCheckResourceAttr(alertResourceName, "incidents_settings.notify_on", "Triggered Only"), + resource.TestCheckResourceAttr(alertResourceName, "incidents_settings.retriggering_period.minutes", "10"), + resource.TestCheckResourceAttr(alertResourceName, "schedule.active_on.days_of_week.#", "2"), + resource.TestCheckTypeSetElemAttr(alertResourceName, "schedule.active_on.days_of_week.*", "Monday"), + resource.TestCheckTypeSetElemAttr(alertResourceName, "schedule.active_on.days_of_week.*", "Thursday"), + resource.TestCheckResourceAttr(alertResourceName, "schedule.active_on.start_time.hours", "8"), + resource.TestCheckResourceAttr(alertResourceName, "schedule.active_on.start_time.minutes", "30"), + resource.TestCheckResourceAttr(alertResourceName, "schedule.active_on.end_time.hours", "20"), + resource.TestCheckResourceAttr(alertResourceName, "schedule.active_on.end_time.minutes", "30"), + resource.TestCheckResourceAttr(alertResourceName, "type_definition.logs_less_than.threshold", "20"), + resource.TestCheckResourceAttr(alertResourceName, "type_definition.logs_less_than.time_window.specific_value", "2_HOURS"), + resource.TestCheckResourceAttr(alertResourceName, "type_definition.logs_less_than.logs_filter.lucene_filter.lucene_query", "message:\"error\""), + resource.TestCheckTypeSetElemNestedAttrs(alertResourceName, "type_definition.logs_less_than.logs_filter.lucene_filter.label_filters.application_name.*", + map[string]string{ + "operation": "IS", + "value": "nginx", + }, + ), + resource.TestCheckTypeSetElemNestedAttrs(alertResourceName, "type_definition.logs_less_than.logs_filter.lucene_filter.label_filters.application_name.*", + map[string]string{ + "operation": "NOT", + "value": "application_name", + }, + ), + ), }, }, - }) + }, + ) } -func TestAccCoralogixResourceAlert_uniqueCount(t *testing.T) { - alert := uniqueCountAlertTestParams{ - alertCommonTestParams: *getRandomAlert(), - uniqueCountKey: "EventType", - timeWindow: selectRandomlyFromSlice(alertValidUniqueCountTimeFrames), - groupByKey: "metadata.name", - maxUniqueValues: 2, - maxUniqueValuesForGroupBy: 20, - } - checks := extractUniqueCountAlertChecks(alert) - - updatedAlert := uniqueCountAlertTestParams{ - alertCommonTestParams: *getRandomAlert(), - uniqueCountKey: "EventType", - timeWindow: selectRandomlyFromSlice(alertValidUniqueCountTimeFrames), - groupByKey: "metadata.name", - maxUniqueValues: 2, - maxUniqueValuesForGroupBy: 20, - } - updatedAlertChecks := extractUniqueCountAlertChecks(updatedAlert) - updatedAlertChecks = updatedAlertChecks[:len(updatedAlertChecks)-1] // remove group_by check - +func TestAccCoralogixResourceAlert_logs_more_than_usual(t *testing.T) { resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - ProviderFactories: testAccProviderFactories, - CheckDestroy: testAccCheckAlertDestroy, + PreCheck: func() { testAccPreCheck(t) }, + ProtoV6ProviderFactories: testAccProtoV6ProviderFactories, + CheckDestroy: testAccCheckAlertDestroy, Steps: 
[]resource.TestStep{ { - Config: testAccCoralogixResourceAlertUniqueCount(&alert), - Check: resource.ComposeAggregateTestCheckFunc(checks...), + Config: testAccCoralogixResourceAlertLogsMoreThanUsual(), + Check: resource.ComposeAggregateTestCheckFunc( + resource.TestCheckResourceAttr(alertResourceName, "name", "logs-more-than-usual alert example"), + resource.TestCheckResourceAttr(alertResourceName, "description", "Example of logs-more-than-usual alert from terraform"), + resource.TestCheckResourceAttr(alertResourceName, "priority", "P4"), + resource.TestCheckResourceAttr(alertResourceName, "labels.alert_type", "security"), + resource.TestCheckResourceAttr(alertResourceName, "labels.security_severity", "high"), + resource.TestCheckTypeSetElemNestedAttrs(alertResourceName, "notification_group.advanced_target_settings.*", + map[string]string{ + "retriggering_period.minutes": "1", + "notify_on": "Triggered and Resolved", + "recipients.#": "1", + "recipients.0": "example@coralogix.com", + }, + ), + resource.TestCheckTypeSetElemNestedAttrs(alertResourceName, "notification_group.advanced_target_settings.*", + map[string]string{ + "integration_id": "17730", + "notify_on": "Triggered and Resolved", + }, + ), + resource.TestCheckResourceAttr(alertResourceName, "incidents_settings.notify_on", "Triggered and Resolved"), + resource.TestCheckResourceAttr(alertResourceName, "incidents_settings.retriggering_period.minutes", "1"), + resource.TestCheckResourceAttr(alertResourceName, "schedule.active_on.days_of_week.#", "2"), + resource.TestCheckTypeSetElemAttr(alertResourceName, "schedule.active_on.days_of_week.*", "Wednesday"), + resource.TestCheckTypeSetElemAttr(alertResourceName, "schedule.active_on.days_of_week.*", "Thursday"), + resource.TestCheckResourceAttr(alertResourceName, "schedule.active_on.start_time.hours", "8"), + resource.TestCheckResourceAttr(alertResourceName, "schedule.active_on.start_time.minutes", "30"), + resource.TestCheckResourceAttr(alertResourceName, "schedule.active_on.end_time.hours", "20"), + resource.TestCheckResourceAttr(alertResourceName, "schedule.active_on.end_time.minutes", "30"), + resource.TestCheckTypeSetElemNestedAttrs(alertResourceName, "type_definition.logs_more_than_usual.logs_filter.lucene_filter.label_filters.application_name.*", + map[string]string{ + "operation": "IS", + "value": "nginx", + }, + ), + resource.TestCheckTypeSetElemNestedAttrs(alertResourceName, "type_definition.logs_more_than_usual.logs_filter.lucene_filter.label_filters.subsystem_name.*", + map[string]string{ + "operation": "IS", + "value": "subsystem-name", + }, + ), + resource.TestCheckResourceAttr(alertResourceName, "type_definition.logs_more_than_usual.logs_filter.lucene_filter.lucene_query", "message:\"error\""), + resource.TestCheckTypeSetElemAttr(alertResourceName, "type_definition.logs_more_than_usual.logs_filter.lucene_filter.label_filters.severities.*", "Warning"), + resource.TestCheckResourceAttr(alertResourceName, "type_definition.logs_more_than_usual.time_window.specific_value", "10_MINUTES"), + resource.TestCheckTypeSetElemAttr(alertResourceName, "type_definition.logs_more_than_usual.notification_payload_filter.*", "coralogix.metadata.sdkId"), + resource.TestCheckTypeSetElemAttr(alertResourceName, "type_definition.logs_more_than_usual.notification_payload_filter.*", "coralogix.metadata.sdkName"), + resource.TestCheckTypeSetElemAttr(alertResourceName, "type_definition.logs_more_than_usual.notification_payload_filter.*", "coralogix.metadata.sdkVersion"), + 
resource.TestCheckResourceAttr(alertResourceName, "type_definition.logs_more_than_usual.minimum_threshold", "2"), + ), }, { ResourceName: alertResourceName, ImportState: true, }, { - Config: testAccCoralogixResourceAlertUniqueCount(&updatedAlert), - Check: resource.ComposeAggregateTestCheckFunc(updatedAlertChecks...), + Config: testAccCoralogixResourceAlertLogsMoreThanUsualUpdated(), + Check: resource.ComposeAggregateTestCheckFunc( + resource.TestCheckResourceAttr(alertResourceName, "name", "logs-more-than-usual alert example updated"), + resource.TestCheckResourceAttr(alertResourceName, "description", "Example of logs-more-than-usual alert from terraform updated"), + resource.TestCheckResourceAttr(alertResourceName, "priority", "P1"), + resource.TestCheckResourceAttr(alertResourceName, "labels.#", "0"), + resource.TestCheckResourceAttr(alertResourceName, "notification_group.advanced_target_settings.#", "1"), + resource.TestCheckTypeSetElemNestedAttrs(alertResourceName, "notification_group.advanced_target_settings.*", + map[string]string{ + "integration_id": "17730", + "notify_on": "Triggered and Resolved", + "retriggering_period.minutes": "10", + }, + ), + resource.TestCheckResourceAttr(alertResourceName, "incidents_settings.notify_on", "Triggered and Resolved"), + resource.TestCheckResourceAttr(alertResourceName, "incidents_settings.retriggering_period.minutes", "1"), + resource.TestCheckResourceAttr(alertResourceName, "type_definition.logs_more_than_usual.logs_filter.lucene_filter.lucene_query", "message:\"updated_error\""), + resource.TestCheckTypeSetElemNestedAttrs(alertResourceName, "type_definition.logs_more_than_usual.logs_filter.lucene_filter.label_filters.application_name.*", + map[string]string{ + "operation": "IS", + "value": "nginx", + }, + ), + resource.TestCheckTypeSetElemNestedAttrs(alertResourceName, "type_definition.logs_more_than_usual.logs_filter.lucene_filter.label_filters.subsystem_name.*", + map[string]string{ + "operation": "IS", + "value": "subsystem-name", + }, + ), + resource.TestCheckTypeSetElemAttr(alertResourceName, "type_definition.logs_more_than_usual.logs_filter.lucene_filter.label_filters.severities.*", "Warning"), + resource.TestCheckTypeSetElemAttr(alertResourceName, "type_definition.logs_more_than_usual.logs_filter.lucene_filter.label_filters.severities.*", "Error"), + resource.TestCheckResourceAttr(alertResourceName, "type_definition.logs_more_than_usual.time_window.specific_value", "1_HOUR"), + ), }, }, - }) + }, + ) } -func TestAccCoralogixResourceAlert_timeRelative(t *testing.T) { - alert := timeRelativeAlertTestParams{ - alertCommonTestParams: *getRandomAlert(), - ratioThreshold: acctest.RandIntRange(0, 1000), - relativeTimeWindow: selectRandomlyFromSlice(alertValidRelativeTimeFrames), - groupBy: []string{"EventType"}, - ignoreInfinity: randBool(), - } - checks := extractTimeRelativeChecks(alert) - - updatedAlert := timeRelativeAlertTestParams{ - alertCommonTestParams: *getRandomAlert(), - ratioThreshold: acctest.RandIntRange(0, 1000), - relativeTimeWindow: selectRandomlyFromSlice(alertValidRelativeTimeFrames), - groupBy: []string{"EventType"}, - ignoreInfinity: randBool(), - } - updatedAlertChecks := extractTimeRelativeChecks(updatedAlert) - +func TestAccCoralogixResourceAlert_logs_less_than_usual(t *testing.T) { resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - ProviderFactories: testAccProviderFactories, - CheckDestroy: testAccCheckAlertDestroy, + PreCheck: func() { testAccPreCheck(t) }, + ProtoV6ProviderFactories: 
testAccProtoV6ProviderFactories, + CheckDestroy: testAccCheckAlertDestroy, Steps: []resource.TestStep{ { - Config: testAccCoralogixResourceAlertTimeRelative(&alert), - Check: resource.ComposeAggregateTestCheckFunc(checks...), + Config: testAccCoralogixResourceAlertLogsLessThanUsual(), + Check: resource.ComposeAggregateTestCheckFunc( + resource.TestCheckResourceAttr(alertResourceName, "name", "logs-less-than alert example"), + resource.TestCheckResourceAttr(alertResourceName, "description", "Example of logs-less-than alert example from terraform"), + resource.TestCheckResourceAttr(alertResourceName, "priority", "P2"), + resource.TestCheckResourceAttr(alertResourceName, "labels.alert_type", "security"), + resource.TestCheckResourceAttr(alertResourceName, "labels.security_severity", "high"), + resource.TestCheckTypeSetElemNestedAttrs(alertResourceName, "notification_group.simple_target_settings.*", + map[string]string{ + "recipients.#": "2", + "recipients.0": "example2@coralogix.com", + "recipients.1": "example@coralogix.com", + }, + ), + resource.TestCheckTypeSetElemNestedAttrs(alertResourceName, "notification_group.simple_target_settings.*", + map[string]string{ + "integration_id": "17730", + }, + ), + resource.TestCheckResourceAttr(alertResourceName, "incidents_settings.notify_on", "Triggered and Resolved"), + resource.TestCheckResourceAttr(alertResourceName, "incidents_settings.retriggering_period.minutes", "1"), + resource.TestCheckResourceAttr(alertResourceName, "schedule.active_on.days_of_week.#", "2"), + resource.TestCheckTypeSetElemAttr(alertResourceName, "schedule.active_on.days_of_week.*", "Wednesday"), + resource.TestCheckTypeSetElemAttr(alertResourceName, "schedule.active_on.days_of_week.*", "Thursday"), + resource.TestCheckResourceAttr(alertResourceName, "schedule.active_on.start_time.hours", "10"), + resource.TestCheckResourceAttr(alertResourceName, "schedule.active_on.start_time.minutes", "30"), + resource.TestCheckResourceAttr(alertResourceName, "schedule.active_on.end_time.hours", "20"), + resource.TestCheckResourceAttr(alertResourceName, "schedule.active_on.end_time.minutes", "30"), + resource.TestCheckResourceAttr(alertResourceName, "type_definition.logs_less_than.threshold", "2"), + resource.TestCheckResourceAttr(alertResourceName, "type_definition.logs_less_than.time_window.specific_value", "10_MINUTES"), + resource.TestCheckResourceAttr(alertResourceName, "type_definition.logs_less_than.logs_filter.lucene_filter.lucene_query", "message:\"error\""), + resource.TestCheckTypeSetElemNestedAttrs(alertResourceName, "type_definition.logs_less_than.logs_filter.lucene_filter.label_filters.application_name.*", + map[string]string{ + "operation": "NOT", + "value": "application_name", + }, + ), + resource.TestCheckTypeSetElemNestedAttrs(alertResourceName, "type_definition.logs_less_than.logs_filter.lucene_filter.label_filters.subsystem_name.*", + map[string]string{ + "operation": "STARTS_WITH", + "value": "subsystem-name", + }, + ), + resource.TestCheckTypeSetElemAttr(alertResourceName, "type_definition.logs_less_than.logs_filter.lucene_filter.label_filters.severities.*", "Warning"), + resource.TestCheckTypeSetElemAttr(alertResourceName, "type_definition.logs_less_than.logs_filter.lucene_filter.label_filters.severities.*", "Error"), + ), }, { ResourceName: alertResourceName, ImportState: true, }, { - Config: testAccCoralogixResourceAlertTimeRelative(&updatedAlert), - Check: resource.ComposeAggregateTestCheckFunc(updatedAlertChecks...), + Config: 
testAccCoralogixResourceAlertLogsLessThanUsualUpdated(), + Check: resource.ComposeAggregateTestCheckFunc( + resource.TestCheckResourceAttr(alertResourceName, "name", "logs-less-than alert example updated"), + resource.TestCheckResourceAttr(alertResourceName, "description", "Example of logs-less-than alert example from terraform updated"), + resource.TestCheckResourceAttr(alertResourceName, "priority", "P3"), + resource.TestCheckResourceAttr(alertResourceName, "notification_group.simple_target_settings.#", "1"), + resource.TestCheckTypeSetElemNestedAttrs(alertResourceName, "notification_group.simple_target_settings.*", + map[string]string{ + "integration_id": "17730", + }, + ), + resource.TestCheckResourceAttr(alertResourceName, "incidents_settings.notify_on", "Triggered Only"), + resource.TestCheckResourceAttr(alertResourceName, "incidents_settings.retriggering_period.minutes", "10"), + resource.TestCheckResourceAttr(alertResourceName, "schedule.active_on.days_of_week.#", "2"), + resource.TestCheckTypeSetElemAttr(alertResourceName, "schedule.active_on.days_of_week.*", "Monday"), + resource.TestCheckTypeSetElemAttr(alertResourceName, "schedule.active_on.days_of_week.*", "Thursday"), + resource.TestCheckResourceAttr(alertResourceName, "schedule.active_on.start_time.hours", "8"), + resource.TestCheckResourceAttr(alertResourceName, "schedule.active_on.start_time.minutes", "30"), + resource.TestCheckResourceAttr(alertResourceName, "schedule.active_on.end_time.hours", "20"), + resource.TestCheckResourceAttr(alertResourceName, "schedule.active_on.end_time.minutes", "30"), + resource.TestCheckResourceAttr(alertResourceName, "type_definition.logs_less_than.threshold", "20"), + resource.TestCheckResourceAttr(alertResourceName, "type_definition.logs_less_than.time_window.specific_value", "2_HOURS"), + resource.TestCheckResourceAttr(alertResourceName, "type_definition.logs_less_than.logs_filter.lucene_filter.lucene_query", "message:\"error\""), + resource.TestCheckTypeSetElemNestedAttrs(alertResourceName, "type_definition.logs_less_than.logs_filter.lucene_filter.label_filters.application_name.*", + map[string]string{ + "operation": "IS", + "value": "nginx", + }, + ), + resource.TestCheckTypeSetElemNestedAttrs(alertResourceName, "type_definition.logs_less_than.logs_filter.lucene_filter.label_filters.application_name.*", + map[string]string{ + "operation": "NOT", + "value": "application_name", + }, + ), + ), }, }, - }) + }, + ) } -func TestAccCoralogixResourceAlert_metricLucene(t *testing.T) { - alert := metricLuceneAlertTestParams{ - alertCommonTestParams: *getRandomAlert(), - groupBy: []string{"EventType"}, - metricField: "subsystem", - timeWindow: selectRandomlyFromSlice(alertValidMetricTimeFrames), - threshold: acctest.RandIntRange(0, 1000), - arithmeticOperator: selectRandomlyFromSlice(alertValidArithmeticOperators), - } - if alert.arithmeticOperator == "Percentile" { - alert.arithmeticOperatorModifier = acctest.RandIntRange(0, 100) - } - checks := extractLuceneMetricChecks(alert) - - updatedAlert := metricLuceneAlertTestParams{ - alertCommonTestParams: *getRandomAlert(), - groupBy: []string{"EventType"}, - metricField: "subsystem", - timeWindow: selectRandomlyFromSlice(alertValidMetricTimeFrames), - threshold: acctest.RandIntRange(0, 1000), - arithmeticOperator: selectRandomlyFromSlice(alertValidArithmeticOperators), - } - if updatedAlert.arithmeticOperator == "Percentile" { - updatedAlert.arithmeticOperatorModifier = acctest.RandIntRange(0, 100) - } - updatedAlertChecks := 
extractLuceneMetricChecks(updatedAlert) - +func TestAccCoralogixResourceAlert_logs_ratio_more_than(t *testing.T) { resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - ProviderFactories: testAccProviderFactories, - CheckDestroy: testAccCheckAlertDestroy, + PreCheck: func() { testAccPreCheck(t) }, + ProtoV6ProviderFactories: testAccProtoV6ProviderFactories, + CheckDestroy: testAccCheckAlertDestroy, Steps: []resource.TestStep{ { - Config: testAccCoralogixResourceAlertMetricLucene(&alert), - Check: resource.ComposeAggregateTestCheckFunc(checks...), + Config: testAccCoralogixResourceAlertLogsRatioMoreThan(), + Check: resource.ComposeAggregateTestCheckFunc( + resource.TestCheckResourceAttr(alertResourceName, "name", "logs-ratio-more-than alert example"), + resource.TestCheckResourceAttr(alertResourceName, "description", "Example of logs-ratio-more-than alert from terraform"), + resource.TestCheckResourceAttr(alertResourceName, "priority", "P1"), + resource.TestCheckResourceAttr(alertResourceName, "group_by.#", "2"), + resource.TestCheckTypeSetElemAttr(alertResourceName, "group_by.*", "coralogix.metadata.alert_id"), + resource.TestCheckTypeSetElemAttr(alertResourceName, "group_by.*", "coralogix.metadata.alert_name"), + resource.TestCheckTypeSetElemNestedAttrs(alertResourceName, "notification_group.simple_target_settings.*", + map[string]string{ + "recipients.#": "1", + "recipients.0": "example@coralogix.com", + }, + ), + resource.TestCheckResourceAttr(alertResourceName, "type_definition.logs_ratio_more_than.denominator_alias", "denominator"), + resource.TestCheckResourceAttr(alertResourceName, "type_definition.logs_ratio_more_than.numerator_alias", "numerator"), + resource.TestCheckResourceAttr(alertResourceName, "type_definition.logs_ratio_more_than.time_window.specific_value", "10_MINUTES"), + resource.TestCheckResourceAttr(alertResourceName, "type_definition.logs_ratio_more_than.threshold", "2"), + resource.TestCheckResourceAttr(alertResourceName, "type_definition.logs_ratio_more_than.denominator_logs_filter.lucene_filter.lucene_query", "mod_date:[20020101 TO 20030101]"), + resource.TestCheckTypeSetElemNestedAttrs(alertResourceName, "type_definition.logs_ratio_more_than.denominator_logs_filter.lucene_filter.label_filters.application_name.*", + map[string]string{ + "operation": "IS", + "value": "nginx", + }, + ), + resource.TestCheckTypeSetElemNestedAttrs(alertResourceName, "type_definition.logs_ratio_more_than.denominator_logs_filter.lucene_filter.label_filters.subsystem_name.*", + map[string]string{ + "operation": "IS", + "value": "subsystem-name", + }, + ), + resource.TestCheckTypeSetElemAttr(alertResourceName, "type_definition.logs_ratio_more_than.denominator_logs_filter.lucene_filter.label_filters.severities.*", "Warning"), + resource.TestCheckResourceAttr(alertResourceName, "type_definition.logs_ratio_more_than.numerator_logs_filter.lucene_filter.lucene_query", "mod_date:[20030101 TO 20040101]"), + resource.TestCheckTypeSetElemNestedAttrs(alertResourceName, "type_definition.logs_ratio_more_than.numerator_logs_filter.lucene_filter.label_filters.application_name.*", + map[string]string{ + "operation": "IS", + "value": "nginx", + }, + ), + resource.TestCheckTypeSetElemNestedAttrs(alertResourceName, "type_definition.logs_ratio_more_than.numerator_logs_filter.lucene_filter.label_filters.subsystem_name.*", + map[string]string{ + "operation": "IS", + "value": "subsystem-name", + }, + ), + resource.TestCheckTypeSetElemAttr(alertResourceName, 
"type_definition.logs_ratio_more_than.numerator_logs_filter.lucene_filter.label_filters.severities.*", "Error"), + resource.TestCheckResourceAttr(alertResourceName, "type_definition.logs_ratio_more_than.group_by_for", "Both"), + ), }, { ResourceName: alertResourceName, ImportState: true, }, { - Config: testAccCoralogixResourceAlertMetricLucene(&updatedAlert), - Check: resource.ComposeAggregateTestCheckFunc(updatedAlertChecks...), + Config: testAccCoralogixResourceAlertLogsRatioMoreThanUpdated(), + Check: resource.ComposeAggregateTestCheckFunc( + resource.TestCheckResourceAttr(alertResourceName, "name", "logs-ratio-more-than alert example updated"), + resource.TestCheckResourceAttr(alertResourceName, "description", "Example of logs-ratio-more-than alert from terraform updated"), + resource.TestCheckResourceAttr(alertResourceName, "priority", "P2"), + resource.TestCheckResourceAttr(alertResourceName, "group_by.#", "3"), + resource.TestCheckTypeSetElemAttr(alertResourceName, "group_by.*", "coralogix.metadata.alert_id"), + resource.TestCheckTypeSetElemAttr(alertResourceName, "group_by.*", "coralogix.metadata.alert_name"), + resource.TestCheckTypeSetElemAttr(alertResourceName, "group_by.*", "coralogix.metadata.alert_description"), + resource.TestCheckResourceAttr(alertResourceName, "notification_group.simple_target_settings.#", "2"), + resource.TestCheckTypeSetElemNestedAttrs(alertResourceName, "notification_group.simple_target_settings.*", + map[string]string{ + "integration_id": "17730", + }, + ), + resource.TestCheckTypeSetElemNestedAttrs(alertResourceName, "notification_group.simple_target_settings.*", + map[string]string{ + "recipients.#": "1", + "recipients.0": "example@coralogix.com", + }, + ), + resource.TestCheckResourceAttr(alertResourceName, "type_definition.logs_ratio_more_than.denominator_alias", "updated-denominator"), + resource.TestCheckResourceAttr(alertResourceName, "type_definition.logs_ratio_more_than.numerator_alias", "updated-numerator"), + resource.TestCheckResourceAttr(alertResourceName, "type_definition.logs_ratio_more_than.time_window.specific_value", "1_HOUR"), + resource.TestCheckResourceAttr(alertResourceName, "type_definition.logs_ratio_more_than.threshold", "120"), + resource.TestCheckResourceAttr(alertResourceName, "type_definition.logs_ratio_more_than.group_by_for", "Numerator Only"), + resource.TestCheckResourceAttr(alertResourceName, "type_definition.logs_ratio_more_than.denominator_logs_filter.lucene_filter.lucene_query", "mod_date:[20030101 TO 20040101]"), + resource.TestCheckTypeSetElemNestedAttrs(alertResourceName, "type_definition.logs_ratio_more_than.denominator_logs_filter.lucene_filter.label_filters.application_name.*", + map[string]string{ + "operation": "IS", + "value": "nginx", + }, + ), + resource.TestCheckTypeSetElemNestedAttrs(alertResourceName, "type_definition.logs_ratio_more_than.denominator_logs_filter.lucene_filter.label_filters.subsystem_name.*", + map[string]string{ + "operation": "IS", + "value": "subsystem-name", + }, + ), + resource.TestCheckResourceAttr(alertResourceName, "type_definition.logs_ratio_more_than.numerator_logs_filter.lucene_filter.lucene_query", "mod_date:[20040101 TO 20050101]"), + resource.TestCheckResourceAttr(alertResourceName, "type_definition.logs_ratio_more_than.numerator_logs_filter.lucene_filter.label_filters.application_name.#", "0"), + resource.TestCheckResourceAttr(alertResourceName, "type_definition.logs_ratio_more_than.numerator_logs_filter.lucene_filter.label_filters.severities.#", "0"), + 
resource.TestCheckResourceAttr(alertResourceName, "type_definition.logs_ratio_more_than.numerator_logs_filter.lucene_filter.label_filters.subsystem_name.#", "2"), + resource.TestCheckTypeSetElemNestedAttrs(alertResourceName, "type_definition.logs_ratio_more_than.numerator_logs_filter.lucene_filter.label_filters.subsystem_name.*", + map[string]string{ + "operation": "ENDS_WITH", + "value": "updated-subsystem-name", + }, + ), + resource.TestCheckTypeSetElemNestedAttrs(alertResourceName, "type_definition.logs_ratio_more_than.numerator_logs_filter.lucene_filter.label_filters.subsystem_name.*", + map[string]string{ + "operation": "NOT", + "value": "subsystem-name", + }, + ), + ), }, }, - }) + }, + ) } -func TestAccCoralogixResourceAlert_metricPromql(t *testing.T) { - alert := metricPromqlAlertTestParams{ - alertCommonTestParams: *getRandomAlert(), - threshold: acctest.RandIntRange(0, 1000), - nonNullPercentage: 10 * acctest.RandIntRange(0, 10), - timeWindow: selectRandomlyFromSlice(alertValidMetricTimeFrames), - condition: "less_than", - } - checks := extractMetricPromqlAlertChecks(alert) - - updatedAlert := metricPromqlAlertTestParams{ - alertCommonTestParams: *getRandomAlert(), - threshold: acctest.RandIntRange(0, 1000), - nonNullPercentage: 10 * acctest.RandIntRange(0, 10), - timeWindow: selectRandomlyFromSlice(alertValidMetricTimeFrames), - condition: "more_than", - } - updatedAlertChecks := extractMetricPromqlAlertChecks(updatedAlert) - +func TestAccCoralogixResourceAlert_logs_ratio_less_than(t *testing.T) { resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - ProviderFactories: testAccProviderFactories, - CheckDestroy: testAccCheckAlertDestroy, + PreCheck: func() { testAccPreCheck(t) }, + ProtoV6ProviderFactories: testAccProtoV6ProviderFactories, + CheckDestroy: testAccCheckAlertDestroy, Steps: []resource.TestStep{ { - Config: testAccCoralogixResourceAlertMetricPromql(&alert), - Check: resource.ComposeAggregateTestCheckFunc(checks...), + Config: testAccCoralogixResourceAlertLogsRatioLessThan(), + Check: resource.ComposeAggregateTestCheckFunc( + resource.TestCheckResourceAttr(alertResourceName, "name", "logs-ratio-less-than alert example"), + resource.TestCheckResourceAttr(alertResourceName, "description", "Example of logs-ratio-less-than alert from terraform"), + resource.TestCheckResourceAttr(alertResourceName, "priority", "P3"), + resource.TestCheckResourceAttr(alertResourceName, "group_by.#", "2"), + resource.TestCheckTypeSetElemAttr(alertResourceName, "group_by.*", "coralogix.metadata.alert_id"), + resource.TestCheckTypeSetElemAttr(alertResourceName, "group_by.*", "coralogix.metadata.alert_name"), + resource.TestCheckResourceAttr(alertResourceName, "type_definition.logs_ratio_less_than.denominator_alias", "denominator"), + resource.TestCheckResourceAttr(alertResourceName, "type_definition.logs_ratio_less_than.numerator_alias", "numerator"), + resource.TestCheckResourceAttr(alertResourceName, "type_definition.logs_ratio_less_than.time_window.specific_value", "10_MINUTES"), + resource.TestCheckResourceAttr(alertResourceName, "type_definition.logs_ratio_less_than.threshold", "2"), + resource.TestCheckResourceAttr(alertResourceName, "type_definition.logs_ratio_less_than.group_by_for", "Denominator Only"), + resource.TestCheckResourceAttr(alertResourceName, "type_definition.logs_ratio_less_than.undetected_values_management.trigger_undetected_values", "false"), + resource.TestCheckResourceAttr(alertResourceName, 
"type_definition.logs_ratio_less_than.undetected_values_management.auto_retire_timeframe", "Never"), + ), }, { ResourceName: alertResourceName, ImportState: true, }, { - Config: testAccCoralogixResourceAlertMetricPromql(&updatedAlert), - Check: resource.ComposeAggregateTestCheckFunc(updatedAlertChecks...), + Config: testAccCoralogixResourceAlertLogsRatioLessThanUpdated(), + Check: resource.ComposeAggregateTestCheckFunc( + resource.TestCheckResourceAttr(alertResourceName, "name", "logs-ratio-less-than alert example updated"), + resource.TestCheckResourceAttr(alertResourceName, "description", "Example of logs-ratio-less-than alert from terraform updated"), + resource.TestCheckResourceAttr(alertResourceName, "priority", "P2"), + resource.TestCheckResourceAttr(alertResourceName, "group_by.#", "0"), + resource.TestCheckResourceAttr(alertResourceName, "type_definition.logs_ratio_less_than.denominator_alias", "updated-denominator"), + resource.TestCheckResourceAttr(alertResourceName, "type_definition.logs_ratio_less_than.numerator_alias", "updated-numerator"), + resource.TestCheckResourceAttr(alertResourceName, "type_definition.logs_ratio_less_than.time_window.specific_value", "2_HOURS"), + resource.TestCheckResourceAttr(alertResourceName, "type_definition.logs_ratio_less_than.threshold", "20"), + resource.TestCheckResourceAttr(alertResourceName, "type_definition.logs_ratio_less_than.undetected_values_management.trigger_undetected_values", "true"), + resource.TestCheckResourceAttr(alertResourceName, "type_definition.logs_ratio_less_than.undetected_values_management.auto_retire_timeframe", "6_Hours"), + ), }, }, }) } -func TestAccCoralogixResourceAlert_tracing(t *testing.T) { - alert := tracingAlertTestParams{ - alertCommonTestParams: *getRandomAlert(), - conditionLatencyMs: math.Round(randFloat()*1000) / 1000, - occurrencesThreshold: acctest.RandIntRange(1, 10000), - timeWindow: selectRandomlyFromSlice(alertValidTimeFrames), - groupBy: []string{"EventType"}, - } - checks := extractTracingAlertChecks(alert) - - updatedAlert := tracingAlertTestParams{ - alertCommonTestParams: *getRandomAlert(), - conditionLatencyMs: math.Round(randFloat()*1000) / 1000, - occurrencesThreshold: acctest.RandIntRange(1, 10000), - timeWindow: selectRandomlyFromSlice(alertValidTimeFrames), - groupBy: []string{"EventType"}, - } - updatedAlertChecks := extractTracingAlertChecks(updatedAlert) - +func TestAccCoralogixResourceAlert_logs_new_value(t *testing.T) { resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - ProviderFactories: testAccProviderFactories, - CheckDestroy: testAccCheckAlertDestroy, + PreCheck: func() { testAccPreCheck(t) }, + ProtoV6ProviderFactories: testAccProtoV6ProviderFactories, + CheckDestroy: testAccCheckAlertDestroy, Steps: []resource.TestStep{ { - Config: testAccCoralogixResourceAlertTracing(&alert), - Check: resource.ComposeAggregateTestCheckFunc(checks...), + Config: testAccCoralogixResourceAlertLogsNewValue(), + Check: resource.ComposeAggregateTestCheckFunc( + resource.TestCheckResourceAttr(alertResourceName, "name", "logs-new-value alert example"), + resource.TestCheckResourceAttr(alertResourceName, "description", "Example of logs-new-value alert from terraform"), + resource.TestCheckResourceAttr(alertResourceName, "priority", "P2"), + resource.TestCheckResourceAttr(alertResourceName, "type_definition.logs_new_value.notification_payload_filter.#", "3"), + resource.TestCheckTypeSetElemAttr(alertResourceName, 
"type_definition.logs_new_value.notification_payload_filter.*", "coralogix.metadata.sdkId"), + resource.TestCheckTypeSetElemAttr(alertResourceName, "type_definition.logs_new_value.notification_payload_filter.*", "coralogix.metadata.sdkName"), + resource.TestCheckTypeSetElemAttr(alertResourceName, "type_definition.logs_new_value.notification_payload_filter.*", "coralogix.metadata.sdkVersion"), + resource.TestCheckResourceAttr(alertResourceName, "type_definition.logs_new_value.time_window.specific_value", "24_HOURS"), + resource.TestCheckResourceAttr(alertResourceName, "type_definition.logs_new_value.keypath_to_track", "remote_addr_geoip.country_name"), + ), }, { ResourceName: alertResourceName, ImportState: true, }, { - Config: testAccCoralogixResourceAlertTracing(&updatedAlert), - Check: resource.ComposeAggregateTestCheckFunc(updatedAlertChecks...), + Config: testAccCoralogixResourceAlertLogsNewValueUpdated(), + Check: resource.ComposeAggregateTestCheckFunc( + resource.TestCheckResourceAttr(alertResourceName, "name", "logs-new-value alert example updated"), + resource.TestCheckResourceAttr(alertResourceName, "description", "Example of logs-new-value alert from terraform updated"), + resource.TestCheckResourceAttr(alertResourceName, "priority", "P3"), + resource.TestCheckResourceAttr(alertResourceName, "type_definition.logs_new_value.notification_payload_filter.#", "0"), + resource.TestCheckResourceAttr(alertResourceName, "type_definition.logs_new_value.time_window.specific_value", "12_HOURS"), + resource.TestCheckResourceAttr(alertResourceName, "type_definition.logs_new_value.keypath_to_track", "remote_addr_geoip.city_name"), + ), }, }, - }) + }, + ) } -func TestAccCoralogixResourceAlert_flow(t *testing.T) { - resourceName := "coralogix_alert.test" - - alert := flowAlertTestParams{ - name: acctest.RandomWithPrefix("tf-acc-test"), - description: acctest.RandomWithPrefix("tf-acc-test"), - emailRecipients: []string{"user@example.com"}, - webhookID: "10761", - severity: selectRandomlyFromSlice(alertValidSeverities), - activeWhen: randActiveWhen(), - notifyEveryMin: acctest.RandIntRange(1500 /*to avoid notify_every < condition.0.time_window*/, 3600), - notifyOn: "Triggered_only", - } - checks := extractFlowAlertChecks(alert) - - updatedAlert := flowAlertTestParams{ - name: acctest.RandomWithPrefix("tf-acc-test"), - description: acctest.RandomWithPrefix("tf-acc-test"), - emailRecipients: []string{"user@example.com"}, - webhookID: "10761", - severity: selectRandomlyFromSlice(alertValidSeverities), - activeWhen: randActiveWhen(), - notifyEveryMin: acctest.RandIntRange(1500 /*to avoid notify_every < condition.0.time_window*/, 3600), - notifyOn: "Triggered_only", - } - updatedAlertChecks := extractFlowAlertChecks(updatedAlert) - +func TestAccCoralogixResourceAlert_logs_unique_count(t *testing.T) { resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - ProviderFactories: testAccProviderFactories, - CheckDestroy: testAccCheckAlertDestroy, + PreCheck: func() { testAccPreCheck(t) }, + ProtoV6ProviderFactories: testAccProtoV6ProviderFactories, + CheckDestroy: testAccCheckAlertDestroy, Steps: []resource.TestStep{ { - Config: testAccCoralogixResourceAlertFLow(&alert), - Check: resource.ComposeAggregateTestCheckFunc(checks...), + Config: testAccCoralogixResourceAlertLogsUniqueCount(), + Check: resource.ComposeAggregateTestCheckFunc( + resource.TestCheckResourceAttr(alertResourceName, "name", "logs-unique-count alert example"), + resource.TestCheckResourceAttr(alertResourceName, 
"description", "Example of logs-unique-count alert from terraform"), + resource.TestCheckResourceAttr(alertResourceName, "priority", "P2"), + resource.TestCheckResourceAttr(alertResourceName, "group_by.#", "1"), + resource.TestCheckTypeSetElemAttr(alertResourceName, "group_by.*", "remote_addr_geoip.city_name"), + resource.TestCheckResourceAttr(alertResourceName, "type_definition.logs_unique_count.unique_count_keypath", "remote_addr_geoip.country_name"), + resource.TestCheckResourceAttr(alertResourceName, "type_definition.logs_unique_count.max_unique_count", "2"), + resource.TestCheckResourceAttr(alertResourceName, "type_definition.logs_unique_count.time_window.specific_value", "5_MINUTES"), + resource.TestCheckResourceAttr(alertResourceName, "type_definition.logs_unique_count.max_unique_count_per_group_by_key", "500"), + ), }, { - ResourceName: resourceName, + ResourceName: alertResourceName, ImportState: true, }, { - Config: testAccCoralogixResourceAlertFLow(&updatedAlert), - Check: resource.ComposeAggregateTestCheckFunc(updatedAlertChecks...), + Config: testAccCoralogixResourceAlertLogsUniqueCountUpdated(), + Check: resource.ComposeAggregateTestCheckFunc( + resource.TestCheckResourceAttr(alertResourceName, "name", "logs-unique-count alert example updated"), + resource.TestCheckResourceAttr(alertResourceName, "description", "Example of logs-unique-count alert from terraform updated"), + resource.TestCheckResourceAttr(alertResourceName, "priority", "P2"), + resource.TestCheckResourceAttr(alertResourceName, "group_by.#", "0"), + resource.TestCheckResourceAttr(alertResourceName, "type_definition.logs_unique_count.unique_count_keypath", "remote_addr_geoip.city_name"), + resource.TestCheckResourceAttr(alertResourceName, "type_definition.logs_unique_count.max_unique_count", "5"), + resource.TestCheckResourceAttr(alertResourceName, "type_definition.logs_unique_count.time_window.specific_value", "20_MINUTES"), + ), }, }, - }) + }, + ) } -func getRandomAlert() *alertCommonTestParams { - return &alertCommonTestParams{ - name: acctest.RandomWithPrefix("tf-acc-test"), - description: acctest.RandomWithPrefix("tf-acc-test"), - webhookID: "10761", - emailRecipients: []string{"user@example.com"}, - searchQuery: "remote_addr_enriched:/.*/", - severity: selectRandomlyFromSlice(alertValidSeverities), - activeWhen: randActiveWhen(), - notifyEveryMin: acctest.RandIntRange(2160 /*to avoid notify_every < condition.0.time_window*/, 3600), - notifyOn: selectRandomlyFromSlice(validNotifyOn), - alertFilters: alertFilters{ - severities: selectManyRandomlyFromSlice(alertValidLogSeverities), +func TestAccCoralogixResourceAlert_logs_time_relative_more_than(t *testing.T) { + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + ProtoV6ProviderFactories: testAccProtoV6ProviderFactories, + CheckDestroy: testAccCheckAlertDestroy, + Steps: []resource.TestStep{ + { + Config: testAccCoralogixResourceAlertLogsTimeRelativeMoreThan(), + Check: resource.ComposeAggregateTestCheckFunc( + resource.TestCheckResourceAttr(alertResourceName, "name", "logs-time-relative-more-than alert example"), + resource.TestCheckResourceAttr(alertResourceName, "description", "Example of logs-time-relative-more-than alert from terraform"), + resource.TestCheckResourceAttr(alertResourceName, "priority", "P4"), + resource.TestCheckResourceAttr(alertResourceName, "type_definition.logs_time_relative_more_than.threshold", "10"), + resource.TestCheckResourceAttr(alertResourceName, 
"type_definition.logs_time_relative_more_than.compared_to", "Same Hour Yesterday"), + resource.TestCheckResourceAttr(alertResourceName, "type_definition.logs_time_relative_more_than.ignore_infinity", "true"), + ), + }, + { + ResourceName: alertResourceName, + ImportState: true, + }, + { + Config: testAccCoralogixResourceAlertLogsTimeRelativeMoreThanUpdated(), + Check: resource.ComposeAggregateTestCheckFunc( + resource.TestCheckResourceAttr(alertResourceName, "name", "logs-time-relative-more-than alert example updated"), + resource.TestCheckResourceAttr(alertResourceName, "description", "Example of logs-time-relative-more-than alert from terraform updated"), + resource.TestCheckResourceAttr(alertResourceName, "priority", "P3"), + resource.TestCheckResourceAttr(alertResourceName, "type_definition.logs_time_relative_more_than.threshold", "50"), + resource.TestCheckResourceAttr(alertResourceName, "type_definition.logs_time_relative_more_than.compared_to", "Same Day Last Week"), + resource.TestCheckResourceAttr(alertResourceName, "type_definition.logs_time_relative_more_than.ignore_infinity", "false"), + ), + }, }, - } + }, + ) } -func extractStandardAlertChecks(alert standardAlertTestParams) []resource.TestCheckFunc { - checks := extractCommonChecks(&alert.alertCommonTestParams, "standard") - checks = append(checks, - resource.TestCheckResourceAttr(alertResourceName, "meta_labels.alert_type", "security"), - resource.TestCheckResourceAttr(alertResourceName, "meta_labels.security_severity", "high"), - resource.TestCheckResourceAttr(alertResourceName, "standard.0.condition.0.threshold", strconv.Itoa(alert.occurrencesThreshold)), - resource.TestCheckResourceAttr(alertResourceName, "standard.0.condition.0.time_window", alert.timeWindow), - resource.TestCheckResourceAttr(alertResourceName, "standard.0.condition.0.group_by.0", alert.groupBy[0]), - resource.TestCheckResourceAttr(alertResourceName, "standard.0.condition.0.less_than", "true"), - resource.TestCheckResourceAttr(alertResourceName, "standard.0.condition.0.manage_undetected_values.0.enable_triggering_on_undetected_values", "true"), - resource.TestCheckResourceAttr(alertResourceName, "standard.0.condition.0.manage_undetected_values.0.auto_retire_ratio", alert.deadmanRatio), - ) - return checks -} - -func extractRatioAlertChecks(alert ratioAlertTestParams) []resource.TestCheckFunc { - checks := extractCommonChecks(&alert.alertCommonTestParams, "ratio.0.query_1") - checks = append(checks, - resource.TestCheckResourceAttr(alertResourceName, "ratio.0.query_2.0.search_query", alert.q2SearchQuery), - resource.TestCheckResourceAttr(alertResourceName, "ratio.0.condition.0.more_than", "true"), - resource.TestCheckResourceAttr(alertResourceName, "ratio.0.condition.0.ratio_threshold", fmt.Sprintf("%f", alert.ratio)), - resource.TestCheckResourceAttr(alertResourceName, "ratio.0.condition.0.time_window", alert.timeWindow), - resource.TestCheckResourceAttr(alertResourceName, "ratio.0.condition.0.group_by.0", alert.groupBy[0]), - resource.TestCheckResourceAttr(alertResourceName, "ratio.0.condition.0.group_by_q1", "true"), - resource.TestCheckResourceAttr(alertResourceName, "ratio.0.condition.0.ignore_infinity", fmt.Sprintf("%t", alert.ignoreInfinity)), +func TestAccCoralogixResourceAlert_logs_time_relative_less_than(t *testing.T) { + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + ProtoV6ProviderFactories: testAccProtoV6ProviderFactories, + CheckDestroy: testAccCheckAlertDestroy, + Steps: []resource.TestStep{ + { + Config: 
testAccCoralogixResourceAlertLogsTimeRelativeLessThan(), + Check: resource.ComposeAggregateTestCheckFunc( + resource.TestCheckResourceAttr(alertResourceName, "name", "logs-time-relative-more-than alert example"), + resource.TestCheckResourceAttr(alertResourceName, "description", "Example of logs-time-relative-more-than alert from terraform"), + resource.TestCheckResourceAttr(alertResourceName, "priority", "P4"), + resource.TestCheckResourceAttr(alertResourceName, "type_definition.logs_time_relative_less_than.threshold", "10"), + resource.TestCheckResourceAttr(alertResourceName, "type_definition.logs_time_relative_less_than.compared_to", "Same Hour Yesterday"), + resource.TestCheckResourceAttr(alertResourceName, "type_definition.logs_time_relative_less_than.ignore_infinity", "true"), + ), + }, + { + ResourceName: alertResourceName, + ImportState: true, + }, + { + Config: testAccCoralogixResourceAlertLogsTimeRelativeLessThanUpdated(), + Check: resource.ComposeAggregateTestCheckFunc( + resource.TestCheckResourceAttr(alertResourceName, "name", "logs-time-relative-more-than alert example updated"), + resource.TestCheckResourceAttr(alertResourceName, "description", "Example of logs-time-relative-more-than alert from terraform updated"), + resource.TestCheckResourceAttr(alertResourceName, "priority", "P3"), + resource.TestCheckResourceAttr(alertResourceName, "type_definition.logs_time_relative_less_than.threshold", "50"), + resource.TestCheckResourceAttr(alertResourceName, "type_definition.logs_time_relative_less_than.compared_to", "Same Day Last Week"), + resource.TestCheckResourceAttr(alertResourceName, "type_definition.logs_time_relative_less_than.ignore_infinity", "false"), + resource.TestCheckResourceAttr(alertResourceName, "type_definition.logs_time_relative_less_than.undetected_values_management.trigger_undetected_values", "true"), + resource.TestCheckResourceAttr(alertResourceName, "type_definition.logs_time_relative_less_than.undetected_values_management.auto_retire_timeframe", "6_Hours"), + ), + }, + }, + }, ) - checks = appendSeveritiesCheck(checks, alert.alertFilters.severities, "ratio.0.query_2") - - return checks } -func extractNewValueChecks(alert newValueAlertTestParams) []resource.TestCheckFunc { - checks := extractCommonChecks(&alert.alertCommonTestParams, "new_value") - checks = append(checks, - resource.TestCheckResourceAttr(alertResourceName, "new_value.0.condition.0.key_to_track", alert.keyToTrack), - resource.TestCheckResourceAttr(alertResourceName, "new_value.0.condition.0.time_window", alert.timeWindow), +func TestAccCoralogixResourceAlert_metric_more_than(t *testing.T) { + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + ProtoV6ProviderFactories: testAccProtoV6ProviderFactories, + CheckDestroy: testAccCheckAlertDestroy, + Steps: []resource.TestStep{ + { + Config: testAccCoralogixResourceAlertMetricMoreThan(), + Check: resource.ComposeAggregateTestCheckFunc( + resource.TestCheckResourceAttr(alertResourceName, "name", "metric-more-than alert example"), + resource.TestCheckResourceAttr(alertResourceName, "description", "Example of metric-more-than alert from terraform"), + resource.TestCheckResourceAttr(alertResourceName, "priority", "P3"), + resource.TestCheckResourceAttr(alertResourceName, "type_definition.metric_more_than.metric_filter.promql", "sum(rate(http_requests_total{job=\"api-server\"}[5m])) by (status)"), + resource.TestCheckResourceAttr(alertResourceName, "type_definition.metric_more_than.threshold", "2"), + 
resource.TestCheckResourceAttr(alertResourceName, "type_definition.metric_more_than.for_over_pct", "10"),
+					resource.TestCheckResourceAttr(alertResourceName, "type_definition.metric_more_than.of_the_last.specific_value", "10_MINUTES"),
+					resource.TestCheckResourceAttr(alertResourceName, "type_definition.metric_more_than.missing_values.min_non_null_values_pct", "50"),
+				),
+			},
+			{
+				ResourceName: alertResourceName,
+				ImportState:  true,
+			},
+			{
+				Config: testAccCoralogixResourceAlertMetricMoreThanUpdated(),
+				Check: resource.ComposeAggregateTestCheckFunc(
+					resource.TestCheckResourceAttr(alertResourceName, "name", "metric-more-than alert example updated"),
+					resource.TestCheckResourceAttr(alertResourceName, "description", "Example of metric-more-than alert from terraform updated"),
+					resource.TestCheckResourceAttr(alertResourceName, "priority", "P4"),
+					resource.TestCheckResourceAttr(alertResourceName, "type_definition.metric_more_than.metric_filter.promql", "sum(rate(http_requests_total{job=\"api-server\"}[5m])) by (status)"),
+					resource.TestCheckResourceAttr(alertResourceName, "type_definition.metric_more_than.threshold", "10"),
+					resource.TestCheckResourceAttr(alertResourceName, "type_definition.metric_more_than.for_over_pct", "15"),
+					resource.TestCheckResourceAttr(alertResourceName, "type_definition.metric_more_than.of_the_last.specific_value", "1_HOUR"),
+					resource.TestCheckResourceAttr(alertResourceName, "type_definition.metric_more_than.missing_values.replace_with_zero", "true"),
+				),
+			},
+		},
+	},
	)
-}
-func extractUniqueCountAlertChecks(alert uniqueCountAlertTestParams) []resource.TestCheckFunc {
-	checks := extractCommonChecks(&alert.alertCommonTestParams, "unique_count")
-	checks = append(checks,
-		resource.TestCheckResourceAttr(alertResourceName, "unique_count.0.condition.0.unique_count_key", alert.uniqueCountKey),
-		resource.TestCheckResourceAttr(alertResourceName, "unique_count.0.condition.0.unique_count_key", alert.uniqueCountKey),
-		resource.TestCheckResourceAttr(alertResourceName, "unique_count.0.condition.0.time_window", alert.timeWindow),
-		resource.TestCheckResourceAttr(alertResourceName, "unique_count.0.condition.0.max_unique_values", strconv.Itoa(alert.maxUniqueValues)),
-		resource.TestCheckResourceAttr(alertResourceName, "unique_count.0.condition.0.max_unique_values_for_group_by", strconv.Itoa(alert.maxUniqueValuesForGroupBy)),
+func TestAccCoralogixResourceAlert_metric_less_than(t *testing.T) {
+	resource.Test(t, resource.TestCase{
+		PreCheck:                 func() { testAccPreCheck(t) },
+		ProtoV6ProviderFactories: testAccProtoV6ProviderFactories,
+		CheckDestroy:             testAccCheckAlertDestroy,
+		Steps: []resource.TestStep{
+			{
+				Config: testAccCoralogixResourceAlertMetricLessThan(),
+				Check: resource.ComposeAggregateTestCheckFunc(
+					resource.TestCheckResourceAttr(alertResourceName, "name", "metric-less-than alert example"),
+					resource.TestCheckResourceAttr(alertResourceName, "description", "Example of metric-less-than alert from terraform"),
+					resource.TestCheckResourceAttr(alertResourceName, "priority", "P4"),
+					resource.TestCheckResourceAttr(alertResourceName, "type_definition.metric_less_than.metric_filter.promql", "sum(rate(http_requests_total{job=\"api-server\"}[5m])) by (status)"),
+					resource.TestCheckResourceAttr(alertResourceName, "type_definition.metric_less_than.threshold", "2"),
+					resource.TestCheckResourceAttr(alertResourceName, "type_definition.metric_less_than.for_over_pct", "10"),
+					resource.TestCheckResourceAttr(alertResourceName,
"type_definition.metric_less_than.of_the_last.specific_value", "10_MINUTES"), + resource.TestCheckResourceAttr(alertResourceName, "type_definition.metric_less_than.missing_values.replace_with_zero", "true"), + resource.TestCheckResourceAttr(alertResourceName, "type_definition.metric_less_than.undetected_values_management.trigger_undetected_values", "true"), + resource.TestCheckResourceAttr(alertResourceName, "type_definition.metric_less_than.undetected_values_management.auto_retire_timeframe", "5_Minutes"), + ), + }, + { + ResourceName: alertResourceName, + ImportState: true, + }, + { + Config: testAccCoralogixResourceAlertMetricLessThanUpdated(), + Check: resource.ComposeAggregateTestCheckFunc( + resource.TestCheckResourceAttr(alertResourceName, "name", "metric-less-than alert example updated"), + resource.TestCheckResourceAttr(alertResourceName, "description", "Example of metric-less-than alert from terraform updated"), + resource.TestCheckResourceAttr(alertResourceName, "priority", "P3"), + resource.TestCheckResourceAttr(alertResourceName, "type_definition.metric_less_than.metric_filter.promql", "sum(rate(http_requests_total{job=\"api-server\"}[5m])) by (status)"), + resource.TestCheckResourceAttr(alertResourceName, "type_definition.metric_less_than.threshold", "5"), + resource.TestCheckResourceAttr(alertResourceName, "type_definition.metric_less_than.for_over_pct", "15"), + resource.TestCheckResourceAttr(alertResourceName, "type_definition.metric_less_than.of_the_last.specific_value", "10_MINUTES"), + resource.TestCheckResourceAttr(alertResourceName, "type_definition.metric_less_than.missing_values.min_non_null_values_pct", "50"), + resource.TestCheckResourceAttr(alertResourceName, "type_definition.metric_less_than.undetected_values_management.trigger_undetected_values", "true"), + resource.TestCheckResourceAttr(alertResourceName, "type_definition.metric_less_than.undetected_values_management.auto_retire_timeframe", "5_Minutes"), + ), + }, + }, + }, ) - return checks } -func extractTimeRelativeChecks(alert timeRelativeAlertTestParams) []resource.TestCheckFunc { - checks := extractCommonChecks(&alert.alertCommonTestParams, "time_relative") - checks = append(checks, - resource.TestCheckResourceAttr(alertResourceName, "time_relative.0.condition.0.ratio_threshold", strconv.Itoa(alert.ratioThreshold)), - resource.TestCheckResourceAttr(alertResourceName, "time_relative.0.condition.0.relative_time_window", alert.relativeTimeWindow), - resource.TestCheckResourceAttr(alertResourceName, "time_relative.0.condition.0.group_by.0", alert.groupBy[0]), - resource.TestCheckResourceAttr(alertResourceName, "time_relative.0.condition.0.ignore_infinity", fmt.Sprintf("%t", alert.ignoreInfinity)), +func TestAccCoralogixResourceAlert_metric_less_than_usual(t *testing.T) { + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + ProtoV6ProviderFactories: testAccProtoV6ProviderFactories, + CheckDestroy: testAccCheckAlertDestroy, + Steps: []resource.TestStep{ + { + Config: testAccCoralogixResourceAlertMetricsLessThanUsual(), + Check: resource.ComposeAggregateTestCheckFunc( + resource.TestCheckResourceAttr(alertResourceName, "name", "metric-less-than-usual alert example"), + resource.TestCheckResourceAttr(alertResourceName, "description", "Example of metric-less-than-usual alert from terraform"), + resource.TestCheckResourceAttr(alertResourceName, "priority", "P1"), + resource.TestCheckResourceAttr(alertResourceName, "type_definition.metric_less_than_usual.metric_filter.promql", 
"sum(rate(http_requests_total{job=\"api-server\"}[5m])) by (status)"), + resource.TestCheckResourceAttr(alertResourceName, "type_definition.metric_less_than_usual.threshold", "20"), + resource.TestCheckResourceAttr(alertResourceName, "type_definition.metric_less_than_usual.for_over_pct", "10"), + resource.TestCheckResourceAttr(alertResourceName, "type_definition.metric_less_than_usual.of_the_last.specific_value", "12_HOURS"), + resource.TestCheckResourceAttr(alertResourceName, "type_definition.metric_less_than_usual.min_non_null_values_pct", "15"), + ), + }, + { + ResourceName: alertResourceName, + ImportState: true, + }, + { + Config: testAccCoralogixResourceAlertMetricsLessThanUsualUpdated(), + Check: resource.ComposeAggregateTestCheckFunc( + resource.TestCheckResourceAttr(alertResourceName, "name", "metric-less-than-usual alert example updated"), + resource.TestCheckResourceAttr(alertResourceName, "description", "Example of metric-less-than-usual alert from terraform updated"), + resource.TestCheckResourceAttr(alertResourceName, "priority", "P1"), + resource.TestCheckResourceAttr(alertResourceName, "type_definition.metric_less_than_usual.metric_filter.promql", "sum(rate(http_requests_total{job=\"api-server\"}[5m])) by (status)"), + resource.TestCheckResourceAttr(alertResourceName, "type_definition.metric_less_than_usual.threshold", "2"), + resource.TestCheckResourceAttr(alertResourceName, "type_definition.metric_less_than_usual.for_over_pct", "15"), + resource.TestCheckResourceAttr(alertResourceName, "type_definition.metric_less_than_usual.of_the_last.specific_value", "10_MINUTES"), + resource.TestCheckResourceAttr(alertResourceName, "type_definition.metric_less_than_usual.min_non_null_values_pct", "10"), + ), + }, + }, + }, ) +} - return checks -} - -func extractLuceneMetricChecks(alert metricLuceneAlertTestParams) []resource.TestCheckFunc { - checks := []resource.TestCheckFunc{ - resource.TestCheckResourceAttrSet(alertResourceName, "id"), - resource.TestCheckResourceAttr(alertResourceName, "enabled", "true"), - resource.TestCheckResourceAttr(alertResourceName, "name", alert.name), - resource.TestCheckResourceAttr(alertResourceName, "description", alert.description), - resource.TestCheckResourceAttr(alertResourceName, "severity", alert.severity), - resource.TestCheckTypeSetElemNestedAttrs(alertResourceName, "notifications_group.0.notification.*", - map[string]string{ - "integration_id": alert.webhookID, - }), - resource.TestCheckTypeSetElemNestedAttrs(alertResourceName, "notifications_group.0.notification.*", - map[string]string{ - "email_recipients.0": alert.emailRecipients[0], - }), - resource.TestCheckResourceAttr(alertResourceName, "metric.0.lucene.0.search_query", alert.searchQuery), - resource.TestCheckResourceAttr(alertResourceName, "metric.0.lucene.0.condition.0.metric_field", alert.metricField), - resource.TestCheckResourceAttr(alertResourceName, "metric.0.lucene.0.condition.0.arithmetic_operator", alert.arithmeticOperator), - resource.TestCheckResourceAttr(alertResourceName, "metric.0.lucene.0.condition.0.less_than", "true"), - resource.TestCheckResourceAttr(alertResourceName, "metric.0.lucene.0.condition.0.threshold", strconv.Itoa(alert.threshold)), - resource.TestCheckResourceAttr(alertResourceName, "metric.0.lucene.0.condition.0.arithmetic_operator_modifier", strconv.Itoa(alert.arithmeticOperatorModifier)), - resource.TestCheckResourceAttr(alertResourceName, "metric.0.lucene.0.condition.0.sample_threshold_percentage", strconv.Itoa(alert.sampleThresholdPercentage)), - 
resource.TestCheckResourceAttr(alertResourceName, "metric.0.lucene.0.condition.0.time_window", alert.timeWindow), - resource.TestCheckResourceAttr(alertResourceName, "metric.0.lucene.0.condition.0.group_by.0", alert.groupBy[0]), - resource.TestCheckResourceAttr(alertResourceName, "metric.0.lucene.0.condition.0.manage_undetected_values.0.enable_triggering_on_undetected_values", "false"), - } - checks = appendSchedulingChecks(checks, alert.daysOfWeek, alert.activityStarts, alert.activityEnds) - return checks -} - -func extractMetricPromqlAlertChecks(alert metricPromqlAlertTestParams) []resource.TestCheckFunc { - checks := []resource.TestCheckFunc{ - resource.TestCheckResourceAttrSet(alertResourceName, "id"), - resource.TestCheckResourceAttr(alertResourceName, "enabled", "true"), - resource.TestCheckResourceAttr(alertResourceName, "name", alert.name), - resource.TestCheckResourceAttr(alertResourceName, "description", alert.description), - resource.TestCheckResourceAttr(alertResourceName, "severity", alert.severity), - resource.TestCheckTypeSetElemNestedAttrs(alertResourceName, "notifications_group.0.notification.*", - map[string]string{ - "integration_id": alert.webhookID, - }), - resource.TestCheckTypeSetElemNestedAttrs(alertResourceName, "notifications_group.0.notification.*", - map[string]string{ - "email_recipients.0": alert.emailRecipients[0], - }), - resource.TestCheckResourceAttr(alertResourceName, "metric.0.promql.0.search_query", "http_requests_total{status!~\"4..\"}"), - resource.TestCheckResourceAttr(alertResourceName, "metric.0.promql.0.condition.0.threshold", strconv.Itoa(alert.threshold)), - resource.TestCheckResourceAttr(alertResourceName, "metric.0.promql.0.condition.0.sample_threshold_percentage", strconv.Itoa(alert.sampleThresholdPercentage)), - resource.TestCheckResourceAttr(alertResourceName, "metric.0.promql.0.condition.0.min_non_null_values_percentage", strconv.Itoa(alert.nonNullPercentage)), - resource.TestCheckResourceAttr(alertResourceName, "metric.0.promql.0.condition.0.time_window", alert.timeWindow), - } - if alert.condition == "less_than" { - checks = append(checks, - resource.TestCheckResourceAttr(alertResourceName, "metric.0.promql.0.condition.0.less_than", "true"), - resource.TestCheckResourceAttr(alertResourceName, "metric.0.promql.0.condition.0.manage_undetected_values.0.enable_triggering_on_undetected_values", "true"), - resource.TestCheckResourceAttr(alertResourceName, "metric.0.promql.0.condition.0.manage_undetected_values.0.auto_retire_ratio", "Never"), - ) - } else { - checks = append(checks, - resource.TestCheckResourceAttr(alertResourceName, "metric.0.promql.0.condition.0.more_than", "true"), - ) - } - checks = appendSchedulingChecks(checks, alert.daysOfWeek, alert.activityStarts, alert.activityEnds) - return checks -} - -func extractTracingAlertChecks(alert tracingAlertTestParams) []resource.TestCheckFunc { - checks := []resource.TestCheckFunc{ - resource.TestCheckResourceAttrSet(alertResourceName, "id"), - resource.TestCheckResourceAttr(alertResourceName, "enabled", "true"), - resource.TestCheckResourceAttr(alertResourceName, "name", alert.name), - resource.TestCheckResourceAttr(alertResourceName, "description", alert.description), - resource.TestCheckResourceAttr(alertResourceName, "severity", alert.severity), - resource.TestCheckTypeSetElemNestedAttrs(alertResourceName, "notifications_group.0.notification.*", - map[string]string{ - "integration_id": alert.webhookID, - }), - resource.TestCheckTypeSetElemNestedAttrs(alertResourceName, 
"notifications_group.0.notification.*", - map[string]string{ - "email_recipients.0": alert.emailRecipients[0], - }), - resource.TestCheckResourceAttr(alertResourceName, "tracing.0.latency_threshold_milliseconds", fmt.Sprintf("%.3f", alert.conditionLatencyMs)), - resource.TestCheckResourceAttr(alertResourceName, "tracing.0.condition.0.more_than", "true"), - resource.TestCheckResourceAttr(alertResourceName, "tracing.0.condition.0.time_window", alert.timeWindow), - resource.TestCheckResourceAttr(alertResourceName, "tracing.0.condition.0.threshold", strconv.Itoa(alert.occurrencesThreshold)), - resource.TestCheckResourceAttr(alertResourceName, "tracing.0.applications.0", "nginx"), - resource.TestCheckResourceAttr(alertResourceName, "tracing.0.subsystems.0", "subsystem-name"), - resource.TestCheckResourceAttr(alertResourceName, "tracing.0.tag_filter.0.field", "Status"), - resource.TestCheckTypeSetElemAttr(alertResourceName, "tracing.0.tag_filter.0.values.*", "filter:contains:400"), - resource.TestCheckTypeSetElemAttr(alertResourceName, "tracing.0.tag_filter.0.values.*", "500"), - } - checks = appendSchedulingChecks(checks, alert.daysOfWeek, alert.activityStarts, alert.activityEnds) - return checks -} - -func extractFlowAlertChecks(alert flowAlertTestParams) []resource.TestCheckFunc { - checks := []resource.TestCheckFunc{ - resource.TestCheckResourceAttrSet(alertResourceName, "id"), - resource.TestCheckResourceAttr(alertResourceName, "enabled", "true"), - resource.TestCheckResourceAttr(alertResourceName, "name", alert.name), - resource.TestCheckResourceAttr(alertResourceName, "description", alert.description), - resource.TestCheckResourceAttr(alertResourceName, "severity", alert.severity), - resource.TestCheckTypeSetElemNestedAttrs(alertResourceName, "notifications_group.0.notification.*", - map[string]string{ - "integration_id": alert.webhookID, - }), - resource.TestCheckTypeSetElemNestedAttrs(alertResourceName, "notifications_group.0.notification.*", - map[string]string{ - "email_recipients.0": alert.emailRecipients[0], - }), - resource.TestCheckResourceAttr(alertResourceName, "incident_settings.0.notify_on", alert.notifyOn), - resource.TestCheckResourceAttr(alertResourceName, "incident_settings.0.retriggering_period_minutes", strconv.Itoa(alert.notifyEveryMin)), - resource.TestCheckResourceAttr(alertResourceName, "flow.0.stage.0.group.0.sub_alerts.0.operator", "OR"), - resource.TestCheckResourceAttr(alertResourceName, "flow.0.stage.0.group.0.next_operator", "OR"), - resource.TestCheckResourceAttr(alertResourceName, "flow.0.stage.0.group.1.sub_alerts.0.operator", "AND"), - resource.TestCheckResourceAttr(alertResourceName, "flow.0.stage.0.group.1.sub_alerts.0.flow_alert.0.not", "true"), - resource.TestCheckResourceAttr(alertResourceName, "flow.0.stage.0.group.1.next_operator", "AND"), - resource.TestCheckResourceAttr(alertResourceName, "flow.0.stage.0.time_window.0.minutes", "20"), - resource.TestCheckResourceAttr(alertResourceName, "flow.0.group_by.0", "coralogix.metadata.sdkId"), - } - checks = appendSchedulingChecks(checks, alert.daysOfWeek, alert.activityStarts, alert.activityEnds) - return checks -} - -func extractCommonChecks(alert *alertCommonTestParams, alertType string) []resource.TestCheckFunc { - checks := []resource.TestCheckFunc{ - resource.TestCheckResourceAttrSet(alertResourceName, "id"), - resource.TestCheckResourceAttr(alertResourceName, "enabled", "true"), - resource.TestCheckResourceAttr(alertResourceName, "name", alert.name), - 
resource.TestCheckResourceAttr(alertResourceName, "description", alert.description), - resource.TestCheckResourceAttr(alertResourceName, "severity", alert.severity), - resource.TestCheckTypeSetElemNestedAttrs(alertResourceName, "notifications_group.0.notification.*", - map[string]string{ - "integration_id": alert.webhookID, - }), - resource.TestCheckTypeSetElemNestedAttrs(alertResourceName, "notifications_group.0.notification.*", - map[string]string{ - "email_recipients.0": alert.emailRecipients[0], - }), - resource.TestCheckResourceAttr(alertResourceName, "incident_settings.0.notify_on", alert.notifyOn), - resource.TestCheckResourceAttr(alertResourceName, "incident_settings.0.retriggering_period_minutes", strconv.Itoa(alert.notifyEveryMin)), - resource.TestCheckResourceAttr(alertResourceName, fmt.Sprintf("%s.0.search_query", alertType), alert.searchQuery), - } +func TestAccCoralogixResourceAlert_metric_more_than_usual(t *testing.T) { + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + ProtoV6ProviderFactories: testAccProtoV6ProviderFactories, + CheckDestroy: testAccCheckAlertDestroy, + Steps: []resource.TestStep{ + { + Config: testAccCoralogixResourceAlertMetricsMoreThanUsual(), + Check: resource.ComposeAggregateTestCheckFunc( + resource.TestCheckResourceAttr(alertResourceName, "name", "metric_more_than_usual alert example"), + resource.TestCheckResourceAttr(alertResourceName, "description", "Example of metric_more_than_usual alert from terraform"), + resource.TestCheckResourceAttr(alertResourceName, "priority", "P2"), + resource.TestCheckResourceAttr(alertResourceName, "type_definition.metric_more_than_usual.metric_filter.promql", "sum(rate(http_requests_total{job=\"api-server\"}[5m])) by (status)"), + resource.TestCheckResourceAttr(alertResourceName, "type_definition.metric_more_than_usual.threshold", "2"), + resource.TestCheckResourceAttr(alertResourceName, "type_definition.metric_more_than_usual.for_over_pct", "10"), + resource.TestCheckResourceAttr(alertResourceName, "type_definition.metric_more_than_usual.of_the_last.specific_value", "10_MINUTES"), + resource.TestCheckResourceAttr(alertResourceName, "type_definition.metric_more_than_usual.min_non_null_values_pct", "10"), + ), + }, + { + ResourceName: alertResourceName, + ImportState: true, + }, + { + Config: testAccCoralogixResourceAlertMetricsMoreThanUsualUpdated(), + Check: resource.ComposeAggregateTestCheckFunc( + resource.TestCheckResourceAttr(alertResourceName, "name", "metric_more_than_usual alert example updated"), + resource.TestCheckResourceAttr(alertResourceName, "description", "Example of metric_more_than_usual alert from terraform updated"), + resource.TestCheckResourceAttr(alertResourceName, "priority", "P3"), + resource.TestCheckResourceAttr(alertResourceName, "type_definition.metric_more_than_usual.metric_filter.promql", "sum(rate(http_requests_total{job=\"api-server\"}[5m])) by (status)"), + resource.TestCheckResourceAttr(alertResourceName, "type_definition.metric_more_than_usual.threshold", "20"), + resource.TestCheckResourceAttr(alertResourceName, "type_definition.metric_more_than_usual.for_over_pct", "10"), + resource.TestCheckResourceAttr(alertResourceName, "type_definition.metric_more_than_usual.of_the_last.specific_value", "10_MINUTES"), + resource.TestCheckResourceAttr(alertResourceName, "type_definition.metric_more_than_usual.min_non_null_values_pct", "10"), + ), + }, + }, + }, + ) +} - checks = appendSchedulingChecks(checks, alert.daysOfWeek, alert.activityStarts, 
alert.activityEnds) +func TestAccCoralogixResourceAlert_metric_less_than_or_equals(t *testing.T) { + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + ProtoV6ProviderFactories: testAccProtoV6ProviderFactories, + CheckDestroy: testAccCheckAlertDestroy, + Steps: []resource.TestStep{ + { + Config: testAccCoralogixResourceAlertMetricLessThanOrEquals(), + Check: resource.ComposeAggregateTestCheckFunc( + resource.TestCheckResourceAttr(alertResourceName, "name", "metric-less-than-or-equals alert example"), + resource.TestCheckResourceAttr(alertResourceName, "description", "Example of metric-less-than-or-equals alert from terraform"), + resource.TestCheckResourceAttr(alertResourceName, "priority", "P1"), + resource.TestCheckResourceAttr(alertResourceName, "type_definition.metric_less_than_or_equals.metric_filter.promql", "sum(rate(http_requests_total{job=\"api-server\"}[5m])) by (status)"), + resource.TestCheckResourceAttr(alertResourceName, "type_definition.metric_less_than_or_equals.threshold", "2"), + resource.TestCheckResourceAttr(alertResourceName, "type_definition.metric_less_than_or_equals.for_over_pct", "10"), + resource.TestCheckResourceAttr(alertResourceName, "type_definition.metric_less_than_or_equals.of_the_last.specific_value", "10_MINUTES"), + resource.TestCheckResourceAttr(alertResourceName, "type_definition.metric_less_than_or_equals.missing_values.replace_with_zero", "true"), + resource.TestCheckResourceAttr(alertResourceName, "type_definition.metric_less_than_or_equals.undetected_values_management.trigger_undetected_values", "true"), + resource.TestCheckResourceAttr(alertResourceName, "type_definition.metric_less_than_or_equals.undetected_values_management.auto_retire_timeframe", "5_Minutes"), + ), + }, + { + ResourceName: alertResourceName, + ImportState: true, + }, + { + Config: testAccCoralogixResourceAlertMetricLessThanOrEqualsUpdated(), + Check: resource.ComposeAggregateTestCheckFunc( + resource.TestCheckResourceAttr(alertResourceName, "name", "metric-less-than-or-equals alert example updated"), + resource.TestCheckResourceAttr(alertResourceName, "description", "Example of metric-less-than-or-equals alert from terraform updated"), + resource.TestCheckResourceAttr(alertResourceName, "priority", "P2"), + resource.TestCheckResourceAttr(alertResourceName, "type_definition.metric_less_than_or_equals.metric_filter.promql", "sum(rate(http_requests_total{job=\"api-server\"}[5m])) by (status)"), + resource.TestCheckResourceAttr(alertResourceName, "type_definition.metric_less_than_or_equals.threshold", "5"), + resource.TestCheckResourceAttr(alertResourceName, "type_definition.metric_less_than_or_equals.for_over_pct", "15"), + resource.TestCheckResourceAttr(alertResourceName, "type_definition.metric_less_than_or_equals.of_the_last.specific_value", "10_MINUTES"), + resource.TestCheckResourceAttr(alertResourceName, "type_definition.metric_less_than_or_equals.missing_values.min_non_null_values_pct", "50"), + resource.TestCheckResourceAttr(alertResourceName, "type_definition.metric_less_than_or_equals.undetected_values_management.trigger_undetected_values", "true"), + resource.TestCheckResourceAttr(alertResourceName, "type_definition.metric_less_than_or_equals.undetected_values_management.auto_retire_timeframe", "5_Minutes"), + ), + }, + }, + }, + ) +} - checks = appendSeveritiesCheck(checks, alert.alertFilters.severities, alertType) +func TestAccCoralogixResourceAlert_metric_more_than_or_equals(t *testing.T) { + resource.Test(t, resource.TestCase{ + 
PreCheck:                 func() { testAccPreCheck(t) },
+		ProtoV6ProviderFactories: testAccProtoV6ProviderFactories,
+		CheckDestroy:             testAccCheckAlertDestroy,
+		Steps: []resource.TestStep{
+			{
+				Config: testAccCoralogixResourceAlertMetricMoreThanOrEquals(),
+				Check: resource.ComposeAggregateTestCheckFunc(
+					resource.TestCheckResourceAttr(alertResourceName, "name", "metric-more-than-or-equals alert example"),
+					resource.TestCheckResourceAttr(alertResourceName, "description", "Example of metric-more-than-or-equals alert from terraform"),
+					resource.TestCheckResourceAttr(alertResourceName, "priority", "P3"),
+					resource.TestCheckResourceAttr(alertResourceName, "type_definition.metric_more_than_or_equals.metric_filter.promql", "sum(rate(http_requests_total{job=\"api-server\"}[5m])) by (status)"),
+					resource.TestCheckResourceAttr(alertResourceName, "type_definition.metric_more_than_or_equals.threshold", "2"),
+					resource.TestCheckResourceAttr(alertResourceName, "type_definition.metric_more_than_or_equals.for_over_pct", "10"),
+					resource.TestCheckResourceAttr(alertResourceName, "type_definition.metric_more_than_or_equals.of_the_last.specific_value", "10_MINUTES"),
+					resource.TestCheckResourceAttr(alertResourceName, "type_definition.metric_more_than_or_equals.missing_values.replace_with_zero", "true"),
+				),
+			},
+			{
+				ResourceName: alertResourceName,
+				ImportState:  true,
+			},
+			{
+				Config: testAccCoralogixResourceAlertMetricMoreThanOrEqualsUpdated(),
+				Check: resource.ComposeAggregateTestCheckFunc(
+					resource.TestCheckResourceAttr(alertResourceName, "name", "metric-more-than-or-equals alert example updated"),
+					resource.TestCheckResourceAttr(alertResourceName, "description", "Example of metric-more-than-or-equals alert from terraform updated"),
+					resource.TestCheckResourceAttr(alertResourceName, "priority", "P4"),
+					resource.TestCheckResourceAttr(alertResourceName, "type_definition.metric_more_than_or_equals.metric_filter.promql", "sum(rate(http_requests_total{job=\"api-server\"}[5m])) by (status)"),
+					resource.TestCheckResourceAttr(alertResourceName, "type_definition.metric_more_than_or_equals.threshold", "10"),
+					resource.TestCheckResourceAttr(alertResourceName, "type_definition.metric_more_than_or_equals.for_over_pct", "15"),
+					resource.TestCheckResourceAttr(alertResourceName, "type_definition.metric_more_than_or_equals.of_the_last.specific_value", "1_HOUR"),
+					resource.TestCheckResourceAttr(alertResourceName, "type_definition.metric_more_than_or_equals.missing_values.replace_with_zero", "true"),
+				),
+			},
+		},
+	},
+	)
+}
-	return checks
+func TestAccCoralogixResourceAlert_tracing_immediate(t *testing.T) {
+	resource.Test(t, resource.TestCase{
+		PreCheck:                 func() { testAccPreCheck(t) },
+		ProtoV6ProviderFactories: testAccProtoV6ProviderFactories,
+		CheckDestroy:             testAccCheckAlertDestroy,
+		Steps: []resource.TestStep{
+			{
+				Config: testAccCoralogixResourceAlertTracingImmediate(),
+				Check: resource.ComposeAggregateTestCheckFunc(
+					resource.TestCheckResourceAttr(alertResourceName, "name", "tracing_immediate alert example"),
+					resource.TestCheckResourceAttr(alertResourceName, "description", "Example of tracing_immediate alert from terraform"),
+					resource.TestCheckResourceAttr(alertResourceName, "priority", "P1"),
+					resource.TestCheckResourceAttr(alertResourceName, "type_definition.tracing_immediate.tracing_query.latency_threshold_ms", "100"),
+					resource.TestCheckResourceAttr(alertResourceName, "type_definition.tracing_immediate.tracing_query.tracing_label_filters.application_name.#", "2"),
+
resource.TestCheckTypeSetElemNestedAttrs(alertResourceName, "type_definition.tracing_immediate.tracing_query.tracing_label_filters.application_name.*", + map[string]string{ + "operation": "IS", + "values.#": "2", + }, + ), + resource.TestCheckTypeSetElemNestedAttrs(alertResourceName, "type_definition.tracing_immediate.tracing_query.tracing_label_filters.application_name.*", + map[string]string{ + "operation": "STARTS_WITH", + "values.#": "1", + }, + ), + resource.TestCheckResourceAttr(alertResourceName, "type_definition.tracing_immediate.tracing_query.tracing_label_filters.subsystem_name.#", "1"), + resource.TestCheckTypeSetElemNestedAttrs(alertResourceName, "type_definition.tracing_immediate.tracing_query.tracing_label_filters.subsystem_name.*", + map[string]string{ + "operation": "IS", + "values.#": "1", + }, + ), + resource.TestCheckResourceAttr(alertResourceName, "type_definition.tracing_immediate.tracing_query.tracing_label_filters.operation_name.#", "1"), + resource.TestCheckTypeSetElemNestedAttrs(alertResourceName, "type_definition.tracing_immediate.tracing_query.tracing_label_filters.operation_name.*", + map[string]string{ + "operation": "IS", + "values.#": "1", + }, + ), + resource.TestCheckTypeSetElemNestedAttrs(alertResourceName, "type_definition.tracing_immediate.tracing_query.tracing_label_filters.span_fields.*", + map[string]string{ + "key": "status", + "filter_type.operation": "IS", + "filter_type.values.#": "1", + }, + ), + resource.TestCheckTypeSetElemNestedAttrs(alertResourceName, "type_definition.tracing_immediate.tracing_query.tracing_label_filters.span_fields.*", + map[string]string{ + "key": "status", + "filter_type.operation": "STARTS_WITH", + "filter_type.values.#": "2", + }, + ), + ), + }, + { + ResourceName: alertResourceName, + ImportState: true, + }, + { + Config: testAccCoralogixResourceAlertTracingImmediateUpdated(), + Check: resource.ComposeAggregateTestCheckFunc( + resource.TestCheckResourceAttr(alertResourceName, "name", "tracing_immediate alert example updated"), + resource.TestCheckResourceAttr(alertResourceName, "description", "Example of tracing_immediate alert from terraform updated"), + resource.TestCheckResourceAttr(alertResourceName, "priority", "P2"), + resource.TestCheckResourceAttr(alertResourceName, "type_definition.tracing_immediate.tracing_query.latency_threshold_ms", "200"), + resource.TestCheckResourceAttr(alertResourceName, "type_definition.tracing_immediate.tracing_query.tracing_label_filters.application_name.#", "2"), + resource.TestCheckTypeSetElemNestedAttrs(alertResourceName, "type_definition.tracing_immediate.tracing_query.tracing_label_filters.application_name.*", + map[string]string{ + "operation": "IS", + "values.#": "2", + }, + ), + resource.TestCheckTypeSetElemNestedAttrs(alertResourceName, "type_definition.tracing_immediate.tracing_query.tracing_label_filters.application_name.*", + map[string]string{ + "operation": "STARTS_WITH", + "values.#": "1", + }, + ), + resource.TestCheckResourceAttr(alertResourceName, "type_definition.tracing_immediate.tracing_query.tracing_label_filters.subsystem_name.#", "1"), + resource.TestCheckTypeSetElemNestedAttrs(alertResourceName, "type_definition.tracing_immediate.tracing_query.tracing_label_filters.subsystem_name.*", + map[string]string{ + "operation": "IS", + "values.#": "1", + }, + ), + resource.TestCheckResourceAttr(alertResourceName, "type_definition.tracing_immediate.tracing_query.tracing_label_filters.operation_name.#", "1"), + resource.TestCheckTypeSetElemNestedAttrs(alertResourceName, 
"type_definition.tracing_immediate.tracing_query.tracing_label_filters.operation_name.*", + map[string]string{ + "operation": "IS", + "values.#": "1", + }, + ), + resource.TestCheckResourceAttr(alertResourceName, "type_definition.tracing_immediate.tracing_query.tracing_label_filters.span_fields.#", "3"), + resource.TestCheckTypeSetElemNestedAttrs(alertResourceName, "type_definition.tracing_immediate.tracing_query.tracing_label_filters.span_fields.*", + map[string]string{ + "key": "status", + "filter_type.operation": "STARTS_WITH", + "filter_type.values.#": "2", + }, + ), + resource.TestCheckTypeSetElemNestedAttrs(alertResourceName, "type_definition.tracing_immediate.tracing_query.tracing_label_filters.span_fields.*", + map[string]string{ + "key": "status", + "filter_type.operation": "ENDS_WITH", + "filter_type.values.#": "2", + }, + ), + resource.TestCheckTypeSetElemNestedAttrs(alertResourceName, "type_definition.tracing_immediate.tracing_query.tracing_label_filters.span_fields.*", + map[string]string{ + "key": "status", + "filter_type.operation": "IS", + "filter_type.values.#": "1", + }, + ), + ), + }, + }, + }) } -func appendSeveritiesCheck(checks []resource.TestCheckFunc, severities []string, alertType string) []resource.TestCheckFunc { - for _, s := range severities { - checks = append(checks, - resource.TestCheckTypeSetElemAttr(alertResourceName, fmt.Sprintf("%s.0.severities.*", alertType), s)) - } - return checks +func TestAccCoralogixResourceAlert_tracing_more_than(t *testing.T) { + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + ProtoV6ProviderFactories: testAccProtoV6ProviderFactories, + CheckDestroy: testAccCheckAlertDestroy, + Steps: []resource.TestStep{ + { + Config: testAccCoralogixResourceAlertTracingMoreThan(), + Check: resource.ComposeAggregateTestCheckFunc( + resource.TestCheckResourceAttr(alertResourceName, "name", "tracing_more_than alert example"), + resource.TestCheckResourceAttr(alertResourceName, "description", "Example of tracing_more_than alert from terraform"), + resource.TestCheckResourceAttr(alertResourceName, "priority", "P2"), + resource.TestCheckResourceAttr(alertResourceName, "type_definition.tracing_more_than.tracing_query.latency_threshold_ms", "100"), + resource.TestCheckResourceAttr(alertResourceName, "type_definition.tracing_more_than.tracing_query.tracing_label_filters.application_name.#", "2"), + resource.TestCheckTypeSetElemNestedAttrs(alertResourceName, "type_definition.tracing_more_than.tracing_query.tracing_label_filters.application_name.*", + map[string]string{ + "operation": "IS", + "values.#": "2", + }, + ), + resource.TestCheckTypeSetElemNestedAttrs(alertResourceName, "type_definition.tracing_more_than.tracing_query.tracing_label_filters.application_name.*", + map[string]string{ + "operation": "STARTS_WITH", + "values.#": "1", + }, + ), + resource.TestCheckResourceAttr(alertResourceName, "type_definition.tracing_more_than.span_amount", "5"), + resource.TestCheckResourceAttr(alertResourceName, "type_definition.tracing_more_than.time_window.specific_value", "10_MINUTES"), + ), + }, + { + ResourceName: alertResourceName, + ImportState: true, + }, + { + Config: testAccCoralogixResourceAlertTracingMoreThanUpdated(), + Check: resource.ComposeAggregateTestCheckFunc( + resource.TestCheckResourceAttr(alertResourceName, "name", "tracing_more_than alert example updated"), + resource.TestCheckResourceAttr(alertResourceName, "description", "Example of tracing_more_than alert from terraform updated"), + 
resource.TestCheckResourceAttr(alertResourceName, "priority", "P3"), + resource.TestCheckResourceAttr(alertResourceName, "type_definition.tracing_more_than.tracing_query.latency_threshold_ms", "200"), + resource.TestCheckTypeSetElemNestedAttrs(alertResourceName, "type_definition.tracing_more_than.tracing_query.tracing_label_filters.application_name.*", + map[string]string{ + "operation": "IS", + "values.#": "2", + }, + ), + resource.TestCheckTypeSetElemNestedAttrs(alertResourceName, "type_definition.tracing_more_than.tracing_query.tracing_label_filters.application_name.*", + map[string]string{ + "operation": "STARTS_WITH", + "values.#": "1", + }, + ), + resource.TestCheckResourceAttr(alertResourceName, "type_definition.tracing_more_than.span_amount", "5"), + resource.TestCheckResourceAttr(alertResourceName, "type_definition.tracing_more_than.time_window.specific_value", "1_HOUR"), + ), + }, + }, + }) } -func appendSchedulingChecks(checks []resource.TestCheckFunc, daysOfWeek []string, startTime, endTime string) []resource.TestCheckFunc { - for _, d := range daysOfWeek { - checks = append(checks, resource.TestCheckTypeSetElemAttr(alertResourceName, "scheduling.0.time_frame.0.days_enabled.*", d)) - } - checks = append(checks, resource.TestCheckResourceAttr(alertResourceName, "scheduling.0.time_frame.0.start_time", startTime)) - checks = append(checks, resource.TestCheckResourceAttr(alertResourceName, "scheduling.0.time_frame.0.end_time", endTime)) - return checks +func TestAccCoralogixResourceAlert_flow(t *testing.T) { + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + ProtoV6ProviderFactories: testAccProtoV6ProviderFactories, + CheckDestroy: testAccCheckAlertDestroy, + Steps: []resource.TestStep{ + { + Config: testAccCoralogixResourceAlertFlow(), + Check: resource.ComposeAggregateTestCheckFunc( + resource.TestCheckResourceAttr(alertResourceName, "name", "flow alert example"), + resource.TestCheckResourceAttr(alertResourceName, "description", "Example of flow alert from terraform"), + resource.TestCheckResourceAttr(alertResourceName, "priority", "P3"), + resource.TestCheckResourceAttr(alertResourceName, "type_definition.flow.stages.#", "1"), + resource.TestCheckResourceAttr(alertResourceName, "type_definition.flow.stages.0.flow_stages_groups.#", "2"), + resource.TestCheckResourceAttr(alertResourceName, "type_definition.flow.stages.0.flow_stages_groups.0.alerts_op", "OR"), + resource.TestCheckResourceAttr(alertResourceName, "type_definition.flow.stages.0.flow_stages_groups.0.next_op", "AND"), + resource.TestCheckResourceAttr(alertResourceName, "type_definition.flow.stages.0.flow_stages_groups.0.alert_defs.#", "2"), + resource.TestCheckResourceAttr(alertResourceName, "type_definition.flow.stages.0.timeframe_ms", "10"), + resource.TestCheckResourceAttr(alertResourceName, "type_definition.flow.stages.0.timeframe_type", "Up To"), + ), + }, + { + ResourceName: alertResourceName, + ImportState: true, + }, + { + Config: testAccCoralogixResourceAlertFlowUpdated(), + Check: resource.ComposeAggregateTestCheckFunc( + resource.TestCheckResourceAttr(alertResourceName, "name", "flow alert example updated"), + resource.TestCheckResourceAttr(alertResourceName, "description", "Example of flow alert from terraform updated"), + resource.TestCheckResourceAttr(alertResourceName, "priority", "P3"), + resource.TestCheckResourceAttr(alertResourceName, "type_definition.flow.stages.#", "2"), + resource.TestCheckResourceAttr(alertResourceName, 
"type_definition.flow.stages.0.flow_stages_groups.#", "2"), + resource.TestCheckResourceAttr(alertResourceName, "type_definition.flow.stages.0.flow_stages_groups.0.alerts_op", "AND"), + resource.TestCheckResourceAttr(alertResourceName, "type_definition.flow.stages.0.flow_stages_groups.0.next_op", "OR"), + resource.TestCheckResourceAttr(alertResourceName, "type_definition.flow.stages.0.flow_stages_groups.0.alert_defs.#", "2"), + resource.TestCheckResourceAttr(alertResourceName, "type_definition.flow.stages.0.timeframe_ms", "10"), + resource.TestCheckResourceAttr(alertResourceName, "type_definition.flow.stages.0.timeframe_type", "Up To"), + resource.TestCheckResourceAttr(alertResourceName, "type_definition.flow.stages.1.flow_stages_groups.#", "1"), + resource.TestCheckResourceAttr(alertResourceName, "type_definition.flow.stages.1.flow_stages_groups.0.alerts_op", "AND"), + resource.TestCheckResourceAttr(alertResourceName, "type_definition.flow.stages.1.flow_stages_groups.0.next_op", "OR"), + resource.TestCheckResourceAttr(alertResourceName, "type_definition.flow.stages.1.flow_stages_groups.0.alert_defs.#", "1"), + resource.TestCheckResourceAttr(alertResourceName, "type_definition.flow.stages.1.timeframe_ms", "20"), + resource.TestCheckResourceAttr(alertResourceName, "type_definition.flow.stages.1.timeframe_type", "Up To"), + ), + }, + }, + }) } func testAccCheckAlertDestroy(s *terraform.State) error { @@ -672,13 +1312,13 @@ func testAccCheckAlertDestroy(s *terraform.State) error { continue } - req := &alertsv1.GetAlertByUniqueIdRequest{ + req := &alertsv3.GetAlertDefRequest{ Id: wrapperspb.String(rs.Primary.ID), } resp, err := client.GetAlert(ctx, req) if err == nil { - if resp.Alert.Id.Value == rs.Primary.ID { + if resp.GetAlertDef().Id.Value == rs.Primary.ID { return fmt.Errorf("alert still exists: %s", rs.Primary.ID) } } @@ -687,600 +1327,1653 @@ func testAccCheckAlertDestroy(s *terraform.State) error { return nil } -func testAccCoralogixResourceAlertStandard(a *standardAlertTestParams) string { - return fmt.Sprintf(`resource "coralogix_alert" "test" { - name = "%s" - description = "%s" - severity = "%s" +func testAccCoralogixResourceAlertLogsImmediate() string { + return `resource "coralogix_alert" "test" { + name = "logs immediate alert" + description = "Example of logs immediate alert from terraform" + priority = "P1" - notifications_group { - notification { - integration_id = "%s" - } - notification { - email_recipients = %s - } + labels = { + alert_type = "security" + security_severity = "high" } - incident_settings { - notify_on = "%s" - retriggering_period_minutes = %d - } + notification_group = { + simple_target_settings = [ + { + recipients = ["example@coralogix.com"] + } + ] + } - scheduling { - time_zone = "%s" - time_frame { - days_enabled = %s - start_time = "%s" - end_time = "%s" - } + incidents_settings = { + notify_on = "Triggered and Resolved" + retriggering_period = { + minutes = 1 + } } - meta_labels = { - alert_type = "security" - security_severity = "high" - } + schedule = { + active_on = { + days_of_week = ["Wednesday", "Thursday"] + start_time = { + hours = 8 + minutes = 30 + } + end_time = { + hours = 20 + minutes = 30 + } + } + } - standard { - severities = %s - search_query = "%s" - condition { - group_by = %s - less_than = true - threshold = %d - time_window = "%s" - manage_undetected_values { - enable_triggering_on_undetected_values = true - auto_retire_ratio = "%s" - } + type_definition = { + logs_immediate = { + logs_filter = { + lucene_filter = { + 
lucene_query = "message:\"error\"" + label_filters = { + } + } + } } } } -`, - a.name, a.description, a.severity, a.webhookID, sliceToString(a.emailRecipients), a.notifyOn, a.notifyEveryMin, a.timeZone, - sliceToString(a.daysOfWeek), a.activityStarts, a.activityEnds, - sliceToString(a.severities), a.searchQuery, sliceToString(a.groupBy), a.occurrencesThreshold, a.timeWindow, a.deadmanRatio) +` } -func testAccCoralogixResourceAlertRatio(a *ratioAlertTestParams) string { - return fmt.Sprintf(`resource "coralogix_alert" "test" { - name = "%s" - description = "%s" - severity = "%s" +func testAccCoralogixResourceAlertLogsImmediateUpdated() string { + return `resource "coralogix_alert" "test" { + name = "logs immediate alert updated" + description = "Example of logs immediate alert from terraform updated" + priority = "P2" + + notification_group = { + advanced_target_settings = [ + { + retriggering_period = { + minutes = 10 + } + notify_on = "Triggered Only" + recipients = ["example@coralogix.com"] + } + ] + } - notifications_group { - notification { - integration_id = "%s" - } - notification { - email_recipients = %s + incidents_settings = { + notify_on = "Triggered and Resolved" + retriggering_period = { + minutes = 10 } } - incident_settings { - notify_on = "%s" - retriggering_period_minutes = %d - } - - scheduling { - time_zone = "%s" - - time_frame { - days_enabled = %s - start_time = "%s" - end_time = "%s" - } + schedule = { + active_on = { + days_of_week = ["Wednesday", "Thursday"] + start_time = { + hours = 9 + minutes = 30 + } + end_time = { + hours = 21 + minutes = 30 + } + } } - ratio { - query_1 { - severities = %s - search_query = "%s" - } - query_2 { - severities = %s - search_query = "%s" - } - condition { - more_than = true - ratio_threshold = %f - time_window = "%s" - group_by = %s - group_by_q1 = true - ignore_infinity = %t + type_definition = { + logs_immediate = { + logs_filter = { + lucene_filter = { + lucene_query = "message:\"error\"" + label_filters = { + } + } + } } } -}`, - a.name, a.description, a.severity, a.webhookID, sliceToString(a.emailRecipients), a.notifyOn, a.notifyEveryMin, a.timeZone, - sliceToString(a.daysOfWeek), a.activityStarts, a.activityEnds, - sliceToString(a.severities), a.searchQuery, sliceToString(a.q2Severities), a.q2SearchQuery, - a.ratio, a.timeWindow, sliceToString(a.groupBy), a.ignoreInfinity) -} - -func testAccCoralogixResourceAlertNewValue(a *newValueAlertTestParams) string { - return fmt.Sprintf(`resource "coralogix_alert" "test" { - name = "%s" - description = "%s" - severity = "%s" - - notifications_group { - notification { - integration_id = "%s" - } - notification{ - email_recipients = %s - } - } - - incident_settings { - notify_on = "%s" - retriggering_period_minutes = %d - } +} +` +} - scheduling { - time_zone = "%s" - - time_frame { - days_enabled = %s - start_time = "%s" - end_time = "%s" - } - } +func testAccCoralogixResourceAlertLogsMoreThan() string { + return `resource "coralogix_alert" "test" { + name = "logs-more-than alert example" + description = "Example of logs-more-than alert example from terraform" + priority = "P2" - new_value { - severities = %s - search_query = "%s" - condition { - key_to_track = "%s" - time_window = "%s" - } + labels = { + alert_type = "security" + security_severity = "high" } -}`, - a.name, a.description, a.severity, a.webhookID, sliceToString(a.emailRecipients), a.notifyOn, a.notifyEveryMin, a.timeZone, - sliceToString(a.daysOfWeek), a.activityStarts, a.activityEnds, - sliceToString(a.severities), 
a.searchQuery, a.keyToTrack, a.timeWindow) -} - -func testAccCoralogixResourceAlertUniqueCount(a *uniqueCountAlertTestParams) string { - return fmt.Sprintf(`resource "coralogix_alert" "test" { - name = "%s" - description = "%s" - severity = "%s" - - notifications_group { - group_by_fields = %s - notification { - integration_id = "%s" - } - notification{ - email_recipients = %s - } - } - - incident_settings { - notify_on = "%s" - retriggering_period_minutes = %d - } - scheduling { - time_zone = "%s" - time_frame { - days_enabled = %s - start_time = "%s" - end_time = "%s" - } + notification_group = { + simple_target_settings = [ + { + integration_id = "17730" + }, + { + recipients = ["example@coralogix.com"] + } + ] } - unique_count { - severities = %s - search_query = "%s" - condition { - unique_count_key = "%s" - max_unique_values = %d - time_window = "%s" - group_by_key = "%s" - max_unique_values_for_group_by = %d + incidents_settings = { + notify_on = "Triggered and Resolved" + retriggering_period = { + minutes = 1 } } -}`, - a.name, a.description, a.severity, sliceToString([]string{a.groupByKey}), a.webhookID, sliceToString(a.emailRecipients), a.notifyOn, a.notifyEveryMin, a.timeZone, - sliceToString(a.daysOfWeek), a.activityStarts, a.activityEnds, sliceToString(a.severities), - a.searchQuery, a.uniqueCountKey, a.maxUniqueValues, a.timeWindow, a.groupByKey, a.maxUniqueValuesForGroupBy) -} - -func testAccCoralogixResourceAlertTimeRelative(a *timeRelativeAlertTestParams) string { - return fmt.Sprintf(`resource "coralogix_alert" "test" { - name = "%s" - description = "%s" - severity = "%s" - - notifications_group { - notification { - integration_id = "%s" - } - notification{ - email_recipients = %s - } - } - incident_settings { - notify_on = "%s" - retriggering_period_minutes = %d - } - - scheduling { - time_zone = "%s" - - time_frame { - days_enabled = %s - start_time = "%s" - end_time = "%s" - } + schedule = { + active_on = { + days_of_week = ["Wednesday", "Thursday"] + start_time = { + hours = 8 + minutes = 30 + } + end_time = { + hours = 20 + minutes = 30 + } + } } - time_relative { - severities = %s - search_query = "%s" - condition { - more_than = true - group_by = %s - ratio_threshold = %d - relative_time_window = "%s" - ignore_infinity = %t + type_definition = { + logs_more_than = { + threshold = 2 + time_window = { + specific_value = "10_MINUTES" + } + evaluation_window = "Dynamic" + logs_filter = { + lucene_filter = { + lucene_query = "message:\"error\"" + label_filters = { + application_name = [ + { + operation = "IS" + value = "nginx" + } + ] + subsystem_name = [ + { + operation = "IS" + value = "subsystem-name" + } + ] + severities = ["Warning"] + } + } + } } } -}`, - a.name, a.description, a.severity, a.webhookID, sliceToString(a.emailRecipients), a.notifyOn, a.notifyEveryMin, a.timeZone, - sliceToString(a.daysOfWeek), a.activityStarts, a.activityEnds, - sliceToString(a.severities), a.searchQuery, sliceToString(a.groupBy), a.ratioThreshold, a.relativeTimeWindow, a.ignoreInfinity) -} - -func testAccCoralogixResourceAlertMetricLucene(a *metricLuceneAlertTestParams) string { - return fmt.Sprintf(`resource "coralogix_alert" "test" { - name = "%s" - description = "%s" - severity = "%s" - - notifications_group { - notification { - integration_id = "%s" - } - notification{ - email_recipients = %s - } - } +} +` +} - incident_settings { - notify_on = "%s" - retriggering_period_minutes = %d - } - - scheduling { - time_zone = "%s" - - time_frame { - days_enabled = %s - start_time = "%s" 
- end_time = "%s" - } +func testAccCoralogixResourceAlertLogsMoreThanUpdated() string { + return `resource "coralogix_alert" "test" { + name = "logs-more_-than alert example updated" + description = "Example of standard alert from terraform updated" + priority = "P3" + + labels = { + alert_type = "security" + security_severity = "low" } - metric { - lucene { - search_query = "%s" - condition { - metric_field = "%s" - arithmetic_operator = "%s" - less_than = true - threshold = %d - arithmetic_operator_modifier = %d - sample_threshold_percentage = %d - time_window = "%s" - group_by = %s - manage_undetected_values{ - enable_triggering_on_undetected_values = false - } + notification_group = { + simple_target_settings = [ + { + integration_id = "17730" } - } + ] } -}`, - a.name, a.description, a.severity, a.webhookID, sliceToString(a.emailRecipients), a.notifyOn, a.notifyEveryMin, a.timeZone, - sliceToString(a.daysOfWeek), a.activityStarts, a.activityEnds, a.searchQuery, a.metricField, a.arithmeticOperator, - a.threshold, a.arithmeticOperatorModifier, a.sampleThresholdPercentage, a.timeWindow, sliceToString(a.groupBy)) -} - -func testAccCoralogixResourceAlertMetricPromql(a *metricPromqlAlertTestParams) string { - return fmt.Sprintf(`resource "coralogix_alert" "test" { - name = "%s" - description = "%s" - severity = "%s" - - notifications_group { - notification { - integration_id = "%s" - } - notification{ - email_recipients = %s - } - } - incident_settings { - notify_on = "%s" - retriggering_period_minutes = %d + incidents_settings = { + notify_on = "Triggered Only" + retriggering_period = { + minutes = 10 + } } - scheduling { - time_zone = "%s" - time_frame { - days_enabled = %s - start_time = "%s" - end_time = "%s" - } + schedule = { + active_on = { + days_of_week = ["Monday", "Thursday"] + start_time = { + hours = 8 + minutes = 30 + } + end_time = { + hours = 20 + minutes = 30 + } + } } - metric { - promql { - search_query = "http_requests_total{status!~\"4..\"}" - condition { - %s = true - threshold = %d - sample_threshold_percentage = %d - time_window = "%s" - min_non_null_values_percentage = %d + type_definition = { + logs_more_than = { + threshold = 20 + time_window = { + specific_value = "2_HOURS" + } + evaluation_window = "Rolling" + logs_filter = { + lucene_filter = { + lucene_query = "message:\"error\"" + label_filters = { + application_name = [ + { + operation = "IS" + value = "nginx" + }, + { + operation = "NOT" + value = "application_name" + } + ] + } + } } } } -}`, - a.name, a.description, a.severity, a.webhookID, sliceToString(a.emailRecipients), a.notifyOn, a.notifyEveryMin, a.timeZone, - sliceToString(a.daysOfWeek), a.activityStarts, a.activityEnds, a.condition, a.threshold, a.sampleThresholdPercentage, - a.timeWindow, a.nonNullPercentage) -} - -func testAccCoralogixResourceAlertTracing(a *tracingAlertTestParams) string { - return fmt.Sprintf(`resource "coralogix_alert" "test" { - name = "%s" - description = "%s" - severity = "%s" - - notifications_group { - notification { - integration_id = "%s" +} +` +} + +func testAccCoralogixResourceAlertLogsLessThan() string { + return `resource "coralogix_alert" "test" { + name = "logs-less-than alert example" + description = "Example of logs-less-than alert example from terraform" + priority = "P2" + + labels = { + alert_type = "security" + security_severity = "high" + } + + notification_group = { + simple_target_settings = [ + { + integration_id = "17730" + }, + { + recipients = ["example@coralogix.com"] + } + ] + } + + 
incidents_settings = { + notify_on = "Triggered and Resolved" + retriggering_period = { + minutes = 1 } - notification{ - email_recipients = %s - } } - incident_settings { - notify_on = "%s" - retriggering_period_minutes = %d - } - - scheduling { - time_zone = "%s" - time_frame { - days_enabled = %s - start_time = "%s" - end_time = "%s" - } + schedule = { + active_on = { + days_of_week = ["Wednesday", "Thursday"] + start_time = { + hours = 8 + minutes = 30 + } + end_time = { + hours = 20 + minutes = 30 + } + } + } + + type_definition = { + logs_less_than = { + threshold = 2 + time_window = { + specific_value = "10_MINUTES" + } + logs_filter = { + lucene_filter = { + lucene_query = "message:\"error\"" + label_filters = { + application_name = [ + { + operation = "IS" + value = "nginx" + } + ] + subsystem_name = [ + { + operation = "IS" + value = "subsystem-name" + } + ] + severities= ["Warning"] + } + } + } + } + } + } +` +} + +func testAccCoralogixResourceAlertLogsLessThanUpdated() string { + return `resource "coralogix_alert" "test" { + name = "logs-less-than alert example updated" + description = "Example of logs-less-than alert example from terraform updated" + priority = "P3" + + labels = { + alert_type = "security" + security_severity = "low" + } + + notification_group = { + advanced_target_settings = [ + { + integration_id = "17730" + } + ] + } + + incidents_settings = { + notify_on = "Triggered Only" + retriggering_period = { + minutes = 10 + } + } + + schedule = { + active_on = { + days_of_week = ["Monday", "Thursday"] + start_time = { + hours = 8 + minutes = 30 + } + end_time = { + hours = 20 + minutes = 30 + } + } + } + + type_definition = { + logs_less_than = { + threshold = 20 + time_window = { + specific_value = "2_HOURS" + } + logs_filter = { + lucene_filter = { + lucene_query = "message:\"error\"" + label_filters = { + application_name = [ + { + operation = "IS" + value = "nginx" + }, + { + operation = "NOT" + value = "application_name" + } + ] + } + } + } + } + } +} +` +} + +func testAccCoralogixResourceAlertLogsMoreThanUsual() string { + return `resource "coralogix_alert" "test" { + name = "logs-more-than-usual alert example" + description = "Example of logs-more-than-usual alert from terraform" + priority = "P4" + + labels = { + alert_type = "security" + security_severity = "high" + } + + notification_group = { + advanced_target_settings = [ + { + integration_id = "17730" + notify_on = "Triggered and Resolved" + }, + { + retriggering_period = { + minutes = 1 + } + notify_on = "Triggered and Resolved" + recipients = ["example@coralogix.com"] + } + ] } - tracing { - latency_threshold_milliseconds = %f - applications = ["nginx"] - subsystems = ["subsystem-name"] - tag_filter { - field = "Status" - values = ["filter:contains:400", "500"] + incidents_settings = { + notify_on = "Triggered and Resolved" + retriggering_period = { + minutes = 1 } + } - condition { - more_than = true - time_window = "%s" - threshold = %d + schedule = { + active_on = { + days_of_week = ["Wednesday", "Thursday"] + start_time = { + hours = 8 + minutes = 30 + } + end_time = { + hours = 20 + minutes = 30 + } } } -}`, - a.name, a.description, a.severity, a.webhookID, sliceToString(a.emailRecipients), a.notifyOn, a.notifyEveryMin, a.timeZone, - sliceToString(a.daysOfWeek), a.activityStarts, a.activityEnds, - a.conditionLatencyMs, a.timeWindow, a.occurrencesThreshold) + + type_definition = { + logs_more_than_usual = { + logs_filter = { + lucene_filter = { + lucene_query = "message:\"error\"" + label_filters 
= { + application_name = [ + { + operation = "IS" + value = "nginx" + } + ] + subsystem_name = [ + { + operation = "IS" + value = "subsystem-name" + } + ] + severities = ["Warning"] + } + } + } + notification_payload_filter = [ + "coralogix.metadata.sdkId", "coralogix.metadata.sdkName", "coralogix.metadata.sdkVersion" + ] + time_window = { + specific_value = "10_MINUTES" + } + minimum_threshold = 2 + } + } +} +` } -func testAccCoralogixResourceAlertFLow(a *flowAlertTestParams) string { - return fmt.Sprintf(`resource "coralogix_alert" "standard_alert" { - name = "standard" - severity = "Info" +func testAccCoralogixResourceAlertLogsMoreThanUsualUpdated() string { + return `resource "coralogix_alert" "test" { + name = "logs-more-than-usual alert example updated" + description = "Example of logs-more-than-usual alert from terraform updated" + priority = "P1" + + notification_group = { + advanced_target_settings = [ + { + integration_id = "17730" + notify_on = "Triggered and Resolved" + } + ] + } - notifications_group { - notification { - email_recipients = ["example@coralogix.com"] - retriggering_period_minutes = 1 - notify_on = "Triggered_only" - } - } + type_definition = { + logs_more_than_usual = { + logs_filter = { + lucene_filter = { + lucene_query = "message:\"updated_error\"" + label_filters = { + application_name = [ + { + operation = "IS" + value = "nginx" + } + ] + subsystem_name = [ + { + operation = "IS" + value = "subsystem-name" + } + ] + severities = ["Warning", "Error"] + } + } + } + time_window = { + specific_value = "1_HOUR" + } + minimum_threshold = 20 + } + } +} +` +} - standard { - condition { - more_than = true - threshold = 5 - time_window = "30Min" - group_by = ["coralogix.metadata.sdkId"] - } +func testAccCoralogixResourceAlertLogsLessThanUsual() string { + return `resource "coralogix_alert" "test" { + name = "logs-less-than alert example" + description = "Example of logs-less-than alert example from terraform" + priority = "P2" + + labels = { + alert_type = "security" + security_severity = "high" + } + + notification_group = { + simple_target_settings = [ + { + recipients = ["example@coralogix.com", "example2@coralogix.com"] + }, + { + integration_id = "17730" + } + ] + } + + incidents_settings = { + notify_on = "Triggered and Resolved" + retriggering_period = { + minutes = 1 + } + } + + schedule = { + active_on = { + days_of_week = ["Wednesday", "Thursday"] + start_time = { + hours = 10 + minutes = 30 + } + end_time = { + hours = 20 + minutes = 30 + } + } + } + + type_definition = { + logs_less_than = { + threshold = 2 + time_window = { + specific_value = "10_MINUTES" + } + logs_filter = { + lucene_filter = { + lucene_query = "message:\"error\"" + label_filters = { + application_name = [ + { + operation = "NOT" + value = "application_name" + } + ] + subsystem_name = [ + { + operation = "STARTS_WITH" + value = "subsystem-name" + } + ] + severities = ["Warning", "Error"] + } + } + } + } + } } + ` } - resource "coralogix_alert" "test" { - name = "%s" - description = "%s" - severity = "%s" - - notifications_group { - notification { - integration_id = "%s" +func testAccCoralogixResourceAlertLogsLessThanUsualUpdated() string { + return `resource "coralogix_alert" "test" { + name = "logs-less-than alert example updated" + description = "Example of logs-less-than alert example from terraform updated" + priority = "P3" + + notification_group = { + simple_target_settings = [ + { + integration_id = "17730" } - notification{ - email_recipients = %s - } - } + ] + } - 
incident_settings { - notify_on = "%s" - retriggering_period_minutes = %d - } + incidents_settings = { + notify_on = "Triggered Only" + retriggering_period = { + minutes = 10 + } + } + + schedule = { + active_on = { + days_of_week = ["Monday", "Thursday"] + start_time = { + hours = 8 + minutes = 30 + } + end_time = { + hours = 20 + minutes = 30 + } + } + } - scheduling { - time_zone = "%s" - time_frame { - days_enabled = %s - start_time = "%s" - end_time = "%s" - } + type_definition = { + logs_less_than = { + threshold = 20 + time_window = { + specific_value = "2_HOURS" + } + logs_filter = { + lucene_filter = { + lucene_query = "message:\"error\"" + label_filters = { + application_name = [ + { + operation = "IS" + value = "nginx" + }, + { + operation = "NOT" + value = "application_name" + } + ] + } + } + } + } + } } + ` +} - flow { - stage { - group { - sub_alerts { - operator = "OR" - flow_alert{ - user_alert_id = coralogix_alert.standard_alert.id - } - } - next_operator = "OR" +func testAccCoralogixResourceAlertLogsRatioMoreThan() string { + return `resource "coralogix_alert" "test" { + name = "logs-ratio-more-than alert example" + description = "Example of logs-ratio-more-than alert from terraform" + priority = "P1" + group_by = ["coralogix.metadata.alert_id", "coralogix.metadata.alert_name"] + + notification_group = { + simple_target_settings = [ + { + recipients = ["example@coralogix.com"] } - group { - sub_alerts { - operator = "AND" - flow_alert{ - not = true - user_alert_id = coralogix_alert.standard_alert.id + ] + } + + type_definition = { + logs_ratio_more_than = { + denominator_alias = "denominator" + denominator_logs_filter = { + lucene_filter = { + lucene_query = "mod_date:[20020101 TO 20030101]" + label_filters = { + application_name = [ + { + operation = "IS" + value = "nginx" + } + ] + subsystem_name = [ + { + operation = "IS" + value = "subsystem-name" + } + ] + severities = ["Warning"] } } - next_operator = "AND" } - time_window { - minutes = 20 + numerator_alias = "numerator" + numerator_logs_filter = { + lucene_filter = { + lucene_query = "mod_date:[20030101 TO 20040101]" + label_filters = { + application_name = [ + { + operation = "IS" + value = "nginx" + } + ] + subsystem_name = [ + { + operation = "IS" + value = "subsystem-name" + } + ] + severities = ["Error"] + } + } + } + time_window = { + specific_value = "10_MINUTES" } + threshold = 2 } - stage { - group { - sub_alerts { - operator = "AND" - flow_alert { - user_alert_id = coralogix_alert.standard_alert.id + } +} +` +} + +func testAccCoralogixResourceAlertLogsRatioMoreThanUpdated() string { + return `resource "coralogix_alert" "test" { + name = "logs-ratio-more-than alert example updated" + description = "Example of logs-ratio-more-than alert from terraform updated" + priority = "P2" + group_by = ["coralogix.metadata.alert_id", "coralogix.metadata.alert_name", "coralogix.metadata.alert_description"] + + notification_group = { + simple_target_settings = [ + { + recipients = ["example@coralogix.com"] + }, + { + integration_id = "17730" + } + ] + } + + type_definition = { + logs_ratio_more_than = { + denominator_alias = "updated-denominator" + denominator_logs_filter = { + lucene_filter = { + lucene_query = "mod_date:[20030101 TO 20040101]" + label_filters = { + application_name = [ + { + operation = "IS" + value = "nginx" + } + ] + subsystem_name = [ + { + operation = "IS" + value = "subsystem-name" + } + ] + severities = ["Warning"] } - flow_alert { - not = true - user_alert_id = coralogix_alert.standard_alert.id 
+ } + } + numerator_alias = "updated-numerator" + numerator_logs_filter = { + lucene_filter = { + lucene_query = "mod_date:[20040101 TO 20050101]" + label_filters = { + subsystem_name = [ + { + operation = "ENDS_WITH" + value = "updated-subsystem-name" + }, + { + operation = "NOT" + value = "subsystem-name" + } + ] } } - next_operator = "OR" } + time_window = { + specific_value = "1_HOUR" + } + threshold = 120 + group_by_for = "Numerator Only" } - group_by = ["coralogix.metadata.sdkId"] } -}`, - a.name, a.description, a.severity, a.webhookID, sliceToString(a.emailRecipients), a.notifyOn, a.notifyEveryMin, a.timeZone, - sliceToString(a.daysOfWeek), a.activityStarts, a.activityEnds) +} +` } -type standardAlertTestParams struct { - groupBy []string - occurrencesThreshold int - timeWindow string - deadmanRatio string - alertCommonTestParams +func testAccCoralogixResourceAlertLogsRatioLessThan() string { + return `resource "coralogix_alert" "test" { + name = "logs-ratio-less-than alert example" + description = "Example of logs-ratio-less-than alert from terraform" + priority = "P3" + + group_by = ["coralogix.metadata.alert_id", "coralogix.metadata.alert_name"] + type_definition = { + logs_ratio_less_than = { + numerator_alias = "numerator" + denominator_alias = "denominator" + threshold = 2 + time_window = { + specific_value = "10_MINUTES" + } + group_by_for = "Denominator Only" + } + } +} +` } -type ratioAlertTestParams struct { - q2Severities, groupBy []string - ratio float64 - timeWindow, q2SearchQuery string - ignoreInfinity bool - alertCommonTestParams +func testAccCoralogixResourceAlertLogsRatioLessThanUpdated() string { + return `resource "coralogix_alert" "test" { + name = "logs-ratio-less-than alert example updated" + description = "Example of logs-ratio-less-than alert from terraform updated" + priority = "P2" + + type_definition = { + logs_ratio_less_than = { + numerator_alias = "updated-numerator" + denominator_alias = "updated-denominator" + threshold = 20 + time_window = { + specific_value = "2_HOURS" + } + undetected_values_management = { + trigger_undetected_values = true + auto_retire_timeframe = "6_Hours" + } + } + } +} +` } -type newValueAlertTestParams struct { - keyToTrack, timeWindow string - alertCommonTestParams +func testAccCoralogixResourceAlertLogsNewValue() string { + return `resource "coralogix_alert" "test" { + name = "logs-new-value alert example" + description = "Example of logs-new-value alert from terraform" + priority = "P2" + + type_definition = { + logs_new_value = { + notification_payload_filter = ["coralogix.metadata.sdkId", "coralogix.metadata.sdkName", "coralogix.metadata.sdkVersion"] + time_window = { + specific_value = "24_HOURS" + } + keypath_to_track = "remote_addr_geoip.country_name" + } + } +} +` } -type uniqueCountAlertTestParams struct { - uniqueCountKey, timeWindow, groupByKey string - maxUniqueValues, maxUniqueValuesForGroupBy int - alertCommonTestParams +func testAccCoralogixResourceAlertLogsNewValueUpdated() string { + return `resource "coralogix_alert" "test" { + name = "logs-new-value alert example updated" + description = "Example of logs-new-value alert from terraform updated" + priority = "P3" + + type_definition = { + logs_new_value = { + time_window = { + specific_value = "12_HOURS" + } + keypath_to_track = "remote_addr_geoip.city_name" + } + } +} +` } -type timeRelativeAlertTestParams struct { - alertCommonTestParams - ratioThreshold int - relativeTimeWindow string - groupBy []string - ignoreInfinity bool +func 
testAccCoralogixResourceAlertLogsUniqueCount() string { + return `resource "coralogix_alert" "test" { + name = "logs-unique-count alert example" + description = "Example of logs-unique-count alert from terraform" + priority = "P2" + + group_by = ["remote_addr_geoip.city_name"] + type_definition = { + logs_unique_count = { + unique_count_keypath = "remote_addr_geoip.country_name" + max_unique_count = 2 + time_window = { + specific_value = "5_MINUTES" + } + max_unique_count_per_group_by_key = 500 + } + } +} +` } -type metricLuceneAlertTestParams struct { - alertCommonTestParams - groupBy []string - metricField, timeWindow, arithmeticOperator string - threshold, arithmeticOperatorModifier, sampleThresholdPercentage int +func testAccCoralogixResourceAlertLogsUniqueCountUpdated() string { + return `resource "coralogix_alert" "test" { + name = "logs-unique-count alert example updated" + description = "Example of logs-unique-count alert from terraform updated" + priority = "P2" + + type_definition = { + logs_unique_count = { + unique_count_keypath = "remote_addr_geoip.city_name" + max_unique_count = 5 + time_window = { + specific_value = "20_MINUTES" + } + } + } +} +` } -type metricPromqlAlertTestParams struct { - alertCommonTestParams - threshold, nonNullPercentage, sampleThresholdPercentage int - timeWindow string - condition string +func testAccCoralogixResourceAlertLogsTimeRelativeMoreThan() string { + return `resource "coralogix_alert" "test" { + name = "logs-time-relative-more-than alert example" + description = "Example of logs-time-relative-more-than alert from terraform" + priority = "P4" + + type_definition = { + logs_time_relative_more_than = { + threshold = 10 + compared_to = "Same Hour Yesterday" + ignore_infinity = true + } + } +} +` } -type tracingAlertTestParams struct { - alertCommonTestParams - occurrencesThreshold int - conditionLatencyMs float64 - timeWindow string - groupBy []string +func testAccCoralogixResourceAlertLogsTimeRelativeMoreThanUpdated() string { + return `resource "coralogix_alert" "test" { + name = "logs-time-relative-more-than alert example updated" + description = "Example of logs-time-relative-more-than alert from terraform updated" + priority = "P3" + + type_definition = { + logs_time_relative_more_than = { + threshold = 50 + compared_to = "Same Day Last Week" + } + } +} +` } -type flowAlertTestParams struct { - name, description, severity string - emailRecipients []string - webhookID string - notifyEveryMin int - notifyOn string - activeWhen +func testAccCoralogixResourceAlertLogsTimeRelativeLessThan() string { + return `resource "coralogix_alert" "test" { + name = "logs-time-relative-more-than alert example" + description = "Example of logs-time-relative-more-than alert from terraform" + priority = "P4" + + type_definition = { + logs_time_relative_less_than = { + threshold = 10 + compared_to = "Same Hour Yesterday" + ignore_infinity = true + } + } +} +` } -type alertCommonTestParams struct { - name, description, severity string - webhookID string - emailRecipients []string - notifyEveryMin int - notifyOn string - searchQuery string - alertFilters - activeWhen +func testAccCoralogixResourceAlertLogsTimeRelativeLessThanUpdated() string { + return `resource "coralogix_alert" "test" { + name = "logs-time-relative-more-than alert example updated" + description = "Example of logs-time-relative-more-than alert from terraform updated" + priority = "P3" + + type_definition = { + logs_time_relative_less_than = { + threshold = 50 + compared_to = "Same Day Last Week" 
+ ignore_infinity = false + undetected_values_management = { + trigger_undetected_values = true + auto_retire_timeframe = "6_Hours" + } + } + } } +` +} + +func testAccCoralogixResourceAlertMetricMoreThan() string { + return `resource "coralogix_alert" "test" { + name = "metric-more-than alert example" + description = "Example of metric-more-than alert from terraform" + priority = "P3" -type alertFilters struct { - severities []string + type_definition = { + metric_more_than = { + metric_filter = { + promql = "sum(rate(http_requests_total{job=\"api-server\"}[5m])) by (status)" + } + threshold = 2 + for_over_pct = 10 + of_the_last = { + specific_value = "10_MINUTES" + } + missing_values = { + min_non_null_values_pct = 50 + } + } + } +} +` +} + +func testAccCoralogixResourceAlertMetricMoreThanUpdated() string { + return `resource "coralogix_alert" "test" { + name = "metric-more-than alert example updated" + description = "Example of metric-more-than alert from terraform updated" + priority = "P4" + + type_definition = { + metric_more_than = { + metric_filter = { + promql = "sum(rate(http_requests_total{job=\"api-server\"}[5m])) by (status)" + } + threshold = 10 + for_over_pct = 15 + of_the_last = { + specific_value = "1_HOUR" + } + missing_values = { + replace_with_zero = true + } + } + } +} +` +} + +func testAccCoralogixResourceAlertMetricLessThan() string { + return `resource "coralogix_alert" "test" { + name = "metric-less-than alert example" + description = "Example of metric-less-than alert from terraform" + priority = "P4" + + type_definition = { + metric_less_than = { + metric_filter = { + promql = "sum(rate(http_requests_total{job=\"api-server\"}[5m])) by (status)" + } + threshold = 2 + for_over_pct = 10 + of_the_last = { + specific_value = "10_MINUTES" + } + missing_values = { + replace_with_zero = true + } + undetected_values_management = { + trigger_undetected_values = true + auto_retire_timeframe = "5_Minutes" + } + } + } +} +` +} + +func testAccCoralogixResourceAlertMetricLessThanUpdated() string { + return `resource "coralogix_alert" "test" { + name = "metric-less-than alert example updated" + description = "Example of metric-less-than alert from terraform updated" + priority = "P3" + + type_definition = { + metric_less_than = { + metric_filter = { + promql = "sum(rate(http_requests_total{job=\"api-server\"}[5m])) by (status)" + } + threshold = 5 + for_over_pct = 15 + of_the_last = { + specific_value = "10_MINUTES" + } + missing_values = { + min_non_null_values_pct = 50 + } + undetected_values_management = { + trigger_undetected_values = true + auto_retire_timeframe = "5_Minutes" + } + } + } +} +` +} + +func testAccCoralogixResourceAlertMetricsLessThanUsual() string { + return `resource "coralogix_alert" "test" { + name = "metric-less-than-usual alert example" + description = "Example of metric-less-than-usual alert from terraform" + priority = "P1" + + type_definition = { + metric_less_than_usual = { + metric_filter = { + promql = "sum(rate(http_requests_total{job=\"api-server\"}[5m])) by (status)" + } + threshold = 2 + for_over_pct = 10 + of_the_last = { + specific_value = "12_HOURS" + } + threshold = 20 + min_non_null_values_pct = 15 + } + } +} +` } -type activeWhen struct { - daysOfWeek []string - activityStarts, activityEnds, timeZone string +func testAccCoralogixResourceAlertMetricsLessThanUsualUpdated() string { + return `resource "coralogix_alert" "test" { + name = "metric-less-than-usual alert example updated" + description = "Example of metric-less-than-usual alert 
from terraform updated" + priority = "P1" + + type_definition = { + metric_less_than_usual = { + metric_filter = { + promql = "sum(rate(http_requests_total{job=\"api-server\"}[5m])) by (status)" + } + for_over_pct = 15 + of_the_last = { + specific_value = "10_MINUTES" + } + threshold = 2 + min_non_null_values_pct = 10 + } + } +} +` } -func randActiveWhen() activeWhen { - return activeWhen{ - timeZone: selectRandomlyFromSlice(validTimeZones), - daysOfWeek: selectManyRandomlyFromSlice(alertValidDaysOfWeek), - activityStarts: randHourStr(), - activityEnds: randHourStr(), +func testAccCoralogixResourceAlertMetricsMoreThanUsual() string { + return `resource "coralogix_alert" "test" { + name = "metric_more_than_usual alert example" + description = "Example of metric_more_than_usual alert from terraform" + priority = "P2" + + type_definition = { + metric_more_than_usual = { + metric_filter = { + promql = "sum(rate(http_requests_total{job=\"api-server\"}[5m])) by (status)" + } + threshold = 2 + for_over_pct = 10 + of_the_last = { + specific_value = "10_MINUTES" + } + min_non_null_values_pct = 10 + } + } +} +` +} + +func testAccCoralogixResourceAlertMetricsMoreThanUsualUpdated() string { + return `resource "coralogix_alert" "test" { + name = "metric_more_than_usual alert example updated" + description = "Example of metric_more_than_usual alert from terraform updated" + priority = "P3" + + type_definition = { + metric_more_than_usual = { + metric_filter = { + promql = "sum(rate(http_requests_total{job=\"api-server\"}[5m])) by (status)" + } + threshold = 20 + for_over_pct = 10 + of_the_last = { + specific_value = "10_MINUTES" + } + min_non_null_values_pct = 10 } + } +} +` } -func randHourStr() string { - return fmt.Sprintf("%s:%s", - toTwoDigitsFormat(int32(acctest.RandIntRange(0, 24))), - toTwoDigitsFormat(int32(acctest.RandIntRange(0, 60)))) +func testAccCoralogixResourceAlertMetricLessThanOrEquals() string { + return `resource "coralogix_alert" "test" { + name = "metric-less-than-or-equals alert example" + description = "Example of metric-less-than-or-equals alert from terraform" + priority = "P1" + + type_definition = { + metric_less_than_or_equals = { + metric_filter = { + promql = "sum(rate(http_requests_total{job=\"api-server\"}[5m])) by (status)" + } + threshold = 2 + for_over_pct = 10 + of_the_last = { + specific_value = "10_MINUTES" + } + missing_values = { + replace_with_zero = true + } + undetected_values_management = { + trigger_undetected_values = true + auto_retire_timeframe = "5_Minutes" + } + } + } +} +` +} + +func testAccCoralogixResourceAlertMetricLessThanOrEqualsUpdated() string { + return `resource "coralogix_alert" "test" { + name = "metric-less-than-or-equals alert example updated" + description = "Example of metric-less-than-or-equals alert from terraform updated" + priority = "P2" + + type_definition = { + metric_less_than_or_equals = { + metric_filter = { + promql = "sum(rate(http_requests_total{job=\"api-server\"}[5m])) by (status)" + } + threshold = 5 + for_over_pct = 15 + of_the_last = { + specific_value = "10_MINUTES" + } + missing_values = { + min_non_null_values_pct = 50 + } + undetected_values_management = { + trigger_undetected_values = true + auto_retire_timeframe = "5_Minutes" + } + } + } +} +` +} + +func testAccCoralogixResourceAlertMetricMoreThanOrEquals() string { + return `resource "coralogix_alert" "test" { + name = "metric-more-than-or-equals alert example" + description = "Example of metric-more-than-or-equals alert from terraform" + priority = "P3" + + 
type_definition = { + metric_more_than_or_equals = { + metric_filter = { + promql = "sum(rate(http_requests_total{job=\"api-server\"}[5m])) by (status)" + } + threshold = 2 + for_over_pct = 10 + of_the_last = { + specific_value = "10_MINUTES" + } + missing_values = { + replace_with_zero = true + } + } + } +} +` +} + +func testAccCoralogixResourceAlertMetricMoreThanOrEqualsUpdated() string { + return `resource "coralogix_alert" "test" { + name = "metric-more-than-or-equals alert example updated" + description = "Example of metric-more-than-or-equals alert from terraform updated" + priority = "P4" + + type_definition = { + metric_more_than_or_equals = { + metric_filter = { + promql = "sum(rate(http_requests_total{job=\"api-server\"}[5m])) by (status)" + } + threshold = 10 + for_over_pct = 15 + of_the_last = { + specific_value = "1_HOUR" + } + missing_values = { + replace_with_zero = true + } + } + } +} +` +} + +func testAccCoralogixResourceAlertTracingImmediate() string { + return `resource "coralogix_alert" "test" { + name = "tracing_immediate alert example" + description = "Example of tracing_immediate alert from terraform" + priority = "P1" + + type_definition = { + tracing_immediate = { + tracing_query = { + latency_threshold_ms = 100 + tracing_label_filters = { + application_name = [ + { + operation = "IS" + values = ["nginx", "apache"] + }, + { + operation = "STARTS_WITH" + values = ["application-name:"] + } + ] + subsystem_name = [ + { + values = ["subsystem-name"] + } + ] + operation_name = [ + { + values = ["operation-name"] + } + ] + span_fields = [ + { + key = "status" + filter_type = { + values = ["200"] + } + }, + { + key = "status" + filter_type = { + operation = "STARTS_WITH" + values = ["40", "50"] + } + }, + ] + } + } + } + } +} +` +} + +func testAccCoralogixResourceAlertTracingImmediateUpdated() string { + return `resource "coralogix_alert" "test" { + name = "tracing_immediate alert example updated" + description = "Example of tracing_immediate alert from terraform updated" + priority = "P2" + + type_definition = { + tracing_immediate = { + tracing_query = { + latency_threshold_ms = 200 + tracing_label_filters = { + application_name = [ + { + operation = "IS" + values = ["nginx", "apache"] + }, + { + operation = "STARTS_WITH" + values = ["application-name:"] + } + ] + subsystem_name = [ + { + operation = "IS" + values = ["subsystem-name"] + } + ] + operation_name = [ + { + operation = "IS" + values = ["operation-name"] + } + ] + span_fields = [ + { + key = "status" + filter_type = { + values = ["200"] + } + }, + { + key = "status" + filter_type = { + operation = "STARTS_WITH" + values = ["40", "50"] + } + }, + { + key = "status" + filter_type = { + operation = "ENDS_WITH" + values = ["500", "404"] + } + }, + ] + } + } + } + } +} +` +} + +func testAccCoralogixResourceAlertTracingMoreThan() string { + return `resource "coralogix_alert" "test" { + name = "tracing_more_than alert example" + description = "Example of tracing_more_than alert from terraform" + priority = "P2" + + type_definition = { + tracing_more_than = { + tracing_query = { + latency_threshold_ms = 100 + tracing_label_filters = { + application_name = [ + { + operation = "IS" + values = ["nginx", "apache"] + }, + { + operation = "STARTS_WITH" + values = ["application-name:"] + } + ] + } + } + span_amount = 5 + time_window = { + specific_value = "10_MINUTES" + } + } + } +} +` +} + +func testAccCoralogixResourceAlertTracingMoreThanUpdated() string { + return `resource "coralogix_alert" "test" { + name = 
"tracing_more_than alert example updated" + description = "Example of tracing_more_than alert from terraform updated" + priority = "P3" + + type_definition = { + tracing_more_than = { + tracing_query = { + latency_threshold_ms = 200 + tracing_label_filters = { + application_name = [ + { + values = ["nginx", "apache"] + }, + { + operation = "STARTS_WITH" + values = ["application-name:"] + } + ] + } + } + span_amount = 5 + time_window = { + specific_value = "1_HOUR" + } + } + } +} +` +} + +func testAccCoralogixResourceAlertFlow() string { + return `resource "coralogix_alert" "test_1"{ + name = "logs immediate alert 1" + priority = "P1" + type_definition = { + logs_immediate = { + } + } +} + +resource "coralogix_alert" "test_2"{ + name = "logs immediate alert 2" + priority = "P2" + type_definition = { + logs_immediate = { + } + } +} + +resource "coralogix_alert" "test_3"{ + name = "logs immediate alert 3" + priority = "P3" + type_definition = { + logs_immediate = { + } + } +} + +resource "coralogix_alert" "test" { + name = "flow alert example" + description = "Example of flow alert from terraform" + priority = "P3" + type_definition = { + flow = { + stages = [ + { + flow_stages_groups = [ + { + alert_defs = [ + { + id = coralogix_alert.test_1.id + }, + { + id = coralogix_alert.test_2.id + }, + ] + next_op = "AND" + alerts_op = "OR" + }, + { + alert_defs = [ + { + id = coralogix_alert.test_3.id + }, + { + id = coralogix_alert.test_2.id + }, + ] + next_op = "OR" + alerts_op = "AND" + }, + ] + timeframe_ms = 10 + timeframe_type = "Up To" + } + ] + } + } +} +` +} + +func testAccCoralogixResourceAlertFlowUpdated() string { + return `resource "coralogix_alert" "test_1"{ + name = "logs immediate alert 1" + priority = "P1" + type_definition = { + logs_immediate = { + } + } +} + +resource "coralogix_alert" "test_2"{ + name = "logs immediate alert 2" + priority = "P2" + type_definition = { + logs_immediate = { + } + } +} + +resource "coralogix_alert" "test_3"{ + name = "logs immediate alert 3" + priority = "P3" + type_definition = { + logs_immediate = { + } + } +} + +resource "coralogix_alert" "test" { + name = "flow alert example updated" + description = "Example of flow alert from terraform updated" + priority = "P3" + type_definition = { + flow = { + stages = [ + { + flow_stages_groups = [ + { + alert_defs = [ + { + id = coralogix_alert.test_2.id + }, + { + id = coralogix_alert.test_1.id + }, + ] + next_op = "OR" + alerts_op = "AND" + }, + { + alert_defs = [ + { + id = coralogix_alert.test_2.id + }, + { + id = coralogix_alert.test_3.id + }, + ] + next_op = "AND" + alerts_op = "OR" + }, + ] + timeframe_ms = 10 + timeframe_type = "Up To" + }, + { + flow_stages_groups = [ + { + alert_defs = [ + { + id = coralogix_alert.test_2.id + }, + ] + next_op = "OR" + alerts_op = "AND" + }, + ] + timeframe_ms = 20 + timeframe_type = "Up To" + } + ] + } + } +} +` } diff --git a/coralogix/resource_coralogix_alert_test.go.old b/coralogix/resource_coralogix_alert_test.go.old new file mode 100644 index 00000000..b0688f0b --- /dev/null +++ b/coralogix/resource_coralogix_alert_test.go.old @@ -0,0 +1,1286 @@ +// Copyright 2024 Coralogix Ltd. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package coralogix + +import ( + "context" + "fmt" + "math" + "strconv" + "testing" + + "terraform-provider-coralogix/coralogix/clientset" + alertsv1 "terraform-provider-coralogix/coralogix/clientset/grpc/alerts/v2" + + "github.com/hashicorp/terraform-plugin-testing/helper/acctest" + "github.com/hashicorp/terraform-plugin-testing/helper/resource" + "github.com/hashicorp/terraform-plugin-testing/terraform" + "google.golang.org/protobuf/types/known/wrapperspb" +) + +var alertResourceName = "coralogix_alert.test" + +func TestAccCoralogixResourceAlert_standard(t *testing.T) { + alert := standardAlertTestParams{ + alertCommonTestParams: *getRandomAlert(), + groupBy: []string{"EventType"}, + occurrencesThreshold: acctest.RandIntRange(1, 1000), + timeWindow: selectRandomlyFromSlice(alertValidTimeFrames), + deadmanRatio: selectRandomlyFromSlice(alertValidDeadmanRatioValues), + } + checks := extractStandardAlertChecks(alert) + + updatedAlert := standardAlertTestParams{ + alertCommonTestParams: *getRandomAlert(), + groupBy: []string{"EventType"}, + occurrencesThreshold: acctest.RandIntRange(1, 1000), + timeWindow: selectRandomlyFromSlice(alertValidTimeFrames), + deadmanRatio: selectRandomlyFromSlice(alertValidDeadmanRatioValues), + } + updatedAlertChecks := extractStandardAlertChecks(updatedAlert) + + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + ProviderFactories: testAccProviderFactories, + CheckDestroy: testAccCheckAlertDestroy, + Steps: []resource.TestStep{ + { + Config: testAccCoralogixResourceAlertStandard(&alert), + Check: resource.ComposeAggregateTestCheckFunc(checks...), + }, + { + ResourceName: alertResourceName, + ImportState: true, + }, + { + Config: testAccCoralogixResourceAlertStandard(&updatedAlert), + Check: resource.ComposeAggregateTestCheckFunc(updatedAlertChecks...), + }, + }, + }) +} + +func TestAccCoralogixResourceAlert_ratio(t *testing.T) { + alert := ratioAlertTestParams{ + alertCommonTestParams: *getRandomAlert(), + q2Severities: selectManyRandomlyFromSlice(alertValidLogSeverities), + timeWindow: selectRandomlyFromSlice(alertValidTimeFrames), + ratio: randFloat(), + groupBy: []string{"EventType"}, + q2SearchQuery: "remote_addr_enriched:/.*/", + ignoreInfinity: randBool(), + } + checks := extractRatioAlertChecks(alert) + + updatedAlert := ratioAlertTestParams{ + alertCommonTestParams: *getRandomAlert(), + q2Severities: selectManyRandomlyFromSlice(alertValidLogSeverities), + timeWindow: selectRandomlyFromSlice(alertValidTimeFrames), + ratio: randFloat(), + groupBy: []string{"EventType"}, + q2SearchQuery: "remote_addr_enriched:/.*/", + ignoreInfinity: randBool(), + } + updatedAlertChecks := extractRatioAlertChecks(updatedAlert) + + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + ProviderFactories: testAccProviderFactories, + CheckDestroy: testAccCheckAlertDestroy, + Steps: []resource.TestStep{ + { + Config: testAccCoralogixResourceAlertRatio(&alert), + Check: resource.ComposeAggregateTestCheckFunc(checks...), + }, + { + ResourceName: alertResourceName, + ImportState: true, + }, + { + Config: 
testAccCoralogixResourceAlertRatio(&updatedAlert), + Check: resource.ComposeAggregateTestCheckFunc(updatedAlertChecks...), + }, + }, + }) +} + +func TestAccCoralogixResourceAlert_newValue(t *testing.T) { + alert := newValueAlertTestParams{ + alertCommonTestParams: *getRandomAlert(), + keyToTrack: "EventType", + timeWindow: selectRandomlyFromSlice(alertValidNewValueTimeFrames), + } + alert.notifyOn = "Triggered_only" + checks := extractNewValueChecks(alert) + + updatedAlert := newValueAlertTestParams{ + alertCommonTestParams: *getRandomAlert(), + keyToTrack: "EventType", + timeWindow: selectRandomlyFromSlice(alertValidNewValueTimeFrames), + } + updatedAlert.notifyOn = "Triggered_only" + updatedAlertChecks := extractNewValueChecks(updatedAlert) + + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + ProviderFactories: testAccProviderFactories, + CheckDestroy: testAccCheckAlertDestroy, + Steps: []resource.TestStep{ + { + Config: testAccCoralogixResourceAlertNewValue(&alert), + Check: resource.ComposeAggregateTestCheckFunc(checks...), + }, + { + ResourceName: alertResourceName, + ImportState: true, + }, + { + Config: testAccCoralogixResourceAlertNewValue(&updatedAlert), + Check: resource.ComposeAggregateTestCheckFunc(updatedAlertChecks...), + }, + }, + }) +} + +func TestAccCoralogixResourceAlert_uniqueCount(t *testing.T) { + alert := uniqueCountAlertTestParams{ + alertCommonTestParams: *getRandomAlert(), + uniqueCountKey: "EventType", + timeWindow: selectRandomlyFromSlice(alertValidUniqueCountTimeFrames), + groupByKey: "metadata.name", + maxUniqueValues: 2, + maxUniqueValuesForGroupBy: 20, + } + checks := extractUniqueCountAlertChecks(alert) + + updatedAlert := uniqueCountAlertTestParams{ + alertCommonTestParams: *getRandomAlert(), + uniqueCountKey: "EventType", + timeWindow: selectRandomlyFromSlice(alertValidUniqueCountTimeFrames), + groupByKey: "metadata.name", + maxUniqueValues: 2, + maxUniqueValuesForGroupBy: 20, + } + updatedAlertChecks := extractUniqueCountAlertChecks(updatedAlert) + updatedAlertChecks = updatedAlertChecks[:len(updatedAlertChecks)-1] // remove group_by check + + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + ProviderFactories: testAccProviderFactories, + CheckDestroy: testAccCheckAlertDestroy, + Steps: []resource.TestStep{ + { + Config: testAccCoralogixResourceAlertUniqueCount(&alert), + Check: resource.ComposeAggregateTestCheckFunc(checks...), + }, + { + ResourceName: alertResourceName, + ImportState: true, + }, + { + Config: testAccCoralogixResourceAlertUniqueCount(&updatedAlert), + Check: resource.ComposeAggregateTestCheckFunc(updatedAlertChecks...), + }, + }, + }) +} + +func TestAccCoralogixResourceAlert_timeRelative(t *testing.T) { + alert := timeRelativeAlertTestParams{ + alertCommonTestParams: *getRandomAlert(), + ratioThreshold: acctest.RandIntRange(0, 1000), + relativeTimeWindow: selectRandomlyFromSlice(alertValidRelativeTimeFrames), + groupBy: []string{"EventType"}, + ignoreInfinity: randBool(), + } + checks := extractTimeRelativeChecks(alert) + + updatedAlert := timeRelativeAlertTestParams{ + alertCommonTestParams: *getRandomAlert(), + ratioThreshold: acctest.RandIntRange(0, 1000), + relativeTimeWindow: selectRandomlyFromSlice(alertValidRelativeTimeFrames), + groupBy: []string{"EventType"}, + ignoreInfinity: randBool(), + } + updatedAlertChecks := extractTimeRelativeChecks(updatedAlert) + + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + ProviderFactories: 
testAccProviderFactories, + CheckDestroy: testAccCheckAlertDestroy, + Steps: []resource.TestStep{ + { + Config: testAccCoralogixResourceAlertTimeRelative(&alert), + Check: resource.ComposeAggregateTestCheckFunc(checks...), + }, + { + ResourceName: alertResourceName, + ImportState: true, + }, + { + Config: testAccCoralogixResourceAlertTimeRelative(&updatedAlert), + Check: resource.ComposeAggregateTestCheckFunc(updatedAlertChecks...), + }, + }, + }) +} + +func TestAccCoralogixResourceAlert_metricLucene(t *testing.T) { + alert := metricLuceneAlertTestParams{ + alertCommonTestParams: *getRandomAlert(), + groupBy: []string{"EventType"}, + metricField: "subsystem", + timeWindow: selectRandomlyFromSlice(alertValidMetricTimeFrames), + threshold: acctest.RandIntRange(0, 1000), + arithmeticOperator: selectRandomlyFromSlice(alertValidArithmeticOperators), + } + if alert.arithmeticOperator == "Percentile" { + alert.arithmeticOperatorModifier = acctest.RandIntRange(0, 100) + } + checks := extractLuceneMetricChecks(alert) + + updatedAlert := metricLuceneAlertTestParams{ + alertCommonTestParams: *getRandomAlert(), + groupBy: []string{"EventType"}, + metricField: "subsystem", + timeWindow: selectRandomlyFromSlice(alertValidMetricTimeFrames), + threshold: acctest.RandIntRange(0, 1000), + arithmeticOperator: selectRandomlyFromSlice(alertValidArithmeticOperators), + } + if updatedAlert.arithmeticOperator == "Percentile" { + updatedAlert.arithmeticOperatorModifier = acctest.RandIntRange(0, 100) + } + updatedAlertChecks := extractLuceneMetricChecks(updatedAlert) + + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + ProviderFactories: testAccProviderFactories, + CheckDestroy: testAccCheckAlertDestroy, + Steps: []resource.TestStep{ + { + Config: testAccCoralogixResourceAlertMetricLucene(&alert), + Check: resource.ComposeAggregateTestCheckFunc(checks...), + }, + { + ResourceName: alertResourceName, + ImportState: true, + }, + { + Config: testAccCoralogixResourceAlertMetricLucene(&updatedAlert), + Check: resource.ComposeAggregateTestCheckFunc(updatedAlertChecks...), + }, + }, + }) +} + +func TestAccCoralogixResourceAlert_metricPromql(t *testing.T) { + alert := metricPromqlAlertTestParams{ + alertCommonTestParams: *getRandomAlert(), + threshold: acctest.RandIntRange(0, 1000), + nonNullPercentage: 10 * acctest.RandIntRange(0, 10), + timeWindow: selectRandomlyFromSlice(alertValidMetricTimeFrames), + condition: "less_than", + } + checks := extractMetricPromqlAlertChecks(alert) + + updatedAlert := metricPromqlAlertTestParams{ + alertCommonTestParams: *getRandomAlert(), + threshold: acctest.RandIntRange(0, 1000), + nonNullPercentage: 10 * acctest.RandIntRange(0, 10), + timeWindow: selectRandomlyFromSlice(alertValidMetricTimeFrames), + condition: "more_than", + } + updatedAlertChecks := extractMetricPromqlAlertChecks(updatedAlert) + + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + ProviderFactories: testAccProviderFactories, + CheckDestroy: testAccCheckAlertDestroy, + Steps: []resource.TestStep{ + { + Config: testAccCoralogixResourceAlertMetricPromql(&alert), + Check: resource.ComposeAggregateTestCheckFunc(checks...), + }, + { + ResourceName: alertResourceName, + ImportState: true, + }, + { + Config: testAccCoralogixResourceAlertMetricPromql(&updatedAlert), + Check: resource.ComposeAggregateTestCheckFunc(updatedAlertChecks...), + }, + }, + }) +} + +func TestAccCoralogixResourceAlert_tracing(t *testing.T) { + alert := tracingAlertTestParams{ + 
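+		// conditionLatencyMs below is rounded to three decimal places so that the
+		// "%.3f"-formatted latency_threshold_milliseconds check in extractTracingAlertChecks matches it exactly.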
alertCommonTestParams: *getRandomAlert(), + conditionLatencyMs: math.Round(randFloat()*1000) / 1000, + occurrencesThreshold: acctest.RandIntRange(1, 10000), + timeWindow: selectRandomlyFromSlice(alertValidTimeFrames), + groupBy: []string{"EventType"}, + } + checks := extractTracingAlertChecks(alert) + + updatedAlert := tracingAlertTestParams{ + alertCommonTestParams: *getRandomAlert(), + conditionLatencyMs: math.Round(randFloat()*1000) / 1000, + occurrencesThreshold: acctest.RandIntRange(1, 10000), + timeWindow: selectRandomlyFromSlice(alertValidTimeFrames), + groupBy: []string{"EventType"}, + } + updatedAlertChecks := extractTracingAlertChecks(updatedAlert) + + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + ProviderFactories: testAccProviderFactories, + CheckDestroy: testAccCheckAlertDestroy, + Steps: []resource.TestStep{ + { + Config: testAccCoralogixResourceAlertTracing(&alert), + Check: resource.ComposeAggregateTestCheckFunc(checks...), + }, + { + ResourceName: alertResourceName, + ImportState: true, + }, + { + Config: testAccCoralogixResourceAlertTracing(&updatedAlert), + Check: resource.ComposeAggregateTestCheckFunc(updatedAlertChecks...), + }, + }, + }) +} + +func TestAccCoralogixResourceAlert_flow(t *testing.T) { + resourceName := "coralogix_alert.test" + + alert := flowAlertTestParams{ + name: acctest.RandomWithPrefix("tf-acc-test"), + description: acctest.RandomWithPrefix("tf-acc-test"), + emailRecipients: []string{"user@example.com"}, + webhookID: "10761", + severity: selectRandomlyFromSlice(alertValidSeverities), + activeWhen: randActiveWhen(), + notifyEveryMin: acctest.RandIntRange(1500 /*to avoid notify_every < condition.0.time_window*/, 3600), + notifyOn: "Triggered_only", + } + checks := extractFlowAlertChecks(alert) + + updatedAlert := flowAlertTestParams{ + name: acctest.RandomWithPrefix("tf-acc-test"), + description: acctest.RandomWithPrefix("tf-acc-test"), + emailRecipients: []string{"user@example.com"}, + webhookID: "10761", + severity: selectRandomlyFromSlice(alertValidSeverities), + activeWhen: randActiveWhen(), + notifyEveryMin: acctest.RandIntRange(1500 /*to avoid notify_every < condition.0.time_window*/, 3600), + notifyOn: "Triggered_only", + } + updatedAlertChecks := extractFlowAlertChecks(updatedAlert) + + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + ProviderFactories: testAccProviderFactories, + CheckDestroy: testAccCheckAlertDestroy, + Steps: []resource.TestStep{ + { + Config: testAccCoralogixResourceAlertFLow(&alert), + Check: resource.ComposeAggregateTestCheckFunc(checks...), + }, + { + ResourceName: resourceName, + ImportState: true, + }, + { + Config: testAccCoralogixResourceAlertFLow(&updatedAlert), + Check: resource.ComposeAggregateTestCheckFunc(updatedAlertChecks...), + }, + }, + }) +} + +func getRandomAlert() *alertCommonTestParams { + return &alertCommonTestParams{ + name: acctest.RandomWithPrefix("tf-acc-test"), + description: acctest.RandomWithPrefix("tf-acc-test"), + webhookID: "10761", + emailRecipients: []string{"user@example.com"}, + searchQuery: "remote_addr_enriched:/.*/", + severity: selectRandomlyFromSlice(alertValidSeverities), + activeWhen: randActiveWhen(), + notifyEveryMin: acctest.RandIntRange(2160 /*to avoid notify_every < condition.0.time_window*/, 3600), + notifyOn: selectRandomlyFromSlice(validNotifyOn), + alertFilters: alertFilters{ + severities: selectManyRandomlyFromSlice(alertValidLogSeverities), + }, + } +} + +func extractStandardAlertChecks(alert 
standardAlertTestParams) []resource.TestCheckFunc { + checks := extractCommonChecks(&alert.alertCommonTestParams, "standard") + checks = append(checks, + resource.TestCheckResourceAttr(alertResourceName, "meta_labels.alert_type", "security"), + resource.TestCheckResourceAttr(alertResourceName, "meta_labels.security_severity", "high"), + resource.TestCheckResourceAttr(alertResourceName, "standard.0.condition.0.threshold", strconv.Itoa(alert.occurrencesThreshold)), + resource.TestCheckResourceAttr(alertResourceName, "standard.0.condition.0.time_window", alert.timeWindow), + resource.TestCheckResourceAttr(alertResourceName, "standard.0.condition.0.group_by.0", alert.groupBy[0]), + resource.TestCheckResourceAttr(alertResourceName, "standard.0.condition.0.less_than", "true"), + resource.TestCheckResourceAttr(alertResourceName, "standard.0.condition.0.manage_undetected_values.0.enable_triggering_on_undetected_values", "true"), + resource.TestCheckResourceAttr(alertResourceName, "standard.0.condition.0.manage_undetected_values.0.auto_retire_ratio", alert.deadmanRatio), + ) + return checks +} + +func extractRatioAlertChecks(alert ratioAlertTestParams) []resource.TestCheckFunc { + checks := extractCommonChecks(&alert.alertCommonTestParams, "ratio.0.query_1") + checks = append(checks, + resource.TestCheckResourceAttr(alertResourceName, "ratio.0.query_2.0.search_query", alert.q2SearchQuery), + resource.TestCheckResourceAttr(alertResourceName, "ratio.0.condition.0.more_than", "true"), + resource.TestCheckResourceAttr(alertResourceName, "ratio.0.condition.0.ratio_threshold", fmt.Sprintf("%f", alert.ratio)), + resource.TestCheckResourceAttr(alertResourceName, "ratio.0.condition.0.time_window", alert.timeWindow), + resource.TestCheckResourceAttr(alertResourceName, "ratio.0.condition.0.group_by.0", alert.groupBy[0]), + resource.TestCheckResourceAttr(alertResourceName, "ratio.0.condition.0.group_by_q1", "true"), + resource.TestCheckResourceAttr(alertResourceName, "ratio.0.condition.0.ignore_infinity", fmt.Sprintf("%t", alert.ignoreInfinity)), + ) + checks = appendSeveritiesCheck(checks, alert.alertFilters.severities, "ratio.0.query_2") + + return checks +} + +func extractNewValueChecks(alert newValueAlertTestParams) []resource.TestCheckFunc { + checks := extractCommonChecks(&alert.alertCommonTestParams, "new_value") + checks = append(checks, + resource.TestCheckResourceAttr(alertResourceName, "new_value.0.condition.0.key_to_track", alert.keyToTrack), + resource.TestCheckResourceAttr(alertResourceName, "new_value.0.condition.0.time_window", alert.timeWindow), + ) + return checks +} + +func extractUniqueCountAlertChecks(alert uniqueCountAlertTestParams) []resource.TestCheckFunc { + checks := extractCommonChecks(&alert.alertCommonTestParams, "unique_count") + checks = append(checks, + resource.TestCheckResourceAttr(alertResourceName, "unique_count.0.condition.0.unique_count_key", alert.uniqueCountKey), + resource.TestCheckResourceAttr(alertResourceName, "unique_count.0.condition.0.unique_count_key", alert.uniqueCountKey), + resource.TestCheckResourceAttr(alertResourceName, "unique_count.0.condition.0.time_window", alert.timeWindow), + resource.TestCheckResourceAttr(alertResourceName, "unique_count.0.condition.0.max_unique_values", strconv.Itoa(alert.maxUniqueValues)), + resource.TestCheckResourceAttr(alertResourceName, "unique_count.0.condition.0.max_unique_values_for_group_by", strconv.Itoa(alert.maxUniqueValuesForGroupBy)), + ) + return checks +} + +func extractTimeRelativeChecks(alert 
timeRelativeAlertTestParams) []resource.TestCheckFunc { + checks := extractCommonChecks(&alert.alertCommonTestParams, "time_relative") + checks = append(checks, + resource.TestCheckResourceAttr(alertResourceName, "time_relative.0.condition.0.ratio_threshold", strconv.Itoa(alert.ratioThreshold)), + resource.TestCheckResourceAttr(alertResourceName, "time_relative.0.condition.0.relative_time_window", alert.relativeTimeWindow), + resource.TestCheckResourceAttr(alertResourceName, "time_relative.0.condition.0.group_by.0", alert.groupBy[0]), + resource.TestCheckResourceAttr(alertResourceName, "time_relative.0.condition.0.ignore_infinity", fmt.Sprintf("%t", alert.ignoreInfinity)), + ) + + return checks +} + +func extractLuceneMetricChecks(alert metricLuceneAlertTestParams) []resource.TestCheckFunc { + checks := []resource.TestCheckFunc{ + resource.TestCheckResourceAttrSet(alertResourceName, "id"), + resource.TestCheckResourceAttr(alertResourceName, "enabled", "true"), + resource.TestCheckResourceAttr(alertResourceName, "name", alert.name), + resource.TestCheckResourceAttr(alertResourceName, "description", alert.description), + resource.TestCheckResourceAttr(alertResourceName, "severity", alert.severity), + resource.TestCheckTypeSetElemNestedAttrs(alertResourceName, "notifications_group.0.notification.*", + map[string]string{ + "integration_id": alert.webhookID, + }), + resource.TestCheckTypeSetElemNestedAttrs(alertResourceName, "notifications_group.0.notification.*", + map[string]string{ + "email_recipients.0": alert.emailRecipients[0], + }), + resource.TestCheckResourceAttr(alertResourceName, "metric.0.lucene.0.search_query", alert.searchQuery), + resource.TestCheckResourceAttr(alertResourceName, "metric.0.lucene.0.condition.0.metric_field", alert.metricField), + resource.TestCheckResourceAttr(alertResourceName, "metric.0.lucene.0.condition.0.arithmetic_operator", alert.arithmeticOperator), + resource.TestCheckResourceAttr(alertResourceName, "metric.0.lucene.0.condition.0.less_than", "true"), + resource.TestCheckResourceAttr(alertResourceName, "metric.0.lucene.0.condition.0.threshold", strconv.Itoa(alert.threshold)), + resource.TestCheckResourceAttr(alertResourceName, "metric.0.lucene.0.condition.0.arithmetic_operator_modifier", strconv.Itoa(alert.arithmeticOperatorModifier)), + resource.TestCheckResourceAttr(alertResourceName, "metric.0.lucene.0.condition.0.sample_threshold_percentage", strconv.Itoa(alert.sampleThresholdPercentage)), + resource.TestCheckResourceAttr(alertResourceName, "metric.0.lucene.0.condition.0.time_window", alert.timeWindow), + resource.TestCheckResourceAttr(alertResourceName, "metric.0.lucene.0.condition.0.group_by.0", alert.groupBy[0]), + resource.TestCheckResourceAttr(alertResourceName, "metric.0.lucene.0.condition.0.manage_undetected_values.0.enable_triggering_on_undetected_values", "false"), + } + checks = appendSchedulingChecks(checks, alert.daysOfWeek, alert.activityStarts, alert.activityEnds) + return checks +} + +func extractMetricPromqlAlertChecks(alert metricPromqlAlertTestParams) []resource.TestCheckFunc { + checks := []resource.TestCheckFunc{ + resource.TestCheckResourceAttrSet(alertResourceName, "id"), + resource.TestCheckResourceAttr(alertResourceName, "enabled", "true"), + resource.TestCheckResourceAttr(alertResourceName, "name", alert.name), + resource.TestCheckResourceAttr(alertResourceName, "description", alert.description), + resource.TestCheckResourceAttr(alertResourceName, "severity", alert.severity), + 
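+		// notifications_group.0.notification is a set, so the two notification entries below are
+		// matched by attribute subset via TestCheckTypeSetElemNestedAttrs rather than by index.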
resource.TestCheckTypeSetElemNestedAttrs(alertResourceName, "notifications_group.0.notification.*", + map[string]string{ + "integration_id": alert.webhookID, + }), + resource.TestCheckTypeSetElemNestedAttrs(alertResourceName, "notifications_group.0.notification.*", + map[string]string{ + "email_recipients.0": alert.emailRecipients[0], + }), + resource.TestCheckResourceAttr(alertResourceName, "metric.0.promql.0.search_query", "http_requests_total{status!~\"4..\"}"), + resource.TestCheckResourceAttr(alertResourceName, "metric.0.promql.0.condition.0.threshold", strconv.Itoa(alert.threshold)), + resource.TestCheckResourceAttr(alertResourceName, "metric.0.promql.0.condition.0.sample_threshold_percentage", strconv.Itoa(alert.sampleThresholdPercentage)), + resource.TestCheckResourceAttr(alertResourceName, "metric.0.promql.0.condition.0.min_non_null_values_percentage", strconv.Itoa(alert.nonNullPercentage)), + resource.TestCheckResourceAttr(alertResourceName, "metric.0.promql.0.condition.0.time_window", alert.timeWindow), + } + if alert.condition == "less_than" { + checks = append(checks, + resource.TestCheckResourceAttr(alertResourceName, "metric.0.promql.0.condition.0.less_than", "true"), + resource.TestCheckResourceAttr(alertResourceName, "metric.0.promql.0.condition.0.manage_undetected_values.0.enable_triggering_on_undetected_values", "true"), + resource.TestCheckResourceAttr(alertResourceName, "metric.0.promql.0.condition.0.manage_undetected_values.0.auto_retire_ratio", "Never"), + ) + } else { + checks = append(checks, + resource.TestCheckResourceAttr(alertResourceName, "metric.0.promql.0.condition.0.more_than", "true"), + ) + } + checks = appendSchedulingChecks(checks, alert.daysOfWeek, alert.activityStarts, alert.activityEnds) + return checks +} + +func extractTracingAlertChecks(alert tracingAlertTestParams) []resource.TestCheckFunc { + checks := []resource.TestCheckFunc{ + resource.TestCheckResourceAttrSet(alertResourceName, "id"), + resource.TestCheckResourceAttr(alertResourceName, "enabled", "true"), + resource.TestCheckResourceAttr(alertResourceName, "name", alert.name), + resource.TestCheckResourceAttr(alertResourceName, "description", alert.description), + resource.TestCheckResourceAttr(alertResourceName, "severity", alert.severity), + resource.TestCheckTypeSetElemNestedAttrs(alertResourceName, "notifications_group.0.notification.*", + map[string]string{ + "integration_id": alert.webhookID, + }), + resource.TestCheckTypeSetElemNestedAttrs(alertResourceName, "notifications_group.0.notification.*", + map[string]string{ + "email_recipients.0": alert.emailRecipients[0], + }), + resource.TestCheckResourceAttr(alertResourceName, "tracing.0.latency_threshold_milliseconds", fmt.Sprintf("%.3f", alert.conditionLatencyMs)), + resource.TestCheckResourceAttr(alertResourceName, "tracing.0.condition.0.more_than", "true"), + resource.TestCheckResourceAttr(alertResourceName, "tracing.0.condition.0.time_window", alert.timeWindow), + resource.TestCheckResourceAttr(alertResourceName, "tracing.0.condition.0.threshold", strconv.Itoa(alert.occurrencesThreshold)), + resource.TestCheckResourceAttr(alertResourceName, "tracing.0.applications.0", "nginx"), + resource.TestCheckResourceAttr(alertResourceName, "tracing.0.subsystems.0", "subsystem-name"), + resource.TestCheckResourceAttr(alertResourceName, "tracing.0.tag_filter.0.field", "Status"), + resource.TestCheckTypeSetElemAttr(alertResourceName, "tracing.0.tag_filter.0.values.*", "filter:contains:400"), + resource.TestCheckTypeSetElemAttr(alertResourceName, 
"tracing.0.tag_filter.0.values.*", "500"), + } + checks = appendSchedulingChecks(checks, alert.daysOfWeek, alert.activityStarts, alert.activityEnds) + return checks +} + +func extractFlowAlertChecks(alert flowAlertTestParams) []resource.TestCheckFunc { + checks := []resource.TestCheckFunc{ + resource.TestCheckResourceAttrSet(alertResourceName, "id"), + resource.TestCheckResourceAttr(alertResourceName, "enabled", "true"), + resource.TestCheckResourceAttr(alertResourceName, "name", alert.name), + resource.TestCheckResourceAttr(alertResourceName, "description", alert.description), + resource.TestCheckResourceAttr(alertResourceName, "severity", alert.severity), + resource.TestCheckTypeSetElemNestedAttrs(alertResourceName, "notifications_group.0.notification.*", + map[string]string{ + "integration_id": alert.webhookID, + }), + resource.TestCheckTypeSetElemNestedAttrs(alertResourceName, "notifications_group.0.notification.*", + map[string]string{ + "email_recipients.0": alert.emailRecipients[0], + }), + resource.TestCheckResourceAttr(alertResourceName, "incident_settings.0.notify_on", alert.notifyOn), + resource.TestCheckResourceAttr(alertResourceName, "incident_settings.0.retriggering_period_minutes", strconv.Itoa(alert.notifyEveryMin)), + resource.TestCheckResourceAttr(alertResourceName, "flow.0.stage.0.group.0.sub_alerts.0.operator", "OR"), + resource.TestCheckResourceAttr(alertResourceName, "flow.0.stage.0.group.0.next_operator", "OR"), + resource.TestCheckResourceAttr(alertResourceName, "flow.0.stage.0.group.1.sub_alerts.0.operator", "AND"), + resource.TestCheckResourceAttr(alertResourceName, "flow.0.stage.0.group.1.sub_alerts.0.flow_alert.0.not", "true"), + resource.TestCheckResourceAttr(alertResourceName, "flow.0.stage.0.group.1.next_operator", "AND"), + resource.TestCheckResourceAttr(alertResourceName, "flow.0.stage.0.time_window.0.minutes", "20"), + resource.TestCheckResourceAttr(alertResourceName, "flow.0.group_by.0", "coralogix.metadata.sdkId"), + } + checks = appendSchedulingChecks(checks, alert.daysOfWeek, alert.activityStarts, alert.activityEnds) + return checks +} + +func extractCommonChecks(alert *alertCommonTestParams, alertType string) []resource.TestCheckFunc { + checks := []resource.TestCheckFunc{ + resource.TestCheckResourceAttrSet(alertResourceName, "id"), + resource.TestCheckResourceAttr(alertResourceName, "enabled", "true"), + resource.TestCheckResourceAttr(alertResourceName, "name", alert.name), + resource.TestCheckResourceAttr(alertResourceName, "description", alert.description), + resource.TestCheckResourceAttr(alertResourceName, "severity", alert.severity), + resource.TestCheckTypeSetElemNestedAttrs(alertResourceName, "notifications_group.0.notification.*", + map[string]string{ + "integration_id": alert.webhookID, + }), + resource.TestCheckTypeSetElemNestedAttrs(alertResourceName, "notifications_group.0.notification.*", + map[string]string{ + "email_recipients.0": alert.emailRecipients[0], + }), + resource.TestCheckResourceAttr(alertResourceName, "incident_settings.0.notify_on", alert.notifyOn), + resource.TestCheckResourceAttr(alertResourceName, "incident_settings.0.retriggering_period_minutes", strconv.Itoa(alert.notifyEveryMin)), + resource.TestCheckResourceAttr(alertResourceName, fmt.Sprintf("%s.0.search_query", alertType), alert.searchQuery), + } + + checks = appendSchedulingChecks(checks, alert.daysOfWeek, alert.activityStarts, alert.activityEnds) + + checks = appendSeveritiesCheck(checks, alert.alertFilters.severities, alertType) + + return checks +} + +func 
appendSeveritiesCheck(checks []resource.TestCheckFunc, severities []string, alertType string) []resource.TestCheckFunc { + for _, s := range severities { + checks = append(checks, + resource.TestCheckTypeSetElemAttr(alertResourceName, fmt.Sprintf("%s.0.severities.*", alertType), s)) + } + return checks +} + +func appendSchedulingChecks(checks []resource.TestCheckFunc, daysOfWeek []string, startTime, endTime string) []resource.TestCheckFunc { + for _, d := range daysOfWeek { + checks = append(checks, resource.TestCheckTypeSetElemAttr(alertResourceName, "scheduling.0.time_frame.0.days_enabled.*", d)) + } + checks = append(checks, resource.TestCheckResourceAttr(alertResourceName, "scheduling.0.time_frame.0.start_time", startTime)) + checks = append(checks, resource.TestCheckResourceAttr(alertResourceName, "scheduling.0.time_frame.0.end_time", endTime)) + return checks +} + +func testAccCheckAlertDestroy(s *terraform.State) error { + client := testAccProvider.Meta().(*clientset.ClientSet).Alerts() + + ctx := context.TODO() + + for _, rs := range s.RootModule().Resources { + if rs.Type != "coralogix_alert" { + continue + } + + req := &alertsv1.GetAlertByUniqueIdRequest{ + Id: wrapperspb.String(rs.Primary.ID), + } + + resp, err := client.GetAlert(ctx, req) + if err == nil { + if resp.Alert.Id.Value == rs.Primary.ID { + return fmt.Errorf("alert still exists: %s", rs.Primary.ID) + } + } + } + + return nil +} + +func testAccCoralogixResourceAlertStandard(a *standardAlertTestParams) string { + return fmt.Sprintf(`resource "coralogix_alert" "test" { + name = "%s" + description = "%s" + severity = "%s" + + notifications_group { + notification { + integration_id = "%s" + } + notification { + email_recipients = %s + } + } + + incident_settings { + notify_on = "%s" + retriggering_period_minutes = %d + } + + scheduling { + time_zone = "%s" + time_frame { + days_enabled = %s + start_time = "%s" + end_time = "%s" + } + } + + meta_labels = { + alert_type = "security" + security_severity = "high" + } + + standard { + severities = %s + search_query = "%s" + condition { + group_by = %s + less_than = true + threshold = %d + time_window = "%s" + manage_undetected_values { + enable_triggering_on_undetected_values = true + auto_retire_ratio = "%s" + } + } + } +} +`, + a.name, a.description, a.severity, a.webhookID, sliceToString(a.emailRecipients), a.notifyOn, a.notifyEveryMin, a.timeZone, + sliceToString(a.daysOfWeek), a.activityStarts, a.activityEnds, + sliceToString(a.severities), a.searchQuery, sliceToString(a.groupBy), a.occurrencesThreshold, a.timeWindow, a.deadmanRatio) +} + +func testAccCoralogixResourceAlertRatio(a *ratioAlertTestParams) string { + return fmt.Sprintf(`resource "coralogix_alert" "test" { + name = "%s" + description = "%s" + severity = "%s" + + notifications_group { + notification { + integration_id = "%s" + } + notification { + email_recipients = %s + } + } + + incident_settings { + notify_on = "%s" + retriggering_period_minutes = %d + } + + scheduling { + time_zone = "%s" + + time_frame { + days_enabled = %s + start_time = "%s" + end_time = "%s" + } + } + + ratio { + query_1 { + severities = %s + search_query = "%s" + } + query_2 { + severities = %s + search_query = "%s" + } + condition { + more_than = true + ratio_threshold = %f + time_window = "%s" + group_by = %s + group_by_q1 = true + ignore_infinity = %t + } + } +}`, + a.name, a.description, a.severity, a.webhookID, sliceToString(a.emailRecipients), a.notifyOn, a.notifyEveryMin, a.timeZone, + sliceToString(a.daysOfWeek), 
a.activityStarts, a.activityEnds, + sliceToString(a.severities), a.searchQuery, sliceToString(a.q2Severities), a.q2SearchQuery, + a.ratio, a.timeWindow, sliceToString(a.groupBy), a.ignoreInfinity) +} + +func testAccCoralogixResourceAlertNewValue(a *newValueAlertTestParams) string { + return fmt.Sprintf(`resource "coralogix_alert" "test" { + name = "%s" + description = "%s" + severity = "%s" + + notifications_group { + notification { + integration_id = "%s" + } + notification{ + email_recipients = %s + } + } + + incident_settings { + notify_on = "%s" + retriggering_period_minutes = %d + } + + scheduling { + time_zone = "%s" + + time_frame { + days_enabled = %s + start_time = "%s" + end_time = "%s" + } + } + + new_value { + severities = %s + search_query = "%s" + condition { + key_to_track = "%s" + time_window = "%s" + } + } +}`, + a.name, a.description, a.severity, a.webhookID, sliceToString(a.emailRecipients), a.notifyOn, a.notifyEveryMin, a.timeZone, + sliceToString(a.daysOfWeek), a.activityStarts, a.activityEnds, + sliceToString(a.severities), a.searchQuery, a.keyToTrack, a.timeWindow) +} + +func testAccCoralogixResourceAlertUniqueCount(a *uniqueCountAlertTestParams) string { + return fmt.Sprintf(`resource "coralogix_alert" "test" { + name = "%s" + description = "%s" + severity = "%s" + + notifications_group { + group_by_fields = %s + notification { + integration_id = "%s" + } + notification{ + email_recipients = %s + } + } + + incident_settings { + notify_on = "%s" + retriggering_period_minutes = %d + } + + scheduling { + time_zone = "%s" + time_frame { + days_enabled = %s + start_time = "%s" + end_time = "%s" + } + } + + unique_count { + severities = %s + search_query = "%s" + condition { + unique_count_key = "%s" + max_unique_values = %d + time_window = "%s" + group_by_key = "%s" + max_unique_values_for_group_by = %d + } + } +}`, + a.name, a.description, a.severity, sliceToString([]string{a.groupByKey}), a.webhookID, sliceToString(a.emailRecipients), a.notifyOn, a.notifyEveryMin, a.timeZone, + sliceToString(a.daysOfWeek), a.activityStarts, a.activityEnds, sliceToString(a.severities), + a.searchQuery, a.uniqueCountKey, a.maxUniqueValues, a.timeWindow, a.groupByKey, a.maxUniqueValuesForGroupBy) +} + +func testAccCoralogixResourceAlertTimeRelative(a *timeRelativeAlertTestParams) string { + return fmt.Sprintf(`resource "coralogix_alert" "test" { + name = "%s" + description = "%s" + severity = "%s" + + notifications_group { + notification { + integration_id = "%s" + } + notification{ + email_recipients = %s + } + } + + incident_settings { + notify_on = "%s" + retriggering_period_minutes = %d + } + + scheduling { + time_zone = "%s" + + time_frame { + days_enabled = %s + start_time = "%s" + end_time = "%s" + } + } + + time_relative { + severities = %s + search_query = "%s" + condition { + more_than = true + group_by = %s + ratio_threshold = %d + relative_time_window = "%s" + ignore_infinity = %t + } + } +}`, + a.name, a.description, a.severity, a.webhookID, sliceToString(a.emailRecipients), a.notifyOn, a.notifyEveryMin, a.timeZone, + sliceToString(a.daysOfWeek), a.activityStarts, a.activityEnds, + sliceToString(a.severities), a.searchQuery, sliceToString(a.groupBy), a.ratioThreshold, a.relativeTimeWindow, a.ignoreInfinity) +} + +func testAccCoralogixResourceAlertMetricLucene(a *metricLuceneAlertTestParams) string { + return fmt.Sprintf(`resource "coralogix_alert" "test" { + name = "%s" + description = "%s" + severity = "%s" + + notifications_group { + notification { + integration_id = "%s" + 
} + notification{ + email_recipients = %s + } + } + + incident_settings { + notify_on = "%s" + retriggering_period_minutes = %d + } + + scheduling { + time_zone = "%s" + + time_frame { + days_enabled = %s + start_time = "%s" + end_time = "%s" + } + } + + metric { + lucene { + search_query = "%s" + condition { + metric_field = "%s" + arithmetic_operator = "%s" + less_than = true + threshold = %d + arithmetic_operator_modifier = %d + sample_threshold_percentage = %d + time_window = "%s" + group_by = %s + manage_undetected_values{ + enable_triggering_on_undetected_values = false + } + } + } + } +}`, + a.name, a.description, a.severity, a.webhookID, sliceToString(a.emailRecipients), a.notifyOn, a.notifyEveryMin, a.timeZone, + sliceToString(a.daysOfWeek), a.activityStarts, a.activityEnds, a.searchQuery, a.metricField, a.arithmeticOperator, + a.threshold, a.arithmeticOperatorModifier, a.sampleThresholdPercentage, a.timeWindow, sliceToString(a.groupBy)) +} + +func testAccCoralogixResourceAlertMetricPromql(a *metricPromqlAlertTestParams) string { + return fmt.Sprintf(`resource "coralogix_alert" "test" { + name = "%s" + description = "%s" + severity = "%s" + + notifications_group { + notification { + integration_id = "%s" + } + notification{ + email_recipients = %s + } + } + + incident_settings { + notify_on = "%s" + retriggering_period_minutes = %d + } + + scheduling { + time_zone = "%s" + time_frame { + days_enabled = %s + start_time = "%s" + end_time = "%s" + } + } + + metric { + promql { + search_query = "http_requests_total{status!~\"4..\"}" + condition { + %s = true + threshold = %d + sample_threshold_percentage = %d + time_window = "%s" + min_non_null_values_percentage = %d + } + } + } +}`, + a.name, a.description, a.severity, a.webhookID, sliceToString(a.emailRecipients), a.notifyOn, a.notifyEveryMin, a.timeZone, + sliceToString(a.daysOfWeek), a.activityStarts, a.activityEnds, a.condition, a.threshold, a.sampleThresholdPercentage, + a.timeWindow, a.nonNullPercentage) +} + +func testAccCoralogixResourceAlertTracing(a *tracingAlertTestParams) string { + return fmt.Sprintf(`resource "coralogix_alert" "test" { + name = "%s" + description = "%s" + severity = "%s" + + notifications_group { + notification { + integration_id = "%s" + } + notification{ + email_recipients = %s + } + } + + incident_settings { + notify_on = "%s" + retriggering_period_minutes = %d + } + + scheduling { + time_zone = "%s" + time_frame { + days_enabled = %s + start_time = "%s" + end_time = "%s" + } + } + + tracing { + latency_threshold_milliseconds = %f + applications = ["nginx"] + subsystems = ["subsystem-name"] + tag_filter { + field = "Status" + values = ["filter:contains:400", "500"] + } + + condition { + more_than = true + time_window = "%s" + threshold = %d + } + } +}`, + a.name, a.description, a.severity, a.webhookID, sliceToString(a.emailRecipients), a.notifyOn, a.notifyEveryMin, a.timeZone, + sliceToString(a.daysOfWeek), a.activityStarts, a.activityEnds, + a.conditionLatencyMs, a.timeWindow, a.occurrencesThreshold) +} + +func testAccCoralogixResourceAlertFLow(a *flowAlertTestParams) string { + return fmt.Sprintf(`resource "coralogix_alert" "standard_alert" { + name = "standard" + severity = "Info" + + notifications_group { + notification { + email_recipients = ["example@coralogix.com"] + retriggering_period_minutes = 1 + notify_on = "Triggered_only" + } + } + + standard { + condition { + more_than = true + threshold = 5 + time_window = "30Min" + group_by = ["coralogix.metadata.sdkId"] + } + } +} + + resource 
"coralogix_alert" "test" { + name = "%s" + description = "%s" + severity = "%s" + + notifications_group { + notification { + integration_id = "%s" + } + notification{ + email_recipients = %s + } + } + + incident_settings { + notify_on = "%s" + retriggering_period_minutes = %d + } + + scheduling { + time_zone = "%s" + time_frame { + days_enabled = %s + start_time = "%s" + end_time = "%s" + } + } + + flow { + stage { + group { + sub_alerts { + operator = "OR" + flow_alert{ + user_alert_id = coralogix_alert.standard_alert.id + } + } + next_operator = "OR" + } + group { + sub_alerts { + operator = "AND" + flow_alert{ + not = true + user_alert_id = coralogix_alert.standard_alert.id + } + } + next_operator = "AND" + } + time_window { + minutes = 20 + } + } + stage { + group { + sub_alerts { + operator = "AND" + flow_alert { + user_alert_id = coralogix_alert.standard_alert.id + } + flow_alert { + not = true + user_alert_id = coralogix_alert.standard_alert.id + } + } + next_operator = "OR" + } + } + group_by = ["coralogix.metadata.sdkId"] + } +}`, + a.name, a.description, a.severity, a.webhookID, sliceToString(a.emailRecipients), a.notifyOn, a.notifyEveryMin, a.timeZone, + sliceToString(a.daysOfWeek), a.activityStarts, a.activityEnds) +} + +type standardAlertTestParams struct { + groupBy []string + occurrencesThreshold int + timeWindow string + deadmanRatio string + alertCommonTestParams +} + +type ratioAlertTestParams struct { + q2Severities, groupBy []string + ratio float64 + timeWindow, q2SearchQuery string + ignoreInfinity bool + alertCommonTestParams +} + +type newValueAlertTestParams struct { + keyToTrack, timeWindow string + alertCommonTestParams +} + +type uniqueCountAlertTestParams struct { + uniqueCountKey, timeWindow, groupByKey string + maxUniqueValues, maxUniqueValuesForGroupBy int + alertCommonTestParams +} + +type timeRelativeAlertTestParams struct { + alertCommonTestParams + ratioThreshold int + relativeTimeWindow string + groupBy []string + ignoreInfinity bool +} + +type metricLuceneAlertTestParams struct { + alertCommonTestParams + groupBy []string + metricField, timeWindow, arithmeticOperator string + threshold, arithmeticOperatorModifier, sampleThresholdPercentage int +} + +type metricPromqlAlertTestParams struct { + alertCommonTestParams + threshold, nonNullPercentage, sampleThresholdPercentage int + timeWindow string + condition string +} + +type tracingAlertTestParams struct { + alertCommonTestParams + occurrencesThreshold int + conditionLatencyMs float64 + timeWindow string + groupBy []string +} + +type flowAlertTestParams struct { + name, description, severity string + emailRecipients []string + webhookID string + notifyEveryMin int + notifyOn string + activeWhen +} + +type alertCommonTestParams struct { + name, description, severity string + webhookID string + emailRecipients []string + notifyEveryMin int + notifyOn string + searchQuery string + alertFilters + activeWhen +} + +type alertFilters struct { + severities []string +} + +type activeWhen struct { + daysOfWeek []string + activityStarts, activityEnds, timeZone string +} + +func randActiveWhen() activeWhen { + return activeWhen{ + timeZone: selectRandomlyFromSlice(validTimeZones), + daysOfWeek: selectManyRandomlyFromSlice(alertValidDaysOfWeek), + activityStarts: randHourStr(), + activityEnds: randHourStr(), + } +} + +func randHourStr() string { + return fmt.Sprintf("%s:%s", + toTwoDigitsFormat(int32(acctest.RandIntRange(0, 24))), + toTwoDigitsFormat(int32(acctest.RandIntRange(0, 60)))) +} diff --git 
a/coralogix/resource_coralogix_alerts_schedulerv3_test.go b/coralogix/resource_coralogix_alerts_schedulerv3_test.go new file mode 100644 index 00000000..f59fc90d --- /dev/null +++ b/coralogix/resource_coralogix_alerts_schedulerv3_test.go @@ -0,0 +1,117 @@ +package coralogix + +import ( + "context" + "fmt" + "testing" + + terraform2 "github.com/hashicorp/terraform-plugin-sdk/v2/terraform" + "github.com/hashicorp/terraform-plugin-testing/helper/resource" + "github.com/hashicorp/terraform-plugin-testing/terraform" + "terraform-provider-coralogix/coralogix/clientset" + alertsSchedulers "terraform-provider-coralogix/coralogix/clientset/grpc/alerts-scheduler" +) + +var ( + alertsSchedulerResourceName = "coralogix_alerts_scheduler.test" +) + +func TestAccCoralogixResourceResourceAlertsScheduler(t *testing.T) { + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + CheckDestroy: testAccCheckAlertsSchedulerDestroy, + ProtoV6ProviderFactories: testAccProtoV6ProviderFactories, + Steps: []resource.TestStep{ + { + Config: testAccCoralogixResourceAlertsScheduler(), + Check: resource.ComposeAggregateTestCheckFunc( + resource.TestCheckResourceAttr(alertsSchedulerResourceName, "name", "example"), + resource.TestCheckResourceAttr(alertsSchedulerResourceName, "filter.what_expression", "source logs | filter $d.cpodId:string == '122'"), + resource.TestCheckTypeSetElemNestedAttrs(alertsSchedulerResourceName, "filter.meta_labels.*", map[string]string{ + "key": "key", + "value": "value", + }), + resource.TestCheckResourceAttr(alertsSchedulerResourceName, "schedule.operation", "active"), + resource.TestCheckResourceAttr(alertsSchedulerResourceName, "schedule.recurring.dynamic.repeat_every", "2"), + resource.TestCheckTypeSetElemAttr(alertsSchedulerResourceName, "schedule.recurring.dynamic.frequency.weekly.days.*", "Sunday"), + resource.TestCheckResourceAttr(alertsSchedulerResourceName, "schedule.recurring.dynamic.time_frame.start_time", "2021-01-04T00:00:00.000"), + resource.TestCheckResourceAttr(alertsSchedulerResourceName, "schedule.recurring.dynamic.time_frame.duration.for_over", "2"), + resource.TestCheckResourceAttr(alertsSchedulerResourceName, "schedule.recurring.dynamic.time_frame.duration.frequency", "hours"), + resource.TestCheckResourceAttr(alertsSchedulerResourceName, "schedule.recurring.dynamic.time_frame.time_zone", "UTC+2"), + resource.TestCheckResourceAttr(alertsSchedulerResourceName, "schedule.recurring.dynamic.termination_date", "2025-01-01T00:00:00.000"), + ), + }, + { + ResourceName: alertsSchedulerResourceName, + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func testAccCheckAlertsSchedulerDestroy(s *terraform.State) error { + testAccProvider = OldProvider() + rc := terraform2.ResourceConfig{} + testAccProvider.Configure(context.Background(), &rc) + client := testAccProvider.Meta().(*clientset.ClientSet).AlertSchedulers() + ctx := context.TODO() + + for _, rs := range s.RootModule().Resources { + if rs.Type != "coralogix_alerts_scheduler" { + continue + } + + req := &alertsSchedulers.GetAlertSchedulerRuleRequest{ + AlertSchedulerRuleId: rs.Primary.ID, + } + + resp, err := client.GetAlertScheduler(ctx, req) + if err == nil { + if resp.GetAlertSchedulerRule().GetId() == rs.Primary.ID { + return fmt.Errorf("alerts-scheduler still exists: %s", rs.Primary.ID) + } + } + } + + return nil +} + +func testAccCoralogixResourceAlertsScheduler() string { + return `resource "coralogix_alerts_scheduler" "test" { + name = "example" + description = "example" + 
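+  # what_expression is written in DataPrime; together with meta_labels it selects which alerts this scheduler applies to.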
filter = { + what_expression = "source logs | filter $d.cpodId:string == '122'" + meta_labels = [ + { + key = "key" + value = "value" + } + ] + } + schedule = { + operation = "active" + recurring = { + dynamic = { + repeat_every = 2 + frequency = { + weekly = { + days = ["Sunday"] + } + } + time_frame = { + start_time = "2021-01-04T00:00:00.000" + duration = { + for_over = 2 + frequency = "hours" + } + time_zone = "UTC+2" + } + termination_date = "2025-01-01T00:00:00.000" + } + } + } +} +` +} diff --git a/coralogix/resource_coralogix_group.go b/coralogix/resource_coralogix_group.go index 9b1e1d7f..b2d4cefd 100644 --- a/coralogix/resource_coralogix_group.go +++ b/coralogix/resource_coralogix_group.go @@ -311,7 +311,7 @@ type GroupResourceModel struct { ScopeID types.String `tfsdk:"scope_id"` } -func extractGroup(ctx context.Context, plan *GroupResourceModel) (*cxsdk.SCIMGroup, diag.Diagnostics) { +func extractGroup(ctx context.Context, plan *GroupResourceModel) (*cxsdk.Group, diag.Diagnostics) { members, diags := extractGroupMembers(ctx, plan.Members) if diags.HasError() { return nil, diags diff --git a/coralogix/utils copy.go.nope b/coralogix/utils copy.go.nope new file mode 100644 index 00000000..a3d0d8a5 --- /dev/null +++ b/coralogix/utils copy.go.nope @@ -0,0 +1,787 @@ +package coralogix + +import ( + "bytes" + "context" + "encoding/json" + "fmt" + "maps" + "math/big" + "math/rand" + "net/url" + "reflect" + "regexp" + "slices" + "strconv" + "time" + + gouuid "github.com/google/uuid" + "github.com/hashicorp/terraform-plugin-framework/attr" + datasourceschema "github.com/hashicorp/terraform-plugin-framework/datasource/schema" + diag2 "github.com/hashicorp/terraform-plugin-framework/diag" + resourceschema "github.com/hashicorp/terraform-plugin-framework/resource/schema" + "github.com/hashicorp/terraform-plugin-framework/schema/validator" + "github.com/hashicorp/terraform-plugin-framework/types" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" + "github.com/hashicorp/terraform-plugin-testing/helper/acctest" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" + "google.golang.org/protobuf/types/known/wrapperspb" +) + +var ( + msInHour = int(time.Hour.Milliseconds()) + msInMinute = int(time.Minute.Milliseconds()) + msInSecond = int(time.Second.Milliseconds()) +) + +func formatRpcErrors(err error, url, requestStr string) string { + switch status.Code(err) { + case codes.PermissionDenied, codes.Unauthenticated: + return fmt.Sprintf("permission denied for url - %s\ncheck your api-key and permissions", url) + case codes.Internal: + return fmt.Sprintf("internal error in Coralogix backend.\nerror - %s\nurl - %s\nrequest - %s", err, url, requestStr) + case codes.InvalidArgument: + return fmt.Sprintf("invalid argument error.\nerror - %s\nurl - %s\nrequest - %s", err, url, requestStr) + default: + return err.Error() + } +} + +// datasourceSchemaFromResourceSchema is a recursive func that +// converts an existing Resource schema to a Datasource schema. +// All schema elements are copied, but certain attributes are ignored or changed: +// - all attributes have Computed = true +// - all attributes have ForceNew, Required = false +// - Validation funcs and attributes (e.g. 
MaxItems) are not copied +func datasourceSchemaFromResourceSchema(rs map[string]*schema.Schema) map[string]*schema.Schema { + ds := make(map[string]*schema.Schema, len(rs)) + for k, v := range rs { + dv := &schema.Schema{ + Computed: true, + ForceNew: false, + Required: false, + Description: v.Description, + Type: v.Type, + } + + switch v.Type { + case schema.TypeSet: + dv.Set = v.Set + fallthrough + case schema.TypeList: + // List & Set types are generally used for 2 cases: + // - a list/set of simple primitive values (e.g. list of strings) + // - a sub resource + if elem, ok := v.Elem.(*schema.Resource); ok { + // handle the case where the Element is a sub-resource + dv.Elem = &schema.Resource{ + Schema: datasourceSchemaFromResourceSchema(elem.Schema), + } + } else { + // handle simple primitive case + dv.Elem = v.Elem + } + + default: + // Elem of all other types are copied as-is + dv.Elem = v.Elem + + } + ds[k] = dv + + } + return ds +} + +func frameworkDatasourceSchemaFromFrameworkResourceSchema(rs resourceschema.Schema) datasourceschema.Schema { + attributes := convertAttributes(rs.Attributes) + if idSchema, ok := rs.Attributes["id"]; ok { + attributes["id"] = datasourceschema.StringAttribute{ + Required: true, + Description: idSchema.GetDescription(), + MarkdownDescription: idSchema.GetMarkdownDescription(), + } + } + + return datasourceschema.Schema{ + Attributes: attributes, + //Blocks: convertBlocks(rs.Blocks), + Description: rs.Description, + MarkdownDescription: rs.MarkdownDescription, + DeprecationMessage: rs.DeprecationMessage, + } +} + +func convertAttributes(attributes map[string]resourceschema.Attribute) map[string]datasourceschema.Attribute { + result := make(map[string]datasourceschema.Attribute, len(attributes)) + for k, v := range attributes { + result[k] = convertAttribute(v) + } + return result +} + +func convertAttribute(resourceAttribute resourceschema.Attribute) datasourceschema.Attribute { + switch attr := resourceAttribute.(type) { + case resourceschema.BoolAttribute: + return datasourceschema.BoolAttribute{ + Computed: true, + Description: attr.Description, + MarkdownDescription: attr.MarkdownDescription, + } + case resourceschema.Float64Attribute: + return datasourceschema.Float64Attribute{ + Computed: true, + Description: attr.Description, + MarkdownDescription: attr.MarkdownDescription, + } + case resourceschema.Int64Attribute: + return datasourceschema.Int64Attribute{ + Computed: true, + Description: attr.Description, + MarkdownDescription: attr.MarkdownDescription, + } + case resourceschema.NumberAttribute: + return datasourceschema.NumberAttribute{ + Computed: true, + Description: attr.Description, + MarkdownDescription: attr.MarkdownDescription, + } + case resourceschema.StringAttribute: + return datasourceschema.StringAttribute{ + Computed: true, + Description: attr.Description, + MarkdownDescription: attr.MarkdownDescription, + } + case resourceschema.MapAttribute: + return datasourceschema.MapAttribute{ + Computed: true, + Description: attr.Description, + MarkdownDescription: attr.MarkdownDescription, + ElementType: attr.ElementType, + } + case resourceschema.ObjectAttribute: + return datasourceschema.ObjectAttribute{ + Computed: true, + Description: attr.Description, + MarkdownDescription: attr.MarkdownDescription, + AttributeTypes: attr.AttributeTypes, + } + case resourceschema.SetAttribute: + return datasourceschema.SetAttribute{ + Computed: true, + Description: attr.Description, + MarkdownDescription: attr.MarkdownDescription, + ElementType: 
attr.ElementType, + } + case resourceschema.ListNestedAttribute: + return datasourceschema.ListNestedAttribute{ + Computed: true, + Description: attr.Description, + MarkdownDescription: attr.MarkdownDescription, + NestedObject: datasourceschema.NestedAttributeObject{ + Attributes: convertAttributes(attr.NestedObject.Attributes), + }, + } + case resourceschema.ListAttribute: + return datasourceschema.ListAttribute{ + Computed: true, + Description: attr.Description, + MarkdownDescription: attr.MarkdownDescription, + ElementType: attr.ElementType, + } + case resourceschema.MapNestedAttribute: + return datasourceschema.MapNestedAttribute{ + Computed: true, + Description: attr.Description, + MarkdownDescription: attr.MarkdownDescription, + NestedObject: datasourceschema.NestedAttributeObject{ + Attributes: convertAttributes(attr.NestedObject.Attributes), + }, + } + case resourceschema.SetNestedAttribute: + return datasourceschema.SetNestedAttribute{ + Computed: true, + Description: attr.Description, + MarkdownDescription: attr.MarkdownDescription, + NestedObject: datasourceschema.NestedAttributeObject{ + Attributes: convertAttributes(attr.NestedObject.Attributes), + }, + } + case resourceschema.SingleNestedAttribute: + return datasourceschema.SingleNestedAttribute{ + Computed: true, + Description: attr.Description, + MarkdownDescription: attr.MarkdownDescription, + Attributes: convertAttributes(attr.Attributes), + } + default: + panic(fmt.Sprintf("unknown resource attribute type: %T", resourceAttribute)) + } +} + +func interfaceSliceToStringSlice(s []interface{}) []string { + result := make([]string, 0, len(s)) + for _, v := range s { + result = append(result, v.(string)) + } + return result +} + +func attrSliceToFloat32Slice(ctx context.Context, arr []attr.Value) ([]float32, diag2.Diagnostics) { + var diags diag2.Diagnostics + result := make([]float32, 0, len(arr)) + for _, v := range arr { + val, err := v.ToTerraformValue(ctx) + if err != nil { + diags.AddError("Failed to convert value to Terraform", err.Error()) + continue + } + var d big.Float + if err = val.As(&d); err != nil { + diags.AddError("Failed to convert value to float64", err.Error()) + continue + } + f, _ := d.Float64() + result = append(result, float32(f)) + } + return result, diags +} + +func float32SliceTypeList(ctx context.Context, arr []float32) (types.List, diag2.Diagnostics) { + if len(arr) == 0 { + return types.ListNull(types.Float64Type), nil + } + result := make([]attr.Value, 0, len(arr)) + for _, v := range arr { + if float32(int(v)) != v { + result = append(result, types.Float64Value(float64(v*10000)/float64(10000))) + } else { + result = append(result, types.Float64Value(float64(v))) + } + } + return types.ListValueFrom(ctx, types.Float64Type, result) +} + +func wrappedStringSliceToTypeStringSet(s []*wrapperspb.StringValue) types.Set { + if len(s) == 0 { + return types.SetNull(types.StringType) + } + elements := make([]attr.Value, 0, len(s)) + for _, v := range s { + elements = append(elements, types.StringValue(v.GetValue())) + } + return types.SetValueMust(types.StringType, elements) +} + +func stringSliceToTypeStringSet(s []string) types.Set { + if len(s) == 0 { + return types.SetNull(types.StringType) + } + elements := make([]attr.Value, 0, len(s)) + for _, v := range s { + elements = append(elements, types.StringValue(v)) + } + return types.SetValueMust(types.StringType, elements) +} + +func int32SliceToTypeInt64Set(arr []int32) types.Set { + if len(arr) == 0 { + return types.SetNull(types.Int64Type) + } + 
elements := make([]attr.Value, 0, len(arr)) + for _, n := range arr { + elements = append(elements, types.Int64Value(int64(n))) + } + return types.SetValueMust(types.StringType, elements) +} + +func wrappedStringSliceToTypeStringList(s []*wrapperspb.StringValue) types.List { + if len(s) == 0 { + return types.ListNull(types.StringType) + } + elements := make([]attr.Value, 0, len(s)) + for _, v := range s { + elements = append(elements, types.StringValue(v.GetValue())) + } + return types.ListValueMust(types.StringType, elements) +} + +func typeStringSliceToWrappedStringSlice(ctx context.Context, s []attr.Value) ([]*wrapperspb.StringValue, diag2.Diagnostics) { + var diags diag2.Diagnostics + result := make([]*wrapperspb.StringValue, 0, len(s)) + for _, v := range s { + val, err := v.ToTerraformValue(ctx) + if err != nil { + diags.AddError("Failed to convert value to Terraform", err.Error()) + continue + } + var str string + + if err = val.As(&str); err != nil { + diags.AddError("Failed to convert value to string", err.Error()) + continue + } + result = append(result, wrapperspb.String(str)) + } + return result, diags +} + +func typeInt64ToWrappedInt64(v types.Int64) *wrapperspb.Int64Value { + if v.IsNull() || v.IsUnknown() { + return nil + } + return wrapperspb.Int64(v.ValueInt64()) +} + +func typeInt64ToWrappedInt32(v types.Int64) *wrapperspb.Int32Value { + if v.IsNull() || v.IsUnknown() { + return nil + } + return wrapperspb.Int32(int32(v.ValueInt64())) +} + +func typeInt64ToWrappedUint32(v types.Int64) *wrapperspb.UInt32Value { + if v.IsNull() || v.IsUnknown() { + return nil + } + return wrapperspb.UInt32(uint32(v.ValueInt64())) +} + +func typeBoolToWrapperspbBool(v types.Bool) *wrapperspb.BoolValue { + if v.IsNull() || v.IsUnknown() { + return nil + } + return wrapperspb.Bool(v.ValueBool()) +} + +func typeStringSliceToStringSlice(ctx context.Context, s []attr.Value) ([]string, diag2.Diagnostics) { + result := make([]string, 0, len(s)) + var diags diag2.Diagnostics + for _, v := range s { + val, err := v.ToTerraformValue(ctx) + if err != nil { + diags.AddError("Failed to convert value to Terraform", err.Error()) + continue + } + var str string + if err = val.As(&str); err != nil { + diags.AddError("Failed to convert value to Terraform", err.Error()) + continue + } + result = append(result, str) + } + if diags.HasError() { + return nil, diags + } + return result, nil +} + +func typeInt64SliceToInt32Slice(ctx context.Context, s []attr.Value) ([]int32, diag2.Diagnostics) { + result := make([]int32, 0, len(s)) + var diags diag2.Diagnostics + for _, v := range s { + val, err := v.ToTerraformValue(ctx) + if err != nil { + diags.AddError("Failed to convert value to Terraform", err.Error()) + continue + } + var n int64 + if err = val.As(&n); err != nil { + diags.AddError("Failed to convert value to Terraform", err.Error()) + continue + } + result = append(result, int32(n)) + } + if diags.HasError() { + return nil, diags + } + return result, nil +} + +func timeInDaySchema(description string) *schema.Schema { + timeRegex := regexp.MustCompile(`^(0\d|1\d|2[0-3]):[0-5]\d$`) + return &schema.Schema{ + Type: schema.TypeString, + Required: true, + ValidateFunc: validation.StringMatch(timeRegex, "not valid time, only HH:MM format is allowed"), + Description: description, + } +} + +func toTwoDigitsFormat(digit int32) string { + digitStr := fmt.Sprintf("%d", digit) + if len(digitStr) == 1 { + digitStr = "0" + digitStr + } + return digitStr +} + +func timeSchema(description string) *schema.Schema { + return 
&schema.Schema{ + Type: schema.TypeList, + Optional: true, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "hours": { + Type: schema.TypeInt, + Optional: true, + ValidateFunc: validation.IntAtLeast(0), + }, + "minutes": { + Type: schema.TypeInt, + Optional: true, + ValidateFunc: validation.IntAtLeast(0), + }, + "seconds": { + Type: schema.TypeInt, + Optional: true, + ValidateFunc: validation.IntAtLeast(0), + }, + }, + }, + Description: description, + } +} + +func expandTimeToMS(v interface{}) int { + l := v.([]interface{}) + if len(l) == 0 { + return 0 + } + + m := l[0].(map[string]interface{}) + + timeMS := msInHour * m["hours"].(int) + timeMS += msInMinute * m["minutes"].(int) + timeMS += msInSecond * m["seconds"].(int) + + return timeMS +} + +func flattenTimeframe(timeMS int) []interface{} { + if timeMS == 0 { + return nil + } + + hours := timeMS / msInHour + timeMS -= hours * msInHour + + minutes := timeMS / msInMinute + timeMS -= minutes * msInMinute + + seconds := timeMS / msInSecond + + return []interface{}{map[string]int{ + "hours": hours, + "minutes": minutes, + "seconds": seconds, + }} +} + +func sliceToString(data []string) string { + b, _ := json.Marshal(data) + return fmt.Sprintf("%v", string(b)) +} + +func randFloat() float64 { + r := rand.New(rand.NewSource(99)) + return r.Float64() +} + +func selectRandomlyFromSlice(s []string) string { + return s[acctest.RandIntRange(0, len(s))] +} + +func selectManyRandomlyFromSlice(s []string) []string { + r := rand.New(rand.NewSource(99)) + indexPerms := r.Perm(len(s)) + itemsToSelect := acctest.RandIntRange(0, len(s)+1) + result := make([]string, 0, itemsToSelect) + for _, index := range indexPerms { + result = append(result, s[index]) + } + return result +} + +func getKeysStrings(m map[string]string) []string { + result := make([]string, 0) + for k := range m { + result = append(result, k) + } + return result +} + +func getKeysInterface(m map[string]interface{}) []string { + result := make([]string, 0) + for k := range m { + result = append(result, k) + } + return result +} + +func getKeysInt32(m map[string]int32) []string { + result := make([]string, 0) + for k := range m { + result = append(result, k) + } + return result +} + +func reverseMapStrings(m map[string]string) map[string]string { + n := make(map[string]string) + for k, v := range m { + n[v] = k + } + return n +} + +func strToUint32(str string) uint32 { + n, _ := strconv.ParseUint(str, 10, 32) + return uint32(n) +} + +func uint32ToStr(n uint32) string { + return strconv.FormatUint(uint64(n), 10) +} + +type urlValidationFuncFramework struct { +} + +func (u urlValidationFuncFramework) Description(_ context.Context) string { + return "string must be a valid url format" +} + +func (u urlValidationFuncFramework) MarkdownDescription(ctx context.Context) string { + return u.Description(ctx) +} + +func (u urlValidationFuncFramework) ValidateString(ctx context.Context, req validator.StringRequest, resp *validator.StringResponse) { + if req.ConfigValue.IsNull() || req.ConfigValue.IsUnknown() { + return + } + + value := req.ConfigValue.ValueString() + + if _, err := url.ParseRequestURI(value); err != nil { + resp.Diagnostics.Append( + diag2.NewAttributeErrorDiagnostic( + req.Path, + "Invalid Attribute Value Format", + fmt.Sprintf("Attribute %s in not a valid url - %s", req.Path, value), + ), + ) + } +} + +const letterBytes = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ" + +func RandStringBytes(n int) string { + b := make([]byte, n) + for i := 
range b { + b[i] = letterBytes[rand.Intn(len(letterBytes))] + } + return string(b) +} + +func JSONStringsEqual(s1, s2 string) bool { + b1 := bytes.NewBufferString("") + if err := json.Compact(b1, []byte(s1)); err != nil { + return false + } + + b2 := bytes.NewBufferString("") + if err := json.Compact(b2, []byte(s2)); err != nil { + return false + } + + return JSONBytesEqual(b1.Bytes(), b2.Bytes()) +} + +func JSONBytesEqual(b1, b2 []byte) bool { + var o1 interface{} + if err := json.Unmarshal(b1, &o1); err != nil { + return false + } + + var o2 interface{} + if err := json.Unmarshal(b2, &o2); err != nil { + return false + } + + return reflect.DeepEqual(o1, o2) +} + +func randBool() bool { + return rand.Int()%2 == 0 +} + +func typeStringToWrapperspbString(str types.String) *wrapperspb.StringValue { + if str.IsNull() || str.IsUnknown() { + return nil + + } + return wrapperspb.String(str.ValueString()) +} + +func wrapperspbFloat64ToTypeFloat64(num *wrapperspb.FloatValue) types.Float64 { + if num == nil { + return types.Float64Null() + } + + return types.Float64Value(float64(num.GetValue())) +} + +func typeStringToStringPointer(str types.String) *string { + if str.IsNull() || str.IsUnknown() { + return nil + } + result := new(string) + *result = str.ValueString() + return result +} + +func stringPointerToTypeString(str *string) types.String { + if str == nil { + return types.StringNull() + } + return types.StringValue(*str) +} + +func typeFloat64ToWrapperspbDouble(num types.Float64) *wrapperspb.DoubleValue { + if num.IsNull() { + return nil + } + + return wrapperspb.Double(num.ValueFloat64()) +} + +func typeFloat64ToWrapperspbFloat(num types.Float64) *wrapperspb.FloatValue { + if num.IsNull() { + return nil + } + + return wrapperspb.Float(float32(num.ValueFloat64())) +} + +func wrapperspbStringToTypeString(str *wrapperspb.StringValue) types.String { + if str == nil { + return types.StringNull() + } + + return types.StringValue(str.GetValue()) +} + +func wrapperspbInt64ToTypeInt64(num *wrapperspb.Int64Value) types.Int64 { + if num == nil { + return types.Int64Null() + } + + return types.Int64Value(num.GetValue()) +} + +func wrapperspbUint32ToTypeInt64(num *wrapperspb.UInt32Value) types.Int64 { + if num == nil { + return types.Int64Null() + } + + return types.Int64Value(int64(num.GetValue())) +} + +func wrapperspbDoubleToTypeFloat64(num *wrapperspb.DoubleValue) types.Float64 { + if num == nil { + return types.Float64Null() + } + + return types.Float64Value(num.GetValue()) +} + +func wrapperspbBoolToTypeBool(b *wrapperspb.BoolValue) types.Bool { + if b == nil { + return types.BoolNull() + } + + return types.BoolValue(b.GetValue()) +} + +func wrapperspbInt32ToTypeInt64(num *wrapperspb.Int32Value) types.Int64 { + if num == nil { + return types.Int64Null() + } + + return types.Int64Value(int64(num.GetValue())) +} + +func ReverseMap[K, V comparable](m map[K]V) map[V]K { + n := make(map[V]K) + for k, v := range m { + n[v] = k + } + return n +} + +func GetKeys[K cmp.Ordered, V comparable](m map[K]V) []K { + return slices.Sorted(maps.Keys(m)) +} + +func GetValues[K, V cmp.Ordered](m map[K]V) []V { + return slices.Sorted(maps.Values(m)) +} + +func typeMapToStringMap(ctx context.Context, m types.Map) (map[string]string, diag2.Diagnostics) { + var result map[string]string + diags := m.ElementsAs(ctx, &result, true) + return result, diags +} + +func expandUuid(uuid types.String) *wrapperspb.StringValue { + if uuid.IsNull() || uuid.IsUnknown() { + return &wrapperspb.StringValue{Value: gouuid.NewString()} + } + return
&wrapperspb.StringValue{Value: uuid.ValueString()} +} + +func retryableStatusCode(statusCode codes.Code) bool { + switch statusCode { + case codes.Unavailable, codes.DeadlineExceeded, codes.Aborted: + return true + default: + return false + } +} + +func uint32SliceToWrappedUint32Slice(s []uint32) []*wrapperspb.UInt32Value { + result := make([]*wrapperspb.UInt32Value, 0, len(s)) + for _, n := range s { + result = append(result, wrapperspb.UInt32(n)) + } + return result +} + +func convertSchemaWithoutID(rs resourceschema.Schema) datasourceschema.Schema { + attributes := convertAttributes(rs.Attributes) + return datasourceschema.Schema{ + Attributes: attributes, + Description: rs.Description, + MarkdownDescription: rs.MarkdownDescription, + DeprecationMessage: rs.DeprecationMessage, + } +} + +func typeStringToWrapperspbUint32(str types.String) (*wrapperspb.UInt32Value, diag2.Diagnostics) { + parsed, err := strconv.ParseUint(str.ValueString(), 10, 32) + if err != nil { + return nil, diag2.Diagnostics{diag2.NewErrorDiagnostic("Failed to convert string to uint32", err.Error())} + } + return wrapperspb.UInt32(uint32(parsed)), nil +} + +func WrapperspbUint32ToString(num *wrapperspb.UInt32Value) types.String { + if num == nil { + return types.StringNull() + } + return types.StringValue(strconv.FormatUint(uint64(num.GetValue()), 10)) + +} diff --git a/coralogix/utils.go b/coralogix/utils.go index 6d33c9e3..2d780c12 100644 --- a/coralogix/utils.go +++ b/coralogix/utils.go @@ -16,14 +16,17 @@ package coralogix import ( "bytes" + "cmp" "context" "encoding/json" "fmt" + "maps" "math/big" "math/rand" "net/url" "reflect" "regexp" + "slices" "strconv" "time" @@ -754,12 +757,12 @@ func ReverseMap[K, V comparable](m map[K]V) map[V]K { return n } -func GetKeys[K, V comparable](m map[K]V) []K { - result := make([]K, 0) - for k := range m { - result = append(result, k) - } - return result +func GetKeys[K cmp.Ordered, V comparable](m map[K]V) []K { + return slices.Sorted(maps.Keys(m)) +} + +func GetValues[K, V cmp.Ordered](m map[K]V) []V { + return slices.Sorted(maps.Values(m)) } func parseNumInt32(desired string) int32 { diff --git a/go.mod b/go.mod index 3bdd6870..4f37c249 100644 --- a/go.mod +++ b/go.mod @@ -4,9 +4,10 @@ go 1.22.5 toolchain go1.23.0 +replace github.com/coralogix/coralogix-management-sdk => ../coralogix-management-sdk + require ( github.com/ahmetalpbalkan/go-linq v3.0.0+incompatible - github.com/coralogix/coralogix-management-sdk v0.2.1 github.com/google/uuid v1.6.0 github.com/grafana/grafana-api-golang-client v0.27.0 github.com/grpc-ecosystem/go-grpc-middleware v1.4.0 @@ -23,6 +24,7 @@ require ( google.golang.org/grpc v1.65.0 google.golang.org/protobuf v1.34.2 gopkg.in/yaml.v3 v3.0.1 + github.com/coralogix/coralogix-management-sdk v0.2.2-0.20240828115216-6f699f7a4510 ) require ( diff --git a/go.sum b/go.sum index 2d84d2b7..61f823c6 100644 --- a/go.sum +++ b/go.sum @@ -40,6 +40,8 @@ github.com/cloudflare/circl v1.3.7/go.mod h1:sRTcRWXGLrKw6yIGJ+l7amYJFfAXbZG0kBS github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= github.com/coralogix/coralogix-management-sdk v0.2.1 h1:5g5F37DGfZ3AL91S3J1vtmAI2YPU4zjMKymJe6dQ86A= github.com/coralogix/coralogix-management-sdk v0.2.1/go.mod h1:1aa/coMEMe5M1NvnRymOrBF2iCdefaWR0CMaMjPu0oI= +github.com/coralogix/coralogix-management-sdk v0.2.2-0.20240828115216-6f699f7a4510 h1:KSQGSBFQBcePt8rFRbHQenyiMxqiEHvZGq6p/o47K7c= +github.com/coralogix/coralogix-management-sdk 
v0.2.2-0.20240828115216-6f699f7a4510/go.mod h1:1aa/coMEMe5M1NvnRymOrBF2iCdefaWR0CMaMjPu0oI= github.com/cyphar/filepath-securejoin v0.2.4 h1:Ugdm7cg7i6ZK6x3xDF1oEu1nfkyfH53EtKeQYTC3kyg= github.com/cyphar/filepath-securejoin v0.2.4/go.mod h1:aPGpWjXOXUn2NCNjFvBE6aRxGGx79pTxQpKOJNYHHl4= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= From 72ba5455be15337a194da9eae290d2ce278c6c47 Mon Sep 17 00:00:00 2001 From: Claus Matzinger Date: Fri, 13 Sep 2024 16:23:47 +0200 Subject: [PATCH 03/12] feat: alerts v3 (WIP) --- coralogix/resource_coralogix_alert.go | 1191 +++++++++++-------------- coralogix/utils.go | 20 + 2 files changed, 564 insertions(+), 647 deletions(-) diff --git a/coralogix/resource_coralogix_alert.go b/coralogix/resource_coralogix_alert.go index 141f65a0..396fa503 100644 --- a/coralogix/resource_coralogix_alert.go +++ b/coralogix/resource_coralogix_alert.go @@ -94,14 +94,6 @@ var ( logSeveritySchemaToProtoMap = ReverseMap(logSeverityProtoToSchemaMap) validLogSeverities = GetKeys(logSeveritySchemaToProtoMap) - // DELETEME - // evaluationWindowTypeProtoToSchemaMap = map[cxsdk.AlertEvaluationWindow]string{ - // cxsdk.AlertEvaluationWindowRollingOrUnspecified: "Rolling", - // cxsdk.AlertEvaluationWindowDynamic: "Dynamic", - // } - // evaluationWindowTypeSchemaToProtoMap = ReverseMap(evaluationWindowTypeProtoToSchemaMap) - // validEvaluationWindowTypes = GetKeys(evaluationWindowTypeSchemaToProtoMap) - logsTimeWindowValueProtoToSchemaMap = map[cxsdk.LogsTimeWindowValue]string{ cxsdk.LogsTimeWindow5MinutesOrUnspecified: "5_MINUTES", cxsdk.LogsTimeWindow10Minutes: "10_MINUTES", @@ -259,13 +251,36 @@ var ( cxsdk.LogsThresholdConditionTypeMoreThanOrUnspecified: "MORE_THAN", cxsdk.LogsThresholdConditionTypeLessThan: "LESS_THAN", } - logsThresholdConditionValues = GetValues(logsThresholdConditionMap) + logsThresholdConditionToProtoMap = ReverseMap(logsThresholdConditionMap) + logsThresholdConditionValues = GetValues(logsThresholdConditionMap) + + logsTimeRelativeConditionMap = map[cxsdk.LogsTimeRelativeConditionType]string{ + cxsdk.LogsTimeRelativeConditionTypeMoreThanOrUnspecified: "MORE_THAN", + cxsdk.LogsTimeRelativeConditionTypeLessThan: "LESS_THAN", + } + logsTimeRelativeConditionToProtoMap = ReverseMap(logsTimeRelativeConditionMap) + logsTimeRelativeConditionValues = GetValues(logsTimeRelativeConditionMap) - logsRatioConditionConditionMap = map[cxsdk.LogsRatioConditionType]string{ + logsRatioConditionMap = map[cxsdk.LogsRatioConditionType]string{ cxsdk.LogsRatioConditionTypeMoreThanOrUnspecified: "MORE_THAN", cxsdk.LogsRatioConditionTypeLessThan: "LESS_THAN", } - logsRatioConditionConditionMapValues = GetValues(logsRatioConditionConditionMap) + logsRatioConditionMapValues = GetValues(logsRatioConditionMap) + logsRatioConditionSchemaToProtoMap = ReverseMap(logsRatioConditionMap) + + metricsThresholdConditionMap = map[cxsdk.MetricThresholdConditionType]string{ + cxsdk.MetricThresholdConditionTypeMoreThanOrUnspecified: "MORE_THAN", + cxsdk.MetricThresholdConditionTypeLessThan: "LESS_THAN", + cxsdk.MetricThresholdConditionTypeMoreThanOrEquals: "MORE_THAN_OR_EQUALS", + cxsdk.MetricThresholdConditionTypeLessThanOrEquals: "LESS_THAN_OR_EQUALS", + } + metricsThresholdConditionValues = GetValues(metricsThresholdConditionMap) + + metricsUnusualConditionMap = map[cxsdk.MetricUnusualConditionType]string{ + cxsdk.MetricUnusualConditionTypeMoreThanOrUnspecified: "MORE_THAN", + cxsdk.MetricUnusualConditionTypeLessThan: "LESS_THAN", + } + 
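// Schema-facing condition values ("LESS_THAN", "MORE_THAN") derived from the proto map above via GetValues; consumed by the stringvalidator.OneOf check on the metric_unusual "condition" attribute below. + 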
metricsUnusualConditionValues = GetValues(metricsUnusualConditionMap) ) func NewAlertResource() resource.Resource { @@ -277,14 +292,14 @@ type AlertResource struct { } type AlertResourceModel struct { - ID types.String `tfsdk:"id"` - Name types.String `tfsdk:"name"` - Description types.String `tfsdk:"description"` - Enabled types.Bool `tfsdk:"enabled"` - Priority types.String `tfsdk:"priority"` - Schedule types.Object `tfsdk:"schedule"` // AlertScheduleModel - Type types.Object `tfsdk:"type"` // AlertTypeDefinitionModel - + ID types.String `tfsdk:"id"` + Name types.String `tfsdk:"name"` + Description types.String `tfsdk:"description"` + Enabled types.Bool `tfsdk:"enabled"` + Priority types.String `tfsdk:"priority"` + Schedule types.Object `tfsdk:"schedule"` // AlertScheduleModel + TypeDefinition types.Object `tfsdk:"type_definition"` // AlertTypeDefinitionModel + Type types.String `tfsdk:"type"` // AlertType GroupBy types.Set `tfsdk:"group_by"` // []types.String IncidentsSettings types.Object `tfsdk:"incidents_settings"` // IncidentsSettingsModel NotificationGroup types.Object `tfsdk:"notification_group"` // NotificationGroupModel @@ -303,10 +318,11 @@ type AlertTypeDefinitionModel struct { LogsNewValue types.Object `tfsdk:"logs_new_value"` // LogsNewValueModel LogsUniqueCount types.Object `tfsdk:"logs_unique_count"` // LogsUniqueCountModel LogsTimeRelativeThreshold types.Object `tfsdk:"logs_time_relative_threshold"` // LogsTimeRelativeThresholdModel - LogsMetricThreshold types.Object `tfsdk:"logs_metric_threshold"` // LogsMetricThresholdModel - LogsMetricUnusual types.Object `tfsdk:"logs_metric_unusual"` // LogsMetricUnusualModel - LogsTracingImmediate types.Object `tfsdk:"logs_tracing_immediate"` // LogsTracingImmediateModel - LogsTracingThreshold types.Object `tfsdk:"logs_tracing_threshold"` // LogsTracingThresholdModel + MetricThreshold types.Object `tfsdk:"metric_threshold"` // MetricThresholdModel + MetricUnusual types.Object `tfsdk:"metric_unusual"` // MetricUnusualModel + TracingImmediate types.Object `tfsdk:"tracing_immediate"` // TracingImmediateModel + TracingThreshold types.Object `tfsdk:"tracing_threshold"` // TracingThresholdModel + Flow types.Object `tfsdk:"flow"` // FlowModel } type IncidentsSettingsModel struct { @@ -348,7 +364,6 @@ type RetriggeringPeriodModel struct { } // Alert Types: - type LogsImmediateModel struct { LogsFilter types.Object `tfsdk:"logs_filter"` // AlertsLogsFilterModel NotificationPayloadFilter types.Set `tfsdk:"notification_payload_filter"` // []types.String @@ -378,7 +393,7 @@ type LogsRatioThresholdModel struct { } type LogsNewValueModel struct { - Rules types.List `tfsdk:"rules"` // []NewValueRulesModel + Rules types.List `tfsdk:"rules"` // []NewValueRuleModel LogsFilter types.Object `tfsdk:"logs_filter"` // AlertsLogsFilterModel NotificationPayloadFilter types.Set `tfsdk:"notification_payload_filter"` // []types.String } @@ -418,15 +433,6 @@ type MetricRule struct { MissingValues types.Object `tfsdk:"missing_values"` // MetricMissingValuesModel } -// DELETEME -// type MetricLessThanModel struct { -// MetricFilter types.Object `tfsdk:"metric_filter"` // MetricFilterModel -// OfTheLast types.Object `tfsdk:"of_the_last"` // MetricTimeWindowModel -// MissingValues types.Object `tfsdk:"missing_values"` // MetricMissingValuesModel -// Threshold types.Float64 `tfsdk:"threshold"` -// ForOverPct types.Int64 `tfsdk:"for_over_pct"` -// } - type MetricUnusualModel struct { MetricFilter types.Object `tfsdk:"metric_filter"` // MetricFilterModel Rules 
types.List `tfsdk:"rules"` // []MetricRule @@ -471,32 +477,6 @@ type FlowStagesGroupsAlertDefsModel struct { Not types.Bool `tfsdk:"not"` } -// DELETEME -// type MetricLessThanUsualModel struct { -// MetricFilter types.Object `tfsdk:"metric_filter"` // MetricFilterModel -// OfTheLast types.Object `tfsdk:"of_the_last"` // MetricTimeWindowModel -// Threshold types.Int64 `tfsdk:"threshold"` -// ForOverPct types.Int64 `tfsdk:"for_over_pct"` -// MinNonNullValuesPct types.Int64 `tfsdk:"min_non_null_values_pct"` -// } - -// type MetricMoreThanOrEqualsModel struct { -// MetricFilter types.Object `tfsdk:"metric_filter"` // MetricFilterModel -// Threshold types.Float64 `tfsdk:"threshold"` -// ForOverPct types.Int64 `tfsdk:"for_over_pct"` -// OfTheLast types.Object `tfsdk:"of_the_last"` // MetricTimeWindowModel -// MissingValues types.Object `tfsdk:"missing_values"` // MetricMissingValuesModel -// } - -// type MetricLessThanOrEqualsModel struct { -// MetricFilter types.Object `tfsdk:"metric_filter"` // MetricFilterModel -// OfTheLast types.Object `tfsdk:"of_the_last"` // MetricTimeWindowModel -// MissingValues types.Object `tfsdk:"missing_values"` // MetricMissingValuesModel -// Threshold types.Float64 `tfsdk:"threshold"` -// ForOverPct types.Int64 `tfsdk:"for_over_pct"` // MetricMissingValuesModel -// UndetectedValuesManagement types.Object `tfsdk:"undetected_values_management"` // UndetectedValuesManagementModel -// } - type AlertsLogsFilterModel struct { SimpleFilter types.Object `tfsdk:"simple_filter"` // LuceneFilterModel } @@ -555,13 +535,13 @@ type MetricMissingValuesModel struct { MinNonNullValuesPct types.Int64 `tfsdk:"min_non_null_values_pct"` } -type NewValueRulesModel struct { +type NewValueRuleModel struct { TimeWindow types.Object `tfsdk:"time_window"` // LogsTimeWindowModel KeypathToTrack types.String `tfsdk:"keypath_to_track"` } type RuleModel struct { - CompareTo types.String `tfsdk:"compare_to"` + ComparedTo types.String `tfsdk:"compared_to"` Condition types.String `tfsdk:"condition"` Threshold types.Float64 `tfsdk:"threshold"` TimeWindow types.Object `tfsdk:"time_window"` // LogsTimeWindowModel @@ -667,7 +647,8 @@ func (r requiredWhenGroupBySet) ValidateInt64(ctx context.Context, req validator func (r *AlertResource) Schema(_ context.Context, _ resource.SchemaRequest, resp *resource.SchemaResponse) { resp.Schema = schema.Schema{ - Version: 1, + Version: 2, + MarkdownDescription: "Coralogix Alert. For more info please review - https://coralogix.com/docs/getting-started-with-coralogix-alerts/.", Attributes: map[string]schema.Attribute{ "id": schema.StringAttribute{ Computed: true, @@ -723,6 +704,7 @@ func (r *AlertResource) Schema(_ context.Context, _ resource.SchemaRequest, resp }, MarkdownDescription: "Alert schedule. 
Will be activated all the time if not specified.", }, + // type is being inferred by the type_definition attribute "type_definition": schema.SingleNestedAttribute{ Required: true, Attributes: map[string]schema.Attribute{ @@ -733,6 +715,7 @@ func (r *AlertResource) Schema(_ context.Context, _ resource.SchemaRequest, resp "notification_payload_filter": notificationPayloadFilterSchema(), }, Validators: []validator.Object{ + objectvalidator.ConflictsWith(path.MatchRoot("group_by")), objectvalidator.ExactlyOneOf( path.MatchRelative().AtParent().AtName("logs_threshold"), path.MatchRelative().AtParent().AtName("logs_unusual"), @@ -752,278 +735,243 @@ func (r *AlertResource) Schema(_ context.Context, _ resource.SchemaRequest, resp Optional: true, Attributes: map[string]schema.Attribute{ "rules": schema.ListNestedAttribute{ - Required: true, - Validators: []validator.List{listvalidator.LengthAtLeast(1)}, - Attributes: map[string]schema.Attribute{ - "threshold": schema.Float64Attribute{ - Required: true, + Required: true, + Validators: []validator.List{listvalidator.SizeAtLeast(1)}, + NestedObject: schema.NestedAttributeObject{ + Attributes: map[string]schema.Attribute{ + "threshold": schema.Float64Attribute{ + Required: true, + }, + "time_window": logsTimeWindowSchema(), + "condition": schema.StringAttribute{ + Required: true, + Validators: []validator.String{ + stringvalidator.OneOf(logsThresholdConditionValues...), + }, + MarkdownDescription: fmt.Sprintf("Condition to evaluate the threshold with. Valid values: %q.", logsThresholdConditionValues), + }, }, - "time_window": logsTimeWindowSchema(), - "condition": schema.StringAttribute{ - Required: true, - Validators: []validator.String{ - stringvalidator.OneOf(logsThresholdConditionValues...), + }, + }, + "notification_payload_filter": notificationPayloadFilterSchema(), + "logs_filter": logsFilterSchema(), + }, + }, + "logs_unusual": schema.SingleNestedAttribute{ + Optional: true, + Attributes: map[string]schema.Attribute{ + "logs_filter": logsFilterSchema(), + "notification_payload_filter": notificationPayloadFilterSchema(), + "rules": schema.ListNestedAttribute{ + Required: true, + Validators: []validator.List{listvalidator.SizeAtLeast(1)}, + NestedObject: schema.NestedAttributeObject{ + Attributes: map[string]schema.Attribute{ + "threshold": schema.Float64Attribute{ + Required: true, + }, + "time_window": logsTimeWindowSchema(), + "minimum_threshold": schema.Float64Attribute{ + Required: true, }, - MarkdownDescription: fmt.Sprintf("Condition to evaluate the threshold with. 
Valid values: %q.", logsThresholdConditionValues), }, + // Condition type is missing since there is only a single type to be filled in }, + }, + }, + }, + "logs_ratio_threshold": schema.SingleNestedAttribute{ + Optional: true, + Attributes: map[string]schema.Attribute{ + "numerator": logsFilterSchema(), + "numerator_alias": schema.StringAttribute{ + Required: true, + }, + "denominator": logsFilterSchema(), + "denominator_alias": schema.StringAttribute{ + Required: true, + }, + "rules": schema.ListNestedAttribute{ + Required: true, + Validators: []validator.List{listvalidator.SizeAtLeast(1)}, + NestedObject: schema.NestedAttributeObject{ + Attributes: map[string]schema.Attribute{ + "threshold": schema.Float64Attribute{ + Required: true, + }, + "time_window": logsRatioTimeWindowSchema(), + "ignore_infinity": schema.BoolAttribute{ + Optional: true, + Computed: true, + Default: booldefault.StaticBool(false), + }, + }, + }, + }, + "notification_payload_filter": notificationPayloadFilterSchema(), + "group_by_for": logsRatioGroupByForSchema(), + }, + }, + "logs_new_value": schema.SingleNestedAttribute{ + Optional: true, + Attributes: map[string]schema.Attribute{ + "rules": schema.ListNestedAttribute{ + Required: true, + Validators: []validator.List{listvalidator.SizeAtLeast(1)}, + NestedObject: schema.NestedAttributeObject{ + Attributes: map[string]schema.Attribute{ + "keypath_to_track": schema.StringAttribute{Required: true}, + "time_window": logsNewValueTimeWindowSchema(), + }, + }, + }, + "logs_filter": logsFilterSchema(), "notification_payload_filter": notificationPayloadFilterSchema(), + }, + Validators: []validator.Object{ + objectvalidator.ConflictsWith(path.MatchRoot("group_by")), + }, + }, + "logs_unique_count": schema.SingleNestedAttribute{ + Optional: true, + Attributes: map[string]schema.Attribute{ "logs_filter": logsFilterSchema(), - // "evaluation_window": schema.StringAttribute{ - // Optional: true, - // Computed: true, - // Default: stringdefault.StaticString("Rolling"), - // Validators: []validator.String{ - // stringvalidator.OneOf(validEvaluationWindowTypes...), - // }, - // MarkdownDescription: fmt.Sprintf("Evaluation window type. 
Valid values: %q.", validEvaluationWindowTypes), - // }, + "notification_payload_filter": notificationPayloadFilterSchema(), + "time_window": logsUniqueCountTimeWindowSchema(), + "unique_count_keypath": schema.StringAttribute{Required: true}, + "max_unique_count": schema.Int64Attribute{Required: true}, + "max_unique_count_per_group_by_key": schema.Int64Attribute{ + Optional: true, + Validators: []validator.Int64{ + int64validator.AlsoRequires(path.MatchRoot("group_by")), + requiredWhenGroupBySet{}, + }, + }, + }, + }, + "logs_time_relative": schema.SingleNestedAttribute{ + Optional: true, + Attributes: map[string]schema.Attribute{ + "logs_filter": logsFilterSchema(), + "notification_payload_filter": notificationPayloadFilterSchema(), + "undetected_values_management": undetectedValuesManagementSchema(), + "rules": schema.ListNestedAttribute{ + Required: true, + Validators: []validator.List{listvalidator.SizeAtLeast(1)}, + NestedObject: schema.NestedAttributeObject{ + Attributes: map[string]schema.Attribute{ + "threshold": schema.Float64Attribute{ + Required: true, + }, + "ignore_infinity": schema.BoolAttribute{ + Optional: true, + Computed: true, + Default: booldefault.StaticBool(false), + }, + "compared_to": schema.StringAttribute{ + Required: true, + Validators: []validator.String{ + stringvalidator.OneOf(validLogsTimeRelativeComparedTo...), + }, + MarkdownDescription: fmt.Sprintf("Compared to a different time frame. Valid values: %q.", validLogsTimeRelativeComparedTo), + }, + }, + }, + }, }, }, - // "logs_less_than": schema.SingleNestedAttribute{ - // Optional: true, - // Attributes: map[string]schema.Attribute{ - // "logs_filter": logsFilterSchema(), - // "notification_payload_filter": notificationPayloadFilterSchema(), - // "time_window": logsTimeWindowSchema(), - // "threshold": schema.Int64Attribute{ - // Required: true, - // }, - // "undetected_values_management": undetectedValuesManagementSchema(), - // }, - // }, - // "logs_more_than_usual": schema.SingleNestedAttribute{ - // Optional: true, - // Attributes: map[string]schema.Attribute{ - // "minimum_threshold": schema.Int64Attribute{ - // Required: true, - // }, - // "time_window": logsTimeWindowSchema(), - // "logs_filter": logsFilterSchema(), - // "notification_payload_filter": notificationPayloadFilterSchema(), - // }, - // }, - // "logs_ratio_more_than": schema.SingleNestedAttribute{ - // Optional: true, - // Attributes: map[string]schema.Attribute{ - // "numerator_logs_filter": logsFilterSchema(), - // "numerator_alias": schema.StringAttribute{ - // Required: true, - // }, - // "denominator_logs_filter": logsFilterSchema(), - // "denominator_alias": schema.StringAttribute{ - // Required: true, - // }, - // "threshold": schema.Int64Attribute{ - // Required: true, - // }, - // "time_window": logsRatioTimeWindowSchema(), - // "ignore_infinity": schema.BoolAttribute{ - // Optional: true, - // Computed: true, - // Default: booldefault.StaticBool(false), - // }, - // "notification_payload_filter": notificationPayloadFilterSchema(), - // "group_by_for": logsRatioGroupByForSchema(), - // }, - // }, - // "logs_ratio_less_than": schema.SingleNestedAttribute{ - // Optional: true, - // Attributes: map[string]schema.Attribute{ - // "numerator_logs_filter": logsFilterSchema(), - // "numerator_alias": schema.StringAttribute{ - // Required: true, - // }, - // "denominator_logs_filter": logsFilterSchema(), - // "denominator_alias": schema.StringAttribute{ - // Required: true, - // }, - // "threshold": schema.Int64Attribute{ - // Required: 
true, - // }, - // "time_window": logsRatioTimeWindowSchema(), - // "ignore_infinity": schema.BoolAttribute{ - // Optional: true, - // Computed: true, - // Default: booldefault.StaticBool(false), - // }, - // "notification_payload_filter": notificationPayloadFilterSchema(), - // "group_by_for": logsRatioGroupByForSchema(), - // "undetected_values_management": undetectedValuesManagementSchema(), - // }, - // }, - // "logs_new_value": schema.SingleNestedAttribute{ - // Optional: true, - // Attributes: map[string]schema.Attribute{ - // "logs_filter": logsFilterSchema(), - // "keypath_to_track": schema.StringAttribute{Required: true}, - // "time_window": logsNewValueTimeWindowSchema(), - // "notification_payload_filter": notificationPayloadFilterSchema(), - // }, - // Validators: []validator.Object{ - // objectvalidator.ConflictsWith(path.MatchRoot("group_by")), - // }, - // }, - // "logs_unique_count": schema.SingleNestedAttribute{ - // Optional: true, - // Attributes: map[string]schema.Attribute{ - // "logs_filter": logsFilterSchema(), - // "notification_payload_filter": notificationPayloadFilterSchema(), - // "time_window": logsUniqueCountTimeWindowSchema(), - // "unique_count_keypath": schema.StringAttribute{Required: true}, - // "max_unique_count": schema.Int64Attribute{Required: true}, - // "max_unique_count_per_group_by_key": schema.Int64Attribute{ - // Optional: true, - // Validators: []validator.Int64{ - // int64validator.AlsoRequires(path.MatchRoot("group_by")), - // requiredWhenGroupBySet{}, - // }, - // }, - // }, - // }, - // "logs_time_relative_more_than": schema.SingleNestedAttribute{ - // Optional: true, - // Attributes: map[string]schema.Attribute{ - // "logs_filter": logsFilterSchema(), - // "notification_payload_filter": notificationPayloadFilterSchema(), - // "threshold": schema.Int64Attribute{Required: true}, - // "compared_to": timeRelativeCompareTo(), - // "ignore_infinity": schema.BoolAttribute{ - // Optional: true, - // Computed: true, - // Default: booldefault.StaticBool(false), - // }, - // }, - // }, - // "logs_time_relative_less_than": schema.SingleNestedAttribute{ - // Optional: true, - // Attributes: map[string]schema.Attribute{ - // "logs_filter": logsFilterSchema(), - // "threshold": schema.Int64Attribute{Required: true}, - // "notification_payload_filter": notificationPayloadFilterSchema(), - // "compared_to": schema.StringAttribute{ - // Required: true, - // Validators: []validator.String{ - // stringvalidator.OneOf(validLogsTimeRelativeComparedTo...), - // }, - // MarkdownDescription: fmt.Sprintf("Compared to. 
Valid values: %q.", validLogsTimeRelativeComparedTo), - // }, - // "ignore_infinity": schema.BoolAttribute{ - // Optional: true, - // Computed: true, - // Default: booldefault.StaticBool(false), - // }, - // "undetected_values_management": undetectedValuesManagementSchema(), - // }, - // }, - // "metric_more_than": schema.SingleNestedAttribute{ - // Optional: true, - // Attributes: map[string]schema.Attribute{ - // "metric_filter": metricFilterSchema(), - // "threshold": schema.Float64Attribute{ - // Required: true, - // }, - // "for_over_pct": schema.Int64Attribute{ - // Required: true, - // }, - // "of_the_last": metricTimeWindowSchema(), - // "missing_values": missingValuesSchema(), - // }, - // }, - // "metric_less_than": schema.SingleNestedAttribute{ - // Optional: true, - // Attributes: map[string]schema.Attribute{ - // "metric_filter": metricFilterSchema(), - // "threshold": schema.Float64Attribute{ - // Required: true, - // }, - // "for_over_pct": schema.Int64Attribute{ - // Required: true, - // }, - // "of_the_last": metricTimeWindowSchema(), - // "missing_values": missingValuesSchema(), - // "undetected_values_management": undetectedValuesManagementSchema(), - // }, - // }, - // "metric_less_than_usual": schema.SingleNestedAttribute{ - // Optional: true, - // Attributes: map[string]schema.Attribute{ - // "metric_filter": metricFilterSchema(), - // "of_the_last": metricTimeWindowSchema(), - // "threshold": schema.Int64Attribute{ - // Required: true, - // }, - // "for_over_pct": schema.Int64Attribute{ - // Required: true, - // }, - // "min_non_null_values_pct": schema.Int64Attribute{ - // Required: true, - // }, - // }, - // }, - // "metric_more_than_usual": schema.SingleNestedAttribute{ - // Optional: true, - // Attributes: map[string]schema.Attribute{ - // "metric_filter": metricFilterSchema(), - // "of_the_last": metricTimeWindowSchema(), - // "threshold": schema.Int64Attribute{ - // Required: true, - // }, - // "for_over_pct": schema.Int64Attribute{ - // Required: true, - // }, - // "min_non_null_values_pct": schema.Int64Attribute{ - // Required: true, - // }, - // }, - // }, - // "metric_more_than_or_equals": schema.SingleNestedAttribute{ - // Optional: true, - // Attributes: map[string]schema.Attribute{ - // "metric_filter": metricFilterSchema(), - // "threshold": schema.Float64Attribute{ - // Required: true, - // }, - // "for_over_pct": schema.Int64Attribute{ - // Required: true, - // }, - // "of_the_last": metricTimeWindowSchema(), - // "missing_values": missingValuesSchema(), - // }, - // }, - // "metric_less_than_or_equals": schema.SingleNestedAttribute{ - // Optional: true, - // Attributes: map[string]schema.Attribute{ - // "metric_filter": metricFilterSchema(), - // "threshold": schema.Float64Attribute{ - // Required: true, - // }, - // "for_over_pct": schema.Int64Attribute{ - // Required: true, - // }, - // "of_the_last": metricTimeWindowSchema(), - // "missing_values": missingValuesSchema(), - // "undetected_values_management": undetectedValuesManagementSchema(), - // }, - // }, - // "tracing_immediate": schema.SingleNestedAttribute{ - // Optional: true, - // Attributes: map[string]schema.Attribute{ - // "tracing_filter": tracingQuerySchema(), - // "notification_payload_filter": notificationPayloadFilterSchema(), - // }, - // }, - // "tracing_more_than": schema.SingleNestedAttribute{ - // Optional: true, - // Attributes: map[string]schema.Attribute{ - // "tracing_filter": tracingQuerySchema(), - // "notification_payload_filter": notificationPayloadFilterSchema(), - // 
"time_window": tracingTimeWindowSchema(), - // "span_amount": schema.Int64Attribute{ - // Required: true, - // }, - // }, - // }, + // Metrics + "metric_threshold": schema.SingleNestedAttribute{ + Optional: true, + Attributes: map[string]schema.Attribute{ + "metric_filter": metricFilterSchema(), + "undetected_values_management": undetectedValuesManagementSchema(), + "rules": schema.ListNestedAttribute{ + Required: true, + Validators: []validator.List{listvalidator.SizeAtLeast(1)}, + NestedObject: schema.NestedAttributeObject{ + Attributes: map[string]schema.Attribute{ + "threshold": schema.Float64Attribute{ + Required: true, + }, + "for_over_pct": schema.Int64Attribute{ + Required: true, + }, + "of_the_last": metricTimeWindowSchema(), + "missing_values": missingValuesSchema(), + "condition": schema.StringAttribute{ + Required: true, + Validators: []validator.String{ + stringvalidator.OneOf(metricsThresholdConditionMap...), + }, + MarkdownDescription: fmt.Sprintf("Condition to evaluate the threshold with. Valid values: %q.", metricsThresholdConditionMap), + }, + }, + }, + }, + }, + }, + "metric_unusual": schema.SingleNestedAttribute{ + Optional: true, + Attributes: map[string]schema.Attribute{ + "metric_filter": metricFilterSchema(), + "rules": schema.ListNestedAttribute{ + Required: true, + Validators: []validator.List{listvalidator.SizeAtLeast(1)}, + NestedObject: schema.NestedAttributeObject{ + Attributes: map[string]schema.Attribute{ + "threshold": schema.Float64Attribute{ + Required: true, + }, + "of_the_last": metricTimeWindowSchema(), + + "for_over_pct": schema.Int64Attribute{ + Required: true, + }, + "min_non_null_values_pct": schema.Int64Attribute{ + Required: true, + }, + "condition": schema.StringAttribute{ + Required: true, + Validators: []validator.String{ + stringvalidator.OneOf(metricsUnusualConditionValues...), + }, + MarkdownDescription: fmt.Sprintf("Condition to evaluate the threshold with. Valid values: %q.", metricsUnusualConditionValues), + }, + }, + }, + }, + }, + }, + // Tracing + "tracing_immediate": schema.SingleNestedAttribute{ + Optional: true, + Attributes: map[string]schema.Attribute{ + "tracing_filter": tracingQuerySchema(), + "notification_payload_filter": notificationPayloadFilterSchema(), + }, + }, + "tracing_threshold": schema.SingleNestedAttribute{ + Optional: true, + Attributes: map[string]schema.Attribute{ + "tracing_filter": tracingQuerySchema(), + "notification_payload_filter": notificationPayloadFilterSchema(), + "rules": schema.ListNestedAttribute{ + Required: true, + Validators: []validator.List{listvalidator.SizeAtLeast(1)}, + NestedObject: schema.NestedAttributeObject{ + Attributes: map[string]schema.Attribute{ + "span_amount": schema.Float64Attribute{ + Required: true, + }, + "time_window": tracingTimeWindowSchema(), + }, + // Condition type is missing since there is only a single type to be filled in + }, + }, + }, + }, + // Flow "flow": schema.SingleNestedAttribute{ Optional: true, Attributes: map[string]schema.Attribute{ @@ -1087,9 +1035,17 @@ func (r *AlertResource) Schema(_ context.Context, _ resource.SchemaRequest, resp Default: booldefault.StaticBool(false), }, }, + MarkdownDescription: "Alert type definition. Exactly one of the following must be specified: logs_immediate, logs_threshold, logs_unusual, logs_ratio_threshold, logs_new_value, logs_unique_count, logs_time_relative_threshold, metric_threshold, metric_unusual, tracing_immediate, tracing_threshold flow.", }, }, - MarkdownDescription: "Alert type definition. 
Exactly one of the following must be specified: logs_immediate, logs_more_than, logs_less_than, logs_more_than_usual, logs_ratio_more_than, logs_ratio_less_than, logs_new_value, logs_unique_count, logs_time_relative_more_than, logs_time_relative_less_than, metric_more_than, metric_less_than, metric_more_than_usual, metric_less_than_usual, metric_less_than_or_equals, metric_more_than_or_equals, tracing_immediate, tracing_more_than, flow.", + }, + "phantom_mode": schema.BoolAttribute{ + Optional: true, + Computed: true, + Default: booldefault.StaticBool(false), + }, + "deleted": schema.BoolAttribute{ + Computed: true, }, "group_by": schema.SetAttribute{ Optional: true, @@ -1208,7 +1164,6 @@ func (r *AlertResource) Schema(_ context.Context, _ resource.SchemaRequest, resp ElementType: types.StringType, }, }, - MarkdownDescription: "Coralogix Alert. For more info please review - https://coralogix.com/docs/getting-started-with-coralogix-alerts/.", } } @@ -1374,7 +1329,7 @@ func logsFilterSchema() schema.SingleNestedAttribute { objectplanmodifier.UseStateForUnknown(), }, Attributes: map[string]schema.Attribute{ - "lucene_filter": schema.SingleNestedAttribute{ + "simple_filter": schema.SingleNestedAttribute{ Optional: true, Computed: true, PlanModifiers: []planmodifier.Object{ @@ -1975,26 +1930,28 @@ func expandAlertsTypeDefinition(ctx context.Context, alertProperties *cxsdk.Aler } var diags diag.Diagnostics - if logsImmediate := alertDefinitionModel.LogsImmediate; !(logsImmediate.IsNull() || logsImmediate.IsUnknown()) { + + if logsImmediate := alertDefinitionModel.LogsImmediate; !objIsNullOrUnknown(logsImmediate) { + // LogsImmediate alertProperties, diags = expandLogsImmediateAlertTypeDefinition(ctx, alertProperties, logsImmediate) - } else if logsMoreThan := alertDefinitionModel.LogsMoreThan; !(logsMoreThan.IsNull() || logsMoreThan.IsUnknown()) { - alertProperties, diags = expandLogsMoreThanAlertTypeDefinition(ctx, alertProperties, logsMoreThan) - } else if logsLessThan := alertDefinitionModel.LogsLessThan; !(logsLessThan.IsNull() || logsLessThan.IsUnknown()) { - alertProperties, diags = expandLogsLessThanAlertTypeDefinition(ctx, alertProperties, logsLessThan) - } else if logsMoreThanUsual := alertDefinitionModel.LogsMoreThanUsual; !(logsMoreThanUsual.IsNull() || logsMoreThanUsual.IsUnknown()) { - alertProperties, diags = expandLogsMoreThanUsualAlertTypeDefinition(ctx, alertProperties, logsMoreThanUsual) - } else if logsRatioMoreThan := alertDefinitionModel.LogsRatioMoreThan; !(logsRatioMoreThan.IsNull() || logsRatioMoreThan.IsUnknown()) { - alertProperties, diags = expandLogsRatioMoreThanAlertTypeDefinition(ctx, alertProperties, logsRatioMoreThan) - } else if logsRatioLessThan := alertDefinitionModel.LogsRatioLessThan; !(logsRatioLessThan.IsNull() || logsRatioLessThan.IsUnknown()) { - alertProperties, diags = expandLogsRatioLessThanAlertTypeDefinition(ctx, alertProperties, logsRatioLessThan) - } else if logsNewValue := alertDefinitionModel.LogsNewValue; !(logsNewValue.IsNull() || logsNewValue.IsUnknown()) { + } else if logsThreshold := alertDefinitionModel.LogsThreshold; !objIsNullOrUnknown(logsThreshold) { + // LogsThreshold + alertProperties, diags = expandLogsThresholdTypeDefinition(ctx, alertProperties, logsThreshold) + } else if logsUnusual := alertDefinitionModel.LogsUnusual; !objIsNullOrUnknown(logsUnusual) { + // LogsUnusual + alertProperties, diags = expandLogsUnusualAlertTypeDefinition(ctx, alertProperties, logsUnusual) + } else if logsRatioThreshold := 
alertDefinitionModel.LogsRatioThreshold; !objIsNullOrUnknown(logsRatioThreshold) { + // LogsRatioThreshold + alertProperties, diags = expandLogsRatioThresholdTypeDefinition(ctx, alertProperties, logsRatioThreshold) + } else if logsNewValue := alertDefinitionModel.LogsNewValue; !objIsNullOrUnknown(logsNewValue) { + // LogsNewValue alertProperties, diags = expandLogsNewValueAlertTypeDefinition(ctx, alertProperties, logsNewValue) - } else if logsUniqueCount := alertDefinitionModel.LogsUniqueCount; !(logsUniqueCount.IsNull() || logsUniqueCount.IsUnknown()) { + } else if logsUniqueCount := alertDefinitionModel.LogsUniqueCount; !objIsNullOrUnknown(logsUniqueCount) { + // LogsUniqueCount alertProperties, diags = expandLogsUniqueCountAlertTypeDefinition(ctx, alertProperties, logsUniqueCount) - } else if logsTimeRelativeMoreThan := alertDefinitionModel.LogsTimeRelativeMoreThan; !(logsTimeRelativeMoreThan.IsNull() || logsTimeRelativeMoreThan.IsUnknown()) { - alertProperties, diags = expandLogsTimeRelativeMoreThanAlertTypeDefinition(ctx, alertProperties, logsTimeRelativeMoreThan) - } else if logsTimeRelativeLessThan := alertDefinitionModel.LogsTimeRelativeLessThan; !(logsTimeRelativeLessThan.IsNull() || logsTimeRelativeLessThan.IsUnknown()) { - alertProperties, diags = expandLogsTimeRelativeLessThanAlertTypeDefinition(ctx, alertProperties, logsTimeRelativeLessThan) + } else if logsTimeRelativeThreshold := alertDefinitionModel.LogsTimeRelativeThreshold; !objIsNullOrUnknown(logsTimeRelativeThreshold) { + // LogsTimeRelativeThreshold + alertProperties, diags = expandLogsTimeRelativeThresholdAlertTypeDefinition(ctx, alertProperties, logsTimeRelativeThreshold) } else if metricMoreThan := alertDefinitionModel.MetricMoreThan; !(metricMoreThan.IsNull() || metricMoreThan.IsUnknown()) { alertProperties, diags = expandMetricMoreThanAlertTypeDefinition(ctx, alertProperties, metricMoreThan) } else if metricLessThan := alertDefinitionModel.MetricLessThan; !(metricLessThan.IsNull() || metricLessThan.IsUnknown()) { @@ -2066,8 +2023,8 @@ func extractLogsFilter(ctx context.Context, filter types.Object) (*cxsdk.LogsFil logsFilter := &cxsdk.LogsFilter{} var diags diag.Diagnostics - if !(filterModel.LuceneFilter.IsNull() || filterModel.LuceneFilter.IsUnknown()) { - logsFilter.FilterType, diags = extractLuceneFilter(ctx, filterModel.LuceneFilter) + if !(filterModel.SimpleFilter.IsNull() || filterModel.SimpleFilter.IsUnknown()) { + logsFilter.FilterType, diags = extractLuceneFilter(ctx, filterModel.SimpleFilter) } if diags.HasError() { @@ -2077,12 +2034,12 @@ func extractLogsFilter(ctx context.Context, filter types.Object) (*cxsdk.LogsFil return logsFilter, nil } -func extractLuceneFilter(ctx context.Context, luceneFilter types.Object) (*cxsdk.LogsFilterLuceneFilter, diag.Diagnostics) { +func extractLuceneFilter(ctx context.Context, luceneFilter types.Object) (*cxsdk.LogsFilterSimpleFilter, diag.Diagnostics) { if luceneFilter.IsNull() || luceneFilter.IsUnknown() { return nil, nil } - var luceneFilterModel LuceneFilterModel + var luceneFilterModel SimpleFilterModel if diags := luceneFilter.As(ctx, &luceneFilterModel, basetypes.ObjectAsOptions{}); diags.HasError() { return nil, diags } @@ -2179,45 +2136,38 @@ func extractLogSeverities(ctx context.Context, elements []attr.Value) ([]cxsdk.L return result, diags } -func expandLogsMoreThanAlertTypeDefinition(ctx context.Context, properties *cxsdk.AlertDefProperties, moreThanObject types.Object) (*cxsdk.AlertDefProperties, diag.Diagnostics) { - if moreThanObject.IsNull() || 
moreThanObject.IsUnknown() { +func expandLogsThresholdTypeDefinition(ctx context.Context, properties *cxsdk.AlertDefProperties, thresholdObject types.Object) (*cxsdk.AlertDefProperties, diag.Diagnostics) { + if thresholdObject.IsNull() || thresholdObject.IsUnknown() { return properties, nil } - var moreThanModel LogsMoreThanModel - if diags := moreThanObject.As(ctx, &moreThanModel, basetypes.ObjectAsOptions{}); diags.HasError() { + var thresholdModel LogsThresholdModel + if diags := thresholdObject.As(ctx, &thresholdModel, basetypes.ObjectAsOptions{}); diags.HasError() { return nil, diags } - logsFilter, diags := extractLogsFilter(ctx, moreThanModel.LogsFilter) + logsFilter, diags := extractLogsFilter(ctx, thresholdModel.LogsFilter) if diags.HasError() { return nil, diags } - notificationPayloadFilter, diags := typeStringSliceToWrappedStringSlice(ctx, moreThanModel.NotificationPayloadFilter.Elements()) + notificationPayloadFilter, diags := typeStringSliceToWrappedStringSlice(ctx, thresholdModel.NotificationPayloadFilter.Elements()) if diags.HasError() { return nil, diags } - timeWindow, diags := extractLogsTimeWindow(ctx, moreThanModel.TimeWindow) + rules, diags := extractThresholdRules(ctx, thresholdModel.Rules) if diags.HasError() { return nil, diags } - properties.TypeDefinition = &cxsdk.AlertDefPropertiesLogsThreshold{ LogsThreshold: &cxsdk.LogsThresholdType{ - LogsFilter: logsFilter, - Rules: []*cxsdk.LogsThresholdRule{ - {Condition: &cxsdk.LogsThresholdCondition{ - Threshold: typeFloat64ToWrapperspbDouble(moreThanModel.Threshold), - TimeWindow: timeWindow, - ConditionType: cxsdk.LogsThresholdConditionTypeMoreThanOrUnspecified, - // EvaluationWindow: evaluationWindowTypeSchemaToProtoMap[moreThanModel.EvaluationWindow.ValueString()], - }}, - }, + LogsFilter: logsFilter, + Rules: rules, NotificationPayloadFilter: notificationPayloadFilter, }, } + properties.Type = cxsdk.AlertDefTypeLogsThreshold return properties, nil } @@ -2243,47 +2193,34 @@ func extractLogsTimeWindow(ctx context.Context, timeWindow types.Object) (*cxsdk return nil, diag.Diagnostics{diag.NewErrorDiagnostic("Invalid Time Window", "Time Window is not valid")} } -func expandLogsLessThanAlertTypeDefinition(ctx context.Context, properties *cxsdk.AlertDefProperties, lessThan types.Object) (*cxsdk.AlertDefProperties, diag.Diagnostics) { - if lessThan.IsNull() || lessThan.IsUnknown() { - return properties, nil - } - - var lessThanModel LogsLessThanModel - if diags := lessThan.As(ctx, &lessThanModel, basetypes.ObjectAsOptions{}); diags.HasError() { - return nil, diags - } - - logsFilter, diags := extractLogsFilter(ctx, lessThanModel.LogsFilter) - if diags.HasError() { - return nil, diags - } - - notificationPayloadFilter, diags := typeStringSliceToWrappedStringSlice(ctx, lessThanModel.NotificationPayloadFilter.Elements()) - if diags.HasError() { - return nil, diags - } - - timeWindow, diags := extractLogsTimeWindow(ctx, lessThanModel.TimeWindow) - if diags.HasError() { - return nil, diags +func extractThresholdRules(ctx context.Context, elements types.List) ([]*cxsdk.LogsThresholdRule, diag.Diagnostics) { + diags := diag.Diagnostics{} + rules := make([]*cxsdk.LogsThresholdRule, len(elements.Elements())) + var objs []types.Object + elements.ElementsAs(ctx, &objs, false) + for i, r := range objs { + var rule RuleModel + if dg := r.As(ctx, &rule, basetypes.ObjectAsOptions{}); dg.HasError() { + diags.Append(dg...) 
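+ // record the conversion failure and keep iterating; the aggregated diagnostics are returned once after the loop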
+ continue + } + timeWindow, dg := extractLogsTimeWindow(ctx, rule.TimeWindow) + if dg.HasError() { + diags.Append(dg...) + continue + } + rules[i] = &cxsdk.LogsThresholdRule{ + Condition: &cxsdk.LogsThresholdCondition{ + Threshold: typeFloat64ToWrapperspbDouble(rule.Threshold), + TimeWindow: timeWindow, + ConditionType: logsThresholdConditionToProtoMap[rule.Condition.ValueString()], + }, + } } - - undetectedValuesManagement, diags := extractUndetectedValuesManagement(ctx, lessThanModel.UndetectedValuesManagement) if diags.HasError() { return nil, diags } - - properties.TypeDefinition = &cxsdk.AlertDefProperties_LogsLessThan{ - LogsLessThan: &cxsdk.LogsLessThanTypeDefinition{ - LogsFilter: logsFilter, - Threshold: typeInt64ToWrappedUint32(lessThanModel.Threshold), - TimeWindow: timeWindow, - UndetectedValuesManagement: undetectedValuesManagement, - NotificationPayloadFilter: notificationPayloadFilter, - }, - } - properties.AlertDefType = cxsdk.AlertDefType_ALERT_DEF_TYPE_LOGS_LESS_THAN - return properties, nil + return rules, nil } func extractUndetectedValuesManagement(ctx context.Context, management types.Object) (*cxsdk.UndetectedValuesManagement, diag.Diagnostics) { @@ -2308,97 +2245,145 @@ func extractUndetectedValuesManagement(ctx context.Context, management types.Obj }, nil } -func expandLogsMoreThanUsualAlertTypeDefinition(ctx context.Context, properties *cxsdk.AlertDefProperties, moreThanUsual types.Object) (*cxsdk.AlertDefProperties, diag.Diagnostics) { - if moreThanUsual.IsNull() || moreThanUsual.IsUnknown() { +func expandLogsUnusualAlertTypeDefinition(ctx context.Context, properties *cxsdk.AlertDefProperties, unusual types.Object) (*cxsdk.AlertDefProperties, diag.Diagnostics) { + if unusual.IsNull() || unusual.IsUnknown() { return properties, nil } - var moreThanUsualModel LogsMoreThanUsualModel - if diags := moreThanUsual.As(ctx, &moreThanUsualModel, basetypes.ObjectAsOptions{}); diags.HasError() { + var unusualModel LogsUnusualModel + if diags := unusual.As(ctx, &unusualModel, basetypes.ObjectAsOptions{}); diags.HasError() { return nil, diags } - logsFilter, diags := extractLogsFilter(ctx, moreThanUsualModel.LogsFilter) + logsFilter, diags := extractLogsFilter(ctx, unusualModel.LogsFilter) if diags.HasError() { return nil, diags } - notificationPayloadFilter, diags := typeStringSliceToWrappedStringSlice(ctx, moreThanUsualModel.NotificationPayloadFilter.Elements()) + notificationPayloadFilter, diags := typeStringSliceToWrappedStringSlice(ctx, unusualModel.NotificationPayloadFilter.Elements()) if diags.HasError() { return nil, diags } - timeWindow, diags := extractLogsTimeWindow(ctx, moreThanUsualModel.TimeWindow) + rules, diags := extractUnusualRules(ctx, unusualModel.Rules) if diags.HasError() { return nil, diags } - - properties.TypeDefinition = &cxsdk.AlertDefProperties_LogsMoreThanUsual{ - LogsMoreThanUsual: &cxsdk.LogsMoreThanUsualTypeDefinition{ + properties.TypeDefinition = &cxsdk.AlertDefPropertiesLogsUnusual{ + LogsUnusual: &cxsdk.LogsUnusualType{ LogsFilter: logsFilter, - MinimumThreshold: typeInt64ToWrappedUint32(moreThanUsualModel.MinimumThreshold), - TimeWindow: timeWindow, + Rules: rules, NotificationPayloadFilter: notificationPayloadFilter, }, } - properties.AlertDefType = cxsdk.AlertDefType_ALERT_DEF_TYPE_LOGS_MORE_THAN_USUAL + + properties.Type = cxsdk.AlertDefTypeLogsUnusual return properties, nil } -func expandLogsRatioMoreThanAlertTypeDefinition(ctx context.Context, properties *cxsdk.AlertDefProperties, moreThan types.Object) (*cxsdk.AlertDefProperties, 
diag.Diagnostics) { - if moreThan.IsNull() || moreThan.IsUnknown() { - return properties, nil +func extractUnusualRules(ctx context.Context, elements types.List) ([]*cxsdk.LogsUnusualRule, diag.Diagnostics) { + diags := diag.Diagnostics{} + rules := make([]*cxsdk.LogsUnusualRule, len(elements.Elements())) + var objs []types.Object + elements.ElementsAs(ctx, &objs, false) + for i, r := range objs { + var rule RuleModel + if dg := r.As(ctx, &rule, basetypes.ObjectAsOptions{}); dg.HasError() { + diags.Append(dg...) + continue + } + timeWindow, dg := extractLogsTimeWindow(ctx, rule.TimeWindow) + if dg.HasError() { + diags.Append(dg...) + continue + } + rules[i] = &cxsdk.LogsUnusualRule{ + Condition: &cxsdk.LogsUnusualCondition{ + MinimumThreshold: typeFloat64ToWrapperspbDouble(rule.Threshold), + TimeWindow: timeWindow, + ConditionType: cxsdk.LogsUnusualConditionTypeMoreThanOrUnspecified, + }, + } } - - var moreThanModel LogsRatioMoreThanModel - if diags := moreThan.As(ctx, &moreThanModel, basetypes.ObjectAsOptions{}); diags.HasError() { + if diags.HasError() { return nil, diags } + return rules, nil +} - numeratorLogsFilter, diags := extractLogsFilter(ctx, moreThanModel.NumeratorLogsFilter) - if diags.HasError() { +func expandLogsRatioThresholdTypeDefinition(ctx context.Context, properties *cxsdk.AlertDefProperties, ratioThreshold types.Object) (*cxsdk.AlertDefProperties, diag.Diagnostics) { + if ratioThreshold.IsNull() || ratioThreshold.IsUnknown() { + return properties, nil + } + + var ratioThresholdModel LogsRatioThresholdModel + if diags := ratioThreshold.As(ctx, &ratioThresholdModel, basetypes.ObjectAsOptions{}); diags.HasError() { return nil, diags } - denominatorLogsFilter, diags := extractLogsFilter(ctx, moreThanModel.DenominatorLogsFilter) + numeratorLogsFilter, diags := extractLogsFilter(ctx, ratioThresholdModel.Numerator) if diags.HasError() { return nil, diags } - timeWindow, diags := extractLogsRatioTimeWindow(ctx, moreThanModel.TimeWindow) + denominatorLogsFilter, diags := extractLogsFilter(ctx, ratioThresholdModel.Denominator) if diags.HasError() { return nil, diags } - notificationPayloadFilter, diags := typeStringSliceToWrappedStringSlice(ctx, moreThanModel.NotificationPayloadFilter.Elements()) + rules, diags := extractRatioRules(ctx, ratioThresholdModel.Rules) + + notificationPayloadFilter, diags := typeStringSliceToWrappedStringSlice(ctx, ratioThresholdModel.NotificationPayloadFilter.Elements()) if diags.HasError() { return nil, diags } properties.TypeDefinition = &cxsdk.AlertDefPropertiesLogsRatioThreshold{ LogsRatioThreshold: &cxsdk.LogsRatioThresholdType{ - Numerator: numeratorLogsFilter, - NumeratorAlias: typeStringToWrapperspbString(moreThanModel.NumeratorAlias), - Denominator: denominatorLogsFilter, - DenominatorAlias: typeStringToWrapperspbString(moreThanModel.DenominatorAlias), - Rules: []*cxsdk.LogsRatioRules{ - { - Condition: &cxsdk.LogsRatioCondition{ - Threshold: typeFloat64ToWrapperspbDouble(moreThanModel.Threshold), - TimeWindow: timeWindow, - IgnoreInfinity: typeBoolToWrapperspbBool(moreThanModel.IgnoreInfinity), - ConditionType: cxsdk.LogsRatioConditionTypeMoreThanOrUnspecified, - }, - }, - }, + Numerator: numeratorLogsFilter, + NumeratorAlias: typeStringToWrapperspbString(ratioThresholdModel.NumeratorAlias), + Denominator: denominatorLogsFilter, + DenominatorAlias: typeStringToWrapperspbString(ratioThresholdModel.DenominatorAlias), + Rules: rules, NotificationPayloadFilter: notificationPayloadFilter, - GroupByFor: 
logsRatioGroupByForSchemaToProtoMap[moreThanModel.GroupByFor.ValueString()], + GroupByFor: logsRatioGroupByForSchemaToProtoMap[ratioThresholdModel.GroupByFor.ValueString()], }, } properties.Type = cxsdk.AlertDefTypeLogsRatioThreshold return properties, nil } +func extractRatioRules(ctx context.Context, elements types.List) ([]*cxsdk.LogsRatioRules, diag.Diagnostics) { + diags := diag.Diagnostics{} + rules := make([]*cxsdk.LogsRatioRules, len(elements.Elements())) + var objs []types.Object + elements.ElementsAs(ctx, &objs, false) + for i, r := range objs { + var rule RuleModel + if dg := r.As(ctx, &rule, basetypes.ObjectAsOptions{}); dg.HasError() { + diags.Append(dg...) + continue + } + timeWindow, dg := extractLogsRatioTimeWindow(ctx, rule.TimeWindow) + if dg.HasError() { + diags.Append(dg...) + continue + } + rules[i] = &cxsdk.LogsRatioRules{ + Condition: &cxsdk.LogsRatioCondition{ + Threshold: typeFloat64ToWrapperspbDouble(rule.Threshold), + TimeWindow: timeWindow, + IgnoreInfinity: typeBoolToWrapperspbBool(rule.IgnoreInfinity), + ConditionType: logsRatioConditionSchemaToProtoMap[rule.Condition.ValueString()], + }, + } + } + if diags.HasError() { + return nil, diags + } + return rules, nil +} + func extractLogsRatioTimeWindow(ctx context.Context, window types.Object) (*cxsdk.LogsRatioTimeWindow, diag.Diagnostics) { if window.IsNull() || window.IsUnknown() { return nil, nil @@ -2420,59 +2405,6 @@ func extractLogsRatioTimeWindow(ctx context.Context, window types.Object) (*cxsd return nil, diag.Diagnostics{diag.NewErrorDiagnostic("Invalid Time Window", "Time Window is not valid")} } -func expandLogsRatioLessThanAlertTypeDefinition(ctx context.Context, properties *cxsdk.AlertDefProperties, ratioLessThan types.Object) (*cxsdk.AlertDefProperties, diag.Diagnostics) { - if ratioLessThan.IsNull() || ratioLessThan.IsUnknown() { - return properties, nil - } - - var ratioLessThanModel LogsRatioLessThanModel - if diags := ratioLessThan.As(ctx, &ratioLessThanModel, basetypes.ObjectAsOptions{}); diags.HasError() { - return nil, diags - } - - numeratorLogsFilter, diags := extractLogsFilter(ctx, ratioLessThanModel.NumeratorLogsFilter) - if diags.HasError() { - return nil, diags - } - - denominatorLogsFilter, diags := extractLogsFilter(ctx, ratioLessThanModel.DenominatorLogsFilter) - if diags.HasError() { - return nil, diags - } - - timeWindow, diags := extractLogsRatioTimeWindow(ctx, ratioLessThanModel.TimeWindow) - if diags.HasError() { - return nil, diags - } - - notificationPayloadFilter, diags := typeStringSliceToWrappedStringSlice(ctx, ratioLessThanModel.NotificationPayloadFilter.Elements()) - if diags.HasError() { - return nil, diags - } - - undetectedValuesManagement, diags := extractUndetectedValuesManagement(ctx, ratioLessThanModel.UndetectedValuesManagement) - if diags.HasError() { - return nil, diags - } - - properties.TypeDefinition = &cxsdk.AlertDefProperties_LogsRatioLessThan{ - LogsRatioLessThan: &cxsdk.LogsRatioLessThanTypeDefinition{ - NumeratorLogsFilter: numeratorLogsFilter, - NumeratorAlias: typeStringToWrapperspbString(ratioLessThanModel.NumeratorAlias), - DenominatorLogsFilter: denominatorLogsFilter, - DenominatorAlias: typeStringToWrapperspbString(ratioLessThanModel.DenominatorAlias), - Threshold: typeInt64ToWrappedUint32(ratioLessThanModel.Threshold), - TimeWindow: timeWindow, - IgnoreInfinity: typeBoolToWrapperspbBool(ratioLessThanModel.IgnoreInfinity), - NotificationPayloadFilter: notificationPayloadFilter, - GroupByFor: 
logsRatioGroupByForSchemaToProtoMap[ratioLessThanModel.GroupByFor.ValueString()], - UndetectedValuesManagement: undetectedValuesManagement, - }, - } - properties.AlertDefType = cxsdk.AlertDefType_ALERT_DEF_TYPE_LOGS_RATIO_LESS_THAN - return properties, nil -} - func expandLogsNewValueAlertTypeDefinition(ctx context.Context, properties *cxsdk.AlertDefProperties, newValue types.Object) (*cxsdk.AlertDefProperties, diag.Diagnostics) { if newValue.IsNull() || newValue.IsUnknown() { return properties, nil @@ -2493,20 +2425,18 @@ func expandLogsNewValueAlertTypeDefinition(ctx context.Context, properties *cxsd return nil, diags } - timeWindow, diags := extractLogsNewValueTimeWindow(ctx, newValueModel.TimeWindow) + rules, diags := extractNewValueRules(ctx, newValueModel.Rules) if diags.HasError() { return nil, diags } - - properties.TypeDefinition = &cxsdk.AlertDefProperties_LogsNewValue{ - LogsNewValue: &cxsdk.LogsNewValueTypeDefinition{ + properties.TypeDefinition = &cxsdk.AlertDefPropertiesLogsNewValue{ + LogsNewValue: &cxsdk.LogsNewValueType{ LogsFilter: logsFilter, - KeypathToTrack: typeStringToWrapperspbString(newValueModel.KeypathToTrack), - TimeWindow: timeWindow, + Rules: rules, NotificationPayloadFilter: notificationPayloadFilter, }, } - properties.AlertDefType = cxsdk.AlertDefType_ALERT_DEF_TYPE_LOGS_NEW_VALUE + properties.Type = cxsdk.AlertDefTypeLogsNewValue return properties, nil } @@ -2522,7 +2452,7 @@ func extractLogsNewValueTimeWindow(ctx context.Context, window types.Object) (*c if specificValue := windowModel.SpecificValue; !(specificValue.IsNull() || specificValue.IsUnknown()) { return &cxsdk.LogsNewValueTimeWindow{ - Type: &cxsdk.LogsNewValueTimeWindow_LogsNewValueTimeWindowSpecificValue{ + Type: &cxsdk.LogsNewValueTimeWindowSpecificValue{ LogsNewValueTimeWindowSpecificValue: logsNewValueTimeWindowValueSchemaToProtoMap[specificValue.ValueString()], }, }, nil @@ -2532,6 +2462,36 @@ func extractLogsNewValueTimeWindow(ctx context.Context, window types.Object) (*c } +func extractNewValueRules(ctx context.Context, elements types.List) ([]*cxsdk.LogsNewValueRule, diag.Diagnostics) { + diags := diag.Diagnostics{} + rules := make([]*cxsdk.LogsNewValueRule, len(elements.Elements())) + var objs []types.Object + elements.ElementsAs(ctx, &objs, false) + for i, r := range objs { + var rule NewValueRuleModel + if dg := r.As(ctx, &rule, basetypes.ObjectAsOptions{}); dg.HasError() { + diags.Append(dg...) + continue + } + + timeWindow, dg := extractLogsNewValueTimeWindow(ctx, rule.TimeWindow) + if dg.HasError() { + diags.Append(dg...) 
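+ // on failure rules[i] stays nil; the HasError check after the loop keeps a partially built slice from reaching the SDK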
+ continue + } + rules[i] = &cxsdk.LogsNewValueRule{ + Condition: &cxsdk.LogsNewValueCondition{ + KeypathToTrack: typeStringToWrapperspbString(rule.KeypathToTrack), + TimeWindow: timeWindow, + }, + } + } + if diags.HasError() { + return nil, diags + } + return rules, nil +} + func expandLogsUniqueCountAlertTypeDefinition(ctx context.Context, properties *cxsdk.AlertDefProperties, uniqueCount types.Object) (*cxsdk.AlertDefProperties, diag.Diagnostics) { if uniqueCount.IsNull() || uniqueCount.IsUnknown() { return properties, nil @@ -2593,76 +2553,63 @@ func extractLogsUniqueCountTimeWindow(ctx context.Context, window types.Object) } -func expandLogsTimeRelativeMoreThanAlertTypeDefinition(ctx context.Context, properties *cxsdk.AlertDefProperties, relativeMoreThan types.Object) (*cxsdk.AlertDefProperties, diag.Diagnostics) { - if relativeMoreThan.IsNull() || relativeMoreThan.IsUnknown() { +func expandLogsTimeRelativeThresholdAlertTypeDefinition(ctx context.Context, properties *cxsdk.AlertDefProperties, relativeThreshold types.Object) (*cxsdk.AlertDefProperties, diag.Diagnostics) { + if relativeThreshold.IsNull() || relativeThreshold.IsUnknown() { return properties, nil } - var relativeMoreThanModel LogsTimeRelativeMoreThanModel - if diags := relativeMoreThan.As(ctx, &relativeMoreThanModel, basetypes.ObjectAsOptions{}); diags.HasError() { + var relativeThresholdModel LogsTimeRelativeThresholdModel + if diags := relativeThreshold.As(ctx, &relativeThresholdModel, basetypes.ObjectAsOptions{}); diags.HasError() { return nil, diags } - logsFilter, diags := extractLogsFilter(ctx, relativeMoreThanModel.LogsFilter) + logsFilter, diags := extractLogsFilter(ctx, relativeThresholdModel.LogsFilter) if diags.HasError() { return nil, diags } - notificationPayloadFilter, diags := typeStringSliceToWrappedStringSlice(ctx, relativeMoreThanModel.NotificationPayloadFilter.Elements()) + notificationPayloadFilter, diags := typeStringSliceToWrappedStringSlice(ctx, relativeThresholdModel.NotificationPayloadFilter.Elements()) if diags.HasError() { return nil, diags } - properties.TypeDefinition = &cxsdk.AlertDefProperties_LogsTimeRelativeMoreThan{ - LogsTimeRelativeMoreThan: &cxsdk.LogsTimeRelativeMoreThanTypeDefinition{ + rules, diags := extractTimeRelativeThresholdRules(ctx, relativeThresholdModel.Rules) + + properties.TypeDefinition = &cxsdk.AlertDefPropertiesLogsTimeRelativeThreshold{ + LogsTimeRelativeThreshold: &cxsdk.LogsTimeRelativeThresholdType{ LogsFilter: logsFilter, - Threshold: typeInt64ToWrappedUint32(relativeMoreThanModel.Threshold), - ComparedTo: logsTimeRelativeComparedToSchemaToProtoMap[relativeMoreThanModel.ComparedTo.ValueString()], - IgnoreInfinity: typeBoolToWrapperspbBool(relativeMoreThanModel.IgnoreInfinity), + Rules: rules, NotificationPayloadFilter: notificationPayloadFilter, }, } - properties.AlertDefType = cxsdk.AlertDefType_ALERT_DEF_TYPE_LOGS_TIME_RELATIVE_MORE_THAN + properties.Type = cxsdk.AlertDefTypeLogsTimeRelativeThreshold return properties, nil } -func expandLogsTimeRelativeLessThanAlertTypeDefinition(ctx context.Context, properties *cxsdk.AlertDefProperties, timeRelativeLessThan types.Object) (*cxsdk.AlertDefProperties, diag.Diagnostics) { - if timeRelativeLessThan.IsNull() || timeRelativeLessThan.IsUnknown() { - return properties, nil - } - - var timeRelativeLessThanModel LogsTimeRelativeLessThanModel - if diags := timeRelativeLessThan.As(ctx, &timeRelativeLessThanModel, basetypes.ObjectAsOptions{}); diags.HasError() { - return nil, diags - } - - logsFilter, diags := 
extractLogsFilter(ctx, timeRelativeLessThanModel.LogsFilter) - if diags.HasError() { - return nil, diags - } - - notificationPayloadFilter, diags := typeStringSliceToWrappedStringSlice(ctx, timeRelativeLessThanModel.NotificationPayloadFilter.Elements()) - if diags.HasError() { - return nil, diags +func extractTimeRelativeThresholdRules(ctx context.Context, elements types.List) ([]*cxsdk.LogsTimeRelativeRule, diag.Diagnostics) { + diags := diag.Diagnostics{} + rules := make([]*cxsdk.LogsTimeRelativeRule, len(elements.Elements())) + var objs []types.Object + elements.ElementsAs(ctx, &objs, false) + for i, r := range objs { + var rule RuleModel + if dg := r.As(ctx, &rule, basetypes.ObjectAsOptions{}); dg.HasError() { + diags.Append(dg...) + continue + } + rules[i] = &cxsdk.LogsTimeRelativeRule{ + Condition: &cxsdk.LogsTimeRelativeCondition{ + Threshold: typeFloat64ToWrapperspbDouble(rule.Threshold), + ComparedTo: logsTimeRelativeComparedToSchemaToProtoMap[rule.ComparedTo.ValueString()], + IgnoreInfinity: typeBoolToWrapperspbBool(rule.IgnoreInfinity), + ConditionType: logsTimeRelativeConditionToProtoMap[rule.Condition.ValueString()], + }, + } } - - undetectedValuesManagement, diags := extractUndetectedValuesManagement(ctx, timeRelativeLessThanModel.UndetectedValuesManagement) if diags.HasError() { return nil, diags } - - properties.TypeDefinition = &cxsdk.AlertDefProperties_LogsTimeRelativeLessThan{ - LogsTimeRelativeLessThan: &cxsdk.LogsTimeRelativeLessThanTypeDefinition{ - LogsFilter: logsFilter, - Threshold: typeInt64ToWrappedUint32(timeRelativeLessThanModel.Threshold), - ComparedTo: logsTimeRelativeComparedToSchemaToProtoMap[timeRelativeLessThanModel.ComparedTo.ValueString()], - IgnoreInfinity: typeBoolToWrapperspbBool(timeRelativeLessThanModel.IgnoreInfinity), - UndetectedValuesManagement: undetectedValuesManagement, - NotificationPayloadFilter: notificationPayloadFilter, - }, - } - properties.AlertDefType = cxsdk.AlertDefType_ALERT_DEF_TYPE_LOGS_TIME_RELATIVE_LESS_THAN - return properties, nil + return rules, nil } func expandMetricMoreThanAlertTypeDefinition(ctx context.Context, properties *cxsdk.AlertDefProperties, metricMoreThan types.Object) (*cxsdk.AlertDefProperties, diag.Diagnostics) { @@ -3493,7 +3440,7 @@ func flattenAdvancedTargetSettings(ctx context.Context, advancedTargetSettings * func flattenRetriggeringPeriod(ctx context.Context, notifications *cxsdk.AlertDefAdvancedTargetSettings) (types.Object, diag.Diagnostics) { switch notificationPeriodType := notifications.RetriggeringPeriod.(type) { - case *cxsdk.AlertDefAdvancedTargetSettings_Minutes: + case *cxsdk.AlertDefAdvancedTargetSettingsMinutes: return types.ObjectValueFrom(ctx, retriggeringPeriodAttr(), RetriggeringPeriodModel{ Minutes: wrapperspbUint32ToTypeInt64(notificationPeriodType.Minutes), }) @@ -3565,65 +3512,45 @@ func flattenAlertTypeDefinition(ctx context.Context, properties *cxsdk.AlertDefP } alertTypeDefinitionModel := AlertTypeDefinitionModel{ - LogsImmediate: types.ObjectNull(logsImmediateAttr()), - LogsMoreThan: types.ObjectNull(logsMoreThanAttr()), - LogsLessThan: types.ObjectNull(logsLessThanAttr()), - LogsMoreThanUsual: types.ObjectNull(logsMoreThanUsualAttr()), - LogsRatioMoreThan: types.ObjectNull(logsRatioMoreThanAttr()), - LogsRatioLessThan: types.ObjectNull(logsRatioLessThanAttr()), - LogsNewValue: types.ObjectNull(logsNewValueAttr()), - LogsUniqueCount: types.ObjectNull(logsUniqueCountAttr()), - LogsTimeRelativeMoreThan: types.ObjectNull(logsTimeRelativeMoreThanAttr()), - LogsTimeRelativeLessThan: 
types.ObjectNull(logsTimeRelativeLessThanAttr()), - MetricMoreThan: types.ObjectNull(metricMoreThanAttr()), - MetricLessThan: types.ObjectNull(metricLessThanAttr()), - MetricMoreThanUsual: types.ObjectNull(metricMoreThanUsualAttr()), - MetricLessThanUsual: types.ObjectNull(metricLessThanUsualAttr()), - MetricLessThanOrEquals: types.ObjectNull(metricLessThanOrEqualsAttr()), - MetricMoreThanOrEquals: types.ObjectNull(metricMoreThanOrEqualsAttr()), - TracingImmediate: types.ObjectNull(tracingImmediateAttr()), - TracingMoreThan: types.ObjectNull(tracingMoreThanAttr()), - Flow: types.ObjectNull(flowAttr()), + + LogsImmediate: types.ObjectNull(logsImmediateAttr()), + LogsThreshold: types.ObjectNull(logsThresholdAttr()), + LogsUnusual: types.ObjectNull(logsUnusualAttr()), + LogsRatioThreshold: types.ObjectNull(logsRatioThresholdAttr()), + LogsNewValue: types.ObjectNull(logsNewValueAttr()), + LogsUniqueCount: types.ObjectNull(logsUniqueCountAttr()), + LogsTimeRelativeThreshold: types.ObjectNull(logsTimeRelativeThresholdAttr()), + MetricThreshold: types.ObjectNull(metricThresholdAttr()), + MetricUnusual: types.ObjectNull(metricUnusualAttr()), + TracingImmediate: types.ObjectNull(tracingImmediateAttr()), + TracingThreshold: types.ObjectNull(tracingThresholdAttr()), + Flow: types.ObjectNull(flowAttr()), } var diags diag.Diagnostics switch alertTypeDefinition := properties.TypeDefinition.(type) { - case *cxsdk.AlertDefProperties_LogsImmediate: + case *cxsdk.AlertDefPropertiesLogsImmediate: alertTypeDefinitionModel.LogsImmediate, diags = flattenLogsImmediate(ctx, alertTypeDefinition.LogsImmediate) - case *cxsdk.AlertDefProperties_LogsMoreThan: - alertTypeDefinitionModel.LogsMoreThan, diags = flattenLogsMoreThan(ctx, alertTypeDefinition.LogsMoreThan) - case *cxsdk.AlertDefProperties_LogsLessThan: - alertTypeDefinitionModel.LogsLessThan, diags = flattenLogsLessThan(ctx, alertTypeDefinition.LogsLessThan) - case *cxsdk.AlertDefProperties_LogsMoreThanUsual: - alertTypeDefinitionModel.LogsMoreThanUsual, diags = flattenLogsMoreThanUsual(ctx, alertTypeDefinition.LogsMoreThanUsual) - case *cxsdk.AlertDefProperties_LogsRatioMoreThan: - alertTypeDefinitionModel.LogsRatioMoreThan, diags = flattenLogsRatioMoreThan(ctx, alertTypeDefinition.LogsRatioMoreThan) - case *cxsdk.AlertDefProperties_LogsRatioLessThan: - alertTypeDefinitionModel.LogsRatioLessThan, diags = flattenLogsRatioLessThan(ctx, alertTypeDefinition.LogsRatioLessThan) - case *cxsdk.AlertDefProperties_LogsNewValue: + case *cxsdk.AlertDefPropertiesLogsThreshold: + alertTypeDefinitionModel.LogsThreshold, diags = flattenLogsThreshold(ctx, alertTypeDefinition.LogsThreshold) + case *cxsdk.AlertDefPropertiesLogsUnusual: + alertTypeDefinitionModel.LogsUnusual, diags = flattenLogsUnusual(ctx, alertTypeDefinition.LogsUnusual) + case *cxsdk.AlertDefPropertiesLogsRatioThreshold: + alertTypeDefinitionModel.LogsRatioThreshold, diags = flattenLogsRatioThreshold(ctx, alertTypeDefinition.LogsRatioThreshold) + case *cxsdk.AlertDefPropertiesLogsNewValue: alertTypeDefinitionModel.LogsNewValue, diags = flattenLogsNewValue(ctx, alertTypeDefinition.LogsNewValue) - case *cxsdk.AlertDefProperties_LogsUniqueCount: + case *cxsdk.AlertDefPropertiesLogsUniqueCount: alertTypeDefinitionModel.LogsUniqueCount, diags = flattenLogsUniqueCount(ctx, alertTypeDefinition.LogsUniqueCount) - case *cxsdk.AlertDefProperties_LogsTimeRelativeMoreThan: - alertTypeDefinitionModel.LogsTimeRelativeMoreThan, diags = flattenLogsTimeRelativeMoreThan(ctx, alertTypeDefinition.LogsTimeRelativeMoreThan) - 
case *cxsdk.AlertDefProperties_LogsTimeRelativeLessThan: - alertTypeDefinitionModel.LogsTimeRelativeLessThan, diags = flattenLogsTimeRelativeLessThan(ctx, alertTypeDefinition.LogsTimeRelativeLessThan) - case *cxsdk.AlertDefProperties_MetricMoreThan: - alertTypeDefinitionModel.MetricMoreThan, diags = flattenMetricMoreThan(ctx, alertTypeDefinition.MetricMoreThan) - case *cxsdk.AlertDefProperties_MetricLessThan: - alertTypeDefinitionModel.MetricLessThan, diags = flattenMetricLessThan(ctx, alertTypeDefinition.MetricLessThan) - case *cxsdk.AlertDefProperties_MetricMoreThanUsual: - alertTypeDefinitionModel.MetricMoreThanUsual, diags = flattenMetricMoreThanUsual(ctx, alertTypeDefinition.MetricMoreThanUsual) - case *cxsdk.AlertDefProperties_MetricLessThanUsual: - alertTypeDefinitionModel.MetricLessThanUsual, diags = flattenMetricLessThanUsual(ctx, alertTypeDefinition.MetricLessThanUsual) - case *cxsdk.AlertDefProperties_MetricLessThanOrEquals: - alertTypeDefinitionModel.MetricLessThanOrEquals, diags = flattenMetricLessThanOrEquals(ctx, alertTypeDefinition.MetricLessThanOrEquals) - case *cxsdk.AlertDefProperties_MetricMoreThanOrEquals: - alertTypeDefinitionModel.MetricMoreThanOrEquals, diags = flattenMetricMoreThanOrEquals(ctx, alertTypeDefinition.MetricMoreThanOrEquals) - case *cxsdk.AlertDefProperties_TracingImmediate: + case *cxsdk.AlertDefPropertiesLogsTimeRelativeThreshold: + alertTypeDefinitionModel.LogsTimeRelativeThreshold, diags = flattenLogsTimeRelativeThreshold(ctx, alertTypeDefinition.LogsTimeRelativeThreshold) + case *cxsdk.AlertDefPropertiesMetricThreshold: + alertTypeDefinitionModel.MetricThreshold, diags = flattenMetricThreshold(ctx, alertTypeDefinition.MetricThreshold) + case *cxsdk.AlertDefPropertiesMetricUnusual: + alertTypeDefinitionModel.MetricUnusual, diags = flattenMetricUnusual(ctx, alertTypeDefinition.MetricUnusual) + case *cxsdk.AlertDefPropertiesTracingImmediate: alertTypeDefinitionModel.TracingImmediate, diags = flattenTracingImmediate(ctx, alertTypeDefinition.TracingImmediate) - case *cxsdk.AlertDefProperties_TracingMoreThan: - alertTypeDefinitionModel.TracingMoreThan, diags = flattenTracingMoreThan(ctx, alertTypeDefinition.TracingMoreThan) - case *cxsdk.AlertDefProperties_Flow: + case *cxsdk.AlertDefPropertiesTracingThreshold: + alertTypeDefinitionModel.TracingThreshold, diags = flattenTracingThreshold(ctx, alertTypeDefinition.TracingThreshold) + case *cxsdk.AlertDefPropertiesFlow: alertTypeDefinitionModel.Flow, diags = flattenFlow(ctx, alertTypeDefinition.Flow) default: return types.ObjectNull(alertTypeDefinitionAttr()), diag.Diagnostics{diag.NewErrorDiagnostic("Invalid Alert Type Definition", fmt.Sprintf("Alert Type %v Definition is not valid", alertTypeDefinition))} @@ -3636,7 +3563,7 @@ func flattenAlertTypeDefinition(ctx context.Context, properties *cxsdk.AlertDefP return types.ObjectValueFrom(ctx, alertTypeDefinitionAttr(), alertTypeDefinitionModel) } -func flattenLogsImmediate(ctx context.Context, immediate *cxsdk.LogsImmediateTypeDefinition) (types.Object, diag.Diagnostics) { +func flattenLogsImmediate(ctx context.Context, immediate *cxsdk.LogsImmediateType) (types.Object, diag.Diagnostics) { if immediate == nil { return types.ObjectNull(logsImmediateAttr()), nil } @@ -3661,8 +3588,8 @@ func flattenAlertsLogsFilter(ctx context.Context, filter *cxsdk.LogsFilter) (typ var diags diag.Diagnostics var logsFilterModer AlertsLogsFilterModel switch filterType := filter.FilterType.(type) { - case *cxsdk.LogsFilter_LuceneFilter: - logsFilterModer.LuceneFilter, diags = 
flattenLuceneFilter(ctx, filterType.LuceneFilter) + case *cxsdk.LogsFilterSimpleFilter: + logsFilterModer.SimpleFilter, diags = flattenSimpleFilter(ctx, filterType.SimpleFilter) default: return types.ObjectNull(logsFilterAttr()), diag.Diagnostics{diag.NewErrorDiagnostic("Invalid Logs Filter", fmt.Sprintf("Logs Filter %v is not supported", filterType))} } @@ -3674,7 +3601,7 @@ func flattenAlertsLogsFilter(ctx context.Context, filter *cxsdk.LogsFilter) (typ return types.ObjectValueFrom(ctx, logsFilterAttr(), logsFilterModer) } -func flattenLuceneFilter(ctx context.Context, filter *cxsdk.LuceneFilter) (types.Object, diag.Diagnostics) { +func flattenSimpleFilter(ctx context.Context, filter *cxsdk.SimpleFilter) (types.Object, diag.Diagnostics) { if filter == nil { return types.ObjectNull(luceneFilterAttr()), nil } @@ -3684,7 +3611,7 @@ func flattenLuceneFilter(ctx context.Context, filter *cxsdk.LuceneFilter) (types return types.ObjectNull(luceneFilterAttr()), diags } - return types.ObjectValueFrom(ctx, luceneFilterAttr(), LuceneFilterModel{ + return types.ObjectValueFrom(ctx, luceneFilterAttr(), SimpleFilterModel{ LuceneQuery: wrapperspbStringToTypeString(filter.GetLuceneQuery()), LabelFilters: labelFilters, }) @@ -3742,22 +3669,22 @@ func flattenLogSeverities(ctx context.Context, severities []cxsdk.LogSeverity) ( return types.SetValueFrom(ctx, types.StringType, result) } -func flattenLogsMoreThan(ctx context.Context, moreThan *cxsdk.LogsMoreThanTypeDefinition) (types.Object, diag.Diagnostics) { - if moreThan == nil { - return types.ObjectNull(logsMoreThanAttr()), nil +func flattenLogsThreshold(ctx context.Context, threshold *cxsdk.LogsThresholdType) (types.Object, diag.Diagnostics) { + if threshold == nil { + return types.ObjectNull(logsThresholdAttr()), nil } - logsFilter, diags := flattenAlertsLogsFilter(ctx, moreThan.GetLogsFilter()) + logsFilter, diags := flattenAlertsLogsFilter(ctx, threshold.GetLogsFilter()) if diags.HasError() { - return types.ObjectNull(logsMoreThanAttr()), diags + return types.ObjectNull(logsThresholdAttr()), diags } - timeWindow, diags := flattenLogsTimeWindow(ctx, moreThan.GetTimeWindow()) + timeWindow, diags := flattenLogsTimeWindow(ctx, threshold.GetTimeWindow()) if diags.HasError() { - return types.ObjectNull(logsMoreThanAttr()), diags + return types.ObjectNull(logsThresholdAttr()), diags } - logsMoreThanModel := LogsMoreThanModel{ + logsMoreThanModel := LogsThresholdModel{ LogsFilter: logsFilter, Threshold: wrapperspbUint32ToTypeInt64(moreThan.GetThreshold()), TimeWindow: timeWindow, @@ -3773,7 +3700,7 @@ func flattenLogsTimeWindow(ctx context.Context, timeWindow *cxsdk.LogsTimeWindow } switch timeWindowType := timeWindow.Type.(type) { - case *cxsdk.LogsTimeWindow_LogsTimeWindowSpecificValue: + case *cxsdk.LogsTimeWindowSpecificValue: return types.ObjectValueFrom(ctx, logsTimeWindowAttr(), LogsTimeWindowModel{ SpecificValue: types.StringValue(logsTimeWindowValueProtoToSchemaMap[timeWindowType.LogsTimeWindowSpecificValue]), }) @@ -3783,36 +3710,6 @@ func flattenLogsTimeWindow(ctx context.Context, timeWindow *cxsdk.LogsTimeWindow } -func flattenLogsLessThan(ctx context.Context, lessThan *cxsdk.LogsLessThanTypeDefinition) (types.Object, diag.Diagnostics) { - if lessThan == nil { - return types.ObjectNull(logsLessThanAttr()), nil - } - - logsFilter, diags := flattenAlertsLogsFilter(ctx, lessThan.GetLogsFilter()) - if diags.HasError() { - return types.ObjectNull(logsLessThanAttr()), diags - } - - timeWindow, diags := flattenLogsTimeWindow(ctx, 
lessThan.GetTimeWindow()) - if diags.HasError() { - return types.ObjectNull(logsLessThanAttr()), diags - } - - undetectedValuesManagement, diags := flattenUndetectedValuesManagement(ctx, lessThan.GetUndetectedValuesManagement()) - if diags.HasError() { - return types.ObjectNull(logsLessThanAttr()), diags - } - - logsLessThanModel := LogsLessThanModel{ - LogsFilter: logsFilter, - Threshold: wrapperspbUint32ToTypeInt64(lessThan.GetThreshold()), - TimeWindow: timeWindow, - UndetectedValuesManagement: undetectedValuesManagement, - NotificationPayloadFilter: wrappedStringSliceToTypeStringSet(lessThan.GetNotificationPayloadFilter()), - } - return types.ObjectValueFrom(ctx, logsLessThanAttr(), logsLessThanModel) -} - func flattenUndetectedValuesManagement(ctx context.Context, undetectedValuesManagement *cxsdk.UndetectedValuesManagement) (types.Object, diag.Diagnostics) { if undetectedValuesManagement == nil { return types.ObjectNull(undetectedValuesManagementAttr()), nil diff --git a/coralogix/utils.go b/coralogix/utils.go index 2d780c12..5aff0b6d 100644 --- a/coralogix/utils.go +++ b/coralogix/utils.go @@ -516,6 +516,10 @@ func flattenTimeframe(timeMS int) []interface{} { }} } +func objIsNullOrUnknown(obj types.Object) bool { + return obj.IsNull() || obj.IsUnknown() +} + func sliceToString(data []string) string { b, _ := json.Marshal(data) return fmt.Sprintf("%v", string(b)) @@ -820,3 +824,19 @@ func convertSchemaWithoutID(rs resourceschema.Schema) datasourceschema.Schema { DeprecationMessage: rs.DeprecationMessage, } } + +func typeStringToWrapperspbUint32(str types.String) (*wrapperspb.UInt32Value, diag2.Diagnostics) { + parsed, err := strconv.ParseUint(str.ValueString(), 10, 32) + if err != nil { + return nil, diag2.Diagnostics{diag2.NewErrorDiagnostic("Failed to convert string to uint32", err.Error())} + } + return wrapperspb.UInt32(uint32(parsed)), nil +} + +func WrapperspbUint32ToString(num *wrapperspb.UInt32Value) types.String { + if num == nil { + return types.StringNull() + } + return types.StringValue(strconv.FormatUint(uint64(num.GetValue()), 10)) + +} From 679f87595a02262d63195b7952b42a7528dfd748 Mon Sep 17 00:00:00 2001 From: Claus Matzinger Date: Fri, 13 Sep 2024 16:24:25 +0200 Subject: [PATCH 04/12] feat: alerts v3 (WIP) --- coralogix/utils copy.go.nope | 787 ----------------------------------- 1 file changed, 787 deletions(-) delete mode 100644 coralogix/utils copy.go.nope diff --git a/coralogix/utils copy.go.nope b/coralogix/utils copy.go.nope deleted file mode 100644 index a3d0d8a5..00000000 --- a/coralogix/utils copy.go.nope +++ /dev/null @@ -1,787 +0,0 @@ -package coralogix - -import ( - "bytes" - "context" - "encoding/json" - "fmt" - "maps" - "math/big" - "math/rand" - "net/url" - "reflect" - "regexp" - "slices" - "strconv" - "time" - - gouuid "github.com/google/uuid" - "github.com/hashicorp/terraform-plugin-framework/attr" - datasourceschema "github.com/hashicorp/terraform-plugin-framework/datasource/schema" - diag2 "github.com/hashicorp/terraform-plugin-framework/diag" - resourceschema "github.com/hashicorp/terraform-plugin-framework/resource/schema" - "github.com/hashicorp/terraform-plugin-framework/schema/validator" - "github.com/hashicorp/terraform-plugin-framework/types" - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" - "github.com/hashicorp/terraform-plugin-testing/helper/acctest" - "google.golang.org/grpc/codes" - "google.golang.org/grpc/status" - 
"google.golang.org/protobuf/types/known/wrapperspb" -) - -var ( - msInHour = int(time.Hour.Milliseconds()) - msInMinute = int(time.Minute.Milliseconds()) - msInSecond = int(time.Second.Milliseconds()) -) - -func formatRpcErrors(err error, url, requestStr string) string { - switch status.Code(err) { - case codes.PermissionDenied, codes.Unauthenticated: - return fmt.Sprintf("permission denied for url - %s\ncheck your api-key and permissions", url) - case codes.Internal: - return fmt.Sprintf("internal error in Coralogix backend.\nerror - %s\nurl - %s\nrequest - %s", err, url, requestStr) - case codes.InvalidArgument: - return fmt.Sprintf("invalid argument error.\nerror - %s\nurl - %s\nrequest - %s", err, url, requestStr) - default: - return err.Error() - } -} - -// datasourceSchemaFromResourceSchema is a recursive func that -// converts an existing Resource schema to a Datasource schema. -// All schema elements are copied, but certain attributes are ignored or changed: -// - all attributes have Computed = true -// - all attributes have ForceNew, Required = false -// - Validation funcs and attributes (e.g. MaxItems) are not copied -func datasourceSchemaFromResourceSchema(rs map[string]*schema.Schema) map[string]*schema.Schema { - ds := make(map[string]*schema.Schema, len(rs)) - for k, v := range rs { - dv := &schema.Schema{ - Computed: true, - ForceNew: false, - Required: false, - Description: v.Description, - Type: v.Type, - } - - switch v.Type { - case schema.TypeSet: - dv.Set = v.Set - fallthrough - case schema.TypeList: - // List & Set types are generally used for 2 cases: - // - a list/set of simple primitive values (e.g. list of strings) - // - a sub resource - if elem, ok := v.Elem.(*schema.Resource); ok { - // handle the case where the Element is a sub-resource - dv.Elem = &schema.Resource{ - Schema: datasourceSchemaFromResourceSchema(elem.Schema), - } - } else { - // handle simple primitive case - dv.Elem = v.Elem - } - - default: - // Elem of all other types are copied as-is - dv.Elem = v.Elem - - } - ds[k] = dv - - } - return ds -} - -func frameworkDatasourceSchemaFromFrameworkResourceSchema(rs resourceschema.Schema) datasourceschema.Schema { - attributes := convertAttributes(rs.Attributes) - if idSchema, ok := rs.Attributes["id"]; ok { - attributes["id"] = datasourceschema.StringAttribute{ - Required: true, - Description: idSchema.GetDescription(), - MarkdownDescription: idSchema.GetMarkdownDescription(), - } - } - - return datasourceschema.Schema{ - Attributes: attributes, - //Blocks: convertBlocks(rs.Blocks), - Description: rs.Description, - MarkdownDescription: rs.MarkdownDescription, - DeprecationMessage: rs.DeprecationMessage, - } -} - -func convertAttributes(attributes map[string]resourceschema.Attribute) map[string]datasourceschema.Attribute { - result := make(map[string]datasourceschema.Attribute, len(attributes)) - for k, v := range attributes { - result[k] = convertAttribute(v) - } - return result -} - -func convertAttribute(resourceAttribute resourceschema.Attribute) datasourceschema.Attribute { - switch attr := resourceAttribute.(type) { - case resourceschema.BoolAttribute: - return datasourceschema.BoolAttribute{ - Computed: true, - Description: attr.Description, - MarkdownDescription: attr.MarkdownDescription, - } - case resourceschema.Float64Attribute: - return datasourceschema.Float64Attribute{ - Computed: true, - Description: attr.Description, - MarkdownDescription: attr.MarkdownDescription, - } - case resourceschema.Int64Attribute: - return 
datasourceschema.Int64Attribute{ - Computed: true, - Description: attr.Description, - MarkdownDescription: attr.MarkdownDescription, - } - case resourceschema.NumberAttribute: - return datasourceschema.NumberAttribute{ - Computed: true, - Description: attr.Description, - MarkdownDescription: attr.MarkdownDescription, - } - case resourceschema.StringAttribute: - return datasourceschema.StringAttribute{ - Computed: true, - Description: attr.Description, - MarkdownDescription: attr.MarkdownDescription, - } - case resourceschema.MapAttribute: - return datasourceschema.MapAttribute{ - Computed: true, - Description: attr.Description, - MarkdownDescription: attr.MarkdownDescription, - ElementType: attr.ElementType, - } - case resourceschema.ObjectAttribute: - return datasourceschema.ObjectAttribute{ - Computed: true, - Description: attr.Description, - MarkdownDescription: attr.MarkdownDescription, - AttributeTypes: attr.AttributeTypes, - } - case resourceschema.SetAttribute: - return datasourceschema.SetAttribute{ - Computed: true, - Description: attr.Description, - MarkdownDescription: attr.MarkdownDescription, - ElementType: attr.ElementType, - } - case resourceschema.ListNestedAttribute: - return datasourceschema.ListNestedAttribute{ - Computed: true, - Description: attr.Description, - MarkdownDescription: attr.MarkdownDescription, - NestedObject: datasourceschema.NestedAttributeObject{ - Attributes: convertAttributes(attr.NestedObject.Attributes), - }, - } - case resourceschema.ListAttribute: - return datasourceschema.ListAttribute{ - Computed: true, - Description: attr.Description, - MarkdownDescription: attr.MarkdownDescription, - ElementType: attr.ElementType, - } - case resourceschema.MapNestedAttribute: - return datasourceschema.MapNestedAttribute{ - Computed: true, - Description: attr.Description, - MarkdownDescription: attr.MarkdownDescription, - NestedObject: datasourceschema.NestedAttributeObject{ - Attributes: convertAttributes(attr.NestedObject.Attributes), - }, - } - case resourceschema.SetNestedAttribute: - return datasourceschema.SetNestedAttribute{ - Computed: true, - Description: attr.Description, - MarkdownDescription: attr.MarkdownDescription, - NestedObject: datasourceschema.NestedAttributeObject{ - Attributes: convertAttributes(attr.NestedObject.Attributes), - }, - } - case resourceschema.SingleNestedAttribute: - return datasourceschema.SingleNestedAttribute{ - Computed: true, - Description: attr.Description, - MarkdownDescription: attr.MarkdownDescription, - Attributes: convertAttributes(attr.Attributes), - } - default: - panic(fmt.Sprintf("unknown resource attribute type: %T", resourceAttribute)) - } -} - -func interfaceSliceToStringSlice(s []interface{}) []string { - result := make([]string, 0, len(s)) - for _, v := range s { - result = append(result, v.(string)) - } - return result -} - -func attrSliceToFloat32Slice(ctx context.Context, arr []attr.Value) ([]float32, diag2.Diagnostics) { - var diags diag2.Diagnostics - result := make([]float32, 0, len(arr)) - for _, v := range arr { - val, err := v.ToTerraformValue(ctx) - if err != nil { - diags.AddError("Failed to convert value to Terraform", err.Error()) - continue - } - var d big.Float - if err = val.As(&d); err != nil { - diags.AddError("Failed to convert value to float64", err.Error()) - continue - } - f, _ := d.Float64() - result = append(result, float32(f)) - } - return result, diags -} - -func float32SliceTypeList(ctx context.Context, arr []float32) (types.List, diag2.Diagnostics) { - if len(arr) == 0 { - return 
types.ListNull(types.Float64Type), nil - } - result := make([]attr.Value, 0, len(arr)) - for _, v := range arr { - if float32(int(v)) != v { - result = append(result, types.Float64Value(float64(v*10000)/float64(10000))) - } else { - result = append(result, types.Float64Value(float64(v))) - } - } - return types.ListValueFrom(ctx, types.Float64Type, result) -} - -func wrappedStringSliceToTypeStringSet(s []*wrapperspb.StringValue) types.Set { - if len(s) == 0 { - return types.SetNull(types.StringType) - } - elements := make([]attr.Value, 0, len(s)) - for _, v := range s { - elements = append(elements, types.StringValue(v.GetValue())) - } - return types.SetValueMust(types.StringType, elements) -} - -func stringSliceToTypeStringSet(s []string) types.Set { - if len(s) == 0 { - return types.SetNull(types.StringType) - } - elements := make([]attr.Value, 0, len(s)) - for _, v := range s { - elements = append(elements, types.StringValue(v)) - } - return types.SetValueMust(types.StringType, elements) -} - -func int32SliceToTypeInt64Set(arr []int32) types.Set { - if len(arr) == 0 { - return types.SetNull(types.Int64Type) - } - elements := make([]attr.Value, 0, len(arr)) - for _, n := range arr { - elements = append(elements, types.Int64Value(int64(n))) - } - return types.SetValueMust(types.StringType, elements) -} - -func wrappedStringSliceToTypeStringList(s []*wrapperspb.StringValue) types.List { - if len(s) == 0 { - return types.ListNull(types.StringType) - } - elements := make([]attr.Value, 0, len(s)) - for _, v := range s { - elements = append(elements, types.StringValue(v.GetValue())) - } - return types.ListValueMust(types.StringType, elements) -} - -func typeStringSliceToWrappedStringSlice(ctx context.Context, s []attr.Value) ([]*wrapperspb.StringValue, diag2.Diagnostics) { - var diags diag2.Diagnostics - result := make([]*wrapperspb.StringValue, 0, len(s)) - for _, v := range s { - val, err := v.ToTerraformValue(ctx) - if err != nil { - diags.AddError("Failed to convert value to Terraform", err.Error()) - continue - } - var str string - - if err = val.As(&str); err != nil { - diags.AddError("Failed to convert value to string", err.Error()) - continue - } - result = append(result, wrapperspb.String(str)) - } - return result, diags -} - -func typeInt64ToWrappedInt64(v types.Int64) *wrapperspb.Int64Value { - if v.IsNull() || v.IsUnknown() { - return nil - } - return wrapperspb.Int64(v.ValueInt64()) -} - -func typeInt64ToWrappedInt32(v types.Int64) *wrapperspb.Int32Value { - if v.IsNull() || v.IsUnknown() { - return nil - } - return wrapperspb.Int32(int32(v.ValueInt64())) -} - -func typeInt64ToWrappedUint32(v types.Int64) *wrapperspb.UInt32Value { - if v.IsNull() || v.IsUnknown() { - return nil - } - return wrapperspb.UInt32(uint32(v.ValueInt64())) -} - -func typeBoolToWrapperspbBool(v types.Bool) *wrapperspb.BoolValue { - if v.IsNull() || v.IsUnknown() { - return nil - } - return wrapperspb.Bool(v.ValueBool()) -} - -func typeStringSliceToStringSlice(ctx context.Context, s []attr.Value) ([]string, diag2.Diagnostics) { - result := make([]string, 0, len(s)) - var diags diag2.Diagnostics - for _, v := range s { - val, err := v.ToTerraformValue(ctx) - if err != nil { - diags.AddError("Failed to convert value to Terraform", err.Error()) - continue - } - var str string - if err = val.As(&str); err != nil { - diags.AddError("Failed to convert value to Terraform", err.Error()) - continue - } - result = append(result, str) - } - if diags.HasError() { - return nil, diags - } - return result, nil -} - -func 
typeInt64SliceToInt32Slice(ctx context.Context, s []attr.Value) ([]int32, diag2.Diagnostics) { - result := make([]int32, 0, len(s)) - var diags diag2.Diagnostics - for _, v := range s { - val, err := v.ToTerraformValue(ctx) - if err != nil { - diags.AddError("Failed to convert value to Terraform", err.Error()) - continue - } - var n int64 - if err = val.As(&n); err != nil { - diags.AddError("Failed to convert value to Terraform", err.Error()) - continue - } - result = append(result, int32(n)) - } - if diags.HasError() { - return nil, diags - } - return result, nil -} - -func timeInDaySchema(description string) *schema.Schema { - timeRegex := regexp.MustCompile(`^(0\d|1\d|2[0-3]):[0-5]\d$`) - return &schema.Schema{ - Type: schema.TypeString, - Required: true, - ValidateFunc: validation.StringMatch(timeRegex, "not valid time, only HH:MM format is allowed"), - Description: description, - } -} - -func toTwoDigitsFormat(digit int32) string { - digitStr := fmt.Sprintf("%d", digit) - if len(digitStr) == 1 { - digitStr = "0" + digitStr - } - return digitStr -} - -func timeSchema(description string) *schema.Schema { - return &schema.Schema{ - Type: schema.TypeList, - Optional: true, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "hours": { - Type: schema.TypeInt, - Optional: true, - ValidateFunc: validation.IntAtLeast(0), - }, - "minutes": { - Type: schema.TypeInt, - Optional: true, - ValidateFunc: validation.IntAtLeast(0), - }, - "seconds": { - Type: schema.TypeInt, - Optional: true, - ValidateFunc: validation.IntAtLeast(0), - }, - }, - }, - Description: description, - } -} - -func expandTimeToMS(v interface{}) int { - l := v.([]interface{}) - if len(l) == 0 { - return 0 - } - - m := l[0].(map[string]interface{}) - - timeMS := msInHour * m["hours"].(int) - timeMS += msInMinute * m["minutes"].(int) - timeMS += msInSecond * m["seconds"].(int) - - return timeMS -} - -func flattenTimeframe(timeMS int) []interface{} { - if timeMS == 0 { - return nil - } - - hours := timeMS / msInHour - timeMS -= hours * msInHour - - minutes := timeMS / msInMinute - timeMS -= minutes * msInMinute - - seconds := timeMS / msInSecond - - return []interface{}{map[string]int{ - "hours": hours, - "minutes": minutes, - "seconds": seconds, - }} -} - -func sliceToString(data []string) string { - b, _ := json.Marshal(data) - return fmt.Sprintf("%v", string(b)) -} - -func randFloat() float64 { - r := rand.New(rand.NewSource(99)) - return r.Float64() -} - -func selectRandomlyFromSlice(s []string) string { - return s[acctest.RandIntRange(0, len(s))] -} - -func selectManyRandomlyFromSlice(s []string) []string { - r := rand.New(rand.NewSource(99)) - indexPerms := r.Perm(len(s)) - itemsToSelect := acctest.RandIntRange(0, len(s)+1) - result := make([]string, 0, itemsToSelect) - for _, index := range indexPerms { - result = append(result, s[index]) - } - return result -} - -func getKeysStrings(m map[string]string) []string { - result := make([]string, 0) - for k := range m { - result = append(result, k) - } - return result -} - -func getKeysInterface(m map[string]interface{}) []string { - result := make([]string, 0) - for k := range m { - result = append(result, k) - } - return result -} - -func getKeysInt32(m map[string]int32) []string { - result := make([]string, 0) - for k := range m { - result = append(result, k) - } - return result -} - -func reverseMapStrings(m map[string]string) map[string]string { - n := make(map[string]string) - for k, v := range m { - n[v] = k - } - return n -} - -func 
strToUint32(str string) uint32 { - n, _ := strconv.ParseUint(str, 10, 32) - return uint32(n) -} - -func uint32ToStr(n uint32) string { - return strconv.FormatUint(uint64(n), 10) -} - -type urlValidationFuncFramework struct { -} - -func (u urlValidationFuncFramework) Description(_ context.Context) string { - return "string must be a valid url format" -} - -func (u urlValidationFuncFramework) MarkdownDescription(ctx context.Context) string { - return u.Description(ctx) -} - -func (u urlValidationFuncFramework) ValidateString(ctx context.Context, req validator.StringRequest, resp *validator.StringResponse) { - if req.ConfigValue.IsNull() || req.ConfigValue.IsUnknown() { - return - } - - value := req.ConfigValue.ValueString() - - if _, err := url.ParseRequestURI(value); err != nil { - resp.Diagnostics.Append( - diag2.NewAttributeErrorDiagnostic( - req.Path, - "Invalid Attribute Value Format", - fmt.Sprintf("Attribute %s in not a valid url - %s", req.Path, value), - ), - ) - } -} - -const letterBytes = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ" - -func RandStringBytes(n int) string { - b := make([]byte, n) - for i := range b { - b[i] = letterBytes[rand.Intn(len(letterBytes))] - } - return string(b) -} - -func JSONStringsEqual(s1, s2 string) bool { - b1 := bytes.NewBufferString("") - if err := json.Compact(b1, []byte(s1)); err != nil { - return false - } - - b2 := bytes.NewBufferString("") - if err := json.Compact(b2, []byte(s2)); err != nil { - return false - } - - return JSONBytesEqual(b1.Bytes(), b2.Bytes()) -} - -func JSONBytesEqual(b1, b2 []byte) bool { - var o1 interface{} - if err := json.Unmarshal(b1, &o1); err != nil { - return false - } - - var o2 interface{} - if err := json.Unmarshal(b2, &o2); err != nil { - return false - } - - return reflect.DeepEqual(o1, o2) -} - -func randBool() bool { - return rand.Int()%2 == 0 -} - -func typeStringToWrapperspbString(str types.String) *wrapperspb.StringValue { - if str.IsNull() || str.IsUnknown() { - return nil - - } - return wrapperspb.String(str.ValueString()) -} - -func wrapperspbFloat64ToTypeFloat64(num *wrapperspb.FloatValue) types.Float64 { - if num == nil { - return types.Float64Null() - } - - return types.Float64Value(float64(num.GetValue())) -} - -func typeStringToStringPointer(str types.String) *string { - if str.IsNull() || str.IsUnknown() { - return nil - } - result := new(string) - *result = str.ValueString() - return result -} - -func stringPointerToTypeString(str *string) types.String { - if str == nil { - return types.StringNull() - } - return types.StringValue(*str) -} - -func typeFloat64ToWrapperspbDouble(num types.Float64) *wrapperspb.DoubleValue { - if num.IsNull() { - return nil - } - - return wrapperspb.Double(num.ValueFloat64()) -} - -func typeFloat64ToWrapperspbFloat(num types.Float64) *wrapperspb.FloatValue { - if num.IsNull() { - return nil - } - - return wrapperspb.Float(float32(num.ValueFloat64())) -} - -func wrapperspbStringToTypeString(str *wrapperspb.StringValue) types.String { - if str == nil { - return types.StringNull() - } - - return types.StringValue(str.GetValue()) -} - -func wrapperspbInt64ToTypeInt64(num *wrapperspb.Int64Value) types.Int64 { - if num == nil { - return types.Int64Null() - } - - return types.Int64Value(num.GetValue()) -} - -func wrapperspbUint32ToTypeInt64(num *wrapperspb.UInt32Value) types.Int64 { - if num == nil { - return types.Int64Null() - } - - return types.Int64Value(int64(num.GetValue())) -} - -func wrapperspbDoubleToTypeFloat64(num *wrapperspb.DoubleValue) types.Float64 { 
- if num == nil { - return types.Float64Null() - } - - return types.Float64Value(num.GetValue()) -} - -func wrapperspbBoolToTypeBool(b *wrapperspb.BoolValue) types.Bool { - if b == nil { - return types.BoolNull() - } - - return types.BoolValue(b.GetValue()) -} - -func wrapperspbInt32ToTypeInt64(num *wrapperspb.Int32Value) types.Int64 { - if num == nil { - return types.Int64Null() - } - - return types.Int64Value(int64(num.GetValue())) -} - -func ReverseMap[K, V comparable](m map[K]V) map[V]K { - n := make(map[V]K) - for k, v := range m { - n[v] = k - } - return n -} - -func GetKeys[K, V comparable](m map[K]V) []K { - slices.Sorted(maps.Keys(m)) -} - -func GetValues[K, V comparable](m map[K]V) []V { - slices.Sorted(maps.Values(m)) -} - -func typeMapToStringMap(ctx context.Context, m types.Map) (map[string]string, diag2.Diagnostics) { - var result map[string]string - diags := m.ElementsAs(ctx, &result, true) - return result, diags -} - -func expandUuid(uuid types.String) *wrapperspb.StringValue { - if uuid.IsNull() || uuid.IsUnknown() { - return &wrapperspb.StringValue{Value: gouuid.NewString()} - } - return &wrapperspb.StringValue{Value: uuid.ValueString()} -} - -func retryableStatusCode(statusCode codes.Code) bool { - switch statusCode { - case codes.Unavailable, codes.DeadlineExceeded, codes.Aborted: - return true - default: - return false - } -} - -func uint32SliceToWrappedUint32Slice(s []uint32) []*wrapperspb.UInt32Value { - result := make([]*wrapperspb.UInt32Value, 0, len(s)) - for _, n := range s { - result = append(result, wrapperspb.UInt32(n)) - } - return result -} - -func convertSchemaWithoutID(rs resourceschema.Schema) datasourceschema.Schema { - attributes := convertAttributes(rs.Attributes) - return datasourceschema.Schema{ - Attributes: attributes, - Description: rs.Description, - MarkdownDescription: rs.MarkdownDescription, - DeprecationMessage: rs.DeprecationMessage, - } -} - -func typeStringToWrapperspbUint32(str types.String) (*wrapperspb.UInt32Value, diag2.Diagnostics) { - parsed, err := strconv.ParseUint(str.ValueString(), 10, 32) - if err != nil { - return nil, diag2.Diagnostics{diag2.NewErrorDiagnostic("Failed to convert string to uint32", err.Error())} - } - return wrapperspb.UInt32(uint32(parsed)), nil -} - -func WrapperspbUint32ToString(num *wrapperspb.UInt32Value) types.String { - if num == nil { - return types.StringNull() - } - return types.StringValue(strconv.FormatUint(uint64(num.GetValue()), 10)) - -} From e25018198e6ac77d1ac38fb31252ef5d31ed33b7 Mon Sep 17 00:00:00 2001 From: Claus Matzinger Date: Mon, 23 Sep 2024 14:07:15 +0200 Subject: [PATCH 05/12] feat: terraform to use coraglogix sdk --- coralogix/resource_coralogix_alert.go | 1845 ++++++++++--------------- 1 file changed, 707 insertions(+), 1138 deletions(-) diff --git a/coralogix/resource_coralogix_alert.go b/coralogix/resource_coralogix_alert.go index 396fa503..96468654 100644 --- a/coralogix/resource_coralogix_alert.go +++ b/coralogix/resource_coralogix_alert.go @@ -155,7 +155,7 @@ var ( cxsdk.LogsNewValueTimeWindowValue1Week: "1_WEEK", cxsdk.LogsNewValueTimeWindowValue1Month: "1_MONTH", cxsdk.LogsNewValueTimeWindowValue2Months: "2_MONTHS", - cxsdk.LogsNewValueTimeWindowValue_3Months: "3_MONTHS", + cxsdk.LogsNewValueTimeWindowValue3Months: "3_MONTHS", } logsNewValueTimeWindowValueSchemaToProtoMap = ReverseMap(logsNewValueTimeWindowValueProtoToSchemaMap) validLogsNewValueTimeWindowValues = GetKeys(logsNewValueTimeWindowValueSchemaToProtoMap) @@ -274,13 +274,15 @@ var ( 
cxsdk.MetricThresholdConditionTypeMoreThanOrEquals: "MORE_THAN_OR_EQUALS", cxsdk.MetricThresholdConditionTypeLessThanOrEquals: "LESS_THAN_OR_EQUALS", } - metricsThresholdConditionValues = GetValues(metricsThresholdConditionMap) + metricsThresholdConditionValues = GetValues(metricsThresholdConditionMap) + metricsThresholdConditionToProtoMap = ReverseMap(metricsThresholdConditionMap) - metricsUnusualConditionMap = map[cxsdk.MetricUnusualConditionType]string{ + metricUnusualConditionMap = map[cxsdk.MetricUnusualConditionType]string{ cxsdk.MetricUnusualConditionTypeMoreThanOrUnspecified: "MORE_THAN", cxsdk.MetricUnusualConditionTypeLessThan: "LESS_THAN", } - metricsUnusualConditionValues = GetValues(metricsUnusualConditionMap) + metricUnusualConditionValues = GetValues(metricUnusualConditionMap) + metricUnusualConditionToProtoMap = ReverseMap(metricUnusualConditionMap) ) func NewAlertResource() resource.Resource { @@ -399,15 +401,15 @@ type LogsNewValueModel struct { } type LogsUniqueCountModel struct { - Rules types.List `tfsdk:"rules"` // []LogsUniqueCountRulesModel + Rules types.List `tfsdk:"rules"` // []LogsUniqueCountRuleModel LogsFilter types.Object `tfsdk:"logs_filter"` // AlertsLogsFilterModel NotificationPayloadFilter types.Set `tfsdk:"notification_payload_filter"` // []types.String } -type LogsUniqueCountRulesModel struct { +type LogsUniqueCountRuleModel struct { MaxUniqueCountPerGroupByKey types.Int64 `tfsdk:"max_unique_count_per_group_by_key"` MaxUniqueCount types.Int64 `tfsdk:"max_unique_count"` - TimeWindow types.Object `tfsdk:"time_window"` // LogsUniqueCountTimeWindowModel + TimeWindow types.String `tfsdk:"time_window"` UniqueCountKeypath types.String `tfsdk:"unique_count_keypath"` } @@ -427,9 +429,9 @@ type MetricThresholdModel struct { type MetricRule struct { Threshold types.Float64 `tfsdk:"threshold"` ForOverPct types.Int64 `tfsdk:"for_over_pct"` - OfTheLast types.Object `tfsdk:"of_the_last"` // MetricTimeWindowModel + OfTheLast types.String `tfsdk:"of_the_last"` Condition types.String `tfsdk:"condition"` - MinNonNullValuesPct types.Int32 `tfsdk:"min_non_null_values_pct"` + MinNonNullValuesPct types.Int64 `tfsdk:"min_non_null_values_pct"` MissingValues types.Object `tfsdk:"missing_values"` // MetricMissingValuesModel } @@ -451,8 +453,12 @@ type TracingImmediateModel struct { type TracingThresholdModel struct { TracingFilter types.Object `tfsdk:"tracing_filter"` // TracingFilterModel NotificationPayloadFilter types.Set `tfsdk:"notification_payload_filter"` // []types.String - TimeWindow types.Object `tfsdk:"time_window"` // TracingTimeWindowModel - SpanAmount types.Int64 `tfsdk:"span_amount"` + Rules types.List `tfsdk:"rules"` // []TracingThresholdRuleModel +} + +type TracingThresholdRuleModel struct { + TimeWindow types.String `tfsdk:"time_window"` + SpanAmount types.Float64 `tfsdk:"span_amount"` } type FlowModel struct { @@ -481,10 +487,6 @@ type AlertsLogsFilterModel struct { SimpleFilter types.Object `tfsdk:"simple_filter"` // LuceneFilterModel } -type LogsTimeWindowModel struct { - SpecificValue types.String `tfsdk:"specific_value"` -} - type SimpleFilterModel struct { LuceneQuery types.String `tfsdk:"lucene_query"` LabelFilters types.Object `tfsdk:"label_filters"` // LabelFiltersModel @@ -510,33 +512,17 @@ type UndetectedValuesManagementModel struct { AutoRetireTimeframe types.String `tfsdk:"auto_retire_timeframe"` } -type LogsRatioTimeWindowModel struct { - SpecificValue types.String `tfsdk:"specific_value"` -} - -type LogsNewValueTimeWindowModel struct { - 
SpecificValue types.String `tfsdk:"specific_value"` -} - -type LogsUniqueCountTimeWindowModel struct { - SpecificValue types.String `tfsdk:"specific_value"` -} - type MetricFilterModel struct { Promql types.String `tfsdk:"promql"` } -type MetricTimeWindowModel struct { - SpecificValue types.String `tfsdk:"specific_value"` -} - type MetricMissingValuesModel struct { ReplaceWithZero types.Bool `tfsdk:"replace_with_zero"` MinNonNullValuesPct types.Int64 `tfsdk:"min_non_null_values_pct"` } type NewValueRuleModel struct { - TimeWindow types.Object `tfsdk:"time_window"` // LogsTimeWindowModel + TimeWindow types.String `tfsdk:"time_window"` KeypathToTrack types.String `tfsdk:"keypath_to_track"` } @@ -544,7 +530,7 @@ type RuleModel struct { ComparedTo types.String `tfsdk:"compared_to"` Condition types.String `tfsdk:"condition"` Threshold types.Float64 `tfsdk:"threshold"` - TimeWindow types.Object `tfsdk:"time_window"` // LogsTimeWindowModel + TimeWindow types.String `tfsdk:"time_window"` IgnoreInfinity types.Bool `tfsdk:"ignore_infinity"` } @@ -571,10 +557,6 @@ type TracingSpanFieldsFilterModel struct { FilterType types.Object `tfsdk:"filter_type"` // TracingFilterTypeModel } -type TracingTimeWindowModel struct { - SpecificValue types.String `tfsdk:"specific_value"` -} - func (r *AlertResource) Metadata(_ context.Context, req resource.MetadataRequest, resp *resource.MetadataResponse) { resp.TypeName = req.ProviderTypeName + "_alert" } @@ -742,7 +724,13 @@ func (r *AlertResource) Schema(_ context.Context, _ resource.SchemaRequest, resp "threshold": schema.Float64Attribute{ Required: true, }, - "time_window": logsTimeWindowSchema(), + "time_window": schema.StringAttribute{ + Required: true, + Validators: []validator.String{ + stringvalidator.OneOf(validLogsTimeWindowValues...), + }, + MarkdownDescription: fmt.Sprintf("Condition to evaluate the threshold with. Valid values: %q.", validLogsTimeWindowValues), + }, "condition": schema.StringAttribute{ Required: true, Validators: []validator.String{ @@ -767,10 +755,13 @@ func (r *AlertResource) Schema(_ context.Context, _ resource.SchemaRequest, resp Validators: []validator.List{listvalidator.SizeAtLeast(1)}, NestedObject: schema.NestedAttributeObject{ Attributes: map[string]schema.Attribute{ - "threshold": schema.Float64Attribute{ + "time_window": schema.StringAttribute{ Required: true, + Validators: []validator.String{ + stringvalidator.OneOf(validLogsTimeWindowValues...), + }, + MarkdownDescription: fmt.Sprintf("Condition to evaluate the threshold with. Valid values: %q.", validLogsTimeWindowValues), }, - "time_window": logsTimeWindowSchema(), "minimum_threshold": schema.Float64Attribute{ Required: true, }, @@ -799,7 +790,13 @@ func (r *AlertResource) Schema(_ context.Context, _ resource.SchemaRequest, resp "threshold": schema.Float64Attribute{ Required: true, }, - "time_window": logsRatioTimeWindowSchema(), + "time_window": schema.StringAttribute{ + Required: true, + Validators: []validator.String{ + stringvalidator.OneOf(validLogsRatioTimeWindowValues...), + }, + MarkdownDescription: fmt.Sprintf("Condition to evaluate the threshold with. 
Valid values: %q.", validLogsRatioTimeWindowValues), + }, "ignore_infinity": schema.BoolAttribute{ Optional: true, Computed: true, @@ -821,7 +818,13 @@ func (r *AlertResource) Schema(_ context.Context, _ resource.SchemaRequest, resp NestedObject: schema.NestedAttributeObject{ Attributes: map[string]schema.Attribute{ "keypath_to_track": schema.StringAttribute{Required: true}, - "time_window": logsNewValueTimeWindowSchema(), + "time_window": schema.StringAttribute{ + Required: true, + Validators: []validator.String{ + stringvalidator.OneOf(validLogsNewValueTimeWindowValues...), + }, + MarkdownDescription: fmt.Sprintf("Condition to evaluate the threshold with. Valid values: %q.", validLogsNewValueTimeWindowValues), + }, }, }, }, @@ -837,14 +840,28 @@ func (r *AlertResource) Schema(_ context.Context, _ resource.SchemaRequest, resp Attributes: map[string]schema.Attribute{ "logs_filter": logsFilterSchema(), "notification_payload_filter": notificationPayloadFilterSchema(), - "time_window": logsUniqueCountTimeWindowSchema(), - "unique_count_keypath": schema.StringAttribute{Required: true}, - "max_unique_count": schema.Int64Attribute{Required: true}, - "max_unique_count_per_group_by_key": schema.Int64Attribute{ - Optional: true, - Validators: []validator.Int64{ - int64validator.AlsoRequires(path.MatchRoot("group_by")), - requiredWhenGroupBySet{}, + "rules": schema.ListNestedAttribute{ + Required: true, + Validators: []validator.List{listvalidator.SizeAtLeast(1)}, + NestedObject: schema.NestedAttributeObject{ + Attributes: map[string]schema.Attribute{ + "time_window": schema.StringAttribute{ + Required: true, + Validators: []validator.String{ + stringvalidator.OneOf(validLogsUniqueCountTimeWindowValues...), + }, + MarkdownDescription: fmt.Sprintf("Condition to evaluate the threshold with. Valid values: %q.", validLogsUniqueCountTimeWindowValues), + }, + "unique_count_keypath": schema.StringAttribute{Required: true}, + "max_unique_count": schema.Int64Attribute{Required: true}, + "max_unique_count_per_group_by_key": schema.Int64Attribute{ + Optional: true, + Validators: []validator.Int64{ + int64validator.AlsoRequires(path.MatchRoot("group_by")), + requiredWhenGroupBySet{}, + }, + }, + }, }, }, }, @@ -897,14 +914,20 @@ func (r *AlertResource) Schema(_ context.Context, _ resource.SchemaRequest, resp "for_over_pct": schema.Int64Attribute{ Required: true, }, - "of_the_last": metricTimeWindowSchema(), + "of_the_last": schema.StringAttribute{ + Required: true, + Validators: []validator.String{ + stringvalidator.OneOf(validMetricTimeWindowValues...), + }, + MarkdownDescription: fmt.Sprintf("Condition to evaluate the threshold with. Valid values: %q.", validMetricTimeWindowValues), + }, "missing_values": missingValuesSchema(), "condition": schema.StringAttribute{ Required: true, Validators: []validator.String{ - stringvalidator.OneOf(metricsThresholdConditionMap...), + stringvalidator.OneOf(metricsThresholdConditionValues...), }, - MarkdownDescription: fmt.Sprintf("Condition to evaluate the threshold with. Valid values: %q.", metricsThresholdConditionMap), + MarkdownDescription: fmt.Sprintf("Condition to evaluate the threshold with. 
Valid values: %q.", metricsThresholdConditionValues), }, }, }, @@ -923,8 +946,13 @@ func (r *AlertResource) Schema(_ context.Context, _ resource.SchemaRequest, resp "threshold": schema.Float64Attribute{ Required: true, }, - "of_the_last": metricTimeWindowSchema(), - + "of_the_last": schema.StringAttribute{ + Required: true, + Validators: []validator.String{ + stringvalidator.OneOf(validMetricTimeWindowValues...), + }, + MarkdownDescription: fmt.Sprintf("Condition to evaluate the threshold with. Valid values: %q.", validMetricTimeWindowValues), + }, "for_over_pct": schema.Int64Attribute{ Required: true, }, @@ -934,9 +962,9 @@ func (r *AlertResource) Schema(_ context.Context, _ resource.SchemaRequest, resp "condition": schema.StringAttribute{ Required: true, Validators: []validator.String{ - stringvalidator.OneOf(metricsUnusualConditionValues...), + stringvalidator.OneOf(metricUnusualConditionValues...), }, - MarkdownDescription: fmt.Sprintf("Condition to evaluate the threshold with. Valid values: %q.", metricsUnusualConditionValues), + MarkdownDescription: fmt.Sprintf("Condition to evaluate the threshold with. Valid values: %q.", metricUnusualConditionValues), }, }, }, @@ -964,7 +992,13 @@ func (r *AlertResource) Schema(_ context.Context, _ resource.SchemaRequest, resp "span_amount": schema.Float64Attribute{ Required: true, }, - "time_window": tracingTimeWindowSchema(), + "time_window": schema.StringAttribute{ + Required: true, + Validators: []validator.String{ + stringvalidator.OneOf(validTracingTimeWindow...), + }, + MarkdownDescription: fmt.Sprintf("Condition to evaluate the threshold with. Valid values: %q.", validTracingTimeWindow), + }, }, // Condition type is missing since there is only a single type to be filled in }, @@ -1223,21 +1257,6 @@ func tracingQuerySchema() schema.SingleNestedAttribute { } } -func tracingTimeWindowSchema() schema.SingleNestedAttribute { - return schema.SingleNestedAttribute{ - Required: true, - Attributes: map[string]schema.Attribute{ - "specific_value": schema.StringAttribute{ - Required: true, - Validators: []validator.String{ - stringvalidator.OneOf(validTracingTimeWindow...), - }, - MarkdownDescription: fmt.Sprintf("Specific value. Valid values: %q.", validTracingTimeWindow), - }, - }, - } -} - func tracingLabelFiltersSchema() schema.SingleNestedAttribute { return schema.SingleNestedAttribute{ Required: true, @@ -1306,21 +1325,6 @@ func metricFilterSchema() schema.Attribute { } } -func metricTimeWindowSchema() schema.SingleNestedAttribute { - return schema.SingleNestedAttribute{ - Required: true, - Attributes: map[string]schema.Attribute{ - "specific_value": schema.StringAttribute{ - Required: true, - Validators: []validator.String{ - stringvalidator.OneOf(validMetricTimeWindowValues...), - }, - MarkdownDescription: fmt.Sprintf("Specific value. Valid values: %q.", validMetricTimeWindowValues), - }, - }, - } -} - func logsFilterSchema() schema.SingleNestedAttribute { return schema.SingleNestedAttribute{ Optional: true, @@ -1417,66 +1421,6 @@ func timeOfDaySchema() schema.SingleNestedAttribute { } } -func logsTimeWindowSchema() schema.SingleNestedAttribute { - return schema.SingleNestedAttribute{ - Required: true, - Attributes: map[string]schema.Attribute{ - "specific_value": schema.StringAttribute{ - Required: true, - Validators: []validator.String{ - stringvalidator.OneOf(validLogsTimeWindowValues...), - }, - MarkdownDescription: fmt.Sprintf("Time window value. 
Valid values: %q.", validLogsTimeWindowValues), - }, - }, - } -} - -func logsRatioTimeWindowSchema() schema.SingleNestedAttribute { - return schema.SingleNestedAttribute{ - Required: true, - Attributes: map[string]schema.Attribute{ - "specific_value": schema.StringAttribute{ - Required: true, - Validators: []validator.String{ - stringvalidator.OneOf(validLogsRatioTimeWindowValues...), - }, - MarkdownDescription: fmt.Sprintf("Time window value. Valid values: %q.", validLogsRatioTimeWindowValues), - }, - }, - } -} - -func logsNewValueTimeWindowSchema() schema.Attribute { - return schema.SingleNestedAttribute{ - Required: true, - Attributes: map[string]schema.Attribute{ - "specific_value": schema.StringAttribute{ - Required: true, - Validators: []validator.String{ - stringvalidator.OneOf(validLogsNewValueTimeWindowValues...), - }, - MarkdownDescription: fmt.Sprintf("Time window value. Valid values: %q.", validLogsNewValueTimeWindowValues), - }, - }, - } -} - -func logsUniqueCountTimeWindowSchema() schema.SingleNestedAttribute { - return schema.SingleNestedAttribute{ - Required: true, - Attributes: map[string]schema.Attribute{ - "specific_value": schema.StringAttribute{ - Required: true, - Validators: []validator.String{ - stringvalidator.OneOf(validLogsUniqueCountTimeWindowValues...), - }, - MarkdownDescription: fmt.Sprintf("Time window value. Valid values: %q.", validLogsUniqueCountTimeWindowValues), - }, - }, - } -} - func undetectedValuesManagementSchema() schema.SingleNestedAttribute { return schema.SingleNestedAttribute{ Optional: true, @@ -1952,22 +1896,18 @@ func expandAlertsTypeDefinition(ctx context.Context, alertProperties *cxsdk.Aler } else if logsTimeRelativeThreshold := alertDefinitionModel.LogsTimeRelativeThreshold; !objIsNullOrUnknown(logsTimeRelativeThreshold) { // LogsTimeRelativeThreshold alertProperties, diags = expandLogsTimeRelativeThresholdAlertTypeDefinition(ctx, alertProperties, logsTimeRelativeThreshold) - } else if metricMoreThan := alertDefinitionModel.MetricMoreThan; !(metricMoreThan.IsNull() || metricMoreThan.IsUnknown()) { - alertProperties, diags = expandMetricMoreThanAlertTypeDefinition(ctx, alertProperties, metricMoreThan) - } else if metricLessThan := alertDefinitionModel.MetricLessThan; !(metricLessThan.IsNull() || metricLessThan.IsUnknown()) { - alertProperties, diags = expandMetricLessThanAlertTypeDefinition(ctx, alertProperties, metricLessThan) - } else if metricMoreThanUsual := alertDefinitionModel.MetricMoreThanUsual; !(metricMoreThanUsual.IsNull() || metricMoreThanUsual.IsUnknown()) { - alertProperties, diags = expandMetricMoreThanUsualAlertTypeDefinition(ctx, alertProperties, metricMoreThanUsual) - } else if metricLessThanUsual := alertDefinitionModel.MetricLessThanUsual; !(metricLessThanUsual.IsNull() || metricLessThanUsual.IsUnknown()) { - alertProperties, diags = expandMetricLessThanUsualAlertTypeDefinition(ctx, alertProperties, metricLessThanUsual) - } else if metricMoreThanOrEquals := alertDefinitionModel.MetricMoreThanOrEquals; !(metricMoreThanOrEquals.IsNull() || metricMoreThanOrEquals.IsUnknown()) { - alertProperties, diags = expandMetricMoreThanOrEqualsAlertTypeDefinition(ctx, alertProperties, metricMoreThanOrEquals) - } else if metricLessThanOrEquals := alertDefinitionModel.MetricLessThanOrEquals; !(metricLessThanOrEquals.IsNull() || metricLessThanOrEquals.IsUnknown()) { - alertProperties, diags = expandMetricLessThanOrEqualsAlertTypeDefinition(ctx, alertProperties, metricLessThanOrEquals) - } else if tracingImmediate := 
alertDefinitionModel.TracingImmediate; !(tracingImmediate.IsNull() || tracingImmediate.IsUnknown()) {
-		alertProperties, diags = expandTracingImmediateAlertTypeDefinition(ctx, alertProperties, tracingImmediate)
-	} else if tracingMoreThan := alertDefinitionModel.TracingMoreThan; !(tracingMoreThan.IsNull() || tracingMoreThan.IsUnknown()) {
-		alertProperties, diags = expandTracingMoreThanAlertTypeDefinition(ctx, alertProperties, tracingMoreThan)
+	} else if metricThreshold := alertDefinitionModel.MetricThreshold; !objIsNullOrUnknown(metricThreshold) {
+		// MetricsThreshold
+		alertProperties, diags = expandMetricThresholdAlertTypeDefinition(ctx, alertProperties, metricThreshold)
+	} else if metricUnusual := alertDefinitionModel.MetricUnusual; !objIsNullOrUnknown(metricUnusual) {
+		// MetricsUnusual
+		alertProperties, diags = expandMetricUnusualAlertTypeDefinition(ctx, alertProperties, metricUnusual)
+	} else if tracingImmediate := alertDefinitionModel.TracingImmediate; !objIsNullOrUnknown(tracingImmediate) {
+		// TracingImmediate
+		alertProperties, diags = expandTracingImmediateTypeDefinition(ctx, alertProperties, tracingImmediate)
+	} else if tracingThreshold := alertDefinitionModel.TracingThreshold; !objIsNullOrUnknown(tracingThreshold) {
+		// TracingThreshold
+		alertProperties, diags = expandTracingThresholdTypeDefinition(ctx, alertProperties, tracingThreshold)
 	} else if flow := alertDefinitionModel.Flow; !(flow.IsNull() || flow.IsUnknown()) {
 		alertProperties, diags = expandFlowAlertTypeDefinition(ctx, alertProperties, flow)
 	} else {
@@ -2172,25 +2112,16 @@ func expandLogsThresholdTypeDefinition(ctx context.Context, properties *cxsdk.Al
 	return properties, nil
 }
 
-func extractLogsTimeWindow(ctx context.Context, timeWindow types.Object) (*cxsdk.LogsTimeWindow, diag.Diagnostics) {
+func extractLogsTimeWindow(ctx context.Context, timeWindow types.String) (*cxsdk.LogsTimeWindow, diag.Diagnostics) {
 	if timeWindow.IsNull() || timeWindow.IsUnknown() {
 		return nil, nil
 	}
 
-	var timeWindowModel LogsTimeWindowModel
-	if diags := timeWindow.As(ctx, &timeWindowModel, basetypes.ObjectAsOptions{}); diags.HasError() {
-		return nil, diags
-	}
-
-	if specificValue := timeWindowModel.SpecificValue; !(specificValue.IsNull() || specificValue.IsUnknown()) {
-		return &cxsdk.LogsTimeWindow{
-			Type: &cxsdk.LogsTimeWindowSpecificValue{
-				LogsTimeWindowSpecificValue: logsTimeWindowValueSchemaToProtoMap[specificValue.ValueString()],
-			},
-		}, nil
-	}
-
-	return nil, diag.Diagnostics{diag.NewErrorDiagnostic("Invalid Time Window", "Time Window is not valid")}
+	return &cxsdk.LogsTimeWindow{
+		Type: &cxsdk.LogsTimeWindowSpecificValue{
+			LogsTimeWindowSpecificValue: logsTimeWindowValueSchemaToProtoMap[timeWindow.ValueString()],
+		},
+	}, nil
 }
 
 func extractThresholdRules(ctx context.Context, elements types.List) ([]*cxsdk.LogsThresholdRule, diag.Diagnostics) {
@@ -2384,25 +2315,16 @@ func extractRatioRules(ctx context.Context, elements types.List) ([]*cxsdk.LogsR
 	return rules, nil
 }
 
-func extractLogsRatioTimeWindow(ctx context.Context, window types.Object) (*cxsdk.LogsRatioTimeWindow, diag.Diagnostics) {
+func extractLogsRatioTimeWindow(ctx context.Context, window types.String) (*cxsdk.LogsRatioTimeWindow, diag.Diagnostics) {
 	if window.IsNull() || window.IsUnknown() {
 		return nil, nil
 	}
 
-	var windowModel LogsRatioTimeWindowModel
-	if diags := window.As(ctx, &windowModel, basetypes.ObjectAsOptions{}); diags.HasError() {
-		return nil, diags
-	}
-
-	if specificValue := windowModel.SpecificValue; !(specificValue.IsNull() || 
specificValue.IsUnknown()) { - return &cxsdk.LogsRatioTimeWindow{ - Type: &cxsdk.LogsRatioTimeWindowSpecificValue{ - LogsRatioTimeWindowSpecificValue: logsRatioTimeWindowValueSchemaToProtoMap[specificValue.ValueString()], - }, - }, nil - } - - return nil, diag.Diagnostics{diag.NewErrorDiagnostic("Invalid Time Window", "Time Window is not valid")} + return &cxsdk.LogsRatioTimeWindow{ + Type: &cxsdk.LogsRatioTimeWindowSpecificValue{ + LogsRatioTimeWindowSpecificValue: logsRatioTimeWindowValueSchemaToProtoMap[window.ValueString()], + }, + }, nil } func expandLogsNewValueAlertTypeDefinition(ctx context.Context, properties *cxsdk.AlertDefProperties, newValue types.Object) (*cxsdk.AlertDefProperties, diag.Diagnostics) { @@ -2440,26 +2362,15 @@ func expandLogsNewValueAlertTypeDefinition(ctx context.Context, properties *cxsd return properties, nil } -func extractLogsNewValueTimeWindow(ctx context.Context, window types.Object) (*cxsdk.LogsNewValueTimeWindow, diag.Diagnostics) { +func extractLogsNewValueTimeWindow(ctx context.Context, window types.String) (*cxsdk.LogsNewValueTimeWindow, diag.Diagnostics) { if window.IsNull() || window.IsUnknown() { return nil, nil } - - var windowModel LogsNewValueTimeWindowModel - if diags := window.As(ctx, &windowModel, basetypes.ObjectAsOptions{}); diags.HasError() { - return nil, diags - } - - if specificValue := windowModel.SpecificValue; !(specificValue.IsNull() || specificValue.IsUnknown()) { - return &cxsdk.LogsNewValueTimeWindow{ - Type: &cxsdk.LogsNewValueTimeWindowSpecificValue{ - LogsNewValueTimeWindowSpecificValue: logsNewValueTimeWindowValueSchemaToProtoMap[specificValue.ValueString()], - }, - }, nil - } - - return nil, diag.Diagnostics{diag.NewErrorDiagnostic("Invalid Time Window", "Time Window is not valid")} - + return &cxsdk.LogsNewValueTimeWindow{ + Type: &cxsdk.LogsNewValueTimeWindowSpecificValue{ + LogsNewValueTimeWindowSpecificValue: logsNewValueTimeWindowValueSchemaToProtoMap[window.ValueString()], + }, + }, nil } func extractNewValueRules(ctx context.Context, elements types.List) ([]*cxsdk.LogsNewValueRule, diag.Diagnostics) { @@ -2512,45 +2423,62 @@ func expandLogsUniqueCountAlertTypeDefinition(ctx context.Context, properties *c return nil, diags } - timeWindow, diags := extractLogsUniqueCountTimeWindow(ctx, uniqueCountModel.TimeWindow) + rules, diags := extractLogsUniqueCountRules(ctx, uniqueCountModel.Rules) if diags.HasError() { return nil, diags } - properties.TypeDefinition = &cxsdk.AlertDefProperties_LogsUniqueCount{ - LogsUniqueCount: &cxsdk.LogsUniqueCountTypeDefinition{ - LogsFilter: logsFilter, - UniqueCountKeypath: typeStringToWrapperspbString(uniqueCountModel.UniqueCountKeypath), - MaxUniqueCount: typeInt64ToWrappedInt64(uniqueCountModel.MaxUniqueCount), - TimeWindow: timeWindow, - NotificationPayloadFilter: notificationPayloadFilter, - MaxUniqueCountPerGroupByKey: typeInt64ToWrappedInt64(uniqueCountModel.MaxUniqueCountPerGroupByKey), + properties.TypeDefinition = &cxsdk.AlertDefPropertiesLogsUniqueCount{ + LogsUniqueCount: &cxsdk.LogsUniqueCountType{ + LogsFilter: logsFilter, + Rules: rules, + NotificationPayloadFilter: notificationPayloadFilter, }, } - properties.AlertDefType = cxsdk.AlertDefType_ALERT_DEF_TYPE_LOGS_UNIQUE_COUNT + properties.Type = cxsdk.AlertDefTypeLogsUniqueCount return properties, nil } -func extractLogsUniqueCountTimeWindow(ctx context.Context, window types.Object) (*cxsdk.LogsUniqueValueTimeWindow, diag.Diagnostics) { - if window.IsNull() || window.IsUnknown() { - return nil, nil +func 
extractLogsUniqueCountRules(ctx context.Context, elements types.List) ([]*cxsdk.LogsUniqueCountRule, diag.Diagnostics) { + diags := diag.Diagnostics{} + rules := make([]*cxsdk.LogsUniqueCountRule, len(elements.Elements())) + var objs []types.Object + elements.ElementsAs(ctx, &objs, false) + for i, r := range objs { + var rule LogsUniqueCountRuleModel + if dg := r.As(ctx, &rule, basetypes.ObjectAsOptions{}); dg.HasError() { + diags.Append(dg...) + continue + } + timeWindow, dg := extractLogsUniqueCountTimeWindow(ctx, rule.TimeWindow) + if dg.HasError() { + diags.Append(dg...) + continue + } + rules[i] = &cxsdk.LogsUniqueCountRule{ + Condition: &cxsdk.LogsUniqueCountCondition{ + UniqueCountKeypath: typeStringToWrapperspbString(rule.UniqueCountKeypath), + MaxUniqueCount: typeInt64ToWrappedInt64(rule.MaxUniqueCount), + TimeWindow: timeWindow, + MaxUniqueCountPerGroupByKey: typeInt64ToWrappedInt64(rule.MaxUniqueCountPerGroupByKey), + }, + } } - - var windowModel LogsUniqueCountTimeWindowModel - if diags := window.As(ctx, &windowModel, basetypes.ObjectAsOptions{}); diags.HasError() { + if diags.HasError() { return nil, diags } + return rules, nil +} - if specificValue := windowModel.SpecificValue; !(specificValue.IsNull() || specificValue.IsUnknown()) { - return &cxsdk.LogsUniqueValueTimeWindow{ - Type: &cxsdk.LogsUniqueValueTimeWindow_LogsUniqueValueTimeWindowSpecificValue{ - LogsUniqueValueTimeWindowSpecificValue: logsUniqueCountTimeWindowValueSchemaToProtoMap[specificValue.ValueString()], - }, - }, nil +func extractLogsUniqueCountTimeWindow(ctx context.Context, window types.String) (*cxsdk.LogsUniqueValueTimeWindow, diag.Diagnostics) { + if window.IsNull() || window.IsUnknown() { + return nil, nil } - - return nil, diag.Diagnostics{diag.NewErrorDiagnostic("Invalid Time Window", "Time Window is not valid")} - + return &cxsdk.LogsUniqueValueTimeWindow{ + Type: &cxsdk.LogsUniqueValueTimeWindowSpecificValue{ + LogsUniqueValueTimeWindowSpecificValue: logsUniqueCountTimeWindowValueSchemaToProtoMap[window.ValueString()], + }, + }, nil } func expandLogsTimeRelativeThresholdAlertTypeDefinition(ctx context.Context, properties *cxsdk.AlertDefProperties, relativeThreshold types.Object) (*cxsdk.AlertDefProperties, diag.Diagnostics) { @@ -2574,7 +2502,9 @@ func expandLogsTimeRelativeThresholdAlertTypeDefinition(ctx context.Context, pro } rules, diags := extractTimeRelativeThresholdRules(ctx, relativeThresholdModel.Rules) - + if diags.HasError() { + return nil, diags + } properties.TypeDefinition = &cxsdk.AlertDefPropertiesLogsTimeRelativeThreshold{ LogsTimeRelativeThreshold: &cxsdk.LogsTimeRelativeThresholdType{ LogsFilter: logsFilter, @@ -2612,45 +2542,76 @@ func extractTimeRelativeThresholdRules(ctx context.Context, elements types.List) return rules, nil } -func expandMetricMoreThanAlertTypeDefinition(ctx context.Context, properties *cxsdk.AlertDefProperties, metricMoreThan types.Object) (*cxsdk.AlertDefProperties, diag.Diagnostics) { - if metricMoreThan.IsNull() || metricMoreThan.IsUnknown() { +func expandMetricThresholdAlertTypeDefinition(ctx context.Context, properties *cxsdk.AlertDefProperties, metricThreshold types.Object) (*cxsdk.AlertDefProperties, diag.Diagnostics) { + if metricThreshold.IsNull() || metricThreshold.IsUnknown() { return properties, nil } - var metricMoreThanModel MetricMoreThanModel - if diags := metricMoreThan.As(ctx, &metricMoreThanModel, basetypes.ObjectAsOptions{}); diags.HasError() { - return nil, diags - } - - metricFilter, diags := extractMetricFilter(ctx, 
metricMoreThanModel.MetricFilter) - if diags.HasError() { + var metricThresholdModel MetricThresholdModel + if diags := metricThreshold.As(ctx, &metricThresholdModel, basetypes.ObjectAsOptions{}); diags.HasError() { return nil, diags } - ofTheLast, diags := extractMetricTimeWindow(ctx, metricMoreThanModel.OfTheLast) + metricFilter, diags := extractMetricFilter(ctx, metricThresholdModel.MetricFilter) if diags.HasError() { return nil, diags } - missingValues, diags := extractMissingValues(ctx, metricMoreThanModel.MissingValues) + rules, diags := extractMetricThresholdRules(ctx, metricThresholdModel.Rules) if diags.HasError() { return nil, diags } - - properties.TypeDefinition = &cxsdk.AlertDefProperties_MetricMoreThan{ - MetricMoreThan: &cxsdk.MetricMoreThanTypeDefinition{ - MetricFilter: metricFilter, - Threshold: typeFloat64ToWrapperspbFloat(metricMoreThanModel.Threshold), - ForOverPct: typeInt64ToWrappedUint32(metricMoreThanModel.ForOverPct), - OfTheLast: ofTheLast, - MissingValues: missingValues, + properties.TypeDefinition = &cxsdk.AlertDefPropertiesMetricThreshold{ + MetricThreshold: &cxsdk.MetricThresholdType{ + MetricFilter: metricFilter, + Rules: rules, }, } - properties.AlertDefType = cxsdk.AlertDefType_ALERT_DEF_TYPE_METRIC_MORE_THAN + properties.Type = cxsdk.AlertDefTypeMetricThreshold return properties, nil } +func extractMetricThresholdRules(ctx context.Context, elements types.List) ([]*cxsdk.MetricThresholdRule, diag.Diagnostics) { + diags := diag.Diagnostics{} + rules := make([]*cxsdk.MetricThresholdRule, len(elements.Elements())) + var objs []types.Object + elements.ElementsAs(ctx, &objs, false) + for i, r := range objs { + var rule MetricRule + if dg := r.As(ctx, &rule, basetypes.ObjectAsOptions{}); dg.HasError() { + diags.Append(dg...) + continue + } + + ofTheLast, dg := extractMetricTimeWindow(ctx, rule.OfTheLast) + if dg.HasError() { + diags.Append(dg...) + continue + } + + missingValues, dg := extractMissingValues(ctx, rule.MissingValues) + if dg.HasError() { + diags.Append(dg...) 
+ continue + } + + rules[i] = &cxsdk.MetricThresholdRule{ + Condition: &cxsdk.MetricThresholdCondition{ + Threshold: typeFloat64ToWrapperspbDouble(rule.Threshold), + ForOverPct: typeInt64ToWrappedUint32(rule.ForOverPct), + OfTheLast: ofTheLast, + MissingValues: missingValues, + ConditionType: metricsThresholdConditionToProtoMap[rule.Condition.ValueString()], + }, + } + } + if diags.HasError() { + return nil, diags + } + return rules, nil +} + func extractMetricFilter(ctx context.Context, filter types.Object) (*cxsdk.MetricFilter, diag.Diagnostics) { if filter.IsNull() || filter.IsUnknown() { return nil, nil @@ -2663,7 +2624,7 @@ func extractMetricFilter(ctx context.Context, filter types.Object) (*cxsdk.Metri if promql := filterModel.Promql; !(promql.IsNull() || promql.IsUnknown()) { return &cxsdk.MetricFilter{ - Type: &cxsdk.MetricFilter_Promql{ + Type: &cxsdk.MetricFilterPromql{ Promql: typeStringToWrapperspbString(promql), }, }, nil @@ -2672,25 +2633,16 @@ func extractMetricFilter(ctx context.Context, filter types.Object) (*cxsdk.Metri return nil, diag.Diagnostics{diag.NewErrorDiagnostic("Invalid Metric Filter", "Metric Filter is not valid")} } -func extractMetricTimeWindow(ctx context.Context, timeWindow types.Object) (*cxsdk.MetricTimeWindow, diag.Diagnostics) { - if timeWindow.IsNull() || timeWindow.IsUnknown() { +func extractMetricTimeWindow(ctx context.Context, window types.String) (*cxsdk.MetricTimeWindow, diag.Diagnostics) { + if window.IsNull() || window.IsUnknown() { return nil, nil } - var timeWindowModel MetricTimeWindowModel - if diags := timeWindow.As(ctx, &timeWindowModel, basetypes.ObjectAsOptions{}); diags.HasError() { - return nil, diags - } - - if specificValue := timeWindowModel.SpecificValue; !(specificValue.IsNull() || specificValue.IsUnknown()) { - return &cxsdk.MetricTimeWindow{ - Type: &cxsdk.MetricTimeWindow_MetricTimeWindowSpecificValue{ - MetricTimeWindowSpecificValue: metricTimeWindowValueSchemaToProtoMap[specificValue.ValueString()], - }, - }, nil - } - - return nil, diag.Diagnostics{diag.NewErrorDiagnostic("Invalid Time Window", "Time Window is not valid")} + return &cxsdk.MetricTimeWindow{ + Type: &cxsdk.MetricTimeWindowSpecificValue{ + MetricTimeWindowSpecificValue: metricTimeWindowValueSchemaToProtoMap[window.ValueString()], + }, + }, nil } func extractMissingValues(ctx context.Context, missingValues types.Object) (*cxsdk.MetricMissingValues, diag.Diagnostics) { @@ -2705,11 +2657,11 @@ func extractMissingValues(ctx context.Context, missingValues types.Object) (*cxs metricMissingValues := &cxsdk.MetricMissingValues{} if replaceWithZero := missingValuesModel.ReplaceWithZero; !(replaceWithZero.IsNull() || replaceWithZero.IsUnknown()) { - metricMissingValues.MissingValues = &cxsdk.MetricMissingValues_ReplaceWithZero{ + metricMissingValues.MissingValues = &cxsdk.MetricMissingValuesReplaceWithZero{ ReplaceWithZero: typeBoolToWrapperspbBool(replaceWithZero), } } else if minNonNullValuesPct := missingValuesModel.MinNonNullValuesPct; !(minNonNullValuesPct.IsNull() || minNonNullValuesPct.IsUnknown()) { - metricMissingValues.MissingValues = &cxsdk.MetricMissingValues_MinNonNullValuesPct{ + metricMissingValues.MissingValues = &cxsdk.MetricMissingValuesMinNonNullValuesPct{ MinNonNullValuesPct: typeInt64ToWrappedUint32(minNonNullValuesPct), } } else { @@ -2719,129 +2671,118 @@ func extractMissingValues(ctx context.Context, missingValues types.Object) (*cxs return metricMissingValues, nil } -func expandMetricLessThanAlertTypeDefinition(ctx context.Context, properties 
*cxsdk.AlertDefProperties, metricLessThan types.Object) (*cxsdk.AlertDefProperties, diag.Diagnostics) { - if metricLessThan.IsNull() || metricLessThan.IsUnknown() { +func expandTracingImmediateTypeDefinition(ctx context.Context, properties *cxsdk.AlertDefProperties, tracingImmediate types.Object) (*cxsdk.AlertDefProperties, diag.Diagnostics) { + if objIsNullOrUnknown(tracingImmediate) { return properties, nil } - var metricLessThanModel MetricLessThanModel - if diags := metricLessThan.As(ctx, &metricLessThanModel, basetypes.ObjectAsOptions{}); diags.HasError() { - return nil, diags - } - - metricFilter, diags := extractMetricFilter(ctx, metricLessThanModel.MetricFilter) - if diags.HasError() { - return nil, diags - } - - ofTheLast, diags := extractMetricTimeWindow(ctx, metricLessThanModel.OfTheLast) - if diags.HasError() { + var tracingImmediateModel TracingImmediateModel + if diags := tracingImmediate.As(ctx, &tracingImmediateModel, basetypes.ObjectAsOptions{}); diags.HasError() { return nil, diags } - missingValues, diags := extractMissingValues(ctx, metricLessThanModel.MissingValues) + tracingQuery, diags := expandTracingFilters(ctx, tracingImmediateModel.TracingFilter) if diags.HasError() { return nil, diags } - undetectedValuesManagement, diags := extractUndetectedValuesManagement(ctx, metricLessThanModel.UndetectedValuesManagement) + notificationPayloadFilter, diags := typeStringSliceToWrappedStringSlice(ctx, tracingImmediateModel.NotificationPayloadFilter.Elements()) if diags.HasError() { return nil, diags } - - properties.TypeDefinition = &cxsdk.AlertDefProperties_MetricLessThan{ - MetricLessThan: &cxsdk.MetricLessThanTypeDefinition{ - MetricFilter: metricFilter, - Threshold: typeFloat64ToWrapperspbFloat(metricLessThanModel.Threshold), - ForOverPct: typeInt64ToWrappedUint32(metricLessThanModel.ForOverPct), - OfTheLast: ofTheLast, - MissingValues: missingValues, - UndetectedValuesManagement: undetectedValuesManagement, + properties.TypeDefinition = &cxsdk.AlertDefPropertiesTracingImmediate{ + TracingImmediate: &cxsdk.TracingImmediateType{ + TracingFilter: &cxsdk.TracingFilter{ + FilterType: tracingQuery, + }, + NotificationPayloadFilter: notificationPayloadFilter, }, } - properties.AlertDefType = cxsdk.AlertDefType_ALERT_DEF_TYPE_METRIC_LESS_THAN + properties.Type = cxsdk.AlertDefTypeTracingImmediate return properties, nil } -func expandTracingMoreThanAlertTypeDefinition(ctx context.Context, properties *cxsdk.AlertDefProperties, tracingMoreThan types.Object) (*cxsdk.AlertDefProperties, diag.Diagnostics) { - if tracingMoreThan.IsNull() || tracingMoreThan.IsUnknown() { +func expandTracingThresholdTypeDefinition(ctx context.Context, properties *cxsdk.AlertDefProperties, tracingThreshold types.Object) (*cxsdk.AlertDefProperties, diag.Diagnostics) { + if objIsNullOrUnknown(tracingThreshold) { return properties, nil } - var tracingMoreThanModel TracingMoreThanModel - if diags := tracingMoreThan.As(ctx, &tracingMoreThanModel, basetypes.ObjectAsOptions{}); diags.HasError() { + var tracingThresholdModel TracingThresholdModel + if diags := tracingThreshold.As(ctx, &tracingThresholdModel, basetypes.ObjectAsOptions{}); diags.HasError() { return nil, diags } - tracingQuery, diags := extractTracingFilter(ctx, tracingMoreThanModel.TracingFilter) + tracingQuery, diags := expandTracingFilters(ctx, tracingThresholdModel.TracingFilter) if diags.HasError() { return nil, diags } - notificationPayloadFilter, diags := typeStringSliceToWrappedStringSlice(ctx, 
tracingMoreThanModel.NotificationPayloadFilter.Elements())
+	notificationPayloadFilter, diags := typeStringSliceToWrappedStringSlice(ctx, tracingThresholdModel.NotificationPayloadFilter.Elements())
 	if diags.HasError() {
 		return nil, diags
 	}
 
-	timeWindow, diags := extractTracingTimeWindow(ctx, tracingMoreThanModel.TimeWindow)
+	rules, diags := extractTracingThresholdRules(ctx, tracingThresholdModel.Rules)
 	if diags.HasError() {
 		return nil, diags
 	}
 
-	properties.TypeDefinition = &cxsdk.AlertDefProperties_TracingMoreThan{
-		TracingMoreThan: &cxsdk.TracingMoreThanTypeDefinition{
-			TracingFilter: tracingQuery,
-			SpanAmount:    typeInt64ToWrappedUint32(tracingMoreThanModel.SpanAmount),
-			TimeWindow:    timeWindow,
+	properties.TypeDefinition = &cxsdk.AlertDefPropertiesTracingThreshold{
+		TracingThreshold: &cxsdk.TracingThresholdType{
+			TracingFilter: &cxsdk.TracingFilter{
+				FilterType: tracingQuery,
+			},
 			NotificationPayloadFilter: notificationPayloadFilter,
+			Rules:                     rules,
 		},
 	}
-	properties.AlertDefType = cxsdk.AlertDefType_ALERT_DEF_TYPE_TRACING_MORE_THAN
+	properties.Type = cxsdk.AlertDefTypeTracingThreshold
 
 	return properties, nil
 }
 
-func extractTracingFilter(ctx context.Context, query types.Object) (*cxsdk.TracingFilter, diag.Diagnostics) {
-	if query.IsNull() || query.IsUnknown() {
-		return nil, nil
-	}
-
-	var queryModel TracingFilterModel
-	if diags := query.As(ctx, &queryModel, basetypes.ObjectAsOptions{}); diags.HasError() {
-		return nil, diags
-	}
-
-	tracingQuery := &cxsdk.TracingFilter{
-		LatencyThresholdMs: typeInt64ToWrappedUint32(queryModel.LatencyThresholdMs),
-	}
-
-	tracingQuery, diags := expandTracingFilters(ctx, tracingQuery, &queryModel)
+func extractTracingThresholdRules(ctx context.Context, elements types.List) ([]*cxsdk.TracingThresholdRule, diag.Diagnostics) {
+	diags := diag.Diagnostics{}
+	rules := make([]*cxsdk.TracingThresholdRule, len(elements.Elements()))
+	var objs []types.Object
+	elements.ElementsAs(ctx, &objs, false)
+	for i, r := range objs {
+		var rule TracingThresholdRuleModel
+		if dg := r.As(ctx, &rule, basetypes.ObjectAsOptions{}); dg.HasError() {
+			diags.Append(dg...)
+			continue
+		}
+		timeWindow, dg := extractTracingTimeWindow(ctx, rule.TimeWindow)
+		if dg.HasError() {
+			diags.Append(dg...)
+ continue + } + rules[i] = &cxsdk.TracingThresholdRule{ + Condition: &cxsdk.TracingThresholdCondition{ + SpanAmount: typeFloat64ToWrapperspbDouble(rule.SpanAmount), + TimeWindow: timeWindow, + ConditionType: cxsdk.TracingThresholdConditionTypeMoreThanOrUnspecified, + }, + } + } if diags.HasError() { return nil, diags } - - return tracingQuery, nil + return rules, nil } -func expandTracingFilters(ctx context.Context, query *cxsdk.TracingFilter, tracingQueryModel *TracingFilterModel) (*cxsdk.TracingFilter, diag.Diagnostics) { - if tracingQueryModel == nil { - return query, nil +func expandTracingFilters(ctx context.Context, query types.Object) (*cxsdk.TracingFilterSimpleFilter, diag.Diagnostics) { + if objIsNullOrUnknown(query) { + return nil, nil } - - var diags diag.Diagnostics - if tracingLabelFilters := tracingQueryModel.TracingLabelFilters; !(tracingLabelFilters.IsNull() || tracingLabelFilters.IsUnknown()) { - query, diags = expandTracingLabelFilters(ctx, query, tracingLabelFilters) - } else { - diags = diag.Diagnostics{diag.NewErrorDiagnostic("Invalid Tracing Label Filters", "Tracing Label Filters is not valid")} + var labelFilterModel TracingFilterModel + if diags := query.As(ctx, &labelFilterModel, basetypes.ObjectAsOptions{}); diags.HasError() { + return nil, diags } - return query, diags -} - -func expandTracingLabelFilters(ctx context.Context, query *cxsdk.TracingFilter, tracingLabelFilters types.Object) (*cxsdk.TracingFilter, diag.Diagnostics) { var filtersModel TracingLabelFiltersModel - if diags := tracingLabelFilters.As(ctx, &filtersModel, basetypes.ObjectAsOptions{}); diags.HasError() { + if diags := labelFilterModel.TracingLabelFilters.As(ctx, &filtersModel, basetypes.ObjectAsOptions{}); diags.HasError() { return nil, diags } @@ -2860,21 +2801,30 @@ func expandTracingLabelFilters(ctx context.Context, query *cxsdk.TracingFilter, return nil, diags } + serviceName, diags := extractTracingLabelFilters(ctx, filtersModel.ServiceName) + if diags.HasError() { + return nil, diags + } + spanFields, diags := extractTracingSpanFieldsFilterType(ctx, filtersModel.SpanFields) if diags.HasError() { return nil, diags } - query.Filters = &cxsdk.TracingFilter_TracingLabelFilters{ - TracingLabelFilters: &cxsdk.TracingLabelFilters{ - ApplicationName: applicationName, - SubsystemName: subsystemName, - OperationName: operationName, - SpanFields: spanFields, + filter := &cxsdk.TracingFilterSimpleFilter{ + SimpleFilter: &cxsdk.TracingSimpleFilter{ + TracingLabelFilters: &cxsdk.TracingLabelFilters{ + ApplicationName: applicationName, + SubsystemName: subsystemName, + ServiceName: serviceName, + OperationName: operationName, + SpanFields: spanFields, + }, + LatencyThresholdMs: typeInt64ToWrappedUint32(labelFilterModel.LatencyThresholdMs), }, } - return query, nil + return filter, nil } func extractTracingLabelFilters(ctx context.Context, tracingLabelFilters types.Set) ([]*cxsdk.TracingFilterType, diag.Diagnostics) { @@ -2944,207 +2894,80 @@ func extractTracingSpanFieldsFilterType(ctx context.Context, spanFields types.Se return filters, nil } -func extractTracingTimeWindow(ctx context.Context, window types.Object) (*cxsdk.TracingTimeWindow, diag.Diagnostics) { +func extractTracingTimeWindow(ctx context.Context, window types.String) (*cxsdk.TracingTimeWindow, diag.Diagnostics) { if window.IsNull() || window.IsUnknown() { return nil, nil } - var windowModel TracingTimeWindowModel - if diags := window.As(ctx, &windowModel, basetypes.ObjectAsOptions{}); diags.HasError() { - return nil, diags - } - - if 
specificValue := windowModel.SpecificValue; !(specificValue.IsNull() || specificValue.IsUnknown()) { - return &cxsdk.TracingTimeWindow{ - Type: &cxsdk.TracingTimeWindow_TracingTimeWindowValue{ - TracingTimeWindowValue: tracingTimeWindowSchemaToProtoMap[specificValue.ValueString()], - }, - }, nil - } - - return nil, diag.Diagnostics{diag.NewErrorDiagnostic("Invalid Time Window", "Time Window is not valid")} - -} - -func expandMetricMoreThanUsualAlertTypeDefinition(ctx context.Context, properties *cxsdk.AlertDefProperties, metricMoreThanUsual types.Object) (*cxsdk.AlertDefProperties, diag.Diagnostics) { - if metricMoreThanUsual.IsNull() || metricMoreThanUsual.IsUnknown() { - return properties, nil - } - - var metricMoreThanUsualModel MetricMoreThanUsualModel - if diags := metricMoreThanUsual.As(ctx, &metricMoreThanUsualModel, basetypes.ObjectAsOptions{}); diags.HasError() { - return nil, diags - } - - metricFilter, diags := extractMetricFilter(ctx, metricMoreThanUsualModel.MetricFilter) - if diags.HasError() { - return nil, diags - } - - ofTheLast, diags := extractMetricTimeWindow(ctx, metricMoreThanUsualModel.OfTheLast) - if diags.HasError() { - return nil, diags - } - - properties.TypeDefinition = &cxsdk.AlertDefProperties_MetricMoreThanUsual{ - MetricMoreThanUsual: &cxsdk.MetricMoreThanUsualTypeDefinition{ - MetricFilter: metricFilter, - Threshold: typeInt64ToWrappedUint32(metricMoreThanUsualModel.Threshold), - ForOverPct: typeInt64ToWrappedUint32(metricMoreThanUsualModel.ForOverPct), - OfTheLast: ofTheLast, - MinNonNullValuesPct: typeInt64ToWrappedUint32(metricMoreThanUsualModel.MinNonNullValuesPct), - }, - } - properties.AlertDefType = cxsdk.AlertDefType_ALERT_DEF_TYPE_METRIC_MORE_THAN_USUAL - - return properties, nil -} - -func expandMetricLessThanUsualAlertTypeDefinition(ctx context.Context, properties *cxsdk.AlertDefProperties, metricLessThanUsual types.Object) (*cxsdk.AlertDefProperties, diag.Diagnostics) { - if metricLessThanUsual.IsNull() || metricLessThanUsual.IsUnknown() { - return properties, nil - } - - var metricLessThanUsualModel MetricLessThanUsualModel - if diags := metricLessThanUsual.As(ctx, &metricLessThanUsualModel, basetypes.ObjectAsOptions{}); diags.HasError() { - return nil, diags - } - - metricFilter, diags := extractMetricFilter(ctx, metricLessThanUsualModel.MetricFilter) - if diags.HasError() { - return nil, diags - } - - ofTheLast, diags := extractMetricTimeWindow(ctx, metricLessThanUsualModel.OfTheLast) - if diags.HasError() { - return nil, diags - } - - properties.TypeDefinition = &cxsdk.AlertDefProperties_MetricLessThanUsual{ - MetricLessThanUsual: &cxsdk.MetricLessThanUsualTypeDefinition{ - MetricFilter: metricFilter, - Threshold: typeInt64ToWrappedUint32(metricLessThanUsualModel.Threshold), - ForOverPct: typeInt64ToWrappedUint32(metricLessThanUsualModel.ForOverPct), - OfTheLast: ofTheLast, - MinNonNullValuesPct: typeInt64ToWrappedUint32(metricLessThanUsualModel.MinNonNullValuesPct), + return &cxsdk.TracingTimeWindow{ + Type: &cxsdk.TracingTimeWindowSpecificValue{ + TracingTimeWindowValue: tracingTimeWindowSchemaToProtoMap[window.ValueString()], }, - } - properties.AlertDefType = cxsdk.AlertDefType_ALERT_DEF_TYPE_METRIC_LESS_THAN_USUAL - - return properties, nil + }, nil } -func expandMetricMoreThanOrEqualsAlertTypeDefinition(ctx context.Context, properties *cxsdk.AlertDefProperties, metricMoreThanOrEquals types.Object) (*cxsdk.AlertDefProperties, diag.Diagnostics) { - if metricMoreThanOrEquals.IsNull() || metricMoreThanOrEquals.IsUnknown() { +func 
expandMetricUnusualAlertTypeDefinition(ctx context.Context, properties *cxsdk.AlertDefProperties, metricUnusual types.Object) (*cxsdk.AlertDefProperties, diag.Diagnostics) { + if objIsNullOrUnknown(metricUnusual) { return properties, nil } - var metricMoreThanOrEqualsModel MetricMoreThanOrEqualsModel - if diags := metricMoreThanOrEquals.As(ctx, &metricMoreThanOrEqualsModel, basetypes.ObjectAsOptions{}); diags.HasError() { - return nil, diags - } - - metricFilter, diags := extractMetricFilter(ctx, metricMoreThanOrEqualsModel.MetricFilter) - if diags.HasError() { + var metricUnusualModel MetricUnusualModel + if diags := metricUnusual.As(ctx, &metricUnusualModel, basetypes.ObjectAsOptions{}); diags.HasError() { return nil, diags } - ofTheLast, diags := extractMetricTimeWindow(ctx, metricMoreThanOrEqualsModel.OfTheLast) + metricFilter, diags := extractMetricFilter(ctx, metricUnusualModel.MetricFilter) if diags.HasError() { return nil, diags } - missingValues, diags := extractMissingValues(ctx, metricMoreThanOrEqualsModel.MissingValues) + rules, diags := extractMetricUnusualRules(ctx, metricUnusualModel.Rules) if diags.HasError() { return nil, diags } - properties.TypeDefinition = &cxsdk.AlertDefProperties_MetricMoreThanOrEquals{ - MetricMoreThanOrEquals: &cxsdk.MetricMoreThanOrEqualsTypeDefinition{ - MetricFilter: metricFilter, - Threshold: typeFloat64ToWrapperspbFloat(metricMoreThanOrEqualsModel.Threshold), - ForOverPct: typeInt64ToWrappedUint32(metricMoreThanOrEqualsModel.ForOverPct), - OfTheLast: ofTheLast, - MissingValues: missingValues, + properties.TypeDefinition = &cxsdk.AlertDefPropertiesMetricUnusual{ + MetricUnusual: &cxsdk.MetricUnusualType{ + MetricFilter: metricFilter, + Rules: rules, }, } - properties.AlertDefType = cxsdk.AlertDefType_ALERT_DEF_TYPE_METRIC_MORE_THAN_OR_EQUALS - return properties, nil -} - -func expandMetricLessThanOrEqualsAlertTypeDefinition(ctx context.Context, properties *cxsdk.AlertDefProperties, equals types.Object) (*cxsdk.AlertDefProperties, diag.Diagnostics) { - if equals.IsNull() || equals.IsUnknown() { - return properties, nil - } - - var equalsModel MetricLessThanOrEqualsModel - if diags := equals.As(ctx, &equalsModel, basetypes.ObjectAsOptions{}); diags.HasError() { - return nil, diags - } - - metricFilter, diags := extractMetricFilter(ctx, equalsModel.MetricFilter) - if diags.HasError() { - return nil, diags - } - - ofTheLast, diags := extractMetricTimeWindow(ctx, equalsModel.OfTheLast) - if diags.HasError() { - return nil, diags - } - - missingValues, diags := extractMissingValues(ctx, equalsModel.MissingValues) - if diags.HasError() { - return nil, diags - } - - undetectedValuesManagement, diags := extractUndetectedValuesManagement(ctx, equalsModel.UndetectedValuesManagement) - if diags.HasError() { - return nil, diags - } + properties.Type = cxsdk.AlertDefTypeMetricUnusual - properties.TypeDefinition = &cxsdk.AlertDefProperties_MetricLessThanOrEquals{ - MetricLessThanOrEquals: &cxsdk.MetricLessThanOrEqualsTypeDefinition{ - MetricFilter: metricFilter, - Threshold: typeFloat64ToWrapperspbFloat(equalsModel.Threshold), - ForOverPct: typeInt64ToWrappedUint32(equalsModel.ForOverPct), - OfTheLast: ofTheLast, - MissingValues: missingValues, - UndetectedValuesManagement: undetectedValuesManagement, - }, - } - properties.AlertDefType = cxsdk.AlertDefType_ALERT_DEF_TYPE_METRIC_LESS_THAN_OR_EQUALS return properties, nil } -func expandTracingImmediateAlertTypeDefinition(ctx context.Context, properties *cxsdk.AlertDefProperties, tracingImmediate types.Object) 
(*cxsdk.AlertDefProperties, diag.Diagnostics) { - if tracingImmediate.IsNull() || tracingImmediate.IsUnknown() { - return properties, nil - } - - var tracingImmediateModel TracingImmediateModel - if diags := tracingImmediate.As(ctx, &tracingImmediateModel, basetypes.ObjectAsOptions{}); diags.HasError() { - return nil, diags - } +func extractMetricUnusualRules(ctx context.Context, elements types.List) ([]*cxsdk.MetricUnusualRule, diag.Diagnostics) { + diags := diag.Diagnostics{} + rules := make([]*cxsdk.MetricUnusualRule, len(elements.Elements())) + var objs []types.Object + elements.ElementsAs(ctx, &objs, false) + for i, r := range objs { + var rule MetricRule + if dg := r.As(ctx, &rule, basetypes.ObjectAsOptions{}); dg.HasError() { + diags.Append(dg...) + continue + } - tracingQuery, diags := extractTracingFilter(ctx, tracingImmediateModel.TracingFilter) - if diags.HasError() { - return nil, diags + ofTheLast, dg := extractMetricTimeWindow(ctx, rule.OfTheLast) + if dg.HasError() { + diags.Append(dg...) + continue + } + rules[i] = &cxsdk.MetricUnusualRule{ + Condition: &cxsdk.MetricUnusualCondition{ + Threshold: typeFloat64ToWrapperspbDouble(rule.Threshold), + ForOverPct: typeInt64ToWrappedUint32(rule.ForOverPct), + OfTheLast: ofTheLast, + ConditionType: metricUnusualConditionToProtoMap[rule.Condition.ValueString()], + MinNonNullValuesPct: typeInt64ToWrappedUint32(rule.MinNonNullValuesPct), + }, + } } - - notificationPayloadFilter, diags := typeStringSliceToWrappedStringSlice(ctx, tracingImmediateModel.NotificationPayloadFilter.Elements()) if diags.HasError() { return nil, diags } - - properties.TypeDefinition = &cxsdk.AlertDefProperties_TracingImmediate{ - TracingImmediate: &cxsdk.TracingImmediateTypeDefinition{ - TracingFilter: tracingQuery, - NotificationPayloadFilter: notificationPayloadFilter, - }, - } - properties.AlertDefType = cxsdk.AlertDefType_ALERT_DEF_TYPE_TRACING_IMMEDIATE - - return properties, nil + return rules, nil } func expandFlowAlertTypeDefinition(ctx context.Context, properties *cxsdk.AlertDefProperties, flow types.Object) (*cxsdk.AlertDefProperties, diag.Diagnostics) { @@ -3162,13 +2985,13 @@ func expandFlowAlertTypeDefinition(ctx context.Context, properties *cxsdk.AlertD return nil, diags } - properties.TypeDefinition = &cxsdk.AlertDefProperties_Flow{ - Flow: &cxsdk.FlowTypeDefinition{ + properties.TypeDefinition = &cxsdk.AlertDefPropertiesFlow{ + Flow: &cxsdk.FlowType{ Stages: stages, EnforceSuppression: typeBoolToWrapperspbBool(flowModel.EnforceSuppression), }, } - properties.AlertDefType = cxsdk.AlertDefType_ALERT_DEF_TYPE_FLOW + properties.Type = cxsdk.AlertDefTypeFlow return properties, nil } @@ -3217,7 +3040,7 @@ func extractFlowStage(ctx context.Context, object types.Object) (*cxsdk.FlowStag return flowStage, nil } -func extractFlowStagesGroups(ctx context.Context, groups types.List) (*cxsdk.FlowStages_FlowStagesGroups, diag.Diagnostics) { +func extractFlowStagesGroups(ctx context.Context, groups types.List) (*cxsdk.FlowStagesGroups, diag.Diagnostics) { if groups.IsNull() || groups.IsUnknown() { return nil, nil } @@ -3237,9 +3060,10 @@ func extractFlowStagesGroups(ctx context.Context, groups types.List) (*cxsdk.Flo flowStagesGroups = append(flowStagesGroups, group) } - return &cxsdk.FlowStages_FlowStagesGroups{FlowStagesGroups: &cxsdk.FlowStagesGroups{ - Groups: flowStagesGroups, - }}, nil + return &cxsdk.FlowStagesGroups{ + FlowStagesGroups: &cxsdk.FlowStagesGroupsValue{ + Groups: flowStagesGroups, + }}, nil } @@ -3423,9 +3247,9 @@ func 
flattenAdvancedTargetSettings(ctx context.Context, advancedTargetSettings * Recipients: types.SetNull(types.StringType), } switch integrationType := notification.GetIntegration(); integrationType.GetIntegrationType().(type) { - case *cxsdk.IntegrationType_IntegrationId: + case *cxsdk.AlertDefIntegrationTypeIntegrationID: notificationModel.IntegrationID = types.StringValue(strconv.Itoa(int(integrationType.GetIntegrationId().GetValue()))) - case *cxsdk.IntegrationType_Recipients: + case *cxsdk.AlertDefIntegrationTypeRecipients: notificationModel.Recipients = wrappedStringSliceToTypeStringSet(integrationType.GetRecipients().GetEmails()) } notificationsModel = append(notificationsModel, ¬ificationModel) @@ -3463,9 +3287,9 @@ func flattenSimpleTargetSettings(ctx context.Context, simpleTargetSettings *cxsd Recipients: types.SetNull(types.StringType), } switch notification.GetIntegrationType().(type) { - case *cxsdk.IntegrationType_IntegrationId: + case *cxsdk.AlertDefIntegrationTypeIntegrationID: notificationModel.IntegrationID = types.StringValue(strconv.Itoa(int(notification.GetIntegrationId().GetValue()))) - case *cxsdk.IntegrationType_Recipients: + case *cxsdk.AlertDefIntegrationTypeRecipients: notificationModel.Recipients = wrappedStringSliceToTypeStringSet(notification.GetRecipients().GetEmails()) } notificationsModel = append(notificationsModel, notificationModel) @@ -3497,7 +3321,7 @@ func flattenIncidentsSettingsByRetriggeringPeriod(ctx context.Context, settings var periodModel RetriggeringPeriodModel switch period := settings.RetriggeringPeriod.(type) { - case *cxsdk.AlertDefIncidentSettings_Minutes: + case *cxsdk.AlertDefIncidentSettingsMinutes: periodModel.Minutes = wrapperspbUint32ToTypeInt64(period.Minutes) default: return types.ObjectNull(retriggeringPeriodAttr()), diag.Diagnostics{diag.NewErrorDiagnostic("Invalid Retriggering Period", fmt.Sprintf("Retriggering Period %v is not supported", period))} @@ -3519,7 +3343,7 @@ func flattenAlertTypeDefinition(ctx context.Context, properties *cxsdk.AlertDefP LogsRatioThreshold: types.ObjectNull(logsRatioThresholdAttr()), LogsNewValue: types.ObjectNull(logsNewValueAttr()), LogsUniqueCount: types.ObjectNull(logsUniqueCountAttr()), - LogsTimeRelativeThreshold: types.ObjectNull(logsTimeRelativeThresholdAttr()), + LogsTimeRelativeThreshold: types.ObjectNull(logsTimeRelativeAttr()), MetricThreshold: types.ObjectNull(metricThresholdAttr()), MetricUnusual: types.ObjectNull(metricUnusualAttr()), TracingImmediate: types.ObjectNull(tracingImmediateAttr()), @@ -3679,35 +3503,40 @@ func flattenLogsThreshold(ctx context.Context, threshold *cxsdk.LogsThresholdTyp return types.ObjectNull(logsThresholdAttr()), diags } - timeWindow, diags := flattenLogsTimeWindow(ctx, threshold.GetTimeWindow()) + rules, diags := flattenLogsThresholdRules(ctx, threshold.Rules) if diags.HasError() { return types.ObjectNull(logsThresholdAttr()), diags } logsMoreThanModel := LogsThresholdModel{ LogsFilter: logsFilter, - Threshold: wrapperspbUint32ToTypeInt64(moreThan.GetThreshold()), - TimeWindow: timeWindow, - EvaluationWindow: types.StringValue(evaluationWindowTypeProtoToSchemaMap[moreThan.GetEvaluationWindow()]), - NotificationPayloadFilter: wrappedStringSliceToTypeStringSet(moreThan.GetNotificationPayloadFilter()), + Rules: rules, + NotificationPayloadFilter: wrappedStringSliceToTypeStringSet(threshold.GetNotificationPayloadFilter()), } - return types.ObjectValueFrom(ctx, logsMoreThanAttr(), logsMoreThanModel) + return types.ObjectValueFrom(ctx, logsThresholdAttr(), 
logsMoreThanModel)
 }
 
-func flattenLogsTimeWindow(ctx context.Context, timeWindow *cxsdk.LogsTimeWindow) (types.Object, diag.Diagnostics) {
-	if timeWindow == nil {
-		return types.ObjectNull(logsTimeWindowAttr()), nil
+func flattenLogsThresholdRules(ctx context.Context, rules []*cxsdk.LogsThresholdRule) (types.List, diag.Diagnostics) {
+	if rules == nil {
+		return types.ListNull(types.ObjectType{AttrTypes: logsThresholdRulesAttr()}), nil
 	}
-
-	switch timeWindowType := timeWindow.Type.(type) {
-	case *cxsdk.LogsTimeWindowSpecificValue:
-		return types.ObjectValueFrom(ctx, logsTimeWindowAttr(), LogsTimeWindowModel{
-			SpecificValue: types.StringValue(logsTimeWindowValueProtoToSchemaMap[timeWindowType.LogsTimeWindowSpecificValue]),
-		})
-	default:
-		return types.ObjectNull(logsTimeWindowAttr()), diag.Diagnostics{diag.NewErrorDiagnostic("Invalid Time Window", fmt.Sprintf("Time Window %v is not supported", timeWindowType))}
+	convertedRules := make([]*RuleModel, len(rules))
+	for i, rule := range rules {
+		timeWindow := flattenLogsTimeWindow(ctx, rule.Condition.TimeWindow)
+		convertedRules[i] = &RuleModel{
+			Condition:  types.StringValue(logsThresholdConditionMap[rule.Condition.ConditionType]),
+			Threshold:  wrapperspbDoubleToTypeFloat64(rule.Condition.Threshold),
+			TimeWindow: timeWindow,
+		}
 	}
+	return types.ListValueFrom(ctx, types.ObjectType{AttrTypes: logsThresholdRulesAttr()}, convertedRules)
+}
+func flattenLogsTimeWindow(ctx context.Context, timeWindow *cxsdk.LogsTimeWindow) types.String {
+	if timeWindow == nil {
+		return types.StringNull()
+	}
+	return types.StringValue(logsTimeWindowValueProtoToSchemaMap[timeWindow.GetLogsTimeWindowSpecificValue()])
 }
 
 func flattenUndetectedValuesManagement(ctx context.Context, undetectedValuesManagement *cxsdk.UndetectedValuesManagement) (types.Object, diag.Diagnostics) {
@@ -3723,120 +3552,75 @@ func flattenUndetectedValuesManagement(ctx context.Context, undetectedValuesMana
 	return types.ObjectValueFrom(ctx, undetectedValuesManagementAttr(), undetectedValuesManagementModel)
 }
 
-func flattenLogsMoreThanUsual(ctx context.Context, moreThanUsual *cxsdk.LogsMoreThanUsualTypeDefinition) (types.Object, diag.Diagnostics) {
-	if moreThanUsual == nil {
-		return types.ObjectNull(logsMoreThanUsualAttr()), nil
+func flattenLogsUnusual(ctx context.Context, unusual *cxsdk.LogsUnusualType) (types.Object, diag.Diagnostics) {
+	if unusual == nil {
+		return types.ObjectNull(logsUnusualAttr()), nil
 	}
 
-	logsFilter, diags := flattenAlertsLogsFilter(ctx, moreThanUsual.GetLogsFilter())
+	logsFilter, diags := flattenAlertsLogsFilter(ctx, unusual.GetLogsFilter())
 	if diags.HasError() {
-		return types.ObjectNull(logsMoreThanUsualAttr()), diags
+		return types.ObjectNull(logsUnusualAttr()), diags
 	}
 
-	timeWindow, diags := flattenLogsTimeWindow(ctx, moreThanUsual.GetTimeWindow())
-	if diags.HasError() {
-		return types.ObjectNull(logsMoreThanUsualAttr()), diags
+	rulesRaw := make([]RuleModel, len(unusual.Rules))
+	for i, rule := range unusual.Rules {
+		timeWindow := flattenLogsTimeWindow(ctx, rule.Condition.TimeWindow)
+		rulesRaw[i] = RuleModel{
+			Threshold:  wrapperspbDoubleToTypeFloat64(rule.Condition.MinimumThreshold),
+			TimeWindow: timeWindow,
+		}
 	}
 
-	logsMoreThanUsualModel := LogsMoreThanUsualModel{
+	rules, diags := types.ListValueFrom(ctx, types.ObjectType{AttrTypes: logsUnusualRulesAttr()}, rulesRaw)
+
+	logsMoreThanUsualModel := LogsUnusualModel{
 		LogsFilter:                logsFilter,
-		MinimumThreshold:          wrapperspbUint32ToTypeInt64(moreThanUsual.GetMinimumThreshold()),
-		TimeWindow:                timeWindow,
-
NotificationPayloadFilter: wrappedStringSliceToTypeStringSet(moreThanUsual.GetNotificationPayloadFilter()), + Rules: rules, + NotificationPayloadFilter: wrappedStringSliceToTypeStringSet(unusual.GetNotificationPayloadFilter()), } - return types.ObjectValueFrom(ctx, logsMoreThanUsualAttr(), logsMoreThanUsualModel) + return types.ObjectValueFrom(ctx, logsUnusualAttr(), logsMoreThanUsualModel) } -func flattenLogsRatioMoreThan(ctx context.Context, ratioMoreThan *cxsdk.LogsRatioMoreThanTypeDefinition) (types.Object, diag.Diagnostics) { - if ratioMoreThan == nil { - return types.ObjectNull(logsRatioMoreThanAttr()), nil - } - - numeratorLogsFilter, diags := flattenAlertsLogsFilter(ctx, ratioMoreThan.GetNumeratorLogsFilter()) - if diags.HasError() { - return types.ObjectNull(logsRatioMoreThanAttr()), diags +func flattenLogsRatioThreshold(ctx context.Context, ratioThreshold *cxsdk.LogsRatioThresholdType) (types.Object, diag.Diagnostics) { + if ratioThreshold == nil { + return types.ObjectNull(logsRatioThresholdAttr()), nil } - denominatorLogsFilter, diags := flattenAlertsLogsFilter(ctx, ratioMoreThan.GetDenominatorLogsFilter()) + numeratorLogsFilter, diags := flattenAlertsLogsFilter(ctx, ratioThreshold.GetNumerator()) if diags.HasError() { - return types.ObjectNull(logsRatioMoreThanAttr()), diags - } - - timeWindow, diags := flattenLogsRatioTimeWindow(ctx, ratioMoreThan.GetTimeWindow()) - if diags.HasError() { - return types.ObjectNull(logsRatioMoreThanAttr()), diags - } - - logsRatioMoreThanModel := LogsRatioMoreThanModel{ - NumeratorLogsFilter: numeratorLogsFilter, - NumeratorAlias: wrapperspbStringToTypeString(ratioMoreThan.GetNumeratorAlias()), - DenominatorLogsFilter: denominatorLogsFilter, - DenominatorAlias: wrapperspbStringToTypeString(ratioMoreThan.GetDenominatorAlias()), - Threshold: typeFloat64ToWrapperspbDouble(ratioMoreThan.GetThreshold()), - TimeWindow: timeWindow, - IgnoreInfinity: wrapperspbBoolToTypeBool(ratioMoreThan.GetIgnoreInfinity()), - NotificationPayloadFilter: wrappedStringSliceToTypeStringSet(ratioMoreThan.GetNotificationPayloadFilter()), - GroupByFor: types.StringValue(logsRatioGroupByForProtoToSchemaMap[ratioMoreThan.GetGroupByFor()]), - } - return types.ObjectValueFrom(ctx, logsRatioMoreThanAttr(), logsRatioMoreThanModel) -} - -func flattenLogsRatioTimeWindow(ctx context.Context, window *cxsdk.LogsRatioTimeWindow) (types.Object, diag.Diagnostics) { - if window == nil { - return types.ObjectNull(logsTimeWindowAttr()), nil - } - - switch timeWindowType := window.Type.(type) { - case *cxsdk.LogsRatioTimeWindow_LogsRatioTimeWindowSpecificValue: - return types.ObjectValueFrom(ctx, logsTimeWindowAttr(), LogsRatioTimeWindowModel{ - SpecificValue: types.StringValue(logsRatioTimeWindowValueProtoToSchemaMap[timeWindowType.LogsRatioTimeWindowSpecificValue]), - }) - default: - return types.ObjectNull(logsTimeWindowAttr()), diag.Diagnostics{diag.NewErrorDiagnostic("Invalid Time Window", fmt.Sprintf("Time Window %v is not supported", timeWindowType))} - } -} - -func flattenLogsRatioLessThan(ctx context.Context, ratioLessThan *cxsdk.LogsRatioLessThanTypeDefinition) (types.Object, diag.Diagnostics) { - if ratioLessThan == nil { - return types.ObjectNull(logsRatioLessThanAttr()), nil + return types.ObjectNull(logsRatioThresholdAttr()), diags } - numeratorLogsFilter, diags := flattenAlertsLogsFilter(ctx, ratioLessThan.GetNumeratorLogsFilter()) + denominatorLogsFilter, diags := flattenAlertsLogsFilter(ctx, ratioThreshold.GetDenominator()) if diags.HasError() { - return 
types.ObjectNull(logsRatioLessThanAttr()), diags + return types.ObjectNull(logsRatioThresholdAttr()), diags } - denominatorLogsFilter, diags := flattenAlertsLogsFilter(ctx, ratioLessThan.GetDenominatorLogsFilter()) - if diags.HasError() { - return types.ObjectNull(logsRatioLessThanAttr()), diags + rulesRaw := make([]RuleModel, len(ratioThreshold.Rules)) + for i, rule := range ratioThreshold.Rules { + timeWindow := types.StringValue(logsRatioTimeWindowValueProtoToSchemaMap[rule.Condition.TimeWindow.GetLogsRatioTimeWindowSpecificValue()]) + rulesRaw[i] = RuleModel{ + Threshold: wrapperspbDoubleToTypeFloat64(rule.Condition.GetThreshold()), + TimeWindow: timeWindow, + IgnoreInfinity: wrapperspbBoolToTypeBool(rule.Condition.GetIgnoreInfinity()), + } } - timeWindow, diags := flattenLogsRatioTimeWindow(ctx, ratioLessThan.GetTimeWindow()) - if diags.HasError() { - return types.ObjectNull(logsRatioLessThanAttr()), diags - } + rules, diags := types.ListValueFrom(ctx, types.ObjectType{AttrTypes: logsRatioThresholdRulesAttr()}, rulesRaw) - undetectedValuesManagement, diags := flattenUndetectedValuesManagement(ctx, ratioLessThan.GetUndetectedValuesManagement()) - if diags.HasError() { - return types.ObjectNull(logsRatioLessThanAttr()), diags - } - - logsRatioLessThanModel := LogsRatioLessThanModel{ - NumeratorLogsFilter: numeratorLogsFilter, - NumeratorAlias: wrapperspbStringToTypeString(ratioLessThan.GetNumeratorAlias()), - DenominatorLogsFilter: denominatorLogsFilter, - DenominatorAlias: wrapperspbStringToTypeString(ratioLessThan.GetDenominatorAlias()), - Threshold: wrapperspbUint32ToTypeInt64(ratioLessThan.GetThreshold()), - TimeWindow: timeWindow, - IgnoreInfinity: wrapperspbBoolToTypeBool(ratioLessThan.GetIgnoreInfinity()), - NotificationPayloadFilter: wrappedStringSliceToTypeStringSet(ratioLessThan.GetNotificationPayloadFilter()), - GroupByFor: types.StringValue(logsRatioGroupByForProtoToSchemaMap[ratioLessThan.GetGroupByFor()]), - UndetectedValuesManagement: undetectedValuesManagement, + logsRatioMoreThanModel := LogsRatioThresholdModel{ + Numerator: numeratorLogsFilter, + NumeratorAlias: wrapperspbStringToTypeString(ratioThreshold.GetNumeratorAlias()), + Denominator: denominatorLogsFilter, + DenominatorAlias: wrapperspbStringToTypeString(ratioThreshold.GetDenominatorAlias()), + Rules: rules, + NotificationPayloadFilter: wrappedStringSliceToTypeStringSet(ratioThreshold.GetNotificationPayloadFilter()), + GroupByFor: types.StringValue(logsRatioGroupByForProtoToSchemaMap[ratioThreshold.GetGroupByFor()]), } - return types.ObjectValueFrom(ctx, logsRatioLessThanAttr(), logsRatioLessThanModel) + return types.ObjectValueFrom(ctx, logsRatioThresholdAttr(), logsRatioMoreThanModel) } -func flattenLogsUniqueCount(ctx context.Context, uniqueCount *cxsdk.LogsUniqueCountTypeDefinition) (types.Object, diag.Diagnostics) { +func flattenLogsUniqueCount(ctx context.Context, uniqueCount *cxsdk.LogsUniqueCountType) (types.Object, diag.Diagnostics) { if uniqueCount == nil { return types.ObjectNull(logsUniqueCountAttr()), nil } @@ -3846,39 +3630,28 @@ func flattenLogsUniqueCount(ctx context.Context, uniqueCount *cxsdk.LogsUniqueCo return types.ObjectNull(logsUniqueCountAttr()), diags } - timeWindow, diags := flattenLogsUniqueCountTimeWindow(ctx, uniqueCount.GetTimeWindow()) - if diags.HasError() { - return types.ObjectNull(logsUniqueCountAttr()), diags + rulesRaw := make([]LogsUniqueCountRuleModel, len(uniqueCount.Rules)) + for i, rule := range uniqueCount.Rules { + timeWindow := 
types.StringValue(logsUniqueCountTimeWindowValueProtoToSchemaMap[rule.Condition.TimeWindow.GetLogsUniqueValueTimeWindowSpecificValue()]) + rulesRaw[i] = LogsUniqueCountRuleModel{ + MaxUniqueCountPerGroupByKey: wrapperspbInt64ToTypeInt64(rule.Condition.GetMaxUniqueCountPerGroupByKey()), + MaxUniqueCount: wrapperspbInt64ToTypeInt64(rule.Condition.GetMaxUniqueCount()), + TimeWindow: timeWindow, + UniqueCountKeypath: wrapperspbStringToTypeString(rule.Condition.UniqueCountKeypath), + } } + rules, diags := types.ListValueFrom(ctx, types.ObjectType{AttrTypes: logsRatioThresholdRulesAttr()}, rulesRaw) + logsUniqueCountModel := LogsUniqueCountModel{ - LogsFilter: logsFilter, - UniqueCountKeypath: wrapperspbStringToTypeString(uniqueCount.GetUniqueCountKeypath()), - MaxUniqueCount: wrapperspbInt64ToTypeInt64(uniqueCount.GetMaxUniqueCount()), - TimeWindow: timeWindow, - NotificationPayloadFilter: wrappedStringSliceToTypeStringSet(uniqueCount.GetNotificationPayloadFilter()), - MaxUniqueCountPerGroupByKey: wrapperspbInt64ToTypeInt64(uniqueCount.GetMaxUniqueCountPerGroupByKey()), + LogsFilter: logsFilter, + Rules: rules, + NotificationPayloadFilter: wrappedStringSliceToTypeStringSet(uniqueCount.GetNotificationPayloadFilter()), } return types.ObjectValueFrom(ctx, logsUniqueCountAttr(), logsUniqueCountModel) } -func flattenLogsUniqueCountTimeWindow(ctx context.Context, timeWindow *cxsdk.LogsUniqueValueTimeWindow) (types.Object, diag.Diagnostics) { - if timeWindow == nil { - return types.ObjectNull(logsTimeWindowAttr()), nil - } - - switch timeWindowType := timeWindow.Type.(type) { - case *cxsdk.LogsUniqueValueTimeWindow_LogsUniqueValueTimeWindowSpecificValue: - return types.ObjectValueFrom(ctx, logsTimeWindowAttr(), LogsUniqueCountTimeWindowModel{ - SpecificValue: types.StringValue(logsUniqueCountTimeWindowValueProtoToSchemaMap[timeWindowType.LogsUniqueValueTimeWindowSpecificValue]), - }) - default: - return types.ObjectNull(logsTimeWindowAttr()), diag.Diagnostics{diag.NewErrorDiagnostic("Invalid Time Window", fmt.Sprintf("Time Window %v is not supported", timeWindowType))} - } - -} - -func flattenLogsNewValue(ctx context.Context, newValue *cxsdk.LogsNewValueTypeDefinition) (types.Object, diag.Diagnostics) { +func flattenLogsNewValue(ctx context.Context, newValue *cxsdk.LogsNewValueType) (types.Object, diag.Diagnostics) { if newValue == nil { return types.ObjectNull(logsNewValueAttr()), nil } @@ -3888,35 +3661,25 @@ func flattenLogsNewValue(ctx context.Context, newValue *cxsdk.LogsNewValueTypeDe return types.ObjectNull(logsNewValueAttr()), diags } - timeWindow, diags := flattenLogsNewValueTimeWindow(ctx, newValue.GetTimeWindow()) - if diags.HasError() { - return types.ObjectNull(logsNewValueAttr()), diags + rulesRaw := make([]NewValueRuleModel, len(newValue.Rules)) + for i, rule := range newValue.Rules { + timeWindow := types.StringValue(logsNewValueTimeWindowValueProtoToSchemaMap[rule.Condition.TimeWindow.GetLogsNewValueTimeWindowSpecificValue()]) + rulesRaw[i] = NewValueRuleModel{ + KeypathToTrack: wrapperspbStringToTypeString(rule.Condition.GetKeypathToTrack()), + TimeWindow: timeWindow, + } } + rules, diags := types.ListValueFrom(ctx, types.ObjectType{AttrTypes: logsRatioThresholdRulesAttr()}, rulesRaw) + logsNewValueModel := LogsNewValueModel{ LogsFilter: logsFilter, - KeypathToTrack: wrapperspbStringToTypeString(newValue.GetKeypathToTrack()), - TimeWindow: timeWindow, + Rules: rules, NotificationPayloadFilter: wrappedStringSliceToTypeStringSet(newValue.GetNotificationPayloadFilter()), } return 
types.ObjectValueFrom(ctx, logsNewValueAttr(), logsNewValueModel) } -func flattenLogsNewValueTimeWindow(ctx context.Context, window *cxsdk.LogsNewValueTimeWindow) (types.Object, diag.Diagnostics) { - if window == nil { - return types.ObjectNull(logsTimeWindowAttr()), nil - } - - switch timeWindowType := window.Type.(type) { - case *cxsdk.LogsNewValueTimeWindow_LogsNewValueTimeWindowSpecificValue: - return types.ObjectValueFrom(ctx, logsTimeWindowAttr(), LogsNewValueTimeWindowModel{ - SpecificValue: types.StringValue(logsNewValueTimeWindowValueProtoToSchemaMap[timeWindowType.LogsNewValueTimeWindowSpecificValue]), - }) - default: - return types.ObjectNull(logsTimeWindowAttr()), diag.Diagnostics{diag.NewErrorDiagnostic("Invalid Time Window", fmt.Sprintf("Time Window %v is not supported", timeWindowType))} - } -} - func flattenAlertSchedule(ctx context.Context, alertProperties *cxsdk.AlertDefProperties) (types.Object, diag.Diagnostics) { if alertProperties.Schedule == nil { return types.ObjectNull(alertScheduleAttr()), nil @@ -3925,7 +3688,7 @@ func flattenAlertSchedule(ctx context.Context, alertProperties *cxsdk.AlertDefPr var alertScheduleModel AlertScheduleModel var diags diag.Diagnostics switch alertScheduleType := alertProperties.Schedule.(type) { - case *cxsdk.AlertDefProperties_ActiveOn: + case *cxsdk.AlertDefPropertiesActiveOn: alertScheduleModel.ActiveOn, diags = flattenActiveOn(ctx, alertScheduleType.ActiveOn) default: return types.ObjectNull(alertScheduleAttr()), diag.Diagnostics{diag.NewErrorDiagnostic("Invalid Alert Schedule", fmt.Sprintf("Alert Schedule %v is not supported", alertScheduleType))} @@ -3938,7 +3701,7 @@ func flattenAlertSchedule(ctx context.Context, alertProperties *cxsdk.AlertDefPr return types.ObjectValueFrom(ctx, alertScheduleAttr(), alertScheduleModel) } -func flattenActiveOn(ctx context.Context, activeOn *cxsdk.ActivitySchedule) (types.Object, diag.Diagnostics) { +func flattenActiveOn(ctx context.Context, activeOn *cxsdk.AlertDefActivitySchedule) (types.Object, diag.Diagnostics) { if activeOn == nil { return types.ObjectNull(alertScheduleActiveOnAttr()), nil } @@ -3966,7 +3729,7 @@ func flattenActiveOn(ctx context.Context, activeOn *cxsdk.ActivitySchedule) (typ return types.ObjectValueFrom(ctx, alertScheduleActiveOnAttr(), activeOnModel) } -func flattenDaysOfWeek(ctx context.Context, daysOfWeek []cxsdk.DayOfWeek) (types.List, diag.Diagnostics) { +func flattenDaysOfWeek(ctx context.Context, daysOfWeek []cxsdk.AlertDayOfWeek) (types.List, diag.Diagnostics) { var daysOfWeekStrings []types.String for _, dow := range daysOfWeek { daysOfWeekStrings = append(daysOfWeekStrings, types.StringValue(daysOfWeekProtoToSchemaMap[dow])) @@ -3974,7 +3737,7 @@ func flattenDaysOfWeek(ctx context.Context, daysOfWeek []cxsdk.DayOfWeek) (types return types.ListValueFrom(ctx, types.StringType, daysOfWeekStrings) } -func flattenTimeOfDay(ctx context.Context, time *cxsdk.TimeOfDay) (types.Object, diag.Diagnostics) { +func flattenTimeOfDay(ctx context.Context, time *cxsdk.AlertTimeOfDay) (types.Object, diag.Diagnostics) { if time == nil { return types.ObjectNull(timeOfDayAttr()), nil } @@ -3984,55 +3747,75 @@ func flattenTimeOfDay(ctx context.Context, time *cxsdk.TimeOfDay) (types.Object, }) } -func flattenLogsTimeRelativeMoreThan(ctx context.Context, logsTimeRelativeMoreThan *cxsdk.LogsTimeRelativeMoreThanTypeDefinition) (types.Object, diag.Diagnostics) { - if logsTimeRelativeMoreThan == nil { - return types.ObjectNull(logsTimeRelativeMoreThanAttr()), nil +func 
flattenLogsTimeRelativeThreshold(ctx context.Context, logsTimeRelativeThreshold *cxsdk.LogsTimeRelativeThresholdType) (types.Object, diag.Diagnostics) { + if logsTimeRelativeThreshold == nil { + return types.ObjectNull(logsTimeRelativeAttr()), nil } - logsFilter, diags := flattenAlertsLogsFilter(ctx, logsTimeRelativeMoreThan.GetLogsFilter()) + logsFilter, diags := flattenAlertsLogsFilter(ctx, logsTimeRelativeThreshold.GetLogsFilter()) if diags.HasError() { - return types.ObjectNull(logsTimeRelativeMoreThanAttr()), diags + return types.ObjectNull(logsTimeRelativeAttr()), diags } - logsTimeRelativeMoreThanModel := LogsTimeRelativeMoreThanModel{ + rulesRaw := make([]RuleModel, len(logsTimeRelativeThreshold.Rules)) + for i, rule := range logsTimeRelativeThreshold.Rules { + rulesRaw[i] = RuleModel{ + Threshold: wrapperspbDoubleToTypeFloat64(rule.Condition.GetThreshold()), + ComparedTo: types.StringValue(logsTimeRelativeComparedToProtoToSchemaMap[rule.Condition.ComparedTo]), + IgnoreInfinity: wrapperspbBoolToTypeBool(rule.Condition.GetIgnoreInfinity()), + } + } + + rules, diags := types.ListValueFrom(ctx, types.ObjectType{AttrTypes: logsTimeRelativeRulesAttr()}, rulesRaw) + + logsTimeRelativeThresholdModel := LogsTimeRelativeThresholdModel{ LogsFilter: logsFilter, - NotificationPayloadFilter: wrappedStringSliceToTypeStringSet(logsTimeRelativeMoreThan.GetNotificationPayloadFilter()), - Threshold: wrapperspbUint32ToTypeInt64(logsTimeRelativeMoreThan.GetThreshold()), - ComparedTo: types.StringValue(logsTimeRelativeComparedToProtoToSchemaMap[logsTimeRelativeMoreThan.GetComparedTo()]), - IgnoreInfinity: wrapperspbBoolToTypeBool(logsTimeRelativeMoreThan.GetIgnoreInfinity()), + Rules: rules, + NotificationPayloadFilter: wrappedStringSliceToTypeStringSet(logsTimeRelativeThreshold.GetNotificationPayloadFilter()), } - return types.ObjectValueFrom(ctx, logsTimeRelativeMoreThanAttr(), logsTimeRelativeMoreThanModel) + return types.ObjectValueFrom(ctx, logsTimeRelativeAttr(), logsTimeRelativeThresholdModel) } -func flattenMetricMoreThan(ctx context.Context, metricMoreThan *cxsdk.MetricMoreThanTypeDefinition) (types.Object, diag.Diagnostics) { - if metricMoreThan == nil { - return types.ObjectNull(metricMoreThanAttr()), nil +func flattenMetricThreshold(ctx context.Context, metricThreshold *cxsdk.MetricThresholdType) (types.Object, diag.Diagnostics) { + if metricThreshold == nil { + return types.ObjectNull(metricThresholdAttr()), nil } - metricFilter, diags := flattenMetricFilter(ctx, metricMoreThan.GetMetricFilter()) + metricFilter, diags := flattenMetricFilter(ctx, metricThreshold.GetMetricFilter()) if diags.HasError() { - return types.ObjectNull(metricMoreThanAttr()), diags + return types.ObjectNull(metricThresholdAttr()), diags } - ofTheLast, diags := flattenMetricTimeWindow(ctx, metricMoreThan.GetOfTheLast()) + undetectedValuesManagement, diags := flattenUndetectedValuesManagement(ctx, metricThreshold.GetUndetectedValuesManagement()) if diags.HasError() { - return types.ObjectNull(metricMoreThanAttr()), diags + return types.ObjectNull(metricThresholdAttr()), diags } - missingValues, diags := flattenMissingValues(ctx, metricMoreThan.GetMissingValues()) - if diags.HasError() { - return types.ObjectNull(metricMoreThanAttr()), diags + rulesRaw := make([]MetricRule, len(metricThreshold.Rules)) + for i, rule := range metricThreshold.Rules { + missingValues, diags := flattenMissingValues(ctx, rule.Condition.MissingValues) + if diags.HasError() { + return types.ObjectNull(metricThresholdAttr()), diags + } + + 
rulesRaw[i] = MetricRule{ + Threshold: wrapperspbDoubleToTypeFloat64(rule.Condition.GetThreshold()), + ForOverPct: wrapperspbUint32ToTypeInt64(rule.Condition.GetForOverPct()), + OfTheLast: types.StringValue(metricFilterOperationTypeProtoToSchemaMap[rule.Condition.OfTheLast.GetMetricTimeWindowSpecificValue()]), + Condition: types.StringValue(metricsThresholdConditionMap[rule.Condition.ConditionType]), + MissingValues: missingValues, + } } - metricMoreThanModel := MetricMoreThanModel{ - MetricFilter: metricFilter, - Threshold: wrapperspbFloat64ToTypeFloat64(metricMoreThan.GetThreshold()), - ForOverPct: wrapperspbUint32ToTypeInt64(metricMoreThan.GetForOverPct()), - OfTheLast: ofTheLast, - MissingValues: missingValues, + rules, diags := types.ListValueFrom(ctx, types.ObjectType{AttrTypes: metricThresholdRulesAttr()}, rulesRaw) + + metricThresholdModel := MetricThresholdModel{ + MetricFilter: metricFilter, + Rules: rules, + UndetectedValuesManagement: undetectedValuesManagement, } - return types.ObjectValueFrom(ctx, metricMoreThanAttr(), metricMoreThanModel) + return types.ObjectValueFrom(ctx, metricThresholdAttr(), metricThresholdModel) } func flattenMetricFilter(ctx context.Context, filter *cxsdk.MetricFilter) (types.Object, diag.Diagnostics) { @@ -4041,7 +3824,7 @@ func flattenMetricFilter(ctx context.Context, filter *cxsdk.MetricFilter) (types } switch filterType := filter.Type.(type) { - case *cxsdk.MetricFilter_Promql: + case *cxsdk.MetricFilterPromql: return types.ObjectValueFrom(ctx, metricFilterAttr(), MetricFilterModel{ Promql: wrapperspbStringToTypeString(filterType.Promql), }) @@ -4050,21 +3833,6 @@ func flattenMetricFilter(ctx context.Context, filter *cxsdk.MetricFilter) (types } } -func flattenMetricTimeWindow(ctx context.Context, last *cxsdk.MetricTimeWindow) (types.Object, diag.Diagnostics) { - if last == nil { - return types.ObjectNull(metricTimeWindowAttr()), nil - } - - switch timeWindowType := last.Type.(type) { - case *cxsdk.MetricTimeWindow_MetricTimeWindowSpecificValue: - return types.ObjectValueFrom(ctx, metricTimeWindowAttr(), MetricTimeWindowModel{ - SpecificValue: types.StringValue(metricFilterOperationTypeProtoToSchemaMap[timeWindowType.MetricTimeWindowSpecificValue]), - }) - default: - return types.ObjectNull(metricTimeWindowAttr()), diag.Diagnostics{diag.NewErrorDiagnostic("Invalid Time Window", fmt.Sprintf("Time Window %v is not supported", timeWindowType))} - } -} - func flattenMissingValues(ctx context.Context, missingValues *cxsdk.MetricMissingValues) (types.Object, diag.Diagnostics) { if missingValues == nil { return types.ObjectNull(metricMissingValuesAttr()), nil @@ -4072,9 +3840,9 @@ func flattenMissingValues(ctx context.Context, missingValues *cxsdk.MetricMissin metricMissingValuesModel := MetricMissingValuesModel{} switch missingValuesType := missingValues.MissingValues.(type) { - case *cxsdk.MetricMissingValues_ReplaceWithZero: + case *cxsdk.MetricMissingValuesReplaceWithZero: metricMissingValuesModel.ReplaceWithZero = wrapperspbBoolToTypeBool(missingValuesType.ReplaceWithZero) - case *cxsdk.MetricMissingValues_MinNonNullValuesPct: + case *cxsdk.MetricMissingValuesMinNonNullValuesPct: metricMissingValuesModel.MinNonNullValuesPct = wrapperspbUint32ToTypeInt64(missingValuesType.MinNonNullValuesPct) default: return types.ObjectNull(metricMissingValuesAttr()), diag.Diagnostics{diag.NewErrorDiagnostic("Invalid Missing Values", fmt.Sprintf("Missing Values %v is not supported", missingValuesType))} @@ -4083,77 +3851,25 @@ func flattenMissingValues(ctx 
context.Context, missingValues *cxsdk.MetricMissin return types.ObjectValueFrom(ctx, metricMissingValuesAttr(), metricMissingValuesModel) } -func flattenMetricLessThan(ctx context.Context, metricLessThan *cxsdk.MetricLessThanTypeDefinition) (types.Object, diag.Diagnostics) { - if metricLessThan == nil { - return types.ObjectNull(metricLessThanAttr()), nil - } - - metricFilter, diags := flattenMetricFilter(ctx, metricLessThan.GetMetricFilter()) - if diags.HasError() { - return types.ObjectNull(metricLessThanAttr()), diags - } - - ofTheLast, diags := flattenMetricTimeWindow(ctx, metricLessThan.GetOfTheLast()) - if diags.HasError() { - return types.ObjectNull(metricLessThanAttr()), diags - } - - missingValues, diags := flattenMissingValues(ctx, metricLessThan.GetMissingValues()) - if diags.HasError() { - return types.ObjectNull(metricLessThanAttr()), diags - } - - undetectedValuesManagement, diags := flattenUndetectedValuesManagement(ctx, metricLessThan.GetUndetectedValuesManagement()) - if diags.HasError() { - return types.ObjectNull(metricLessThanAttr()), diags - } - - metricLessThanModel := MetricLessThanModel{ - MetricFilter: metricFilter, - Threshold: wrapperspbFloat64ToTypeFloat64(metricLessThan.GetThreshold()), - ForOverPct: wrapperspbUint32ToTypeInt64(metricLessThan.GetForOverPct()), - OfTheLast: ofTheLast, - MissingValues: missingValues, - UndetectedValuesManagement: undetectedValuesManagement, - } - return types.ObjectValueFrom(ctx, metricLessThanAttr(), metricLessThanModel) -} - -func flattenLogsTimeRelativeLessThan(ctx context.Context, timeRelativeLessThan *cxsdk.LogsTimeRelativeLessThanTypeDefinition) (types.Object, diag.Diagnostics) { - if timeRelativeLessThan == nil { - return types.ObjectNull(logsTimeRelativeLessThanAttr()), nil - } - - logsFilter, diags := flattenAlertsLogsFilter(ctx, timeRelativeLessThan.GetLogsFilter()) - if diags.HasError() { - return types.ObjectNull(logsTimeRelativeLessThanAttr()), diags - } - - undetectedValuesManagement, diags := flattenUndetectedValuesManagement(ctx, timeRelativeLessThan.GetUndetectedValuesManagement()) - if diags.HasError() { - return types.ObjectNull(logsTimeRelativeLessThanAttr()), diags - } - - logsTimeRelativeLessThanModel := LogsTimeRelativeLessThanModel{ - LogsFilter: logsFilter, - NotificationPayloadFilter: wrappedStringSliceToTypeStringSet(timeRelativeLessThan.GetNotificationPayloadFilter()), - Threshold: wrapperspbUint32ToTypeInt64(timeRelativeLessThan.GetThreshold()), - ComparedTo: types.StringValue(logsTimeRelativeComparedToProtoToSchemaMap[timeRelativeLessThan.GetComparedTo()]), - IgnoreInfinity: wrapperspbBoolToTypeBool(timeRelativeLessThan.GetIgnoreInfinity()), - UndetectedValuesManagement: undetectedValuesManagement, - } - - return types.ObjectValueFrom(ctx, logsTimeRelativeLessThanAttr(), logsTimeRelativeLessThanModel) -} - -func flattenTracingImmediate(ctx context.Context, tracingImmediate *cxsdk.TracingImmediateTypeDefinition) (types.Object, diag.Diagnostics) { +func flattenTracingImmediate(ctx context.Context, tracingImmediate *cxsdk.TracingImmediateType) (types.Object, diag.Diagnostics) { if tracingImmediate == nil { return types.ObjectNull(tracingImmediateAttr()), nil } - tracingQuery, diag := flattenTracingFilter(ctx, tracingImmediate.GetTracingFilter()) - if diag.HasError() { - return types.ObjectNull(tracingImmediateAttr()), diag + var tracingQuery types.Object + + switch filtersType := tracingImmediate.TracingFilter.FilterType.(type) { + case *cxsdk.TracingFilterSimpleFilter: + filter, diag := 
flattenTracingSimpleFilter(ctx, filtersType.SimpleFilter) + if diag.HasError() { + return types.ObjectNull(tracingImmediateAttr()), diag + } + tracingQuery, diag = types.ObjectValueFrom(ctx, tracingQueryAttr(), filter) + if diag.HasError() { + return types.ObjectNull(tracingImmediateAttr()), diag + } + default: + return types.ObjectNull(tracingImmediateAttr()), diag.Diagnostics{diag.NewErrorDiagnostic("Invalid Tracing Query Filters", fmt.Sprintf("Tracing Query Filters %v is not supported", filtersType))} } tracingImmediateModel := TracingImmediateModel{ @@ -4164,15 +3880,38 @@ func flattenTracingImmediate(ctx context.Context, tracingImmediate *cxsdk.Tracin return types.ObjectValueFrom(ctx, tracingImmediateAttr(), tracingImmediateModel) } -func flattenTracingFilter(ctx context.Context, tracingQuery *cxsdk.TracingFilter) (types.Object, diag.Diagnostics) { +// Also called query filters +func flattenTracingFilter(ctx context.Context, tracingFilter *cxsdk.TracingFilter) (types.Object, diag.Diagnostics) { + switch filtersType := tracingFilter.FilterType.(type) { + case *cxsdk.TracingFilterSimpleFilter: + filter, diag := flattenTracingSimpleFilter(ctx, filtersType.SimpleFilter) + if diag.HasError() { + return types.ObjectNull(tracingQueryAttr()), diag + } + tracingQuery, diag := types.ObjectValueFrom(ctx, tracingQueryAttr(), filter) + if diag.HasError() { + return types.ObjectNull(tracingQueryAttr()), diag + } + return tracingQuery, nil + default: + return types.ObjectNull(tracingQueryAttr()), diag.Diagnostics{diag.NewErrorDiagnostic("Invalid Tracing Query Filters", fmt.Sprintf("Tracing Query Filters %v is not supported", filtersType))} + } + +} + +func flattenTracingSimpleFilter(ctx context.Context, tracingQuery *cxsdk.TracingSimpleFilter) (types.Object, diag.Diagnostics) { if tracingQuery == nil { return types.ObjectNull(tracingQueryAttr()), nil } + labelFilters, diags := flattenTracingLabelFilters(ctx, tracingQuery.TracingLabelFilters) + if diags.HasError() { + return types.ObjectNull(tracingQueryAttr()), diags + } tracingQueryModel := &TracingFilterModel{ - LatencyThresholdMs: wrapperspbUint32ToTypeInt64(tracingQuery.GetLatencyThresholdMs()), + LatencyThresholdMs: wrapperspbUint32ToTypeInt64(tracingQuery.LatencyThresholdMs), + TracingLabelFilters: labelFilters, } - tracingQueryModel, diags := flattenTracingFilterFilters(ctx, tracingQueryModel, tracingQuery) if diags.HasError() { return types.ObjectNull(tracingQueryAttr()), diags } @@ -4180,22 +3919,6 @@ func flattenTracingFilter(ctx context.Context, tracingQuery *cxsdk.TracingFilter return types.ObjectValueFrom(ctx, tracingQueryAttr(), tracingQueryModel) } -func flattenTracingFilterFilters(ctx context.Context, tracingQueryModel *TracingFilterModel, tracingQuery *cxsdk.TracingFilter) (*TracingFilterModel, diag.Diagnostics) { - if tracingQuery == nil || tracingQuery.Filters == nil { - return nil, nil - } - - var diags diag.Diagnostics - switch filtersType := tracingQuery.Filters.(type) { - case *cxsdk.TracingFilter_TracingLabelFilters: - tracingQueryModel.TracingLabelFilters, diags = flattenTracingLabelFilters(ctx, filtersType.TracingLabelFilters) - default: - return nil, diag.Diagnostics{diag.NewErrorDiagnostic("Invalid Tracing Query Filters", fmt.Sprintf("Tracing Query Filters %v is not supported", filtersType))} - } - - return tracingQueryModel, diags -} - func flattenTracingLabelFilters(ctx context.Context, filters *cxsdk.TracingLabelFilters) (types.Object, diag.Diagnostics) { if filters == nil { return 
types.ObjectNull(tracingLabelFiltersAttr()), nil @@ -4284,163 +4007,66 @@ func flattenTracingSpanField(ctx context.Context, spanField *cxsdk.TracingSpanFi }, nil } -func flattenTracingMoreThan(ctx context.Context, tracingMoreThan *cxsdk.TracingMoreThanTypeDefinition) (types.Object, diag.Diagnostics) { - if tracingMoreThan == nil { - return types.ObjectNull(tracingMoreThanAttr()), nil +func flattenTracingThreshold(ctx context.Context, tracingThreshold *cxsdk.TracingThresholdType) (types.Object, diag.Diagnostics) { + if tracingThreshold == nil { + return types.ObjectNull(tracingThresholdAttr()), nil } - tracingQuery, diags := flattenTracingFilter(ctx, tracingMoreThan.GetTracingFilter()) + tracingQuery, diags := flattenTracingFilter(ctx, tracingThreshold.GetTracingFilter()) if diags.HasError() { - return types.ObjectNull(tracingMoreThanAttr()), diags + return types.ObjectNull(tracingThresholdAttr()), diags } + rulesRaw := make([]TracingThresholdRuleModel, len(tracingThreshold.Rules)) + for i, rule := range tracingThreshold.Rules { - timeWindow, diags := flattenTracingTimeWindow(ctx, tracingMoreThan.GetTimeWindow()) - if diags.HasError() { - return types.ObjectNull(tracingMoreThanAttr()), diags - } + timeWindow := types.StringValue(tracingTimeWindowProtoToSchemaMap[rule.Condition.TimeWindow.GetTracingTimeWindowValue()]) + rulesRaw[i] = TracingThresholdRuleModel{ - tracingMoreThanModel := TracingMoreThanModel{ - TracingFilter: tracingQuery, - NotificationPayloadFilter: wrappedStringSliceToTypeStringSet(tracingMoreThan.GetNotificationPayloadFilter()), - TimeWindow: timeWindow, - SpanAmount: wrapperspbUint32ToTypeInt64(tracingMoreThan.GetSpanAmount()), + TimeWindow: timeWindow, + SpanAmount: wrapperspbDoubleToTypeFloat64(rule.Condition.SpanAmount), + } } - return types.ObjectValueFrom(ctx, tracingMoreThanAttr(), tracingMoreThanModel) -} -func flattenTracingTimeWindow(ctx context.Context, window *cxsdk.TracingTimeWindow) (types.Object, diag.Diagnostics) { - if window == nil { - return types.ObjectNull(logsTimeWindowAttr()), nil - } + rules, diags := types.ListValueFrom(ctx, types.ObjectType{AttrTypes: metricThresholdRulesAttr()}, rulesRaw) - switch timeWindowType := window.Type.(type) { - case *cxsdk.TracingTimeWindow_TracingTimeWindowValue: - return types.ObjectValueFrom(ctx, logsTimeWindowAttr(), TracingTimeWindowModel{ - SpecificValue: types.StringValue(tracingTimeWindowProtoToSchemaMap[timeWindowType.TracingTimeWindowValue]), - }) - default: - return types.ObjectNull(logsTimeWindowAttr()), diag.Diagnostics{diag.NewErrorDiagnostic("Invalid Time Window", fmt.Sprintf("Time Window %v is not supported", timeWindowType))} + tracingThresholdModel := TracingThresholdModel{ + TracingFilter: tracingQuery, + Rules: rules, + NotificationPayloadFilter: wrappedStringSliceToTypeStringSet(tracingThreshold.GetNotificationPayloadFilter()), } - + return types.ObjectValueFrom(ctx, tracingThresholdAttr(), tracingThresholdModel) } -func flattenMetricMoreThanUsual(ctx context.Context, metricMoreThanUsual *cxsdk.MetricMoreThanUsualTypeDefinition) (types.Object, diag.Diagnostics) { +func flattenMetricUnusual(ctx context.Context, metricMoreThanUsual *cxsdk.MetricUnusualType) (types.Object, diag.Diagnostics) { if metricMoreThanUsual == nil { - return types.ObjectNull(metricMoreThanUsualAttr()), nil + return types.ObjectNull(metricUnusualAttr()), nil } metricFilter, diags := flattenMetricFilter(ctx, metricMoreThanUsual.GetMetricFilter()) if diags.HasError() { - return types.ObjectNull(metricMoreThanUsualAttr()), diags - 
} - - ofTheLast, diags := flattenMetricTimeWindow(ctx, metricMoreThanUsual.GetOfTheLast()) - if diags.HasError() { - return types.ObjectNull(metricMoreThanUsualAttr()), diags - } - - metricMoreThanUsualModel := MetricMoreThanUsualModel{ - MetricFilter: metricFilter, - OfTheLast: ofTheLast, - Threshold: wrapperspbUint32ToTypeInt64(metricMoreThanUsual.GetThreshold()), - ForOverPct: wrapperspbUint32ToTypeInt64(metricMoreThanUsual.GetForOverPct()), - MinNonNullValuesPct: wrapperspbUint32ToTypeInt64(metricMoreThanUsual.GetMinNonNullValuesPct()), - } - return types.ObjectValueFrom(ctx, metricMoreThanUsualAttr(), metricMoreThanUsualModel) -} - -func flattenMetricLessThanUsual(ctx context.Context, metricLessThanUsual *cxsdk.MetricLessThanUsualTypeDefinition) (types.Object, diag.Diagnostics) { - if metricLessThanUsual == nil { - return types.ObjectNull(metricLessThanUsualAttr()), nil - } - - metricFilter, diags := flattenMetricFilter(ctx, metricLessThanUsual.GetMetricFilter()) - if diags.HasError() { - return types.ObjectNull(metricLessThanUsualAttr()), diags - } - - ofTheLast, diags := flattenMetricTimeWindow(ctx, metricLessThanUsual.GetOfTheLast()) - if diags.HasError() { - return types.ObjectNull(metricLessThanUsualAttr()), diags - } - - metricLessThanUsualModel := MetricLessThanUsualModel{ - MetricFilter: metricFilter, - OfTheLast: ofTheLast, - Threshold: wrapperspbUint32ToTypeInt64(metricLessThanUsual.GetThreshold()), - ForOverPct: wrapperspbUint32ToTypeInt64(metricLessThanUsual.GetForOverPct()), - MinNonNullValuesPct: wrapperspbUint32ToTypeInt64(metricLessThanUsual.GetMinNonNullValuesPct()), - } - return types.ObjectValueFrom(ctx, metricLessThanUsualAttr(), metricLessThanUsualModel) -} - -func flattenMetricMoreThanOrEquals(ctx context.Context, equals *cxsdk.MetricMoreThanOrEqualsTypeDefinition) (types.Object, diag.Diagnostics) { - if equals == nil { - return types.ObjectNull(metricMoreThanOrEqualsAttr()), nil - } - - metricFilter, diags := flattenMetricFilter(ctx, equals.GetMetricFilter()) - if diags.HasError() { - return types.ObjectNull(metricMoreThanOrEqualsAttr()), diags - } - - ofTheLast, diags := flattenMetricTimeWindow(ctx, equals.GetOfTheLast()) - if diags.HasError() { - return types.ObjectNull(metricMoreThanOrEqualsAttr()), diags - } - - missingValues, diags := flattenMissingValues(ctx, equals.GetMissingValues()) - if diags.HasError() { - return types.ObjectNull(metricMoreThanOrEqualsAttr()), diags - } - - metricMoreThanOrEqualsModel := MetricMoreThanOrEqualsModel{ - MetricFilter: metricFilter, - Threshold: wrapperspbFloat64ToTypeFloat64(equals.GetThreshold()), - ForOverPct: wrapperspbUint32ToTypeInt64(equals.GetForOverPct()), - OfTheLast: ofTheLast, - MissingValues: missingValues, - } - return types.ObjectValueFrom(ctx, metricMoreThanOrEqualsAttr(), metricMoreThanOrEqualsModel) -} - -func flattenMetricLessThanOrEquals(ctx context.Context, equals *cxsdk.MetricLessThanOrEqualsTypeDefinition) (types.Object, diag.Diagnostics) { - if equals == nil { - return types.ObjectNull(metricLessThanOrEqualsAttr()), nil - } - - metricFilter, diags := flattenMetricFilter(ctx, equals.GetMetricFilter()) - if diags.HasError() { - return types.ObjectNull(metricLessThanOrEqualsAttr()), diags - } - - ofTheLast, diags := flattenMetricTimeWindow(ctx, equals.GetOfTheLast()) - if diags.HasError() { - return types.ObjectNull(metricLessThanOrEqualsAttr()), diags + return types.ObjectNull(metricUnusualAttr()), diags } - missingValues, diags := flattenMissingValues(ctx, equals.GetMissingValues()) - if 
diags.HasError() { - return types.ObjectNull(metricLessThanOrEqualsAttr()), diags + rulesRaw := make([]MetricRule, len(metricMoreThanUsual.Rules)) + for i, rule := range metricMoreThanUsual.Rules { + rulesRaw[i] = MetricRule{ + OfTheLast: types.StringValue(metricFilterOperationTypeProtoToSchemaMap[rule.Condition.GetOfTheLast().GetMetricTimeWindowSpecificValue()]), + Threshold: wrapperspbDoubleToTypeFloat64(rule.Condition.GetThreshold()), + ForOverPct: wrapperspbUint32ToTypeInt64(rule.Condition.GetForOverPct()), + MinNonNullValuesPct: wrapperspbUint32ToTypeInt64(rule.Condition.GetMinNonNullValuesPct()), + } } - undetectedValuesManagement, diags := flattenUndetectedValuesManagement(ctx, equals.GetUndetectedValuesManagement()) - if diags.HasError() { - return types.ObjectNull(metricLessThanOrEqualsAttr()), diags - } + rules, diags := types.ListValueFrom(ctx, types.ObjectType{AttrTypes: metricUnusualRulesAttr()}, rulesRaw) - metricLessThanOrEqualsModel := MetricLessThanOrEqualsModel{ - MetricFilter: metricFilter, - Threshold: wrapperspbFloat64ToTypeFloat64(equals.GetThreshold()), - ForOverPct: wrapperspbUint32ToTypeInt64(equals.GetForOverPct()), - OfTheLast: ofTheLast, - MissingValues: missingValues, - UndetectedValuesManagement: undetectedValuesManagement, + metricMoreThanUsualModel := MetricUnusualModel{ + MetricFilter: metricFilter, + Rules: rules, } - return types.ObjectValueFrom(ctx, metricLessThanOrEqualsAttr(), metricLessThanOrEqualsModel) + return types.ObjectValueFrom(ctx, metricUnusualAttr(), metricMoreThanUsualModel) } -func flattenFlow(ctx context.Context, flow *cxsdk.FlowTypeDefinition) (types.Object, diag.Diagnostics) { +func flattenFlow(ctx context.Context, flow *cxsdk.FlowType) (types.Object, diag.Diagnostics) { if flow == nil { return types.ObjectNull(flowAttr()), nil } @@ -4588,20 +4214,14 @@ func alertTypeDefinitionAttr() map[string]attr.Type { "logs_immediate": types.ObjectType{ AttrTypes: logsImmediateAttr(), }, - "logs_more_than": types.ObjectType{ - AttrTypes: logsMoreThanAttr(), - }, - "logs_less_than": types.ObjectType{ - AttrTypes: logsLessThanAttr(), + "logs_threshold": types.ObjectType{ + AttrTypes: logsThresholdAttr(), }, - "logs_more_than_usual": types.ObjectType{ - AttrTypes: logsMoreThanUsualAttr(), + "logs_unusual": types.ObjectType{ + AttrTypes: logsUnusualAttr(), }, - "logs_ratio_more_than": types.ObjectType{ - AttrTypes: logsRatioMoreThanAttr(), - }, - "logs_ratio_less_than": types.ObjectType{ - AttrTypes: logsRatioLessThanAttr(), + "logs_ratio_threshold": types.ObjectType{ + AttrTypes: logsRatioThresholdAttr(), }, "logs_new_value": types.ObjectType{ AttrTypes: logsNewValueAttr(), @@ -4609,35 +4229,20 @@ func alertTypeDefinitionAttr() map[string]attr.Type { "logs_unique_count": types.ObjectType{ AttrTypes: logsUniqueCountAttr(), }, - "logs_time_relative_more_than": types.ObjectType{ - AttrTypes: logsTimeRelativeMoreThanAttr(), - }, - "logs_time_relative_less_than": types.ObjectType{ - AttrTypes: logsTimeRelativeLessThanAttr(), - }, - "metric_more_than": types.ObjectType{ - AttrTypes: metricMoreThanAttr(), - }, - "metric_less_than": types.ObjectType{ - AttrTypes: metricLessThanAttr(), + "logs_time_relative_threshold": types.ObjectType{ + AttrTypes: logsTimeRelativeAttr(), }, - "metric_more_than_usual": types.ObjectType{ - AttrTypes: metricMoreThanUsualAttr(), + "metric_threshold": types.ObjectType{ + AttrTypes: metricThresholdAttr(), }, - "metric_less_than_usual": types.ObjectType{ - AttrTypes: metricLessThanUsualAttr(), - }, - "metric_more_than_or_equals": 
types.ObjectType{ - AttrTypes: metricMoreThanOrEqualsAttr(), - }, - "metric_less_than_or_equals": types.ObjectType{ - AttrTypes: metricLessThanOrEqualsAttr(), + "metric_unusual": types.ObjectType{ + AttrTypes: metricUnusualAttr(), }, "tracing_immediate": types.ObjectType{ AttrTypes: tracingImmediateAttr(), }, - "tracing_more_than": types.ObjectType{ - AttrTypes: tracingMoreThanAttr(), + "tracing_threshold": types.ObjectType{ + AttrTypes: tracingThresholdAttr(), }, "flow": types.ObjectType{ AttrTypes: flowAttr(), @@ -4645,41 +4250,6 @@ func alertTypeDefinitionAttr() map[string]attr.Type { } } -func metricLessThanOrEqualsAttr() map[string]attr.Type { - return map[string]attr.Type{ - "metric_filter": types.ObjectType{ - AttrTypes: metricFilterAttr(), - }, - "threshold": types.Int64Type, - "for_over_pct": types.Int64Type, - "of_the_last": types.ObjectType{ - AttrTypes: metricTimeWindowAttr(), - }, - "missing_values": types.ObjectType{ - AttrTypes: metricMissingValuesAttr(), - }, - "undetected_values_management": types.ObjectType{ - AttrTypes: undetectedValuesManagementAttr(), - }, - } -} - -func metricMoreThanOrEqualsAttr() map[string]attr.Type { - return map[string]attr.Type{ - "metric_filter": types.ObjectType{ - AttrTypes: metricFilterAttr(), - }, - "threshold": types.Int64Type, - "for_over_pct": types.Int64Type, - "of_the_last": types.ObjectType{ - AttrTypes: metricTimeWindowAttr(), - }, - "missing_values": types.ObjectType{ - AttrTypes: metricMissingValuesAttr(), - }, - } -} - func logsImmediateAttr() map[string]attr.Type { return map[string]attr.Type{ "logs_filter": types.ObjectType{ @@ -4726,13 +4296,19 @@ func labelFiltersAttr() map[string]attr.Type { } } -func logsMoreThanAttr() map[string]attr.Type { +func logsThresholdAttr() map[string]attr.Type { return map[string]attr.Type{ - "logs_filter": types.ObjectType{AttrTypes: logsFilterAttr()}, - "threshold": types.Int64Type, - "time_window": types.ObjectType{AttrTypes: logsTimeWindowAttr()}, - "evaluation_window": types.StringType, - "notification_payload_filter": types.SetType{ElemType: types.StringType}, + "logs_filter": types.ObjectType{AttrTypes: logsFilterAttr()}, + "notification_payload_filter": types.SetType{ElemType: types.StringType}, + "rules": types.ListType{ElemType: types.ObjectType{AttrTypes: logsThresholdRulesAttr()}}, + "undetected_values_management": types.ObjectType{AttrTypes: undetectedValuesManagementAttr()}, + } +} + +func logsThresholdRulesAttr() map[string]attr.Type { + return map[string]attr.Type{ + "threshold": types.Float64Type, + "time_window": types.StringType, } } @@ -4742,15 +4318,28 @@ func logsTimeWindowAttr() map[string]attr.Type { } } -func logsRatioMoreThanAttr() map[string]attr.Type { +func logsUnusualAttr() map[string]attr.Type { + return map[string]attr.Type{ + "logs_filter": types.ObjectType{AttrTypes: logsFilterAttr()}, + "rules": types.ListType{ElemType: types.ObjectType{AttrTypes: logsUnusualRulesAttr()}}, + "notification_payload_filter": types.SetType{ElemType: types.StringType}, + } +} + +func logsUnusualRulesAttr() map[string]attr.Type { + return map[string]attr.Type{ + "minimum_threshold": types.Float64Type, + "time_window": types.StringType, + } +} + +func logsRatioThresholdAttr() map[string]attr.Type { return map[string]attr.Type{ - "numerator_logs_filter": types.ObjectType{AttrTypes: logsFilterAttr()}, - "numerator_alias": types.StringType, - "denominator_logs_filter": types.ObjectType{AttrTypes: logsFilterAttr()}, - "denominator_alias": types.StringType, - "threshold": types.Int64Type, - 
"time_window": types.ObjectType{AttrTypes: logsTimeWindowAttr()}, - "ignore_infinity": types.BoolType, + "numerator": types.ObjectType{AttrTypes: logsFilterAttr()}, + "numerator_alias": types.StringType, + "denominator": types.ObjectType{AttrTypes: logsFilterAttr()}, + "denominator_alias": types.StringType, + "rules": types.ListType{ElemType: types.ObjectType{AttrTypes: logsThresholdRulesAttr()}}, "notification_payload_filter": types.SetType{ ElemType: types.StringType, }, @@ -4758,39 +4347,26 @@ func logsRatioMoreThanAttr() map[string]attr.Type { } } -func logsRatioLessThanAttr() map[string]attr.Type { +func logsRatioThresholdRulesAttr() map[string]attr.Type { return map[string]attr.Type{ - "numerator_logs_filter": types.ObjectType{AttrTypes: logsFilterAttr()}, - "numerator_alias": types.StringType, - "denominator_logs_filter": types.ObjectType{AttrTypes: logsFilterAttr()}, - "denominator_alias": types.StringType, - "threshold": types.Int64Type, - "time_window": types.ObjectType{AttrTypes: logsTimeWindowAttr()}, - "ignore_infinity": types.BoolType, - "notification_payload_filter": types.SetType{ - ElemType: types.StringType, - }, - "group_by_for": types.StringType, - "undetected_values_management": types.ObjectType{AttrTypes: undetectedValuesManagementAttr()}, + "threshold": types.Float64Type, + "time_window": types.StringType, + "ignore_infinity": types.BoolType, } } -func logsMoreThanUsualAttr() map[string]attr.Type { +func logsNewValueAttr() map[string]attr.Type { return map[string]attr.Type{ "logs_filter": types.ObjectType{AttrTypes: logsFilterAttr()}, - "minimum_threshold": types.Int64Type, - "time_window": types.ObjectType{AttrTypes: logsTimeWindowAttr()}, + "rules": types.ListType{ElemType: types.ObjectType{AttrTypes: logsNewValueRulesAttr()}}, "notification_payload_filter": types.SetType{ElemType: types.StringType}, } } -func logsLessThanAttr() map[string]attr.Type { +func logsNewValueRulesAttr() map[string]attr.Type { return map[string]attr.Type{ - "logs_filter": types.ObjectType{AttrTypes: logsFilterAttr()}, - "threshold": types.Int64Type, - "time_window": types.ObjectType{AttrTypes: logsTimeWindowAttr()}, - "undetected_values_management": types.ObjectType{AttrTypes: undetectedValuesManagementAttr()}, - "notification_payload_filter": types.SetType{ElemType: types.StringType}, + "keypath_to_track": types.StringType, + "time_window": types.StringType, } } @@ -4801,6 +4377,23 @@ func undetectedValuesManagementAttr() map[string]attr.Type { } } +func logsUniqueCountAttr() map[string]attr.Type { + return map[string]attr.Type{ + "logs_filter": types.ObjectType{AttrTypes: logsFilterAttr()}, + "notification_payload_filter": types.SetType{ElemType: types.StringType}, + "rules": types.ListType{ElemType: types.ObjectType{AttrTypes: logsUniqueCountRulesAttr()}}, + } +} + +func logsUniqueCountRulesAttr() map[string]attr.Type { + return map[string]attr.Type{ + "time_window": types.StringType, + "unique_count_keypath": types.StringType, + "max_unique_count": types.Int64Type, + "max_unique_count_per_group_by_key": types.Int64Type, + } +} + func alertScheduleAttr() map[string]attr.Type { return map[string]attr.Type{ "active_on": types.ObjectType{ @@ -4830,33 +4423,38 @@ func timeOfDayAttr() map[string]attr.Type { } } -func logsNewValueAttr() map[string]attr.Type { +func logsTimeRelativeAttr() map[string]attr.Type { return map[string]attr.Type{ - "logs_filter": types.ObjectType{AttrTypes: logsFilterAttr()}, - "keypath_to_track": types.StringType, - "time_window": types.ObjectType{AttrTypes: 
logsTimeWindowAttr()}, - "notification_payload_filter": types.SetType{ElemType: types.StringType}, + "logs_filter": types.ObjectType{AttrTypes: logsFilterAttr()}, + "notification_payload_filter": types.SetType{ElemType: types.StringType}, + "undetected_values_management": types.ObjectType{AttrTypes: undetectedValuesManagementAttr()}, + "rules": types.ListType{ElemType: types.ObjectType{AttrTypes: logsTimeRelativeRulesAttr()}}, } } -func logsUniqueCountAttr() map[string]attr.Type { +func logsTimeRelativeRulesAttr() map[string]attr.Type { return map[string]attr.Type{ - "logs_filter": types.ObjectType{AttrTypes: logsFilterAttr()}, - "unique_count_keypath": types.StringType, - "max_unique_count": types.Int64Type, - "time_window": types.ObjectType{AttrTypes: logsTimeWindowAttr()}, - "notification_payload_filter": types.SetType{ElemType: types.StringType}, - "max_unique_count_per_group_by_key": types.Int64Type, + "threshold": types.Float64Type, + "compared_to": types.StringType, + "ignore_infinity": types.BoolType, } } -func metricMoreThanAttr() map[string]attr.Type { +func metricThresholdAttr() map[string]attr.Type { + return map[string]attr.Type{ + "metric_filter": types.ObjectType{AttrTypes: metricFilterAttr()}, + "undetected_values_management": types.ObjectType{AttrTypes: undetectedValuesManagementAttr()}, + "rules": types.ListType{ElemType: types.ObjectType{AttrTypes: metricThresholdRulesAttr()}}, + } +} + +func metricThresholdRulesAttr() map[string]attr.Type { return map[string]attr.Type{ - "metric_filter": types.ObjectType{AttrTypes: metricFilterAttr()}, "threshold": types.Float64Type, "for_over_pct": types.Int64Type, "of_the_last": types.ObjectType{AttrTypes: metricTimeWindowAttr()}, "missing_values": types.ObjectType{AttrTypes: metricMissingValuesAttr()}, + "condition": types.StringType, } } @@ -4879,13 +4477,42 @@ func metricMissingValuesAttr() map[string]attr.Type { } } -func metricLessThanUsualAttr() map[string]attr.Type { +func metricUnusualAttr() map[string]attr.Type { return map[string]attr.Type{ - "metric_filter": types.ObjectType{AttrTypes: metricFilterAttr()}, - "of_the_last": types.ObjectType{AttrTypes: metricTimeWindowAttr()}, - "threshold": types.Int64Type, + "metric_filter": types.ObjectType{AttrTypes: metricFilterAttr()}, + "rules": types.ListType{ElemType: types.ObjectType{AttrTypes: metricUnusualRulesAttr()}}, + } +} + +func metricUnusualRulesAttr() map[string]attr.Type { + return map[string]attr.Type{ + "threshold": types.Float64Type, "for_over_pct": types.Int64Type, - "min_non_null_values_pct": types.Int64Type, + "of_the_last": types.ObjectType{AttrTypes: metricTimeWindowAttr()}, + "min_non_null_values_pct": types.ObjectType{AttrTypes: metricMissingValuesAttr()}, + "condition": types.StringType, + } +} + +func tracingImmediateAttr() map[string]attr.Type { + return map[string]attr.Type{ + "tracing_filter": types.ObjectType{AttrTypes: tracingQueryAttr()}, + "notification_payload_filter": types.SetType{ElemType: types.StringType}, + } +} + +func tracingThresholdAttr() map[string]attr.Type { + return map[string]attr.Type{ + "tracing_filter": types.ObjectType{AttrTypes: tracingQueryAttr()}, + "notification_payload_filter": types.SetType{ElemType: types.StringType}, + "rules": types.ListType{ElemType: types.ObjectType{AttrTypes: tracingThresholdRulesAttr()}}, + } +} + +func tracingThresholdRulesAttr() map[string]attr.Type { + return map[string]attr.Type{ + "span_amount": types.Float64Type, + "time_window": types.StringType, } } @@ -4931,64 +4558,6 @@ func alertDefsAttr() 
map[string]attr.Type { } } -func tracingMoreThanAttr() map[string]attr.Type { - return map[string]attr.Type{ - "tracing_filter": types.ObjectType{AttrTypes: tracingQueryAttr()}, - "notification_payload_filter": types.SetType{ElemType: types.StringType}, - "time_window": types.ObjectType{AttrTypes: logsTimeWindowAttr()}, - "span_amount": types.Int64Type, - } -} - -func tracingImmediateAttr() map[string]attr.Type { - return map[string]attr.Type{ - "tracing_filter": types.ObjectType{AttrTypes: tracingQueryAttr()}, - "notification_payload_filter": types.SetType{ElemType: types.StringType}, - } -} - -func metricMoreThanUsualAttr() map[string]attr.Type { - return map[string]attr.Type{ - "metric_filter": types.ObjectType{AttrTypes: metricFilterAttr()}, - "of_the_last": types.ObjectType{AttrTypes: metricTimeWindowAttr()}, - "threshold": types.Int64Type, - "for_over_pct": types.Int64Type, - "min_non_null_values_pct": types.Int64Type, - } -} - -func metricLessThanAttr() map[string]attr.Type { - return map[string]attr.Type{ - "metric_filter": types.ObjectType{AttrTypes: metricFilterAttr()}, - "threshold": types.Float64Type, - "for_over_pct": types.Int64Type, - "of_the_last": types.ObjectType{AttrTypes: metricTimeWindowAttr()}, - "missing_values": types.ObjectType{AttrTypes: metricMissingValuesAttr()}, - "undetected_values_management": types.ObjectType{AttrTypes: undetectedValuesManagementAttr()}, - } -} - -func logsTimeRelativeLessThanAttr() map[string]attr.Type { - return map[string]attr.Type{ - "logs_filter": types.ObjectType{AttrTypes: logsFilterAttr()}, - "threshold": types.Int64Type, - "notification_payload_filter": types.SetType{ElemType: types.StringType}, - "compared_to": types.StringType, - "ignore_infinity": types.BoolType, - "undetected_values_management": types.ObjectType{AttrTypes: undetectedValuesManagementAttr()}, - } -} - -func logsTimeRelativeMoreThanAttr() map[string]attr.Type { - return map[string]attr.Type{ - "logs_filter": types.ObjectType{AttrTypes: logsFilterAttr()}, - "notification_payload_filter": types.SetType{ElemType: types.StringType}, - "threshold": types.Int64Type, - "compared_to": types.StringType, - "ignore_infinity": types.BoolType, - } -} - func tracingQueryAttr() map[string]attr.Type { return map[string]attr.Type{ "latency_threshold_ms": types.Int64Type, From 1257ed12a8d6517bce8d1e439172d341133de3df Mon Sep 17 00:00:00 2001 From: Claus Matzinger Date: Tue, 24 Sep 2024 09:46:36 +0200 Subject: [PATCH 06/12] feat: alerts v3 --- coralogix/data_source_coralogix_alert.go | 110 +++++++++++------- .../data_source_coralogix_alert_test._go | 55 +++++++++ coralogix/data_source_coralogix_alert_test.go | 35 +----- coralogix/data_source_coralogix_alertv3.go | 92 --------------- .../data_source_coralogix_alertv3_test.go | 32 ----- coralogix/resource_coralogix_action.go | 4 +- coralogix/resource_coralogix_alert_test.go | 9 +- coralogix/resource_coralogix_api_key.go | 2 +- coralogix/resource_coralogix_group.go | 22 ++-- coralogix/resource_coralogix_rules_group.go | 8 +- coralogix/utils.go | 24 ---- go.mod | 2 +- 12 files changed, 153 insertions(+), 242 deletions(-) create mode 100644 coralogix/data_source_coralogix_alert_test._go delete mode 100644 coralogix/data_source_coralogix_alertv3.go delete mode 100644 coralogix/data_source_coralogix_alertv3_test.go diff --git a/coralogix/data_source_coralogix_alert.go b/coralogix/data_source_coralogix_alert.go index 7ea04d01..b6331319 100644 --- a/coralogix/data_source_coralogix_alert.go +++ b/coralogix/data_source_coralogix_alert.go @@ 
-1,64 +1,90 @@ -// Copyright 2024 Coralogix Ltd. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// https://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - package coralogix import ( "context" + "fmt" "log" + cxsdk "github.com/coralogix/coralogix-management-sdk/go" + "github.com/hashicorp/terraform-plugin-framework/datasource" + "github.com/hashicorp/terraform-plugin-framework/resource" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" "google.golang.org/protobuf/encoding/protojson" - - "terraform-provider-coralogix/coralogix/clientset" - alertsv1 "terraform-provider-coralogix/coralogix/clientset/grpc/alerts/v2" - - "github.com/hashicorp/terraform-plugin-sdk/v2/diag" - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "google.golang.org/protobuf/types/known/wrapperspb" ) -func dataSourceCoralogixAlert() *schema.Resource { - alertSchema := datasourceSchemaFromResourceSchema(AlertSchema()) - alertSchema["id"] = &schema.Schema{ - Type: schema.TypeString, - Required: true, - } +var _ datasource.DataSourceWithConfigure = &AlertDataSource{} - return &schema.Resource{ - ReadContext: dataSourceCoralogixAlertRead, +func NewAlertDataSource() datasource.DataSource { + return &AlertDataSource{} +} + +type AlertDataSource struct { + client *cxsdk.AlertsClient +} + +func (d *AlertDataSource) Metadata(_ context.Context, req datasource.MetadataRequest, resp *datasource.MetadataResponse) { + resp.TypeName = req.ProviderTypeName + "_alert" +} - Schema: alertSchema, +func (d *AlertDataSource) Configure(_ context.Context, req datasource.ConfigureRequest, resp *datasource.ConfigureResponse) { + if req.ProviderData == nil { + return } + + clientSet, ok := req.ProviderData.(*cxsdk.ClientSet) + if !ok { + resp.Diagnostics.AddError( + "Unexpected Resource Configure Type", + fmt.Sprintf("Expected *clientset.ClientSet, got: %T. Please report this issue to the provider developers.", req.ProviderData), + ) + return + } + + d.client = clientSet.Alerts() +} + +func (d *AlertDataSource) Schema(ctx context.Context, _ datasource.SchemaRequest, resp *datasource.SchemaResponse) { + var r AlertResource + var resourceResp resource.SchemaResponse + r.Schema(ctx, resource.SchemaRequest{}, &resourceResp) + + resp.Schema = frameworkDatasourceSchemaFromFrameworkResourceSchema(resourceResp.Schema) } -func dataSourceCoralogixAlertRead(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { - id := wrapperspb.String(d.Get("id").(string)) - getAlertRequest := &alertsv1.GetAlertByUniqueIdRequest{ - Id: id, +func (d *AlertDataSource) Read(ctx context.Context, req datasource.ReadRequest, resp *datasource.ReadResponse) { + var data *AlertResourceModel + resp.Diagnostics.Append(req.Config.Get(ctx, &data)...) 
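+	// Stop early if the data source configuration could not be decoded into the model.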
+ if resp.Diagnostics.HasError() { + return } - log.Printf("[INFO] Reading alert %s", id) - alertResp, err := meta.(*clientset.ClientSet).Alerts().GetAlert(ctx, getAlertRequest) + // Get refreshed Alert value from Coralogix + id := data.ID.ValueString() + log.Printf("[INFO] Reading Alert: %s", id) + getAlertReq := &cxsdk.GetAlertDefRequest{Id: wrapperspb.String(id)} + getAlertResp, err := d.client.Get(ctx, getAlertReq) if err != nil { - reqStr := protojson.Format(getAlertRequest) log.Printf("[ERROR] Received error: %s", err.Error()) - return diag.Errorf(formatRpcErrors(err, getAlertURL, reqStr)) + if status.Code(err) == codes.NotFound { + resp.Diagnostics.AddWarning(err.Error(), + fmt.Sprintf("Alert %q is in state, but no longer exists in Coralogix backend", id)) + } else { + resp.Diagnostics.AddError( + "Error reading Alert", + formatRpcErrors(err, getAlertURL, protojson.Format(getAlertReq)), + ) + } + return } - alert := alertResp.GetAlert() - log.Printf("[INFO] Received alert: %s", protojson.Format(alert)) + log.Printf("[INFO] Received Alert: %s", protojson.Format(getAlertResp)) - d.SetId(alert.GetId().GetValue()) - - return setAlert(d, alert) + data, diags := flattenAlert(ctx, getAlertResp.GetAlertDef()) + if diags.HasError() { + resp.Diagnostics.Append(diags...) + return + } + // Save data into Terraform state + resp.Diagnostics.Append(resp.State.Set(ctx, &data)...) } diff --git a/coralogix/data_source_coralogix_alert_test._go b/coralogix/data_source_coralogix_alert_test._go new file mode 100644 index 00000000..9a604dde --- /dev/null +++ b/coralogix/data_source_coralogix_alert_test._go @@ -0,0 +1,55 @@ +// Copyright 2024 Coralogix Ltd. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package coralogix + +import ( + "testing" + + "github.com/hashicorp/terraform-plugin-testing/helper/acctest" + "github.com/hashicorp/terraform-plugin-testing/helper/resource" +) + +var alertDataSourceName = "data." 
+ alertResourceName + +func TestAccCoralogixDataSourceAlert_basic(t *testing.T) { + alert := standardAlertTestParams{ + alertCommonTestParams: *getRandomAlert(), + groupBy: []string{"EventType"}, + occurrencesThreshold: acctest.RandIntRange(1, 1000), + timeWindow: selectRandomlyFromSlice(alertValidTimeFrames), + deadmanRatio: selectRandomlyFromSlice(alertValidDeadmanRatioValues), + } + + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + ProviderFactories: testAccProviderFactories, + Steps: []resource.TestStep{ + { + Config: testAccCoralogixResourceAlertStandard(&alert) + + testAccCoralogixDataSourceAlert_read(), + Check: resource.ComposeAggregateTestCheckFunc( + resource.TestCheckResourceAttr(alertDataSourceName, "name", alert.name), + ), + }, + }, + }) +} + +func testAccCoralogixDataSourceAlert_read() string { + return `data "coralogix_alert" "test" { + id = coralogix_alert.test.id +} +` +} diff --git a/coralogix/data_source_coralogix_alert_test.go b/coralogix/data_source_coralogix_alert_test.go index 9a604dde..362290c5 100644 --- a/coralogix/data_source_coralogix_alert_test.go +++ b/coralogix/data_source_coralogix_alert_test.go @@ -1,52 +1,29 @@ -// Copyright 2024 Coralogix Ltd. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// https://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - package coralogix import ( "testing" - "github.com/hashicorp/terraform-plugin-testing/helper/acctest" "github.com/hashicorp/terraform-plugin-testing/helper/resource" ) var alertDataSourceName = "data." 
+ alertResourceName -func TestAccCoralogixDataSourceAlert_basic(t *testing.T) { - alert := standardAlertTestParams{ - alertCommonTestParams: *getRandomAlert(), - groupBy: []string{"EventType"}, - occurrencesThreshold: acctest.RandIntRange(1, 1000), - timeWindow: selectRandomlyFromSlice(alertValidTimeFrames), - deadmanRatio: selectRandomlyFromSlice(alertValidDeadmanRatioValues), - } - +func TestAccCoralogixDataSourceAlert(t *testing.T) { resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - ProviderFactories: testAccProviderFactories, + PreCheck: func() { testAccPreCheck(t) }, + ProtoV6ProviderFactories: testAccProtoV6ProviderFactories, + CheckDestroy: testAccCheckActionDestroy, Steps: []resource.TestStep{ { - Config: testAccCoralogixResourceAlertStandard(&alert) + + Config: testAccCoralogixResourceAlertLogsImmediate() + testAccCoralogixDataSourceAlert_read(), Check: resource.ComposeAggregateTestCheckFunc( - resource.TestCheckResourceAttr(alertDataSourceName, "name", alert.name), + resource.TestCheckResourceAttr(alertDataSourceName, "name", "logs immediate alert"), ), }, }, }) } - func testAccCoralogixDataSourceAlert_read() string { return `data "coralogix_alert" "test" { id = coralogix_alert.test.id diff --git a/coralogix/data_source_coralogix_alertv3.go b/coralogix/data_source_coralogix_alertv3.go deleted file mode 100644 index d604eb95..00000000 --- a/coralogix/data_source_coralogix_alertv3.go +++ /dev/null @@ -1,92 +0,0 @@ -package coralogix - -import ( - "context" - "fmt" - "log" - - "terraform-provider-coralogix/coralogix/clientset" - alerts "terraform-provider-coralogix/coralogix/clientset/grpc/alerts/v3" - - "github.com/hashicorp/terraform-plugin-framework/datasource" - "github.com/hashicorp/terraform-plugin-framework/resource" - "google.golang.org/grpc/codes" - "google.golang.org/grpc/status" - "google.golang.org/protobuf/encoding/protojson" - "google.golang.org/protobuf/types/known/wrapperspb" -) - -var _ datasource.DataSourceWithConfigure = &AlertDataSource{} - -func NewAlertDataSource() datasource.DataSource { - return &AlertDataSource{} -} - -type AlertDataSource struct { - client *clientset.AlertsClient -} - -func (d *AlertDataSource) Metadata(_ context.Context, req datasource.MetadataRequest, resp *datasource.MetadataResponse) { - resp.TypeName = req.ProviderTypeName + "_alertv3" -} - -func (d *AlertDataSource) Configure(_ context.Context, req datasource.ConfigureRequest, resp *datasource.ConfigureResponse) { - if req.ProviderData == nil { - return - } - - clientSet, ok := req.ProviderData.(*clientset.ClientSet) - if !ok { - resp.Diagnostics.AddError( - "Unexpected Resource Configure Type", - fmt.Sprintf("Expected *clientset.ClientSet, got: %T. Please report this issue to the provider developers.", req.ProviderData), - ) - return - } - - d.client = clientSet.Alerts() -} - -func (d *AlertDataSource) Schema(ctx context.Context, _ datasource.SchemaRequest, resp *datasource.SchemaResponse) { - var r AlertV3Resource - var resourceResp resource.SchemaResponse - r.Schema(ctx, resource.SchemaRequest{}, &resourceResp) - - resp.Schema = frameworkDatasourceSchemaFromFrameworkResourceSchema(resourceResp.Schema) -} - -func (d *AlertDataSource) Read(ctx context.Context, req datasource.ReadRequest, resp *datasource.ReadResponse) { - var data *AlertV3ResourceModel - resp.Diagnostics.Append(req.Config.Get(ctx, &data)...) 
- if resp.Diagnostics.HasError() { - return - } - - //Get refreshed Alert value from Coralogix - id := data.ID.ValueString() - log.Printf("[INFO] Reading Alert: %s", id) - getAlertReq := &alerts.GetAlertDefRequest{Id: wrapperspb.String(id)} - getAlertResp, err := d.client.GetAlert(ctx, getAlertReq) - if err != nil { - log.Printf("[ERROR] Received error: %s", err.Error()) - if status.Code(err) == codes.NotFound { - resp.Diagnostics.AddWarning(err.Error(), - fmt.Sprintf("Alert %q is in state, but no longer exists in Coralogix backend", id)) - } else { - resp.Diagnostics.AddError( - "Error reading Alert", - formatRpcErrors(err, getAlertURL, protojson.Format(getAlertReq)), - ) - } - return - } - log.Printf("[INFO] Received Alert: %s", protojson.Format(getAlertResp)) - - data, diags := flattenAlert(ctx, getAlertResp.GetAlertDef()) - if diags.HasError() { - resp.Diagnostics.Append(diags...) - return - } - // Save data into Terraform state - resp.Diagnostics.Append(resp.State.Set(ctx, &data)...) -} diff --git a/coralogix/data_source_coralogix_alertv3_test.go b/coralogix/data_source_coralogix_alertv3_test.go deleted file mode 100644 index 362290c5..00000000 --- a/coralogix/data_source_coralogix_alertv3_test.go +++ /dev/null @@ -1,32 +0,0 @@ -package coralogix - -import ( - "testing" - - "github.com/hashicorp/terraform-plugin-testing/helper/resource" -) - -var alertDataSourceName = "data." + alertResourceName - -func TestAccCoralogixDataSourceAlert(t *testing.T) { - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - ProtoV6ProviderFactories: testAccProtoV6ProviderFactories, - CheckDestroy: testAccCheckActionDestroy, - Steps: []resource.TestStep{ - { - Config: testAccCoralogixResourceAlertLogsImmediate() + - testAccCoralogixDataSourceAlert_read(), - Check: resource.ComposeAggregateTestCheckFunc( - resource.TestCheckResourceAttr(alertDataSourceName, "name", "logs immediate alert"), - ), - }, - }, - }) -} -func testAccCoralogixDataSourceAlert_read() string { - return `data "coralogix_alert" "test" { - id = coralogix_alert.test.id -} -` -} diff --git a/coralogix/resource_coralogix_action.go b/coralogix/resource_coralogix_action.go index bca34015..0e492a78 100644 --- a/coralogix/resource_coralogix_action.go +++ b/coralogix/resource_coralogix_action.go @@ -42,8 +42,8 @@ var ( _ resource.ResourceWithConfigure = &ActionResource{} _ resource.ResourceWithImportState = &ActionResource{} actionSchemaSourceTypeToProtoSourceType = map[string]cxsdk.SourceType{ - "Log": cxsdk.SourceTypeSourceTypeLog, - "DataMap": cxsdk.SourceTypeSourceTypeDataMap, + "Log": cxsdk.SourceTypeLog, + "DataMap": cxsdk.SourceTypeDataMap, } actionProtoSourceTypeToSchemaSourceType = ReverseMap(actionSchemaSourceTypeToProtoSourceType) actionValidSourceTypes = GetKeys(actionSchemaSourceTypeToProtoSourceType) diff --git a/coralogix/resource_coralogix_alert_test.go b/coralogix/resource_coralogix_alert_test.go index 3ef8db30..78704d54 100644 --- a/coralogix/resource_coralogix_alert_test.go +++ b/coralogix/resource_coralogix_alert_test.go @@ -5,11 +5,12 @@ import ( "fmt" "testing" + "terraform-provider-coralogix/coralogix/clientset" + + cxsdk "github.com/coralogix/coralogix-management-sdk/go" "github.com/hashicorp/terraform-plugin-testing/helper/resource" "github.com/hashicorp/terraform-plugin-testing/terraform" "google.golang.org/protobuf/types/known/wrapperspb" - "terraform-provider-coralogix/coralogix/clientset" - alertsv3 "terraform-provider-coralogix/coralogix/clientset/grpc/alerts/v3" ) var alertResourceName = 
"coralogix_alert.test" @@ -1312,13 +1313,13 @@ func testAccCheckAlertDestroy(s *terraform.State) error { continue } - req := &alertsv3.GetAlertDefRequest{ + req := &cxsdk.GetAlertDefRequest{ Id: wrapperspb.String(rs.Primary.ID), } resp, err := client.GetAlert(ctx, req) if err == nil { - if resp.GetAlertDef().Id.Value == rs.Primary.ID { + if resp.GetAlert().Id.Value == rs.Primary.ID { return fmt.Errorf("alert still exists: %s", rs.Primary.ID) } } diff --git a/coralogix/resource_coralogix_api_key.go b/coralogix/resource_coralogix_api_key.go index 28c9ea09..f09d2bee 100644 --- a/coralogix/resource_coralogix_api_key.go +++ b/coralogix/resource_coralogix_api_key.go @@ -518,7 +518,7 @@ func makeCreateApiKeyRequest(ctx context.Context, apiKeyModel *ApiKeyModel) (*cx return &cxsdk.CreateAPIKeyRequest{ Name: apiKeyModel.Name.ValueString(), Owner: &owner, - KeyPermissions: &cxsdk.APIKeyPermissionsCreate{ + KeyPermissions: &cxsdk.APIKeyPermissions{ Presets: presets, Permissions: permissions, }, diff --git a/coralogix/resource_coralogix_group.go b/coralogix/resource_coralogix_group.go index b2d4cefd..fe4378f5 100644 --- a/coralogix/resource_coralogix_group.go +++ b/coralogix/resource_coralogix_group.go @@ -128,7 +128,7 @@ func (r *GroupResource) Create(ctx context.Context, req resource.CreateRequest, ) return } - getResp, err := r.client.GetGroup(ctx, createResp.ID) + getResp, err := r.client.Get(ctx, createResp.GroupId) groupStr, _ = json.Marshal(getResp) log.Printf("[INFO] Getting group: %s", groupStr) state, diags := flattenSCIMGroup(getResp) @@ -304,11 +304,11 @@ func (r *GroupResource) Delete(ctx context.Context, req resource.DeleteRequest, } type GroupResourceModel struct { - ID types.String `tfsdk:"id"` - DisplayName types.String `tfsdk:"display_name"` - Members types.Set `tfsdk:"members"` // Set of strings - Role types.String `tfsdk:"role"` - ScopeID types.String `tfsdk:"scope_id"` + ID types.String `tfsdk:"id"` + Name types.String `tfsdk:"display_name"` + Members types.Set `tfsdk:"members"` // Set of strings + Role types.String `tfsdk:"role"` + ScopeID types.String `tfsdk:"scope_id"` } func extractGroup(ctx context.Context, plan *GroupResourceModel) (*cxsdk.Group, diag.Diagnostics) { @@ -317,11 +317,11 @@ func extractGroup(ctx context.Context, plan *GroupResourceModel) (*cxsdk.Group, return nil, diags } - return &cxsdk.SCIMGroup{ - DisplayName: plan.DisplayName.ValueString(), - Members: members, - Role: plan.Role.ValueString(), - ScopeID: plan.ScopeID.ValueString(), + return &cxsdk.TeamGroup{ + Name: plan.Name.ValueString(), + Members: members, + Role: plan.Role.ValueString(), + ScopeID: plan.ScopeID.ValueString(), }, nil } diff --git a/coralogix/resource_coralogix_rules_group.go b/coralogix/resource_coralogix_rules_group.go index 500a3ce2..04162395 100644 --- a/coralogix/resource_coralogix_rules_group.go +++ b/coralogix/resource_coralogix_rules_group.go @@ -43,8 +43,8 @@ var ( "Error": "VALUE_ERROR", "Critical": "VALUE_CRITICAL", } - rulesProtoSeverityToSchemaSeverity = reverseMapStrings(rulesSchemaSeverityToProtoSeverity) - rulesValidSeverities = getKeysStrings(rulesSchemaSeverityToProtoSeverity) + rulesProtoSeverityToSchemaSeverity = ReverseMap(rulesSchemaSeverityToProtoSeverity) + rulesValidSeverities = GetKeys(rulesSchemaSeverityToProtoSeverity) rulesSchemaDestinationFieldToProtoDestinationField = map[string]rulesv1.JsonExtractParameters_DestinationField{ "Category": rulesv1.JsonExtractParameters_DESTINATION_FIELD_CATEGORY_OR_UNSPECIFIED, "Class": 
rulesv1.JsonExtractParameters_DESTINATION_FIELD_CLASSNAME, @@ -64,8 +64,8 @@ var ( "MicroTS": "FORMAT_STANDARD_MICROTS", "NanoTS": "FORMAT_STANDARD_NANOTS", } - rulesProtoFormatStandardToSchemaFormatStandard = reverseMapStrings(rulesSchemaFormatStandardToProtoFormatStandard) - rulesValidFormatStandards = getKeysStrings(rulesSchemaFormatStandardToProtoFormatStandard) + rulesProtoFormatStandardToSchemaFormatStandard = ReverseMap(rulesSchemaFormatStandardToProtoFormatStandard) + rulesValidFormatStandards = GetKeys(rulesSchemaFormatStandardToProtoFormatStandard) createParsingRuleURL = "com.coralogix.rules.v1.RuleGroupsService/CreateRuleGroup" getParsingRuleURL = "com.coralogix.rules.v1.RuleGroupsService/GetRuleGroup" updateParsingRuleURL = "com.coralogix.rules.v1.RuleGroupsService/UpdateRuleGroup" diff --git a/coralogix/utils.go b/coralogix/utils.go index 5aff0b6d..fc5ca52f 100644 --- a/coralogix/utils.go +++ b/coralogix/utils.go @@ -569,30 +569,6 @@ func getKeysInt32(m map[string]int32) []string { return result } -func getKeysRelativeTimeFrame(m map[string]protoTimeFrameAndRelativeTimeFrame) []string { - result := make([]string, 0) - for k := range m { - result = append(result, k) - } - return result -} - -func reverseMapStrings(m map[string]string) map[string]string { - n := make(map[string]string) - for k, v := range m { - n[v] = k - } - return n -} - -func reverseMapRelativeTimeFrame(m map[string]protoTimeFrameAndRelativeTimeFrame) map[protoTimeFrameAndRelativeTimeFrame]string { - n := make(map[protoTimeFrameAndRelativeTimeFrame]string) - for k, v := range m { - n[v] = k - } - return n -} - func strToUint32(str string) uint32 { n, _ := strconv.ParseUint(str, 10, 32) return uint32(n) diff --git a/go.mod b/go.mod index 4f37c249..6d3f5c38 100644 --- a/go.mod +++ b/go.mod @@ -1,6 +1,6 @@ module terraform-provider-coralogix -go 1.22.5 +go 1.23 toolchain go1.23.0 From 7f0ce37b178c6ee26846d2cdacd2decdddc057e5 Mon Sep 17 00:00:00 2001 From: Claus Matzinger Date: Thu, 26 Sep 2024 09:55:31 +0200 Subject: [PATCH 07/12] feat: transitioned several APIs to SDK --- coralogix/data_source_coralogix_alert.go | 35 ++++++++++++++++++++++++ coralogix/provider.go | 4 +-- coralogix/resource_coralogix_action.go | 2 +- coralogix/resource_coralogix_api_key.go | 4 +-- 4 files changed, 40 insertions(+), 5 deletions(-) diff --git a/coralogix/data_source_coralogix_alert.go b/coralogix/data_source_coralogix_alert.go index b6331319..299a2d45 100644 --- a/coralogix/data_source_coralogix_alert.go +++ b/coralogix/data_source_coralogix_alert.go @@ -16,6 +16,41 @@ import ( var _ datasource.DataSourceWithConfigure = &AlertDataSource{} +// func dataSourceCoralogixAlert() *schema.Resource { +// alertSchema := datasourceSchemaFromResourceSchema(AlertSchema()) +// alertSchema["id"] = &schema.Schema{ +// Type: schema.TypeString, +// Required: true, +// } + +// return &schema.Resource{ +// ReadContext: dataSourceCoralogixAlertRead, + +// Schema: alertSchema, +// } +// } + +// func dataSourceCoralogixAlertRead(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { +// id := wrapperspb.String(d.Get("id").(string)) +// getAlertRequest := &alertsv1.GetAlertByUniqueIdRequest{ +// Id: id, +// } + +// log.Printf("[INFO] Reading alert %s", id) +// alertResp, err := meta.(*clientset.ClientSet).Alerts().GetAlert(ctx, getAlertRequest) +// if err != nil { +// reqStr := protojson.Format(getAlertRequest) +// log.Printf("[ERROR] Received error: %s", err.Error()) +// return diag.Errorf(formatRpcErrors(err, getAlertURL, 
reqStr)) +// } +// alert := alertResp.GetAlert() +// log.Printf("[INFO] Received alert: %s", protojson.Format(alert)) + +// d.SetId(alert.GetId().GetValue()) + +// return setAlert(d, alert) +// } + func NewAlertDataSource() datasource.DataSource { return &AlertDataSource{} } diff --git a/coralogix/provider.go b/coralogix/provider.go index e4a3225f..47ffad24 100644 --- a/coralogix/provider.go +++ b/coralogix/provider.go @@ -88,7 +88,6 @@ func OldProvider() *oldSchema.Provider { DataSourcesMap: map[string]*oldSchema.Resource{ "coralogix_rules_group": dataSourceCoralogixRulesGroup(), - "coralogix_alert": dataSourceCoralogixAlert(), "coralogix_enrichment": dataSourceCoralogixEnrichment(), "coralogix_data_set": dataSourceCoralogixDataSet(), "coralogix_hosted_dashboard": dataSourceCoralogixHostedDashboard(), @@ -96,7 +95,6 @@ func OldProvider() *oldSchema.Provider { ResourcesMap: map[string]*oldSchema.Resource{ "coralogix_rules_group": resourceCoralogixRulesGroup(), - "coralogix_alert": resourceCoralogixAlert(), "coralogix_enrichment": resourceCoralogixEnrichment(), "coralogix_data_set": resourceCoralogixDataSet(), "coralogix_hosted_dashboard": resourceCoralogixHostedDashboard(), @@ -326,6 +324,7 @@ func (p *coralogixProvider) DataSources(context.Context) []func() datasource.Dat NewTeamDataSource, NewScopeDataSource, NewIntegrationDataSource, + NewAlertDataSource, } } @@ -351,5 +350,6 @@ func (p *coralogixProvider) Resources(context.Context) []func() resource.Resourc NewUserResource, NewScopeResource, NewIntegrationResource, + NewAlertResource, } } diff --git a/coralogix/resource_coralogix_action.go b/coralogix/resource_coralogix_action.go index 0e492a78..e4e84854 100644 --- a/coralogix/resource_coralogix_action.go +++ b/coralogix/resource_coralogix_action.go @@ -280,7 +280,7 @@ func (r ActionResource) Update(ctx context.Context, req resource.UpdateRequest, } else { resp.Diagnostics.AddError( "Error reading Action", - formatRpcErrors(err, getActionURL, protojson.Format(getActionReq)), + formatRpcErrors(err, cxsdk.GetActionRpc, protojson.Format(getActionReq)), ) } return diff --git a/coralogix/resource_coralogix_api_key.go b/coralogix/resource_coralogix_api_key.go index f09d2bee..a3a5fea9 100644 --- a/coralogix/resource_coralogix_api_key.go +++ b/coralogix/resource_coralogix_api_key.go @@ -134,7 +134,7 @@ func resourceSchemaV1() schema.Schema { }, }, Required: true, - MarkdownDescription: "Api Key Owner.It can either be a team_id or a user_id ", + MarkdownDescription: "Api Key Owner. It can either be a team_id or a user_id ", }, "active": schema.BoolAttribute{ @@ -209,7 +209,7 @@ func resourceSchemaV0() schema.Schema { }, }, Required: true, - MarkdownDescription: "Api Key Owner.It can either be a team_id or a user_id ", + MarkdownDescription: "Api Key Owner. 
It can either be a team_id or a user_id ", }, "active": schema.BoolAttribute{ From 8856d6b47b0c94187455a26a84814a2256afdaf2 Mon Sep 17 00:00:00 2001 From: Claus Matzinger Date: Thu, 26 Sep 2024 10:13:41 +0200 Subject: [PATCH 08/12] feat: restore old groups client --- coralogix/data_source_coralogix_group.go | 32 ++++------- .../resource_coralogix_alerts_scheduler.go | 14 +++-- coralogix/resource_coralogix_group.go | 54 +++++++++---------- 3 files changed, 47 insertions(+), 53 deletions(-) diff --git a/coralogix/data_source_coralogix_group.go b/coralogix/data_source_coralogix_group.go index 58c78e62..ff3104ab 100644 --- a/coralogix/data_source_coralogix_group.go +++ b/coralogix/data_source_coralogix_group.go @@ -1,11 +1,11 @@ // Copyright 2024 Coralogix Ltd. -// +// // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at -// +// // https://www.apache.org/licenses/LICENSE-2.0 -// +// // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. @@ -19,14 +19,12 @@ import ( "encoding/json" "fmt" "log" - "strconv" - cxsdk "github.com/coralogix/coralogix-management-sdk/go" "github.com/hashicorp/terraform-plugin-framework/datasource" "github.com/hashicorp/terraform-plugin-framework/resource" "google.golang.org/grpc/codes" "google.golang.org/grpc/status" - "google.golang.org/protobuf/encoding/protojson" + "terraform-provider-coralogix/coralogix/clientset" ) var _ datasource.DataSourceWithConfigure = &GroupDataSource{} @@ -36,7 +34,7 @@ func NewGroupDataSource() datasource.DataSource { } type GroupDataSource struct { - client *cxsdk.GroupsClient + client *clientset.GroupsClient } func (d *GroupDataSource) Metadata(_ context.Context, req datasource.MetadataRequest, resp *datasource.MetadataResponse) { @@ -48,11 +46,11 @@ func (d *GroupDataSource) Configure(_ context.Context, req datasource.ConfigureR return } - clientSet, ok := req.ProviderData.(*cxsdk.ClientSet) + clientSet, ok := req.ProviderData.(*clientset.ClientSet) if !ok { resp.Diagnostics.AddError( "Unexpected Resource Configure Type", - fmt.Sprintf("Expected *cxsdk.ClientSet, got: %T. Please report this issue to the provider developers.", req.ProviderData), + fmt.Sprintf("Expected *clientset.ClientSet, got: %T. 
Please report this issue to the provider developers.", req.ProviderData), ) return } @@ -76,16 +74,9 @@ func (d *GroupDataSource) Read(ctx context.Context, req datasource.ReadRequest, return } //Get refreshed Group value from Coralogix - id, _ := strconv.ParseUint(data.ID.ValueString(), 10, 32) - - request := cxsdk.GetTeamGroupRequest{ - GroupId: &cxsdk.GroupsTeamGroupID{ - Id: id, - }, - } - + id := data.ID.ValueString() log.Printf("[INFO] Reading Group: %s", id) - getGroupResp, err := d.client.Get(ctx, request) + getGroupResp, err := d.client.GetGroup(ctx, id) if err != nil { log.Printf("[ERROR] Received error: %s", err.Error()) if status.Code(err) == codes.NotFound { @@ -94,10 +85,9 @@ func (d *GroupDataSource) Read(ctx context.Context, req datasource.ReadRequest, fmt.Sprintf("Group %q is in state, but no longer exists in Coralogix backend", id), ) } else { - resp.Diagnostics.AddError( - "Error reading API Keys", - formatRpcErrors(err, cxsdk.GetTeamGroupRpc, protojson.Format(request)), + "Error reading Group", + formatRpcErrors(err, fmt.Sprintf("%s/%s", d.client.TargetUrl, id), ""), ) } return diff --git a/coralogix/resource_coralogix_alerts_scheduler.go b/coralogix/resource_coralogix_alerts_scheduler.go index 62eeaa4e..6d900fdc 100644 --- a/coralogix/resource_coralogix_alerts_scheduler.go +++ b/coralogix/resource_coralogix_alerts_scheduler.go @@ -1,11 +1,11 @@ // Copyright 2024 Coralogix Ltd. -// +// // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at -// +// // https://www.apache.org/licenses/LICENSE-2.0 -// +// // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
@@ -19,6 +19,9 @@ import ( "fmt" "log" + "terraform-provider-coralogix/coralogix/clientset" + alertsSchedulers "terraform-provider-coralogix/coralogix/clientset/grpc/alerts-scheduler" + "github.com/hashicorp/terraform-plugin-framework-validators/objectvalidator" "github.com/hashicorp/terraform-plugin-framework-validators/setvalidator" "github.com/hashicorp/terraform-plugin-framework-validators/stringvalidator" @@ -35,8 +38,6 @@ import ( "google.golang.org/grpc/codes" "google.golang.org/grpc/status" "google.golang.org/protobuf/encoding/protojson" - "terraform-provider-coralogix/coralogix/clientset" - alertsSchedulers "terraform-provider-coralogix/coralogix/clientset/grpc/alerts-scheduler" ) var ( @@ -70,6 +71,9 @@ var ( updateAlertsSchedulerURL = "com.coralogixapis.alerting.alert_scheduler_rule_protobuf.v1.AlertSchedulerRuleService/UpdateAlertSchedulerRule" deleteAlertsSchedulerURL = "com.coralogixapis.alerting.alert_scheduler_rule_protobuf.v1.AlertSchedulerRuleService/DeleteAlertSchedulerRule" getAlertsSchedulerURL = "com.coralogixapis.alerting.alert_scheduler_rule_protobuf.v1.AlertSchedulerRuleService/GetAlertSchedulerRule" + + validTimeZones = []string{"UTC-11", "UTC-10", "UTC-9", "UTC-8", "UTC-7", "UTC-6", "UTC-5", "UTC-4", "UTC-3", "UTC-2", "UTC-1", + "UTC+0", "UTC+1", "UTC+2", "UTC+3", "UTC+4", "UTC+5", "UTC+6", "UTC+7", "UTC+8", "UTC+9", "UTC+10", "UTC+11", "UTC+12", "UTC+13", "UTC+14"} ) func NewAlertsSchedulerResource() resource.Resource { diff --git a/coralogix/resource_coralogix_group.go b/coralogix/resource_coralogix_group.go index fe4378f5..49138f96 100644 --- a/coralogix/resource_coralogix_group.go +++ b/coralogix/resource_coralogix_group.go @@ -1,11 +1,11 @@ // Copyright 2024 Coralogix Ltd. -// +// // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at -// +// // https://www.apache.org/licenses/LICENSE-2.0 -// +// // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. @@ -20,7 +20,8 @@ import ( "fmt" "log" - cxsdk "github.com/coralogix/coralogix-management-sdk/go" + "terraform-provider-coralogix/coralogix/clientset" + "github.com/hashicorp/terraform-plugin-framework/attr" "github.com/hashicorp/terraform-plugin-framework-validators/stringvalidator" @@ -34,7 +35,6 @@ import ( "github.com/hashicorp/terraform-plugin-framework/types" "google.golang.org/grpc/codes" "google.golang.org/grpc/status" - "google.golang.org/protobuf/encoding/protojson" ) func NewGroupResource() resource.Resource { @@ -42,7 +42,7 @@ func NewGroupResource() resource.Resource { } type GroupResource struct { - client *cxsdk.GroupsClient + client *clientset.GroupsClient } func (r *GroupResource) Metadata(_ context.Context, req resource.MetadataRequest, resp *resource.MetadataResponse) { @@ -54,11 +54,11 @@ func (r *GroupResource) Configure(_ context.Context, req resource.ConfigureReque return } - clientSet, ok := req.ProviderData.(*cxsdk.ClientSet) + clientSet, ok := req.ProviderData.(*clientset.ClientSet) if !ok { resp.Diagnostics.AddError( "Unexpected Resource Configure Type", - fmt.Sprintf("Expected *cxsdk.ClientSet, got: %T. Please report this issue to the provider developers.", req.ProviderData), + fmt.Sprintf("Expected *clientset.ClientSet, got: %T. 
Please report this issue to the provider developers.", req.ProviderData), ) return } @@ -119,16 +119,16 @@ func (r *GroupResource) Create(ctx context.Context, req resource.CreateRequest, } groupStr, _ := json.Marshal(createGroupRequest) log.Printf("[INFO] Creating new group: %s", string(groupStr)) - createResp, err := r.client.Create(ctx, createGroupRequest) + createResp, err := r.client.CreateGroup(ctx, createGroupRequest) if err != nil { log.Printf("[ERROR] Received error: %s", err.Error()) resp.Diagnostics.AddError( "Error creating Group", - formatRpcErrors(err, cxsdk.CreateTeamGroupRpc, protojson.Format(groupStr)), + formatRpcErrors(err, r.client.TargetUrl, string(groupStr)), ) return } - getResp, err := r.client.Get(ctx, createResp.GroupId) + getResp, err := r.client.GetGroup(ctx, createResp.ID) groupStr, _ = json.Marshal(getResp) log.Printf("[INFO] Getting group: %s", groupStr) state, diags := flattenSCIMGroup(getResp) @@ -142,7 +142,7 @@ func (r *GroupResource) Create(ctx context.Context, req resource.CreateRequest, resp.Diagnostics.Append(diags...) } -func flattenSCIMGroup(group *cxsdk.GroupsTeamGroup) (*GroupResourceModel, diag.Diagnostics) { +func flattenSCIMGroup(group *clientset.SCIMGroup) (*GroupResourceModel, diag.Diagnostics) { members, diags := flattenSCIMGroupMembers(group.Members) if diags.HasError() { return nil, diags @@ -162,7 +162,7 @@ func flattenSCIMGroup(group *cxsdk.GroupsTeamGroup) (*GroupResourceModel, diag.D }, nil } -func flattenSCIMGroupMembers(members []cxsdk.SCIMGroupMember) (types.Set, diag.Diagnostics) { +func flattenSCIMGroupMembers(members []clientset.SCIMGroupMember) (types.Set, diag.Diagnostics) { if len(members) == 0 { return types.SetNull(types.StringType), nil } @@ -304,30 +304,30 @@ func (r *GroupResource) Delete(ctx context.Context, req resource.DeleteRequest, } type GroupResourceModel struct { - ID types.String `tfsdk:"id"` - Name types.String `tfsdk:"display_name"` - Members types.Set `tfsdk:"members"` // Set of strings - Role types.String `tfsdk:"role"` - ScopeID types.String `tfsdk:"scope_id"` + ID types.String `tfsdk:"id"` + DisplayName types.String `tfsdk:"display_name"` + Members types.Set `tfsdk:"members"` // Set of strings + Role types.String `tfsdk:"role"` + ScopeID types.String `tfsdk:"scope_id"` } -func extractGroup(ctx context.Context, plan *GroupResourceModel) (*cxsdk.Group, diag.Diagnostics) { +func extractGroup(ctx context.Context, plan *GroupResourceModel) (*clientset.SCIMGroup, diag.Diagnostics) { members, diags := extractGroupMembers(ctx, plan.Members) if diags.HasError() { return nil, diags } - return &cxsdk.TeamGroup{ - Name: plan.Name.ValueString(), - Members: members, - Role: plan.Role.ValueString(), - ScopeID: plan.ScopeID.ValueString(), + return &clientset.SCIMGroup{ + DisplayName: plan.DisplayName.ValueString(), + Members: members, + Role: plan.Role.ValueString(), + ScopeID: plan.ScopeID.ValueString(), }, nil } -func extractGroupMembers(ctx context.Context, members types.Set) ([]cxsdk.SCIMGroupMember, diag.Diagnostics) { +func extractGroupMembers(ctx context.Context, members types.Set) ([]clientset.SCIMGroupMember, diag.Diagnostics) { membersElements := members.Elements() - groupMembers := make([]cxsdk.SCIMGroupMember, 0, len(membersElements)) + groupMembers := make([]clientset.SCIMGroupMember, 0, len(membersElements)) var diags diag.Diagnostics for _, member := range membersElements { val, err := member.ToTerraformValue(ctx) @@ -341,7 +341,7 @@ func extractGroupMembers(ctx context.Context, members types.Set) 
([]cxsdk.SCIMGr diags.AddError("Failed to convert value to string", err.Error()) continue } - groupMembers = append(groupMembers, cxsdk.SCIMGroupMember{Value: str}) + groupMembers = append(groupMembers, clientset.SCIMGroupMember{Value: str}) } if diags.HasError() { return nil, diags From ab101ba750ef115efee780651ece07301086c3f6 Mon Sep 17 00:00:00 2001 From: Claus Matzinger Date: Thu, 26 Sep 2024 14:45:27 +0200 Subject: [PATCH 09/12] chore: updated to latest sdk on github --- go.mod | 6 +----- go.sum | 2 ++ 2 files changed, 3 insertions(+), 5 deletions(-) diff --git a/go.mod b/go.mod index 6d3f5c38..d85c1e86 100644 --- a/go.mod +++ b/go.mod @@ -2,12 +2,9 @@ module terraform-provider-coralogix go 1.23 -toolchain go1.23.0 - -replace github.com/coralogix/coralogix-management-sdk => ../coralogix-management-sdk - require ( github.com/ahmetalpbalkan/go-linq v3.0.0+incompatible + github.com/coralogix/coralogix-management-sdk v0.2.2-0.20240926124132-1adf5d665a2e github.com/google/uuid v1.6.0 github.com/grafana/grafana-api-golang-client v0.27.0 github.com/grpc-ecosystem/go-grpc-middleware v1.4.0 @@ -24,7 +21,6 @@ require ( google.golang.org/grpc v1.65.0 google.golang.org/protobuf v1.34.2 gopkg.in/yaml.v3 v3.0.1 - github.com/coralogix/coralogix-management-sdk v0.2.2-0.20240828115216-6f699f7a4510 ) require ( diff --git a/go.sum b/go.sum index 61f823c6..34844eaf 100644 --- a/go.sum +++ b/go.sum @@ -42,6 +42,8 @@ github.com/coralogix/coralogix-management-sdk v0.2.1 h1:5g5F37DGfZ3AL91S3J1vtmAI github.com/coralogix/coralogix-management-sdk v0.2.1/go.mod h1:1aa/coMEMe5M1NvnRymOrBF2iCdefaWR0CMaMjPu0oI= github.com/coralogix/coralogix-management-sdk v0.2.2-0.20240828115216-6f699f7a4510 h1:KSQGSBFQBcePt8rFRbHQenyiMxqiEHvZGq6p/o47K7c= github.com/coralogix/coralogix-management-sdk v0.2.2-0.20240828115216-6f699f7a4510/go.mod h1:1aa/coMEMe5M1NvnRymOrBF2iCdefaWR0CMaMjPu0oI= +github.com/coralogix/coralogix-management-sdk v0.2.2-0.20240926124132-1adf5d665a2e h1:i2nfdlHuBZI3bwz0xkmRFtosVmJpGVqHr/JHVhurIxk= +github.com/coralogix/coralogix-management-sdk v0.2.2-0.20240926124132-1adf5d665a2e/go.mod h1:1aa/coMEMe5M1NvnRymOrBF2iCdefaWR0CMaMjPu0oI= github.com/cyphar/filepath-securejoin v0.2.4 h1:Ugdm7cg7i6ZK6x3xDF1oEu1nfkyfH53EtKeQYTC3kyg= github.com/cyphar/filepath-securejoin v0.2.4/go.mod h1:aPGpWjXOXUn2NCNjFvBE6aRxGGx79pTxQpKOJNYHHl4= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= From e57dcc09bf440243313860f2715ca317e708915a Mon Sep 17 00:00:00 2001 From: Claus Matzinger Date: Thu, 26 Sep 2024 14:53:37 +0200 Subject: [PATCH 10/12] ci: update go version in ci --- .github/workflows/acc-test.yml | 2 +- .github/workflows/build.yml | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/workflows/acc-test.yml b/.github/workflows/acc-test.yml index 1c39f4cf..900a31b0 100644 --- a/.github/workflows/acc-test.yml +++ b/.github/workflows/acc-test.yml @@ -16,7 +16,7 @@ jobs: GO111MODULE: on strategy: matrix: - go-version: [ 1.20.x ] + go-version: [ 1.23 ] os: [ ubuntu-latest ] runs-on: ${{ matrix.os }} steps: diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml index 5b52ea1b..9e10673e 100644 --- a/.github/workflows/build.yml +++ b/.github/workflows/build.yml @@ -14,7 +14,7 @@ jobs: GO111MODULE: on strategy: matrix: - go-version: [ 1.20.x ] + go-version: [ 1.23 ] os: [ ubuntu-latest ] runs-on: ${{ matrix.os }} steps: From b731a6c2853c882a91d0f8da71f75ad5d7a410c5 Mon Sep 17 00:00:00 2001 From: Claus Matzinger Date: Thu, 26 Sep 2024 15:21:13 +0200 
Subject: [PATCH 11/12] chore: tests --- ...esource_coralogix_alerts_scheduler_test.go | 14 --- ...ource_coralogix_alerts_schedulerv3_test.go | 117 ------------------ 2 files changed, 131 deletions(-) delete mode 100644 coralogix/resource_coralogix_alerts_schedulerv3_test.go diff --git a/coralogix/resource_coralogix_alerts_scheduler_test.go b/coralogix/resource_coralogix_alerts_scheduler_test.go index 3ef1f4ac..f59fc90d 100644 --- a/coralogix/resource_coralogix_alerts_scheduler_test.go +++ b/coralogix/resource_coralogix_alerts_scheduler_test.go @@ -1,17 +1,3 @@ -// Copyright 2024 Coralogix Ltd. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// https://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - package coralogix import ( diff --git a/coralogix/resource_coralogix_alerts_schedulerv3_test.go b/coralogix/resource_coralogix_alerts_schedulerv3_test.go deleted file mode 100644 index f59fc90d..00000000 --- a/coralogix/resource_coralogix_alerts_schedulerv3_test.go +++ /dev/null @@ -1,117 +0,0 @@ -package coralogix - -import ( - "context" - "fmt" - "testing" - - terraform2 "github.com/hashicorp/terraform-plugin-sdk/v2/terraform" - "github.com/hashicorp/terraform-plugin-testing/helper/resource" - "github.com/hashicorp/terraform-plugin-testing/terraform" - "terraform-provider-coralogix/coralogix/clientset" - alertsSchedulers "terraform-provider-coralogix/coralogix/clientset/grpc/alerts-scheduler" -) - -var ( - alertsSchedulerResourceName = "coralogix_alerts_scheduler.test" -) - -func TestAccCoralogixResourceResourceAlertsScheduler(t *testing.T) { - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - CheckDestroy: testAccCheckAlertsSchedulerDestroy, - ProtoV6ProviderFactories: testAccProtoV6ProviderFactories, - Steps: []resource.TestStep{ - { - Config: testAccCoralogixResourceAlertsScheduler(), - Check: resource.ComposeAggregateTestCheckFunc( - resource.TestCheckResourceAttr(alertsSchedulerResourceName, "name", "example"), - resource.TestCheckResourceAttr(alertsSchedulerResourceName, "filter.what_expression", "source logs | filter $d.cpodId:string == '122'"), - resource.TestCheckTypeSetElemNestedAttrs(alertsSchedulerResourceName, "filter.meta_labels.*", map[string]string{ - "key": "key", - "value": "value", - }), - resource.TestCheckResourceAttr(alertsSchedulerResourceName, "schedule.operation", "active"), - resource.TestCheckResourceAttr(alertsSchedulerResourceName, "schedule.recurring.dynamic.repeat_every", "2"), - resource.TestCheckTypeSetElemAttr(alertsSchedulerResourceName, "schedule.recurring.dynamic.frequency.weekly.days.*", "Sunday"), - resource.TestCheckResourceAttr(alertsSchedulerResourceName, "schedule.recurring.dynamic.time_frame.start_time", "2021-01-04T00:00:00.000"), - resource.TestCheckResourceAttr(alertsSchedulerResourceName, "schedule.recurring.dynamic.time_frame.duration.for_over", "2"), - resource.TestCheckResourceAttr(alertsSchedulerResourceName, "schedule.recurring.dynamic.time_frame.duration.frequency", "hours"), - resource.TestCheckResourceAttr(alertsSchedulerResourceName, 
"schedule.recurring.dynamic.time_frame.time_zone", "UTC+2"), - resource.TestCheckResourceAttr(alertsSchedulerResourceName, "schedule.recurring.dynamic.termination_date", "2025-01-01T00:00:00.000"), - ), - }, - { - ResourceName: alertsSchedulerResourceName, - ImportState: true, - ImportStateVerify: true, - }, - }, - }) -} - -func testAccCheckAlertsSchedulerDestroy(s *terraform.State) error { - testAccProvider = OldProvider() - rc := terraform2.ResourceConfig{} - testAccProvider.Configure(context.Background(), &rc) - client := testAccProvider.Meta().(*clientset.ClientSet).AlertSchedulers() - ctx := context.TODO() - - for _, rs := range s.RootModule().Resources { - if rs.Type != "coralogix_alerts_scheduler" { - continue - } - - req := &alertsSchedulers.GetAlertSchedulerRuleRequest{ - AlertSchedulerRuleId: rs.Primary.ID, - } - - resp, err := client.GetAlertScheduler(ctx, req) - if err == nil { - if resp.GetAlertSchedulerRule().GetId() == rs.Primary.ID { - return fmt.Errorf("alerts-scheduler still exists: %s", rs.Primary.ID) - } - } - } - - return nil -} - -func testAccCoralogixResourceAlertsScheduler() string { - return `resource "coralogix_alerts_scheduler" "test" { - name = "example" - description = "example" - filter = { - what_expression = "source logs | filter $d.cpodId:string == '122'" - meta_labels = [ - { - key = "key" - value = "value" - } - ] - } - schedule = { - operation = "active" - recurring = { - dynamic = { - repeat_every = 2 - frequency = { - weekly = { - days = ["Sunday"] - } - } - time_frame = { - start_time = "2021-01-04T00:00:00.000" - duration = { - for_over = 2 - frequency = "hours" - } - time_zone = "UTC+2" - } - termination_date = "2025-01-01T00:00:00.000" - } - } - } -} -` -} From c48afd670a7ab52a2ac669887f45ab6ea3d1e094 Mon Sep 17 00:00:00 2001 From: Claus Matzinger Date: Thu, 26 Sep 2024 15:21:32 +0200 Subject: [PATCH 12/12] fix: tests --- .../resource_coralogix_alert_test.go.old | 1286 ----------------- 1 file changed, 1286 deletions(-) delete mode 100644 coralogix/resource_coralogix_alert_test.go.old diff --git a/coralogix/resource_coralogix_alert_test.go.old b/coralogix/resource_coralogix_alert_test.go.old deleted file mode 100644 index b0688f0b..00000000 --- a/coralogix/resource_coralogix_alert_test.go.old +++ /dev/null @@ -1,1286 +0,0 @@ -// Copyright 2024 Coralogix Ltd. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// https://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package coralogix - -import ( - "context" - "fmt" - "math" - "strconv" - "testing" - - "terraform-provider-coralogix/coralogix/clientset" - alertsv1 "terraform-provider-coralogix/coralogix/clientset/grpc/alerts/v2" - - "github.com/hashicorp/terraform-plugin-testing/helper/acctest" - "github.com/hashicorp/terraform-plugin-testing/helper/resource" - "github.com/hashicorp/terraform-plugin-testing/terraform" - "google.golang.org/protobuf/types/known/wrapperspb" -) - -var alertResourceName = "coralogix_alert.test" - -func TestAccCoralogixResourceAlert_standard(t *testing.T) { - alert := standardAlertTestParams{ - alertCommonTestParams: *getRandomAlert(), - groupBy: []string{"EventType"}, - occurrencesThreshold: acctest.RandIntRange(1, 1000), - timeWindow: selectRandomlyFromSlice(alertValidTimeFrames), - deadmanRatio: selectRandomlyFromSlice(alertValidDeadmanRatioValues), - } - checks := extractStandardAlertChecks(alert) - - updatedAlert := standardAlertTestParams{ - alertCommonTestParams: *getRandomAlert(), - groupBy: []string{"EventType"}, - occurrencesThreshold: acctest.RandIntRange(1, 1000), - timeWindow: selectRandomlyFromSlice(alertValidTimeFrames), - deadmanRatio: selectRandomlyFromSlice(alertValidDeadmanRatioValues), - } - updatedAlertChecks := extractStandardAlertChecks(updatedAlert) - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - ProviderFactories: testAccProviderFactories, - CheckDestroy: testAccCheckAlertDestroy, - Steps: []resource.TestStep{ - { - Config: testAccCoralogixResourceAlertStandard(&alert), - Check: resource.ComposeAggregateTestCheckFunc(checks...), - }, - { - ResourceName: alertResourceName, - ImportState: true, - }, - { - Config: testAccCoralogixResourceAlertStandard(&updatedAlert), - Check: resource.ComposeAggregateTestCheckFunc(updatedAlertChecks...), - }, - }, - }) -} - -func TestAccCoralogixResourceAlert_ratio(t *testing.T) { - alert := ratioAlertTestParams{ - alertCommonTestParams: *getRandomAlert(), - q2Severities: selectManyRandomlyFromSlice(alertValidLogSeverities), - timeWindow: selectRandomlyFromSlice(alertValidTimeFrames), - ratio: randFloat(), - groupBy: []string{"EventType"}, - q2SearchQuery: "remote_addr_enriched:/.*/", - ignoreInfinity: randBool(), - } - checks := extractRatioAlertChecks(alert) - - updatedAlert := ratioAlertTestParams{ - alertCommonTestParams: *getRandomAlert(), - q2Severities: selectManyRandomlyFromSlice(alertValidLogSeverities), - timeWindow: selectRandomlyFromSlice(alertValidTimeFrames), - ratio: randFloat(), - groupBy: []string{"EventType"}, - q2SearchQuery: "remote_addr_enriched:/.*/", - ignoreInfinity: randBool(), - } - updatedAlertChecks := extractRatioAlertChecks(updatedAlert) - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - ProviderFactories: testAccProviderFactories, - CheckDestroy: testAccCheckAlertDestroy, - Steps: []resource.TestStep{ - { - Config: testAccCoralogixResourceAlertRatio(&alert), - Check: resource.ComposeAggregateTestCheckFunc(checks...), - }, - { - ResourceName: alertResourceName, - ImportState: true, - }, - { - Config: testAccCoralogixResourceAlertRatio(&updatedAlert), - Check: resource.ComposeAggregateTestCheckFunc(updatedAlertChecks...), - }, - }, - }) -} - -func TestAccCoralogixResourceAlert_newValue(t *testing.T) { - alert := newValueAlertTestParams{ - alertCommonTestParams: *getRandomAlert(), - keyToTrack: "EventType", - timeWindow: selectRandomlyFromSlice(alertValidNewValueTimeFrames), - } - alert.notifyOn = "Triggered_only" - 
checks := extractNewValueChecks(alert) - - updatedAlert := newValueAlertTestParams{ - alertCommonTestParams: *getRandomAlert(), - keyToTrack: "EventType", - timeWindow: selectRandomlyFromSlice(alertValidNewValueTimeFrames), - } - updatedAlert.notifyOn = "Triggered_only" - updatedAlertChecks := extractNewValueChecks(updatedAlert) - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - ProviderFactories: testAccProviderFactories, - CheckDestroy: testAccCheckAlertDestroy, - Steps: []resource.TestStep{ - { - Config: testAccCoralogixResourceAlertNewValue(&alert), - Check: resource.ComposeAggregateTestCheckFunc(checks...), - }, - { - ResourceName: alertResourceName, - ImportState: true, - }, - { - Config: testAccCoralogixResourceAlertNewValue(&updatedAlert), - Check: resource.ComposeAggregateTestCheckFunc(updatedAlertChecks...), - }, - }, - }) -} - -func TestAccCoralogixResourceAlert_uniqueCount(t *testing.T) { - alert := uniqueCountAlertTestParams{ - alertCommonTestParams: *getRandomAlert(), - uniqueCountKey: "EventType", - timeWindow: selectRandomlyFromSlice(alertValidUniqueCountTimeFrames), - groupByKey: "metadata.name", - maxUniqueValues: 2, - maxUniqueValuesForGroupBy: 20, - } - checks := extractUniqueCountAlertChecks(alert) - - updatedAlert := uniqueCountAlertTestParams{ - alertCommonTestParams: *getRandomAlert(), - uniqueCountKey: "EventType", - timeWindow: selectRandomlyFromSlice(alertValidUniqueCountTimeFrames), - groupByKey: "metadata.name", - maxUniqueValues: 2, - maxUniqueValuesForGroupBy: 20, - } - updatedAlertChecks := extractUniqueCountAlertChecks(updatedAlert) - updatedAlertChecks = updatedAlertChecks[:len(updatedAlertChecks)-1] // remove group_by check - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - ProviderFactories: testAccProviderFactories, - CheckDestroy: testAccCheckAlertDestroy, - Steps: []resource.TestStep{ - { - Config: testAccCoralogixResourceAlertUniqueCount(&alert), - Check: resource.ComposeAggregateTestCheckFunc(checks...), - }, - { - ResourceName: alertResourceName, - ImportState: true, - }, - { - Config: testAccCoralogixResourceAlertUniqueCount(&updatedAlert), - Check: resource.ComposeAggregateTestCheckFunc(updatedAlertChecks...), - }, - }, - }) -} - -func TestAccCoralogixResourceAlert_timeRelative(t *testing.T) { - alert := timeRelativeAlertTestParams{ - alertCommonTestParams: *getRandomAlert(), - ratioThreshold: acctest.RandIntRange(0, 1000), - relativeTimeWindow: selectRandomlyFromSlice(alertValidRelativeTimeFrames), - groupBy: []string{"EventType"}, - ignoreInfinity: randBool(), - } - checks := extractTimeRelativeChecks(alert) - - updatedAlert := timeRelativeAlertTestParams{ - alertCommonTestParams: *getRandomAlert(), - ratioThreshold: acctest.RandIntRange(0, 1000), - relativeTimeWindow: selectRandomlyFromSlice(alertValidRelativeTimeFrames), - groupBy: []string{"EventType"}, - ignoreInfinity: randBool(), - } - updatedAlertChecks := extractTimeRelativeChecks(updatedAlert) - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - ProviderFactories: testAccProviderFactories, - CheckDestroy: testAccCheckAlertDestroy, - Steps: []resource.TestStep{ - { - Config: testAccCoralogixResourceAlertTimeRelative(&alert), - Check: resource.ComposeAggregateTestCheckFunc(checks...), - }, - { - ResourceName: alertResourceName, - ImportState: true, - }, - { - Config: testAccCoralogixResourceAlertTimeRelative(&updatedAlert), - Check: 
resource.ComposeAggregateTestCheckFunc(updatedAlertChecks...), - }, - }, - }) -} - -func TestAccCoralogixResourceAlert_metricLucene(t *testing.T) { - alert := metricLuceneAlertTestParams{ - alertCommonTestParams: *getRandomAlert(), - groupBy: []string{"EventType"}, - metricField: "subsystem", - timeWindow: selectRandomlyFromSlice(alertValidMetricTimeFrames), - threshold: acctest.RandIntRange(0, 1000), - arithmeticOperator: selectRandomlyFromSlice(alertValidArithmeticOperators), - } - if alert.arithmeticOperator == "Percentile" { - alert.arithmeticOperatorModifier = acctest.RandIntRange(0, 100) - } - checks := extractLuceneMetricChecks(alert) - - updatedAlert := metricLuceneAlertTestParams{ - alertCommonTestParams: *getRandomAlert(), - groupBy: []string{"EventType"}, - metricField: "subsystem", - timeWindow: selectRandomlyFromSlice(alertValidMetricTimeFrames), - threshold: acctest.RandIntRange(0, 1000), - arithmeticOperator: selectRandomlyFromSlice(alertValidArithmeticOperators), - } - if updatedAlert.arithmeticOperator == "Percentile" { - updatedAlert.arithmeticOperatorModifier = acctest.RandIntRange(0, 100) - } - updatedAlertChecks := extractLuceneMetricChecks(updatedAlert) - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - ProviderFactories: testAccProviderFactories, - CheckDestroy: testAccCheckAlertDestroy, - Steps: []resource.TestStep{ - { - Config: testAccCoralogixResourceAlertMetricLucene(&alert), - Check: resource.ComposeAggregateTestCheckFunc(checks...), - }, - { - ResourceName: alertResourceName, - ImportState: true, - }, - { - Config: testAccCoralogixResourceAlertMetricLucene(&updatedAlert), - Check: resource.ComposeAggregateTestCheckFunc(updatedAlertChecks...), - }, - }, - }) -} - -func TestAccCoralogixResourceAlert_metricPromql(t *testing.T) { - alert := metricPromqlAlertTestParams{ - alertCommonTestParams: *getRandomAlert(), - threshold: acctest.RandIntRange(0, 1000), - nonNullPercentage: 10 * acctest.RandIntRange(0, 10), - timeWindow: selectRandomlyFromSlice(alertValidMetricTimeFrames), - condition: "less_than", - } - checks := extractMetricPromqlAlertChecks(alert) - - updatedAlert := metricPromqlAlertTestParams{ - alertCommonTestParams: *getRandomAlert(), - threshold: acctest.RandIntRange(0, 1000), - nonNullPercentage: 10 * acctest.RandIntRange(0, 10), - timeWindow: selectRandomlyFromSlice(alertValidMetricTimeFrames), - condition: "more_than", - } - updatedAlertChecks := extractMetricPromqlAlertChecks(updatedAlert) - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - ProviderFactories: testAccProviderFactories, - CheckDestroy: testAccCheckAlertDestroy, - Steps: []resource.TestStep{ - { - Config: testAccCoralogixResourceAlertMetricPromql(&alert), - Check: resource.ComposeAggregateTestCheckFunc(checks...), - }, - { - ResourceName: alertResourceName, - ImportState: true, - }, - { - Config: testAccCoralogixResourceAlertMetricPromql(&updatedAlert), - Check: resource.ComposeAggregateTestCheckFunc(updatedAlertChecks...), - }, - }, - }) -} - -func TestAccCoralogixResourceAlert_tracing(t *testing.T) { - alert := tracingAlertTestParams{ - alertCommonTestParams: *getRandomAlert(), - conditionLatencyMs: math.Round(randFloat()*1000) / 1000, - occurrencesThreshold: acctest.RandIntRange(1, 10000), - timeWindow: selectRandomlyFromSlice(alertValidTimeFrames), - groupBy: []string{"EventType"}, - } - checks := extractTracingAlertChecks(alert) - - updatedAlert := tracingAlertTestParams{ - alertCommonTestParams: *getRandomAlert(), 
- conditionLatencyMs: math.Round(randFloat()*1000) / 1000, - occurrencesThreshold: acctest.RandIntRange(1, 10000), - timeWindow: selectRandomlyFromSlice(alertValidTimeFrames), - groupBy: []string{"EventType"}, - } - updatedAlertChecks := extractTracingAlertChecks(updatedAlert) - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - ProviderFactories: testAccProviderFactories, - CheckDestroy: testAccCheckAlertDestroy, - Steps: []resource.TestStep{ - { - Config: testAccCoralogixResourceAlertTracing(&alert), - Check: resource.ComposeAggregateTestCheckFunc(checks...), - }, - { - ResourceName: alertResourceName, - ImportState: true, - }, - { - Config: testAccCoralogixResourceAlertTracing(&updatedAlert), - Check: resource.ComposeAggregateTestCheckFunc(updatedAlertChecks...), - }, - }, - }) -} - -func TestAccCoralogixResourceAlert_flow(t *testing.T) { - resourceName := "coralogix_alert.test" - - alert := flowAlertTestParams{ - name: acctest.RandomWithPrefix("tf-acc-test"), - description: acctest.RandomWithPrefix("tf-acc-test"), - emailRecipients: []string{"user@example.com"}, - webhookID: "10761", - severity: selectRandomlyFromSlice(alertValidSeverities), - activeWhen: randActiveWhen(), - notifyEveryMin: acctest.RandIntRange(1500 /*to avoid notify_every < condition.0.time_window*/, 3600), - notifyOn: "Triggered_only", - } - checks := extractFlowAlertChecks(alert) - - updatedAlert := flowAlertTestParams{ - name: acctest.RandomWithPrefix("tf-acc-test"), - description: acctest.RandomWithPrefix("tf-acc-test"), - emailRecipients: []string{"user@example.com"}, - webhookID: "10761", - severity: selectRandomlyFromSlice(alertValidSeverities), - activeWhen: randActiveWhen(), - notifyEveryMin: acctest.RandIntRange(1500 /*to avoid notify_every < condition.0.time_window*/, 3600), - notifyOn: "Triggered_only", - } - updatedAlertChecks := extractFlowAlertChecks(updatedAlert) - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - ProviderFactories: testAccProviderFactories, - CheckDestroy: testAccCheckAlertDestroy, - Steps: []resource.TestStep{ - { - Config: testAccCoralogixResourceAlertFLow(&alert), - Check: resource.ComposeAggregateTestCheckFunc(checks...), - }, - { - ResourceName: resourceName, - ImportState: true, - }, - { - Config: testAccCoralogixResourceAlertFLow(&updatedAlert), - Check: resource.ComposeAggregateTestCheckFunc(updatedAlertChecks...), - }, - }, - }) -} - -func getRandomAlert() *alertCommonTestParams { - return &alertCommonTestParams{ - name: acctest.RandomWithPrefix("tf-acc-test"), - description: acctest.RandomWithPrefix("tf-acc-test"), - webhookID: "10761", - emailRecipients: []string{"user@example.com"}, - searchQuery: "remote_addr_enriched:/.*/", - severity: selectRandomlyFromSlice(alertValidSeverities), - activeWhen: randActiveWhen(), - notifyEveryMin: acctest.RandIntRange(2160 /*to avoid notify_every < condition.0.time_window*/, 3600), - notifyOn: selectRandomlyFromSlice(validNotifyOn), - alertFilters: alertFilters{ - severities: selectManyRandomlyFromSlice(alertValidLogSeverities), - }, - } -} - -func extractStandardAlertChecks(alert standardAlertTestParams) []resource.TestCheckFunc { - checks := extractCommonChecks(&alert.alertCommonTestParams, "standard") - checks = append(checks, - resource.TestCheckResourceAttr(alertResourceName, "meta_labels.alert_type", "security"), - resource.TestCheckResourceAttr(alertResourceName, "meta_labels.security_severity", "high"), - resource.TestCheckResourceAttr(alertResourceName, 
"standard.0.condition.0.threshold", strconv.Itoa(alert.occurrencesThreshold)), - resource.TestCheckResourceAttr(alertResourceName, "standard.0.condition.0.time_window", alert.timeWindow), - resource.TestCheckResourceAttr(alertResourceName, "standard.0.condition.0.group_by.0", alert.groupBy[0]), - resource.TestCheckResourceAttr(alertResourceName, "standard.0.condition.0.less_than", "true"), - resource.TestCheckResourceAttr(alertResourceName, "standard.0.condition.0.manage_undetected_values.0.enable_triggering_on_undetected_values", "true"), - resource.TestCheckResourceAttr(alertResourceName, "standard.0.condition.0.manage_undetected_values.0.auto_retire_ratio", alert.deadmanRatio), - ) - return checks -} - -func extractRatioAlertChecks(alert ratioAlertTestParams) []resource.TestCheckFunc { - checks := extractCommonChecks(&alert.alertCommonTestParams, "ratio.0.query_1") - checks = append(checks, - resource.TestCheckResourceAttr(alertResourceName, "ratio.0.query_2.0.search_query", alert.q2SearchQuery), - resource.TestCheckResourceAttr(alertResourceName, "ratio.0.condition.0.more_than", "true"), - resource.TestCheckResourceAttr(alertResourceName, "ratio.0.condition.0.ratio_threshold", fmt.Sprintf("%f", alert.ratio)), - resource.TestCheckResourceAttr(alertResourceName, "ratio.0.condition.0.time_window", alert.timeWindow), - resource.TestCheckResourceAttr(alertResourceName, "ratio.0.condition.0.group_by.0", alert.groupBy[0]), - resource.TestCheckResourceAttr(alertResourceName, "ratio.0.condition.0.group_by_q1", "true"), - resource.TestCheckResourceAttr(alertResourceName, "ratio.0.condition.0.ignore_infinity", fmt.Sprintf("%t", alert.ignoreInfinity)), - ) - checks = appendSeveritiesCheck(checks, alert.alertFilters.severities, "ratio.0.query_2") - - return checks -} - -func extractNewValueChecks(alert newValueAlertTestParams) []resource.TestCheckFunc { - checks := extractCommonChecks(&alert.alertCommonTestParams, "new_value") - checks = append(checks, - resource.TestCheckResourceAttr(alertResourceName, "new_value.0.condition.0.key_to_track", alert.keyToTrack), - resource.TestCheckResourceAttr(alertResourceName, "new_value.0.condition.0.time_window", alert.timeWindow), - ) - return checks -} - -func extractUniqueCountAlertChecks(alert uniqueCountAlertTestParams) []resource.TestCheckFunc { - checks := extractCommonChecks(&alert.alertCommonTestParams, "unique_count") - checks = append(checks, - resource.TestCheckResourceAttr(alertResourceName, "unique_count.0.condition.0.unique_count_key", alert.uniqueCountKey), - resource.TestCheckResourceAttr(alertResourceName, "unique_count.0.condition.0.unique_count_key", alert.uniqueCountKey), - resource.TestCheckResourceAttr(alertResourceName, "unique_count.0.condition.0.time_window", alert.timeWindow), - resource.TestCheckResourceAttr(alertResourceName, "unique_count.0.condition.0.max_unique_values", strconv.Itoa(alert.maxUniqueValues)), - resource.TestCheckResourceAttr(alertResourceName, "unique_count.0.condition.0.max_unique_values_for_group_by", strconv.Itoa(alert.maxUniqueValuesForGroupBy)), - ) - return checks -} - -func extractTimeRelativeChecks(alert timeRelativeAlertTestParams) []resource.TestCheckFunc { - checks := extractCommonChecks(&alert.alertCommonTestParams, "time_relative") - checks = append(checks, - resource.TestCheckResourceAttr(alertResourceName, "time_relative.0.condition.0.ratio_threshold", strconv.Itoa(alert.ratioThreshold)), - resource.TestCheckResourceAttr(alertResourceName, "time_relative.0.condition.0.relative_time_window", 
alert.relativeTimeWindow), - resource.TestCheckResourceAttr(alertResourceName, "time_relative.0.condition.0.group_by.0", alert.groupBy[0]), - resource.TestCheckResourceAttr(alertResourceName, "time_relative.0.condition.0.ignore_infinity", fmt.Sprintf("%t", alert.ignoreInfinity)), - ) - - return checks -} - -func extractLuceneMetricChecks(alert metricLuceneAlertTestParams) []resource.TestCheckFunc { - checks := []resource.TestCheckFunc{ - resource.TestCheckResourceAttrSet(alertResourceName, "id"), - resource.TestCheckResourceAttr(alertResourceName, "enabled", "true"), - resource.TestCheckResourceAttr(alertResourceName, "name", alert.name), - resource.TestCheckResourceAttr(alertResourceName, "description", alert.description), - resource.TestCheckResourceAttr(alertResourceName, "severity", alert.severity), - resource.TestCheckTypeSetElemNestedAttrs(alertResourceName, "notifications_group.0.notification.*", - map[string]string{ - "integration_id": alert.webhookID, - }), - resource.TestCheckTypeSetElemNestedAttrs(alertResourceName, "notifications_group.0.notification.*", - map[string]string{ - "email_recipients.0": alert.emailRecipients[0], - }), - resource.TestCheckResourceAttr(alertResourceName, "metric.0.lucene.0.search_query", alert.searchQuery), - resource.TestCheckResourceAttr(alertResourceName, "metric.0.lucene.0.condition.0.metric_field", alert.metricField), - resource.TestCheckResourceAttr(alertResourceName, "metric.0.lucene.0.condition.0.arithmetic_operator", alert.arithmeticOperator), - resource.TestCheckResourceAttr(alertResourceName, "metric.0.lucene.0.condition.0.less_than", "true"), - resource.TestCheckResourceAttr(alertResourceName, "metric.0.lucene.0.condition.0.threshold", strconv.Itoa(alert.threshold)), - resource.TestCheckResourceAttr(alertResourceName, "metric.0.lucene.0.condition.0.arithmetic_operator_modifier", strconv.Itoa(alert.arithmeticOperatorModifier)), - resource.TestCheckResourceAttr(alertResourceName, "metric.0.lucene.0.condition.0.sample_threshold_percentage", strconv.Itoa(alert.sampleThresholdPercentage)), - resource.TestCheckResourceAttr(alertResourceName, "metric.0.lucene.0.condition.0.time_window", alert.timeWindow), - resource.TestCheckResourceAttr(alertResourceName, "metric.0.lucene.0.condition.0.group_by.0", alert.groupBy[0]), - resource.TestCheckResourceAttr(alertResourceName, "metric.0.lucene.0.condition.0.manage_undetected_values.0.enable_triggering_on_undetected_values", "false"), - } - checks = appendSchedulingChecks(checks, alert.daysOfWeek, alert.activityStarts, alert.activityEnds) - return checks -} - -func extractMetricPromqlAlertChecks(alert metricPromqlAlertTestParams) []resource.TestCheckFunc { - checks := []resource.TestCheckFunc{ - resource.TestCheckResourceAttrSet(alertResourceName, "id"), - resource.TestCheckResourceAttr(alertResourceName, "enabled", "true"), - resource.TestCheckResourceAttr(alertResourceName, "name", alert.name), - resource.TestCheckResourceAttr(alertResourceName, "description", alert.description), - resource.TestCheckResourceAttr(alertResourceName, "severity", alert.severity), - resource.TestCheckTypeSetElemNestedAttrs(alertResourceName, "notifications_group.0.notification.*", - map[string]string{ - "integration_id": alert.webhookID, - }), - resource.TestCheckTypeSetElemNestedAttrs(alertResourceName, "notifications_group.0.notification.*", - map[string]string{ - "email_recipients.0": alert.emailRecipients[0], - }), - resource.TestCheckResourceAttr(alertResourceName, "metric.0.promql.0.search_query", 
"http_requests_total{status!~\"4..\"}"), - resource.TestCheckResourceAttr(alertResourceName, "metric.0.promql.0.condition.0.threshold", strconv.Itoa(alert.threshold)), - resource.TestCheckResourceAttr(alertResourceName, "metric.0.promql.0.condition.0.sample_threshold_percentage", strconv.Itoa(alert.sampleThresholdPercentage)), - resource.TestCheckResourceAttr(alertResourceName, "metric.0.promql.0.condition.0.min_non_null_values_percentage", strconv.Itoa(alert.nonNullPercentage)), - resource.TestCheckResourceAttr(alertResourceName, "metric.0.promql.0.condition.0.time_window", alert.timeWindow), - } - if alert.condition == "less_than" { - checks = append(checks, - resource.TestCheckResourceAttr(alertResourceName, "metric.0.promql.0.condition.0.less_than", "true"), - resource.TestCheckResourceAttr(alertResourceName, "metric.0.promql.0.condition.0.manage_undetected_values.0.enable_triggering_on_undetected_values", "true"), - resource.TestCheckResourceAttr(alertResourceName, "metric.0.promql.0.condition.0.manage_undetected_values.0.auto_retire_ratio", "Never"), - ) - } else { - checks = append(checks, - resource.TestCheckResourceAttr(alertResourceName, "metric.0.promql.0.condition.0.more_than", "true"), - ) - } - checks = appendSchedulingChecks(checks, alert.daysOfWeek, alert.activityStarts, alert.activityEnds) - return checks -} - -func extractTracingAlertChecks(alert tracingAlertTestParams) []resource.TestCheckFunc { - checks := []resource.TestCheckFunc{ - resource.TestCheckResourceAttrSet(alertResourceName, "id"), - resource.TestCheckResourceAttr(alertResourceName, "enabled", "true"), - resource.TestCheckResourceAttr(alertResourceName, "name", alert.name), - resource.TestCheckResourceAttr(alertResourceName, "description", alert.description), - resource.TestCheckResourceAttr(alertResourceName, "severity", alert.severity), - resource.TestCheckTypeSetElemNestedAttrs(alertResourceName, "notifications_group.0.notification.*", - map[string]string{ - "integration_id": alert.webhookID, - }), - resource.TestCheckTypeSetElemNestedAttrs(alertResourceName, "notifications_group.0.notification.*", - map[string]string{ - "email_recipients.0": alert.emailRecipients[0], - }), - resource.TestCheckResourceAttr(alertResourceName, "tracing.0.latency_threshold_milliseconds", fmt.Sprintf("%.3f", alert.conditionLatencyMs)), - resource.TestCheckResourceAttr(alertResourceName, "tracing.0.condition.0.more_than", "true"), - resource.TestCheckResourceAttr(alertResourceName, "tracing.0.condition.0.time_window", alert.timeWindow), - resource.TestCheckResourceAttr(alertResourceName, "tracing.0.condition.0.threshold", strconv.Itoa(alert.occurrencesThreshold)), - resource.TestCheckResourceAttr(alertResourceName, "tracing.0.applications.0", "nginx"), - resource.TestCheckResourceAttr(alertResourceName, "tracing.0.subsystems.0", "subsystem-name"), - resource.TestCheckResourceAttr(alertResourceName, "tracing.0.tag_filter.0.field", "Status"), - resource.TestCheckTypeSetElemAttr(alertResourceName, "tracing.0.tag_filter.0.values.*", "filter:contains:400"), - resource.TestCheckTypeSetElemAttr(alertResourceName, "tracing.0.tag_filter.0.values.*", "500"), - } - checks = appendSchedulingChecks(checks, alert.daysOfWeek, alert.activityStarts, alert.activityEnds) - return checks -} - -func extractFlowAlertChecks(alert flowAlertTestParams) []resource.TestCheckFunc { - checks := []resource.TestCheckFunc{ - resource.TestCheckResourceAttrSet(alertResourceName, "id"), - resource.TestCheckResourceAttr(alertResourceName, "enabled", "true"), - 
resource.TestCheckResourceAttr(alertResourceName, "name", alert.name), - resource.TestCheckResourceAttr(alertResourceName, "description", alert.description), - resource.TestCheckResourceAttr(alertResourceName, "severity", alert.severity), - resource.TestCheckTypeSetElemNestedAttrs(alertResourceName, "notifications_group.0.notification.*", - map[string]string{ - "integration_id": alert.webhookID, - }), - resource.TestCheckTypeSetElemNestedAttrs(alertResourceName, "notifications_group.0.notification.*", - map[string]string{ - "email_recipients.0": alert.emailRecipients[0], - }), - resource.TestCheckResourceAttr(alertResourceName, "incident_settings.0.notify_on", alert.notifyOn), - resource.TestCheckResourceAttr(alertResourceName, "incident_settings.0.retriggering_period_minutes", strconv.Itoa(alert.notifyEveryMin)), - resource.TestCheckResourceAttr(alertResourceName, "flow.0.stage.0.group.0.sub_alerts.0.operator", "OR"), - resource.TestCheckResourceAttr(alertResourceName, "flow.0.stage.0.group.0.next_operator", "OR"), - resource.TestCheckResourceAttr(alertResourceName, "flow.0.stage.0.group.1.sub_alerts.0.operator", "AND"), - resource.TestCheckResourceAttr(alertResourceName, "flow.0.stage.0.group.1.sub_alerts.0.flow_alert.0.not", "true"), - resource.TestCheckResourceAttr(alertResourceName, "flow.0.stage.0.group.1.next_operator", "AND"), - resource.TestCheckResourceAttr(alertResourceName, "flow.0.stage.0.time_window.0.minutes", "20"), - resource.TestCheckResourceAttr(alertResourceName, "flow.0.group_by.0", "coralogix.metadata.sdkId"), - } - checks = appendSchedulingChecks(checks, alert.daysOfWeek, alert.activityStarts, alert.activityEnds) - return checks -} - -func extractCommonChecks(alert *alertCommonTestParams, alertType string) []resource.TestCheckFunc { - checks := []resource.TestCheckFunc{ - resource.TestCheckResourceAttrSet(alertResourceName, "id"), - resource.TestCheckResourceAttr(alertResourceName, "enabled", "true"), - resource.TestCheckResourceAttr(alertResourceName, "name", alert.name), - resource.TestCheckResourceAttr(alertResourceName, "description", alert.description), - resource.TestCheckResourceAttr(alertResourceName, "severity", alert.severity), - resource.TestCheckTypeSetElemNestedAttrs(alertResourceName, "notifications_group.0.notification.*", - map[string]string{ - "integration_id": alert.webhookID, - }), - resource.TestCheckTypeSetElemNestedAttrs(alertResourceName, "notifications_group.0.notification.*", - map[string]string{ - "email_recipients.0": alert.emailRecipients[0], - }), - resource.TestCheckResourceAttr(alertResourceName, "incident_settings.0.notify_on", alert.notifyOn), - resource.TestCheckResourceAttr(alertResourceName, "incident_settings.0.retriggering_period_minutes", strconv.Itoa(alert.notifyEveryMin)), - resource.TestCheckResourceAttr(alertResourceName, fmt.Sprintf("%s.0.search_query", alertType), alert.searchQuery), - } - - checks = appendSchedulingChecks(checks, alert.daysOfWeek, alert.activityStarts, alert.activityEnds) - - checks = appendSeveritiesCheck(checks, alert.alertFilters.severities, alertType) - - return checks -} - -func appendSeveritiesCheck(checks []resource.TestCheckFunc, severities []string, alertType string) []resource.TestCheckFunc { - for _, s := range severities { - checks = append(checks, - resource.TestCheckTypeSetElemAttr(alertResourceName, fmt.Sprintf("%s.0.severities.*", alertType), s)) - } - return checks -} - -func appendSchedulingChecks(checks []resource.TestCheckFunc, daysOfWeek []string, startTime, endTime string) 
[]resource.TestCheckFunc { - for _, d := range daysOfWeek { - checks = append(checks, resource.TestCheckTypeSetElemAttr(alertResourceName, "scheduling.0.time_frame.0.days_enabled.*", d)) - } - checks = append(checks, resource.TestCheckResourceAttr(alertResourceName, "scheduling.0.time_frame.0.start_time", startTime)) - checks = append(checks, resource.TestCheckResourceAttr(alertResourceName, "scheduling.0.time_frame.0.end_time", endTime)) - return checks -} - -func testAccCheckAlertDestroy(s *terraform.State) error { - client := testAccProvider.Meta().(*clientset.ClientSet).Alerts() - - ctx := context.TODO() - - for _, rs := range s.RootModule().Resources { - if rs.Type != "coralogix_alert" { - continue - } - - req := &alertsv1.GetAlertByUniqueIdRequest{ - Id: wrapperspb.String(rs.Primary.ID), - } - - resp, err := client.GetAlert(ctx, req) - if err == nil { - if resp.Alert.Id.Value == rs.Primary.ID { - return fmt.Errorf("alert still exists: %s", rs.Primary.ID) - } - } - } - - return nil -} - -func testAccCoralogixResourceAlertStandard(a *standardAlertTestParams) string { - return fmt.Sprintf(`resource "coralogix_alert" "test" { - name = "%s" - description = "%s" - severity = "%s" - - notifications_group { - notification { - integration_id = "%s" - } - notification { - email_recipients = %s - } - } - - incident_settings { - notify_on = "%s" - retriggering_period_minutes = %d - } - - scheduling { - time_zone = "%s" - time_frame { - days_enabled = %s - start_time = "%s" - end_time = "%s" - } - } - - meta_labels = { - alert_type = "security" - security_severity = "high" - } - - standard { - severities = %s - search_query = "%s" - condition { - group_by = %s - less_than = true - threshold = %d - time_window = "%s" - manage_undetected_values { - enable_triggering_on_undetected_values = true - auto_retire_ratio = "%s" - } - } - } -} -`, - a.name, a.description, a.severity, a.webhookID, sliceToString(a.emailRecipients), a.notifyOn, a.notifyEveryMin, a.timeZone, - sliceToString(a.daysOfWeek), a.activityStarts, a.activityEnds, - sliceToString(a.severities), a.searchQuery, sliceToString(a.groupBy), a.occurrencesThreshold, a.timeWindow, a.deadmanRatio) -} - -func testAccCoralogixResourceAlertRatio(a *ratioAlertTestParams) string { - return fmt.Sprintf(`resource "coralogix_alert" "test" { - name = "%s" - description = "%s" - severity = "%s" - - notifications_group { - notification { - integration_id = "%s" - } - notification { - email_recipients = %s - } - } - - incident_settings { - notify_on = "%s" - retriggering_period_minutes = %d - } - - scheduling { - time_zone = "%s" - - time_frame { - days_enabled = %s - start_time = "%s" - end_time = "%s" - } - } - - ratio { - query_1 { - severities = %s - search_query = "%s" - } - query_2 { - severities = %s - search_query = "%s" - } - condition { - more_than = true - ratio_threshold = %f - time_window = "%s" - group_by = %s - group_by_q1 = true - ignore_infinity = %t - } - } -}`, - a.name, a.description, a.severity, a.webhookID, sliceToString(a.emailRecipients), a.notifyOn, a.notifyEveryMin, a.timeZone, - sliceToString(a.daysOfWeek), a.activityStarts, a.activityEnds, - sliceToString(a.severities), a.searchQuery, sliceToString(a.q2Severities), a.q2SearchQuery, - a.ratio, a.timeWindow, sliceToString(a.groupBy), a.ignoreInfinity) -} - -func testAccCoralogixResourceAlertNewValue(a *newValueAlertTestParams) string { - return fmt.Sprintf(`resource "coralogix_alert" "test" { - name = "%s" - description = "%s" - severity = "%s" - - notifications_group { - 
notification { - integration_id = "%s" - } - notification{ - email_recipients = %s - } - } - - incident_settings { - notify_on = "%s" - retriggering_period_minutes = %d - } - - scheduling { - time_zone = "%s" - - time_frame { - days_enabled = %s - start_time = "%s" - end_time = "%s" - } - } - - new_value { - severities = %s - search_query = "%s" - condition { - key_to_track = "%s" - time_window = "%s" - } - } -}`, - a.name, a.description, a.severity, a.webhookID, sliceToString(a.emailRecipients), a.notifyOn, a.notifyEveryMin, a.timeZone, - sliceToString(a.daysOfWeek), a.activityStarts, a.activityEnds, - sliceToString(a.severities), a.searchQuery, a.keyToTrack, a.timeWindow) -} - -func testAccCoralogixResourceAlertUniqueCount(a *uniqueCountAlertTestParams) string { - return fmt.Sprintf(`resource "coralogix_alert" "test" { - name = "%s" - description = "%s" - severity = "%s" - - notifications_group { - group_by_fields = %s - notification { - integration_id = "%s" - } - notification{ - email_recipients = %s - } - } - - incident_settings { - notify_on = "%s" - retriggering_period_minutes = %d - } - - scheduling { - time_zone = "%s" - time_frame { - days_enabled = %s - start_time = "%s" - end_time = "%s" - } - } - - unique_count { - severities = %s - search_query = "%s" - condition { - unique_count_key = "%s" - max_unique_values = %d - time_window = "%s" - group_by_key = "%s" - max_unique_values_for_group_by = %d - } - } -}`, - a.name, a.description, a.severity, sliceToString([]string{a.groupByKey}), a.webhookID, sliceToString(a.emailRecipients), a.notifyOn, a.notifyEveryMin, a.timeZone, - sliceToString(a.daysOfWeek), a.activityStarts, a.activityEnds, sliceToString(a.severities), - a.searchQuery, a.uniqueCountKey, a.maxUniqueValues, a.timeWindow, a.groupByKey, a.maxUniqueValuesForGroupBy) -} - -func testAccCoralogixResourceAlertTimeRelative(a *timeRelativeAlertTestParams) string { - return fmt.Sprintf(`resource "coralogix_alert" "test" { - name = "%s" - description = "%s" - severity = "%s" - - notifications_group { - notification { - integration_id = "%s" - } - notification{ - email_recipients = %s - } - } - - incident_settings { - notify_on = "%s" - retriggering_period_minutes = %d - } - - scheduling { - time_zone = "%s" - - time_frame { - days_enabled = %s - start_time = "%s" - end_time = "%s" - } - } - - time_relative { - severities = %s - search_query = "%s" - condition { - more_than = true - group_by = %s - ratio_threshold = %d - relative_time_window = "%s" - ignore_infinity = %t - } - } -}`, - a.name, a.description, a.severity, a.webhookID, sliceToString(a.emailRecipients), a.notifyOn, a.notifyEveryMin, a.timeZone, - sliceToString(a.daysOfWeek), a.activityStarts, a.activityEnds, - sliceToString(a.severities), a.searchQuery, sliceToString(a.groupBy), a.ratioThreshold, a.relativeTimeWindow, a.ignoreInfinity) -} - -func testAccCoralogixResourceAlertMetricLucene(a *metricLuceneAlertTestParams) string { - return fmt.Sprintf(`resource "coralogix_alert" "test" { - name = "%s" - description = "%s" - severity = "%s" - - notifications_group { - notification { - integration_id = "%s" - } - notification{ - email_recipients = %s - } - } - - incident_settings { - notify_on = "%s" - retriggering_period_minutes = %d - } - - scheduling { - time_zone = "%s" - - time_frame { - days_enabled = %s - start_time = "%s" - end_time = "%s" - } - } - - metric { - lucene { - search_query = "%s" - condition { - metric_field = "%s" - arithmetic_operator = "%s" - less_than = true - threshold = %d - 
arithmetic_operator_modifier = %d - sample_threshold_percentage = %d - time_window = "%s" - group_by = %s - manage_undetected_values{ - enable_triggering_on_undetected_values = false - } - } - } - } -}`, - a.name, a.description, a.severity, a.webhookID, sliceToString(a.emailRecipients), a.notifyOn, a.notifyEveryMin, a.timeZone, - sliceToString(a.daysOfWeek), a.activityStarts, a.activityEnds, a.searchQuery, a.metricField, a.arithmeticOperator, - a.threshold, a.arithmeticOperatorModifier, a.sampleThresholdPercentage, a.timeWindow, sliceToString(a.groupBy)) -} - -func testAccCoralogixResourceAlertMetricPromql(a *metricPromqlAlertTestParams) string { - return fmt.Sprintf(`resource "coralogix_alert" "test" { - name = "%s" - description = "%s" - severity = "%s" - - notifications_group { - notification { - integration_id = "%s" - } - notification{ - email_recipients = %s - } - } - - incident_settings { - notify_on = "%s" - retriggering_period_minutes = %d - } - - scheduling { - time_zone = "%s" - time_frame { - days_enabled = %s - start_time = "%s" - end_time = "%s" - } - } - - metric { - promql { - search_query = "http_requests_total{status!~\"4..\"}" - condition { - %s = true - threshold = %d - sample_threshold_percentage = %d - time_window = "%s" - min_non_null_values_percentage = %d - } - } - } -}`, - a.name, a.description, a.severity, a.webhookID, sliceToString(a.emailRecipients), a.notifyOn, a.notifyEveryMin, a.timeZone, - sliceToString(a.daysOfWeek), a.activityStarts, a.activityEnds, a.condition, a.threshold, a.sampleThresholdPercentage, - a.timeWindow, a.nonNullPercentage) -} - -func testAccCoralogixResourceAlertTracing(a *tracingAlertTestParams) string { - return fmt.Sprintf(`resource "coralogix_alert" "test" { - name = "%s" - description = "%s" - severity = "%s" - - notifications_group { - notification { - integration_id = "%s" - } - notification{ - email_recipients = %s - } - } - - incident_settings { - notify_on = "%s" - retriggering_period_minutes = %d - } - - scheduling { - time_zone = "%s" - time_frame { - days_enabled = %s - start_time = "%s" - end_time = "%s" - } - } - - tracing { - latency_threshold_milliseconds = %f - applications = ["nginx"] - subsystems = ["subsystem-name"] - tag_filter { - field = "Status" - values = ["filter:contains:400", "500"] - } - - condition { - more_than = true - time_window = "%s" - threshold = %d - } - } -}`, - a.name, a.description, a.severity, a.webhookID, sliceToString(a.emailRecipients), a.notifyOn, a.notifyEveryMin, a.timeZone, - sliceToString(a.daysOfWeek), a.activityStarts, a.activityEnds, - a.conditionLatencyMs, a.timeWindow, a.occurrencesThreshold) -} - -func testAccCoralogixResourceAlertFLow(a *flowAlertTestParams) string { - return fmt.Sprintf(`resource "coralogix_alert" "standard_alert" { - name = "standard" - severity = "Info" - - notifications_group { - notification { - email_recipients = ["example@coralogix.com"] - retriggering_period_minutes = 1 - notify_on = "Triggered_only" - } - } - - standard { - condition { - more_than = true - threshold = 5 - time_window = "30Min" - group_by = ["coralogix.metadata.sdkId"] - } - } -} - - resource "coralogix_alert" "test" { - name = "%s" - description = "%s" - severity = "%s" - - notifications_group { - notification { - integration_id = "%s" - } - notification{ - email_recipients = %s - } - } - - incident_settings { - notify_on = "%s" - retriggering_period_minutes = %d - } - - scheduling { - time_zone = "%s" - time_frame { - days_enabled = %s - start_time = "%s" - end_time = "%s" - } - } - - 
flow { - stage { - group { - sub_alerts { - operator = "OR" - flow_alert{ - user_alert_id = coralogix_alert.standard_alert.id - } - } - next_operator = "OR" - } - group { - sub_alerts { - operator = "AND" - flow_alert{ - not = true - user_alert_id = coralogix_alert.standard_alert.id - } - } - next_operator = "AND" - } - time_window { - minutes = 20 - } - } - stage { - group { - sub_alerts { - operator = "AND" - flow_alert { - user_alert_id = coralogix_alert.standard_alert.id - } - flow_alert { - not = true - user_alert_id = coralogix_alert.standard_alert.id - } - } - next_operator = "OR" - } - } - group_by = ["coralogix.metadata.sdkId"] - } -}`, - a.name, a.description, a.severity, a.webhookID, sliceToString(a.emailRecipients), a.notifyOn, a.notifyEveryMin, a.timeZone, - sliceToString(a.daysOfWeek), a.activityStarts, a.activityEnds) -} - -type standardAlertTestParams struct { - groupBy []string - occurrencesThreshold int - timeWindow string - deadmanRatio string - alertCommonTestParams -} - -type ratioAlertTestParams struct { - q2Severities, groupBy []string - ratio float64 - timeWindow, q2SearchQuery string - ignoreInfinity bool - alertCommonTestParams -} - -type newValueAlertTestParams struct { - keyToTrack, timeWindow string - alertCommonTestParams -} - -type uniqueCountAlertTestParams struct { - uniqueCountKey, timeWindow, groupByKey string - maxUniqueValues, maxUniqueValuesForGroupBy int - alertCommonTestParams -} - -type timeRelativeAlertTestParams struct { - alertCommonTestParams - ratioThreshold int - relativeTimeWindow string - groupBy []string - ignoreInfinity bool -} - -type metricLuceneAlertTestParams struct { - alertCommonTestParams - groupBy []string - metricField, timeWindow, arithmeticOperator string - threshold, arithmeticOperatorModifier, sampleThresholdPercentage int -} - -type metricPromqlAlertTestParams struct { - alertCommonTestParams - threshold, nonNullPercentage, sampleThresholdPercentage int - timeWindow string - condition string -} - -type tracingAlertTestParams struct { - alertCommonTestParams - occurrencesThreshold int - conditionLatencyMs float64 - timeWindow string - groupBy []string -} - -type flowAlertTestParams struct { - name, description, severity string - emailRecipients []string - webhookID string - notifyEveryMin int - notifyOn string - activeWhen -} - -type alertCommonTestParams struct { - name, description, severity string - webhookID string - emailRecipients []string - notifyEveryMin int - notifyOn string - searchQuery string - alertFilters - activeWhen -} - -type alertFilters struct { - severities []string -} - -type activeWhen struct { - daysOfWeek []string - activityStarts, activityEnds, timeZone string -} - -func randActiveWhen() activeWhen { - return activeWhen{ - timeZone: selectRandomlyFromSlice(validTimeZones), - daysOfWeek: selectManyRandomlyFromSlice(alertValidDaysOfWeek), - activityStarts: randHourStr(), - activityEnds: randHourStr(), - } -} - -func randHourStr() string { - return fmt.Sprintf("%s:%s", - toTwoDigitsFormat(int32(acctest.RandIntRange(0, 24))), - toTwoDigitsFormat(int32(acctest.RandIntRange(0, 60)))) -}