diff --git a/terraform/README.md b/terraform/README.md index df6c1e19e..ba87802eb 100644 --- a/terraform/README.md +++ b/terraform/README.md @@ -1,6 +1,8 @@ # Terraform Configuration -Most everything that is part of UNIVAF runs in AWS, and the configuration for pretty much everything is stored here as [Terraform][] code. Whenever these files are changed, Terraform Cloud (a Terraform as a service offering) will pick up on it and develop a plan for what needs to actually change in AWS and, if the commit is on the `main` branch, automatically apply those changes. +The production UNIVAF service at [getmyvax.org](https://getmyvax.org) was shut down on June 15, 2023. The remaining configuration code here supports historical archives and a shutdown notice page. If you are planning to deploy your own copy of UNIVAF to AWS, you can use the former production Terraform code in the [`./deprecated`](./deprecated) folder as a guide. + +Whenever these files are changed, Terraform Cloud (a Terraform as a service offering) will pick up on it and develop a plan for what needs to actually change in AWS and, if the commit is on the `main` branch, automatically apply those changes. For more on Terraform configuration files, check out the [reference docs][terraform-docs]. diff --git a/terraform/deprecated/api-autoscaling.tf b/terraform/deprecated/api-autoscaling.tf new file mode 100644 index 000000000..8e1bf33c2 --- /dev/null +++ b/terraform/deprecated/api-autoscaling.tf @@ -0,0 +1,92 @@ +# Scale the API service up and down depending on usage. +# +# The target defines the resource to be scaled and its limits, while the +# policies do the actual scaling (and define how much and how often to do so), +# and the alarms ultimately define the conditions under which to scale and call +# the policies when those conditions are met. 
+ +resource "aws_appautoscaling_target" "target" { + service_namespace = "ecs" + resource_id = "service/${aws_ecs_cluster.main.name}/${aws_ecs_service.api_service.name}" + scalable_dimension = "ecs:service:DesiredCount" + min_capacity = 2 + max_capacity = 4 +} + +# Automatically scale capacity up by one +resource "aws_appautoscaling_policy" "up" { + name = "api_scale_up" + service_namespace = aws_appautoscaling_target.target.service_namespace + resource_id = aws_appautoscaling_target.target.resource_id + scalable_dimension = aws_appautoscaling_target.target.scalable_dimension + + step_scaling_policy_configuration { + adjustment_type = "ChangeInCapacity" + cooldown = 60 + metric_aggregation_type = "Maximum" + + step_adjustment { + metric_interval_lower_bound = 0 + scaling_adjustment = 1 + } + } +} + +# Automatically scale capacity down by one +resource "aws_appautoscaling_policy" "down" { + name = "api_scale_down" + service_namespace = aws_appautoscaling_target.target.service_namespace + resource_id = aws_appautoscaling_target.target.resource_id + scalable_dimension = aws_appautoscaling_target.target.scalable_dimension + + step_scaling_policy_configuration { + adjustment_type = "ChangeInCapacity" + cooldown = 60 + metric_aggregation_type = "Maximum" + + step_adjustment { + metric_interval_upper_bound = 0 + scaling_adjustment = -1 + } + } +} + +# CloudWatch alarm that triggers the autoscaling up policy +resource "aws_cloudwatch_metric_alarm" "service_cpu_high" { + alarm_name = "api_cpu_utilization_high" + comparison_operator = "GreaterThanOrEqualToThreshold" + datapoints_to_alarm = "1" + evaluation_periods = "1" + metric_name = "CPUUtilization" + namespace = "AWS/ECS" + period = "60" + statistic = "Average" + threshold = "55" + + dimensions = { + ClusterName = aws_ecs_cluster.main.name + ServiceName = aws_ecs_service.api_service.name + } + + alarm_actions = [aws_appautoscaling_policy.up.arn] +} + +# CloudWatch alarm that triggers the autoscaling down policy +resource 
"aws_cloudwatch_metric_alarm" "service_cpu_low" { + alarm_name = "api_cpu_utilization_low" + comparison_operator = "LessThanOrEqualToThreshold" + datapoints_to_alarm = "2" + evaluation_periods = "2" + metric_name = "CPUUtilization" + namespace = "AWS/ECS" + period = "60" + statistic = "Average" + threshold = "15" + + dimensions = { + ClusterName = aws_ecs_cluster.main.name + ServiceName = aws_ecs_service.api_service.name + } + + alarm_actions = [aws_appautoscaling_policy.down.arn] +} diff --git a/terraform/deprecated/api-domains.tf b/terraform/deprecated/api-domains.tf new file mode 100644 index 000000000..0b4d229db --- /dev/null +++ b/terraform/deprecated/api-domains.tf @@ -0,0 +1,195 @@ +# Domains and CDN/Caching Layers +# +# The DNS zone (defined by the `domain_name` variable) should be manually +# created in the AWS console, but all the records for the domain and subdomains +# are managed here in code. +# +# The domains all point to CloudFront distributions for caching and DOS +# protection. These are only turned on if there is also an SSL certificate +# (set in the `ssl_certificate_arn` variable, and which also needs to be +# created manually in the AWS console). + +locals { + # The domain of the API service's load balancer (not for public use). + api_internal_subdomain = "api.internal" + api_internal_domain = ( + var.domain_name != "" + ? "${local.api_internal_subdomain}.${var.domain_name}" + : "" + ) + + # Domain at which to serve archived, historical data (stored in S3). + data_snapshots_subdomain = "archives" + data_snapshots_domain = ( + var.domain_name != "" + ? "${local.data_snapshots_subdomain}.${var.domain_name}" + : "" + ) +} + +# Domain DNS Records ----------------------------------------------------------- + +data "aws_route53_zone" "domain_zone" { + count = var.domain_name != "" ? 1 : 0 + name = var.domain_name +} + +# DNS record for the domain specified in the `domain_name` variable. 
+resource "aws_route53_record" "api_domain_record" { + count = var.domain_name != "" ? 1 : 0 + + zone_id = data.aws_route53_zone.domain_zone[0].zone_id + name = var.domain_name + type = "A" + + alias { + name = aws_cloudfront_distribution.univaf_api_ecs[0].domain_name + zone_id = aws_cloudfront_distribution.univaf_api_ecs[0].hosted_zone_id + evaluate_target_health = false + } +} + +# The `www.` subdomain. It is an alias for the primary domain name. +resource "aws_route53_record" "api_www_domain_record" { + count = var.domain_name != "" ? 1 : 0 + zone_id = data.aws_route53_zone.domain_zone[0].zone_id + name = "www" + type = "CNAME" + records = [var.domain_name] + ttl = 300 +} + +# The `api.internal` subdomain. Used for the API service's load balancer so it +# can be secured with HTTPS. +resource "aws_route53_record" "api_load_balancer_domain_record" { + count = var.domain_name != "" ? 1 : 0 + + zone_id = data.aws_route53_zone.domain_zone[0].zone_id + name = local.api_internal_subdomain + type = "A" + + alias { + name = aws_alb.main.dns_name + zone_id = aws_alb.main.zone_id + evaluate_target_health = false + } +} + +# The `ecs.` subdomain. +# This specifically points to the deployment on ECS (as opposed to a possible +# external host). +resource "aws_route53_record" "api_ecs_domain_record" { + count = var.domain_name != "" ? 1 : 0 + + zone_id = data.aws_route53_zone.domain_zone[0].zone_id + name = "ecs" + type = "A" + + alias { + name = aws_cloudfront_distribution.univaf_api_ecs[0].domain_name + zone_id = aws_cloudfront_distribution.univaf_api_ecs[0].hosted_zone_id + evaluate_target_health = false + } +} + + +# CloudFront ------------------------------------------------------------------ + +# Use CloudFront as a caching layer in front of the API server that's running +# in ECS. Enabled only if var.domain and var.ssl_certificate_arn are provided. 
+resource "aws_cloudfront_distribution" "univaf_api_ecs" { + count = ( + var.domain_name != "" + && var.ssl_certificate_arn != "" ? 1 : 0 + ) + enabled = true + price_class = "PriceClass_100" # North America + aliases = [ + var.domain_name, + "www.${var.domain_name}", + "ecs.${var.domain_name}" + ] + http_version = "http2and3" + + origin { + origin_id = "ecs.${var.domain_name}" + domain_name = local.api_internal_domain + + custom_header { + name = var.api_cloudfront_secret_header_name + value = var.api_cloudfront_secret + } + + custom_origin_config { + http_port = 80 + https_port = 443 + origin_ssl_protocols = ["SSLv3", "TLSv1", "TLSv1.1", "TLSv1.2"] + origin_protocol_policy = "https-only" + } + } + + default_cache_behavior { + allowed_methods = ["DELETE", "GET", "HEAD", "OPTIONS", "PATCH", "POST", "PUT"] + cached_methods = ["GET", "HEAD"] + target_origin_id = "ecs.${var.domain_name}" + viewer_protocol_policy = "redirect-to-https" + min_ttl = 0 + max_ttl = 3600 + + forwarded_values { + headers = ["Host", "Origin", "Authorization", "x-api-key"] + query_string = true + + cookies { + forward = "none" + } + } + } + + viewer_certificate { + acm_certificate_arn = var.ssl_certificate_arn + ssl_support_method = "sni-only" + } + + restrictions { + geo_restriction { + restriction_type = "none" + } + } +} + +# Provide a protective caching layer and a nice domain name for the S3 bucket +# with historical data. (Allowing direct public access can get expensive.) +# Docs: https://github.com/cloudposse/terraform-aws-cloudfront-s3-cdn +module "univaf_data_snaphsots_cdn" { + count = ( + var.domain_name != "" + && var.ssl_certificate_arn != "" ? 
1 : 0 + ) + source = "cloudposse/cloudfront-s3-cdn/aws" + version = "0.90.0" + + origin_bucket = aws_s3_bucket.data_snapshots.bucket + dns_alias_enabled = true + aliases = [local.data_snapshots_domain] + parent_zone_id = data.aws_route53_zone.domain_zone[0].zone_id + acm_certificate_arn = var.ssl_certificate_arn + cloudfront_access_logging_enabled = false + + default_ttl = 60 * 60 * 24 * 7 # 1 Week + http_version = "http2and3" + allowed_methods = ["GET", "HEAD", "OPTIONS"] + cached_methods = ["GET", "HEAD", "OPTIONS"] + # By default, CORS headers are forwarded, but we don't really care about them + # since the bucket is not operating in "website" mode. + forward_header_values = [] + + # HACK: this module creates bad values if you don't explicitly set one or + # more of namespace, environment, stage, name, or attributes. + # Basically, Cloud Posse modules generate an internal ID from the above, + # and that ID is used for lots of things. Bad stuff happens if it is empty. + # This issue is marked as closed, but is not actually solved: + # https://github.com/cloudposse/terraform-aws-cloudfront-s3-cdn/issues/151 + namespace = "cp" + name = "univaf_data_snaphsots_cdn" +} diff --git a/terraform/deprecated/api.tf b/terraform/deprecated/api.tf new file mode 100644 index 000000000..72b8ecbd4 --- /dev/null +++ b/terraform/deprecated/api.tf @@ -0,0 +1,264 @@ +# API Service +# +# The API server (code in the `server` directory) runs as a service on ECS. It +# serves the public API and receives updates from the loaders. +# +# The API also has a scheduled job that runs once a day to archive the state of +# the database and log all the updates it received that day. That job is also +# a task that runs on ECS (but just a task that runs to completion, not a +# service that ECS keeps running). + +# Only HTTP(S) traffic should go through the API server's load balancer. 
+resource "aws_security_group" "lb" { + name = "univaf-api-load-balancer-security-group" + description = "Controls access to the API server load balancer" + vpc_id = aws_vpc.main.id + + # HTTP + ingress { + protocol = "tcp" + from_port = 80 + to_port = 80 + cidr_blocks = ["0.0.0.0/0"] + } + + # HTTPS + ingress { + protocol = "tcp" + from_port = 443 + to_port = 443 + cidr_blocks = ["0.0.0.0/0"] + } + + egress { + protocol = "-1" + from_port = 0 + to_port = 0 + cidr_blocks = ["0.0.0.0/0"] + } +} + +# Traffic to the tasks that run the API server in ECS should only accept +# traffic from the load balancer; the public internet and other resources in AWS +# should not be able to route directly to them. +resource "aws_security_group" "api_server_tasks" { + name = "univaf-api-server-tasks-security-group" + description = "Allow inbound access only from the load balancer" + vpc_id = aws_vpc.main.id + + ingress { + protocol = "tcp" + from_port = var.api_port + to_port = var.api_port + security_groups = [aws_security_group.lb.id] + } + + egress { + protocol = "-1" + from_port = 0 + to_port = 0 + cidr_blocks = ["0.0.0.0/0"] + } +} + +# API Service ----------------------------------------------------------------- + +# The actual task that runs on ECS. +module "api_task" { + source = "./modules/task" + + name = "api" + image = "${aws_ecr_repository.server_repository.repository_url}:${var.api_release_version}" + role = aws_iam_role.ecs_task_execution_role.arn + # Only certain CPU/Memory combinations are allowed. 
See: + # https://docs.aws.amazon.com/AmazonECS/latest/developerguide/AWS_Fargate.html#fargate-tasks-size + cpu = var.api_cpu + memory = var.api_memory + port = var.api_port + + # Enable Datadog + datadog_enabled = true + datadog_api_key = var.datadog_api_key + + env_vars = { + RELEASE = var.api_release_version + DB_HOST = module.db.host + DB_NAME = module.db.db_name + DB_USERNAME = var.db_user + DB_PASSWORD = var.db_password + DB_POOL_SIZE_DATA = format("%d", var.api_db_pool_size_data) + DB_POOL_SIZE_AVAILABILITY = format("%d", var.api_db_pool_size_availability) + API_KEYS = join(",", var.api_keys) + SENTRY_DSN = var.api_sentry_dsn + SENTRY_TRACES_SAMPLE_RATE = format("%.2f", var.api_sentry_traces_sample_rate) + PRIMARY_HOST = var.domain_name + API_SUNSET_DATE = var.api_sunset_date + } + + depends_on = [aws_alb_listener.front_end, aws_iam_role_policy_attachment.ecs_task_execution_role] +} + +# The service's load balancer. +resource "aws_alb" "main" { + name = "api-load-balancer" + subnets = aws_subnet.public.*.id + security_groups = [aws_security_group.lb.id] + drop_invalid_header_fields = true +} + +resource "aws_alb_target_group" "api" { + name = "api-target-group" + port = var.api_port + protocol = "HTTP" + vpc_id = aws_vpc.main.id + target_type = "ip" + + health_check { + healthy_threshold = "3" + interval = "30" + protocol = "HTTP" + matcher = "200" + timeout = "3" + path = var.api_health_check_path + unhealthy_threshold = "2" + } +} + +# Redirect all plain-HTTP traffic on the ALB to HTTPS +resource "aws_alb_listener" "front_end" { + load_balancer_arn = aws_alb.main.id + port = 80 + protocol = "HTTP" + + default_action { + type = "redirect" + + redirect { + port = "443" + protocol = "HTTPS" + status_code = "HTTP_301" + } + } +} + +# Terminate HTTPS; requests are forwarded to the target group by the listener +# rules below, and anything that matches no rule is denied. +resource "aws_alb_listener" "front_end_https" { + load_balancer_arn = aws_alb.main.id + port = 443 + protocol = "HTTPS" + ssl_policy = "ELBSecurityPolicy-FS-1-2-Res-2020-10" 
+ certificate_arn = var.ssl_certificate_arn_api_internal + + # Other rules below will forward if the request is OK. + default_action { + type = "fixed-response" + + fixed_response { + content_type = "text/plain" + message_body = "Access Denied" + status_code = "403" + } + } +} + +resource "aws_alb_listener_rule" "redirect_www" { + listener_arn = aws_alb_listener.front_end_https.arn + priority = 10 + + condition { + host_header { + values = ["www.${var.domain_name}"] + } + } + + action { + type = "redirect" + + redirect { + host = var.domain_name + port = "443" + protocol = "HTTPS" + status_code = "HTTP_301" + } + } +} + +# If a special secret is required for access (i.e. to allow only CloudFront and +# not any request directly from the public internet), check it before forwarding +# to the API service. +resource "aws_lb_listener_rule" "api_forward_if_secret_header" { + listener_arn = aws_alb_listener.front_end_https.arn + priority = 20 + + action { + type = "forward" + target_group_arn = aws_alb_target_group.api.arn + } + + # Add a condition requiring a secret header only if there's a secret to check. + dynamic "condition" { + for_each = var.api_cloudfront_secret != "" ? [1] : [] + content { + http_header { + http_header_name = var.api_cloudfront_secret_header_name + values = [var.api_cloudfront_secret] + } + } + } + + # There must be >= 1 condition; this is a no-op in case there's no secret. + condition { + source_ip { + values = ["0.0.0.0/0"] + } + } +} + +# Allow requests in if they have a valid API key. +resource "aws_lb_listener_rule" "api_forward_if_api_key" { + listener_arn = aws_alb_listener.front_end_https.arn + priority = 30 + + action { + type = "forward" + target_group_arn = aws_alb_target_group.api.arn + } + + condition { + http_header { + http_header_name = "x-api-key" + values = var.api_keys + } + } +} + +# This service definition keeps the API server task running, connects it to the +# load balancer, and manages multiple instances. 
(The actual scaling policies +# are in a separate file.) +resource "aws_ecs_service" "api_service" { + name = "api" + cluster = aws_ecs_cluster.main.id + task_definition = module.api_task.arn + desired_count = 1 # This will get adjusted by autoscaling rules + launch_type = "FARGATE" + + lifecycle { + # Autoscaling rules will tweak this dynamically. Ignore it so Terraform + # doesn't reset things on every run. + ignore_changes = [desired_count] + } + + network_configuration { + security_groups = [aws_security_group.api_server_tasks.id, module.db.access_group_id] + subnets = aws_subnet.public.*.id + assign_public_ip = true + } + + load_balancer { + target_group_arn = aws_alb_target_group.api.id + container_name = "api" + container_port = var.api_port + } + + depends_on = [aws_alb_listener.front_end, aws_iam_role_policy_attachment.ecs_task_execution_role, module.api_task] +} diff --git a/terraform/deprecated/bastion.tf b/terraform/deprecated/bastion.tf new file mode 100644 index 000000000..17edfe386 --- /dev/null +++ b/terraform/deprecated/bastion.tf @@ -0,0 +1,28 @@ +# Bastion Server +# +# To access to services running on a private subnet, you can SSH into the +# "bastion" server (which is in one of the public subnets and can see into the +# private ones) and do your work from that SSH session. +# +# The bastion server itself is manually created, and uses this security group. 
+resource "aws_security_group" "bastion_security_group" { + + name = "bastion-security" + description = "Allows SSH access to bastion server" + vpc_id = aws_vpc.main.id + + ingress { + description = "" + from_port = 22 + to_port = 22 + protocol = "tcp" + cidr_blocks = ["0.0.0.0/0"] + } + + egress { + from_port = 0 + to_port = 0 + protocol = "-1" + cidr_blocks = ["0.0.0.0/0"] + } +} diff --git a/terraform/deprecated/cron-data-snapshot.tf b/terraform/deprecated/cron-data-snapshot.tf new file mode 100644 index 000000000..696305fd2 --- /dev/null +++ b/terraform/deprecated/cron-data-snapshot.tf @@ -0,0 +1,35 @@ +# Daily Data Snapshot Cron Job +# +# The "daily data snapshot" task runs once a day to archive the contents of the +# database and update logs to S3 so others can do historical analysis. + +module "daily_data_snapshot_task" { + source = "./modules/task" + + name = "daily-data-snapshot" + image = "${aws_ecr_repository.server_repository.repository_url}:${var.api_release_version}" + command = ["node", "dist/scripts/availability_dump.js", "--write-to-s3", "--clear-log"] + role = aws_iam_role.ecs_task_execution_role.arn + + env_vars = { + DB_HOST = module.db.host + DB_NAME = module.db.db_name + DB_USERNAME = var.db_user + DB_PASSWORD = var.db_password + SENTRY_DSN = var.api_sentry_dsn + DATA_SNAPSHOT_S3_BUCKET = var.data_snapshot_s3_bucket + AWS_ACCESS_KEY_ID = var.data_snapshot_aws_key_id + AWS_SECRET_ACCESS_KEY = var.data_snapshot_aws_secret_key + AWS_DEFAULT_REGION = var.aws_region + } +} + +module "daily_data_snapshot_schedule" { + source = "./modules/schedule" + + schedule = "cron(0 1 * * ? 
*)" + task = module.daily_data_snapshot_task + cluster_arn = aws_ecs_cluster.main.arn + subnets = aws_subnet.public.*.id + security_groups = [aws_security_group.cron_job_tasks.id, module.db.access_group_id] +} diff --git a/terraform/deprecated/cron-loaders.tf b/terraform/deprecated/cron-loaders.tf new file mode 100644 index 000000000..3b2caa803 --- /dev/null +++ b/terraform/deprecated/cron-loaders.tf @@ -0,0 +1,105 @@ +# Data Loaders +# +# The loader is a script that reads data from various sources (e.g. Walgreens, +# PrepMod, the CDC), transforms the data into UNIVAF's format, and sends it to +# the API server to store in the database. These are basically ETL jobs. +# +# To run it on ECS, we define a separate task that runs the loader for each +# data source on a given schedule (it's possible to run multiple sources at +# once, but keeping them as separate tasks makes management a little easier). +# Some sources have additional CLI options or environment variables (e.g. API +# keys relevant to that source). + +locals { + # Define the loader tasks. The keys name the task, and the values are a map + # that can have: + # - `schedule` (required) a `cron()` or `rate()` expression for when to run. + # - `env_vars` a map of extra environment variables to set. + # - `options` list of extra CLI options to pass to the loader. + # - `sources` list of sources to load. If not set, the key will be used. + # For example, these listings are the same: + # njvss = { schedule = "rate(5 minutes)" } + # njvss = { schedule = "rate(5 minutes)", sources = ["njvss"] } + loaders = { + njvss = { + schedule = "rate(15 minutes)" + env_vars = { + NJVSS_AWS_KEY_ID = var.njvss_aws_key_id + NJVSS_AWS_SECRET_KEY = var.njvss_aws_secret_key + } + }, + waDoh = { schedule = "cron(3/30 * * * ? *)" } + cvsSmart = { schedule = "cron(3/15 * * * ? *)" } + walgreensSmart = { schedule = "cron(2/15 * * * ? *)" } + albertsonsScraper = { schedule = "cron(20/30 * * * ? 
*)" } + hyvee = { schedule = "cron(8/15 * * * ? *)" } + heb = { schedule = "cron(1/15 * * * ? *)" } + cdcApi = { + schedule = "cron(0 0,12 * * ? *)" + # CDC updates are often slow; set stale threshold to 2 days (in ms). + options = ["--stale-threshold", "172800000"] + } + riteAidScraper = { schedule = "cron(5/30 * * * ? *)" } + riteAidApi = { + schedule = "cron(0/15 * * * ? *)" + env_vars = { + RITE_AID_URL = var.rite_aid_api_url + RITE_AID_KEY = var.rite_aid_api_key + } + } + prepmod = { + schedule = "cron(9/15 * * * ? *)" + options = ["--states", "AK,WA", "--hide-missing-locations"] + } + + # Kroger appears to have shut things off entirely. We just want to run them + # enough to know if they start working again. + krogerSmart = { schedule = "cron(0 1/6 * * ? *)" } + } +} + +module "source_loader" { + source = "./modules/task" + for_each = local.loaders + + depends_on = [aws_alb.main] + + name = each.key + command = concat( + ["--filter-stale-data"], + lookup(each.value, "options", []), + lookup(each.value, "sources", [each.key]), + ) + env_vars = merge({ + # NOTE: loaders go directly to the API load balancer, not CloudFront. + API_URL = ( + local.api_internal_domain != "" + ? "https://${local.api_internal_domain}" + : "http://${aws_alb.main.dns_name}" + ) + API_KEY = var.api_keys[0] + API_CONCURRENCY = "5" + DD_API_KEY = var.datadog_api_key + SENTRY_DSN = var.loader_sentry_dsn + }, lookup(each.value, "env_vars", {})) + image = "${aws_ecr_repository.loader_repository.repository_url}:${var.loader_release_version}" + role = aws_iam_role.ecs_task_execution_role.arn + + # Only certain CPU/Memory combinations are allowed. 
See: + # https://docs.aws.amazon.com/AmazonECS/latest/developerguide/AWS_Fargate.html#fargate-tasks-size + cpu = 256 + memory = 512 +} + +module "source_loader_schedule" { + source = "./modules/schedule" + for_each = local.loaders + + schedule = each.value.schedule + task = module.source_loader[each.key] + cluster_arn = aws_ecs_cluster.main.arn + # Loaders do a lot of traffic getting data from external sources on the public + # internet, so they run in our "public" network with an internet gateway. + subnets = aws_subnet.public.*.id + security_groups = [aws_security_group.cron_job_tasks.id] +} diff --git a/terraform/deprecated/cron.tf b/terraform/deprecated/cron.tf new file mode 100644 index 000000000..e86d902b3 --- /dev/null +++ b/terraform/deprecated/cron.tf @@ -0,0 +1,14 @@ +# Cron Job-like tasks on ECS may need to reach out to other services to load +# things, but nothing else should be initiating communication with them. +resource "aws_security_group" "cron_job_tasks" { + name = "univaf-cron-job-tasks-security-group" + description = "No inbound access, in univaf-vpc, for ECS Cron Jobs" + vpc_id = aws_vpc.main.id + + egress { + protocol = "-1" + from_port = 0 + to_port = 0 + cidr_blocks = ["0.0.0.0/0"] + } +} diff --git a/terraform/deprecated/db.tf b/terraform/deprecated/db.tf new file mode 100644 index 000000000..ca3b6f53f --- /dev/null +++ b/terraform/deprecated/db.tf @@ -0,0 +1,22 @@ +# Database on RDS +# +# The application's database is managed through RDS. The definition here +# provides a nice roll-up of settings that are important to manage. See the +# `rds` module for all the guts of how this is implemented in detail. 
+module "db" { + source = "./modules/rds" + + name = "univaf-db" + database = "univaf" # RDS does not allow hyphens + password = var.db_password + username = var.db_user + + allocated_storage = var.db_size + instance_class = var.db_instance + engine = "postgres" + engine_version = "14" + performance_insights_enabled = true + + vpc_id = aws_vpc.main.id + subnet_ids = aws_subnet.private[*].id +} diff --git a/terraform/deprecated/ecr.tf b/terraform/deprecated/ecr.tf new file mode 100644 index 000000000..1f90869f6 --- /dev/null +++ b/terraform/deprecated/ecr.tf @@ -0,0 +1,70 @@ +# ECR (Elastic Container Registry) +# +# All our code is run as tasks on ECS, which means they need repositories to +# store the container images. Images are built and published to these +# repositories by GitHub Actions. + +locals { + ecr_keep_10_images_policy = <= 0.0 && var.api_sentry_traces_sample_rate <= 1.0 + error_message = "The api_sentry_traces_sample_rate variable must be between 0 and 1." + } +} + +variable "api_sunset_date" { + description = "ISO 8601 Date or Datetime when the API will be turned off." + default = "" +} + +variable "loader_sentry_dsn" { + description = "The Sentry.io DSN to use for the loaders" + default = "" + sensitive = true +} + +variable "datadog_api_key" { + description = "API key for sending metrics to Datadog" + sensitive = true +} + +variable "njvss_aws_key_id" { + sensitive = true +} + +variable "njvss_aws_secret_key" { + sensitive = true +} + +variable "rite_aid_api_url" { + description = "The Rite Aid API URL" + default = "https://api.riteaid.com/digital/Covid19-Vaccine/ProviderDetails" +} + +variable "rite_aid_api_key" { + description = "The Rite Aid API Key" + sensitive = true +} + +# These AWS variables are present to clean up warnings in terraform +variable "AWS_SECRET_ACCESS_KEY" { + default = "" +} + +variable "AWS_ACCESS_KEY_ID" { + default = "" +}