Skip to content

Commit

Permalink
Merge branch 'master' into feature/tf_gluedb
Browse files Browse the repository at this point in the history
  • Loading branch information
rpoluri authored Feb 26, 2025
2 parents 7d9e8fa + e62a45b commit 5d2556c
Show file tree
Hide file tree
Showing 2 changed files with 26 additions and 24 deletions.
6 changes: 5 additions & 1 deletion CHANGELOG.md
Original file line number Diff line number Diff line change
Expand Up @@ -3,10 +3,14 @@ All notable changes to this project will be documented in this file.

The format is based on [Keep a Changelog](http://keepachangelog.com/en/1.0.0/) and this project adheres to [Semantic Versioning](http://semver.org/spec/v2.0.0.html).

## [7.9.3] - 2025-02-24
## [7.9.4] - 2025-02-26
### Added
- Add option to provision glue databases using terraform instead of metastore startup.

## [7.9.3] - 2025-02-25
### Fixed
- Merged all S3 lifecycle configurations into `aws_s3_bucket_lifecycle_configuration` object.

## [7.9.2] - 2025-02-24
### Fixed
- Fix gluesync on k8s deployment
Expand Down
44 changes: 21 additions & 23 deletions s3.tf
Original file line number Diff line number Diff line change
Expand Up @@ -50,33 +50,12 @@ resource "aws_s3_bucket" "apiary_data_bucket" {
target_bucket = local.enable_apiary_s3_log_management ? aws_s3_bucket.apiary_managed_logs_bucket[0].id : var.apiary_log_bucket
target_prefix = "${var.apiary_log_prefix}${each.value["data_bucket"]}/"
}

lifecycle_rule {
id = "cost_optimization"
enabled = true

abort_incomplete_multipart_upload_days = var.s3_lifecycle_abort_incomplete_multipart_upload_days

dynamic "transition" {
for_each = each.value["s3_object_expiration_days_num"] == "-1" || each.value["s3_lifecycle_policy_transition_period"] < each.value["s3_object_expiration_days_num"] ? [1] : []
content {
days = each.value["s3_lifecycle_policy_transition_period"]
storage_class = each.value["s3_storage_class"]
}
}

dynamic "expiration" {
for_each = each.value["s3_object_expiration_days_num"] != "-1" ? [1] : []
content {
days = each.value["s3_object_expiration_days_num"]
}
}
}
}

resource "aws_s3_bucket_versioning" "apiary_data_bucket_versioning" {
for_each = {
for schema in local.schemas_info : "${schema["schema_name"]}" => schema
if lookup(schema, "s3_versioning_enabled", "") != ""
}
bucket = each.value["data_bucket"]
versioning_configuration {
Expand All @@ -89,7 +68,7 @@ resource "aws_s3_bucket_lifecycle_configuration" "apiary_data_bucket_versioning_
for schema in local.schemas_info : "${schema["schema_name"]}" => schema
}
bucket = each.value["data_bucket"]
# Rule enabled when expiration max days is set
# Rule for s3 versioning expiration
rule {
id = "expire-noncurrent-versions-days"
status = lookup(each.value, "s3_versioning_enabled", "") != "" ? "Enabled" : "Disabled"
Expand All @@ -98,6 +77,25 @@ resource "aws_s3_bucket_lifecycle_configuration" "apiary_data_bucket_versioning_
noncurrent_days = tonumber(lookup(each.value, "s3_versioning_expiration_days", var.s3_versioning_expiration_days))
}
}
# Rule: S3 storage-class (intelligent tiering) transition
# Transition objects to the schema-configured storage class after the
# configured number of days. The rule is Enabled only when object expiration
# is disabled (sentinel "-1") or when the transition period is shorter than
# the expiration period, so objects are never scheduled to transition after
# they would already have expired.
# NOTE(review): "-1" is compared as a string but the same field is also used
# with "<" below — presumably HCL coerces it to a number; confirm the
# schemas_info values are consistently typed.
rule {
id = "cost_optimization_transition"
status = each.value["s3_object_expiration_days_num"] == "-1" || each.value["s3_lifecycle_policy_transition_period"] < each.value["s3_object_expiration_days_num"] ? "Enabled" : "Disabled"

transition {
# Days after object creation before moving to the target storage class.
days          = each.value["s3_lifecycle_policy_transition_period"]
storage_class = each.value["s3_storage_class"]
}
}
# Rule: S3 object expiration
# Expire (delete) current object versions after the schema-configured number
# of days. The rule is Enabled unless the schema opts out with the sentinel
# value "-1"; a Disabled rule is still emitted so the lifecycle configuration
# keeps a stable shape across schemas.
rule {
id = "cost_optimization_expiration"
status = each.value["s3_object_expiration_days_num"] != "-1" ? "Enabled" : "Disabled"

expiration {
# Days after object creation before the object is expired.
days = each.value["s3_object_expiration_days_num"]
}
}
}

resource "aws_s3_bucket_inventory" "apiary_bucket" {
Expand Down

0 comments on commit 5d2556c

Please sign in to comment.