diff --git a/Makefile b/Makefile index e091370b..5c471b8e 100644 --- a/Makefile +++ b/Makefile @@ -51,4 +51,4 @@ help: @echo "" @echo "Options:" @echo " MODULE= Run tests for specific module (e.g., MODULE=frontend)" - @echo " VERBOSE=1 Show output of passing tests (integration tests only)" + @echo " VERBOSE=1 Show output of passing tests (integration tests only)" \ No newline at end of file diff --git a/azure-apps/deployment/modules/README.md b/azure-apps/deployment/modules/README.md new file mode 100644 index 00000000..1e1d8ae2 --- /dev/null +++ b/azure-apps/deployment/modules/README.md @@ -0,0 +1,242 @@ +# Azure App Service Terraform Module + +This Terraform module provisions an Azure App Service with: +- Docker container support +- Custom domain with DNS A record +- Free managed SSL certificate +- Deployment slots for blue-green / canary deployments +- Environment variables from JSON configuration + +## Architecture + +``` + ┌─────────────────────────────────────────┐ + │ Azure DNS Zone │ + │ (example.com) │ + └──────────────┬──────────────────────────┘ + │ + ┌──────────────▼──────────────┐ + │ A Record / CNAME │ + │ api.example.com │ + └──────────────┬──────────────┘ + │ + ┌──────────────▼──────────────┐ + │ Custom Domain Binding │ + │ + Managed SSL Cert │ + └──────────────┬──────────────┘ + │ + ┌─────────────────────────┴─────────────────────────┐ + │ │ + ┌──────────▼──────────┐ ┌──────────────▼──────────┐ + │ Production Slot │ ◄───── Traffic ───────► │ Staging Slot │ + │ (90% traffic) │ Splitting │ (10% traffic) │ + │ │ │ │ + │ ┌───────────────┐ │ │ ┌───────────────┐ │ + │ │ Docker Image │ │ │ │ Docker Image │ │ + │ │ myapp:v1.0.0 │ │ │ │ myapp:v1.1.0 │ │ + │ └───────────────┘ │ │ └───────────────┘ │ + └─────────────────────┘ └─────────────────────────┘ + │ │ + └─────────────────────────┬─────────────────────────┘ + │ + ┌──────────────▼──────────────┐ + │ App Service Plan │ + │ (S1 - Standard) │ + └─────────────────────────────┘ +``` + +## Prerequisites + +1. 
Azure CLI installed and authenticated (`az login`) +2. Terraform >= 1.0 +3. An existing Azure Resource Group +4. An existing Azure DNS Zone (or you can modify to create one) +5. Docker images pushed to a container registry (ACR, Docker Hub, etc.) + +## Quick Start + +### 1. Clone and configure + +```bash +# Copy the example tfvars +cp terraform.tfvars.example terraform.tfvars + +# Edit with your values +vim terraform.tfvars +``` + +### 2. Set sensitive variables via environment + +```bash +# For Azure Container Registry +export TF_VAR_docker_registry_username="your-acr-username" +export TF_VAR_docker_registry_password="your-acr-password" + +# Or use Azure managed identity (recommended for ACR) +``` + +### 3. Deploy + +```bash +terraform init +terraform plan +terraform apply +``` + +## Variables + +| Name | Description | Type | Required | +|------|-------------|------|----------| +| `resource_group_name` | Name of the resource group | string | yes | +| `app_name` | Globally unique name for the App Service | string | yes | +| `docker_image` | Docker image with tag (e.g., `myapp:v1.0.0`) | string | yes | +| `dns_zone_name` | Azure DNS zone name | string | yes | +| `dns_zone_resource_group` | Resource group containing DNS zone | string | yes | +| `resource_tags` | Tags to apply to resources | map(string) | no | +| `parameter_json` | JSON string with environment variables | string | no | +| `custom_subdomain` | Subdomain (use `@` for apex) | string | no | +| `docker_registry_url` | Registry URL | string | no | +| `sku_name` | App Service Plan SKU | string | no | +| `enable_staging_slot` | Create staging slot | bool | no | + +## Environment Variables (parameter_json) + +Pass environment variables as a JSON string: + +```hcl +parameter_json = < 0 ? 
1 : 0 + name = "${var.app_name}-alerts" + resource_group_name = var.resource_group_name + short_name = substr(var.app_name, 0, 12) + + tags = local.common_tags + + dynamic "email_receiver" { + for_each = var.alert_email_recipients + content { + name = "email-${email_receiver.key}" + email_address = email_receiver.value + } + } +} + +# --------------------------------------------------------------------------- +# HTTP 5XX ERRORS ALERT +# --------------------------------------------------------------------------- +resource "azurerm_monitor_metric_alert" "http_5xx" { + count = var.enable_alerts ? 1 : 0 + name = "${var.app_name}-http-5xx" + resource_group_name = var.resource_group_name + scopes = [azurerm_linux_web_app.main.id] + description = "Alert when HTTP 5xx errors exceed threshold" + severity = 2 + frequency = "PT1M" + window_size = "PT5M" + + tags = local.common_tags + + criteria { + metric_namespace = "Microsoft.Web/sites" + metric_name = "Http5xx" + aggregation = "Total" + operator = "GreaterThan" + threshold = var.alert_http_5xx_threshold + } + + dynamic "action" { + for_each = length(var.alert_email_recipients) > 0 ? [1] : [] + content { + action_group_id = azurerm_monitor_action_group.main[0].id + } + } +} + +# --------------------------------------------------------------------------- +# RESPONSE TIME ALERT +# --------------------------------------------------------------------------- +resource "azurerm_monitor_metric_alert" "response_time" { + count = var.enable_alerts ? 
1 : 0 + name = "${var.app_name}-response-time" + resource_group_name = var.resource_group_name + scopes = [azurerm_linux_web_app.main.id] + description = "Alert when response time exceeds threshold" + severity = 2 + frequency = "PT1M" + window_size = "PT5M" + + tags = local.common_tags + + criteria { + metric_namespace = "Microsoft.Web/sites" + metric_name = "HttpResponseTime" + aggregation = "Average" + operator = "GreaterThan" + threshold = var.alert_response_time_threshold_ms / 1000 # Convert to seconds + } + + dynamic "action" { + for_each = length(var.alert_email_recipients) > 0 ? [1] : [] + content { + action_group_id = azurerm_monitor_action_group.main[0].id + } + } +} + +# --------------------------------------------------------------------------- +# CPU PERCENTAGE ALERT +# --------------------------------------------------------------------------- +resource "azurerm_monitor_metric_alert" "cpu_percentage" { + count = var.enable_alerts ? 1 : 0 + name = "${var.app_name}-cpu-high" + resource_group_name = var.resource_group_name + scopes = [azurerm_service_plan.main.id] + description = "Alert when CPU percentage exceeds threshold" + severity = 2 + frequency = "PT1M" + window_size = "PT5M" + + tags = local.common_tags + + criteria { + metric_namespace = "Microsoft.Web/serverfarms" + metric_name = "CpuPercentage" + aggregation = "Average" + operator = "GreaterThan" + threshold = var.alert_cpu_percentage_threshold + } + + dynamic "action" { + for_each = length(var.alert_email_recipients) > 0 ? [1] : [] + content { + action_group_id = azurerm_monitor_action_group.main[0].id + } + } +} + +# --------------------------------------------------------------------------- +# MEMORY PERCENTAGE ALERT +# --------------------------------------------------------------------------- +resource "azurerm_monitor_metric_alert" "memory_percentage" { + count = var.enable_alerts ? 
1 : 0 + name = "${var.app_name}-memory-high" + resource_group_name = var.resource_group_name + scopes = [azurerm_service_plan.main.id] + description = "Alert when memory percentage exceeds threshold" + severity = 2 + frequency = "PT1M" + window_size = "PT5M" + + tags = local.common_tags + + criteria { + metric_namespace = "Microsoft.Web/serverfarms" + metric_name = "MemoryPercentage" + aggregation = "Average" + operator = "GreaterThan" + threshold = var.alert_memory_percentage_threshold + } + + dynamic "action" { + for_each = length(var.alert_email_recipients) > 0 ? [1] : [] + content { + action_group_id = azurerm_monitor_action_group.main[0].id + } + } +} + +# --------------------------------------------------------------------------- +# HEALTH CHECK ALERT +# --------------------------------------------------------------------------- +resource "azurerm_monitor_metric_alert" "health_check" { + count = var.enable_alerts && var.health_check_path != "" ? 1 : 0 + name = "${var.app_name}-health-check-failed" + resource_group_name = var.resource_group_name + scopes = [azurerm_linux_web_app.main.id] + description = "Alert when health check status is unhealthy" + severity = 1 + frequency = "PT1M" + window_size = "PT5M" + + tags = local.common_tags + + criteria { + metric_namespace = "Microsoft.Web/sites" + metric_name = "HealthCheckStatus" + aggregation = "Average" + operator = "LessThan" + threshold = 100 # 100 = healthy, lower = unhealthy instances + } + + dynamic "action" { + for_each = length(var.alert_email_recipients) > 0 ? 
[1] : [] + content { + action_group_id = azurerm_monitor_action_group.main[0].id + } + } +} diff --git a/azure-apps/deployment/modules/app_service.tf b/azure-apps/deployment/modules/app_service.tf new file mode 100644 index 00000000..fc34e5b4 --- /dev/null +++ b/azure-apps/deployment/modules/app_service.tf @@ -0,0 +1,128 @@ +# ============================================================================= +# APP SERVICE (Linux Web App) +# ============================================================================= + +resource "azurerm_linux_web_app" "main" { + name = var.app_name + location = var.location + resource_group_name = var.resource_group_name + service_plan_id = azurerm_service_plan.main.id + https_only = var.https_only + client_affinity_enabled = var.client_affinity_enabled + virtual_network_subnet_id = var.enable_vnet_integration ? var.vnet_integration_subnet_id : null + + tags = local.common_tags + + # --------------------------------------------------------------------------- + # SITE CONFIGURATION + # --------------------------------------------------------------------------- + site_config { + always_on = var.always_on + http2_enabled = var.http2_enabled + websockets_enabled = var.websockets_enabled + ftps_state = var.ftps_state + minimum_tls_version = var.minimum_tls_version + vnet_route_all_enabled = var.vnet_route_all_enabled + app_command_line = var.app_command_line != "" ? var.app_command_line : null + + # Health check + health_check_path = var.health_check_path != "" ? var.health_check_path : null + health_check_eviction_time_in_min = var.health_check_path != "" ? 
var.health_check_eviction_time_in_min : null + + # Docker configuration + # Uses local.effective_docker_image which preserves the current production image + # during blue-green deployments (when preserve_production_image is enabled) + application_stack { + docker_registry_url = var.docker_registry_url + docker_image_name = local.effective_docker_image + docker_registry_username = local.docker_registry_username + docker_registry_password = local.docker_registry_password + } + + # IP restrictions + ip_restriction_default_action = var.ip_restriction_default_action + + dynamic "ip_restriction" { + for_each = var.ip_restrictions + content { + name = ip_restriction.value.name + ip_address = ip_restriction.value.ip_address + service_tag = ip_restriction.value.service_tag + priority = ip_restriction.value.priority + action = ip_restriction.value.action + } + } + + # Auto-heal configuration (only set when enabled - provider requires both attributes together) + auto_heal_enabled = var.enable_auto_heal ? true : null + + dynamic "auto_heal_setting" { + for_each = var.enable_auto_heal ? 
[1] : [] + content { + trigger { + slow_request { + count = var.auto_heal_slow_request_count + interval = var.auto_heal_slow_request_interval + time_taken = var.auto_heal_slow_request_time_taken + } + + status_code { + count = var.auto_heal_status_code_count + interval = var.auto_heal_status_code_interval + status_code_range = var.auto_heal_status_code_range + } + } + + action { + action_type = "Recycle" + minimum_process_execution_time = var.auto_heal_min_process_time + } + } + } + } + + # --------------------------------------------------------------------------- + # APP SETTINGS (Environment Variables) + # --------------------------------------------------------------------------- + app_settings = local.app_settings + + # --------------------------------------------------------------------------- + # LOGGING + # --------------------------------------------------------------------------- + dynamic "logs" { + for_each = var.enable_logging ? [1] : [] + content { + detailed_error_messages = var.detailed_error_messages + failed_request_tracing = var.failed_request_tracing + + http_logs { + file_system { + retention_in_days = var.http_logs_retention_days + retention_in_mb = var.http_logs_retention_mb + } + } + + application_logs { + file_system_level = var.application_logs_level + } + } + } + + # --------------------------------------------------------------------------- + # IDENTITY + # --------------------------------------------------------------------------- + dynamic "identity" { + for_each = local.identity_type != null ? [1] : [] + content { + type = local.identity_type + identity_ids = length(var.user_assigned_identity_ids) > 0 ? 
var.user_assigned_identity_ids : null + } + } + + # --------------------------------------------------------------------------- + # STICKY SETTINGS (preserved during slot swap) + # --------------------------------------------------------------------------- + sticky_settings { + app_setting_names = ["SLOT_NAME"] + } +} diff --git a/azure-apps/deployment/modules/app_service.tftest.hcl b/azure-apps/deployment/modules/app_service.tftest.hcl new file mode 100644 index 00000000..8b04197a --- /dev/null +++ b/azure-apps/deployment/modules/app_service.tftest.hcl @@ -0,0 +1,1818 @@ +# ============================================================================= +# Unit tests for azure-apps/deployment/modules +# +# Run: tofu test +# ============================================================================= + +mock_provider "azurerm" { + mock_resource "azurerm_linux_web_app" { + defaults = { + id = "/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/test-rg/providers/Microsoft.Web/sites/my-test-app" + default_hostname = "my-test-app.azurewebsites.net" + outbound_ip_addresses = "1.2.3.4,5.6.7.8" + outbound_ip_address_list = ["1.2.3.4", "5.6.7.8"] + possible_outbound_ip_addresses = "1.2.3.4,5.6.7.8,9.10.11.12" + custom_domain_verification_id = "abc123" + } + } + + mock_resource "azurerm_service_plan" { + defaults = { + id = "/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/test-rg/providers/Microsoft.Web/serverFarms/my-test-app-plan" + } + } + + mock_resource "azurerm_linux_web_app_slot" { + defaults = { + id = "/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/test-rg/providers/Microsoft.Web/sites/my-test-app/slots/staging" + default_hostname = "my-test-app-staging.azurewebsites.net" + } + } + + mock_resource "azurerm_log_analytics_workspace" { + defaults = { + id = "/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/test-rg/providers/Microsoft.OperationalInsights/workspaces/my-test-app-logs" + } + } + + 
mock_resource "azurerm_application_insights" { + defaults = { + id = "/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/test-rg/providers/microsoft.insights/components/my-test-app-insights" + connection_string = "InstrumentationKey=00000000-0000-0000-0000-000000000000" + instrumentation_key = "00000000-0000-0000-0000-000000000000" + } + } + + mock_resource "azurerm_monitor_autoscale_setting" { + defaults = { + id = "/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/test-rg/providers/microsoft.insights/autoscalesettings/my-test-app-autoscale" + } + } + + mock_resource "azurerm_monitor_diagnostic_setting" { + defaults = { + id = "/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/test-rg/providers/Microsoft.Insights/diagnosticSettings/my-test-app-diagnostics" + } + } + + mock_data "azurerm_dns_zone" { + defaults = { + id = "/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/dns-rg/providers/Microsoft.Network/dnszones/example.com" + } + } + + mock_resource "azurerm_dns_a_record" { + defaults = { + id = "/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/dns-rg/providers/Microsoft.Network/dnszones/example.com/A/@" + } + } + + mock_resource "azurerm_dns_cname_record" { + defaults = { + id = "/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/dns-rg/providers/Microsoft.Network/dnszones/example.com/CNAME/api" + } + } + + mock_resource "azurerm_dns_txt_record" { + defaults = { + id = "/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/dns-rg/providers/Microsoft.Network/dnszones/example.com/TXT/asuid" + } + } + + mock_resource "azurerm_app_service_custom_hostname_binding" { + defaults = { + id = "/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/test-rg/providers/Microsoft.Web/sites/my-test-app/hostNameBindings/api.example.com" + } + } + + mock_resource "azurerm_app_service_managed_certificate" { + defaults = { + id = 
"/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/test-rg/providers/Microsoft.Web/certificates/my-test-app-cert" + } + } + + mock_resource "azurerm_app_service_certificate_binding" { + defaults = { + id = "/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/test-rg/providers/Microsoft.Web/sites/my-test-app/hostNameBindings/api.example.com/certificate" + } + } + + mock_resource "azurerm_monitor_action_group" { + defaults = { + id = "/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/test-rg/providers/microsoft.insights/actionGroups/my-test-app-alerts" + } + } + + mock_resource "azurerm_monitor_metric_alert" { + defaults = { + id = "/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/test-rg/providers/microsoft.insights/metricAlerts/my-test-app-alert" + } + } +} + +# ============================================================================= +# DEFAULT VARIABLES +# ============================================================================= +variables { + resource_group_name = "test-rg" + location = "eastus" + app_name = "my-test-app" + docker_image = "nginx:latest" + sku_name = "P1v3" + resource_tags = { + Environment = "test" + Team = "platform" + } +} + +# ============================================================================= +# CORE CONFIGURATION TESTS +# ============================================================================= + +run "core_app_name_set_correctly" { + command = plan + assert { + condition = azurerm_linux_web_app.main.name == "my-test-app" + error_message = "App service name should be 'my-test-app'" + } +} + +run "core_location_set_correctly" { + command = plan + assert { + condition = azurerm_linux_web_app.main.location == "eastus" + error_message = "Location should be 'eastus'" + } + assert { + condition = azurerm_service_plan.main.location == "eastus" + error_message = "Service plan location should be 'eastus'" + } +} + +run "core_resource_group_set_correctly" { + 
command = plan + assert { + condition = azurerm_linux_web_app.main.resource_group_name == "test-rg" + error_message = "Resource group should be 'test-rg'" + } +} + +run "core_tags_propagated_to_all_resources" { + command = plan + assert { + condition = azurerm_service_plan.main.tags["Environment"] == "test" + error_message = "Service plan should have Environment tag" + } + assert { + condition = azurerm_service_plan.main.tags["Team"] == "platform" + error_message = "Service plan should have Team tag" + } + assert { + condition = azurerm_service_plan.main.tags["managed_by"] == "terraform" + error_message = "Service plan should have managed_by tag" + } +} + +run "core_parameter_json_parsed_to_env_vars" { + command = plan + variables { + parameter_json = "{\"DATABASE_URL\": \"postgres://localhost\", \"API_KEY\": \"secret123\"}" + } + assert { + condition = local.env_variables["DATABASE_URL"] == "postgres://localhost" + error_message = "DATABASE_URL should be parsed from parameter_json" + } + assert { + condition = local.env_variables["API_KEY"] == "secret123" + error_message = "API_KEY should be parsed from parameter_json" + } +} + +run "core_app_settings_include_base_settings" { + command = plan + assert { + condition = local.base_app_settings["WEBSITES_ENABLE_APP_SERVICE_STORAGE"] == "false" + error_message = "App settings should include WEBSITES_ENABLE_APP_SERVICE_STORAGE" + } + assert { + condition = local.base_app_settings["DOCKER_ENABLE_CI"] == "true" + error_message = "App settings should include DOCKER_ENABLE_CI" + } +} + +# ============================================================================= +# APP SERVICE PLAN TESTS +# ============================================================================= + +run "plan_default_name_generated" { + command = plan + assert { + condition = azurerm_service_plan.main.name == "my-test-app-plan" + error_message = "Service plan name should default to 'my-test-app-plan'" + } +} + +run "plan_custom_name_override" { + 
command = plan + variables { + service_plan_name = "custom-plan-name" + } + assert { + condition = azurerm_service_plan.main.name == "custom-plan-name" + error_message = "Service plan name should be 'custom-plan-name'" + } +} + +run "plan_sku_name_set" { + command = plan + assert { + condition = azurerm_service_plan.main.sku_name == "P1v3" + error_message = "Service plan SKU should be 'P1v3'" + } +} + +run "plan_os_type_default_linux" { + command = plan + assert { + condition = azurerm_service_plan.main.os_type == "Linux" + error_message = "OS type should default to 'Linux'" + } +} + +run "plan_per_site_scaling_disabled_by_default" { + command = plan + assert { + condition = azurerm_service_plan.main.per_site_scaling_enabled == false + error_message = "Per-site scaling should be disabled by default" + } +} + +run "plan_per_site_scaling_enabled_when_set" { + command = plan + variables { + per_site_scaling_enabled = true + } + assert { + condition = azurerm_service_plan.main.per_site_scaling_enabled == true + error_message = "Per-site scaling should be enabled when set" + } +} + +run "plan_zone_balancing_disabled_by_default" { + command = plan + assert { + condition = azurerm_service_plan.main.zone_balancing_enabled == false + error_message = "Zone balancing should be disabled by default" + } +} + +run "plan_zone_balancing_enabled_when_set" { + command = plan + variables { + zone_balancing_enabled = true + } + assert { + condition = azurerm_service_plan.main.zone_balancing_enabled == true + error_message = "Zone balancing should be enabled when set" + } +} + +# ============================================================================= +# DOCKER / CONTAINER TESTS +# ============================================================================= + +run "docker_image_set_correctly" { + command = plan + assert { + condition = azurerm_linux_web_app.main.site_config[0].application_stack[0].docker_image_name == "nginx:latest" + error_message = "Docker image should be 
'nginx:latest'" + } +} + +run "docker_registry_url_default" { + command = plan + assert { + condition = azurerm_linux_web_app.main.site_config[0].application_stack[0].docker_registry_url == "https://index.docker.io" + error_message = "Docker registry URL should default to Docker Hub" + } +} + +run "docker_registry_url_custom" { + command = plan + variables { + docker_registry_url = "https://myregistry.azurecr.io" + } + assert { + condition = azurerm_linux_web_app.main.site_config[0].application_stack[0].docker_registry_url == "https://myregistry.azurecr.io" + error_message = "Docker registry URL should be set to custom value" + } +} + +run "docker_credentials_null_when_empty" { + command = plan + assert { + condition = local.docker_registry_username == null + error_message = "Docker registry username should be null when not provided" + } + assert { + condition = local.docker_registry_password == null + error_message = "Docker registry password should be null when not provided" + } +} + +run "docker_credentials_set_when_provided" { + command = plan + variables { + docker_registry_username = "myuser" + docker_registry_password = "mypassword" + } + assert { + condition = local.docker_registry_username == "myuser" + error_message = "Docker registry username should be set" + } + assert { + condition = local.docker_registry_password == "mypassword" + error_message = "Docker registry password should be set" + } +} + +# ============================================================================= +# APP SERVICE CONFIGURATION TESTS +# ============================================================================= + +run "config_always_on_default_true" { + command = plan + assert { + condition = azurerm_linux_web_app.main.site_config[0].always_on == true + error_message = "Always on should be enabled by default" + } +} + +run "config_always_on_can_be_disabled" { + command = plan + variables { + always_on = false + } + assert { + condition = 
azurerm_linux_web_app.main.site_config[0].always_on == false + error_message = "Always on should be disabled when set to false" + } +} + +run "config_https_only_default_true" { + command = plan + assert { + condition = azurerm_linux_web_app.main.https_only == true + error_message = "HTTPS only should be enabled by default" + } +} + +run "config_https_only_can_be_disabled" { + command = plan + variables { + https_only = false + } + assert { + condition = azurerm_linux_web_app.main.https_only == false + error_message = "HTTPS only should be disabled when set to false" + } +} + +run "config_http2_enabled_default_true" { + command = plan + assert { + condition = azurerm_linux_web_app.main.site_config[0].http2_enabled == true + error_message = "HTTP/2 should be enabled by default" + } +} + +run "config_websockets_disabled_by_default" { + command = plan + assert { + condition = azurerm_linux_web_app.main.site_config[0].websockets_enabled == false + error_message = "WebSockets should be disabled by default" + } +} + +run "config_websockets_enabled_when_set" { + command = plan + variables { + websockets_enabled = true + } + assert { + condition = azurerm_linux_web_app.main.site_config[0].websockets_enabled == true + error_message = "WebSockets should be enabled when set" + } +} + +run "config_ftps_state_default_disabled" { + command = plan + assert { + condition = azurerm_linux_web_app.main.site_config[0].ftps_state == "Disabled" + error_message = "FTPS should be disabled by default" + } +} + +run "config_ftps_state_can_be_changed" { + command = plan + variables { + ftps_state = "FtpsOnly" + } + assert { + condition = azurerm_linux_web_app.main.site_config[0].ftps_state == "FtpsOnly" + error_message = "FTPS state should be 'FtpsOnly'" + } +} + +run "config_minimum_tls_version_default_1_2" { + command = plan + assert { + condition = azurerm_linux_web_app.main.site_config[0].minimum_tls_version == "1.2" + error_message = "Minimum TLS version should default to 1.2" + } +} + 
+run "config_client_affinity_disabled_by_default" { + command = plan + assert { + condition = azurerm_linux_web_app.main.client_affinity_enabled == false + error_message = "Client affinity should be disabled by default" + } +} + +run "config_client_affinity_enabled_when_set" { + command = plan + variables { + client_affinity_enabled = true + } + assert { + condition = azurerm_linux_web_app.main.client_affinity_enabled == true + error_message = "Client affinity should be enabled when set" + } +} + +run "config_app_command_line_null_when_empty" { + command = plan + assert { + condition = azurerm_linux_web_app.main.site_config[0].app_command_line == null + error_message = "App command line should be null when not provided" + } +} + +run "config_app_command_line_set_when_provided" { + command = plan + variables { + app_command_line = "npm start" + } + assert { + condition = azurerm_linux_web_app.main.site_config[0].app_command_line == "npm start" + error_message = "App command line should be 'npm start'" + } +} + +# ============================================================================= +# HEALTH CHECK TESTS +# ============================================================================= + +run "health_check_disabled_by_default" { + command = plan + assert { + condition = azurerm_linux_web_app.main.site_config[0].health_check_path == null + error_message = "Health check path should be null when not configured" + } +} + +run "health_check_enabled_when_path_provided" { + command = plan + variables { + health_check_path = "/health" + } + assert { + condition = azurerm_linux_web_app.main.site_config[0].health_check_path == "/health" + error_message = "Health check path should be '/health'" + } +} + +run "health_check_eviction_time_set_when_path_provided" { + command = plan + variables { + health_check_path = "/health" + health_check_eviction_time_in_min = 5 + } + assert { + condition = azurerm_linux_web_app.main.site_config[0].health_check_eviction_time_in_min == 5 + 
error_message = "Health check eviction time should be 5 minutes" + } +} + +# ============================================================================= +# DEPLOYMENT SLOTS TESTS +# ============================================================================= + +run "slot_disabled_by_default" { + command = plan + assert { + condition = length(azurerm_linux_web_app_slot.staging) == 0 + error_message = "Staging slot should not be created by default" + } +} + +run "slot_created_when_enabled" { + command = plan + variables { + enable_staging_slot = true + } + assert { + condition = length(azurerm_linux_web_app_slot.staging) == 1 + error_message = "Staging slot should be created when enabled" + } +} + +run "slot_default_name_staging" { + command = plan + variables { + enable_staging_slot = true + } + assert { + condition = azurerm_linux_web_app_slot.staging[0].name == "staging" + error_message = "Slot name should default to 'staging'" + } +} + +run "slot_custom_name" { + command = plan + variables { + enable_staging_slot = true + staging_slot_name = "preview" + } + assert { + condition = azurerm_linux_web_app_slot.staging[0].name == "preview" + error_message = "Slot name should be 'preview'" + } +} + +run "slot_always_on_default_false" { + command = plan + variables { + enable_staging_slot = true + } + assert { + condition = azurerm_linux_web_app_slot.staging[0].site_config[0].always_on == false + error_message = "Staging slot always_on should default to false" + } +} + +run "slot_always_on_enabled_when_set" { + command = plan + variables { + enable_staging_slot = true + staging_slot_always_on = true + } + assert { + condition = azurerm_linux_web_app_slot.staging[0].site_config[0].always_on == true + error_message = "Staging slot always_on should be enabled when set" + } +} + +run "slot_inherits_https_only" { + command = plan + variables { + enable_staging_slot = true + } + assert { + condition = azurerm_linux_web_app_slot.staging[0].https_only == true + error_message 
= "Staging slot should inherit https_only setting" + } +} + +run "slot_has_slot_name_env_var" { + command = plan + variables { + enable_staging_slot = true + staging_slot_name = "staging" + } + assert { + condition = local.staging_app_settings["SLOT_NAME"] == "staging" + error_message = "Staging slot should have SLOT_NAME env var" + } +} + +run "slot_docker_image_defaults_to_production" { + command = plan + variables { + enable_staging_slot = true + docker_image = "myapp:v1.0.0" + } + assert { + condition = local.staging_docker_image == "myapp:v1.0.0" + error_message = "Staging slot docker image should default to production image" + } + assert { + condition = azurerm_linux_web_app_slot.staging[0].site_config[0].application_stack[0].docker_image_name == "myapp:v1.0.0" + error_message = "Staging slot should use the default docker image" + } +} + +run "slot_docker_image_can_be_different" { + command = plan + variables { + enable_staging_slot = true + docker_image = "myapp:v1.0.0" + staging_docker_image = "myapp:v2.0.0" + } + assert { + condition = local.staging_docker_image == "myapp:v2.0.0" + error_message = "Staging slot docker image should be the specified value" + } + assert { + condition = azurerm_linux_web_app.main.site_config[0].application_stack[0].docker_image_name == "myapp:v1.0.0" + error_message = "Production should use docker_image" + } + assert { + condition = azurerm_linux_web_app_slot.staging[0].site_config[0].application_stack[0].docker_image_name == "myapp:v2.0.0" + error_message = "Staging slot should use staging_docker_image" + } +} + +# ============================================================================= +# BLUE-GREEN IMAGE PRESERVATION TESTS +# ============================================================================= + +run "preserve_production_image_disabled_by_default" { + command = plan + assert { + condition = var.preserve_production_image == false + error_message = "preserve_production_image should be false by default" + } +} + 
+run "effective_docker_image_uses_var_when_preserve_disabled" { + command = plan + variables { + docker_image = "myapp:v2.0.0" + preserve_production_image = false + } + assert { + condition = local.effective_docker_image == "myapp:v2.0.0" + error_message = "effective_docker_image should use var.docker_image when preserve is disabled" + } +} + +run "effective_docker_image_uses_var_when_no_backend_config" { + command = plan + variables { + docker_image = "myapp:v2.0.0" + preserve_production_image = true + } + assert { + condition = local.effective_docker_image == "myapp:v2.0.0" + error_message = "effective_docker_image should use var.docker_image when no backend config (no state to read)" + } +} + +run "remote_state_not_created_when_preserve_disabled" { + command = plan + variables { + docker_image = "myapp:v2.0.0" + preserve_production_image = false + } + assert { + condition = length(data.terraform_remote_state.current) == 0 + error_message = "terraform_remote_state should not be created when preserve_production_image is false" + } +} + +run "remote_state_not_created_when_no_backend_config" { + command = plan + variables { + docker_image = "myapp:v2.0.0" + preserve_production_image = true + } + assert { + condition = length(data.terraform_remote_state.current) == 0 + error_message = "terraform_remote_state should not be created when backend config is empty" + } +} + +run "production_uses_effective_docker_image" { + command = plan + variables { + docker_image = "myapp:v2.0.0" + } + assert { + condition = azurerm_linux_web_app.main.site_config[0].application_stack[0].docker_image_name == local.effective_docker_image + error_message = "Production slot should use local.effective_docker_image" + } +} + +# ============================================================================= +# CUSTOM DOMAIN TESTS +# ============================================================================= + +run "domain_disabled_by_default" { + command = plan + assert { + condition = 
local.custom_fqdn == "" + error_message = "Custom FQDN should be empty when not enabled" + } +} + +run "domain_fqdn_with_subdomain" { + command = plan + variables { + enable_custom_domain = true + dns_zone_name = "example.com" + dns_zone_resource_group = "dns-rg" + custom_subdomain = "api" + } + assert { + condition = local.custom_fqdn == "api.example.com" + error_message = "Custom FQDN should be 'api.example.com'" + } +} + +run "domain_fqdn_apex_domain" { + command = plan + variables { + enable_custom_domain = true + dns_zone_name = "example.com" + dns_zone_resource_group = "dns-rg" + custom_subdomain = "@" + } + assert { + condition = local.custom_fqdn == "example.com" + error_message = "Custom FQDN for apex should be 'example.com'" + } +} + +run "domain_cname_record_created_for_subdomain" { + command = plan + variables { + enable_custom_domain = true + dns_zone_name = "example.com" + dns_zone_resource_group = "dns-rg" + custom_subdomain = "api" + } + assert { + condition = length(azurerm_dns_cname_record.main) == 1 + error_message = "CNAME record should be created for subdomain" + } + assert { + condition = length(azurerm_dns_a_record.main) == 0 + error_message = "A record should not be created for subdomain" + } +} + +run "domain_a_record_created_for_apex" { + command = plan + variables { + enable_custom_domain = true + dns_zone_name = "example.com" + dns_zone_resource_group = "dns-rg" + custom_subdomain = "@" + } + assert { + condition = length(azurerm_dns_a_record.main) == 1 + error_message = "A record should be created for apex domain" + } + assert { + condition = length(azurerm_dns_cname_record.main) == 0 + error_message = "CNAME record should not be created for apex domain" + } +} + +run "domain_managed_certificate_created_by_default" { + command = plan + variables { + enable_custom_domain = true + dns_zone_name = "example.com" + dns_zone_resource_group = "dns-rg" + custom_subdomain = "api" + } + assert { + condition = 
length(azurerm_app_service_managed_certificate.main) == 1 + error_message = "Managed certificate should be created by default" + } +} + +run "domain_managed_certificate_disabled_when_set" { + command = plan + variables { + enable_custom_domain = true + dns_zone_name = "example.com" + dns_zone_resource_group = "dns-rg" + custom_subdomain = "api" + enable_managed_certificate = false + } + assert { + condition = length(azurerm_app_service_managed_certificate.main) == 0 + error_message = "Managed certificate should not be created when disabled" + } +} + +# ============================================================================= +# LOGGING TESTS +# ============================================================================= + +run "logging_enabled_by_default" { + command = plan + assert { + condition = length(azurerm_linux_web_app.main.logs) == 1 + error_message = "Logging should be enabled by default" + } +} + +run "logging_disabled_when_set" { + command = plan + variables { + enable_logging = false + } + assert { + condition = length(azurerm_linux_web_app.main.logs) == 0 + error_message = "Logging should be disabled when set to false" + } +} + +run "logging_detailed_error_messages_default_true" { + command = plan + assert { + condition = azurerm_linux_web_app.main.logs[0].detailed_error_messages == true + error_message = "Detailed error messages should be enabled by default" + } +} + +run "logging_failed_request_tracing_default_true" { + command = plan + assert { + condition = azurerm_linux_web_app.main.logs[0].failed_request_tracing == true + error_message = "Failed request tracing should be enabled by default" + } +} + +run "logging_http_retention_days_default" { + command = plan + assert { + condition = azurerm_linux_web_app.main.logs[0].http_logs[0].file_system[0].retention_in_days == 7 + error_message = "HTTP logs retention should default to 7 days" + } +} + +run "logging_http_retention_days_custom" { + command = plan + variables { + 
http_logs_retention_days = 30 + } + assert { + condition = azurerm_linux_web_app.main.logs[0].http_logs[0].file_system[0].retention_in_days == 30 + error_message = "HTTP logs retention should be 30 days" + } +} + +run "logging_http_retention_mb_default" { + command = plan + assert { + condition = azurerm_linux_web_app.main.logs[0].http_logs[0].file_system[0].retention_in_mb == 35 + error_message = "HTTP logs retention should default to 35 MB" + } +} + +run "logging_application_level_default_information" { + command = plan + assert { + condition = azurerm_linux_web_app.main.logs[0].application_logs[0].file_system_level == "Information" + error_message = "Application log level should default to 'Information'" + } +} + +run "logging_application_level_custom" { + command = plan + variables { + application_logs_level = "Verbose" + } + assert { + condition = azurerm_linux_web_app.main.logs[0].application_logs[0].file_system_level == "Verbose" + error_message = "Application log level should be 'Verbose'" + } +} + +# ============================================================================= +# APPLICATION INSIGHTS TESTS +# ============================================================================= + +run "insights_disabled_by_default" { + command = plan + assert { + condition = length(azurerm_application_insights.main) == 0 + error_message = "Application Insights should not be created by default" + } +} + +run "insights_created_when_enabled" { + command = plan + variables { + enable_application_insights = true + } + assert { + condition = length(azurerm_application_insights.main) == 1 + error_message = "Application Insights should be created when enabled" + } +} + +run "insights_log_analytics_created_when_insights_enabled" { + command = plan + variables { + enable_application_insights = true + } + assert { + condition = length(azurerm_log_analytics_workspace.main) == 1 + error_message = "Log Analytics Workspace should be created with Application Insights" + } +} + 
+run "insights_default_name_generated" { + command = plan + variables { + enable_application_insights = true + } + assert { + condition = azurerm_application_insights.main[0].name == "my-test-app-insights" + error_message = "Application Insights name should default to 'my-test-app-insights'" + } +} + +run "insights_custom_name_override" { + command = plan + variables { + enable_application_insights = true + application_insights_name = "custom-insights" + } + assert { + condition = azurerm_application_insights.main[0].name == "custom-insights" + error_message = "Application Insights name should be 'custom-insights'" + } +} + +run "insights_log_analytics_default_name" { + command = plan + variables { + enable_application_insights = true + } + assert { + condition = azurerm_log_analytics_workspace.main[0].name == "my-test-app-logs" + error_message = "Log Analytics Workspace name should default to 'my-test-app-logs'" + } +} + +run "insights_log_analytics_custom_name" { + command = plan + variables { + enable_application_insights = true + log_analytics_workspace_name = "custom-logs" + } + assert { + condition = azurerm_log_analytics_workspace.main[0].name == "custom-logs" + error_message = "Log Analytics Workspace name should be 'custom-logs'" + } +} + +run "insights_log_analytics_retention_default" { + command = plan + variables { + enable_application_insights = true + } + assert { + condition = azurerm_log_analytics_workspace.main[0].retention_in_days == 30 + error_message = "Log Analytics retention should default to 30 days" + } +} + +run "insights_log_analytics_retention_custom" { + command = plan + variables { + enable_application_insights = true + log_analytics_retention_days = 90 + } + assert { + condition = azurerm_log_analytics_workspace.main[0].retention_in_days == 90 + error_message = "Log Analytics retention should be 90 days" + } +} + +run "insights_app_settings_include_connection_string" { + command = plan + variables { + enable_application_insights = true 
+ } + assert { + condition = can(local.app_insights_settings["APPLICATIONINSIGHTS_CONNECTION_STRING"]) + error_message = "App settings should include Application Insights connection string" + } +} + +# ============================================================================= +# DIAGNOSTIC SETTINGS TESTS +# ============================================================================= + +run "diagnostics_disabled_by_default" { + command = plan + assert { + condition = length(azurerm_monitor_diagnostic_setting.app_service) == 0 + error_message = "Diagnostic settings should not be created by default" + } +} + +run "diagnostics_created_when_enabled" { + command = plan + variables { + enable_diagnostic_settings = true + } + assert { + condition = length(azurerm_monitor_diagnostic_setting.app_service) == 1 + error_message = "Diagnostic settings should be created when enabled" + } +} + +run "diagnostics_creates_log_analytics" { + command = plan + variables { + enable_diagnostic_settings = true + } + assert { + condition = length(azurerm_log_analytics_workspace.main) == 1 + error_message = "Log Analytics should be created for diagnostic settings" + } +} + +run "diagnostics_staging_slot_created_when_both_enabled" { + command = plan + variables { + enable_diagnostic_settings = true + enable_staging_slot = true + } + assert { + condition = length(azurerm_monitor_diagnostic_setting.staging_slot) == 1 + error_message = "Staging slot diagnostic settings should be created" + } +} + +# ============================================================================= +# AUTOSCALING TESTS +# ============================================================================= + +run "autoscaling_disabled_by_default" { + command = plan + assert { + condition = length(azurerm_monitor_autoscale_setting.main) == 0 + error_message = "Autoscaling should not be created by default" + } +} + +run "autoscaling_created_when_enabled" { + command = plan + variables { + enable_autoscaling = true + } + 
assert { + condition = length(azurerm_monitor_autoscale_setting.main) == 1 + error_message = "Autoscaling should be created when enabled" + } +} + +run "autoscaling_min_instances_default" { + command = plan + variables { + enable_autoscaling = true + } + assert { + condition = azurerm_monitor_autoscale_setting.main[0].profile[0].capacity[0].minimum == 1 + error_message = "Autoscale min instances should default to 1" + } +} + +run "autoscaling_max_instances_default" { + command = plan + variables { + enable_autoscaling = true + } + assert { + condition = azurerm_monitor_autoscale_setting.main[0].profile[0].capacity[0].maximum == 10 + error_message = "Autoscale max instances should default to 10" + } +} + +run "autoscaling_default_instances_default" { + command = plan + variables { + enable_autoscaling = true + } + assert { + condition = azurerm_monitor_autoscale_setting.main[0].profile[0].capacity[0].default == 2 + error_message = "Autoscale default instances should default to 2" + } +} + +run "autoscaling_custom_capacity" { + command = plan + variables { + enable_autoscaling = true + autoscale_min_instances = 3 + autoscale_max_instances = 20 + autoscale_default_instances = 5 + } + assert { + condition = azurerm_monitor_autoscale_setting.main[0].profile[0].capacity[0].minimum == 3 + error_message = "Autoscale min instances should be 3" + } + assert { + condition = azurerm_monitor_autoscale_setting.main[0].profile[0].capacity[0].maximum == 20 + error_message = "Autoscale max instances should be 20" + } + assert { + condition = azurerm_monitor_autoscale_setting.main[0].profile[0].capacity[0].default == 5 + error_message = "Autoscale default instances should be 5" + } +} + +run "autoscaling_cpu_scale_out_threshold_default" { + command = plan + variables { + enable_autoscaling = true + } + assert { + condition = azurerm_monitor_autoscale_setting.main[0].profile[0].rule[0].metric_trigger[0].threshold == 70 + error_message = "CPU scale out threshold should default to 70" 
+ } +} + +run "autoscaling_cpu_scale_in_threshold_default" { + command = plan + variables { + enable_autoscaling = true + } + assert { + condition = azurerm_monitor_autoscale_setting.main[0].profile[0].rule[1].metric_trigger[0].threshold == 30 + error_message = "CPU scale in threshold should default to 30" + } +} + +run "autoscaling_memory_scale_out_threshold_default" { + command = plan + variables { + enable_autoscaling = true + } + assert { + condition = azurerm_monitor_autoscale_setting.main[0].profile[0].rule[2].metric_trigger[0].threshold == 75 + error_message = "Memory scale out threshold should default to 75" + } +} + +run "autoscaling_memory_scale_in_threshold_default" { + command = plan + variables { + enable_autoscaling = true + } + assert { + condition = azurerm_monitor_autoscale_setting.main[0].profile[0].rule[3].metric_trigger[0].threshold == 40 + error_message = "Memory scale in threshold should default to 40" + } +} + +run "autoscaling_custom_thresholds" { + command = plan + variables { + enable_autoscaling = true + cpu_scale_out_threshold = 80 + cpu_scale_in_threshold = 20 + memory_scale_out_threshold = 85 + memory_scale_in_threshold = 30 + } + assert { + condition = azurerm_monitor_autoscale_setting.main[0].profile[0].rule[0].metric_trigger[0].threshold == 80 + error_message = "CPU scale out threshold should be 80" + } + assert { + condition = azurerm_monitor_autoscale_setting.main[0].profile[0].rule[1].metric_trigger[0].threshold == 20 + error_message = "CPU scale in threshold should be 20" + } + assert { + condition = azurerm_monitor_autoscale_setting.main[0].profile[0].rule[2].metric_trigger[0].threshold == 85 + error_message = "Memory scale out threshold should be 85" + } + assert { + condition = azurerm_monitor_autoscale_setting.main[0].profile[0].rule[3].metric_trigger[0].threshold == 30 + error_message = "Memory scale in threshold should be 30" + } +} + +run "autoscaling_scale_out_cooldown_default" { + command = plan + variables { + 
enable_autoscaling = true + } + assert { + condition = azurerm_monitor_autoscale_setting.main[0].profile[0].rule[0].scale_action[0].cooldown == "PT5M" + error_message = "Scale out cooldown should default to PT5M" + } +} + +run "autoscaling_scale_in_cooldown_default" { + command = plan + variables { + enable_autoscaling = true + } + assert { + condition = azurerm_monitor_autoscale_setting.main[0].profile[0].rule[1].scale_action[0].cooldown == "PT10M" + error_message = "Scale in cooldown should default to PT10M" + } +} + +run "autoscaling_custom_cooldowns" { + command = plan + variables { + enable_autoscaling = true + scale_out_cooldown = "PT10M" + scale_in_cooldown = "PT15M" + } + assert { + condition = azurerm_monitor_autoscale_setting.main[0].profile[0].rule[0].scale_action[0].cooldown == "PT10M" + error_message = "Scale out cooldown should be PT10M" + } + assert { + condition = azurerm_monitor_autoscale_setting.main[0].profile[0].rule[1].scale_action[0].cooldown == "PT15M" + error_message = "Scale in cooldown should be PT15M" + } +} + +run "autoscaling_no_notifications_by_default" { + command = plan + variables { + enable_autoscaling = true + } + assert { + condition = length(azurerm_monitor_autoscale_setting.main[0].notification) == 0 + error_message = "Autoscaling should not have notifications by default" + } +} + +run "autoscaling_notifications_when_emails_provided" { + command = plan + variables { + enable_autoscaling = true + autoscale_notification_emails = ["ops@example.com", "dev@example.com"] + } + assert { + condition = length(azurerm_monitor_autoscale_setting.main[0].notification) == 1 + error_message = "Autoscaling should have notifications when emails provided" + } +} + +# ============================================================================= +# AUTO-HEAL TESTS +# ============================================================================= + +run "auto_heal_disabled_by_default" { + command = plan + assert { + condition = 
azurerm_linux_web_app.main.site_config[0].auto_heal_enabled == null + error_message = "Auto-heal should be null (disabled) by default" + } +} + +run "auto_heal_enabled_when_configured" { + command = plan + variables { + enable_auto_heal = true + } + assert { + condition = azurerm_linux_web_app.main.site_config[0].auto_heal_enabled == true + error_message = "Auto-heal should be enabled when configured" + } +} + +run "auto_heal_slow_request_defaults" { + command = plan + variables { + enable_auto_heal = true + } + assert { + condition = azurerm_linux_web_app.main.site_config[0].auto_heal_setting[0].trigger[0].slow_request[0].count == 10 + error_message = "Auto-heal slow request count should default to 10" + } + assert { + condition = azurerm_linux_web_app.main.site_config[0].auto_heal_setting[0].trigger[0].slow_request[0].interval == "00:01:00" + error_message = "Auto-heal slow request interval should default to 00:01:00" + } + assert { + condition = azurerm_linux_web_app.main.site_config[0].auto_heal_setting[0].trigger[0].slow_request[0].time_taken == "00:00:30" + error_message = "Auto-heal slow request time taken should default to 00:00:30" + } +} + +run "auto_heal_slow_request_custom" { + command = plan + variables { + enable_auto_heal = true + auto_heal_slow_request_count = 20 + auto_heal_slow_request_interval = "00:02:00" + auto_heal_slow_request_time_taken = "00:01:00" + } + assert { + condition = azurerm_linux_web_app.main.site_config[0].auto_heal_setting[0].trigger[0].slow_request[0].count == 20 + error_message = "Auto-heal slow request count should be 20" + } + assert { + condition = azurerm_linux_web_app.main.site_config[0].auto_heal_setting[0].trigger[0].slow_request[0].interval == "00:02:00" + error_message = "Auto-heal slow request interval should be 00:02:00" + } + assert { + condition = azurerm_linux_web_app.main.site_config[0].auto_heal_setting[0].trigger[0].slow_request[0].time_taken == "00:01:00" + error_message = "Auto-heal slow request time taken 
should be 00:01:00" + } +} + +run "auto_heal_status_code_defaults" { + command = plan + variables { + enable_auto_heal = true + } + assert { + condition = azurerm_linux_web_app.main.site_config[0].auto_heal_setting[0].trigger[0].status_code[0].count == 50 + error_message = "Auto-heal status code count should default to 50" + } + assert { + condition = azurerm_linux_web_app.main.site_config[0].auto_heal_setting[0].trigger[0].status_code[0].interval == "00:05:00" + error_message = "Auto-heal status code interval should default to 00:05:00" + } + assert { + condition = azurerm_linux_web_app.main.site_config[0].auto_heal_setting[0].trigger[0].status_code[0].status_code_range == "500-599" + error_message = "Auto-heal status code range should default to 500-599" + } +} + +run "auto_heal_status_code_custom" { + command = plan + variables { + enable_auto_heal = true + auto_heal_status_code_count = 100 + auto_heal_status_code_interval = "00:10:00" + auto_heal_status_code_range = "400-599" + } + assert { + condition = azurerm_linux_web_app.main.site_config[0].auto_heal_setting[0].trigger[0].status_code[0].count == 100 + error_message = "Auto-heal status code count should be 100" + } + assert { + condition = azurerm_linux_web_app.main.site_config[0].auto_heal_setting[0].trigger[0].status_code[0].interval == "00:10:00" + error_message = "Auto-heal status code interval should be 00:10:00" + } + assert { + condition = azurerm_linux_web_app.main.site_config[0].auto_heal_setting[0].trigger[0].status_code[0].status_code_range == "400-599" + error_message = "Auto-heal status code range should be 400-599" + } +} + +run "auto_heal_action_recycle" { + command = plan + variables { + enable_auto_heal = true + } + assert { + condition = azurerm_linux_web_app.main.site_config[0].auto_heal_setting[0].action[0].action_type == "Recycle" + error_message = "Auto-heal action type should be Recycle" + } +} + +run "auto_heal_min_process_time_default" { + command = plan + variables { + 
enable_auto_heal = true + } + assert { + condition = azurerm_linux_web_app.main.site_config[0].auto_heal_setting[0].action[0].minimum_process_execution_time == "00:01:00" + error_message = "Auto-heal min process time should default to 00:01:00" + } +} + +run "auto_heal_min_process_time_custom" { + command = plan + variables { + enable_auto_heal = true + auto_heal_min_process_time = "00:05:00" + } + assert { + condition = azurerm_linux_web_app.main.site_config[0].auto_heal_setting[0].action[0].minimum_process_execution_time == "00:05:00" + error_message = "Auto-heal min process time should be 00:05:00" + } +} + +# ============================================================================= +# NETWORKING TESTS +# ============================================================================= + +run "vnet_integration_disabled_by_default" { + command = plan + assert { + condition = azurerm_linux_web_app.main.virtual_network_subnet_id == null + error_message = "VNet integration should be disabled by default" + } +} + +run "vnet_integration_enabled_when_set" { + command = plan + variables { + enable_vnet_integration = true + vnet_integration_subnet_id = "/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/test-rg/providers/Microsoft.Network/virtualNetworks/test-vnet/subnets/app-subnet" + } + assert { + condition = azurerm_linux_web_app.main.virtual_network_subnet_id == "/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/test-rg/providers/Microsoft.Network/virtualNetworks/test-vnet/subnets/app-subnet" + error_message = "VNet integration subnet ID should be set" + } +} + +run "vnet_route_all_disabled_by_default" { + command = plan + assert { + condition = azurerm_linux_web_app.main.site_config[0].vnet_route_all_enabled == false + error_message = "VNet route all should be disabled by default" + } +} + +run "vnet_route_all_enabled_when_set" { + command = plan + variables { + vnet_route_all_enabled = true + } + assert { + condition = 
azurerm_linux_web_app.main.site_config[0].vnet_route_all_enabled == true + error_message = "VNet route all should be enabled when set" + } +} + +run "ip_restriction_default_action_allow" { + command = plan + assert { + condition = azurerm_linux_web_app.main.site_config[0].ip_restriction_default_action == "Allow" + error_message = "IP restriction default action should be 'Allow'" + } +} + +run "ip_restriction_default_action_deny" { + command = plan + variables { + ip_restriction_default_action = "Deny" + } + assert { + condition = azurerm_linux_web_app.main.site_config[0].ip_restriction_default_action == "Deny" + error_message = "IP restriction default action should be 'Deny'" + } +} + +run "ip_restrictions_none_by_default" { + command = plan + assert { + condition = length(azurerm_linux_web_app.main.site_config[0].ip_restriction) == 0 + error_message = "No IP restrictions should exist by default" + } +} + +run "ip_restrictions_added_when_provided" { + command = plan + variables { + ip_restrictions = [ + { + name = "allow-office" + ip_address = "203.0.113.0/24" + priority = 100 + action = "Allow" + } + ] + } + assert { + condition = length(azurerm_linux_web_app.main.site_config[0].ip_restriction) == 1 + error_message = "IP restriction should be added" + } + assert { + condition = azurerm_linux_web_app.main.site_config[0].ip_restriction[0].name == "allow-office" + error_message = "IP restriction name should be 'allow-office'" + } + assert { + condition = azurerm_linux_web_app.main.site_config[0].ip_restriction[0].ip_address == "203.0.113.0/24" + error_message = "IP restriction IP address should be set" + } +} + +run "ip_restrictions_service_tag" { + command = plan + variables { + ip_restrictions = [ + { + name = "allow-azure" + service_tag = "AzureCloud" + priority = 100 + action = "Allow" + } + ] + } + assert { + condition = azurerm_linux_web_app.main.site_config[0].ip_restriction[0].service_tag == "AzureCloud" + error_message = "IP restriction service tag should be 
'AzureCloud'" + } +} + +# ============================================================================= +# IDENTITY TESTS +# ============================================================================= + +run "identity_enabled_by_default" { + command = plan + assert { + condition = local.identity_type != null + error_message = "Identity type should not be null by default" + } +} + +run "identity_system_assigned" { + command = plan + variables { + enable_system_identity = true + } + assert { + condition = local.identity_type == "SystemAssigned" + error_message = "Identity type should be 'SystemAssigned'" + } +} + +run "identity_user_assigned" { + command = plan + variables { + user_assigned_identity_ids = ["/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/test-rg/providers/Microsoft.ManagedIdentity/userAssignedIdentities/my-identity"] + } + assert { + condition = local.identity_type == "SystemAssigned, UserAssigned" + error_message = "Identity type should be 'SystemAssigned, UserAssigned'" + } +} + +run "identity_system_and_user_assigned" { + command = plan + variables { + enable_system_identity = true + user_assigned_identity_ids = ["/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/test-rg/providers/Microsoft.ManagedIdentity/userAssignedIdentities/my-identity"] + } + assert { + condition = local.identity_type == "SystemAssigned, UserAssigned" + error_message = "Identity type should be 'SystemAssigned, UserAssigned'" + } +} + +run "identity_block_created_when_enabled" { + command = plan + variables { + enable_system_identity = true + } + assert { + condition = length(azurerm_linux_web_app.main.identity) == 1 + error_message = "Identity block should be created when enabled" + } +} + +run "identity_block_not_created_when_disabled" { + command = plan + variables { + enable_system_identity = false + } + assert { + condition = length(azurerm_linux_web_app.main.identity) == 0 + error_message = "Identity block should not be created 
when disabled" + } +} + +# ============================================================================= +# ALERTING TESTS +# ============================================================================= + +run "alerts_disabled_by_default" { + command = plan + assert { + condition = length(azurerm_monitor_metric_alert.http_5xx) == 0 + error_message = "HTTP 5xx alert should not be created by default" + } +} + +run "alerts_created_when_enabled" { + command = plan + variables { + enable_alerts = true + } + assert { + condition = length(azurerm_monitor_metric_alert.http_5xx) == 1 + error_message = "HTTP 5xx alert should be created when enabled" + } + assert { + condition = length(azurerm_monitor_metric_alert.response_time) == 1 + error_message = "Response time alert should be created when enabled" + } + assert { + condition = length(azurerm_monitor_metric_alert.cpu_percentage) == 1 + error_message = "CPU percentage alert should be created when enabled" + } + assert { + condition = length(azurerm_monitor_metric_alert.memory_percentage) == 1 + error_message = "Memory percentage alert should be created when enabled" + } +} + +run "alerts_action_group_requires_emails" { + command = plan + variables { + enable_alerts = true + } + assert { + condition = length(azurerm_monitor_action_group.main) == 0 + error_message = "Action group should not be created without email recipients" + } +} + +run "alerts_action_group_created_with_emails" { + command = plan + variables { + enable_alerts = true + alert_email_recipients = ["ops@example.com"] + } + assert { + condition = length(azurerm_monitor_action_group.main) == 1 + error_message = "Action group should be created with email recipients" + } +} + +run "alerts_http_5xx_threshold_default" { + command = plan + variables { + enable_alerts = true + } + assert { + condition = azurerm_monitor_metric_alert.http_5xx[0].criteria[0].threshold == 10 + error_message = "HTTP 5xx threshold should default to 10" + } +} + +run 
"alerts_http_5xx_threshold_custom" { + command = plan + variables { + enable_alerts = true + alert_http_5xx_threshold = 25 + } + assert { + condition = azurerm_monitor_metric_alert.http_5xx[0].criteria[0].threshold == 25 + error_message = "HTTP 5xx threshold should be 25" + } +} + +run "alerts_response_time_threshold_default" { + command = plan + variables { + enable_alerts = true + } + assert { + condition = azurerm_monitor_metric_alert.response_time[0].criteria[0].threshold == 5 + error_message = "Response time threshold should default to 5 seconds (5000ms / 1000)" + } +} + +run "alerts_response_time_threshold_custom" { + command = plan + variables { + enable_alerts = true + alert_response_time_threshold_ms = 10000 + } + assert { + condition = azurerm_monitor_metric_alert.response_time[0].criteria[0].threshold == 10 + error_message = "Response time threshold should be 10 seconds" + } +} + +run "alerts_cpu_percentage_threshold_default" { + command = plan + variables { + enable_alerts = true + } + assert { + condition = azurerm_monitor_metric_alert.cpu_percentage[0].criteria[0].threshold == 85 + error_message = "CPU percentage threshold should default to 85" + } +} + +run "alerts_cpu_percentage_threshold_custom" { + command = plan + variables { + enable_alerts = true + alert_cpu_percentage_threshold = 90 + } + assert { + condition = azurerm_monitor_metric_alert.cpu_percentage[0].criteria[0].threshold == 90 + error_message = "CPU percentage threshold should be 90" + } +} + +run "alerts_memory_percentage_threshold_default" { + command = plan + variables { + enable_alerts = true + } + assert { + condition = azurerm_monitor_metric_alert.memory_percentage[0].criteria[0].threshold == 85 + error_message = "Memory percentage threshold should default to 85" + } +} + +run "alerts_memory_percentage_threshold_custom" { + command = plan + variables { + enable_alerts = true + alert_memory_percentage_threshold = 95 + } + assert { + condition = 
azurerm_monitor_metric_alert.memory_percentage[0].criteria[0].threshold == 95 + error_message = "Memory percentage threshold should be 95" + } +} + +run "alerts_health_check_not_created_without_path" { + command = plan + variables { + enable_alerts = true + } + assert { + condition = length(azurerm_monitor_metric_alert.health_check) == 0 + error_message = "Health check alert should not be created without health check path" + } +} + +run "alerts_health_check_created_with_path" { + command = plan + variables { + enable_alerts = true + health_check_path = "/health" + } + assert { + condition = length(azurerm_monitor_metric_alert.health_check) == 1 + error_message = "Health check alert should be created with health check path" + } +} + +# ============================================================================= +# OUTPUT TESTS +# ============================================================================= + +run "output_app_service_name" { + command = plan + assert { + condition = output.app_service_name == "my-test-app" + error_message = "app_service_name output should be 'my-test-app'" + } +} + +run "output_service_plan_name" { + command = plan + assert { + condition = output.service_plan_name == "my-test-app-plan" + error_message = "service_plan_name output should be 'my-test-app-plan'" + } +} + +run "output_staging_slot_null_when_disabled" { + command = plan + assert { + condition = output.staging_slot_id == null + error_message = "staging_slot_id should be null when staging slot is disabled" + } +} + +run "output_custom_domain_null_when_disabled" { + command = plan + assert { + condition = output.custom_domain_fqdn == null + error_message = "custom_domain_fqdn should be null when custom domain is disabled" + } +} + +run "output_application_insights_null_when_disabled" { + command = plan + assert { + condition = output.application_insights_id == null + error_message = "application_insights_id should be null when Application Insights is disabled" + } +} + +run 
"output_identity_principal_null_when_disabled" { + command = plan + + variables { + enable_system_identity = false + } + + assert { + condition = output.app_service_identity_principal_id == null + error_message = "app_service_identity_principal_id should be null when identity is disabled" + } +} diff --git a/azure-apps/deployment/modules/app_service_plan.tf b/azure-apps/deployment/modules/app_service_plan.tf new file mode 100644 index 00000000..773a31f3 --- /dev/null +++ b/azure-apps/deployment/modules/app_service_plan.tf @@ -0,0 +1,15 @@ +# ============================================================================= +# APP SERVICE PLAN +# ============================================================================= + +resource "azurerm_service_plan" "main" { + name = local.service_plan_name + location = var.location + resource_group_name = var.resource_group_name + os_type = var.os_type + sku_name = var.sku_name + per_site_scaling_enabled = var.per_site_scaling_enabled + zone_balancing_enabled = var.zone_balancing_enabled + + tags = local.common_tags +} diff --git a/azure-apps/deployment/modules/autoscaling.tf b/azure-apps/deployment/modules/autoscaling.tf new file mode 100644 index 00000000..a13a8e02 --- /dev/null +++ b/azure-apps/deployment/modules/autoscaling.tf @@ -0,0 +1,132 @@ +# ============================================================================= +# AUTOSCALING +# ============================================================================= + +resource "azurerm_monitor_autoscale_setting" "main" { + count = var.enable_autoscaling ? 
1 : 0 + name = "${var.app_name}-autoscale" + resource_group_name = var.resource_group_name + location = var.location + target_resource_id = azurerm_service_plan.main.id + + tags = local.common_tags + + # --------------------------------------------------------------------------- + # DEFAULT PROFILE + # --------------------------------------------------------------------------- + profile { + name = "default" + + capacity { + default = var.autoscale_default_instances + minimum = var.autoscale_min_instances + maximum = var.autoscale_max_instances + } + + # ------------------------------------------------------------------------- + # CPU SCALE OUT + # ------------------------------------------------------------------------- + rule { + metric_trigger { + metric_name = "CpuPercentage" + metric_resource_id = azurerm_service_plan.main.id + time_grain = "PT1M" + statistic = "Average" + time_window = "PT5M" + time_aggregation = "Average" + operator = "GreaterThan" + threshold = var.cpu_scale_out_threshold + } + + scale_action { + direction = "Increase" + type = "ChangeCount" + value = "1" + cooldown = var.scale_out_cooldown + } + } + + # ------------------------------------------------------------------------- + # CPU SCALE IN + # ------------------------------------------------------------------------- + rule { + metric_trigger { + metric_name = "CpuPercentage" + metric_resource_id = azurerm_service_plan.main.id + time_grain = "PT1M" + statistic = "Average" + time_window = "PT10M" + time_aggregation = "Average" + operator = "LessThan" + threshold = var.cpu_scale_in_threshold + } + + scale_action { + direction = "Decrease" + type = "ChangeCount" + value = "1" + cooldown = var.scale_in_cooldown + } + } + + # ------------------------------------------------------------------------- + # MEMORY SCALE OUT + # ------------------------------------------------------------------------- + rule { + metric_trigger { + metric_name = "MemoryPercentage" + metric_resource_id = 
azurerm_service_plan.main.id + time_grain = "PT1M" + statistic = "Average" + time_window = "PT5M" + time_aggregation = "Average" + operator = "GreaterThan" + threshold = var.memory_scale_out_threshold + } + + scale_action { + direction = "Increase" + type = "ChangeCount" + value = "1" + cooldown = var.scale_out_cooldown + } + } + + # ------------------------------------------------------------------------- + # MEMORY SCALE IN + # ------------------------------------------------------------------------- + rule { + metric_trigger { + metric_name = "MemoryPercentage" + metric_resource_id = azurerm_service_plan.main.id + time_grain = "PT1M" + statistic = "Average" + time_window = "PT10M" + time_aggregation = "Average" + operator = "LessThan" + threshold = var.memory_scale_in_threshold + } + + scale_action { + direction = "Decrease" + type = "ChangeCount" + value = "1" + cooldown = var.scale_in_cooldown + } + } + } + + # --------------------------------------------------------------------------- + # NOTIFICATIONS + # --------------------------------------------------------------------------- + dynamic "notification" { + for_each = length(var.autoscale_notification_emails) > 0 ? [1] : [] + content { + email { + send_to_subscription_administrator = false + send_to_subscription_co_administrator = false + custom_emails = var.autoscale_notification_emails + } + } + } +} diff --git a/azure-apps/deployment/modules/deployment_slots.tf b/azure-apps/deployment/modules/deployment_slots.tf new file mode 100644 index 00000000..1e05b62a --- /dev/null +++ b/azure-apps/deployment/modules/deployment_slots.tf @@ -0,0 +1,55 @@ +# ============================================================================= +# DEPLOYMENT SLOTS +# ============================================================================= + +resource "azurerm_linux_web_app_slot" "staging" { + count = var.enable_staging_slot ? 
1 : 0 + name = var.staging_slot_name + app_service_id = azurerm_linux_web_app.main.id + + tags = local.common_tags + + site_config { + always_on = var.staging_slot_always_on + http2_enabled = var.http2_enabled + websockets_enabled = var.websockets_enabled + ftps_state = var.ftps_state + minimum_tls_version = var.minimum_tls_version + vnet_route_all_enabled = var.vnet_route_all_enabled + app_command_line = var.app_command_line != "" ? var.app_command_line : null + + health_check_path = var.health_check_path != "" ? var.health_check_path : null + health_check_eviction_time_in_min = var.health_check_path != "" ? var.health_check_eviction_time_in_min : null + + application_stack { + docker_registry_url = var.docker_registry_url + docker_image_name = local.staging_docker_image + docker_registry_username = local.docker_registry_username + docker_registry_password = local.docker_registry_password + } + } + + app_settings = local.staging_app_settings + https_only = var.https_only + + dynamic "identity" { + for_each = local.identity_type != null ? [1] : [] + content { + type = local.identity_type + identity_ids = length(var.user_assigned_identity_ids) > 0 ? var.user_assigned_identity_ids : null + } + } +} + +# ============================================================================= +# SLOT SWAP (Promote staging to production) +# When promote_staging_to_production is true, swaps staging with production +# ============================================================================= + +resource "azurerm_web_app_active_slot" "slot_swap" { + count = var.enable_staging_slot && var.promote_staging_to_production ? 
1 : 0 + + slot_id = azurerm_linux_web_app_slot.staging[0].id + + depends_on = [azurerm_linux_web_app_slot.staging] +} diff --git a/azure-apps/deployment/modules/dns.tf b/azure-apps/deployment/modules/dns.tf new file mode 100644 index 00000000..9e57f821 --- /dev/null +++ b/azure-apps/deployment/modules/dns.tf @@ -0,0 +1,91 @@ +# ============================================================================= +# DNS / CUSTOM DOMAIN +# ============================================================================= + +# Reference existing DNS zone +data "azurerm_dns_zone" "main" { + count = var.enable_custom_domain ? 1 : 0 + name = var.dns_zone_name + resource_group_name = var.dns_zone_resource_group +} + +# --------------------------------------------------------------------------- +# A RECORD (for apex domain) +# --------------------------------------------------------------------------- +resource "azurerm_dns_a_record" "main" { + count = var.enable_custom_domain && var.custom_subdomain == "@" ? 1 : 0 + name = "@" + zone_name = data.azurerm_dns_zone.main[0].name + resource_group_name = var.dns_zone_resource_group + ttl = 300 + target_resource_id = azurerm_linux_web_app.main.id + + tags = local.common_tags +} + +# --------------------------------------------------------------------------- +# CNAME RECORD (for subdomains) +# --------------------------------------------------------------------------- +resource "azurerm_dns_cname_record" "main" { + count = var.enable_custom_domain && var.custom_subdomain != "@" ? 
1 : 0 + name = var.custom_subdomain + zone_name = data.azurerm_dns_zone.main[0].name + resource_group_name = var.dns_zone_resource_group + ttl = 300 + record = azurerm_linux_web_app.main.default_hostname + + tags = local.common_tags +} + +# --------------------------------------------------------------------------- +# TXT RECORD (for domain verification) +# --------------------------------------------------------------------------- +resource "azurerm_dns_txt_record" "verification" { + count = var.enable_custom_domain ? 1 : 0 + name = var.custom_subdomain == "@" ? "asuid" : "asuid.${var.custom_subdomain}" + zone_name = data.azurerm_dns_zone.main[0].name + resource_group_name = var.dns_zone_resource_group + ttl = 300 + + record { + value = azurerm_linux_web_app.main.custom_domain_verification_id + } + + tags = local.common_tags +} + +# --------------------------------------------------------------------------- +# CUSTOM DOMAIN BINDING +# --------------------------------------------------------------------------- +resource "azurerm_app_service_custom_hostname_binding" "main" { + count = var.enable_custom_domain ? 1 : 0 + hostname = local.custom_fqdn + app_service_name = azurerm_linux_web_app.main.name + resource_group_name = var.resource_group_name + + depends_on = [ + azurerm_dns_a_record.main, + azurerm_dns_cname_record.main, + azurerm_dns_txt_record.verification + ] +} + +# --------------------------------------------------------------------------- +# MANAGED SSL CERTIFICATE +# --------------------------------------------------------------------------- +resource "azurerm_app_service_managed_certificate" "main" { + count = var.enable_custom_domain && var.enable_managed_certificate ? 
1 : 0 + custom_hostname_binding_id = azurerm_app_service_custom_hostname_binding.main[0].id + + tags = local.common_tags +} + +# --------------------------------------------------------------------------- +# CERTIFICATE BINDING +# --------------------------------------------------------------------------- +resource "azurerm_app_service_certificate_binding" "main" { + count = var.enable_custom_domain && var.enable_managed_certificate ? 1 : 0 + hostname_binding_id = azurerm_app_service_custom_hostname_binding.main[0].id + certificate_id = azurerm_app_service_managed_certificate.main[0].id + ssl_state = "SniEnabled" +} diff --git a/azure-apps/deployment/modules/locals.tf b/azure-apps/deployment/modules/locals.tf new file mode 100644 index 00000000..cb706dac --- /dev/null +++ b/azure-apps/deployment/modules/locals.tf @@ -0,0 +1,64 @@ +# ============================================================================= +# LOCALS - Computed values and defaults +# ============================================================================= + +locals { + # Resource naming with fallbacks + service_plan_name = var.service_plan_name != "" ? var.service_plan_name : "${var.app_name}-plan" + application_insights_name = var.application_insights_name != "" ? var.application_insights_name : "${var.app_name}-insights" + log_analytics_workspace_name = var.log_analytics_workspace_name != "" ? var.log_analytics_workspace_name : "${var.app_name}-logs" + + # Parse environment variables from JSON + env_variables = jsondecode(var.parameter_json) + + # Construct custom domain FQDN + custom_fqdn = var.enable_custom_domain ? ( + var.custom_subdomain == "@" ? var.dns_zone_name : "${var.custom_subdomain}.${var.dns_zone_name}" + ) : "" + + # Common tags applied to all resources + common_tags = merge(var.resource_tags, { + managed_by = "terraform" + }) + + # Docker registry credentials (only if provided) + docker_registry_username = var.docker_registry_username != "" ? 
var.docker_registry_username : null + docker_registry_password = var.docker_registry_password != "" ? var.docker_registry_password : null + + # Staging slot docker image (defaults to production image if not specified) + staging_docker_image = var.staging_docker_image != "" ? var.staging_docker_image : var.docker_image + + # App settings combining user env vars with required settings + base_app_settings = { + WEBSITES_ENABLE_APP_SERVICE_STORAGE = "false" + DOCKER_ENABLE_CI = "true" + } + + app_insights_settings = var.enable_application_insights ? { + APPLICATIONINSIGHTS_CONNECTION_STRING = azurerm_application_insights.main[0].connection_string + ApplicationInsightsAgent_EXTENSION_VERSION = "~3" + APPINSIGHTS_INSTRUMENTATIONKEY = azurerm_application_insights.main[0].instrumentation_key + } : {} + + app_settings = merge( + local.env_variables, + local.base_app_settings, + local.app_insights_settings + ) + + # Staging slot app settings + staging_app_settings = merge( + local.app_settings, + { + SLOT_NAME = var.staging_slot_name + } + ) + + # Identity type based on configuration + identity_type = ( + var.enable_system_identity && length(var.user_assigned_identity_ids) > 0 ? "SystemAssigned, UserAssigned" : + var.enable_system_identity ? "SystemAssigned" : + length(var.user_assigned_identity_ids) > 0 ? 
"UserAssigned" : + null + ) +} diff --git a/azure-apps/deployment/modules/monitoring.tf b/azure-apps/deployment/modules/monitoring.tf new file mode 100644 index 00000000..c60cbd2a --- /dev/null +++ b/azure-apps/deployment/modules/monitoring.tf @@ -0,0 +1,101 @@ +# ============================================================================= +# MONITORING - Application Insights & Log Analytics +# ============================================================================= + +# --------------------------------------------------------------------------- +# LOG ANALYTICS WORKSPACE +# --------------------------------------------------------------------------- +resource "azurerm_log_analytics_workspace" "main" { + count = var.enable_application_insights || var.enable_diagnostic_settings ? 1 : 0 + name = local.log_analytics_workspace_name + location = var.location + resource_group_name = var.resource_group_name + sku = "PerGB2018" + retention_in_days = var.log_analytics_retention_days + + tags = local.common_tags +} + +# --------------------------------------------------------------------------- +# APPLICATION INSIGHTS +# --------------------------------------------------------------------------- +resource "azurerm_application_insights" "main" { + count = var.enable_application_insights ? 1 : 0 + name = local.application_insights_name + location = var.location + resource_group_name = var.resource_group_name + application_type = "web" + workspace_id = azurerm_log_analytics_workspace.main[0].id + + tags = local.common_tags +} + +# --------------------------------------------------------------------------- +# DIAGNOSTIC SETTINGS +# --------------------------------------------------------------------------- +resource "azurerm_monitor_diagnostic_setting" "app_service" { + count = var.enable_diagnostic_settings ? 
1 : 0 + name = "${var.app_name}-diagnostics" + target_resource_id = azurerm_linux_web_app.main.id + log_analytics_workspace_id = azurerm_log_analytics_workspace.main[0].id + + # HTTP logs + enabled_log { + category = "AppServiceHTTPLogs" + } + + # Console logs (stdout/stderr) + enabled_log { + category = "AppServiceConsoleLogs" + } + + # Application logs + enabled_log { + category = "AppServiceAppLogs" + } + + # Platform logs + enabled_log { + category = "AppServicePlatformLogs" + } + + # Audit logs + enabled_log { + category = "AppServiceAuditLogs" + } + + # Metrics + metric { + category = "AllMetrics" + enabled = true + } +} + +# Diagnostic settings for staging slot +resource "azurerm_monitor_diagnostic_setting" "staging_slot" { + count = var.enable_diagnostic_settings && var.enable_staging_slot ? 1 : 0 + name = "${var.app_name}-${var.staging_slot_name}-diagnostics" + target_resource_id = azurerm_linux_web_app_slot.staging[0].id + log_analytics_workspace_id = azurerm_log_analytics_workspace.main[0].id + + enabled_log { + category = "AppServiceHTTPLogs" + } + + enabled_log { + category = "AppServiceConsoleLogs" + } + + enabled_log { + category = "AppServiceAppLogs" + } + + enabled_log { + category = "AppServicePlatformLogs" + } + + metric { + category = "AllMetrics" + enabled = true + } +} diff --git a/azure-apps/deployment/modules/outputs.tf b/azure-apps/deployment/modules/outputs.tf new file mode 100644 index 00000000..94333b9d --- /dev/null +++ b/azure-apps/deployment/modules/outputs.tf @@ -0,0 +1,162 @@ +# ============================================================================= +# OUTPUTS +# ============================================================================= + +# --------------------------------------------------------------------------- +# APP SERVICE +# --------------------------------------------------------------------------- +output "app_service_id" { + description = "The ID of the App Service" + value = azurerm_linux_web_app.main.id +} + 
+output "app_service_name" { + description = "The name of the App Service" + value = azurerm_linux_web_app.main.name +} + +output "app_service_default_hostname" { + description = "The default hostname of the App Service" + value = azurerm_linux_web_app.main.default_hostname +} + +output "app_service_default_url" { + description = "The default URL of the App Service" + value = "https://${azurerm_linux_web_app.main.default_hostname}" +} + +output "docker_image" { + description = "The Docker image currently deployed to production" + value = local.effective_docker_image +} + +output "app_service_outbound_ip_addresses" { + description = "Outbound IP addresses of the App Service (comma-separated)" + value = azurerm_linux_web_app.main.outbound_ip_addresses +} + +output "app_service_outbound_ip_address_list" { + description = "Outbound IP addresses of the App Service (list)" + value = azurerm_linux_web_app.main.outbound_ip_address_list +} + +output "app_service_possible_outbound_ip_addresses" { + description = "All possible outbound IP addresses of the App Service" + value = azurerm_linux_web_app.main.possible_outbound_ip_addresses +} + +output "custom_domain_verification_id" { + description = "Custom domain verification ID" + value = azurerm_linux_web_app.main.custom_domain_verification_id + sensitive = true +} + +# --------------------------------------------------------------------------- +# APP SERVICE PLAN +# --------------------------------------------------------------------------- +output "service_plan_id" { + description = "The ID of the App Service Plan" + value = azurerm_service_plan.main.id +} + +output "service_plan_name" { + description = "The name of the App Service Plan" + value = azurerm_service_plan.main.name +} + +# --------------------------------------------------------------------------- +# IDENTITY +# --------------------------------------------------------------------------- +output "app_service_identity_principal_id" { + description = "The 
Principal ID of the App Service system-assigned identity" + value = var.enable_system_identity ? azurerm_linux_web_app.main.identity[0].principal_id : null +} + +output "app_service_identity_tenant_id" { + description = "The Tenant ID of the App Service system-assigned identity" + value = var.enable_system_identity ? azurerm_linux_web_app.main.identity[0].tenant_id : null +} + +# --------------------------------------------------------------------------- +# STAGING SLOT +# --------------------------------------------------------------------------- +output "staging_slot_id" { + description = "The ID of the staging slot" + value = var.enable_staging_slot ? azurerm_linux_web_app_slot.staging[0].id : null +} + +output "staging_slot_hostname" { + description = "The hostname of the staging slot" + value = var.enable_staging_slot ? azurerm_linux_web_app_slot.staging[0].default_hostname : null +} + +output "staging_slot_url" { + description = "The URL of the staging slot" + value = var.enable_staging_slot ? "https://${azurerm_linux_web_app_slot.staging[0].default_hostname}" : null +} + +output "staging_traffic_percent" { + description = "Percentage of traffic routed to staging slot" + value = var.enable_staging_slot ? var.staging_traffic_percent : 0 +} + +output "slot_swap_performed" { + description = "Whether a slot swap was performed (staging promoted to production)" + value = var.enable_staging_slot && var.promote_staging_to_production +} + +# --------------------------------------------------------------------------- +# CUSTOM DOMAIN +# --------------------------------------------------------------------------- +output "custom_domain_fqdn" { + description = "The custom domain FQDN" + value = var.enable_custom_domain ? local.custom_fqdn : null +} + +output "custom_domain_url" { + description = "The custom domain URL" + value = var.enable_custom_domain ? 
"https://${local.custom_fqdn}" : null +} + +# --------------------------------------------------------------------------- +# MONITORING +# --------------------------------------------------------------------------- +output "application_insights_id" { + description = "The ID of the Application Insights resource" + value = var.enable_application_insights ? azurerm_application_insights.main[0].id : null +} + +output "application_insights_instrumentation_key" { + description = "The instrumentation key of Application Insights" + value = var.enable_application_insights ? azurerm_application_insights.main[0].instrumentation_key : null + sensitive = true +} + +output "application_insights_connection_string" { + description = "The connection string of Application Insights" + value = var.enable_application_insights ? azurerm_application_insights.main[0].connection_string : null + sensitive = true +} + +output "log_analytics_workspace_id" { + description = "The ID of the Log Analytics Workspace" + value = var.enable_application_insights || var.enable_diagnostic_settings ? azurerm_log_analytics_workspace.main[0].id : null +} + +output "log_analytics_workspace_name" { + description = "The name of the Log Analytics Workspace" + value = var.enable_application_insights || var.enable_diagnostic_settings ? azurerm_log_analytics_workspace.main[0].name : null +} + +# --------------------------------------------------------------------------- +# KUDU / SCM (for debugging) +# --------------------------------------------------------------------------- +output "scm_url" { + description = "The SCM (Kudu) URL for the App Service" + value = "https://${azurerm_linux_web_app.main.name}.scm.azurewebsites.net" +} + +output "staging_scm_url" { + description = "The SCM (Kudu) URL for the staging slot" + value = var.enable_staging_slot ? 
"https://${azurerm_linux_web_app.main.name}-${var.staging_slot_name}.scm.azurewebsites.net" : null +} diff --git a/azure-apps/deployment/modules/provider.tf b/azure-apps/deployment/modules/provider.tf new file mode 100644 index 00000000..e39a452d --- /dev/null +++ b/azure-apps/deployment/modules/provider.tf @@ -0,0 +1,20 @@ +terraform { + required_version = ">= 1.0" + + required_providers { + azurerm = { + source = "hashicorp/azurerm" + version = "~> 3.0" + } + null = { + source = "hashicorp/null" + version = "~> 3.0" + } + } + + backend "azurerm" {} +} + +provider "azurerm" { + features {} +} diff --git a/azure-apps/deployment/modules/remote_state.tf b/azure-apps/deployment/modules/remote_state.tf new file mode 100644 index 00000000..151e0008 --- /dev/null +++ b/azure-apps/deployment/modules/remote_state.tf @@ -0,0 +1,37 @@ +# ============================================================================= +# REMOTE STATE - Read current state for blue-green deployments +# ============================================================================= +# This data source reads the current Terraform state to get the existing +# production docker image. Used when preserve_production_image is enabled +# to keep the current production image while deploying a new image to staging. + +data "terraform_remote_state" "current" { + count = var.preserve_production_image && var.backend_storage_account_name != "" ? 1 : 0 + backend = "azurerm" + + config = { + storage_account_name = var.backend_storage_account_name + container_name = var.backend_container_name + resource_group_name = var.backend_resource_group_name + key = var.state_key + use_azuread_auth = true + } +} + +locals { + # Get the current production image from state, empty if no state exists yet + current_production_image = ( + var.preserve_production_image && length(data.terraform_remote_state.current) > 0 + ? 
try(data.terraform_remote_state.current[0].outputs.docker_image, "") + : "" + ) + + # Effective production image: + # - If preserve mode is enabled AND state exists with an image: use the existing image + # - Otherwise: use the new docker_image from variables + effective_docker_image = ( + var.preserve_production_image && local.current_production_image != "" + ? local.current_production_image + : var.docker_image + ) +} diff --git a/azure-apps/deployment/modules/terraform.tfvars.example b/azure-apps/deployment/modules/terraform.tfvars.example new file mode 100644 index 00000000..bde9f2f0 --- /dev/null +++ b/azure-apps/deployment/modules/terraform.tfvars.example @@ -0,0 +1,183 @@ +# ============================================================================= +# EXAMPLE terraform.tfvars +# ============================================================================= +# Copy this file to terraform.tfvars and customize as needed. +# Most variables have sensible defaults - you only need to set required ones. 
+# ============================================================================= + +# ============================================================================= +# REQUIRED VARIABLES +# ============================================================================= + +resource_group_name = "my-app-rg" +location = "eastus" +app_name = "my-awesome-app" # Must be globally unique +docker_image = "nginx:latest" # Your Docker image + +# ============================================================================= +# OPTIONAL: Resource Tags +# ============================================================================= + +resource_tags = { + environment = "production" + project = "my-project" + team = "platform" + cost_center = "12345" +} + +# ============================================================================= +# OPTIONAL: Environment Variables (as JSON) +# ============================================================================= + +# parameter_json = <= 0 && var.staging_traffic_percent <= 100 + error_message = "staging_traffic_percent must be between 0 and 100" + } +} + +variable "promote_staging_to_production" { + description = "When true, performs a slot swap promoting staging to production. After swap, the previous production becomes staging." 
+ type = bool + default = false +} + +# ============================================================================= +# DNS / CUSTOM DOMAIN +# ============================================================================= + +variable "enable_custom_domain" { + description = "Enable custom domain configuration" + type = bool + default = false +} + +variable "dns_zone_name" { + description = "Name of the Azure DNS zone (e.g., example.com)" + type = string + default = "" +} + +variable "dns_zone_resource_group" { + description = "Resource group containing the DNS zone" + type = string + default = "" +} + +variable "custom_subdomain" { + description = "Subdomain for the app (e.g., 'api' for api.example.com, or '@' for apex domain)" + type = string + default = "@" +} + +variable "enable_managed_certificate" { + description = "Enable free Azure-managed SSL certificate for custom domain" + type = bool + default = true +} + +# ============================================================================= +# LOGGING +# ============================================================================= + +variable "enable_logging" { + description = "Enable application and HTTP logging" + type = bool + default = true +} + +variable "application_logs_level" { + description = "Application log level (Off, Error, Warning, Information, Verbose)" + type = string + default = "Information" +} + +variable "http_logs_retention_days" { + description = "HTTP logs retention in days" + type = number + default = 7 +} + +variable "http_logs_retention_mb" { + description = "HTTP logs retention in MB" + type = number + default = 35 +} + +variable "detailed_error_messages" { + description = "Enable detailed error messages in logs" + type = bool + default = true +} + +variable "failed_request_tracing" { + description = "Enable failed request tracing" + type = bool + default = true +} + +# ============================================================================= +# APPLICATION INSIGHTS / MONITORING 
+# ============================================================================= + +variable "enable_application_insights" { + description = "Enable Application Insights for APM" + type = bool + default = false +} + +variable "application_insights_name" { + description = "Name for Application Insights resource (defaults to app_name-insights)" + type = string + default = "" +} + +variable "log_analytics_workspace_name" { + description = "Name for Log Analytics Workspace (defaults to app_name-logs)" + type = string + default = "" +} + +variable "log_analytics_retention_days" { + description = "Log Analytics data retention in days" + type = number + default = 30 +} + +variable "enable_diagnostic_settings" { + description = "Enable diagnostic settings to export logs to Log Analytics" + type = bool + default = false +} + +# ============================================================================= +# AUTOSCALING +# ============================================================================= + +variable "enable_autoscaling" { + description = "Enable autoscaling for the App Service Plan" + type = bool + default = false +} + +variable "autoscale_min_instances" { + description = "Minimum number of instances" + type = number + default = 1 +} + +variable "autoscale_max_instances" { + description = "Maximum number of instances" + type = number + default = 10 +} + +variable "autoscale_default_instances" { + description = "Default number of instances" + type = number + default = 2 +} + +variable "cpu_scale_out_threshold" { + description = "CPU percentage to trigger scale out" + type = number + default = 70 +} + +variable "cpu_scale_in_threshold" { + description = "CPU percentage to trigger scale in" + type = number + default = 30 +} + +variable "memory_scale_out_threshold" { + description = "Memory percentage to trigger scale out" + type = number + default = 75 +} + +variable "memory_scale_in_threshold" { + description = "Memory percentage to trigger scale in" + type = 
number + default = 40 +} + +variable "scale_out_cooldown" { + description = "Cooldown period after scale out (ISO 8601 duration, e.g., PT5M)" + type = string + default = "PT5M" +} + +variable "scale_in_cooldown" { + description = "Cooldown period after scale in (ISO 8601 duration, e.g., PT10M)" + type = string + default = "PT10M" +} + +variable "autoscale_notification_emails" { + description = "Email addresses to notify on autoscale events" + type = list(string) + default = [] +} + +# ============================================================================= +# AUTO-HEAL +# ============================================================================= + +variable "enable_auto_heal" { + description = "Enable auto-heal to automatically restart unhealthy instances" + type = bool + default = false +} + +variable "auto_heal_slow_request_count" { + description = "Number of slow requests to trigger auto-heal" + type = number + default = 10 +} + +variable "auto_heal_slow_request_interval" { + description = "Interval for counting slow requests (ISO 8601 duration)" + type = string + default = "00:01:00" +} + +variable "auto_heal_slow_request_time_taken" { + description = "Time threshold for a request to be considered slow (ISO 8601 duration)" + type = string + default = "00:00:30" +} + +variable "auto_heal_status_code_count" { + description = "Number of error status codes to trigger auto-heal" + type = number + default = 50 +} + +variable "auto_heal_status_code_interval" { + description = "Interval for counting error status codes (ISO 8601 duration)" + type = string + default = "00:05:00" +} + +variable "auto_heal_status_code_range" { + description = "Status code range to monitor (e.g., 500-599)" + type = string + default = "500-599" +} + +variable "auto_heal_min_process_time" { + description = "Minimum process execution time before auto-heal can trigger (ISO 8601 duration)" + type = string + default = "00:01:00" +} + +# 
============================================================================= +# NETWORKING +# ============================================================================= + +variable "enable_vnet_integration" { + description = "Enable VNet integration" + type = bool + default = false +} + +variable "vnet_integration_subnet_id" { + description = "Subnet ID for VNet integration" + type = string + default = "" +} + +variable "vnet_route_all_enabled" { + description = "Route all outbound traffic through VNet" + type = bool + default = false +} + +variable "ip_restriction_default_action" { + description = "Default action for IP restrictions (Allow or Deny)" + type = string + default = "Allow" +} + +variable "ip_restrictions" { + description = "List of IP restrictions" + type = list(object({ + name = string + ip_address = optional(string) + service_tag = optional(string) + priority = number + action = string + })) + default = [] +} + +# ============================================================================= +# IDENTITY +# ============================================================================= + +variable "enable_system_identity" { + description = "Enable system-assigned managed identity" + type = bool + default = true +} + +variable "user_assigned_identity_ids" { + description = "List of user-assigned managed identity IDs" + type = list(string) + default = [] +} + +# ============================================================================= +# ALERTING +# ============================================================================= + +variable "enable_alerts" { + description = "Enable Azure Monitor alerts" + type = bool + default = false +} + +variable "alert_email_recipients" { + description = "Email addresses to receive alerts" + type = list(string) + default = [] +} + +variable "alert_http_5xx_threshold" { + description = "Threshold for HTTP 5xx alert" + type = number + default = 10 +} + +variable "alert_response_time_threshold_ms" { + description = 
"Threshold for response time alert in milliseconds" + type = number + default = 5000 +} + +variable "alert_cpu_percentage_threshold" { + description = "Threshold for CPU percentage alert" + type = number + default = 85 +} + +variable "alert_memory_percentage_threshold" { + description = "Threshold for memory percentage alert" + type = number + default = 85 +} + +# ============================================================================= +# BACKEND CONFIGURATION (for remote state lookup) +# ============================================================================= + +variable "backend_storage_account_name" { + description = "Storage account name for the Terraform backend (used to read current state for blue-green deployments)" + type = string + default = "" +} + +variable "backend_container_name" { + description = "Container name for the Terraform backend" + type = string + default = "" +} + +variable "backend_resource_group_name" { + description = "Resource group name for the Terraform backend storage account" + type = string + default = "" +} + +variable "state_key" { + description = "State file key/path in the backend storage" + type = string + default = "" +} diff --git a/azure-apps/deployment/scripts/azure_setup b/azure-apps/deployment/scripts/azure_setup new file mode 100755 index 00000000..f5a29620 --- /dev/null +++ b/azure-apps/deployment/scripts/azure_setup @@ -0,0 +1,130 @@ +#!/bin/bash + +echo "🔍 Validating Azure provider configuration..." 
+ +missing_vars=() + +function validate_env_var() { + local variable_name=$1 + local variable_value="${!variable_name}" + + if [ -z "$variable_value" ]; then + echo " ❌ $variable_name is missing" + missing_vars+=("$variable_name") + else + if [[ "$(echo "$variable_name" | tr '[:upper:]' '[:lower:]')" == *secret* ]]; then + echo " ✅ $variable_name=REDACTED" + else + echo " ✅ $variable_name=$variable_value" + fi + fi +} + +# Terraform state backend configuration +validate_env_var TOFU_PROVIDER_STORAGE_ACCOUNT +validate_env_var TOFU_PROVIDER_CONTAINER +validate_env_var ARM_CLIENT_SECRET + +if [ ${#missing_vars[@]} -gt 0 ]; then + echo "" + echo " 🔧 How to fix:" + echo " Set the missing variable(s) in the nullplatform agent Helm installation:" + for var in "${missing_vars[@]}"; do + echo " • $var" + done + echo "" + exit 1 +fi + +ARM_SUBSCRIPTION_ID=$(echo "$CONTEXT" | jq -r '.providers["cloud-providers"].authentication.subscription_id // empty') +ARM_CLIENT_ID=$(echo "$CONTEXT" | jq -r '.providers["cloud-providers"].authentication.client_id // empty') +ARM_TENANT_ID=$(echo "$CONTEXT" | jq -r '.providers["cloud-providers"].authentication.tenant_id // empty') +AZURE_RESOURCE_GROUP=$(echo "$CONTEXT" | jq -r '.providers["cloud-providers"].networking.public_dns_zone_resource_group_name // empty') +AZURE_LOCATION=australiaeast + +context_missing_vars=() + +if [ -z "$ARM_SUBSCRIPTION_ID" ]; then + echo " ❌ ARM_SUBSCRIPTION_ID could not be resolved from providers" + context_missing_vars+=("subscription_id") +fi + +if [ -z "$ARM_CLIENT_ID" ]; then + echo " ❌ ARM_CLIENT_ID could not be resolved from providers" + context_missing_vars+=("client_id") +fi + +if [ -z "$ARM_TENANT_ID" ]; then + echo " ❌ ARM_TENANT_ID could not be resolved from providers" + context_missing_vars+=("tenant_id") +fi + +if [ -z "$AZURE_RESOURCE_GROUP" ]; then + echo " ❌ AZURE_RESOURCE_GROUP could not be resolved from providers" + context_missing_vars+=("public_dns_zone_resource_group_name") +fi + +if [ 
${#context_missing_vars[@]} -gt 0 ]; then + echo "" + echo " 💡 Possible causes:" + echo " Verify that you have an Azure cloud provider linked to this scope." + echo " The cloud provider must include the following fields:" + for var in "${context_missing_vars[@]}"; do + echo " • $var" + done + echo "" + exit 1 +fi + +echo "✨ Azure provider configured successfully" +echo "" + +# Build TOFU_VARIABLES for terraform +RESOURCE_TAGS_JSON=${RESOURCE_TAGS_JSON:-"{}"} + +TOFU_VARIABLES=$(echo "${TOFU_VARIABLES:-"{}"}" | jq \ + --arg resource_group_name "$AZURE_RESOURCE_GROUP" \ + --arg location "$AZURE_LOCATION" \ + --argjson resource_tags "$RESOURCE_TAGS_JSON" \ + --arg backend_storage_account_name "$TOFU_PROVIDER_STORAGE_ACCOUNT" \ + --arg backend_container_name "$TOFU_PROVIDER_CONTAINER" \ + --arg backend_resource_group_name "$AZURE_RESOURCE_GROUP" \ + '. + { + resource_group_name: $resource_group_name, + location: $location, + resource_tags: $resource_tags, + backend_storage_account_name: $backend_storage_account_name, + backend_container_name: $backend_container_name, + backend_resource_group_name: $backend_resource_group_name + }') + +# Build tofu init backend config +# STATE_KEY is exported by build_context +TOFU_INIT_VARIABLES="${TOFU_INIT_VARIABLES:-""}" +TOFU_INIT_VARIABLES="$TOFU_INIT_VARIABLES -backend-config=storage_account_name=$TOFU_PROVIDER_STORAGE_ACCOUNT" +TOFU_INIT_VARIABLES="$TOFU_INIT_VARIABLES -backend-config=container_name=$TOFU_PROVIDER_CONTAINER" +TOFU_INIT_VARIABLES="$TOFU_INIT_VARIABLES -backend-config=resource_group_name=$AZURE_RESOURCE_GROUP" +TOFU_INIT_VARIABLES="$TOFU_INIT_VARIABLES -backend-config=key=$STATE_KEY" + +# Export for downstream scripts +export TOFU_VARIABLES +export TOFU_INIT_VARIABLES +export ARM_SUBSCRIPTION_ID +export ARM_CLIENT_ID +export ARM_TENANT_ID +export ARM_CLIENT_SECRET +export AZURE_RESOURCE_GROUP +export AZURE_LOCATION + +# Add modules path if available +script_dir="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)" 
+module_name="${script_dir}/modules"
+
+if [[ -d "$module_name" ]]; then
+  if [[ -n $MODULES_TO_USE ]]; then
+    MODULES_TO_USE="$MODULES_TO_USE,$module_name"
+  else
+    MODULES_TO_USE="$module_name"
+  fi
+  export MODULES_TO_USE
+fi
diff --git a/azure-apps/deployment/scripts/build_context b/azure-apps/deployment/scripts/build_context
new file mode 100755
index 00000000..4249ed04
--- /dev/null
+++ b/azure-apps/deployment/scripts/build_context
@@ -0,0 +1,153 @@
+#!/bin/bash
+set -euo pipefail
+
+# Extract scope information from context
+SCOPE_ID=$(echo "$CONTEXT" | jq -r '.scope.id')
+DEPLOYMENT_ID=$(echo "$CONTEXT" | jq -r '.deployment.id')
+
+# Extract NRN components
+eval "$(echo "$CONTEXT" | jq -r '.scope.nrn | "
+export ORGANIZATION_ID=\(capture("organization=(?<id>\\d+)").id)
+export ACCOUNT_ID=\(capture("account=(?<id>\\d+)").id)
+export NAMESPACE_ID=\(capture("namespace=(?<id>\\d+)").id)
+export APPLICATION_ID=\(capture("application=(?<id>\\d+)").id)
+"')"
+
+# Extract slugs for naming
+NAMESPACE_SLUG=$(echo "$CONTEXT" | jq -r '.namespace.slug')
+APPLICATION_SLUG=$(echo "$CONTEXT" | jq -r '.application.slug')
+SCOPE_SLUG=$(echo "$CONTEXT" | jq -r '.scope.slug')
+
+# Generate App Service name (must be globally unique)
+# Format: {namespace}-{application}-{scope}-{scope_id}
+# Intelligent trimming: always keep scope_id, remove full segments from left
+AZURE_APP_SERVICE_NAME_MAX_LENGTH=60
+APP_NAME=$("$SERVICE_PATH/deployment/scripts/generate_resource_name" "$AZURE_APP_SERVICE_NAME_MAX_LENGTH" "$NAMESPACE_SLUG" "$APPLICATION_SLUG" "$SCOPE_SLUG" "$SCOPE_ID")
+
+# Extract scope capabilities (required fields from service-spec have no defaults)
+MEMORY=$(echo "$CONTEXT" | jq -r '.scope.capabilities.memory')
+SKU_NAME=$("$SERVICE_PATH/deployment/scripts/get_sku_from_memory" "$MEMORY")
+WEBSOCKETS_ENABLED=$(echo "$CONTEXT" | jq -r '.scope.capabilities.websockets_enabled')
+SCALING_TYPE=$(echo "$CONTEXT" | jq -r '.scope.capabilities.scaling_type')
+FIXED_INSTANCES=$(echo "$CONTEXT" | jq -r '.scope.capabilities.fixed_instances')
+
+# Blue-green deployment settings from context
+# enable_staging_slot: enabled via ENABLE_STAGING_SLOT env var (set by workflow)
+# staging_traffic_percent: read from deployment strategy_data (can be overridden by env var)
+# promote_staging_to_production: enabled via PROMOTE_STAGING_TO_PRODUCTION env var (set by finalize workflow)
+# preserve_production_image: enabled via PRESERVE_PRODUCTION_IMAGE env var (for blue-green to keep current prod image)
+ENABLE_STAGING_SLOT="${ENABLE_STAGING_SLOT:-false}"
+STAGING_TRAFFIC_PERCENT="${STAGING_TRAFFIC_PERCENT:-$(echo "$CONTEXT" | jq -r '.deployment.strategy_data.desired_switched_traffic // 0')}"
+PROMOTE_STAGING_TO_PRODUCTION="${PROMOTE_STAGING_TO_PRODUCTION:-false}"
+PRESERVE_PRODUCTION_IMAGE="${PRESERVE_PRODUCTION_IMAGE:-false}"
+
+# State key for remote state lookup (used by preserve_production_image in azure_setup)
+STATE_KEY="azure-apps/${SCOPE_ID}/terraform.tfstate"
+
+# Managed identity settings (required for Azure RBAC integration, e.g., Cosmos DB)
+ENABLE_SYSTEM_IDENTITY="${ENABLE_SYSTEM_IDENTITY:-true}"
+
+# Extract autoscaling settings (no defaults - if not set, autoscaling is disabled)
+AUTOSCALING_MIN=$(echo "$CONTEXT" | jq -r '.scope.capabilities.autoscaling.min_instances')
+AUTOSCALING_MAX=$(echo "$CONTEXT" | jq -r '.scope.capabilities.autoscaling.max_instances')
+AUTOSCALING_CPU_THRESHOLD=$(echo "$CONTEXT" | jq -r '.scope.capabilities.autoscaling.target_cpu_utilization')
+AUTOSCALING_MEMORY_THRESHOLD=$(echo "$CONTEXT" | jq -r '.scope.capabilities.autoscaling.target_memory_utilization')
+
+# Extract health check settings (no default for path - if not set, health check is disabled in Terraform)
+HEALTH_CHECK_PATH=$(echo "$CONTEXT" | jq -r '.scope.capabilities.health_check.path')
+HEALTH_CHECK_EVICTION_TIME=$(echo "$CONTEXT" | jq -r '.scope.capabilities.health_check.eviction_time_in_min // 1')
+
+# Determine if autoscaling should be enabled
+ENABLE_AUTOSCALING="false" +if [ "$SCALING_TYPE" = "auto" ]; then + ENABLE_AUTOSCALING="true" +fi + +# Extract parameters/environment variables +PARAMETER_JSON=$(echo "$CONTEXT" | jq '[(.parameters.results // [])[] | {(.variable): .values[0].value}] | add // []') + +# Set output directory +OUTPUT_DIR="$SERVICE_PATH/output/$SCOPE_ID" +if [ -n "${NP_OUTPUT_DIR:-}" ]; then + OUTPUT_DIR="$NP_OUTPUT_DIR/output/$SCOPE_ID" +fi +mkdir -p "$OUTPUT_DIR" + +# Set Terraform working directory +TF_WORKING_DIR="$OUTPUT_DIR/terraform" +mkdir -p "$TF_WORKING_DIR" + +# Build TOFU_VARIABLES JSON with all terraform configuration +# Note: Only include variables that exist in the terraform module's variables.tf +TOFU_VARIABLES=$(jq -n \ + --arg app_name "$APP_NAME" \ + --arg docker_image "$DOCKER_IMAGE" \ + --arg docker_registry_url "${DOCKER_REGISTRY_URL:-https://index.docker.io}" \ + --arg docker_registry_username "${DOCKER_REGISTRY_USERNAME:-}" \ + --arg docker_registry_password "${DOCKER_REGISTRY_PASSWORD:-}" \ + --arg sku_name "$SKU_NAME" \ + --argjson websockets_enabled "$WEBSOCKETS_ENABLED" \ + --arg health_check_path "$HEALTH_CHECK_PATH" \ + --argjson health_check_eviction_time_in_min "$HEALTH_CHECK_EVICTION_TIME" \ + --argjson enable_staging_slot "$ENABLE_STAGING_SLOT" \ + --argjson staging_traffic_percent "$STAGING_TRAFFIC_PERCENT" \ + --argjson promote_staging_to_production "$PROMOTE_STAGING_TO_PRODUCTION" \ + --argjson preserve_production_image "$PRESERVE_PRODUCTION_IMAGE" \ + --arg state_key "$STATE_KEY" \ + --argjson enable_system_identity "$ENABLE_SYSTEM_IDENTITY" \ + --argjson enable_autoscaling "$ENABLE_AUTOSCALING" \ + --argjson autoscale_min_instances "$AUTOSCALING_MIN" \ + --argjson autoscale_max_instances "$AUTOSCALING_MAX" \ + --argjson autoscale_default_instances "$FIXED_INSTANCES" \ + --argjson cpu_scale_out_threshold "$AUTOSCALING_CPU_THRESHOLD" \ + --argjson memory_scale_out_threshold "$AUTOSCALING_MEMORY_THRESHOLD" \ + --argjson parameter_json 
"$PARAMETER_JSON" \ + --argjson https_only "${HTTPS_ONLY:-true}" \ + --arg minimum_tls_version "${MINIMUM_TLS_VERSION:-1.2}" \ + --arg ftps_state "${FTPS_STATE:-Disabled}" \ + --argjson client_affinity_enabled "${CLIENT_AFFINITY_ENABLED:-false}" \ + --argjson enable_logging "${ENABLE_LOGGING:-true}" \ + --arg application_logs_level "${APPLICATION_LOGS_LEVEL:-Information}" \ + --argjson http_logs_retention_days "${HTTP_LOGS_RETENTION_DAYS:-7}" \ + '{ + app_name: $app_name, + docker_image: $docker_image, + docker_registry_url: $docker_registry_url, + docker_registry_username: $docker_registry_username, + docker_registry_password: $docker_registry_password, + sku_name: $sku_name, + websockets_enabled: $websockets_enabled, + health_check_path: $health_check_path, + health_check_eviction_time_in_min: $health_check_eviction_time_in_min, + enable_staging_slot: $enable_staging_slot, + staging_traffic_percent: $staging_traffic_percent, + promote_staging_to_production: $promote_staging_to_production, + preserve_production_image: $preserve_production_image, + state_key: $state_key, + enable_system_identity: $enable_system_identity, + enable_autoscaling: $enable_autoscaling, + autoscale_min_instances: $autoscale_min_instances, + autoscale_max_instances: $autoscale_max_instances, + autoscale_default_instances: $autoscale_default_instances, + cpu_scale_out_threshold: $cpu_scale_out_threshold, + memory_scale_out_threshold: $memory_scale_out_threshold, + parameter_json: ($parameter_json | tojson), + https_only: $https_only, + minimum_tls_version: $minimum_tls_version, + ftps_state: $ftps_state, + client_affinity_enabled: $client_affinity_enabled, + enable_logging: $enable_logging, + application_logs_level: $application_logs_level, + http_logs_retention_days: $http_logs_retention_days + }') + +# Export all variables +export SCOPE_ID +export DEPLOYMENT_ID +export APP_NAME +export DOCKER_IMAGE +export OUTPUT_DIR +export TF_WORKING_DIR +export TOFU_VARIABLES +export CONTEXT +export 
STATE_KEY \ No newline at end of file diff --git a/azure-apps/deployment/scripts/do_tofu b/azure-apps/deployment/scripts/do_tofu new file mode 100755 index 00000000..1e838f78 --- /dev/null +++ b/azure-apps/deployment/scripts/do_tofu @@ -0,0 +1,59 @@ +#!/bin/bash +set -euo pipefail + +TOFU_ACTION="${TOFU_ACTION:-apply}" + +echo "📝 Running tofu $TOFU_ACTION for deployment: $DEPLOYMENT_ID" +echo "📋 App Service: $APP_NAME" + +TOFU_SOURCE="${TOFU_PATH:-$SERVICE_PATH/deployment/modules}" + +echo "📋 Source modules: $TOFU_SOURCE" +echo "📋 Working directory: $TF_WORKING_DIR" + +# Verify source directory exists +if [ ! -d "$TOFU_SOURCE" ]; then + echo "❌ Source directory does not exist: $TOFU_SOURCE" + exit 1 +fi + +# Copy tofu files to working directory +echo "📋 Copying terraform files..." +if ls "$TOFU_SOURCE"/*.tf 1>/dev/null 2>&1; then + cp -r "$TOFU_SOURCE"/*.tf "$TF_WORKING_DIR/" + echo " ✅ Copied $(ls "$TOFU_SOURCE"/*.tf | wc -l | tr -d ' ') .tf files" +else + echo " ❌ No .tf files found in $TOFU_SOURCE" + exit 1 +fi +cp -r "$TOFU_SOURCE"/scripts "$TF_WORKING_DIR/" 2>/dev/null || true + +# Copy custom modules (provider overrides for testing) +if [ -n "${CUSTOM_TOFU_MODULES:-}" ]; then + IFS=',' read -ra modules <<< "$CUSTOM_TOFU_MODULES" + for module in "${modules[@]}"; do + if [ -d "$module" ]; then + echo "📋 Adding custom module: $module" + cp -r "$module"/*.tf "$TF_WORKING_DIR/" 2>/dev/null || true + fi + done +fi + +# Generate tfvars from TOFU_VARIABLES +TOFU_VAR_FILE="$OUTPUT_DIR/tofu.tfvars.json" +echo "$TOFU_VARIABLES" > "$TOFU_VAR_FILE" + +# Run tofu +echo "📝 Initializing tofu..." 
+tofu -chdir="$TF_WORKING_DIR" init -input=false $TOFU_INIT_VARIABLES
+
+# Blue-green deployment image preservation is handled directly in Terraform
+# via the terraform_remote_state data source when preserve_production_image=true
+if [ "${PRESERVE_PRODUCTION_IMAGE:-false}" = "true" ]; then
+  echo "🔵 Blue-green mode: Terraform will preserve current production image and deploy new image to staging"
+fi
+
+echo "📝 Running tofu $TOFU_ACTION..."
+tofu -chdir="$TF_WORKING_DIR" "$TOFU_ACTION" -auto-approve -var-file="$TOFU_VAR_FILE"
+
+echo "✅ Tofu $TOFU_ACTION completed successfully"
diff --git a/azure-apps/deployment/scripts/docker_setup b/azure-apps/deployment/scripts/docker_setup
new file mode 100755
index 00000000..f5049167
--- /dev/null
+++ b/azure-apps/deployment/scripts/docker_setup
@@ -0,0 +1,54 @@
+#!/bin/bash
+
+echo "🔍 Validating Docker registry configuration..."
+
+DOCKER_REGISTRY_SERVER=$(echo "$CONTEXT" | jq -r '.providers["assets-repository"].setup.server // empty')
+DOCKER_REGISTRY_USERNAME=$(echo "$CONTEXT" | jq -r '.providers["assets-repository"].setup.username // empty')
+DOCKER_REGISTRY_PASSWORD=$(echo "$CONTEXT" | jq -r '.providers["assets-repository"].setup.password // empty')
+
+context_missing_vars=()
+
+if [ -z "$DOCKER_REGISTRY_SERVER" ]; then
+  echo " ❌ DOCKER_REGISTRY_SERVER could not be resolved from providers"
+  context_missing_vars+=("server")
+fi
+
+if [ -z "$DOCKER_REGISTRY_USERNAME" ]; then
+  echo " ❌ DOCKER_REGISTRY_USERNAME could not be resolved from providers"
+  context_missing_vars+=("username")
+fi
+
+if [ -z "$DOCKER_REGISTRY_PASSWORD" ]; then
+  echo " ❌ DOCKER_REGISTRY_PASSWORD could not be resolved from providers"
+  context_missing_vars+=("password")
+fi
+
+if [ ${#context_missing_vars[@]} -gt 0 ]; then
+  echo ""
+  echo " 💡 Possible causes:"
+  echo " Verify that you have a Docker server asset provider linked to this scope."
+  echo " The Docker server provider must include the following fields:"
+  for var in "${context_missing_vars[@]}"; do
+    echo " • $var"
+  done
+  echo ""
+  exit 1
+fi
+
+DOCKER_REGISTRY_URL="https://${DOCKER_REGISTRY_SERVER}"
+
+DOCKER_IMAGE_URL=$(echo "$CONTEXT" | jq -r '.asset.url // empty')
+DOCKER_IMAGE="${DOCKER_IMAGE_URL#*/}"
+
+echo " ✅ DOCKER_REGISTRY_URL=$DOCKER_REGISTRY_URL"
+echo " ✅ DOCKER_REGISTRY_USERNAME=$DOCKER_REGISTRY_USERNAME"
+echo " ✅ DOCKER_REGISTRY_PASSWORD=****"
+echo " ✅ DOCKER_IMAGE=$DOCKER_IMAGE"
+echo ""
+echo "✨ Docker registry configured successfully"
+echo ""
+
+export DOCKER_REGISTRY_URL
+export DOCKER_REGISTRY_USERNAME
+export DOCKER_REGISTRY_PASSWORD
+export DOCKER_IMAGE
diff --git a/azure-apps/deployment/scripts/generate_resource_name b/azure-apps/deployment/scripts/generate_resource_name
new file mode 100755
index 00000000..ef2dfcf2
--- /dev/null
+++ b/azure-apps/deployment/scripts/generate_resource_name
@@ -0,0 +1,79 @@
+#!/bin/bash
+# =============================================================================
+# Generate Resource Name
+#
+# Generates a resource name from segments with intelligent truncation.
+# The ID (last segment) is always preserved; if the name exceeds max length,
+# full segments are removed from the left until it fits.
+#
+# Usage:
+#   ./generate_resource_name <max_length> <segment1> [<segment2> ...]
+#
+# Example:
+#   ./generate_resource_name 60 "$NAMESPACE_SLUG" "$APPLICATION_SLUG" "$SCOPE_SLUG" "$SCOPE_ID"
+#   # Output: namespace-application-scope-123
+#   # Or if too long: application-scope-123 (removes leftmost segments first)
+#
+# Arguments:
+#   max_length   Maximum allowed length for the final name
+#   segments     One or more name segments (last one is the ID, always preserved)
+#
+# Output:
+#   Prints the generated name to stdout
+# =============================================================================
+
+set -euo pipefail
+
+if [ $# -lt 2 ]; then
+  echo "Usage: $0 <max_length> <segment1> [segment2] ..." >&2
+  echo "Example: $0 60 namespace app scope 123" >&2
+  exit 1
+fi
+
+max_length="$1"
+shift
+
+# Collect all segments
+segments=("$@")
+segment_count=${#segments[@]}
+
+# Last segment is the ID - always preserved
+id="${segments[$((segment_count - 1))]}"
+suffix="-${id}"
+suffix_length=${#suffix}
+max_prefix_length=$((max_length - suffix_length))
+
+# Build prefix from all segments except the ID
+prefix=""
+for ((i = 0; i < segment_count - 1; i++)); do
+  if [ -n "$prefix" ]; then
+    prefix="${prefix}-${segments[$i]}"
+  else
+    prefix="${segments[$i]}"
+  fi
+done
+
+# If no prefix segments, just return the ID
+if [ -z "$prefix" ]; then
+  echo "$id"
+  exit 0
+fi
+
+# Trim from the left, removing full segments until it fits
+while [ ${#prefix} -gt $max_prefix_length ]; do
+  if [[ "$prefix" == *-* ]]; then
+    # Remove the first segment (up to and including the first hyphen)
+    prefix="${prefix#*-}"
+  else
+    # No more hyphens, truncate at max length (last resort)
+    prefix="${prefix:0:$max_prefix_length}"
+    break
+  fi
+done
+
+# Handle edge case: prefix might be empty after trimming
+if [ -z "$prefix" ]; then
+  echo "$id"
+else
+  echo "${prefix}${suffix}"
+fi
diff --git a/azure-apps/deployment/scripts/get_sku_from_memory b/azure-apps/deployment/scripts/get_sku_from_memory
new file mode 100755
index 00000000..5fb56c1a
--- /dev/null
+++ b/azure-apps/deployment/scripts/get_sku_from_memory
@@ -0,0 +1,71 @@
+#!/bin/bash
+# =============================================================================
+# Get SKU from Memory
+#
+# Maps a memory value (in GB) to the appropriate Azure App Service SKU.
+# This provides a developer-friendly interface - developers specify memory
+# requirements without needing to know Azure-specific SKU names.
+# +# Usage: +# ./get_sku_from_memory +# +# Arguments: +# memory_gb Memory requirement in GB (valid values: 2, 4, 8, 16, 32) +# +# Output: +# Prints the Azure SKU name to stdout +# +# Memory to SKU mapping: +# 2 GB -> S1 (Standard, 1.75 GB RAM, 1 vCPU) +# 4 GB -> S2 (Standard, 3.5 GB RAM, 2 vCPU) +# 8 GB -> P1v3 (Premium, 8 GB RAM, 2 vCPU) +# 16 GB -> P2v3 (Premium, 16 GB RAM, 4 vCPU) +# 32 GB -> P3v3 (Premium, 32 GB RAM, 8 vCPU) +# ============================================================================= + +set -euo pipefail + +if [ $# -ne 1 ]; then + echo "❌ Missing required argument: memory" >&2 + echo "" >&2 + echo "Usage: $0 " >&2 + echo "Valid memory values: 2, 4, 8, 16, 32" >&2 + exit 1 +fi + +memory="$1" + +case "$memory" in + 1) + echo "F1" + ;; + 2) + echo "S1" + ;; + 4) + echo "S2" + ;; + 8) + echo "P1v3" + ;; + 16) + echo "P2v3" + ;; + 32) + echo "P3v3" + ;; + *) + echo "❌ Invalid memory value: $memory" >&2 + echo "" >&2 + echo "💡 Valid memory values are: 2, 4, 8, 16, 32 (in GB)" >&2 + echo "" >&2 + echo "🔧 How to fix:" >&2 + echo " Choose one of the supported memory configurations:" >&2 + echo " • 2 GB - Standard tier (S1)" >&2 + echo " • 4 GB - Standard tier (S2)" >&2 + echo " • 8 GB - Premium tier (P1v3)" >&2 + echo " • 16 GB - Premium tier (P2v3)" >&2 + echo " • 32 GB - Premium tier (P3v3)" >&2 + exit 1 + ;; +esac diff --git a/azure-apps/deployment/scripts/resolve_azure_context b/azure-apps/deployment/scripts/resolve_azure_context new file mode 100755 index 00000000..52b3c522 --- /dev/null +++ b/azure-apps/deployment/scripts/resolve_azure_context @@ -0,0 +1,63 @@ +#!/bin/bash +# Shared helper to resolve Azure context from slugs and provider +# +# Required inputs (must be set before sourcing): +# SCOPE_ID - The scope ID +# SCOPE_SLUG - The scope slug +# NS_SLUG - The namespace slug +# APP_SLUG - The application slug +# SCOPE_NRN - The scope NRN (for provider lookup) +# SERVICE_PATH - Path to azure-apps root +# ARM_CLIENT_SECRET - Azure client 
secret (from environment) +# +# Exports: +# APP_NAME - Generated Azure App Service name +# ARM_SUBSCRIPTION_ID - Azure subscription ID +# ARM_CLIENT_ID - Azure client ID +# ARM_TENANT_ID - Azure tenant ID +# AZURE_RESOURCE_GROUP - Azure resource group +# AZURE_ACCESS_TOKEN - Azure OAuth access token + +set -euo pipefail + +# Validate required inputs +if [ -z "${SCOPE_ID:-}" ]; then + echo "Error: Missing required parameter: scope_id" >&2 + exit 1 +fi + +if [ -z "${SCOPE_SLUG:-}" ] || [ -z "${NS_SLUG:-}" ] || [ -z "${APP_SLUG:-}" ]; then + echo "Error: Could not extract slugs from context" >&2 + exit 1 +fi + +# Generate Azure App Service name +APP_NAME=$("$SERVICE_PATH/deployment/scripts/generate_resource_name" 60 "$NS_SLUG" "$APP_SLUG" "$SCOPE_SLUG" "$SCOPE_ID") +export APP_NAME + +# Fetch Azure credentials from cloud provider with single jq call +PROVIDERS=$(np provider list --categories cloud-providers --nrn "$SCOPE_NRN" --format json) + +eval "$(echo "$PROVIDERS" | jq -r '.results[0] | + "export ARM_SUBSCRIPTION_ID=" + (.attributes.authentication.subscription_id // "" | @sh) + "\n" + + "export ARM_CLIENT_ID=" + (.attributes.authentication.client_id // "" | @sh) + "\n" + + "export ARM_TENANT_ID=" + (.attributes.authentication.tenant_id // "" | @sh) + "\n" + + "export AZURE_RESOURCE_GROUP=" + (.attributes.networking.public_dns_zone_resource_group_name // "" | @sh) +')" + +if [ -z "$ARM_SUBSCRIPTION_ID" ] || [ -z "$ARM_CLIENT_ID" ] || [ -z "$ARM_TENANT_ID" ]; then + echo "Error: Could not resolve Azure credentials from cloud-providers provider" >&2 + exit 1 +fi + +# Get Azure access token via REST API (faster than az CLI) +TOKEN_RESPONSE=$(curl -s -X POST \ + "https://login.microsoftonline.com/${ARM_TENANT_ID}/oauth2/v2.0/token" \ + -H "Content-Type: application/x-www-form-urlencoded" \ + -d "client_id=${ARM_CLIENT_ID}&client_secret=${ARM_CLIENT_SECRET}&scope=https://management.azure.com/.default&grant_type=client_credentials") +export AZURE_ACCESS_TOKEN=$(echo 
"$TOKEN_RESPONSE" | jq -r '.access_token // empty') + +if [ -z "$AZURE_ACCESS_TOKEN" ]; then + echo "Error: Failed to get Azure access token" >&2 + exit 1 +fi diff --git a/azure-apps/deployment/scripts/traffic_management b/azure-apps/deployment/scripts/traffic_management new file mode 100644 index 00000000..3b67b657 --- /dev/null +++ b/azure-apps/deployment/scripts/traffic_management @@ -0,0 +1,43 @@ +#!/bin/bash +# ============================================================================= +# Traffic Management Script for Azure App Service Deployment Slots +# ============================================================================= +# Sets traffic distribution between production and staging slots based on +# STAGING_TRAFFIC_PERCENT from build_context. +# +# Prerequisites (sourced before this script): +# - azure_setup: exports AZURE_RESOURCE_GROUP, ARM_CLIENT_ID, +# ARM_CLIENT_SECRET, ARM_TENANT_ID, ARM_SUBSCRIPTION_ID +# - build_context: sets APP_NAME, STAGING_TRAFFIC_PERCENT +# ============================================================================= + +set -euo pipefail + +STAGING_TRAFFIC_PERCENT="${STAGING_TRAFFIC_PERCENT:-0}" + +echo "🔀 Configuring traffic routing..." 
+ +# Login to Azure using service principal credentials from azure_setup +az login --service-principal \ + --username "$ARM_CLIENT_ID" \ + --password "$ARM_CLIENT_SECRET" \ + --tenant "$ARM_TENANT_ID" \ + --output none + +az account set --subscription "$ARM_SUBSCRIPTION_ID" + +if [ "$STAGING_TRAFFIC_PERCENT" -eq 0 ]; then + echo " Clearing traffic routing for $APP_NAME (100% to production)" + az webapp traffic-routing clear \ + --resource-group "$AZURE_RESOURCE_GROUP" \ + --name "$APP_NAME" +else + PRODUCTION_PERCENT=$((100 - STAGING_TRAFFIC_PERCENT)) + echo " Setting traffic for $APP_NAME: production=${PRODUCTION_PERCENT}% staging=${STAGING_TRAFFIC_PERCENT}%" + az webapp traffic-routing set \ + --resource-group "$AZURE_RESOURCE_GROUP" \ + --name "$APP_NAME" \ + --distribution staging="$STAGING_TRAFFIC_PERCENT" +fi + +echo "✅ Traffic routing updated successfully" diff --git a/azure-apps/deployment/tests/integration/mocks/azure-provider/category.json b/azure-apps/deployment/tests/integration/mocks/azure-provider/category.json new file mode 100644 index 00000000..45ec79b1 --- /dev/null +++ b/azure-apps/deployment/tests/integration/mocks/azure-provider/category.json @@ -0,0 +1,12 @@ +{ + "status": 200, + "body": { + "results": [ + { + "id": "cloud-provider-id", + "slug": "cloud-provider", + "name": "Cloud provider" + } + ] + } +} diff --git a/azure-apps/deployment/tests/integration/mocks/azure-provider/get_provider.json b/azure-apps/deployment/tests/integration/mocks/azure-provider/get_provider.json new file mode 100644 index 00000000..4a483b6d --- /dev/null +++ b/azure-apps/deployment/tests/integration/mocks/azure-provider/get_provider.json @@ -0,0 +1,18 @@ +{ + "status": 200, + "body": { + "id": "azure-id", + "specification_id": "azure-spec-id", + "category": "cloud-provider-id", + "attributes": { + "authentication": { + "subscription_id": "mock-subscription-id", + "client_id": "mock-client-id", + "tenant_id": "mock-tenant-id" + }, + "networking": { + 
"public_dns_zone_resource_group_name": "test-resource-group" + } + } + } +} diff --git a/azure-apps/deployment/tests/integration/mocks/azure-provider/list_provider.json b/azure-apps/deployment/tests/integration/mocks/azure-provider/list_provider.json new file mode 100644 index 00000000..142de420 --- /dev/null +++ b/azure-apps/deployment/tests/integration/mocks/azure-provider/list_provider.json @@ -0,0 +1,18 @@ +{ + "status": 200, + "body": { + "results": [ + { + "category": "cloud-provider", + "created_at": "2026-01-07T16:28:17.036Z", + "dimensions": {}, + "groups": [], + "id": "azure-id", + "nrn": "organization=1:account=2", + "specification_id": "azure-spec-id", + "tags": [], + "updated_at": "2026-01-07T16:28:17.036Z" + } + ] + } +} \ No newline at end of file diff --git a/azure-apps/deployment/tests/integration/mocks/azure-provider/list_provider_spec.json b/azure-apps/deployment/tests/integration/mocks/azure-provider/list_provider_spec.json new file mode 100644 index 00000000..17f7b951 --- /dev/null +++ b/azure-apps/deployment/tests/integration/mocks/azure-provider/list_provider_spec.json @@ -0,0 +1,14 @@ +{ + "status": 200, + "body": { + "results": [ + { + "id": "azure-spec-id", + "slug": "azure", + "categories": [ + {"slug": "cloud-provider"} + ] + } + ] + } +} \ No newline at end of file diff --git a/azure-apps/deployment/tests/integration/mocks/scope/patch.json b/azure-apps/deployment/tests/integration/mocks/scope/patch.json new file mode 100644 index 00000000..5550c6db --- /dev/null +++ b/azure-apps/deployment/tests/integration/mocks/scope/patch.json @@ -0,0 +1,3 @@ +{ + "success": true +} diff --git a/azure-apps/deployment/tests/integration/resources/context_integration.json b/azure-apps/deployment/tests/integration/resources/context_integration.json new file mode 100644 index 00000000..c466e8c4 --- /dev/null +++ b/azure-apps/deployment/tests/integration/resources/context_integration.json @@ -0,0 +1,197 @@ +{ + "account": { + "created_at": 
"2023-01-31T21:53:32.597Z", + "id": 2, + "metadata": {}, + "name": "Playground", + "nrn": "organization=1:account=2", + "organization_id": 1, + "repository_prefix": "playground-repos", + "repository_provider": "github", + "settings": {}, + "slug": "playground", + "status": "active", + "updated_at": "2023-01-31T21:53:32.597Z" + }, + "application": { + "auto_deploy_on_creation": false, + "created_at": "2025-10-07T03:22:21.385Z", + "id": 4, + "is_mono_repo": false, + "messages": [], + "metadata": {}, + "name": "Automation", + "namespace_id": 3, + "nrn": "organization=1:account=2:namespace=3:application=4", + "repository_app_path": null, + "repository_url": "https://github.com/playground-repos/tools-automation", + "settings": {}, + "slug": "automation", + "status": "active", + "tags": {}, + "template_id": 1037172878, + "updated_at": "2025-10-07T03:22:30.695Z" + }, + "asset": { + "id": 6, + "build_id": 612605537, + "name": "main", + "type": "docker", + "url": "myregistry.azurecr.io/tools/automation:v1.0.0", + "platform": "linux/amd64", + "metadata": {}, + "nrn": "organization=1:account=2:namespace=3:application=4:build=5:asset=6" + }, + "deployment": { + "created_at": "2025-12-22T18:27:54.701Z", + "created_by": 123456789, + "deployment_group_id": null, + "deployment_token": "dep-token", + "expires_at": null, + "external_strategy_id": 10, + "id": 8, + "messages": [], + "metadata": {}, + "nrn": "organization=1:account=2:namespace=3:application=4:scope=7:deployment=8", + "release_id": 9, + "scope_id": 7, + "status": "creating", + "status_in_scope": "inactive", + "status_started_at": { + "creating": "2025-12-22T18:27:54.629Z" + }, + "strategy": "initial", + "strategy_data": { + "desired_switched_traffic": 0, + "parameters": { + "metrics": { + "enabled": false, + "rules": [] + }, + "traffic": { + "enable_auto_switch": false, + "interval": 10, + "step": "0.1" + } + } + }, + "updated_at": "2025-12-23T13:22:06.345Z", + "updated_by": null + }, + "namespace": { + "account_id": 2, 
+ "created_at": "2025-05-15T21:34:40.725Z", + "id": 3, + "metadata": {}, + "name": "Tools", + "nrn": "organization=1:account=2:namespace=3", + "slug": "tools", + "status": "active", + "updated_at": "2025-05-15T21:34:40.725Z" + }, + "parameters": { + "results": [ + { + "destination_path": null, + "id": 10, + "name": "DATABASE_URL", + "type": "environment", + "values": [ + { + "id": "11", + "value": "postgres://localhost:5432/db" + } + ], + "variable": "DATABASE_URL", + "version_id": 12 + }, + { + "destination_path": null, + "id": 13, + "name": "LOG_LEVEL", + "type": "environment", + "values": [ + { + "id": "14", + "value": "info" + } + ], + "variable": "LOG_LEVEL", + "version_id": 15 + } + ] + }, + "providers": { + "cloud-providers": { + "authentication": { + "subscription_id": "mock-subscription-id", + "client_id": "mock-client-id", + "tenant_id": "mock-tenant-id" + }, + "networking": { + "public_dns_zone_resource_group_name": "test-resource-group" + } + }, + "assets-repository": { + "setup": { + "password": "mock-registry-password", + "path": "mock-path", + "server": "mockregistry.azurecr.io", + "use_namespace": false, + "username": "mock-registry-user" + } + } + }, + "release": { + "application_id": 4, + "build_id": 5, + "created_at": "2025-12-12T13:07:27.435Z", + "id": 9, + "metadata": {}, + "nrn": "organization=1:account=2:namespace=3:application=4:release=9", + "semver": "v1.0.0", + "status": "active", + "updated_at": "2025-12-12T13:07:27.702Z" + }, + "scope": { + "application_id": 4, + "asset_name": "main", + "capabilities": { + "memory": 4, + "websockets_enabled": false, + "scaling_type": "fixed", + "fixed_instances": 1, + "health_check": { + "path": "/healthz", + "eviction_time_in_min": 5 + } + }, + "created_at": "2025-12-22T18:27:04.949Z", + "dimensions": { + "country": "argentina", + "environment": "development" + }, + "domain": "", + "domains": [], + "external_created": false, + "id": 7, + "instance_id": "some-instance-id", + "messages": [], + 
"metadata": {}, + "name": "Development tools", + "nrn": "organization=1:account=2:namespace=3:application=4:scope=7", + "profiles": [ + "environment_development", + "environment_development_country_argentina" + ], + "provider": "scope-type-id", + "requested_spec": {}, + "runtime_configurations": [], + "slug": "development-tools", + "status": "active", + "tags": [], + "tier": "important", + "type": "custom", + "updated_at": "2025-12-29T18:25:55.908Z" + } +} diff --git a/azure-apps/deployment/tests/integration/resources/context_integration_blue_green.json b/azure-apps/deployment/tests/integration/resources/context_integration_blue_green.json new file mode 100644 index 00000000..519d9087 --- /dev/null +++ b/azure-apps/deployment/tests/integration/resources/context_integration_blue_green.json @@ -0,0 +1,197 @@ +{ + "account": { + "created_at": "2023-01-31T21:53:32.597Z", + "id": 2, + "metadata": {}, + "name": "Playground", + "nrn": "organization=1:account=2", + "organization_id": 1, + "repository_prefix": "playground-repos", + "repository_provider": "github", + "settings": {}, + "slug": "playground", + "status": "active", + "updated_at": "2023-01-31T21:53:32.597Z" + }, + "application": { + "auto_deploy_on_creation": false, + "created_at": "2025-10-07T03:22:21.385Z", + "id": 4, + "is_mono_repo": false, + "messages": [], + "metadata": {}, + "name": "Automation", + "namespace_id": 3, + "nrn": "organization=1:account=2:namespace=3:application=4", + "repository_app_path": null, + "repository_url": "https://github.com/playground-repos/tools-automation", + "settings": {}, + "slug": "automation", + "status": "active", + "tags": {}, + "template_id": 1037172878, + "updated_at": "2025-10-07T03:22:30.695Z" + }, + "asset": { + "id": 6, + "build_id": 612605537, + "name": "main", + "type": "docker", + "url": "myregistry.azurecr.io/tools/automation:v2.0.0", + "platform": "linux/amd64", + "metadata": {}, + "nrn": "organization=1:account=2:namespace=3:application=4:build=5:asset=6" + }, 
+ "deployment": { + "created_at": "2025-12-22T18:27:54.701Z", + "created_by": 123456789, + "deployment_group_id": null, + "deployment_token": "dep-token", + "expires_at": null, + "external_strategy_id": 10, + "id": 8, + "messages": [], + "metadata": {}, + "nrn": "organization=1:account=2:namespace=3:application=4:scope=7:deployment=8", + "release_id": 9, + "scope_id": 7, + "status": "creating", + "status_in_scope": "inactive", + "status_started_at": { + "creating": "2025-12-22T18:27:54.629Z" + }, + "strategy": "initial", + "strategy_data": { + "desired_switched_traffic": 0, + "parameters": { + "metrics": { + "enabled": false, + "rules": [] + }, + "traffic": { + "enable_auto_switch": false, + "interval": 10, + "step": "0.1" + } + } + }, + "updated_at": "2025-12-23T13:22:06.345Z", + "updated_by": null + }, + "namespace": { + "account_id": 2, + "created_at": "2025-05-15T21:34:40.725Z", + "id": 3, + "metadata": {}, + "name": "Tools", + "nrn": "organization=1:account=2:namespace=3", + "slug": "tools", + "status": "active", + "updated_at": "2025-05-15T21:34:40.725Z" + }, + "parameters": { + "results": [ + { + "destination_path": null, + "id": 10, + "name": "DATABASE_URL", + "type": "environment", + "values": [ + { + "id": "11", + "value": "postgres://localhost:5432/db" + } + ], + "variable": "DATABASE_URL", + "version_id": 12 + }, + { + "destination_path": null, + "id": 13, + "name": "LOG_LEVEL", + "type": "environment", + "values": [ + { + "id": "14", + "value": "info" + } + ], + "variable": "LOG_LEVEL", + "version_id": 15 + } + ] + }, + "providers": { + "cloud-providers": { + "authentication": { + "subscription_id": "mock-subscription-id", + "client_id": "mock-client-id", + "tenant_id": "mock-tenant-id" + }, + "networking": { + "public_dns_zone_resource_group_name": "test-resource-group" + } + }, + "assets-repository": { + "setup": { + "password": "mock-registry-password", + "path": "mock-path", + "server": "mockregistry.azurecr.io", + "use_namespace": false, + 
"username": "mock-registry-user" + } + } + }, + "release": { + "application_id": 4, + "build_id": 5, + "created_at": "2025-12-12T13:07:27.435Z", + "id": 9, + "metadata": {}, + "nrn": "organization=1:account=2:namespace=3:application=4:release=9", + "semver": "v1.0.0", + "status": "active", + "updated_at": "2025-12-12T13:07:27.702Z" + }, + "scope": { + "application_id": 4, + "asset_name": "main", + "capabilities": { + "memory": 4, + "websockets_enabled": false, + "scaling_type": "fixed", + "fixed_instances": 1, + "health_check": { + "path": "/healthz", + "eviction_time_in_min": 5 + } + }, + "created_at": "2025-12-22T18:27:04.949Z", + "dimensions": { + "country": "argentina", + "environment": "development" + }, + "domain": "", + "domains": [], + "external_created": false, + "id": 7, + "instance_id": "some-instance-id", + "messages": [], + "metadata": {}, + "name": "Development tools", + "nrn": "organization=1:account=2:namespace=3:application=4:scope=7", + "profiles": [ + "environment_development", + "environment_development_country_argentina" + ], + "provider": "scope-type-id", + "requested_spec": {}, + "runtime_configurations": [], + "slug": "development-tools", + "status": "active", + "tags": [], + "tier": "important", + "type": "custom", + "updated_at": "2025-12-29T18:25:55.908Z" + } +} diff --git a/azure-apps/deployment/tests/integration/test_cases/azure_app_service/app_service_assertions.bash b/azure-apps/deployment/tests/integration/test_cases/azure_app_service/app_service_assertions.bash new file mode 100644 index 00000000..00bfbcd0 --- /dev/null +++ b/azure-apps/deployment/tests/integration/test_cases/azure_app_service/app_service_assertions.bash @@ -0,0 +1,531 @@ +# ============================================================================= +# App Service Assertions +# Custom assertions for verifying Azure App Service resources in integration tests +# ============================================================================= + +# 
----------------------------------------------------------------------------- +# assert_service_plan_exists +# Verify that an App Service Plan exists with the expected SKU +# +# Arguments: +# $1 - plan_name: Name of the App Service Plan +# $2 - subscription_id: Azure subscription ID +# $3 - resource_group: Azure resource group name +# $4 - expected_sku: Expected SKU name (optional, e.g., "S1", "P1v3") +# ----------------------------------------------------------------------------- +assert_service_plan_exists() { + local plan_name=$1 + local subscription_id=$2 + local resource_group=$3 + local expected_sku=${4:-} + + local path="/subscriptions/${subscription_id}/resourceGroups/${resource_group}/providers/Microsoft.Web/serverfarms/${plan_name}" + local response + response=$(azure_mock "$path" 2>/dev/null) + + local actual_name + actual_name=$(echo "$response" | jq -r '.name // empty') + + if [[ -z "$actual_name" || "$actual_name" == "null" ]]; then + echo "FAIL: App Service Plan '$plan_name' not found" + echo "Response: $response" + return 1 + fi + + if [[ "$actual_name" != "$plan_name" ]]; then + echo "FAIL: Expected App Service Plan name '$plan_name', got '$actual_name'" + return 1 + fi + + # Check SKU if provided + if [[ -n "$expected_sku" ]]; then + local actual_sku + actual_sku=$(echo "$response" | jq -r '.sku.name // empty') + if [[ "$actual_sku" != "$expected_sku" ]]; then + echo "FAIL: Expected SKU '$expected_sku', got '$actual_sku'" + return 1 + fi + fi + + echo "PASS: App Service Plan '$plan_name' exists" + return 0 +} + +# ----------------------------------------------------------------------------- +# assert_service_plan_not_exists +# Verify that an App Service Plan does NOT exist +# +# Arguments: +# $1 - plan_name: Name of the App Service Plan +# $2 - subscription_id: Azure subscription ID +# $3 - resource_group: Azure resource group name +# ----------------------------------------------------------------------------- +assert_service_plan_not_exists() { 
+ local plan_name=$1 + local subscription_id=$2 + local resource_group=$3 + + local path="/subscriptions/${subscription_id}/resourceGroups/${resource_group}/providers/Microsoft.Web/serverfarms/${plan_name}" + local response + response=$(azure_mock "$path" 2>/dev/null) + + local error_code + error_code=$(echo "$response" | jq -r '.error.code // empty') + + if [[ "$error_code" == "ResourceNotFound" ]]; then + echo "PASS: App Service Plan '$plan_name' does not exist (as expected)" + return 0 + fi + + # Check if the name field is empty or null (also indicates not found) + local actual_name + actual_name=$(echo "$response" | jq -r '.name // empty') + if [[ -z "$actual_name" || "$actual_name" == "null" ]]; then + echo "PASS: App Service Plan '$plan_name' does not exist (as expected)" + return 0 + fi + + echo "FAIL: App Service Plan '$plan_name' still exists" + echo "Response: $response" + return 1 +} + +# ----------------------------------------------------------------------------- +# assert_web_app_exists +# Verify that a Linux Web App exists +# +# Arguments: +# $1 - app_name: Name of the Web App +# $2 - subscription_id: Azure subscription ID +# $3 - resource_group: Azure resource group name +# ----------------------------------------------------------------------------- +assert_web_app_exists() { + local app_name=$1 + local subscription_id=$2 + local resource_group=$3 + + local path="/subscriptions/${subscription_id}/resourceGroups/${resource_group}/providers/Microsoft.Web/sites/${app_name}" + local response + response=$(azure_mock "$path" 2>/dev/null) + + local actual_name + actual_name=$(echo "$response" | jq -r '.name // empty') + + if [[ -z "$actual_name" || "$actual_name" == "null" ]]; then + echo "FAIL: Web App '$app_name' not found" + echo "Response: $response" + return 1 + fi + + if [[ "$actual_name" != "$app_name" ]]; then + echo "FAIL: Expected Web App name '$app_name', got '$actual_name'" + return 1 + fi + + echo "PASS: Web App '$app_name' exists" + return 0 
+} + +# ----------------------------------------------------------------------------- +# assert_web_app_not_exists +# Verify that a Linux Web App does NOT exist +# +# Arguments: +# $1 - app_name: Name of the Web App +# $2 - subscription_id: Azure subscription ID +# $3 - resource_group: Azure resource group name +# ----------------------------------------------------------------------------- +assert_web_app_not_exists() { + local app_name=$1 + local subscription_id=$2 + local resource_group=$3 + + local path="/subscriptions/${subscription_id}/resourceGroups/${resource_group}/providers/Microsoft.Web/sites/${app_name}" + local response + response=$(azure_mock "$path" 2>/dev/null) + + local error_code + error_code=$(echo "$response" | jq -r '.error.code // empty') + + if [[ "$error_code" == "ResourceNotFound" ]]; then + echo "PASS: Web App '$app_name' does not exist (as expected)" + return 0 + fi + + local actual_name + actual_name=$(echo "$response" | jq -r '.name // empty') + if [[ -z "$actual_name" || "$actual_name" == "null" ]]; then + echo "PASS: Web App '$app_name' does not exist (as expected)" + return 0 + fi + + echo "FAIL: Web App '$app_name' still exists" + echo "Response: $response" + return 1 +} + +# ----------------------------------------------------------------------------- +# assert_azure_app_service_configured +# Comprehensive assertion that verifies both the App Service Plan and Web App exist +# +# Arguments: +# $1 - app_name: Name of the Web App +# $2 - subscription_id: Azure subscription ID +# $3 - resource_group: Azure resource group name +# $4 - expected_sku: Expected SKU name (optional) +# ----------------------------------------------------------------------------- +assert_azure_app_service_configured() { + local app_name=$1 + local subscription_id=$2 + local resource_group=$3 + local expected_sku=${4:-} + + local plan_name="${app_name}-plan" + + echo "Verifying App Service configuration..." + + # Check App Service Plan + if ! 
assert_service_plan_exists "$plan_name" "$subscription_id" "$resource_group" "$expected_sku"; then + return 1 + fi + + # Check Web App + if ! assert_web_app_exists "$app_name" "$subscription_id" "$resource_group"; then + return 1 + fi + + echo "PASS: Azure App Service '$app_name' is fully configured" + return 0 +} + +# ----------------------------------------------------------------------------- +# assert_azure_app_service_not_configured +# Comprehensive assertion that verifies both the App Service Plan and Web App are removed +# +# Arguments: +# $1 - app_name: Name of the Web App +# $2 - subscription_id: Azure subscription ID +# $3 - resource_group: Azure resource group name +# ----------------------------------------------------------------------------- +assert_azure_app_service_not_configured() { + local app_name=$1 + local subscription_id=$2 + local resource_group=$3 + + local plan_name="${app_name}-plan" + + echo "Verifying App Service resources are removed..." + + # Check Web App is removed + if ! assert_web_app_not_exists "$app_name" "$subscription_id" "$resource_group"; then + return 1 + fi + + # Check App Service Plan is removed + if ! 
assert_service_plan_not_exists "$plan_name" "$subscription_id" "$resource_group"; then + return 1 + fi + + echo "PASS: Azure App Service '$app_name' is fully removed" + return 0 +} + +# ----------------------------------------------------------------------------- +# assert_log_analytics_exists +# Verify that a Log Analytics Workspace exists +# +# Arguments: +# $1 - workspace_name: Name of the Log Analytics Workspace +# $2 - subscription_id: Azure subscription ID +# $3 - resource_group: Azure resource group name +# ----------------------------------------------------------------------------- +assert_log_analytics_exists() { + local workspace_name=$1 + local subscription_id=$2 + local resource_group=$3 + + local path="/subscriptions/${subscription_id}/resourceGroups/${resource_group}/providers/Microsoft.OperationalInsights/workspaces/${workspace_name}" + local response + response=$(azure_mock "$path" 2>/dev/null) + + local actual_name + actual_name=$(echo "$response" | jq -r '.name // empty') + + if [[ -z "$actual_name" || "$actual_name" == "null" ]]; then + echo "FAIL: Log Analytics Workspace '$workspace_name' not found" + return 1 + fi + + echo "PASS: Log Analytics Workspace '$workspace_name' exists" + return 0 +} + +# ----------------------------------------------------------------------------- +# assert_app_insights_exists +# Verify that Application Insights exists +# +# Arguments: +# $1 - insights_name: Name of the Application Insights +# $2 - subscription_id: Azure subscription ID +# $3 - resource_group: Azure resource group name +# ----------------------------------------------------------------------------- +assert_app_insights_exists() { + local insights_name=$1 + local subscription_id=$2 + local resource_group=$3 + + local path="/subscriptions/${subscription_id}/resourceGroups/${resource_group}/providers/Microsoft.Insights/components/${insights_name}" + local response + response=$(azure_mock "$path" 2>/dev/null) + + local actual_name + actual_name=$(echo 
"$response" | jq -r '.name // empty') + + if [[ -z "$actual_name" || "$actual_name" == "null" ]]; then + echo "FAIL: Application Insights '$insights_name' not found" + return 1 + fi + + echo "PASS: Application Insights '$insights_name' exists" + return 0 +} + +# ----------------------------------------------------------------------------- +# assert_deployment_slot_exists +# Verify that a deployment slot exists for a Web App +# +# Arguments: +# $1 - app_name: Name of the Web App +# $2 - slot_name: Name of the deployment slot +# $3 - subscription_id: Azure subscription ID +# $4 - resource_group: Azure resource group name +# ----------------------------------------------------------------------------- +assert_deployment_slot_exists() { + local app_name=$1 + local slot_name=$2 + local subscription_id=$3 + local resource_group=$4 + + local path="/subscriptions/${subscription_id}/resourceGroups/${resource_group}/providers/Microsoft.Web/sites/${app_name}/slots/${slot_name}" + local response + response=$(azure_mock "$path" 2>/dev/null) + + local actual_name + actual_name=$(echo "$response" | jq -r '.name // empty') + + if [[ -z "$actual_name" || "$actual_name" == "null" ]]; then + echo "FAIL: Deployment slot '$slot_name' not found for app '$app_name'" + echo "Response: $response" + return 1 + fi + + # The slot name in the response includes the app name (e.g., "myapp/staging") + local expected_full_name="${app_name}/${slot_name}" + if [[ "$actual_name" != "$slot_name" && "$actual_name" != "$expected_full_name" ]]; then + echo "FAIL: Expected slot name '$slot_name', got '$actual_name'" + return 1 + fi + + echo "PASS: Deployment slot '$slot_name' exists for app '$app_name'" + return 0 +} + +# ----------------------------------------------------------------------------- +# assert_deployment_slot_not_exists +# Verify that a deployment slot does NOT exist for a Web App +# +# Arguments: +# $1 - app_name: Name of the Web App +# $2 - slot_name: Name of the deployment slot +# $3 - 
subscription_id: Azure subscription ID +# $4 - resource_group: Azure resource group name +# ----------------------------------------------------------------------------- +assert_deployment_slot_not_exists() { + local app_name=$1 + local slot_name=$2 + local subscription_id=$3 + local resource_group=$4 + + local path="/subscriptions/${subscription_id}/resourceGroups/${resource_group}/providers/Microsoft.Web/sites/${app_name}/slots/${slot_name}" + local response + response=$(azure_mock "$path" 2>/dev/null) + + local error_code + error_code=$(echo "$response" | jq -r '.error.code // empty') + + if [[ "$error_code" == "ResourceNotFound" ]]; then + echo "PASS: Deployment slot '$slot_name' does not exist (as expected)" + return 0 + fi + + local actual_name + actual_name=$(echo "$response" | jq -r '.name // empty') + if [[ -z "$actual_name" || "$actual_name" == "null" ]]; then + echo "PASS: Deployment slot '$slot_name' does not exist (as expected)" + return 0 + fi + + echo "FAIL: Deployment slot '$slot_name' still exists for app '$app_name'" + echo "Response: $response" + return 1 +} + +# ----------------------------------------------------------------------------- +# assert_azure_app_service_with_slot_configured +# Comprehensive assertion that verifies App Service Plan, Web App, and staging slot exist +# +# Arguments: +# $1 - app_name: Name of the Web App +# $2 - subscription_id: Azure subscription ID +# $3 - resource_group: Azure resource group name +# $4 - expected_sku: Expected SKU name (optional) +# $5 - slot_name: Name of the deployment slot (default: "staging") +# ----------------------------------------------------------------------------- +assert_azure_app_service_with_slot_configured() { + local app_name=$1 + local subscription_id=$2 + local resource_group=$3 + local expected_sku=${4:-} + local slot_name=${5:-staging} + + echo "Verifying App Service configuration with staging slot..." + + # First check base App Service configuration + if ! 
assert_azure_app_service_configured "$app_name" "$subscription_id" "$resource_group" "$expected_sku"; then + return 1 + fi + + # Check deployment slot + if ! assert_deployment_slot_exists "$app_name" "$slot_name" "$subscription_id" "$resource_group"; then + return 1 + fi + + echo "PASS: Azure App Service '$app_name' with slot '$slot_name' is fully configured" + return 0 +} + +# ----------------------------------------------------------------------------- +# assert_web_app_docker_image +# Verify that a Web App is configured with the expected docker image +# +# Arguments: +# $1 - app_name: Name of the Web App +# $2 - subscription_id: Azure subscription ID +# $3 - resource_group: Azure resource group name +# $4 - expected_image: Expected docker image (e.g., "myregistry.azurecr.io/app:v1.0.0") +# ----------------------------------------------------------------------------- +assert_web_app_docker_image() { + local app_name=$1 + local subscription_id=$2 + local resource_group=$3 + local expected_image=$4 + + local path="/subscriptions/${subscription_id}/resourceGroups/${resource_group}/providers/Microsoft.Web/sites/${app_name}" + local response + response=$(azure_mock "$path" 2>/dev/null) + + # The docker image is in siteConfig.linuxFxVersion as "DOCKER|image:tag" + local linux_fx_version + linux_fx_version=$(echo "$response" | jq -r '.properties.siteConfig.linuxFxVersion // empty') + + if [[ -z "$linux_fx_version" || "$linux_fx_version" == "null" ]]; then + echo "FAIL: Web App '$app_name' has no linuxFxVersion configured" + echo "Response: $response" + return 1 + fi + + # Extract the image from DOCKER|image format + local actual_image + actual_image="${linux_fx_version#DOCKER|}" + + if [[ "$actual_image" != "$expected_image" ]]; then + echo "FAIL: Expected docker image '$expected_image', got '$actual_image'" + return 1 + fi + + echo "PASS: Web App '$app_name' has docker image '$expected_image'" + return 0 +} + +# 
----------------------------------------------------------------------------- +# assert_deployment_slot_docker_image +# Verify that a deployment slot is configured with the expected docker image +# +# Arguments: +# $1 - app_name: Name of the Web App +# $2 - slot_name: Name of the deployment slot +# $3 - subscription_id: Azure subscription ID +# $4 - resource_group: Azure resource group name +# $5 - expected_image: Expected docker image (e.g., "myregistry.azurecr.io/app:v1.0.0") +# ----------------------------------------------------------------------------- +assert_deployment_slot_docker_image() { + local app_name=$1 + local slot_name=$2 + local subscription_id=$3 + local resource_group=$4 + local expected_image=$5 + + local path="/subscriptions/${subscription_id}/resourceGroups/${resource_group}/providers/Microsoft.Web/sites/${app_name}/slots/${slot_name}" + local response + response=$(azure_mock "$path" 2>/dev/null) + + # The docker image is in siteConfig.linuxFxVersion as "DOCKER|image:tag" + local linux_fx_version + linux_fx_version=$(echo "$response" | jq -r '.properties.siteConfig.linuxFxVersion // empty') + + if [[ -z "$linux_fx_version" || "$linux_fx_version" == "null" ]]; then + echo "FAIL: Deployment slot '$slot_name' has no linuxFxVersion configured" + echo "Response: $response" + return 1 + fi + + # Extract the image from DOCKER|image format + local actual_image + actual_image="${linux_fx_version#DOCKER|}" + + if [[ "$actual_image" != "$expected_image" ]]; then + echo "FAIL: Expected docker image '$expected_image' for slot '$slot_name', got '$actual_image'" + return 1 + fi + + echo "PASS: Deployment slot '$slot_name' has docker image '$expected_image'" + return 0 +} + +# ----------------------------------------------------------------------------- +# assert_web_app_app_settings_contain +# Verify that a Web App's app_settings contain expected key-value pairs +# +# Arguments: +# $1 - app_name: Name of the Web App +# $2 - subscription_id: Azure 
subscription ID +# $3 - resource_group: Azure resource group name +# $4..n - expected_keys: One or more setting keys that must exist (each key is a separate argument) +# ----------------------------------------------------------------------------- +assert_web_app_app_settings_contain() { + local app_name=$1 + local subscription_id=$2 + local resource_group=$3 + shift 3 + local expected_keys=("$@") + + local path="/subscriptions/${subscription_id}/resourceGroups/${resource_group}/providers/Microsoft.Web/sites/${app_name}/config/appSettings/list" + local response + response=$(curl -s -X POST "${AZURE_MOCK_ENDPOINT}${path}" 2>/dev/null) + + local properties + properties=$(echo "$response" | jq '.properties // {}') + + for key in "${expected_keys[@]}"; do + local value + value=$(echo "$properties" | jq -r --arg k "$key" '.[$k] // empty') + if [[ -z "$value" ]]; then + echo "FAIL: app_settings missing key '$key'" + echo "Available keys: $(echo "$properties" | jq -r 'keys | join(", ")')" + return 1 + fi + echo "PASS: app_settings contains '$key'='$value'" + done + + return 0 +} diff --git a/azure-apps/deployment/tests/integration/test_cases/azure_app_service/blue_green_test.bats b/azure-apps/deployment/tests/integration/test_cases/azure_app_service/blue_green_test.bats new file mode 100644 index 00000000..fc1c8ef4 --- /dev/null +++ b/azure-apps/deployment/tests/integration/test_cases/azure_app_service/blue_green_test.bats @@ -0,0 +1,224 @@ +#!/usr/bin/env bats +# ============================================================================= +# Integration test: Azure App Service Blue-Green Deployment +# +# Tests the blue-green deployment lifecycle: +# 1. Initial deployment (no staging slot) +# 2. Blue-green deployment (creates staging slot with 0% traffic) +# 3. 
Finalize deployment (swap slots, disable staging) +# ============================================================================= + +# ============================================================================= +# Test Constants +# ============================================================================= +TEST_APP_NAME="tools-automation-development-tools-7" +TEST_PLAN_NAME="tools-automation-development-tools-7-plan" +TEST_SLOT_NAME="staging" + +# Azure resource identifiers +TEST_SUBSCRIPTION_ID="mock-subscription-id" +TEST_RESOURCE_GROUP="test-resource-group" +TEST_LOCATION="eastus" + +# Expected SKU based on memory=4 GB from context +TEST_EXPECTED_SKU="S2" + +# Expected docker image from context asset.url +TEST_DOCKER_IMAGE="mockregistry.azurecr.io/tools/automation:v1.0.0" +TEST_STAGING_DOCKER_IMAGE="mockregistry.azurecr.io/tools/automation:v2.0.0" + +# ============================================================================= +# Test Setup +# ============================================================================= + +setup_file() { + source "${PROJECT_ROOT}/testing/integration_helpers.sh" + source "${PROJECT_ROOT}/testing/assertions.sh" + integration_setup --cloud-provider azure + + clear_mocks + + echo "Setting up blue-green deployment tests..." 
+ + export TEST_APP_NAME + export TEST_PLAN_NAME + export TEST_SLOT_NAME + export TEST_SUBSCRIPTION_ID + export TEST_RESOURCE_GROUP + export TEST_LOCATION + export TEST_EXPECTED_SKU + export TEST_DOCKER_IMAGE + export TEST_STAGING_DOCKER_IMAGE +} + +teardown_file() { + source "${PROJECT_ROOT}/testing/integration_helpers.sh" + clear_mocks + integration_teardown +} + +setup() { + source "${PROJECT_ROOT}/testing/integration_helpers.sh" + source "${PROJECT_ROOT}/testing/assertions.sh" + source "${BATS_TEST_DIRNAME}/app_service_assertions.bash" + + clear_mocks + load_context "azure-apps/deployment/tests/integration/resources/context_integration.json" + + # Export environment variables for workflow execution + export SERVICE_PATH="$INTEGRATION_MODULE_ROOT/azure-apps" + export CUSTOM_TOFU_MODULES="$INTEGRATION_MODULE_ROOT/testing/azure-mock-provider" + + # Use mock storage account for backend + export TOFU_PROVIDER_STORAGE_ACCOUNT="devstoreaccount1" + export TOFU_PROVIDER_CONTAINER="tfstate" + export ARM_CLIENT_SECRET="mock-client-secret" + + # Setup API mocks for np CLI calls + local mocks_dir="azure-apps/deployment/tests/integration/mocks/" + mock_request "PATCH" "/scope/7" "$mocks_dir/scope/patch.json" + + mock_request "GET" "/category" "$mocks_dir/azure-provider/category.json" + mock_request "GET" "/provider_specification" "$mocks_dir/azure-provider/list_provider_spec.json" + mock_request "GET" "/provider" "$mocks_dir/azure-provider/list_provider.json" + mock_request "GET" "/provider/azure-id" "$mocks_dir/azure-provider/get_provider.json" + + # Ensure tfstate container exists in azure-mock + curl -s -X PUT "${AZURE_MOCK_ENDPOINT}/tfstate?restype=container" \ + -H "Host: devstoreaccount1.blob.core.windows.net" \ + -H "x-ms-version: 2021-06-08" >/dev/null 2>&1 || true +} + +# ============================================================================= +# Test: Initial Deployment (no staging slot) +# 
============================================================================= + +@test "Should create App Service without staging slot on initial deployment" { + run_workflow "azure-apps/deployment/workflows/initial.yaml" + + # Verify App Service is created + assert_azure_app_service_configured \ + "$TEST_APP_NAME" \ + "$TEST_SUBSCRIPTION_ID" \ + "$TEST_RESOURCE_GROUP" \ + "$TEST_EXPECTED_SKU" + + # Verify production has the correct docker image + assert_web_app_docker_image \ + "$TEST_APP_NAME" \ + "$TEST_SUBSCRIPTION_ID" \ + "$TEST_RESOURCE_GROUP" \ + "$TEST_DOCKER_IMAGE" + + # Verify staging slot does NOT exist + assert_deployment_slot_not_exists \ + "$TEST_APP_NAME" \ + "$TEST_SLOT_NAME" \ + "$TEST_SUBSCRIPTION_ID" \ + "$TEST_RESOURCE_GROUP" +} + +# ============================================================================= +# Test: Blue-Green Deployment (creates staging slot) +# ============================================================================= + +@test "Should create staging slot on blue_green deployment" { + # First run initial to create base infrastructure + run_workflow "azure-apps/deployment/workflows/initial.yaml" + + # Update context to deploy new version (v2.0.0) to staging slot + export CONTEXT=$(echo "$CONTEXT" | jq '.asset.url = "mockregistry.azurecr.io/tools/automation:v2.0.0"') + + # Then run blue_green to create staging slot + run_workflow "azure-apps/deployment/workflows/blue_green.yaml" + + # Verify App Service with staging slot is created + assert_azure_app_service_with_slot_configured \ + "$TEST_APP_NAME" \ + "$TEST_SUBSCRIPTION_ID" \ + "$TEST_RESOURCE_GROUP" \ + "$TEST_EXPECTED_SKU" \ + "$TEST_SLOT_NAME" + + # Verify production slot keeps the original docker image + assert_web_app_docker_image \ + "$TEST_APP_NAME" \ + "$TEST_SUBSCRIPTION_ID" \ + "$TEST_RESOURCE_GROUP" \ + "$TEST_DOCKER_IMAGE" + + # Verify staging slot got the NEW docker image (v2.0.0 set in the context above) + assert_deployment_slot_docker_image \ + 
"$TEST_APP_NAME" \ + "$TEST_SLOT_NAME" \ + "$TEST_SUBSCRIPTION_ID" \ + "$TEST_RESOURCE_GROUP" \ + "$TEST_STAGING_DOCKER_IMAGE" +} + +# ============================================================================= +# Test: Switch Traffic +# ============================================================================= + +@test "Should maintain staging slot with traffic percentage from context on switch_traffic deployment" { + # Setup: Create infrastructure with initial deployment + run_workflow "azure-apps/deployment/workflows/initial.yaml" + + # Update context to deploy new version (v2.0.0) to staging slot + export CONTEXT=$(echo "$CONTEXT" | jq '.asset.url = "mockregistry.azurecr.io/tools/automation:v2.0.0"') + run_workflow "azure-apps/deployment/workflows/blue_green.yaml" + + # Modify context to have desired_switched_traffic = 50 + export CONTEXT=$(echo "$CONTEXT" | jq '.deployment.strategy_data.desired_switched_traffic = 50') + + # Run switch_traffic workflow + run_workflow "azure-apps/deployment/workflows/switch_traffic.yaml" + + # Verify staging slot still exists (traffic routing is via Azure CLI, not terraform state) + assert_azure_app_service_with_slot_configured \ + "$TEST_APP_NAME" \ + "$TEST_SUBSCRIPTION_ID" \ + "$TEST_RESOURCE_GROUP" \ + "$TEST_EXPECTED_SKU" \ + "$TEST_SLOT_NAME" + + # Verify docker images are preserved after traffic switch + assert_web_app_docker_image \ + "$TEST_APP_NAME" \ + "$TEST_SUBSCRIPTION_ID" \ + "$TEST_RESOURCE_GROUP" \ + "$TEST_DOCKER_IMAGE" + + assert_deployment_slot_docker_image \ + "$TEST_APP_NAME" \ + "$TEST_SLOT_NAME" \ + "$TEST_SUBSCRIPTION_ID" \ + "$TEST_RESOURCE_GROUP" \ + "$TEST_STAGING_DOCKER_IMAGE" +} + +# ============================================================================= +# Test: Destroy Blue-Green Infrastructure +# ============================================================================= + +@test "Should remove App Service and all slots on delete" { + # Setup: Create infrastructure with staging slot 
+ run_workflow "azure-apps/deployment/workflows/initial.yaml" + run_workflow "azure-apps/deployment/workflows/blue_green.yaml" + + # Destroy all infrastructure + run_workflow "azure-apps/deployment/workflows/delete.yaml" + + # Verify everything is removed + assert_azure_app_service_not_configured \ + "$TEST_APP_NAME" \ + "$TEST_SUBSCRIPTION_ID" \ + "$TEST_RESOURCE_GROUP" + + # Staging slot should also be gone (implicitly deleted with app) + assert_deployment_slot_not_exists \ + "$TEST_APP_NAME" \ + "$TEST_SLOT_NAME" \ + "$TEST_SUBSCRIPTION_ID" \ + "$TEST_RESOURCE_GROUP" +} diff --git a/azure-apps/deployment/tests/integration/test_cases/azure_app_service/lifecycle_test.bats b/azure-apps/deployment/tests/integration/test_cases/azure_app_service/lifecycle_test.bats new file mode 100644 index 00000000..f1c08bc1 --- /dev/null +++ b/azure-apps/deployment/tests/integration/test_cases/azure_app_service/lifecycle_test.bats @@ -0,0 +1,127 @@ +#!/usr/bin/env bats +# ============================================================================= +# Integration test: Azure App Service Lifecycle +# +# Tests the full lifecycle of an Azure App Service deployment: +# 1. Create infrastructure (App Service Plan + Linux Web App) +# 2. Verify all resources are configured correctly +# 3. Destroy infrastructure +# 4. 
Verify all resources are removed +# ============================================================================= + +# ============================================================================= +# Test Constants +# ============================================================================= +# Expected values derived from context_integration.json +# App name is generated: {namespace_slug}-{application_slug}-{scope_slug}-{scope_id} +# From context: tools-automation-development-tools-7 + +TEST_APP_NAME="tools-automation-development-tools-7" +TEST_PLAN_NAME="tools-automation-development-tools-7-plan" + +# Azure resource identifiers (from context providers.cloud-providers.azure) +TEST_SUBSCRIPTION_ID="mock-subscription-id" +TEST_RESOURCE_GROUP="test-resource-group" +TEST_LOCATION="eastus" + +# Expected SKU based on memory=4 GB from context +TEST_EXPECTED_SKU="S2" + +# ============================================================================= +# Test Setup +# ============================================================================= + +setup_file() { + source "${PROJECT_ROOT}/testing/integration_helpers.sh" + source "${PROJECT_ROOT}/testing/assertions.sh" + integration_setup --cloud-provider azure + + clear_mocks + + echo "Creating test prerequisites in Azure Mock..." 
+ + # Export test variables for use in tests + export TEST_APP_NAME + export TEST_PLAN_NAME + export TEST_SUBSCRIPTION_ID + export TEST_RESOURCE_GROUP + export TEST_LOCATION + export TEST_EXPECTED_SKU +} + +teardown_file() { + source "${PROJECT_ROOT}/testing/integration_helpers.sh" + clear_mocks + integration_teardown +} + +setup() { + source "${PROJECT_ROOT}/testing/integration_helpers.sh" + source "${PROJECT_ROOT}/testing/assertions.sh" + source "${BATS_TEST_DIRNAME}/app_service_assertions.bash" + + clear_mocks + load_context "azure-apps/deployment/tests/integration/resources/context_integration.json" + + # Export environment variables for workflow execution + export SERVICE_PATH="$INTEGRATION_MODULE_ROOT/azure-apps" + export CUSTOM_TOFU_MODULES="$INTEGRATION_MODULE_ROOT/testing/azure-mock-provider" + + # Use mock storage account for backend (handled by azure-mock) + export TOFU_PROVIDER_STORAGE_ACCOUNT="devstoreaccount1" + export TOFU_PROVIDER_CONTAINER="tfstate" + export ARM_CLIENT_SECRET="mock-client-secret" + + # Setup API mocks for np CLI calls + local mocks_dir="azure-apps/deployment/tests/integration/mocks/" + mock_request "PATCH" "/scope/7" "$mocks_dir/scope/patch.json" + + mock_request "GET" "/category" "$mocks_dir/azure-provider/category.json" + mock_request "GET" "/provider_specification" "$mocks_dir/azure-provider/list_provider_spec.json" + mock_request "GET" "/provider" "$mocks_dir/azure-provider/list_provider.json" + mock_request "GET" "/provider/azure-id" "$mocks_dir/azure-provider/get_provider.json" + + # Ensure tfstate container exists in azure-mock for Terraform backend + curl -s -X PUT "${AZURE_MOCK_ENDPOINT}/tfstate?restype=container" \ + -H "Host: devstoreaccount1.blob.core.windows.net" \ + -H "x-ms-version: 2021-06-08" >/dev/null 2>&1 || true +} + +# ============================================================================= +# Test: Create Infrastructure +# ============================================================================= + 
+@test "create infrastructure deploys Azure App Service resources" { + run_workflow "azure-apps/deployment/workflows/initial.yaml" + + assert_azure_app_service_configured \ + "$TEST_APP_NAME" \ + "$TEST_SUBSCRIPTION_ID" \ + "$TEST_RESOURCE_GROUP" \ + "$TEST_EXPECTED_SKU" + + # Verify app_settings merge all 3 sources: + # - env_variables (from context parameters): DATABASE_URL, LOG_LEVEL + # - base_app_settings (from locals.tf): WEBSITES_ENABLE_APP_SERVICE_STORAGE, DOCKER_ENABLE_CI + assert_web_app_app_settings_contain \ + "$TEST_APP_NAME" \ + "$TEST_SUBSCRIPTION_ID" \ + "$TEST_RESOURCE_GROUP" \ + "DATABASE_URL" \ + "LOG_LEVEL" \ + "WEBSITES_ENABLE_APP_SERVICE_STORAGE" \ + "DOCKER_ENABLE_CI" +} + +# ============================================================================= +# Test: Destroy Infrastructure +# ============================================================================= + +@test "destroy infrastructure removes Azure App Service resources" { + run_workflow "azure-apps/deployment/workflows/delete.yaml" + + assert_azure_app_service_not_configured \ + "$TEST_APP_NAME" \ + "$TEST_SUBSCRIPTION_ID" \ + "$TEST_RESOURCE_GROUP" +} diff --git a/azure-apps/deployment/tests/modules/deployment_slots_test.bats b/azure-apps/deployment/tests/modules/deployment_slots_test.bats new file mode 100644 index 00000000..2da6b81c --- /dev/null +++ b/azure-apps/deployment/tests/modules/deployment_slots_test.bats @@ -0,0 +1,201 @@ +#!/usr/bin/env bats +# ============================================================================= +# Unit tests for deployment_slots terraform configuration +# +# Tests the deployment slots terraform module validation: +# - Variable validation (staging_traffic_percent range) +# - Configuration validity with different variable combinations +# +# Note: Resource creation tests are in integration tests which use azure-mock +# +# Requirements: +# - bats-core: brew install bats-core +# - tofu/terraform: brew install opentofu +# +# Run tests: +# bats 
tests/modules/deployment_slots_test.bats +# ============================================================================= + +# Setup once for all tests +setup_file() { + export TEST_DIR="$(cd "$(dirname "$BATS_TEST_FILENAME")" && pwd)" + export PROJECT_DIR="$(cd "$TEST_DIR/../.." && pwd)" + export PROJECT_ROOT="$(cd "$PROJECT_DIR/../.." && pwd)" + export MODULE_PATH="$PROJECT_DIR/modules" + + # Create temporary working directory + export TEST_WORKING_DIR=$(mktemp -d) + + # Copy module files to working directory + cp -r "$MODULE_PATH"/* "$TEST_WORKING_DIR/" + + # Create a minimal provider override for validation testing + cat > "$TEST_WORKING_DIR/provider_override.tf" << 'EOF' +terraform { + backend "local" {} +} +EOF + + # Initialize terraform once (backend=false for validation only) + cd "$TEST_WORKING_DIR" + tofu init -backend=false >/dev/null 2>&1 +} + +# Cleanup after all tests +teardown_file() { + rm -rf "$TEST_WORKING_DIR" +} + +# Setup before each test +setup() { + cd "$TEST_WORKING_DIR" + + # Base required variables for all tests + export TF_VAR_resource_group_name="test-rg" + export TF_VAR_location="eastus" + export TF_VAR_app_name="test-app" + export TF_VAR_docker_image="nginx:latest" + + # Default blue-green deployment settings (disabled) + export TF_VAR_enable_staging_slot="false" + export TF_VAR_staging_traffic_percent="0" + export TF_VAR_promote_staging_to_production="false" +} + +# ============================================================================= +# Test: Variable Type Validation +# Note: Custom validation rules (0-100 range) are checked at plan time, +# not validate time. Those are tested in integration tests. 
+# ============================================================================= + +@test "Should accept staging_traffic_percent value of 0" { + export TF_VAR_staging_traffic_percent="0" + + run tofu validate + [ "$status" -eq 0 ] +} + +@test "Should accept staging_traffic_percent value of 50" { + export TF_VAR_staging_traffic_percent="50" + + run tofu validate + [ "$status" -eq 0 ] +} + +@test "Should accept staging_traffic_percent value of 100" { + export TF_VAR_staging_traffic_percent="100" + + run tofu validate + [ "$status" -eq 0 ] +} + +# ============================================================================= +# Test: Configuration Validity - Different Scenarios +# ============================================================================= + +@test "Should validate module with staging slot disabled" { + export TF_VAR_enable_staging_slot="false" + export TF_VAR_staging_traffic_percent="0" + export TF_VAR_promote_staging_to_production="false" + + run tofu validate + [ "$status" -eq 0 ] +} + +@test "Should validate module with staging slot enabled" { + export TF_VAR_enable_staging_slot="true" + export TF_VAR_staging_traffic_percent="0" + export TF_VAR_promote_staging_to_production="false" + + run tofu validate + [ "$status" -eq 0 ] +} + +@test "Should validate module with staging slot and traffic routing" { + export TF_VAR_enable_staging_slot="true" + export TF_VAR_staging_traffic_percent="50" + export TF_VAR_promote_staging_to_production="false" + + run tofu validate + [ "$status" -eq 0 ] +} + +@test "Should validate module with staging slot and promotion" { + export TF_VAR_enable_staging_slot="true" + export TF_VAR_staging_traffic_percent="0" + export TF_VAR_promote_staging_to_production="true" + + run tofu validate + [ "$status" -eq 0 ] +} + +@test "Should validate blue_green scenario configuration" { + # blue_green.yaml: staging slot enabled, 0% traffic, no promotion + export TF_VAR_enable_staging_slot="true" + export 
TF_VAR_staging_traffic_percent="0" + export TF_VAR_promote_staging_to_production="false" + + run tofu validate + [ "$status" -eq 0 ] +} + +@test "Should validate switch_traffic scenario configuration" { + # switch_traffic.yaml: staging slot enabled, variable traffic % + export TF_VAR_enable_staging_slot="true" + export TF_VAR_staging_traffic_percent="25" + export TF_VAR_promote_staging_to_production="false" + + run tofu validate + [ "$status" -eq 0 ] +} + +@test "Should validate finalize scenario configuration" { + # finalize.yaml: staging slot disabled after swap, promotion true + export TF_VAR_enable_staging_slot="false" + export TF_VAR_staging_traffic_percent="0" + export TF_VAR_promote_staging_to_production="true" + + run tofu validate + [ "$status" -eq 0 ] +} + +@test "Should validate module with 100% traffic to staging" { + export TF_VAR_enable_staging_slot="true" + export TF_VAR_staging_traffic_percent="100" + export TF_VAR_promote_staging_to_production="false" + + run tofu validate + [ "$status" -eq 0 ] +} + +# ============================================================================= +# Test: Boolean Variable Validation +# ============================================================================= + +@test "Should accept enable_staging_slot value of true" { + export TF_VAR_enable_staging_slot="true" + + run tofu validate + [ "$status" -eq 0 ] +} + +@test "Should accept enable_staging_slot value of false" { + export TF_VAR_enable_staging_slot="false" + + run tofu validate + [ "$status" -eq 0 ] +} + +@test "Should accept promote_staging_to_production value of true" { + export TF_VAR_promote_staging_to_production="true" + + run tofu validate + [ "$status" -eq 0 ] +} + +@test "Should accept promote_staging_to_production value of false" { + export TF_VAR_promote_staging_to_production="false" + + run tofu validate + [ "$status" -eq 0 ] +} diff --git a/azure-apps/deployment/tests/resources/context.json b/azure-apps/deployment/tests/resources/context.json new 
file mode 100644 index 00000000..724e6754 --- /dev/null +++ b/azure-apps/deployment/tests/resources/context.json @@ -0,0 +1,207 @@ +{ + "account": { + "created_at": "2023-01-31T21:53:32.597Z", + "id": 2, + "metadata": {}, + "name": "Playground", + "nrn": "organization=1:account=2", + "organization_id": 1, + "repository_prefix": "playground-repos", + "repository_provider": "github", + "settings": {}, + "slug": "playground", + "status": "active", + "updated_at": "2023-01-31T21:53:32.597Z" + }, + "application": { + "auto_deploy_on_creation": false, + "created_at": "2025-10-07T03:22:21.385Z", + "id": 4, + "is_mono_repo": false, + "messages": [], + "metadata": {}, + "name": "Automation", + "namespace_id": 3, + "nrn": "organization=1:account=2:namespace=3:application=4", + "repository_app_path": null, + "repository_url": "https://github.com/playground-repos/tools-automation", + "settings": {}, + "slug": "automation", + "status": "active", + "tags": {}, + "template_id": 1037172878, + "updated_at": "2025-10-07T03:22:30.695Z" + }, + "asset": { + "id": 6, + "build_id": 612605537, + "name": "main", + "type": "docker", + "url": "myregistry.azurecr.io/tools/automation:v1.0.0", + "platform": "linux/amd64", + "metadata": {}, + "nrn": "organization=1:account=2:namespace=3:application=4:build=5:asset=6" + }, + "deployment": { + "created_at": "2025-12-22T18:27:54.701Z", + "created_by": 123456789, + "deployment_group_id": null, + "deployment_token": "dep-token", + "expires_at": null, + "external_strategy_id": 10, + "id": 8, + "messages": [], + "metadata": {}, + "nrn": "organization=1:account=2:namespace=3:application=4:scope=7:deployment=8", + "parameters": { + "DATABASE_URL": "postgres://localhost:5432/db", + "LOG_LEVEL": "info" + }, + "release_id": 9, + "scope_id": 7, + "status": "creating", + "status_in_scope": "inactive", + "status_started_at": { + "creating": "2025-12-22T18:27:54.629Z" + }, + "strategy": "initial", + "strategy_data": { + "desired_switched_traffic": 0, + 
"parameters": { + "metrics": { + "enabled": false, + "rules": [] + }, + "traffic": { + "enable_auto_switch": false, + "interval": 10, + "step": "0.1" + } + } + }, + "updated_at": "2025-12-23T13:22:06.345Z", + "updated_by": null + }, + "namespace": { + "account_id": 2, + "created_at": "2025-05-15T21:34:40.725Z", + "id": 3, + "metadata": {}, + "name": "Tools", + "nrn": "organization=1:account=2:namespace=3", + "slug": "tools", + "status": "active", + "updated_at": "2025-05-15T21:34:40.725Z" + }, + "parameters": { + "results": [ + { + "destination_path": null, + "id": 10, + "name": "DATABASE_URL", + "type": "environment", + "values": [ + { + "id": "11", + "value": "postgres://localhost:5432/db" + } + ], + "variable": "DATABASE_URL", + "version_id": 12 + }, + { + "destination_path": null, + "id": 13, + "name": "LOG_LEVEL", + "type": "environment", + "values": [ + { + "id": "14", + "value": "info" + } + ], + "variable": "LOG_LEVEL", + "version_id": 15 + } + ] + }, + "providers": { + "cloud-providers": { + "authentication": { + "subscription_id": "test-subscription-id", + "client_id": "test-client-id", + "tenant_id": "test-tenant-id" + }, + "networking": { + "public_dns_zone_resource_group_name": "test-resource-group" + } + }, + "assets-repository": { + "setup": { + "password": "test-registry-password", + "path": "test-path", + "server": "testregistry.azurecr.io", + "use_namespace": false, + "username": "test-registry-user" + } + } + }, + "release": { + "application_id": 4, + "build_id": 5, + "created_at": "2025-12-12T13:07:27.435Z", + "id": 9, + "metadata": {}, + "nrn": "organization=1:account=2:namespace=3:application=4:release=9", + "semver": "v1.0.0", + "status": "active", + "updated_at": "2025-12-12T13:07:27.702Z" + }, + "scope": { + "application_id": 4, + "asset_name": "main", + "capabilities": { + "memory": 8, + "websockets_enabled": false, + "scaling_type": "auto", + "fixed_instances": 2, + "autoscaling": { + "min_instances": 2, + "max_instances": 5, + 
"target_cpu_utilization": 75, + "target_memory_utilization": 80 + }, + "health_check": { + "path": "/healthz", + "eviction_time_in_min": 5 + } + }, + "created_at": "2025-12-22T18:27:04.949Z", + "dimensions": { + "country": "argentina", + "environment": "development" + }, + "domain": "", + "domains": [], + "external_created": false, + "id": 7, + "instance_id": "some-instance-id", + "messages": [], + "metadata": {}, + "name": "Development tools", + "nrn": "organization=1:account=2:namespace=3:application=4:scope=7", + "profiles": [ + "environment_development", + "environment_development_country_argentina" + ], + "provider": "scope-type-id", + "requested_spec": {}, + "runtime_configurations": [], + "slug": "development-tools", + "status": "active", + "tags": [], + "tier": "important", + "type": "custom", + "updated_at": "2025-12-29T18:25:55.908Z" + } +} diff --git a/azure-apps/deployment/tests/resources/mocks/az b/azure-apps/deployment/tests/resources/mocks/az new file mode 100755 index 00000000..575ed3cb --- /dev/null +++ b/azure-apps/deployment/tests/resources/mocks/az @@ -0,0 +1,16 @@ +#!/bin/bash +# Mock az CLI for unit tests +# Logs all invocations to AZ_CALL_LOG (one line per call) +# Returns AZ_MOCK_RESPONSE file content if configured +# Returns AZ_MOCK_EXIT_CODE (default: 0) + +if [ -n "${AZ_CALL_LOG:-}" ]; then + echo "$*" >> "$AZ_CALL_LOG" +fi + +# Return mock response if configured +if [ -n "${AZ_MOCK_RESPONSE:-}" ] && [ -f "$AZ_MOCK_RESPONSE" ]; then + cat "$AZ_MOCK_RESPONSE" +fi + +exit "${AZ_MOCK_EXIT_CODE:-0}" diff --git a/azure-apps/deployment/tests/resources/mocks/curl b/azure-apps/deployment/tests/resources/mocks/curl new file mode 100755 index 00000000..37e92544 --- /dev/null +++ b/azure-apps/deployment/tests/resources/mocks/curl @@ -0,0 +1,80 @@ +#!/bin/bash +# Mock curl for unit tests +# Returns different responses based on URL patterns + +if [ -n "${CURL_CALL_LOG:-}" ]; then + echo "$*" >> "$CURL_CALL_LOG" +fi + +# Check URL in arguments 
+ARGS="$*" + +# Azure OAuth token endpoint +if [[ "$ARGS" == *"login.microsoftonline.com"*"oauth2"* ]]; then + echo '{"access_token":"mock-azure-token","token_type":"Bearer","expires_in":3600}' + exit 0 +fi + +# Azure publishing credentials endpoint (staging slot returns empty/error by default) +if [[ "$ARGS" == *"publishingcredentials"* ]]; then + if [[ "$ARGS" == *"/slots/staging/"* ]]; then + # Staging slot - return error (slot doesn't exist by default) + echo '{"error":{"code":"NotFound"}}' + else + # Production slot + echo '{"properties":{"publishingUserName":"$mock-user","publishingPassword":"mock-password"}}' + fi + exit 0 +fi + +# Azure App Service site info endpoint (for getting serverFarmId) +if [[ "$ARGS" == *"Microsoft.Web/sites/"* ]] && [[ "$ARGS" == *"api-version="* ]] && [[ "$ARGS" != *"/instances"* ]] && [[ "$ARGS" != *"/publishingcredentials"* ]] && [[ "$ARGS" != *"microsoft.insights"* ]] && [[ "$ARGS" != *"/slots/"* ]]; then + echo '{"properties":{"serverFarmId":"/subscriptions/test-subscription-id/resourceGroups/test-resource-group/providers/Microsoft.Web/serverfarms/test-app-service-plan"}}' + exit 0 +fi + +# Azure Monitor metrics endpoint +if [[ "$ARGS" == *"microsoft.insights/metrics"* ]]; then + SCRIPT_DIR="$(cd "$(dirname "$0")" && pwd)" + if [[ "$ARGS" == *"/slots/staging/"* ]]; then + # Staging slot metrics - return empty by default + echo '{"value":[{"timeseries":[]}]}' + else + cat "$SCRIPT_DIR/responses/az_metrics_cpu.json" + fi + exit 0 +fi + +# Azure App Service instances endpoint +if [[ "$ARGS" == *"/instances"* ]]; then + # Check for mock response override first + if [ -n "${CURL_MOCK_RESPONSE:-}" ] && [ -f "$CURL_MOCK_RESPONSE" ]; then + cat "$CURL_MOCK_RESPONSE" + elif [[ "$ARGS" == *"/slots/staging/"* ]]; then + # Staging slot instances - return empty by default + echo '{"value":[]}' + else + # Production slot instances + echo 
'{"value":[{"name":"instance1","properties":{"state":"Running","lastModifiedTimeUtc":"2026-01-28T10:00:00Z"}},{"name":"instance2","properties":{"state":"Running","lastModifiedTimeUtc":"2026-01-28T10:00:00Z"}}]}' + fi + exit 0 +fi + +# Kudu Docker logs API +if [[ "$ARGS" == *"scm.azurewebsites.net/api/logs/docker"* ]]; then + if [[ "$ARGS" == *"-staging.scm."* ]]; then + # Staging slot logs - return empty by default + echo '[]' + else + # Production slot logs + echo '[{"href":"https://mock.scm.azurewebsites.net/api/logs/docker/log1.txt","lastUpdated":"2026-01-28T10:00:00Z"}]' + fi + exit 0 +fi + +# Default: return mock response file if configured +if [ -n "${CURL_MOCK_RESPONSE:-}" ] && [ -f "$CURL_MOCK_RESPONSE" ]; then + cat "$CURL_MOCK_RESPONSE" +fi + +exit "${CURL_MOCK_EXIT_CODE:-0}" diff --git a/azure-apps/deployment/tests/resources/mocks/np b/azure-apps/deployment/tests/resources/mocks/np new file mode 100755 index 00000000..f1b63f21 --- /dev/null +++ b/azure-apps/deployment/tests/resources/mocks/np @@ -0,0 +1,29 @@ +#!/bin/bash +# Mock np CLI for unit tests +# Routes responses based on subcommand + +if [ -n "${NP_CALL_LOG:-}" ]; then + echo "$*" >> "$NP_CALL_LOG" +fi + +case "$1 $2" in + "scope read") + if [ -f "${NP_SCOPE_RESPONSE:-}" ]; then + cat "$NP_SCOPE_RESPONSE" + else + # Default response with active_deployment + echo '{"id":7,"slug":"development-tools","name":"Development tools","active_deployment":8}' + fi + ;; + "application read") + [ -f "${NP_APP_RESPONSE:-}" ] && cat "$NP_APP_RESPONSE" + ;; + "namespace read") + [ -f "${NP_NAMESPACE_RESPONSE:-}" ] && cat "$NP_NAMESPACE_RESPONSE" + ;; + "provider list") + [ -f "${NP_PROVIDER_RESPONSE:-}" ] && cat "$NP_PROVIDER_RESPONSE" + ;; +esac + +exit "${NP_MOCK_EXIT_CODE:-0}" diff --git a/azure-apps/deployment/tests/resources/mocks/responses/az_list_instances.json b/azure-apps/deployment/tests/resources/mocks/responses/az_list_instances.json new file mode 100644 index 00000000..392a143a --- /dev/null +++ 
b/azure-apps/deployment/tests/resources/mocks/responses/az_list_instances.json @@ -0,0 +1,12 @@ +[ + { + "name": "instance-001", + "siteInstanceName": "instance-001", + "state": "Running" + }, + { + "name": "instance-002", + "siteInstanceName": "instance-002", + "state": "Running" + } +] diff --git a/azure-apps/deployment/tests/resources/mocks/responses/az_metrics_cpu.json b/azure-apps/deployment/tests/resources/mocks/responses/az_metrics_cpu.json new file mode 100644 index 00000000..20d09b1e --- /dev/null +++ b/azure-apps/deployment/tests/resources/mocks/responses/az_metrics_cpu.json @@ -0,0 +1,39 @@ +{ + "cost": 0, + "interval": "PT5M", + "namespace": "Microsoft.Web/sites", + "resourceregion": "australiaeast", + "timespan": "2026-01-27T00:00:00Z/2026-01-27T01:00:00Z", + "value": [ + { + "displayDescription": "The percentage of CPU used by the app.", + "id": "/subscriptions/test-subscription-id/resourceGroups/test-resource-group/providers/Microsoft.Web/sites/tools-automation-development-tools-7/providers/Microsoft.Insights/metrics/CpuPercentage", + "name": { + "localizedValue": "CPU Percentage", + "value": "CpuPercentage" + }, + "resourceGroup": "test-resource-group", + "timeseries": [ + { + "data": [ + { + "average": 25.5, + "timeStamp": "2026-01-27T00:00:00+00:00" + }, + { + "average": 30.2, + "timeStamp": "2026-01-27T00:05:00+00:00" + }, + { + "average": 22.1, + "timeStamp": "2026-01-27T00:10:00+00:00" + } + ], + "metadatavalues": [] + } + ], + "type": "Microsoft.Insights/metrics", + "unit": "Percent" + } + ] +} diff --git a/azure-apps/deployment/tests/resources/mocks/responses/az_publishing_credentials.json b/azure-apps/deployment/tests/resources/mocks/responses/az_publishing_credentials.json new file mode 100644 index 00000000..dd108402 --- /dev/null +++ b/azure-apps/deployment/tests/resources/mocks/responses/az_publishing_credentials.json @@ -0,0 +1,5 @@ +{ + "publishingUserName": "$testuser", + "publishingPassword": "testpassword123", + "scmUri": 
"https://tools-automation-development-tools-7.scm.azurewebsites.net" +} diff --git a/azure-apps/deployment/tests/resources/mocks/responses/kudu_docker_log_file.txt b/azure-apps/deployment/tests/resources/mocks/responses/kudu_docker_log_file.txt new file mode 100644 index 00000000..574236a4 --- /dev/null +++ b/azure-apps/deployment/tests/resources/mocks/responses/kudu_docker_log_file.txt @@ -0,0 +1,6 @@ +2026-01-27T10:00:00.000Z INFO - Starting container +2026-01-27T10:00:01.000Z INFO - Container started successfully +2026-01-27T10:00:05.000Z ERROR - Connection refused to database +2026-01-27T10:00:10.000Z INFO - Retrying database connection +2026-01-27T10:00:15.000Z INFO - Database connected +2026-01-27T10:01:00.000Z INFO - Health check passed diff --git a/azure-apps/deployment/tests/resources/mocks/responses/kudu_docker_logs_list.json b/azure-apps/deployment/tests/resources/mocks/responses/kudu_docker_logs_list.json new file mode 100644 index 00000000..16f4c81e --- /dev/null +++ b/azure-apps/deployment/tests/resources/mocks/responses/kudu_docker_logs_list.json @@ -0,0 +1,16 @@ +[ + { + "machineName": "lw1sdlwk00003F", + "lastUpdated": "2026-01-27T10:05:00Z", + "size": 1024, + "href": "https://tools-automation-development-tools-7.scm.azurewebsites.net/api/vfs/LogFiles/2026_01_27_lw1sdlwk00003F_docker.log", + "path": "/home/LogFiles/2026_01_27_lw1sdlwk00003F_docker.log" + }, + { + "machineName": "lw1sdlwk00003F", + "lastUpdated": "2026-01-27T09:00:00Z", + "size": 512, + "href": "https://tools-automation-development-tools-7.scm.azurewebsites.net/api/vfs/LogFiles/2026_01_26_lw1sdlwk00003F_docker.log", + "path": "/home/LogFiles/2026_01_26_lw1sdlwk00003F_docker.log" + } +] diff --git a/azure-apps/deployment/tests/resources/mocks/responses/np_application_read.json b/azure-apps/deployment/tests/resources/mocks/responses/np_application_read.json new file mode 100644 index 00000000..c4e3c6c3 --- /dev/null +++ 
b/azure-apps/deployment/tests/resources/mocks/responses/np_application_read.json @@ -0,0 +1,8 @@ +{ + "id": 4, + "name": "Automation", + "slug": "automation", + "namespace_id": 3, + "nrn": "organization=1:account=2:namespace=3:application=4", + "status": "active" +} diff --git a/azure-apps/deployment/tests/resources/mocks/responses/np_namespace_read.json b/azure-apps/deployment/tests/resources/mocks/responses/np_namespace_read.json new file mode 100644 index 00000000..ebfd20e9 --- /dev/null +++ b/azure-apps/deployment/tests/resources/mocks/responses/np_namespace_read.json @@ -0,0 +1,8 @@ +{ + "id": 3, + "name": "Tools", + "slug": "tools", + "account_id": 2, + "nrn": "organization=1:account=2:namespace=3", + "status": "active" +} diff --git a/azure-apps/deployment/tests/resources/mocks/responses/np_provider_list.json b/azure-apps/deployment/tests/resources/mocks/responses/np_provider_list.json new file mode 100644 index 00000000..ea5c2968 --- /dev/null +++ b/azure-apps/deployment/tests/resources/mocks/responses/np_provider_list.json @@ -0,0 +1,19 @@ +{ + "results": [ + { + "id": 1, + "name": "Azure Cloud Provider", + "category": "cloud-providers", + "attributes": { + "authentication": { + "subscription_id": "test-subscription-id", + "client_id": "test-client-id", + "tenant_id": "test-tenant-id" + }, + "networking": { + "public_dns_zone_resource_group_name": "test-resource-group" + } + } + } + ] +} diff --git a/azure-apps/deployment/tests/resources/mocks/responses/np_scope_read.json b/azure-apps/deployment/tests/resources/mocks/responses/np_scope_read.json new file mode 100644 index 00000000..4bcc1335 --- /dev/null +++ b/azure-apps/deployment/tests/resources/mocks/responses/np_scope_read.json @@ -0,0 +1,8 @@ +{ + "id": 7, + "name": "Development tools", + "slug": "development-tools", + "application_id": 4, + "nrn": "organization=1:account=2:namespace=3:application=4:scope=7", + "status": "active" +} diff --git 
a/azure-apps/deployment/tests/scripts/azure_setup_test.bats b/azure-apps/deployment/tests/scripts/azure_setup_test.bats new file mode 100644 index 00000000..5d2d39f4 --- /dev/null +++ b/azure-apps/deployment/tests/scripts/azure_setup_test.bats @@ -0,0 +1,445 @@ +#!/usr/bin/env bats +# ============================================================================= +# Unit tests for azure_setup script +# +# Requirements: +# - bats-core: brew install bats-core +# - jq: brew install jq +# +# Run tests: +# bats tests/scripts/azure_setup_test.bats +# +# Or run all tests: +# bats tests/scripts/*.bats +# ============================================================================= + +# Setup - runs before each test +setup() { + # Get the directory of the test file + TEST_DIR="$(cd "$(dirname "$BATS_TEST_FILENAME")" && pwd)" + PROJECT_DIR="$(cd "$TEST_DIR/../.." && pwd)" + PROJECT_ROOT="$(cd "$TEST_DIR/../../../.." && pwd)" + + # Load shared test utilities + source "$PROJECT_ROOT/testing/assertions.sh" + + SCRIPT_PATH="$PROJECT_DIR/scripts/azure_setup" + + # Load CONTEXT from test resources + export CONTEXT=$(cat "$PROJECT_DIR/tests/resources/context.json") + + # Set required environment variables with defaults + export TOFU_PROVIDER_STORAGE_ACCOUNT="tfstatestorage" + export TOFU_PROVIDER_CONTAINER="tfstate" + export ARM_CLIENT_SECRET="test-client-secret" + + # Initialize TOFU_VARIABLES as empty JSON + export TOFU_VARIABLES="{}" + export TOFU_INIT_VARIABLES="" + export RESOURCE_TAGS_JSON="{}" + + # STATE_KEY is normally set by build_context + export STATE_KEY="azure-apps/7/terraform.tfstate" +} + +# Teardown - runs after each test +teardown() { + # Clean up exported variables + unset CONTEXT + unset ARM_SUBSCRIPTION_ID + unset ARM_CLIENT_ID + unset ARM_TENANT_ID + unset ARM_CLIENT_SECRET + unset AZURE_RESOURCE_GROUP + unset AZURE_LOCATION + unset TOFU_PROVIDER_STORAGE_ACCOUNT + unset TOFU_PROVIDER_CONTAINER + unset TOFU_VARIABLES + unset TOFU_INIT_VARIABLES + unset 
RESOURCE_TAGS_JSON + unset STATE_KEY +} + +# ============================================================================= +# Helper functions +# ============================================================================= +run_azure_setup() { + source "$SCRIPT_PATH" +} + +# ============================================================================= +# Test: Required environment variables - Error messages +# ============================================================================= +@test "Should fail when TOFU_PROVIDER_STORAGE_ACCOUNT is not set" { + unset TOFU_PROVIDER_STORAGE_ACCOUNT + + run source "$SCRIPT_PATH" + + assert_equal "$status" "1" + assert_contains "$output" "❌ TOFU_PROVIDER_STORAGE_ACCOUNT is missing" + assert_contains "$output" "🔧 How to fix:" + assert_contains "$output" "• TOFU_PROVIDER_STORAGE_ACCOUNT" +} + +@test "Should fail when TOFU_PROVIDER_CONTAINER is not set" { + unset TOFU_PROVIDER_CONTAINER + + run source "$SCRIPT_PATH" + + assert_equal "$status" "1" + assert_contains "$output" "❌ TOFU_PROVIDER_CONTAINER is missing" + assert_contains "$output" "🔧 How to fix:" + assert_contains "$output" "• TOFU_PROVIDER_CONTAINER" +} + +@test "Should fail when ARM_CLIENT_SECRET is not set" { + unset ARM_CLIENT_SECRET + + run source "$SCRIPT_PATH" + + assert_equal "$status" "1" + assert_contains "$output" "❌ ARM_CLIENT_SECRET is missing" + assert_contains "$output" "🔧 How to fix:" + assert_contains "$output" "• ARM_CLIENT_SECRET" +} + +@test "Should fail when multiple env variables are missing and list all of them" { + unset TOFU_PROVIDER_STORAGE_ACCOUNT + unset TOFU_PROVIDER_CONTAINER + unset ARM_CLIENT_SECRET + + run source "$SCRIPT_PATH" + + assert_equal "$status" "1" + assert_contains "$output" "❌ TOFU_PROVIDER_STORAGE_ACCOUNT is missing" + assert_contains "$output" "❌ TOFU_PROVIDER_CONTAINER is missing" + assert_contains "$output" "❌ ARM_CLIENT_SECRET is missing" + assert_contains "$output" "🔧 How to fix:" + assert_contains "$output" "• 
TOFU_PROVIDER_STORAGE_ACCOUNT" + assert_contains "$output" "• TOFU_PROVIDER_CONTAINER" + assert_contains "$output" "• ARM_CLIENT_SECRET" +} + +# ============================================================================= +# Test: Context-derived variables - Validation +# ============================================================================= +@test "Should fail when ARM_SUBSCRIPTION_ID cannot be resolved from context" { + export CONTEXT=$(echo "$CONTEXT" | jq 'del(.providers["cloud-providers"].authentication.subscription_id)') + + run source "$SCRIPT_PATH" + + assert_equal "$status" "1" + assert_contains "$output" "❌ ARM_SUBSCRIPTION_ID could not be resolved from providers" + assert_contains "$output" "💡 Possible causes:" + assert_contains "$output" "Verify that you have an Azure cloud provider linked to this scope." + assert_contains "$output" "• subscription_id" +} + +@test "Should fail when ARM_CLIENT_ID cannot be resolved from context" { + export CONTEXT=$(echo "$CONTEXT" | jq 'del(.providers["cloud-providers"].authentication.client_id)') + + run source "$SCRIPT_PATH" + + assert_equal "$status" "1" + assert_contains "$output" "❌ ARM_CLIENT_ID could not be resolved from providers" + assert_contains "$output" "💡 Possible causes:" + assert_contains "$output" "Verify that you have an Azure cloud provider linked to this scope." + assert_contains "$output" "• client_id" +} + +@test "Should fail when ARM_TENANT_ID cannot be resolved from context" { + export CONTEXT=$(echo "$CONTEXT" | jq 'del(.providers["cloud-providers"].authentication.tenant_id)') + + run source "$SCRIPT_PATH" + + assert_equal "$status" "1" + assert_contains "$output" "❌ ARM_TENANT_ID could not be resolved from providers" + assert_contains "$output" "💡 Possible causes:" + assert_contains "$output" "Verify that you have an Azure cloud provider linked to this scope." 
+ assert_contains "$output" "• tenant_id" +} + +@test "Should fail when AZURE_RESOURCE_GROUP cannot be resolved from context" { + export CONTEXT=$(echo "$CONTEXT" | jq 'del(.providers["cloud-providers"].networking.public_dns_zone_resource_group_name)') + + run source "$SCRIPT_PATH" + + assert_equal "$status" "1" + assert_contains "$output" "❌ AZURE_RESOURCE_GROUP could not be resolved from providers" + assert_contains "$output" "💡 Possible causes:" + assert_contains "$output" "Verify that you have an Azure cloud provider linked to this scope." + assert_contains "$output" "• public_dns_zone_resource_group_name" +} + +@test "Should fail when cloud-providers section is missing from context" { + export CONTEXT=$(echo "$CONTEXT" | jq 'del(.providers["cloud-providers"])') + + run source "$SCRIPT_PATH" + + assert_equal "$status" "1" + assert_contains "$output" "❌ ARM_SUBSCRIPTION_ID could not be resolved from providers" + assert_contains "$output" "❌ ARM_CLIENT_ID could not be resolved from providers" + assert_contains "$output" "❌ ARM_TENANT_ID could not be resolved from providers" + assert_contains "$output" "❌ AZURE_RESOURCE_GROUP could not be resolved from providers" + assert_contains "$output" "💡 Possible causes:" + assert_contains "$output" "Verify that you have an Azure cloud provider linked to this scope." +} + +# ============================================================================= +# Test: Validation success messages +# ============================================================================= +@test "Should display validation header message" { + run source "$SCRIPT_PATH" + + assert_equal "$status" "0" + assert_contains "$output" "🔍 Validating Azure provider configuration..." 
+} + +@test "Should display success message when all required variables are set" { + run source "$SCRIPT_PATH" + + assert_equal "$status" "0" + assert_contains "$output" "✨ Azure provider configured successfully" +} + +@test "Should display variable values when validation passes" { + run source "$SCRIPT_PATH" + + assert_equal "$status" "0" + assert_contains "$output" "✅ TOFU_PROVIDER_STORAGE_ACCOUNT=tfstatestorage" + assert_contains "$output" "✅ TOFU_PROVIDER_CONTAINER=tfstate" +} + +@test "Should redact variable value when name contains secret" { + run source "$SCRIPT_PATH" + + assert_equal "$status" "0" + assert_contains "$output" "✅ ARM_CLIENT_SECRET=REDACTED" + [[ "$output" != *"test-client-secret"* ]] +} + +# ============================================================================= +# Test: Context value extraction +# ============================================================================= +@test "Should extract ARM_SUBSCRIPTION_ID from context" { + run_azure_setup + + assert_equal "$ARM_SUBSCRIPTION_ID" "test-subscription-id" +} + +@test "Should extract ARM_CLIENT_ID from context" { + run_azure_setup + + assert_equal "$ARM_CLIENT_ID" "test-client-id" +} + +@test "Should extract ARM_TENANT_ID from context" { + run_azure_setup + + assert_equal "$ARM_TENANT_ID" "test-tenant-id" +} + +@test "Should extract AZURE_RESOURCE_GROUP from context" { + run_azure_setup + + assert_equal "$AZURE_RESOURCE_GROUP" "test-resource-group" +} + +@test "Should set AZURE_LOCATION to australiaeast" { + run_azure_setup + + assert_equal "$AZURE_LOCATION" "australiaeast" +} + +# ============================================================================= +# Test: TOFU_VARIABLES generation +# ============================================================================= +@test "Should generate TOFU_VARIABLES with resource_group_name" { + run_azure_setup + + local actual_value=$(echo "$TOFU_VARIABLES" | jq -r '.resource_group_name') + assert_equal "$actual_value" 
"test-resource-group" +} + +@test "Should generate TOFU_VARIABLES with location" { + run_azure_setup + + local actual_value=$(echo "$TOFU_VARIABLES" | jq -r '.location') + assert_equal "$actual_value" "australiaeast" +} + +@test "Should include resource_tags in TOFU_VARIABLES" { + export RESOURCE_TAGS_JSON='{"environment": "test", "team": "platform"}' + + run_azure_setup + + local expected_tags='{"environment": "test", "team": "platform"}' + local actual_tags=$(echo "$TOFU_VARIABLES" | jq '.resource_tags') + assert_json_equal "$actual_tags" "$expected_tags" "resource_tags" +} + +@test "Should preserve existing TOFU_VARIABLES when adding azure variables" { + export TOFU_VARIABLES='{"existing_key": "existing_value"}' + + run_azure_setup + + local expected='{ + "existing_key": "existing_value", + "resource_group_name": "test-resource-group", + "location": "australiaeast", + "resource_tags": {}, + "backend_storage_account_name": "tfstatestorage", + "backend_container_name": "tfstate", + "backend_resource_group_name": "test-resource-group" + }' + + assert_json_equal "$TOFU_VARIABLES" "$expected" "TOFU_VARIABLES" +} + +@test "Should add backend_storage_account_name to TOFU_VARIABLES" { + run_azure_setup + + local storage_account + storage_account=$(echo "$TOFU_VARIABLES" | jq -r '.backend_storage_account_name') + assert_equal "$storage_account" "tfstatestorage" +} + +@test "Should add backend_container_name to TOFU_VARIABLES" { + run_azure_setup + + local container + container=$(echo "$TOFU_VARIABLES" | jq -r '.backend_container_name') + assert_equal "$container" "tfstate" +} + +@test "Should add backend_resource_group_name to TOFU_VARIABLES" { + run_azure_setup + + local resource_group + resource_group=$(echo "$TOFU_VARIABLES" | jq -r '.backend_resource_group_name') + assert_equal "$resource_group" "test-resource-group" +} + +# ============================================================================= +# Test: TOFU_INIT_VARIABLES generation +# 
============================================================================= +@test "Should generate TOFU_INIT_VARIABLES with backend config" { + export STATE_KEY="azure-apps/42/terraform.tfstate" + + run_azure_setup + + assert_contains "$TOFU_INIT_VARIABLES" "-backend-config=storage_account_name=tfstatestorage" + assert_contains "$TOFU_INIT_VARIABLES" "-backend-config=container_name=tfstate" + assert_contains "$TOFU_INIT_VARIABLES" "-backend-config=resource_group_name=test-resource-group" + assert_contains "$TOFU_INIT_VARIABLES" "-backend-config=key=azure-apps/42/terraform.tfstate" +} + +@test "Should preserve existing TOFU_INIT_VARIABLES" { + export TOFU_INIT_VARIABLES="-backend-config=existing=value" + + run_azure_setup + + assert_contains "$TOFU_INIT_VARIABLES" "-backend-config=existing=value" + assert_contains "$TOFU_INIT_VARIABLES" "-backend-config=storage_account_name=tfstatestorage" +} + +@test "Should use STATE_KEY from build_context in backend config" { + export STATE_KEY="azure-apps/99/terraform.tfstate" + + run_azure_setup + + assert_contains "$TOFU_INIT_VARIABLES" "-backend-config=key=azure-apps/99/terraform.tfstate" +} + +# ============================================================================= +# Test: Environment variable exports +# ============================================================================= +@test "Should export TOFU_VARIABLES" { + run_azure_setup + + assert_not_empty "$TOFU_VARIABLES" "TOFU_VARIABLES" +} + +@test "Should export TOFU_INIT_VARIABLES" { + run_azure_setup + + assert_not_empty "$TOFU_INIT_VARIABLES" "TOFU_INIT_VARIABLES" +} + +@test "Should export ARM_SUBSCRIPTION_ID" { + run_azure_setup + + assert_equal "$ARM_SUBSCRIPTION_ID" "test-subscription-id" +} + +@test "Should export ARM_CLIENT_ID" { + run_azure_setup + + assert_equal "$ARM_CLIENT_ID" "test-client-id" +} + +@test "Should export ARM_TENANT_ID" { + run_azure_setup + + assert_equal "$ARM_TENANT_ID" "test-tenant-id" +} + +@test "Should export 
ARM_CLIENT_SECRET" { + run_azure_setup + + assert_equal "$ARM_CLIENT_SECRET" "test-client-secret" +} + +@test "Should export AZURE_RESOURCE_GROUP" { + run_azure_setup + + assert_equal "$AZURE_RESOURCE_GROUP" "test-resource-group" +} + +@test "Should export AZURE_LOCATION" { + run_azure_setup + + assert_equal "$AZURE_LOCATION" "australiaeast" +} + +# ============================================================================= +# Test: MODULES_TO_USE handling +# ============================================================================= +@test "Should set MODULES_TO_USE when modules directory exists" { + # Create a temporary modules directory + local temp_modules_dir=$(mktemp -d) + mkdir -p "$temp_modules_dir/modules" + + # Create a modified script that uses our temp directory + local temp_script=$(mktemp) + sed "s|script_dir=.*|script_dir=\"$temp_modules_dir\"|" "$SCRIPT_PATH" > "$temp_script" + + export MODULES_TO_USE="" + source "$temp_script" + + assert_contains "$MODULES_TO_USE" "$temp_modules_dir/modules" + + # Cleanup + rm -rf "$temp_modules_dir" + rm "$temp_script" +} + +@test "Should append to existing MODULES_TO_USE when modules directory exists" { + # Create a temporary modules directory + local temp_modules_dir=$(mktemp -d) + mkdir -p "$temp_modules_dir/modules" + + # Create a modified script that uses our temp directory + local temp_script=$(mktemp) + sed "s|script_dir=.*|script_dir=\"$temp_modules_dir\"|" "$SCRIPT_PATH" > "$temp_script" + + export MODULES_TO_USE="/existing/module" + source "$temp_script" + + assert_contains "$MODULES_TO_USE" "/existing/module" + assert_contains "$MODULES_TO_USE" "$temp_modules_dir/modules" + + # Cleanup + rm -rf "$temp_modules_dir" + rm "$temp_script" +} diff --git a/azure-apps/deployment/tests/scripts/build_context_test.bats b/azure-apps/deployment/tests/scripts/build_context_test.bats new file mode 100644 index 00000000..44089ed7 --- /dev/null +++ b/azure-apps/deployment/tests/scripts/build_context_test.bats @@ 
-0,0 +1,468 @@ +#!/usr/bin/env bats +# ============================================================================= +# Unit tests for build_context script +# +# Requirements: +# - bats-core: brew install bats-core +# - jq: brew install jq +# +# Run tests: +# bats tests/scripts/build_context_test.bats +# +# Or run all tests: +# bats tests/scripts/*.bats +# ============================================================================= + +# Setup - runs before each test +setup() { + # Get the directory of the test file + TEST_DIR="$(cd "$(dirname "$BATS_TEST_FILENAME")" && pwd)" + PROJECT_DIR="$(cd "$TEST_DIR/../.." && pwd)" + PROJECT_ROOT="$(cd "$TEST_DIR/../../../.." && pwd)" + + # Load shared test utilities + source "$PROJECT_ROOT/testing/assertions.sh" + + CONTEXT=$(cat "$PROJECT_DIR/tests/resources/context.json") + # SERVICE_PATH should point to azure-apps root (parent of deployment) + SERVICE_PATH="$(cd "$PROJECT_DIR/.." && pwd)" + TEST_OUTPUT_DIR=$(mktemp -d) + + # Variables normally set by docker_setup script + export DOCKER_REGISTRY_URL="https://testregistry.azurecr.io" + export DOCKER_REGISTRY_USERNAME="test-registry-user" + export DOCKER_REGISTRY_PASSWORD="test-registry-password" + export DOCKER_IMAGE="tools/automation:v1.0.0" + + export CONTEXT SERVICE_PATH TEST_OUTPUT_DIR +} + +# Teardown - runs after each test +teardown() { + # Clean up temp directory + if [ -d "$TEST_OUTPUT_DIR" ]; then + rm -rf "$TEST_OUTPUT_DIR" + fi +} + +# ============================================================================= +# Helper functions +# ============================================================================= +run_build_context() { + # Source the build_context script + source "$PROJECT_DIR/scripts/build_context" +} + +# ============================================================================= +# Test: Basic extraction from CONTEXT +# ============================================================================= +@test "Should extract SCOPE_ID from context" 
{ + run_build_context + + assert_equal "$SCOPE_ID" "7" +} + +@test "Should extract DEPLOYMENT_ID from context" { + run_build_context + + assert_equal "$DEPLOYMENT_ID" "8" +} + +@test "Should use DOCKER_IMAGE from environment" { + run_build_context + + assert_equal "$DOCKER_IMAGE" "tools/automation:v1.0.0" +} + +# ============================================================================= +# Test: NRN component extraction +# ============================================================================= +@test "Should extract ORGANIZATION_ID from NRN" { + run_build_context + + assert_equal "$ORGANIZATION_ID" "1" +} + +@test "Should extract ACCOUNT_ID from NRN" { + run_build_context + + assert_equal "$ACCOUNT_ID" "2" +} + +@test "Should extract NAMESPACE_ID from NRN" { + run_build_context + + assert_equal "$NAMESPACE_ID" "3" +} + +@test "Should extract APPLICATION_ID from NRN" { + run_build_context + + assert_equal "$APPLICATION_ID" "4" +} + +# ============================================================================= +# Test: APP_NAME generation +# ============================================================================= +@test "Should generate APP_NAME from slugs and scope_id" { + run_build_context + + # Format: {namespace}-{application}-{scope}-{scope_id} + assert_equal "$APP_NAME" "tools-automation-development-tools-7" +} + +@test "Should generate APP_NAME within Azure max length (60 chars)" { + run_build_context + + local name_length=${#APP_NAME} + assert_less_than "$name_length" "61" "APP_NAME length" +} + +# ============================================================================= +# Test: TOFU_VARIABLES - Full JSON validation +# ============================================================================= +@test "Should generate TOFU_VARIABLES with all expected values from context" { + run_build_context + + # Build expected JSON - validates all values extracted from capabilities + local expected_json + expected_json=$(cat <<'EOF' +{ + "app_name": 
"tools-automation-development-tools-7", + "docker_image": "tools/automation:v1.0.0", + "docker_registry_url": "https://testregistry.azurecr.io", + "docker_registry_username": "test-registry-user", + "docker_registry_password": "test-registry-password", + "sku_name": "P1v3", + "websockets_enabled": false, + "health_check_path": "/healthz", + "health_check_eviction_time_in_min": 5, + "enable_staging_slot": false, + "staging_traffic_percent": 0, + "promote_staging_to_production": false, + "preserve_production_image": false, + "state_key": "azure-apps/7/terraform.tfstate", + "enable_system_identity": true, + "enable_autoscaling": true, + "autoscale_min_instances": 2, + "autoscale_max_instances": 5, + "autoscale_default_instances": 2, + "cpu_scale_out_threshold": 75, + "memory_scale_out_threshold": 80, + "parameter_json": "{\"DATABASE_URL\":\"postgres://localhost:5432/db\",\"LOG_LEVEL\":\"info\"}", + "https_only": true, + "minimum_tls_version": "1.2", + "ftps_state": "Disabled", + "client_affinity_enabled": false, + "enable_logging": true, + "application_logs_level": "Information", + "http_logs_retention_days": 7 +} +EOF +) + + assert_json_equal "$TOFU_VARIABLES" "$expected_json" "TOFU_VARIABLES" +} + +# ============================================================================= +# Test: TOFU_VARIABLES - Special logic (conditional values) +# ============================================================================= +@test "Should disable autoscaling when scaling_type is fixed" { + export CONTEXT=$(echo "$CONTEXT" | jq '.scope.capabilities.scaling_type = "fixed"') + + run_build_context + + local enable_autoscaling + enable_autoscaling=$(echo "$TOFU_VARIABLES" | jq -r '.enable_autoscaling') + assert_equal "$enable_autoscaling" "false" +} + +# ============================================================================= +# Test: TOFU_VARIABLES - Default values +# ============================================================================= +@test "Should use default 
health_check_eviction_time 1 when not specified" { + export CONTEXT=$(echo "$CONTEXT" | jq 'del(.scope.capabilities.health_check.eviction_time_in_min)') + + run_build_context + + local eviction_time + eviction_time=$(echo "$TOFU_VARIABLES" | jq -r '.health_check_eviction_time_in_min') + assert_equal "$eviction_time" "1" +} + +# ============================================================================= +# Test: TOFU_VARIABLES - Environment variable overrides +# ============================================================================= +@test "Should use ENABLE_STAGING_SLOT from environment when set" { + export ENABLE_STAGING_SLOT="true" + + run_build_context + + local staging_slot + staging_slot=$(echo "$TOFU_VARIABLES" | jq -r '.enable_staging_slot') + assert_equal "$staging_slot" "true" +} + +@test "Should use provided DOCKER_REGISTRY_URL when set" { + export DOCKER_REGISTRY_URL="myregistry.azurecr.io" + + run_build_context + + local registry_url + registry_url=$(echo "$TOFU_VARIABLES" | jq -r '.docker_registry_url') + assert_equal "$registry_url" "myregistry.azurecr.io" +} + +# ============================================================================= +# Test: PARAMETER_JSON extraction +# ============================================================================= +@test "Should generate parameter_json with correct key-value pairs from multiple parameters" { + export CONTEXT=$(echo "$CONTEXT" | jq '.parameters.results = [ + {"variable": "TEST", "values": [{"id": "1", "value": "TRes"}]}, + {"variable": "PARAM", "values": [{"id": "2", "value": "values"}]} + ]') + + run_build_context + + local parameter_json + parameter_json=$(echo "$TOFU_VARIABLES" | jq -r '.parameter_json') + + local expected_json='{"TEST":"TRes","PARAM":"values"}' + assert_json_equal "$parameter_json" "$expected_json" "parameter_json" +} + +@test "Should generate parameter_json as empty array when parameters.results is empty" { + export CONTEXT=$(echo "$CONTEXT" | jq 
'.parameters.results = []') + + run_build_context + + local parameter_json + parameter_json=$(echo "$TOFU_VARIABLES" | jq -r '.parameter_json') + assert_equal "$parameter_json" "[]" +} + +@test "Should generate parameter_json as empty array when parameters is null" { + export CONTEXT=$(echo "$CONTEXT" | jq '.parameters = null') + + run_build_context + + local parameter_json + parameter_json=$(echo "$TOFU_VARIABLES" | jq -r '.parameter_json') + assert_equal "$parameter_json" "[]" +} + +# ============================================================================= +# Test: Blue-green deployment variables +# ============================================================================= +@test "Should read staging_traffic_percent from context.deployment.strategy_data.desired_switched_traffic" { + export CONTEXT=$(echo "$CONTEXT" | jq '.deployment.strategy_data.desired_switched_traffic = 25') + + run_build_context + + local traffic_percent + traffic_percent=$(echo "$TOFU_VARIABLES" | jq -r '.staging_traffic_percent') + assert_equal "$traffic_percent" "25" +} + +@test "Should default staging_traffic_percent to 0 when not in context" { + export CONTEXT=$(echo "$CONTEXT" | jq 'del(.deployment.strategy_data.desired_switched_traffic)') + + run_build_context + + local traffic_percent + traffic_percent=$(echo "$TOFU_VARIABLES" | jq -r '.staging_traffic_percent') + assert_equal "$traffic_percent" "0" +} + +@test "Should handle staging_traffic_percent of 100" { + export CONTEXT=$(echo "$CONTEXT" | jq '.deployment.strategy_data.desired_switched_traffic = 100') + + run_build_context + + local traffic_percent + traffic_percent=$(echo "$TOFU_VARIABLES" | jq -r '.staging_traffic_percent') + assert_equal "$traffic_percent" "100" +} + +@test "Should default promote_staging_to_production to false" { + run_build_context + + local promote + promote=$(echo "$TOFU_VARIABLES" | jq -r '.promote_staging_to_production') + assert_equal "$promote" "false" +} + +@test "Should use 
PROMOTE_STAGING_TO_PRODUCTION from environment when set" { + export PROMOTE_STAGING_TO_PRODUCTION="true" + + run_build_context + + local promote + promote=$(echo "$TOFU_VARIABLES" | jq -r '.promote_staging_to_production') + assert_equal "$promote" "true" +} + +@test "Should map fixed_instances to autoscale_default_instances" { + export CONTEXT=$(echo "$CONTEXT" | jq '.scope.capabilities.fixed_instances = 3') + + run_build_context + + local autoscale_default_instances + autoscale_default_instances=$(echo "$TOFU_VARIABLES" | jq -r '.autoscale_default_instances') + assert_equal "$autoscale_default_instances" "3" +} + +# ============================================================================= +# Test: Blue-green image preservation variables +# ============================================================================= +@test "Should default preserve_production_image to false" { + run_build_context + + local preserve + preserve=$(echo "$TOFU_VARIABLES" | jq -r '.preserve_production_image') + assert_equal "$preserve" "false" +} + +@test "Should use PRESERVE_PRODUCTION_IMAGE from environment when set" { + export PRESERVE_PRODUCTION_IMAGE="true" + + run_build_context + + local preserve + preserve=$(echo "$TOFU_VARIABLES" | jq -r '.preserve_production_image') + assert_equal "$preserve" "true" +} + +@test "Should generate and export STATE_KEY with scope_id" { + run_build_context + + assert_equal "$STATE_KEY" "azure-apps/7/terraform.tfstate" + + local state_key + state_key=$(echo "$TOFU_VARIABLES" | jq -r '.state_key') + assert_equal "$state_key" "azure-apps/7/terraform.tfstate" +} + +# ============================================================================= +# Test: Managed identity settings +# ============================================================================= +@test "Should default enable_system_identity to true" { + run_build_context + + local enable_identity + enable_identity=$(echo "$TOFU_VARIABLES" | jq -r '.enable_system_identity') + assert_equal 
"$enable_identity" "true" +} + +@test "Should allow ENABLE_SYSTEM_IDENTITY override from environment" { + export ENABLE_SYSTEM_IDENTITY="false" + + run_build_context + + local enable_identity + enable_identity=$(echo "$TOFU_VARIABLES" | jq -r '.enable_system_identity') + assert_equal "$enable_identity" "false" +} + +@test "Should allow STAGING_TRAFFIC_PERCENT env var to override context" { + export CONTEXT=$(echo "$CONTEXT" | jq '.deployment.strategy_data.desired_switched_traffic = 25') + export STAGING_TRAFFIC_PERCENT="50" + + run_build_context + + local traffic_percent + traffic_percent=$(echo "$TOFU_VARIABLES" | jq -r '.staging_traffic_percent') + assert_equal "$traffic_percent" "50" +} + +# ============================================================================= +# Test: OUTPUT_DIR and TF_WORKING_DIR +# ============================================================================= +@test "Should create OUTPUT_DIR with scope_id" { + run_build_context + + assert_equal "$OUTPUT_DIR" "$SERVICE_PATH/output/7" +} + +@test "Should create TF_WORKING_DIR as OUTPUT_DIR/terraform" { + run_build_context + + assert_contains "$TF_WORKING_DIR" "$OUTPUT_DIR/terraform" +} + +@test "Should create OUTPUT_DIR directory" { + run_build_context + + assert_directory_exists "$OUTPUT_DIR" +} + +@test "Should create TF_WORKING_DIR directory" { + run_build_context + + assert_directory_exists "$TF_WORKING_DIR" +} + +@test "Should use NP_OUTPUT_DIR when set" { + export NP_OUTPUT_DIR="$TEST_OUTPUT_DIR" + + run_build_context + + assert_equal "$OUTPUT_DIR" "$TEST_OUTPUT_DIR/output/7" +} + +# ============================================================================= +# Test: Exports are set +# ============================================================================= +@test "Should export SCOPE_ID" { + run_build_context + + assert_not_empty "$SCOPE_ID" "SCOPE_ID" +} + +@test "Should export DEPLOYMENT_ID" { + run_build_context + + assert_not_empty "$DEPLOYMENT_ID" "DEPLOYMENT_ID" +} + 
+@test "Should export APP_NAME" { + run_build_context + + assert_not_empty "$APP_NAME" "APP_NAME" +} + +@test "Should export DOCKER_IMAGE" { + run_build_context + + assert_not_empty "$DOCKER_IMAGE" "DOCKER_IMAGE" +} + +@test "Should export OUTPUT_DIR" { + run_build_context + + assert_not_empty "$OUTPUT_DIR" "OUTPUT_DIR" +} + +@test "Should export TF_WORKING_DIR" { + run_build_context + + assert_not_empty "$TF_WORKING_DIR" "TF_WORKING_DIR" +} + +@test "Should export TOFU_VARIABLES" { + run_build_context + + assert_not_empty "$TOFU_VARIABLES" "TOFU_VARIABLES" +} + +@test "Should export CONTEXT" { + run_build_context + + assert_not_empty "$CONTEXT" "CONTEXT" +} + +@test "Should export STATE_KEY" { + run_build_context + + assert_not_empty "$STATE_KEY" "STATE_KEY" +} diff --git a/azure-apps/deployment/tests/scripts/do_tofu_test.bats b/azure-apps/deployment/tests/scripts/do_tofu_test.bats new file mode 100644 index 00000000..83f76ad3 --- /dev/null +++ b/azure-apps/deployment/tests/scripts/do_tofu_test.bats @@ -0,0 +1,495 @@ +#!/usr/bin/env bats +# ============================================================================= +# Unit tests for do_tofu script +# +# Requirements: +# - bats-core: brew install bats-core +# - jq: brew install jq +# +# Run tests: +# bats tests/scripts/do_tofu_test.bats +# ============================================================================= + +# Setup once for all tests in this file +setup_file() { + export TEST_DIR="$(cd "$(dirname "$BATS_TEST_FILENAME")" && pwd)" + export PROJECT_DIR="$(cd "$TEST_DIR/../.." && pwd)" + export PROJECT_ROOT="$(cd "$PROJECT_DIR/../.." 
&& pwd)" + export SCRIPT_PATH="$PROJECT_DIR/scripts/do_tofu" + + # Create temporary directories once for all tests + export TEST_OUTPUT_DIR=$(mktemp -d) + export MOCK_BIN_DIR=$(mktemp -d) + export MOCK_TOFU_SOURCE=$(mktemp -d) + + # Create mock tofu source files + echo 'resource "azurerm_app_service" "main" {}' > "$MOCK_TOFU_SOURCE/main.tf" + echo 'variable "app_name" {}' > "$MOCK_TOFU_SOURCE/variables.tf" + mkdir -p "$MOCK_TOFU_SOURCE/scripts" + echo '#!/bin/bash' > "$MOCK_TOFU_SOURCE/scripts/helper.sh" + + # Setup mock tofu command + cat > "$MOCK_BIN_DIR/tofu" << 'EOF' +#!/bin/bash +echo "tofu $*" >> "$TOFU_MOCK_LOG" +exit 0 +EOF + chmod +x "$MOCK_BIN_DIR/tofu" +} + +# Cleanup once after all tests +teardown_file() { + rm -rf "$TEST_OUTPUT_DIR" "$MOCK_BIN_DIR" "$MOCK_TOFU_SOURCE" +} + +# Setup before each test - just reset state +setup() { + source "$PROJECT_ROOT/testing/assertions.sh" + + # Reset environment for each test + export TF_WORKING_DIR="$TEST_OUTPUT_DIR/terraform" + export OUTPUT_DIR="$TEST_OUTPUT_DIR" + export TOFU_VARIABLES='{"app_name": "test-app", "docker_image": "test:latest"}' + export TOFU_INIT_VARIABLES="-backend-config=storage_account_name=tfstate" + export TOFU_ACTION="apply" + export TOFU_MOCK_LOG="$TEST_OUTPUT_DIR/tofu_calls.log" + export DEPLOYMENT_ID="123" + export APP_NAME="test-app" + export SCOPE_ID="7" + export SERVICE_PATH="$PROJECT_DIR" + export TOFU_PATH="$MOCK_TOFU_SOURCE" + export PATH="$MOCK_BIN_DIR:$PATH" + + # Clean up from previous test + rm -rf "$TF_WORKING_DIR" "$TOFU_MOCK_LOG" "$OUTPUT_DIR/tofu.tfvars.json" + mkdir -p "$TF_WORKING_DIR" + + # Restore default mock (some tests override it) + cat > "$MOCK_BIN_DIR/tofu" << 'EOF' +#!/bin/bash +echo "tofu $*" >> "$TOFU_MOCK_LOG" +exit 0 +EOF + chmod +x "$MOCK_BIN_DIR/tofu" +} + +# ============================================================================= +# Test: Output messages +# ============================================================================= +@test "Should 
display deployment information at start" { + run bash "$SCRIPT_PATH" + + assert_equal "$status" "0" + assert_contains "$output" "📝 Running tofu apply for deployment: 123" + assert_contains "$output" "📋 App Service: test-app" +} + +@test "Should display initialization message" { + run bash "$SCRIPT_PATH" + + assert_equal "$status" "0" + assert_contains "$output" "📝 Initializing tofu..." +} + +@test "Should display action message" { + export TOFU_ACTION="apply" + + run bash "$SCRIPT_PATH" + + assert_equal "$status" "0" + assert_contains "$output" "📝 Running tofu apply..." +} + +@test "Should display completion message for apply" { + export TOFU_ACTION="apply" + + run bash "$SCRIPT_PATH" + + assert_equal "$status" "0" + assert_contains "$output" "✅ Tofu apply completed successfully" +} + +@test "Should display completion message for destroy" { + export TOFU_ACTION="destroy" + + run bash "$SCRIPT_PATH" + + assert_equal "$status" "0" + assert_contains "$output" "✅ Tofu destroy completed successfully" +} + +# ============================================================================= +# Test: Tofu file copying +# ============================================================================= +@test "Should copy .tf files from tofu source" { + run bash "$SCRIPT_PATH" + + assert_equal "$status" "0" + assert_file_exists "$TF_WORKING_DIR/main.tf" + assert_file_exists "$TF_WORKING_DIR/variables.tf" +} + +@test "Should copy scripts directory from tofu source" { + run bash "$SCRIPT_PATH" + + assert_equal "$status" "0" + assert_directory_exists "$TF_WORKING_DIR/scripts" +} + +@test "Should use SERVICE_PATH/deployment/modules as default source" { + unset TOFU_PATH + mkdir -p "$SERVICE_PATH/deployment/modules" + echo 'resource "test" {}' > "$SERVICE_PATH/deployment/modules/test.tf" + + run bash "$SCRIPT_PATH" + + assert_equal "$status" "0" + + rm -rf "$SERVICE_PATH/deployment/modules" +} + +# ============================================================================= +# Test: 
tfvars file creation +# ============================================================================= +@test "Should write TOFU_VARIABLES to tfvars.json file" { + export TOFU_VARIABLES='{"environment": "production", "replicas": 3}' + + run bash "$SCRIPT_PATH" + + assert_equal "$status" "0" + assert_file_exists "$OUTPUT_DIR/tofu.tfvars.json" + + local content + content=$(cat "$OUTPUT_DIR/tofu.tfvars.json") + assert_equal "$content" '{"environment": "production", "replicas": 3}' +} + +@test "Should create valid JSON in tfvars file" { + run bash "$SCRIPT_PATH" + + assert_equal "$status" "0" + + run jq '.' "$OUTPUT_DIR/tofu.tfvars.json" + assert_equal "$status" "0" +} + +# ============================================================================= +# Test: Tofu init command +# ============================================================================= +@test "Should call tofu init with -chdir" { + run bash "$SCRIPT_PATH" + + assert_equal "$status" "0" + assert_file_exists "$TOFU_MOCK_LOG" + + local init_call + init_call=$(grep "init" "$TOFU_MOCK_LOG" | head -1) + assert_contains "$init_call" "tofu -chdir=$TF_WORKING_DIR init" +} + +@test "Should call tofu init with -input=false" { + run bash "$SCRIPT_PATH" + + assert_equal "$status" "0" + + local init_call + init_call=$(grep "init" "$TOFU_MOCK_LOG" | head -1) + assert_contains "$init_call" "-input=false" +} + +@test "Should call tofu init with TOFU_INIT_VARIABLES" { + export TOFU_INIT_VARIABLES="-backend-config=storage_account_name=mystorageaccount -backend-config=container_name=tfstate" + + run bash "$SCRIPT_PATH" + + assert_equal "$status" "0" + + local init_call + init_call=$(grep "init" "$TOFU_MOCK_LOG" | head -1) + assert_contains "$init_call" "-backend-config=storage_account_name=mystorageaccount" + assert_contains "$init_call" "-backend-config=container_name=tfstate" +} + +# ============================================================================= +# Test: Tofu action command +# 
============================================================================= +@test "Should call tofu apply with -chdir" { + export TOFU_ACTION="apply" + + run bash "$SCRIPT_PATH" + + assert_equal "$status" "0" + + local action_call + action_call=$(grep "apply" "$TOFU_MOCK_LOG" | head -1) + assert_contains "$action_call" "tofu -chdir=$TF_WORKING_DIR apply" +} + +@test "Should call tofu with -auto-approve" { + export TOFU_ACTION="apply" + + run bash "$SCRIPT_PATH" + + assert_equal "$status" "0" + + local action_call + action_call=$(grep "apply" "$TOFU_MOCK_LOG" | head -1) + assert_contains "$action_call" "-auto-approve" +} + +@test "Should call tofu with -var-file" { + export TOFU_ACTION="apply" + + run bash "$SCRIPT_PATH" + + assert_equal "$status" "0" + + local action_call + action_call=$(grep "apply" "$TOFU_MOCK_LOG" | head -1) + assert_contains "$action_call" "-var-file=$OUTPUT_DIR/tofu.tfvars.json" +} + +@test "Should call tofu destroy when TOFU_ACTION is destroy" { + export TOFU_ACTION="destroy" + + run bash "$SCRIPT_PATH" + + assert_equal "$status" "0" + + local action_call + action_call=$(grep "destroy" "$TOFU_MOCK_LOG" | head -1) + assert_contains "$action_call" "tofu -chdir=$TF_WORKING_DIR destroy" +} + +# ============================================================================= +# Test: Default TOFU_ACTION +# ============================================================================= +@test "Should use apply as default TOFU_ACTION" { + unset TOFU_ACTION + + run bash "$SCRIPT_PATH" + + assert_equal "$status" "0" + assert_contains "$output" "📝 Running tofu apply for deployment:" +} + +# ============================================================================= +# Test: Command execution order +# ============================================================================= +@test "Should call tofu init before action" { + export TOFU_ACTION="apply" + + run bash "$SCRIPT_PATH" + + assert_equal "$status" "0" + assert_command_order "$TOFU_MOCK_LOG" \ + 
"tofu -chdir=$TF_WORKING_DIR init" \ + "tofu -chdir=$TF_WORKING_DIR apply" +} + +# ============================================================================= +# Test: Error handling +# ============================================================================= +@test "Should fail if tofu init fails" { + cat > "$MOCK_BIN_DIR/tofu" << 'EOF' +#!/bin/bash +if [[ "$*" == *"init"* ]]; then + echo "Error: Failed to initialize" >&2 + exit 1 +fi +exit 0 +EOF + chmod +x "$MOCK_BIN_DIR/tofu" + + run bash "$SCRIPT_PATH" + + assert_equal "$status" "1" +} + +@test "Should fail if tofu action fails" { + cat > "$MOCK_BIN_DIR/tofu" << 'EOF' +#!/bin/bash +echo "tofu $*" >> "$TOFU_MOCK_LOG" +if [[ "$*" == *"apply"* ]]; then + echo "Error: Apply failed" >&2 + exit 1 +fi +exit 0 +EOF + chmod +x "$MOCK_BIN_DIR/tofu" + + run bash "$SCRIPT_PATH" + + assert_equal "$status" "1" +} + +@test "Should fail if source directory does not exist" { + export TOFU_PATH="/nonexistent/path" + + run bash "$SCRIPT_PATH" + + assert_equal "$status" "1" + assert_contains "$output" "❌ Source directory does not exist" +} + +@test "Should fail if no .tf files found in source" { + local empty_source=$(mktemp -d) + + export TOFU_PATH="$empty_source" + + run bash "$SCRIPT_PATH" + + assert_equal "$status" "1" + assert_contains "$output" "❌ No .tf files found" + + rm -rf "$empty_source" +} + +# ============================================================================= +# Test: Custom modules (CUSTOM_TOFU_MODULES) +# ============================================================================= +@test "Should not copy custom modules when CUSTOM_TOFU_MODULES is not set" { + unset CUSTOM_TOFU_MODULES + + run bash "$SCRIPT_PATH" + + assert_equal "$status" "0" + # Should not contain "Adding custom module" message + if [[ "$output" == *"Adding custom module"* ]]; then + echo "Output should not contain 'Adding custom module' but it does" + return 1 + fi +} + +@test "Should copy files from single custom module" { + 
local custom_module=$(mktemp -d) + echo 'provider "azurerm" { features {} }' > "$custom_module/provider_override.tf" + + export CUSTOM_TOFU_MODULES="$custom_module" + + run bash "$SCRIPT_PATH" + + assert_equal "$status" "0" + assert_contains "$output" "📋 Adding custom module: $custom_module" + assert_file_exists "$TF_WORKING_DIR/provider_override.tf" + + rm -rf "$custom_module" +} + +@test "Should copy files from multiple custom modules" { + local custom_module1=$(mktemp -d) + local custom_module2=$(mktemp -d) + echo 'provider "azurerm" {}' > "$custom_module1/provider_override.tf" + echo 'terraform { backend "azurerm" {} }' > "$custom_module2/backend_override.tf" + + export CUSTOM_TOFU_MODULES="$custom_module1,$custom_module2" + + run bash "$SCRIPT_PATH" + + assert_equal "$status" "0" + assert_contains "$output" "📋 Adding custom module: $custom_module1" + assert_contains "$output" "📋 Adding custom module: $custom_module2" + assert_file_exists "$TF_WORKING_DIR/provider_override.tf" + assert_file_exists "$TF_WORKING_DIR/backend_override.tf" + + rm -rf "$custom_module1" "$custom_module2" +} + +@test "Should skip non-existent custom module directory" { + local existing_module=$(mktemp -d) + echo 'provider "azurerm" {}' > "$existing_module/provider_override.tf" + + export CUSTOM_TOFU_MODULES="/nonexistent/module,$existing_module" + + run bash "$SCRIPT_PATH" + + assert_equal "$status" "0" + # Should only show message for existing module + assert_contains "$output" "📋 Adding custom module: $existing_module" + assert_file_exists "$TF_WORKING_DIR/provider_override.tf" + + rm -rf "$existing_module" +} + +@test "Should allow custom module to override existing files" { + # Original file in main source + echo 'provider "azurerm" { features { key_vault {} } }' > "$MOCK_TOFU_SOURCE/provider.tf" + + # Override in custom module + local custom_module=$(mktemp -d) + echo 'provider "azurerm" { features {} skip_provider_registration = true }' > "$custom_module/provider.tf" + + export 
CUSTOM_TOFU_MODULES="$custom_module" + + run bash "$SCRIPT_PATH" + + assert_equal "$status" "0" + + # The custom module version should have overwritten the original + local content + content=$(cat "$TF_WORKING_DIR/provider.tf") + assert_contains "$content" "skip_provider_registration = true" + + rm -rf "$custom_module" +} + +@test "Should handle empty CUSTOM_TOFU_MODULES gracefully" { + export CUSTOM_TOFU_MODULES="" + + run bash "$SCRIPT_PATH" + + assert_equal "$status" "0" +} + +# ============================================================================= +# Test: Blue-green deployment (PRESERVE_PRODUCTION_IMAGE) +# Note: Image preservation logic has moved to Terraform (terraform_remote_state). +# The bash script now just passes through the configuration. +# ============================================================================= +@test "Should display blue-green mode message when PRESERVE_PRODUCTION_IMAGE is true" { + export PRESERVE_PRODUCTION_IMAGE="true" + export TOFU_VARIABLES='{"docker_image": "new-app:v2.0.0", "app_name": "test-app", "preserve_production_image": true}' + + run bash "$SCRIPT_PATH" + + assert_equal "$status" "0" + assert_contains "$output" "Blue-green mode: Terraform will preserve current production image" +} + +@test "Should not display blue-green message when PRESERVE_PRODUCTION_IMAGE is not set" { + export TOFU_VARIABLES='{"docker_image": "new-app:v2.0.0", "app_name": "test-app"}' + unset PRESERVE_PRODUCTION_IMAGE + + run bash "$SCRIPT_PATH" + + assert_equal "$status" "0" + + # Output should not contain blue-green messages + [[ "$output" != *"Blue-green mode"* ]] +} + +@test "Should not display blue-green message when PRESERVE_PRODUCTION_IMAGE is false" { + export PRESERVE_PRODUCTION_IMAGE="false" + export TOFU_VARIABLES='{"docker_image": "new-app:v2.0.0", "app_name": "test-app"}' + + run bash "$SCRIPT_PATH" + + assert_equal "$status" "0" + + # Output should not contain blue-green messages + [[ "$output" != *"Blue-green mode"* ]] +} 
+ +@test "Should pass through TOFU_VARIABLES unchanged to tfvars file" { + export PRESERVE_PRODUCTION_IMAGE="true" + export TOFU_VARIABLES='{"docker_image": "new-app:v2.0.0", "app_name": "test-app", "preserve_production_image": true}' + + run bash "$SCRIPT_PATH" + + assert_equal "$status" "0" + + # tfvars should contain the original values (Terraform handles the image preservation) + local docker_image + docker_image=$(cat "$OUTPUT_DIR/tofu.tfvars.json" | jq -r '.docker_image') + assert_equal "$docker_image" "new-app:v2.0.0" +} diff --git a/azure-apps/deployment/tests/scripts/docker_setup_test.bats b/azure-apps/deployment/tests/scripts/docker_setup_test.bats new file mode 100644 index 00000000..16fe97a0 --- /dev/null +++ b/azure-apps/deployment/tests/scripts/docker_setup_test.bats @@ -0,0 +1,189 @@ +#!/usr/bin/env bats +# ============================================================================= +# Unit tests for docker_setup script +# +# Requirements: +# - bats-core: brew install bats-core +# - jq: brew install jq +# +# Run tests: +# bats tests/scripts/docker_setup_test.bats +# +# Or run all tests: +# bats tests/scripts/*.bats +# ============================================================================= + +# Setup - runs before each test +setup() { + # Get the directory of the test file + TEST_DIR="$(cd "$(dirname "$BATS_TEST_FILENAME")" && pwd)" + PROJECT_DIR="$(cd "$TEST_DIR/../.." && pwd)" + PROJECT_ROOT="$(cd "$TEST_DIR/../../../.." 
&& pwd)" + + # Load shared test utilities + source "$PROJECT_ROOT/testing/assertions.sh" + + SCRIPT_PATH="$PROJECT_DIR/scripts/docker_setup" + + # Load CONTEXT from test resources + export CONTEXT=$(cat "$PROJECT_DIR/tests/resources/context.json") +} + +# Teardown - runs after each test +teardown() { + unset CONTEXT + unset DOCKER_REGISTRY_URL + unset DOCKER_REGISTRY_USERNAME + unset DOCKER_REGISTRY_PASSWORD +} + +# ============================================================================= +# Helper functions +# ============================================================================= +run_docker_setup() { + source "$SCRIPT_PATH" +} + +# ============================================================================= +# Test: Validation success messages +# ============================================================================= +@test "Should display validation header message" { + run source "$SCRIPT_PATH" + + assert_equal "$status" "0" + assert_contains "$output" "🔍 Validating Docker registry configuration..." 
+} + +@test "Should display success message when all values are set" { + run source "$SCRIPT_PATH" + + assert_equal "$status" "0" + assert_contains "$output" "✨ Docker registry configured successfully" +} + +@test "Should display variable values when validation passes" { + run source "$SCRIPT_PATH" + + assert_equal "$status" "0" + assert_contains "$output" "✅ DOCKER_REGISTRY_URL=https://testregistry.azurecr.io" + assert_contains "$output" "✅ DOCKER_REGISTRY_USERNAME=test-registry-user" + assert_contains "$output" "✅ DOCKER_REGISTRY_PASSWORD=****" + assert_contains "$output" "✅ DOCKER_IMAGE=tools/automation:v1.0.0" +} + +# ============================================================================= +# Test: Context value extraction +# ============================================================================= +@test "Should extract DOCKER_REGISTRY_URL from context with https prefix" { + run_docker_setup + + assert_equal "$DOCKER_REGISTRY_URL" "https://testregistry.azurecr.io" +} + +@test "Should extract DOCKER_REGISTRY_USERNAME from context" { + run_docker_setup + + assert_equal "$DOCKER_REGISTRY_USERNAME" "test-registry-user" +} + +@test "Should extract DOCKER_REGISTRY_PASSWORD from context" { + run_docker_setup + + assert_equal "$DOCKER_REGISTRY_PASSWORD" "test-registry-password" +} + +# ============================================================================= +# Test: Context-derived variables - Validation errors +# ============================================================================= +@test "Should fail when server is missing from context" { + export CONTEXT=$(echo "$CONTEXT" | jq 'del(.providers["assets-repository"].setup.server)') + + run source "$SCRIPT_PATH" + + assert_equal "$status" "1" + assert_contains "$output" "❌ DOCKER_REGISTRY_SERVER could not be resolved from providers" + assert_contains "$output" "💡 Possible causes:" + assert_contains "$output" "Verify that you have a Docker server asset provider provider linked to this scope." 
+ assert_contains "$output" "• server" +} + +@test "Should fail when username is missing from context" { + export CONTEXT=$(echo "$CONTEXT" | jq 'del(.providers["assets-repository"].setup.username)') + + run source "$SCRIPT_PATH" + + assert_equal "$status" "1" + assert_contains "$output" "❌ DOCKER_REGISTRY_USERNAME could not be resolved from providers" + assert_contains "$output" "💡 Possible causes:" + assert_contains "$output" "• username" +} + +@test "Should fail when password is missing from context" { + export CONTEXT=$(echo "$CONTEXT" | jq 'del(.providers["assets-repository"].setup.password)') + + run source "$SCRIPT_PATH" + + assert_equal "$status" "1" + assert_contains "$output" "❌ DOCKER_REGISTRY_PASSWORD could not be resolved from providers" + assert_contains "$output" "💡 Possible causes:" + assert_contains "$output" "• password" +} + +@test "Should fail when assets-repository provider is missing from context" { + export CONTEXT=$(echo "$CONTEXT" | jq 'del(.providers["assets-repository"])') + + run source "$SCRIPT_PATH" + + assert_equal "$status" "1" + assert_contains "$output" "❌ DOCKER_REGISTRY_SERVER could not be resolved from providers" + assert_contains "$output" "❌ DOCKER_REGISTRY_USERNAME could not be resolved from providers" + assert_contains "$output" "❌ DOCKER_REGISTRY_PASSWORD could not be resolved from providers" + assert_contains "$output" "💡 Possible causes:" + assert_contains "$output" "Verify that you have a Docker server asset provider provider linked to this scope." 
+} + +@test "Should list all missing fields when multiple are absent" { + export CONTEXT=$(echo "$CONTEXT" | jq 'del(.providers["assets-repository"].setup.server, .providers["assets-repository"].setup.username)') + + run source "$SCRIPT_PATH" + + assert_equal "$status" "1" + assert_contains "$output" "• server" + assert_contains "$output" "• username" +} + +# ============================================================================= +# Test: Environment variable exports +# ============================================================================= +@test "Should export DOCKER_REGISTRY_URL" { + run_docker_setup + + assert_equal "$DOCKER_REGISTRY_URL" "https://testregistry.azurecr.io" +} + +@test "Should export DOCKER_REGISTRY_USERNAME" { + run_docker_setup + + assert_equal "$DOCKER_REGISTRY_USERNAME" "test-registry-user" +} + +@test "Should export DOCKER_REGISTRY_PASSWORD" { + run_docker_setup + + assert_equal "$DOCKER_REGISTRY_PASSWORD" "test-registry-password" +} + +# ============================================================================= +# Test: Docker image extraction +# ============================================================================= +@test "Should extract DOCKER_IMAGE from asset URL with registry server stripped" { + run_docker_setup + + assert_equal "$DOCKER_IMAGE" "tools/automation:v1.0.0" +} + +@test "Should export DOCKER_IMAGE" { + run_docker_setup + + assert_equal "$DOCKER_IMAGE" "tools/automation:v1.0.0" +} diff --git a/azure-apps/deployment/tests/scripts/generate_resource_name_test.bats b/azure-apps/deployment/tests/scripts/generate_resource_name_test.bats new file mode 100644 index 00000000..2da9014d --- /dev/null +++ b/azure-apps/deployment/tests/scripts/generate_resource_name_test.bats @@ -0,0 +1,215 @@ +#!/usr/bin/env bats +# ============================================================================= +# Unit tests for generate_resource_name script +# +# Requirements: +# - bats-core: brew install bats-core +# +# Run tests: 
+# bats tests/scripts/generate_resource_name_test.bats +# +# Or run all tests: +# bats tests/scripts/*.bats +# ============================================================================= + +# Setup - runs before each test +setup() { + # Get the directory of the test file + TEST_DIR="$(cd "$(dirname "$BATS_TEST_FILENAME")" && pwd)" + PROJECT_DIR="$(cd "$TEST_DIR/../.." && pwd)" + PROJECT_ROOT="$(cd "$TEST_DIR/../../../.." && pwd)" + + # Load shared test utilities + source "$PROJECT_ROOT/testing/assertions.sh" + + SCRIPT_PATH="$PROJECT_DIR/scripts/generate_resource_name" +} + +# ============================================================================= +# Test: Basic functionality +# ============================================================================= +@test "Should generate name with all segments when within max length" { + run "$SCRIPT_PATH" 60 "namespace" "application" "scope" "12345" + + assert_equal "$status" "0" + assert_equal "$output" "namespace-application-scope-12345" +} + +@test "Should generate name with two segments plus ID" { + run "$SCRIPT_PATH" 60 "namespace" "scope" "12345" + + assert_equal "$status" "0" + assert_equal "$output" "namespace-scope-12345" +} + +@test "Should generate name with single segment plus ID" { + run "$SCRIPT_PATH" 60 "namespace" "12345" + + assert_equal "$status" "0" + assert_equal "$output" "namespace-12345" +} + +@test "Should return just ID when only ID is provided" { + run "$SCRIPT_PATH" 60 "12345" + + assert_equal "$status" "0" + assert_equal "$output" "12345" +} + +# ============================================================================= +# Test: Truncation behavior +# ============================================================================= +@test "Should remove leftmost segment when name exceeds max length" { + # namespace-application-scope-12345 = 33 chars + # With max_length=30, should become: application-scope-12345 = 23 chars + run "$SCRIPT_PATH" 30 "namespace" "application" "scope" "12345" + 
+ assert_equal "$status" "0" + assert_equal "$output" "application-scope-12345" +} + +@test "Should remove multiple segments from left when necessary" { + # namespace-application-scope-12345 = 33 chars + # With max_length=20, should become: scope-12345 = 11 chars + run "$SCRIPT_PATH" 20 "namespace" "application" "scope" "12345" + + assert_equal "$status" "0" + assert_equal "$output" "scope-12345" +} + +@test "Should truncate last segment as last resort when no more hyphens" { + # namespace-application-scope-12345 = 33 chars + # With max_length=10, suffix is -12345 (6 chars), max_prefix_length=4 + # After removing namespace and application, "scope" remains but has no hyphens + # So it truncates "scope" to "scop" (4 chars) -> "scop-12345" + run "$SCRIPT_PATH" 10 "namespace" "application" "scope" "12345" + + assert_equal "$status" "0" + assert_equal "$output" "scop-12345" +} + +@test "Should return just ID when prefix becomes empty" { + # When max_prefix_length is 0 or negative, only ID should remain + run "$SCRIPT_PATH" 6 "namespace" "application" "scope" "12345" + + assert_equal "$status" "0" + assert_equal "$output" "12345" +} + +@test "Should handle exact max length boundary" { + # namespace-application-scope-12345 = 33 chars + run "$SCRIPT_PATH" 33 "namespace" "application" "scope" "12345" + + assert_equal "$status" "0" + assert_equal "$output" "namespace-application-scope-12345" +} + +@test "Should truncate when one char over max length" { + # namespace-application-scope-12345 = 33 chars + # With max_length=32, needs truncation -> application-scope-12345 = 23 chars + run "$SCRIPT_PATH" 32 "namespace" "application" "scope" "12345" + + assert_equal "$status" "0" + assert_equal "$output" "application-scope-12345" +} + +# ============================================================================= +# Test: Azure App Service name constraints (60 char max) +# ============================================================================= +@test "Should handle Azure App 
Service max length (60 chars)" { + run "$SCRIPT_PATH" 60 "production-namespace" "my-application" "development-scope" "999999" + + assert_equal "$status" "0" + # production-namespace-my-application-development-scope-999999 = 60 chars (exactly at limit) + local result_length=${#output} + assert_less_than "$result_length" "61" "name length" +} + +@test "Should truncate long Azure names correctly" { + # very-long-namespace-name-my-application-name-development-scope-name-999999 > 60 chars + run "$SCRIPT_PATH" 60 "very-long-namespace-name" "my-application-name" "development-scope-name" "999999" + + assert_equal "$status" "0" + local result_length=${#output} + assert_less_than "$result_length" "61" "name length" + # Should always end with the ID + assert_contains "$output" "-999999" +} + +# ============================================================================= +# Test: Edge cases +# ============================================================================= +@test "Should handle segments with hyphens" { + run "$SCRIPT_PATH" 60 "my-namespace" "my-app" "my-scope" "12345" + + assert_equal "$status" "0" + assert_equal "$output" "my-namespace-my-app-my-scope-12345" +} + +@test "Should handle numeric segments" { + run "$SCRIPT_PATH" 60 "123" "456" "789" + + assert_equal "$status" "0" + assert_equal "$output" "123-456-789" +} + +@test "Should handle single character segments" { + run "$SCRIPT_PATH" 60 "a" "b" "c" "1" + + assert_equal "$status" "0" + assert_equal "$output" "a-b-c-1" +} + +@test "Should keep prefix when it fits within max length" { + # app-1 = 5 chars, which fits within max_length=5 + run "$SCRIPT_PATH" 5 "namespace" "app" "1" + + assert_equal "$status" "0" + assert_equal "$output" "app-1" +} + +@test "Should return just ID when max_length equals ID length plus hyphen" { + # When max_length only allows for the ID and hyphen, prefix is dropped + run "$SCRIPT_PATH" 2 "namespace" "app" "1" + + assert_equal "$status" "0" + assert_equal "$output" "1" +} + +# 
============================================================================= +# Test: Error handling +# ============================================================================= +@test "Should fail with usage message when no arguments provided" { + run "$SCRIPT_PATH" + + assert_equal "$status" "1" + assert_contains "$output" "Usage:" +} + +@test "Should fail with usage message when only max_length provided" { + run "$SCRIPT_PATH" 60 + + assert_equal "$status" "1" + assert_contains "$output" "Usage:" +} + +# ============================================================================= +# Test: Real-world scenarios +# ============================================================================= +@test "Should generate valid name for typical nullplatform deployment" { + run "$SCRIPT_PATH" 60 "tools" "automation" "development-tools" "7" + + assert_equal "$status" "0" + assert_equal "$output" "tools-automation-development-tools-7" +} + +@test "Should handle long org names in nullplatform" { + run "$SCRIPT_PATH" 60 "enterprise-production" "customer-portal" "staging-environment" "12345678" + + assert_equal "$status" "0" + # Should fit within 60 chars + local result_length=${#output} + assert_less_than "$result_length" "61" "name length" + # Should end with scope ID + assert_contains "$output" "-12345678" +} diff --git a/azure-apps/deployment/tests/scripts/get_sku_from_memory_test.bats b/azure-apps/deployment/tests/scripts/get_sku_from_memory_test.bats new file mode 100644 index 00000000..8869f845 --- /dev/null +++ b/azure-apps/deployment/tests/scripts/get_sku_from_memory_test.bats @@ -0,0 +1,151 @@ +#!/usr/bin/env bats +# ============================================================================= +# Unit tests for get_sku_from_memory script +# +# Requirements: +# - bats-core: brew install bats-core +# +# Run tests: +# bats tests/scripts/get_sku_from_memory_test.bats +# +# Or run all tests: +# bats tests/scripts/*.bats +# 
============================================================================= + +# Setup - runs before each test +setup() { + # Get the directory of the test file + TEST_DIR="$(cd "$(dirname "$BATS_TEST_FILENAME")" && pwd)" + PROJECT_DIR="$(cd "$TEST_DIR/../.." && pwd)" + PROJECT_ROOT="$(cd "$TEST_DIR/../../../.." && pwd)" + + # Load shared test utilities + source "$PROJECT_ROOT/testing/assertions.sh" + + SCRIPT_PATH="$PROJECT_DIR/scripts/get_sku_from_memory" +} + +# ============================================================================= +# Test: Valid memory values - SKU mapping +# ============================================================================= +@test "Should return F1 for 1 GB memory" { + run "$SCRIPT_PATH" 1 + + assert_equal "$status" "0" + assert_equal "$output" "F1" +} + +@test "Should return S1 for 2 GB memory" { + run "$SCRIPT_PATH" 2 + + assert_equal "$status" "0" + assert_equal "$output" "S1" +} + +@test "Should return S2 for 4 GB memory" { + run "$SCRIPT_PATH" 4 + + assert_equal "$status" "0" + assert_equal "$output" "S2" +} + +@test "Should return P1v3 for 8 GB memory" { + run "$SCRIPT_PATH" 8 + + assert_equal "$status" "0" + assert_equal "$output" "P1v3" +} + +@test "Should return P2v3 for 16 GB memory" { + run "$SCRIPT_PATH" 16 + + assert_equal "$status" "0" + assert_equal "$output" "P2v3" +} + +@test "Should return P3v3 for 32 GB memory" { + run "$SCRIPT_PATH" 32 + + assert_equal "$status" "0" + assert_equal "$output" "P3v3" +} + +# ============================================================================= +# Test: Error handling - Invalid memory values +# ============================================================================= +@test "Should fail with error message for invalid memory value 5" { + run "$SCRIPT_PATH" 5 + + assert_equal "$status" "1" + assert_contains "$output" "❌ Invalid memory value: 5" + assert_contains "$output" "💡 Valid memory values are: 2, 4, 8, 16, 32 (in GB)" + assert_contains "$output" "🔧 How to 
fix:" +} + +@test "Should fail with error message for invalid memory value 3" { + run "$SCRIPT_PATH" 3 + + assert_equal "$status" "1" + assert_contains "$output" "❌ Invalid memory value: 3" +} + +@test "Should fail with error message for invalid memory value 64" { + run "$SCRIPT_PATH" 64 + + assert_equal "$status" "1" + assert_contains "$output" "❌ Invalid memory value: 64" +} + +@test "Should fail with error message for non-numeric value" { + run "$SCRIPT_PATH" "large" + + assert_equal "$status" "1" + assert_contains "$output" "❌ Invalid memory value: large" +} + +@test "Should fail with error message for empty string" { + run "$SCRIPT_PATH" "" + + assert_equal "$status" "1" + assert_contains "$output" "❌ Invalid memory value:" +} + +@test "Should fail with error message for negative value" { + run "$SCRIPT_PATH" "-8" + + assert_equal "$status" "1" + assert_contains "$output" "❌ Invalid memory value: -8" +} + +@test "Should fail with error message for decimal value" { + run "$SCRIPT_PATH" "2.5" + + assert_equal "$status" "1" + assert_contains "$output" "❌ Invalid memory value: 2.5" +} + +# ============================================================================= +# Test: Error handling - Missing argument +# ============================================================================= +@test "Should fail with usage message when no argument provided" { + run "$SCRIPT_PATH" + + assert_equal "$status" "1" + assert_contains "$output" "❌ Missing required argument: memory" + assert_contains "$output" "Usage:" + assert_contains "$output" "Valid memory values: 2, 4, 8, 16, 32" +} + +# ============================================================================= +# Test: Error message includes fix instructions +# ============================================================================= +@test "Should include all valid options in fix instructions" { + run "$SCRIPT_PATH" 100 + + assert_equal "$status" "1" + assert_contains "$output" "• 2 GB - Standard tier (S1)" + 
assert_contains "$output" "• 4 GB - Standard tier (S2)" + assert_contains "$output" "• 8 GB - Premium tier (P1v3)" + assert_contains "$output" "• 16 GB - Premium tier (P2v3)" + assert_contains "$output" "• 32 GB - Premium tier (P3v3)" +} diff --git a/azure-apps/deployment/tests/scripts/traffic_management_test.bats b/azure-apps/deployment/tests/scripts/traffic_management_test.bats new file mode 100644 index 00000000..509877c2 --- /dev/null +++ b/azure-apps/deployment/tests/scripts/traffic_management_test.bats @@ -0,0 +1,232 @@ +#!/usr/bin/env bats +# ============================================================================= +# Unit tests for traffic_management script +# +# Requirements: +# - bats-core: brew install bats-core +# +# Run tests: +# bats tests/scripts/traffic_management_test.bats +# +# Or run all tests: +# bats tests/scripts/*.bats +# ============================================================================= + +# Setup - runs before each test +setup() { + TEST_DIR="$(cd "$(dirname "$BATS_TEST_FILENAME")" && pwd)" + PROJECT_DIR="$(cd "$TEST_DIR/../.." && pwd)" + PROJECT_ROOT="$(cd "$TEST_DIR/../../../.." 
&& pwd)" + + # Load shared test utilities + source "$PROJECT_ROOT/testing/assertions.sh" + + SCRIPT_PATH="$PROJECT_DIR/scripts/traffic_management" + MOCKS_DIR="$PROJECT_DIR/tests/resources/mocks" + + # Add mock az to PATH (must be first to override real az) + export PATH="$MOCKS_DIR:$PATH" + + # Create a temp file to capture az calls + export AZ_CALL_LOG=$(mktemp) + export AZ_MOCK_EXIT_CODE=0 + + # Set required environment variables (from azure_setup) + export ARM_CLIENT_ID="test-client-id" + export ARM_CLIENT_SECRET="test-client-secret" + export ARM_TENANT_ID="test-tenant-id" + export ARM_SUBSCRIPTION_ID="test-subscription-id" + export AZURE_RESOURCE_GROUP="test-resource-group" + + # Set required environment variables (from build_context) + export APP_NAME="test-app" + export STAGING_TRAFFIC_PERCENT="0" +} + +# Teardown - runs after each test +teardown() { + rm -f "$AZ_CALL_LOG" + unset AZ_CALL_LOG + unset AZ_MOCK_EXIT_CODE + unset ARM_CLIENT_ID + unset ARM_CLIENT_SECRET + unset ARM_TENANT_ID + unset ARM_SUBSCRIPTION_ID + unset AZURE_RESOURCE_GROUP + unset APP_NAME + unset STAGING_TRAFFIC_PERCENT +} + +# ============================================================================= +# Test: Output messages +# ============================================================================= +@test "Should display routing header message" { + run source "$SCRIPT_PATH" + + assert_equal "$status" "0" + assert_contains "$output" "Configuring traffic routing..." 
+} + +@test "Should display success message" { + run source "$SCRIPT_PATH" + + assert_equal "$status" "0" + assert_contains "$output" "Traffic routing updated successfully" +} + +# ============================================================================= +# Test: Azure login +# ============================================================================= +@test "Should login to Azure with service principal credentials" { + run source "$SCRIPT_PATH" + + assert_equal "$status" "0" + + local calls + calls=$(cat "$AZ_CALL_LOG") + assert_contains "$calls" "login --service-principal --username test-client-id --password test-client-secret --tenant test-tenant-id --output none" +} + +@test "Should set the Azure subscription" { + run source "$SCRIPT_PATH" + + assert_equal "$status" "0" + + local calls + calls=$(cat "$AZ_CALL_LOG") + assert_contains "$calls" "account set --subscription test-subscription-id" +} + +@test "Should fail when az login fails" { + export AZ_MOCK_EXIT_CODE=1 + + run source "$SCRIPT_PATH" + + assert_equal "$status" "1" +} + +# ============================================================================= +# Test: Traffic routing - Clear (0%) +# ============================================================================= +@test "Should clear traffic routing when STAGING_TRAFFIC_PERCENT is 0" { + export STAGING_TRAFFIC_PERCENT="0" + + run source "$SCRIPT_PATH" + + assert_equal "$status" "0" + + local calls + calls=$(cat "$AZ_CALL_LOG") + assert_contains "$calls" "webapp traffic-routing clear --resource-group test-resource-group --name test-app" +} + +@test "Should display clearing message when STAGING_TRAFFIC_PERCENT is 0" { + export STAGING_TRAFFIC_PERCENT="0" + + run source "$SCRIPT_PATH" + + assert_equal "$status" "0" + assert_contains "$output" "Clearing traffic routing for test-app (100% to production)" +} + +@test "Should default STAGING_TRAFFIC_PERCENT to 0 and clear routing" { + unset STAGING_TRAFFIC_PERCENT + + run source "$SCRIPT_PATH" + + 
assert_equal "$status" "0" + assert_contains "$output" "Clearing traffic routing for test-app (100% to production)" +} + +# ============================================================================= +# Test: Traffic routing - Set (> 0%) +# ============================================================================= +@test "Should set traffic routing when STAGING_TRAFFIC_PERCENT is greater than 0" { + export STAGING_TRAFFIC_PERCENT="25" + + run source "$SCRIPT_PATH" + + assert_equal "$status" "0" + + local calls + calls=$(cat "$AZ_CALL_LOG") + assert_contains "$calls" "webapp traffic-routing set --resource-group test-resource-group --name test-app --distribution staging=25" +} + +@test "Should display correct percentages when setting traffic to 25%" { + export STAGING_TRAFFIC_PERCENT="25" + + run source "$SCRIPT_PATH" + + assert_equal "$status" "0" + assert_contains "$output" "Setting traffic for test-app: production=75% staging=25%" +} + +@test "Should display correct percentages when setting traffic to 50%" { + export STAGING_TRAFFIC_PERCENT="50" + + run source "$SCRIPT_PATH" + + assert_equal "$status" "0" + assert_contains "$output" "Setting traffic for test-app: production=50% staging=50%" +} + +@test "Should display correct percentages when setting traffic to 100%" { + export STAGING_TRAFFIC_PERCENT="100" + + run source "$SCRIPT_PATH" + + assert_equal "$status" "0" + assert_contains "$output" "Setting traffic for test-app: production=0% staging=100%" +} + +# ============================================================================= +# Test: Missing environment variables +# ============================================================================= +@test "Should fail when ARM_CLIENT_ID is not set" { + unset ARM_CLIENT_ID + + run source "$SCRIPT_PATH" + + assert_equal "$status" "1" +} + +@test "Should fail when ARM_CLIENT_SECRET is not set" { + unset ARM_CLIENT_SECRET + + run source "$SCRIPT_PATH" + + assert_equal "$status" "1" +} + +@test "Should fail 
when ARM_TENANT_ID is not set" { + unset ARM_TENANT_ID + + run source "$SCRIPT_PATH" + + assert_equal "$status" "1" +} + +@test "Should fail when ARM_SUBSCRIPTION_ID is not set" { + unset ARM_SUBSCRIPTION_ID + + run source "$SCRIPT_PATH" + + assert_equal "$status" "1" +} + +@test "Should fail when APP_NAME is not set" { + unset APP_NAME + + run source "$SCRIPT_PATH" + + assert_equal "$status" "1" +} + +@test "Should fail when AZURE_RESOURCE_GROUP is not set" { + unset AZURE_RESOURCE_GROUP + + run source "$SCRIPT_PATH" + + assert_equal "$status" "1" +} diff --git a/azure-apps/deployment/workflows/blue_green.yaml b/azure-apps/deployment/workflows/blue_green.yaml new file mode 100644 index 00000000..bd35d175 --- /dev/null +++ b/azure-apps/deployment/workflows/blue_green.yaml @@ -0,0 +1,6 @@ +include: + - "$SERVICE_PATH/deployment/workflows/initial.yaml" +configuration: + ENABLE_STAGING_SLOT: true + PROMOTE_STAGING_TO_PRODUCTION: false + PRESERVE_PRODUCTION_IMAGE: true diff --git a/azure-apps/deployment/workflows/delete.yaml b/azure-apps/deployment/workflows/delete.yaml new file mode 100644 index 00000000..95091305 --- /dev/null +++ b/azure-apps/deployment/workflows/delete.yaml @@ -0,0 +1,5 @@ +include: + - "$SERVICE_PATH/deployment/workflows/initial.yaml" +configuration: + TOFU_ACTION: "destroy" + ENABLE_STAGING_SLOT: false \ No newline at end of file diff --git a/azure-apps/deployment/workflows/finalize.yaml b/azure-apps/deployment/workflows/finalize.yaml new file mode 100644 index 00000000..b26bebd9 --- /dev/null +++ b/azure-apps/deployment/workflows/finalize.yaml @@ -0,0 +1,13 @@ +include: + - "$SERVICE_PATH/deployment/workflows/initial.yaml" +configuration: + ENABLE_STAGING_SLOT: false + PROMOTE_STAGING_TO_PRODUCTION: true + PRESERVE_PRODUCTION_IMAGE: false +steps: + - name: clear_traffic + type: script + file: "$SERVICE_PATH/deployment/scripts/traffic_management" + configuration: + STAGING_TRAFFIC_PERCENT: 0 + before: tofu \ No newline at end of file diff --git 
a/azure-apps/deployment/workflows/initial.yaml b/azure-apps/deployment/workflows/initial.yaml new file mode 100644 index 00000000..356d5efe --- /dev/null +++ b/azure-apps/deployment/workflows/initial.yaml @@ -0,0 +1,33 @@ +include: + - "$SERVICE_PATH/values.yaml" +configuration: + TOFU_ACTION: "apply" + ENABLE_STAGING_SLOT: false +steps: + - name: docker_setup + type: script + file: "$SERVICE_PATH/deployment/scripts/docker_setup" + - name: build_context + type: script + file: "$SERVICE_PATH/deployment/scripts/build_context" + output: + - name: TOFU_VARIABLES + type: environment + - name: SCOPE_ID + type: environment + - name: DEPLOYMENT_ID + type: environment + - name: APP_NAME + type: environment + - name: OUTPUT_DIR + type: environment + - name: TF_WORKING_DIR + type: environment + - name: DOCKER_IMAGE + type: environment + - name: setup_provider + type: script + file: "$SERVICE_PATH/deployment/scripts/azure_setup" + - name: tofu + type: script + file: "$SERVICE_PATH/deployment/scripts/do_tofu" diff --git a/azure-apps/deployment/workflows/rollback.yaml b/azure-apps/deployment/workflows/rollback.yaml new file mode 100644 index 00000000..b1bb7e07 --- /dev/null +++ b/azure-apps/deployment/workflows/rollback.yaml @@ -0,0 +1,13 @@ +include: + - "$SERVICE_PATH/deployment/workflows/initial.yaml" +configuration: + SWAP_SLOTS: false + ENABLE_STAGING_SLOT: false + PRESERVE_PRODUCTION_IMAGE: true +steps: + - name: switch_traffic + type: script + file: "$SERVICE_PATH/deployment/scripts/traffic_management" + configuration: + STAGING_TRAFFIC_PERCENT: 0 + before: tofu \ No newline at end of file diff --git a/azure-apps/deployment/workflows/switch_traffic.yaml b/azure-apps/deployment/workflows/switch_traffic.yaml new file mode 100644 index 00000000..5de0b996 --- /dev/null +++ b/azure-apps/deployment/workflows/switch_traffic.yaml @@ -0,0 +1,33 @@ +include: + - "$SERVICE_PATH/values.yaml" +configuration: + ENABLE_STAGING_SLOT: true + PROMOTE_STAGING_TO_PRODUCTION: false +steps: + 
- name: docker_setup + type: script + file: "$SERVICE_PATH/deployment/scripts/docker_setup" + - name: build_context + type: script + file: "$SERVICE_PATH/deployment/scripts/build_context" + output: + - name: TOFU_VARIABLES + type: environment + - name: SCOPE_ID + type: environment + - name: DEPLOYMENT_ID + type: environment + - name: APP_NAME + type: environment + - name: OUTPUT_DIR + type: environment + - name: TF_WORKING_DIR + type: environment + - name: DOCKER_IMAGE + type: environment + - name: setup_provider + type: script + file: "$SERVICE_PATH/deployment/scripts/azure_setup" + - name: switch_traffic + type: script + file: "$SERVICE_PATH/deployment/scripts/traffic_management" \ No newline at end of file diff --git a/azure-apps/instance/build_context b/azure-apps/instance/build_context new file mode 100755 index 00000000..850ebdbc --- /dev/null +++ b/azure-apps/instance/build_context @@ -0,0 +1,29 @@ +#!/bin/bash +set -euo pipefail + +# Extract all context fields in a single jq call +# CONTEXT = $NP_ACTION_CONTEXT.notification (set by main entrypoint) +eval "$(echo "$CONTEXT" | jq -r ' + "export SCOPE_ID=" + ((.arguments.scope_id // .scope.id // "") | if type == "array" then .[0] else . end | tostring | @sh) + "\n" + + "export APPLICATION_ID=" + ((.arguments.application_id // .scope.application_id // .tags.application_id // "") | if type == "array" then .[0] else . end | tostring | @sh) + "\n" + + "export DEPLOYMENT_ID=" + ((.arguments.deployment_id // .scope.active_deployment // "") | if type == "array" then .[0] else . 
end | tostring | @sh) + "\n" + + "SCOPE_SLUG=" + (.scope.slug // "" | @sh) + "\n" + + "NS_SLUG=" + (.tags.namespace // "" | @sh) + "\n" + + "APP_SLUG=" + (.tags.application // "" | @sh) + "\n" + + "SCOPE_NRN=" + (.scope.nrn // .entity_nrn // "" | @sh) +')" + +export LIMIT=${LIMIT:-10} + +# If DEPLOYMENT_ID is empty, query the scope to get active_deployment +if [ -z "$DEPLOYMENT_ID" ] || [ "$DEPLOYMENT_ID" = "null" ]; then + if [ -n "$SCOPE_ID" ] && [ "$SCOPE_ID" != "null" ]; then + SCOPE_INFO=$(np scope read --id "$SCOPE_ID" --format json 2>/dev/null || echo '{}') + DEPLOYMENT_ID=$(echo "$SCOPE_INFO" | jq -r '.active_deployment // empty') + export DEPLOYMENT_ID + fi +fi + +# Use shared helper to resolve Azure context +# shellcheck source=../deployment/scripts/resolve_azure_context +source "$SERVICE_PATH/deployment/scripts/resolve_azure_context" diff --git a/azure-apps/instance/list b/azure-apps/instance/list new file mode 100755 index 00000000..6fc29f87 --- /dev/null +++ b/azure-apps/instance/list @@ -0,0 +1,64 @@ +#!/bin/bash +set -euo pipefail + +# List Azure App Service instances via REST API (both production and staging slots) + +BASE_URL="https://management.azure.com/subscriptions/${ARM_SUBSCRIPTION_ID}/resourceGroups/${AZURE_RESOURCE_GROUP}/providers/Microsoft.Web/sites/${APP_NAME}" +API_VERSION="api-version=2022-03-01" + +# Query production slot instances +PROD_INSTANCES=$(curl -s "${BASE_URL}/instances?${API_VERSION}" \ + -H "Authorization: Bearer ${AZURE_ACCESS_TOKEN}" \ + -H "Content-Type: application/json") + +# Query staging slot instances (may not exist) +STAGING_INSTANCES=$(curl -s "${BASE_URL}/slots/staging/instances?${API_VERSION}" \ + -H "Authorization: Bearer ${AZURE_ACCESS_TOKEN}" \ + -H "Content-Type: application/json" 2>/dev/null || echo '{"value":[]}') + +# Check if staging slot exists (error response won't have "value" array) +if ! 
echo "$STAGING_INSTANCES" | jq -e '.value' >/dev/null 2>&1; then + STAGING_INSTANCES='{"value":[]}' +fi + +# Merge and transform results +jq -n \ + --argjson prod "$PROD_INSTANCES" \ + --argjson staging "$STAGING_INSTANCES" \ + --argjson limit "${LIMIT:-10}" \ + --arg app_name "$APP_NAME" \ + --arg scope_id "${SCOPE_ID:-}" \ + --arg application_id "${APPLICATION_ID:-}" \ + --arg deployment_id "${DEPLOYMENT_ID:-}" \ + --arg resource_group "${AZURE_RESOURCE_GROUP:-}" \ +'{ + results: ( + (($prod.value // []) | map(. + {_slot: "production"})) + + (($staging.value // []) | map(. + {_slot: "staging"})) + )[:$limit] | map({ + id: .name, + selector: { + scope_id: $scope_id, + application_id: $application_id, + deployment_id: $deployment_id, + slot: ._slot + }, + details: { + namespace: $resource_group, + ip: "", + dns: (if ._slot == "staging" then ($app_name + "-staging.azurewebsites.net") else ($app_name + ".azurewebsites.net") end), + cpu: { + requested: 0, + limit: 0 + }, + memory: { + requested: "0Mi", + limit: "0Mi" + }, + architecture: "x86" + }, + state: (if .properties.state == "READY" then "Running" else (.properties.state // "Running") end), + launch_time: (.properties.lastModifiedTimeUtc // ""), + spot: false + }) +}' diff --git a/azure-apps/instance/tests/resources/instance_context.json b/azure-apps/instance/tests/resources/instance_context.json new file mode 100644 index 00000000..e5739c58 --- /dev/null +++ b/azure-apps/instance/tests/resources/instance_context.json @@ -0,0 +1,17 @@ +{ + "arguments": { + "scope_id": "7", + "application_id": "4", + "deployment_id": "8" + }, + "scope": { + "id": 7, + "slug": "development-tools", + "name": "Development tools", + "nrn": "organization=1:account=2:namespace=3:application=4:scope=7" + }, + "tags": { + "namespace": "tools", + "application": "automation" + } +} diff --git a/azure-apps/instance/tests/scripts/build_context_test.bats b/azure-apps/instance/tests/scripts/build_context_test.bats new file mode 100644 index 
00000000..f9821328 --- /dev/null +++ b/azure-apps/instance/tests/scripts/build_context_test.bats @@ -0,0 +1,167 @@ +#!/usr/bin/env bats +# ============================================================================= +# Unit tests for instance/build_context script +# +# Requirements: +# - bats-core: brew install bats-core +# - jq: brew install jq +# +# Run tests: +# bats azure-apps/instance/tests/scripts/build_context_test.bats +# ============================================================================= + +setup() { + TEST_DIR="$(cd "$(dirname "$BATS_TEST_FILENAME")" && pwd)" + PROJECT_DIR="$(cd "$TEST_DIR/../.." && pwd)" + AZURE_APPS_DIR="$(cd "$PROJECT_DIR/.." && pwd)" + PROJECT_ROOT="$(cd "$AZURE_APPS_DIR/.." && pwd)" + + source "$PROJECT_ROOT/testing/assertions.sh" + + SCRIPT_PATH="$PROJECT_DIR/build_context" + MOCKS_DIR="$AZURE_APPS_DIR/deployment/tests/resources/mocks" + RESPONSES_DIR="$MOCKS_DIR/responses" + + # Add mocks to PATH + export PATH="$MOCKS_DIR:$PATH" + + # Load test context (CONTEXT = .notification) + export CONTEXT=$(cat "$PROJECT_DIR/tests/resources/instance_context.json") + + # Set SERVICE_PATH (azure-apps root) + export SERVICE_PATH="$AZURE_APPS_DIR" + + # Set np mock responses (only provider list is called now) + export NP_PROVIDER_RESPONSE="$RESPONSES_DIR/np_provider_list.json" + + # ARM_CLIENT_SECRET is an env var set on the agent (not from provider) + export ARM_CLIENT_SECRET="test-client-secret" + + # Call logs + export AZ_CALL_LOG=$(mktemp) + export NP_CALL_LOG=$(mktemp) + export CURL_CALL_LOG=$(mktemp) + export AZ_MOCK_EXIT_CODE=0 + export NP_MOCK_EXIT_CODE=0 + export CURL_MOCK_EXIT_CODE=0 +} + +teardown() { + rm -f "$AZ_CALL_LOG" "$NP_CALL_LOG" "$CURL_CALL_LOG" +} + +run_build_context() { + source "$SCRIPT_PATH" +} + +# ============================================================================= +# Test: Context extraction +# ============================================================================= +@test "Should export 
SCOPE_ID from context arguments" { + run_build_context + + assert_equal "$SCOPE_ID" "7" +} + +@test "Should export APPLICATION_ID from context arguments" { + run_build_context + + assert_equal "$APPLICATION_ID" "4" +} + +@test "Should export DEPLOYMENT_ID from context arguments" { + run_build_context + + assert_equal "$DEPLOYMENT_ID" "8" +} + +@test "Should default LIMIT to 10" { + run_build_context + + assert_equal "$LIMIT" "10" +} + +# ============================================================================= +# Test: APP_NAME resolution from context slugs +# ============================================================================= +@test "Should resolve APP_NAME from context slugs" { + run_build_context + + assert_equal "$APP_NAME" "tools-automation-development-tools-7" +} + +# ============================================================================= +# Test: np CLI calls (only provider list now) +# ============================================================================= +@test "Should call np provider list with cloud-providers category" { + run_build_context + + local calls + calls=$(cat "$NP_CALL_LOG") + assert_contains "$calls" "provider list --categories cloud-providers" +} + +@test "Should not call np scope read" { + run_build_context + + local calls + calls=$(cat "$NP_CALL_LOG") + if [[ "$calls" == *"scope read"* ]]; then + echo "Expected no 'scope read' call, but found one" + return 1 + fi +} + +# ============================================================================= +# Test: Azure credentials from provider +# ============================================================================= +@test "Should resolve ARM_SUBSCRIPTION_ID from cloud provider" { + run_build_context + + assert_equal "$ARM_SUBSCRIPTION_ID" "test-subscription-id" +} + +@test "Should resolve ARM_CLIENT_ID from cloud provider" { + run_build_context + + assert_equal "$ARM_CLIENT_ID" "test-client-id" +} + +@test "Should resolve AZURE_RESOURCE_GROUP from cloud 
provider" { + run_build_context + + assert_equal "$AZURE_RESOURCE_GROUP" "test-resource-group" +} + +# ============================================================================= +# Test: Azure token via REST API +# ============================================================================= +@test "Should get Azure access token via OAuth endpoint" { + run_build_context + + local calls + calls=$(cat "$CURL_CALL_LOG") + assert_contains "$calls" "login.microsoftonline.com" + assert_contains "$calls" "oauth2" +} + +# ============================================================================= +# Test: Validation +# ============================================================================= +@test "Should fail when scope_id is missing from context" { + export CONTEXT='{"arguments":{},"scope":{"slug":"test"},"tags":{"namespace":"ns","application":"app"}}' + + run source "$SCRIPT_PATH" + + assert_equal "$status" "1" + assert_contains "$output" "Missing required parameter: scope_id" +} + +@test "Should fail when slugs are missing from context" { + export CONTEXT='{"arguments":{"scope_id":"7"},"scope":{},"tags":{}}' + + run source "$SCRIPT_PATH" + + assert_equal "$status" "1" + assert_contains "$output" "Could not extract slugs from context" +} diff --git a/azure-apps/instance/tests/scripts/list_test.bats b/azure-apps/instance/tests/scripts/list_test.bats new file mode 100644 index 00000000..f3c7054f --- /dev/null +++ b/azure-apps/instance/tests/scripts/list_test.bats @@ -0,0 +1,143 @@ +#!/usr/bin/env bats +# ============================================================================= +# Unit tests for instance/list script +# +# Requirements: +# - bats-core: brew install bats-core +# - jq: brew install jq +# +# Run tests: +# bats azure-apps/instance/tests/scripts/list_test.bats +# ============================================================================= + +setup() { + TEST_DIR="$(cd "$(dirname "$BATS_TEST_FILENAME")" && pwd)" + PROJECT_DIR="$(cd 
"$TEST_DIR/../.." && pwd)" + AZURE_APPS_DIR="$(cd "$PROJECT_DIR/.." && pwd)" + PROJECT_ROOT="$(cd "$AZURE_APPS_DIR/.." && pwd)" + + source "$PROJECT_ROOT/testing/assertions.sh" + + SCRIPT_PATH="$PROJECT_DIR/list" + MOCKS_DIR="$AZURE_APPS_DIR/deployment/tests/resources/mocks" + RESPONSES_DIR="$MOCKS_DIR/responses" + + # Add mocks to PATH + export PATH="$MOCKS_DIR:$PATH" + + # Set env vars (normally set by build_context) + export APP_NAME="tools-automation-development-tools-7" + export SCOPE_ID="7" + export APPLICATION_ID="4" + export DEPLOYMENT_ID="8" + export LIMIT="10" + + # Set Azure env vars (normally from build_context via np provider list) + export AZURE_RESOURCE_GROUP="test-resource-group" + export ARM_SUBSCRIPTION_ID="test-subscription-id" + export AZURE_ACCESS_TOKEN="mock-azure-token" + + # Configure curl mock + export CURL_CALL_LOG=$(mktemp) + export CURL_MOCK_EXIT_CODE=0 +} + +teardown() { + rm -f "$CURL_CALL_LOG" +} + +# ============================================================================= +# Test: Azure REST API call +# ============================================================================= +@test "Should call Azure REST API for instances" { + run source "$SCRIPT_PATH" + + assert_equal "$status" "0" + + local calls + calls=$(cat "$CURL_CALL_LOG") + assert_contains "$calls" "/instances" + assert_contains "$calls" "Authorization: Bearer" +} + +# ============================================================================= +# Test: Output structure +# ============================================================================= +@test "Should produce valid JSON output with results array" { + run source "$SCRIPT_PATH" + + assert_equal "$status" "0" + + local has_results + has_results=$(echo "$output" | jq 'has("results")') + assert_equal "$has_results" "true" +} + +@test "Should return correct number of instances" { + run source "$SCRIPT_PATH" + + assert_equal "$status" "0" + + local count + count=$(echo "$output" | jq '.results | length') + 
assert_equal "$count" "2" +} + +@test "Should include instance id in results" { + run source "$SCRIPT_PATH" + + assert_equal "$status" "0" + + local first_id + first_id=$(echo "$output" | jq -r '.results[0].id') + assert_equal "$first_id" "instance1" +} + +@test "Should include selector with scope_id" { + run source "$SCRIPT_PATH" + + assert_equal "$status" "0" + + local scope_id + scope_id=$(echo "$output" | jq -r '.results[0].selector.scope_id') + assert_equal "$scope_id" "7" +} + +@test "Should include dns in details" { + run source "$SCRIPT_PATH" + + assert_equal "$status" "0" + + local dns + dns=$(echo "$output" | jq -r '.results[0].details.dns') + assert_equal "$dns" "tools-automation-development-tools-7.azurewebsites.net" +} + +@test "Should include state in results" { + run source "$SCRIPT_PATH" + + assert_equal "$status" "0" + + local state + state=$(echo "$output" | jq -r '.results[0].state') + assert_equal "$state" "Running" +} + +# ============================================================================= +# Test: Empty instances +# ============================================================================= +@test "Should return empty results when no instances running" { + # Override curl mock for empty response + export CURL_MOCK_RESPONSE=$(mktemp) + echo '{"value":[]}' > "$CURL_MOCK_RESPONSE" + + run source "$SCRIPT_PATH" + + assert_equal "$status" "0" + + local count + count=$(echo "$output" | jq '.results | length') + assert_equal "$count" "0" + + rm -f "$CURL_MOCK_RESPONSE" +} diff --git a/azure-apps/instance/workflows/list.yaml b/azure-apps/instance/workflows/list.yaml new file mode 100644 index 00000000..f02622e7 --- /dev/null +++ b/azure-apps/instance/workflows/list.yaml @@ -0,0 +1,7 @@ +steps: + - name: build context + type: script + file: "$SERVICE_PATH/instance/build_context" + - name: list + type: script + file: "$SERVICE_PATH/instance/list" diff --git a/azure-apps/log/.gitignore b/azure-apps/log/.gitignore new file mode 100644 index 
00000000..ceddaa37 --- /dev/null +++ b/azure-apps/log/.gitignore @@ -0,0 +1 @@ +.cache/ diff --git a/azure-apps/log/build_context b/azure-apps/log/build_context new file mode 100755 index 00000000..1d4130d0 --- /dev/null +++ b/azure-apps/log/build_context @@ -0,0 +1,34 @@ +#!/bin/bash +set -euo pipefail + +# Extract all context fields in a single jq call +NOTIFICATION=$(echo "$NP_ACTION_CONTEXT" | jq -c '.notification') +eval "$(echo "$NOTIFICATION" | jq -r ' + "export SCOPE_ID=" + ((.arguments.scope_id // .scope.id // "") | if type == "array" then .[0] else . end | tostring | @sh) + "\n" + + "export APPLICATION_ID=" + ((.arguments.application_id // .tags.application_id // "") | if type == "array" then .[0] else . end | tostring | @sh) + "\n" + + "export DEPLOYMENT_ID=" + ((.arguments.deployment_id // .scope.active_deployment // "") | if type == "array" then .[0] else . end | tostring | @sh) + "\n" + + "export FILTER_PATTERN=" + (.arguments.filter_pattern // "" | @sh) + "\n" + + "export INSTANCE_ID=" + (.arguments.instance_id // "" | @sh) + "\n" + + "export LIMIT=" + (.arguments.limit // "" | @sh) + "\n" + + "export START_TIME=" + (.arguments.start_time // "" | @sh) + "\n" + + "export END_TIME=" + (.arguments.end_time // "" | @sh) + "\n" + + "export NEXT_PAGE_TOKEN=" + (.arguments.next_page_token // "" | @sh) + "\n" + + "SCOPE_SLUG=" + (.scope.slug // "" | @sh) + "\n" + + "NS_SLUG=" + (.tags.namespace // "" | @sh) + "\n" + + "APP_SLUG=" + (.tags.application // "" | @sh) + "\n" + + "SCOPE_NRN=" + (.scope.nrn // .entity_nrn // "" | @sh) +')" + +# Convert START_TIME and END_TIME from Unix ms to ISO format if numeric +if [ -n "$START_TIME" ] && [[ "$START_TIME" =~ ^[0-9]+$ ]]; then + START_TIME=$(python3 -c "from datetime import datetime, timezone; print(datetime.fromtimestamp($START_TIME/1000, tz=timezone.utc).strftime('%Y-%m-%dT%H:%M:%SZ'))") + export START_TIME +fi +if [ -n "$END_TIME" ] && [[ "$END_TIME" =~ ^[0-9]+$ ]]; then + END_TIME=$(python3 -c "from datetime 
import datetime, timezone; print(datetime.fromtimestamp($END_TIME/1000, tz=timezone.utc).strftime('%Y-%m-%dT%H:%M:%SZ'))") + export END_TIME +fi + +# Use shared helper to resolve Azure context +# shellcheck source=../deployment/scripts/resolve_azure_context +source "$SERVICE_PATH/deployment/scripts/resolve_azure_context" diff --git a/azure-apps/log/log b/azure-apps/log/log new file mode 100755 index 00000000..2acb9056 --- /dev/null +++ b/azure-apps/log/log @@ -0,0 +1,151 @@ +#!/bin/bash +set -euo pipefail + +# Fetch logs from Azure App Service via Kudu Docker logs API (both production and staging slots) + +CACHE_DIR="${SERVICE_PATH}/log/.cache" +CACHE_TTL=3600 # 1 hour in seconds +TEMP_DIR=$(mktemp -d) +trap "rm -rf $TEMP_DIR" EXIT + +# Function to get publishing credentials (with caching) +get_credentials() { + local slot="$1" # "production" or "staging" + local cache_file="${CACHE_DIR}/${APP_NAME}-${slot}-creds.json" + local creds_url + + if [ "$slot" = "staging" ]; then + creds_url="https://management.azure.com/subscriptions/${ARM_SUBSCRIPTION_ID}/resourceGroups/${AZURE_RESOURCE_GROUP}/providers/Microsoft.Web/sites/${APP_NAME}/slots/staging/config/publishingcredentials/list?api-version=2022-03-01" + else + creds_url="https://management.azure.com/subscriptions/${ARM_SUBSCRIPTION_ID}/resourceGroups/${AZURE_RESOURCE_GROUP}/providers/Microsoft.Web/sites/${APP_NAME}/config/publishingcredentials/list?api-version=2022-03-01" + fi + + # Check if cached credentials exist and are fresh + if [ -f "$cache_file" ]; then + local cache_age=$(($(date +%s) - $(stat -c %Y "$cache_file" 2>/dev/null || stat -f %m "$cache_file" 2>/dev/null || echo 0))) + if [ "$cache_age" -lt "$CACHE_TTL" ]; then + cat "$cache_file" + return 0 + fi + fi + + # Fetch fresh credentials + local credentials + credentials=$(curl -s -X POST "$creds_url" \ + -H "Authorization: Bearer ${AZURE_ACCESS_TOKEN}" \ + -H "Content-Type: application/json" \ + -d '{}' 2>/dev/null) + + # Check if valid response (has 
publishingUserName) + if echo "$credentials" | jq -e '.properties.publishingUserName' >/dev/null 2>&1; then + mkdir -p "$CACHE_DIR" + echo "$credentials" > "$cache_file" + echo "$credentials" + return 0 + fi + + return 1 +} + +# Function to fetch logs from a slot +fetch_slot_logs() { + local slot="$1" + local scm_user="$2" + local scm_pass="$3" + local scm_host="$4" + local output_prefix="$5" + + # Fetch log file list from Kudu API + local log_list + log_list=$(curl -s -u "${scm_user}:${scm_pass}" "https://${scm_host}/api/logs/docker" 2>/dev/null || echo "[]") + + # Sort by lastUpdated descending and take top 3 log files + local log_urls + log_urls=$(echo "$log_list" | jq -r '[sort_by(.lastUpdated) | reverse | .[:3] | .[].href] | .[]' 2>/dev/null || echo "") + + # Fetch log files in parallel + local pids=() + local index=0 + for url in $log_urls; do + curl -s -u "${scm_user}:${scm_pass}" "$url" > "${TEMP_DIR}/${output_prefix}_${index}" 2>/dev/null & + pids+=($!) + index=$((index + 1)) + done + + # Wait for all downloads + if [ ${#pids[@]} -gt 0 ]; then + for pid in "${pids[@]}"; do + wait "$pid" 2>/dev/null || true + done + fi +} + +# Fetch production slot credentials and logs +PROD_CREDS=$(get_credentials "production" || echo '{}') +if echo "$PROD_CREDS" | jq -e '.properties.publishingUserName' >/dev/null 2>&1; then + PROD_USER=$(echo "$PROD_CREDS" | jq -r '.properties.publishingUserName') + PROD_PASS=$(echo "$PROD_CREDS" | jq -r '.properties.publishingPassword') + PROD_HOST="${APP_NAME}.scm.azurewebsites.net" + fetch_slot_logs "production" "$PROD_USER" "$PROD_PASS" "$PROD_HOST" "prod_log" +fi + +# Fetch staging slot credentials and logs (may not exist) +STAGING_CREDS=$(get_credentials "staging" || echo '{}') +if echo "$STAGING_CREDS" | jq -e '.properties.publishingUserName' >/dev/null 2>&1; then + STAGING_USER=$(echo "$STAGING_CREDS" | jq -r '.properties.publishingUserName') + STAGING_PASS=$(echo "$STAGING_CREDS" | jq -r '.properties.publishingPassword') + 
STAGING_HOST="${APP_NAME}-staging.scm.azurewebsites.net" + fetch_slot_logs "staging" "$STAGING_USER" "$STAGING_PASS" "$STAGING_HOST" "staging_log" +fi + +# Combine and parse logs from all slots using jq +if ls "${TEMP_DIR}"/*_log_* >/dev/null 2>&1; then + PARSED_LINES=$(cat "${TEMP_DIR}"/*_log_* | jq -R --arg start_time "${START_TIME:-}" --arg end_time "${END_TIME:-}" --arg filter "${FILTER_PATTERN:-}" ' + # For each line, try to extract timestamp + select(length > 0) | + . as $line | + if test("^[0-9]{4}-[0-9]{2}-[0-9]{2}T[0-9]{2}:[0-9]{2}:[0-9]{2}") then + { + datetime: (.[0:19] + "Z"), + message: $line + } + else + empty + end | + # Apply START_TIME filter (keep logs >= start_time) + if ($start_time != "" and .datetime < $start_time) then empty else . end | + # Apply END_TIME filter (keep logs <= end_time) + if ($end_time != "" and .datetime > $end_time) then empty else . end | + # Apply FILTER_PATTERN filter + if ($filter != "" and (.message | test($filter) | not)) then empty else . 
end + ' 2>/dev/null | jq -s 'sort_by(.datetime)' 2>/dev/null || echo "[]") +else + PARSED_LINES="[]" +fi + +# Apply pagination (NEXT_PAGE_TOKEN and LIMIT) +OFFSET=0 +if [ -n "${NEXT_PAGE_TOKEN:-}" ]; then + DECODED=$(echo "$NEXT_PAGE_TOKEN" | base64 -d 2>/dev/null || echo "{}") + OFFSET=$(echo "$DECODED" | jq -r '.offset // 0') +fi + +TOTAL=$(echo "$PARSED_LINES" | jq 'length') +EFFECTIVE_LIMIT=${LIMIT:-1000} + +# Slice the results +RESULTS=$(echo "$PARSED_LINES" | jq --argjson offset "$OFFSET" --argjson limit "$EFFECTIVE_LIMIT" \ + '.[$offset:$offset + $limit]') + +# Determine next_page_token +NEXT_OFFSET=$((OFFSET + EFFECTIVE_LIMIT)) +NEXT_TOKEN="" +if [ "$NEXT_OFFSET" -lt "$TOTAL" ]; then + NEXT_TOKEN=$(echo "{\"offset\":${NEXT_OFFSET}}" | base64 | tr -d '\n') +fi + +# Output in the standard format +jq -n \ + --argjson results "$RESULTS" \ + --arg next_page_token "$NEXT_TOKEN" \ + '{results: $results, next_page_token: $next_page_token}' diff --git a/azure-apps/log/tests/resources/log_action_context.json b/azure-apps/log/tests/resources/log_action_context.json new file mode 100644 index 00000000..af76b2b0 --- /dev/null +++ b/azure-apps/log/tests/resources/log_action_context.json @@ -0,0 +1,26 @@ +{ + "notification": { + "action": "log:read", + "entity_nrn": "organization=1:account=2:namespace=3:application=4:scope=7", + "arguments": { + "scope_id": "7", + "application_id": "4", + "deployment_id": "8", + "filter_pattern": "ERROR", + "instance_id": "", + "limit": "100", + "start_time": "2026-01-27T00:00:00Z", + "next_page_token": "" + }, + "scope": { + "id": 7, + "slug": "development-tools", + "name": "Development tools", + "nrn": "organization=1:account=2:namespace=3:application=4:scope=7" + }, + "tags": { + "namespace": "tools", + "application": "automation" + } + } +} diff --git a/azure-apps/log/tests/scripts/build_context_test.bats b/azure-apps/log/tests/scripts/build_context_test.bats new file mode 100644 index 00000000..28a3c85f --- /dev/null +++ 
b/azure-apps/log/tests/scripts/build_context_test.bats @@ -0,0 +1,207 @@ +#!/usr/bin/env bats +# ============================================================================= +# Unit tests for log/build_context script +# +# Requirements: +# - bats-core: brew install bats-core +# - jq: brew install jq +# +# Run tests: +# bats azure-apps/log/tests/scripts/build_context_test.bats +# ============================================================================= + +setup() { + TEST_DIR="$(cd "$(dirname "$BATS_TEST_FILENAME")" && pwd)" + PROJECT_DIR="$(cd "$TEST_DIR/../.." && pwd)" + AZURE_APPS_DIR="$(cd "$PROJECT_DIR/.." && pwd)" + PROJECT_ROOT="$(cd "$AZURE_APPS_DIR/.." && pwd)" + + source "$PROJECT_ROOT/testing/assertions.sh" + + SCRIPT_PATH="$PROJECT_DIR/build_context" + MOCKS_DIR="$AZURE_APPS_DIR/deployment/tests/resources/mocks" + RESPONSES_DIR="$MOCKS_DIR/responses" + + # Add mocks to PATH + export PATH="$MOCKS_DIR:$PATH" + + # Load test context + export NP_ACTION_CONTEXT=$(cat "$PROJECT_DIR/tests/resources/log_action_context.json") + + # Set SERVICE_PATH (azure-apps root) + export SERVICE_PATH="$AZURE_APPS_DIR" + + # Set np mock responses (only provider list is called now) + export NP_PROVIDER_RESPONSE="$RESPONSES_DIR/np_provider_list.json" + + # ARM_CLIENT_SECRET is an env var set on the agent (not from provider) + export ARM_CLIENT_SECRET="test-client-secret" + + # Call logs + export AZ_CALL_LOG=$(mktemp) + export NP_CALL_LOG=$(mktemp) + export CURL_CALL_LOG=$(mktemp) + export AZ_MOCK_EXIT_CODE=0 + export NP_MOCK_EXIT_CODE=0 + export CURL_MOCK_EXIT_CODE=0 +} + +teardown() { + rm -f "$AZ_CALL_LOG" "$NP_CALL_LOG" "$CURL_CALL_LOG" +} + +run_build_context() { + source "$SCRIPT_PATH" +} + +# ============================================================================= +# Test: Context extraction +# ============================================================================= +@test "Should extract SCOPE_ID from NP_ACTION_CONTEXT" { + run_build_context + + 
assert_equal "$SCOPE_ID" "7" +} + +@test "Should extract APPLICATION_ID from NP_ACTION_CONTEXT" { + run_build_context + + assert_equal "$APPLICATION_ID" "4" +} + +@test "Should extract DEPLOYMENT_ID from NP_ACTION_CONTEXT" { + run_build_context + + assert_equal "$DEPLOYMENT_ID" "8" +} + +@test "Should extract FILTER_PATTERN from NP_ACTION_CONTEXT" { + run_build_context + + assert_equal "$FILTER_PATTERN" "ERROR" +} + +@test "Should extract LIMIT from NP_ACTION_CONTEXT" { + run_build_context + + assert_equal "$LIMIT" "100" +} + +@test "Should extract START_TIME from NP_ACTION_CONTEXT" { + run_build_context + + assert_equal "$START_TIME" "2026-01-27T00:00:00Z" +} + +# ============================================================================= +# Test: Slug extraction from context (no np calls) +# ============================================================================= +@test "Should extract scope slug from context" { + run_build_context + + # APP_NAME is built from slugs, verify it's correct + assert_equal "$APP_NAME" "tools-automation-development-tools-7" +} + +# ============================================================================= +# Test: np CLI calls (only provider list now) +# ============================================================================= +@test "Should call np provider list with cloud-providers category" { + run_build_context + + local calls + calls=$(cat "$NP_CALL_LOG") + assert_contains "$calls" "provider list --categories cloud-providers" +} + +@test "Should not call np scope read" { + run_build_context + + local calls + calls=$(cat "$NP_CALL_LOG") + # Should NOT contain scope read + if [[ "$calls" == *"scope read"* ]]; then + echo "Expected no 'scope read' call, but found one" + return 1 + fi +} + +# ============================================================================= +# Test: Azure credentials from provider +# ============================================================================= +@test "Should resolve 
ARM_SUBSCRIPTION_ID from cloud provider" { + run_build_context + + assert_equal "$ARM_SUBSCRIPTION_ID" "test-subscription-id" +} + +@test "Should resolve ARM_CLIENT_ID from cloud provider" { + run_build_context + + assert_equal "$ARM_CLIENT_ID" "test-client-id" +} + +@test "Should resolve ARM_TENANT_ID from cloud provider" { + run_build_context + + assert_equal "$ARM_TENANT_ID" "test-tenant-id" +} + +@test "Should resolve AZURE_RESOURCE_GROUP from cloud provider" { + run_build_context + + assert_equal "$AZURE_RESOURCE_GROUP" "test-resource-group" +} + +# ============================================================================= +# Test: Azure token via REST API +# ============================================================================= +@test "Should get Azure access token via OAuth endpoint" { + run_build_context + + local calls + calls=$(cat "$CURL_CALL_LOG") + assert_contains "$calls" "login.microsoftonline.com" + assert_contains "$calls" "oauth2" +} + +@test "Should export AZURE_ACCESS_TOKEN" { + run_build_context + + assert_not_empty "$AZURE_ACCESS_TOKEN" "AZURE_ACCESS_TOKEN" +} + +# ============================================================================= +# Test: Validation +# ============================================================================= +@test "Should fail when SCOPE_ID is missing from context" { + export NP_ACTION_CONTEXT='{"notification":{"arguments":{},"scope":{"slug":"test"},"tags":{"namespace":"ns","application":"app"}}}' + + run source "$SCRIPT_PATH" + + assert_equal "$status" "1" + assert_contains "$output" "Missing required parameter: scope_id" +} + +@test "Should fail when slugs are missing from context" { + export NP_ACTION_CONTEXT='{"notification":{"arguments":{"scope_id":"7"},"scope":{},"tags":{}}}' + + run source "$SCRIPT_PATH" + + assert_equal "$status" "1" + assert_contains "$output" "Could not extract slugs from context" +} + +# ============================================================================= +# Test: 
Exports +# ============================================================================= +@test "Should export all required env vars" { + run_build_context + + assert_not_empty "$SCOPE_ID" "SCOPE_ID" + assert_not_empty "$APP_NAME" "APP_NAME" + assert_not_empty "$ARM_SUBSCRIPTION_ID" "ARM_SUBSCRIPTION_ID" + assert_not_empty "$ARM_CLIENT_ID" "ARM_CLIENT_ID" + assert_not_empty "$ARM_TENANT_ID" "ARM_TENANT_ID" + assert_not_empty "$AZURE_RESOURCE_GROUP" "AZURE_RESOURCE_GROUP" +} diff --git a/azure-apps/log/tests/scripts/log_test.bats b/azure-apps/log/tests/scripts/log_test.bats new file mode 100644 index 00000000..271ed131 --- /dev/null +++ b/azure-apps/log/tests/scripts/log_test.bats @@ -0,0 +1,137 @@ +#!/usr/bin/env bats +# ============================================================================= +# Unit tests for log/log script +# +# Requirements: +# - bats-core: brew install bats-core +# - jq: brew install jq +# +# Run tests: +# bats azure-apps/log/tests/scripts/log_test.bats +# ============================================================================= + +setup() { + TEST_DIR="$(cd "$(dirname "$BATS_TEST_FILENAME")" && pwd)" + PROJECT_DIR="$(cd "$TEST_DIR/../.." && pwd)" + AZURE_APPS_DIR="$(cd "$PROJECT_DIR/.." && pwd)" + PROJECT_ROOT="$(cd "$AZURE_APPS_DIR/.." 
&& pwd)" + + source "$PROJECT_ROOT/testing/assertions.sh" + + SCRIPT_PATH="$PROJECT_DIR/log" + MOCKS_DIR="$AZURE_APPS_DIR/deployment/tests/resources/mocks" + RESPONSES_DIR="$MOCKS_DIR/responses" + + # Add mocks to PATH + export PATH="$MOCKS_DIR:$PATH" + + # Set env vars (normally set by build_context) + export APP_NAME="tools-automation-development-tools-7" + export SCOPE_ID="7" + export APPLICATION_ID="4" + export DEPLOYMENT_ID="8" + export FILTER_PATTERN="" + export INSTANCE_ID="" + export LIMIT="" + export START_TIME="" + export NEXT_PAGE_TOKEN="" + + # Set Azure env vars (normally from build_context via np provider list) + export AZURE_RESOURCE_GROUP="test-resource-group" + export ARM_CLIENT_ID="test-client-id" + export ARM_CLIENT_SECRET="test-client-secret" + export ARM_TENANT_ID="test-tenant-id" + export ARM_SUBSCRIPTION_ID="test-subscription-id" + export AZURE_ACCESS_TOKEN="mock-azure-token" + export SERVICE_PATH="$AZURE_APPS_DIR" + + # Configure curl mock + export CURL_CALL_LOG=$(mktemp) + export CURL_MOCK_EXIT_CODE=0 + + # Create a combined curl mock that returns different responses based on URL + export CURL_RESPONSE_DIR=$(mktemp -d) + # Default: return the log list + export CURL_MOCK_RESPONSE="$RESPONSES_DIR/kudu_docker_logs_list.json" +} + +teardown() { + rm -f "$CURL_CALL_LOG" + rm -rf "${CURL_RESPONSE_DIR:-}" + rm -rf "${SERVICE_PATH}/log/.cache" 2>/dev/null || true +} + +# ============================================================================= +# Test: Publishing credentials via REST API +# ============================================================================= +@test "Should fetch publishing credentials via REST API" { + run source "$SCRIPT_PATH" + + local calls + calls=$(cat "$CURL_CALL_LOG") + assert_contains "$calls" "publishingcredentials" + assert_contains "$calls" "Authorization: Bearer" +} + +# ============================================================================= +# Test: Kudu API calls +# 
============================================================================= +@test "Should call Kudu API with correct URL" { + run source "$SCRIPT_PATH" + + local calls + calls=$(cat "$CURL_CALL_LOG") + assert_contains "$calls" "tools-automation-development-tools-7.scm.azurewebsites.net/api/logs/docker" +} + +@test "Should call Kudu API with credentials" { + run source "$SCRIPT_PATH" + + local calls + calls=$(cat "$CURL_CALL_LOG") + assert_contains "$calls" "-u" +} + +# ============================================================================= +# Test: Output format +# ============================================================================= +@test "Should produce valid JSON output with results array" { + # Use a simpler mock: curl returns the log list, then log file content + # For this test, we just verify the structure with the mock returning log list + run source "$SCRIPT_PATH" + + # Output should be valid JSON + local json_valid + json_valid=$(echo "$output" | jq '.' >/dev/null 2>&1 && echo "true" || echo "false") + assert_equal "$json_valid" "true" +} + +@test "Should include results key in output" { + run source "$SCRIPT_PATH" + + local has_results + has_results=$(echo "$output" | jq 'has("results")' 2>/dev/null || echo "false") + assert_equal "$has_results" "true" +} + +@test "Should include next_page_token key in output" { + run source "$SCRIPT_PATH" + + local has_token + has_token=$(echo "$output" | jq 'has("next_page_token")' 2>/dev/null || echo "false") + assert_equal "$has_token" "true" +} + +# ============================================================================= +# Test: Empty results +# ============================================================================= +@test "Should return empty results when no logs available" { + export CURL_MOCK_RESPONSE="$CURL_RESPONSE_DIR/empty_logs.json" + echo "[]" > "$CURL_MOCK_RESPONSE" + + run source "$SCRIPT_PATH" + + local count + count=$(echo "$output" | jq '.results | length' 2>/dev/null || 
echo "-1")
+  assert_equal "$count" "0"
+}
diff --git a/azure-apps/log/workflows/log.yaml b/azure-apps/log/workflows/log.yaml
new file mode 100644
index 00000000..84aa2d12
--- /dev/null
+++ b/azure-apps/log/workflows/log.yaml
@@ -0,0 +1,7 @@
+steps:
+  - name: build context
+    type: script
+    file: "$SERVICE_PATH/log/build_context"
+  - name: logs
+    type: script
+    file: "$SERVICE_PATH/log/log"
diff --git a/azure-apps/metric/build_context b/azure-apps/metric/build_context
new file mode 100755
index 00000000..6dff7403
--- /dev/null
+++ b/azure-apps/metric/build_context
@@ -0,0 +1,40 @@
+#!/bin/bash
+set -euo pipefail
+
+# Extract all context fields in a single jq call
+# CONTEXT = $NP_ACTION_CONTEXT.notification (set by main entrypoint)
+eval "$(echo "$CONTEXT" | jq -r '
+  "export SCOPE_ID=" + ((.arguments.scope_id // .scope.id // "") | if type == "array" then .[0] else . end | tostring | @sh) + "\n" +
+  "export METRIC_NAME=" + (.arguments.metric // "" | @sh) + "\n" +
+  "export START_TIME=" + (.arguments.start_time // "" | @sh) + "\n" +
+  "export END_TIME=" + (.arguments.end_time // "" | @sh) + "\n" +
+  "export INTERVAL=" + (.arguments.interval // "5" | @sh) + "\n" +
+  "SCOPE_SLUG=" + (.scope.slug // "" | @sh) + "\n" +
+  "NS_SLUG=" + (.tags.namespace // "" | @sh) + "\n" +
+  "APP_SLUG=" + (.tags.application // "" | @sh) + "\n" +
+  "SCOPE_NRN=" + (.scope.nrn // .entity_nrn // "" | @sh)
+')"
+
+# Convert a Unix-epoch-milliseconds value to ISO-8601 UTC. Non-numeric
+# values (already-ISO strings, or empty) pass through unchanged.
+ms_to_iso() {
+  local value="$1"
+  if [[ "$value" =~ ^[0-9]+$ ]]; then
+    python3 -c "from datetime import datetime, timezone; print(datetime.fromtimestamp($value/1000, tz=timezone.utc).strftime('%Y-%m-%dT%H:%M:%SZ'))"
+  else
+    printf '%s' "$value"
+  fi
+}
+
+START_TIME=$(ms_to_iso "$START_TIME")
+export START_TIME
+END_TIME=$(ms_to_iso "$END_TIME")
+export END_TIME
+
+# Use shared helper to resolve Azure context
+# shellcheck source=../deployment/scripts/resolve_azure_context
+source "$SERVICE_PATH/deployment/scripts/resolve_azure_context"
+
+# Build Azure Resource ID
+AZURE_RESOURCE_ID="/subscriptions/${ARM_SUBSCRIPTION_ID}/resourceGroups/${AZURE_RESOURCE_GROUP}/providers/Microsoft.Web/sites/${APP_NAME}"
+export AZURE_RESOURCE_ID
diff --git a/azure-apps/metric/list b/azure-apps/metric/list
new file mode 100755
index 00000000..6f4cea07
--- /dev/null
+++ b/azure-apps/metric/list
@@ -0,0 +1,69 @@
+#!/bin/bash
+
+echo '{
+  "results": [
+    {
+      "name": "http.rpm",
+      "title": "Throughput",
+      "unit": "rpm",
+      "available_filters": ["scope_id"],
+      "available_group_by": []
+    },
+    {
+      "name": "http.response_time",
+      "title": "Response time",
+      "unit": "seconds",
+      "available_filters": ["scope_id"],
+      "available_group_by": []
+    },
+    {
+      "name": "http.error_rate",
+      "title": "Error rate",
+      "unit": "%",
+      "available_filters": ["scope_id"],
+      "available_group_by": []
+    },
+    {
+      "name": "http.request_count",
+      "title": "Request count",
+      "unit": "count",
+      "available_filters": ["scope_id"],
+      "available_group_by": []
+    },
+    {
+      "name": "http.5xx_count",
+      "title": "5xx errors",
+      "unit": "count",
+      "available_filters": ["scope_id"],
+      "available_group_by": []
+    },
+    {
+      "name": "http.4xx_count",
+      "title": "4xx errors",
+      "unit": "count",
+      "available_filters": ["scope_id"],
+      "available_group_by": []
+    },
+    {
+      "name": "system.cpu_usage_percentage",
+      "title": "CPU usage",
+      "unit": "%",
+      "available_filters": ["scope_id"],
+      "available_group_by": []
+    },
+    {
+      "name": "system.memory_usage_percentage",
+      "title": "Memory usage percentage",
+      "unit": "%",
+      "available_filters": ["scope_id"],
+      "available_group_by": []
+    },
+    {
+      "name": "system.health_check_status",
+      "title": "Health check status",
+      "unit": "%",
+      "available_filters": ["scope_id"],
+      "available_group_by": []
+    }
+  ]
+}'
diff --git a/azure-apps/metric/metric b/azure-apps/metric/metric
new file mode 100755
index 00000000..f6a0902f
--- /dev/null
+++ b/azure-apps/metric/metric
@@ -0,0 +1,226 @@
+#!/bin/bash
+set -euo pipefail
+
+# Query Azure Monitor metrics for App Service (both production and staging slots)
+
+if [ -z "${METRIC_NAME:-}" ]; then
+  echo '{"metric":"","type":"","period_in_seconds":0,"unit":"","results":[]}' >&2
+  exit 1
+fi
+
+# Map nullplatform metric names to Azure Monitor metrics
+# Returns: azure_metric aggregation unit type resource_type
+# resource_type: "site" or "plan" (plan metrics are shared across slots)
+get_azure_metric_config() {
+  local metric="$1"
+  case "$metric" in
+    "system.cpu_usage_percentage")
+      echo "CpuPercentage Average percent gauge plan"
+      ;;
+    "system.memory_usage_percentage")
+      echo "MemoryPercentage Average percent gauge plan"
+      ;;
+    "http.response_time")
+      echo "HttpResponseTime Average seconds gauge site"
+      ;;
+    "http.rpm")
+      echo "Requests Total rpm gauge site"
+      ;;
+    "http.error_rate")
+      echo "COMPUTED Total percent gauge site"
+      ;;
+    "http.request_count")
+      echo "Requests Total count gauge site"
+      ;;
+    "http.5xx_count")
+      echo "Http5xx Total count gauge site"
+      ;;
+    "http.4xx_count")
+      echo "Http4xx Total count gauge site"
+      ;;
+    "system.health_check_status")
+      echo "HealthCheckStatus Average percent gauge site"
+      ;;
+    *)
+      echo ""
+      ;;
+  esac
+}
+
+CONFIG=$(get_azure_metric_config "$METRIC_NAME")
+
+if [ -z "$CONFIG" ]; then
+  echo "Error: Unknown metric: $METRIC_NAME" >&2
+  exit 1
+fi
+
+# Split the five space-separated config fields in a single read
+read -r AZURE_METRIC AGGREGATION UNIT METRIC_TYPE RESOURCE_TYPE <<<"$CONFIG"
+
+# Calculate interval
+INTERVAL_MINUTES=${INTERVAL:-5}
+PERIOD_SECONDS=$((INTERVAL_MINUTES * 60))
+TIMESPAN="${START_TIME}/${END_TIME}"
+
+# Resource IDs
+PROD_RESOURCE_ID="$AZURE_RESOURCE_ID"
+STAGING_RESOURCE_ID="${AZURE_RESOURCE_ID}/slots/staging"
+
+# Get App Service Plan ID (for plan-level metrics)
+get_plan_id() {
+  local SITE_URL="https://management.azure.com${AZURE_RESOURCE_ID}?api-version=2022-03-01"
+  local SITE_INFO
+  SITE_INFO=$(curl -s "$SITE_URL" \
+    -H "Authorization: Bearer ${AZURE_ACCESS_TOKEN}" \
+    -H "Content-Type: application/json")
+  echo "$SITE_INFO" | jq -r '.properties.serverFarmId // empty'
+}
+
+# Query Azure Monitor via REST API
+query_metric() {
+  local metric="$1"
+  local aggregation="$2"
+  local resource_id="$3"
+
+  local METRICS_URL="https://management.azure.com${resource_id}/providers/microsoft.insights/metrics"
+
+  curl -s "${METRICS_URL}?api-version=2018-01-01&metricnames=${metric}&aggregation=${aggregation}&timespan=${TIMESPAN}&interval=PT${INTERVAL_MINUTES}M" \
+    -H "Authorization: Bearer ${AZURE_ACCESS_TOKEN}" \
+    -H "Content-Type: application/json" 2>/dev/null || echo '{"value":[]}'
+}
+
+# Transform Azure Monitor response to standard format with slot selector
+transform_response() {
+  local response="$1"
+  local aggregation_key="$2"
+  local slot="$3"
+
+  echo "$response" | jq --arg agg "$aggregation_key" --arg slot "$slot" '
+    if (.value[0].timeseries | length) > 0 then
+      [.value[0].timeseries[] | {
+        selector: {slot: $slot},
+        data: [
+          .data[] |
+          select(.[$agg] != null) |
+          {
+            timestamp: .timeStamp,
+            value: .[$agg]
+          }
+        ]
+      }]
+    else
+      []
+    end
+  ' 2>/dev/null || echo '[]'
+}
+
+# Compute Http5xx / Requests * 100 per data point for one slot.
+# Assumes both responses cover the same timestamps (both queried with the
+# same timespan and interval). Rate is 0 when there is no traffic.
+# $1 = slot label, $2 = Http5xx response JSON, $3 = Requests response JSON
+compute_error_rate() {
+  local slot="$1"
+  local r5xx="$2"
+  local rreq="$3"
+
+  jq -n --arg slot "$slot" --argjson r5xx "$r5xx" --argjson rreq "$rreq" '
+    if ($r5xx.value[0].timeseries | length) > 0 then
+      [{
+        selector: {slot: $slot},
+        data: [
+          range($r5xx.value[0].timeseries[0].data | length) |
+          . as $i |
+          {
+            timestamp: $r5xx.value[0].timeseries[0].data[$i].timeStamp,
+            value: (
+              if ($rreq.value[0].timeseries[0].data[$i].total // 0) > 0 then
+                ($r5xx.value[0].timeseries[0].data[$i].total // 0) / ($rreq.value[0].timeseries[0].data[$i].total) * 100
+              else
+                0
+              end
+            )
+          }
+        ]
+      }]
+    else [] end' 2>/dev/null || echo '[]'
+}
+
+# Convert a Requests/Total response into requests-per-minute for one slot.
+# $1 = slot label, $2 = Requests response JSON
+transform_rpm() {
+  local slot="$1"
+  local response="$2"
+
+  echo "$response" | jq --arg slot "$slot" --argjson interval "$INTERVAL_MINUTES" '
+    if (.value[0].timeseries | length) > 0 then
+      [.value[0].timeseries[] | {
+        selector: {slot: $slot},
+        data: [
+          .data[] |
+          select(.total != null) |
+          {
+            timestamp: .timeStamp,
+            value: (.total / $interval)
+          }
+        ]
+      }]
+    else [] end' 2>/dev/null || echo '[]'
+}
+
+AGG_KEY=$(echo "$AGGREGATION" | tr '[:upper:]' '[:lower:]')
+
+if [ "$RESOURCE_TYPE" = "plan" ]; then
+  # Plan-level metrics (CPU/Memory) - shared across all slots
+  PLAN_ID=$(get_plan_id)
+  if [ -z "$PLAN_ID" ]; then
+    echo "Error: Could not get App Service Plan ID from site" >&2
+    exit 1
+  fi
+
+  RESPONSE=$(query_metric "$AZURE_METRIC" "$AGGREGATION" "$PLAN_ID")
+  RESULTS=$(transform_response "$RESPONSE" "$AGG_KEY" "all")
+
+elif [ "$METRIC_NAME" = "http.error_rate" ]; then
+  # Special case: compute Http5xx / Requests * 100 for each slot
+  PROD_5XX=$(query_metric "Http5xx" "Total" "$PROD_RESOURCE_ID")
+  PROD_REQ=$(query_metric "Requests" "Total" "$PROD_RESOURCE_ID")
+  PROD_RESULTS=$(compute_error_rate "production" "$PROD_5XX" "$PROD_REQ")
+
+  # Staging slot may not exist; helper yields [] for an empty timeseries
+  STAGING_5XX=$(query_metric "Http5xx" "Total" "$STAGING_RESOURCE_ID")
+  STAGING_REQ=$(query_metric "Requests" "Total" "$STAGING_RESOURCE_ID")
+  STAGING_RESULTS=$(compute_error_rate "staging" "$STAGING_5XX" "$STAGING_REQ")
+
+  RESULTS=$(jq -n --argjson prod "$PROD_RESULTS" --argjson staging "$STAGING_RESULTS" '$prod + $staging')
+
+elif [ "$METRIC_NAME" = "http.rpm" ]; then
+  # Special case: Total requests / interval_minutes for each slot
+  PROD_RESULTS=$(transform_rpm "production" "$(query_metric "Requests" "Total" "$PROD_RESOURCE_ID")")
+  STAGING_RESULTS=$(transform_rpm "staging" "$(query_metric "Requests" "Total" "$STAGING_RESOURCE_ID")")
+
+  RESULTS=$(jq -n --argjson prod "$PROD_RESULTS" --argjson staging "$STAGING_RESULTS" '$prod + $staging')
+
+else
+  # Standard site-level metrics - query both slots
+  PROD_RESPONSE=$(query_metric "$AZURE_METRIC" "$AGGREGATION" "$PROD_RESOURCE_ID")
+  PROD_RESULTS=$(transform_response "$PROD_RESPONSE" "$AGG_KEY" "production")
+
+  STAGING_RESPONSE=$(query_metric "$AZURE_METRIC" "$AGGREGATION" "$STAGING_RESOURCE_ID")
+  STAGING_RESULTS=$(transform_response "$STAGING_RESPONSE" "$AGG_KEY" "staging")
+
+  RESULTS=$(jq -n --argjson prod "$PROD_RESULTS" --argjson staging "$STAGING_RESULTS" '$prod + $staging')
+fi
+
+# Output in standard format
+jq -n \
+  --arg metric "$METRIC_NAME" \
+  --arg type "$METRIC_TYPE" \
+  --argjson period "$PERIOD_SECONDS" \
+  --arg unit "$UNIT" \
+  --argjson results "$RESULTS" \
+  '{
+    metric: $metric,
+    type: $type,
+    period_in_seconds: $period,
+    unit: $unit,
+    results: $results
+  }'
diff --git a/azure-apps/metric/tests/resources/metric_context.json b/azure-apps/metric/tests/resources/metric_context.json
new file mode 100644
index 00000000..fef1aadb
--- /dev/null
+++ b/azure-apps/metric/tests/resources/metric_context.json
@@ -0,0 +1,19 @@
+{
+  "arguments": {
+    "scope_id": "7",
+    "metric": "system.cpu_usage_percentage",
+    "start_time": "2026-01-27T00:00:00Z",
+    "end_time": "2026-01-27T01:00:00Z",
+    "interval": "5"
+  },
+  "scope": {
+    "id": 7,
+    "slug": "development-tools",
+    "name": "Development tools",
+    "nrn": "organization=1:account=2:namespace=3:application=4:scope=7"
+  },
+  "tags": {
+    "namespace": "tools",
+    "application": "automation"
+  }
+}
diff --git a/azure-apps/metric/tests/scripts/build_context_test.bats b/azure-apps/metric/tests/scripts/build_context_test.bats
new file mode 100644
index 00000000..d6544284
--- /dev/null
+++ b/azure-apps/metric/tests/scripts/build_context_test.bats
@@ -0,0 +1,171 @@
+#!/usr/bin/env bats
+# =============================================================================
+# Unit tests for metric/build_context script
+#
+# Requirements:
+#   - bats-core: brew install bats-core
+#   - jq: brew install jq
+#
+# Run tests:
+#   bats azure-apps/metric/tests/scripts/build_context_test.bats
+# =============================================================================
+
+setup() {
+  TEST_DIR="$(cd "$(dirname "$BATS_TEST_FILENAME")" && pwd)"
+  PROJECT_DIR="$(cd "$TEST_DIR/../.." && pwd)"
+  AZURE_APPS_DIR="$(cd "$PROJECT_DIR/.." && pwd)"
+  PROJECT_ROOT="$(cd "$AZURE_APPS_DIR/.." 
&& pwd)" + + source "$PROJECT_ROOT/testing/assertions.sh" + + SCRIPT_PATH="$PROJECT_DIR/build_context" + MOCKS_DIR="$AZURE_APPS_DIR/deployment/tests/resources/mocks" + RESPONSES_DIR="$MOCKS_DIR/responses" + + # Add mocks to PATH + export PATH="$MOCKS_DIR:$PATH" + + # Load test context (CONTEXT = .notification) + export CONTEXT=$(cat "$PROJECT_DIR/tests/resources/metric_context.json") + + # Set SERVICE_PATH (azure-apps root) + export SERVICE_PATH="$AZURE_APPS_DIR" + + # Set np mock responses (only provider list is called now) + export NP_PROVIDER_RESPONSE="$RESPONSES_DIR/np_provider_list.json" + + # ARM_CLIENT_SECRET is an env var set on the agent (not from provider) + export ARM_CLIENT_SECRET="test-client-secret" + + # Call logs + export AZ_CALL_LOG=$(mktemp) + export NP_CALL_LOG=$(mktemp) + export CURL_CALL_LOG=$(mktemp) + export AZ_MOCK_EXIT_CODE=0 + export NP_MOCK_EXIT_CODE=0 + export CURL_MOCK_EXIT_CODE=0 +} + +teardown() { + rm -f "$AZ_CALL_LOG" "$NP_CALL_LOG" "$CURL_CALL_LOG" +} + +run_build_context() { + source "$SCRIPT_PATH" +} + +# ============================================================================= +# Test: Context extraction - arguments as env vars +# ============================================================================= +@test "Should export SCOPE_ID from context arguments" { + run_build_context + + assert_equal "$SCOPE_ID" "7" +} + +@test "Should export START_TIME from context arguments" { + run_build_context + + assert_equal "$START_TIME" "2026-01-27T00:00:00Z" +} + +@test "Should export END_TIME from context arguments" { + run_build_context + + assert_equal "$END_TIME" "2026-01-27T01:00:00Z" +} + +@test "Should export INTERVAL from context arguments" { + run_build_context + + assert_equal "$INTERVAL" "5" +} + +# ============================================================================= +# Test: METRIC to METRIC_NAME mapping +# ============================================================================= +@test "Should map METRIC to 
METRIC_NAME" { + run_build_context + + assert_equal "$METRIC_NAME" "system.cpu_usage_percentage" +} + +# ============================================================================= +# Test: APP_NAME resolution from context slugs +# ============================================================================= +@test "Should resolve APP_NAME from context slugs" { + run_build_context + + assert_equal "$APP_NAME" "tools-automation-development-tools-7" +} + +# ============================================================================= +# Test: AZURE_RESOURCE_ID +# ============================================================================= +@test "Should build AZURE_RESOURCE_ID from resolved credentials and APP_NAME" { + run_build_context + + local expected="/subscriptions/test-subscription-id/resourceGroups/test-resource-group/providers/Microsoft.Web/sites/tools-automation-development-tools-7" + assert_equal "$AZURE_RESOURCE_ID" "$expected" +} + +# ============================================================================= +# Test: np CLI calls (only provider list now) +# ============================================================================= +@test "Should call np provider list with cloud-providers category" { + run_build_context + + local calls + calls=$(cat "$NP_CALL_LOG") + assert_contains "$calls" "provider list --categories cloud-providers" +} + +@test "Should not call np scope read" { + run_build_context + + local calls + calls=$(cat "$NP_CALL_LOG") + if [[ "$calls" == *"scope read"* ]]; then + echo "Expected no 'scope read' call, but found one" + return 1 + fi +} + +# ============================================================================= +# Test: Azure token via REST API +# ============================================================================= +@test "Should get Azure access token via OAuth endpoint" { + run_build_context + + local calls + calls=$(cat "$CURL_CALL_LOG") + assert_contains "$calls" "login.microsoftonline.com" + 
assert_contains "$calls" "oauth2" +} + +@test "Should export AZURE_ACCESS_TOKEN" { + run_build_context + + assert_not_empty "$AZURE_ACCESS_TOKEN" "AZURE_ACCESS_TOKEN" +} + +# ============================================================================= +# Test: Validation +# ============================================================================= +@test "Should fail when scope_id is missing from context" { + export CONTEXT='{"arguments":{},"scope":{"slug":"test"},"tags":{"namespace":"ns","application":"app"}}' + + run source "$SCRIPT_PATH" + + assert_equal "$status" "1" + assert_contains "$output" "Missing required parameter: scope_id" +} + +@test "Should fail when slugs are missing from context" { + export CONTEXT='{"arguments":{"scope_id":"7"},"scope":{},"tags":{}}' + + run source "$SCRIPT_PATH" + + assert_equal "$status" "1" + assert_contains "$output" "Could not extract slugs from context" +} diff --git a/azure-apps/metric/tests/scripts/list_test.bats b/azure-apps/metric/tests/scripts/list_test.bats new file mode 100644 index 00000000..7edcc9d2 --- /dev/null +++ b/azure-apps/metric/tests/scripts/list_test.bats @@ -0,0 +1,180 @@ +#!/usr/bin/env bats +# ============================================================================= +# Unit tests for metric/list script +# +# Requirements: +# - bats-core: brew install bats-core +# - jq: brew install jq +# +# Run tests: +# bats azure-apps/metric/tests/scripts/list_test.bats +# ============================================================================= + +setup() { + TEST_DIR="$(cd "$(dirname "$BATS_TEST_FILENAME")" && pwd)" + PROJECT_DIR="$(cd "$TEST_DIR/../.." && pwd)" + AZURE_APPS_DIR="$(cd "$PROJECT_DIR/.." && pwd)" + PROJECT_ROOT="$(cd "$AZURE_APPS_DIR/.." 
&& pwd)" + + source "$PROJECT_ROOT/testing/assertions.sh" + + SCRIPT_PATH="$PROJECT_DIR/list" +} + +# ============================================================================= +# Test: Full output structure +# ============================================================================= +@test "Should produce valid JSON output" { + run source "$SCRIPT_PATH" + + assert_equal "$status" "0" + + local expected_json + expected_json=$(cat <<'EOF' +{ + "results": [ + { + "name": "http.rpm", + "title": "Throughput", + "unit": "rpm", + "available_filters": ["scope_id"], + "available_group_by": [] + }, + { + "name": "http.response_time", + "title": "Response time", + "unit": "seconds", + "available_filters": ["scope_id"], + "available_group_by": [] + }, + { + "name": "http.error_rate", + "title": "Error rate", + "unit": "%", + "available_filters": ["scope_id"], + "available_group_by": [] + }, + { + "name": "http.request_count", + "title": "Request count", + "unit": "count", + "available_filters": ["scope_id"], + "available_group_by": [] + }, + { + "name": "http.5xx_count", + "title": "5xx errors", + "unit": "count", + "available_filters": ["scope_id"], + "available_group_by": [] + }, + { + "name": "http.4xx_count", + "title": "4xx errors", + "unit": "count", + "available_filters": ["scope_id"], + "available_group_by": [] + }, + { + "name": "system.cpu_usage_percentage", + "title": "CPU usage", + "unit": "%", + "available_filters": ["scope_id"], + "available_group_by": [] + }, + { + "name": "system.memory_usage_percentage", + "title": "Memory usage percentage", + "unit": "%", + "available_filters": ["scope_id"], + "available_group_by": [] + }, + { + "name": "system.health_check_status", + "title": "Health check status", + "unit": "%", + "available_filters": ["scope_id"], + "available_group_by": [] + } + ] +} +EOF +) + + assert_json_equal "$output" "$expected_json" "Metric list output" +} + +# ============================================================================= +# 
Test: Metric count +# ============================================================================= +@test "Should include all 9 metrics in the catalog" { + run source "$SCRIPT_PATH" + + assert_equal "$status" "0" + + local count + count=$(echo "$output" | jq '.results | length') + assert_equal "$count" "9" +} + +# ============================================================================= +# Test: Filters +# ============================================================================= +@test "Should set available_filters to scope_id for all metrics" { + run source "$SCRIPT_PATH" + + assert_equal "$status" "0" + + # Check that all metrics have ["scope_id"] as available_filters + local all_match + all_match=$(echo "$output" | jq '[.results[] | .available_filters == ["scope_id"]] | all') + assert_equal "$all_match" "true" +} + +# ============================================================================= +# Test: Group by +# ============================================================================= +@test "Should set available_group_by to empty array for all metrics" { + run source "$SCRIPT_PATH" + + assert_equal "$status" "0" + + local all_empty + all_empty=$(echo "$output" | jq '[.results[] | .available_group_by == []] | all') + assert_equal "$all_empty" "true" +} + +# ============================================================================= +# Test: Individual metrics exist +# ============================================================================= +@test "Should include http.rpm metric" { + run source "$SCRIPT_PATH" + + local has_metric + has_metric=$(echo "$output" | jq '[.results[] | select(.name == "http.rpm")] | length') + assert_equal "$has_metric" "1" +} + +@test "Should include http.error_rate metric" { + run source "$SCRIPT_PATH" + + local has_metric + has_metric=$(echo "$output" | jq '[.results[] | select(.name == "http.error_rate")] | length') + assert_equal "$has_metric" "1" +} + +@test "Should include system.cpu_usage_percentage 
metric" { + run source "$SCRIPT_PATH" + + local has_metric + has_metric=$(echo "$output" | jq '[.results[] | select(.name == "system.cpu_usage_percentage")] | length') + assert_equal "$has_metric" "1" +} + +@test "Should include system.health_check_status metric" { + run source "$SCRIPT_PATH" + + local has_metric + has_metric=$(echo "$output" | jq '[.results[] | select(.name == "system.health_check_status")] | length') + assert_equal "$has_metric" "1" +} diff --git a/azure-apps/metric/tests/scripts/metric_test.bats b/azure-apps/metric/tests/scripts/metric_test.bats new file mode 100644 index 00000000..7e514df2 --- /dev/null +++ b/azure-apps/metric/tests/scripts/metric_test.bats @@ -0,0 +1,241 @@ +#!/usr/bin/env bats +# ============================================================================= +# Unit tests for metric/metric script +# +# Requirements: +# - bats-core: brew install bats-core +# - jq: brew install jq +# +# Run tests: +# bats azure-apps/metric/tests/scripts/metric_test.bats +# ============================================================================= + +setup() { + TEST_DIR="$(cd "$(dirname "$BATS_TEST_FILENAME")" && pwd)" + PROJECT_DIR="$(cd "$TEST_DIR/../.." && pwd)" + AZURE_APPS_DIR="$(cd "$PROJECT_DIR/.." && pwd)" + PROJECT_ROOT="$(cd "$AZURE_APPS_DIR/.." 
&& pwd)" + + source "$PROJECT_ROOT/testing/assertions.sh" + + SCRIPT_PATH="$PROJECT_DIR/metric" + MOCKS_DIR="$AZURE_APPS_DIR/deployment/tests/resources/mocks" + RESPONSES_DIR="$MOCKS_DIR/responses" + + # Add mocks to PATH + export PATH="$MOCKS_DIR:$PATH" + + # Set env vars (normally set by build_context) + export METRIC_NAME="system.cpu_usage_percentage" + export START_TIME="2026-01-27T00:00:00Z" + export END_TIME="2026-01-27T01:00:00Z" + export INTERVAL="5" + export AZURE_RESOURCE_ID="/subscriptions/test-subscription-id/resourceGroups/test-resource-group/providers/Microsoft.Web/sites/tools-automation-development-tools-7" + export AZURE_ACCESS_TOKEN="mock-azure-token" + + # Configure curl mock for Azure Monitor metrics + export CURL_CALL_LOG=$(mktemp) + export CURL_MOCK_EXIT_CODE=0 +} + +teardown() { + rm -f "$CURL_CALL_LOG" +} + +# ============================================================================= +# Test: Output structure +# ============================================================================= +@test "Should produce valid JSON output with correct structure" { + run source "$SCRIPT_PATH" + + assert_equal "$status" "0" + + # Validate JSON structure + local metric + metric=$(echo "$output" | jq -r '.metric') + assert_equal "$metric" "system.cpu_usage_percentage" + + local type + type=$(echo "$output" | jq -r '.type') + assert_equal "$type" "gauge" + + local period + period=$(echo "$output" | jq '.period_in_seconds') + assert_equal "$period" "300" + + local unit + unit=$(echo "$output" | jq -r '.unit') + assert_equal "$unit" "percent" + + local has_results + has_results=$(echo "$output" | jq 'has("results")') + assert_equal "$has_results" "true" +} + +# ============================================================================= +# Test: Metric mapping +# ============================================================================= +@test "Should map system.cpu_usage_percentage to CpuPercentage" { + export METRIC_NAME="system.cpu_usage_percentage" 
+ + run source "$SCRIPT_PATH" + + assert_equal "$status" "0" + + local calls + calls=$(cat "$CURL_CALL_LOG") + assert_contains "$calls" "metricnames=CpuPercentage" +} + +@test "Should map system.memory_usage_percentage to MemoryPercentage" { + export METRIC_NAME="system.memory_usage_percentage" + + run source "$SCRIPT_PATH" + + assert_equal "$status" "0" + + local calls + calls=$(cat "$CURL_CALL_LOG") + assert_contains "$calls" "metricnames=MemoryPercentage" +} + +@test "Should map http.response_time to HttpResponseTime" { + export METRIC_NAME="http.response_time" + + run source "$SCRIPT_PATH" + + assert_equal "$status" "0" + + local calls + calls=$(cat "$CURL_CALL_LOG") + assert_contains "$calls" "metricnames=HttpResponseTime" +} + +@test "Should map http.request_count to Requests" { + export METRIC_NAME="http.request_count" + + run source "$SCRIPT_PATH" + + assert_equal "$status" "0" + + local calls + calls=$(cat "$CURL_CALL_LOG") + assert_contains "$calls" "metricnames=Requests" +} + +@test "Should map http.5xx_count to Http5xx" { + export METRIC_NAME="http.5xx_count" + + run source "$SCRIPT_PATH" + + assert_equal "$status" "0" + + local calls + calls=$(cat "$CURL_CALL_LOG") + assert_contains "$calls" "metricnames=Http5xx" +} + +@test "Should map http.4xx_count to Http4xx" { + export METRIC_NAME="http.4xx_count" + + run source "$SCRIPT_PATH" + + assert_equal "$status" "0" + + local calls + calls=$(cat "$CURL_CALL_LOG") + assert_contains "$calls" "metricnames=Http4xx" +} + +@test "Should map system.health_check_status to HealthCheckStatus" { + export METRIC_NAME="system.health_check_status" + + run source "$SCRIPT_PATH" + + assert_equal "$status" "0" + + local calls + calls=$(cat "$CURL_CALL_LOG") + assert_contains "$calls" "metricnames=HealthCheckStatus" +} + +# ============================================================================= +# Test: Azure Monitor REST API call parameters +# 
============================================================================= +@test "Should call Azure Monitor REST API with correct resource" { + run source "$SCRIPT_PATH" + + assert_equal "$status" "0" + + local calls + calls=$(cat "$CURL_CALL_LOG") + assert_contains "$calls" "microsoft.insights/metrics" +} + +@test "Should call Azure Monitor REST API with correct time range" { + run source "$SCRIPT_PATH" + + assert_equal "$status" "0" + + local calls + calls=$(cat "$CURL_CALL_LOG") + assert_contains "$calls" "timespan=2026-01-27T00:00:00Z/2026-01-27T01:00:00Z" +} + +@test "Should call Azure Monitor REST API with correct interval" { + run source "$SCRIPT_PATH" + + assert_equal "$status" "0" + + local calls + calls=$(cat "$CURL_CALL_LOG") + assert_contains "$calls" "interval=PT5M" +} + +# ============================================================================= +# Test: Special cases +# ============================================================================= +@test "Should compute http.rpm from Requests total divided by interval_minutes" { + export METRIC_NAME="http.rpm" + + run source "$SCRIPT_PATH" + + assert_equal "$status" "0" + + local calls + calls=$(cat "$CURL_CALL_LOG") + assert_contains "$calls" "metricnames=Requests" + assert_contains "$calls" "aggregation=Total" +} + +@test "Should compute http.error_rate from Http5xx and Requests" { + export METRIC_NAME="http.error_rate" + + run source "$SCRIPT_PATH" + + assert_equal "$status" "0" + + local calls + calls=$(cat "$CURL_CALL_LOG") + assert_contains "$calls" "metricnames=Http5xx" +} + +# ============================================================================= +# Test: Validation +# ============================================================================= +@test "Should fail for unknown metric name" { + export METRIC_NAME="unknown.metric" + + run source "$SCRIPT_PATH" + + assert_equal "$status" "1" + assert_contains "$output" "Unknown metric: unknown.metric" +} + +@test "Should fail when 
METRIC_NAME is empty" { + export METRIC_NAME="" + + run source "$SCRIPT_PATH" + + assert_equal "$status" "1" +} diff --git a/azure-apps/metric/workflows/list.yaml b/azure-apps/metric/workflows/list.yaml new file mode 100644 index 00000000..4bf5a653 --- /dev/null +++ b/azure-apps/metric/workflows/list.yaml @@ -0,0 +1,4 @@ +steps: + - name: metrics + type: script + file: "$SERVICE_PATH/metric/list" diff --git a/azure-apps/metric/workflows/metric.yaml b/azure-apps/metric/workflows/metric.yaml new file mode 100644 index 00000000..94217cb2 --- /dev/null +++ b/azure-apps/metric/workflows/metric.yaml @@ -0,0 +1,7 @@ +steps: + - name: build context + type: script + file: "$SERVICE_PATH/metric/build_context" + - name: metric + type: script + file: "$SERVICE_PATH/metric/metric" diff --git a/azure-apps/no_op b/azure-apps/no_op new file mode 100755 index 00000000..b5d29148 --- /dev/null +++ b/azure-apps/no_op @@ -0,0 +1,4 @@ +#!/bin/bash +# No operation - placeholder for workflows that don't need to do anything +echo "No operation required" +exit 0 diff --git a/azure-apps/scope/workflows/create.yaml b/azure-apps/scope/workflows/create.yaml new file mode 100644 index 00000000..6e0c764e --- /dev/null +++ b/azure-apps/scope/workflows/create.yaml @@ -0,0 +1,4 @@ +steps: + - name: no_op + type: command + command: "$SERVICE_PATH/no_op" diff --git a/azure-apps/scope/workflows/delete.yaml b/azure-apps/scope/workflows/delete.yaml new file mode 100644 index 00000000..d72ecbbf --- /dev/null +++ b/azure-apps/scope/workflows/delete.yaml @@ -0,0 +1,2 @@ +include: + - "$SERVICE_PATH/scope/workflows/create.yaml" diff --git a/azure-apps/scope/workflows/diagnose.yaml b/azure-apps/scope/workflows/diagnose.yaml new file mode 100644 index 00000000..d72ecbbf --- /dev/null +++ b/azure-apps/scope/workflows/diagnose.yaml @@ -0,0 +1,2 @@ +include: + - "$SERVICE_PATH/scope/workflows/create.yaml" diff --git a/azure-apps/scope/workflows/update.yaml b/azure-apps/scope/workflows/update.yaml new file mode 
100644 index 00000000..d72ecbbf --- /dev/null +++ b/azure-apps/scope/workflows/update.yaml @@ -0,0 +1,2 @@ +include: + - "$SERVICE_PATH/scope/workflows/create.yaml" diff --git a/azure-apps/specs/actions/create-scope.json.tpl b/azure-apps/specs/actions/create-scope.json.tpl new file mode 100644 index 00000000..46354cc9 --- /dev/null +++ b/azure-apps/specs/actions/create-scope.json.tpl @@ -0,0 +1,29 @@ +{ + "name": "create-scope", + "slug": "create-scope", + "type": "create", + "retryable": false, + "service_specification_id": "{{ env.Getenv "SERVICE_SPECIFICATION_ID" }}", + "parameters": { + "schema": { + "type": "object", + "required": [ + "scope_id" + ], + "properties": { + "scope_id": { + "type": "string" + } + } + }, + "values": {} + }, + "results": { + "schema": { + "type": "object", + "required": [], + "properties": {} + }, + "values": {} + } +} diff --git a/azure-apps/specs/actions/delete-deployment.json.tpl b/azure-apps/specs/actions/delete-deployment.json.tpl new file mode 100644 index 00000000..4dabc9a2 --- /dev/null +++ b/azure-apps/specs/actions/delete-deployment.json.tpl @@ -0,0 +1,33 @@ +{ + "name": "delete-deployment", + "slug": "delete-deployment", + "type": "custom", + "retryable": false, + "service_specification_id": "{{ env.Getenv "SERVICE_SPECIFICATION_ID" }}", + "parameters": { + "schema": { + "type": "object", + "required": [ + "scope_id", + "deployment_id" + ], + "properties": { + "scope_id": { + "type": "string" + }, + "deployment_id": { + "type": "string" + } + } + }, + "values": {} + }, + "results": { + "schema": { + "type": "object", + "required": [], + "properties": {} + }, + "values": {} + } +} diff --git a/azure-apps/specs/actions/delete-scope.json.tpl b/azure-apps/specs/actions/delete-scope.json.tpl new file mode 100644 index 00000000..9a5ea099 --- /dev/null +++ b/azure-apps/specs/actions/delete-scope.json.tpl @@ -0,0 +1,29 @@ +{ + "name": "delete-scope", + "slug": "delete-scope", + "type": "custom", + "retryable": false, + 
"service_specification_id": "{{ env.Getenv "SERVICE_SPECIFICATION_ID" }}", + "parameters": { + "schema": { + "type": "object", + "required": [ + "scope_id" + ], + "properties": { + "scope_id": { + "type": "string" + } + } + }, + "values": {} + }, + "results": { + "schema": { + "type": "object", + "required": [], + "properties": {} + }, + "values": {} + } +} diff --git a/azure-apps/specs/actions/diagnose-scope.json.tpl b/azure-apps/specs/actions/diagnose-scope.json.tpl new file mode 100644 index 00000000..66b2395a --- /dev/null +++ b/azure-apps/specs/actions/diagnose-scope.json.tpl @@ -0,0 +1,36 @@ +{ + "name": "Diagnose Scope", + "slug": "diagnose-scope", + "type": "diagnose", + "retryable": true, + "service_specification_id": "{{ env.Getenv "SERVICE_SPECIFICATION_ID" }}", + "parameters": { + "schema": { + "type": "object", + "required": [ + "scope_id" + ], + "properties": { + "scope_id": { + "type": "number", + "readOnly": true + } + } + }, + "values": {} + }, + "results": { + "schema": { + "type": "object", + "required": [], + "properties": {} + }, + "values": {} + }, + "annotations": { + "show_on": [ + "manage", "performance" + ], + "runs_over": "scope" + } +} diff --git a/azure-apps/specs/actions/finalize-blue-green.json.tpl b/azure-apps/specs/actions/finalize-blue-green.json.tpl new file mode 100644 index 00000000..9ba7b315 --- /dev/null +++ b/azure-apps/specs/actions/finalize-blue-green.json.tpl @@ -0,0 +1,33 @@ +{ + "name": "finalize-blue-green", + "slug": "finalize-blue-green", + "type": "custom", + "retryable": false, + "service_specification_id": "{{ env.Getenv "SERVICE_SPECIFICATION_ID" }}", + "parameters": { + "schema": { + "type": "object", + "required": [ + "scope_id", + "deployment_id" + ], + "properties": { + "scope_id": { + "type": "string" + }, + "deployment_id": { + "type": "string" + } + } + }, + "values": {} + }, + "results": { + "schema": { + "type": "object", + "required": [], + "properties": {} + }, + "values": {} + } +} diff --git 
a/azure-apps/specs/actions/rollback-deployment.json.tpl b/azure-apps/specs/actions/rollback-deployment.json.tpl new file mode 100644 index 00000000..26d3ce04 --- /dev/null +++ b/azure-apps/specs/actions/rollback-deployment.json.tpl @@ -0,0 +1,33 @@ +{ + "name": "rollback-deployment", + "slug": "rollback-deployment", + "type": "custom", + "retryable": true, + "service_specification_id": "{{ env.Getenv "SERVICE_SPECIFICATION_ID" }}", + "parameters": { + "schema": { + "type": "object", + "required": [ + "scope_id", + "deployment_id" + ], + "properties": { + "scope_id": { + "type": "string" + }, + "deployment_id": { + "type": "string" + } + } + }, + "values": {} + }, + "results": { + "schema": { + "type": "object", + "required": [], + "properties": {} + }, + "values": {} + } +} diff --git a/azure-apps/specs/actions/start-blue-green.json.tpl b/azure-apps/specs/actions/start-blue-green.json.tpl new file mode 100644 index 00000000..657953b6 --- /dev/null +++ b/azure-apps/specs/actions/start-blue-green.json.tpl @@ -0,0 +1,33 @@ +{ + "name": "start-blue-green", + "slug": "start-blue-green", + "type": "custom", + "retryable": false, + "service_specification_id": "{{ env.Getenv "SERVICE_SPECIFICATION_ID" }}", + "parameters": { + "schema": { + "type": "object", + "required": [ + "scope_id", + "deployment_id" + ], + "properties": { + "scope_id": { + "type": "string" + }, + "deployment_id": { + "type": "string" + } + } + }, + "values": {} + }, + "results": { + "schema": { + "type": "object", + "required": [], + "properties": {} + }, + "values": {} + } +} diff --git a/azure-apps/specs/actions/start-initial.json.tpl b/azure-apps/specs/actions/start-initial.json.tpl new file mode 100644 index 00000000..b082c89a --- /dev/null +++ b/azure-apps/specs/actions/start-initial.json.tpl @@ -0,0 +1,32 @@ +{ + "name": "start-initial", + "slug": "start-initial", + "type": "custom", + "service_specification_id": "{{ env.Getenv "SERVICE_SPECIFICATION_ID" }}", + "parameters": { + "schema": { + 
"type": "object", + "required": [ + "scope_id", + "deployment_id" + ], + "properties": { + "scope_id": { + "type": "string" + }, + "deployment_id": { + "type": "string" + } + } + }, + "values": {} + }, + "results": { + "schema": { + "type": "object", + "required": [], + "properties": {} + }, + "values": {} + } +} diff --git a/azure-apps/specs/actions/switch-traffic.json.tpl b/azure-apps/specs/actions/switch-traffic.json.tpl new file mode 100644 index 00000000..a46b0685 --- /dev/null +++ b/azure-apps/specs/actions/switch-traffic.json.tpl @@ -0,0 +1,37 @@ +{ + "name": "switch-traffic", + "slug": "switch-traffic", + "type": "custom", + "retryable": true, + "service_specification_id": "{{ env.Getenv "SERVICE_SPECIFICATION_ID" }}", + "parameters": { + "schema": { + "type": "object", + "required": [ + "scope_id", + "deployment_id", + "desired_traffic" + ], + "properties": { + "scope_id": { + "type": "string" + }, + "deployment_id": { + "type": "string" + }, + "desired_traffic": { + "type": "number" + } + } + }, + "values": {} + }, + "results": { + "schema": { + "type": "object", + "required": [], + "properties": {} + }, + "values": {} + } +} diff --git a/azure-apps/specs/notification-channel.json.tpl b/azure-apps/specs/notification-channel.json.tpl new file mode 100644 index 00000000..7ea71635 --- /dev/null +++ b/azure-apps/specs/notification-channel.json.tpl @@ -0,0 +1,35 @@ +{ + "nrn": "{{ env.Getenv "NRN" }}", + "status": "active", + "description": "Channel to handle Azure App Service scopes", + "type": "agent", + "source": [ + "telemetry", + "service" + ], + "configuration": { + "api_key": "{{ env.Getenv "NP_API_KEY" }}", + "command": { + "data": { + "cmdline": "{{ env.Getenv "REPO_PATH" }}/entrypoint --service-path={{ env.Getenv "REPO_PATH" }}/{{ env.Getenv "SERVICE_PATH" }}", + "environment": { + "NP_ACTION_CONTEXT": "'${NOTIFICATION_CONTEXT}'" + } + }, + "type": "exec" + }, + "selector": { + "environment": "{{ env.Getenv "ENVIRONMENT" }}" + } + }, + "filters": { + 
"$or": [ + { + "service.specification.slug": "{{ env.Getenv "SERVICE_SLUG" }}" + }, + { + "arguments.scope_provider": "{{ env.Getenv "SERVICE_SPECIFICATION_ID" }}" + } + ] + } +} diff --git a/azure-apps/specs/scope-type-definition.json.tpl b/azure-apps/specs/scope-type-definition.json.tpl new file mode 100644 index 00000000..551b0b81 --- /dev/null +++ b/azure-apps/specs/scope-type-definition.json.tpl @@ -0,0 +1,9 @@ +{ + "description": "Docker containers on Azure App Service", + "name": "Azure App Service", + "nrn": "{{ env.Getenv "NRN" }}", + "provider_id": "{{ env.Getenv "SERVICE_SPECIFICATION_ID" }}", + "provider_type": "service", + "status": "active", + "type": "custom" +} diff --git a/azure-apps/specs/service-spec.json.tpl b/azure-apps/specs/service-spec.json.tpl new file mode 100644 index 00000000..fa67db7a --- /dev/null +++ b/azure-apps/specs/service-spec.json.tpl @@ -0,0 +1,332 @@ +{ + "assignable_to": "any", + "attributes": { + "schema":{ + "type":"object", + "required":[ + "memory", + "health_check", + "scaling_type", + "fixed_instances", + "websockets_enabled", + "continuous_delivery" + ], + "uiSchema":{ + "type":"VerticalLayout", + "elements":[ + { + "type":"Control", + "label":"Memory", + "scope":"#/properties/memory" + }, + { + "type":"Categorization", + "options":{ + "collapsable":{ + "label":"ADVANCED", + "collapsed":true + } + }, + "elements":[ + { + "type":"Category", + "label":"Size & Scaling", + "elements":[ + { + "type":"Control", + "scope":"#/properties/scaling_type" + }, + { + "rule":{ + "effect":"SHOW", + "condition":{ + "scope":"#/properties/scaling_type", + "schema":{ + "enum":[ + "fixed" + ] + } + } + }, + "type":"Control", + "scope":"#/properties/fixed_instances" + }, + { + "rule":{ + "effect":"SHOW", + "condition":{ + "scope":"#/properties/scaling_type", + "schema":{ + "enum":[ + "auto" + ] + } + } + }, + "type":"Group", + "label":"Autoscaling Settings", + "elements":[ + { + "type":"Control", + 
"scope":"#/properties/autoscaling/properties/min_instances" + }, + { + "type":"Control", + "scope":"#/properties/autoscaling/properties/max_instances" + }, + { + "type":"Control", + "scope":"#/properties/autoscaling/properties/target_cpu_utilization" + }, + { + "type":"Control", + "scope":"#/properties/autoscaling/properties/target_memory_enabled" + }, + { + "rule": { + "effect": "SHOW", + "condition": { + "scope": "#/properties/autoscaling/properties/target_memory_enabled", + "schema": { + "const": true + } + } + }, + "type": "Control", + "scope": "#/properties/autoscaling/properties/target_memory_utilization" + } + ] + } + ] + }, + { + "type":"Category", + "label":"Runtime", + "elements":[ + { + "type":"Control", + "label":"WebSockets", + "scope":"#/properties/websockets_enabled" + } + ] + }, + { + "type":"Category", + "label":"Health Check", + "elements":[ + { + "type":"Control", + "scope":"#/properties/health_check/properties/path" + }, + { + "type":"Control", + "scope":"#/properties/health_check/properties/eviction_time_in_min" + } + ] + }, + { + "type":"Category", + "label":"Continuous Deployment", + "elements":[ + { + "type":"Control", + "scope":"#/properties/continuous_delivery/properties/enabled" + }, + { + "rule":{ + "effect":"SHOW", + "condition":{ + "scope":"#/properties/continuous_delivery/properties/enabled", + "schema":{ + "const":true + } + } + }, + "type":"Control", + "scope":"#/properties/continuous_delivery/properties/branches" + } + ] + } + ] + } + ] + }, + "properties":{ + "asset_type":{ + "type":"string", + "export":false, + "default":"docker-image" + }, + "memory":{ + "type":"integer", + "oneOf":[ + { + "const":1, + "title":"1 GB" + }, + { + "const":2, + "title":"2 GB" + }, + { + "const":4, + "title":"4 GB" + }, + { + "const":8, + "title":"8 GB" + }, + { + "const":16, + "title":"16 GB" + }, + { + "const":32, + "title":"32 GB" + } + ], + "title":"Memory", + "default":1, + "description":"Memory allocation in GB for your application" + }, + 
"websockets_enabled":{ + "type":"boolean", + "title":"Enable WebSockets", + "default":false, + "description":"Enable WebSocket protocol support for real-time communication" + }, + "scaling_type":{ + "enum":[ + "fixed", + "auto" + ], + "type":"string", + "title":"Scaling Type", + "default":"fixed", + "description":"Choose between fixed number of instances or automatic scaling based on load" + }, + "fixed_instances":{ + "type":"integer", + "title":"Number of Instances", + "default":1, + "maximum":10, + "minimum":1, + "description":"Fixed number of instances to run" + }, + "autoscaling":{ + "type":"object", + "properties":{ + "min_instances":{ + "type":"integer", + "title":"Minimum Instances", + "default":1, + "maximum":10, + "minimum":1, + "description":"Minimum number of instances to maintain" + }, + "max_instances":{ + "type":"integer", + "title":"Maximum Instances", + "default":10, + "maximum":30, + "minimum":1, + "description":"Maximum number of instances to scale to" + }, + "target_cpu_utilization":{ + "type":"integer", + "title":"CPU Scale-Out Threshold (%)", + "default":70, + "maximum":90, + "minimum":50, + "description":"CPU percentage that triggers scale out" + }, + "target_memory_enabled": { + "type": "boolean", + "title": "Scale by Memory", + "default": false + }, + "target_memory_utilization": { + "type": "integer", + "title": "Memory Scale-Out Threshold (%)", + "default": 75, + "maximum": 90, + "minimum": 50, + "description": "Memory percentage that triggers scale out" + } + } + }, + "health_check":{ + "type":"object", + "properties":{ + "path":{ + "type":"string", + "title":"Health Check Path", + "description":"HTTP path for health check requests (e.g., /health). 
Leave empty to disable health checks.", + "default": "/health" + }, + "eviction_time_in_min":{ + "type":"integer", + "title":"Unhealthy Instance Eviction Time", + "default":2, + "maximum":10, + "minimum":2, + "description":"Minutes before an unhealthy instance is removed and replaced" + } + } + }, + "continuous_delivery":{ + "type":"object", + "title":"Continuous Delivery", + "required":[ + "enabled", + "branches" + ], + "properties":{ + "enabled":{ + "type":"boolean", + "title":"Enable Continuous Delivery", + "default":false, + "description":"Automatically deploy new versions from specified branches" + }, + "branches":{ + "type":"array", + "items":{ + "type":"string" + }, + "title":"Branches", + "default":[ + "main" + ], + "description":"Git branches to monitor for automatic deployment" + } + }, + "description":"Configure automatic deployment from Git branches" + }, + "custom_domains": { + "type": "object", + "required": [ + "enabled" + ], + "properties": { + "enabled": { + "type": "boolean", + "default": true + } + } + } + } + } + }, + "name": "Azure App Service", + "selectors": { + "category": "Scope", + "imported": false, + "provider": "Agent", + "sub_category": "App Service" + }, + "type": "scope", + "use_default_actions": false, + "visible_to": [ + "{{ env.Getenv "NRN" }}" + ] +} diff --git a/azure-apps/values.yaml b/azure-apps/values.yaml new file mode 100644 index 00000000..992eb435 --- /dev/null +++ b/azure-apps/values.yaml @@ -0,0 +1,58 @@ +# Azure App Service Scope Configuration +# +# Required environment variables (set in Helm values): +# AZURE_SUBSCRIPTION_ID - Azure subscription ID +# AZURE_RESOURCE_GROUP - Resource group for App Service resources +# AZURE_LOCATION - Azure region (e.g., eastus, westus2) +# TOFU_PROVIDER_STORAGE_ACCOUNT - Storage account for Terraform state +# TOFU_PROVIDER_CONTAINER - Container name for Terraform state (default: tfstate) +# +# Optional environment variables: +# DOCKER_REGISTRY_URL - Docker registry URL (default: 
https://index.docker.io) +# DOCKER_REGISTRY_USERNAME - Registry username (for private registries) +# DOCKER_REGISTRY_PASSWORD - Registry password (for private registries) +# DNS_ZONE_NAME - Azure DNS zone name for custom domains +# DNS_ZONE_RESOURCE_GROUP - Resource group containing DNS zone +# RESOURCE_TAGS_JSON - JSON object with resource tags + +provider_categories: + - cloud-providers + - assets-repository + +configuration: + # Terraform Configuration + TOFU_PATH: "$SERVICE_PATH/deployment/modules" + + # Domain Configuration (optional - set via environment variables) + ENABLE_CUSTOM_DOMAIN: false + ENABLE_MANAGED_CERTIFICATE: true + + # Network Configuration (optional) + ENABLE_VNET_INTEGRATION: false + # VNET_INTEGRATION_SUBNET_ID: "" + # VNET_ROUTE_ALL_ENABLED: false + + # Logging Configuration + ENABLE_LOGGING: true + APPLICATION_LOGS_LEVEL: "Information" + HTTP_LOGS_RETENTION_DAYS: 7 + + # Application Insights (optional) + ENABLE_APPLICATION_INSIGHTS: false + LOG_ANALYTICS_RETENTION_DAYS: 30 + + # Alerting (optional) + ENABLE_ALERTS: false + # ALERT_EMAIL_RECIPIENTS: [] + + # Identity Configuration + ENABLE_SYSTEM_IDENTITY: false + + # Default App Service Settings + HTTPS_ONLY: true + MINIMUM_TLS_VERSION: "1.2" + FTPS_STATE: "Disabled" + CLIENT_AFFINITY_ENABLED: false + + # Deployment Settings + DEPLOYMENT_MAX_WAIT_IN_SECONDS: 600 diff --git a/frontend/deployment/build_context b/frontend/deployment/build_context new file mode 100644 index 00000000..c8387bff --- /dev/null +++ b/frontend/deployment/build_context @@ -0,0 +1,51 @@ +#!/bin/bash + +application_slug=$(echo "$CONTEXT" | jq -r .application.slug) +scope_slug=$(echo "$CONTEXT" | jq -r .scope.slug) +namespace_slug=$(echo "$CONTEXT" | jq -r .namespace.slug) +scope_id=$(echo "$CONTEXT" | jq -r .scope.id) +application_version="$(echo "$CONTEXT" | jq -r .release.semver)" +env_vars_json=$(echo "$CONTEXT" | jq '(.parameters.results // []) | map({(.variable): .values[0].value}) | add // {}') + 
+RESOURCE_TAGS_JSON=$(echo "$CONTEXT" | jq \ + '{ + nullplatform: "true", + account: .account.slug, + account_id: .account.id, + namespace: .namespace.slug, + namespace_id: .namespace.id, + application: .application.slug, + application_id: .application.id, + scope: .scope.slug, + scope_id: .scope.id, + deployment_id: .deployment.id + }') + +TOFU_VARIABLES={} + +tf_state_key="frontend/$namespace_slug/$application_slug/$scope_slug-$scope_id" + +TOFU_INIT_VARIABLES="${TOFU_INIT_VARIABLES:-} -backend-config=key=$tf_state_key" + +TOFU_MODULE_DIR="$SERVICE_PATH/output/$scope_id" +if [ -n "${NP_OUTPUT_DIR:-}" ]; then + TOFU_MODULE_DIR="$NP_OUTPUT_DIR/output/$scope_id" +fi + +mkdir -p "$TOFU_MODULE_DIR" + +# ============================================================================= +# Modules to compose (comma-separated list) +# Initialized from CUSTOM_MODULES, extended by setup scripts +# Available modules: +# - state/aws : AWS S3 backend for terraform state +# - network/route_53 : AWS Route53 DNS configuration +# - hosting/amplify : AWS Amplify hosting (coming soon) +# ============================================================================= +MODULES_TO_USE="${CUSTOM_TOFU_MODULES:-}" + +export TOFU_VARIABLES +export TOFU_INIT_VARIABLES +export TOFU_MODULE_DIR +export MODULES_TO_USE +export RESOURCE_TAGS_JSON \ No newline at end of file diff --git a/frontend/deployment/compose_modules b/frontend/deployment/compose_modules new file mode 100755 index 00000000..b2361546 --- /dev/null +++ b/frontend/deployment/compose_modules @@ -0,0 +1,72 @@ +#!/bin/bash + +# Compose Terraform modules dynamically +# Usage: source compose_modules +# +# Required environment variables: +# MODULES_TO_USE - Comma-separated list of modules (e.g., "state/aws,network/route_53,hosting/amplify") +# TOFU_MODULE_DIR - Target directory where .tf files will be copied +# +# Each module can have: +# - *.tf files: Copied to TOFU_MODULE_DIR (Terraform auto-merges all .tf files) +# - setup script: 
Sourced to configure TOFU_VARIABLES and TOFU_INIT_VARIABLES + +script_dir="$(dirname "${BASH_SOURCE[0]}")" +modules_dir="$script_dir" + +if [ -z "${MODULES_TO_USE:-}" ]; then + echo "✗ MODULES_TO_USE is not set" + exit 1 +fi + +if [ -z "${TOFU_MODULE_DIR:-}" ]; then + echo "✗ TOFU_MODULE_DIR is not set" + exit 1 +fi + +mkdir -p "$TOFU_MODULE_DIR" + +echo "Composing modules: $MODULES_TO_USE" +echo "Target directory: $TOFU_MODULE_DIR" +echo "" + +IFS=',' read -ra modules <<< "$MODULES_TO_USE" +for module in "${modules[@]}"; do + module=$(echo "$module" | xargs) # trim whitespace + + echo $module + + ls $module + if [ ! -d "$module" ]; then + echo "✗ Module not found: $module" + exit 1 + fi + + # Copy .tf files if they exist (with module prefix to avoid conflicts) + if ls "$module"/*.tf 1> /dev/null 2>&1; then + # Extract last two path components for prefix (e.g., "/path/to/state/aws" -> "state_aws_") + parent=$(basename "$(dirname "$module")") + leaf=$(basename "$module") + prefix="${parent}_${leaf}_" + for tf_file in "$module"/*.tf; do + filename=$(basename "$tf_file") + cp "$tf_file" "$TOFU_MODULE_DIR/${prefix}${filename}" + done + echo "✓ Copied modules from: $module (prefix: $prefix)" + fi + + # Source setup script if it exists + if [ -f "$module/setup" ]; then + echo " Running setup for: $module" + source "$module/setup" + if [ $?
-ne 0 ]; then + echo "✗ Setup failed for module: $module" + exit 1 + fi + echo "✓ Setup completed for: $module" + fi + + echo "" +done + +echo "✓ All modules composed successfully" \ No newline at end of file diff --git a/frontend/deployment/distribution/amplify/modules/main.tf b/frontend/deployment/distribution/amplify/modules/main.tf index 6a45c61f..0f073c5c 100644 --- a/frontend/deployment/distribution/amplify/modules/main.tf +++ b/frontend/deployment/distribution/amplify/modules/main.tf @@ -202,4 +202,4 @@ output "distribution_webhook_url" { description = "Webhook URL for manual triggers" value = aws_amplify_webhook.main.url sensitive = true -} +} \ No newline at end of file diff --git a/frontend/deployment/distribution/amplify/setup b/frontend/deployment/distribution/amplify/setup index 90a3fc2f..96cd94a6 100755 --- a/frontend/deployment/distribution/amplify/setup +++ b/frontend/deployment/distribution/amplify/setup @@ -20,4 +20,4 @@ TOFU_VARIABLES=$(echo "$TOFU_VARIABLES" | jq \ # Add module to composition list script_dir="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)" module_name="${script_dir#*deployment/}" -MODULES_TO_USE="${MODULES_TO_USE:+$MODULES_TO_USE,}$module_name" +MODULES_TO_USE="${MODULES_TO_USE:+$MODULES_TO_USE,}$module_name" \ No newline at end of file diff --git a/frontend/deployment/distribution/blob-cdn/modules/main.tf b/frontend/deployment/distribution/blob-cdn/modules/main.tf index 775e8178..60372e47 100644 --- a/frontend/deployment/distribution/blob-cdn/modules/main.tf +++ b/frontend/deployment/distribution/blob-cdn/modules/main.tf @@ -100,4 +100,4 @@ resource "azurerm_cdn_endpoint_custom_domain" "static" { protocol_type = "ServerNameIndication" tls_version = "TLS12" } -} +} \ No newline at end of file diff --git a/frontend/deployment/distribution/blob-cdn/setup b/frontend/deployment/distribution/blob-cdn/setup index c6841497..94d8b44e 100755 --- a/frontend/deployment/distribution/blob-cdn/setup +++ 
b/frontend/deployment/distribution/blob-cdn/setup @@ -124,4 +124,4 @@ if [[ -n $MODULES_TO_USE ]]; then MODULES_TO_USE="$MODULES_TO_USE,$module_name" else MODULES_TO_USE="$module_name" -fi +fi \ No newline at end of file diff --git a/frontend/deployment/distribution/cloudfront/modules/data.tf b/frontend/deployment/distribution/cloudfront/modules/data.tf index 075a9425..5d664814 100644 --- a/frontend/deployment/distribution/cloudfront/modules/data.tf +++ b/frontend/deployment/distribution/cloudfront/modules/data.tf @@ -14,4 +14,4 @@ data "aws_acm_certificate" "custom_domain" { domain = local.distribution_acm_certificate_domain statuses = ["ISSUED", "PENDING_VALIDATION"] most_recent = true -} +} \ No newline at end of file diff --git a/frontend/deployment/distribution/cloudfront/modules/locals.tf b/frontend/deployment/distribution/cloudfront/modules/locals.tf index efcadf36..ba36d349 100644 --- a/frontend/deployment/distribution/cloudfront/modules/locals.tf +++ b/frontend/deployment/distribution/cloudfront/modules/locals.tf @@ -23,4 +23,4 @@ locals { distribution_target_domain = aws_cloudfront_distribution.static.domain_name distribution_target_zone_id = aws_cloudfront_distribution.static.hosted_zone_id distribution_record_type = "A" -} +} \ No newline at end of file diff --git a/frontend/deployment/distribution/cloudfront/modules/main.tf b/frontend/deployment/distribution/cloudfront/modules/main.tf index 53b6a67d..2bf8c545 100644 --- a/frontend/deployment/distribution/cloudfront/modules/main.tf +++ b/frontend/deployment/distribution/cloudfront/modules/main.tf @@ -136,4 +136,4 @@ resource "terraform_data" "cloudfront_invalidation" { } depends_on = [aws_cloudfront_distribution.static] -} +} \ No newline at end of file diff --git a/frontend/deployment/distribution/cloudfront/modules/outputs.tf b/frontend/deployment/distribution/cloudfront/modules/outputs.tf index dd5cbba9..bcce186a 100644 --- a/frontend/deployment/distribution/cloudfront/modules/outputs.tf +++ 
b/frontend/deployment/distribution/cloudfront/modules/outputs.tf @@ -41,4 +41,4 @@ output "distribution_record_type" { output "distribution_website_url" { description = "Website URL" value = local.network_full_domain != "" ? "https://${local.network_full_domain}" : "https://${aws_cloudfront_distribution.static.domain_name}" -} +} \ No newline at end of file diff --git a/frontend/deployment/distribution/cloudfront/modules/variables.tf b/frontend/deployment/distribution/cloudfront/modules/variables.tf index a55c81d3..7fb68347 100644 --- a/frontend/deployment/distribution/cloudfront/modules/variables.tf +++ b/frontend/deployment/distribution/cloudfront/modules/variables.tf @@ -23,4 +23,4 @@ variable "distribution_cloudfront_endpoint_url" { description = "Custom CloudFront endpoint URL for AWS CLI (used for testing with moto)" type = string default = "" -} +} \ No newline at end of file diff --git a/frontend/deployment/distribution/cloudfront/setup b/frontend/deployment/distribution/cloudfront/setup index 01605b88..f1294d67 100755 --- a/frontend/deployment/distribution/cloudfront/setup +++ b/frontend/deployment/distribution/cloudfront/setup @@ -105,4 +105,4 @@ if [[ -n $MODULES_TO_USE ]]; then MODULES_TO_USE="$MODULES_TO_USE,$module_name" else MODULES_TO_USE="$module_name" -fi +fi \ No newline at end of file diff --git a/frontend/deployment/distribution/firebase/modules/main.tf b/frontend/deployment/distribution/firebase/modules/main.tf index c99c1a0b..6db3ead2 100644 --- a/frontend/deployment/distribution/firebase/modules/main.tf +++ b/frontend/deployment/distribution/firebase/modules/main.tf @@ -84,4 +84,4 @@ output "distribution_firebaseapp_url" { output "distribution_website_url" { description = "Website URL" value = length(var.distribution_custom_domains) > 0 ? 
"https://${var.distribution_custom_domains[0]}" : "https://${google_firebase_distribution_site.default.site_id}.web.app" -} +} \ No newline at end of file diff --git a/frontend/deployment/distribution/firebase/setup b/frontend/deployment/distribution/firebase/setup index cfe2181a..83fb09f1 100755 --- a/frontend/deployment/distribution/firebase/setup +++ b/frontend/deployment/distribution/firebase/setup @@ -24,4 +24,4 @@ TOFU_VARIABLES=$(echo "$TOFU_VARIABLES" | jq \ # Add module to composition list script_dir="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)" module_name="${script_dir#*deployment/}" -MODULES_TO_USE="${MODULES_TO_USE:+$MODULES_TO_USE,}$module_name" +MODULES_TO_USE="${MODULES_TO_USE:+$MODULES_TO_USE,}$module_name" \ No newline at end of file diff --git a/frontend/deployment/distribution/gcs-cdn/modules/main.tf b/frontend/deployment/distribution/gcs-cdn/modules/main.tf index 5182dd48..2525b9a0 100644 --- a/frontend/deployment/distribution/gcs-cdn/modules/main.tf +++ b/frontend/deployment/distribution/gcs-cdn/modules/main.tf @@ -190,4 +190,4 @@ output "distribution_website_url" { output "distribution_upload_command" { description = "Command to upload files" value = "gsutil -m rsync -r ./dist gs://${google_storage_bucket.static.name}" -} +} \ No newline at end of file diff --git a/frontend/deployment/distribution/gcs-cdn/setup b/frontend/deployment/distribution/gcs-cdn/setup index 5637f51d..adf29790 100755 --- a/frontend/deployment/distribution/gcs-cdn/setup +++ b/frontend/deployment/distribution/gcs-cdn/setup @@ -27,4 +27,4 @@ TOFU_VARIABLES=$(echo "$TOFU_VARIABLES" | jq \ # Add module to composition list script_dir="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)" module_name="${script_dir#*deployment/}" -MODULES_TO_USE="${MODULES_TO_USE:+$MODULES_TO_USE,}$module_name" +MODULES_TO_USE="${MODULES_TO_USE:+$MODULES_TO_USE,}$module_name" \ No newline at end of file diff --git a/frontend/deployment/distribution/static-web-apps/modules/main.tf 
b/frontend/deployment/distribution/static-web-apps/modules/main.tf index a0ac20f0..7afb8b88 100644 --- a/frontend/deployment/distribution/static-web-apps/modules/main.tf +++ b/frontend/deployment/distribution/static-web-apps/modules/main.tf @@ -1,3 +1,4 @@ + # Azure Static Web Apps Hosting # Resources for Azure Static Web Apps diff --git a/frontend/deployment/distribution/static-web-apps/setup b/frontend/deployment/distribution/static-web-apps/setup index c51d9f6f..c66fd9bd 100755 --- a/frontend/deployment/distribution/static-web-apps/setup +++ b/frontend/deployment/distribution/static-web-apps/setup @@ -16,4 +16,4 @@ TOFU_VARIABLES=$(echo "$TOFU_VARIABLES" | jq \ # Add module to composition list script_dir="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)" module_name="${script_dir#*deployment/}" -MODULES_TO_USE="${MODULES_TO_USE:+$MODULES_TO_USE,}$module_name" +MODULES_TO_USE="${MODULES_TO_USE:+$MODULES_TO_USE,}$module_name" \ No newline at end of file diff --git a/frontend/deployment/do_tofu b/frontend/deployment/do_tofu new file mode 100644 index 00000000..85b4f303 --- /dev/null +++ b/frontend/deployment/do_tofu @@ -0,0 +1,42 @@ +#!/bin/bash + +#echo $TOFU_VARIABLES | jq . 
+ +echo "$TOFU_INIT_VARIABLES" +echo "$TOFU_VARIABLES" +echo "$MODULES_TO_USE" +#set -eou pipefail +# +#CURRENT_DIR=$(dirname "${BASH_SOURCE[0]}") +# +#cd "$CURRENT_DIR" +# +#AWS_REGION="${AWS_REGION:-us-east-1}" +#TF_STATE_BUCKET="test-static-null2" +#TF_LOCK_TABLE="service-provisioning-terraform-state-lock" +## You need to export the GITHUB_TOKEN as an env var in the agent +##GITHUB_TOKEN="" +# +#HOSTED_PUBLIC_ZONE_ID=$(echo "$CONTEXT" | jq -r '.providers["cloud-providers"].networking.hosted_public_zone_id') +# +#DOMAIN=$(aws route53 get-hosted-zone --id "$HOSTED_PUBLIC_ZONE_ID" --query 'HostedZone.Name' --output text | sed 's/\.$//') +#SUBDOMAIN="$APPLICATION_SLUG-$SCOPE_SLUG" +# +#np scope patch --id "$SCOPE_ID" --body "{\"domain\":\"$SUBDOMAIN.$DOMAIN\"}" +# +#tofu init \ +# -backend-config="bucket=${TF_STATE_BUCKET}" \ +# -backend-config="key=amplify/$APPLICATION_SLUG/$SCOPE_SLUG-$SCOPE_ID" \ +# -backend-config="region=${AWS_REGION}" \ +# -backend-config="dynamodb_table=${TF_LOCK_TABLE}" +# +#tofu $ACTION -auto-approve \ +# -var="aws_region=${AWS_REGION}" \ +# -var="github_token=${GITHUB_TOKEN}" \ +# -var="application_name=${APPLICATION_SLUG}" \ +# -var="repository_url=${REPOSITORY_URL}" \ +# -var="application_version=${APPLICATION_VERSION}" \ +# -var="env_vars_json=${ENV_VARS_JSON}" \ +# -var="resource_tags_json=${RESOURCE_TAGS_JSON}" \ +# -var="domain=${DOMAIN}" \ +# -var="subdomain=${SUBDOMAIN}" \ No newline at end of file diff --git a/frontend/deployment/network/azure_dns/modules/main.tf b/frontend/deployment/network/azure_dns/modules/main.tf index 59380599..73a78525 100644 --- a/frontend/deployment/network/azure_dns/modules/main.tf +++ b/frontend/deployment/network/azure_dns/modules/main.tf @@ -31,4 +31,4 @@ resource "azurerm_dns_a_record" "main" { resource_group_name = var.azure_provider.resource_group ttl = 300 records = [local.distribution_target_domain] -} +} \ No newline at end of file diff --git a/frontend/deployment/network/cloud_dns/setup 
b/frontend/deployment/network/cloud_dns/setup index 9ae7b088..272ee155 100755 --- a/frontend/deployment/network/cloud_dns/setup +++ b/frontend/deployment/network/cloud_dns/setup @@ -40,4 +40,4 @@ TOFU_VARIABLES=$(echo "$TOFU_VARIABLES" | jq \ # Add module to composition list script_dir="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)" module_name="${script_dir#*deployment/}" -MODULES_TO_USE="${MODULES_TO_USE:+$MODULES_TO_USE,}$module_name" +MODULES_TO_USE="${MODULES_TO_USE:+$MODULES_TO_USE,}$module_name" \ No newline at end of file diff --git a/frontend/deployment/network/route53/setup b/frontend/deployment/network/route53/setup index 300bad50..eefcf4de 100755 --- a/frontend/deployment/network/route53/setup +++ b/frontend/deployment/network/route53/setup @@ -163,4 +163,4 @@ if [[ -n $MODULES_TO_USE ]]; then MODULES_TO_USE="$MODULES_TO_USE,$module_name" else MODULES_TO_USE="$module_name" -fi +fi \ No newline at end of file diff --git a/frontend/deployment/network/route_53/modules/locals.tf b/frontend/deployment/network/route_53/modules/locals.tf new file mode 100644 index 00000000..d9357d81 --- /dev/null +++ b/frontend/deployment/network/route_53/modules/locals.tf @@ -0,0 +1,4 @@ +locals { + # Compute full domain from domain + subdomain + network_full_domain = var.network_subdomain != "" ? "${var.network_subdomain}.${var.network_domain}" : var.network_domain +} \ No newline at end of file diff --git a/frontend/deployment/network/route_53/modules/main.tf b/frontend/deployment/network/route_53/modules/main.tf new file mode 100644 index 00000000..7cd8f0f7 --- /dev/null +++ b/frontend/deployment/network/route_53/modules/main.tf @@ -0,0 +1,23 @@ +resource "aws_route53_record" "main_alias" { + count = local.hosting_record_type == "A" ? 
1 : 0 + + zone_id = var.network_hosted_zone_id + name = local.network_full_domain + type = "A" + + alias { + name = local.hosting_target_domain + zone_id = local.hosting_target_zone_id + evaluate_target_health = false + } +} + +resource "aws_route53_record" "main_cname" { + count = local.hosting_record_type == "CNAME" ? 1 : 0 + + zone_id = var.network_hosted_zone_id + name = local.network_full_domain + type = "CNAME" + ttl = 300 + records = [local.hosting_target_domain] +} diff --git a/frontend/deployment/network/route_53/modules/outputs.tf b/frontend/deployment/network/route_53/modules/outputs.tf new file mode 100644 index 00000000..6fa188e5 --- /dev/null +++ b/frontend/deployment/network/route_53/modules/outputs.tf @@ -0,0 +1,14 @@ +output "network_full_domain" { + description = "Full domain name (subdomain.domain or just domain)" + value = local.network_full_domain +} + +output "network_fqdn" { + description = "Fully qualified domain name" + value = local.hosting_record_type == "A" ? 
aws_route53_record.main_alias[0].fqdn : aws_route53_record.main_cname[0].fqdn +} + +output "network_website_url" { + description = "Website URL" + value = "https://${local.network_full_domain}" +} \ No newline at end of file diff --git a/frontend/deployment/network/route_53/modules/variables.tf b/frontend/deployment/network/route_53/modules/variables.tf new file mode 100644 index 00000000..9f67cf99 --- /dev/null +++ b/frontend/deployment/network/route_53/modules/variables.tf @@ -0,0 +1,15 @@ +variable "network_hosted_zone_id" { + description = "Route53 hosted zone ID" + type = string +} + +variable "network_domain" { + description = "Root domain name (e.g., example.com)" + type = string +} + +variable "network_subdomain" { + description = "Subdomain prefix (e.g., 'app' for app.example.com, empty string for apex)" + type = string + default = "" +} \ No newline at end of file diff --git a/frontend/deployment/network/route_53/setup b/frontend/deployment/network/route_53/setup new file mode 100755 index 00000000..aaf1fa93 --- /dev/null +++ b/frontend/deployment/network/route_53/setup @@ -0,0 +1,118 @@ +#!/bin/bash + +hosted_zone_id=$(echo "$CONTEXT" | jq -r '.providers["cloud-providers"].networking.hosted_public_zone_id // empty') + +if [ -z "$hosted_zone_id" ]; then + echo "❌ hosted_public_zone_id is not set in context" + exit 1 +fi + +application_slug=$(echo "$CONTEXT" | jq -r .application.slug) +scope_slug=$(echo "$CONTEXT" | jq -r .scope.slug) + +# Fetch the domain name from Route 53 hosted zone +echo "🔍 Fetching domain from Route 53 hosted zone: $hosted_zone_id" + +aws_output=$(aws route53 get-hosted-zone --id "$hosted_zone_id" 2>&1) +aws_exit_code=$? 
+ +if [ $aws_exit_code -ne 0 ]; then + echo "" + echo "❌ Failed to fetch Route 53 hosted zone information" + echo "" + + if echo "$aws_output" | grep -q "NoSuchHostedZone"; then + echo " 🔎 Error: Hosted zone '$hosted_zone_id' does not exist" + echo "" + echo " 💡 Possible causes:" + echo " • The hosted zone ID is incorrect or has a typo" + echo " • The hosted zone was deleted" + echo " • The hosted zone ID format is wrong (should be like 'Z1234567890ABC' or '/hostedzone/Z1234567890ABC')" + echo "" + echo " 🔧 How to fix:" + echo " 1. Verify the hosted zone exists: aws route53 list-hosted-zones" + echo " 2. Update 'hosted_public_zone_id' in your cloud provider configuration" + + elif echo "$aws_output" | grep -q "AccessDenied\|not authorized"; then + echo " 🔒 Error: Permission denied when accessing Route 53" + echo "" + echo " 💡 Possible causes:" + echo " • The AWS credentials don't have Route 53 read permissions" + echo " • The IAM role/user is missing the 'route53:GetHostedZone' permission" + echo "" + echo " 🔧 How to fix:" + echo " 1. Check your AWS credentials are configured correctly" + echo " 2. Ensure your IAM policy includes:" + echo " {" + echo " \"Effect\": \"Allow\"," + echo " \"Action\": \"route53:GetHostedZone\"," + echo " \"Resource\": \"arn:aws:route53:::hostedzone/$hosted_zone_id\"" + echo " }" + + elif echo "$aws_output" | grep -q "InvalidInput"; then + echo " ⚠️ Error: Invalid hosted zone ID format" + echo "" + echo " The hosted zone ID '$hosted_zone_id' is not valid." 
+ echo "" + echo " 🔧 How to fix:" + echo " • Use the format 'Z1234567890ABC' or '/hostedzone/Z1234567890ABC'" + echo " • Find valid zone IDs with: aws route53 list-hosted-zones" + + elif echo "$aws_output" | grep -q "Unable to locate credentials\|ExpiredToken\|InvalidClientTokenId"; then + echo " 🔑 Error: AWS credentials issue" + echo "" + echo " 💡 Possible causes:" + echo " • AWS credentials are not configured" + echo " • AWS credentials have expired" + echo " • AWS_ACCESS_KEY_ID or AWS_SECRET_ACCESS_KEY environment variables are missing" + echo "" + echo " 🔧 How to fix:" + echo " 1. Run 'aws configure' to set up credentials" + echo " 2. Or set AWS_ACCESS_KEY_ID and AWS_SECRET_ACCESS_KEY environment variables" + echo " 3. If using temporary credentials, refresh your session token" + + else + echo " 📋 Error details:" + echo "$aws_output" | sed 's/^/ /' + fi + + echo "" + exit 1 +fi + +network_domain=$(echo "$aws_output" | jq -r '.HostedZone.Name' | sed 's/\.$//') + +if [ -z "$network_domain" ] || [ "$network_domain" = "null" ]; then + echo "" + echo "❌ Failed to extract domain name from hosted zone response" + echo "" + echo " 🤔 The AWS API returned successfully but the domain name could not be parsed." + echo " This is unexpected - please check the hosted zone configuration." + echo "" + exit 1 +fi + +echo "✅ Domain resolved: $network_domain" + +network_subdomain="$application_slug-$scope_slug" +echo "✅ Subdomain: $network_subdomain" + +TOFU_VARIABLES=$(echo "$TOFU_VARIABLES" | jq \ + --arg hosted_zone_id "$hosted_zone_id" \ + --arg domain "$network_domain" \ + --arg subdomain "$network_subdomain" \ + '. 
+ { + network_hosted_zone_id: $hosted_zone_id, + network_domain: $domain, + network_subdomain: $subdomain + }') + +# Add module to composition list +script_dir="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)" +module_name="${script_dir}/modules" + +if [[ -n $MODULES_TO_USE ]]; then + MODULES_TO_USE="$MODULES_TO_USE,$module_name" +else + MODULES_TO_USE="$module_name" +fi diff --git a/frontend/deployment/scripts/setup-layer b/frontend/deployment/scripts/setup-layer deleted file mode 100755 index 85ef3f0a..00000000 --- a/frontend/deployment/scripts/setup-layer +++ /dev/null @@ -1,642 +0,0 @@ -#!/bin/bash -# ============================================================================= -# Layer Boilerplate Generator -# -# Creates the folder structure and template files for a new layer implementation. -# -# Usage: -# ./setup-layer --type network --name cloudflare -# ./setup-layer --type distribution --name netlify -# ./setup-layer --type provider --name digitalocean -# -# This will create: -# frontend/deployment/{type}/{name}/ -# ├── setup -# └── modules/ -# ├── main.tf -# ├── variables.tf -# ├── locals.tf -# ├── outputs.tf -# └── test_locals.tf -# ============================================================================= - -set -euo pipefail - -# Colors -RED='\033[0;31m' -GREEN='\033[0;32m' -YELLOW='\033[1;33m' -CYAN='\033[0;36m' -NC='\033[0m' - -# Script location -SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)" -DEPLOYMENT_DIR="$(cd "$SCRIPT_DIR/.." 
&& pwd)" - -# Parse arguments -LAYER_TYPE="" -LAYER_NAME="" - -while [[ $# -gt 0 ]]; do - case $1 in - --type|-t) - LAYER_TYPE="$2" - shift 2 - ;; - --name|-n) - LAYER_NAME="$2" - shift 2 - ;; - --help|-h) - echo "Usage: $0 --type --name " - echo "" - echo "Examples:" - echo " $0 --type network --name cloudflare" - echo " $0 --type distribution --name netlify" - echo " $0 --type provider --name digitalocean" - exit 0 - ;; - *) - echo -e "${RED}Unknown option: $1${NC}" - exit 1 - ;; - esac -done - -# Validate arguments -if [[ -z "$LAYER_TYPE" ]]; then - echo -e "${RED}Error: --type is required${NC}" - echo "Valid types: provider, network, distribution" - exit 1 -fi - -if [[ -z "$LAYER_NAME" ]]; then - echo -e "${RED}Error: --name is required${NC}" - exit 1 -fi - -if [[ ! "$LAYER_TYPE" =~ ^(provider|network|distribution)$ ]]; then - echo -e "${RED}Error: Invalid layer type '$LAYER_TYPE'${NC}" - echo "Valid types: provider, network, distribution" - exit 1 -fi - -# Sanitize name (lowercase, replace spaces with underscores) -LAYER_NAME=$(echo "$LAYER_NAME" | tr '[:upper:]' '[:lower:]' | tr ' ' '_' | tr '-' '_') - -# Target directory -TARGET_DIR="$DEPLOYMENT_DIR/$LAYER_TYPE/$LAYER_NAME" - -if [[ -d "$TARGET_DIR" ]]; then - echo -e "${RED}Error: Directory already exists: $TARGET_DIR${NC}" - exit 1 -fi - -echo "" -echo -e "${CYAN}Creating $LAYER_TYPE layer: $LAYER_NAME${NC}" -echo "" - -# Create directory structure -mkdir -p "$TARGET_DIR/modules" - -# Determine variable prefix based on layer type -case $LAYER_TYPE in - provider) - VAR_PREFIX="${LAYER_NAME}_provider" - ;; - network) - VAR_PREFIX="network" - ;; - distribution) - VAR_PREFIX="distribution" - ;; -esac - -# ============================================================================= -# Generate setup script -# ============================================================================= - -cat > "$TARGET_DIR/setup" << 'SETUP_HEADER' -#!/bin/bash -# 
============================================================================= -SETUP_HEADER - -# Capitalize first letter of layer type and name -LAYER_TYPE_CAP="$(echo "${LAYER_TYPE:0:1}" | tr '[:lower:]' '[:upper:]')${LAYER_TYPE:1}" -LAYER_NAME_CAP="$(echo "${LAYER_NAME:0:1}" | tr '[:lower:]' '[:upper:]')${LAYER_NAME:1}" - -cat >> "$TARGET_DIR/setup" << SETUP_META -# ${LAYER_TYPE_CAP}: ${LAYER_NAME} -# -# TODO: Add description of what this layer does. -# -# Required environment variables: -# - TODO: List required env vars -# -# Required context values: -# - TODO: List required context paths -# ============================================================================= - -set -euo pipefail - -SETUP_META - -cat >> "$TARGET_DIR/setup" << 'SETUP_BODY' -echo "🔍 Validating configuration..." -echo "" - -# ============================================================================= -# Input Validation -# ============================================================================= - -# TODO: Add validation for required environment variables -# Example: -# if [ -z "${REQUIRED_VAR:-}" ]; then -# echo " ❌ REQUIRED_VAR is missing" -# echo "" -# echo " 💡 Possible causes:" -# echo " • Variable not set in environment" -# echo "" -# echo " 🔧 How to fix:" -# echo " • export REQUIRED_VAR=value" -# exit 1 -# fi -# echo " ✅ REQUIRED_VAR=$REQUIRED_VAR" - -# TODO: Add validation for context values -# Example: -# config_value=$(echo "$CONTEXT" | jq -r '.path.to.value // empty') -# if [ -z "$config_value" ]; then -# echo " ❌ config_value not found in context" -# exit 1 -# fi -# echo " ✅ config_value=$config_value" - -# ============================================================================= -# External Data Fetching (if needed) -# ============================================================================= - -# TODO: Add API calls to fetch external data -# Example: -# echo "" -# echo " 📡 Fetching resource..." 
-# api_response=$(curl -s "https://api.example.com/resource") -# if [ $? -ne 0 ]; then -# echo " ❌ Failed to fetch resource" -# exit 1 -# fi - -# ============================================================================= -# Update TOFU_VARIABLES -# ============================================================================= - -# TODO: Add layer-specific variables to TOFU_VARIABLES -# Use the appropriate prefix for your layer type: -# - provider: {cloud}_provider object + provider_* vars -# - network: network_* vars -# - distribution: distribution_* vars -# -# Example: -# TOFU_VARIABLES=$(echo "$TOFU_VARIABLES" | jq \ -# --arg var1 "$value1" \ -# --arg var2 "$value2" \ -# '. + { -# layer_variable1: $var1, -# layer_variable2: $var2 -# }') - -# ============================================================================= -# Register Module -# ============================================================================= - -script_dir="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)" -module_name="${script_dir}/modules" - -if [[ -n ${MODULES_TO_USE:-} ]]; then - MODULES_TO_USE="$MODULES_TO_USE,$module_name" -else - MODULES_TO_USE="$module_name" -fi - -echo "" -echo "✨ Configuration completed successfully" -echo "" -SETUP_BODY - -chmod +x "$TARGET_DIR/setup" -echo -e " ${GREEN}✓${NC} Created setup script" - -# ============================================================================= -# Generate main.tf -# ============================================================================= - -cat > "$TARGET_DIR/modules/main.tf" << MAIN_TF -# ============================================================================= -# ${LAYER_TYPE_CAP}: ${LAYER_NAME} -# -# TODO: Add description of resources managed by this module. 
-# ============================================================================= - -# TODO: Add terraform resources -# -# Example for network layer: -# resource "cloudflare_record" "main" { -# zone_id = data.cloudflare_zone.main.id -# name = var.network_subdomain -# value = local.distribution_target_domain -# type = local.distribution_record_type -# ttl = 300 -# proxied = true -# } -# -# Example for distribution layer: -# resource "netlify_site" "main" { -# name = var.distribution_app_name -# custom_domain = local.network_full_domain -# } -MAIN_TF - -echo -e " ${GREEN}✓${NC} Created modules/main.tf" - -# ============================================================================= -# Generate variables.tf -# ============================================================================= - -case $LAYER_TYPE in - provider) - cat > "$TARGET_DIR/modules/variables.tf" << VARIABLES_TF -# ============================================================================= -# Provider Layer Variables -# ============================================================================= - -variable "${LAYER_NAME}_provider" { - description = "${LAYER_NAME_CAP} provider configuration" - type = object({ - # TODO: Define provider-specific fields - # Example: - # api_token = string - # account_id = string - }) -} - -variable "provider_resource_tags_json" { - description = "Resource tags to apply to all resources" - type = map(string) - default = {} -} -VARIABLES_TF - ;; - - network) - cat > "$TARGET_DIR/modules/variables.tf" << 'VARIABLES_TF' -# ============================================================================= -# Network Layer Variables -# -# NOTE: Use consistent naming with other network implementations: -# - network_domain: Base domain (e.g., "example.com") -# - network_subdomain: Subdomain part (e.g., "app") -# - network_dns_zone_name: DNS zone identifier -# ============================================================================= - -variable "network_domain" { - description 
= "Base domain name (e.g., example.com)" - type = string -} - -variable "network_subdomain" { - description = "Subdomain for the application (e.g., app)" - type = string -} - -variable "network_dns_zone_name" { - description = "DNS zone name/identifier" - type = string -} - -# TODO: Add provider-specific variables with network_ prefix -# Example: -# variable "network_zone_id" { -# description = "Cloudflare zone ID" -# type = string -# } -VARIABLES_TF - ;; - - distribution) - cat > "$TARGET_DIR/modules/variables.tf" << 'VARIABLES_TF' -# ============================================================================= -# Distribution Layer Variables -# -# NOTE: Use consistent naming with other distribution implementations: -# - distribution_app_name: Application/site name -# - distribution_storage_account / distribution_bucket: Asset storage -# - distribution_container / distribution_prefix: Asset path -# ============================================================================= - -variable "distribution_app_name" { - description = "Application name for the CDN/hosting" - type = string -} - -# TODO: Add provider-specific variables with distribution_ prefix -# Example for storage-backed CDN: -# variable "distribution_bucket" { -# description = "S3/GCS bucket name for assets" -# type = string -# } -# -# variable "distribution_prefix" { -# description = "Path prefix within the bucket" -# type = string -# default = "" -# } -VARIABLES_TF - ;; -esac - -echo -e " ${GREEN}✓${NC} Created modules/variables.tf" - -# ============================================================================= -# Generate locals.tf -# ============================================================================= - -case $LAYER_TYPE in - provider) - cat > "$TARGET_DIR/modules/locals.tf" << 'LOCALS_TF' -# ============================================================================= -# Provider Layer Locals -# ============================================================================= - -locals { - 
# Provider layers typically don't need many locals - # Add any computed values here -} -LOCALS_TF - ;; - - network) - cat > "$TARGET_DIR/modules/locals.tf" << 'LOCALS_TF' -# ============================================================================= -# Network Layer Locals -# -# These locals are used by the distribution layer for cross-module integration. -# ============================================================================= - -locals { - # Computed full domain - REQUIRED for distribution layer - network_full_domain = "${var.network_subdomain}.${var.network_domain}" - - # Expose base domain for cross-module use - network_domain = var.network_domain -} -LOCALS_TF - ;; - - distribution) - cat > "$TARGET_DIR/modules/locals.tf" << 'LOCALS_TF' -# ============================================================================= -# Distribution Layer Locals -# -# Cross-layer integration with network layer. -# ============================================================================= - -locals { - # Check if custom domain is configured (from network layer) - distribution_has_custom_domain = local.network_full_domain != "" - - # Full domain from network layer - distribution_full_domain = local.network_full_domain - - # TODO: Set these based on your CDN/hosting provider - # These are consumed by the network layer for DNS record creation - # - # distribution_target_domain: The CDN endpoint hostname (e.g., "d123.cloudfront.net") - # distribution_record_type: DNS record type ("CNAME" or "A" for alias) - # - # Example: - # distribution_target_domain = netlify_site.main.ssl_url - # distribution_record_type = "CNAME" -} -LOCALS_TF - ;; -esac - -echo -e " ${GREEN}✓${NC} Created modules/locals.tf" - -# ============================================================================= -# Generate outputs.tf -# ============================================================================= - -case $LAYER_TYPE in - provider) - cat > "$TARGET_DIR/modules/outputs.tf" << 'OUTPUTS_TF' 
-# ============================================================================= -# Provider Layer Outputs -# ============================================================================= - -# Provider layers typically don't have outputs -# The provider configuration is used implicitly by other resources -OUTPUTS_TF - ;; - - network) - cat > "$TARGET_DIR/modules/outputs.tf" << 'OUTPUTS_TF' -# ============================================================================= -# Network Layer Outputs -# ============================================================================= - -output "network_full_domain" { - description = "Full domain name (subdomain.domain)" - value = local.network_full_domain -} - -output "network_website_url" { - description = "Full website URL with protocol" - value = "https://${local.network_full_domain}" -} - -# TODO: Add provider-specific outputs -# Example: -# output "network_fqdn" { -# description = "Fully qualified domain name with trailing dot" -# value = "${local.network_full_domain}." -# } -OUTPUTS_TF - ;; - - distribution) - cat > "$TARGET_DIR/modules/outputs.tf" << 'OUTPUTS_TF' -# ============================================================================= -# Distribution Layer Outputs -# ============================================================================= - -output "distribution_cdn_endpoint_hostname" { - description = "CDN endpoint hostname" - value = local.distribution_target_domain -} - -output "distribution_website_url" { - description = "Website URL (custom domain if configured, otherwise CDN URL)" - value = local.distribution_has_custom_domain ? 
"https://${local.distribution_full_domain}" : "https://${local.distribution_target_domain}" -} - -# TODO: Add provider-specific outputs -# Example: -# output "distribution_site_id" { -# description = "Netlify site ID" -# value = netlify_site.main.id -# } -OUTPUTS_TF - ;; -esac - -echo -e " ${GREEN}✓${NC} Created modules/outputs.tf" - -# ============================================================================= -# Generate test_locals.tf -# ============================================================================= - -case $LAYER_TYPE in - provider) - cat > "$TARGET_DIR/modules/test_locals.tf" << 'TEST_LOCALS_TF' -# ============================================================================= -# Test-Only Locals -# -# NOTE: Files matching test_*.tf are skipped by compose_modules. -# This file provides stubs for unit testing in isolation. -# ============================================================================= - -# Provider layers typically don't need test locals -TEST_LOCALS_TF - ;; - - network) - cat > "$TARGET_DIR/modules/test_locals.tf" << 'TEST_LOCALS_TF' -# ============================================================================= -# Test-Only Locals -# -# NOTE: Files matching test_*.tf are skipped by compose_modules. -# This file provides stubs for unit testing in isolation. 
-# ============================================================================= - -# Network layer needs distribution layer outputs for DNS records -variable "distribution_target_domain" { - description = "Test-only: CDN endpoint hostname from distribution layer" - type = string - default = "test-cdn.example.net" -} - -variable "distribution_record_type" { - description = "Test-only: DNS record type from distribution layer" - type = string - default = "CNAME" -} - -locals { - distribution_target_domain = var.distribution_target_domain - distribution_record_type = var.distribution_record_type -} -TEST_LOCALS_TF - ;; - - distribution) - cat > "$TARGET_DIR/modules/test_locals.tf" << 'TEST_LOCALS_TF' -# ============================================================================= -# Test-Only Locals -# -# NOTE: Files matching test_*.tf are skipped by compose_modules. -# This file provides stubs for unit testing in isolation. -# ============================================================================= - -# Distribution layer needs network layer outputs for custom domain -variable "network_full_domain" { - description = "Test-only: Full domain from network layer" - type = string - default = "" -} - -variable "network_domain" { - description = "Test-only: Base domain from network layer" - type = string - default = "example.com" -} - -locals { - network_full_domain = var.network_full_domain - network_domain = var.network_domain -} -TEST_LOCALS_TF - ;; -esac - -echo -e " ${GREEN}✓${NC} Created modules/test_locals.tf" - -# ============================================================================= -# Create test directory structure -# ============================================================================= - -TEST_DIR="$DEPLOYMENT_DIR/tests/$LAYER_TYPE/$LAYER_NAME" -mkdir -p "$TEST_DIR" - -cat > "$TEST_DIR/${LAYER_NAME}.tftest.hcl" << TEST_HCL -# ============================================================================= -# Unit Tests: 
${LAYER_TYPE}/${LAYER_NAME} -# ============================================================================= - -mock_provider "${LAYER_NAME}" {} - -# ============================================================================= -# Test Variables -# ============================================================================= - -variables { - # TODO: Add test variable values -} - -# ============================================================================= -# Tests -# ============================================================================= - -run "test_basic_configuration" { - command = plan - - # TODO: Add assertions - # assert { - # condition = resource.type.name != null - # error_message = "Resource should be created" - # } -} -TEST_HCL - -echo -e " ${GREEN}✓${NC} Created tests/$LAYER_TYPE/$LAYER_NAME/${LAYER_NAME}.tftest.hcl" - -# ============================================================================= -# Summary -# ============================================================================= - -echo "" -echo -e "${GREEN}Layer created successfully!${NC}" -echo "" -echo "Created structure:" -echo " $TARGET_DIR/" -echo " ├── setup" -echo " └── modules/" -echo " ├── main.tf" -echo " ├── variables.tf" -echo " ├── locals.tf" -echo " ├── outputs.tf" -echo " └── test_locals.tf" -echo "" -echo " $TEST_DIR/" -echo " └── ${LAYER_NAME}.tftest.hcl" -echo "" -echo -e "${YELLOW}Next steps:${NC}" -echo " 1. Edit the setup script to add validation and TOFU_VARIABLES" -echo " 2. Edit modules/main.tf to add Terraform resources" -echo " 3. Update modules/variables.tf with required inputs" -echo " 4. Update modules/locals.tf with cross-layer references" -echo " 5. Add unit tests to tests/$LAYER_TYPE/$LAYER_NAME/" -echo "" -echo -e "${CYAN}Tip:${NC} See frontend/README.md for the Claude prompt template" -echo " to help implement the setup script and Terraform files." 
-echo "" diff --git a/frontend/deployment/tests/integration/mocks/asset_repository/list_provider_spec.json b/frontend/deployment/tests/integration/mocks/asset_repository/list_provider_spec.json index e723a0fd..29903863 100644 --- a/frontend/deployment/tests/integration/mocks/asset_repository/list_provider_spec.json +++ b/frontend/deployment/tests/integration/mocks/asset_repository/list_provider_spec.json @@ -18,4 +18,4 @@ } ] } -} +} \ No newline at end of file diff --git a/frontend/deployment/tests/integration/test_cases/aws_cloudfront_route53/lifecycle_test.bats b/frontend/deployment/tests/integration/test_cases/aws_cloudfront_route53/lifecycle_test.bats index ce2f2ea6..9b99dfb1 100644 --- a/frontend/deployment/tests/integration/test_cases/aws_cloudfront_route53/lifecycle_test.bats +++ b/frontend/deployment/tests/integration/test_cases/aws_cloudfront_route53/lifecycle_test.bats @@ -127,4 +127,4 @@ setup() { assert_cloudfront_not_configured "$TEST_DISTRIBUTION_COMMENT" assert_route53_not_configured "$TEST_NETWORK_FULL_DOMAIN" "A" "$HOSTED_ZONE_ID" -} +} \ No newline at end of file diff --git a/frontend/deployment/tests/integration/test_cases/azure_blobcdn_azuredns/cdn_assertions.bash b/frontend/deployment/tests/integration/test_cases/azure_blobcdn_azuredns/cdn_assertions.bash index e7149095..de481447 100644 --- a/frontend/deployment/tests/integration/test_cases/azure_blobcdn_azuredns/cdn_assertions.bash +++ b/frontend/deployment/tests/integration/test_cases/azure_blobcdn_azuredns/cdn_assertions.bash @@ -136,4 +136,4 @@ assert_azure_cdn_not_configured() { fi return 0 -} +} \ No newline at end of file diff --git a/frontend/deployment/tests/integration/test_cases/azure_blobcdn_azuredns/lifecycle_test.bats b/frontend/deployment/tests/integration/test_cases/azure_blobcdn_azuredns/lifecycle_test.bats index 78622454..6e0eedbc 100644 --- a/frontend/deployment/tests/integration/test_cases/azure_blobcdn_azuredns/lifecycle_test.bats +++ 
b/frontend/deployment/tests/integration/test_cases/azure_blobcdn_azuredns/lifecycle_test.bats @@ -152,4 +152,4 @@ setup() { "$TEST_NETWORK_DOMAIN" \ "$TEST_SUBSCRIPTION_ID" \ "$TEST_DNS_ZONE_RESOURCE_GROUP" -} +} \ No newline at end of file diff --git a/frontend/deployment/tests/provider/aws/setup_test.bats b/frontend/deployment/tests/provider/aws/setup_test.bats index 4fc50557..b8bba606 100644 --- a/frontend/deployment/tests/provider/aws/setup_test.bats +++ b/frontend/deployment/tests/provider/aws/setup_test.bats @@ -182,4 +182,4 @@ run_aws_setup() { run_aws_setup assert_equal "$MODULES_TO_USE" "existing/module,$PROJECT_DIR/provider/aws/modules" -} +} \ No newline at end of file diff --git a/frontend/deployment/tests/scripts/build_context_test.bats b/frontend/deployment/tests/scripts/build_context_test.bats index 1c49fe5a..7b624138 100644 --- a/frontend/deployment/tests/scripts/build_context_test.bats +++ b/frontend/deployment/tests/scripts/build_context_test.bats @@ -146,4 +146,4 @@ run_build_context() { }' assert_json_equal "$RESOURCE_TAGS_JSON" "$expected" "RESOURCE_TAGS_JSON" -} +} \ No newline at end of file diff --git a/frontend/deployment/tests/scripts/do_tofu_test.bats b/frontend/deployment/tests/scripts/do_tofu_test.bats index 67748dbd..0a108b8c 100644 --- a/frontend/deployment/tests/scripts/do_tofu_test.bats +++ b/frontend/deployment/tests/scripts/do_tofu_test.bats @@ -228,4 +228,4 @@ EOF run bash "$SCRIPT_PATH" assert_equal "$status" "1" -} +} \ No newline at end of file diff --git a/frontend/deployment/tofu_state/aws/modules/provider.tf b/frontend/deployment/tofu_state/aws/modules/provider.tf new file mode 100644 index 00000000..c6ef3b81 --- /dev/null +++ b/frontend/deployment/tofu_state/aws/modules/provider.tf @@ -0,0 +1,20 @@ +terraform { + required_version = ">= 1.4.0" + + required_providers { + aws = { + source = "hashicorp/aws" + version = "~> 5.0" + } + } + + backend "s3" {} +} + +provider "aws" { + region = var.aws_provider.region + + 
default_tags { + tags = var.provider_resource_tags_json + } +} \ No newline at end of file diff --git a/frontend/deployment/tofu_state/aws/modules/variables.tf b/frontend/deployment/tofu_state/aws/modules/variables.tf new file mode 100644 index 00000000..27d2535b --- /dev/null +++ b/frontend/deployment/tofu_state/aws/modules/variables.tf @@ -0,0 +1,14 @@ +variable "aws_provider" { + description = "AWS provider configuration" + type = object({ + region = string + state_bucket = string + lock_table = string + }) +} + +variable "provider_resource_tags_json" { + description = "Resource tags as JSON object - applied as default tags to all AWS resources" + type = map(string) + default = {} +} \ No newline at end of file diff --git a/frontend/deployment/tofu_state/aws/setup b/frontend/deployment/tofu_state/aws/setup new file mode 100755 index 00000000..c12d6280 --- /dev/null +++ b/frontend/deployment/tofu_state/aws/setup @@ -0,0 +1,35 @@ +#!/bin/bash + +if [ -z "${AWS_REGION:-}" ]; then + echo "✗ AWS_REGION is not set" + exit 1 +fi + +if [ -z "${TOFU_STATE_BUCKET:-}" ]; then + echo "✗ TOFU_STATE_BUCKET is not set" + exit 1 +fi + +if [ -z "${TOFU_LOCK_TABLE:-}" ]; then + echo "✗ TOFU_LOCK_TABLE is not set" + exit 1 +fi + +TOFU_VARIABLES=$(echo "$TOFU_VARIABLES" | jq \ + --arg aws_region "$AWS_REGION" \ + --arg tf_state_bucket "$TOFU_STATE_BUCKET" \ + --arg tf_lock_table "$TOFU_LOCK_TABLE" \ + '. 
+ {aws_provider: {region: $aws_region, state_bucket: $tf_state_bucket, lock_table: $tf_lock_table}}') + +TOFU_INIT_VARIABLES="$TOFU_INIT_VARIABLES -backend-config=\"bucket=$TOFU_STATE_BUCKET\"" +TOFU_INIT_VARIABLES="$TOFU_INIT_VARIABLES -backend-config=\"region=$AWS_REGION\"" +TOFU_INIT_VARIABLES="$TOFU_INIT_VARIABLES -backend-config=\"dynamodb_table=$TOFU_LOCK_TABLE\"" + +script_dir="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)" +module_name="${script_dir}/modules" + +if [[ -n $MODULES_TO_USE ]]; then + MODULES_TO_USE="$MODULES_TO_USE,$module_name" +else + MODULES_TO_USE="$module_name" +fi \ No newline at end of file diff --git a/frontend/deployment/tofu_state/azure/modules/provider.tf b/frontend/deployment/tofu_state/azure/modules/provider.tf new file mode 100644 index 00000000..285e92ea --- /dev/null +++ b/frontend/deployment/tofu_state/azure/modules/provider.tf @@ -0,0 +1,17 @@ +terraform { + required_version = ">= 1.4.0" + + required_providers { + azurerm = { + source = "hashicorp/azurerm" + version = "~> 3.0" + } + } + + backend "azurerm" {} +} + +provider "azurerm" { + features {} + subscription_id = var.azure_provider.subscription_id +} diff --git a/frontend/deployment/tofu_state/azure/modules/variables.tf b/frontend/deployment/tofu_state/azure/modules/variables.tf new file mode 100644 index 00000000..843cad7a --- /dev/null +++ b/frontend/deployment/tofu_state/azure/modules/variables.tf @@ -0,0 +1,9 @@ +variable "azure_provider" { + description = "Azure provider configuration" + type = object({ + subscription_id = string + resource_group_name = string + storage_account_name = string + container_name = string + }) +} \ No newline at end of file diff --git a/frontend/deployment/tofu_state/azure/setup b/frontend/deployment/tofu_state/azure/setup new file mode 100755 index 00000000..bff5aa97 --- /dev/null +++ b/frontend/deployment/tofu_state/azure/setup @@ -0,0 +1,46 @@ +#!/bin/bash + +if [ -z "${AZURE_SUBSCRIPTION_ID:-}" ]; then + echo "✗ 
AZURE_SUBSCRIPTION_ID is not set" + exit 1 +fi + +if [ -z "${TOFU_STATE_RESOURCE_GROUP:-}" ]; then + echo "✗ TOFU_STATE_RESOURCE_GROUP is not set" + exit 1 +fi + +if [ -z "${TOFU_STATE_STORAGE_ACCOUNT:-}" ]; then + echo "✗ TOFU_STATE_STORAGE_ACCOUNT is not set" + exit 1 +fi + +if [ -z "${TOFU_STATE_CONTAINER:-}" ]; then + echo "✗ TOFU_STATE_CONTAINER is not set" + exit 1 +fi + +TOFU_VARIABLES=$(echo "$TOFU_VARIABLES" | jq \ + --arg subscription_id "$AZURE_SUBSCRIPTION_ID" \ + --arg resource_group "$TOFU_STATE_RESOURCE_GROUP" \ + --arg storage_account "$TOFU_STATE_STORAGE_ACCOUNT" \ + --arg container "$TOFU_STATE_CONTAINER" \ + '. + {azure_provider: { + subscription_id: $subscription_id, + resource_group_name: $resource_group, + storage_account_name: $storage_account, + container_name: $container + }}') + +TOFU_INIT_VARIABLES="$TOFU_INIT_VARIABLES -backend-config=\"resource_group_name=$TOFU_STATE_RESOURCE_GROUP\"" +TOFU_INIT_VARIABLES="$TOFU_INIT_VARIABLES -backend-config=\"storage_account_name=$TOFU_STATE_STORAGE_ACCOUNT\"" +TOFU_INIT_VARIABLES="$TOFU_INIT_VARIABLES -backend-config=\"container_name=$TOFU_STATE_CONTAINER\"" + +script_dir="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)" +module_name="${script_dir}/modules" + +if [[ -n $MODULES_TO_USE ]]; then + MODULES_TO_USE="$MODULES_TO_USE,$module_name" +else + MODULES_TO_USE="$module_name" +fi diff --git a/frontend/deployment/tofu_state/gcp/modules/provider.tf b/frontend/deployment/tofu_state/gcp/modules/provider.tf new file mode 100644 index 00000000..25db4e39 --- /dev/null +++ b/frontend/deployment/tofu_state/gcp/modules/provider.tf @@ -0,0 +1,26 @@ +terraform { + required_version = ">= 1.4.0" + + required_providers { + google = { + source = "hashicorp/google" + version = "~> 5.0" + } + google-beta = { + source = "hashicorp/google-beta" + version = "~> 5.0" + } + } + + backend "gcs" {} +} + +provider "google" { + project = var.gcp_provider.project + region = var.gcp_provider.region +} + +provider "google-beta" { 
+ project = var.gcp_provider.project + region = var.gcp_provider.region +} diff --git a/frontend/deployment/tofu_state/gcp/modules/variables.tf b/frontend/deployment/tofu_state/gcp/modules/variables.tf new file mode 100644 index 00000000..12a6b20d --- /dev/null +++ b/frontend/deployment/tofu_state/gcp/modules/variables.tf @@ -0,0 +1,8 @@ +variable "gcp_provider" { + description = "GCP provider configuration" + type = object({ + project = string + region = string + bucket = string + }) +} diff --git a/frontend/deployment/tofu_state/gcp/setup b/frontend/deployment/tofu_state/gcp/setup new file mode 100755 index 00000000..3d021672 --- /dev/null +++ b/frontend/deployment/tofu_state/gcp/setup @@ -0,0 +1,37 @@ +#!/bin/bash + +if [ -z "${GCP_PROJECT:-}" ]; then + echo "✗ GCP_PROJECT is not set" + exit 1 +fi + +if [ -z "${GCP_REGION:-}" ]; then + echo "✗ GCP_REGION is not set" + exit 1 +fi + +if [ -z "${TOFU_STATE_BUCKET:-}" ]; then + echo "✗ TOFU_STATE_BUCKET is not set" + exit 1 +fi + +TOFU_VARIABLES=$(echo "$TOFU_VARIABLES" | jq \ + --arg project "$GCP_PROJECT" \ + --arg region "$GCP_REGION" \ + --arg bucket "$TOFU_STATE_BUCKET" \ + '. 
+ {gcp_provider: {
+      project: $project,
+      region: $region,
+      bucket: $bucket
+    }}')
+
+TOFU_INIT_VARIABLES="$TOFU_INIT_VARIABLES -backend-config=\"bucket=$TOFU_STATE_BUCKET\""
+
+script_dir="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
+module_name="${script_dir}/modules"
+
+if [[ -n $MODULES_TO_USE ]]; then
+    MODULES_TO_USE="$MODULES_TO_USE,$module_name"
+else
+    MODULES_TO_USE="$module_name"
+fi
\ No newline at end of file
diff --git a/k8s/instance/build_context b/k8s/instance/build_context
index ce0b3c89..5c6c89c9 100644
--- a/k8s/instance/build_context
+++ b/k8s/instance/build_context
@@ -1,4 +1,4 @@
-#!/bin/bash
+#!/bin/bash
 
 ARGUMENTS=$(echo "$CONTEXT" | jq -r '.arguments // empty')
 
diff --git a/testing/bin/az b/testing/bin/az
deleted file mode 100755
index 67e18dcc..00000000
--- a/testing/bin/az
+++ /dev/null
@@ -1,265 +0,0 @@
-#!/bin/bash
-# =============================================================================
-# Mock Azure CLI for Integration Testing
-#
-# This script emulates the Azure CLI by querying the Azure Mock server.
-# Only implements the commands needed for integration testing.
-# -# Supported commands: -# az network dns zone show --name --resource-group -# az network dns record-set cname show --name --zone-name --resource-group -# az cdn profile show --name --resource-group -# az cdn endpoint show --name --profile-name --resource-group -# az storage account show --name --resource-group -# ============================================================================= - -AZURE_MOCK_ENDPOINT="${AZURE_MOCK_ENDPOINT:-http://localhost:8090}" -ARM_SUBSCRIPTION_ID="${ARM_SUBSCRIPTION_ID:-mock-subscription-id}" - -# Parse command structure -cmd="$1" -subcmd="$2" -action="$3" - -case "$cmd" in - network) - case "$subcmd" in - dns) - case "$action" in - zone) - shift 3 - subaction="$1" - case "$subaction" in - show) - shift - # Parse arguments - while [[ $# -gt 0 ]]; do - case $1 in - --name|-n) zone_name="$2"; shift 2 ;; - --resource-group|-g) resource_group="$2"; shift 2 ;; - *) shift ;; - esac - done - - if [[ -z "$zone_name" || -z "$resource_group" ]]; then - echo "ERROR: --name and --resource-group are required" >&2 - exit 1 - fi - - # Query Azure Mock - response=$(curl -s "${AZURE_MOCK_ENDPOINT}/subscriptions/${ARM_SUBSCRIPTION_ID}/resourceGroups/${resource_group}/providers/Microsoft.Network/dnszones/${zone_name}") - - # Check for error - error_code=$(echo "$response" | jq -r '.error.code // empty') - if [[ -n "$error_code" ]]; then - echo "ERROR: ResourceNotFound - The DNS zone '${zone_name}' was not found." 
>&2 - exit 1 - fi - - echo "$response" - ;; - *) - echo "ERROR: Unknown subaction: $subaction" >&2 - exit 1 - ;; - esac - ;; - record-set) - shift 3 - record_type="$1" - subaction="$2" - case "$record_type" in - cname) - case "$subaction" in - show) - shift 2 - # Parse arguments - while [[ $# -gt 0 ]]; do - case $1 in - --name|-n) record_name="$2"; shift 2 ;; - --zone-name|-z) zone_name="$2"; shift 2 ;; - --resource-group|-g) resource_group="$2"; shift 2 ;; - *) shift ;; - esac - done - - if [[ -z "$record_name" || -z "$zone_name" || -z "$resource_group" ]]; then - echo "ERROR: --name, --zone-name, and --resource-group are required" >&2 - exit 1 - fi - - # Query Azure Mock - response=$(curl -s "${AZURE_MOCK_ENDPOINT}/subscriptions/${ARM_SUBSCRIPTION_ID}/resourceGroups/${resource_group}/providers/Microsoft.Network/dnszones/${zone_name}/CNAME/${record_name}") - - # Check for error - error_code=$(echo "$response" | jq -r '.error.code // empty') - if [[ -n "$error_code" ]]; then - echo "ERROR: ResourceNotFound - The CNAME record '${record_name}' was not found." 
>&2 - exit 1 - fi - - echo "$response" - ;; - *) - echo "ERROR: Unknown subaction: $subaction" >&2 - exit 1 - ;; - esac - ;; - *) - echo "ERROR: Unknown record type: $record_type" >&2 - exit 1 - ;; - esac - ;; - *) - echo "ERROR: Unknown dns action: $action" >&2 - exit 1 - ;; - esac - ;; - *) - echo "ERROR: Unknown network subcommand: $subcmd" >&2 - exit 1 - ;; - esac - ;; - - cdn) - case "$subcmd" in - profile) - case "$action" in - show) - shift 3 - # Parse arguments - while [[ $# -gt 0 ]]; do - case $1 in - --name|-n) profile_name="$2"; shift 2 ;; - --resource-group|-g) resource_group="$2"; shift 2 ;; - *) shift ;; - esac - done - - if [[ -z "$profile_name" || -z "$resource_group" ]]; then - echo "ERROR: --name and --resource-group are required" >&2 - exit 1 - fi - - # Query Azure Mock - response=$(curl -s "${AZURE_MOCK_ENDPOINT}/subscriptions/${ARM_SUBSCRIPTION_ID}/resourceGroups/${resource_group}/providers/Microsoft.Cdn/profiles/${profile_name}") - - # Check for error - error_code=$(echo "$response" | jq -r '.error.code // empty') - if [[ -n "$error_code" ]]; then - echo "ERROR: ResourceNotFound - The CDN profile '${profile_name}' was not found." 
>&2 - exit 1 - fi - - echo "$response" - ;; - *) - echo "ERROR: Unknown cdn profile action: $action" >&2 - exit 1 - ;; - esac - ;; - endpoint) - case "$action" in - show) - shift 3 - # Parse arguments - while [[ $# -gt 0 ]]; do - case $1 in - --name|-n) endpoint_name="$2"; shift 2 ;; - --profile-name) profile_name="$2"; shift 2 ;; - --resource-group|-g) resource_group="$2"; shift 2 ;; - *) shift ;; - esac - done - - if [[ -z "$endpoint_name" || -z "$profile_name" || -z "$resource_group" ]]; then - echo "ERROR: --name, --profile-name, and --resource-group are required" >&2 - exit 1 - fi - - # Query Azure Mock - response=$(curl -s "${AZURE_MOCK_ENDPOINT}/subscriptions/${ARM_SUBSCRIPTION_ID}/resourceGroups/${resource_group}/providers/Microsoft.Cdn/profiles/${profile_name}/endpoints/${endpoint_name}") - - # Check for error - error_code=$(echo "$response" | jq -r '.error.code // empty') - if [[ -n "$error_code" ]]; then - echo "ERROR: ResourceNotFound - The CDN endpoint '${endpoint_name}' was not found." 
>&2 - exit 1 - fi - - echo "$response" - ;; - *) - echo "ERROR: Unknown cdn endpoint action: $action" >&2 - exit 1 - ;; - esac - ;; - *) - echo "ERROR: Unknown cdn subcommand: $subcmd" >&2 - exit 1 - ;; - esac - ;; - - storage) - case "$subcmd" in - account) - case "$action" in - show) - shift 3 - # Parse arguments - while [[ $# -gt 0 ]]; do - case $1 in - --name|-n) account_name="$2"; shift 2 ;; - --resource-group|-g) resource_group="$2"; shift 2 ;; - *) shift ;; - esac - done - - if [[ -z "$account_name" || -z "$resource_group" ]]; then - echo "ERROR: --name and --resource-group are required" >&2 - exit 1 - fi - - # Query Azure Mock - response=$(curl -s "${AZURE_MOCK_ENDPOINT}/subscriptions/${ARM_SUBSCRIPTION_ID}/resourceGroups/${resource_group}/providers/Microsoft.Storage/storageAccounts/${account_name}") - - # Check for error - error_code=$(echo "$response" | jq -r '.error.code // empty') - if [[ -n "$error_code" ]]; then - echo "ERROR: ResourceNotFound - The storage account '${account_name}' was not found." >&2 - exit 1 - fi - - echo "$response" - ;; - *) - echo "ERROR: Unknown storage account action: $action" >&2 - exit 1 - ;; - esac - ;; - *) - echo "ERROR: Unknown storage subcommand: $subcmd" >&2 - exit 1 - ;; - esac - ;; - - version) - echo '{"azure-cli": "2.99.0-mock", "azure-cli-core": "2.99.0-mock"}' - ;; - - *) - echo "ERROR: Unknown command: $cmd" >&2 - echo "This is a mock Azure CLI for integration testing." 
>&2 - echo "Supported commands: network dns zone, cdn profile/endpoint, storage account" >&2 - exit 1 - ;; -esac diff --git a/testing/docker/azure-mock/main.go b/testing/docker/azure-mock/main.go index 74439640..0ade7e2b 100644 --- a/testing/docker/azure-mock/main.go +++ b/testing/docker/azure-mock/main.go @@ -52,6 +52,7 @@ type Store struct { metricAlerts map[string]MetricAlert diagnosticSettings map[string]DiagnosticSetting trafficRouting map[string][]TrafficRoutingRule + webAppSettings map[string]map[string]string // key: lowercase resource ID → app settings key/value } // TrafficRoutingRule represents a traffic routing rule for a slot @@ -82,6 +83,7 @@ func NewStore() *Store { metricAlerts: make(map[string]MetricAlert), diagnosticSettings: make(map[string]DiagnosticSetting), trafficRouting: make(map[string][]TrafficRoutingRule), + webAppSettings: make(map[string]map[string]string), } } @@ -532,6 +534,12 @@ func (s *Server) ServeHTTP(w http.ResponseWriter, r *http.Request) { return } + // List subscriptions endpoint + if matchListSubscriptions(path) { + s.handleListSubscriptions(w, r) + return + } + // Subscription endpoint if matchSubscription(path) { s.handleSubscription(w, r) @@ -640,6 +648,7 @@ func (s *Server) ServeHTTP(w http.ResponseWriter, r *http.Request) { // ============================================================================= var ( + listSubscriptionsRegex = regexp.MustCompile(`^/subscriptions$`) subscriptionRegex = regexp.MustCompile(`^/subscriptions/[^/]+$`) listProvidersRegex = regexp.MustCompile(`^/subscriptions/[^/]+/providers$`) providerRegistrationRegex = regexp.MustCompile(`/subscriptions/[^/]+/providers/Microsoft\.[^/]+$`) @@ -684,6 +693,7 @@ var ( diagnosticSettingRegex = regexp.MustCompile(`(?i)/providers/Microsoft\.Insights/diagnosticSettings/[^/]+$`) ) +func matchListSubscriptions(path string) bool { return listSubscriptionsRegex.MatchString(path) } func matchSubscription(path string) bool { return 
subscriptionRegex.MatchString(path) } func matchListProviders(path string) bool { return listProvidersRegex.MatchString(path) } func matchProviderRegistration(path string) bool { return providerRegistrationRegex.MatchString(path) } @@ -2052,12 +2062,30 @@ func (s *Server) handleWebAppAppSettings(w http.ResponseWriter, r *http.Request) return } - // Return empty app settings + // Build app resource ID from path to look up stored settings + path := r.URL.Path + parts := strings.Split(path, "/") + subscriptionID := parts[2] + resourceGroup := parts[4] + appName := parts[8] + appResourceID := fmt.Sprintf("/subscriptions/%s/resourceGroups/%s/providers/Microsoft.Web/sites/%s", + subscriptionID, resourceGroup, appName) + storeKey := strings.ToLower(appResourceID) + + s.store.mu.RLock() + settings := s.store.webAppSettings[storeKey] + s.store.mu.RUnlock() + + properties := map[string]string{} + if settings != nil { + properties = settings + } + response := map[string]interface{}{ - "id": r.URL.Path, + "id": path, "name": "appsettings", "type": "Microsoft.Web/sites/config", - "properties": map[string]string{}, + "properties": properties, } w.WriteHeader(http.StatusOK) @@ -2270,16 +2298,35 @@ func (s *Server) handleWebAppConfigFallback(w http.ResponseWriter, r *http.Reque // Return an empty properties response which should work for most cases path := r.URL.Path - // Extract config name from path + // Extract config name and build app resource ID from path parts := strings.Split(path, "/") configName := "unknown" for i, p := range parts { - if p == "config" && i+1 < len(parts) { + if strings.EqualFold(p, "config") && i+1 < len(parts) { configName = parts[i+1] break } } + // Persist app settings when the provider writes them via PUT + if strings.EqualFold(configName, "appsettings") && (r.Method == http.MethodPut || r.Method == http.MethodPatch) { + subscriptionID := parts[2] + resourceGroup := parts[4] + appName := parts[8] + appResourceID := 
fmt.Sprintf("/subscriptions/%s/resourceGroups/%s/providers/Microsoft.Web/sites/%s", + subscriptionID, resourceGroup, appName) + storeKey := strings.ToLower(appResourceID) + + var req struct { + Properties map[string]string `json:"properties"` + } + if err := json.NewDecoder(r.Body).Decode(&req); err == nil && req.Properties != nil { + s.store.mu.Lock() + s.store.webAppSettings[storeKey] = req.Properties + s.store.mu.Unlock() + } + } + response := map[string]interface{}{ "id": path, "name": configName, @@ -3609,9 +3656,25 @@ func (s *Server) handleProviderRegistration(w http.ResponseWriter, r *http.Reque } // ============================================================================= -// Subscription Handler +// Subscription Handlers // ============================================================================= +func (s *Server) handleListSubscriptions(w http.ResponseWriter, r *http.Request) { + // Return a mock list of subscriptions for az account set + response := map[string]interface{}{ + "value": []map[string]interface{}{ + { + "id": "/subscriptions/mock-subscription-id", + "subscriptionId": "mock-subscription-id", + "displayName": "Mock Subscription", + "state": "Enabled", + "tenantId": "mock-tenant-id", + }, + }, + } + json.NewEncoder(w).Encode(response) +} + func (s *Server) handleSubscription(w http.ResponseWriter, r *http.Request) { path := r.URL.Path parts := strings.Split(path, "/") @@ -3637,7 +3700,7 @@ func main() { log.Println("=====================") log.Println("ARM Endpoints:") log.Println(" OAuth Token: /{tenant}/oauth2/token (POST)") - log.Println(" Subscriptions: /subscriptions/{sub}") + log.Println(" Subscriptions: /subscriptions (list), /subscriptions/{sub}") log.Println(" CDN Profiles: .../Microsoft.Cdn/profiles/{name}") log.Println(" CDN Endpoints: .../Microsoft.Cdn/profiles/{profile}/endpoints/{name}") log.Println(" DNS Zones: .../Microsoft.Network/dnszones/{name}") diff --git a/testing/run_bats_tests.sh b/testing/run_bats_tests.sh index 
d17384e6..36d72173 100755 --- a/testing/run_bats_tests.sh +++ b/testing/run_bats_tests.sh @@ -191,4 +191,4 @@ if [ ${#FAILED_TESTS[@]} -gt 0 ]; then exit 1 fi -echo -e "${GREEN}All BATS tests passed!${NC}" +echo -e "${GREEN}All BATS tests passed!${NC}" \ No newline at end of file