diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml
new file mode 100644
index 00000000..e228c8e1
--- /dev/null
+++ b/.github/workflows/release.yml
@@ -0,0 +1,354 @@
+name: Release Management
+
+on:
+ pull_request:
+ types: [opened, synchronize, reopened, labeled, unlabeled, closed]
+ branches:
+ - main
+ workflow_dispatch:
+ inputs:
+ branch:
+ description: 'Branch to create release from'
+ required: true
+ type: string
+ default: 'main'
+ version:
+ description: 'Version number (e.g., 1.9.1)'
+ required: true
+ type: string
+
+permissions:
+ contents: write
+ pull-requests: write
+ checks: write
+
+jobs:
+ # Job 1: Check that PR has version label (runs on open/sync/label changes)
+ check-version-label:
+ if: |
+ github.event.pull_request.state == 'open' &&
+ github.head_ref == 'beta'
+ runs-on: ubuntu-latest
+
+ steps:
+ - name: Check for version label
+ uses: actions/github-script@v7
+ with:
+ script: |
+ const labels = context.payload.pull_request.labels.map(l => l.name);
+
+ // Find label that matches version pattern (v1.9.1 or 1.9.1)
+ const versionLabel = labels.find(l => /^v?\d+\.\d+\.\d+$/.test(l));
+
+ if (versionLabel) {
+ console.log(`Found version label: ${versionLabel}`);
+
+ // Post success comment
+ const comments = await github.rest.issues.listComments({
+ owner: context.repo.owner,
+ repo: context.repo.repo,
+ issue_number: context.issue.number,
+ });
+
+ const botComment = comments.data.find(comment =>
+ comment.user.type === 'Bot' &&
+ comment.body.includes('Version Label Check')
+ );
+
+ const body = `## ✅ Version Label Check\n\n**Version**: \`${versionLabel}\`\n\nThis PR is ready to merge. The CHANGELOG will be automatically updated with this version.`;
+
+ if (botComment) {
+ await github.rest.issues.updateComment({
+ owner: context.repo.owner,
+ repo: context.repo.repo,
+ comment_id: botComment.id,
+ body: body
+ });
+ } else {
+ await github.rest.issues.createComment({
+ owner: context.repo.owner,
+ repo: context.repo.repo,
+ issue_number: context.issue.number,
+ body: body
+ });
+ }
+ } else {
+ console.log('No version label found');
+
+ // Post error comment
+ const body = `## ❌ Version Label Check\n\n**Missing version label**\n\nPlease add a label with the version number (e.g., \`v1.9.1\` or \`1.9.1\`) before merging.\n\nThe version label will be used to automatically update the CHANGELOG.`;
+
+ const comments = await github.rest.issues.listComments({
+ owner: context.repo.owner,
+ repo: context.repo.repo,
+ issue_number: context.issue.number,
+ });
+
+ const botComment = comments.data.find(comment =>
+ comment.user.type === 'Bot' &&
+ comment.body.includes('Version Label Check')
+ );
+
+ if (botComment) {
+ await github.rest.issues.updateComment({
+ owner: context.repo.owner,
+ repo: context.repo.repo,
+ comment_id: botComment.id,
+ body: body
+ });
+ } else {
+ await github.rest.issues.createComment({
+ owner: context.repo.owner,
+ repo: context.repo.repo,
+ issue_number: context.issue.number,
+ body: body
+ });
+ }
+
+ core.setFailed('Missing version label');
+ }
+
+ # Job 2: Create release on merge (runs when PR is merged)
+ create-release:
+ if: |
+ github.event.pull_request.merged == true &&
+ github.event.pull_request.head.ref == 'beta'
+ runs-on: ubuntu-latest
+
+ steps:
+ - name: Checkout repository
+ uses: actions/checkout@v4
+ with:
+ ref: main
+ fetch-depth: 0
+ token: ${{ secrets.GITHUB_TOKEN }}
+
+ - name: Extract version from PR labels
+ id: version
+ uses: actions/github-script@v7
+ with:
+ script: |
+ const labels = context.payload.pull_request.labels.map(l => l.name);
+
+ // Find label that matches version pattern
+ const versionLabel = labels.find(l => /^v?\d+\.\d+\.\d+$/.test(l));
+
+ if (!versionLabel) {
+ core.setFailed('No version label found on merged PR');
+ return;
+ }
+
+ // Remove 'v' prefix if present
+ const version = versionLabel.replace(/^v/, '');
+ console.log(`Using version: ${version}`);
+ core.setOutput('version', version);
+
+ - name: Get current date
+ id: date
+ run: echo "today=$(date +'%Y-%m-%d')" >> $GITHUB_OUTPUT
+
+ - name: Update CHANGELOG
+ run: |
+ VERSION="${{ steps.version.outputs.version }}"
+ DATE="${{ steps.date.outputs.today }}"
+
+ # Check if unreleased section exists
+ if ! grep -q "## \[Unreleased\]" CHANGELOG.md; then
+ echo "Error: No [Unreleased] section found in CHANGELOG.md"
+ exit 1
+ fi
+
+ # Replace [Unreleased] with the version and date
+ sed -i.bak "s/## \[Unreleased\]/## [$VERSION] - $DATE/" CHANGELOG.md
+ rm CHANGELOG.md.bak
+
+ # Add new empty Unreleased section at the top
+ awk '/^## \[/ && !found {
+ print "## [Unreleased]\n"
+ found=1
+ }
+ {print}' CHANGELOG.md > CHANGELOG.tmp && mv CHANGELOG.tmp CHANGELOG.md
+
+ - name: Configure Git
+ run: |
+ git config user.name "github-actions[bot]"
+ git config user.email "github-actions[bot]@users.noreply.github.com"
+
+ - name: Commit and push CHANGELOG changes
+ run: |
+ git add CHANGELOG.md
+
+ # Extract the release notes
+ release_notes=$(sed -n "/## \[${{ steps.version.outputs.version }}\]/,/## \[/p" CHANGELOG.md | sed '$d' | tail -n +2)
+
+ # Create commit message
+ cat > commit_msg.txt << EOF
+ chore: release v${{ steps.version.outputs.version }}
+
+ ${release_notes}
+ EOF
+
+ git commit -F commit_msg.txt
+ git push origin main
+
+ - name: Fetch latest changes
+ run: git pull origin main
+
+ - name: Check if version already exists
+ run: |
+ if git rev-parse "${{ steps.version.outputs.version }}" >/dev/null 2>&1; then
+ echo "Error: Tag ${{ steps.version.outputs.version }} already exists"
+ echo "This version has already been released"
+ exit 1
+ fi
+ if git ls-remote --heads origin "${{ steps.version.outputs.version }}" | grep -q "${{ steps.version.outputs.version }}"; then
+ echo "Error: Branch ${{ steps.version.outputs.version }} already exists"
+ exit 1
+ fi
+
+ - name: Create and push version tag
+ run: |
+ git tag ${{ steps.version.outputs.version }}
+ git push origin ${{ steps.version.outputs.version }}
+
+ - name: Create and push version branch
+ run: |
+ git checkout -b ${{ steps.version.outputs.version }}
+ git push origin ${{ steps.version.outputs.version }}
+
+ - name: Update latest tag
+ run: |
+ git checkout main
+ git tag -d latest 2>/dev/null || true
+ git push origin :refs/tags/latest 2>/dev/null || true
+ git tag latest
+ git push origin latest --force
+
+ - name: Summary
+ run: |
+ echo "### Release Created Successfully! :rocket:" >> $GITHUB_STEP_SUMMARY
+ echo "" >> $GITHUB_STEP_SUMMARY
+ echo "- **Version**: ${{ steps.version.outputs.version }}" >> $GITHUB_STEP_SUMMARY
+ echo "- **Date**: ${{ steps.date.outputs.today }}" >> $GITHUB_STEP_SUMMARY
+ echo "- **PR**: #${{ github.event.pull_request.number }}" >> $GITHUB_STEP_SUMMARY
+ echo "" >> $GITHUB_STEP_SUMMARY
+ echo "**Created:**" >> $GITHUB_STEP_SUMMARY
+ echo "- Tag: \`${{ steps.version.outputs.version }}\`" >> $GITHUB_STEP_SUMMARY
+ echo "- Branch: \`${{ steps.version.outputs.version }}\`" >> $GITHUB_STEP_SUMMARY
+ echo "- Updated tag: \`latest\`" >> $GITHUB_STEP_SUMMARY
+ echo "- Updated CHANGELOG.md" >> $GITHUB_STEP_SUMMARY
+
+ # Job 3: Manual release creation (runs on workflow_dispatch)
+ create-release-manual:
+ if: github.event_name == 'workflow_dispatch'
+ runs-on: ubuntu-latest
+
+ steps:
+ - name: Checkout repository
+ uses: actions/checkout@v4
+ with:
+ ref: ${{ inputs.branch }}
+ fetch-depth: 0
+ token: ${{ secrets.GITHUB_TOKEN }}
+
+ - name: Validate version format
+ run: |
+ if ! [[ "${{ inputs.version }}" =~ ^[0-9]+\.[0-9]+\.[0-9]+$ ]]; then
+ echo "Error: Version must be in format X.Y.Z (e.g., 1.9.1)"
+ exit 1
+ fi
+
+ - name: Check if version already exists
+ run: |
+ if git rev-parse "${{ inputs.version }}" >/dev/null 2>&1; then
+ echo "Error: Tag ${{ inputs.version }} already exists"
+ echo "This version has already been released"
+ exit 1
+ fi
+ if git ls-remote --heads origin "${{ inputs.version }}" | grep -q "${{ inputs.version }}"; then
+ echo "Error: Branch ${{ inputs.version }} already exists"
+ exit 1
+ fi
+
+ - name: Get current date
+ id: date
+ run: echo "today=$(date +'%Y-%m-%d')" >> $GITHUB_OUTPUT
+
+ - name: Update CHANGELOG
+ run: |
+ VERSION="${{ inputs.version }}"
+ DATE="${{ steps.date.outputs.today }}"
+
+ # Check if unreleased section exists
+ if ! grep -q "## \[Unreleased\]" CHANGELOG.md; then
+ echo "Error: No [Unreleased] section found in CHANGELOG.md"
+ exit 1
+ fi
+
+ # Replace [Unreleased] with the version and date
+ sed -i.bak "s/## \[Unreleased\]/## [$VERSION] - $DATE/" CHANGELOG.md
+ rm CHANGELOG.md.bak
+
+ # Add new empty Unreleased section at the top
+ awk '/^## \[/ && !found {
+ print "## [Unreleased]\n"
+ found=1
+ }
+ {print}' CHANGELOG.md > CHANGELOG.tmp && mv CHANGELOG.tmp CHANGELOG.md
+
+ - name: Configure Git
+ run: |
+ git config user.name "github-actions[bot]"
+ git config user.email "github-actions[bot]@users.noreply.github.com"
+
+ - name: Commit and push CHANGELOG changes
+ run: |
+ git add CHANGELOG.md
+
+ # Extract the release notes
+ release_notes=$(sed -n "/## \[${{ inputs.version }}\]/,/## \[/p" CHANGELOG.md | sed '$d' | tail -n +2)
+
+ # Create commit message
+ cat > commit_msg.txt << EOF
+ chore: release v${{ inputs.version }}
+
+ ${release_notes}
+ EOF
+
+ git commit -F commit_msg.txt
+ git push origin ${{ inputs.branch }}
+
+ - name: Fetch latest changes
+ run: git pull origin ${{ inputs.branch }}
+
+ - name: Create and push version tag
+ run: |
+ git tag ${{ inputs.version }}
+ git push origin ${{ inputs.version }}
+
+ - name: Create and push version branch
+ run: |
+ git checkout -b ${{ inputs.version }}
+ git push origin ${{ inputs.version }}
+
+ - name: Update latest tag
+ run: |
+ git checkout ${{ inputs.branch }}
+ git tag -d latest 2>/dev/null || true
+ git push origin :refs/tags/latest 2>/dev/null || true
+ git tag latest
+ git push origin latest --force
+
+ - name: Summary
+ run: |
+ echo "### Release Created Successfully! :rocket:" >> $GITHUB_STEP_SUMMARY
+ echo "" >> $GITHUB_STEP_SUMMARY
+ echo "- **Version**: ${{ inputs.version }}" >> $GITHUB_STEP_SUMMARY
+ echo "- **Date**: ${{ steps.date.outputs.today }}" >> $GITHUB_STEP_SUMMARY
+ echo "- **Source Branch**: ${{ inputs.branch }}" >> $GITHUB_STEP_SUMMARY
+ echo "" >> $GITHUB_STEP_SUMMARY
+ echo "**Created:**" >> $GITHUB_STEP_SUMMARY
+ echo "- Tag: \`${{ inputs.version }}\`" >> $GITHUB_STEP_SUMMARY
+ echo "- Branch: \`${{ inputs.version }}\`" >> $GITHUB_STEP_SUMMARY
+ echo "- Updated tag: \`latest\`" >> $GITHUB_STEP_SUMMARY
+ echo "- Updated CHANGELOG.md" >> $GITHUB_STEP_SUMMARY
diff --git a/CHANGELOG.md b/CHANGELOG.md
index 75236bb6..14798df6 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -5,6 +5,15 @@ All notable changes to this project will be documented in this file.
The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/),
and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html).
+## [Unreleased]
+- Add support to configure the traffic manager nginx through a configmap.
+- Add **k8s/diagnose** documentation and new checks
+- Fix **k8s/diagnose** checks, adding logs and improvements
+- Add support for `NAMESPACE_OVERRIDE` configuration in k8s scope and deployment actions.
+- Change delete cluster objects to maintain only one deployment_id per scope
+- Do not execute actions that are not valid for current deployment status
+- Upgrade libs versions in k8s/log/kube-logger-go
+
## [1.9.0] - 2025-12-17
- Add namespace validation and auto-creation
- Add deployment hints for failed deployments
diff --git a/azure/specs/actions/diagnose-deployment.json.tpl b/azure/specs/actions/diagnose-deployment.json.tpl
new file mode 100644
index 00000000..91a02434
--- /dev/null
+++ b/azure/specs/actions/diagnose-deployment.json.tpl
@@ -0,0 +1,43 @@
+{
+ "name": "Diagnose Deployment",
+ "slug": "diagnose-deployment",
+ "type": "diagnose",
+ "retryable": true,
+ "service_specification_id": "{{ env.Getenv "SERVICE_SPECIFICATION_ID" }}",
+ "parameters": {
+ "schema": {
+ "type": "object",
+ "required": [
+ "scope_id",
+ "deployment_id"
+ ],
+ "properties": {
+ "scope_id": {
+ "type": "number",
+ "readOnly": true,
+ "visibleOn": ["read"]
+ },
+ "deployment_id": {
+ "type": "number",
+ "readOnly": true,
+ "visibleOn": ["read"]
+ }
+ }
+ },
+ "values": {}
+ },
+ "annotations": {
+ "show_on": [
+ "deployment"
+ ],
+ "runs_over": "deployment"
+ },
+ "results": {
+ "schema": {
+ "type": "object",
+ "required": [],
+ "properties": {}
+ },
+ "values": {}
+ }
+}
\ No newline at end of file
diff --git a/azure/specs/actions/diagnose-scope.json.tpl b/azure/specs/actions/diagnose-scope.json.tpl
new file mode 100644
index 00000000..ec2d2586
--- /dev/null
+++ b/azure/specs/actions/diagnose-scope.json.tpl
@@ -0,0 +1,37 @@
+{
+ "name": "Diagnose Scope",
+ "slug": "diagnose-scope",
+ "type": "diagnose",
+ "retryable": true,
+ "service_specification_id": "{{ env.Getenv "SERVICE_SPECIFICATION_ID" }}",
+ "parameters": {
+ "schema": {
+ "type": "object",
+ "required": [
+ "scope_id"
+ ],
+ "properties": {
+ "scope_id": {
+ "type": "number",
+ "readOnly": true,
+ "visibleOn": ["read"]
+ }
+ }
+ },
+ "values": {}
+ },
+ "results": {
+ "schema": {
+ "type": "object",
+ "required": [],
+ "properties": {}
+ },
+ "values": {}
+ },
+ "annotations": {
+ "show_on": [
+ "manage", "performance"
+ ],
+ "runs_over": "scope"
+ }
+}
\ No newline at end of file
diff --git a/k8s/deployment/build_context b/k8s/deployment/build_context
index d37e6c71..b05c657a 100755
--- a/k8s/deployment/build_context
+++ b/k8s/deployment/build_context
@@ -22,6 +22,59 @@ MIN_REPLICAS=$(echo "$MIN_REPLICAS" | awk '{printf "%d", ($1 == int($1) ? $1 : i
DEPLOYMENT_STATUS=$(echo $CONTEXT | jq -r ".deployment.status")
+validate_status() {
+ local action="$1"
+ local status="$2"
+ local expected_status=""
+
+ case "$action" in
+ start-initial|start-blue-green)
+ expected_status="creating, waiting_for_instances or running"
+ ;;
+ switch-traffic)
+ expected_status="running or waiting_for_instances"
+ ;;
+ rollback-deployment)
+ expected_status="rolling_back or cancelling"
+ ;;
+ finalize-blue-green)
+ expected_status="finalizing or cancelling"
+ ;;
+ delete-deployment)
+ expected_status="deleting, rolling_back or cancelling"
+ ;;
+ *)
+ echo "🔄 Running action '$action', any deployment status is accepted"
+ return 0
+ ;;
+ esac
+
+ echo "🔄 Running action '$action' (current status: '$status', expected: $expected_status)"
+
+ case "$action" in
+ start-initial|start-blue-green)
+ [[ "$status" == "creating" || "$status" == "waiting_for_instances" || "$status" == "running" ]]
+ ;;
+ switch-traffic)
+ [[ "$status" == "running" || "$status" == "waiting_for_instances" ]]
+ ;;
+ rollback-deployment)
+ [[ "$status" == "rolling_back" || "$status" == "cancelling" ]]
+ ;;
+ finalize-blue-green)
+ [[ "$status" == "finalizing" || "$status" == "cancelling" ]]
+ ;;
+ delete-deployment)
+ [[ "$status" == "deleting" || "$status" == "cancelling" || "$status" == "rolling_back" ]]
+ ;;
+ esac
+}
+
+if ! validate_status "$SERVICE_ACTION" "$DEPLOYMENT_STATUS"; then
+ echo "❌ Invalid deployment status '$DEPLOYMENT_STATUS' for action '$SERVICE_ACTION'" >&2
+ exit 1
+fi
+
if [ "$DEPLOY_STRATEGY" = "rolling" ] && [ "$DEPLOYMENT_STATUS" = "running" ]; then
GREEN_REPLICAS=$(echo "scale=10; ($GREEN_REPLICAS * $SWITCH_TRAFFIC) / 100" | bc)
GREEN_REPLICAS=$(echo "$GREEN_REPLICAS" | awk '{printf "%d", ($1 == int($1) ? $1 : int($1)+1)}')
@@ -64,6 +117,36 @@ if [[ "$IAM_ENABLED" == "true" ]]; then
SERVICE_ACCOUNT_NAME=$(echo "$IAM" | jq -r .PREFIX)-"$SCOPE_ID"
fi
+TRAFFIC_MANAGER_CONFIG_MAP=${TRAFFIC_MANAGER_CONFIG_MAP:-""}
+
+if [[ -n "$TRAFFIC_MANAGER_CONFIG_MAP" ]]; then
+ echo "🔍 Validating ConfigMap '$TRAFFIC_MANAGER_CONFIG_MAP' in namespace '$K8S_NAMESPACE'"
+
+ # Check if the ConfigMap exists
+ if ! kubectl get configmap "$TRAFFIC_MANAGER_CONFIG_MAP" -n "$K8S_NAMESPACE" &>/dev/null; then
+ echo "❌ ConfigMap '$TRAFFIC_MANAGER_CONFIG_MAP' does not exist in namespace '$K8S_NAMESPACE'"
+ exit 1
+ fi
+ echo "✅ ConfigMap '$TRAFFIC_MANAGER_CONFIG_MAP' exists"
+
+ # Check for required keys (subPaths)
+ REQUIRED_KEYS=("nginx.conf" "default.conf")
+
+ # Get all keys from the ConfigMap data
+ CONFIGMAP_KEYS=$(kubectl get configmap "$TRAFFIC_MANAGER_CONFIG_MAP" -n "$K8S_NAMESPACE" -o go-template='{{range $k, $v := .data}}{{$k}}{{"\n"}}{{end}}')
+
+ for key in "${REQUIRED_KEYS[@]}"; do
+ if ! echo "$CONFIGMAP_KEYS" | grep -qx "$key"; then
+ echo "❌ ConfigMap '$TRAFFIC_MANAGER_CONFIG_MAP' is missing required key '$key'"
+ echo "💡 The ConfigMap must contain data entries for: ${REQUIRED_KEYS[*]}"
+ exit 1
+ fi
+ echo "✅ Found required key '$key' in ConfigMap"
+ done
+
+ echo "🎉 ConfigMap '$TRAFFIC_MANAGER_CONFIG_MAP' validation successful"
+fi
+
CONTEXT=$(echo "$CONTEXT" | jq \
--arg blue_deployment_id "$BLUE_DEPLOYMENT_ID" \
--arg blue_replicas "$BLUE_REPLICAS" \
@@ -74,6 +157,7 @@ CONTEXT=$(echo "$CONTEXT" | jq \
--arg pdb_enabled "$PDB_ENABLED" \
--arg pdb_max_unavailable "$PDB_MAX_UNAVAILABLE" \
--arg service_account_name "$SERVICE_ACCOUNT_NAME" \
+ --arg traffic_manager_config_map "$TRAFFIC_MANAGER_CONFIG_MAP" \
'. + {blue_deployment_id: $blue_deployment_id,
blue_replicas: $blue_replicas,
green_replicas: $green_replicas,
@@ -82,7 +166,8 @@ CONTEXT=$(echo "$CONTEXT" | jq \
traffic_image: $traffic_image,
pdb_enabled: $pdb_enabled,
pdb_max_unavailable: $pdb_max_unavailable,
- service_account_name: $service_account_name
+ service_account_name: $service_account_name,
+ traffic_manager_config_map: $traffic_manager_config_map
}')
DEPLOYMENT_ID=$(echo "$CONTEXT" | jq -r '.deployment.id')
diff --git a/k8s/deployment/delete_cluster_objects b/k8s/deployment/delete_cluster_objects
old mode 100644
new mode 100755
index 288c639e..5e069bca
--- a/k8s/deployment/delete_cluster_objects
+++ b/k8s/deployment/delete_cluster_objects
@@ -1,10 +1,43 @@
#!/bin/bash
-DEPLOYMENT_TO_CLEAN="$DEPLOYMENT_ID"
+OBJECTS_TO_DELETE="deployment,service,hpa,ingress,pdb,secret,configmap"
+
+# Function to delete all resources for a given deployment_id
+delete_deployment_resources() {
+ local DEPLOYMENT_ID_TO_DELETE="$1"
+ kubectl delete "$OBJECTS_TO_DELETE" \
+ -l deployment_id="$DEPLOYMENT_ID_TO_DELETE" -n "$K8S_NAMESPACE" --cascade=foreground --wait=true
+}
+
+CURRENT_ACTIVE=$(echo "$CONTEXT" | jq -r '.scope.current_active_deployment // empty')
if [ "$DEPLOYMENT" = "blue" ]; then
- DEPLOYMENT_TO_CLEAN=$(echo "$CONTEXT" | jq .scope.current_active_deployment -r)
+ # Deleting blue (old) deployment, keeping green (new)
+ DEPLOYMENT_TO_CLEAN="$CURRENT_ACTIVE"
+ DEPLOYMENT_TO_KEEP="$DEPLOYMENT_ID"
+elif [ "$DEPLOYMENT" = "green" ]; then
+ # Deleting green (new) deployment, keeping blue (old)
+ DEPLOYMENT_TO_CLEAN="$DEPLOYMENT_ID"
+ DEPLOYMENT_TO_KEEP="$CURRENT_ACTIVE"
+fi
+
+delete_deployment_resources "$DEPLOYMENT_TO_CLEAN"
+
+echo "Verifying cleanup for scope_id: $SCOPE_ID in namespace: $K8S_NAMESPACE"
+
+# Get all unique deployment_ids for this scope_id
+ALL_DEPLOYMENT_IDS=$(kubectl get "$OBJECTS_TO_DELETE" -n "$K8S_NAMESPACE" \
+ -l "scope_id=$SCOPE_ID" \
+ -o jsonpath='{range .items[*]}{.metadata.labels.deployment_id}{"\n"}{end}' 2>/dev/null | sort -u | grep -v '^$')
+
+# Delete all deployment_ids except DEPLOYMENT_TO_KEEP
+if [ -n "$ALL_DEPLOYMENT_IDS" ]; then
+ while IFS= read -r EXTRA_DEPLOYMENT_ID; do
+ if [ "$EXTRA_DEPLOYMENT_ID" != "$DEPLOYMENT_TO_KEEP" ]; then
+ delete_deployment_resources "$EXTRA_DEPLOYMENT_ID"
+ fi
+ done <<< "$ALL_DEPLOYMENT_IDS"
fi
-kubectl delete deployment,service,hpa,ingress,pdb,secret,configmap \
- -l deployment_id="$DEPLOYMENT_TO_CLEAN" -n "$K8S_NAMESPACE" --cascade=foreground --wait=true
+
+echo "Cleanup verification successful: Only deployment_id=$DEPLOYMENT_TO_KEEP remains for scope_id=$SCOPE_ID"
\ No newline at end of file
diff --git a/k8s/deployment/templates/deployment.yaml.tpl b/k8s/deployment/templates/deployment.yaml.tpl
index 497b5141..5b2bddfd 100644
--- a/k8s/deployment/templates/deployment.yaml.tpl
+++ b/k8s/deployment/templates/deployment.yaml.tpl
@@ -122,6 +122,15 @@ spec:
securityContext:
runAsUser: 0
image: {{ .traffic_image }}
+ {{- if .traffic_manager_config_map }}
+ volumeMounts:
+ - name: nginx-config
+ mountPath: /etc/nginx/nginx.conf
+ subPath: nginx.conf
+ - name: nginx-config
+ mountPath: /etc/nginx/conf.d/default.conf
+ subPath: default.conf
+ {{- end }}
ports:
- containerPort: 80
protocol: TCP
@@ -294,6 +303,11 @@ spec:
{{- end }}
{{- end }}
volumes:
+ {{- if .traffic_manager_config_map }}
+ - name: nginx-config
+ configMap:
+ name: {{ .traffic_manager_config_map }}
+ {{- end }}
{{- if .parameters.results }}
{{- range .parameters.results }}
{{- if and (eq .type "file") }}
diff --git a/k8s/deployment/workflows/diagnose.yaml b/k8s/deployment/workflows/diagnose.yaml
index 481281f2..66223726 100644
--- a/k8s/deployment/workflows/diagnose.yaml
+++ b/k8s/deployment/workflows/diagnose.yaml
@@ -2,14 +2,6 @@ continue_on_error: true
include:
- "$SERVICE_PATH/values.yaml"
steps:
- - name: build context
- type: script
- file: "$SERVICE_PATH/diagnose/build_context"
- output:
- - name: CONTEXT
- type: environment
- - name: LABEL_SELECTOR
- type: environment
- name: load_functions
type: script
file: "$SERVICE_PATH/diagnose/utils/diagnose_utils"
@@ -21,6 +13,14 @@ steps:
evidence: object
- name: notify_results
type: function
+ - name: build context
+ type: script
+ file: "$SERVICE_PATH/diagnose/build_context"
+ output:
+ - name: CONTEXT
+ type: environment
+ - name: LABEL_SELECTOR
+ type: environment
- name: diagnose
type: executor
before_each:
diff --git a/k8s/diagnose/DIAGNOSTICS_GUIDE.md b/k8s/diagnose/DIAGNOSTICS_GUIDE.md
new file mode 100644
index 00000000..022083e6
--- /dev/null
+++ b/k8s/diagnose/DIAGNOSTICS_GUIDE.md
@@ -0,0 +1,301 @@
+# Kubernetes diagnostics guide
+
+This guide documents all diagnostic checks available in the `k8s/diagnose` workflow, including what errors they detect,
+possible solutions, and example outputs.
+
+## How diagnostics work
+
+The diagnostic workflow follows a two-phase approach:
+
+### Phase 1: build context (`build_context`)
+
+Before running any checks, the `build_context` script collects a **snapshot of the Kubernetes cluster state**. This
+snapshot includes:
+
+- **Pods**: All pods matching the scope labels
+- **Services**: Services associated with the deployment
+- **Endpoints**: Service endpoint information
+- **Ingresses**: Ingress resources for the scope
+- **Secrets**: Secret metadata (no actual secret data)
+- **IngressClasses**: Available ingress classes in the cluster
+- **Events**: Recent Kubernetes events for troubleshooting
+- **ALB Controller Data**: AWS Load Balancer controller pods and logs (if applicable)
+
+All this data is stored in JSON files within the `data/` subdirectory of the output folder. Storing the data this way
+enables a few key benefits:
+
+- **Better performance**: Each check reads from pre-collected files instead of making repeated API calls
+- **Consistent results**: All checks analyze the same point-in-time snapshot
+- **Lower API load**: Fewer requests to the Kubernetes API server
+- **More reliable runs**: Prevents issues like “*argument list too long*” when processing many resources
+
+### Phase 2: diagnostic checks
+
+After the context is built, individual diagnostic checks run in parallel, reading from the pre-collected data files.
+Each check:
+
+1. Validates that required resources exist (using helper functions like
+ `require_pods`, `require_services`, `require_ingresses`)
+2. Analyzes the data for specific issues
+3. Reports findings with status: `success`, `failed`, or provides warnings
+4. Generates actionable evidence and recommendations
+
+---
+
+## Diagnostic checks reference
+
+Below is the complete list of diagnostic checks executed during a run. Checks are grouped by category and run in
+parallel to identify common networking, scope, and service-level issues in the cluster.
+
+### Networking checks (`k8s/diagnose/networking/`)
+1. [ingress_existence](#1-ingress_existence) - `networking/ingress_existence`
+2. [ingress_class_validation](#2-ingress_class_validation) - `networking/ingress_class_validation`
+3. [ingress_controller_sync](#3-ingress_controller_sync) - `networking/ingress_controller_sync`
+4. [ingress_host_rules](#4-ingress_host_rules) - `networking/ingress_host_rules`
+5. [ingress_backend_service](#5-ingress_backend_service) - `networking/ingress_backend_service`
+6. [ingress_tls_configuration](#6-ingress_tls_configuration) - `networking/ingress_tls_configuration`
+7. [alb_capacity_check](#7-alb_capacity_check) - `networking/alb_capacity_check`
+
+### Scope checks (`k8s/diagnose/scope/`)
+1. [pod_existence](#1-pod_existence) - `scope/pod_existence`
+2. [container_crash_detection](#2-container_crash_detection) - `scope/container_crash_detection`
+3. [image_pull_status](#3-image_pull_status) - `scope/image_pull_status`
+4. [memory_limits_check](#4-memory_limits_check) - `scope/memory_limits_check`
+5. [resource_availability](#6-resource_availability) - `scope/resource_availability`
+6. [storage_mounting](#7-storage_mounting) - `scope/storage_mounting`
+7. [container_port_health](#8-container_port_health) - `scope/container_port_health`
+8. [health_probe_endpoints](#9-health_probe_endpoints) - `scope/health_probe_endpoints`
+
+### Service checks (`k8s/diagnose/service/`)
+1. [service_existence](#1-service_existence) - `service/service_existence`
+2. [service_selector_match](#2-service_selector_match) - `service/service_selector_match`
+3. [service_endpoints](#3-service_endpoints) - `service/service_endpoints`
+4. [service_port_configuration](#4-service_port_configuration) - `service/service_port_configuration`
+5. [service_type_validation](#5-service_type_validation) - `service/service_type_validation`
+
+
+---
+
+## Networking checks
+
+### 1. ingress_existence
+
+| **Aspect** | **Details** |
+| ---------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
+| **What it detects** | Missing ingress resources |
+| **Common causes** | - Ingress not created<br>- Ingress in wrong namespace<br>- Label selector mismatch<br>- Ingress deleted accidentally |
+| **Possible solutions** | - Create ingress resource<br>- Verify ingress is in correct namespace<br>- Check ingress labels match scope selectors<br>- Review ingress creation in deployment pipeline |
+| **Example output (failure)** | `✗ No ingresses found with labels scope_id=123456 in namespace production`<br>`ℹ Action: Create ingress resource to expose services externally` |
+| **Example output (success)** | `✓ Found 1 ingress(es): web-app-ingress`<br>`ℹ web-app-ingress hosts: example.com, www.example.com` |
+
+### 2. ingress_class_validation
+
+| **Aspect** | **Details** |
+| ---------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ |
+| **What it detects** | Invalid or missing ingress class configuration |
+| **Common causes** | - IngressClass does not exist<br>- Using deprecated annotation instead of ingressClassName<br>- No default IngressClass defined<br>- Ingress controller not installed |
+| **Possible solutions** | - Install ingress controller (nginx, ALB, traefik, etc.)<br>- Create IngressClass resource<br>- Set default IngressClass:<br>```yaml<br>metadata:<br>  annotations:<br>    ingressclass.kubernetes.io/is-default-class: "true"<br>```<br>- Update ingress to use `spec.ingressClassName` instead of annotation<br>- Verify IngressClass matches installed controller |
+| **Example output (failure)** | `✗ Ingress web-app-ingress: IngressClass 'nginx-internal' not found`<br>`⚠ Available classes: nginx, alb`<br>`ℹ Action: Use an available IngressClass or install the required controller` |
+| **Example output (success)** | `✓ Ingress web-app-ingress: IngressClass 'alb' is valid` |
+
+### 3. ingress_controller_sync
+
+| **Aspect** | **Details** |
+| ---------------------------- | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
+| **What it detects** | Ingress controller failing to reconcile/sync ingress resources |
+| **Common causes** | - Ingress controller pods not running<br>- Backend service errors<br>- Certificate validation failures<br>- Subnet IP exhaustion (for ALB)<br>- Security group misconfiguration<br>- Ingress syntax errors |
+| **Possible solutions** | - Check ingress controller logs<br>- Verify backend services exist and have endpoints<br>- For ALB: check AWS ALB controller logs<br>- Verify certificates are valid<br>- Check subnet capacity<br>- Review ingress configuration for errors<br>- Ensure required AWS IAM permissions |
+| **Example output (failure)** | `✗ Ingress web-app-ingress: Sync errors detected`<br>` Found error/warning events:`<br>` 2024-01-15 10:30:45 Warning SyncError Failed to reconcile`<br>`✗ ALB address not assigned yet (sync may be in progress or failing)`<br>`ℹ Action: Check ingress controller logs and verify backend services are healthy` |
+| **Example output (success)** | `✓ All 2 ingress(es) synchronized successfully with controller` |
+
+### 4. ingress_host_rules
+
+| **Aspect** | **Details** |
+| ---------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ |
+| **What it detects** | Invalid or problematic host and path rules |
+| **Common causes** | - No rules and no default backend defined<br>- Invalid pathType (must be Exact, Prefix, or ImplementationSpecific)<br>- Path ending with `/` for Prefix type (can cause routing issues)<br>- Duplicate host rules<br>- Wildcard hosts without proper configuration |
+| **Possible solutions** | - Define at least one rule or a default backend<br>- Use valid pathType values<br>- Remove trailing slashes from Prefix paths<br>- Consolidate duplicate host rules<br>- Specify explicit hostnames instead of wildcards when possible |
+| **Example output (failure)** | `✗ Ingress web-app-ingress: No rules and no default backend configured`<br>`ℹ Action: Add at least one rule or configure default backend` |
+| **Example output (success)** | `✓ Host and path rules valid for all 2 ingress(es)` |
+
+### 5. ingress_backend_service
+
+| **Aspect** | **Details** |
+| ---------------------------- | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
+| **What it detects** | Backend services that don't exist or have no endpoints |
+| **Common causes** | - Backend service doesn't exist<br>- Service has no healthy endpoints<br>- Service port mismatch<br>- Service in different namespace (not supported) |
+| **Possible solutions** | - Create missing backend services<br>- Fix service endpoint issues (see service_endpoints check)<br>- Verify service port matches ingress backend port<br>- Ensure all backends are in same namespace as ingress<br>- Check service selector matches pods |
+| **Example output (failure)** | `✗ Ingress web-app-ingress: Backend api-service:8080 (no endpoints)`<br>`ℹ Action: Verify pods are running and service selector matches` |
+| **Example output (success)** | `✓ All backend services healthy for 2 ingress(es)` |
+
+### 6. ingress_tls_configuration
+
+| **Aspect** | **Details** |
+| ---------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
+| **What it detects** | TLS/SSL certificate configuration issues |
+| **Common causes** | - TLS secret does not exist
- Secret is wrong type (not kubernetes.io/tls)
- Secret missing required keys (tls.crt, tls.key)
- Certificate expired or expiring soon
- Certificate doesn't cover requested hostnames |
+| **Possible solutions** | - Create TLS secret with certificate and key
- Verify secret type and keys
- Renew expired certificates
- Ensure certificate covers all ingress hosts
- For cert-manager, check certificate resource status |
+| **Example output (failure)** | `✗ Ingress web-app-ingress: TLS Secret 'app-tls-cert' not found in namespace`
`ℹ Action: Create TLS secret or update ingress configuration` |
+| **Example output (success)** | `✓ TLS configuration valid for all 2 ingress(es)` |
+
+### 7. alb_capacity_check
+
+| **Aspect** | **Details** |
+| ---------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ |
+| **What it detects** | AWS ALB-specific capacity and configuration issues |
+| **Common causes** | - Subnet IP exhaustion
- Invalid or missing certificate ARN
- Security group misconfigurations
- Target group registration failures
- Missing or invalid subnet annotations
- Scheme not specified (internal vs internet-facing) |
+| **Possible solutions** | - For IP exhaustion: expand subnet CIDR or use different subnets
- Verify ACM certificate ARN exists and is in correct region
- Check security groups allow ALB traffic
- Review ALB controller logs for detailed errors
- Explicitly specify subnets:
```yaml
annotations:
alb.ingress.kubernetes.io/subnets: subnet-abc123,subnet-def456
```
- Specify scheme:
```yaml
annotations:
alb.ingress.kubernetes.io/scheme: internet-facing
``` |
+| **Example output (failure)** | `✗ ALB capacity check failed`
` ALB subnet IP exhaustion detected, Recent logs:`
` Error allocating address: InsufficientFreeAddressesInSubnet`
`ℹ Action: Check subnet CIDR ranges and consider expanding or using different subnets`
`ℹ Annotation: alb.ingress.kubernetes.io/subnets=` |
+| **Example output (success)** | `✓ No critical ALB capacity or configuration issues detected`
` No IP exhaustion issues detected`
` SSL/TLS configured`
` Certificate ARN: arn:aws:acm:us-east-1:123456789:certificate/abc123`
` Scheme: internet-facing` |
+
+
+## Scope checks
+
+### 1. pod_existence
+
+| **Aspect** | **Details** |
+|------------|-------------|
+| **What it detects** | Missing pod resources for the deployment |
+| **Common causes** | - Pods not created
- Pods deleted or evicted
- Deployment failed to create pods
- Label selector mismatch
- Namespace mismatch |
+| **Possible solutions** | - Check deployment status and events
- Verify deployment spec is correct
- Review pod creation errors
- Check resource quotas and limits
- Verify namespace and label selectors
- Review deployment controller logs |
+| **Example output (failure)** | `✗ No pods found with labels scope_id=123456 in namespace production`
`ℹ Action: Check deployment status and verify label selectors match` |
+| **Example output (success)** | `✓ Found 3 pod(s): web-app-123-abc web-app-123-def web-app-123-ghi` |
+
+### 2. container_crash_detection
+
+| **Aspect** | **Details** |
+|------------|-------------|
+| **What it detects** | Containers that are crashing repeatedly (CrashLoopBackOff state) |
+| **Common causes** | - Application crashes at startup
- Missing dependencies or configuration
- Invalid command or entrypoint
- OOMKilled (Out of Memory)
- Failed health checks causing restarts |
+| **Possible solutions** | - Check application logs
- Review container command and arguments
- Verify environment variables and secrets are properly mounted
- Increase memory limits if OOMKilled
- Fix application code causing the crash
- Ensure all required config files exist |
+| **Example output (failure)** | `✗ Pod web-app-123: CrashLoopBackOff in container(s): app`
`⚠ Container: app \| Restarts: 5 \| Exit Code: 1`
`⚠ Exit 1 = Application error`
`ℹ Last logs from web-app-123:`
` [application logs...]`
`ℹ Action: Check container logs and fix application startup issues` |
+| **Example output (success)** | `✓ All 3 pod(s) running without crashes` |
+
+### 3. image_pull_status
+
+| **Aspect** | **Details** |
+|------------|-------------|
+| **What it detects** | Failures to pull container images from registries |
+| **Common causes** | - Image does not exist in the registry
- Incorrect image name or tag
- Missing or invalid imagePullSecrets for private registries
- Network connectivity issues to registry
- Registry authentication failures
- Rate limiting from public registries |
+| **Possible solutions** | - Verify image name and tag are correct
- Check if image exists in registry
- For private registries, ensure imagePullSecrets are configured
- Verify registry credentials are valid
- Check network connectivity to registry
- Consider using a registry mirror or cache |
+| **Example output (failure)** | `✗ Pod web-app-123: ImagePullBackOff/ErrImagePull in container(s): app`
`⚠ Image: registry.example.com/app:v1.0.0`
`⚠ Reason: Failed to pull image: pull access denied`
`ℹ Action: Verify image exists and imagePullSecrets are configured for private registries` |
+| **Example output (success)** | `✓ All 3 pod(s) have images pulled successfully` |
+
+### 4. memory_limits_check
+
+| **Aspect** | **Details** |
+|------------|-------------|
+| **What it detects** | Containers without memory limits configured |
+| **Common causes** | - Missing resource limits in deployment specification
- Resource limits commented out or removed
- Using default configurations without limits |
+| **Possible solutions** | - Add memory limits to container spec:
```yaml
resources:
limits:
memory: "512Mi"
requests:
memory: "256Mi"
```
- Follow resource sizing best practices
- Monitor actual memory usage to set appropriate limits
- Consider using LimitRanges for namespace defaults |
+| **Example output (failure)** | `✗ Pod web-app-123: Container app has no memory limits`
`ℹ Current resources: requests.memory=128Mi, limits.memory=NONE`
`⚠ Risk: Container can consume all node memory`
`ℹ Action: Add memory limits to prevent resource exhaustion` |
+| **Example output (success)** | `✓ No OOMKilled containers detected in 3 pod(s)` |
+
+### 5. resource_availability
+
+| **Aspect** | **Details** |
+|------------|-------------|
+| **What it detects** | Insufficient cluster resources to schedule pods |
+| **Common causes** | - Requesting more CPU/memory than available on any node
- All nodes at capacity
- Resource quotas exceeded
- Taints/tolerations preventing scheduling
- Node selector/affinity rules too restrictive
- Too many replicas requested |
+| **Possible solutions** | - Reduce resource requests in deployment
- Scale up cluster (add more nodes)
- Remove or adjust node selectors/affinity rules
- Check and adjust resource quotas
- Verify node taints and add tolerations if needed
- Review and optimize resource usage across cluster
- Consider pod priority classes for critical workloads |
+| **Example output (failure)** | `✗ Pod web-app-123: Cannot be scheduled`
`⚠ Reason: 0/3 nodes are available: 1 Insufficient cpu, 2 Insufficient memory`
`⚠ Issue: Insufficient CPU in cluster`
`⚠ Issue: Insufficient memory in cluster`
`ℹ Action: Reduce resource requests or add more nodes to cluster` |
+| **Example output (success)** | `✓ All 3 pod(s) successfully scheduled with sufficient resources` |
+
+### 6. storage_mounting
+
+| **Aspect** | **Details** |
+|------------|-------------|
+| **What it detects** | Failures to mount volumes (PVCs, ConfigMaps, Secrets) |
+| **Common causes** | - Referenced PersistentVolumeClaim does not exist
- PVC is bound to unavailable PersistentVolume
- Storage class does not exist
- ConfigMap or Secret not found
- Volume attachment failures (CSI driver issues)
- Insufficient storage capacity
- Multi-attach errors for ReadWriteOnce volumes |
+| **Possible solutions** | - Verify PVC exists and is bound
- Check PVC status and events
- Ensure ConfigMap/Secret exists and is in correct namespace
- Verify storage class is available and properly configured
- Check storage provisioner logs for errors
- For multi-attach errors, ensure volume is detached from previous node
- Verify sufficient storage quota |
+| **Example output (failure)** | `✗ Pod web-app-123: Volume mount failed`
` Volume: data-volume (PersistentVolumeClaim)`
` PVC: app-data-pvc`
` Status: Pending`
` Events:`
` MountVolume.SetUp failed: PersistentVolumeClaim "app-data-pvc" not found`
`ℹ Action: Create missing PVC or fix volume reference in deployment` |
+| **Example output (success)** | `✓ All volumes mounted successfully for 3 pod(s)` |
+
+### 7. container_port_health
+
+| **Aspect** | **Details** |
+|------------|-------------|
+| **What it detects** | Containers not listening on their declared ports |
+| **Common causes** | - Application configured to listen on a different port than the one declared in the Kubernetes configuration
- Application failed to bind to port (permission issues, port conflict)
- Application code error preventing port binding
- Wrong port number in deployment spec
- Environment variable for port not set correctly |
+| **Possible solutions** | - Check application configuration files (e.g., nginx.conf, application.properties)
- Verify environment variables controlling port binding
- Review application startup logs for port binding errors
- Ensure containerPort in deployment matches application's listen port
- Test port connectivity from within cluster |
+| **Example output (failure)** | `ℹ Checking pod web-app-123:`
`ℹ Container 'application':`
`✗ Port 8080: ✗ Declared but not listening or unreachable`
`ℹ Action: Check application configuration and ensure it listens on port 8080` |
+| **Example output (success)** | `ℹ Checking pod web-app-123:`
`ℹ Container 'application':`
`✓ Port 8080: ✓ Listening` |
+
+### 8. health_probe_endpoints
+
+| **Aspect** | **Details** |
+|------------|-------------|
+| **What it detects** | Health probe endpoints (readiness, liveness, startup) that are misconfigured or failing |
+| **Common causes** | - Health check endpoint path not found (404)
- Application not exposing health endpoint
- Port not listening or network unreachable
- Application returning errors (5xx) preventing health validation
- Path mismatch between probe config and application routes
- Health check dependencies failing (database, cache, etc.) |
+| **Possible solutions** | - Verify health endpoint exists in application:
```yaml
readinessProbe:
httpGet:
path: /health
port: 8080
```
- Check application logs for health endpoint errors
- Ensure health endpoint path matches probe configuration
- For 5xx errors, fix application dependencies or internal issues
- For connection failures, verify port is listening and accessible
- Test endpoint manually: `curl http://POD_IP:PORT/health`
- Review probe timing settings (initialDelaySeconds, timeoutSeconds) |
+| **Example output (failure)** | `ℹ Checking pod web-app-123:`
`ℹ Container 'app':`
`✗ Readiness Probe on HTTP://8080/health: ✗ HTTP 404 - Health check endpoint not found, verify path in deployment config`
`ℹ Action: Update probe path or implement /health endpoint in application` |
+| **Example output (warning)** | `ℹ Checking pod web-app-123:`
`ℹ Container 'app':`
`⚠ Readiness Probe on HTTP://8080/health: ⚠ HTTP 500 - Cannot complete check due to application error`
`⚠ Liveness Probe on HTTP://8080/health: ⚠ Connection failed (response: connection refused, exit code: 7)` |
+| **Example output (success)** | `ℹ Checking pod web-app-123:`
`ℹ Container 'app':`
`✓ Readiness Probe on HTTP://8080/health: ✓ HTTP 200`
`✓ Liveness Probe on HTTP://8080/health: ✓ HTTP 200` |
+
+---
+
+## Service checks
+
+### 1. service_existence
+
+| **Aspect** | **Details** |
+|------------|-------------|
+| **What it detects** | Missing service resources for the deployment |
+| **Common causes** | - Service not created
- Service deleted accidentally
- Service in wrong namespace
- Label selector mismatch preventing service discovery |
+| **Possible solutions** | - Create service resource
- Verify service is in correct namespace
- Check service label selectors match pods
- Review service creation in CI/CD pipeline |
+| **Example output (failure)** | `✗ No services found with labels scope_id=123456 in namespace production`
`ℹ Action: Create service resource or verify label selectors` |
+| **Example output (success)** | `✓ Found 1 service(s): web-app-service` |
+
+### 2. service_selector_match
+
+| **Aspect** | **Details** |
+|------------|-------------|
+| **What it detects** | Service selectors that don't match any pod labels |
+| **Common causes** | - Service selector labels don't match pod labels
- Typo in selector or labels
- Labels changed in deployment but not in service
- Using wrong label keys or values |
+| **Possible solutions** | - Compare service selectors with pod labels
- Update service selectors to match pods
- Ensure consistent labeling strategy
- Fix selectors in service definition |
+| **Example output (failure)** | `✗ Service web-app-service: No pods match selector (app=web-app,env=prod)`
`⚠ Existing pods with deployment_id: web-app-123`
`ℹ Pod labels: app=webapp,env=production,deployment_id=123`
`ℹ Selector check: 8/10 labels match`
`⚠ Selector mismatches:`
` ✗ app: selector='web-app', pod='webapp'`
` ✗ env: selector='prod', pod='production'`
`ℹ Action: Update service selectors to match pod labels` |
+| **Example output (success)** | `✓ Service web-app-service: Selector matches 3 pod(s)` |
+
+### 3. service_endpoints
+
+| **Aspect** | **Details** |
+|------------|-------------|
+| **What it detects** | Services without healthy backend endpoints |
+| **Common causes** | - No pods matching service selector
- All pods failing readiness probes
- Pods not exposing the correct port
- Network policy blocking traffic |
+| **Possible solutions** | - Verify pods exist and match service selector
- Fix pod readiness issues
- Ensure container exposes port specified in service
- Check network policies allow traffic
- Verify service targetPort matches container port |
+| **Example output (failure)** | `✗ Service web-app-service: No ready endpoints available`
`⚠ Not ready endpoints: 3`
`ℹ Action: Check pod readiness probes and pod status` |
+| **Example output (success)** | `✓ Service web-app-service: 3 ready endpoint(s)` |
+
+### 4. service_port_configuration
+
+| **Aspect** | **Details** |
+|------------|-------------|
+| **What it detects** | Service port configuration issues |
+| **Common causes** | - Service targetPort doesn't match container port
- Container not listening on expected port
- Port protocol mismatch (TCP vs UDP)
- Named ports not defined in container |
+| **Possible solutions** | - Verify container is listening on targetPort
- Check container port in deployment matches service targetPort
- Test port connectivity from within pod
- Review application logs for port binding issues
- Ensure protocol (TCP/UDP) matches application |
+| **Example output (failure)** | `✗ Port 80 -> 8080 (http): Container port 8080 not found`
`⚠ Available ports by container: app: 3000,9090`
`ℹ Action: Update service targetPort to match container port or fix container port` |
+| **Example output (success)** | `✓ Service web-app-service port configuration:`
` Port 80 → 8080 (http): OK` |
+
+### 5. service_type_validation
+
+| **Aspect** | **Details** |
+|------------|-------------|
+| **What it detects** | Invalid or unsupported service types |
+| **Common causes** | - Using LoadBalancer type without cloud provider support
- NodePort outside allowed range
- Attempting to use ExternalName with selectors
- LoadBalancer stuck in pending state |
+| **Possible solutions** | - Use appropriate service type for your environment
- For LoadBalancer without cloud provider, use NodePort or Ingress
- Verify cloud provider integration is configured
- Check NodePort is in valid range (30000-32767)
- Review cloud provider load balancer logs |
+| **Example output (failure)** | `ℹ Service web-app-service: Type=LoadBalancer`
`⚠ LoadBalancer IP/Hostname is Pending`
`ℹ This may take a few minutes to provision`
`ℹ Action: Wait for provisioning or check cloud provider logs for errors` |
+| **Example output (success)** | `✓ Service web-app-service: Type=ClusterIP`
` Internal service with ClusterIP: 10.96.100.50` |
+
+
+
+---
+
+## Quick reference: error categories
+
+| **Category** | **Checks** | **Common Root Causes** |
+|--------------|------------|------------------------|
+| **Pod Issues** | container_crash_detection, image_pull_status, pod_existence, container_port_health, health_probe_endpoints | Application errors, configuration issues, image problems, port misconfigurations, health check failures |
+| **Resource Issues** | memory_limits_check, resource_availability, storage_mounting | Insufficient resources, missing limits, capacity planning |
+| **Service Routing** | service_existence, service_selector_match, service_endpoints | Label mismatches, configuration errors, no healthy pods |
+| **Ingress/Networking** | ingress_existence, ingress_class_validation, ingress_controller_sync | Missing resources, controller issues, backend problems |
+| **TLS/Security** | ingress_tls_configuration, alb_capacity_check | Certificate issues, missing secrets, AWS-specific problems |
\ No newline at end of file
diff --git a/k8s/diagnose/build_context b/k8s/diagnose/build_context
index c0feb3a6..8ec7e8dc 100755
--- a/k8s/diagnose/build_context
+++ b/k8s/diagnose/build_context
@@ -7,7 +7,7 @@ NAMESPACE=$(echo "$CONTEXT" | jq -r --arg default "$K8S_NAMESPACE" '
SCOPE_LABEL_SELECTOR="scope_id=$SCOPE_ID"
LABEL_SELECTOR="$SCOPE_LABEL_SELECTOR"
-DEPLOYMENT_ID=$(echo "$CONTEXT" | jq -r '.deployment.id // .scope.current_active_deployment // empty')
+DEPLOYMENT_ID=$(echo "$CONTEXT" | jq -r '.parameters.deployment_id // .deployment.id // .scope.current_active_deployment // empty')
if [ -n "$DEPLOYMENT_ID" ]; then
LABEL_SELECTOR="$LABEL_SELECTOR,deployment_id=$DEPLOYMENT_ID"
fi
diff --git a/k8s/diagnose/networking/alb_capacity_check b/k8s/diagnose/networking/alb_capacity_check
index 9e3a2db5..445971f4 100644
--- a/k8s/diagnose/networking/alb_capacity_check
+++ b/k8s/diagnose/networking/alb_capacity_check
@@ -16,17 +16,25 @@ ALB_CONTROLLER_PODS=$(jq -r '.items[].metadata.name' "$ALB_CONTROLLER_PODS_FILE"
if [[ -n "$ALB_CONTROLLER_PODS" ]]; then
for POD in $ALB_CONTROLLER_PODS; do
# Look for IP exhaustion errors in pre-collected controller logs
- if [[ -f "$ALB_CONTROLLER_LOGS_DIR/${POD}.log" ]]; then
- IP_ERRORS=$(cat "$ALB_CONTROLLER_LOGS_DIR/${POD}.log" | grep -iE "no available ip|insufficient ip|ip address.*(exhausted|unavailable)" || true)
+ LOG_FILE="$ALB_CONTROLLER_LOGS_DIR/${POD}.log"
+ if [[ -f "$LOG_FILE" ]] && [[ -r "$LOG_FILE" ]]; then
+ # Use tail and awk to handle massive log lines efficiently
+ IP_ERRORS=$(tail -n 500 "$LOG_FILE" 2>/dev/null | \
+ awk 'length <= 10000' 2>/dev/null | \
+ grep -iE "no available ip|insufficient ip|ip address.*(exhausted|unavailable)" 2>/dev/null || true)
if [[ -n "$IP_ERRORS" ]]; then
HAS_ISSUES=1
print_error " ALB subnet IP exhaustion detected, Recent logs:"
- echo "$IP_ERRORS" | tail -n 3 | sed 's/^/ /'
- print_info " Action: Check subnet CIDR ranges and consider expanding or using different subnets"
+ if ! echo "$IP_ERRORS" | tail -n 3 2>/dev/null | cut -c1-200 2>/dev/null | sed 's/^/ /' 2>/dev/null; then
+ print_warning " [Log details could not be displayed]"
+ fi
+ print_action "Check subnet CIDR ranges and consider expanding or using different subnets"
print_info " Annotation: alb.ingress.kubernetes.io/subnets="
break
fi
+ elif [[ -e "$LOG_FILE" ]] && [[ ! -r "$LOG_FILE" ]]; then
+ print_warning " Cannot read ALB controller log file (permission denied): ${POD}.log"
fi
done
@@ -56,14 +64,21 @@ for INGRESS_NAME in $INGRESSES; do
# Check controller logs for certificate errors
if [[ -n "$ALB_CONTROLLER_PODS" ]]; then
for POD in $ALB_CONTROLLER_PODS; do
- if [[ -f "$ALB_CONTROLLER_LOGS_DIR/${POD}.log" ]]; then
- CERT_ERRORS=$(cat "$ALB_CONTROLLER_LOGS_DIR/${POD}.log" | grep -i "$INGRESS_NAME" | grep -iE "certificate.*(not found|invalid|failed|error)" || true)
+ LOG_FILE="$ALB_CONTROLLER_LOGS_DIR/${POD}.log"
+ if [[ -f "$LOG_FILE" ]] && [[ -r "$LOG_FILE" ]]; then
+ # Use tail and awk to handle massive log lines efficiently
+ CERT_ERRORS=$(tail -n 500 "$LOG_FILE" 2>/dev/null | \
+ awk 'length <= 10000' 2>/dev/null | \
+ grep -iF "$INGRESS_NAME" 2>/dev/null | \
+ grep -iE "certificate.*(not found|invalid|failed|error)" 2>/dev/null || true)
if [[ -n "$CERT_ERRORS" ]]; then
HAS_ISSUES=1
print_error " Certificate validation errors found:"
- echo "$CERT_ERRORS" | tail -n 2 | sed 's/^/ /'
- print_info " Action: Verify certificate ARN exists in ACM and covers the required domains"
+ if ! echo "$CERT_ERRORS" | tail -n 2 2>/dev/null | cut -c1-200 2>/dev/null | sed 's/^/ /' 2>/dev/null; then
+ print_warning " [Certificate error details could not be displayed]"
+ fi
+ print_action "Verify certificate ARN exists in ACM and covers the required domains"
fi
fi
done
@@ -76,7 +91,7 @@ for INGRESS_NAME in $INGRESSES; do
if ! echo "$TLS_HOSTS" | grep -qw "$HOST"; then
HAS_ISSUES=1
print_error " Host '$HOST' in rules but not in TLS configuration"
- print_info " Action: Add host to spec.tls or ensure certificate covers this domain"
+ print_action "Add host to spec.tls or ensure certificate covers this domain"
fi
done
fi
@@ -91,31 +106,53 @@ for INGRESS_NAME in $INGRESSES; do
fi
# ===== Events Checks (subnet, security group, target group) =====
- EVENTS=$(jq -r --arg name "$INGRESS_NAME" '.items[] | select(.involvedObject.name == $name) | "\(.lastTimestamp) \(.type) \(.reason) \(.message)"' "$EVENTS_FILE" 2>/dev/null | tail -n 20)
-
- if [[ -n "$EVENTS" ]]; then
- # Check for subnet errors
- SUBNET_ERRORS=$(echo "$EVENTS" | grep -iE "subnet|availability zone" | grep -iE "error|failed" || true)
- if [[ -n "$SUBNET_ERRORS" ]]; then
- HAS_ISSUES=1
- print_error " Subnet configuration issues"
- echo "$SUBNET_ERRORS" | tail -n 2 | sed 's/^/ /'
- fi
+ # Get events sorted by timestamp, most recent first
+ EVENTS_JSON=$(jq --arg name "$INGRESS_NAME" --arg kind "Ingress" '
+ .items
+ | map(select(.involvedObject.name == $name and .involvedObject.kind == $kind))
+ | sort_by(.lastTimestamp)
+ | reverse
+ ' "$EVENTS_FILE" 2>/dev/null)
+
+ EVENT_COUNT=$(echo "$EVENTS_JSON" | jq 'length' 2>/dev/null)
+
+ if [[ "$EVENT_COUNT" -gt 0 ]]; then
+ # Get all error/warning events
+ ERROR_EVENTS=$(echo "$EVENTS_JSON" | jq -r '
+ .[]
+ | select(.type == "Warning" or .type == "Error")
+ ' 2>/dev/null)
+
+ if [[ -n "$ERROR_EVENTS" ]]; then
+ # Check for subnet errors
+ SUBNET_ERRORS=$(echo "$ERROR_EVENTS" | jq -r 'select(.message | test("subnet|availability zone"; "i")) | "\(.lastTimestamp) [\(.type)] \(.reason): \(.message)"' 2>/dev/null || true)
+ if [[ -n "$SUBNET_ERRORS" ]]; then
+ HAS_ISSUES=1
+ print_error " Subnet configuration issues"
+ if ! echo "$SUBNET_ERRORS" | head -n 2 2>/dev/null | sed 's/^/ /' 2>/dev/null; then
+ print_warning " [Event details could not be displayed]"
+ fi
+ fi
- # Check for security group errors
- SG_ERRORS=$(echo "$EVENTS" | grep -iE "security.?group" | grep -iE "error|failed" || true)
- if [[ -n "$SG_ERRORS" ]]; then
- HAS_ISSUES=1
- print_error " Security group issues"
- echo "$SG_ERRORS" | tail -n 2 | sed 's/^/ /'
- fi
+ # Check for security group errors
+ SG_ERRORS=$(echo "$ERROR_EVENTS" | jq -r 'select(.message | test("security.?group"; "i")) | "\(.lastTimestamp) [\(.type)] \(.reason): \(.message)"' 2>/dev/null || true)
+ if [[ -n "$SG_ERRORS" ]]; then
+ HAS_ISSUES=1
+ print_error " Security group issues"
+ if ! echo "$SG_ERRORS" | head -n 2 2>/dev/null | sed 's/^/ /' 2>/dev/null; then
+ print_warning " [Event details could not be displayed]"
+ fi
+ fi
- # Check for target group errors
- TG_ERRORS=$(echo "$EVENTS" | grep -iE "target.?group" | grep -iE "error|failed" || true)
- if [[ -n "$TG_ERRORS" ]]; then
- HAS_ISSUES=1
- print_error " Target group registration issues"
- echo "$TG_ERRORS" | tail -n 2 | sed 's/^/ /'
+ # Check for target group errors
+ TG_ERRORS=$(echo "$ERROR_EVENTS" | jq -r 'select(.message | test("target.?group"; "i")) | "\(.lastTimestamp) [\(.type)] \(.reason): \(.message)"' 2>/dev/null || true)
+ if [[ -n "$TG_ERRORS" ]]; then
+ HAS_ISSUES=1
+ print_error " Target group registration issues"
+ if ! echo "$TG_ERRORS" | head -n 2 2>/dev/null | sed 's/^/ /' 2>/dev/null; then
+ print_warning " [Event details could not be displayed]"
+ fi
+ fi
fi
fi
diff --git a/k8s/diagnose/networking/ingress_backend_service b/k8s/diagnose/networking/ingress_backend_service
index f205a975..e20a570a 100644
--- a/k8s/diagnose/networking/ingress_backend_service
+++ b/k8s/diagnose/networking/ingress_backend_service
@@ -50,22 +50,75 @@ for INGRESS_NAME in $INGRESSES; do
fi
# Check each unique backend
- echo "$BACKENDS" | sort -u | while IFS=':' read -r SERVICE_NAME SERVICE_PORT; do
+ # Use process substitution to avoid subshell and preserve HAS_ISSUES updates
+ while IFS=':' read -r SERVICE_NAME SERVICE_PORT; do
# Check if service exists in pre-collected data
SERVICE_INFO=$(jq --arg name "$SERVICE_NAME" '.items[] | select(.metadata.name == $name)' "$SERVICES_FILE" 2>/dev/null)
if [[ -n "$SERVICE_INFO" && "$SERVICE_INFO" != "null" ]]; then
# Check if service has endpoints from pre-collected data
ENDPOINT_INFO=$(jq --arg name "$SERVICE_NAME" '.items[] | select(.metadata.name == $name)' "$ENDPOINTS_FILE" 2>/dev/null)
- ENDPOINTS=$(echo "$ENDPOINT_INFO" | jq -r '.subsets[].addresses[].ip' 2>/dev/null | tr '\n' ' ')
-
- if [[ -n "$ENDPOINTS" ]]; then
- ENDPOINT_COUNT=$(echo "$ENDPOINTS" | wc -w)
- print_success " Backend: $SERVICE_NAME:$SERVICE_PORT ($ENDPOINT_COUNT endpoint(s))"
+ READY_ENDPOINTS=$(echo "$ENDPOINT_INFO" | jq -r '.subsets[]?.addresses[]? | "\(.targetRef.name // "unknown"):\(.ip)"' 2>/dev/null)
+ NOT_READY_ENDPOINTS=$(echo "$ENDPOINT_INFO" | jq -r '.subsets[]?.notReadyAddresses[]? | "\(.targetRef.name // "unknown"):\(.ip)"' 2>/dev/null)
+
+ # Get port info
+ PORT_NUMBER=$(echo "$ENDPOINT_INFO" | jq -r '.subsets[0]?.ports[0]?.port // empty' 2>/dev/null)
+
+ READY_COUNT=$(echo "$READY_ENDPOINTS" | grep -c '^' 2>/dev/null || echo 0)
+ NOT_READY_COUNT=$(echo "$NOT_READY_ENDPOINTS" | grep -c '^' 2>/dev/null || echo 0)
+
+ if [[ $READY_COUNT -gt 0 ]]; then
+ print_success " Backend: $SERVICE_NAME:$SERVICE_PORT ($READY_COUNT ready endpoint(s))"
+ echo "$READY_ENDPOINTS" | while IFS=':' read -r POD_NAME IP; do
+ [[ -n "$IP" ]] && print_success " - $POD_NAME -> $IP:$PORT_NUMBER"
+ done
+
+ if [[ $NOT_READY_COUNT -gt 0 ]]; then
+ print_warning " Also has $NOT_READY_COUNT not ready endpoint(s)"
+ echo "$NOT_READY_ENDPOINTS" | while IFS=':' read -r POD_NAME IP; do
+ [[ -n "$IP" ]] && print_warning " - $POD_NAME -> $IP:$PORT_NUMBER"
+ done
+ fi
else
HAS_ISSUES=1
- print_error " Backend: $SERVICE_NAME:$SERVICE_PORT (no endpoints)"
- print_info " Action: Verify pods are running and service selector matches"
+ print_error " Backend: $SERVICE_NAME:$SERVICE_PORT (no ready endpoints)"
+
+ # Get service selector to help debug
+ SERVICE_SELECTOR=$(echo "$SERVICE_INFO" | jq -c '.spec.selector // {}' 2>/dev/null)
+ print_info " Service selector: $SERVICE_SELECTOR"
+
+ if [[ $NOT_READY_COUNT -gt 0 ]]; then
+ print_warning " Found $NOT_READY_COUNT not ready endpoint(s):"
+ echo "$NOT_READY_ENDPOINTS" | while IFS=':' read -r POD_NAME IP; do
+ [[ -n "$IP" ]] && print_warning " - $POD_NAME -> $IP:$PORT_NUMBER (not ready)"
+ done
+ print_action "Check pod readiness - pods exist but are not ready to serve traffic"
+ else
+ # Check if there are any pods matching the selector
+ if [[ "$SERVICE_SELECTOR" != "{}" && "$SERVICE_SELECTOR" != "null" ]]; then
+ MATCHING_PODS=$(jq -r --argjson selectors "$SERVICE_SELECTOR" '
+ .items[] |
+ . as $pod |
+ select(
+ $selectors | to_entries | all(.key as $k | .value as $v |
+ $pod.metadata.labels[$k] == $v
+ )
+ ) |
+ .metadata.name
+ ' "$PODS_FILE" 2>/dev/null | tr '\n' ' ')
+
+ if [[ -n "$MATCHING_PODS" ]]; then
+ print_warning " Found pods matching selector but no endpoints: $MATCHING_PODS"
+ print_action "Pods exist but endpoints not created - check pod readiness probes and status"
+ else
+ print_warning " No pods found matching service selector"
+ print_action "Create pods with labels matching the service selector: $SERVICE_SELECTOR"
+ fi
+ else
+ print_warning " Service has no selector defined"
+ print_action "Add selector to service or check if this is a headless/ExternalName service"
+ fi
+ fi
fi
# Verify port exists in service from pre-collected data
@@ -80,10 +133,12 @@ for INGRESS_NAME in $INGRESSES; do
HAS_ISSUES=1
print_error " Backend: Service '$SERVICE_NAME' not found in namespace"
fi
- done
+ done < <(echo "$BACKENDS" | sort -u)
done
if [[ $HAS_ISSUES -eq 0 ]]; then
+ INGRESS_COUNT=$(echo "$INGRESSES" | wc -w)
+ print_success "All backend services healthy for $INGRESS_COUNT ingress(es)"
update_check_result --status "success" --evidence "{}"
else
update_check_result --status "failed" --evidence "{}"
diff --git a/k8s/diagnose/networking/ingress_class_validation b/k8s/diagnose/networking/ingress_class_validation
index 373e565c..d796fd91 100644
--- a/k8s/diagnose/networking/ingress_class_validation
+++ b/k8s/diagnose/networking/ingress_class_validation
@@ -36,7 +36,7 @@ for INGRESS_NAME in $INGRESSES; do
else
HAS_ISSUES=1
print_error "Ingress $INGRESS_NAME: No IngressClass specified and no default found"
- print_info " Action: Specify ingressClassName or set a default IngressClass"
+ print_action "Specify ingressClassName or set a default IngressClass"
fi
else
# Verify the class exists
@@ -56,6 +56,8 @@ for INGRESS_NAME in $INGRESSES; do
done
if [[ $HAS_ISSUES -eq 0 ]]; then
+ INGRESS_COUNT=$(echo "$INGRESSES" | wc -w)
+ print_success "All $INGRESS_COUNT ingress(es) have valid IngressClass configuration"
update_check_result --status "success" --evidence "{}"
else
update_check_result --status "failed" --evidence "{}"
diff --git a/k8s/diagnose/networking/ingress_controller_sync b/k8s/diagnose/networking/ingress_controller_sync
index 47e34603..1f0a40d6 100644
--- a/k8s/diagnose/networking/ingress_controller_sync
+++ b/k8s/diagnose/networking/ingress_controller_sync
@@ -23,39 +23,58 @@ fi
for INGRESS_NAME in $INGRESSES; do
print_info "Checking sync status for ingress: $INGRESS_NAME"
- # Get ingress events from pre-collected data
- INGRESS_EVENTS=$(jq -r --arg name "$INGRESS_NAME" '.items[] | select(.involvedObject.name == $name) | "\(.lastTimestamp) \(.type) \(.reason) \(.message)"' "$EVENTS_FILE" 2>/dev/null | tail -n 20)
-
- if [[ -n "$INGRESS_EVENTS" ]]; then
- # Look for error/warning events
- ERROR_EVENTS=$(echo "$INGRESS_EVENTS" | grep -iE "error|failed|warning" || true)
-
- if [[ -n "$ERROR_EVENTS" ]]; then
- HAS_ISSUES=1
- print_error " Found error/warning events:"
- echo "$ERROR_EVENTS" | tail -n 5 | sed 's/^/ /'
-
- # Check for specific ALB errors
- if echo "$ERROR_EVENTS" | grep -qi "failed to reconcile"; then
- print_error " Issue: Failed to reconcile ingress"
- fi
-
- if echo "$ERROR_EVENTS" | grep -qi "no available ip"; then
- print_error " Issue: No available IPs in subnet (see alb_capacity_check)"
- fi
-
- if echo "$ERROR_EVENTS" | grep -qi "certificate"; then
- print_error " Issue: Certificate problem detected (see alb_capacity_check)"
- fi
+ # Get ingress events from pre-collected data - sorted by timestamp, most recent first
+ INGRESS_EVENTS_JSON=$(jq --arg name "$INGRESS_NAME" --arg kind "Ingress" '
+ .items
+ | map(select(.involvedObject.name == $name and .involvedObject.kind == $kind))
+ | sort_by(.lastTimestamp)
+ | reverse
+ ' "$EVENTS_FILE" 2>/dev/null)
+
+ EVENT_COUNT=$(echo "$INGRESS_EVENTS_JSON" | jq 'length' 2>/dev/null)
+
+ if [[ "$EVENT_COUNT" -gt 0 ]]; then
+ # Get the most recent event
+ NEWEST_EVENT=$(echo "$INGRESS_EVENTS_JSON" | jq -r 'first')
+ EVENT_TYPE=$(echo "$NEWEST_EVENT" | jq -r '.type')
+ EVENT_REASON=$(echo "$NEWEST_EVENT" | jq -r '.reason')
+ EVENT_MESSAGE=$(echo "$NEWEST_EVENT" | jq -r '.message')
+ EVENT_TIMESTAMP=$(echo "$NEWEST_EVENT" | jq -r '.lastTimestamp')
+
+ # Check for successful reconciliation first
+ if [[ "$EVENT_REASON" == "SuccessfullyReconciled" ]]; then
+ print_success " ✓ Successfully reconciled at $EVENT_TIMESTAMP"
+ elif [[ "$EVENT_TYPE" == "Normal" ]] && echo "$EVENT_REASON" | grep -qiE "ensured|synced"; then
+ print_success " ✓ Last event: $EVENT_REASON at $EVENT_TIMESTAMP"
else
- # Look for successful sync
- SUCCESS_EVENTS=$(echo "$INGRESS_EVENTS" | grep -iE "successfully reconciled|ensured" || true)
+ # Look for error/warning events in recent history
+ ERROR_EVENTS=$(echo "$INGRESS_EVENTS_JSON" | jq -r '
+ .[]
+ | select(.type == "Warning" or .type == "Error")
+ | "\(.lastTimestamp) [\(.type)] \(.reason): \(.message)"
+ ' | head -n 5)
+
+ if [[ -n "$ERROR_EVENTS" ]]; then
+ HAS_ISSUES=1
+ print_error " Found error/warning events:"
+ echo "$ERROR_EVENTS" | sed 's/^/ /'
+
+ # Check for specific ALB errors in all error events
+ ALL_ERROR_MESSAGES=$(echo "$INGRESS_EVENTS_JSON" | jq -r '.[] | select(.type == "Warning" or .type == "Error") | .message' 2>/dev/null)
+
+ if echo "$ALL_ERROR_MESSAGES" | grep -qi "failed to reconcile"; then
+ print_error " Issue: Failed to reconcile ingress"
+ fi
- if [[ -n "$SUCCESS_EVENTS" ]]; then
- LAST_SUCCESS=$(echo "$SUCCESS_EVENTS" | tail -n 1 | awk '{print $1, $2}')
- print_success " Last successful reconciliation: $LAST_SUCCESS"
+ if echo "$ALL_ERROR_MESSAGES" | grep -qi "no available ip\|insufficient.*address"; then
+ print_error " Issue: No available IPs in subnet (see alb_capacity_check)"
+ fi
+
+ if echo "$ALL_ERROR_MESSAGES" | grep -qi "certificate\|tls.*secret"; then
+ print_error " Issue: Certificate problem detected (see ingress_tls_configuration)"
+ fi
else
- print_warning " No explicit success or error events found"
+ print_info " Last event: $EVENT_REASON at $EVENT_TIMESTAMP"
fi
fi
else
@@ -66,21 +85,36 @@ for INGRESS_NAME in $INGRESSES; do
if [[ -n "$ALB_CONTROLLER_PODS" ]]; then
for POD in $ALB_CONTROLLER_PODS; do
# Get recent logs related to this ingress from pre-collected logs
- if [[ -f "$ALB_CONTROLLER_LOGS_DIR/${POD}.log" ]]; then
- CONTROLLER_LOGS=$(cat "$ALB_CONTROLLER_LOGS_DIR/${POD}.log" | grep -i "$INGRESS_NAME" || true)
+ LOG_FILE="$ALB_CONTROLLER_LOGS_DIR/${POD}.log"
+ if [[ -f "$LOG_FILE" ]] && [[ -r "$LOG_FILE" ]]; then
+ # Use tail to limit log size and grep with line-buffered to avoid memory issues
+ # Skip lines longer than 10000 chars to avoid processing massive JSON lines
+ CONTROLLER_LOGS=$(tail -n 500 "$LOG_FILE" 2>/dev/null | \
+ awk 'length <= 10000' 2>/dev/null | \
+ grep -iF "$INGRESS_NAME" 2>/dev/null || true)
if [[ -n "$CONTROLLER_LOGS" ]]; then
- # Look for errors in controller logs
- ERROR_LOGS=$(echo "$CONTROLLER_LOGS" | grep -iE "error|failed|warning" || true)
+ # Look for errors in controller logs (excluding "successfully built model" info logs)
+ ERROR_LOGS=$(echo "$CONTROLLER_LOGS" | \
+ grep -ivE "successfully built model|successfully reconciled" 2>/dev/null | \
+ grep -iE "level.*error|level.*warn|failed|warning" 2>/dev/null | \
+ head -n 5 || true)
if [[ -n "$ERROR_LOGS" ]]; then
HAS_ISSUES=1
print_error " Found errors in ALB controller logs:"
- echo "$ERROR_LOGS" | tail -n 3 | sed 's/^/ /'
+ # Safely print error logs with proper error handling and truncation
+ if ! echo "$ERROR_LOGS" | head -n 3 2>/dev/null | cut -c1-200 2>/dev/null | sed 's/^/ /' 2>/dev/null; then
+ print_warning " [Error logs could not be displayed due to formatting issues]"
+ fi
else
print_success " No errors in ALB controller logs for this ingress"
fi
+ else
+ print_info " No relevant logs found for this ingress in controller"
fi
+ elif [[ -e "$LOG_FILE" ]] && [[ ! -r "$LOG_FILE" ]]; then
+ print_warning " Cannot read ALB controller log file (permission denied): ${POD}.log"
fi
done
fi
@@ -91,12 +125,15 @@ for INGRESS_NAME in $INGRESSES; do
if [[ -z "$INGRESS_ADDRESS" ]]; then
HAS_ISSUES=1
print_error " ALB address not assigned yet (sync may be in progress or failing)"
+ print_action "Check ingress controller logs and verify backend services are healthy"
else
print_success " ALB address assigned: $INGRESS_ADDRESS"
fi
done
if [[ $HAS_ISSUES -eq 0 ]]; then
+ INGRESS_COUNT=$(echo "$INGRESSES" | wc -w)
+ print_success "All $INGRESS_COUNT ingress(es) synchronized successfully with controller"
update_check_result --status "success" --evidence "{}"
else
update_check_result --status "failed" --evidence "{}"
diff --git a/k8s/diagnose/networking/ingress_existence b/k8s/diagnose/networking/ingress_existence
index 88c1206e..48654bcc 100644
--- a/k8s/diagnose/networking/ingress_existence
+++ b/k8s/diagnose/networking/ingress_existence
@@ -2,12 +2,16 @@
# Check: Ingress Existence
# Verifies that ingress resources exist in the namespace
-# Validate ingresses exist
-require_ingresses || return 0
-
# Read ingresses from pre-collected data
INGRESSES=$(jq -r '.items[].metadata.name' "$INGRESSES_FILE" 2>/dev/null | tr '\n' ' ')
+if [[ -z "$INGRESSES" ]]; then
+ print_error "No ingresses found with labels $SCOPE_LABEL_SELECTOR in namespace $NAMESPACE"
+ print_action "Create ingress resource to expose services externally"
+ update_check_result --status "failed" --evidence "{}"
+ return 1
+fi
+
INGRESS_COUNT=$(echo "$INGRESSES" | wc -w)
print_success "Found $INGRESS_COUNT ingress(es): $INGRESSES"
diff --git a/k8s/diagnose/networking/ingress_host_rules b/k8s/diagnose/networking/ingress_host_rules
index f481d83b..25fd223f 100644
--- a/k8s/diagnose/networking/ingress_host_rules
+++ b/k8s/diagnose/networking/ingress_host_rules
@@ -36,6 +36,7 @@ for INGRESS_NAME in $INGRESSES; do
else
HAS_ISSUES=1
print_error " No rules and no default backend configured"
+ print_action "Add at least one rule or configure default backend"
fi
continue
fi
@@ -43,7 +44,8 @@ for INGRESS_NAME in $INGRESSES; do
# Check each rule
RULES=$(echo "$INGRESS_INFO" | jq -c '.spec.rules[]' 2>/dev/null)
- echo "$RULES" | while read -r RULE; do
+ # Use process substitution to avoid subshell and preserve HAS_ISSUES updates
+ while read -r RULE; do
HOST=$(echo "$RULE" | jq -r '.host // "*"')
# Check if host is defined
@@ -59,29 +61,32 @@ for INGRESS_NAME in $INGRESSES; do
if [[ -z "$PATHS" ]]; then
HAS_ISSUES=1
print_error " No paths defined for host $HOST"
+ print_action "Define at least one path for this host"
continue
fi
- echo "$PATHS" | while read -r PATH_RULE; do
- PATH=$(echo "$PATH_RULE" | jq -r '.path // "/"')
+ # Use process substitution to avoid subshell and preserve HAS_ISSUES updates
+ while read -r PATH_RULE; do
+ PATH_VALUE=$(echo "$PATH_RULE" | jq -r '.path // "/"')
PATH_TYPE=$(echo "$PATH_RULE" | jq -r '.pathType // "Prefix"')
BACKEND_SERVICE=$(echo "$PATH_RULE" | jq -r '.backend.service.name')
BACKEND_PORT=$(echo "$PATH_RULE" | jq -r '.backend.service.port.number // .backend.service.port.name')
- print_info " Path: $PATH ($PATH_TYPE) -> $BACKEND_SERVICE:$BACKEND_PORT"
+ print_info " Path: $PATH_VALUE ($PATH_TYPE) -> $BACKEND_SERVICE:$BACKEND_PORT"
# Validate pathType
if [[ "$PATH_TYPE" != "Exact" && "$PATH_TYPE" != "Prefix" && "$PATH_TYPE" != "ImplementationSpecific" ]]; then
HAS_ISSUES=1
print_error " Invalid pathType: $PATH_TYPE (must be Exact, Prefix, or ImplementationSpecific)"
+ print_action "Use valid pathType value"
fi
# Warn about path conventions
- if [[ "$PATH_TYPE" == "Prefix" && "$PATH" != "/" && ! "$PATH" =~ ^/.*[^/]$ ]]; then
+ if [[ "$PATH_TYPE" == "Prefix" && "$PATH_VALUE" != "/" && ! "$PATH_VALUE" =~ ^/.*[^/]$ ]]; then
print_warning " Path ends with '/' - this may cause routing issues with Prefix type"
fi
- done
- done
+ done < <(echo "$PATHS")
+ done < <(echo "$RULES")
# Check for conflicting rules
HOSTS=$(echo "$INGRESS_INFO" | jq -r '.spec.rules[].host' 2>/dev/null | sort)
@@ -94,6 +99,8 @@ for INGRESS_NAME in $INGRESSES; do
done
if [[ $HAS_ISSUES -eq 0 ]]; then
+ INGRESS_COUNT=$(echo "$INGRESSES" | wc -w)
+ print_success "Host and path rules valid for all $INGRESS_COUNT ingress(es)"
update_check_result --status "success" --evidence "{}"
else
update_check_result --status "failed" --evidence "{}"
diff --git a/k8s/diagnose/networking/ingress_tls_configuration b/k8s/diagnose/networking/ingress_tls_configuration
index 3ae8a4c2..1062a5d0 100644
--- a/k8s/diagnose/networking/ingress_tls_configuration
+++ b/k8s/diagnose/networking/ingress_tls_configuration
@@ -26,7 +26,8 @@ for INGRESS_NAME in $INGRESSES; do
# Get TLS secrets
TLS_SECRETS=$(echo "$INGRESS_INFO" | jq -r '.spec.tls[] | "\(.secretName):\(.hosts | join(","))"' 2>/dev/null)
- echo "$TLS_SECRETS" | while IFS=':' read -r SECRET_NAME HOSTS; do
+ # Use process substitution to avoid subshell and preserve HAS_ISSUES updates
+ while IFS=':' read -r SECRET_NAME HOSTS; do
# Check if secret exists in pre-collected data
SECRET_INFO=$(jq --arg name "$SECRET_NAME" '.items[] | select(.metadata.name == $name)' "$SECRETS_FILE" 2>/dev/null)
@@ -78,12 +79,14 @@ for INGRESS_NAME in $INGRESSES; do
else
HAS_ISSUES=1
print_error " TLS Secret: '$SECRET_NAME' not found in namespace"
- print_info " Action: Create TLS secret or update ingress configuration"
+ print_action "Create TLS secret or update ingress configuration"
fi
- done
+ done < <(echo "$TLS_SECRETS")
done
if [[ $HAS_ISSUES -eq 0 ]]; then
+ INGRESS_COUNT=$(echo "$INGRESSES" | wc -w)
+ print_success "TLS configuration valid for all $INGRESS_COUNT ingress(es)"
update_check_result --status "success" --evidence "{}"
else
update_check_result --status "failed" --evidence "{}"
diff --git a/k8s/diagnose/networking/workflow.yml b/k8s/diagnose/networking/workflow.yml
index 411ab2fb..eb40fc9e 100644
--- a/k8s/diagnose/networking/workflow.yml
+++ b/k8s/diagnose/networking/workflow.yml
@@ -1,36 +1,36 @@
steps:
- - name: ingress_existence
+ - name: Ingress Existence
description: Verifies that ingress resources exist in the namespace
- category: networking
+ category: Networking
type: script
file: "$SERVICE_PATH/diagnose/networking/ingress_existence"
- - name: ingress_class_validation
+ - name: Ingress Class
description: Validates ingress class is correctly configured
- category: networking
+ category: Networking
type: script
file: "$SERVICE_PATH/diagnose/networking/ingress_class_validation"
- - name: ingress_backend_service
+ - name: Ingress Backend Service
description: Checks if ingress backend services exist and are reachable
- category: networking
+ category: Networking
type: script
file: "$SERVICE_PATH/diagnose/networking/ingress_backend_service"
- - name: ingress_tls_configuration
+ - name: TLS Configuration
description: Validates TLS/SSL certificate configuration
- category: networking
+ category: Networking
type: script
file: "$SERVICE_PATH/diagnose/networking/ingress_tls_configuration"
- - name: ingress_host_rules
+ - name: Host & Path Rules
description: Verifies host and path rules are properly configured
- category: networking
+ category: Networking
type: script
file: "$SERVICE_PATH/diagnose/networking/ingress_host_rules"
- - name: ingress_controller_sync
+ - name: Ingress Controller Sync
description: Verifies ALB ingress controller has synchronized successfully
- category: networking
+ category: Networking
type: script
file: "$SERVICE_PATH/diagnose/networking/ingress_controller_sync"
- - name: alb_capacity_check
+ - name: ALB Capacity
description: Checks for common ALB issues (IP exhaustion, certificate problems)
- category: networking
+ category: Networking
type: script
file: "$SERVICE_PATH/diagnose/networking/alb_capacity_check"
\ No newline at end of file
diff --git a/k8s/diagnose/scope/container_crash_detection b/k8s/diagnose/scope/container_crash_detection
index 99d48051..8a8121c3 100644
--- a/k8s/diagnose/scope/container_crash_detection
+++ b/k8s/diagnose/scope/container_crash_detection
@@ -1,6 +1,6 @@
#!/bin/bash
# Check: Container Crash Detection
-# Checks if containers are crashing on startup
+# Checks if containers are crashing or in error states
# Validate pods exist
require_pods || return 0
@@ -14,6 +14,7 @@ for POD_NAME in $PODS; do
# Get pod info from pre-collected data
POD_INFO=$(jq --arg name "$POD_NAME" '.items[] | select(.metadata.name == $name)' "$PODS_FILE" 2>/dev/null)
+ # Check for containers in crash states
CRASH_LOOP=$(echo "$POD_INFO" | jq -r '.status.containerStatuses[]? | select(.state.waiting.reason == "CrashLoopBackOff") | .name')
if [[ -n "$CRASH_LOOP" ]]; then
@@ -23,22 +24,73 @@ for POD_NAME in $PODS; do
for CONTAINER in $CRASH_LOOP; do
RESTART_COUNT=$(echo "$POD_INFO" | jq -r ".status.containerStatuses[] | select(.name==\"$CONTAINER\") | .restartCount")
EXIT_CODE=$(echo "$POD_INFO" | jq -r ".status.containerStatuses[] | select(.name==\"$CONTAINER\") | .lastState.terminated.exitCode // \"N/A\"")
+ TERMINATION_REASON=$(echo "$POD_INFO" | jq -r ".status.containerStatuses[] | select(.name==\"$CONTAINER\") | .lastState.terminated.reason // \"Unknown\"")
- print_warning " Container: $CONTAINER | Restarts: $RESTART_COUNT | Exit Code: $EXIT_CODE"
+ print_warning " Container: $CONTAINER | Restarts: $RESTART_COUNT | Exit Code: $EXIT_CODE | Reason: $TERMINATION_REASON"
case "$EXIT_CODE" in
137) print_warning " Exit 137 = OOMKilled (out of memory)" ;;
143) print_warning " Exit 143 = SIGTERM (graceful termination)" ;;
1) print_warning " Exit 1 = Application error" ;;
+ 139) print_warning " Exit 139 = SIGSEGV (segmentation fault)" ;;
esac
done
print_info "Last logs from $POD_NAME:"
kubectl logs "$POD_NAME" -n "$NAMESPACE" --tail=10 2>&1 | sed 's/^/ /'
+ print_action "Check container logs and fix application startup issues"
+ fi
+
+ # Check for containers that terminated but haven't restarted yet
+ TERMINATED_CONTAINERS=$(echo "$POD_INFO" | jq -r '.status.containerStatuses[]? | select(.state.terminated) | .name')
+
+ if [[ -n "$TERMINATED_CONTAINERS" ]]; then
+ HAS_CRASHES=1
+ print_error "Pod $POD_NAME: Terminated container(s): $TERMINATED_CONTAINERS"
+
+ for CONTAINER in $TERMINATED_CONTAINERS; do
+ EXIT_CODE=$(echo "$POD_INFO" | jq -r ".status.containerStatuses[] | select(.name==\"$CONTAINER\") | .state.terminated.exitCode // \"N/A\"")
+ TERMINATION_REASON=$(echo "$POD_INFO" | jq -r ".status.containerStatuses[] | select(.name==\"$CONTAINER\") | .state.terminated.reason // \"Unknown\"")
+ RESTART_COUNT=$(echo "$POD_INFO" | jq -r ".status.containerStatuses[] | select(.name==\"$CONTAINER\") | .restartCount")
+
+ print_warning " Container: $CONTAINER | Exit Code: $EXIT_CODE | Reason: $TERMINATION_REASON | Restarts: $RESTART_COUNT"
+
+ case "$EXIT_CODE" in
+ 137) print_warning " Exit 137 = OOMKilled (out of memory)" ;;
+ 143) print_warning " Exit 143 = SIGTERM (graceful termination)" ;;
+ 1) print_warning " Exit 1 = Application error" ;;
+ 139) print_warning " Exit 139 = SIGSEGV (segmentation fault)" ;;
+ 0) print_info " Exit 0 = Clean exit (container finished successfully)" ;;
+ esac
+ done
+
+ print_action "Check why container terminated and review logs"
+ fi
+
+ # Check for containers with high restart counts (even if currently running)
+ HIGH_RESTART_CONTAINERS=$(echo "$POD_INFO" | jq -r '.status.containerStatuses[]? | select(.restartCount >= 3) | "\(.name):\(.restartCount)"')
+
+ if [[ -n "$HIGH_RESTART_CONTAINERS" ]]; then
+ HAS_CRASHES=1
+ print_warning "Pod $POD_NAME: Container(s) with high restart count:"
+
+ while IFS= read -r CONTAINER_INFO; do
+ CONTAINER_NAME=$(echo "$CONTAINER_INFO" | cut -d':' -f1)
+ RESTART_COUNT=$(echo "$CONTAINER_INFO" | cut -d':' -f2)
+
+ LAST_EXIT_CODE=$(echo "$POD_INFO" | jq -r ".status.containerStatuses[] | select(.name==\"$CONTAINER_NAME\") | .lastState.terminated.exitCode // \"N/A\"")
+ LAST_REASON=$(echo "$POD_INFO" | jq -r ".status.containerStatuses[] | select(.name==\"$CONTAINER_NAME\") | .lastState.terminated.reason // \"Unknown\"")
+
+ print_warning " Container: $CONTAINER_NAME | Restarts: $RESTART_COUNT | Last Exit: $LAST_EXIT_CODE | Reason: $LAST_REASON"
+ done <<< "$HIGH_RESTART_CONTAINERS"
+
+ print_action "Container has restarted multiple times - check for intermittent issues"
fi
done
if [[ $HAS_CRASHES -eq 0 ]]; then
+ POD_COUNT=$(echo "$PODS" | wc -w)
+ print_success "All $POD_COUNT pod(s) running without crashes or errors"
update_check_result --status "success" --evidence "{}"
else
update_check_result --status "failed" --evidence "{}"
diff --git a/k8s/diagnose/scope/container_port_health b/k8s/diagnose/scope/container_port_health
new file mode 100755
index 00000000..78152ee9
--- /dev/null
+++ b/k8s/diagnose/scope/container_port_health
@@ -0,0 +1,161 @@
+#!/bin/bash
+# Check: Container Port Health
+# Validates that containers are actually listening on their declared ports
+
+# Validate pods exist
+require_pods || return 0
+
+# Read pods from pre-collected data
+PODS=$(jq -r '.items[].metadata.name' "$PODS_FILE" 2>/dev/null | tr '\n' ' ')
+
+HAS_PORT_ISSUES=0
+CONTAINERS_TESTED=0
+CONTAINERS_SKIPPED=0
+
+for POD_NAME in $PODS; do
+ # Get pod info from pre-collected data
+ POD_INFO=$(jq --arg name "$POD_NAME" '.items[] | select(.metadata.name == $name)' "$PODS_FILE" 2>/dev/null)
+
+ # Check if pod is running
+ POD_PHASE=$(echo "$POD_INFO" | jq -r '.status.phase')
+ if [[ "$POD_PHASE" != "Running" ]]; then
+ print_warning "Pod $POD_NAME: Not running (phase: $POD_PHASE), skipping port checks"
+ continue
+ fi
+
+ # Get pod IP
+ POD_IP=$(echo "$POD_INFO" | jq -r '.status.podIP')
+ if [[ -z "$POD_IP" || "$POD_IP" == "null" ]]; then
+ print_warning "Pod $POD_NAME: No IP assigned, skipping port checks"
+ continue
+ fi
+
+ print_info "Checking pod $POD_NAME:"
+
+ # Get all containers with their ports
+ CONTAINERS=$(echo "$POD_INFO" | jq -r '.spec.containers[] | @base64')
+
+ for CONTAINER_B64 in $CONTAINERS; do
+ CONTAINER_DATA=$(echo "$CONTAINER_B64" | base64 -d)
+ CONTAINER_NAME=$(echo "$CONTAINER_DATA" | jq -r '.name')
+
+ # Check container status before testing ports
+ CONTAINER_STATUS=$(echo "$POD_INFO" | jq -r --arg name "$CONTAINER_NAME" '.status.containerStatuses[]? | select(.name == $name)')
+
+ if [[ -z "$CONTAINER_STATUS" ]]; then
+ print_warning " Container '$CONTAINER_NAME': Status not found, skipping"
+ continue
+ fi
+
+ # Check if container is ready
+ CONTAINER_READY=$(echo "$CONTAINER_STATUS" | jq -r '.ready')
+ CONTAINER_STATE=$(echo "$CONTAINER_STATUS" | jq -r '
+ if .state.running then "running"
+ elif .state.waiting then "waiting"
+ elif .state.terminated then "terminated"
+ else "unknown"
+ end
+ ')
+
+ # Get declared ports for this container
+ CONTAINER_PORTS=$(echo "$CONTAINER_DATA" | jq -r '.ports[]? | .containerPort' | tr '\n' ' ')
+
+ if [[ -z "$CONTAINER_PORTS" ]]; then
+ print_info " Container '$CONTAINER_NAME': No ports declared"
+ continue
+ fi
+
+ print_info " Container '$CONTAINER_NAME':"
+
+ # If container is not running, explain why we can't test ports
+ if [[ "$CONTAINER_STATE" != "running" ]]; then
+ if [[ "$CONTAINER_STATE" == "waiting" ]]; then
+ WAITING_REASON=$(echo "$CONTAINER_STATUS" | jq -r '.state.waiting.reason // "Unknown"')
+ WAITING_MESSAGE=$(echo "$CONTAINER_STATUS" | jq -r '.state.waiting.message // ""')
+
+ # Check if it's a normal startup state or a problem
+ case "$WAITING_REASON" in
+ ContainerCreating|PodInitializing|Pulling)
+ CONTAINERS_SKIPPED=$((CONTAINERS_SKIPPED + 1))
+ print_info " Container is starting ($WAITING_REASON) - skipping port checks"
+ continue
+ ;;
+ CrashLoopBackOff|ImagePullBackOff|ErrImagePull)
+ CONTAINERS_SKIPPED=$((CONTAINERS_SKIPPED + 1))
+ print_warning " Cannot test ports - container is in error state: $WAITING_REASON"
+ if [[ -n "$WAITING_MESSAGE" ]]; then
+ print_warning " Message: $WAITING_MESSAGE"
+ fi
+ print_action "Fix container startup issues (check container_crash_detection results)"
+ continue
+ ;;
+ *)
+ CONTAINERS_SKIPPED=$((CONTAINERS_SKIPPED + 1))
+ print_warning " Container waiting: $WAITING_REASON - skipping port checks"
+ continue
+ ;;
+ esac
+ elif [[ "$CONTAINER_STATE" == "terminated" ]]; then
+ EXIT_CODE=$(echo "$CONTAINER_STATUS" | jq -r '.state.terminated.exitCode // "N/A"')
+ TERMINATION_REASON=$(echo "$CONTAINER_STATUS" | jq -r '.state.terminated.reason // "Unknown"')
+ CONTAINERS_SKIPPED=$((CONTAINERS_SKIPPED + 1))
+ print_warning " Cannot test ports - container terminated (Exit: $EXIT_CODE, Reason: $TERMINATION_REASON)"
+ print_action "Fix container termination (check container_crash_detection results)"
+ continue
+ else
+ print_warning " Container in unknown state - skipping port checks"
+ continue
+ fi
+ fi
+
+ # Container is running - check if it's ready
+ if [[ "$CONTAINER_READY" != "true" ]]; then
+ print_warning " Container is running but not ready - port connectivity may fail"
+ fi
+
+ # Test connectivity to each declared port from agent
+ CONTAINERS_TESTED=$((CONTAINERS_TESTED + 1))
+
+ for PORT in $CONTAINER_PORTS; do
+ # Try nc first, then timeout + /dev/tcp, then curl
+ if command -v nc >/dev/null 2>&1; then
+ timeout 2 nc -z -w 1 "$POD_IP" "$PORT" >/dev/null 2>&1
+ CONNECTIVITY_EXIT_CODE=$?
+ elif command -v timeout >/dev/null 2>&1; then
+ timeout 2 bash -c "cat < /dev/null > /dev/tcp/$POD_IP/$PORT" 2>/dev/null
+ CONNECTIVITY_EXIT_CODE=$?
+ elif command -v curl >/dev/null 2>&1; then
+ curl -s --connect-timeout 2 --max-time 2 "telnet://$POD_IP:$PORT" </dev/null >/dev/null 2>&1
+ CONNECTIVITY_EXIT_CODE=$?
+ else
+ print_warning " Port $PORT: Cannot test (nc/timeout/curl not available in agent)"
+ continue
+ fi
+
+ if [[ $CONNECTIVITY_EXIT_CODE -eq 0 ]]; then
+ print_success " Port $PORT: ✓ Listening"
+ else
+ HAS_PORT_ISSUES=1
+ print_error " Port $PORT: ✗ Declared but not listening or unreachable"
+ print_action "Check application configuration and ensure it listens on port $PORT"
+ fi
+ done
+ done
+done
+
+echo ""
+if [[ $CONTAINERS_TESTED -eq 0 ]]; then
+ # No containers were tested - all were skipped
+ print_info "All containers skipped - no port checks could be performed"
+ update_check_result --status "skipped" --evidence "{\"tested\":$CONTAINERS_TESTED,\"skipped\":$CONTAINERS_SKIPPED}"
+elif [[ $HAS_PORT_ISSUES -eq 0 ]]; then
+ # Some/all containers were tested and all passed
+ print_success "Port connectivity verified on $CONTAINERS_TESTED container(s)"
+ update_check_result --status "success" --evidence "{\"tested\":$CONTAINERS_TESTED,\"skipped\":$CONTAINERS_SKIPPED}"
+else
+ # Some containers were tested and had issues
+ if [[ $CONTAINERS_SKIPPED -gt 0 ]]; then
+ print_warning "Port issues found ($CONTAINERS_TESTED tested, $CONTAINERS_SKIPPED skipped)"
+ fi
+ update_check_result --status "failed" --evidence "{\"tested\":$CONTAINERS_TESTED,\"skipped\":$CONTAINERS_SKIPPED}"
+fi
diff --git a/k8s/diagnose/scope/health_probe_endpoints b/k8s/diagnose/scope/health_probe_endpoints
new file mode 100755
index 00000000..a7bfd2c6
--- /dev/null
+++ b/k8s/diagnose/scope/health_probe_endpoints
@@ -0,0 +1,401 @@
+#!/bin/bash
+# Check: Health Probe Endpoints
+# Validates that liveness and readiness probe endpoints are configured and responding correctly
+
+# Validate pods exist
+require_pods || return 0
+
+# Read pods from pre-collected data
+PODS=$(jq -r '.items[].metadata.name' "$PODS_FILE" 2>/dev/null | tr '\n' ' ')
+
+HAS_PROBE_ISSUES=0
+HAS_PROBE_WARNINGS=0
+CONTAINERS_TESTED=0
+CONTAINERS_SKIPPED=0
+
+for POD_NAME in $PODS; do
+ # Get pod info from pre-collected data
+ POD_INFO=$(jq --arg name "$POD_NAME" '.items[] | select(.metadata.name == $name)' "$PODS_FILE" 2>/dev/null)
+
+ # Check if pod is running
+ POD_PHASE=$(echo "$POD_INFO" | jq -r '.status.phase')
+ if [[ "$POD_PHASE" != "Running" ]]; then
+ print_warning "Pod $POD_NAME: Not running (phase: $POD_PHASE), skipping probe checks"
+ continue
+ fi
+
+ # Get pod IP
+ POD_IP=$(echo "$POD_INFO" | jq -r '.status.podIP')
+ if [[ -z "$POD_IP" || "$POD_IP" == "null" ]]; then
+ print_warning "Pod $POD_NAME: No IP assigned, skipping probe checks"
+ continue
+ fi
+
+ print_info "Checking pod $POD_NAME:"
+
+ # Get all containers
+ CONTAINERS=$(echo "$POD_INFO" | jq -r '.spec.containers[] | @base64')
+
+ for CONTAINER_B64 in $CONTAINERS; do
+ CONTAINER_DATA=$(echo "$CONTAINER_B64" | base64 -d)
+ CONTAINER_NAME=$(echo "$CONTAINER_DATA" | jq -r '.name')
+
+ print_info " Container '$CONTAINER_NAME':"
+
+ # Check container status before testing probes
+ CONTAINER_STATUS=$(echo "$POD_INFO" | jq -r --arg name "$CONTAINER_NAME" '.status.containerStatuses[]? | select(.name == $name)')
+
+ if [[ -z "$CONTAINER_STATUS" ]]; then
+ print_warning " Container status not found, skipping probe checks"
+ continue
+ fi
+
+ # Check if container is ready
+ CONTAINER_READY=$(echo "$CONTAINER_STATUS" | jq -r '.ready')
+ CONTAINER_STATE=$(echo "$CONTAINER_STATUS" | jq -r '
+ if .state.running then "running"
+ elif .state.waiting then "waiting"
+ elif .state.terminated then "terminated"
+ else "unknown"
+ end
+ ')
+
+ # If container is not running, explain why we can't test probes
+ if [[ "$CONTAINER_STATE" != "running" ]]; then
+ if [[ "$CONTAINER_STATE" == "waiting" ]]; then
+ WAITING_REASON=$(echo "$CONTAINER_STATUS" | jq -r '.state.waiting.reason // "Unknown"')
+ WAITING_MESSAGE=$(echo "$CONTAINER_STATUS" | jq -r '.state.waiting.message // ""')
+
+ # Check if it's a normal startup state or a problem
+ case "$WAITING_REASON" in
+ ContainerCreating|PodInitializing|Pulling)
+ CONTAINERS_SKIPPED=$((CONTAINERS_SKIPPED + 1))
+ print_info " Container is starting ($WAITING_REASON) - skipping probe checks"
+ continue
+ ;;
+ CrashLoopBackOff|ImagePullBackOff|ErrImagePull)
+ CONTAINERS_SKIPPED=$((CONTAINERS_SKIPPED + 1))
+ print_warning " Cannot test probes - container is in error state: $WAITING_REASON"
+ if [[ -n "$WAITING_MESSAGE" ]]; then
+ print_warning " Message: $WAITING_MESSAGE"
+ fi
+ print_action "Fix container startup issues (check container_crash_detection results)"
+ continue
+ ;;
+ *)
+ CONTAINERS_SKIPPED=$((CONTAINERS_SKIPPED + 1))
+ print_warning " Container waiting: $WAITING_REASON - skipping probe checks"
+ continue
+ ;;
+ esac
+ elif [[ "$CONTAINER_STATE" == "terminated" ]]; then
+ EXIT_CODE=$(echo "$CONTAINER_STATUS" | jq -r '.state.terminated.exitCode // "N/A"')
+ TERMINATION_REASON=$(echo "$CONTAINER_STATUS" | jq -r '.state.terminated.reason // "Unknown"')
+ CONTAINERS_SKIPPED=$((CONTAINERS_SKIPPED + 1))
+ print_warning " Cannot test probes - container terminated (Exit: $EXIT_CODE, Reason: $TERMINATION_REASON)"
+ print_action "Fix container termination (check container_crash_detection results)"
+ continue
+ else
+ print_warning " Container in unknown state - skipping probe checks"
+ continue
+ fi
+ fi
+
+ # Container is running - check if it's ready
+ if [[ "$CONTAINER_READY" != "true" ]]; then
+ print_info " Container is running but not ready - probe checks may show why"
+ fi
+
+ # Check if container has any probes configured
+ HAS_READINESS=$(echo "$CONTAINER_DATA" | jq -r '.readinessProbe // empty')
+ HAS_LIVENESS=$(echo "$CONTAINER_DATA" | jq -r '.livenessProbe // empty')
+ HAS_STARTUP=$(echo "$CONTAINER_DATA" | jq -r '.startupProbe // empty')
+
+ if [[ -z "$HAS_READINESS" && -z "$HAS_LIVENESS" && -z "$HAS_STARTUP" ]]; then
+ print_warning " No health probes configured (recommend adding readiness/liveness probes)"
+ continue
+ fi
+
+ # Container has probes and is testable
+ CONTAINERS_TESTED=$((CONTAINERS_TESTED + 1))
+
+ # Track issues for this container to avoid repetitive action messages
+ CONTAINER_HAS_CONNECTION_ISSUES=0
+ CONTAINER_HAS_4XX_ISSUES=0
+ CONTAINER_HAS_5XX_ISSUES=0
+ FAILED_PROBES_LIST=""
+
+ # Check Readiness Probe
+ if [[ -n "$HAS_READINESS" ]]; then
+ PROBE_TYPE=$(echo "$CONTAINER_DATA" | jq -r 'if .readinessProbe.httpGet then "httpGet" elif .readinessProbe.tcpSocket then "tcpSocket" elif .readinessProbe.exec then "exec" else "unknown" end')
+
+ if [[ "$PROBE_TYPE" == "httpGet" ]]; then
+ PROBE_PATH=$(echo "$CONTAINER_DATA" | jq -r '.readinessProbe.httpGet.path')
+ PROBE_PORT=$(echo "$CONTAINER_DATA" | jq -r '.readinessProbe.httpGet.port')
+ PROBE_SCHEME=$(echo "$CONTAINER_DATA" | jq -r '.readinessProbe.httpGet.scheme // "HTTP"')
+ PROBE_URL="${PROBE_SCHEME,,}://$POD_IP:$PROBE_PORT$PROBE_PATH"
+
+ # Try curl first from agent, then wget
+ if command -v curl >/dev/null 2>&1; then
+ if [[ "${PROBE_SCHEME^^}" == "HTTPS" ]]; then
+ PROBE_RESPONSE=$(curl -k -s -o /dev/null -w '%{http_code}' --max-time 2 "$PROBE_URL" 2>&1)
+ else
+ PROBE_RESPONSE=$(curl -s -o /dev/null -w '%{http_code}' --max-time 2 "$PROBE_URL" 2>&1)
+ fi
+ PROBE_EXIT_CODE=$?
+ elif command -v wget >/dev/null 2>&1; then
+ if [[ "${PROBE_SCHEME^^}" == "HTTPS" ]]; then
+ PROBE_RESPONSE=$(wget --no-check-certificate -O /dev/null --timeout=2 "$PROBE_URL" 2>&1)
+ else
+ PROBE_RESPONSE=$(wget -O /dev/null --timeout=2 "$PROBE_URL" 2>&1)
+ fi
+ PROBE_EXIT_CODE=$?
+ # Parse wget output to extract HTTP status or error
+ if [[ $PROBE_EXIT_CODE -eq 0 ]]; then
+ PROBE_RESPONSE="200"
+ else
+ # Extract error from wget output - try multiple patterns
+ ERROR_MSG=$(echo "$PROBE_RESPONSE" | grep -iE "failed:|connection refused|timed? ?out|cannot connect|unable to|network|unreachable" | head -1)
+ if [[ -n "$ERROR_MSG" ]]; then
+ # Shorten the message if too long
+ PROBE_RESPONSE=$(echo "$ERROR_MSG" | cut -c1-80)
+ else
+ # If no specific error found, show exit code
+ PROBE_RESPONSE="wget failed with exit code $PROBE_EXIT_CODE"
+ fi
+ fi
+ else
+ print_warning " Readiness Probe on $PROBE_SCHEME://$PROBE_PORT$PROBE_PATH: Cannot test (curl/wget not available in agent)"
+ continue
+ fi
+
+ if [[ $PROBE_EXIT_CODE -eq 0 && "$PROBE_RESPONSE" =~ ^[2-3][0-9][0-9]$ ]]; then
+ print_success " Readiness Probe on $PROBE_SCHEME://$PROBE_PORT$PROBE_PATH: ✓ HTTP $PROBE_RESPONSE"
+ else
+ # Probe failed - check if it's config issue or app issue
+ if [[ "$PROBE_RESPONSE" =~ ^4[0-9][0-9]$ ]]; then
+ # 4xx error: endpoint not found or bad config
+ HAS_PROBE_ISSUES=1
+ CONTAINER_HAS_4XX_ISSUES=1
+ FAILED_PROBES_LIST="$FAILED_PROBES_LIST Readiness"
+ print_error " Readiness Probe on $PROBE_SCHEME://$PROBE_PORT$PROBE_PATH: ✗ HTTP $PROBE_RESPONSE - Health check endpoint not found"
+ elif [[ "$PROBE_RESPONSE" =~ ^5[0-9][0-9]$ ]]; then
+ # 5xx error: app has internal issues
+ HAS_PROBE_WARNINGS=1
+ CONTAINER_HAS_5XX_ISSUES=1
+ FAILED_PROBES_LIST="$FAILED_PROBES_LIST Readiness"
+ print_warning " Readiness Probe on $PROBE_SCHEME://$PROBE_PORT$PROBE_PATH: ⚠ HTTP $PROBE_RESPONSE - Application error"
+ else
+ # Connection failed or other error (port not listening, network issue, etc)
+ HAS_PROBE_WARNINGS=1
+ CONTAINER_HAS_CONNECTION_ISSUES=1
+ FAILED_PROBES_LIST="$FAILED_PROBES_LIST Readiness"
+ print_warning " Readiness Probe on $PROBE_SCHEME://$PROBE_PORT$PROBE_PATH: ⚠ Connection failed (response: $PROBE_RESPONSE, exit code: $PROBE_EXIT_CODE)"
+ fi
+ fi
+ elif [[ "$PROBE_TYPE" == "tcpSocket" ]]; then
+ PROBE_PORT=$(echo "$CONTAINER_DATA" | jq -r '.readinessProbe.tcpSocket.port')
+ print_info " Readiness Probe: TCP Socket on port $PROBE_PORT (tested in port health check)"
+ elif [[ "$PROBE_TYPE" == "exec" ]]; then
+ PROBE_COMMAND=$(echo "$CONTAINER_DATA" | jq -r '.readinessProbe.exec.command | join(" ")')
+ print_info " Readiness Probe: Exec [$PROBE_COMMAND] (cannot test directly)"
+ fi
+ fi
+
+ # Check Liveness Probe
+ if [[ -n "$HAS_LIVENESS" ]]; then
+ PROBE_TYPE=$(echo "$CONTAINER_DATA" | jq -r 'if .livenessProbe.httpGet then "httpGet" elif .livenessProbe.tcpSocket then "tcpSocket" elif .livenessProbe.exec then "exec" else "unknown" end')
+
+ if [[ "$PROBE_TYPE" == "httpGet" ]]; then
+ PROBE_PATH=$(echo "$CONTAINER_DATA" | jq -r '.livenessProbe.httpGet.path')
+ PROBE_PORT=$(echo "$CONTAINER_DATA" | jq -r '.livenessProbe.httpGet.port')
+ PROBE_SCHEME=$(echo "$CONTAINER_DATA" | jq -r '.livenessProbe.httpGet.scheme // "HTTP"')
+ PROBE_URL="${PROBE_SCHEME,,}://$POD_IP:$PROBE_PORT$PROBE_PATH"
+
+ # Try curl first from agent, then wget
+ if command -v curl >/dev/null 2>&1; then
+ if [[ "${PROBE_SCHEME^^}" == "HTTPS" ]]; then
+ PROBE_RESPONSE=$(curl -k -s -o /dev/null -w '%{http_code}' --max-time 2 "$PROBE_URL" 2>&1)
+ else
+ PROBE_RESPONSE=$(curl -s -o /dev/null -w '%{http_code}' --max-time 2 "$PROBE_URL" 2>&1)
+ fi
+ PROBE_EXIT_CODE=$?
+ elif command -v wget >/dev/null 2>&1; then
+ if [[ "${PROBE_SCHEME^^}" == "HTTPS" ]]; then
+ PROBE_RESPONSE=$(wget --no-check-certificate -O /dev/null --timeout=2 "$PROBE_URL" 2>&1)
+ else
+ PROBE_RESPONSE=$(wget -O /dev/null --timeout=2 "$PROBE_URL" 2>&1)
+ fi
+ PROBE_EXIT_CODE=$?
+ # wget prints no HTTP status code here; assume 200 on exit 0, else extract the error
+ if [[ $PROBE_EXIT_CODE -eq 0 ]]; then
+ PROBE_RESPONSE="200"
+ else
+ # Extract error from wget output - try multiple patterns
+ ERROR_MSG=$(echo "$PROBE_RESPONSE" | grep -iE "failed:|connection refused|timed? ?out|cannot connect|unable to|network|unreachable" | head -1)
+ if [[ -n "$ERROR_MSG" ]]; then
+ # Shorten the message if too long
+ PROBE_RESPONSE=$(echo "$ERROR_MSG" | cut -c1-80)
+ else
+ # If no specific error found, show exit code
+ PROBE_RESPONSE="wget failed with exit code $PROBE_EXIT_CODE"
+ fi
+ fi
+ else
+ print_warning " Liveness Probe on $PROBE_SCHEME://$PROBE_PORT$PROBE_PATH: Cannot test (curl/wget not available in agent)"
+ continue
+ fi
+
+ if [[ $PROBE_EXIT_CODE -eq 0 && "$PROBE_RESPONSE" =~ ^[2-3][0-9][0-9]$ ]]; then
+ print_success " Liveness Probe on $PROBE_SCHEME://$PROBE_PORT$PROBE_PATH: ✓ HTTP $PROBE_RESPONSE"
+ else
+ # Probe failed - check if it's config issue or app issue
+ if [[ "$PROBE_RESPONSE" =~ ^4[0-9][0-9]$ ]]; then
+ # 4xx error: endpoint not found or bad config
+ HAS_PROBE_ISSUES=1
+ CONTAINER_HAS_4XX_ISSUES=1
+ FAILED_PROBES_LIST="$FAILED_PROBES_LIST Liveness"
+ print_error " Liveness Probe on $PROBE_SCHEME://$PROBE_PORT$PROBE_PATH: ✗ HTTP $PROBE_RESPONSE - Health check endpoint not found"
+ elif [[ "$PROBE_RESPONSE" =~ ^5[0-9][0-9]$ ]]; then
+ # 5xx error: app has internal issues
+ HAS_PROBE_WARNINGS=1
+ CONTAINER_HAS_5XX_ISSUES=1
+ FAILED_PROBES_LIST="$FAILED_PROBES_LIST Liveness"
+ print_warning " Liveness Probe on $PROBE_SCHEME://$PROBE_PORT$PROBE_PATH: ⚠ HTTP $PROBE_RESPONSE - Application error"
+ else
+ # Connection failed or other error (port not listening, network issue, etc)
+ HAS_PROBE_WARNINGS=1
+ CONTAINER_HAS_CONNECTION_ISSUES=1
+ FAILED_PROBES_LIST="$FAILED_PROBES_LIST Liveness"
+ print_warning " Liveness Probe on $PROBE_SCHEME://$PROBE_PORT$PROBE_PATH: ⚠ Connection failed (response: $PROBE_RESPONSE, exit code: $PROBE_EXIT_CODE)"
+ fi
+ fi
+ elif [[ "$PROBE_TYPE" == "tcpSocket" ]]; then
+ PROBE_PORT=$(echo "$CONTAINER_DATA" | jq -r '.livenessProbe.tcpSocket.port')
+ print_info " Liveness Probe: TCP Socket on port $PROBE_PORT (tested in port health check)"
+ elif [[ "$PROBE_TYPE" == "exec" ]]; then
+ PROBE_COMMAND=$(echo "$CONTAINER_DATA" | jq -r '.livenessProbe.exec.command | join(" ")')
+ print_info " Liveness Probe: Exec [$PROBE_COMMAND] (cannot test directly)"
+ fi
+ fi
+
+ # Check Startup Probe
+ if [[ -n "$HAS_STARTUP" ]]; then
+ PROBE_TYPE=$(echo "$CONTAINER_DATA" | jq -r 'if .startupProbe.httpGet then "httpGet" elif .startupProbe.tcpSocket then "tcpSocket" elif .startupProbe.exec then "exec" else "unknown" end')
+
+ if [[ "$PROBE_TYPE" == "httpGet" ]]; then
+ PROBE_PATH=$(echo "$CONTAINER_DATA" | jq -r '.startupProbe.httpGet.path')
+ PROBE_PORT=$(echo "$CONTAINER_DATA" | jq -r '.startupProbe.httpGet.port')
+ PROBE_SCHEME=$(echo "$CONTAINER_DATA" | jq -r '.startupProbe.httpGet.scheme // "HTTP"')
+ PROBE_URL="${PROBE_SCHEME,,}://$POD_IP:$PROBE_PORT$PROBE_PATH"
+
+ # Try curl first from agent, then wget
+ if command -v curl >/dev/null 2>&1; then
+ if [[ "${PROBE_SCHEME^^}" == "HTTPS" ]]; then
+ PROBE_RESPONSE=$(curl -k -s -o /dev/null -w '%{http_code}' --max-time 2 "$PROBE_URL" 2>&1)
+ else
+ PROBE_RESPONSE=$(curl -s -o /dev/null -w '%{http_code}' --max-time 2 "$PROBE_URL" 2>&1)
+ fi
+ PROBE_EXIT_CODE=$?
+ elif command -v wget >/dev/null 2>&1; then
+ if [[ "${PROBE_SCHEME^^}" == "HTTPS" ]]; then
+ PROBE_RESPONSE=$(wget --no-check-certificate -O /dev/null --timeout=2 "$PROBE_URL" 2>&1)
+ else
+ PROBE_RESPONSE=$(wget -O /dev/null --timeout=2 "$PROBE_URL" 2>&1)
+ fi
+ PROBE_EXIT_CODE=$?
+ # wget prints no HTTP status code here; assume 200 on exit 0, else extract the error
+ if [[ $PROBE_EXIT_CODE -eq 0 ]]; then
+ PROBE_RESPONSE="200"
+ else
+ # Extract error from wget output - try multiple patterns
+ ERROR_MSG=$(echo "$PROBE_RESPONSE" | grep -iE "failed:|connection refused|timed? ?out|cannot connect|unable to|network|unreachable" | head -1)
+ if [[ -n "$ERROR_MSG" ]]; then
+ # Shorten the message if too long
+ PROBE_RESPONSE=$(echo "$ERROR_MSG" | cut -c1-80)
+ else
+ # If no specific error found, show exit code
+ PROBE_RESPONSE="wget failed with exit code $PROBE_EXIT_CODE"
+ fi
+ fi
+ else
+ print_warning " Startup Probe on $PROBE_SCHEME://$PROBE_PORT$PROBE_PATH: Cannot test (curl/wget not available in agent)"
+ continue
+ fi
+
+ if [[ $PROBE_EXIT_CODE -eq 0 && "$PROBE_RESPONSE" =~ ^[2-3][0-9][0-9]$ ]]; then
+ print_success " Startup Probe on $PROBE_SCHEME://$PROBE_PORT$PROBE_PATH: ✓ HTTP $PROBE_RESPONSE"
+ else
+ # Probe failed - check if it's config issue or app issue
+ if [[ "$PROBE_RESPONSE" =~ ^4[0-9][0-9]$ ]]; then
+ # 4xx error: endpoint not found or bad config
+ HAS_PROBE_ISSUES=1
+ CONTAINER_HAS_4XX_ISSUES=1
+ FAILED_PROBES_LIST="$FAILED_PROBES_LIST Startup"
+ print_error " Startup Probe on $PROBE_SCHEME://$PROBE_PORT$PROBE_PATH: ✗ HTTP $PROBE_RESPONSE - Health check endpoint not found"
+ elif [[ "$PROBE_RESPONSE" =~ ^5[0-9][0-9]$ ]]; then
+ # 5xx error: app has internal issues
+ HAS_PROBE_WARNINGS=1
+ CONTAINER_HAS_5XX_ISSUES=1
+ FAILED_PROBES_LIST="$FAILED_PROBES_LIST Startup"
+ print_warning " Startup Probe on $PROBE_SCHEME://$PROBE_PORT$PROBE_PATH: ⚠ HTTP $PROBE_RESPONSE - Application error"
+ else
+ # Connection failed or other error (port not listening, network issue, etc)
+ HAS_PROBE_WARNINGS=1
+ CONTAINER_HAS_CONNECTION_ISSUES=1
+ FAILED_PROBES_LIST="$FAILED_PROBES_LIST Startup"
+ print_warning " Startup Probe on $PROBE_SCHEME://$PROBE_PORT$PROBE_PATH: ⚠ Connection failed (response: $PROBE_RESPONSE, exit code: $PROBE_EXIT_CODE)"
+ fi
+ fi
+ elif [[ "$PROBE_TYPE" == "tcpSocket" ]]; then
+ PROBE_PORT=$(echo "$CONTAINER_DATA" | jq -r '.startupProbe.tcpSocket.port')
+ print_info " Startup Probe: TCP Socket on port $PROBE_PORT"
+ elif [[ "$PROBE_TYPE" == "exec" ]]; then
+ PROBE_COMMAND=$(echo "$CONTAINER_DATA" | jq -r '.startupProbe.exec.command | join(" ")')
+ print_info " Startup Probe: Exec [$PROBE_COMMAND] (cannot test directly)"
+ fi
+ fi
+
+ # Print consolidated action message for this container (avoid repetition)
+ if [[ -n "$FAILED_PROBES_LIST" ]]; then
+ echo ""
+ # Trim leading space from the list
+ FAILED_PROBES_LIST=$(echo "$FAILED_PROBES_LIST" | xargs)
+
+ if [[ $CONTAINER_HAS_CONNECTION_ISSUES -eq 1 ]]; then
+ print_action "For $FAILED_PROBES_LIST probe(s): Verify port is listening and accessible from within cluster"
+ fi
+
+ if [[ $CONTAINER_HAS_4XX_ISSUES -eq 1 ]]; then
+ print_action "For $FAILED_PROBES_LIST probe(s): Update probe path or implement the endpoint in application"
+ fi
+
+ if [[ $CONTAINER_HAS_5XX_ISSUES -eq 1 ]]; then
+ print_action "For $FAILED_PROBES_LIST probe(s): Check application logs and fix internal errors or dependencies"
+ fi
+ fi
+ done
+done
+
+echo ""
+if [[ $CONTAINERS_TESTED -eq 0 ]]; then
+ # No containers were tested - all were skipped
+ print_info "All containers skipped - no probe checks could be performed"
+ update_check_result --status "skipped" --evidence "{\"tested\":$CONTAINERS_TESTED,\"skipped\":$CONTAINERS_SKIPPED}"
+elif [[ $HAS_PROBE_ISSUES -gt 0 ]]; then
+ # Some containers were tested and had issues
+ if [[ $CONTAINERS_SKIPPED -gt 0 ]]; then
+ print_warning "Probe issues found ($CONTAINERS_TESTED tested, $CONTAINERS_SKIPPED skipped)"
+ fi
+ update_check_result --status "failed" --evidence "{\"tested\":$CONTAINERS_TESTED,\"skipped\":$CONTAINERS_SKIPPED}"
+elif [[ $HAS_PROBE_WARNINGS -gt 0 ]]; then
+ # Some containers were tested and had warnings
+ if [[ $CONTAINERS_SKIPPED -gt 0 ]]; then
+ print_info "Probe warnings found ($CONTAINERS_TESTED tested, $CONTAINERS_SKIPPED skipped)"
+ fi
+ update_check_result --status "warning" --evidence "{\"tested\":$CONTAINERS_TESTED,\"skipped\":$CONTAINERS_SKIPPED}"
+else
+ # All tested containers passed
+ print_success "Health probes verified on $CONTAINERS_TESTED container(s)"
+ update_check_result --status "success" --evidence "{\"tested\":$CONTAINERS_TESTED,\"skipped\":$CONTAINERS_SKIPPED}"
+fi
diff --git a/k8s/diagnose/scope/image_pull_status b/k8s/diagnose/scope/image_pull_status
index 7025c55c..31e9d340 100644
--- a/k8s/diagnose/scope/image_pull_status
+++ b/k8s/diagnose/scope/image_pull_status
@@ -26,10 +26,13 @@ for POD_NAME in $PODS; do
print_warning " Image: $IMAGE"
print_warning " Reason: $MESSAGE"
done
+ print_action "Verify image exists and imagePullSecrets are configured for private registries"
fi
done
if [[ $HAS_ERRORS -eq 0 ]]; then
+ POD_COUNT=$(echo "$PODS" | wc -w)
+ print_success "All $POD_COUNT pod(s) have images pulled successfully"
update_check_result --status "success" --evidence "{}"
else
update_check_result --status "failed" --evidence "{}"
diff --git a/k8s/diagnose/scope/memory_limits_check b/k8s/diagnose/scope/memory_limits_check
index 7d129f24..90e280b8 100644
--- a/k8s/diagnose/scope/memory_limits_check
+++ b/k8s/diagnose/scope/memory_limits_check
@@ -27,12 +27,14 @@ for POD_NAME in $PODS; do
print_warning " Container: $CONTAINER"
print_warning " Memory Limit: $MEMORY_LIMIT"
print_warning " Memory Request: $MEMORY_REQUEST"
- print_info " Action: Increase memory limits or optimize application memory usage"
+ print_action "Increase memory limits or optimize application memory usage"
done
fi
done
if [[ $HAS_OOM -eq 0 ]]; then
+ POD_COUNT=$(echo "$PODS" | wc -w)
+ print_success "No OOMKilled containers detected in $POD_COUNT pod(s)"
update_check_result --status "success" --evidence "{}"
else
update_check_result --status "failed" --evidence "{}"
diff --git a/k8s/diagnose/scope/pod_existence b/k8s/diagnose/scope/pod_existence
new file mode 100644
index 00000000..b9919a9e
--- /dev/null
+++ b/k8s/diagnose/scope/pod_existence
@@ -0,0 +1,16 @@
+#!/bin/bash
+# Check: Pod Existence
+# Verifies that pod resources exist in the namespace
+
+PODS=$(jq -r '.items[].metadata.name' "$PODS_FILE" 2>/dev/null | tr '\n' ' ')
+
+if [[ -z "$PODS" ]]; then
+ print_error "No pods found with labels $LABEL_SELECTOR in namespace $NAMESPACE"
+ print_action "Check deployment status and verify label selectors match"
+ update_check_result --status "failed" --evidence "{}"
+ return 1
+fi
+
+PODS_COUNT=$(echo "$PODS" | wc -w)
+print_success "Found $PODS_COUNT pod(s): $PODS"
+update_check_result --status "success" --evidence "{}"
diff --git a/k8s/diagnose/scope/pod_readiness b/k8s/diagnose/scope/pod_readiness
index 01dc43f6..eecdcb32 100644
--- a/k8s/diagnose/scope/pod_readiness
+++ b/k8s/diagnose/scope/pod_readiness
@@ -8,40 +8,165 @@ require_pods || return 0
# Read pods from pre-collected data
PODS=$(jq -r '.items[].metadata.name' "$PODS_FILE" 2>/dev/null | tr '\n' ' ')
-ALL_READY=1
+# Counters for summary
+TOTAL_PODS=0
+READY_PODS=0
+SUCCEEDED_PODS=0
+NOT_READY_PODS=0
+TERMINATING_PODS=0
+STARTING_PODS=0
+
+# Deployment state detection
+HAS_TERMINATING_PODS=0
+HAS_STARTING_PODS=0
for POD_NAME in $PODS; do
+ TOTAL_PODS=$((TOTAL_PODS + 1))
+
# Get pod info from pre-collected data
POD_INFO=$(jq --arg name "$POD_NAME" '.items[] | select(.metadata.name == $name)' "$PODS_FILE" 2>/dev/null)
POD_PHASE=$(echo "$POD_INFO" | jq -r '.status.phase')
POD_READY=$(echo "$POD_INFO" | jq -r '.status.conditions[] | select(.type=="Ready") | .status')
+ # Check if pod is terminating
+ DELETION_TIMESTAMP=$(echo "$POD_INFO" | jq -r '.metadata.deletionTimestamp // empty')
+ if [[ -n "$DELETION_TIMESTAMP" ]]; then
+ TERMINATING_PODS=$((TERMINATING_PODS + 1))
+ HAS_TERMINATING_PODS=1
+ print_info "Pod $POD_NAME: Terminating (rollout in progress)"
+ continue
+ fi
+
if [[ "$POD_PHASE" == "Running" && "$POD_READY" == "True" ]]; then
+ READY_PODS=$((READY_PODS + 1))
print_success "Pod $POD_NAME: Running and Ready"
elif [[ "$POD_PHASE" == "Succeeded" ]]; then
+ SUCCEEDED_PODS=$((SUCCEEDED_PODS + 1))
print_success "Pod $POD_NAME: Completed successfully"
else
- ALL_READY=0
- print_warning "Pod $POD_NAME: Phase=$POD_PHASE, Ready=$POD_READY"
+ NOT_READY_PODS=$((NOT_READY_PODS + 1))
+
+ # Detect if pod is in normal startup state and collect reasons
+ IS_STARTING=0
+ STARTUP_INFO=""
+
+ # Check if pod is in Pending phase (normal during startup)
+ if [[ "$POD_PHASE" == "Pending" ]]; then
+ IS_STARTING=1
+ fi
+
+ # Check init containers first
+ INIT_CONTAINER_INFO=$(echo "$POD_INFO" | jq -r '
+ .status.initContainerStatuses[]? |
+ select(.state.waiting or .state.running) |
+ if .state.running then
+ "\(.name): Running"
+ else
+ "\(.name): \(.state.waiting.reason)"
+ end
+ ' 2>/dev/null)
+
+ if [[ -n "$INIT_CONTAINER_INFO" ]]; then
+ IS_STARTING=1
+ STARTUP_INFO="Init: $(echo "$INIT_CONTAINER_INFO" | paste -sd ',' - | sed 's/,/, /g')"
+ fi
+
+ # Check for normal container startup reasons with details
+ CONTAINER_STARTUP_INFO=$(echo "$POD_INFO" | jq -r '
+ .status.containerStatuses[]? |
+ select(.state.waiting) |
+ "\(.name): \(.state.waiting.reason)"
+ ' 2>/dev/null)
- # Check readiness probe failures
- READINESS_FAILURES=$(kubectl get events -n "$NAMESPACE" --field-selector involvedObject.name="$POD_NAME" 2>/dev/null | grep -i "readiness probe failed" | tail -n 1)
+ if [[ -n "$CONTAINER_STARTUP_INFO" ]]; then
+ # Check if any are normal startup reasons
+ while IFS= read -r CONTAINER_LINE; do
+ REASON=$(echo "$CONTAINER_LINE" | cut -d':' -f2 | xargs)
+ case "$REASON" in
+ ContainerCreating|PodInitializing|Pulling|ErrImagePull|ImagePullBackOff)
+ IS_STARTING=1
+ ;;
+ esac
+ done <<< "$CONTAINER_STARTUP_INFO"
- if [[ -n "$READINESS_FAILURES" ]]; then
- print_warning " Readiness probe is failing"
+ CONTAINER_INFO_FORMATTED=$(echo "$CONTAINER_STARTUP_INFO" | paste -sd ',' - | sed 's/,/, /g')
+ if [[ -n "$STARTUP_INFO" ]]; then
+ STARTUP_INFO="$STARTUP_INFO | Containers: $CONTAINER_INFO_FORMATTED"
+ else
+ STARTUP_INFO="Containers: $CONTAINER_INFO_FORMATTED"
+ fi
fi
- # Check liveness probe failures
- LIVENESS_FAILURES=$(kubectl get events -n "$NAMESPACE" --field-selector involvedObject.name="$POD_NAME" 2>/dev/null | grep -i "liveness probe failed" | tail -n 1)
+ if [[ $IS_STARTING -eq 1 ]]; then
+ STARTING_PODS=$((STARTING_PODS + 1))
+ HAS_STARTING_PODS=1
+ if [[ -n "$STARTUP_INFO" ]]; then
+ print_info "Pod $POD_NAME: Starting up - $STARTUP_INFO"
+ else
+ print_info "Pod $POD_NAME: Phase=$POD_PHASE (starting up)"
+ fi
+ else
+ print_warning "Pod $POD_NAME: Phase=$POD_PHASE, Ready=$POD_READY"
+ fi
+
+ # Get detailed condition information
+ READY_CONDITION=$(echo "$POD_INFO" | jq -r '.status.conditions[] | select(.type=="Ready")')
+ READY_REASON=$(echo "$READY_CONDITION" | jq -r '.reason // "Unknown"')
+ READY_MESSAGE=$(echo "$READY_CONDITION" | jq -r '.message // "No message available"')
+
+ if [[ -n "$READY_REASON" && "$READY_REASON" != "Unknown" ]]; then
+ print_warning " Reason: $READY_REASON"
+ fi
+
+ if [[ -n "$READY_MESSAGE" && "$READY_MESSAGE" != "No message available" ]]; then
+ print_warning " Message: $READY_MESSAGE"
+ fi
+
+ # Check container statuses
+ CONTAINER_STATUSES=$(echo "$POD_INFO" | jq -r '.status.containerStatuses[]? | "\(.name): Ready=\(.ready), Restarts=\(.restartCount)"' 2>/dev/null)
+
+ if [[ -n "$CONTAINER_STATUSES" ]]; then
+ print_warning " Container Status:"
+ while IFS= read -r line; do
+ print_warning " $line"
+ done <<< "$CONTAINER_STATUSES"
+ fi
+
+ # Check for waiting containers with reasons
+ WAITING_CONTAINERS=$(echo "$POD_INFO" | jq -r '.status.containerStatuses[]? | select(.state.waiting) | " \(.name): \(.state.waiting.reason) - \(.state.waiting.message // "No details")"' 2>/dev/null)
+
+ if [[ -n "$WAITING_CONTAINERS" ]]; then
+ print_warning " Waiting Containers:"
+ echo "$WAITING_CONTAINERS" | while IFS= read -r line; do
+ print_warning "$line"
+ done
+ fi
- if [[ -n "$LIVENESS_FAILURES" ]]; then
- print_warning " Liveness probe is failing"
+ # Only show action if not in normal startup state
+ if [[ $IS_STARTING -eq 0 ]]; then
+ print_action "Check application health endpoint and ensure dependencies are available"
fi
fi
done
-if [[ $ALL_READY -eq 1 ]]; then
- update_check_result --status "success" --evidence "{}"
+# Print summary
+echo ""
+if [[ $TOTAL_PODS -eq 0 ]]; then
+ print_warning "No pods found"
+ update_check_result --status "failed" --evidence "{\"ready\":0,\"total\":0}"
+elif [[ $READY_PODS -eq $TOTAL_PODS ]] || [[ $((READY_PODS + SUCCEEDED_PODS)) -eq $TOTAL_PODS ]]; then
+ print_success "All pods ready: $READY_PODS/$TOTAL_PODS running and ready"
+ update_check_result --status "success" --evidence "{\"ready\":$READY_PODS,\"total\":$TOTAL_PODS}"
+elif [[ $HAS_TERMINATING_PODS -eq 1 ]]; then
+ # Pods are terminating - deployment/rollout in progress
+ print_info "Deployment in progress: $READY_PODS/$TOTAL_PODS pods ready (rollout in progress with terminating pods)"
+ update_check_result --status "warning" --evidence "{\"ready\":$READY_PODS,\"total\":$TOTAL_PODS,\"terminating\":$TERMINATING_PODS,\"deployment_in_progress\":true}"
+elif [[ $HAS_STARTING_PODS -eq 1 ]]; then
+ # Pods are starting up normally - new deployment in progress
+ print_info "Deployment in progress: $READY_PODS/$TOTAL_PODS pods ready, $STARTING_PODS starting up"
+ update_check_result --status "warning" --evidence "{\"ready\":$READY_PODS,\"total\":$TOTAL_PODS,\"starting\":$STARTING_PODS,\"not_ready\":$NOT_READY_PODS,\"deployment_in_progress\":true}"
else
- update_check_result --status "failed" --evidence "{}"
+ # Some pods not ready and no clear sign of deployment in progress - this is a problem
+ print_error "Pods not ready: $READY_PODS/$TOTAL_PODS ready (pods have issues)"
+ update_check_result --status "failed" --evidence "{\"ready\":$READY_PODS,\"total\":$TOTAL_PODS}"
fi
\ No newline at end of file
diff --git a/k8s/diagnose/scope/resource_availability b/k8s/diagnose/scope/resource_availability
index 6b13d9af..53e99f97 100644
--- a/k8s/diagnose/scope/resource_availability
+++ b/k8s/diagnose/scope/resource_availability
@@ -30,11 +30,15 @@ for POD_NAME in $PODS; do
if echo "$UNSCHEDULABLE" | grep -qi "insufficient memory"; then
print_warning " Issue: Insufficient memory in cluster"
fi
+
+ print_action "Reduce resource requests or add more nodes to cluster"
fi
fi
done
if [[ $HAS_ISSUES -eq 0 ]]; then
+ POD_COUNT=$(echo "$PODS" | wc -w)
+ print_success "All $POD_COUNT pod(s) successfully scheduled with sufficient resources"
update_check_result --status "success" --evidence "{}"
else
update_check_result --status "failed" --evidence "{}"
diff --git a/k8s/diagnose/scope/storage_mounting b/k8s/diagnose/scope/storage_mounting
index c2497c30..09e17d16 100644
--- a/k8s/diagnose/scope/storage_mounting
+++ b/k8s/diagnose/scope/storage_mounting
@@ -30,7 +30,7 @@ for POD_NAME in $PODS; do
print_warning " Storage Class: $STORAGE_CLASS"
print_warning " Requested Size: $REQUESTED_SIZE"
- print_info " Action: Check if StorageClass exists and has available capacity"
+ print_action "Check if StorageClass exists and has available capacity"
elif [[ "$PVC_STATUS" == "Bound" ]]; then
print_success "Pod $POD_NAME: PVC $PVC is Bound"
else
@@ -48,6 +48,8 @@ for POD_NAME in $PODS; do
done
if [[ $HAS_STORAGE_ISSUES -eq 0 ]]; then
+ POD_COUNT=$(echo "$PODS" | wc -w)
+ print_success "All volumes mounted successfully for $POD_COUNT pod(s)"
update_check_result --status "success" --evidence "{}"
else
update_check_result --status "failed" --evidence "{}"
diff --git a/k8s/diagnose/scope/workflow.yml b/k8s/diagnose/scope/workflow.yml
index f1952f40..b21775ec 100644
--- a/k8s/diagnose/scope/workflow.yml
+++ b/k8s/diagnose/scope/workflow.yml
@@ -1,31 +1,41 @@
steps:
- - name: image_pull_status
+ - name: Pod Existence
+ description: Verifies that pod resources exist in the namespace
+ category: Scope
+ type: script
+ file: "$SERVICE_PATH/diagnose/scope/pod_existence"
+ - name: Image Pull Status
description: Verifies container images can be pulled from registry
- category: scope
+ category: Scope
type: script
file: "$SERVICE_PATH/diagnose/scope/image_pull_status"
- - name: container_crash_detection
+ - name: Container Crash Detection
description: Checks if containers are crashing on startup
- category: scope
+ category: Scope
type: script
file: "$SERVICE_PATH/diagnose/scope/container_crash_detection"
- - name: resource_availability
+ - name: Resource Availability
description: Validates pod can be scheduled with requested resources
- category: scope
+ category: Scope
type: script
file: "$SERVICE_PATH/diagnose/scope/resource_availability"
- - name: memory_limits_check
+ - name: Memory Limits
description: Checks for out-of-memory container terminations
- category: scope
+ category: Scope
type: script
file: "$SERVICE_PATH/diagnose/scope/memory_limits_check"
- - name: storage_mounting
+ - name: Storage Mounting
description: Verifies persistent volumes are bound and mounted
- category: scope
+ category: Scope
type: script
file: "$SERVICE_PATH/diagnose/scope/storage_mounting"
- - name: pod_readiness
- description: Confirms pod is running and ready to serve traffic
- category: scope
+ - name: Container Port Health
+ description: Validates containers are listening on declared ports
+ category: Scope
+ type: script
+ file: "$SERVICE_PATH/diagnose/scope/container_port_health"
+ - name: Health Probe Endpoints
+ description: Validates health probe endpoints are configured and responding
+ category: Scope
type: script
- file: "$SERVICE_PATH/diagnose/scope/pod_readiness"
\ No newline at end of file
+ file: "$SERVICE_PATH/diagnose/scope/health_probe_endpoints"
\ No newline at end of file
diff --git a/k8s/diagnose/service/service_endpoints b/k8s/diagnose/service/service_endpoints
index c3c983f3..a6fe12c5 100644
--- a/k8s/diagnose/service/service_endpoints
+++ b/k8s/diagnose/service/service_endpoints
@@ -20,12 +20,17 @@ for SERVICE_NAME in $SERVICES; do
continue
fi
- # Check if endpoints has any addresses
- ADDRESSES=$(echo "$ENDPOINTS_INFO" | jq -r '.subsets[]?.addresses[]?.ip' 2>/dev/null)
- NOT_READY_ADDRESSES=$(echo "$ENDPOINTS_INFO" | jq -r '.subsets[]?.notReadyAddresses[]?.ip' 2>/dev/null)
+ # Check if endpoints has any addresses with detailed info
+ # Get ports for this subset
+ PORTS=$(echo "$ENDPOINTS_INFO" | jq -r '.subsets[0]?.ports[]? | "\(.port):\(.name // "unnamed")"' 2>/dev/null | head -1)
+ PORT_NUMBER=$(echo "$PORTS" | cut -d':' -f1)
+ PORT_NAME=$(echo "$PORTS" | cut -d':' -f2)
- READY_COUNT=$(echo "$ADDRESSES" | grep -c '^' 2>/dev/null || echo 0)
- NOT_READY_COUNT=$(echo "$NOT_READY_ADDRESSES" | grep -c '^' 2>/dev/null || echo 0)
+ READY_ENDPOINTS=$(echo "$ENDPOINTS_INFO" | jq -r '.subsets[]?.addresses[]? | "\(.targetRef.name // "unknown"):\(.ip)"' 2>/dev/null)
+ NOT_READY_ENDPOINTS=$(echo "$ENDPOINTS_INFO" | jq -r '.subsets[]?.notReadyAddresses[]? | "\(.targetRef.name // "unknown"):\(.ip)"' 2>/dev/null)
+
+ READY_COUNT=$(echo "$READY_ENDPOINTS" | grep -c '^' 2>/dev/null || echo 0)
+ NOT_READY_COUNT=$(echo "$NOT_READY_ENDPOINTS" | grep -c '^' 2>/dev/null || echo 0)
if [[ $READY_COUNT -eq 0 ]]; then
HAS_ISSUES=1
@@ -33,16 +38,48 @@ for SERVICE_NAME in $SERVICES; do
if [[ $NOT_READY_COUNT -gt 0 ]]; then
print_warning " Not ready endpoints: $NOT_READY_COUNT"
- print_info " Action: Check pod readiness probes and pod status"
+ # Show details of not ready endpoints
+ echo "$NOT_READY_ENDPOINTS" | while IFS=':' read -r POD_NAME IP; do
+ if [[ -n "$IP" ]]; then
+ if [[ -n "$PORT_NUMBER" ]]; then
+ print_warning " - $POD_NAME -> $IP:$PORT_NUMBER"
+ else
+ print_warning " - $POD_NAME -> $IP"
+ fi
+ fi
+ done
+ print_action "Check pod readiness probes and pod status"
else
print_warning " No endpoints at all"
- print_info " Action: Verify service selector matches pod labels"
+ print_action "Verify service selector matches pod labels"
fi
else
print_success "Service $SERVICE_NAME: $READY_COUNT ready endpoint(s)"
+ # Show details of ready endpoints
+ echo "$READY_ENDPOINTS" | while IFS=':' read -r POD_NAME IP; do
+ if [[ -n "$IP" ]]; then
+ if [[ -n "$PORT_NUMBER" ]]; then
+ print_success " - $POD_NAME -> $IP:$PORT_NUMBER"
+ else
+ print_success " - $POD_NAME -> $IP"
+ fi
+ fi
+ done
+
if [[ $NOT_READY_COUNT -gt 0 ]]; then
print_warning " Also has $NOT_READY_COUNT not ready endpoint(s)"
+ # Show details of not ready endpoints
+ echo "$NOT_READY_ENDPOINTS" | while IFS=':' read -r POD_NAME IP; do
+ if [[ -n "$IP" ]]; then
+ if [[ -n "$PORT_NUMBER" ]]; then
+ print_warning " - $POD_NAME -> $IP:$PORT_NUMBER"
+ else
+ print_warning " - $POD_NAME -> $IP"
+ fi
+ fi
+ done
+ print_action "Check pod readiness probes and pod status"
fi
fi
done
diff --git a/k8s/diagnose/service/service_existence b/k8s/diagnose/service/service_existence
index cab93dc4..2ee8783f 100644
--- a/k8s/diagnose/service/service_existence
+++ b/k8s/diagnose/service/service_existence
@@ -2,12 +2,15 @@
# Check: Service Existence
# Verifies that service resources exist in the namespace
-# Validate services exist
-require_services || return 0
-
-# Read services from pre-collected data
SERVICES=$(jq -r '.items[].metadata.name' "$SERVICES_FILE" 2>/dev/null | tr '\n' ' ')
+if [[ -z "$SERVICES" ]]; then
+ print_error "No services found with labels $LABEL_SELECTOR in namespace $NAMESPACE"
+ print_action "Create service resource or verify label selectors"
+ update_check_result --status "failed" --evidence "{}"
+ return 1
+fi
+
SERVICE_COUNT=$(echo "$SERVICES" | wc -w)
print_success "Found $SERVICE_COUNT service(s): $SERVICES"
update_check_result --status "success" --evidence "{}"
diff --git a/k8s/diagnose/service/service_port_configuration b/k8s/diagnose/service/service_port_configuration
index 5881c602..79baa675 100644
--- a/k8s/diagnose/service/service_port_configuration
+++ b/k8s/diagnose/service/service_port_configuration
@@ -24,7 +24,7 @@ for SERVICE_NAME in $SERVICES; do
fi
# Get service selector to find pods
- SERVICE_SELECTORS=$(echo "$SERVICE_INFO" | jq -r '.spec.selector | to_entries | map("\(.key)=\(.value)") | join(",")')
+ SERVICE_SELECTORS=$(echo "$SERVICE_INFO" | jq -c '.spec.selector')
if [[ -z "$SERVICE_SELECTORS" || "$SERVICE_SELECTORS" == "null" ]]; then
print_warning "Service $SERVICE_NAME: No selector, skipping port validation"
@@ -32,7 +32,16 @@ for SERVICE_NAME in $SERVICES; do
fi
# Find pods from pre-collected data that match service selectors
- PODS=$(jq -r --arg selectors "$SERVICE_SELECTORS" '.items[] | select(.metadata.labels | to_entries | map("\(.key)=\(.value)") | join(",") | contains($selectors)) | .metadata.name' "$PODS_FILE" 2>/dev/null | tr '\n' ' ')
+ PODS=$(jq -r --argjson selectors "$SERVICE_SELECTORS" '
+ .items[] |
+ . as $pod |
+ select(
+ $selectors | to_entries | all(.key as $k | .value as $v |
+ $pod.metadata.labels[$k] == $v
+ )
+ ) |
+ .metadata.name
+ ' "$PODS_FILE" 2>/dev/null | tr '\n' ' ')
if [[ -z "$PODS" ]]; then
print_warning "Service $SERVICE_NAME: No pods found to validate ports"
@@ -43,34 +52,71 @@ for SERVICE_NAME in $SERVICES; do
FIRST_POD=$(echo "$PODS" | awk '{print $1}')
POD_INFO=$(jq --arg name "$FIRST_POD" '.items[] | select(.metadata.name == $name)' "$PODS_FILE" 2>/dev/null)
- # Get container ports
- CONTAINER_PORTS=$(echo "$POD_INFO" | jq -r '.spec.containers[].ports[]?.containerPort' 2>/dev/null | sort -u)
-
print_info "Service $SERVICE_NAME port configuration:"
- echo "$SERVICE_PORTS" | while IFS=':' read -r SERVICE_PORT TARGET_PORT PORT_NAME; do
+ # Validate configuration and test connectivity
+ # Use process substitution to avoid subshell and preserve HAS_PORT_ISSUES updates
+ while IFS=':' read -r SERVICE_PORT TARGET_PORT PORT_NAME; do
+ ACTUAL_TARGET_PORT="$TARGET_PORT"
+ CONTAINER_NAME=""
+
# Check if targetPort is numeric or named
if [[ "$TARGET_PORT" =~ ^[0-9]+$ ]]; then
- # Numeric targetPort - check if it exists in container ports
- if echo "$CONTAINER_PORTS" | grep -q "^${TARGET_PORT}$"; then
- print_success " Port $SERVICE_PORT -> $TARGET_PORT ($PORT_NAME): OK"
+ # Numeric targetPort - find which container has this port
+ CONTAINER_INFO=$(echo "$POD_INFO" | jq -r --arg port "$TARGET_PORT" '
+ .spec.containers[] |
+ select(.ports[]?.containerPort == ($port | tonumber)) |
+ "\(.name):\(.ports[] | select(.containerPort == ($port | tonumber)) | .containerPort)"
+ ' | head -1)
+
+ if [[ -n "$CONTAINER_INFO" ]]; then
+ CONTAINER_NAME=$(echo "$CONTAINER_INFO" | cut -d':' -f1)
+ print_success " Port $SERVICE_PORT -> $TARGET_PORT ($PORT_NAME): Configuration OK [container: $CONTAINER_NAME]"
else
HAS_PORT_ISSUES=1
+ # Show available ports per container
+ AVAILABLE_PORTS=$(echo "$POD_INFO" | jq -r '.spec.containers[] | "\(.name): \([.ports[]?.containerPort] | join(","))"' | tr '\n' '; ')
print_error " Port $SERVICE_PORT -> $TARGET_PORT ($PORT_NAME): Container port $TARGET_PORT not found"
- print_warning " Available container ports: $(echo $CONTAINER_PORTS | tr '\n' ' ')"
+ print_warning " Available ports by container: $AVAILABLE_PORTS"
+ print_action "Update service targetPort to match container port or fix container port"
+ continue
fi
else
- # Named port - check if it exists in container port names
- NAMED_PORT_VALUE=$(echo "$POD_INFO" | jq -r ".spec.containers[].ports[] | select(.name==\"$TARGET_PORT\") | .containerPort" 2>/dev/null)
-
- if [[ -n "$NAMED_PORT_VALUE" ]]; then
- print_success " Port $SERVICE_PORT -> $TARGET_PORT ($PORT_NAME): Resolves to $NAMED_PORT_VALUE"
+ # Named port - find which container has this named port
+ CONTAINER_INFO=$(echo "$POD_INFO" | jq -r --arg portname "$TARGET_PORT" '
+ .spec.containers[] |
+ select(.ports[]? | select(.name == $portname)) |
+ "\(.name):\(.ports[] | select(.name == $portname) | .containerPort)"
+ ' | head -1)
+
+ if [[ -n "$CONTAINER_INFO" ]]; then
+ CONTAINER_NAME=$(echo "$CONTAINER_INFO" | cut -d':' -f1)
+ ACTUAL_TARGET_PORT=$(echo "$CONTAINER_INFO" | cut -d':' -f2)
+ print_success " Port $SERVICE_PORT -> $TARGET_PORT ($PORT_NAME): Resolves to $ACTUAL_TARGET_PORT [container: $CONTAINER_NAME]"
else
HAS_PORT_ISSUES=1
print_error " Port $SERVICE_PORT -> $TARGET_PORT ($PORT_NAME): Named port not found in containers"
+ print_action "Define named port in container spec or use numeric targetPort"
+ continue
fi
fi
- done
+
+ # Active connectivity check - verify application is listening on the port
+ print_info " Testing connectivity to port $ACTUAL_TARGET_PORT in container '$CONTAINER_NAME'..."
+
+ # Try to connect to the port from inside the specific container
+ CONNECTIVITY_TEST=$(kubectl exec "$FIRST_POD" -n "$NAMESPACE" -c "$CONTAINER_NAME" -- timeout 2 sh -c "command -v nc >/dev/null 2>&1 && nc -z localhost $ACTUAL_TARGET_PORT || (command -v curl >/dev/null 2>&1 && curl -s --max-time 1 localhost:$ACTUAL_TARGET_PORT >/dev/null)" 2>&1)
+ CONNECTIVITY_EXIT_CODE=$?
+
+ if [[ $CONNECTIVITY_EXIT_CODE -eq 0 ]]; then
+ print_success " ✓ Port $ACTUAL_TARGET_PORT is accepting connections"
+ else
+ HAS_PORT_ISSUES=1
+ print_error " ✗ Port $ACTUAL_TARGET_PORT is NOT accepting connections"
+ print_warning " Configuration is correct but application may not be listening on port $ACTUAL_TARGET_PORT"
+ print_info " Check logs: kubectl logs $FIRST_POD -n $NAMESPACE -c $CONTAINER_NAME"
+ fi
+ done < <(echo "$SERVICE_PORTS")
done
if [[ $HAS_PORT_ISSUES -eq 0 ]]; then
diff --git a/k8s/diagnose/service/service_selector_match b/k8s/diagnose/service/service_selector_match
index e608b71a..84667a7e 100644
--- a/k8s/diagnose/service/service_selector_match
+++ b/k8s/diagnose/service/service_selector_match
@@ -90,7 +90,7 @@ for SERVICE_NAME in $SERVICES; do
print_info " Debug result: $DEBUG_RESULT"
fi
- print_info " Action: Verify pod labels match service selector"
+ print_action "Verify pod labels match service selector"
fi
else
POD_COUNT=$(echo "$MATCHING_PODS" | wc -w)
diff --git a/k8s/diagnose/service/service_type_validation b/k8s/diagnose/service/service_type_validation
index b18e8c9b..8381c2d7 100644
--- a/k8s/diagnose/service/service_type_validation
+++ b/k8s/diagnose/service/service_type_validation
@@ -50,6 +50,7 @@ for SERVICE_NAME in $SERVICES; do
print_info " Recent events:"
echo "$LB_EVENTS" | sed 's/^/ /'
fi
+ print_action "Wait for provisioning or check cloud provider logs for errors"
else
print_success " LoadBalancer available at: $EXTERNAL_IP"
fi
@@ -63,6 +64,7 @@ for SERVICE_NAME in $SERVICES; do
*)
HAS_ISSUES=1
print_error " Unknown service type: $SERVICE_TYPE"
+ print_action "Use valid service type (ClusterIP, NodePort, LoadBalancer, or ExternalName)"
;;
esac
done
diff --git a/k8s/diagnose/service/workflow.yml b/k8s/diagnose/service/workflow.yml
index 86032fb6..fec85604 100644
--- a/k8s/diagnose/service/workflow.yml
+++ b/k8s/diagnose/service/workflow.yml
@@ -1,26 +1,26 @@
steps:
- - name: service_existence
+ - name: Service Existence
description: Verifies that service resources exist in the namespace
- category: service
+ category: K8s Service
type: script
file: "$SERVICE_PATH/diagnose/service/service_existence"
- - name: service_selector_match
+ - name: Service Selector Match
description: Validates service selectors match pod labels
- category: service
+ category: K8s Service
type: script
file: "$SERVICE_PATH/diagnose/service/service_selector_match"
- - name: service_endpoints
+ - name: Service Endpoints
description: Checks if service has healthy endpoints
- category: service
+ category: K8s Service
type: script
file: "$SERVICE_PATH/diagnose/service/service_endpoints"
- - name: service_port_configuration
+ - name: Service Port Configuration
description: Validates service and container port alignment
- category: service
+ category: K8s Service
type: script
file: "$SERVICE_PATH/diagnose/service/service_port_configuration"
- - name: service_type_validation
+ - name: Service Type Validation
description: Verifies service type is correctly configured
- category: service
+ category: K8s Service
type: script
file: "$SERVICE_PATH/diagnose/service/service_type_validation"
\ No newline at end of file
diff --git a/k8s/diagnose/utils/diagnose_utils b/k8s/diagnose/utils/diagnose_utils
index 4f524edd..836bc67e 100644
--- a/k8s/diagnose/utils/diagnose_utils
+++ b/k8s/diagnose/utils/diagnose_utils
@@ -23,6 +23,10 @@ print_info() {
echo -e "${CYAN}ℹ${NC} $1"
}
+print_action() {
+ echo -e "${CYAN}🔧${NC} $1"
+}
+
# Validate that required resources exist before running checks
# Returns 0 if validation passes, 1 if fails (and sets check to failed)
require_resources() {
@@ -32,8 +36,8 @@ require_resources() {
local namespace="$4"
if [[ -z "$resource_names" ]]; then
- print_error "No ${resource_type} found with labels $label_selector in namespace $namespace"
- update_check_result --status "failed" --evidence "{}"
+ print_warning "No ${resource_type} found with labels $label_selector in namespace $namespace, check was skipped."
+ update_check_result --status "skipped" --evidence "{}"
return 1
fi
@@ -161,7 +165,9 @@ notify_results() {
pending: (map(select((.status // "UNKNOWN") == "pending")) | length),
running: (map(select((.status // "UNKNOWN") == "running")) | length),
success: (map(select((.status // "UNKNOWN") == "success")) | length),
- failed: (map(select((.status // "UNKNOWN") == "failed")) | length)
+ failed: (map(select((.status // "UNKNOWN") == "failed")) | length),
+ warning: (map(select((.status // "UNKNOWN") == "warning")) | length),
+ skipped: (map(select((.status // "UNKNOWN") == "skipped")) | length)
},
checks: .
})
diff --git a/k8s/log/kube-logger-go/bin/linux/exec-x86_64 b/k8s/log/kube-logger-go/bin/linux/exec-x86_64
index 84924e66..327e2c97 100755
Binary files a/k8s/log/kube-logger-go/bin/linux/exec-x86_64 and b/k8s/log/kube-logger-go/bin/linux/exec-x86_64 differ
diff --git a/k8s/log/kube-logger-go/go.mod b/k8s/log/kube-logger-go/go.mod
index b2ebfeaa..a767988a 100644
--- a/k8s/log/kube-logger-go/go.mod
+++ b/k8s/log/kube-logger-go/go.mod
@@ -1,49 +1,48 @@
module kube-logger-go
-go 1.21
+go 1.25.5
require (
- k8s.io/api v0.27.2
- k8s.io/apimachinery v0.27.2
- k8s.io/client-go v0.27.2
+ k8s.io/api v0.35.0
+ k8s.io/apimachinery v0.35.0
+ k8s.io/client-go v0.35.0
)
require (
github.com/davecgh/go-spew v1.1.1 // indirect
- github.com/emicklei/go-restful/v3 v3.9.0 // indirect
- github.com/go-logr/logr v1.2.3 // indirect
- github.com/go-openapi/jsonpointer v0.19.6 // indirect
- github.com/go-openapi/jsonreference v0.20.1 // indirect
- github.com/go-openapi/swag v0.22.3 // indirect
- github.com/gogo/protobuf v1.3.2 // indirect
- github.com/golang/protobuf v1.5.3 // indirect
- github.com/google/gnostic v0.5.7-v3refs // indirect
- github.com/google/go-cmp v0.5.9 // indirect
- github.com/google/gofuzz v1.1.0 // indirect
- github.com/google/uuid v1.3.0 // indirect
- github.com/imdario/mergo v0.3.6 // indirect
+ github.com/emicklei/go-restful/v3 v3.12.2 // indirect
+ github.com/fxamacker/cbor/v2 v2.9.0 // indirect
+ github.com/go-logr/logr v1.4.3 // indirect
+ github.com/go-openapi/jsonpointer v0.21.0 // indirect
+ github.com/go-openapi/jsonreference v0.20.2 // indirect
+ github.com/go-openapi/swag v0.23.0 // indirect
+ github.com/google/gnostic-models v0.7.0 // indirect
+ github.com/google/uuid v1.6.0 // indirect
github.com/josharian/intern v1.0.0 // indirect
github.com/json-iterator/go v1.1.12 // indirect
github.com/mailru/easyjson v0.7.7 // indirect
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect
- github.com/modern-go/reflect2 v1.0.2 // indirect
+ github.com/modern-go/reflect2 v1.0.3-0.20250322232337-35a7c28c31ee // indirect
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect
- github.com/spf13/pflag v1.0.5 // indirect
- golang.org/x/net v0.8.0 // indirect
- golang.org/x/oauth2 v0.0.0-20220223155221-ee480838109b // indirect
- golang.org/x/sys v0.6.0 // indirect
- golang.org/x/term v0.6.0 // indirect
- golang.org/x/text v0.8.0 // indirect
- golang.org/x/time v0.0.0-20220210224613-90d013bbcef8 // indirect
- google.golang.org/appengine v1.6.7 // indirect
- google.golang.org/protobuf v1.28.1 // indirect
+ github.com/spf13/pflag v1.0.9 // indirect
+ github.com/x448/float16 v0.8.4 // indirect
+ go.yaml.in/yaml/v2 v2.4.3 // indirect
+ go.yaml.in/yaml/v3 v3.0.4 // indirect
+ golang.org/x/net v0.47.0 // indirect
+ golang.org/x/oauth2 v0.30.0 // indirect
+ golang.org/x/sys v0.38.0 // indirect
+ golang.org/x/term v0.37.0 // indirect
+ golang.org/x/text v0.31.0 // indirect
+ golang.org/x/time v0.9.0 // indirect
+ google.golang.org/protobuf v1.36.8 // indirect
+ gopkg.in/evanphx/json-patch.v4 v4.13.0 // indirect
gopkg.in/inf.v0 v0.9.1 // indirect
- gopkg.in/yaml.v2 v2.4.0 // indirect
gopkg.in/yaml.v3 v3.0.1 // indirect
- k8s.io/klog/v2 v2.90.1 // indirect
- k8s.io/kube-openapi v0.0.0-20230501164219-8b0f38b5fd1f // indirect
- k8s.io/utils v0.0.0-20230209194617-a36077c30491 // indirect
- sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd // indirect
- sigs.k8s.io/structured-merge-diff/v4 v4.2.3 // indirect
- sigs.k8s.io/yaml v1.3.0 // indirect
+ k8s.io/klog/v2 v2.130.1 // indirect
+ k8s.io/kube-openapi v0.0.0-20250910181357-589584f1c912 // indirect
+ k8s.io/utils v0.0.0-20251002143259-bc988d571ff4 // indirect
+ sigs.k8s.io/json v0.0.0-20250730193827-2d320260d730 // indirect
+ sigs.k8s.io/randfill v1.0.0 // indirect
+ sigs.k8s.io/structured-merge-diff/v6 v6.3.0 // indirect
+ sigs.k8s.io/yaml v1.6.0 // indirect
)
diff --git a/k8s/log/kube-logger-go/go.sum b/k8s/log/kube-logger-go/go.sum
index 085c7d6c..b5f9dbe2 100644
--- a/k8s/log/kube-logger-go/go.sum
+++ b/k8s/log/kube-logger-go/go.sum
@@ -1,150 +1,41 @@
-cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
-cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
-cloud.google.com/go v0.38.0/go.mod h1:990N+gfupTy94rShfmMCWGDn0LpTmnzTp2qbd1dvSRU=
-cloud.google.com/go v0.44.1/go.mod h1:iSa0KzasP4Uvy3f1mN/7PiObzGgflwredwwASm/v6AU=
-cloud.google.com/go v0.44.2/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY=
-cloud.google.com/go v0.45.1/go.mod h1:RpBamKRgapWJb87xiFSdk4g1CME7QZg3uwTez+TSTjc=
-cloud.google.com/go v0.46.3/go.mod h1:a6bKKbmY7er1mI7TEI4lsAkts/mkhTSZK8w33B4RAg0=
-cloud.google.com/go v0.50.0/go.mod h1:r9sluTvynVuxRIOHXQEHMFffphuXHOMZMycpNR5e6To=
-cloud.google.com/go v0.52.0/go.mod h1:pXajvRH/6o3+F9jDHZWQ5PbGhn+o8w9qiu/CffaVdO4=
-cloud.google.com/go v0.53.0/go.mod h1:fp/UouUEsRkN6ryDKNW/Upv/JBKnv6WDthjR6+vze6M=
-cloud.google.com/go v0.54.0/go.mod h1:1rq2OEkV3YMf6n/9ZvGWI3GWw0VoqH/1x2nd8Is/bPc=
-cloud.google.com/go v0.56.0/go.mod h1:jr7tqZxxKOVYizybht9+26Z/gUq7tiRzu+ACVAMbKVk=
-cloud.google.com/go v0.57.0/go.mod h1:oXiQ6Rzq3RAkkY7N6t3TcE6jE+CIBBbA36lwQ1JyzZs=
-cloud.google.com/go v0.62.0/go.mod h1:jmCYTdRCQuc1PHIIJ/maLInMho30T/Y0M4hTdTShOYc=
-cloud.google.com/go v0.65.0/go.mod h1:O5N8zS7uWy9vkA9vayVHs65eM1ubvY4h553ofrNHObY=
-cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o=
-cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE=
-cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvftPBK2Dvzc=
-cloud.google.com/go/bigquery v1.5.0/go.mod h1:snEHRnqQbz117VIFhE8bmtwIDY80NLUZUMb4Nv6dBIg=
-cloud.google.com/go/bigquery v1.7.0/go.mod h1://okPTzCYNXSlb24MZs83e2Do+h+VXtc4gLoIoXIAPc=
-cloud.google.com/go/bigquery v1.8.0/go.mod h1:J5hqkt3O0uAFnINi6JXValWIb1v0goeZM77hZzJN/fQ=
-cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE=
-cloud.google.com/go/datastore v1.1.0/go.mod h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk=
-cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I=
-cloud.google.com/go/pubsub v1.1.0/go.mod h1:EwwdRX2sKPjnvnqCa270oGRyludottCI76h+R3AArQw=
-cloud.google.com/go/pubsub v1.2.0/go.mod h1:jhfEVHT8odbXTkndysNHCcx0awwzvfOlguIAii9o8iA=
-cloud.google.com/go/pubsub v1.3.1/go.mod h1:i+ucay31+CNRpDW4Lu78I4xXG+O1r/MAHgjpRVR+TSU=
-cloud.google.com/go/storage v1.0.0/go.mod h1:IhtSnM/ZTZV8YYJWCY8RULGVqBDmpoyjwiyrjsg+URw=
-cloud.google.com/go/storage v1.5.0/go.mod h1:tpKbwo567HUNpVclU5sGELwQWBDZ8gh0ZeosJ0Rtdos=
-cloud.google.com/go/storage v1.6.0/go.mod h1:N7U0C8pVQ/+NIKOBQyamJIeKQKkZ+mxpohlUTyfDhBk=
-cloud.google.com/go/storage v1.8.0/go.mod h1:Wv1Oy7z6Yz3DshWRJFhqM/UCfaWIRTdp0RXyy7KQOVs=
-cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9ullr3+Kg0=
-dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU=
-github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
-github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo=
-github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
-github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI=
-github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI=
-github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU=
-github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
-github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc=
+github.com/Masterminds/semver/v3 v3.4.0 h1:Zog+i5UMtVoCU8oKka5P7i9q9HgrJeGzI9SA1Xbatp0=
+github.com/Masterminds/semver/v3 v3.4.0/go.mod h1:4V+yj/TJE1HU9XfppCwVMZq3I84lprf4nC11bSS5beM=
github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E=
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
-github.com/docopt/docopt-go v0.0.0-20180111231733-ee0de3bc6815/go.mod h1:WwZ+bS3ebgob9U8Nd0kOddGdZWjyMGR8Wziv+TBNwSE=
-github.com/emicklei/go-restful/v3 v3.9.0 h1:XwGDlfxEnQZzuopoqxwSEllNcCOM9DhhFyhFIIGKwxE=
-github.com/emicklei/go-restful/v3 v3.9.0/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc=
-github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
-github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
-github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98=
-github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c=
-github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU=
-github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8=
-github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8=
-github.com/go-logr/logr v1.2.0/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=
-github.com/go-logr/logr v1.2.3 h1:2DntVwHkVopvECVRSlL5PSo9eG+cAkDCuckLubN+rq0=
-github.com/go-logr/logr v1.2.3/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=
-github.com/go-openapi/jsonpointer v0.19.6 h1:eCs3fxoIi3Wh6vtgmLTOjdhSpiqphQ+DaPn38N2ZdrE=
+github.com/emicklei/go-restful/v3 v3.12.2 h1:DhwDP0vY3k8ZzE0RunuJy8GhNpPL6zqLkDf9B/a0/xU=
+github.com/emicklei/go-restful/v3 v3.12.2/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc=
+github.com/fxamacker/cbor/v2 v2.9.0 h1:NpKPmjDBgUfBms6tr6JZkTHtfFGcMKsw3eGcmD/sapM=
+github.com/fxamacker/cbor/v2 v2.9.0/go.mod h1:vM4b+DJCtHn+zz7h3FFp/hDAI9WNWCsZj23V5ytsSxQ=
+github.com/go-logr/logr v1.4.3 h1:CjnDlHq8ikf6E492q6eKboGOC0T8CDaOvkHCIg8idEI=
+github.com/go-logr/logr v1.4.3/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY=
github.com/go-openapi/jsonpointer v0.19.6/go.mod h1:osyAmYz/mB/C3I+WsTTSgw1ONzaLJoLCyoi6/zppojs=
-github.com/go-openapi/jsonreference v0.20.1 h1:FBLnyygC4/IZZr893oiomc9XaghoveYTrLC1F86HID8=
-github.com/go-openapi/jsonreference v0.20.1/go.mod h1:Bl1zwGIM8/wsvqjsOQLJ/SH+En5Ap4rVB5KVcIDZG2k=
-github.com/go-openapi/swag v0.22.3 h1:yMBqmnQ0gyZvEb/+KzuWZOXgllrXT4SADYbvDaXHv/g=
+github.com/go-openapi/jsonpointer v0.21.0 h1:YgdVicSA9vH5RiHs9TZW5oyafXZFc6+2Vc1rr/O9oNQ=
+github.com/go-openapi/jsonpointer v0.21.0/go.mod h1:IUyH9l/+uyhIYQ/PXVA41Rexl+kOkAPDdXEYns6fzUY=
+github.com/go-openapi/jsonreference v0.20.2 h1:3sVjiK66+uXK/6oQ8xgcRKcFgQ5KXa2KvnJRumpMGbE=
+github.com/go-openapi/jsonreference v0.20.2/go.mod h1:Bl1zwGIM8/wsvqjsOQLJ/SH+En5Ap4rVB5KVcIDZG2k=
github.com/go-openapi/swag v0.22.3/go.mod h1:UzaqsxGiab7freDnrUUra0MwWfN/q7tE4j+VcZ0yl14=
-github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0 h1:p104kn46Q8WdvHunIJ9dAyjPVtrBPhSr3KT2yUst43I=
-github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0/go.mod h1:fyg7847qk6SyHyPtNmDHnmrv/HOrqktSC+C9fM+CJOE=
-github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q=
-github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q=
-github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=
-github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
-github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
-github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
-github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
-github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
-github.com/golang/mock v1.3.1/go.mod h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFUx0Y=
-github.com/golang/mock v1.4.0/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw=
-github.com/golang/mock v1.4.1/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw=
-github.com/golang/mock v1.4.3/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw=
-github.com/golang/mock v1.4.4/go.mod h1:l3mdAwkq5BuhzHwde/uurv3sEJeZMXNpwsxVWU71h+4=
-github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
-github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
-github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
-github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw=
-github.com/golang/protobuf v1.3.4/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw=
-github.com/golang/protobuf v1.3.5/go.mod h1:6O5/vntMXwX2lRkT1hjjk0nAC1IDOTvTlVgjlRvqsdk=
-github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8=
-github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA=
-github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs=
-github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w=
-github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0=
-github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8=
-github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=
-github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk=
-github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY=
-github.com/golang/protobuf v1.5.3 h1:KhyjKVUg7Usr/dYsdSqoFveMYd5ko72D+zANwlG1mmg=
-github.com/golang/protobuf v1.5.3/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY=
-github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
-github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
-github.com/google/gnostic v0.5.7-v3refs h1:FhTMOKj2VhjpouxvWJAV1TL304uMlb9zcDqkl6cEI54=
-github.com/google/gnostic v0.5.7-v3refs/go.mod h1:73MKFl6jIHelAJNaBGFzt3SPtZULs9dYrGFt8OiIsHQ=
-github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M=
-github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
-github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
-github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
-github.com/google/go-cmp v0.4.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
-github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
-github.com/google/go-cmp v0.5.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
-github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
-github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38=
-github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
+github.com/go-openapi/swag v0.23.0 h1:vsEVJDUo2hPJ2tu0/Xc+4noaxyEffXNIs3cOULZ+GrE=
+github.com/go-openapi/swag v0.23.0/go.mod h1:esZ8ITTYEsH1V2trKHjAN8Ai7xHb8RV+YSZ577vPjgQ=
+github.com/go-task/slim-sprig/v3 v3.0.0 h1:sUs3vkvUymDpBKi3qH1YSqBQk9+9D/8M2mN1vB6EwHI=
+github.com/go-task/slim-sprig/v3 v3.0.0/go.mod h1:W848ghGpv3Qj3dhTPRyJypKRiqCdHZiAzKg9hl15HA8=
+github.com/google/gnostic-models v0.7.0 h1:qwTtogB15McXDaNqTZdzPJRHvaVJlAl+HVQnLmJEJxo=
+github.com/google/gnostic-models v0.7.0/go.mod h1:whL5G0m6dmc5cPxKc5bdKdEN3UjI7OUGxBlw57miDrQ=
+github.com/google/go-cmp v0.7.0 h1:wk8382ETsv4JYUZwIsn6YpYiWiBsYLSJiTsyBybVuN8=
+github.com/google/go-cmp v0.7.0/go.mod h1:pXiqmnSA92OHEEa9HXL2W4E7lf9JzCmGVUdgjX3N/iU=
github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
-github.com/google/gofuzz v1.1.0 h1:Hsa8mG0dQ46ij8Sl2AYJDUv1oA9/d6Vk+3LG99Oe02g=
-github.com/google/gofuzz v1.1.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
-github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs=
-github.com/google/martian/v3 v3.0.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0=
-github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc=
-github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc=
-github.com/google/pprof v0.0.0-20191218002539-d4f498aebedc/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
-github.com/google/pprof v0.0.0-20200212024743-f11f1df84d12/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
-github.com/google/pprof v0.0.0-20200229191704-1ebb73c60ed3/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
-github.com/google/pprof v0.0.0-20200430221834-fc25d7d30c6d/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
-github.com/google/pprof v0.0.0-20200708004538-1a94d8640e99/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
-github.com/google/pprof v0.0.0-20210720184732-4bb14d4b1be1 h1:K6RDEckDVWvDI9JAJYCmNdQXq6neHJOYx3V6jnqNEec=
-github.com/google/pprof v0.0.0-20210720184732-4bb14d4b1be1/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
-github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI=
-github.com/google/uuid v1.3.0 h1:t6JiXgmwXMjEs8VusXIJk2BXHsn+wx8BZdTaoZ5fu7I=
-github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
-github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg=
-github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk=
-github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
-github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
-github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc=
-github.com/imdario/mergo v0.3.6 h1:xTNEAn+kxVO7dTZGu0CegyqKZmoWFI0rF8UxjlB2d28=
-github.com/imdario/mergo v0.3.6/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA=
+github.com/google/pprof v0.0.0-20250403155104-27863c87afa6 h1:BHT72Gu3keYf3ZEu2J0b1vyeLSOYI8bm5wbJM/8yDe8=
+github.com/google/pprof v0.0.0-20250403155104-27863c87afa6/go.mod h1:boTsfXsheKC2y+lKOCMpSfarhxDeIzfZG1jqGcPl3cA=
+github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0=
+github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/josharian/intern v1.0.0 h1:vlS4z54oSdjm0bgjRigI+G1HpF+tI+9rE5LLzOg8HmY=
github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y=
github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM=
github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo=
-github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU=
-github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk=
-github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8=
-github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
-github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
-github.com/kr/pretty v0.2.0/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI=
github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI=
-github.com/kr/pretty v0.3.0 h1:WgNl7dwNpEZ6jJ9k1snq4pZsg7DOEN8hP9Xw0Tsjwk0=
-github.com/kr/pretty v0.3.0/go.mod h1:640gp4NfQd8pI5XOwp5fnNeVWj67G7CFk/SaSQn7NBk=
+github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE=
+github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk=
github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
@@ -154,338 +45,85 @@ github.com/mailru/easyjson v0.7.7/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJ
github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg=
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
-github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9Gz0M=
github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk=
+github.com/modern-go/reflect2 v1.0.3-0.20250322232337-35a7c28c31ee h1:W5t00kpgFdJifH4BDsTlE89Zl93FEloxaWZfGcifgq8=
+github.com/modern-go/reflect2 v1.0.3-0.20250322232337-35a7c28c31ee/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk=
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA=
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ=
-github.com/onsi/ginkgo/v2 v2.9.1 h1:zie5Ly042PD3bsCvsSOPvRnFwyo3rKe64TJlD6nu0mk=
-github.com/onsi/ginkgo/v2 v2.9.1/go.mod h1:FEcmzVcCHl+4o9bQZVab+4dC9+j+91t2FHSzmGAPfuo=
-github.com/onsi/gomega v1.27.4 h1:Z2AnStgsdSayCMDiCU42qIz+HLqEPcgiOCXjAU/w+8E=
-github.com/onsi/gomega v1.27.4/go.mod h1:riYq/GJKh8hhoM01HN6Vmuy93AarCXCBGpvFDK3q3fQ=
+github.com/onsi/ginkgo/v2 v2.27.2 h1:LzwLj0b89qtIy6SSASkzlNvX6WktqurSHwkk2ipF/Ns=
+github.com/onsi/ginkgo/v2 v2.27.2/go.mod h1:ArE1D/XhNXBXCBkKOLkbsb2c81dQHCRcF5zwn/ykDRo=
+github.com/onsi/gomega v1.38.2 h1:eZCjf2xjZAqe+LeWvKb5weQ+NcPwX84kqJ0cZNxok2A=
+github.com/onsi/gomega v1.38.2/go.mod h1:W2MJcYxRGV63b418Ai34Ud0hEdTVXq9NW9+Sx6uXf3k=
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
-github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
-github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4=
-github.com/rogpeppe/go-internal v1.10.0 h1:TMyTOH3F/DB16zRVcYyreMH6GnZZrwQVAoYjRBZyWFQ=
-github.com/rogpeppe/go-internal v1.10.0/go.mod h1:UQnix2H7Ngw/k4C5ijL5+65zddjncjaFoBhdsK/akog=
-github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA=
-github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
-github.com/stoewer/go-strcase v1.2.0/go.mod h1:IBiWB2sKIp3wVVQ3Y035++gc+knqhUQag1KpM8ahLw8=
+github.com/rogpeppe/go-internal v1.14.1 h1:UQB4HGPB6osV0SQTLymcB4TgvyWu6ZyliaW0tI/otEQ=
+github.com/rogpeppe/go-internal v1.14.1/go.mod h1:MaRKkUm5W0goXpeCfT7UZI6fk/L7L7so1lCWt35ZSgc=
+github.com/spf13/pflag v1.0.9 h1:9exaQaMOCwffKiiiYk6/BndUBv+iRViNW+4lEMi0PvY=
+github.com/spf13/pflag v1.0.9/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw=
github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo=
+github.com/stretchr/objx v0.5.2 h1:xuMeJ0Sdp5ZMRXx/aWO6RZxdr3beISkG5/G/aIRr3pY=
+github.com/stretchr/objx v0.5.2/go.mod h1:FRsXN1f5AsAjCGJKqEizvkpNtU+EGNCLh3NxZ/8L+MA=
github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
-github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4=
-github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA=
github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU=
-github.com/stretchr/testify v1.8.1 h1:w7B6lhMri9wdJUVmEZPGGhZzrYTPvgJArz7wNPgYKsk=
github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4=
-github.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
-github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
-github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
-github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
-go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU=
-go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8=
-go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
-go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
-go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
-golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
-golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
-golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
-golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
-golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
-golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
-golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
-golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8=
-golang.org/x/exp v0.0.0-20190829153037-c13cbed26979/go.mod h1:86+5VVa7VpoJ4kLfm080zCjGlMRFzhUhsZKEZO7MGek=
-golang.org/x/exp v0.0.0-20191030013958-a1ab85dbe136/go.mod h1:JXzH8nQsPlswgeRAPE3MuO9GYsAcnJvJ4vnMwN/5qkY=
-golang.org/x/exp v0.0.0-20191129062945-2f5052295587/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4=
-golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4=
-golang.org/x/exp v0.0.0-20200119233911-0405dc783f0a/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4=
-golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM=
-golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU=
-golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js=
-golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0=
-golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
-golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU=
-golang.org/x/lint v0.0.0-20190301231843-5614ed5bae6f/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
-golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
-golang.org/x/lint v0.0.0-20190409202823-959b441ac422/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
-golang.org/x/lint v0.0.0-20190909230951-414d861bb4ac/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
-golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
-golang.org/x/lint v0.0.0-20191125180803-fdd1cda4f05f/go.mod h1:5qLYkcX4OjUUV8bRuDixDT3tpyyb+LUpUlRWLxfhWrs=
-golang.org/x/lint v0.0.0-20200130185559-910be7a94367/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY=
-golang.org/x/lint v0.0.0-20200302205851-738671d3881b/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY=
-golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU15maQ/Ox0txvL9dWGYEHz965HBQE=
-golang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028/go.mod h1:E/iHnbuqvinMTCcRqshq8CkpyQDoeVncDDYHnLhea+o=
-golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc=
-golang.org/x/mod v0.1.0/go.mod h1:0QHyrYULN0/3qlju5TqG8bIK38QM8yzMo5ekMj3DlcY=
-golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg=
-golang.org/x/mod v0.1.1-0.20191107180719-034126e5016b/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg=
-golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
-golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
-golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
-golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
-golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
-golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
-golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
-golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
-golang.org/x/net v0.0.0-20190501004415-9ce7a6920f09/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
-golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
-golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks=
-golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
-golang.org/x/net v0.0.0-20190628185345-da137c7871d7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
-golang.org/x/net v0.0.0-20190724013045-ca1201d0de80/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
-golang.org/x/net v0.0.0-20191209160850-c0dbc17a3553/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
-golang.org/x/net v0.0.0-20200114155413-6afb5195e5aa/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
-golang.org/x/net v0.0.0-20200202094626-16171245cfb2/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
-golang.org/x/net v0.0.0-20200222125558-5a598a2470a0/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
-golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
-golang.org/x/net v0.0.0-20200301022130-244492dfa37a/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
-golang.org/x/net v0.0.0-20200324143707-d3edc9973b7e/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
-golang.org/x/net v0.0.0-20200501053045-e0ff5e5a1de5/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
-golang.org/x/net v0.0.0-20200506145744-7e3656a0809f/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
-golang.org/x/net v0.0.0-20200513185701-a91f0712d120/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
-golang.org/x/net v0.0.0-20200520182314-0ba52f642ac2/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
-golang.org/x/net v0.0.0-20200625001655-4c5254603344/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
-golang.org/x/net v0.0.0-20200707034311-ab3426394381/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
-golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
-golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
-golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk=
-golang.org/x/net v0.8.0 h1:Zrh2ngAOFYneWTAIAPethzeaQLuHwhuBkuV6ZiRnUaQ=
-golang.org/x/net v0.8.0/go.mod h1:QVkue5JL9kW//ek3r6jTKnTFis1tRmNAW2P1shuFdJc=
-golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
-golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
-golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
-golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
-golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
-golang.org/x/oauth2 v0.0.0-20220223155221-ee480838109b h1:clP8eMhB30EHdc0bd2Twtq6kgU7yl5ub2cQLSdrv1Dg=
-golang.org/x/oauth2 v0.0.0-20220223155221-ee480838109b/go.mod h1:DAh4E804XQdzx2j+YRIaUnCqCV2RuMz24cGBJ5QYIrc=
-golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
-golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
-golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
-golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
-golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
-golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
-golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
-golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
-golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
-golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
-golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
-golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20191001151750-bb3f8db39f24/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20191228213918-04cbcbbfeed8/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200113162924-86b910548bc1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200202164722-d101bd2416d5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200212091648-12a6c2dcc1e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200302150141-5c8b2ff67527/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200331124033-c3d80250170d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200501052902-10377860bb8e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200511232937-7e40ca221e25/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200515095857-1151b9dac4a9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200523222454-059865788121/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200803210538-64077c9b5642/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.6.0 h1:MVltZSvRTcU2ljQOhs94SXPftV6DCNnZViHeQps87pQ=
-golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
-golang.org/x/term v0.6.0 h1:clScbb1cHjoCkyRbWwBEUZ5H/tIFu5TAXIqaZD0Gcjw=
-golang.org/x/term v0.6.0/go.mod h1:m6U89DPEgQRMq3DNkDClhWw02AUbt2daBVO4cn4Hv9U=
-golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
-golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
-golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
-golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
-golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
-golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
-golang.org/x/text v0.8.0 h1:57P1ETyNKtuIjB4SRd15iJxuhj8Gc416Y78H3qgMh68=
-golang.org/x/text v0.8.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8=
-golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
-golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
-golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
-golang.org/x/time v0.0.0-20220210224613-90d013bbcef8 h1:vVKdlvoWBphwdxWKrFZEuM0kGgGLxUOYcY4U/2Vjg44=
-golang.org/x/time v0.0.0-20220210224613-90d013bbcef8/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
-golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
-golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
-golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY=
-golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
-golang.org/x/tools v0.0.0-20190312151545-0bb0c0a6e846/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
-golang.org/x/tools v0.0.0-20190312170243-e65039ee4138/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
-golang.org/x/tools v0.0.0-20190425150028-36563e24a262/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
-golang.org/x/tools v0.0.0-20190506145303-2d16b83fe98c/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
-golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
-golang.org/x/tools v0.0.0-20190606124116-d0a3d012864b/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
-golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
-golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
-golang.org/x/tools v0.0.0-20190816200558-6889da9d5479/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
-golang.org/x/tools v0.0.0-20190911174233-4f2ddba30aff/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
-golang.org/x/tools v0.0.0-20191012152004-8de300cfc20a/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
-golang.org/x/tools v0.0.0-20191113191852-77e3bb0ad9e7/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
-golang.org/x/tools v0.0.0-20191115202509-3a792d9c32b2/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
-golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
-golang.org/x/tools v0.0.0-20191125144606-a911d9008d1f/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
-golang.org/x/tools v0.0.0-20191130070609-6e064ea0cf2d/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
-golang.org/x/tools v0.0.0-20191216173652-a0e659d51361/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
-golang.org/x/tools v0.0.0-20191227053925-7b8e75db28f4/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
-golang.org/x/tools v0.0.0-20200117161641-43d50277825c/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
-golang.org/x/tools v0.0.0-20200122220014-bf1340f18c4a/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
-golang.org/x/tools v0.0.0-20200130002326-2f3ba24bd6e7/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
-golang.org/x/tools v0.0.0-20200204074204-1cc6d1ef6c74/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
-golang.org/x/tools v0.0.0-20200207183749-b753a1ba74fa/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
-golang.org/x/tools v0.0.0-20200212150539-ea181f53ac56/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
-golang.org/x/tools v0.0.0-20200224181240-023911ca70b2/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
-golang.org/x/tools v0.0.0-20200227222343-706bc42d1f0d/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
-golang.org/x/tools v0.0.0-20200304193943-95d2e580d8eb/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw=
-golang.org/x/tools v0.0.0-20200312045724-11d5b4c81c7d/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw=
-golang.org/x/tools v0.0.0-20200331025713-a30bf2db82d4/go.mod h1:Sl4aGygMT6LrqrWclx+PTx3U+LnKx/seiNR+3G19Ar8=
-golang.org/x/tools v0.0.0-20200501065659-ab2804fb9c9d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
-golang.org/x/tools v0.0.0-20200512131952-2bc93b1c0c88/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
-golang.org/x/tools v0.0.0-20200515010526-7d3b6ebf133d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
-golang.org/x/tools v0.0.0-20200618134242-20370b0cb4b2/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
-golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
-golang.org/x/tools v0.0.0-20200729194436-6467de6f59a7/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA=
-golang.org/x/tools v0.0.0-20200804011535-6c149bb5ef0d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA=
-golang.org/x/tools v0.0.0-20200825202427-b303f430e36d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA=
-golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
-golang.org/x/tools v0.7.0 h1:W4OVu8VVOaIO0yzWMNdepAulS7YfoS3Zabrm8DOXXU4=
-golang.org/x/tools v0.7.0/go.mod h1:4pg6aUX35JBAogB10C9AtvVL+qowtN4pT3CGSQex14s=
-golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
-golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
-golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
-golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
-google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE=
-google.golang.org/api v0.7.0/go.mod h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E3M=
-google.golang.org/api v0.8.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg=
-google.golang.org/api v0.9.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg=
-google.golang.org/api v0.13.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI=
-google.golang.org/api v0.14.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI=
-google.golang.org/api v0.15.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI=
-google.golang.org/api v0.17.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE=
-google.golang.org/api v0.18.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE=
-google.golang.org/api v0.19.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE=
-google.golang.org/api v0.20.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE=
-google.golang.org/api v0.22.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE=
-google.golang.org/api v0.24.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE=
-google.golang.org/api v0.28.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE=
-google.golang.org/api v0.29.0/go.mod h1:Lcubydp8VUV7KeIHD9z2Bys/sm/vGKnG1UHuDBSrHWM=
-google.golang.org/api v0.30.0/go.mod h1:QGmEvQ87FHZNiUVJkT14jQNYJ4ZJjdRF23ZXz5138Fc=
-google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=
-google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
-google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
-google.golang.org/appengine v1.6.1/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0=
-google.golang.org/appengine v1.6.5/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc=
-google.golang.org/appengine v1.6.6/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc=
-google.golang.org/appengine v1.6.7 h1:FZR1q0exgwxzPzp/aF+VccGrSfxfPpkBqjIIEq3ru6c=
-google.golang.org/appengine v1.6.7/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc=
-google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
-google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
-google.golang.org/genproto v0.0.0-20190418145605-e7d98fc518a7/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
-google.golang.org/genproto v0.0.0-20190425155659-357c62f0e4bb/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
-google.golang.org/genproto v0.0.0-20190502173448-54afdca5d873/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
-google.golang.org/genproto v0.0.0-20190801165951-fa694d86fc64/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc=
-google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc=
-google.golang.org/genproto v0.0.0-20190911173649-1774047e7e51/go.mod h1:IbNlFCBrqXvoKpeg0TB2l7cyZUmoaFKYIwrEpbDKLA8=
-google.golang.org/genproto v0.0.0-20191108220845-16a3f7862a1a/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
-google.golang.org/genproto v0.0.0-20191115194625-c23dd37a84c9/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
-google.golang.org/genproto v0.0.0-20191216164720-4f79533eabd1/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
-google.golang.org/genproto v0.0.0-20191230161307-f3c370f40bfb/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
-google.golang.org/genproto v0.0.0-20200115191322-ca5a22157cba/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
-google.golang.org/genproto v0.0.0-20200122232147-0452cf42e150/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
-google.golang.org/genproto v0.0.0-20200204135345-fa8e72b47b90/go.mod h1:GmwEX6Z4W5gMy59cAlVYjN9JhxgbQH6Gn+gFDQe2lzA=
-google.golang.org/genproto v0.0.0-20200212174721-66ed5ce911ce/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
-google.golang.org/genproto v0.0.0-20200224152610-e50cd9704f63/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
-google.golang.org/genproto v0.0.0-20200228133532-8c2c7df3a383/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
-google.golang.org/genproto v0.0.0-20200305110556-506484158171/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
-google.golang.org/genproto v0.0.0-20200312145019-da6875a35672/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
-google.golang.org/genproto v0.0.0-20200331122359-1ee6d9798940/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
-google.golang.org/genproto v0.0.0-20200430143042-b979b6f78d84/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
-google.golang.org/genproto v0.0.0-20200511104702-f5ebc3bea380/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
-google.golang.org/genproto v0.0.0-20200515170657-fc4c6c6a6587/go.mod h1:YsZOwe1myG/8QRHRsmBRE1LrgQY60beZKjly0O1fX9U=
-google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo=
-google.golang.org/genproto v0.0.0-20200618031413-b414f8b61790/go.mod h1:jDfRM7FcilCzHH/e9qn6dsT145K34l5v+OpcnNgKAAA=
-google.golang.org/genproto v0.0.0-20200729003335-053ba62fc06f/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
-google.golang.org/genproto v0.0.0-20200804131852-c06518451d9c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
-google.golang.org/genproto v0.0.0-20200825200019-8632dd797987/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
-google.golang.org/genproto v0.0.0-20201019141844-1ed22bb0c154/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
-google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
-google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38=
-google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM=
-google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg=
-google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY=
-google.golang.org/grpc v1.26.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
-google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
-google.golang.org/grpc v1.27.1/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
-google.golang.org/grpc v1.28.0/go.mod h1:rpkK4SK4GF4Ach/+MFLZUBavHOvF2JJB5uozKKal+60=
-google.golang.org/grpc v1.29.1/go.mod h1:itym6AZVZYACWQqET3MqgPpjcuV5QH3BxFS3IjizoKk=
-google.golang.org/grpc v1.30.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak=
-google.golang.org/grpc v1.31.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak=
-google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8=
-google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0=
-google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM=
-google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE=
-google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo=
-google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
-google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
-google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
-google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGjtUeSXeh4=
-google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c=
-google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw=
-google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
-google.golang.org/protobuf v1.28.1 h1:d0NfwRgPtno5B1Wa6L2DAG+KivqkdutMf1UhdNx175w=
-google.golang.org/protobuf v1.28.1/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I=
+github.com/stretchr/testify v1.11.1 h1:7s2iGBzp5EwR7/aIZr8ao5+dra3wiQyKjjFuvgVKu7U=
+github.com/stretchr/testify v1.11.1/go.mod h1:wZwfW3scLgRK+23gO65QZefKpKQRnfz6sD981Nm4B6U=
+github.com/x448/float16 v0.8.4 h1:qLwI1I70+NjRFUR3zs1JPUCgaCXSh3SW62uAKT1mSBM=
+github.com/x448/float16 v0.8.4/go.mod h1:14CWIYCyZA/cWjXOioeEpHeN/83MdbZDRQHoFcYsOfg=
+go.yaml.in/yaml/v2 v2.4.3 h1:6gvOSjQoTB3vt1l+CU+tSyi/HOjfOjRLJ4YwYZGwRO0=
+go.yaml.in/yaml/v2 v2.4.3/go.mod h1:zSxWcmIDjOzPXpjlTTbAsKokqkDNAVtZO0WOMiT90s8=
+go.yaml.in/yaml/v3 v3.0.4 h1:tfq32ie2Jv2UxXFdLJdh3jXuOzWiL1fo0bu/FbuKpbc=
+go.yaml.in/yaml/v3 v3.0.4/go.mod h1:DhzuOOF2ATzADvBadXxruRBLzYTpT36CKvDb3+aBEFg=
+golang.org/x/mod v0.29.0 h1:HV8lRxZC4l2cr3Zq1LvtOsi/ThTgWnUk/y64QSs8GwA=
+golang.org/x/mod v0.29.0/go.mod h1:NyhrlYXJ2H4eJiRy/WDBO6HMqZQ6q9nk4JzS3NuCK+w=
+golang.org/x/net v0.47.0 h1:Mx+4dIFzqraBXUugkia1OOvlD6LemFo1ALMHjrXDOhY=
+golang.org/x/net v0.47.0/go.mod h1:/jNxtkgq5yWUGYkaZGqo27cfGZ1c5Nen03aYrrKpVRU=
+golang.org/x/oauth2 v0.30.0 h1:dnDm7JmhM45NNpd8FDDeLhK6FwqbOf4MLCM9zb1BOHI=
+golang.org/x/oauth2 v0.30.0/go.mod h1:B++QgG3ZKulg6sRPGD/mqlHQs5rB3Ml9erfeDY7xKlU=
+golang.org/x/sync v0.18.0 h1:kr88TuHDroi+UVf+0hZnirlk8o8T+4MrK6mr60WkH/I=
+golang.org/x/sync v0.18.0/go.mod h1:9KTHXmSnoGruLpwFjVSX0lNNA75CykiMECbovNTZqGI=
+golang.org/x/sys v0.38.0 h1:3yZWxaJjBmCWXqhN1qh02AkOnCQ1poK6oF+a7xWL6Gc=
+golang.org/x/sys v0.38.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks=
+golang.org/x/term v0.37.0 h1:8EGAD0qCmHYZg6J17DvsMy9/wJ7/D/4pV/wfnld5lTU=
+golang.org/x/term v0.37.0/go.mod h1:5pB4lxRNYYVZuTLmy8oR2BH8dflOR+IbTYFD8fi3254=
+golang.org/x/text v0.31.0 h1:aC8ghyu4JhP8VojJ2lEHBnochRno1sgL6nEi9WGFGMM=
+golang.org/x/text v0.31.0/go.mod h1:tKRAlv61yKIjGGHX/4tP1LTbc13YSec1pxVEWXzfoeM=
+golang.org/x/time v0.9.0 h1:EsRrnYcQiGH+5FfbgvV4AP7qEZstoyrHB0DzarOQ4ZY=
+golang.org/x/time v0.9.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM=
+golang.org/x/tools v0.38.0 h1:Hx2Xv8hISq8Lm16jvBZ2VQf+RLmbd7wVUsALibYI/IQ=
+golang.org/x/tools v0.38.0/go.mod h1:yEsQ/d/YK8cjh0L6rZlY8tgtlKiBNTL14pGDJPJpYQs=
+google.golang.org/protobuf v1.36.8 h1:xHScyCOEuuwZEc6UtSOvPbAT4zRh0xcNRYekJwfqyMc=
+google.golang.org/protobuf v1.36.8/go.mod h1:fuxRtAxBytpl4zzqUh6/eyUujkJdNiuEkXntxiD/uRU=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
-gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
-gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk=
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q=
-gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI=
+gopkg.in/evanphx/json-patch.v4 v4.13.0 h1:czT3CmqEaQ1aanPc5SdlgQrrEIb8w/wwCvWWnfEbYzo=
+gopkg.in/evanphx/json-patch.v4 v4.13.0/go.mod h1:p8EYWUEYMpynmqDbY58zCKCFZw8pRWMG4EsWvDvM72M=
gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc=
gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw=
-gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
-gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
-gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY=
-gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ=
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
-gopkg.in/yaml.v3 v3.0.0-20200615113413-eeeca48fe776/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
-honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
-honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
-honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
-honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
-honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg=
-honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k=
-honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k=
-k8s.io/api v0.27.2 h1:+H17AJpUMvl+clT+BPnKf0E3ksMAzoBBg7CntpSuADo=
-k8s.io/api v0.27.2/go.mod h1:ENmbocXfBT2ADujUXcBhHV55RIT31IIEvkntP6vZKS4=
-k8s.io/apimachinery v0.27.2 h1:vBjGaKKieaIreI+oQwELalVG4d8f3YAMNpWLzDXkxeg=
-k8s.io/apimachinery v0.27.2/go.mod h1:XNfZ6xklnMCOGGFNqXG7bUrQCoR04dh/E7FprV6pb+E=
-k8s.io/client-go v0.27.2 h1:vDLSeuYvCHKeoQRhCXjxXO45nHVv2Ip4Fe0MfioMrhE=
-k8s.io/client-go v0.27.2/go.mod h1:tY0gVmUsHrAmjzHX9zs7eCjxcBsf8IiNe7KQ52biTcQ=
-k8s.io/klog/v2 v2.90.1 h1:m4bYOKall2MmOiRaR1J+We67Do7vm9KiQVlT96lnHUw=
-k8s.io/klog/v2 v2.90.1/go.mod h1:y1WjHnz7Dj687irZUWR/WLkLc5N1YHtjLdmgWjndZn0=
-k8s.io/kube-openapi v0.0.0-20230501164219-8b0f38b5fd1f h1:2kWPakN3i/k81b0gvD5C5FJ2kxm1WrQFanWchyKuqGg=
-k8s.io/kube-openapi v0.0.0-20230501164219-8b0f38b5fd1f/go.mod h1:byini6yhqGC14c3ebc/QwanvYwhuMWF6yz2F8uwW8eg=
-k8s.io/utils v0.0.0-20230209194617-a36077c30491 h1:r0BAOLElQnnFhE/ApUsg3iHdVYYPBjNSSOMowRZxxsY=
-k8s.io/utils v0.0.0-20230209194617-a36077c30491/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0=
-rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8=
-rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0=
-rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA=
-sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd h1:EDPBXCAspyGV4jQlpZSudPeMmr1bNJefnuqLsRAsHZo=
-sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd/go.mod h1:B8JuhiUyNFVKdsE8h686QcCxMaH6HrOAZj4vswFpcB0=
-sigs.k8s.io/structured-merge-diff/v4 v4.2.3 h1:PRbqxJClWWYMNV1dhaG4NsibJbArud9kFxnAMREiWFE=
-sigs.k8s.io/structured-merge-diff/v4 v4.2.3/go.mod h1:qjx8mGObPmV2aSZepjQjbmb2ihdVs8cGKBraizNC69E=
-sigs.k8s.io/yaml v1.3.0 h1:a2VclLzOGrwOHDiV8EfBGhvjHvP46CtW5j6POvhYGGo=
-sigs.k8s.io/yaml v1.3.0/go.mod h1:GeOyir5tyXNByN85N/dRIT9es5UQNerPYEKK56eTBm8=
+k8s.io/api v0.35.0 h1:iBAU5LTyBI9vw3L5glmat1njFK34srdLmktWwLTprlY=
+k8s.io/api v0.35.0/go.mod h1:AQ0SNTzm4ZAczM03QH42c7l3bih1TbAXYo0DkF8ktnA=
+k8s.io/apimachinery v0.35.0 h1:Z2L3IHvPVv/MJ7xRxHEtk6GoJElaAqDCCU0S6ncYok8=
+k8s.io/apimachinery v0.35.0/go.mod h1:jQCgFZFR1F4Ik7hvr2g84RTJSZegBc8yHgFWKn//hns=
+k8s.io/client-go v0.35.0 h1:IAW0ifFbfQQwQmga0UdoH0yvdqrbwMdq9vIFEhRpxBE=
+k8s.io/client-go v0.35.0/go.mod h1:q2E5AAyqcbeLGPdoRB+Nxe3KYTfPce1Dnu1myQdqz9o=
+k8s.io/klog/v2 v2.130.1 h1:n9Xl7H1Xvksem4KFG4PYbdQCQxqc/tTUyrgXaOhHSzk=
+k8s.io/klog/v2 v2.130.1/go.mod h1:3Jpz1GvMt720eyJH1ckRHK1EDfpxISzJ7I9OYgaDtPE=
+k8s.io/kube-openapi v0.0.0-20250910181357-589584f1c912 h1:Y3gxNAuB0OBLImH611+UDZcmKS3g6CthxToOb37KgwE=
+k8s.io/kube-openapi v0.0.0-20250910181357-589584f1c912/go.mod h1:kdmbQkyfwUagLfXIad1y2TdrjPFWp2Q89B3qkRwf/pQ=
+k8s.io/utils v0.0.0-20251002143259-bc988d571ff4 h1:SjGebBtkBqHFOli+05xYbK8YF1Dzkbzn+gDM4X9T4Ck=
+k8s.io/utils v0.0.0-20251002143259-bc988d571ff4/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0=
+sigs.k8s.io/json v0.0.0-20250730193827-2d320260d730 h1:IpInykpT6ceI+QxKBbEflcR5EXP7sU1kvOlxwZh5txg=
+sigs.k8s.io/json v0.0.0-20250730193827-2d320260d730/go.mod h1:mdzfpAEoE6DHQEN0uh9ZbOCuHbLK5wOm7dK4ctXE9Tg=
+sigs.k8s.io/randfill v1.0.0 h1:JfjMILfT8A6RbawdsK2JXGBR5AQVfd+9TbzrlneTyrU=
+sigs.k8s.io/randfill v1.0.0/go.mod h1:XeLlZ/jmk4i1HRopwe7/aU3H5n1zNUcX6TM94b3QxOY=
+sigs.k8s.io/structured-merge-diff/v6 v6.3.0 h1:jTijUJbW353oVOd9oTlifJqOGEkUw2jB/fXCbTiQEco=
+sigs.k8s.io/structured-merge-diff/v6 v6.3.0/go.mod h1:M3W8sfWvn2HhQDIbGWj3S099YozAsymCo/wrT5ohRUE=
+sigs.k8s.io/yaml v1.6.0 h1:G8fkbMSAFqgEFgh4b1wmtzDnioxFCUgTZhlbj5P9QYs=
+sigs.k8s.io/yaml v1.6.0/go.mod h1:796bPqUfzR/0jLAl6XjHl3Ck7MiyVv8dbTdyT3/pMf4=
diff --git a/k8s/scope/build_context b/k8s/scope/build_context
index cb44f842..e60aa4ae 100755
--- a/k8s/scope/build_context
+++ b/k8s/scope/build_context
@@ -1,8 +1,12 @@
#!/bin/bash
-K8S_NAMESPACE=$(echo "$CONTEXT" | jq -r --arg default "$K8S_NAMESPACE" '
- .providers["container-orchestration"].cluster.namespace // $default
-')
+if [ -n "${NAMESPACE_OVERRIDE:-}" ]; then
+ K8S_NAMESPACE="$NAMESPACE_OVERRIDE"
+else
+ K8S_NAMESPACE=$(echo "$CONTEXT" | jq -r --arg default "$K8S_NAMESPACE" '
+ .providers["container-orchestration"].cluster.namespace // $default
+ ')
+fi
echo "Validating namespace $K8S_NAMESPACE exists"
diff --git a/k8s/scope/workflows/diagnose.yaml b/k8s/scope/workflows/diagnose.yaml
index 481281f2..66223726 100644
--- a/k8s/scope/workflows/diagnose.yaml
+++ b/k8s/scope/workflows/diagnose.yaml
@@ -2,14 +2,6 @@ continue_on_error: true
include:
- "$SERVICE_PATH/values.yaml"
steps:
- - name: build context
- type: script
- file: "$SERVICE_PATH/diagnose/build_context"
- output:
- - name: CONTEXT
- type: environment
- - name: LABEL_SELECTOR
- type: environment
- name: load_functions
type: script
file: "$SERVICE_PATH/diagnose/utils/diagnose_utils"
@@ -21,6 +13,14 @@ steps:
evidence: object
- name: notify_results
type: function
+ - name: build context
+ type: script
+ file: "$SERVICE_PATH/diagnose/build_context"
+ output:
+ - name: CONTEXT
+ type: environment
+ - name: LABEL_SELECTOR
+ type: environment
- name: diagnose
type: executor
before_each:
diff --git a/k8s/specs/actions/diagnose-deployment.json.tpl b/k8s/specs/actions/diagnose-deployment.json.tpl
index faa96285..91a02434 100644
--- a/k8s/specs/actions/diagnose-deployment.json.tpl
+++ b/k8s/specs/actions/diagnose-deployment.json.tpl
@@ -13,10 +13,14 @@
],
"properties": {
"scope_id": {
- "type": "string"
+ "type": "number",
+ "readOnly": true,
+ "visibleOn": ["read"]
},
"deployment_id": {
- "type": "string"
+ "type": "number",
+ "readOnly": true,
+ "visibleOn": ["read"]
}
}
},
diff --git a/k8s/specs/actions/diagnose-scope.json.tpl b/k8s/specs/actions/diagnose-scope.json.tpl
index d3e6e70a..ec2d2586 100644
--- a/k8s/specs/actions/diagnose-scope.json.tpl
+++ b/k8s/specs/actions/diagnose-scope.json.tpl
@@ -12,7 +12,9 @@
],
"properties": {
"scope_id": {
- "type": "string"
+ "type": "number",
+ "readOnly": true,
+ "visibleOn": ["read"]
}
}
},
@@ -28,7 +30,7 @@
},
"annotations": {
"show_on": [
- "scope", "performance"
+ "manage", "performance"
],
"runs_over": "scope"
}
diff --git a/k8s/values.yaml b/k8s/values.yaml
index 652e2abc..56edaa68 100644
--- a/k8s/values.yaml
+++ b/k8s/values.yaml
@@ -19,6 +19,7 @@ configuration:
BLUE_GREEN_INGRESS_PATH: "$SERVICE_PATH/deployment/templates/blue-green-ingress.yaml.tpl"
SERVICE_ACCOUNT_TEMPLATE: "$SERVICE_PATH/scope/templates/service-account.yaml.tpl"
# TRAFFIC_CONTAINER_IMAGE: "public.ecr.aws/nullplatform/k8s-traffic-manager:latest"
+# TRAFFIC_MANAGER_CONFIG_MAP: traffic-manager-configuration
IMAGE_PULL_SECRETS:
ENABLED: true
SECRETS: