diff --git a/.golangci.yaml b/.golangci.yaml index d8ad4cb082f..f9f13476bd2 100644 --- a/.golangci.yaml +++ b/.golangci.yaml @@ -77,6 +77,7 @@ linters: - third_party$ - builtin$ - examples$ + - tests/olmv1 issues: max-issues-per-linter: 0 max-same-issues: 0 diff --git a/Makefile b/Makefile index 9d10c0d358b..027571e7883 100644 --- a/Makefile +++ b/Makefile @@ -201,7 +201,7 @@ vet: check-go ## Run go vet against code. .PHONY: test test: check-go vet envtest ## Run unit tests; run Go linters checks; check if api and bundle folders are up to date; and check if go dependencies are valid @make versions - KUBEBUILDER_ASSETS="$(shell $(ENVTEST) use $(ENVTEST_K8S_VERSION) --bin-dir $(LOCALBIN) -p path)" go test -mod=mod $(shell go list -mod=mod ./... | grep -v /tests/e2e) -coverprofile cover.out + KUBEBUILDER_ASSETS="$(shell $(ENVTEST) use $(ENVTEST_K8S_VERSION) --bin-dir $(LOCALBIN) -p path)" go test -mod=mod $(shell go list -mod=mod ./... | grep -v /tests/e2e | grep -v /tests/olmv1) -coverprofile cover.out @make lint @make api-isupdated @make bundle-isupdated @@ -1017,6 +1017,45 @@ test-e2e-cleanup: login-required $(OC_CLI) delete ns mysql-persistent --ignore-not-found=true rm -rf $(SETTINGS_TMP) +##@ OLMv1 Tests + +OLMV1_PACKAGE ?= oadp-operator +OLMV1_NAMESPACE ?= $(OADP_TEST_NAMESPACE) +OLMV1_CHANNEL ?= +OLMV1_VERSION ?= +OLMV1_UPGRADE_VERSION ?= +OLMV1_CATALOG ?= oadp-olmv1-test-catalog +OLMV1_CATALOG_IMAGE ?= +OLMV1_SERVICE_ACCOUNT ?= oadp-olmv1-installer +OLMV1_FAIL_FAST ?= true + +OLMV1_GINKGO_FLAGS = --vv \ + --no-color=$(OPENSHIFT_CI) \ + --label-filter="olmv1" \ + --junit-report="$(ARTIFACT_DIR)/junit_olmv1_report.xml" \ + --fail-fast=$(OLMV1_FAIL_FAST) \ + --timeout=30m + +.PHONY: test-olmv1 +test-olmv1: login-required install-ginkgo ## Run OLMv1 lifecycle tests (install, verify, upgrade, cleanup) against a cluster with OLMv1 enabled. 
+ ginkgo run -mod=mod $(OLMV1_GINKGO_FLAGS) $(GINKGO_ARGS) tests/olmv1/ -- \ + -namespace=$(OLMV1_NAMESPACE) \ + -package=$(OLMV1_PACKAGE) \ + -channel=$(OLMV1_CHANNEL) \ + -version=$(OLMV1_VERSION) \ + -upgrade-version=$(OLMV1_UPGRADE_VERSION) \ + -catalog=$(OLMV1_CATALOG) \ + -catalog-image=$(OLMV1_CATALOG_IMAGE) \ + -service-account=$(OLMV1_SERVICE_ACCOUNT) \ + -artifact_dir=$(ARTIFACT_DIR) + +.PHONY: test-olmv1-cleanup +test-olmv1-cleanup: login-required ## Cleanup resources created by OLMv1 tests. + $(OC_CLI) delete clusterextension oadp-operator --ignore-not-found=true + $(OC_CLI) delete clustercatalog $(OLMV1_CATALOG) --ignore-not-found=true + $(OC_CLI) delete clusterrolebinding $(OLMV1_SERVICE_ACCOUNT)-cluster-admin --ignore-not-found=true + $(OC_CLI) delete sa $(OLMV1_SERVICE_ACCOUNT) -n $(OLMV1_NAMESPACE) --ignore-not-found=true + .PHONY: update-non-admin-manifests update-non-admin-manifests: NON_ADMIN_CONTROLLER_IMG?=quay.io/konveyor/oadp-non-admin:latest update-non-admin-manifests: yq ## Update Non Admin Controller (NAC) manifests shipped with OADP, from NON_ADMIN_CONTROLLER_PATH diff --git a/tests/olmv1/.gitignore b/tests/olmv1/.gitignore new file mode 100644 index 00000000000..3fec32c8427 --- /dev/null +++ b/tests/olmv1/.gitignore @@ -0,0 +1 @@ +tmp/ diff --git a/tests/olmv1/olmv1_install_test.go b/tests/olmv1/olmv1_install_test.go new file mode 100644 index 00000000000..e5671f740e1 --- /dev/null +++ b/tests/olmv1/olmv1_install_test.go @@ -0,0 +1,264 @@ +package olmv1_test + +import ( + "context" + "fmt" + "log" + "time" + + "github.com/onsi/ginkgo/v2" + "github.com/onsi/gomega" + corev1 "k8s.io/api/core/v1" + apierrors "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +const ( + clusterExtensionName = "oadp-operator" + + oadpCRDName = "dataprotectionapplications.oadp.openshift.io" + veleroCRDName = "backups.velero.io" + restoreCRDName = "restores.velero.io" + + managerLabelSelector = 
"control-plane=controller-manager" +) + +var _ = ginkgo.Describe("OADP OLMv1 lifecycle", ginkgo.Ordered, ginkgo.Label("olmv1"), func() { + ctx := context.Background() + + ginkgo.BeforeAll(func() { + ginkgo.By("Cleaning up orphaned OADP/Velero CRDs from previous installs") + cleanupOrphanedCRDs(ctx) + + ginkgo.By("Setting up namespace, ServiceAccount, and RBAC") + ensureNamespace(ctx, namespace) + ensureServiceAccount(ctx, serviceAccountName, namespace) + ensureClusterAdminBinding(ctx, serviceAccountName, namespace) + + if catalogImage != "" { + ginkgo.By(fmt.Sprintf("Creating ClusterCatalog %s from image %s", catalogName, catalogImage)) + ensureClusterCatalog(ctx, catalogName, catalogImage) + waitForClusterCatalogServing(ctx, catalogName) + } + }) + + ginkgo.AfterAll(func() { + ginkgo.By("Cleaning up OLMv1 test resources") + err := deleteClusterExtension(ctx, clusterExtensionName) + if err != nil { + log.Printf("Warning: failed to delete ClusterExtension: %v", err) + } + + gomega.Eventually(func() bool { + _, err := getClusterExtension(ctx, clusterExtensionName) + return apierrors.IsNotFound(err) + }, 3*time.Minute, 5*time.Second).Should(gomega.BeTrue(), "ClusterExtension should be deleted") + + if createdCatalog { + ginkgo.By(fmt.Sprintf("Deleting ClusterCatalog %s", catalogName)) + deleteClusterCatalog(ctx, catalogName) + } + + cleanupClusterRoleBinding(ctx, serviceAccountName) + }) + + ginkgo.It("should install OADP operator via ClusterExtension", func() { + ginkgo.By("Creating the ClusterExtension") + ce := buildClusterExtension(clusterExtensionName, packageName, namespace, serviceAccountName) + _, err := dynamicClient.Resource(clusterExtensionGVR).Create(ctx, ce, metav1.CreateOptions{}) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + log.Printf("Created ClusterExtension %s (package=%s, namespace=%s)", clusterExtensionName, packageName, namespace) + + ginkgo.By("Waiting for ClusterExtension to be installed") + terminalReasons := map[string]bool{ + 
"InvalidConfiguration": true, + "Failed": true, + } + gomega.Eventually(func(g gomega.Gomega) { + obj, err := getClusterExtension(ctx, clusterExtensionName) + g.Expect(err).NotTo(gomega.HaveOccurred(), "ClusterExtension should exist") + + logAllConditions(obj) + + progCond, progFound := getCondition(obj, "Progressing") + if progFound { + reason, _ := progCond["reason"].(string) + message, _ := progCond["message"].(string) + g.Expect(terminalReasons[reason]).NotTo(gomega.BeTrue(), + "ClusterExtension has terminal error on Progressing: reason=%s message=%s", reason, message) + } + + instCond, instFound := getCondition(obj, "Installed") + g.Expect(instFound).To(gomega.BeTrue(), "Installed condition should be present") + status, _ := instCond["status"].(string) + g.Expect(status).To(gomega.Equal("True"), "Installed condition should be True") + }, 10*time.Minute, 10*time.Second).Should(gomega.Succeed()) + + ginkgo.By("Checking installed bundle info") + obj, err := getClusterExtension(ctx, clusterExtensionName) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + bundleName, bundleVersion, found := getInstalledBundle(obj) + gomega.Expect(found).To(gomega.BeTrue(), "installed bundle should be present in status") + log.Printf("Installed bundle: name=%s version=%s", bundleName, bundleVersion) + }) + + ginkgo.It("should have the OADP controller-manager pod running", func() { + ginkgo.By("Waiting for controller-manager pod to be Running") + gomega.Eventually(func() (bool, error) { + pods, err := kubeClient.CoreV1().Pods(namespace).List(ctx, metav1.ListOptions{ + LabelSelector: managerLabelSelector, + }) + if err != nil { + return false, err + } + for _, pod := range pods.Items { + if pod.Status.Phase == corev1.PodRunning { + log.Printf("Controller-manager pod %s is Running", pod.Name) + return true, nil + } + log.Printf("Controller-manager pod %s phase: %s", pod.Name, pod.Status.Phase) + } + return false, nil + }, 5*time.Minute, 10*time.Second).Should(gomega.BeTrue(), 
"controller-manager pod should be Running") + }) + + ginkgo.It("should have OADP CRDs installed", func() { + expectedCRDs := []string{ + oadpCRDName, + veleroCRDName, + restoreCRDName, + "schedules.velero.io", + "backupstoragelocations.velero.io", + "volumesnapshotlocations.velero.io", + } + + for _, crdName := range expectedCRDs { + ginkgo.By(fmt.Sprintf("Checking CRD %s exists", crdName)) + exists, err := crdExists(ctx, crdName) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + gomega.Expect(exists).To(gomega.BeTrue(), fmt.Sprintf("CRD %s should exist", crdName)) + log.Printf("CRD %s exists", crdName) + } + }) + + ginkgo.It("should not report deprecation warnings", func() { + obj, err := getClusterExtension(ctx, clusterExtensionName) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + for _, condType := range []string{"Deprecated", "PackageDeprecated", "ChannelDeprecated", "BundleDeprecated"} { + cond, found := getCondition(obj, condType) + if found { + status, _ := cond["status"].(string) + gomega.Expect(status).To(gomega.Equal("False"), + fmt.Sprintf("%s condition should be False, got %s", condType, status)) + } + } + }) + + ginkgo.When("upgrading the operator", func() { + ginkgo.BeforeAll(func() { + if upgradeVersion == "" { + ginkgo.Skip("No --upgrade-version specified, skipping upgrade tests") + } + }) + + ginkgo.It("should upgrade the ClusterExtension to the target version", func() { + ginkgo.By(fmt.Sprintf("Patching ClusterExtension version to %s", upgradeVersion)) + obj, err := getClusterExtension(ctx, clusterExtensionName) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + previousBundleName, previousVersion, _ := getInstalledBundle(obj) + log.Printf("Current installed bundle: name=%s version=%s", previousBundleName, previousVersion) + + catalogSpec, _, _ := unstructuredNestedMap(obj.Object, "spec", "source", "catalog") + gomega.Expect(catalogSpec).NotTo(gomega.BeNil()) + catalogSpec["version"] = upgradeVersion + 
catalogSpec["upgradeConstraintPolicy"] = "SelfCertified" + err = unstructuredSetNestedMap(obj.Object, catalogSpec, "spec", "source", "catalog") + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + _, err = dynamicClient.Resource(clusterExtensionGVR).Update(ctx, obj, metav1.UpdateOptions{}) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + log.Printf("Patched ClusterExtension version to %s", upgradeVersion) + + ginkgo.By("Waiting for upgrade to complete") + // Poll until the installed bundle version differs from previousVersion. + // On any transient/indeterminate state we must return previousVersion (not + // ""): returning "" would instantly satisfy ShouldNot(Equal(previousVersion)) + // and falsely report the upgrade as complete. + gomega.Eventually(func() string { + updated, err := getClusterExtension(ctx, clusterExtensionName) + if err != nil { + return previousVersion // transient API error: keep polling + } + + cond, found := getCondition(updated, "Installed") + if !found { + return previousVersion // condition not reported yet: keep polling + } + status, _ := cond["status"].(string) + if status != "True" { + reason, _ := cond["reason"].(string) + message, _ := cond["message"].(string) + log.Printf("Installed condition: status=%s reason=%s message=%s", status, reason, message) + return previousVersion // not installed yet: keep polling + } + + _, bundleVer, found := getInstalledBundle(updated) + if !found { + return previousVersion // bundle info not populated yet: keep polling + } + log.Printf("Installed bundle version: %s", bundleVer) + return bundleVer + }, 10*time.Minute, 10*time.Second).ShouldNot(gomega.Equal(previousVersion), + "Installed bundle version should change after upgrade") + + ginkgo.By("Verifying controller-manager pod is running after upgrade") + gomega.Eventually(func() (bool, error) { + pods, err := kubeClient.CoreV1().Pods(namespace).List(ctx, metav1.ListOptions{ + LabelSelector: managerLabelSelector, + }) + if err != nil { + return false, err + } + for _, pod := range pods.Items { + if pod.Status.Phase == corev1.PodRunning { + return true, nil + } + } + return false, nil + }, 5*time.Minute, 10*time.Second).Should(gomega.BeTrue()) + }) + }) +}) + +func unstructuredNestedMap(obj map[string]interface{}, fields ...string) (map[string]interface{}, bool, error) { + var current interface{} = obj + for _, field := range fields { + m, ok := current.(map[string]interface{}) + if !ok { + return nil, false, fmt.Errorf("expected map at 
field %s", field) + } + current, ok = m[field] + if !ok { + return nil, false, nil + } + } + result, ok := current.(map[string]interface{}) + if !ok { + return nil, false, fmt.Errorf("final value is not a map") + } + return result, true, nil +} + +func unstructuredSetNestedMap(obj map[string]interface{}, value map[string]interface{}, fields ...string) error { + if len(fields) == 0 { + return fmt.Errorf("no fields specified") + } + current := obj + for _, field := range fields[:len(fields)-1] { + next, ok := current[field].(map[string]interface{}) + if !ok { + return fmt.Errorf("expected map at field %s", field) + } + current = next + } + current[fields[len(fields)-1]] = value + return nil +} diff --git a/tests/olmv1/olmv1_suite_test.go b/tests/olmv1/olmv1_suite_test.go new file mode 100644 index 00000000000..7494879baaf --- /dev/null +++ b/tests/olmv1/olmv1_suite_test.go @@ -0,0 +1,357 @@ +package olmv1_test + +import ( + "context" + "flag" + "log" + "strings" + "testing" + "time" + + "github.com/onsi/ginkgo/v2" + "github.com/onsi/gomega" + corev1 "k8s.io/api/core/v1" + rbacv1 "k8s.io/api/rbac/v1" + apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" + apierrors "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/client-go/dynamic" + "k8s.io/client-go/kubernetes" + "sigs.k8s.io/controller-runtime/pkg/client/config" +) + +var ( + namespace string + packageName string + channel string + version string + upgradeVersion string + catalogName string + catalogImage string + serviceAccountName string + artifactDir string + + createdCatalog bool + + kubeClient *kubernetes.Clientset + dynamicClient dynamic.Interface + + clusterExtensionGVR = schema.GroupVersionResource{ + Group: "olm.operatorframework.io", + Version: "v1", + Resource: "clusterextensions", + } + + clusterCatalogGVR = schema.GroupVersionResource{ + 
Group: "olm.operatorframework.io", + Version: "v1", + Resource: "clustercatalogs", + } +) + +func init() { + flag.StringVar(&namespace, "namespace", "openshift-adp", "Namespace to install the operator into") + flag.StringVar(&packageName, "package", "oadp-operator", "OLM package name for the operator") + flag.StringVar(&channel, "channel", "", "Catalog channel (optional)") + flag.StringVar(&version, "version", "", "Version to install (optional, e.g. '1.5.1' or '1.5.x')") + flag.StringVar(&upgradeVersion, "upgrade-version", "", "Version to upgrade to (optional)") + flag.StringVar(&catalogName, "catalog", "oadp-olmv1-test-catalog", "ClusterCatalog name to create or reference") + flag.StringVar(&catalogImage, "catalog-image", "", "Catalog image to use for creating a ClusterCatalog (required when package is not in default catalogs)") + flag.StringVar(&serviceAccountName, "service-account", "oadp-olmv1-installer", "ServiceAccount name for ClusterExtension") + flag.StringVar(&artifactDir, "artifact_dir", "/tmp", "Directory for test artifacts") +} + +func TestOADPOLMv1(t *testing.T) { + flag.Parse() + gomega.RegisterFailHandler(ginkgo.Fail) + + kubeConfig := config.GetConfigOrDie() + kubeConfig.QPS = 50 + kubeConfig.Burst = 100 + + var err error + kubeClient, err = kubernetes.NewForConfig(kubeConfig) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + dynamicClient, err = dynamic.NewForConfig(kubeConfig) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + ginkgo.RunSpecs(t, "OADP OLMv1 Suite") +} + +// --- Helpers --- + +func ensureNamespace(ctx context.Context, name string) { + ns := &corev1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: name}} + _, err := kubeClient.CoreV1().Namespaces().Create(ctx, ns, metav1.CreateOptions{}) + if apierrors.IsAlreadyExists(err) { + return + } + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + log.Printf("Created namespace %s", name) +} + +func ensureServiceAccount(ctx context.Context, name, ns string) { + sa := 
&corev1.ServiceAccount{ + ObjectMeta: metav1.ObjectMeta{Name: name, Namespace: ns}, + } + _, err := kubeClient.CoreV1().ServiceAccounts(ns).Create(ctx, sa, metav1.CreateOptions{}) + if apierrors.IsAlreadyExists(err) { + return + } + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + log.Printf("Created ServiceAccount %s/%s", ns, name) +} + +// ensureClusterAdminBinding grants cluster-admin to the installer SA. +// This is intentionally broad for testing; production should use least-privilege RBAC. +func ensureClusterAdminBinding(ctx context.Context, saName, ns string) { + bindingName := saName + "-cluster-admin" + crb := &rbacv1.ClusterRoleBinding{ + ObjectMeta: metav1.ObjectMeta{Name: bindingName}, + RoleRef: rbacv1.RoleRef{ + APIGroup: "rbac.authorization.k8s.io", + Kind: "ClusterRole", + Name: "cluster-admin", + }, + Subjects: []rbacv1.Subject{ + {Kind: "ServiceAccount", Name: saName, Namespace: ns}, + }, + } + _, err := kubeClient.RbacV1().ClusterRoleBindings().Create(ctx, crb, metav1.CreateOptions{}) + if apierrors.IsAlreadyExists(err) { + return + } + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + log.Printf("Created ClusterRoleBinding %s", bindingName) +} + +func buildClusterExtension(name, pkg, ns, sa string) *unstructured.Unstructured { + spec := map[string]interface{}{ + "namespace": ns, + "serviceAccount": map[string]interface{}{ + "name": sa, + }, + "source": map[string]interface{}{ + "sourceType": "Catalog", + "catalog": map[string]interface{}{ + "packageName": pkg, + }, + }, + // OwnNamespace operators require watchNamespace to tell OLMv1 + // which namespace the operator should watch. Set it to the + // install namespace so it mirrors OLMv0 OwnNamespace behavior. 
+ "config": map[string]interface{}{ + "configType": "Inline", + "inline": map[string]interface{}{ + "watchNamespace": ns, + }, + }, + } + + ce := &unstructured.Unstructured{ + Object: map[string]interface{}{ + "apiVersion": "olm.operatorframework.io/v1", + "kind": "ClusterExtension", + "metadata": map[string]interface{}{ + "name": name, + }, + "spec": spec, + }, + } + + catalogSpec := spec["source"].(map[string]interface{})["catalog"].(map[string]interface{}) + if catalogImage != "" { + catalogSpec["selector"] = map[string]interface{}{ + "matchLabels": map[string]interface{}{ + "olm.operatorframework.io/metadata.name": catalogName, + }, + } + } + if channel != "" { + catalogSpec["channels"] = []interface{}{channel} + } + if version != "" { + catalogSpec["version"] = version + } + + return ce +} + +func getClusterExtension(ctx context.Context, name string) (*unstructured.Unstructured, error) { + return dynamicClient.Resource(clusterExtensionGVR).Get(ctx, name, metav1.GetOptions{}) +} + +func deleteClusterExtension(ctx context.Context, name string) error { + err := dynamicClient.Resource(clusterExtensionGVR).Delete(ctx, name, metav1.DeleteOptions{}) + if apierrors.IsNotFound(err) { + return nil + } + return err +} + +func getCondition(obj *unstructured.Unstructured, condType string) (map[string]interface{}, bool) { + conditions, found, err := unstructured.NestedSlice(obj.Object, "status", "conditions") + if err != nil || !found { + return nil, false + } + for _, c := range conditions { + cond, ok := c.(map[string]interface{}) + if !ok { + continue + } + if cond["type"] == condType { + return cond, true + } + } + return nil, false +} + +func logAllConditions(obj *unstructured.Unstructured) { + conditions, found, err := unstructured.NestedSlice(obj.Object, "status", "conditions") + if err != nil || !found { + log.Print(" No conditions present yet") + return + } + for _, c := range conditions { + cond, ok := c.(map[string]interface{}) + if !ok { + continue + } + 
condType, _ := cond["type"].(string) + status, _ := cond["status"].(string) + reason, _ := cond["reason"].(string) + message, _ := cond["message"].(string) + if len(message) > 120 { + message = message[:120] + "..." + } + log.Printf(" %s: status=%s reason=%s message=%s", condType, status, reason, message) + } +} + +func getInstalledBundle(obj *unstructured.Unstructured) (name string, ver string, found bool) { + bundleName, _, _ := unstructured.NestedString(obj.Object, "status", "install", "bundle", "name") + bundleVersion, _, _ := unstructured.NestedString(obj.Object, "status", "install", "bundle", "version") + if bundleName != "" { + return bundleName, bundleVersion, true + } + return "", "", false +} + +func crdExists(ctx context.Context, name string) (bool, error) { + crdGVR := schema.GroupVersionResource{ + Group: apiextensionsv1.SchemeGroupVersion.Group, + Version: apiextensionsv1.SchemeGroupVersion.Version, + Resource: "customresourcedefinitions", + } + _, err := dynamicClient.Resource(crdGVR).Get(ctx, name, metav1.GetOptions{}) + if apierrors.IsNotFound(err) { + return false, nil + } + if err != nil { + return false, err + } + return true, nil +} + +func cleanupClusterRoleBinding(ctx context.Context, saName string) { + bindingName := saName + "-cluster-admin" + err := kubeClient.RbacV1().ClusterRoleBindings().Delete(ctx, bindingName, metav1.DeleteOptions{}) + if err != nil && !apierrors.IsNotFound(err) { + log.Printf("Warning: failed to delete ClusterRoleBinding %s: %v", bindingName, err) + } +} + +// cleanupOrphanedCRDs deletes any OADP or Velero CRDs left behind by a +// previous OLMv0 deployment or a prior test run. OLMv1 cannot adopt CRDs +// it did not create, so these must be removed before a fresh install. 
+func cleanupOrphanedCRDs(ctx context.Context) { + crdGVR := schema.GroupVersionResource{ + Group: apiextensionsv1.SchemeGroupVersion.Group, + Version: apiextensionsv1.SchemeGroupVersion.Version, + Resource: "customresourcedefinitions", + } + crdList, err := dynamicClient.Resource(crdGVR).List(ctx, metav1.ListOptions{}) + if err != nil { + log.Printf("Warning: failed to list CRDs: %v", err) + return + } + var deleted int + for _, crd := range crdList.Items { + name := crd.GetName() + if strings.HasSuffix(name, ".oadp.openshift.io") || strings.HasSuffix(name, ".velero.io") { + if err := dynamicClient.Resource(crdGVR).Delete(ctx, name, metav1.DeleteOptions{}); err != nil && !apierrors.IsNotFound(err) { + log.Printf("Warning: failed to delete CRD %s: %v", name, err) + } else { + deleted++ + } + } + } + if deleted > 0 { + log.Printf("Deleted %d orphaned OADP/Velero CRDs", deleted) + } +} + +func ensureClusterCatalog(ctx context.Context, name, image string) { + cc := &unstructured.Unstructured{ + Object: map[string]interface{}{ + "apiVersion": "olm.operatorframework.io/v1", + "kind": "ClusterCatalog", + "metadata": map[string]interface{}{ + "name": name, + }, + "spec": map[string]interface{}{ + "source": map[string]interface{}{ + "type": "Image", + "image": map[string]interface{}{ + "ref": image, + }, + }, + }, + }, + } + _, err := dynamicClient.Resource(clusterCatalogGVR).Create(ctx, cc, metav1.CreateOptions{}) + if apierrors.IsAlreadyExists(err) { + log.Printf("ClusterCatalog %s already exists", name) + return + } + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + createdCatalog = true + log.Printf("Created ClusterCatalog %s with image %s", name, image) +} + +func waitForClusterCatalogServing(ctx context.Context, name string) { + gomega.Eventually(func() bool { + obj, err := dynamicClient.Resource(clusterCatalogGVR).Get(ctx, name, metav1.GetOptions{}) + if err != nil { + log.Printf("Error getting ClusterCatalog %s: %v", name, err) + return false + } + conditions, 
found, _ := unstructured.NestedSlice(obj.Object, "status", "conditions") + if !found { + return false + } + for _, c := range conditions { + cond, ok := c.(map[string]interface{}) + if !ok { + continue + } + if cond["type"] == "Serving" { + status, _ := cond["status"].(string) + reason, _ := cond["reason"].(string) + log.Printf("ClusterCatalog %s Serving: status=%s reason=%s", name, status, reason) + return status == "True" + } + } + return false + }, 5*time.Minute, 10*time.Second).Should(gomega.BeTrue(), "ClusterCatalog should be Serving") +} + +func deleteClusterCatalog(ctx context.Context, name string) { + err := dynamicClient.Resource(clusterCatalogGVR).Delete(ctx, name, metav1.DeleteOptions{}) + if err != nil && !apierrors.IsNotFound(err) { + log.Printf("Warning: failed to delete ClusterCatalog %s: %v", name, err) + } +}