diff --git a/.travis.yml b/.travis.yml index 66402ccc18..1f1673ebcb 100644 --- a/.travis.yml +++ b/.travis.yml @@ -2,9 +2,9 @@ sudo: required dist: xenial language: go go: - - 1.17.3 + - 1.17.11 before_install: - - sudo apt-get update -yq + - sudo apt-get update -yq || true - sudo apt-get install go-md2man -y - sudo apt-get install -y awscli cache: @@ -12,10 +12,10 @@ cache: - $HOME/.cache/go-build script: - | - if [ "${TRAVIS_BRANCH}" == "master" ]; then - export DOCKER_HUB_STORK_TAG=master - export DOCKER_HUB_STORK_TEST_TAG=latest - export DOCKER_HUB_CMD_EXECUTOR_TAG=master + if [ "${TRAVIS_BRANCH}" == "2.12-nfs" ]; then + export DOCKER_HUB_STORK_TAG="${TRAVIS_BRANCH}"-dev + export DOCKER_HUB_STORK_TEST_TAG="${TRAVIS_BRANCH}"-dev + export DOCKER_HUB_CMD_EXECUTOR_TAG="${TRAVIS_BRANCH}"-dev else export DOCKER_HUB_STORK_TAG=`git rev-parse --short HEAD` export DOCKER_HUB_STORK_TEST_TAG=`git rev-parse --short HEAD` diff --git a/Dockerfile b/Dockerfile index dacc9c82c5..2459c284d7 100644 --- a/Dockerfile +++ b/Dockerfile @@ -18,7 +18,7 @@ RUN python3 -m pip install awscli && python3 -m pip install rsa --upgrade RUN curl -q -o /usr/local/bin/aws-iam-authenticator https://amazon-eks.s3-us-west-2.amazonaws.com/1.10.3/2018-07-26/bin/linux/amd64/aws-iam-authenticator && \ chmod +x /usr/local/bin/aws-iam-authenticator -ARG GCLOUD_SDK=google-cloud-sdk-269.0.0-linux-x86_64.tar.gz +ARG GCLOUD_SDK=google-cloud-sdk-399.0.0-linux-x86_64.tar.gz # Remove the test directories # Also don't need gsutil RUN curl -q -o $GCLOUD_SDK https://dl.google.com/dl/cloudsdk/channels/rapid/downloads/$GCLOUD_SDK && \ diff --git a/Makefile b/Makefile index 994f972741..339856989d 100644 --- a/Makefile +++ b/Makefile @@ -29,7 +29,7 @@ ifeq ($(BUILD_TYPE),debug) BUILDFLAGS += -gcflags "-N -l" endif -RELEASE_VER := 2.7.0 +RELEASE_VER := 2.12.0 BASE_DIR := $(shell git rev-parse --show-toplevel) GIT_SHA := $(shell git rev-parse --short HEAD) BIN :=$(BASE_DIR)/bin diff --git a/cmd/stork/stork.go b/cmd/stork/stork.go index b685119de7..58a06d13c0 100644 --- a/cmd/stork/stork.go +++ b/cmd/stork/stork.go @@ -29,6 +29,7 @@ import ( "github.com/libopenstorage/stork/pkg/k8sutils" "github.com/libopenstorage/stork/pkg/metrics" "github.com/libopenstorage/stork/pkg/migration" + "github.com/libopenstorage/stork/pkg/migration/controllers" "github.com/libopenstorage/stork/pkg/monitor" "github.com/libopenstorage/stork/pkg/objectcontroller" "github.com/libopenstorage/stork/pkg/pvcwatcher" @@ -41,9 +42,11 @@ import ( "github.com/libopenstorage/stork/pkg/webhookadmission" kdmpapi "github.com/portworx/kdmp/pkg/apis/kdmp/v1alpha1" "github.com/portworx/kdmp/pkg/controllers/dataexport" + "github.com/portworx/kdmp/pkg/controllers/resourceexport" "github.com/portworx/kdmp/pkg/drivers" "github.com/portworx/kdmp/pkg/jobratelimit" kdmpversion "github.com/portworx/kdmp/pkg/version" + "github.com/portworx/sched-ops/k8s/common" schedops "github.com/portworx/sched-ops/k8s/core" "github.com/prometheus/client_golang/prometheus/promhttp" log "github.com/sirupsen/logrus" @@ -143,7 +146,7 @@ func main() { Name: "application-controller", Usage: "Start the controllers for managing applications (default: true)", }, - cli.BoolTFlag{ + cli.BoolFlag{ Name: "px-object-controller", Usage: "Start the px object controller.", }, @@ -184,12 +187,12 @@ func main() { }, cli.IntFlag{ Name: "k8s-api-qps", - Value: 100, + Value: 1000, Usage: "Restrict number of k8s api requests from stork (default: 100 QPS)", }, cli.IntFlag{ Name: "k8s-api-burst", - Value: 100, + Value: 2000, Usage: 
"Restrict number of k8s api requests from stork (default: 100 Burst)", }, cli.BoolTFlag{ @@ -201,6 +204,10 @@ func main() { Value: 4, Usage: "Max threads for apply resources during migration (default: 4)", }, + cli.BoolTFlag{ + Name: controllers.ResourceTransformationControllerName, + Usage: "Start the resource transformation controller (default: true)", + }, } if err := app.Run(os.Args); err != nil { @@ -285,6 +292,18 @@ func run(c *cli.Context) { eventBroadcaster.StartRecordingToSink(&core_v1.EventSinkImpl{Interface: k8sClient.CoreV1().Events("")}) recorder := eventBroadcaster.NewRecorder(scheme.Scheme, api_v1.EventSource{Component: eventComponentName}) + // Create operator-sdk manager that will manage all controllers. + // Setup the controller manager before starting any watches / other controllers + mgr, err := manager.New(config, manager.Options{}) + if err != nil { + log.Fatalf("Setup controller manager: %v", err) + } + + // Setup scheme for all stork resources + if err := apis.AddToScheme(mgr.GetScheme()); err != nil { + log.Fatalf("Setup scheme failed for stork resources: %v", err) + } + var d volume.Driver if driverName != "" { log.Infof("Using driver %v", driverName) @@ -333,16 +352,6 @@ func run(c *cli.Context) { } } } - // Create operator-sdk manager that will manage all controllers. - mgr, err := manager.New(config, manager.Options{}) - if err != nil { - log.Fatalf("Setup controller manager: %v", err) - } - - // Setup scheme for all stork resources - if err := apis.AddToScheme(mgr.GetScheme()); err != nil { - log.Fatalf("Setup scheme failed for stork resources: %v", err) - } runFunc := func(context.Context) { runStork(mgr, d, recorder, c) @@ -413,6 +422,12 @@ func runStork(mgr manager.Manager, d volume.Driver, recorder record.EventRecorde if err := resourceCollector.Init(nil); err != nil { log.Fatalf("Error initializing ResourceCollector: %v", err) } + if err := os.Setenv(common.BurstRate, strconv.Itoa(burst)); err != nil { + log.Fatalf("Error setting Burst Rate: %v", err) + } + if err := os.Setenv(common.QPSRate, strconv.Itoa(qps)); err != nil { + log.Fatalf("Error setting Burst Rate: %v", err) + } adminNamespace := c.String("admin-namespace") if adminNamespace == "" { adminNamespace = c.String("migration-admin-namespace") @@ -468,6 +483,13 @@ func runStork(mgr manager.Manager, d volume.Driver, recorder record.EventRecorde } } + if c.Bool(controllers.ResourceTransformationControllerName) { + rt := controllers.NewResourceTransformation(mgr, d, recorder, resourceCollector) + if err := rt.Init(mgr); err != nil { + log.Fatalf("Error initializing resource transformation controller: %v", err) + } + } + if c.Bool("cluster-domain-controllers") { clusterDomains := clusterdomains.ClusterDomains{ Driver: d, @@ -493,7 +515,7 @@ func runStork(mgr manager.Manager, d volume.Driver, recorder record.EventRecorde if c.Bool("px-object-controller") { objectController := &objectcontroller.ObjectController{} if err := objectController.Init(); err != nil { - log.Fatalf("Error initializing px-object-controller : %v", err) + log.Warnf("Error initializing px-object-controller : %v", err) } } if c.Bool("kdmp-controller") { @@ -508,6 +530,14 @@ func runStork(mgr manager.Manager, d volume.Driver, recorder record.EventRecorde if err := dataexport.Init(mgr); err != nil { log.Fatalf("Error initializing kdmp controller: %v", err) } + resourceexport, err := resourceexport.NewController(mgr) + if err != nil { + log.Fatalf("Error initializing resource export controller: %v", err) + } + + if err := 
resourceExport.Init(mgr); err != nil { + log.Fatalf("Error initializing resource export controller manager: %v", err) + } } ctx := context.Background() diff --git a/drivers/volume/aws/aws.go b/drivers/volume/aws/aws.go index c4fed686bf..e5c46b673a 100644 --- a/drivers/volume/aws/aws.go +++ b/drivers/volume/aws/aws.go @@ -116,11 +116,17 @@ func (a *aws) OwnsPVCForBackup( pvc *v1.PersistentVolumeClaim, cmBackupType string, crBackupType string, + blType storkapi.BackupLocationType, ) bool { if cmBackupType == storkapi.ApplicationBackupGeneric { // If user has forced the backupType in config map, default to generic always return false } + // For AWS volumes, when the backup location type is NFS, we do not own the PVC. + // It will default to kdmp. + if blType == storkapi.BackupLocationNFS { + return false + } return a.OwnsPVC(coreOps, pvc) } @@ -397,6 +403,7 @@ func (a *aws) DeleteBackup(backup *storkapi.ApplicationBackup) (bool, error) { func (a *aws) UpdateMigratedPersistentVolumeSpec( pv *v1.PersistentVolume, vInfo *storkapi.ApplicationRestoreVolumeInfo, + namespaceMapping map[string]string, ) (*v1.PersistentVolume, error) { if pv.Spec.CSI != nil { pv.Spec.CSI.VolumeHandle = pv.Name diff --git a/drivers/volume/azure/azure.go b/drivers/volume/azure/azure.go index 08f02a7c3d..72d50f214d 100644 --- a/drivers/volume/azure/azure.go +++ b/drivers/volume/azure/azure.go @@ -157,11 +157,17 @@ func (a *azure) OwnsPVCForBackup( pvc *v1.PersistentVolumeClaim, cmBackupType string, crBackupType string, + blType storkapi.BackupLocationType, ) bool { if cmBackupType == storkapi.ApplicationBackupGeneric { // If user has forced the backupType in config map, default to generic always return false } + // For Azure-based volumes, when the backup location type is NFS, we do not own the PVC. + // It will default to kdmp. + if blType == storkapi.BackupLocationNFS { + return false + } return a.OwnsPVC(coreOps, pvc) } @@ -408,6 +414,7 @@ func (a *azure) DeleteBackup(backup *storkapi.ApplicationBackup) (bool, error) { func (a *azure) UpdateMigratedPersistentVolumeSpec( pv *v1.PersistentVolume, vInfo *storkapi.ApplicationRestoreVolumeInfo, + namespaceMapping map[string]string, ) (*v1.PersistentVolume, error) { disk, err := a.diskClient.Get(context.TODO(), a.resourceGroup, pv.Name) if err != nil { diff --git a/drivers/volume/csi/csi.go b/drivers/volume/csi/csi.go index 491fac2203..9060ca7569 100644 --- a/drivers/volume/csi/csi.go +++ b/drivers/volume/csi/csi.go @@ -89,7 +89,7 @@ type BackupObjectv1beta1Csi struct { V1SnapshotRequired bool } -// GetVolumeSnapshotContent retrieves a backed up volume snapshot +// GetVolumeSnapshot retrieves a backed up volume snapshot func (cbo *csiBackupObject) GetVolumeSnapshot(snapshotID string) (interface{}, error) { var vs interface{} var ok bool @@ -309,7 +309,12 @@ func (c *csi) OwnsPVCForBackup( pvc *v1.PersistentVolumeClaim, cmBackupType string, crBackupType string, + blType storkapi.BackupLocationType, ) bool { + // For CSI volumes, when the backup location type is NFS, we do not own the PVC; it will default to kdmp. + if blType == storkapi.BackupLocationNFS { + return false + } if cmBackupType == storkapi.ApplicationBackupGeneric || crBackupType == storkapi.ApplicationBackupGeneric { // If user has forced the backupType in config map or applicationbackup CR, default to generic always return false @@ -1170,6 +1175,7 @@ func (c *csi) DeleteBackup(backup *storkapi.ApplicationBackup) (bool, error) { func (c *csi) UpdateMigratedPersistentVolumeSpec( pv *v1.PersistentVolume, vInfo *storkapi.ApplicationRestoreVolumeInfo, + 
namespaceMapping map[string]string, ) (*v1.PersistentVolume, error) { return pv, nil } diff --git a/drivers/volume/gcp/gcp.go b/drivers/volume/gcp/gcp.go index d0e33f4fbd..3f1e4179e5 100644 --- a/drivers/volume/gcp/gcp.go +++ b/drivers/volume/gcp/gcp.go @@ -98,11 +98,17 @@ func (g *gcp) OwnsPVCForBackup( pvc *v1.PersistentVolumeClaim, cmBackupType string, crBackupType string, + blType storkapi.BackupLocationType, ) bool { if cmBackupType == storkapi.ApplicationBackupGeneric { // If user has forced the backupType in config map, default to generic always return false } + // For GCP volumes, when the backup location type is NFS, we do not own the PVC. + // It will default to kdmp. + if blType == storkapi.BackupLocationNFS { + return false + } return g.OwnsPVC(coreOps, pvc) } @@ -347,6 +353,12 @@ func (g *gcp) DeleteBackup(backup *storkapi.ApplicationBackup) (bool, error) { } _, err := service.Snapshots.Delete(vInfo.Options["projectID"], vInfo.BackupID).Do() if err != nil { + if gceErr, ok := err.(*googleapi.Error); ok { + if gceErr.Code == http.StatusNotFound { + // snapshot is already deleted + continue + } + } return true, err } } @@ -356,6 +368,7 @@ func (g *gcp) UpdateMigratedPersistentVolumeSpec( pv *v1.PersistentVolume, vInfo *storkapi.ApplicationRestoreVolumeInfo, + namespaceMapping map[string]string, ) (*v1.PersistentVolume, error) { if pv.Spec.CSI != nil { key, err := common.VolumeIDToKey(pv.Spec.CSI.VolumeHandle) diff --git a/drivers/volume/kdmp/kdmp.go b/drivers/volume/kdmp/kdmp.go index d44ffc9a95..2ea3c832e7 100644 --- a/drivers/volume/kdmp/kdmp.go +++ b/drivers/volume/kdmp/kdmp.go @@ -2,12 +2,12 @@ package kdmp import ( "fmt" + "github.com/libopenstorage/stork/pkg/utils" "os" "reflect" "strings" "time" - "github.com/aquilax/truncate" snapv1 "github.com/kubernetes-incubator/external-storage/snapshot/pkg/apis/crd/v1" snapshotVolume "github.com/kubernetes-incubator/external-storage/snapshot/pkg/volume" stork_driver "github.com/libopenstorage/stork/drivers" @@ -31,20 +31,17 @@ import ( k8serror "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/api/meta" "k8s.io/apimachinery/pkg/runtime" - "k8s.io/apimachinery/pkg/util/validation" "k8s.io/apimachinery/pkg/util/wait" k8shelper "k8s.io/component-helpers/storage/volume" ) const ( - prefixRepo = "generic-backup" - prefixRestore = "restore" - prefixBackup = "backup" - prefixDelete = "delete" - skipResourceAnnotation = "stork.libopenstorage.org/skip-resource" - volumeinitialDelay = 2 * time.Second - volumeFactor = 1.5 - volumeSteps = 20 + prefixRepo = "generic-backup" + prefixRestore = "restore" + prefixDelete = "delete" + volumeinitialDelay = 2 * time.Second + volumeFactor = 1.5 + volumeSteps = 20 // StorkAPIVersion current api version supported by stork StorkAPIVersion = "stork.libopenstorage.org/v1alpha1" // KdmpAPIVersion current api version supported by KDMP @@ -60,32 +57,19 @@ ( // StorkAnnotation for pvcs created by stork-kdmp driver StorkAnnotation = "stork.libopenstorage.org/kdmp" // backupUID annotation key - backupUIDKey = "portworx.io/backup-uid" - - pxbackupAnnotationPrefix = "portworx.io/" - pxbackupAnnotationCreateByKey = pxbackupAnnotationPrefix + "created-by" - pxbackupAnnotationCreateByValue = "px-backup" - pxbackupObjectUIDKey = pxbackupAnnotationPrefix + "backup-uid" - pxbackupObjectNameKey = pxbackupAnnotationPrefix + "backup-name" - pxRestoreObjectUIDKey = pxbackupAnnotationPrefix + "restore-uid" - pxRestoreObjectNameKey = 
pxbackupAnnotationPrefix + "restore-name" + backupUIDKey = "portworx.io/backup-uid" + pxRestoreObjectUIDKey = utils.PxbackupAnnotationPrefix + "restore-uid" + pxRestoreObjectNameKey = utils.PxbackupAnnotationPrefix + "restore-name" //kdmp related labels - kdmpAnnotationPrefix = "kdmp.portworx.com/" - // backup related Labels - applicationBackupCRNameKey = kdmpAnnotationPrefix + "applicationbackup-cr-name" - applicationBackupCRUIDKey = kdmpAnnotationPrefix + "applicationbackup-cr-uid" - backupObjectNameKey = kdmpAnnotationPrefix + "backupobject-name" - backupObjectUIDKey = kdmpAnnotationPrefix + "backupobject-uid" - // restore related Labels - applicationRestoreCRNameKey = kdmpAnnotationPrefix + "applicationrestore-cr-name" - applicationRestoreCRUIDKey = kdmpAnnotationPrefix + "applicationrestore-cr-uid" - restoreObjectNameKey = kdmpAnnotationPrefix + "restoreobject-name" - restoreObjectUIDKey = kdmpAnnotationPrefix + "restoreobject-uid" + applicationRestoreCRNameKey = utils.KdmpAnnotationPrefix + "applicationrestore-cr-name" + applicationRestoreCRUIDKey = utils.KdmpAnnotationPrefix + "applicationrestore-cr-uid" + restoreObjectNameKey = utils.KdmpAnnotationPrefix + "restoreobject-name" + restoreObjectUIDKey = utils.KdmpAnnotationPrefix + "restoreobject-uid" - pvcNameKey = kdmpAnnotationPrefix + "pvc-name" - pvcUIDKey = kdmpAnnotationPrefix + "pvc-uid" + pvcNameKey = utils.KdmpAnnotationPrefix + "pvc-name" + pvcUIDKey = utils.KdmpAnnotationPrefix + "pvc-uid" // pvcProvisionerAnnotation is the annotation on PVC which has the // provisioner name pvcProvisionerAnnotation = "volume.beta.kubernetes.io/storage-provisioner" @@ -134,7 +118,7 @@ func (k *kdmp) Stop() error { return nil } -func (k *kdmp) OwnsPVCForBackup(coreOps core.Ops, pvc *v1.PersistentVolumeClaim, cmBackupType string, crBackupType string) bool { +func (k *kdmp) OwnsPVCForBackup(coreOps core.Ops, pvc *v1.PersistentVolumeClaim, cmBackupType string, crBackupType string, blType storkapi.BackupLocationType) bool { // KDMP can handle any PVC type. 
KDMP driver will always be a fallback // option when none of the other supported drivers by stork own the PVC return true @@ -153,8 +137,8 @@ func (k *kdmp) OwnsPV(pv *v1.PersistentVolume) bool { } func getGenericCRName(opsPrefix, crUID, pvcUID, ns string) string { - name := fmt.Sprintf("%s-%s-%s-%s", opsPrefix, getShortUID(crUID), getShortUID(pvcUID), ns) - name = getValidLabel(name) + name := fmt.Sprintf("%s-%s-%s-%s", opsPrefix, utils.GetShortUID(crUID), utils.GetShortUID(pvcUID), ns) + name = utils.GetValidLabel(name) return name } @@ -296,15 +280,15 @@ func (k *kdmp) StartBackup(backup *storkapi.ApplicationBackup, dataExport := &kdmpapi.DataExport{} // Adding required label for debugging labels := make(map[string]string) - labels[applicationBackupCRNameKey] = getValidLabel(backup.Name) - labels[applicationBackupCRUIDKey] = getValidLabel(getShortUID(string(backup.UID))) - labels[pvcNameKey] = getValidLabel(pvc.Name) - labels[pvcUIDKey] = getValidLabel(getShortUID(string(pvc.UID))) + labels[utils.ApplicationBackupCRNameKey] = utils.GetValidLabel(backup.Name) + labels[utils.ApplicationBackupCRUIDKey] = utils.GetValidLabel(utils.GetShortUID(string(backup.UID))) + labels[pvcNameKey] = utils.GetValidLabel(pvc.Name) + labels[pvcUIDKey] = utils.GetValidLabel(utils.GetShortUID(string(pvc.UID))) // If backup from px-backup, update the backup object details in the label - if val, ok := backup.Annotations[pxbackupAnnotationCreateByKey]; ok { - if val == pxbackupAnnotationCreateByValue { - labels[backupObjectNameKey] = getValidLabel(backup.Annotations[pxbackupObjectNameKey]) - labels[backupObjectUIDKey] = getValidLabel(backup.Annotations[pxbackupObjectUIDKey]) + if val, ok := backup.Annotations[utils.PxbackupAnnotationCreateByKey]; ok { + if val == utils.PxbackupAnnotationCreateByValue { + labels[utils.BackupObjectNameKey] = utils.GetValidLabel(backup.Annotations[utils.PxbackupObjectNameKey]) + labels[utils.BackupObjectUIDKey] = utils.GetValidLabel(backup.Annotations[utils.PxbackupObjectUIDKey]) } } @@ -317,10 +301,10 @@ func (k *kdmp) StartBackup(backup *storkapi.ApplicationBackup, } dataExport.Spec.TriggeredFromNs = storkPodNs dataExport.Annotations = make(map[string]string) - dataExport.Annotations[skipResourceAnnotation] = "true" - dataExport.Annotations[backupObjectUIDKey] = string(backup.Annotations[pxbackupObjectUIDKey]) + dataExport.Annotations[utils.SkipResourceAnnotation] = "true" + dataExport.Annotations[utils.BackupObjectUIDKey] = string(backup.Annotations[utils.PxbackupObjectUIDKey]) dataExport.Annotations[pvcUIDKey] = string(pvc.UID) - dataExport.Name = getGenericCRName(prefixBackup, string(backup.UID), string(pvc.UID), pvc.Namespace) + dataExport.Name = getGenericCRName(utils.PrefixBackup, string(backup.UID), string(pvc.UID), pvc.Namespace) dataExport.Namespace = pvc.Namespace dataExport.Spec.Type = kdmpapi.DataExportKopia dataExport.Spec.Destination = kdmpapi.DataExportObjectReference{ @@ -356,7 +340,7 @@ func (k *kdmp) GetBackupStatus(backup *storkapi.ApplicationBackup) ([]*storkapi. 
if vInfo.DriverName != storkvolume.KDMPDriverName { continue } - crName := getGenericCRName(prefixBackup, string(backup.UID), vInfo.PersistentVolumeClaimUID, vInfo.Namespace) + crName := getGenericCRName(utils.PrefixBackup, string(backup.UID), vInfo.PersistentVolumeClaimUID, vInfo.Namespace) dataExport, err := kdmpShedOps.Instance().GetDataExport(crName, vInfo.Namespace) if err != nil { logrus.Errorf("failed to get backup DataExport CR: %v", err) @@ -419,7 +403,7 @@ func isDataExportCompleted(status kdmpapi.ExportStatus) bool { func (k *kdmp) CancelBackup(backup *storkapi.ApplicationBackup) error { for _, vInfo := range backup.Status.Volumes { - crName := getGenericCRName(prefixBackup, string(backup.UID), vInfo.PersistentVolumeClaimUID, vInfo.Namespace) + crName := getGenericCRName(utils.PrefixBackup, string(backup.UID), vInfo.PersistentVolumeClaimUID, vInfo.Namespace) err := kdmpShedOps.Instance().DeleteDataExport(crName, vInfo.Namespace) if err != nil && k8serror.IsNotFound(err) { errMsg := fmt.Sprintf("failed to delete data export CR [%v]: %v", crName, err) @@ -434,9 +418,9 @@ func (k *kdmp) DeleteBackup(backup *storkapi.ApplicationBackup) (bool, error) { // For Applicationbackup CR created by px-backup, we want to handle deleting // successful PVC (of in-progress backup) from px-backup deleteworker() to avoid two entities // doing the delete of snapshot leading to races. - if val, ok := backup.Annotations[pxbackupAnnotationCreateByKey]; !ok { + if val, ok := backup.Annotations[utils.PxbackupAnnotationCreateByKey]; !ok { return deleteKdmpSnapshot(backup) - } else if val != pxbackupAnnotationCreateByValue { + } else if val != utils.PxbackupAnnotationCreateByValue { return deleteKdmpSnapshot(backup) } else { logrus.Infof("skipping snapshot deletion as ApplicationBackup CR [%v] is created by px-backup", backup.Name) @@ -470,15 +454,15 @@ func deleteKdmpSnapshot(backup *storkapi.ApplicationBackup) (bool, error) { if err != nil && k8serror.IsNotFound(err) { // Adding required label for debugging labels := make(map[string]string) - labels[applicationBackupCRNameKey] = getValidLabel(backup.Name) - labels[applicationBackupCRUIDKey] = getValidLabel(string(backup.UID)) - labels[pvcNameKey] = getValidLabel(vInfo.PersistentVolumeClaim) - labels[pvcUIDKey] = getValidLabel(vInfo.PersistentVolumeClaimUID) + labels[utils.ApplicationBackupCRNameKey] = utils.GetValidLabel(backup.Name) + labels[utils.ApplicationBackupCRUIDKey] = utils.GetValidLabel(string(backup.UID)) + labels[pvcNameKey] = utils.GetValidLabel(vInfo.PersistentVolumeClaim) + labels[pvcUIDKey] = utils.GetValidLabel(vInfo.PersistentVolumeClaimUID) // If backup from px-backup, update the backup object details in the label - if val, ok := backup.Annotations[pxbackupAnnotationCreateByKey]; ok { - if val == pxbackupAnnotationCreateByValue { - labels[backupObjectNameKey] = getValidLabel(backup.Annotations[pxbackupObjectNameKey]) - labels[backupObjectUIDKey] = getValidLabel(backup.Annotations[pxbackupObjectUIDKey]) + if val, ok := backup.Annotations[utils.PxbackupAnnotationCreateByKey]; ok { + if val == utils.PxbackupAnnotationCreateByValue { + labels[utils.BackupObjectNameKey] = utils.GetValidLabel(backup.Annotations[utils.PxbackupObjectNameKey]) + labels[utils.BackupObjectUIDKey] = utils.GetValidLabel(backup.Annotations[utils.PxbackupObjectUIDKey]) } } err := dataexport.CreateCredentialsSecret(secretName, backup.Spec.BackupLocation, backup.Namespace, backup.Namespace, labels) @@ -576,6 +560,7 @@ func doKdmpDeleteJob(id string, driver 
drivers.Interface) (bool, error) { func (k *kdmp) UpdateMigratedPersistentVolumeSpec( pv *v1.PersistentVolume, vInfo *storkapi.ApplicationRestoreVolumeInfo, + namespaceMapping map[string]string, ) (*v1.PersistentVolume, error) { return pv, nil } @@ -629,7 +614,8 @@ func (k *kdmp) getRestorePVCs( delete(pvc.Annotations, bindCompletedKey) delete(pvc.Annotations, boundByControllerKey) delete(pvc.Annotations, storageClassKey) - delete(pvc.Annotations, storageProvisioner) + delete(pvc.Annotations, k8shelper.AnnBetaStorageProvisioner) + delete(pvc.Annotations, k8shelper.AnnStorageProvisioner) delete(pvc.Annotations, storageNodeAnnotation) pvc.Annotations[KdmpAnnotation] = StorkAnnotation } @@ -652,6 +638,7 @@ func (k *kdmp) StartRestore( volumeBackupInfos []*storkapi.ApplicationBackupVolumeInfo, objects []runtime.Unstructured, ) ([]*storkapi.ApplicationRestoreVolumeInfo, error) { + funct := "kdmp.StartRestore" log.ApplicationRestoreLog(restore).Debugf("started generic restore: %v", restore.Name) volumeInfos := make([]*storkapi.ApplicationRestoreVolumeInfo, 0) nodes, err := core.Instance().GetNodes() @@ -696,27 +683,33 @@ func (k *kdmp) StartRestore( destFullZoneName = strings.Join(splitDestRegion, "-") volumeInfo.Zones = append(volumeInfo.Zones, destFullZoneName) } - - // get corresponding pvc object from objects list - pvc, err := storkvolume.GetPVCFromObjects(objects, bkpvInfo) - if err != nil { - return nil, err - } - if !nonSupportedProvider { - for _, node := range nodes.Items { - zone := node.Labels[v1.LabelTopologyZone] - if zone == destFullZoneName { - pvc.Annotations[storageNodeAnnotation] = node.Name - } - } - } - val, ok := restore.Spec.NamespaceMapping[bkpvInfo.Namespace] if !ok { return nil, fmt.Errorf("restore namespace mapping not found: %s", bkpvInfo.Namespace) } restoreNamespace := val + pvc := &v1.PersistentVolumeClaim{} + + if objects != nil { + // get corresponding pvc object from objects list + pvc, err = storkvolume.GetPVCFromObjects(objects, bkpvInfo) + if err != nil { + return nil, err + } + if !nonSupportedProvider { + for _, node := range nodes.Items { + zone := node.Labels[v1.LabelTopologyZone] + if zone == destFullZoneName { + pvc.Annotations[storageNodeAnnotation] = node.Name + } + } + } + pvc.Namespace = restoreNamespace + } else { + pvc.Name = bkpvInfo.PersistentVolumeClaim + pvc.Namespace = restoreNamespace + } volumeInfo.PersistentVolumeClaim = bkpvInfo.PersistentVolumeClaim volumeInfo.PersistentVolumeClaimUID = bkpvInfo.PersistentVolumeClaimUID volumeInfo.SourceNamespace = bkpvInfo.Namespace @@ -726,22 +719,21 @@ func (k *kdmp) StartRestore( // create VolumeBackup CR // Adding required label for debugging labels := make(map[string]string) - labels[applicationRestoreCRNameKey] = getValidLabel(restore.Name) - labels[applicationRestoreCRUIDKey] = getValidLabel(string(restore.UID)) - labels[pvcNameKey] = getValidLabel(bkpvInfo.PersistentVolumeClaim) - labels[pvcUIDKey] = getValidLabel(bkpvInfo.PersistentVolumeClaimUID) + labels[applicationRestoreCRNameKey] = utils.GetValidLabel(restore.Name) + labels[applicationRestoreCRUIDKey] = utils.GetValidLabel(string(restore.UID)) + labels[pvcNameKey] = utils.GetValidLabel(bkpvInfo.PersistentVolumeClaim) + labels[pvcUIDKey] = utils.GetValidLabel(bkpvInfo.PersistentVolumeClaimUID) // If restorefrom px-backup, update the restore object details in the label - if val, ok := restore.Annotations[pxbackupAnnotationCreateByKey]; ok { - if val == pxbackupAnnotationCreateByValue { - labels[restoreObjectNameKey] = 
getValidLabel(restore.Annotations[pxbackupObjectNameKey]) - labels[restoreObjectUIDKey] = getValidLabel(restore.Annotations[pxbackupObjectUIDKey]) + if val, ok := restore.Annotations[utils.PxbackupAnnotationCreateByKey]; ok { + if val == utils.PxbackupAnnotationCreateByValue { + labels[restoreObjectNameKey] = utils.GetValidLabel(restore.Annotations[utils.PxbackupObjectNameKey]) + labels[restoreObjectUIDKey] = utils.GetValidLabel(restore.Annotations[utils.PxbackupObjectUIDKey]) } } - volBackup := &kdmpapi.VolumeBackup{} volBackup.Labels = labels volBackup.Annotations = make(map[string]string) - volBackup.Annotations[skipResourceAnnotation] = "true" + volBackup.Annotations[utils.SkipResourceAnnotation] = "true" volBackup.Name = getGenericCRName(prefixRestore, string(restore.UID), bkpvInfo.PersistentVolumeClaimUID, restoreNamespace) volBackup.Namespace = restoreNamespace volBackup.Spec.BackupLocation = kdmpapi.DataExportObjectReference{ @@ -757,8 +749,6 @@ func (k *kdmp) StartRestore( return nil, err } - pvc.Namespace = restoreNamespace - backup, err := storkops.Instance().GetApplicationBackup(restore.Spec.BackupName, restore.Namespace) if err != nil { return nil, fmt.Errorf("unable to get applicationbackup cr %s/%s: %v", restore.Namespace, restore.Spec.BackupName, err) @@ -780,8 +770,8 @@ func (k *kdmp) StartRestore( } dataExport.Spec.TriggeredFromNs = storkPodNs dataExport.Annotations = make(map[string]string) - dataExport.Annotations[skipResourceAnnotation] = "true" - dataExport.Annotations[backupObjectUIDKey] = backupUID + dataExport.Annotations[utils.SkipResourceAnnotation] = "true" + dataExport.Annotations[utils.BackupObjectUIDKey] = backupUID dataExport.Annotations[pvcUIDKey] = bkpvInfo.PersistentVolumeClaimUID dataExport.Name = getGenericCRName(prefixRestore, string(restore.UID), bkpvInfo.PersistentVolumeClaimUID, restoreNamespace) dataExport.Namespace = restoreNamespace @@ -804,6 +794,7 @@ func (k *kdmp) StartRestore( Namespace: restoreNamespace, APIVersion: "v1", } + logrus.Tracef("%s de cr name [%v/%v]", funct, dataExport.Namespace, dataExport.Name) if _, err := kdmpShedOps.Instance().CreateDataExport(dataExport); err != nil { logrus.Errorf("failed to create DataExport CR: %v", err) return volumeInfos, err @@ -863,7 +854,6 @@ func (k *kdmp) GetRestoreStatus(restore *storkapi.ApplicationRestore) ([]*storka volumeInfos = append(volumeInfos, vInfo) continue } - if dataExport.Status.TransferID == "" { vInfo.Status = storkapi.ApplicationRestoreStatusInitial vInfo.Reason = "Volume restore not started yet" @@ -950,7 +940,7 @@ func (k *kdmp) CleanupBackupResources(backup *storkapi.ApplicationBackup) error if vInfo.DriverName != storkvolume.KDMPDriverName { continue } - crName := getGenericCRName(prefixBackup, string(backup.UID), vInfo.PersistentVolumeClaimUID, vInfo.Namespace) + crName := getGenericCRName(utils.PrefixBackup, string(backup.UID), vInfo.PersistentVolumeClaimUID, vInfo.Namespace) logrus.Infof("deleting data export CR: %s%s", vInfo.Namespace, crName) de, err := kdmpShedOps.Instance().GetDataExport(crName, vInfo.Namespace) if err != nil && !k8serror.IsNotFound(err) { @@ -1028,27 +1018,6 @@ func init() { } } -// getValidLabel - will validate the label to make sure the length is less 63 and contains valid label format. -// If the length is greater then 63, it will truncate to 63 character. 
-func getValidLabel(labelVal string) string { - if len(labelVal) > validation.LabelValueMaxLength { - labelVal = truncate.Truncate(labelVal, validation.LabelValueMaxLength, "", truncate.PositionEnd) - // make sure the truncated value does not end with the hyphen. - labelVal = strings.Trim(labelVal, "-") - // make sure the truncated value does not end with the dot. - labelVal = strings.Trim(labelVal, ".") - } - return labelVal -} - -// getShortUID returns the first part of the UID -func getShortUID(uid string) string { - if len(uid) < 8 { - return "" - } - return uid[0:7] -} - // getVolumeSnapshotClassFromBackupVolumeInfo returns the volumesnapshotclass if it is present func getVolumeSnapshotClassFromBackupVolumeInfo(bkvpInfo *storkapi.ApplicationBackupVolumeInfo) string { var vsClass string diff --git a/drivers/volume/linstor/linstor.go b/drivers/volume/linstor/linstor.go index 52ed06cf59..af84182f52 100644 --- a/drivers/volume/linstor/linstor.go +++ b/drivers/volume/linstor/linstor.go @@ -328,7 +328,7 @@ func (l *linstor) GetVolumeClaimTemplates(templates []v1.PersistentVolumeClaim) return linstorTemplates, nil } -func (l *linstor) OwnsPVCForBackup(coreOps core.Ops, pvc *v1.PersistentVolumeClaim, cmBackupType string, crBackupType string) bool { +func (l *linstor) OwnsPVCForBackup(coreOps core.Ops, pvc *v1.PersistentVolumeClaim, cmBackupType string, crBackupType string, blType storkapi.BackupLocationType) bool { return l.OwnsPVC(coreOps, pvc) } @@ -395,6 +395,7 @@ func (l *linstor) Stop() error { func (l *linstor) UpdateMigratedPersistentVolumeSpec( pv *v1.PersistentVolume, vInfo *storkapi.ApplicationRestoreVolumeInfo, + namespaceMapping map[string]string, ) (*v1.PersistentVolume, error) { return pv, nil diff --git a/drivers/volume/mock/mock.go b/drivers/volume/mock/mock.go index 7a75360280..ed6252a43b 100644 --- a/drivers/volume/mock/mock.go +++ b/drivers/volume/mock/mock.go @@ -275,7 +275,7 @@ func (m Driver) GetPodVolumes(podSpec *v1.PodSpec, namespace string, includePend } // OwnsPVCForBackup returns true because it owns all PVCs created by tests -func (m *Driver) OwnsPVCForBackup(coreOps core.Ops, pvc *v1.PersistentVolumeClaim, cmBackupType string, crBackupType string) bool { +func (m *Driver) OwnsPVCForBackup(coreOps core.Ops, pvc *v1.PersistentVolumeClaim, cmBackupType string, crBackupType string, blType storkapi.BackupLocationType) bool { return m.OwnsPVC(coreOps, pvc) } @@ -309,6 +309,7 @@ func (m *Driver) GetSnapshotType(snap *snapv1.VolumeSnapshot) (string, error) { func (m *Driver) UpdateMigratedPersistentVolumeSpec( pv *v1.PersistentVolume, vInfo *storkapi.ApplicationRestoreVolumeInfo, + namespaceMapping map[string]string, ) (*v1.PersistentVolume, error) { return pv, nil diff --git a/drivers/volume/portworx/portworx.go b/drivers/volume/portworx/portworx.go index 307e981cfe..c0217f75d7 100644 --- a/drivers/volume/portworx/portworx.go +++ b/drivers/volume/portworx/portworx.go @@ -165,17 +165,24 @@ const ( noToken = "notoken" // templatizedNamespace is the CSI templatized parameter for namespace templatizedNamespace = "${pvc.namespace}" - proxyEndpoint = "proxy_endpoint" - proxyPath = "proxy_nfs_exportpath" - pureBackendParam = "backend" - pureFileParam = "pure_file" - pureBlockParam = "pure_block" + // templatizedName is the CSI templatized parameter for name + templatizedName = "${pvc.name}" + proxyEndpoint = "proxy_endpoint" + proxyPath = "proxy_nfs_exportpath" + pureBackendParam = "backend" + pureFileParam = "pure_file" + pureBlockParam = "pure_block" statfsSOName = 
"px_statfs.so" statfsSODirInStork = "/" statfsSODirInVirtLauncher = "/etc" statfsConfigMapName = "px-statfs" statfsVolName = "px-statfs" + + nodePublishSecretName = "csi.storage.k8s.io/node-publish-secret-name" + controllerExpandSecretName = "csi.storage.k8s.io/controller-expand-secret-name" + nodePublishSecretNamespace = "csi.storage.k8s.io/node-publish-secret-namespace" + controllerExpandSecretNamespace = "csi.storage.k8s.io/controller-expand-secret-namespace" ) type cloudSnapStatus struct { @@ -375,9 +382,9 @@ func (p *portworx) initPortworxClients() error { return err } -// tokenGenerator generates authorization token for system.admin -// when shared secret is not configured authz token is empty string -// this let Openstorage API clients be bootstrapped with no authorization (by accepting empty token) +// tokenGenerator generates authorization token for system.admin +// when shared secret is not configured authz token is empty string +// this let Openstorage API clients be bootstrapped with no authorization (by accepting empty token) func (p *portworx) tokenGenerator() (string, error) { if len(p.jwtSharedSecret) == 0 { return "", nil @@ -669,7 +676,12 @@ func (p *portworx) GetClusterID() (string, error) { return cluster.Id, nil } -func (p *portworx) OwnsPVCForBackup(coreOps core.Ops, pvc *v1.PersistentVolumeClaim, cmBackupType string, crBackupType string) bool { +func (p *portworx) OwnsPVCForBackup(coreOps core.Ops, pvc *v1.PersistentVolumeClaim, cmBackupType string, crBackupType string, blType storkapi.BackupLocationType) bool { + // For portworx volume and backuplocation type is NFS, we will not own. + // It will default to kdmp + if blType == storkapi.BackupLocationNFS { + return false + } return p.IsSupportedPVC(coreOps, pvc, true) } @@ -1966,9 +1978,31 @@ func (p *portworx) DescribeSnapshot(snapshotData *crdv1.VolumeSnapshotData) (*[] return &snapConditions, true, err } -// TODO: Implement FindSnapshot +// FindSnapshot return snapshotdata source for created portworx snapshot +// Note: we wait for underlying driver snapshot creation in SnapshotCreate() only, +// This will be called by snapshotter every time new snapshot is created func (p *portworx) FindSnapshot(tags *map[string]string) (*crdv1.VolumeSnapshotDataSource, *[]crdv1.VolumeSnapshotCondition, error) { - return nil, nil, &errors.ErrNotImplemented{} + if tags == nil || len(*tags) == 0 { + return nil, nil, fmt.Errorf("empty tags received for snapshots") + } + name := (*tags)[snapshotter.CloudSnapshotCreatedForVolumeSnapshotNameTag] + namespace := (*tags)[snapshotter.CloudSnapshotCreatedForVolumeSnapshotNamespaceTag] + logrus.Infof("Find snapshotdata for snapshot: %s/%s", namespace, name) + if name == "" || namespace == "" { + return nil, nil, fmt.Errorf("empty snapshot metadata %s/%s", namespace, name) + } + snap, err := k8sextops.Instance().GetSnapshot(name, namespace) + if err != nil { + return nil, nil, fmt.Errorf("unable to retrieve snapshot %s/%s, err: %v", namespace, name, err) + } + snapData, err := k8sextops.Instance().GetSnapshotData(snap.Spec.SnapshotDataName) + if err != nil { + // this means that snapshotdata object by external snapshotter is not created + // lets return nil so that reconciler call findsnapshot() again + return nil, nil, fmt.Errorf("unable to retrieve snapshotdata object %s/%s, err: %v", namespace, name, err) + } + logrus.Debugf("Found snapshotdata for snapshot: %s/%s", namespace, name) + return &snapData.Spec.VolumeSnapshotDataSource, &snap.Status.Conditions, nil } func (p *portworx) 
GetSnapshotType(snap *crdv1.VolumeSnapshot) (string, error) { @@ -2421,7 +2455,7 @@ func (p *portworx) StartMigration(migration *storkapi.Migration) ([]*storkapi.Mi return nil, fmt.Errorf("error getting list of volumes to migrate: %v", err) } for _, pvc := range pvcList.Items { - if !p.OwnsPVC(core.Instance(), &pvc) { + if !p.IsSupportedPVC(core.Instance(), &pvc, true) { continue } if resourcecollector.SkipResource(pvc.Annotations) { @@ -2570,7 +2604,77 @@ func (p *portworx) CancelMigration(migration *storkapi.Migration) error { func (p *portworx) UpdateMigratedPersistentVolumeSpec( pv *v1.PersistentVolume, vInfo *storkapi.ApplicationRestoreVolumeInfo, + namespaceMapping map[string]string, ) (*v1.PersistentVolume, error) { + // Look up the PV's storage class to get the provisioner and decide whether to add a CSI section. + if len(pv.Spec.StorageClassName) != 0 { + sc, err := storage.Instance().GetStorageClass(pv.Spec.StorageClassName) + if err != nil { + logrus.Warnf("failed to get the storage class [%v]: %v", pv.Spec.StorageClassName, err) + } + if sc != nil { + if isCsiProvisioner(sc.Provisioner) { + // add csi section in the pv spec + if pv.Spec.CSI == nil { + pv.Spec.CSI = &v1.CSIPersistentVolumeSource{} + } + // get the destination namespace + var dstNamespace string + var exists bool + if dstNamespace, exists = namespaceMapping[vInfo.SourceNamespace]; !exists { + dstNamespace = vInfo.SourceNamespace + } + // Update the controller expand secret + if val, ok := sc.Parameters[controllerExpandSecretName]; ok { + if pv.Spec.CSI.ControllerExpandSecretRef == nil { + pv.Spec.CSI.ControllerExpandSecretRef = &v1.SecretReference{} + } + if val == templatizedName { + pv.Spec.CSI.ControllerExpandSecretRef.Name = vInfo.PersistentVolumeClaim + } else { + pv.Spec.CSI.ControllerExpandSecretRef.Name = val + } + } + if val, ok := sc.Parameters[controllerExpandSecretNamespace]; ok { + if pv.Spec.CSI.ControllerExpandSecretRef == nil { + pv.Spec.CSI.ControllerExpandSecretRef = &v1.SecretReference{} + } + if val == templatizedNamespace { + pv.Spec.CSI.ControllerExpandSecretRef.Namespace = dstNamespace + } else { + pv.Spec.CSI.ControllerExpandSecretRef.Namespace = val + } + } + + // Update the node publish secret + if val, ok := sc.Parameters[nodePublishSecretName]; ok { + if pv.Spec.CSI.NodePublishSecretRef == nil { + pv.Spec.CSI.NodePublishSecretRef = &v1.SecretReference{} + } + if val == templatizedName { + pv.Spec.CSI.NodePublishSecretRef.Name = vInfo.PersistentVolumeClaim + } else { + pv.Spec.CSI.NodePublishSecretRef.Name = val + } + } + if val, ok := sc.Parameters[nodePublishSecretNamespace]; ok { + if pv.Spec.CSI.NodePublishSecretRef == nil { + pv.Spec.CSI.NodePublishSecretRef = &v1.SecretReference{} + } + if val == templatizedNamespace { + pv.Spec.CSI.NodePublishSecretRef.Namespace = dstNamespace + } else { + pv.Spec.CSI.NodePublishSecretRef.Namespace = val + } + } + + // Update driver (provisioner) name + pv.Spec.CSI.Driver = sc.Provisioner + // For CSI, set pv.Spec.PortworxVolume to nil since the CSI section now describes the volume. + pv.Spec.PortworxVolume = nil + } + } + } if pv.Spec.CSI != nil { pv.Spec.CSI.VolumeHandle = pv.Name diff --git a/drivers/volume/volume.go b/drivers/volume/volume.go index 916194901e..9f6eac2111 100644 --- a/drivers/volume/volume.go +++ b/drivers/volume/volume.go @@ -120,7 +120,7 @@ type Driver interface { // OwnsPVCForBackup returns true if the PVC is owned by the driver // Since we have extra check need to done for backup case, added seperate version of API. 
- OwnsPVCForBackup(coreOps core.Ops, pvc *v1.PersistentVolumeClaim, cmBackupType string, crBackupType string) bool + OwnsPVCForBackup(coreOps core.Ops, pvc *v1.PersistentVolumeClaim, cmBackupType string, crBackupType string, blType storkapi.BackupLocationType) bool // OwnsPV returns true if the PV is owned by the driver OwnsPV(pvc *v1.PersistentVolume) bool @@ -192,7 +192,7 @@ type MigratePluginInterface interface { CancelMigration(*storkapi.Migration) error // Update the PVC spec to point to the migrated volume on the destination // cluster - UpdateMigratedPersistentVolumeSpec(*v1.PersistentVolume, *storkapi.ApplicationRestoreVolumeInfo) (*v1.PersistentVolume, error) + UpdateMigratedPersistentVolumeSpec(*v1.PersistentVolume, *storkapi.ApplicationRestoreVolumeInfo, map[string]string) (*v1.PersistentVolume, error) } // ClusterDomainsPluginInterface Interface to manage cluster domains @@ -342,13 +342,14 @@ func GetPVCDriverForBackup(coreOps core.Ops, pvc *v1.PersistentVolumeClaim, cmBackupType string, crBackupType string, + blType storkapi.BackupLocationType, ) (string, error) { for _, driverName := range orderedListOfDrivers { d, ok := volDrivers[driverName] if !ok { continue } - if d.OwnsPVCForBackup(coreOps, pvc, cmBackupType, crBackupType) { + if d.OwnsPVCForBackup(coreOps, pvc, cmBackupType, crBackupType, blType) { return driverName, nil } } diff --git a/go.mod b/go.mod index 4a4b6ad47e..8ce6af6402 100644 --- a/go.mod +++ b/go.mod @@ -27,9 +27,9 @@ require ( github.com/openshift/api v0.0.0-20210105115604-44119421ec6b github.com/openshift/client-go v0.0.0-20210112165513-ebc401615f47 github.com/pborman/uuid v1.2.0 - github.com/portworx/kdmp v0.4.1-0.20220710173715-5d42efc7d149 - github.com/portworx/px-object-controller v0.0.0-20220727220448-306ddfd28652 - github.com/portworx/sched-ops v1.20.4-rc1.0.20220401024625-dbc61a336f65 + github.com/portworx/kdmp v0.4.1-0.20230523125352-aae43abbbd80 + github.com/portworx/px-object-controller v0.0.0-20220804234424-40d3b8a84987 + github.com/portworx/sched-ops v1.20.4-rc1.0.20221102055014-b3a55a3df5c8 github.com/portworx/torpedo v0.20.4-rc1.0.20210325154352-eb81b0cdd145 github.com/prometheus/client_golang v1.11.0 github.com/sirupsen/logrus v1.8.1 @@ -42,21 +42,20 @@ require ( gocloud.dev v0.20.0 golang.org/x/oauth2 v0.0.0-20220309155454-6242fa91716a google.golang.org/api v0.30.0 - google.golang.org/grpc v1.43.0 - google.golang.org/grpc/examples v0.0.0-20220715232852-f601dfac73c9 // indirect + google.golang.org/grpc v1.48.0 gopkg.in/yaml.v2 v2.4.0 k8s.io/api v0.24.0 - k8s.io/apiextensions-apiserver v0.21.4 + k8s.io/apiextensions-apiserver v0.21.5 k8s.io/apimachinery v0.24.3 - k8s.io/apiserver v0.21.4 - k8s.io/cli-runtime v0.21.4 + k8s.io/apiserver v0.21.5 + k8s.io/cli-runtime v0.21.5 k8s.io/client-go v12.0.0+incompatible - k8s.io/code-generator v0.21.4 - k8s.io/component-helpers v0.21.4 + k8s.io/code-generator v0.22.1 + k8s.io/component-helpers v0.24.0 k8s.io/kube-scheduler v0.0.0 - k8s.io/kubectl v0.21.4 - k8s.io/kubernetes v1.21.4 - sigs.k8s.io/controller-runtime v0.9.0 + k8s.io/kubectl v0.21.5 + k8s.io/kubernetes v1.21.5 + sigs.k8s.io/controller-runtime v0.9.7 sigs.k8s.io/gcp-compute-persistent-disk-csi-driver v0.7.0 sigs.k8s.io/sig-storage-lib-external-provisioner/v6 v6.3.0 ) @@ -72,41 +71,41 @@ replace ( //github.com/heptio/ark => github.com/heptio/ark v1.0.0 github.com/heptio/velero => github.com/heptio/velero v1.0.0 github.com/kubernetes-csi/external-snapshotter/client/v4 => github.com/kubernetes-csi/external-snapshotter/client/v4 v4.0.0 - 
github.com/kubernetes-incubator/external-storage => github.com/libopenstorage/external-storage v0.20.4-openstorage-rc7 - github.com/kubernetes-incubator/external-storage v0.20.4-openstorage-rc7 => github.com/libopenstorage/external-storage v0.20.4-openstorage-rc7 + github.com/kubernetes-incubator/external-storage => github.com/libopenstorage/external-storage v0.20.4-openstorage-rc10 + github.com/kubernetes-incubator/external-storage v0.20.4-openstorage-rc10 => github.com/libopenstorage/external-storage v0.20.4-openstorage-rc10 github.com/libopenstorage/autopilot-api => github.com/libopenstorage/autopilot-api v0.6.1-0.20210301232050-ca2633c6e114 github.com/libopenstorage/openstorage => github.com/libopenstorage/openstorage v1.0.1-0.20220707215604-afbea03c04c5 - github.com/portworx/sched-ops => github.com/portworx/sched-ops v1.20.4-rc1.0.20220714042759-8f183fe386ca + github.com/portworx/sched-ops => github.com/portworx/sched-ops v1.20.4-rc1.0.20221102055014-b3a55a3df5c8 github.com/portworx/torpedo => github.com/portworx/torpedo v0.0.0-20220714042817-25f6ab6dc5d1 gopkg.in/fsnotify.v1 v1.4.7 => github.com/fsnotify/fsnotify v1.4.7 - helm.sh/helm/v3 => helm.sh/helm/v3 v3.6.0 + helm.sh/helm/v3 => helm.sh/helm/v3 v3.6.1 - k8s.io/api => k8s.io/api v0.21.4 - k8s.io/apiextensions-apiserver => k8s.io/apiextensions-apiserver v0.21.4 - k8s.io/apimachinery => k8s.io/apimachinery v0.21.4 - k8s.io/apiserver => k8s.io/apiserver v0.21.4 - k8s.io/cli-runtime => k8s.io/cli-runtime v0.21.4 - k8s.io/client-go => k8s.io/client-go v0.21.4 - k8s.io/cloud-provider => k8s.io/cloud-provider v0.21.4 - k8s.io/cluster-bootstrap => k8s.io/cluster-bootstrap v0.21.4 - k8s.io/code-generator => k8s.io/code-generator v0.21.4 - k8s.io/component-base => k8s.io/component-base v0.21.4 - k8s.io/component-helpers => k8s.io/component-helpers v0.21.4 - k8s.io/controller-manager => k8s.io/controller-manager v0.21.4 - k8s.io/cri-api => k8s.io/cri-api v0.21.4 - k8s.io/csi-translation-lib => k8s.io/csi-translation-lib v0.21.4 - k8s.io/klog/v2 => k8s.io/klog/v2 v2.4.0 - k8s.io/kube-aggregator => k8s.io/kube-aggregator v0.21.4 - k8s.io/kube-controller-manager => k8s.io/kube-controller-manager v0.21.4 - k8s.io/kube-proxy => k8s.io/kube-proxy v0.21.4 - k8s.io/kube-scheduler => k8s.io/kube-scheduler v0.21.4 - k8s.io/kubectl => k8s.io/kubectl v0.21.4 - k8s.io/kubelet => k8s.io/kubelet v0.21.4 - k8s.io/kubernetes => k8s.io/kubernetes v1.21.4 - k8s.io/legacy-cloud-providers => k8s.io/legacy-cloud-providers v0.21.4 - k8s.io/metrics => k8s.io/metrics v0.21.4 - k8s.io/mount-utils => k8s.io/mount-utils v0.21.4 - k8s.io/sample-apiserver => k8s.io/sample-apiserver v0.21.4 - sigs.k8s.io/controller-runtime => sigs.k8s.io/controller-runtime v0.9.0 + k8s.io/api => k8s.io/api v0.21.5 + k8s.io/apiextensions-apiserver => k8s.io/apiextensions-apiserver v0.21.5 + k8s.io/apimachinery => k8s.io/apimachinery v0.21.5 + k8s.io/apiserver => k8s.io/apiserver v0.21.5 + k8s.io/cli-runtime => k8s.io/cli-runtime v0.21.5 + k8s.io/client-go => k8s.io/client-go v0.21.5 + k8s.io/cloud-provider => k8s.io/cloud-provider v0.21.5 + k8s.io/cluster-bootstrap => k8s.io/cluster-bootstrap v0.21.5 + k8s.io/code-generator => k8s.io/code-generator v0.21.5 + k8s.io/component-base => k8s.io/component-base v0.21.5 + k8s.io/component-helpers => k8s.io/component-helpers v0.24.0 + k8s.io/controller-manager => k8s.io/controller-manager v0.21.5 + k8s.io/cri-api => k8s.io/cri-api v0.21.5 + k8s.io/csi-translation-lib => k8s.io/csi-translation-lib v0.21.5 + k8s.io/klog/v2 => k8s.io/klog/v2 v2.8.0 + 
k8s.io/kube-aggregator => k8s.io/kube-aggregator v0.21.5 + k8s.io/kube-controller-manager => k8s.io/kube-controller-manager v0.21.5 + k8s.io/kube-proxy => k8s.io/kube-proxy v0.21.5 + k8s.io/kube-scheduler => k8s.io/kube-scheduler v0.21.5 + k8s.io/kubectl => k8s.io/kubectl v0.21.5 + k8s.io/kubelet => k8s.io/kubelet v0.21.5 + k8s.io/kubernetes => k8s.io/kubernetes v1.21.5 + k8s.io/legacy-cloud-providers => k8s.io/legacy-cloud-providers v0.21.5 + k8s.io/metrics => k8s.io/metrics v0.21.5 + k8s.io/mount-utils => k8s.io/mount-utils v0.21.5 + k8s.io/sample-apiserver => k8s.io/sample-apiserver v0.21.5 + sigs.k8s.io/controller-runtime => sigs.k8s.io/controller-runtime v0.9.6 sigs.k8s.io/sig-storage-lib-external-provisioner/v6 => sigs.k8s.io/sig-storage-lib-external-provisioner/v6 v6.3.0 ) diff --git a/go.sum b/go.sum index 83fda5f772..fc2b6f26ec 100644 --- a/go.sum +++ b/go.sum @@ -266,6 +266,8 @@ github.com/aws/aws-sdk-go-v2 v0.18.0/go.mod h1:JWVYvqSMppoMJC0x5wdwiImzgXTI9FuZw github.com/baiyubin/aliyun-sts-go-sdk v0.0.0-20180326062324-cfa1a18b161f/go.mod h1:AuiFmCCPBSrqvVMvuqFuk0qogytodnVFVSN5CeJB8Gc= github.com/banzaicloud/k8s-objectmatcher v1.5.1/go.mod h1:9MWY5HsM/OaTmoTirczhlO8UALbH722WgdpaaR7Y8OE= github.com/bazelbuild/buildtools v0.0.0-20190917191645-69366ca98f89/go.mod h1:5JP0TXzWDHXv8qvxRC4InIazwdyDseBDbzESUMKk1yU= +github.com/benbjohnson/clock v1.1.0 h1:Q92kusRqC1XV2MjkWETPvjJVqKetz1OzxZB7mHJLju8= +github.com/benbjohnson/clock v1.1.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA= github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8= github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= @@ -275,6 +277,7 @@ github.com/bifurcation/mint v0.0.0-20180715133206-93c51c6ce115/go.mod h1:zVt7zX3 github.com/bitly/go-hostpool v0.0.0-20171023180738-a3a6125de932/go.mod h1:NOuUCSz6Q9T7+igc/hlvDOUdtWKryOrtFyIVABv/p7k= github.com/bitly/go-simplejson v0.5.0 h1:6IH+V8/tVMab511d5bn4M7EwGXZf9Hj6i2xSwkNEM+Y= github.com/bitly/go-simplejson v0.5.0/go.mod h1:cXHtHw4XUPsvGaxgjIAn8PhEWG9NfngEKAMDJEczWVA= +github.com/bits-and-blooms/bitset v1.2.0/go.mod h1:gIdJ4wp64HaoK2YrL1Q5/N7Y16edYb8uY+O0FJTyyDA= github.com/bketelsen/crypt v0.0.3-0.20200106085610-5cbc8cc4026c/go.mod h1:MKsuJmJgSg28kpZDP6UIiPt0e0Oz0kqKNGyRaWEPv84= github.com/blang/semver v3.5.0+incompatible/go.mod h1:kRBLl5iJ+tD4TcOOxsy/0fnwebNt5EWlYSAyrTnjyyk= github.com/blang/semver v3.5.1+incompatible h1:cQNTCjp13qL8KC3Nbxr/y2Bqb63oX6wdnnjpJbkM4JQ= @@ -317,7 +320,7 @@ github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMn github.com/cilium/ebpf v0.0.0-20200110133405-4032b1d8aae3/go.mod h1:MA5e5Lr8slmEg9bt0VpxxWqJlO4iwu3FBdHUzV7wQVg= github.com/cilium/ebpf v0.2.0/go.mod h1:To2CFviqOWL/M0gIMsvSMlqe7em/l1ALkX1PyjrX2Qs= github.com/cilium/ebpf v0.4.0/go.mod h1:4tRaxcgiL706VnOzHOdBlY8IEAIdxINsQBcU4xJJXRs= -github.com/cilium/ebpf v0.5.0/go.mod h1:4tRaxcgiL706VnOzHOdBlY8IEAIdxINsQBcU4xJJXRs= +github.com/cilium/ebpf v0.6.2/go.mod h1:4tRaxcgiL706VnOzHOdBlY8IEAIdxINsQBcU4xJJXRs= github.com/cilium/ebpf v0.7.0/go.mod h1:/oI2+1shJiTGAMgl6/RgJr36Eo1jzrRcAWbcXO2usCA= github.com/circonus-labs/circonus-gometrics v2.3.1+incompatible/go.mod h1:nmEj6Dob7S7YxXgwXpfOuvO54S+tGdZdw9fuRZt25Ag= github.com/circonus-labs/circonusllhist v0.1.3/go.mod h1:kMXHVDlOchFAehlya5ePtbp5jckzBHf4XRpQvBOLI+I= @@ -335,6 +338,7 @@ github.com/cncf/udpa/go v0.0.0-20210930031921-04548b0d99d4/go.mod 
h1:6pvJx4me5XP github.com/cncf/xds/go v0.0.0-20210312221358-fbca930ec8ed/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= github.com/cncf/xds/go v0.0.0-20210805033703-aa0b78936158/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= github.com/cncf/xds/go v0.0.0-20210922020428-25de7278fc84/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= +github.com/cncf/xds/go v0.0.0-20211001041855-01bcc9b48dfe/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= github.com/cncf/xds/go v0.0.0-20211011173535-cb28da3451f1/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= github.com/cockroachdb/apd v1.1.0/go.mod h1:8Sl8LxpKi29FqWXR16WEFZRNSz3SoPzUzeMeY4+DwBQ= github.com/cockroachdb/cockroach-go v0.0.0-20181001143604-e0a95dfd547c/go.mod h1:XGLbWH/ujMcbPbhZq52Nv6UrCghb1yGn//133kEsvDk= @@ -398,7 +402,6 @@ github.com/coreos/go-systemd v0.0.0-20191104093116-d3cd4ed1dbcf h1:iW4rZ826su+pq github.com/coreos/go-systemd v0.0.0-20191104093116-d3cd4ed1dbcf/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= github.com/coreos/go-systemd/v22 v22.0.0/go.mod h1:xO0FLkIi5MaZafQlIrOotqXZ90ih+1atmu1JpKERPPk= github.com/coreos/go-systemd/v22 v22.1.0/go.mod h1:xO0FLkIi5MaZafQlIrOotqXZ90ih+1atmu1JpKERPPk= -github.com/coreos/go-systemd/v22 v22.3.1/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc= github.com/coreos/go-systemd/v22 v22.3.2 h1:D9/bQk5vlXQFZ6Kwuu6zaiXJ9oTPe68++AzAJc1DzSI= github.com/coreos/go-systemd/v22 v22.3.2/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc= github.com/coreos/pkg v0.0.0-20160727233714-3ac0863d7acf/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA= @@ -481,6 +484,7 @@ github.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad/go.m github.com/envoyproxy/go-control-plane v0.9.9-0.20210217033140-668b12f5399d/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk= github.com/envoyproxy/go-control-plane v0.9.9-0.20210512163311-63b5d3c536b0/go.mod h1:hliV/p42l8fGbc6Y9bQ70uLwIvmJyVE5k4iMKlh8wCQ= github.com/envoyproxy/go-control-plane v0.9.10-0.20210907150352-cf90f659a021/go.mod h1:AFq3mo9L8Lqqiid3OhADV3RfLJnjiw63cSpi+fDTRC0= +github.com/envoyproxy/go-control-plane v0.10.2-0.20220325020618-49ff273808a1/go.mod h1:KJwIaB5Mv44NWtYuAOFCVOjcI94vtpEz2JU/D2v6IjE= github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= github.com/erikstmartin/go-testdb v0.0.0-20160219214506-8d10e4a1bae5/go.mod h1:a2zkGnVExMxdzMo3M0Hi/3sEU+cWnZpSni0O6/Yb/P0= github.com/euank/go-kmsg-parser v2.0.0+incompatible/go.mod h1:MhmAMZ8V4CYH4ybgdRwPr2TU5ThnS43puaKEMpja1uw= @@ -1104,8 +1108,8 @@ github.com/libopenstorage/autopilot-api v0.6.1-0.20210301232050-ca2633c6e114/go. 
github.com/libopenstorage/cloudops v0.0.0-20190815012442-6e0d676b6c3e/go.mod h1:quSDXGC3Fhc+pBwMRIi1Gk+kaSfBDZo5rRsftapTzGE= github.com/libopenstorage/cloudops v0.0.0-20200604165016-9cc0977d745e/go.mod h1:5Qie78eVLLXqLkLCq1+0HyJzjpdRCHyeg9LWlU0WPfU= github.com/libopenstorage/cloudops v0.0.0-20220420143942-8bdd341e5b41/go.mod h1:zigCEUGrJZbK/1FN6+SHMuMjS6vjeSKxuo0G4Ars4Cg= -github.com/libopenstorage/external-storage v0.20.4-openstorage-rc7 h1:mHp7bfGyHwG4P8dhHEMJ775KLmcjv3tcA2Uc+5nGpXg= -github.com/libopenstorage/external-storage v0.20.4-openstorage-rc7/go.mod h1:nffpoeodwwp+wwngmBGbLBCd7TZ9GxHLtxKoaLRW6K4= +github.com/libopenstorage/external-storage v0.20.4-openstorage-rc10 h1:q21CLGSi9DhNBBuJuitquA/T6FwLV3KNZxaJpxQbOLc= +github.com/libopenstorage/external-storage v0.20.4-openstorage-rc10/go.mod h1:nffpoeodwwp+wwngmBGbLBCd7TZ9GxHLtxKoaLRW6K4= github.com/libopenstorage/gossip v0.0.0-20190507031959-c26073a01952/go.mod h1:TjXt2Iz2bTkpfc4Q6xN0ttiNipTVwEEYoZSMZHlfPek= github.com/libopenstorage/gossip v0.0.0-20200808224301-d5287c7c8b24/go.mod h1:TjXt2Iz2bTkpfc4Q6xN0ttiNipTVwEEYoZSMZHlfPek= github.com/libopenstorage/gossip v0.0.0-20220309192431-44c895e0923e h1:4l9N2Sw8VGGUqe50yC2BnTFMRJuHJGpIGZcCUZ2S6gg= @@ -1131,6 +1135,9 @@ github.com/libopenstorage/stork v1.4.1-0.20211103064004-088d8fdeaa37/go.mod h1:I github.com/libopenstorage/stork v1.4.1-0.20211113171730-e02f28e240e9/go.mod h1:NTt7xK9DqWpXLEBJI4WEz/XTUG3EkW0zcqyOMO5Xp2w= github.com/libopenstorage/stork v1.4.1-0.20220323180113-0ea773109d05/go.mod h1:h+tscSChqPpry+lUHJYFqC+Gk0JY/qi6eCkUJYBo0wQ= github.com/libopenstorage/stork v1.4.1-0.20220414104250-3c18fd21ed95/go.mod h1:yE94X0xBFSBQ9LvvJ/zppc4+XeiCAXtsHfYHm15dlcA= +github.com/libopenstorage/stork v1.4.1-0.20220902043617-635e642468d0/go.mod h1:oQ0lteROzRCxHMvESCSyOiY/9oqgO3Qrvfs5LI/jVCA= +github.com/libopenstorage/stork v1.4.1-0.20220902111346-9dbf76d2db2c/go.mod h1:KNG/pkhMCdKXXFr0nKtYybWCx2ggLCoi+I7Onylwl64= +github.com/libopenstorage/stork v1.4.1-0.20221103082056-65abc8cc4e80/go.mod h1:yX+IlCrUsZekC6zxL6zHE7sBPKIudubHB3EcImzeRbI= github.com/libopenstorage/systemutils v0.0.0-20160208220149-44ac83be3ce1 h1:5vqfYYWm4b+lbkMtvvWtWBiqLbmLN6dNvWaa7wVsz/Q= github.com/libopenstorage/systemutils v0.0.0-20160208220149-44ac83be3ce1/go.mod h1:xwNGC7xiz/BQ/wbMkvHujL8Gjgseg+x41xMek7sKRRQ= github.com/liggitt/tabwriter v0.0.0-20181228230101-89fcab3d43de h1:9TO3cAIGXtEhnIaL+V+BEER86oLrvS+kWobKpbJuye0= @@ -1311,7 +1318,6 @@ github.com/onsi/ginkgo v1.11.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+ github.com/onsi/ginkgo v1.12.0/go.mod h1:oUhWkIvk5aDxtKvDDuw8gItl8pKl42LzjC9KZE0HfGg= github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk= github.com/onsi/ginkgo v1.15.0/go.mod h1:hF8qUzuuC8DJGygJH3726JnCZX4MYbRB8yFfISqnKUg= -github.com/onsi/ginkgo v1.16.2/go.mod h1:CObGmKUOKaSC0RjmoAK7tKyn4Azo5P2IWuoMnvwxz1E= github.com/onsi/ginkgo v1.16.4/go.mod h1:dX+/inL/fNMqNlz0e9LfyB9TswhZpCVdJM/Z6Vvnwo0= github.com/onsi/ginkgo v1.16.5 h1:8xi0RTUf59SOSfEtZMvwTvXYMzG4gV23XVHOZiXNtnE= github.com/onsi/ginkgo v1.16.5/go.mod h1:+E8gABHa3K6zRBolWtd+ROzc/U5bkGt0FwiG042wbpU= @@ -1327,7 +1333,7 @@ github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7J github.com/onsi/gomega v1.9.0/go.mod h1:Ho0h+IUsWyvy1OpqCwxlQ/21gkhVunqlU8fDGcoTdcA= github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo= github.com/onsi/gomega v1.10.5/go.mod h1:gza4q3jKQJijlu05nKWRCW/GavJumGt8aNRxWg7mt48= -github.com/onsi/gomega v1.13.0/go.mod 
h1:lRk9szgn8TxENtWd0Tp4c3wjlRfMTMH27I+3Je41yGY= +github.com/onsi/gomega v1.14.0/go.mod h1:cIuvLEne0aoVhAgh/O6ac0Op8WWw9H6eYCriF+tEHG0= github.com/onsi/gomega v1.16.0/go.mod h1:HnhC7FXeEQY45zxNK3PPoIUhzk/80Xly9PcubAlGdZY= github.com/onsi/gomega v1.17.0/go.mod h1:HnhC7FXeEQY45zxNK3PPoIUhzk/80Xly9PcubAlGdZY= github.com/onsi/gomega v1.19.0 h1:4ieX6qQjPP/BfC3mpsAtIGGlxTWPeA3Inl/7DtXw1tw= @@ -1343,7 +1349,7 @@ github.com/opencontainers/image-spec v1.0.3-0.20211202183452-c5a74bcca799/go.mod github.com/opencontainers/runc v0.0.0-20190115041553-12f6a991201f/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rmF59PVA73FjuZG0U= github.com/opencontainers/runc v0.1.1/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rmF59PVA73FjuZG0U= github.com/opencontainers/runc v1.0.0-rc93/go.mod h1:3NOsor4w32B2tC0Zbl8Knk4Wg84SM2ImC1fxBuqJ/H0= -github.com/opencontainers/runc v1.0.0-rc95/go.mod h1:z+bZxa/+Tz/FmYVWkhUajJdzFeOqjc5vrqskhVyHGUM= +github.com/opencontainers/runc v1.0.2/go.mod h1:aTaHFFwQXuA71CiyxOdFFIorAoemI04suvGRQFzWTD0= github.com/opencontainers/runc v1.1.3 h1:vIXrkId+0/J2Ymu2m7VjGvbSlAId9XNRPhn2p4b+d8w= github.com/opencontainers/runc v1.1.3/go.mod h1:1J5XiS+vdZ3wCyZybsuxXZWGrgSr8fFJHLXuG2PsnNg= github.com/opencontainers/runtime-spec v1.0.2/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= @@ -1351,6 +1357,7 @@ github.com/opencontainers/runtime-spec v1.0.3-0.20200929063507-e6143ca7d51d/go.m github.com/opencontainers/runtime-spec v1.0.3-0.20210326190908-1c3f411f0417 h1:3snG66yBm59tKhhSPQrQ/0bCrv1LQbKt40LnUPiUxdc= github.com/opencontainers/runtime-spec v1.0.3-0.20210326190908-1c3f411f0417/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= github.com/opencontainers/selinux v1.8.0/go.mod h1:RScLhm78qiWa2gbVCcGkC7tCGdgk3ogry1nUQF8Evvo= +github.com/opencontainers/selinux v1.8.2/go.mod h1:MUIHuUEvKB1wtJjQdOyYRgOnLD2xAPP8dBsCoU0KuF8= github.com/opencontainers/selinux v1.10.0/go.mod h1:2i0OySw99QjzBBQByd1Gr9gSjvuho1lHsJxIJ3gGbJI= github.com/opencontainers/selinux v1.10.1 h1:09LIPVRP3uuZGQvgR+SgMSNBd1Eb3vlRbGqQpoHsF8w= github.com/opencontainers/selinux v1.10.1/go.mod h1:2i0OySw99QjzBBQByd1Gr9gSjvuho1lHsJxIJ3gGbJI= @@ -1414,18 +1421,23 @@ github.com/portworx/kdmp v0.4.1-0.20211103043446-cc5455f203d0/go.mod h1:BZ9ApnLF github.com/portworx/kdmp v0.4.1-0.20211108115338-ba2bebf06ffb/go.mod h1:cbaFBCLFTtF0taXtGR2zGD89k0gl7fNl+n4Vi9p4gmI= github.com/portworx/kdmp v0.4.1-0.20220309093511-f7b925b9e53e/go.mod h1:RAXbeaO/JmwQPRJCDdOoY/UsmGPY/awWsL4FbDOqAVk= github.com/portworx/kdmp v0.4.1-0.20220414053457-962507678379/go.mod h1:EAVroITfYd50a0vi/ScAILl6h5RYJteuO/pg1y3vNNw= -github.com/portworx/kdmp v0.4.1-0.20220710173715-5d42efc7d149 h1:KaRMV5hWbl7raiTFo20AZaXSIBBKCadzBmrXfwU+Id0= github.com/portworx/kdmp v0.4.1-0.20220710173715-5d42efc7d149/go.mod h1:nb5AupP/63ByyqAYfZ+E32LDEnP0PjgH6w+yKXxWIgE= +github.com/portworx/kdmp v0.4.1-0.20220902105026-dc14791e1508/go.mod h1:sTO9LkPkExEVE6BqowIzkrQsyBtGdaC4Vh1AcKQ4xZA= +github.com/portworx/kdmp v0.4.1-0.20220905153748-e0bb69e59f38/go.mod h1:NI2UgLITtggRvvaRA7lE4+Np4htDp+06Jf1LWksKyGA= +github.com/portworx/kdmp v0.4.1-0.20230523115608-d3dc39fab560 h1:VfFUh5ZwZ+4xhDEuG2Rh/p9Rm+9mXicRuVd4j7nNIEw= +github.com/portworx/kdmp v0.4.1-0.20230523115608-d3dc39fab560/go.mod h1:6XtJRBuPdSrnKuPD2vKLsVHbkKpF/5M/N2mAKP5hnqw= +github.com/portworx/kdmp v0.4.1-0.20230523125352-aae43abbbd80 h1:XxGZ/txfyifpEUXb5ecMakrcZfYjzPHHHqS00VdRdww= +github.com/portworx/kdmp v0.4.1-0.20230523125352-aae43abbbd80/go.mod h1:6XtJRBuPdSrnKuPD2vKLsVHbkKpF/5M/N2mAKP5hnqw= github.com/portworx/kvdb v0.0.0-20190105022415-cccaa09abfc9/go.mod 
h1:Q8YyrNDvPp3DVF96BDcQuaC7fAYUCuUX+l58S7OnD2M= github.com/portworx/kvdb v0.0.0-20200723230726-2734b7f40194/go.mod h1:Q8YyrNDvPp3DVF96BDcQuaC7fAYUCuUX+l58S7OnD2M= github.com/portworx/kvdb v0.0.0-20200929023115-b312c7519467 h1:jkqzdbOnejgSN5HG/FLt4enNrozWT/K+nlmaRm3P1II= github.com/portworx/kvdb v0.0.0-20200929023115-b312c7519467/go.mod h1:Q8YyrNDvPp3DVF96BDcQuaC7fAYUCuUX+l58S7OnD2M= github.com/portworx/px-backup-api v1.2.2-0.20210917042806-f2b0725444af/go.mod h1:3+gfGSSmuF1pO9qkOuKiLWpiTKDXpijSg4VNgluGUX0= -github.com/portworx/px-object-controller v0.0.0-20220727220448-306ddfd28652 h1:Kv8k3Zw4hVHcw1zGsWgDlIaG/gUUXLu47PWIz6CCF2g= -github.com/portworx/px-object-controller v0.0.0-20220727220448-306ddfd28652/go.mod h1:g3pw2lI2AjqAixUCRhaBdKTY98znsCPR7NGRrlpimVU= +github.com/portworx/px-object-controller v0.0.0-20220804234424-40d3b8a84987 h1:VNBTmIPjJRZ2QP64zdsrif3ELDHiMzoyNNX74VNHgZ8= +github.com/portworx/px-object-controller v0.0.0-20220804234424-40d3b8a84987/go.mod h1:g3pw2lI2AjqAixUCRhaBdKTY98znsCPR7NGRrlpimVU= github.com/portworx/pxc v0.33.0/go.mod h1:Tl7hf4K2CDr0XtxzM08sr9H/KsMhscjf9ydb+MnT0U4= -github.com/portworx/sched-ops v1.20.4-rc1.0.20220714042759-8f183fe386ca h1:jrjwiQdqgDRsQZuiRDaWsbvx/z5t1icQPf7dgJOQUKE= -github.com/portworx/sched-ops v1.20.4-rc1.0.20220714042759-8f183fe386ca/go.mod h1:0IQvado0rnmbRMORaCqCDrrzjBrX5sU+Sz2+vQwEsjM= +github.com/portworx/sched-ops v1.20.4-rc1.0.20221102055014-b3a55a3df5c8 h1:fPdQkWEXZt+kE4o/wm6KlhwhYNDhJJpoRakcI4LcE48= +github.com/portworx/sched-ops v1.20.4-rc1.0.20221102055014-b3a55a3df5c8/go.mod h1:8XBwmcbDuhW0TWFKCaHH4oS5xsfGFU5miSyqb0fvl3U= github.com/portworx/talisman v0.0.0-20210302012732-8af4564777f7/go.mod h1:e8a6uFpSbOlRpZQlW9aXYogC+GWAo065G0RL9hDkD4Q= github.com/portworx/torpedo v0.0.0-20220714042817-25f6ab6dc5d1 h1:P4Lo6jDUUKglz7rkqlK8Hg4gLXqIIrgQaEeWxcXrV8U= github.com/portworx/torpedo v0.0.0-20220714042817-25f6ab6dc5d1/go.mod h1:I2wJjwLvCub+L1eNHWyHIIe6SrCreMVgwym4dCsR1WE= @@ -1730,8 +1742,8 @@ go.uber.org/zap v1.9.1/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= go.uber.org/zap v1.13.0/go.mod h1:zwrFLgMcdUuIBviXEYEH1YKNaOBnKXsx2IPda5bBwHM= go.uber.org/zap v1.15.0/go.mod h1:Mb2vm2krFEG5DV0W9qcHBYFtp/Wku1cvYaqPsS/WYfc= -go.uber.org/zap v1.17.0 h1:MTjgFu6ZLKvY6Pvaqk97GlxNBuMpV4Hy/3P6tRGlI2U= -go.uber.org/zap v1.17.0/go.mod h1:MXVU+bhUf/A7Xi2HNOnopQOrmycQ5Ih87HtOu4q5SSo= +go.uber.org/zap v1.18.1 h1:CSUJ2mjFszzEWt4CdKISEuChVIXGBn3lAPwkRGyVrc4= +go.uber.org/zap v1.18.1/go.mod h1:xg/QME4nWcxGxrpdeYfq7UvYrLh66cuVKdrbD1XF/NI= gocloud.dev v0.20.0 h1:mbEKMfnyPV7W1Rj35R1xXfjszs9dXkwSOq2KoFr25g8= gocloud.dev v0.20.0/go.mod h1:+Y/RpSXrJthIOM8uFNzWp6MRu9pFPNFEEZrQMxpkfIc= golang.org/x/crypto v0.0.0-20180608092829-8ac0e0d97ce4/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= @@ -2044,8 +2056,9 @@ golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxb golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20200416051211-89c76fbcd5d1/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20200630173020-3af7569d3a1e/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= -golang.org/x/time v0.0.0-20210220033141-f8bda1e9f3ba h1:O8mE0/t419eoIwhTFpKVkHiTs/Igowgfkj25AcZrtiE= golang.org/x/time v0.0.0-20210220033141-f8bda1e9f3ba/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20210723032227-1f47c861a9ac 
h1:7zkz7BUtwNFFqcowJ+RIgu2MaV/MapERkDIy+mwPyjs= +golang.org/x/time v0.0.0-20210723032227-1f47c861a9ac/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20180525024113-a5b4c53f6e8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20180828015842-6cd1fcedba52/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= @@ -2262,8 +2275,9 @@ google.golang.org/grpc v1.35.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAG google.golang.org/grpc v1.36.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= google.golang.org/grpc v1.38.0/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM= google.golang.org/grpc v1.40.0/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9KAK34= -google.golang.org/grpc v1.43.0 h1:Eeu7bZtDZ2DpRCsLhUlcrLnvYaMK1Gz86a+hMVvELmM= google.golang.org/grpc v1.43.0/go.mod h1:k+4IHHFw41K8+bbowsex27ge2rCb65oeWqe4jJ590SU= +google.golang.org/grpc v1.48.0 h1:rQOsyJ/8+ufEDJd/Gdsz7HG220Mh9HAhFHRGnIjda0w= +google.golang.org/grpc v1.48.0/go.mod h1:vN9eftEi1UMyUsIF80+uQXhHjbXYbm0uXoFCACuMGWk= google.golang.org/grpc/examples v0.0.0-20220715232852-f601dfac73c9 h1:YfILpEPJFqQb3n/IN6k0VAtsoEdNWB246xetOdhu7Kw= google.golang.org/grpc/examples v0.0.0-20220715232852-f601dfac73c9/go.mod h1:gxndsbNG1n4TZcHGgsYEfVGnTxqfEdfiDv6/DADXX9o= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= @@ -2337,15 +2351,16 @@ gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= gopkg.in/yaml.v3 v3.0.0-20190709130402-674ba3eaed22/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.0-20200605160147-a5ece683394c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= -gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b h1:h8qDotaEPuJATrMmW04NCwg7v22aHH28wwpauUhK9Oo= gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= +gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gotest.tools v2.2.0+incompatible h1:VsBPFP1AI068pPrMxtb/S8Zkgf9xEmTLJjfM+P5UIEo= gotest.tools v2.2.0+incompatible/go.mod h1:DsYFclhRJ6vuDpmuTbkuFWG+y2sxOXAzmJt81HFBacw= gotest.tools/v3 v3.0.2/go.mod h1:3SzNCllyD9/Y+b5r9JIKQ474KzkZyqLqEfYqMsX94Bk= gotest.tools/v3 v3.0.3 h1:4AuOwCGf4lLR9u3YOe2awrHygurzhO/HeQ6laiA6Sx0= gotest.tools/v3 v3.0.3/go.mod h1:Z7Lb0S5l+klDB31fvDQX8ss/FlKDxtlFlw3Oa8Ymbl8= -helm.sh/helm/v3 v3.6.0 h1:/9IMxJ2lXJHbvTMHcW1AO71lXQHqDC+3bcpGp7yCsb8= -helm.sh/helm/v3 v3.6.0/go.mod h1:mIIus8EOqj+obtycw3sidsR4ORr2aFDmXMSI3k+oeVY= +helm.sh/helm/v3 v3.6.1 h1:TQ6q4pAatXr7qh2fbLcb0oNd0I3J7kv26oo5cExKTtc= +helm.sh/helm/v3 v3.6.1/go.mod h1:mIIus8EOqj+obtycw3sidsR4ORr2aFDmXMSI3k+oeVY= honnef.co/go/tools v0.0.0-20180728063816-88497007e858/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20180920025451-e3ad64cb4ed3/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= @@ -2357,30 +2372,30 @@ honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9 honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= 
honnef.co/go/tools v0.1.2/go.mod h1:NgwopIslSNH47DimFoV78dnkksY2EFtX0ajyb3K/las= honnef.co/go/tools v0.2.1/go.mod h1:lPVVZ2BS5TfnjLyizF7o7hv7j9/L+8cZY2hLyjP9cGY= -k8s.io/api v0.21.4 h1:WtDkzTAuI31WZKDPeIYpEUA+WeUfXAmA7gwj6nzFfbc= -k8s.io/api v0.21.4/go.mod h1:fTVGP+M4D8+00FN2cMnJqk/eb/GH53bvmNs2SVTmpFk= -k8s.io/apiextensions-apiserver v0.21.4 h1:HkajN/vmT/9HnFmUxvpXfSGkTCvH/ax4e3+j6mqWUDU= -k8s.io/apiextensions-apiserver v0.21.4/go.mod h1:OoC8LhI9LnV+wKjZkXIBbLUwtnOGJiTRE33qctH5CIk= -k8s.io/apimachinery v0.21.4 h1:KDq0lWZVslHkuE5I7iGAQHwpK0aDTlar1E7IWEc4CNw= -k8s.io/apimachinery v0.21.4/go.mod h1:H/IM+5vH9kZRNJ4l3x/fXP/5bOPJaVP/guptnZPeCFI= -k8s.io/apiserver v0.21.4 h1:egJgdhW0ueq5iJSY0c5YedPvRM2Ft/D3dcXOgwvs9jY= -k8s.io/apiserver v0.21.4/go.mod h1:SErUuFBBPZUcD2nsUU8hItxoYheqyYr2o/pCINEPW8g= -k8s.io/cli-runtime v0.21.4 h1:kvOzx6dKg+9wRuHTzSqo8tfTV6ixZCkmi+ag54s7mn8= -k8s.io/cli-runtime v0.21.4/go.mod h1:eRbLHYkdVWzvG87yrkgGd8CqX6/+fAG9DTdAqTXmlRY= -k8s.io/client-go v0.21.4 h1:tcwj167If+v+pIGrCjaPG7hFo6SqFPFCCgMJy+Vm8Jc= -k8s.io/client-go v0.21.4/go.mod h1:t0/eMKyUAq/DoQ7vW8NVVA00/nomlwC+eInsS8PxSew= -k8s.io/cloud-provider v0.21.4 h1:BPGDdyz49/ohnK3QMDWBtm39QnDm+bXIP5L7mj8AHUQ= -k8s.io/cloud-provider v0.21.4/go.mod h1:9ogsWpFKWcYC0sGPu0YZ3FMLZIlaGBSFDCNXxhlCF1o= -k8s.io/cluster-bootstrap v0.21.4/go.mod h1:GtXGuiEtdV4XQJcscR6qQCm/vtQWkhUi3qnl9KL9jzw= -k8s.io/code-generator v0.21.4 h1:vO8jVuEGV4UF+/2s/88Qg05MokE/1QUFi/Q2YDgz++A= -k8s.io/code-generator v0.21.4/go.mod h1:K3y0Bv9Cz2cOW2vXUrNZlFbflhuPvuadW6JdnN6gGKo= -k8s.io/component-base v0.21.4 h1:Bc0AttSyhJFVXEIHz+VX+D11j/5z7SPPhl6whiXaRzs= -k8s.io/component-base v0.21.4/go.mod h1:ZKG0eHVX+tUDcaoIGpU3Vtk4TIjMddN9uhEWDmW6Nyg= -k8s.io/component-helpers v0.21.4 h1:Q6L3sQ+L5uaaUcsJkhlzU5UchcIYBZ56Y2Bq5k4qOtk= -k8s.io/component-helpers v0.21.4/go.mod h1:/5TBNWmxaAymZweO1JWv3Pt5rcYJV1LbWWY0x1rDdVU= -k8s.io/controller-manager v0.21.4/go.mod h1:a/iL7W19zkyirHDaupk9cyC11nejVznGwZI6I8tbyQY= -k8s.io/cri-api v0.21.4/go.mod h1:ukzeKnOkrG9/+ghKZA57WeZbQfRtqlGLF5GcF3RtHZ8= -k8s.io/csi-translation-lib v0.21.4/go.mod h1:WtxJW4/3XGhllbRCO4SRkL/MyLhjaRsL6Ds+q0pDHTg= +k8s.io/api v0.21.5 h1:9zp3SslPRB+rqxhGKqqTo6VsN3HX0Ype1nWV6UQQ+Sk= +k8s.io/api v0.21.5/go.mod h1:Un8C5Hemo2r3MfPOjZvwQQ9KkBbiTBUCGrjlivo9uJ0= +k8s.io/apiextensions-apiserver v0.21.5 h1:sCUpiB47ba59J57ZsqOvoxD3voc2nnR+sylAzHIwI8w= +k8s.io/apiextensions-apiserver v0.21.5/go.mod h1:iiakfVazpXLW8OkF2sH/p9XGgfE7XFSQuZFJ10QlXB4= +k8s.io/apimachinery v0.21.5 h1:56bnsHcUNboSCbD779GGi4Lh5kHTDFUoDrnHbhLTiaw= +k8s.io/apimachinery v0.21.5/go.mod h1:3PfBV+4PPXNs0aueD+7fHcGyhdkFFYqXeshQtsKCi+4= +k8s.io/apiserver v0.21.5 h1:iEPvJ2uwmyb7C4eScOj1fgPKCyCUGgMQU5+UREE87vE= +k8s.io/apiserver v0.21.5/go.mod h1:0bWmrAx3dxUUFSEw71U91Si5obhIvBcAmf8oVZUO58E= +k8s.io/cli-runtime v0.21.5 h1:ZpPmrRsHvzdMzXrcr1/ZSBHLKrhS1aHyMr2hGJNlNpI= +k8s.io/cli-runtime v0.21.5/go.mod h1:TKlcXsRVImtcPDGEe72pyZtD9UgBJNupIf3hmsIeekE= +k8s.io/client-go v0.21.5 h1:zkVidiWVgciPKYqWpMFMjCUF+4rRXcfkKoyQS1Ue21k= +k8s.io/client-go v0.21.5/go.mod h1:EUornVlr3rBrPKXUoMPNggJdEQmvFNMpYO3Kb6432kw= +k8s.io/cloud-provider v0.21.5 h1:wLWaGA3VrHNqP8J3eimmxDdmCfLnNl0JcpRRYhKsrrU= +k8s.io/cloud-provider v0.21.5/go.mod h1:8HT2WVbR6Xr6cc/B1+wnra/kgffFtUmPjsmUu9VMyv4= +k8s.io/cluster-bootstrap v0.21.5/go.mod h1:X6MX+aOJx6NzNlEe0iUIIcFKG06qC/fqHAyzAfAgaYo= +k8s.io/code-generator v0.21.5 h1:7X6dJG4hzKFHChYpP02iF0XrXhenqQHc76QoKYzDZfI= +k8s.io/code-generator v0.21.5/go.mod h1:0K1k6o2ef8JD/j8LF3ZuqWLGFMHvO5psNzLLmxf7ZVE= +k8s.io/component-base v0.21.5 
h1:icFqcFDrO9S+FQpGohzVm6qce9vlo131K0r3NhElxiQ= +k8s.io/component-base v0.21.5/go.mod h1:UyRaqQfPkBL/haEFaMWgVQvtom5TqAT+jqlFGlh6LuU= +k8s.io/component-helpers v0.24.0 h1:hZIHGfdd55thhqd9oxjDTw68OAPauDMJ+8hC69aNw1I= +k8s.io/component-helpers v0.24.0/go.mod h1:Q2SlLm4h6g6lPTC9GMMfzdywfLSvJT2f1hOnnjaWD8c= +k8s.io/controller-manager v0.21.5/go.mod h1:65guJmOoIEklTPwwYOmiHOsbEoAnQ7xKSZ9v+RdJC6g= +k8s.io/cri-api v0.21.5/go.mod h1:hYY+ZI/gXC3XMHIvuzRzDtb5BCEyoAOf44Z4a8GxoTk= +k8s.io/csi-translation-lib v0.21.5/go.mod h1:3ypbZqeM13aqwC1CpovssPkMhLgITWumH3n9PkdhDEA= k8s.io/gengo v0.0.0-20190306031000-7a1b7fb0289f/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0= k8s.io/gengo v0.0.0-20200413195148-3a45101e95ac/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0= k8s.io/gengo v0.0.0-20201214224949-b6c5ce23f027 h1:Uusb3oh8XcdzDF/ndlI4ToKTYVlkCSJP39SRY2mfRAw= @@ -2390,27 +2405,27 @@ k8s.io/klog v0.3.0/go.mod h1:Gq+BEi5rUBO/HRz0bTSXDUcqjScdoY3a9IHpCEIOOfk= k8s.io/klog v0.4.0/go.mod h1:4Bi6QPql/J/LkTDqv7R/cd3hPo4k2DG6Ptcz060Ez5I= k8s.io/klog v1.0.0 h1:Pt+yjF5aB1xDSVbau4VsWe+dQNzA0qv1LlXdC2dF6Q8= k8s.io/klog v1.0.0/go.mod h1:4Bi6QPql/J/LkTDqv7R/cd3hPo4k2DG6Ptcz060Ez5I= -k8s.io/klog/v2 v2.4.0 h1:7+X0fUguPyrKEC4WjH8iGDg3laWgMo5tMnRTIGTTxGQ= -k8s.io/klog/v2 v2.4.0/go.mod h1:Od+F08eJP+W3HUb4pSrPpgp9DGU4GzlpG/TmITuYh/Y= -k8s.io/kube-aggregator v0.21.4/go.mod h1:SykygeaVEQfqYH5IV8ve7Ia3dEGOGpGrdfD5NBi5yYI= -k8s.io/kube-controller-manager v0.21.4/go.mod h1:/wPS1gIX++/WjsIiimESnkpMqsjiIAMOpjVwjqLo7ng= +k8s.io/klog/v2 v2.8.0 h1:Q3gmuM9hKEjefWFFYF0Mat+YyFJvsUyYuwyNNJ5C9Ts= +k8s.io/klog/v2 v2.8.0/go.mod h1:hy9LJ/NvuK+iVyP4Ehqva4HxZG/oXyIS3n3Jmire4Ec= +k8s.io/kube-aggregator v0.21.5/go.mod h1:Zs74KHeA5RYNQw88cjfMtp46VCTIgQIX56FcxDE5NFo= +k8s.io/kube-controller-manager v0.21.5/go.mod h1:adzsSLzeO3vkaxOTdbvHIe5WJZ7naB+s6080uCToGs0= k8s.io/kube-openapi v0.0.0-20180731170545-e3762e86a74c/go.mod h1:BXM9ceUBTj2QnfH2MK1odQs778ajze1RxcmP6S8RVVc= k8s.io/kube-openapi v0.0.0-20210216185858-15cd8face8d6/go.mod h1:wXW5VT87nVfh/iLV8FpR2uDvrFyomxbtb1KivDbvPTE= k8s.io/kube-openapi v0.0.0-20210305001622-591a79e4bda7 h1:vEx13qjvaZ4yfObSSXW7BrMc/KQBBT/Jyee8XtLf4x0= k8s.io/kube-openapi v0.0.0-20210305001622-591a79e4bda7/go.mod h1:wXW5VT87nVfh/iLV8FpR2uDvrFyomxbtb1KivDbvPTE= -k8s.io/kube-proxy v0.21.4/go.mod h1:eUxSO/0Z/0JjKYz/aCZdwGea7lazumkTFrqS+OWcVNI= -k8s.io/kube-scheduler v0.21.4 h1:oUVUCM+v6rum1i5vn5C3ZrqPNkp7exWiy7/Tfzbs9ZQ= -k8s.io/kube-scheduler v0.21.4/go.mod h1:zFiUfgeM/dJajfHYG8Bx5fSrNAcLxMHFgN7ARdSJXqQ= -k8s.io/kubectl v0.21.4 h1:ODXpSKpi5C6XnJmGg96E/36KAry513v4Jr9Efg3ePJI= -k8s.io/kubectl v0.21.4/go.mod h1:rRYB5HeScoGQKxZDQmus17pTSVIuqfm0D31ApET/qSM= -k8s.io/kubelet v0.21.4/go.mod h1:kgXUz8upYNIngMSEZP1rpg2kp4gfUrsB7ir5u9Cm4HE= -k8s.io/kubernetes v1.21.4 h1:uKnn+MDBG4Bsed/iD3L6gMkq/szAnMqeHuSjkc3WOzQ= -k8s.io/kubernetes v1.21.4/go.mod h1:yNRsD2sfx76jpLKTgr0lJdVnILFWRo7b+HCo94tD48c= -k8s.io/legacy-cloud-providers v0.21.4/go.mod h1:WzvDvkWfD7lKQSaSqqaYsoY3VQeAjhXYN2telpMx8co= -k8s.io/metrics v0.21.4/go.mod h1:uhWoVuVumUMSeCa1B1p2tm4Y4XuZIg0n24QEtB54wuA= -k8s.io/mount-utils v0.21.4 h1:T24Y4FJ9IRkXgA+UkQHr+F+f/nm7sqdkdmdSxTtF+lw= -k8s.io/mount-utils v0.21.4/go.mod h1:dwXbIPxKtTjrBEaX1aK/CMEf1KZ8GzMHpe3NEBfdFXI= -k8s.io/sample-apiserver v0.21.4/go.mod h1:rpVLxky91DoN2OehmyZf/IE+sgop/BBoZl78VJrrs0I= +k8s.io/kube-proxy v0.21.5/go.mod h1:brL44h883BThxzRIcIGUiOCJpTXq5Bbq/InSMYAsdB4= +k8s.io/kube-scheduler v0.21.5 h1:yjm5Z3pIRwORBcR7HovteRhhC58+I/gCc07wO/HMYUI= +k8s.io/kube-scheduler v0.21.5/go.mod 
h1:7hWWLzvl0yEr+gm2Kfvt1wikhXwQb2BNylvOwzSlSMM= +k8s.io/kubectl v0.21.5 h1:Ov5ivI1SanAoVPI/n6/Sik+MQTaeGp7U2S02loXBB/s= +k8s.io/kubectl v0.21.5/go.mod h1:1dDgqGZdQWH6IOLozcxQ3Tyvc5CnEL1Int6St4XEV8w= +k8s.io/kubelet v0.21.5/go.mod h1:yVKsH4usaXy40Z3cZ8jknE70obOF/4aFNB7bittEEZ0= +k8s.io/kubernetes v1.21.5 h1:PpXs+a5FdF5Nwy+9vPjs5svULcTH923QCOjzdLqZmyw= +k8s.io/kubernetes v1.21.5/go.mod h1:o8QsgtH5UB3z9BYhcUZt9S6zjcJ4vdFsj2ACinL44Ss= +k8s.io/legacy-cloud-providers v0.21.5/go.mod h1:VGdzalKK13Q8eJuhbrmPbuwyjc9vVaQ8T0asHpSJNBg= +k8s.io/metrics v0.21.5/go.mod h1:Ew+6obDfJiQVsi6J2NkoI5jNMio/CCPC5v3pLXH8vos= +k8s.io/mount-utils v0.21.5 h1:2aapn4dg0L/naSvr9vze7vIjW6nelq3hNHxb2nLselc= +k8s.io/mount-utils v0.21.5/go.mod h1:dwXbIPxKtTjrBEaX1aK/CMEf1KZ8GzMHpe3NEBfdFXI= +k8s.io/sample-apiserver v0.21.5/go.mod h1:XqwON+6Rv40cwSe+Sr6ihQEcMI1MCvin8sDFAPFVQHc= k8s.io/sample-controller v0.20.4/go.mod h1:PAxO4dMU0MA62CB6ZyHM2rng/7oMOBLyF4qrDVA0Tcc= k8s.io/system-validators v1.4.0/go.mod h1:bPldcLgkIUK22ALflnsXk8pvkTEndYdNuaHH6gRrl0Q= k8s.io/test-infra v0.0.0-20181019233642-2e10a0bbe9b3/go.mod h1:2NzXB13Ji0nqpyublHeiPC4FZwU0TknfvyaaNfl/BTA= @@ -2422,8 +2437,10 @@ k8s.io/utils v0.0.0-20191114184206-e782cd3c129f/go.mod h1:sZAwmy6armz5eXlNoLmJcl k8s.io/utils v0.0.0-20200124190032-861946025e34/go.mod h1:sZAwmy6armz5eXlNoLmJcl4F1QuKu7sr+mFQ0byX7Ew= k8s.io/utils v0.0.0-20201110183641-67b214c5f920/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= k8s.io/utils v0.0.0-20210111153108-fddb29f9d009/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= -k8s.io/utils v0.0.0-20210527160623-6fdb442a123b h1:MSqsVQ3pZvPGTqCjptfimO2WjG7A9un2zcpiHkA6M/s= k8s.io/utils v0.0.0-20210527160623-6fdb442a123b/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= +k8s.io/utils v0.0.0-20210722164352-7f3ee0f31471/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= +k8s.io/utils v0.0.0-20220210201930-3a6ce19ff2f9 h1:HNSDgDCrr/6Ly3WEGKZftiE7IY19Vz2GdbOCyI4qqhc= +k8s.io/utils v0.0.0-20220210201930-3a6ce19ff2f9/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= knative.dev/pkg v0.0.0-20191101194912-56c2594e4f11/go.mod h1:pgODObA1dTyhNoFxPZTTjNWfx6F0aKsKzn+vaT9XO/Q= layeh.com/radius v0.0.0-20190322222518-890bc1058917/go.mod h1:fywZKyu//X7iRzaxLgPWsvc0L26IUpVvE/aeIL2JtIQ= modernc.org/cc v1.0.0/go.mod h1:1Sk4//wdnYJiUIxnW8ddKpaOJCF37yAdqYnkxUpaYxw= @@ -2442,8 +2459,8 @@ sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.22/go.mod h1:LEScyz sigs.k8s.io/cluster-api v0.2.11 h1:sUngHVvh/DyHhERR1fo7eH2N/xS5qfnK7pCtwrErs68= sigs.k8s.io/cluster-api v0.2.11/go.mod h1:BCw+Pqy1sc8mQ/3d2NZM/f5BApKFCMPsnGvKolvDcA0= sigs.k8s.io/container-object-storage-interface-spec v0.0.0-20220211001052-50e143052de8/go.mod h1:kafkL5l/lTUrZXhVi/9p1GzpEE/ts29BkWkL3Ao33WU= -sigs.k8s.io/controller-runtime v0.9.0 h1:ZIZ/dtpboPSbZYY7uUz2OzrkaBTOThx2yekLtpGB+zY= -sigs.k8s.io/controller-runtime v0.9.0/go.mod h1:TgkfvrhhEw3PlI0BRL/5xM+89y3/yc0ZDfdbTl84si8= +sigs.k8s.io/controller-runtime v0.9.6 h1:EevVMlgUj4fC1NVM4+DB3iPkWkmGRNarA66neqv9Qew= +sigs.k8s.io/controller-runtime v0.9.6/go.mod h1:q6PpkM5vqQubEKUKOM6qr06oXGzOBcCby1DA9FbyZeA= sigs.k8s.io/gcp-compute-persistent-disk-csi-driver v0.7.0 h1:mvSbjzrnOd+3AB/7jvz7UNdZs5fhYorhm2H0A2HcIVg= sigs.k8s.io/gcp-compute-persistent-disk-csi-driver v0.7.0/go.mod h1:aSyCjg9bNQQxY9hnnNo10vjhZsQTkLliruvRXp3N9B4= sigs.k8s.io/kustomize/api v0.8.8 h1:G2z6JPSSjtWWgMeWSoHdXqyftJNmMmyxXpwENGoOtGE= diff --git a/pkg/apis/stork/v1alpha1/backuplocation.go b/pkg/apis/stork/v1alpha1/backuplocation.go index 99e81d94af..06f0f6e6c4 100644 
--- a/pkg/apis/stork/v1alpha1/backuplocation.go
+++ b/pkg/apis/stork/v1alpha1/backuplocation.go
@@ -41,6 +41,7 @@ type BackupLocationItem struct {
 	S3Config     *S3Config     `json:"s3Config,omitempty"`
 	AzureConfig  *AzureConfig  `json:"azureConfig,omitempty"`
 	GoogleConfig *GoogleConfig `json:"googleConfig,omitempty"`
+	NfsConfig    *NfsConfig    `json:"nfsConfig,omitempty"`
 	SecretConfig       string `json:"secretConfig"`
 	Sync               bool   `json:"sync"`
 	RepositoryPassword string `json:"repositoryPassword"`
@@ -73,6 +74,8 @@ const (
 	BackupLocationAzure BackupLocationType = "azure"
 	// BackupLocationGoogle stores the backup in Google Cloud Storage
 	BackupLocationGoogle BackupLocationType = "google"
+	// BackupLocationNFS stores the backup in NFS-backed storage
+	BackupLocationNFS BackupLocationType = "nfs"
 )

 // ClusterType is the type of the cluster
@@ -123,6 +126,12 @@ type GoogleConfig struct {
 	AccountKey string `json:"accountKey"`
 }

+// NfsConfig specifies the NFS server endpoint and mount details for an NFS backup location
+type NfsConfig struct {
+	ServerAddr  string `json:"serverAddr"`
+	SubPath     string `json:"subPath"`
+	MountOption string `json:"mountOption"`
+}
+
 // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object

 // BackupLocationList is a list of ApplicationBackups
@@ -154,6 +163,8 @@ func (bl *BackupLocation) UpdateFromSecret(client kubernetes.Interface) error {
 		return bl.getMergedAzureConfig(client)
 	case BackupLocationGoogle:
 		return bl.getMergedGoogleConfig(client)
+	case BackupLocationNFS:
+		return bl.getMergedNfsConfig(client)
 	default:
 		return fmt.Errorf("Invalid BackupLocation type %v", bl.Location.Type)
 	}
@@ -176,6 +187,28 @@ func (bl *BackupLocation) UpdateFromClusterSecret(client kubernetes.Interface) e
 	return nil
 }

+func (bl *BackupLocation) getMergedNfsConfig(client kubernetes.Interface) error {
+	if bl.Location.NfsConfig == nil {
+		bl.Location.NfsConfig = &NfsConfig{}
+	}
+	if bl.Location.SecretConfig != "" {
+		secretConfig, err := client.CoreV1().Secrets(bl.Namespace).Get(context.TODO(), bl.Location.SecretConfig, metav1.GetOptions{})
+		if err != nil {
+			return fmt.Errorf("error getting secretConfig for backupLocation: %v", err)
+		}
+		if val, ok := secretConfig.Data["serverAddr"]; ok && val != nil {
+			bl.Location.NfsConfig.ServerAddr = strings.TrimSuffix(string(val), "\n")
+		}
+		if val, ok := secretConfig.Data["subPath"]; ok && val != nil {
+			bl.Location.NfsConfig.SubPath = strings.TrimSuffix(string(val), "\n")
+		}
+		if val, ok := secretConfig.Data["mountOption"]; ok && val != nil {
+			bl.Location.NfsConfig.MountOption = strings.TrimSuffix(string(val), "\n")
+		}
+	}
+	return nil
+}
+
 func (bl *BackupLocation) getMergedS3Config(client kubernetes.Interface) error {
 	if bl.Location.S3Config == nil {
 		bl.Location.S3Config = &S3Config{}
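Reviewer note: a minimal sketch of the new location type end to end, showing how `UpdateFromSecret` dispatches to `getMergedNfsConfig` for an `nfs` location, using a fake clientset. The secret keys `serverAddr`, `subPath`, and `mountOption` come straight from the hunk above; the object names and namespace are illustrative.

```go
package main

import (
	"fmt"

	storkv1 "github.com/libopenstorage/stork/pkg/apis/stork/v1alpha1"
	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes/fake"
)

func main() {
	// Secret holding the NFS connection details; the key names match the
	// ones getMergedNfsConfig looks up.
	secret := &v1.Secret{
		ObjectMeta: metav1.ObjectMeta{Name: "nfs-creds", Namespace: "kube-system"},
		Data: map[string][]byte{
			"serverAddr":  []byte("nfs.example.com\n"), // trailing newline is trimmed
			"subPath":     []byte("backups"),
			"mountOption": []byte("nfsvers=4.1"),
		},
	}
	client := fake.NewSimpleClientset(secret)

	bl := &storkv1.BackupLocation{
		ObjectMeta: metav1.ObjectMeta{Name: "nfs-bl", Namespace: "kube-system"},
		Location: storkv1.BackupLocationItem{
			Type:         storkv1.BackupLocationNFS,
			SecretConfig: "nfs-creds",
		},
	}
	// For the "nfs" type this dispatches to getMergedNfsConfig.
	if err := bl.UpdateFromSecret(client); err != nil {
		panic(err)
	}
	fmt.Println(bl.Location.NfsConfig.ServerAddr) // nfs.example.com
}
```

Values set directly in `NfsConfig` survive unless the referenced secret overrides them, matching the merge behavior of the other location types.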
diff --git a/pkg/apis/stork/v1alpha1/migration.go b/pkg/apis/stork/v1alpha1/migration.go
index e3ec6cab72..3e6633d3e9 100644
--- a/pkg/apis/stork/v1alpha1/migration.go
+++ b/pkg/apis/stork/v1alpha1/migration.go
@@ -27,6 +27,7 @@ type MigrationSpec struct {
 	PostExecRule                 string   `json:"postExecRule"`
 	IncludeOptionalResourceTypes []string `json:"includeOptionalResourceTypes"`
 	SkipDeletedNamespaces        *bool    `json:"skipDeletedNamespaces"`
+	TransformSpecs               []string `json:"transformSpecs"`
 }

 // MigrationStatus is the status of a migration operation
@@ -49,6 +50,7 @@ type MigrationResourceInfo struct {
 	meta.GroupVersionKind `json:",inline"`
 	Status                MigrationStatusType `json:"status"`
 	Reason                string              `json:"reason"`
+	TransformedBy         string              `json:"transformedBy"`
 }

 // MigrationSummary provides a short summary on the migration
diff --git a/pkg/apis/stork/v1alpha1/register.go b/pkg/apis/stork/v1alpha1/register.go
index 408649670f..ffa01bad0a 100644
--- a/pkg/apis/stork/v1alpha1/register.go
+++ b/pkg/apis/stork/v1alpha1/register.go
@@ -66,6 +66,8 @@ func addKnownTypes(scheme *runtime.Scheme) error {
 		&ApplicationBackupScheduleList{},
 		&DataExport{},
 		&DataExportList{},
+		&ResourceTransformation{},
+		&ResourceTransformationList{},
 	)

 	metav1.AddToGroupVersion(scheme, SchemeGroupVersion)
diff --git a/pkg/apis/stork/v1alpha1/resourcetransformation.go b/pkg/apis/stork/v1alpha1/resourcetransformation.go
new file mode 100644
index 0000000000..0dd7c06ec0
--- /dev/null
+++ b/pkg/apis/stork/v1alpha1/resourcetransformation.go
@@ -0,0 +1,146 @@
+package v1alpha1
+
+import (
+	meta "k8s.io/apimachinery/pkg/apis/meta/v1"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+)
+
+const (
+	// ResourceTransformationResourceName is the name for the "ResourceTransformation" resource
+	ResourceTransformationResourceName = "resourcetransformation"
+	// ResourceTransformationResourcePlural is the plural for the "ResourceTransformation" resource
+	ResourceTransformationResourcePlural = "resourcetransformations"
+)
+
+// ResourceTransformationOperationType is the type of operation supported for
+// resource transformation
+type ResourceTransformationOperationType string
+
+const (
+	// AddResourcePath is used to add a path+value to the specified resource spec;
+	// if the path+value already exists, this operation overrides the value
+	// at the given path
+	AddResourcePath ResourceTransformationOperationType = "add"
+	// ModifyResourcePathValue is used to merge the value at the specified resource path:
+	// in case of a slice, entries will be appended;
+	// in case of a keypair, entries will be merged
+	ModifyResourcePathValue ResourceTransformationOperationType = "modify"
+	// DeleteResourcePath deletes the given path from the resource specification
+	DeleteResourcePath ResourceTransformationOperationType = "delete"
+	// JsonResourcePatch applies a JSON patch to the given resource spec
+	JsonResourcePatch ResourceTransformationOperationType = "jsonpatch"
+)
+
+// ResourceTransformationValueType is the type of value supported at a
+// path in resource specs
+type ResourceTransformationValueType string
+
+type KindResourceTransform map[string][]TransformResourceInfo
+
+const (
+	// IntResourceType is used to update an integer value at the specified resource path
+	IntResourceType ResourceTransformationValueType = "int"
+	// StringResourceType is used to update a string value at the specified resource path
+	StringResourceType ResourceTransformationValueType = "string"
+	// BoolResourceType is used to update a boolean value at the specified resource path
+	BoolResourceType ResourceTransformationValueType = "bool"
+	// SliceResourceType is used to update a slice value at the specified resource path
+	SliceResourceType ResourceTransformationValueType = "slice"
+	// KeyPairResourceType is used to update a keypair value at the specified resource path
+	KeyPairResourceType ResourceTransformationValueType = "keypair"
+)
+
+// ResourceTransformationStatusType is the status of the resource transformation CR
+type ResourceTransformationStatusType string
+
+const (
+	// ResourceTransformationStatusInitial represents the initial state of the resource
+	// transformation CR
+	ResourceTransformationStatusInitial ResourceTransformationStatusType = ""
+	// ResourceTransformationStatusInProgress represents the dry-run-in-progress state
+	// of the resource transformation
+	ResourceTransformationStatusInProgress ResourceTransformationStatusType = "InProgress"
+	// ResourceTransformationStatusReady represents the ready state of the resource
+	// transformation CR
+	ResourceTransformationStatusReady ResourceTransformationStatusType = "Ready"
+	// ResourceTransformationStatusFailed represents the dry-run-failed state of the resource
+	// transformation CR
+	ResourceTransformationStatusFailed ResourceTransformationStatusType = "Failed"
+)
+
+// +genclient
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// ResourceTransformation represents a ResourceTransformation CR object
+type ResourceTransformation struct {
+	metav1.TypeMeta   `json:",inline"`
+	metav1.ObjectMeta `json:"metadata,omitempty"`
+	Spec   ResourceTransformationSpec   `json:"specs"`
+	Status ResourceTransformationStatus `json:"status"`
+}
+
+type ResourceTransformationStatus struct {
+	Status    ResourceTransformationStatusType `json:"status"`
+	Resources []*TransformResourceInfo         `json:"resources"`
+}
+
+// TransformResourceInfo is the info of the resources selected
+// for transformation
+type TransformResourceInfo struct {
+	Name                  string `json:"name"`
+	Namespace             string `json:"namespace"`
+	meta.GroupVersionKind `json:",inline"`
+	Status                ResourceTransformationStatusType `json:"status"`
+	Reason                string                           `json:"reason"`
+	Specs                 TransformSpecs                   `json:"specs"`
+}
+
+// ResourceTransformationSpec is used to update k8s resources
+// before migration/restore
+type ResourceTransformationSpec struct {
+	Objects []TransformSpecs `json:"transformSpecs"`
+}
+
+// TransformSpecs specifies the patch to update the selected resource
+// before migration/restore
+type TransformSpecs struct {
+	// Resource is the GroupVersionKind for k8s resources,
+	// in the format `group/version/kind`
+	Resource string `json:"resource"`
+	// Selectors is the label selector used to filter the resources
+	// to patch
+	Selectors map[string]string `json:"selectors"`
+	// Paths is the collection of resource paths to update
+	Paths []ResourcePaths `json:"paths"`
+}
+
+type TransformSpecPatch struct {
+	GVK map[string]PatchStruct
+}
+type PatchStruct struct {
+	// Resources is keyed by namespace; the value is the resource in that namespace
+	Resources map[string]TransformResourceInfo
+}
+
+// ResourcePaths specifies the patch to modify a resource
+// before migration/restore
+type ResourcePaths struct {
+	// Path is the k8s resource path for the operation
+	Path string `json:"path"`
+	// Value is the value for the given k8s path
+	Value string `json:"value"`
+	// Type is the type of the value specified: int/bool/string/slice/keypair
+	Type ResourceTransformationValueType `json:"type"`
+	// Operation to be performed on the path:
+	// add/modify/delete/jsonpatch
+	Operation ResourceTransformationOperationType `json:"operation"`
+}
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// ResourceTransformationList is a list of ResourceTransformations
+type ResourceTransformationList struct {
+	metav1.TypeMeta `json:",inline"`
+	metav1.ListMeta `json:"metadata,omitempty"`
+	Items           []ResourceTransformation `json:"items"`
+}
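Reviewer note: a hedged sketch of what a ResourceTransformation built against these types could look like, using the `group/version/kind` form documented on `TransformSpecs.Resource`. The object names and the dotted `spec.replicas` path syntax are assumptions, not confirmed by this diff.

```go
package main

import (
	"fmt"

	storkv1 "github.com/libopenstorage/stork/pkg/apis/stork/v1alpha1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func main() {
	// Pin matching Deployments to zero replicas on the destination side.
	rt := &storkv1.ResourceTransformation{
		ObjectMeta: metav1.ObjectMeta{Name: "scale-down-web", Namespace: "default"},
		Spec: storkv1.ResourceTransformationSpec{
			Objects: []storkv1.TransformSpecs{{
				Resource:  "apps/v1/Deployment", // documented group/version/kind form
				Selectors: map[string]string{"app": "web"},
				Paths: []storkv1.ResourcePaths{{
					Path:      "spec.replicas", // dotted-path syntax is an assumption
					Value:     "0",
					Type:      storkv1.IntResourceType,
					Operation: storkv1.AddResourcePath,
				}},
			}},
		},
	}
	fmt.Println(rt.Name)
}
```

A Migration would then reference such a CR through the new `TransformSpecs []string` field added to MigrationSpec earlier in this diff, presumably by name; the exact reference format is not shown here.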
diff --git a/pkg/apis/stork/v1alpha1/schedulepolicy.go b/pkg/apis/stork/v1alpha1/schedulepolicy.go
index f2282ed79a..9cbc77dbca 100644
--- a/pkg/apis/stork/v1alpha1/schedulepolicy.go
+++ b/pkg/apis/stork/v1alpha1/schedulepolicy.go
@@ -124,6 +124,8 @@ type DailyPolicy struct {
 	// Options to be passed in to the driver. These will be passed in
 	// to the object being triggered
 	Options map[string]string `json:"options"`
+	// ForceFullSnapshotDay specifies the day of the week on which a full snapshot is taken
+	ForceFullSnapshotDay string `json:"forceFullSnapshotDay"`
 }

 // GetHourMinute parses and return the hour and minute specified in the policy
diff --git a/pkg/apis/stork/v1alpha1/zz_generated.deepcopy.go b/pkg/apis/stork/v1alpha1/zz_generated.deepcopy.go
index c48026b5ff..0f09faac55 100644
--- a/pkg/apis/stork/v1alpha1/zz_generated.deepcopy.go
+++ b/pkg/apis/stork/v1alpha1/zz_generated.deepcopy.go
@@ -1,3 +1,4 @@
+//go:build !ignore_autogenerated
 // +build !ignore_autogenerated

 /*
@@ -524,7 +525,9 @@ func (in *ApplicationRegistration) DeepCopyInto(out *ApplicationRegistration) {
 	if in.Resources != nil {
 		in, out := &in.Resources, &out.Resources
 		*out = make([]ApplicationResource, len(*in))
-		copy(*out, *in)
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
 	}
 	return
 }
@@ -585,6 +588,11 @@ func (in *ApplicationResource) DeepCopyInto(out *ApplicationResource) {
 	*out = *in
 	out.GroupVersionKind = in.GroupVersionKind
 	out.SuspendOptions = in.SuspendOptions
+	if in.NestedSuspendOptions != nil {
+		in, out := &in.NestedSuspendOptions, &out.NestedSuspendOptions
+		*out = make([]SuspendOptions, len(*in))
+		copy(*out, *in)
+	}
 	return
 }
@@ -806,6 +814,7 @@ func (in *BackupLocation) DeepCopyInto(out *BackupLocation) {
 	out.TypeMeta = in.TypeMeta
 	in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
 	in.Location.DeepCopyInto(&out.Location)
+	in.Cluster.DeepCopyInto(&out.Cluster)
 	return
 }
@@ -1081,6 +1090,37 @@ func (in *ClusterDomainsStatusList) DeepCopyObject() runtime.Object {
 	return nil
 }

+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ClusterItem) DeepCopyInto(out *ClusterItem) {
+	*out = *in
+	if in.AWSClusterConfig != nil {
+		in, out := &in.AWSClusterConfig, &out.AWSClusterConfig
+		*out = new(S3Config)
+		**out = **in
+	}
+	if in.AzureClusterConfig != nil {
+		in, out := &in.AzureClusterConfig, &out.AzureClusterConfig
+		*out = new(AzureConfig)
+		**out = **in
+	}
+	if in.GCPClusterConfig != nil {
+		in, out := &in.GCPClusterConfig, &out.GCPClusterConfig
+		*out = new(GoogleConfig)
+		**out = **in
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterItem.
+func (in *ClusterItem) DeepCopy() *ClusterItem {
+	if in == nil {
+		return nil
+	}
+	out := new(ClusterItem)
+	in.DeepCopyInto(out)
+	return out
+}
+
 // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
 func (in *ClusterPair) DeepCopyInto(out *ClusterPair) {
 	*out = *in
@@ -1153,6 +1193,7 @@ func (in *ClusterPairSpec) DeepCopyInto(out *ClusterPairSpec) {
 			(*out)[key] = val
 		}
 	}
+	in.PlatformOptions.DeepCopyInto(&out.PlatformOptions)
 	return
 }
@@ -1498,6 +1539,38 @@ func (in *IntervalPolicy) DeepCopy() *IntervalPolicy {
 	return out
 }

+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in KindResourceTransform) DeepCopyInto(out *KindResourceTransform) { + { + in := &in + *out = make(KindResourceTransform, len(*in)) + for key, val := range *in { + var outVal []TransformResourceInfo + if val == nil { + (*out)[key] = nil + } else { + in, out := &val, &outVal + *out = make([]TransformResourceInfo, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + (*out)[key] = outVal + } + return + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KindResourceTransform. +func (in KindResourceTransform) DeepCopy() KindResourceTransform { + if in == nil { + return nil + } + out := new(KindResourceTransform) + in.DeepCopyInto(out) + return *out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *Migration) DeepCopyInto(out *Migration) { *out = *in @@ -1729,6 +1802,11 @@ func (in *MigrationSpec) DeepCopyInto(out *MigrationSpec) { *out = new(bool) **out = **in } + if in.IncludeNetworkPolicyWithCIDR != nil { + in, out := &in.IncludeNetworkPolicyWithCIDR, &out.IncludeNetworkPolicyWithCIDR + *out = new(bool) + **out = **in + } if in.Selectors != nil { in, out := &in.Selectors, &out.Selectors *out = make(map[string]string, len(*in)) @@ -1741,6 +1819,16 @@ func (in *MigrationSpec) DeepCopyInto(out *MigrationSpec) { *out = make([]string, len(*in)) copy(*out, *in) } + if in.SkipDeletedNamespaces != nil { + in, out := &in.SkipDeletedNamespaces, &out.SkipDeletedNamespaces + *out = new(bool) + **out = **in + } + if in.TransformSpecs != nil { + in, out := &in.TransformSpecs, &out.TransformSpecs + *out = make([]string, len(*in)) + copy(*out, *in) + } return } @@ -1780,6 +1868,13 @@ func (in *MigrationStatus) DeepCopyInto(out *MigrationStatus) { } } in.FinishTimestamp.DeepCopyInto(&out.FinishTimestamp) + in.VolumeMigrationFinishTimestamp.DeepCopyInto(&out.VolumeMigrationFinishTimestamp) + in.ResourceMigrationFinishTimestamp.DeepCopyInto(&out.ResourceMigrationFinishTimestamp) + if in.Summary != nil { + in, out := &in.Summary, &out.Summary + *out = new(MigrationSummary) + **out = **in + } return } @@ -1793,6 +1888,22 @@ func (in *MigrationStatus) DeepCopy() *MigrationStatus { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MigrationSummary) DeepCopyInto(out *MigrationSummary) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MigrationSummary. +func (in *MigrationSummary) DeepCopy() *MigrationSummary { + if in == nil { + return nil + } + out := new(MigrationSummary) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *MigrationTemplateSpec) DeepCopyInto(out *MigrationTemplateSpec) { *out = *in @@ -1945,6 +2056,216 @@ func (in *PVCSelectorSpec) DeepCopy() *PVCSelectorSpec { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PatchStruct) DeepCopyInto(out *PatchStruct) { + *out = *in + if in.Resources != nil { + in, out := &in.Resources, &out.Resources + *out = make(map[string]TransformResourceInfo, len(*in)) + for key, val := range *in { + (*out)[key] = *val.DeepCopy() + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PatchStruct. 
+func (in *PatchStruct) DeepCopy() *PatchStruct { + if in == nil { + return nil + } + out := new(PatchStruct) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PlatformSpec) DeepCopyInto(out *PlatformSpec) { + *out = *in + if in.Rancher != nil { + in, out := &in.Rancher, &out.Rancher + *out = new(RancherSpec) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PlatformSpec. +func (in *PlatformSpec) DeepCopy() *PlatformSpec { + if in == nil { + return nil + } + out := new(PlatformSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RancherSecret) DeepCopyInto(out *RancherSecret) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RancherSecret. +func (in *RancherSecret) DeepCopy() *RancherSecret { + if in == nil { + return nil + } + out := new(RancherSecret) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RancherSpec) DeepCopyInto(out *RancherSpec) { + *out = *in + if in.ProjectMappings != nil { + in, out := &in.ProjectMappings, &out.ProjectMappings + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RancherSpec. +func (in *RancherSpec) DeepCopy() *RancherSpec { + if in == nil { + return nil + } + out := new(RancherSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ResourcePaths) DeepCopyInto(out *ResourcePaths) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourcePaths. +func (in *ResourcePaths) DeepCopy() *ResourcePaths { + if in == nil { + return nil + } + out := new(ResourcePaths) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ResourceTransformation) DeepCopyInto(out *ResourceTransformation) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceTransformation. +func (in *ResourceTransformation) DeepCopy() *ResourceTransformation { + if in == nil { + return nil + } + out := new(ResourceTransformation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ResourceTransformation) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ResourceTransformationList) DeepCopyInto(out *ResourceTransformationList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]ResourceTransformation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceTransformationList. +func (in *ResourceTransformationList) DeepCopy() *ResourceTransformationList { + if in == nil { + return nil + } + out := new(ResourceTransformationList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ResourceTransformationList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ResourceTransformationSpec) DeepCopyInto(out *ResourceTransformationSpec) { + *out = *in + if in.Objects != nil { + in, out := &in.Objects, &out.Objects + *out = make([]TransformSpecs, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceTransformationSpec. +func (in *ResourceTransformationSpec) DeepCopy() *ResourceTransformationSpec { + if in == nil { + return nil + } + out := new(ResourceTransformationSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ResourceTransformationStatus) DeepCopyInto(out *ResourceTransformationStatus) { + *out = *in + if in.Resources != nil { + in, out := &in.Resources, &out.Resources + *out = make([]*TransformResourceInfo, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(TransformResourceInfo) + (*in).DeepCopyInto(*out) + } + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceTransformationStatus. +func (in *ResourceTransformationStatus) DeepCopy() *ResourceTransformationStatus { + if in == nil { + return nil + } + out := new(ResourceTransformationStatus) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *RestoreVolumeInfo) DeepCopyInto(out *RestoreVolumeInfo) { *out = *in @@ -2253,6 +2574,75 @@ func (in *SuspendOptions) DeepCopy() *SuspendOptions { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TransformResourceInfo) DeepCopyInto(out *TransformResourceInfo) { + *out = *in + out.GroupVersionKind = in.GroupVersionKind + in.Specs.DeepCopyInto(&out.Specs) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TransformResourceInfo. +func (in *TransformResourceInfo) DeepCopy() *TransformResourceInfo { + if in == nil { + return nil + } + out := new(TransformResourceInfo) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *TransformSpecPatch) DeepCopyInto(out *TransformSpecPatch) { + *out = *in + if in.GVK != nil { + in, out := &in.GVK, &out.GVK + *out = make(map[string]PatchStruct, len(*in)) + for key, val := range *in { + (*out)[key] = *val.DeepCopy() + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TransformSpecPatch. +func (in *TransformSpecPatch) DeepCopy() *TransformSpecPatch { + if in == nil { + return nil + } + out := new(TransformSpecPatch) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TransformSpecs) DeepCopyInto(out *TransformSpecs) { + *out = *in + if in.Selectors != nil { + in, out := &in.Selectors, &out.Selectors + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + if in.Paths != nil { + in, out := &in.Paths, &out.Paths + *out = make([]ResourcePaths, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TransformSpecs. +func (in *TransformSpecs) DeepCopy() *TransformSpecs { + if in == nil { + return nil + } + out := new(TransformSpecs) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *VolumeSnapshotRestore) DeepCopyInto(out *VolumeSnapshotRestore) { *out = *in diff --git a/pkg/applicationmanager/controllers/applicationbackup.go b/pkg/applicationmanager/controllers/applicationbackup.go index f461edc81f..3a876f95c2 100644 --- a/pkg/applicationmanager/controllers/applicationbackup.go +++ b/pkg/applicationmanager/controllers/applicationbackup.go @@ -25,13 +25,18 @@ import ( "github.com/libopenstorage/stork/pkg/objectstore" "github.com/libopenstorage/stork/pkg/resourcecollector" "github.com/libopenstorage/stork/pkg/rule" + "github.com/libopenstorage/stork/pkg/utils" "github.com/libopenstorage/stork/pkg/version" + kdmpapi "github.com/portworx/kdmp/pkg/apis/kdmp/v1alpha1" + kdmputils "github.com/portworx/kdmp/pkg/drivers/utils" "github.com/portworx/sched-ops/k8s/apiextensions" "github.com/portworx/sched-ops/k8s/core" + kdmpShedOps "github.com/portworx/sched-ops/k8s/kdmp" storkops "github.com/portworx/sched-ops/k8s/stork" "github.com/sirupsen/logrus" "gocloud.dev/gcerrors" v1 "k8s.io/api/core/v1" + apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" apiextensionsv1beta1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1" k8s_errors "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/api/meta" @@ -65,6 +70,9 @@ const ( maxRetry = 10 retrySleep = 10 * time.Second genericBackupKey = "BACKUP_TYPE" + kdmpDriverOnly = "kdmp" + nonKdmpDriverOnly = "nonkdmp" + mixedDriver = "mixed" ) var ( @@ -207,6 +215,10 @@ func (a *ApplicationBackupController) createBackupLocationPath(backup *stork_api if err != nil { return fmt.Errorf("error getting backup location path: %v", err) } + // For NFS skip creating path + if backupLocation.Location.Type == stork_api.BackupLocationNFS { + return nil + } if err := objectstore.CreateBucket(backupLocation); err != nil { return fmt.Errorf("error creating backup location path: %v", err) } @@ -299,8 +311,6 @@ func (a *ApplicationBackupController) handle(ctx context.Context, backup *stork_ return nil } } - - // Try to create the backupLocation path, just log error if it fails err := 
a.createBackupLocationPath(backup)
 	if err != nil {
 		log.ApplicationBackupLog(backup).Errorf(err.Error())
@@ -554,7 +564,11 @@ func (a *ApplicationBackupController) backupVolumes(backup *stork_api.Applicatio
 				continue
 			}
 			var driverName string
-			driverName, err = volume.GetPVCDriverForBackup(core.Instance(), &pvc, driverType, backup.Spec.BackupType)
+			backupLocation, err := storkops.Instance().GetBackupLocation(backup.Spec.BackupLocation, backup.Namespace)
+			if err != nil {
+				return err
+			}
+			driverName, err = volume.GetPVCDriverForBackup(core.Instance(), &pvc, driverType, backup.Spec.BackupType, backupLocation.Location.Type)
 			if err != nil {
 				// Skip unsupported PVCs
 				if _, ok := err.(*errors.ErrNotSupported); ok {
@@ -661,7 +675,9 @@ func (a *ApplicationBackupController) backupVolumes(backup *stork_api.Applicatio
 	terminationChannels = nil

 	// Run any post exec rules once backup is triggered
-	if backup.Spec.PostExecRule != "" {
+	driverCombo := a.checkVolumeDriverCombination(backup.Status.Volumes)
+	// If all of the volumes use non-kdmp drivers, run the post exec rule immediately
+	if driverCombo == nonKdmpDriverOnly && backup.Spec.PostExecRule != "" {
 		err = a.runPostExecRule(backup)
 		if err != nil {
 			message := fmt.Sprintf("Error running PostExecRule: %v", err)
@@ -766,6 +782,32 @@ func (a *ApplicationBackupController) backupVolumes(backup *stork_api.Applicatio
 		}
 	}

+	// Run any post exec rules once backup is triggered
+	driverCombo := a.checkVolumeDriverCombination(backup.Status.Volumes)
+	// If the volumes use only the kdmp driver, or a mix of kdmp and non-kdmp drivers,
+	// run the post exec rule only after the volume backup has succeeded.
+	if (driverCombo == kdmpDriverOnly || driverCombo == mixedDriver) && backup.Spec.PostExecRule != "" {
+		err = a.runPostExecRule(backup)
+		if err != nil {
+			message := fmt.Sprintf("Error running PostExecRule: %v", err)
+			log.ApplicationBackupLog(backup).Errorf(message)
+			a.recorder.Event(backup,
+				v1.EventTypeWarning,
+				string(stork_api.ApplicationBackupStatusFailed),
+				message)
+
+			backup.Status.Stage = stork_api.ApplicationBackupStageFinal
+			backup.Status.FinishTimestamp = metav1.Now()
+			backup.Status.LastUpdateTimestamp = metav1.Now()
+			backup.Status.Status = stork_api.ApplicationBackupStatusFailed
+			backup.Status.Reason = message
+			err = a.client.Update(context.TODO(), backup)
+			if err != nil {
+				return err
+			}
+			return fmt.Errorf("%v", message)
+		}
+	}
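Reviewer note: the two hunks above split the post exec rule into an immediate path and a deferred path based on the driver combination (the `checkVolumeDriverCombination` helper appears later in this diff). A self-contained sketch of just the timing decision, reusing the three constant values introduced in this file:

```go
package main

import "fmt"

const (
	kdmpDriverOnly    = "kdmp"
	nonKdmpDriverOnly = "nonkdmp"
	mixedDriver       = "mixed"
)

// postExecRuleTiming mirrors the two call sites above: non-kdmp-only backups
// run the rule as soon as the backup is triggered, while any kdmp involvement
// defers the rule until the volume backup has succeeded.
func postExecRuleTiming(driverCombo string, hasPostExecRule bool) string {
	if !hasPostExecRule {
		return "none"
	}
	if driverCombo == nonKdmpDriverOnly {
		return "immediately after triggering"
	}
	return "after volume backup succeeds" // kdmpDriverOnly or mixedDriver
}

func main() {
	fmt.Println(postExecRuleTiming(nonKdmpDriverOnly, true)) // immediately after triggering
	fmt.Println(postExecRuleTiming(mixedDriver, true))       // after volume backup succeeds
}
```

The asymmetry presumably exists because kdmp volume backups complete asynchronously, so running the rule at trigger time would be too early for them; this diff does not state the rationale explicitly.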
 	// If the backup hasn't failed move on to the next stage.
 	if backup.Status.Status != stork_api.ApplicationBackupStatusFailed {
 		backup.Status.Stage = stork_api.ApplicationBackupStageApplications
@@ -1018,12 +1060,68 @@ func (a *ApplicationBackupController) uploadCRDResources(backup *stork_api.Appli
 	ruleset.AddPlural("quota", "quotas")
 	ruleset.AddPlural("prometheus", "prometheuses")
 	ruleset.AddPlural("mongodbcommunity", "mongodbcommunity")
+	v1CrdApiReqrd, err := version.RequiresV1Registration()
+	if err != nil {
+		return err
+	}
+	if v1CrdApiReqrd {
+		var crds []*apiextensionsv1.CustomResourceDefinition
+		crdsGroups := make(map[string]bool)
+		// First collect the groups of the CRDs that have CRs being backed up
+		for _, crd := range crdList.Items {
+			for _, v := range crd.Resources {
+				if _, ok := resKinds[v.Kind]; !ok {
+					continue
+				}
+				crdsGroups[utils.GetTrimmedGroupName(v.Group)] = true
+			}
+
+		}
+		// Pick up all the CRDs that belong to a group in the crdsGroups map
+		for _, crd := range crdList.Items {
+			for _, v := range crd.Resources {
+				if _, ok := crdsGroups[utils.GetTrimmedGroupName(v.Group)]; !ok {
+					continue
+				}
+				crdName := ruleset.Pluralize(strings.ToLower(v.Kind)) + "." + v.Group
+				res, err := apiextensions.Instance().GetCRD(crdName, metav1.GetOptions{})
+				if err != nil {
+					if k8s_errors.IsNotFound(err) {
+						continue
+					}
+					log.ApplicationBackupLog(backup).Errorf("Unable to get custom resource definition for %s, err: %v", v.Kind, err)
+					return err
+				}
+				crds = append(crds, res)
+			}
+
+		}
+		jsonBytes, err := json.MarshalIndent(crds, "", " ")
+		if err != nil {
+			return err
+		}
+		if err := a.uploadObject(backup, crdObjectName, jsonBytes); err != nil {
+			return err
+		}
+		return nil
+	}
 	var crds []*apiextensionsv1beta1.CustomResourceDefinition
+	crdsGroups := make(map[string]bool)
+	// First collect the groups of the CRDs that have CRs being backed up
 	for _, crd := range crdList.Items {
 		for _, v := range crd.Resources {
 			if _, ok := resKinds[v.Kind]; !ok {
 				continue
 			}
+			crdsGroups[utils.GetTrimmedGroupName(v.Group)] = true
+		}
+	}
+	// Pick up all the CRDs that belong to a group in the crdsGroups map
+	for _, crd := range crdList.Items {
+		for _, v := range crd.Resources {
+			if _, ok := crdsGroups[utils.GetTrimmedGroupName(v.Group)]; !ok {
+				continue
+			}
 			crdName := ruleset.Pluralize(strings.ToLower(v.Kind)) + "." + v.Group
 			res, err := apiextensions.Instance().GetCRDV1beta1(crdName, metav1.GetOptions{})
 			if err != nil {
@@ -1059,11 +1157,35 @@ func (a *ApplicationBackupController) uploadMetadata(
 	return a.uploadObject(backup, metadataObjectName, jsonBytes)
 }

+// IsNFSBackuplocationType reports whether the backup location referenced by the backup is of type NFS
+func IsNFSBackuplocationType(
+	backup *stork_api.ApplicationBackup,
+) (bool, error) {
+	backupLocation, err := storkops.Instance().GetBackupLocation(backup.Spec.BackupLocation, backup.Namespace)
+	if err != nil {
+		return false, fmt.Errorf("error getting backup location path for backup [%v/%v]: %v", backup.Namespace, backup.Name, err)
+	}
+	if backupLocation.Location.Type == stork_api.BackupLocationNFS {
+		return true, nil
+	}
+	return false, nil
+}
+
+func getResourceExportCRName(opsPrefix, crUID, ns string) string {
+	name := fmt.Sprintf("%s-%s-%s", opsPrefix, utils.GetShortUID(crUID), ns)
+	name = utils.GetValidLabel(name)
+	return name
+}
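Reviewer note: a quick sketch of the naming scheme used by `getResourceExportCRName`, without the `pkg/utils` helpers. The `"nfs-backup"` prefix is a stand-in for `utils.PrefixNFSBackup` (its literal value is not shown in this diff), and the 8-character truncation is only an assumption about what `utils.GetShortUID` does; the real code also runs the result through `utils.GetValidLabel` to keep it within Kubernetes name/label limits.

```go
package main

import "fmt"

// buildCRName mirrors getResourceExportCRName: prefix + shortened CR UID +
// namespace, e.g. "nfs-backup-1a2b3c4d-demo".
func buildCRName(prefix, uid, ns string) string {
	short := uid
	if len(short) > 8 {
		short = short[:8] // stand-in for utils.GetShortUID
	}
	return fmt.Sprintf("%s-%s-%s", prefix, short, ns)
}

func main() {
	fmt.Println(buildCRName("nfs-backup", "1a2b3c4d-5e6f-7081-92a3-b4c5d6e7f809", "demo"))
}
```

Deriving the name from the backup's UID and namespace makes the lookup in the reconcile loop below deterministic, so the controller can find or create the CR without storing extra state.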
+
 func (a *ApplicationBackupController) backupResources(
 	backup *stork_api.ApplicationBackup,
 ) error {
 	var err error
 	var resourceTypes []metav1.APIResource
+	nfs, err := IsNFSBackuplocationType(backup)
+	if err != nil {
+		logrus.Errorf("error in checking backuplocation type: %v", err)
+		return err
+	}
 	// Listing all resource types
 	if len(backup.Spec.ResourceTypes) != 0 {
 		optionalResourceTypes := []string{}
@@ -1215,6 +1337,114 @@
 		return err
 	}

+	if nfs {
+		// Check whether a ResourceExport is already present
+		crName := getResourceExportCRName(utils.PrefixNFSBackup, string(backup.UID), backup.Namespace)
+		resourceExport, err := kdmpShedOps.Instance().GetResourceExport(crName, a.backupAdminNamespace)
+		if err != nil {
+			if k8s_errors.IsNotFound(err) {
+				// create the resource export CR
+				resourceExport := &kdmpapi.ResourceExport{}
+				// Adding required labels for debugging
+				labels := make(map[string]string)
+				labels[utils.ApplicationBackupCRNameKey] = utils.GetValidLabel(backup.Name)
+				labels[utils.ApplicationBackupCRUIDKey] = utils.GetValidLabel(utils.GetShortUID(string(backup.UID)))
+				// If the backup is from px-backup, record the backup object details in the labels
+				if val, ok := backup.Annotations[utils.PxbackupAnnotationCreateByKey]; ok {
+					if val == utils.PxbackupAnnotationCreateByValue {
+						labels[utils.BackupObjectNameKey] = utils.GetValidLabel(backup.Annotations[utils.PxbackupObjectNameKey])
+						labels[utils.BackupObjectUIDKey] = utils.GetValidLabel(backup.Annotations[utils.PxbackupObjectUIDKey])
+					}
+				}
+				resourceExport.Labels = labels
+				resourceExport.Annotations = make(map[string]string)
+				resourceExport.Annotations[utils.SkipResourceAnnotation] = "true"
+				resourceExport.Name = getResourceExportCRName(utils.PrefixNFSBackup, string(backup.UID), backup.Namespace)
+				resourceExport.Namespace = a.backupAdminNamespace
+				resourceExport.Spec.Type = kdmpapi.ResourceExportBackup
+				source := &kdmpapi.ResourceExportObjectReference{
+					APIVersion: backup.APIVersion,
+					Kind:       backup.Kind,
+					Namespace:  backup.Namespace,
+					Name:       backup.Name,
+				}
+				backupLocation, err := storkops.Instance().GetBackupLocation(backup.Spec.BackupLocation, backup.Namespace)
+				if err != nil {
+					return fmt.Errorf("error getting backup location path: %v", err)
+				}
+				destination := &kdmpapi.ResourceExportObjectReference{
+					// TODO: GetBackupLocation is not returning APIVersion and Kind.
+					// Hardcoding for now.
+					// APIVersion: backupLocation.APIVersion,
+					// Kind:       backupLocation.Kind,
+					APIVersion: utils.StorkAPIVersion,
+					Kind:       utils.BackupLocationKind,
+					Namespace:  backupLocation.Namespace,
+					Name:       backupLocation.Name,
+				}
+				resourceExport.Spec.TriggeredFrom = kdmputils.TriggeredFromStork
+				storkPodNs, err := k8sutils.GetStorkPodNamespace()
+				if err != nil {
+					logrus.Errorf("error in getting stork pod namespace: %v", err)
+					return err
+				}
+				resourceExport.Spec.TriggeredFromNs = storkPodNs
+				resourceExport.Spec.Source = *source
+				resourceExport.Spec.Destination = *destination
+
+				_, err = kdmpShedOps.Instance().CreateResourceExport(resourceExport)
+				if err != nil {
+					logrus.Errorf("failed to create ResourceExport CR [%v/%v]: %v", resourceExport.Namespace, resourceExport.Name, err)
+					return err
+				}
+				return nil
+			}
+			logrus.Errorf("failed to get backup resourceExport CR [%v/%v]: %v", resourceExport.Namespace, resourceExport.Name, err)
+			// Will retry in the next cycle of the reconciler.
+			return nil
+		} else {
+			var message string
+			// Check the status of the resourceExport CR and propagate it to the applicationBackup CR
+			switch resourceExport.Status.Status {
+			case kdmpapi.ResourceExportStatusFailed:
+				message = fmt.Sprintf("Error uploading resources: %v", err)
+				backup.Status.Status = stork_api.ApplicationBackupStatusFailed
+				backup.Status.Stage = stork_api.ApplicationBackupStageFinal
+				backup.Status.Reason = message
+				backup.Status.LastUpdateTimestamp = metav1.Now()
+				backup.Status.FinishTimestamp = metav1.Now()
+				err = a.client.Update(context.TODO(), backup)
+				if err != nil {
+					return err
+				}
+				a.recorder.Event(backup,
+					v1.EventTypeWarning,
+					string(stork_api.ApplicationBackupStatusFailed),
+					message)
+				log.ApplicationBackupLog(backup).Errorf(message)
+				return err
+			case kdmpapi.ResourceExportStatusSuccessful:
+				backup.Status.BackupPath = GetObjectPath(backup)
+				backup.Status.Stage = stork_api.ApplicationBackupStageFinal
+				backup.Status.FinishTimestamp = metav1.Now()
+				backup.Status.Status = stork_api.ApplicationBackupStatusSuccessful
+				backup.Status.Reason = "Volumes and resources were backed up successfully"
+				// Only on success compute the total backup size
+				for _, vInfo := range backup.Status.Volumes {
+					backup.Status.TotalSize += vInfo.TotalSize
+				}
+			case kdmpapi.ResourceExportStatusInitial:
+			case kdmpapi.ResourceExportStatusPending:
+			case kdmpapi.ResourceExportStatusInProgress:
+				backup.Status.LastUpdateTimestamp = metav1.Now()
+			}
+			err = a.client.Update(context.TODO(), backup)
+			if err != nil {
+				return err
+			}
+			return nil
+		}
+	}
 	// Upload the resources to the backup location
 	if err = a.uploadResources(backup, allObjects); err != nil {
 		message := fmt.Sprintf("Error uploading resources: %v", err)
@@ -1305,6 +1535,11 @@
 		}
 		return true, err
 	}
+	// TODO: for the nfs type we need to invoke job-based deletion.
+	// For now, skipping it.
+	if backupLocation.Location.Type == stork_api.BackupLocationNFS {
+		return true, nil
+	}
 	bucket, err := objectstore.GetBucket(backupLocation)
 	if err != nil {
 		return true, err
 	}
@@ -1409,5 +1644,33 @@ func (a *ApplicationBackupController) cleanupResources(
 			logrus.Errorf("unable to cleanup post backup resources, err: %v", err)
 		}
 	}
+	// For other backuplocation types, a NotFound error is expected.
+	crName := getResourceExportCRName(utils.PrefixNFSBackup, string(backup.UID), backup.Namespace)
+	err := kdmpShedOps.Instance().DeleteResourceExport(crName, a.backupAdminNamespace)
+	if err != nil && !k8s_errors.IsNotFound(err) {
+		errMsg := fmt.Sprintf("failed to delete resource export CR [%v]: %v", crName, err)
+		log.ApplicationBackupLog(backup).Errorf("%v", errMsg)
+		return err
+	}
 	return nil
 }
+
+func (a *ApplicationBackupController) checkVolumeDriverCombination(volumes []*stork_api.ApplicationBackupVolumeInfo) string {
+	var kdmpCount, totalCount, nonKdmpCount int
+	totalCount = len(volumes)
+	for _, vInfo := range volumes {
+		if vInfo.DriverName == volume.KDMPDriverName {
+			kdmpCount++
+		} else {
+			nonKdmpCount++
+		}
+	}
+
+	if totalCount == kdmpCount {
+		return kdmpDriverOnly
+	} else if totalCount == nonKdmpCount {
+		return nonKdmpDriverOnly
+	}
+	return mixedDriver
+}
diff --git a/pkg/applicationmanager/controllers/applicationbackupschedule.go b/pkg/applicationmanager/controllers/applicationbackupschedule.go
index 7b01192486..ad2615b9c3 100644
--- a/pkg/applicationmanager/controllers/applicationbackupschedule.go
+++ b/pkg/applicationmanager/controllers/applicationbackupschedule.go
@@ -14,6 +14,7 @@ import (
 	"github.com/libopenstorage/stork/pkg/log"
 	"github.com/libopenstorage/stork/pkg/objectstore"
 	"github.com/libopenstorage/stork/pkg/schedule"
+	"github.com/libopenstorage/stork/pkg/utils"
 	"github.com/libopenstorage/stork/pkg/version"
 	"github.com/portworx/sched-ops/k8s/apiextensions"
 	storkops "github.com/portworx/sched-ops/k8s/stork"
@@ -41,7 +42,6 @@ const (
 	// ApplicationBackupObjectLockRetentionAnnotation - object lock retention period annotation
 	// Since this annotation is used in the px-backup, creating with portworx.io annotation prefix.
 	ApplicationBackupObjectLockRetentionAnnotation = "portworx.io/" + "object-lock-retention-period"
-	incrementalCountAnnotation                     = "portworx.io/cloudsnap-incremental-count"
 	dayInSec                                       = 86400
 	//ObjectLockDefaultIncrementalCount default incremental backup count
 	ObjectLockDefaultIncrementalCount = 5
@@ -453,8 +453,8 @@ func (s *ApplicationBackupScheduleController) startApplicationBackup(backupSched
 				backupscheduleCreationTime, diff, elaspedDays, elaspedDaysInSecs, currentDayStartTime)
 			if lastSuccessfulBackupCreateTime < currentDayStartTime {
-				// forcing it to be full backup, by setting the incrementalCountAnnotation to zero
-				backup.Spec.Options[incrementalCountAnnotation] = fmt.Sprintf("%v", 0)
+				// forcing it to be a full backup by setting the PXIncrementalCountAnnotation to zero
+				backup.Spec.Options[utils.PXIncrementalCountAnnotation] = fmt.Sprintf("%v", 0)
 			}
 		}
 	}
diff --git a/pkg/applicationmanager/controllers/applicationclone.go b/pkg/applicationmanager/controllers/applicationclone.go
index 9b223ddf3d..e5541e61cc 100644
--- a/pkg/applicationmanager/controllers/applicationclone.go
+++ b/pkg/applicationmanager/controllers/applicationclone.go
@@ -588,7 +588,7 @@ func (a *ApplicationCloneController) preparePVResource(
 		return err
 	}
 
-	_, err := a.volDriver.UpdateMigratedPersistentVolumeSpec(&pv, nil)
+	_, err := a.volDriver.UpdateMigratedPersistentVolumeSpec(&pv, nil, nil)
 	if err != nil {
 		return err
 	}
diff --git a/pkg/applicationmanager/controllers/applicationrestore.go b/pkg/applicationmanager/controllers/applicationrestore.go
index 66d16c2086..6def2c50e3 100644
--- a/pkg/applicationmanager/controllers/applicationrestore.go
+++ b/pkg/applicationmanager/controllers/applicationrestore.go
@@ -18,16 +18,20 @@ import (
 	"github.com/libopenstorage/stork/pkg/log"
 	"github.com/libopenstorage/stork/pkg/objectstore"
 	"github.com/libopenstorage/stork/pkg/resourcecollector"
+	"github.com/libopenstorage/stork/pkg/utils"
 	"github.com/libopenstorage/stork/pkg/version"
+	kdmpapi "github.com/portworx/kdmp/pkg/apis/kdmp/v1alpha1"
+	kdmputils "github.com/portworx/kdmp/pkg/drivers/utils"
 	"github.com/portworx/sched-ops/k8s/apiextensions"
 	"github.com/portworx/sched-ops/k8s/core"
+	kdmpShedOps "github.com/portworx/sched-ops/k8s/kdmp"
 	storkops "github.com/portworx/sched-ops/k8s/stork"
 	"github.com/sirupsen/logrus"
 	v1 "k8s.io/api/core/v1"
 	apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1"
 	apiextensionsv1beta1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1"
 	apiextensionsclient "k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset"
-	"k8s.io/apimachinery/pkg/api/errors"
+	k8s_errors "k8s.io/apimachinery/pkg/api/errors"
 	"k8s.io/apimachinery/pkg/api/meta"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
@@ -149,7 +153,7 @@ func (a *ApplicationRestoreController) createNamespaces(backup *storkapi.Applica
 		})
 		log.ApplicationRestoreLog(restore).Tracef("Creating dest namespace %v", ns.Name)
 		if err != nil {
-			if errors.IsAlreadyExists(err) {
+			if k8s_errors.IsAlreadyExists(err) {
 				oldNS, err := core.Instance().GetNamespace(ns.GetName())
 				if err != nil {
 					return err
@@ -201,7 +205,7 @@ func (a *ApplicationRestoreController) createNamespaces(backup *storkapi.Applica
 	}
 	for _, namespace := range restore.Spec.NamespaceMapping {
 		if ns, err := core.Instance().GetNamespace(namespace); err != nil {
-			if errors.IsNotFound(err) {
+			if k8s_errors.IsNotFound(err) {
 				if _, err := core.Instance().CreateNamespace(&v1.Namespace{
 					ObjectMeta: metav1.ObjectMeta{
 						Name: ns.Name,
@@ -226,7 +230,7 @@ func (a *ApplicationRestoreController) Reconcile(ctx context.Context, request re
 	restore := &storkapi.ApplicationRestore{}
 	err := a.client.Get(context.TODO(), request.NamespacedName, restore)
 	if err != nil {
-		if errors.IsNotFound(err) {
+		if k8s_errors.IsNotFound(err) {
 			// Request object not found, could have been deleted after reconcile request.
 			// Owned objects are automatically garbage collected. For additional cleanup logic use finalizers.
 			// Return and don't requeue
@@ -276,14 +280,28 @@ func (a *ApplicationRestoreController) handle(ctx context.Context, restore *stor
 		return nil
 	}
 
-	err = a.verifyNamespaces(restore)
+	backup, err := storkops.Instance().GetApplicationBackup(restore.Spec.BackupName, restore.Namespace)
 	if err != nil {
-		log.ApplicationRestoreLog(restore).Errorf(err.Error())
-		a.recorder.Event(restore,
-			v1.EventTypeWarning,
-			string(storkapi.ApplicationRestoreStatusFailed),
-			err.Error())
-		return nil
+		log.ApplicationRestoreLog(restore).Errorf("Error getting backup: %v", err)
+		return err
+	}
+
+	nfs, err := IsNFSBackuplocationType(backup)
+	if err != nil {
+		logrus.Errorf("error in checking backuplocation type: %v", err)
+		return err
+	}
+
+	if !nfs {
+		err = a.verifyNamespaces(restore)
+		if err != nil {
+			log.ApplicationRestoreLog(restore).Errorf(err.Error())
+			a.recorder.Event(restore,
+				v1.EventTypeWarning,
+				string(storkapi.ApplicationRestoreStatusFailed),
+				err.Error())
+			return nil
+		}
 	}
 
 	switch restore.Status.Stage {
@@ -338,7 +355,6 @@ func (a *ApplicationRestoreController) namespaceRestoreAllowed(restore *storkapi
 	}
 	return true
 }
-
 func (a *ApplicationRestoreController) getDriversForRestore(restore *storkapi.ApplicationRestore) map[string]bool {
 	drivers := make(map[string]bool)
 	for _, volumeInfo := range restore.Status.Volumes {
@@ -397,6 +413,7 @@ func (a *ApplicationRestoreController) updateRestoreCRInVolumeStage(
 	if volumeInfos != nil {
 		restore.Status.Volumes = append(restore.Status.Volumes, volumeInfos...)
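+		// Volume infos discovered in this pass are accumulated here and persisted
+		// by the client.Update call below.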
 	}
+
 	err = a.client.Update(context.TODO(), restore)
 	if err != nil {
 		time.Sleep(retrySleep)
@@ -411,7 +428,59 @@ func (a *ApplicationRestoreController) updateRestoreCRInVolumeStage(
 	return restore, nil
 }
 
+func convertResourceVolInfoToAppBkpVolInfo(
+	volInfo []*kdmpapi.ResourceBackupVolumeInfo,
+) (resVolInfo []*storkapi.ApplicationBackupVolumeInfo) {
+	restoreVolumeInfos := make([]*storkapi.ApplicationBackupVolumeInfo, 0)
+	for _, vol := range volInfo {
+		restoreVolInfo := &storkapi.ApplicationBackupVolumeInfo{}
+		restoreVolInfo.PersistentVolumeClaim = vol.PersistentVolumeClaim
+		restoreVolInfo.PersistentVolumeClaimUID = vol.PersistentVolumeClaimUID
+		restoreVolInfo.Namespace = vol.Namespace
+		restoreVolInfo.Volume = vol.Volume
+		restoreVolInfo.BackupID = vol.BackupID
+		restoreVolInfo.DriverName = vol.DriverName
+		restoreVolInfo.Status = storkapi.ApplicationBackupStatusType(vol.Status)
+		restoreVolInfo.Zones = vol.Zones
+		restoreVolInfo.Reason = vol.Reason
+		restoreVolInfo.Options = vol.Options
+		restoreVolInfo.TotalSize = vol.TotalSize
+		restoreVolInfo.ActualSize = vol.ActualSize
+		restoreVolInfo.StorageClass = vol.StorageClass
+		restoreVolInfo.Provisioner = vol.Provisioner
+		restoreVolInfo.VolumeSnapshot = vol.VolumeSnapshot
+		restoreVolumeInfos = append(restoreVolumeInfos, restoreVolInfo)
+	}
+
+	return restoreVolumeInfos
+}
+
+func convertResourceVolInfoToAppRestoreVolInfo(
+	volInfo []*kdmpapi.ResourceRestoreVolumeInfo,
+) (resVolInfo []*storkapi.ApplicationRestoreVolumeInfo) {
+	restoreVolumeInfos := make([]*storkapi.ApplicationRestoreVolumeInfo, 0)
+	for _, vol := range volInfo {
+		restoreVolInfo := &storkapi.ApplicationRestoreVolumeInfo{}
+		restoreVolInfo.PersistentVolumeClaim = vol.PersistentVolumeClaim
+		restoreVolInfo.PersistentVolumeClaimUID = vol.PersistentVolumeClaimUID
+		restoreVolInfo.DriverName = vol.DriverName
+		restoreVolInfo.Status = storkapi.ApplicationRestoreStatusType(vol.Status)
+		restoreVolInfo.Zones = vol.Zones
+		restoreVolInfo.Reason = vol.Reason
+		restoreVolInfo.Options = vol.Options
+		restoreVolInfo.TotalSize = vol.TotalSize
+		restoreVolInfo.SourceVolume = vol.SourceVolume
+		restoreVolInfo.SourceNamespace = vol.SourceNamespace
+		restoreVolInfo.RestoreVolume = vol.RestoreVolume
+
+		restoreVolumeInfos = append(restoreVolumeInfos, restoreVolInfo)
+	}
+
+	return restoreVolumeInfos
+}
+
 func (a *ApplicationRestoreController) restoreVolumes(restore *storkapi.ApplicationRestore) error {
+	funct := "restoreVolumes"
 	restore.Status.Stage = storkapi.ApplicationRestoreStageVolumes
 
 	backup, err := storkops.Instance().GetApplicationBackup(restore.Spec.BackupName, restore.Namespace)
@@ -477,93 +546,212 @@ func (a *ApplicationRestoreController) restoreVolumes(restore *storkapi.Applicat
 	namespacedName.Namespace = restore.Namespace
 	namespacedName.Name = restore.Name
 	restoreCompleteList := make([]*storkapi.ApplicationRestoreVolumeInfo, 0)
+	nfs, err := IsNFSBackuplocationType(backup)
+	if err != nil {
+		logrus.Errorf("error in checking backuplocation type: %v", err)
+		return err
+	}
 	if len(restore.Status.Volumes) != pvcCount {
+		// Here backupVolumeInfoMappings is framed based on driver name mapping, hence startRestore()
+		// gets called once per driver
+		var sErr error
 		for driverName, vInfos := range backupVolumeInfoMappings {
+			restoreVolumeInfos := make([]*storkapi.ApplicationRestoreVolumeInfo, 0)
 			backupVolInfos := vInfos
-			existingRestoreVolInfos := make([]*storkapi.ApplicationRestoreVolumeInfo, 0)
 			driver, err := volume.Get(driverName)
-			if err != nil {
-				return err
-			}
-
-			// For each driver, check if it needs any additional resources to be
-			// restored before starting the volume restore
-			objects, err := a.downloadResources(backup, restore.Spec.BackupLocation, restore.Namespace)
-			if err != nil {
-				log.ApplicationRestoreLog(restore).Errorf("Error downloading resources: %v", err)
-				return err
-			}
-
-			// Skip pv/pvc if replacepolicy is set to retain to avoid creating
-			if restore.Spec.ReplacePolicy == storkapi.ApplicationRestoreReplacePolicyRetain {
-				backupVolInfos, existingRestoreVolInfos, err = a.skipVolumesFromRestoreList(restore, objects, driver, vInfos)
+			// BL NFS + kdmp = nfs code path
+			// s3 + kdmp = legacy code path
+			// BL NFS + EBS/GKE/Azure = legacy code path
+			// s3 + EBS/GKE/Azure = legacy code path
+			if !nfs || (nfs && driverName != volume.KDMPDriverName) {
+				existingRestoreVolInfos := make([]*storkapi.ApplicationRestoreVolumeInfo, 0)
 				if err != nil {
-					log.ApplicationRestoreLog(restore).Errorf("Error while checking pvcs: %v", err)
 					return err
 				}
-			}
-
-			preRestoreObjects, err := driver.GetPreRestoreResources(backup, restore, objects)
-			if err != nil {
-				log.ApplicationRestoreLog(restore).Errorf("Error getting PreRestore Resources: %v", err)
-				return err
-			}
-
-			// pvc creation is not part of kdmp
-			if driverName != "kdmp" {
-				if err := a.applyResources(restore, preRestoreObjects); err != nil {
+				// For each driver, check if it needs any additional resources to be
+				// restored before starting the volume restore
+				objects, err := a.downloadResources(backup, restore.Spec.BackupLocation, restore.Namespace)
+				if err != nil {
+					log.ApplicationRestoreLog(restore).Errorf("Error downloading resources: %v", err)
 					return err
-				}
-			}
-
-			// Pre-delete resources for CSI driver
-			if (driverName == "csi" || driverName == "kdmp") && restore.Spec.ReplacePolicy == storkapi.ApplicationRestoreReplacePolicyDelete {
-				objectMap := storkapi.CreateObjectsMap(restore.Spec.IncludeResources)
-				objectBasedOnIncludeResources := make([]runtime.Unstructured, 0)
-				for _, o := range objects {
-					skip, err := a.resourceCollector.PrepareResourceForApply(
-						o,
-						objects,
-						objectMap,
-						restore.Spec.NamespaceMapping,
-						nil, // no need to set storage class mappings at this stage
-						nil,
-						restore.Spec.IncludeOptionalResourceTypes,
-						nil,
-					)
+				}
+				// Skip pv/pvc if replacepolicy is set to retain, to avoid re-creating them
+				if restore.Spec.ReplacePolicy == storkapi.ApplicationRestoreReplacePolicyRetain {
+					backupVolInfos, existingRestoreVolInfos, err = a.skipVolumesFromRestoreList(restore, objects, driver, vInfos)
 					if err != nil {
+						log.ApplicationRestoreLog(restore).Errorf("Error while checking pvcs: %v", err)
 						return err
 					}
-					if !skip {
-						objectBasedOnIncludeResources = append(
-							objectBasedOnIncludeResources,
+				}
+				preRestoreObjects, err := driver.GetPreRestoreResources(backup, restore, objects)
+				if err != nil {
+					log.ApplicationRestoreLog(restore).Errorf("Error getting PreRestore Resources: %v", err)
+					return err
+				}
+
+				// Pre-delete resources for CSI driver
+				if (driverName == "csi" || driverName == volume.KDMPDriverName) && restore.Spec.ReplacePolicy == storkapi.ApplicationRestoreReplacePolicyDelete {
+					objectMap := storkapi.CreateObjectsMap(restore.Spec.IncludeResources)
+					objectBasedOnIncludeResources := make([]runtime.Unstructured, 0)
+					for _, o := range objects {
+						skip, err := a.resourceCollector.PrepareResourceForApply(
 							o,
+							objects,
+							objectMap,
+							restore.Spec.NamespaceMapping,
+							nil, // no need to set storage class mappings at this stage
+							nil,
+							restore.Spec.IncludeOptionalResourceTypes,
+							nil,
 						)
+						if err != nil {
+							return err
+						}
+						if !skip {
+							objectBasedOnIncludeResources = append(
+								objectBasedOnIncludeResources,
+								o,
+							)
+						}
+					}
+					tempObjects, err := a.getNamespacedObjectsToDelete(
+						restore,
+						objectBasedOnIncludeResources,
+					)
+					if err != nil {
+						return err
+					}
+					err = a.resourceCollector.DeleteResources(
+						a.dynamicInterface,
+						tempObjects)
+					if err != nil {
+						return err
+					}
+				}
+				// pvc creation is not part of kdmp
+				if driverName != volume.KDMPDriverName {
+					if err := a.applyResources(restore, preRestoreObjects); err != nil {
+						return err
 					}
 				}
-				tempObjects, err := a.getNamespacedObjectsToDelete(
-					restore,
-					objectBasedOnIncludeResources,
-				)
+				restoreCompleteList = append(restoreCompleteList, existingRestoreVolInfos...)
+				restoreVolumeInfos, sErr = driver.StartRestore(restore, backupVolInfos, preRestoreObjects)
 				if err != nil {
 					return err
 				}
-				err = a.resourceCollector.DeleteResources(
-					a.dynamicInterface,
-					tempObjects)
+			}
+			// Check whether ResourceExport is present or not
+			if nfs && driverName == volume.KDMPDriverName {
+				err = a.client.Update(context.TODO(), restore)
 				if err != nil {
-					return err
+					time.Sleep(retrySleep)
+					return nil
+				}
+				crName := getResourceExportCRName(utils.PrefixNFSRestorePVC, string(restore.UID), restore.Namespace)
+				resourceExport, err := kdmpShedOps.Instance().GetResourceExport(crName, restore.Namespace)
+				if err != nil {
+					if k8s_errors.IsNotFound(err) {
+						// create resource export CR
+						resourceExport = &kdmpapi.ResourceExport{}
+						// Adding required label for debugging
+						labels := make(map[string]string)
+						labels[utils.ApplicationRestoreCRNameKey] = utils.GetValidLabel(restore.Name)
+						labels[utils.ApplicationRestoreCRUIDKey] = utils.GetValidLabel(utils.GetShortUID(string(restore.UID)))
+						// If restore from px-backup, update the restore object details in the label
+						if val, ok := backup.Annotations[utils.PxbackupAnnotationCreateByKey]; ok {
+							if val == utils.PxbackupAnnotationCreateByValue {
+								labels[utils.RestoreObjectNameKey] = utils.GetValidLabel(backup.Annotations[utils.PxbackupObjectNameKey])
+								labels[utils.RestoreObjectUIDKey] = utils.GetValidLabel(backup.Annotations[utils.PxbackupObjectUIDKey])
+							}
+						}
+						resourceExport.Labels = labels
+						resourceExport.Annotations = make(map[string]string)
+						resourceExport.Annotations[utils.SkipResourceAnnotation] = "true"
+						resourceExport.Name = crName
+						resourceExport.Namespace = restore.Namespace
+						resourceExport.Spec.Type = kdmpapi.ResourceExportBackup
+						// TODO: In the restore path we need to change source and destination ref as it is confusing now
+						// Usually dest means where it's backed up or restore to
+						source := &kdmpapi.ResourceExportObjectReference{
+							APIVersion: restore.APIVersion,
+							Kind:       restore.Kind,
+							Namespace:  restore.Namespace,
+							Name:       restore.Name,
+						}
+						backupLocation, err := storkops.Instance().GetBackupLocation(backup.Spec.BackupLocation, backup.Namespace)
+						if err != nil {
+							return fmt.Errorf("error getting backup location path: %v", err)
+						}
+						destination := &kdmpapi.ResourceExportObjectReference{
+							// TODO: GetBackupLocation is not returning APIVersion and kind.
+							// Hardcoding for now.
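+							// Assumption: these constants mirror the stork.libopenstorage.org/v1alpha1
+							// BackupLocation CRD that GetBackupLocation returned above.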
+							APIVersion: utils.StorkAPIVersion,
+							Kind:       utils.BackupLocationKind,
+							Namespace:  backupLocation.Namespace,
+							Name:       backupLocation.Name,
+						}
+						resourceExport.Spec.TriggeredFrom = kdmputils.TriggeredFromStork
+						storkPodNs, err := k8sutils.GetStorkPodNamespace()
+						if err != nil {
+							logrus.Errorf("error in getting stork pod namespace: %v", err)
+							return err
+						}
+						resourceExport.Spec.TriggeredFromNs = storkPodNs
+						resourceExport.Spec.Source = *source
+						resourceExport.Spec.Destination = *destination
+						_, err = kdmpShedOps.Instance().CreateResourceExport(resourceExport)
+						if err != nil {
+							logrus.Errorf("failed to create resourceExport CR %v: %v", crName, err)
+							return err
+						}
+						return nil
+					}
+					logrus.Errorf("%s: error reading resourceExport CR %v: %v", funct, crName, err)
+					return nil
+				} else {
+					var message string
+					logrus.Infof("%s: resourceExport CR %v status: %v", funct, crName, resourceExport.Status.Status)
+					switch resourceExport.Status.Status {
+					case kdmpapi.ResourceExportStatusFailed:
+						message = fmt.Sprintf("%s: error in PVC creation via resourceExport CR %v: %v", funct, crName, resourceExport.Status.Reason)
+						restore.Status.Status = storkapi.ApplicationRestoreStatusFailed
+						restore.Status.Stage = storkapi.ApplicationRestoreStageFinal
+						restore.Status.Reason = message
+						restore.Status.LastUpdateTimestamp = metav1.Now()
+						err = a.client.Update(context.TODO(), restore)
+						if err != nil {
+							return err
+						}
+						a.recorder.Event(restore,
+							v1.EventTypeWarning,
+							string(storkapi.ApplicationRestoreStatusFailed),
+							message)
+						log.ApplicationRestoreLog(restore).Errorf(message)
+						return err
+					case kdmpapi.ResourceExportStatusInitial:
+						return nil
+					case kdmpapi.ResourceExportStatusPending:
+						return nil
+					case kdmpapi.ResourceExportStatusInProgress:
+						return nil
+					case kdmpapi.ResourceExportStatusSuccessful:
+						backupVolInfos := convertResourceVolInfoToAppBkpVolInfo(resourceExport.VolumesInfo)
+						existingRestoreVolInfos := convertResourceVolInfoToAppRestoreVolInfo(resourceExport.ExistingVolumesInfo)
+						restoreCompleteList = append(restoreCompleteList, existingRestoreVolInfos...)
+						restoreVolumeInfos, sErr = driver.StartRestore(restore, backupVolInfos, nil)
+					default:
+						logrus.Infof("%s: resourceExport CR [%v] status not yet available", funct, crName)
+						return nil
+					}
+				}
 			}
-
-			restoreCompleteList = append(restoreCompleteList, existingRestoreVolInfos...)
-			restoreVolumeInfos, err := driver.StartRestore(restore, backupVolInfos, preRestoreObjects)
-			if err != nil {
-				message := fmt.Sprintf("Error starting Application Restore for volumes: %v", err)
+			if sErr != nil {
+				logrus.Infof("%s: StartRestore returned: %v", funct, sErr)
+				message := fmt.Sprintf("Error starting Application Restore for volumes: %v", sErr)
 				log.ApplicationRestoreLog(restore).Errorf(message)
-				if _, ok := err.(*volume.ErrStorageProviderBusy); ok {
+				if _, ok := sErr.(*volume.ErrStorageProviderBusy); ok {
 					msg := fmt.Sprintf("Volume restores are in progress. Restores are failing for some volumes"+
-						" since the storage provider is busy. Restore will be retried. Error: %v", err)
+						" since the storage provider is busy. Restore will be retried. Error: %v", sErr)
 					a.recorder.Event(restore,
 						v1.EventTypeWarning,
 						string(storkapi.ApplicationRestoreStatusInProgress),
@@ -591,6 +779,7 @@ func (a *ApplicationRestoreController) restoreVolumes(restore *storkapi.Applicat
 				return err
 			}
 			restoreCompleteList = append(restoreCompleteList, restoreVolumeInfos...)
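+			// At this point restoreCompleteList holds both the volumes restored in this
+			// pass and any volumes retained from an existing deployment.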
+			logrus.Tracef("restoreCompleteList %+v", restoreCompleteList)
 		}
 		restore, err = a.updateRestoreCRInVolumeStage(
 			namespacedName,
@@ -603,6 +792,7 @@ func (a *ApplicationRestoreController) restoreVolumes(restore *storkapi.Applicat
 			return err
 		}
 	}
+	inProgress := false
 	// Skip checking status if no volumes are being restored
 	if len(restore.Status.Volumes) != 0 {
@@ -656,7 +846,6 @@ func (a *ApplicationRestoreController) restoreVolumes(restore *storkapi.Applicat
 			}
 		}
 	}
-
 	// Return if we have any volume restores still in progress
 	if inProgress || len(restore.Status.Volumes) != pvcCount {
 		return nil
@@ -679,7 +868,6 @@ func (a *ApplicationRestoreController) restoreVolumes(restore *storkapi.Applicat
 			return err
 		}
 	}
-
 	restore.Status.LastUpdateTimestamp = metav1.Now()
 	// Only on success compute the total restore size
 	for _, vInfo := range restore.Status.Volumes {
@@ -796,7 +984,7 @@ func (a *ApplicationRestoreController) downloadCRD(
 	for _, crd := range crds {
 		crd.ResourceVersion = ""
 		regCrd[crd.GetName()] = false
-		if _, err := client.ApiextensionsV1beta1().CustomResourceDefinitions().Create(context.TODO(), crd, metav1.CreateOptions{}); err != nil && !errors.IsAlreadyExists(err) {
+		if _, err := client.ApiextensionsV1beta1().CustomResourceDefinitions().Create(context.TODO(), crd, metav1.CreateOptions{}); err != nil && !k8s_errors.IsAlreadyExists(err) {
 			regCrd[crd.GetName()] = true
 			logrus.Warnf("error registering crds v1beta1 %v,%v", crd.GetName(), err)
 			continue
@@ -813,7 +1001,7 @@ func (a *ApplicationRestoreController) downloadCRD(
 		var updatedVersions []apiextensionsv1.CustomResourceDefinitionVersion
 		// try to apply as v1 crd
 		var err error
-		if _, err = client.ApiextensionsV1().CustomResourceDefinitions().Create(context.TODO(), crd, metav1.CreateOptions{}); err == nil || errors.IsAlreadyExists(err) {
+		if _, err = client.ApiextensionsV1().CustomResourceDefinitions().Create(context.TODO(), crd, metav1.CreateOptions{}); err == nil || k8s_errors.IsAlreadyExists(err) {
 			logrus.Infof("registered v1 crds %v,", crd.GetName())
 			continue
 		}
@@ -833,7 +1021,7 @@ func (a *ApplicationRestoreController) downloadCRD(
 		}
 		crd.Spec.Versions = updatedVersions
 
-		if _, err := client.ApiextensionsV1().CustomResourceDefinitions().Create(context.TODO(), crd, metav1.CreateOptions{}); err != nil && !errors.IsAlreadyExists(err) {
+		if _, err := client.ApiextensionsV1().CustomResourceDefinitions().Create(context.TODO(), crd, metav1.CreateOptions{}); err != nil && !k8s_errors.IsAlreadyExists(err) {
 			logrus.Warnf("error registering crdsv1 %v,%v", crd.GetName(), err)
 			continue
 		}
@@ -901,6 +1089,39 @@ func (a *ApplicationRestoreController) updateResourceStatus(
 	return nil
 }
 
+func (a *ApplicationRestoreController) updateResourceStatusFromRestoreCR(
+	restore *storkapi.ApplicationRestore,
+	resource *kdmpapi.ResourceRestoreResourceInfo,
+	status kdmpapi.ResourceRestoreStatus,
+	reason string,
+) {
+	var resourceStatus storkapi.ApplicationRestoreStatusType
+	switch status {
+	case kdmpapi.ResourceRestoreStatusSuccessful:
+		resourceStatus = storkapi.ApplicationRestoreStatusSuccessful
+	case kdmpapi.ResourceRestoreStatusRetained:
+		resourceStatus = storkapi.ApplicationRestoreStatusRetained
+	case kdmpapi.ResourceRestoreStatusFailed:
+		resourceStatus = storkapi.ApplicationRestoreStatusFailed
+	case kdmpapi.ResourceRestoreStatusInProgress:
+		resourceStatus = storkapi.ApplicationRestoreStatusInProgress
+	}
+	updatedResource := &storkapi.ApplicationRestoreResourceInfo{
+		ObjectInfo: storkapi.ObjectInfo{
+			Name:      resource.Name,
+			Namespace: resource.Namespace,
+			GroupVersionKind: metav1.GroupVersionKind{
+				Group:   resource.Group,
+				Version: resource.Version,
+				Kind:    resource.Kind,
+			},
+		},
+		Status: resourceStatus,
+		Reason: reason,
+	}
+	restore.Status.Resources = append(restore.Status.Resources, updatedResource)
+}
+
 func (a *ApplicationRestoreController) getPVNameMappings(
 	restore *storkapi.ApplicationRestore,
 	objects []runtime.Unstructured,
@@ -1022,7 +1243,7 @@ func (a *ApplicationRestoreController) skipVolumesFromRestoreList(
 		ns := val
 		pvc, err := core.Instance().GetPersistentVolumeClaim(pvcObject.Name, ns)
 		if err != nil {
-			if errors.IsNotFound(err) {
+			if k8s_errors.IsNotFound(err) {
 				newVolInfos = append(newVolInfos, bkupVolInfo)
 				continue
 			}
@@ -1188,7 +1409,7 @@ func (a *ApplicationRestoreController) applyResources(
 		err = a.resourceCollector.ApplyResource(
 			a.dynamicInterface,
 			o)
-		if err != nil && errors.IsAlreadyExists(err) {
+		if err != nil && k8s_errors.IsAlreadyExists(err) {
 			switch restore.Spec.ReplacePolicy {
 			case storkapi.ApplicationRestoreReplacePolicyDelete:
 				log.ApplicationRestoreLog(restore).Errorf("Error deleting %v %v during restore: %v", objectType.GetKind(), metadata.GetName(), err)
@@ -1236,15 +1457,136 @@ func (a *ApplicationRestoreController) restoreResources(
 		log.ApplicationRestoreLog(restore).Errorf("Error getting backup: %v", err)
 		return err
 	}
-
-	objects, err := a.downloadResources(backup, restore.Spec.BackupLocation, restore.Namespace)
+	nfs, err := IsNFSBackuplocationType(backup)
 	if err != nil {
-		log.ApplicationRestoreLog(restore).Errorf("Error downloading resources: %v", err)
+		logrus.Errorf("error in checking backuplocation type: %v", err)
 		return err
 	}
 
-	if err := a.applyResources(restore, objects); err != nil {
-		return err
+	doCleanup := true
+	if !nfs {
+		objects, err := a.downloadResources(backup, restore.Spec.BackupLocation, restore.Namespace)
+		if err != nil {
+			log.ApplicationRestoreLog(restore).Errorf("Error downloading resources: %v", err)
+			return err
+		}
+
+		if err := a.applyResources(restore, objects); err != nil {
+			return err
+		}
+	} else {
+		// Check whether ResourceExport is present or not
+		crName := getResourceExportCRName(utils.PrefixRestore, string(restore.UID), restore.Namespace)
+		resourceExport, err := kdmpShedOps.Instance().GetResourceExport(crName, restore.Namespace)
+		if err != nil {
+			if k8s_errors.IsNotFound(err) {
+				// create resource export CR
+				resourceExport := &kdmpapi.ResourceExport{}
+				// Adding required label for debugging
+				labels := make(map[string]string)
+				labels[utils.ApplicationRestoreCRNameKey] = utils.GetValidLabel(restore.Name)
+				labels[utils.ApplicationRestoreCRUIDKey] = utils.GetValidLabel(utils.GetShortUID(string(restore.UID)))
+				// If restore from px-backup, update the restore object details in the label
+				if val, ok := backup.Annotations[utils.PxbackupAnnotationCreateByKey]; ok {
+					if val == utils.PxbackupAnnotationCreateByValue {
+						labels[utils.RestoreObjectNameKey] = utils.GetValidLabel(backup.Annotations[utils.PxbackupObjectNameKey])
+						labels[utils.RestoreObjectUIDKey] = utils.GetValidLabel(backup.Annotations[utils.PxbackupObjectUIDKey])
+					}
+				}
+				resourceExport.Labels = labels
+				resourceExport.Annotations = make(map[string]string)
+				resourceExport.Annotations[utils.SkipResourceAnnotation] = "true"
+				resourceExport.Name = crName
+				resourceExport.Namespace = restore.Namespace
+				resourceExport.Spec.Type = kdmpapi.ResourceExportBackup
+				resourceExport.Spec.TriggeredFrom = kdmputils.TriggeredFromStork
+				storkPodNs, err := k8sutils.GetStorkPodNamespace()
+				if err != nil {
+					logrus.Errorf("error in getting stork pod namespace: %v", err)
+					return err
+				}
+				resourceExport.Spec.TriggeredFromNs = storkPodNs
+				source := &kdmpapi.ResourceExportObjectReference{
+					APIVersion: restore.APIVersion,
+					Kind:       restore.Kind,
+					Namespace:  restore.Namespace,
+					Name:       restore.Name,
+				}
+				backupLocation, err := storkops.Instance().GetBackupLocation(backup.Spec.BackupLocation, backup.Namespace)
+				if err != nil {
+					return fmt.Errorf("error getting backup location path %v: %v", backup.Spec.BackupLocation, err)
+				}
+				destination := &kdmpapi.ResourceExportObjectReference{
+					// TODO: .GetBackupLocation is not returning APIVersion and kind.
+					// Hardcoding for now.
+					// APIVersion: backupLocation.APIVersion,
+					// Kind:       backupLocation.Kind,
+					APIVersion: utils.StorkAPIVersion,
+					Kind:       utils.BackupLocationKind,
+					Namespace:  backupLocation.Namespace,
+					Name:       backupLocation.Name,
+				}
+				resourceExport.Spec.Source = *source
+				resourceExport.Spec.Destination = *destination
+				_, err = kdmpShedOps.Instance().CreateResourceExport(resourceExport)
+				if err != nil {
+					logrus.Errorf("failed to create ResourceExport CR[%v/%v]: %v", resourceExport.Namespace, resourceExport.Name, err)
+					return err
+				}
+				return nil
+			}
+			logrus.Errorf("failed to get restore resourceExport CR: %v", err)
+			// Will retry in the next reconcile cycle.
+			return nil
+		} else {
+			var message string
+			// Check the status of the resourceExport CR and update it to the applicationRestore CR
+			logrus.Debugf("resource export: %s, status: %s", resourceExport.Name, resourceExport.Status.Status)
+			switch resourceExport.Status.Status {
+			case kdmpapi.ResourceExportStatusFailed:
+				message = fmt.Sprintf("Error applying resources: %v", resourceExport.Status.Reason)
+				restore.Status.Status = storkapi.ApplicationRestoreStatusFailed
+				restore.Status.Stage = storkapi.ApplicationRestoreStageFinal
+				restore.Status.Reason = message
+				restore.Status.LastUpdateTimestamp = metav1.Now()
+				restore.Status.FinishTimestamp = metav1.Now()
+				err = a.client.Update(context.TODO(), restore)
+				if err != nil {
+					return err
+				}
+				a.recorder.Event(restore,
+					v1.EventTypeWarning,
+					string(storkapi.ApplicationRestoreStatusFailed),
+					message)
+				log.ApplicationRestoreLog(restore).Errorf(message)
+				return err
+			case kdmpapi.ResourceExportStatusSuccessful:
+				// Modify to have subresource level updating
+				for _, resource := range resourceExport.Status.Resources {
+					a.updateResourceStatusFromRestoreCR(
+						restore,
+						resource,
+						resource.Status,
+						resource.Reason)
+				}
+			case kdmpapi.ResourceExportStatusInitial:
+				doCleanup = false
+			case kdmpapi.ResourceExportStatusPending:
+				doCleanup = false
+			case kdmpapi.ResourceExportStatusInProgress:
+				restore.Status.LastUpdateTimestamp = metav1.Now()
+				doCleanup = false
+			}
+			restore.Status.LastUpdateTimestamp = metav1.Now()
+			err = a.client.Update(context.TODO(), restore)
+			if err != nil {
+				return err
+			}
+		}
+	}
+
+	if !doCleanup {
+		return nil
 	}
 	// Before updating to final stage, cleanup generic backup CRs, if any.
 	err = a.cleanupResources(restore)
@@ -1353,6 +1695,19 @@ func (a *ApplicationRestoreController) cleanupRestore(restore *storkapi.Applicat
 			return fmt.Errorf("cancel restore: %s", err)
 		}
 	}
+	var crNames = []string{}
+	// Directly calling DeleteResourceExport without checking the backuplocation type.
+	// For other backuplocation types, a NotFound error is expected.
+	crNames = append(crNames, getResourceExportCRName(utils.PrefixRestore, string(restore.UID), restore.Namespace))
+	crNames = append(crNames, getResourceExportCRName(utils.PrefixNFSRestorePVC, string(restore.UID), restore.Namespace))
+	for _, crName := range crNames {
+		err := kdmpShedOps.Instance().DeleteResourceExport(crName, restore.Namespace)
+		if err != nil && !k8s_errors.IsNotFound(err) {
+			errMsg := fmt.Sprintf("failed to delete restore resource export CR [%v]: %v", crName, err)
+			log.ApplicationRestoreLog(restore).Errorf("%v", errMsg)
+			return err
+		}
+	}
 	return nil
 }
@@ -1371,13 +1726,13 @@ func (a *ApplicationRestoreController) createCRD() error {
 		}
 		if ok {
 			err := k8sutils.CreateCRD(resource)
-			if err != nil && !errors.IsAlreadyExists(err) {
+			if err != nil && !k8s_errors.IsAlreadyExists(err) {
 				return err
 			}
 			return apiextensions.Instance().ValidateCRD(resource.Plural+"."+resource.Group, validateCRDTimeout, validateCRDInterval)
 		}
 		err = apiextensions.Instance().CreateCRDV1beta1(resource)
-		if err != nil && !errors.IsAlreadyExists(err) {
+		if err != nil && !k8s_errors.IsAlreadyExists(err) {
 			return err
 		}
 		return apiextensions.Instance().ValidateCRDV1beta1(resource, validateCRDTimeout, validateCRDInterval)
@@ -1395,5 +1750,18 @@ func (a *ApplicationRestoreController) cleanupResources(restore *storkapi.Applic
 			logrus.Errorf("unable to cleanup post restore resources, err: %v", err)
 		}
 	}
+	var crNames = []string{}
+	// Directly calling DeleteResourceExport without checking the backuplocation type.
+	// For other backuplocation types, a NotFound error is expected.
+	crNames = append(crNames, getResourceExportCRName(utils.PrefixRestore, string(restore.UID), restore.Namespace))
+	crNames = append(crNames, getResourceExportCRName(utils.PrefixNFSRestorePVC, string(restore.UID), restore.Namespace))
+	for _, crName := range crNames {
+		err := kdmpShedOps.Instance().DeleteResourceExport(crName, restore.Namespace)
+		if err != nil && !k8s_errors.IsNotFound(err) {
+			errMsg := fmt.Sprintf("failed to delete restore resource export CR [%v]: %v", crName, err)
+			log.ApplicationRestoreLog(restore).Errorf("%v", errMsg)
+			return err
+		}
+	}
 	return nil
 }
diff --git a/pkg/client/clientset/versioned/typed/stork/v1alpha1/fake/fake_resourcetransformation.go b/pkg/client/clientset/versioned/typed/stork/v1alpha1/fake/fake_resourcetransformation.go
new file mode 100644
index 0000000000..397599df33
--- /dev/null
+++ b/pkg/client/clientset/versioned/typed/stork/v1alpha1/fake/fake_resourcetransformation.go
@@ -0,0 +1,142 @@
+/*
+Copyright 2018 Openstorage.org
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by client-gen. DO NOT EDIT.
+ +package fake + +import ( + "context" + + v1alpha1 "github.com/libopenstorage/stork/pkg/apis/stork/v1alpha1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + labels "k8s.io/apimachinery/pkg/labels" + schema "k8s.io/apimachinery/pkg/runtime/schema" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + testing "k8s.io/client-go/testing" +) + +// FakeResourceTransformations implements ResourceTransformationInterface +type FakeResourceTransformations struct { + Fake *FakeStorkV1alpha1 + ns string +} + +var resourcetransformationsResource = schema.GroupVersionResource{Group: "stork.libopenstorage.org", Version: "v1alpha1", Resource: "resourcetransformations"} + +var resourcetransformationsKind = schema.GroupVersionKind{Group: "stork.libopenstorage.org", Version: "v1alpha1", Kind: "ResourceTransformation"} + +// Get takes name of the resourceTransformation, and returns the corresponding resourceTransformation object, and an error if there is any. +func (c *FakeResourceTransformations) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha1.ResourceTransformation, err error) { + obj, err := c.Fake. + Invokes(testing.NewGetAction(resourcetransformationsResource, c.ns, name), &v1alpha1.ResourceTransformation{}) + + if obj == nil { + return nil, err + } + return obj.(*v1alpha1.ResourceTransformation), err +} + +// List takes label and field selectors, and returns the list of ResourceTransformations that match those selectors. +func (c *FakeResourceTransformations) List(ctx context.Context, opts v1.ListOptions) (result *v1alpha1.ResourceTransformationList, err error) { + obj, err := c.Fake. + Invokes(testing.NewListAction(resourcetransformationsResource, resourcetransformationsKind, c.ns, opts), &v1alpha1.ResourceTransformationList{}) + + if obj == nil { + return nil, err + } + + label, _, _ := testing.ExtractFromListOptions(opts) + if label == nil { + label = labels.Everything() + } + list := &v1alpha1.ResourceTransformationList{ListMeta: obj.(*v1alpha1.ResourceTransformationList).ListMeta} + for _, item := range obj.(*v1alpha1.ResourceTransformationList).Items { + if label.Matches(labels.Set(item.Labels)) { + list.Items = append(list.Items, item) + } + } + return list, err +} + +// Watch returns a watch.Interface that watches the requested resourceTransformations. +func (c *FakeResourceTransformations) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { + return c.Fake. + InvokesWatch(testing.NewWatchAction(resourcetransformationsResource, c.ns, opts)) + +} + +// Create takes the representation of a resourceTransformation and creates it. Returns the server's representation of the resourceTransformation, and an error, if there is any. +func (c *FakeResourceTransformations) Create(ctx context.Context, resourceTransformation *v1alpha1.ResourceTransformation, opts v1.CreateOptions) (result *v1alpha1.ResourceTransformation, err error) { + obj, err := c.Fake. + Invokes(testing.NewCreateAction(resourcetransformationsResource, c.ns, resourceTransformation), &v1alpha1.ResourceTransformation{}) + + if obj == nil { + return nil, err + } + return obj.(*v1alpha1.ResourceTransformation), err +} + +// Update takes the representation of a resourceTransformation and updates it. Returns the server's representation of the resourceTransformation, and an error, if there is any. 
+func (c *FakeResourceTransformations) Update(ctx context.Context, resourceTransformation *v1alpha1.ResourceTransformation, opts v1.UpdateOptions) (result *v1alpha1.ResourceTransformation, err error) { + obj, err := c.Fake. + Invokes(testing.NewUpdateAction(resourcetransformationsResource, c.ns, resourceTransformation), &v1alpha1.ResourceTransformation{}) + + if obj == nil { + return nil, err + } + return obj.(*v1alpha1.ResourceTransformation), err +} + +// UpdateStatus was generated because the type contains a Status member. +// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). +func (c *FakeResourceTransformations) UpdateStatus(ctx context.Context, resourceTransformation *v1alpha1.ResourceTransformation, opts v1.UpdateOptions) (*v1alpha1.ResourceTransformation, error) { + obj, err := c.Fake. + Invokes(testing.NewUpdateSubresourceAction(resourcetransformationsResource, "status", c.ns, resourceTransformation), &v1alpha1.ResourceTransformation{}) + + if obj == nil { + return nil, err + } + return obj.(*v1alpha1.ResourceTransformation), err +} + +// Delete takes name of the resourceTransformation and deletes it. Returns an error if one occurs. +func (c *FakeResourceTransformations) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { + _, err := c.Fake. + Invokes(testing.NewDeleteAction(resourcetransformationsResource, c.ns, name), &v1alpha1.ResourceTransformation{}) + + return err +} + +// DeleteCollection deletes a collection of objects. +func (c *FakeResourceTransformations) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { + action := testing.NewDeleteCollectionAction(resourcetransformationsResource, c.ns, listOpts) + + _, err := c.Fake.Invokes(action, &v1alpha1.ResourceTransformationList{}) + return err +} + +// Patch applies the patch and returns the patched resourceTransformation. +func (c *FakeResourceTransformations) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.ResourceTransformation, err error) { + obj, err := c.Fake. 
+ Invokes(testing.NewPatchSubresourceAction(resourcetransformationsResource, c.ns, name, pt, data, subresources...), &v1alpha1.ResourceTransformation{}) + + if obj == nil { + return nil, err + } + return obj.(*v1alpha1.ResourceTransformation), err +} diff --git a/pkg/client/clientset/versioned/typed/stork/v1alpha1/fake/fake_stork_client.go b/pkg/client/clientset/versioned/typed/stork/v1alpha1/fake/fake_stork_client.go index 3bf46cb4f9..6944d60715 100644 --- a/pkg/client/clientset/versioned/typed/stork/v1alpha1/fake/fake_stork_client.go +++ b/pkg/client/clientset/versioned/typed/stork/v1alpha1/fake/fake_stork_client.go @@ -84,6 +84,10 @@ func (c *FakeStorkV1alpha1) NamespacedSchedulePolicies(namespace string) v1alpha return &FakeNamespacedSchedulePolicies{c, namespace} } +func (c *FakeStorkV1alpha1) ResourceTransformations(namespace string) v1alpha1.ResourceTransformationInterface { + return &FakeResourceTransformations{c, namespace} +} + func (c *FakeStorkV1alpha1) Rules(namespace string) v1alpha1.RuleInterface { return &FakeRules{c, namespace} } diff --git a/pkg/client/clientset/versioned/typed/stork/v1alpha1/generated_expansion.go b/pkg/client/clientset/versioned/typed/stork/v1alpha1/generated_expansion.go index 95cc218140..e14b258d54 100644 --- a/pkg/client/clientset/versioned/typed/stork/v1alpha1/generated_expansion.go +++ b/pkg/client/clientset/versioned/typed/stork/v1alpha1/generated_expansion.go @@ -46,6 +46,8 @@ type MigrationScheduleExpansion interface{} type NamespacedSchedulePolicyExpansion interface{} +type ResourceTransformationExpansion interface{} + type RuleExpansion interface{} type SchedulePolicyExpansion interface{} diff --git a/pkg/client/clientset/versioned/typed/stork/v1alpha1/resourcetransformation.go b/pkg/client/clientset/versioned/typed/stork/v1alpha1/resourcetransformation.go new file mode 100644 index 0000000000..88c1ee89b2 --- /dev/null +++ b/pkg/client/clientset/versioned/typed/stork/v1alpha1/resourcetransformation.go @@ -0,0 +1,195 @@ +/* +Copyright 2018 Openstorage.org + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package v1alpha1 + +import ( + "context" + "time" + + v1alpha1 "github.com/libopenstorage/stork/pkg/apis/stork/v1alpha1" + scheme "github.com/libopenstorage/stork/pkg/client/clientset/versioned/scheme" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + rest "k8s.io/client-go/rest" +) + +// ResourceTransformationsGetter has a method to return a ResourceTransformationInterface. +// A group's client should implement this interface. +type ResourceTransformationsGetter interface { + ResourceTransformations(namespace string) ResourceTransformationInterface +} + +// ResourceTransformationInterface has methods to work with ResourceTransformation resources. 
+type ResourceTransformationInterface interface { + Create(ctx context.Context, resourceTransformation *v1alpha1.ResourceTransformation, opts v1.CreateOptions) (*v1alpha1.ResourceTransformation, error) + Update(ctx context.Context, resourceTransformation *v1alpha1.ResourceTransformation, opts v1.UpdateOptions) (*v1alpha1.ResourceTransformation, error) + UpdateStatus(ctx context.Context, resourceTransformation *v1alpha1.ResourceTransformation, opts v1.UpdateOptions) (*v1alpha1.ResourceTransformation, error) + Delete(ctx context.Context, name string, opts v1.DeleteOptions) error + DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error + Get(ctx context.Context, name string, opts v1.GetOptions) (*v1alpha1.ResourceTransformation, error) + List(ctx context.Context, opts v1.ListOptions) (*v1alpha1.ResourceTransformationList, error) + Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.ResourceTransformation, err error) + ResourceTransformationExpansion +} + +// resourceTransformations implements ResourceTransformationInterface +type resourceTransformations struct { + client rest.Interface + ns string +} + +// newResourceTransformations returns a ResourceTransformations +func newResourceTransformations(c *StorkV1alpha1Client, namespace string) *resourceTransformations { + return &resourceTransformations{ + client: c.RESTClient(), + ns: namespace, + } +} + +// Get takes name of the resourceTransformation, and returns the corresponding resourceTransformation object, and an error if there is any. +func (c *resourceTransformations) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha1.ResourceTransformation, err error) { + result = &v1alpha1.ResourceTransformation{} + err = c.client.Get(). + Namespace(c.ns). + Resource("resourcetransformations"). + Name(name). + VersionedParams(&options, scheme.ParameterCodec). + Do(ctx). + Into(result) + return +} + +// List takes label and field selectors, and returns the list of ResourceTransformations that match those selectors. +func (c *resourceTransformations) List(ctx context.Context, opts v1.ListOptions) (result *v1alpha1.ResourceTransformationList, err error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + result = &v1alpha1.ResourceTransformationList{} + err = c.client.Get(). + Namespace(c.ns). + Resource("resourcetransformations"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Do(ctx). + Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested resourceTransformations. +func (c *resourceTransformations) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + opts.Watch = true + return c.client.Get(). + Namespace(c.ns). + Resource("resourcetransformations"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Watch(ctx) +} + +// Create takes the representation of a resourceTransformation and creates it. Returns the server's representation of the resourceTransformation, and an error, if there is any. 
+func (c *resourceTransformations) Create(ctx context.Context, resourceTransformation *v1alpha1.ResourceTransformation, opts v1.CreateOptions) (result *v1alpha1.ResourceTransformation, err error) { + result = &v1alpha1.ResourceTransformation{} + err = c.client.Post(). + Namespace(c.ns). + Resource("resourcetransformations"). + VersionedParams(&opts, scheme.ParameterCodec). + Body(resourceTransformation). + Do(ctx). + Into(result) + return +} + +// Update takes the representation of a resourceTransformation and updates it. Returns the server's representation of the resourceTransformation, and an error, if there is any. +func (c *resourceTransformations) Update(ctx context.Context, resourceTransformation *v1alpha1.ResourceTransformation, opts v1.UpdateOptions) (result *v1alpha1.ResourceTransformation, err error) { + result = &v1alpha1.ResourceTransformation{} + err = c.client.Put(). + Namespace(c.ns). + Resource("resourcetransformations"). + Name(resourceTransformation.Name). + VersionedParams(&opts, scheme.ParameterCodec). + Body(resourceTransformation). + Do(ctx). + Into(result) + return +} + +// UpdateStatus was generated because the type contains a Status member. +// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). +func (c *resourceTransformations) UpdateStatus(ctx context.Context, resourceTransformation *v1alpha1.ResourceTransformation, opts v1.UpdateOptions) (result *v1alpha1.ResourceTransformation, err error) { + result = &v1alpha1.ResourceTransformation{} + err = c.client.Put(). + Namespace(c.ns). + Resource("resourcetransformations"). + Name(resourceTransformation.Name). + SubResource("status"). + VersionedParams(&opts, scheme.ParameterCodec). + Body(resourceTransformation). + Do(ctx). + Into(result) + return +} + +// Delete takes name of the resourceTransformation and deletes it. Returns an error if one occurs. +func (c *resourceTransformations) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { + return c.client.Delete(). + Namespace(c.ns). + Resource("resourcetransformations"). + Name(name). + Body(&opts). + Do(ctx). + Error() +} + +// DeleteCollection deletes a collection of objects. +func (c *resourceTransformations) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { + var timeout time.Duration + if listOpts.TimeoutSeconds != nil { + timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second + } + return c.client.Delete(). + Namespace(c.ns). + Resource("resourcetransformations"). + VersionedParams(&listOpts, scheme.ParameterCodec). + Timeout(timeout). + Body(&opts). + Do(ctx). + Error() +} + +// Patch applies the patch and returns the patched resourceTransformation. +func (c *resourceTransformations) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.ResourceTransformation, err error) { + result = &v1alpha1.ResourceTransformation{} + err = c.client.Patch(pt). + Namespace(c.ns). + Resource("resourcetransformations"). + Name(name). + SubResource(subresources...). + VersionedParams(&opts, scheme.ParameterCodec). + Body(data). + Do(ctx). 
+ Into(result) + return +} diff --git a/pkg/client/clientset/versioned/typed/stork/v1alpha1/stork_client.go b/pkg/client/clientset/versioned/typed/stork/v1alpha1/stork_client.go index 012c2fc77b..4d16ab25e9 100644 --- a/pkg/client/clientset/versioned/typed/stork/v1alpha1/stork_client.go +++ b/pkg/client/clientset/versioned/typed/stork/v1alpha1/stork_client.go @@ -40,6 +40,7 @@ type StorkV1alpha1Interface interface { MigrationsGetter MigrationSchedulesGetter NamespacedSchedulePoliciesGetter + ResourceTransformationsGetter RulesGetter SchedulePoliciesGetter VolumeSnapshotRestoresGetter @@ -107,6 +108,10 @@ func (c *StorkV1alpha1Client) NamespacedSchedulePolicies(namespace string) Names return newNamespacedSchedulePolicies(c, namespace) } +func (c *StorkV1alpha1Client) ResourceTransformations(namespace string) ResourceTransformationInterface { + return newResourceTransformations(c, namespace) +} + func (c *StorkV1alpha1Client) Rules(namespace string) RuleInterface { return newRules(c, namespace) } diff --git a/pkg/client/informers/externalversions/generic.go b/pkg/client/informers/externalversions/generic.go index c444feedbe..b06fee0341 100644 --- a/pkg/client/informers/externalversions/generic.go +++ b/pkg/client/informers/externalversions/generic.go @@ -81,6 +81,8 @@ func (f *sharedInformerFactory) ForResource(resource schema.GroupVersionResource return &genericInformer{resource: resource.GroupResource(), informer: f.Stork().V1alpha1().MigrationSchedules().Informer()}, nil case v1alpha1.SchemeGroupVersion.WithResource("namespacedschedulepolicies"): return &genericInformer{resource: resource.GroupResource(), informer: f.Stork().V1alpha1().NamespacedSchedulePolicies().Informer()}, nil + case v1alpha1.SchemeGroupVersion.WithResource("resourcetransformations"): + return &genericInformer{resource: resource.GroupResource(), informer: f.Stork().V1alpha1().ResourceTransformations().Informer()}, nil case v1alpha1.SchemeGroupVersion.WithResource("rules"): return &genericInformer{resource: resource.GroupResource(), informer: f.Stork().V1alpha1().Rules().Informer()}, nil case v1alpha1.SchemeGroupVersion.WithResource("schedulepolicies"): diff --git a/pkg/client/informers/externalversions/stork/v1alpha1/interface.go b/pkg/client/informers/externalversions/stork/v1alpha1/interface.go index 10c8fd8791..e093bcc286 100644 --- a/pkg/client/informers/externalversions/stork/v1alpha1/interface.go +++ b/pkg/client/informers/externalversions/stork/v1alpha1/interface.go @@ -52,6 +52,8 @@ type Interface interface { MigrationSchedules() MigrationScheduleInformer // NamespacedSchedulePolicies returns a NamespacedSchedulePolicyInformer. NamespacedSchedulePolicies() NamespacedSchedulePolicyInformer + // ResourceTransformations returns a ResourceTransformationInformer. + ResourceTransformations() ResourceTransformationInformer // Rules returns a RuleInformer. Rules() RuleInformer // SchedulePolicies returns a SchedulePolicyInformer. @@ -143,6 +145,11 @@ func (v *version) NamespacedSchedulePolicies() NamespacedSchedulePolicyInformer return &namespacedSchedulePolicyInformer{factory: v.factory, namespace: v.namespace, tweakListOptions: v.tweakListOptions} } +// ResourceTransformations returns a ResourceTransformationInformer. +func (v *version) ResourceTransformations() ResourceTransformationInformer { + return &resourceTransformationInformer{factory: v.factory, namespace: v.namespace, tweakListOptions: v.tweakListOptions} +} + // Rules returns a RuleInformer. 
func (v *version) Rules() RuleInformer { return &ruleInformer{factory: v.factory, namespace: v.namespace, tweakListOptions: v.tweakListOptions} diff --git a/pkg/client/informers/externalversions/stork/v1alpha1/resourcetransformation.go b/pkg/client/informers/externalversions/stork/v1alpha1/resourcetransformation.go new file mode 100644 index 0000000000..d2d52993d5 --- /dev/null +++ b/pkg/client/informers/externalversions/stork/v1alpha1/resourcetransformation.go @@ -0,0 +1,90 @@ +/* +Copyright 2018 Openstorage.org + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by informer-gen. DO NOT EDIT. + +package v1alpha1 + +import ( + "context" + time "time" + + storkv1alpha1 "github.com/libopenstorage/stork/pkg/apis/stork/v1alpha1" + versioned "github.com/libopenstorage/stork/pkg/client/clientset/versioned" + internalinterfaces "github.com/libopenstorage/stork/pkg/client/informers/externalversions/internalinterfaces" + v1alpha1 "github.com/libopenstorage/stork/pkg/client/listers/stork/v1alpha1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + watch "k8s.io/apimachinery/pkg/watch" + cache "k8s.io/client-go/tools/cache" +) + +// ResourceTransformationInformer provides access to a shared informer and lister for +// ResourceTransformations. +type ResourceTransformationInformer interface { + Informer() cache.SharedIndexInformer + Lister() v1alpha1.ResourceTransformationLister +} + +type resourceTransformationInformer struct { + factory internalinterfaces.SharedInformerFactory + tweakListOptions internalinterfaces.TweakListOptionsFunc + namespace string +} + +// NewResourceTransformationInformer constructs a new informer for ResourceTransformation type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. +func NewResourceTransformationInformer(client versioned.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer { + return NewFilteredResourceTransformationInformer(client, namespace, resyncPeriod, indexers, nil) +} + +// NewFilteredResourceTransformationInformer constructs a new informer for ResourceTransformation type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. 
+func NewFilteredResourceTransformationInformer(client versioned.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer { + return cache.NewSharedIndexInformer( + &cache.ListWatch{ + ListFunc: func(options v1.ListOptions) (runtime.Object, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.StorkV1alpha1().ResourceTransformations(namespace).List(context.TODO(), options) + }, + WatchFunc: func(options v1.ListOptions) (watch.Interface, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.StorkV1alpha1().ResourceTransformations(namespace).Watch(context.TODO(), options) + }, + }, + &storkv1alpha1.ResourceTransformation{}, + resyncPeriod, + indexers, + ) +} + +func (f *resourceTransformationInformer) defaultInformer(client versioned.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { + return NewFilteredResourceTransformationInformer(client, f.namespace, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions) +} + +func (f *resourceTransformationInformer) Informer() cache.SharedIndexInformer { + return f.factory.InformerFor(&storkv1alpha1.ResourceTransformation{}, f.defaultInformer) +} + +func (f *resourceTransformationInformer) Lister() v1alpha1.ResourceTransformationLister { + return v1alpha1.NewResourceTransformationLister(f.Informer().GetIndexer()) +} diff --git a/pkg/client/listers/stork/v1alpha1/expansion_generated.go b/pkg/client/listers/stork/v1alpha1/expansion_generated.go index c1d53b74dc..c61fe5420b 100644 --- a/pkg/client/listers/stork/v1alpha1/expansion_generated.go +++ b/pkg/client/listers/stork/v1alpha1/expansion_generated.go @@ -118,6 +118,14 @@ type NamespacedSchedulePolicyListerExpansion interface{} // NamespacedSchedulePolicyNamespaceLister. type NamespacedSchedulePolicyNamespaceListerExpansion interface{} +// ResourceTransformationListerExpansion allows custom methods to be added to +// ResourceTransformationLister. +type ResourceTransformationListerExpansion interface{} + +// ResourceTransformationNamespaceListerExpansion allows custom methods to be added to +// ResourceTransformationNamespaceLister. +type ResourceTransformationNamespaceListerExpansion interface{} + // RuleListerExpansion allows custom methods to be added to // RuleLister. type RuleListerExpansion interface{} diff --git a/pkg/client/listers/stork/v1alpha1/resourcetransformation.go b/pkg/client/listers/stork/v1alpha1/resourcetransformation.go new file mode 100644 index 0000000000..7d7350a30f --- /dev/null +++ b/pkg/client/listers/stork/v1alpha1/resourcetransformation.go @@ -0,0 +1,99 @@ +/* +Copyright 2018 Openstorage.org + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by lister-gen. DO NOT EDIT. 
+ +package v1alpha1 + +import ( + v1alpha1 "github.com/libopenstorage/stork/pkg/apis/stork/v1alpha1" + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/client-go/tools/cache" +) + +// ResourceTransformationLister helps list ResourceTransformations. +// All objects returned here must be treated as read-only. +type ResourceTransformationLister interface { + // List lists all ResourceTransformations in the indexer. + // Objects returned here must be treated as read-only. + List(selector labels.Selector) (ret []*v1alpha1.ResourceTransformation, err error) + // ResourceTransformations returns an object that can list and get ResourceTransformations. + ResourceTransformations(namespace string) ResourceTransformationNamespaceLister + ResourceTransformationListerExpansion +} + +// resourceTransformationLister implements the ResourceTransformationLister interface. +type resourceTransformationLister struct { + indexer cache.Indexer +} + +// NewResourceTransformationLister returns a new ResourceTransformationLister. +func NewResourceTransformationLister(indexer cache.Indexer) ResourceTransformationLister { + return &resourceTransformationLister{indexer: indexer} +} + +// List lists all ResourceTransformations in the indexer. +func (s *resourceTransformationLister) List(selector labels.Selector) (ret []*v1alpha1.ResourceTransformation, err error) { + err = cache.ListAll(s.indexer, selector, func(m interface{}) { + ret = append(ret, m.(*v1alpha1.ResourceTransformation)) + }) + return ret, err +} + +// ResourceTransformations returns an object that can list and get ResourceTransformations. +func (s *resourceTransformationLister) ResourceTransformations(namespace string) ResourceTransformationNamespaceLister { + return resourceTransformationNamespaceLister{indexer: s.indexer, namespace: namespace} +} + +// ResourceTransformationNamespaceLister helps list and get ResourceTransformations. +// All objects returned here must be treated as read-only. +type ResourceTransformationNamespaceLister interface { + // List lists all ResourceTransformations in the indexer for a given namespace. + // Objects returned here must be treated as read-only. + List(selector labels.Selector) (ret []*v1alpha1.ResourceTransformation, err error) + // Get retrieves the ResourceTransformation from the indexer for a given namespace and name. + // Objects returned here must be treated as read-only. + Get(name string) (*v1alpha1.ResourceTransformation, error) + ResourceTransformationNamespaceListerExpansion +} + +// resourceTransformationNamespaceLister implements the ResourceTransformationNamespaceLister +// interface. +type resourceTransformationNamespaceLister struct { + indexer cache.Indexer + namespace string +} + +// List lists all ResourceTransformations in the indexer for a given namespace. +func (s resourceTransformationNamespaceLister) List(selector labels.Selector) (ret []*v1alpha1.ResourceTransformation, err error) { + err = cache.ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) { + ret = append(ret, m.(*v1alpha1.ResourceTransformation)) + }) + return ret, err +} + +// Get retrieves the ResourceTransformation from the indexer for a given namespace and name. 
+func (s resourceTransformationNamespaceLister) Get(name string) (*v1alpha1.ResourceTransformation, error) { + obj, exists, err := s.indexer.GetByKey(s.namespace + "/" + name) + if err != nil { + return nil, err + } + if !exists { + return nil, errors.NewNotFound(v1alpha1.Resource("resourcetransformation"), name) + } + return obj.(*v1alpha1.ResourceTransformation), nil +} diff --git a/pkg/log/log.go b/pkg/log/log.go index cd3b938364..5f0f9da817 100644 --- a/pkg/log/log.go +++ b/pkg/log/log.go @@ -157,6 +157,18 @@ func MigrationLog(migration *storkv1.Migration) *logrus.Entry { return logrus.WithFields(logrus.Fields{}) } +// TransformLog formats a log message with resource transformation CR information +func TransformLog(transform *storkv1.ResourceTransformation) *logrus.Entry { + if transform != nil { + return logrus.WithFields(logrus.Fields{ + "ResourceTransformationName": transform.Name, + "ResourceTransformationNamespace": transform.Namespace, + }) + } + + return logrus.WithFields(logrus.Fields{}) +} + // MigrationScheduleLog formats a log message with migrationschedule information func MigrationScheduleLog(migrationSchedule *storkv1.MigrationSchedule) *logrus.Entry { if migrationSchedule != nil { diff --git a/pkg/migration/controllers/clusterpair.go b/pkg/migration/controllers/clusterpair.go index 97f2f8f84c..c33b1f4a14 100644 --- a/pkg/migration/controllers/clusterpair.go +++ b/pkg/migration/controllers/clusterpair.go @@ -64,9 +64,9 @@ func (c *ClusterPairController) Init(mgr manager.Manager) error { func (c *ClusterPairController) Reconcile(ctx context.Context, request reconcile.Request) (reconcile.Result, error) { logrus.Tracef("Reconciling ClusterPair %s/%s", request.Namespace, request.Name) - // Fetch the ApplicationBackup instance - backup := &stork_api.ClusterPair{} - err := c.client.Get(context.TODO(), request.NamespacedName, backup) + // Fetch the ClusterPair instance + clusterPair := &stork_api.ClusterPair{} + err := c.client.Get(context.TODO(), request.NamespacedName, clusterPair) if err != nil { if errors.IsNotFound(err) { // Request object not found, could have been deleted after reconcile request. 
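Both the ClusterPair hunks here and the new ResourceTransformation controller later in this patch follow the same reconcile shape: fetch the CR, treat not-found as a no-op, attach a cleanup finalizer before doing any real work, and only then run the handler. A minimal sketch of that pattern, using the upstream controller-runtime controllerutil helpers in place of Stork's internal controllers package (the finalizer name and requeue durations below are placeholders, not Stork's values):

package sketch

import (
	"context"
	"time"

	"k8s.io/apimachinery/pkg/api/errors"
	ctrl "sigs.k8s.io/controller-runtime"
	"sigs.k8s.io/controller-runtime/pkg/client"
	"sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"
)

// placeholder finalizer name; Stork uses its own constant
const finalizerCleanup = "example.org/finalizer-cleanup"

// reconcile sketches the fetch -> finalizer -> handle flow used by the
// ClusterPair and ResourceTransformation controllers in this PR.
func reconcile(ctx context.Context, c client.Client, req ctrl.Request, obj client.Object) (ctrl.Result, error) {
	if err := c.Get(ctx, req.NamespacedName, obj); err != nil {
		if errors.IsNotFound(err) {
			// CR was deleted after the reconcile request; nothing to do.
			return ctrl.Result{}, nil
		}
		// Error reading the object: requeue with a backoff.
		return ctrl.Result{RequeueAfter: 10 * time.Second}, err
	}
	// Attach the cleanup finalizer first so deletions are always intercepted.
	if !controllerutil.ContainsFinalizer(obj, finalizerCleanup) {
		controllerutil.AddFinalizer(obj, finalizerCleanup)
		return ctrl.Result{Requeue: true}, c.Update(ctx, obj)
	}
	// ... the controller's handle() state machine would run here ...
	return ctrl.Result{RequeueAfter: 30 * time.Second}, nil
}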
@@ -78,13 +78,13 @@ func (c *ClusterPairController) Reconcile(ctx context.Context, request reconcile return reconcile.Result{RequeueAfter: controllers.DefaultRequeueError}, err } - if !controllers.ContainsFinalizer(backup, controllers.FinalizerCleanup) { - controllers.SetFinalizer(backup, controllers.FinalizerCleanup) - return reconcile.Result{Requeue: true}, c.client.Update(context.TODO(), backup) + if !controllers.ContainsFinalizer(clusterPair, controllers.FinalizerCleanup) { + controllers.SetFinalizer(clusterPair, controllers.FinalizerCleanup) + return reconcile.Result{Requeue: true}, c.client.Update(context.TODO(), clusterPair) } - if err = c.handle(context.TODO(), backup); err != nil { - logrus.Errorf("%s: %s/%s: %s", reflect.TypeOf(c), backup.Namespace, backup.Name, err) + if err = c.handle(context.TODO(), clusterPair); err != nil { + logrus.Errorf("%s: %s/%s: %s", reflect.TypeOf(c), clusterPair.Namespace, clusterPair.Name, err) return reconcile.Result{RequeueAfter: controllers.DefaultRequeueError}, err } diff --git a/pkg/migration/controllers/migration.go b/pkg/migration/controllers/migration.go index 05804f7cb7..0db1414170 100644 --- a/pkg/migration/controllers/migration.go +++ b/pkg/migration/controllers/migration.go @@ -3,13 +3,14 @@ package controllers import ( "context" "fmt" - "github.com/libopenstorage/stork/pkg/utils" "math/rand" "reflect" "strconv" "strings" "time" + "github.com/libopenstorage/stork/pkg/utils" + "github.com/go-openapi/inflect" "github.com/libopenstorage/stork/drivers/volume" stork_api "github.com/libopenstorage/stork/pkg/apis/stork/v1alpha1" @@ -65,7 +66,8 @@ const ( PVReclaimAnnotation = "stork.libopenstorage.org/reclaimPolicy" // StorkAnnotationPrefix for resources created/managed by stork StorkAnnotationPrefix = "stork.libopenstorage.org/" - + // StorkNamespacePrefix for namespace created for applying dry run resources + StorkNamespacePrefix = "stork-transform" // Max number of times to retry applying resources on the desination maxApplyRetries = 10 deletedMaxRetries = 12 @@ -196,6 +198,10 @@ func setDefaults(spec stork_api.MigrationSpec) stork_api.MigrationSpec { defaultBool := false spec.IncludeNetworkPolicyWithCIDR = &defaultBool } + if spec.SkipDeletedNamespaces == nil { + defaultBool := true + spec.SkipDeletedNamespaces = &defaultBool + } return spec } @@ -364,7 +370,52 @@ func (m *MigrationController) handle(ctx context.Context, migration *stork_api.M err.Error()) err = m.updateMigrationCR(context.Background(), migration) if err != nil { - log.MigrationLog(migration).Errorf("Error updating") + log.MigrationLog(migration).Errorf("Error updating CR, err: %v", err) + } + return nil + } + } + // Make sure if transformation CR is in ready state + if len(migration.Spec.TransformSpecs) != 0 { + // Check if multiple transformation specs are provided + if len(migration.Spec.TransformSpecs) > 1 { + errMsg := fmt.Sprintf("providing multiple transformation specs is not supported in this release %v, err: %v", migration.Spec.TransformSpecs, err) + log.MigrationLog(migration).Errorf(errMsg) + m.recorder.Event(migration, + v1.EventTypeWarning, + string(stork_api.MigrationStatusFailed), + errMsg) + err = m.updateMigrationCR(context.Background(), migration) + if err != nil { + log.MigrationLog(migration).Errorf("Error updating CR, err: %v", err) + } + return nil + } + // verify if transform specs are created + resp, err := storkops.Instance().GetResourceTransformation(migration.Spec.TransformSpecs[0], ns) + if err != nil { + errMsg := fmt.Sprintf("unable to retrieve 
transformation %s, err: %v", migration.Spec.TransformSpecs, err) + log.MigrationLog(migration).Errorf(errMsg) + m.recorder.Event(migration, + v1.EventTypeWarning, + string(stork_api.MigrationStatusFailed), + err.Error()) + err = m.updateMigrationCR(context.Background(), migration) + if err != nil { + log.MigrationLog(migration).Errorf("Error updating CR, err: %v", err) + } + return nil + } + if err := storkops.Instance().ValidateResourceTransformation(resp.Name, ns, 1*time.Minute, 5*time.Second); err != nil { + errMsg := fmt.Sprintf("transformation %s is not in ready state: %s", migration.Spec.TransformSpecs[0], resp.Status.Status) + log.MigrationLog(migration).Errorf(errMsg) + m.recorder.Event(migration, + v1.EventTypeWarning, + string(stork_api.MigrationStatusFailed), + errMsg) + err = m.updateMigrationCR(context.Background(), migration) + if err != nil { + log.MigrationLog(migration).Errorf("Error updating CR, err: %v", err) } return nil } @@ -753,6 +804,7 @@ func (m *MigrationController) migrateVolumes(migration *stork_api.Migration, ter return err } } else { + logrus.Infof("Migrating pv and pvcs for volume only migration") err := m.migrateResources(migration, true) if err != nil { log.MigrationLog(migration).Errorf("Error migrating resources: %v", err) @@ -955,7 +1007,6 @@ func (m *MigrationController) migrateResources(migration *stork_api.Migration, v return err } - migration.Status.ResourceMigrationFinishTimestamp = metav1.Now() migration.Status.Stage = stork_api.MigrationStageFinal migration.Status.Status = stork_api.MigrationStatusSuccessful for _, resource := range migration.Status.Resources { @@ -964,6 +1015,7 @@ func (m *MigrationController) migrateResources(migration *stork_api.Migration, v break } } + if *migration.Spec.PurgeDeletedResources { if err := m.purgeMigratedResources(migration, resourceCollectorOpts); err != nil { message := fmt.Sprintf("Error cleaning up resources: %v", err) @@ -976,6 +1028,7 @@ func (m *MigrationController) migrateResources(migration *stork_api.Migration, v } } + migration.Status.ResourceMigrationFinishTimestamp = metav1.Now() migration.Status.FinishTimestamp = metav1.Now() err = m.updateMigrationCR(context.TODO(), migration) if err != nil { @@ -993,6 +1046,22 @@ func (m *MigrationController) prepareResources( if err != nil { return err } + transformName := "" + // this is already handled in pre-checks, we dont support multiple resource transformation + // rules specified in migration specs + if len(migration.Spec.TransformSpecs) != 0 && len(migration.Spec.TransformSpecs) == 1 { + transformName = migration.Spec.TransformSpecs[0] + } + + resPatch := make(map[string]stork_api.KindResourceTransform) + if transformName != "" { + resPatch, err = resourcecollector.GetResourcePatch(transformName, migration.Spec.Namespaces) + if err != nil { + log.MigrationLog(migration). 
+ Warnf("Unable to get transformation spec from :%s, skipping transformation for this migration, err: %v", transformName, err) + return err + } + } for _, o := range objects { metadata, err := meta.Accessor(o) @@ -1016,6 +1085,18 @@ func (m *MigrationController) prepareResources( if err != nil { return fmt.Errorf("error preparing %v resource %v: %v", o.GetObjectKind().GroupVersionKind().Kind, metadata.GetName(), err) } + default: + // if namespace has resource transformation spec + if ns, found := resPatch[metadata.GetNamespace()]; found { + // if transformspec present for current resource kind + if kind, ok := ns[resource.Kind]; ok { + err := resourcecollector.TransformResources(o, kind, metadata.GetName(), metadata.GetNamespace()) + if err != nil { + return fmt.Errorf("error updating %v resource %v: %v", o.GetObjectKind().GroupVersionKind().Kind, metadata.GetName(), err) + } + } + } + // do nothing } // prepare CR resources @@ -1032,7 +1113,6 @@ func (m *MigrationController) prepareResources( } } } - } return nil } @@ -1054,6 +1134,11 @@ func (m *MigrationController) updateResourceStatus( (resource.Group == gkv.Group || (resource.Group == "core" && gkv.Group == "")) && resource.Version == gkv.Version && resource.Kind == gkv.Kind { + if _, ok := metadata.GetAnnotations()[resourcecollector.TransformedResourceName]; ok { + if len(migration.Spec.TransformSpecs) != 0 && len(migration.Spec.TransformSpecs) == 1 { + resource.TransformedBy = migration.Spec.TransformSpecs[0] + } + } resource.Status = status resource.Reason = reason eventType := v1.EventTypeNormal @@ -1097,6 +1182,12 @@ func (m *MigrationController) checkAndUpdateService( object runtime.Unstructured, objHash uint64, ) (bool, error) { + // if transformation spec is provided, always update service with + // transform spec rules + if len(migration.Spec.TransformSpecs) != 0 { + return false, nil + } + var svc v1.Service if err := runtime.DefaultUnstructuredConverter.FromUnstructured(object.UnstructuredContent(), &svc); err != nil { return false, fmt.Errorf("error converting unstructured obj to service resource: %v", err) @@ -1209,7 +1300,7 @@ func (m *MigrationController) preparePVResource( } pv.Annotations[PVReclaimAnnotation] = string(pv.Spec.PersistentVolumeReclaimPolicy) pv.Spec.PersistentVolumeReclaimPolicy = v1.PersistentVolumeReclaimRetain - _, err := m.volDriver.UpdateMigratedPersistentVolumeSpec(&pv, nil) + _, err := m.volDriver.UpdateMigratedPersistentVolumeSpec(&pv, nil, nil) if err != nil { return err } @@ -1479,11 +1570,11 @@ func (m *MigrationController) applyResources( res.ResourceVersion = "" // if crds is applied as v1beta on k8s version 1.16+ it will have - // preservedUnkownField set and api version converted to v1 , + // preservedUnknownField set and api version converted to v1 , // which cause issue while applying it on dest cluster, // since we will be applying v1 crds with non-valid schema - // this converts `preserveUnknownFiels`(deprecated) to spec.Versions[*].xPreservedUnknown + // this converts `preserveUnknownFields`(deprecated) to spec.Versions[*].xPreservedUnknown // equivalent var updatedVersions []apiextensionsv1.CustomResourceDefinitionVersion if res.Spec.PreserveUnknownFields { @@ -1576,6 +1667,7 @@ func (m *MigrationController) applyResources( updatedObjects = append(updatedObjects, o) } } + logrus.Infof("Recreating pv and pvc object") // create/update pv object with updated policy for _, obj := range pvObjects { var pv v1.PersistentVolume @@ -2140,6 +2232,7 @@ func (m *MigrationController) 
getVolumeOnlyMigrationResources( return resources, err } resources = append(resources, objects.Items...) + // add pvcs to resource list resource = metav1.APIResource{ Name: "persistentvolumeclaims", diff --git a/pkg/migration/controllers/resourcetransformation.go b/pkg/migration/controllers/resourcetransformation.go new file mode 100644 index 0000000000..ece907030c --- /dev/null +++ b/pkg/migration/controllers/resourcetransformation.go @@ -0,0 +1,360 @@ +package controllers + +import ( + "context" + "fmt" + "reflect" + "strings" + + "github.com/go-openapi/inflect" + "github.com/libopenstorage/stork/drivers/volume" + stork_api "github.com/libopenstorage/stork/pkg/apis/stork/v1alpha1" + "github.com/libopenstorage/stork/pkg/controllers" + "github.com/libopenstorage/stork/pkg/k8sutils" + "github.com/libopenstorage/stork/pkg/log" + "github.com/libopenstorage/stork/pkg/resourcecollector" + "github.com/libopenstorage/stork/pkg/version" + "github.com/portworx/sched-ops/k8s/apiextensions" + coreops "github.com/portworx/sched-ops/k8s/core" + "github.com/sirupsen/logrus" + v1 "k8s.io/api/core/v1" + apiextensionsv1beta1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1" + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/api/meta" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/client-go/dynamic" + "k8s.io/client-go/tools/clientcmd" + "k8s.io/client-go/tools/record" + runtimeclient "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/manager" + "sigs.k8s.io/controller-runtime/pkg/reconcile" +) + +const ( + // ResourceTransformationControllerName of resource transformation CR handler + ResourceTransformationControllerName = "resource-transformation-controller" +) + +// NewResourceTransformation creates a new instance of ResourceTransformation Manager +func NewResourceTransformation(mgr manager.Manager, d volume.Driver, r record.EventRecorder, rc resourcecollector.ResourceCollector) *ResourceTransformationController { + return &ResourceTransformationController{ + client: mgr.GetClient(), + recorder: r, + resourceCollector: rc, + } +} + +// ResourceTransformationController controller to watch over ResourceTransformation CR +type ResourceTransformationController struct { + client runtimeclient.Client + + resourceCollector resourcecollector.ResourceCollector + recorder record.EventRecorder +} + +// Init initialize the resource transformation controller +func (r *ResourceTransformationController) Init(mgr manager.Manager) error { + err := r.createCRD() + if err != nil { + return err + } + + return controllers.RegisterTo(mgr, ResourceTransformationControllerName, r, &stork_api.ResourceTransformation{}) +} + +// Reconcile manages ResourceTransformation resources. +func (r *ResourceTransformationController) Reconcile(ctx context.Context, request reconcile.Request) (reconcile.Result, error) { + resourceTransformation := &stork_api.ResourceTransformation{} + err := r.client.Get(context.TODO(), request.NamespacedName, resourceTransformation) + if err != nil { + if errors.IsNotFound(err) { + // Request object not found, could have been deleted after reconcile request. + // Owned objects are automatically garbage collected. For additional cleanup logic use finalizers. + // Return and don't requeue + return reconcile.Result{}, nil + } + // Error reading the object - requeue the request. 
+ return reconcile.Result{RequeueAfter: controllers.DefaultRequeueError}, err + } + + if !controllers.ContainsFinalizer(resourceTransformation, controllers.FinalizerCleanup) { + controllers.SetFinalizer(resourceTransformation, controllers.FinalizerCleanup) + return reconcile.Result{Requeue: true}, r.client.Update(context.TODO(), resourceTransformation) + } + + if err = r.handle(context.TODO(), resourceTransformation); err != nil { + logrus.Errorf("%s: %s/%s: %s", reflect.TypeOf(r), resourceTransformation.Namespace, resourceTransformation.Name, err) + return reconcile.Result{RequeueAfter: controllers.DefaultRequeueError}, err + } + + return reconcile.Result{RequeueAfter: controllers.DefaultRequeue}, nil +} + +func getTransformNamespace(ns string) string { + return StorkNamespacePrefix + "-" + ns +} +func (r *ResourceTransformationController) handle(ctx context.Context, transform *stork_api.ResourceTransformation) error { + var err error + if transform.DeletionTimestamp != nil { + if transform.GetFinalizers() != nil { + controllers.RemoveFinalizer(transform, controllers.FinalizerCleanup) + return r.client.Update(ctx, transform) + } + + return nil + } + switch transform.Status.Status { + case stork_api.ResourceTransformationStatusInitial: + err = r.validateSpecPath(transform) + if err != nil { + message := fmt.Sprintf("Unsupported resource for resource transformation found: %v", err) + log.TransformLog(transform).Errorf(message) + r.recorder.Event(transform, + v1.EventTypeWarning, + string(stork_api.ResourceTransformationStatusFailed), + message) + transform.Status.Status = stork_api.ResourceTransformationStatusFailed + err := r.client.Update(ctx, transform) + if err != nil { + return err + } + return nil + } + transform.Status.Status = stork_api.ResourceTransformationStatusInProgress + if err = r.client.Update(ctx, transform); err != nil { + return err + } + case stork_api.ResourceTransformationStatusInProgress: + err = r.validateTransformResource(ctx, transform) + if err != nil { + message := fmt.Sprintf("Error validating resource transformation specs: %v", err) + log.TransformLog(transform).Errorf(message) + r.recorder.Event(transform, + v1.EventTypeWarning, + string(stork_api.ResourceTransformationStatusFailed), + message) + transform.Status.Status = stork_api.ResourceTransformationStatusFailed + err := r.client.Update(ctx, transform) + if err != nil { + return err + } + } + case stork_api.ResourceTransformationStatusReady: + case stork_api.ResourceTransformationStatusFailed: + return nil + default: + log.TransformLog(transform).Errorf("Invalid status for ResourceTransformation: %v", transform.Status.Status) + } + return nil +} + +func (r *ResourceTransformationController) validateSpecPath(transform *stork_api.ResourceTransformation) error { + for _, spec := range transform.Spec.Objects { + _, _, kind, err := getGVK(spec.Resource) + if err != nil { + return err + } + if !resourcecollector.GetSupportedK8SResources(kind, []string{}) { + return fmt.Errorf("unsupported resource kind for transformation: %s", kind) + } + for _, path := range spec.Paths { + // TODO: this can be validated via CRDs as well, when we have defined schema + // for stork crds + // https://portworx.atlassian.net/browse/PWX-26465 + if path.Operation == stork_api.JsonResourcePatch { + return fmt.Errorf("json patch for resources is not supported, operation: %s", path.Operation) + } + if !(path.Operation == stork_api.AddResourcePath || path.Operation == stork_api.DeleteResourcePath || + path.Operation == 
stork_api.ModifyResourcePathValue) {
+				return fmt.Errorf("unsupported resource patch operation given for kind: %s, operation: %s", kind, path.Operation)
+			}
+			if !(path.Type == stork_api.BoolResourceType || path.Type == stork_api.IntResourceType ||
+				path.Type == stork_api.StringResourceType || path.Type == stork_api.SliceResourceType ||
+				path.Type == stork_api.KeyPairResourceType) {
+				return fmt.Errorf("unsupported type for resource %s, path %s, type: %s", kind, path.Path, path.Type)
+			}
+		}
+	}
+	log.TransformLog(transform).Infof("validated spec paths")
+	return nil
+}
+
+func (r *ResourceTransformationController) validateTransformResource(ctx context.Context, transform *stork_api.ResourceTransformation) error {
+	resourceCollectorOpts := resourcecollector.Options{}
+	config, err := clientcmd.BuildConfigFromFlags("", "")
+	if err != nil {
+		return err
+	}
+	localInterface, err := dynamic.NewForConfig(config)
+	if err != nil {
+		return err
+	}
+	localOps, err := coreops.NewForConfig(config)
+	if err != nil {
+		r.recorder.Event(transform,
+			v1.EventTypeWarning,
+			string(stork_api.ResourceTransformationStatusFailed),
+			err.Error())
+		return nil
+	}
+
+	// temp namespace used to dry-run the transformed resources
+	remoteTempNamespace := getTransformNamespace(transform.Namespace)
+	ns := &v1.Namespace{}
+	ns.Name = remoteTempNamespace
+	_, err = localOps.CreateNamespace(ns)
+	if err != nil && !errors.IsAlreadyExists(err) {
+		message := fmt.Sprintf("Unable to create resource transformation namespace: %v", err)
+		log.TransformLog(transform).Errorf(message)
+		r.recorder.Event(transform,
+			v1.EventTypeWarning,
+			string(stork_api.ResourceTransformationStatusFailed),
+			message)
+		transform.Status.Status = stork_api.ResourceTransformationStatusFailed
+		err := r.client.Update(ctx, transform)
+		if err != nil {
+			return err
+		}
+		return nil
+	}
+
+	for _, spec := range transform.Spec.Objects {
+		group, version, kind, err := getGVK(spec.Resource)
+		if err != nil {
+			return fmt.Errorf("invalid resource type, should be in format <group>/<version>/<kind>, actual: %s", spec.Resource)
+		}
+		resource := metav1.APIResource{
+			Name:       strings.ToLower(inflect.Pluralize(kind)),
+			Kind:       kind,
+			Version:    version,
+			Namespaced: true,
+			Group:      group,
+		}
+		objects, err := r.resourceCollector.GetResourcesForType(
+			resource,
+			nil,
+			[]string{transform.Namespace},
+			spec.Selectors,
+			nil,
+			false,
+			resourceCollectorOpts,
+		)
+		if err != nil {
+			r.recorder.Event(transform,
+				v1.EventTypeWarning,
+				string(stork_api.ResourceTransformationStatusFailed),
+				fmt.Sprintf("Error getting resources of kind %s, err: %v", kind, err))
+			log.TransformLog(transform).Errorf("Error getting resources of kind %s, err: %v", kind, err)
+			return err
+		}
+		for _, path := range spec.Paths {
+			// This can be handled by CRD validation once v1 CRDs with schema support are used
+			if !(path.Operation == stork_api.AddResourcePath || path.Operation == stork_api.DeleteResourcePath ||
+				path.Operation == stork_api.ModifyResourcePathValue) {
+				return fmt.Errorf("unsupported operation type for given path: %s", path.Operation)
+			}
+			for _, object := range objects.Items {
+				metadata, err := meta.Accessor(object)
+				if err != nil {
+					log.TransformLog(transform).Errorf("Unable to read metadata for resource %v, err: %v", kind, err)
+					return err
+				}
+				resInfo := &stork_api.TransformResourceInfo{
+					Name:             metadata.GetName(),
+					Namespace:        metadata.GetNamespace(),
+					GroupVersionKind: metav1.GroupVersionKind(object.GetObjectKind().GroupVersionKind()),
+					Specs:            spec,
+				}
+				if err :=
resourcecollector.TransformResources(object, []stork_api.TransformResourceInfo{*resInfo}, metadata.GetName(), metadata.GetNamespace()); err != nil { + log.TransformLog(transform).Errorf("Unable to apply patch path %s on resource kind: %s/,%s/%s, err: %v", path, kind, resInfo.Namespace, resInfo.Name, err) + resInfo.Status = stork_api.ResourceTransformationStatusFailed + resInfo.Reason = err.Error() + return err + } + unstructured, ok := object.(*unstructured.Unstructured) + if !ok { + return fmt.Errorf("unable to cast object to unstructured: %v", object) + } + resource := &metav1.APIResource{ + Name: inflect.Pluralize(strings.ToLower(kind)), + Namespaced: len(metadata.GetNamespace()) > 0, + } + dynamicClient := localInterface.Resource( + object.GetObjectKind().GroupVersionKind().GroupVersion().WithResource(resource.Name)).Namespace(getTransformNamespace(transform.Namespace)) + + unstructured.SetNamespace(getTransformNamespace(transform.Namespace)) + log.TransformLog(transform).Infof("Applying object %s, %s", + object.GetObjectKind().GroupVersionKind().Kind, + metadata.GetName()) + _, err = dynamicClient.Create(context.TODO(), unstructured, metav1.CreateOptions{DryRun: []string{"All"}}) + if err != nil { + log.TransformLog(transform).Errorf("Unable to apply patch path %s on resource kind: %s/,%s/%s, err: %v", path, kind, resInfo.Namespace, resInfo.Name, err) + resInfo.Status = stork_api.ResourceTransformationStatusFailed + resInfo.Reason = err.Error() + } else { + log.TransformLog(transform).Infof("Applied patch path %s on resource kind: %s/,%s/%s", path, kind, resInfo.Namespace, resInfo.Name) + resInfo.Status = stork_api.ResourceTransformationStatusReady + resInfo.Reason = "" + } + transform.Status.Resources = append(transform.Status.Resources, resInfo) + } + } + } + + transform.Status.Status = stork_api.ResourceTransformationStatusReady + // verify if all resource dry-run is successful + for _, resource := range transform.Status.Resources { + if resource.Status != stork_api.ResourceTransformationStatusReady { + transform.Status.Status = stork_api.ResourceTransformationStatusFailed + } + } + + if err := localOps.DeleteNamespace(remoteTempNamespace); err != nil { + // log & generate event, but lets not fail resource transformation if + // transformation controller could not delete temp namespace created + message := fmt.Sprintf("Unable to delete resource transformation namespace %s: %v", ns.Name, err) + log.TransformLog(transform).Errorf(message) + r.recorder.Event(transform, + v1.EventTypeWarning, + string(stork_api.ResourceTransformationStatusFailed), + message) + } + return r.client.Update(ctx, transform) +} + +// return group,version,kind from give resource type +func getGVK(resource string) (string, string, string, error) { + gvk := strings.Split(resource, "/") + if len(gvk) != 3 { + return "", "", "", fmt.Errorf("invalid resource kind :%s", resource) + } + return gvk[0], gvk[1], gvk[2], nil +} + +func (c *ResourceTransformationController) createCRD() error { + resource := apiextensions.CustomResource{ + Name: stork_api.ResourceTransformationResourceName, + Plural: stork_api.ResourceTransformationResourcePlural, + Group: stork_api.SchemeGroupVersion.Group, + Version: stork_api.SchemeGroupVersion.Version, + Scope: apiextensionsv1beta1.NamespaceScoped, + Kind: reflect.TypeOf(stork_api.ResourceTransformation{}).Name(), + } + ok, err := version.RequiresV1Registration() + if err != nil { + return err + } + if ok { + err := k8sutils.CreateCRD(resource) + if err != nil && 
!errors.IsAlreadyExists(err) { + return err + } + return apiextensions.Instance().ValidateCRD(resource.Plural+"."+resource.Group, validateCRDTimeout, validateCRDInterval) + } + err = apiextensions.Instance().CreateCRDV1beta1(resource) + if err != nil && !errors.IsAlreadyExists(err) { + return err + } + return apiextensions.Instance().ValidateCRDV1beta1(resource, validateCRDTimeout, validateCRDInterval) +} diff --git a/pkg/objectstore/nfs/nfs.go b/pkg/objectstore/nfs/nfs.go new file mode 100644 index 0000000000..48bb44333f --- /dev/null +++ b/pkg/objectstore/nfs/nfs.go @@ -0,0 +1,13 @@ +package nfs + +import ( + stork_api "github.com/libopenstorage/stork/pkg/apis/stork/v1alpha1" + "github.com/libopenstorage/stork/pkg/objectstore/common" + "github.com/sirupsen/logrus" +) + +// GetObjLockInfo fetches the object lock configuration of a bucket +func GetObjLockInfo(backupLocation *stork_api.BackupLocation) (*common.ObjLockInfo, error) { + logrus.Infof("object lock is not supported for nfs server") + return &common.ObjLockInfo{}, nil +} diff --git a/pkg/objectstore/objectstore.go b/pkg/objectstore/objectstore.go index b591fac999..6134c6e31d 100644 --- a/pkg/objectstore/objectstore.go +++ b/pkg/objectstore/objectstore.go @@ -7,6 +7,7 @@ import ( "github.com/libopenstorage/stork/pkg/objectstore/azure" "github.com/libopenstorage/stork/pkg/objectstore/common" "github.com/libopenstorage/stork/pkg/objectstore/google" + "github.com/libopenstorage/stork/pkg/objectstore/nfs" "github.com/libopenstorage/stork/pkg/objectstore/s3" "gocloud.dev/blob" ) @@ -59,6 +60,8 @@ func GetObjLockInfo(backupLocation *stork_api.BackupLocation) (*common.ObjLockIn return azure.GetObjLockInfo(backupLocation) case stork_api.BackupLocationS3: return s3.GetObjLockInfo(backupLocation) + case stork_api.BackupLocationNFS: + return nfs.GetObjLockInfo(backupLocation) default: return nil, fmt.Errorf("invalid backupLocation type: %v", backupLocation.Location.Type) } diff --git a/pkg/objectstore/s3/s3.go b/pkg/objectstore/s3/s3.go index ca92953a74..f2c00d5a04 100644 --- a/pkg/objectstore/s3/s3.go +++ b/pkg/objectstore/s3/s3.go @@ -11,6 +11,7 @@ import ( "github.com/libopenstorage/secrets/aws/credentials" stork_api "github.com/libopenstorage/stork/pkg/apis/stork/v1alpha1" "github.com/libopenstorage/stork/pkg/objectstore/common" + "github.com/sirupsen/logrus" "gocloud.dev/blob" "gocloud.dev/blob/s3blob" ) @@ -90,7 +91,9 @@ func GetObjLockInfo(backupLocation *stork_api.BackupLocation) (*common.ObjLockIn objLockInfo := &common.ObjLockInfo{} out, err := s3.New(sess).GetObjectLockConfiguration(input) if err != nil { + logrus.Warnf("GetObjLockInfo: GetObjectLockConfiguration failed with: %v", err) if awsErr, ok := err.(awserr.Error); ok { + logrus.Warnf("GetObjLockInfo: GetObjectLockConfiguration awsErr.Code %v", awsErr.Code()) // When a Minio server doesn't have object-lock implemented then above API // throws following error codes depending on version it runs for normal buckets // 1. "ObjectLockConfigurationNotFoundError" @@ -98,9 +101,11 @@ func GetObjLockInfo(backupLocation *stork_api.BackupLocation) (*common.ObjLockIn // Similarly in case of AWS, we need to ignore "NoSuchBucket" so that // px-backup/stork can create the bucket on behalf user when validation flag is not set. 
 // With cloudian objectstore, we saw the error as "ObjectLockConfigurationNotFound"
+			// With Netapp Trident, we saw the error as "NotImplemented"
 			if awsErr.Code() == "ObjectLockConfigurationNotFoundError" ||
 				awsErr.Code() == "MethodNotAllowed" ||
 				awsErr.Code() == "ObjectLockConfigurationNotFound" ||
+				awsErr.Code() == "NotImplemented" ||
 				awsErr.Code() == "NoSuchBucket" {
 				// for a non-objectlocked bucket we needn't throw error
 				return objLockInfo, nil
@@ -109,9 +114,11 @@ func GetObjLockInfo(backupLocation *stork_api.BackupLocation) (*common.ObjLockIn
 		return nil, err
 	}
 	if (out != nil) && (out.ObjectLockConfiguration != nil) {
+		logrus.Warnf("GetObjLockInfo: out.ObjectLockConfiguration.ObjectLockEnabled: %v", aws.StringValue(out.ObjectLockConfiguration.ObjectLockEnabled))
 		if aws.StringValue(out.ObjectLockConfiguration.ObjectLockEnabled) == "Enabled" {
 			objLockInfo.LockEnabled = true
 		} else {
+			logrus.Infof("GetObjLockInfo: ObjectLockConfiguration is empty: %v", out.ObjectLockConfiguration)
 			// For some of the objectstore like FB and dell ECS, GetObjectLockConfiguration
 			// will return empty objectlockconfiguration instead of nil or error
 			return objLockInfo, nil
@@ -123,9 +130,13 @@ func GetObjLockInfo(backupLocation *stork_api.BackupLocation) (*common.ObjLockIn
 			objLockInfo.RetentionPeriodDays = aws.Int64Value(out.ObjectLockConfiguration.Rule.DefaultRetention.Days)
 		} else {
 			//This is an invalid object-lock config, no default-retention but object-loc enabled
+			logrus.Errorf("GetObjLockInfo: invalid config: object lock is enabled but default retention period is not set on the bucket")
 			objLockInfo.LockEnabled = false
 			return nil, fmt.Errorf("invalid config: object lock is enabled but default retention period is not set on the bucket")
 		}
 	}
+	// err is expected to be nil here, since all error paths return earlier.
+	// Log the final result just in case we ever hit an unexpected state.
+ logrus.Infof("GetObjLockInfo: returning objLockInfo: %v - err %v", objLockInfo, err) return objLockInfo, err } diff --git a/pkg/resourcecollector/clusterrole.go b/pkg/resourcecollector/clusterrole.go index 7f4c32df66..436c5b3a22 100644 --- a/pkg/resourcecollector/clusterrole.go +++ b/pkg/resourcecollector/clusterrole.go @@ -1,6 +1,7 @@ package resourcecollector import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "strings" rbacv1 "k8s.io/api/rbac/v1" @@ -99,6 +100,21 @@ func (r *ResourceCollector) clusterRoleToBeCollected( } } } + + // clusterRole can also be bind to role binding as well at namespace level + // Get the role binding for the given namespace + var filterOptions = metav1.ListOptions{} + rbs, err := r.rbacOps.ListRoleBinding(namespace, filterOptions) + if err != nil { + return false, err + } + // Find the corresponding RoleBinding and see if it belongs to the requested namespace + for _, rb := range rbs.Items { + if rb.RoleRef.Name == name && rb.Namespace == namespace { + return true, nil + } + } + return false, nil } diff --git a/pkg/resourcecollector/endpoint.go b/pkg/resourcecollector/endpoint.go index c8e9215132..8b936c896d 100644 --- a/pkg/resourcecollector/endpoint.go +++ b/pkg/resourcecollector/endpoint.go @@ -7,6 +7,10 @@ import ( "k8s.io/apimachinery/pkg/runtime" ) +const ( + headlessService = "service.kubernetes.io/headless" +) + func (r *ResourceCollector) endpointsToBeCollected( object runtime.Unstructured, ) (bool, error) { @@ -34,6 +38,13 @@ func (r *ResourceCollector) endpointsToBeCollected( if _, ok := endpoint.Annotations[v1.LastAppliedConfigAnnotation]; !ok { return false, nil } + } + if endpoint.Labels != nil { + // skip collecting endpointfs for headless service + // https://kubernetes.io/docs/reference/labels-annotations-taints/#servicekubernetesioheadless + if _, ok := endpoint.Labels[headlessService]; ok { + return false, nil + } } return true, nil diff --git a/pkg/resourcecollector/persistentvolume.go b/pkg/resourcecollector/persistentvolume.go index cf11984fcb..4181ec1edb 100644 --- a/pkg/resourcecollector/persistentvolume.go +++ b/pkg/resourcecollector/persistentvolume.go @@ -2,6 +2,8 @@ package resourcecollector import ( "fmt" + "github.com/libopenstorage/stork/pkg/utils" + "github.com/sirupsen/logrus" "github.com/libopenstorage/stork/drivers/volume" stork_api "github.com/libopenstorage/stork/pkg/apis/stork/v1alpha1" @@ -10,6 +12,7 @@ import ( "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" "k8s.io/apimachinery/pkg/labels" "k8s.io/apimachinery/pkg/runtime" + k8shelper "k8s.io/component-helpers/storage/volume" ) func (r *ResourceCollector) pvToBeCollected( @@ -90,10 +93,45 @@ func (r *ResourceCollector) pvToBeCollected( func (r *ResourceCollector) preparePVResourceForCollection( object runtime.Unstructured, ) error { + var pv v1.PersistentVolume + var currentSc string + if err := runtime.DefaultUnstructuredConverter.FromUnstructured(object.UnstructuredContent(), &pv); err != nil { + return err + } + // Some time pv spec does not contains the storage class. + // In that case, we will get it from pvc spec. + if len(pv.Spec.StorageClassName) == 0 { + pvc, err := r.coreOps.GetPersistentVolumeClaim(pv.Spec.ClaimRef.Name, pv.Spec.ClaimRef.Namespace) + if err != nil { + return err + } + currentSc, err = utils.GetStorageClassNameForPVC(pvc) + if err != nil { + // Not returning error as there might be some cases, where PVC might not have storage class. 
+ // A case where a PV can be manually bind mounted to a volume ( backend storage volume) + logrus.Debugf("preparePVResourceForCollection: failed to fetch storage class from PVC %v: %v", pv.Spec.ClaimRef.Name, err) + } + } else { + currentSc = pv.Spec.StorageClassName + } err := unstructured.SetNestedField(object.UnstructuredContent(), nil, "spec", "claimRef") if err != nil { return err } + if len(currentSc) > 0 { + annotations, found, err := unstructured.NestedStringMap(object.UnstructuredContent(), "metadata", "annotations") + if err != nil { + return err + } + if !found { + annotations = make(map[string]string) + } + annotations[CurrentStorageClassName] = currentSc + if err := unstructured.SetNestedStringMap(object.UnstructuredContent(), annotations, "metadata", "annotations"); err != nil { + return err + } + object.SetUnstructuredContent(object.UnstructuredContent()) + } return unstructured.SetNestedField(object.UnstructuredContent(), "", "spec", "storageClassName") } @@ -103,6 +141,8 @@ func (r *ResourceCollector) preparePVResourceForApply( object runtime.Unstructured, pvNameMappings map[string]string, vInfo []*stork_api.ApplicationRestoreVolumeInfo, + storageClassMappings map[string]string, + namespaceMappings map[string]string, ) (bool, error) { var updatedName string var present bool @@ -119,6 +159,30 @@ func (r *ResourceCollector) preparePVResourceForApply( if updatedName, present = pvNameMappings[pv.Name]; !present { return true, nil } + // get the storage class name from the CurrentStorageClassName annotation + var oldSc string + var exists bool + var newSc string + if pv.Annotations != nil { + if val, ok := pv.Annotations[CurrentStorageClassName]; ok { + oldSc = val + // delete CurrentStorageClassName annotation before applying + delete(pv.Annotations, CurrentStorageClassName) + if newSc, exists = storageClassMappings[oldSc]; exists && len(newSc) > 0 { + // If the oldSc is present the storageclass map, get the new sc and update it in the pv spec + // Get the provisioner name from the new sc and update it + storageClass, err := r.storageOps.GetStorageClass(newSc) + if err != nil { + return false, fmt.Errorf("failed in getting the storage class [%v]: %v", newSc, err) + } + pv.Annotations[k8shelper.AnnDynamicallyProvisioned] = storageClass.Provisioner + pv.Spec.StorageClassName = newSc + } else { + // if the storageclass map does not have the oldSc name, update the PV spec with the oldSC itself before applying. 
+ pv.Spec.StorageClassName = oldSc + } + } + } pv.Name = updatedName var driverName string @@ -130,6 +194,7 @@ func (r *ResourceCollector) preparePVResourceForApply( break } } + // in case of non-restore call make sure resourcecollector // checks proper driver by looking at pv name if driverName == "" { @@ -144,7 +209,7 @@ func (r *ResourceCollector) preparePVResourceForApply( if err != nil { return false, err } - _, err = driver.UpdateMigratedPersistentVolumeSpec(&pv, volumeInfo) + _, err = driver.UpdateMigratedPersistentVolumeSpec(&pv, volumeInfo, namespaceMappings) if err != nil { return false, err } diff --git a/pkg/resourcecollector/persistentvolumeclaim.go b/pkg/resourcecollector/persistentvolumeclaim.go index 3b92f75c6e..e1ac23cc8b 100644 --- a/pkg/resourcecollector/persistentvolumeclaim.go +++ b/pkg/resourcecollector/persistentvolumeclaim.go @@ -5,10 +5,12 @@ import ( "github.com/libopenstorage/stork/drivers/volume" stork_api "github.com/libopenstorage/stork/pkg/apis/stork/v1alpha1" + "github.com/libopenstorage/stork/pkg/utils" "github.com/portworx/sched-ops/k8s/core" v1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/meta" "k8s.io/apimachinery/pkg/runtime" + k8shelper "k8s.io/component-helpers/storage/volume" pvutil "k8s.io/kubernetes/pkg/controller/volume/persistentvolume/util" ) @@ -96,12 +98,50 @@ func (r *ResourceCollector) preparePVCResourceForApply( } } - - if len(storageClassMappings) > 0 && pvc.Spec.StorageClassName != nil { - if newSc, exists := storageClassMappings[*pvc.Spec.StorageClassName]; exists && len(newSc) > 0 { - pvc.Spec.StorageClassName = &newSc + if len(storageClassMappings) > 0 { + // In the case of storageClassMappings, we need to reset the + // storage class annotation and the provisioner annotation + var newSc string + var currentSc string + var exists bool + var provisioner string + // Get the existing storage class from the pvc spec + // It can be in BetaStorageClassAnnotation annotation or in the spec. + currentSc, err := utils.GetStorageClassNameForPVC(&pvc) + if err != nil { + // If the storageclassMapping is present, then we can assume that storage class should be present in the PVC spec. + // So handling the error and returning it to caller. 
+ return false, err + } + if len(currentSc) != 0 { + if newSc, exists = storageClassMappings[currentSc]; exists && len(newSc) > 0 { + if _, ok := pvc.Annotations[v1.BetaStorageClassAnnotation]; ok { + pvc.Annotations[v1.BetaStorageClassAnnotation] = newSc + } + if pvc.Spec.StorageClassName != nil && len(*pvc.Spec.StorageClassName) > 0 { + *pvc.Spec.StorageClassName = newSc + } + } + } + if len(newSc) > 0 { + storageClass, err := r.storageOps.GetStorageClass(newSc) + if err != nil { + return false, fmt.Errorf("failed in getting the storage class [%v]: %v", newSc, err) + } + provisioner = storageClass.Provisioner + } + if _, ok := pvc.Annotations[k8shelper.AnnBetaStorageProvisioner]; ok { + if len(provisioner) > 0 { + pvc.Annotations[k8shelper.AnnBetaStorageProvisioner] = provisioner + } + } + if _, ok := pvc.Annotations[k8shelper.AnnStorageProvisioner]; ok { + if len(provisioner) > 0 { + pvc.Annotations[k8shelper.AnnStorageProvisioner] = provisioner + } } } + o, err := runtime.DefaultUnstructuredConverter.ToUnstructured(&pvc) if err != nil { return false, err diff --git a/pkg/resourcecollector/resourcecollector.go b/pkg/resourcecollector/resourcecollector.go index 4ee0003d0a..882173039a 100644 --- a/pkg/resourcecollector/resourcecollector.go +++ b/pkg/resourcecollector/resourcecollector.go @@ -13,6 +13,7 @@ import ( stork_api "github.com/libopenstorage/stork/pkg/apis/stork/v1alpha1" "github.com/portworx/sched-ops/k8s/core" "github.com/portworx/sched-ops/k8s/rbac" + "github.com/portworx/sched-ops/k8s/storage" storkops "github.com/portworx/sched-ops/k8s/stork" "github.com/sirupsen/logrus" rbacv1 "k8s.io/api/rbac/v1" @@ -46,6 +47,12 @@ const ( ProjectMappingsOption = "ProjectMappings" // IncludeResources to not skip resources of specific type IncludeResources = "stork.libopenstorage.org/include-resource" + // TransformedResourceName is the annotation used to check if resource has been updated + // as per transformation rules + TransformedResourceName = "stork.libopenstorage.org/resourcetransformation-name" + // CurrentStorageClassName is the annotation used to store the current storage class of the PV before + // taking backup as we will reset it to empty. 
+ CurrentStorageClassName = "stork.libopenstorage.org/current-storage-class-name" // ServiceKind for k8s service resources ServiceKind = "Service" @@ -65,6 +72,7 @@ type ResourceCollector struct { coreOps core.Ops rbacOps rbac.Ops storkOps storkops.Ops + storageOps storage.Ops } // Options are the options passed to the ResourceCollector APIs that dictate how k8s @@ -132,6 +140,11 @@ func (r *ResourceCollector) Init(config *restclient.Config) error { if err != nil { return err } + r.storageOps, err = storage.NewForConfig(config) + if err != nil { + return err + } + return nil } @@ -148,7 +161,15 @@ func resourceToBeCollected(resource metav1.APIResource, grp schema.GroupVersion, return true } } - switch resource.Kind { + + return GetSupportedK8SResources(resource.Kind, optionalResourceTypes) +} + +// GetSupportedK8SResources returns supported k8s resources by resource collector +// pkgs, this can be used to validate list of resources supported by different stork +// controller like migration, backup, clone etc +func GetSupportedK8SResources(kind string, optionalResourceTypes []string) bool { + switch kind { case "PersistentVolumeClaim", "PersistentVolume", "Deployment", @@ -173,7 +194,9 @@ func resourceToBeCollected(resource metav1.APIResource, grp schema.GroupVersion, "LimitRange", "NetworkPolicy", "PodDisruptionBudget", - "Endpoints": + "Endpoints", + "ValidatingWebhookConfiguration", + "MutatingWebhookConfiguration": return true case "Job": return slice.ContainsString(optionalResourceTypes, "job", strings.ToLower) || @@ -516,7 +539,6 @@ func (r *ResourceCollector) objectToBeCollected( } else if !include { return false, nil } - switch objectType.GetKind() { case "Service": return r.serviceToBeCollected(object) @@ -548,8 +570,14 @@ func (r *ResourceCollector) objectToBeCollected( return r.dataVolumesToBeCollected(object) case "VirtualMachineInstance": return r.virtualMachineInstanceToBeCollected(object) + case "VirtualMachineInstanceMigration": + return r.virtualMachineInstanceMigrationToBeCollected(object) case "Endpoints": return r.endpointsToBeCollected(object) + case "MutatingWebhookConfiguration": + return r.mutatingWebHookToBeCollected(object, namespace) + case "ValidatingWebhookConfiguration": + return r.validatingWebHookToBeCollected(object, namespace) } return true, nil @@ -758,7 +786,6 @@ func (r *ResourceCollector) PrepareResourceForApply( optionalResourceTypes []string, vInfo []*stork_api.ApplicationRestoreVolumeInfo, ) (bool, error) { - objectType, err := meta.TypeAccessor(object) if err != nil { return false, err @@ -785,7 +812,6 @@ func (r *ResourceCollector) PrepareResourceForApply( // Update the namespace of the object, will be no-op for clustered resources metadata.SetNamespace(val) } - switch objectType.GetKind() { case "Job": if slice.ContainsString(optionalResourceTypes, "job", strings.ToLower) || @@ -794,13 +820,19 @@ func (r *ResourceCollector) PrepareResourceForApply( } return true, nil case "PersistentVolume": - return r.preparePVResourceForApply(object, pvNameMappings, vInfo) + return r.preparePVResourceForApply(object, pvNameMappings, vInfo, storageClassMappings, namespaceMappings) case "PersistentVolumeClaim": return r.preparePVCResourceForApply(object, allObjects, pvNameMappings, storageClassMappings, vInfo) case "ClusterRoleBinding": return false, r.prepareClusterRoleBindingForApply(object, namespaceMappings) case "RoleBinding": return false, r.prepareRoleBindingForApply(object, namespaceMappings) + case "ValidatingWebhookConfiguration": + return false, 
r.prepareValidatingWebHookForApply(object, namespaceMappings)
+	case "MutatingWebhookConfiguration":
+		return false, r.prepareMutatingWebHookForApply(object, namespaceMappings)
+	case "Secret":
+		return false, r.prepareSecretForApply(object)
 	}
 	return false, nil
 }
diff --git a/pkg/resourcecollector/resourcetransformation.go b/pkg/resourcecollector/resourcetransformation.go
new file mode 100644
index 0000000000..5b093fec89
--- /dev/null
+++ b/pkg/resourcecollector/resourcetransformation.go
@@ -0,0 +1,159 @@
+package resourcecollector
+
+import (
+	"fmt"
+	"strings"
+
+	stork_api "github.com/libopenstorage/stork/pkg/apis/stork/v1alpha1"
+	storkops "github.com/portworx/sched-ops/k8s/stork"
+	"github.com/sirupsen/logrus"
+	"k8s.io/apimachinery/pkg/api/errors"
+	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
+	"k8s.io/apimachinery/pkg/runtime"
+)
+
+// GetResourcePatch creates a map of namespace: {kind: []TransformResourceInfo{}}
+// so that the transform spec for matching resources can be looked up quickly,
+// since we collect all resources for the migration namespaces at once
+func GetResourcePatch(transformName string, namespaces []string) (map[string]stork_api.KindResourceTransform, error) {
+	// namespace -> kind -> TransformSpec map for faster lookup
+	patch := make(map[string]stork_api.KindResourceTransform)
+	if transformName == "" {
+		logrus.Error("Empty name received for resource transformation")
+		return patch, nil
+	}
+	for _, namespace := range namespaces {
+		resp, err := storkops.Instance().GetResourceTransformation(transformName, namespace)
+		if err != nil {
+			// the current namespace does not have a transform CR;
+			// skip it from the map
+			if errors.IsNotFound(err) {
+				continue
+			}
+			logrus.Errorf("Unable to get resource transformation specs %s/%s, err: %v", namespace, transformName, err)
+			return nil, err
+		}
+		resMap := make(map[string][]stork_api.TransformResourceInfo)
+		for _, resource := range resp.Status.Resources {
+			resMap[resource.Kind] = append(resMap[resource.Kind], *resource)
+		}
+		patch[namespace] = resMap
+	}
+	return patch, nil
+}
+
+// TransformResources transforms the given object as per the resource
+// transformation spec defined for its namespace
+func TransformResources(
+	object runtime.Unstructured,
+	resPatch []stork_api.TransformResourceInfo,
+	objName, objNamespace string,
+) error {
+	for _, patch := range resPatch {
+		if patch.Name == objName && patch.Namespace == objNamespace {
+			content := object.UnstructuredContent()
+			for _, path := range patch.Specs.Paths {
+				switch path.Operation {
+				case stork_api.AddResourcePath:
+					value := getNewValueForPath(path.Value, string(path.Type))
+					if path.Type == stork_api.KeyPairResourceType {
+						updateMap := value.(map[string]string)
+						err := unstructured.SetNestedStringMap(content, updateMap, strings.Split(path.Path, ".")...)
+						if err != nil {
+							logrus.Errorf("Unable to apply patch path %v on resource kind %s, %s/%s, err: %v", path, patch.Kind, patch.Namespace, patch.Name, err)
+							return err
+						}
+					} else if path.Type == stork_api.SliceResourceType {
+						err := unstructured.SetNestedField(content, value, strings.Split(path.Path, ".")...)
+						if err != nil {
+							logrus.Errorf("Unable to apply patch path %v on resource kind %s, %s/%s, err: %v", path, patch.Kind, patch.Namespace, patch.Name, err)
+							return err
+						}
+					} else {
+						err := unstructured.SetNestedField(content, value, strings.Split(path.Path, ".")...)
+						if err != nil {
+							logrus.Errorf("Unable to perform operation %s on path %v of resource kind %s, %s/%s, err: %v", path.Operation, path, patch.Kind, patch.Namespace, patch.Name, err)
+							return err
+						}
+					}
+
+				case stork_api.DeleteResourcePath:
+					unstructured.RemoveNestedField(content, strings.Split(path.Path, ".")...)
+					logrus.Debugf("Removed path %v from resource kind %s, %s/%s", path, patch.Kind, patch.Namespace, patch.Name)
+
+				case stork_api.ModifyResourcePathValue:
+					var value interface{}
+					if path.Type == stork_api.KeyPairResourceType {
+						currMap, _, err := unstructured.NestedMap(content, strings.Split(path.Path, ".")...)
+						if err != nil || len(currMap) == 0 {
+							return fmt.Errorf("unable to find spec path, err: %v", err)
+						}
+						mapList := strings.Split(path.Value, ",")
+						for _, val := range mapList {
+							keyPair := strings.Split(val, ":")
+							if len(keyPair) != 2 {
+								return fmt.Errorf("invalid keypair value format: %s", keyPair)
+							}
+							currMap[keyPair[0]] = keyPair[1]
+						}
+						value = currMap
+					} else if path.Type == stork_api.SliceResourceType {
+						currList, _, err := unstructured.NestedSlice(content, strings.Split(path.Path, ".")...)
+						if err != nil {
+							return fmt.Errorf("unable to find spec path, err: %v", err)
+						}
+						arrList := strings.Split(path.Value, ",")
+						for _, val := range arrList {
+							currList = append(currList, val)
+						}
+						value = currList
+					} else {
+						value = path.Value
+					}
+					err := unstructured.SetNestedField(content, value, strings.Split(path.Path, ".")...)
+					if err != nil {
+						logrus.Errorf("Unable to perform operation %s on path %v of resource kind %s, %s/%s, err: %v", path.Operation, path, patch.Kind, patch.Namespace, patch.Name, err)
+						return err
+					}
+				}
+			}
+			// Annotate the resource to record that it was transformed by the
+			// migration/restore controller before it gets applied
+			annotations, found, err := unstructured.NestedStringMap(content, "metadata", "annotations")
+			if err != nil {
+				return err
+			}
+			if !found {
+				annotations = make(map[string]string)
+			}
+			annotations[TransformedResourceName] = "true"
+			if err := unstructured.SetNestedStringMap(content, annotations, "metadata", "annotations"); err != nil {
+				return err
+			}
+			object.SetUnstructuredContent(content)
+			logrus.Infof("Updated resource of kind %v with patch, resource: %v", patch.Kind, object)
+		}
+	}
+	return nil
+}
+
+func getNewValueForPath(oldVal, valType string) interface{} {
+	var updatedValue interface{}
+	if valType == string(stork_api.KeyPairResourceType) {
+		newVal := make(map[string]string)
+		mapList := strings.Split(oldVal, ",")
+		for _, val := range mapList {
+			keyPair := strings.Split(val, ":")
+			newVal[keyPair[0]] = keyPair[1]
+		}
+		updatedValue = newVal
+	} else if valType == string(stork_api.SliceResourceType) {
+		newVal := []string{}
+		arrList := strings.Split(oldVal, ",")
+		newVal = append(newVal, arrList...)
+ updatedValue = newVal + } else { + updatedValue = oldVal + } + return updatedValue +} diff --git a/pkg/resourcecollector/secret.go b/pkg/resourcecollector/secret.go index a052b5240f..6a0b02dd7c 100644 --- a/pkg/resourcecollector/secret.go +++ b/pkg/resourcecollector/secret.go @@ -8,6 +8,10 @@ import ( "k8s.io/apimachinery/pkg/runtime" ) +const ( + serviceAccountUIDKey = "kubernetes.io/service-account.uid" +) + func (r *ResourceCollector) secretToBeCollected( object runtime.Unstructured, ) (bool, error) { @@ -26,3 +30,30 @@ func (r *ResourceCollector) secretToBeCollected( return true, nil } + +func (r *ResourceCollector) prepareSecretForApply( + object runtime.Unstructured, +) error { + var secret v1.Secret + if err := runtime.DefaultUnstructuredConverter.FromUnstructured(object.UnstructuredContent(), &secret); err != nil { + logrus.Errorf("Error converting Secret object %v: %v", object, err) + return err + } + // Reset the " kubernetes.io/service-account.uid" annotation, + // so that it will update the uid of the newly created SA after restoring + if secret.Annotations != nil { + if _, ok := secret.Annotations[serviceAccountUIDKey]; ok { + secret.Annotations[serviceAccountUIDKey] = "" + // Reset the secret token data to empty, so that new service account token will be updated by k8s, during restore. + if secret.Data["token"] != nil { + secret.Data["token"] = nil + } + } + } + o, err := runtime.DefaultUnstructuredConverter.ToUnstructured(&secret) + if err != nil { + return err + } + object.SetUnstructuredContent(o) + return err +} diff --git a/pkg/resourcecollector/service.go b/pkg/resourcecollector/service.go index 8631e88e5b..131d324958 100644 --- a/pkg/resourcecollector/service.go +++ b/pkg/resourcecollector/service.go @@ -63,7 +63,8 @@ func (r *ResourceCollector) updateService( return err } - if service.Spec.Type == v1.ServiceTypeNodePort { + if service.Spec.Type == v1.ServiceTypeNodePort || + service.Spec.Type == v1.ServiceTypeLoadBalancer { for i := range service.Spec.Ports { service.Spec.Ports[i].NodePort = 0 } diff --git a/pkg/resourcecollector/virtualmachineinstance.go b/pkg/resourcecollector/virtualmachineinstance.go index a6ea3bcbf8..701797f557 100644 --- a/pkg/resourcecollector/virtualmachineinstance.go +++ b/pkg/resourcecollector/virtualmachineinstance.go @@ -7,3 +7,9 @@ func (r *ResourceCollector) virtualMachineInstanceToBeCollected( ) (bool, error) { return false, nil } + +func (r *ResourceCollector) virtualMachineInstanceMigrationToBeCollected( + object runtime.Unstructured, +) (bool, error) { + return false, nil +} diff --git a/pkg/resourcecollector/webhook.go b/pkg/resourcecollector/webhook.go new file mode 100644 index 0000000000..1799f62b69 --- /dev/null +++ b/pkg/resourcecollector/webhook.go @@ -0,0 +1,194 @@ +package resourcecollector + +import ( + "github.com/libopenstorage/stork/pkg/version" + "github.com/sirupsen/logrus" + admissionv1 "k8s.io/api/admissionregistration/v1" + admissionv1beta1 "k8s.io/api/admissionregistration/v1beta1" + "k8s.io/apimachinery/pkg/runtime" +) + +func (r *ResourceCollector) prepareMutatingWebHookForApply( + object runtime.Unstructured, + namespaceMappings map[string]string, +) error { + ok, err := version.RequiresV1Webhooks() + if err != nil { + return err + } + if ok { + // v1 version + var webhookCfg admissionv1.MutatingWebhookConfiguration + if err := runtime.DefaultUnstructuredConverter.FromUnstructured(object.UnstructuredContent(), &webhookCfg); err != nil { + logrus.Errorf("mutatingWebHookToBeCollected: failed in getting mutating 
diff --git a/pkg/resourcecollector/webhook.go b/pkg/resourcecollector/webhook.go
new file mode 100644
index 0000000000..1799f62b69
--- /dev/null
+++ b/pkg/resourcecollector/webhook.go
@@ -0,0 +1,194 @@
+package resourcecollector
+
+import (
+	"github.com/libopenstorage/stork/pkg/version"
+	"github.com/sirupsen/logrus"
+	admissionv1 "k8s.io/api/admissionregistration/v1"
+	admissionv1beta1 "k8s.io/api/admissionregistration/v1beta1"
+	"k8s.io/apimachinery/pkg/runtime"
+)
+
+func (r *ResourceCollector) prepareMutatingWebHookForApply(
+	object runtime.Unstructured,
+	namespaceMappings map[string]string,
+) error {
+	ok, err := version.RequiresV1Webhooks()
+	if err != nil {
+		return err
+	}
+	if ok {
+		// v1 version
+		var webhookCfg admissionv1.MutatingWebhookConfiguration
+		if err := runtime.DefaultUnstructuredConverter.FromUnstructured(object.UnstructuredContent(), &webhookCfg); err != nil {
+			logrus.Errorf("prepareMutatingWebHookForApply: failed in getting mutating webhook: err %v", err)
+			return err
+		}
+		for _, webhook := range webhookCfg.Webhooks {
+			if webhook.ClientConfig.Service != nil {
+				if destNamespace, ok := namespaceMappings[webhook.ClientConfig.Service.Namespace]; ok {
+					// update the namespace with the destination namespace
+					webhook.ClientConfig.Service.Namespace = destNamespace
+				}
+			}
+		}
+		o, err := runtime.DefaultUnstructuredConverter.ToUnstructured(&webhookCfg)
+		if err != nil {
+			return err
+		}
+		object.SetUnstructuredContent(o)
+		return nil
+	}
+	// v1beta1 version
+	var webhookCfg admissionv1beta1.MutatingWebhookConfiguration
+	if err := runtime.DefaultUnstructuredConverter.FromUnstructured(object.UnstructuredContent(), &webhookCfg); err != nil {
+		logrus.Errorf("prepareMutatingWebHookForApply: failed in getting mutating webhook: err %v", err)
+		return err
+	}
+	for _, webhook := range webhookCfg.Webhooks {
+		if webhook.ClientConfig.Service != nil {
+			if destNamespace, ok := namespaceMappings[webhook.ClientConfig.Service.Namespace]; ok {
+				// update the namespace with the destination namespace
+				webhook.ClientConfig.Service.Namespace = destNamespace
+			}
+		}
+	}
+	o, err := runtime.DefaultUnstructuredConverter.ToUnstructured(&webhookCfg)
+	if err != nil {
+		return err
+	}
+	object.SetUnstructuredContent(o)
+	return nil
+}
+
+func (r *ResourceCollector) prepareValidatingWebHookForApply(
+	object runtime.Unstructured,
+	namespaceMappings map[string]string,
+) error {
+	ok, err := version.RequiresV1Webhooks()
+	if err != nil {
+		return err
+	}
+	if ok {
+		// v1 version
+		var webhookCfg admissionv1.ValidatingWebhookConfiguration
+		if err := runtime.DefaultUnstructuredConverter.FromUnstructured(object.UnstructuredContent(), &webhookCfg); err != nil {
+			logrus.Errorf("prepareValidatingWebHookForApply: failed in getting validating webhook: err %v", err)
+			return err
+		}
+		for _, webhook := range webhookCfg.Webhooks {
+			if webhook.ClientConfig.Service != nil {
+				if destNamespace, ok := namespaceMappings[webhook.ClientConfig.Service.Namespace]; ok {
+					// update the namespace with the destination namespace
+					webhook.ClientConfig.Service.Namespace = destNamespace
+				}
+			}
+		}
+		o, err := runtime.DefaultUnstructuredConverter.ToUnstructured(&webhookCfg)
+		if err != nil {
+			return err
+		}
+		object.SetUnstructuredContent(o)
+		return nil
+	}
+	// v1beta1 version
+	var webhookCfg admissionv1beta1.ValidatingWebhookConfiguration
+	if err := runtime.DefaultUnstructuredConverter.FromUnstructured(object.UnstructuredContent(), &webhookCfg); err != nil {
+		logrus.Errorf("prepareValidatingWebHookForApply: failed in getting validating webhook: err %v", err)
+		return err
+	}
+	for _, webhook := range webhookCfg.Webhooks {
+		if webhook.ClientConfig.Service != nil {
+			if destNamespace, ok := namespaceMappings[webhook.ClientConfig.Service.Namespace]; ok {
+				// update the namespace with the destination namespace
+				webhook.ClientConfig.Service.Namespace = destNamespace
+			}
+		}
+	}
+	o, err := runtime.DefaultUnstructuredConverter.ToUnstructured(&webhookCfg)
+	if err != nil {
+		return err
+	}
+	object.SetUnstructuredContent(o)
+	return nil
+}
+
+func (r *ResourceCollector) validatingWebHookToBeCollected(
+	object runtime.Unstructured,
+	namespace string,
+) (bool, error) {
+	ok, err := version.RequiresV1Webhooks()
+	if err != nil {
+		return false, err
+	}
+	if ok {
+		// v1 version
+		var webhookCfg admissionv1.ValidatingWebhookConfiguration
+		if err := runtime.DefaultUnstructuredConverter.FromUnstructured(object.UnstructuredContent(), &webhookCfg); err != nil {
+			logrus.Errorf("validatingWebHookToBeCollected: failed in getting validating webhook: err %v", err)
+			return false, err
+		}
+		for _, webhook := range webhookCfg.Webhooks {
+			if webhook.ClientConfig.Service != nil {
+				if namespace == webhook.ClientConfig.Service.Namespace {
+					return true, nil
+				}
+			}
+		}
+		return false, nil
+	}
+	// v1beta1 version
+	var webhookCfg admissionv1beta1.ValidatingWebhookConfiguration
+	if err := runtime.DefaultUnstructuredConverter.FromUnstructured(object.UnstructuredContent(), &webhookCfg); err != nil {
+		logrus.Errorf("validatingWebHookToBeCollected: failed in getting validating webhook: err %v", err)
+		return false, err
+	}
+	for _, webhook := range webhookCfg.Webhooks {
+		if webhook.ClientConfig.Service != nil {
+			if namespace == webhook.ClientConfig.Service.Namespace {
+				return true, nil
+			}
+		}
+	}
+	return false, nil
+}
+
+func (r *ResourceCollector) mutatingWebHookToBeCollected(
+	object runtime.Unstructured,
+	namespace string,
+) (bool, error) {
+	ok, err := version.RequiresV1Webhooks()
+	if err != nil {
+		return false, err
+	}
+	if ok {
+		// v1 version
+		var webhookCfg admissionv1.MutatingWebhookConfiguration
+		if err := runtime.DefaultUnstructuredConverter.FromUnstructured(object.UnstructuredContent(), &webhookCfg); err != nil {
+			logrus.Errorf("mutatingWebHookToBeCollected: failed in getting mutating webhook: err %v", err)
+			return false, err
+		}
+		for _, webhook := range webhookCfg.Webhooks {
+			if webhook.ClientConfig.Service != nil {
+				if namespace == webhook.ClientConfig.Service.Namespace {
+					return true, nil
+				}
+			}
+		}
+		return false, nil
+	}
+	// v1beta1 version
+	var webhookCfg admissionv1beta1.MutatingWebhookConfiguration
+	if err := runtime.DefaultUnstructuredConverter.FromUnstructured(object.UnstructuredContent(), &webhookCfg); err != nil {
+		logrus.Errorf("mutatingWebHookToBeCollected: failed in getting mutating webhook: err %v", err)
+		return false, err
+	}
+	for _, webhook := range webhookCfg.Webhooks {
+		if webhook.ClientConfig.Service != nil {
+			if namespace == webhook.ClientConfig.Service.Namespace {
+				return true, nil
+			}
+		}
+	}
+	return false, nil
+}
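All four loops in webhook.go rewrite the webhook's service namespace the same way; the range-value form in the diff works because ClientConfig.Service is a pointer into the slice element. Purely as an illustration, here is the v1 variant extracted into a standalone, runnable form; remapWebhookServiceNamespaces is a hypothetical name, not part of the change.

package main

import (
	"fmt"

	admissionv1 "k8s.io/api/admissionregistration/v1"
)

func remapWebhookServiceNamespaces(webhooks []admissionv1.MutatingWebhook, namespaceMappings map[string]string) {
	for i := range webhooks {
		svc := webhooks[i].ClientConfig.Service
		if svc == nil {
			continue
		}
		// Rewrite the namespace only when a mapping exists for it.
		if destNamespace, ok := namespaceMappings[svc.Namespace]; ok {
			svc.Namespace = destNamespace
		}
	}
}

func main() {
	cfg := admissionv1.MutatingWebhookConfiguration{
		Webhooks: []admissionv1.MutatingWebhook{{
			Name: "demo.example.com",
			ClientConfig: admissionv1.WebhookClientConfig{
				Service: &admissionv1.ServiceReference{Namespace: "src-ns", Name: "hook-svc"},
			},
		}},
	}
	remapWebhookServiceNamespaces(cfg.Webhooks, map[string]string{"src-ns": "dest-ns"})
	fmt.Println(cfg.Webhooks[0].ClientConfig.Service.Namespace) // dest-ns
}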
diff --git a/pkg/schedule/schedule.go b/pkg/schedule/schedule.go
index 03ed3c7704..6581d68c61 100644
--- a/pkg/schedule/schedule.go
+++ b/pkg/schedule/schedule.go
@@ -8,6 +8,7 @@ import (
 	stork_api "github.com/libopenstorage/stork/pkg/apis/stork/v1alpha1"
 	"github.com/libopenstorage/stork/pkg/k8sutils"
+	"github.com/libopenstorage/stork/pkg/utils"
 	"github.com/libopenstorage/stork/pkg/version"
 	"github.com/portworx/sched-ops/k8s/apiextensions"
 	"github.com/portworx/sched-ops/k8s/core"
@@ -229,7 +230,20 @@ func GetOptions(policyName string, namespace string, policyType stork_api.Schedu
 	case stork_api.SchedulePolicyTypeInterval:
 		return schedulePolicy.Policy.Interval.Options, nil
 	case stork_api.SchedulePolicyTypeDaily:
-		return schedulePolicy.Policy.Daily.Options, nil
+		options := schedulePolicy.Policy.Daily.Options
+		if len(options) == 0 {
+			options = make(map[string]string)
+		}
+		scheduledDay, ok := stork_api.Days[schedulePolicy.Policy.Daily.ForceFullSnapshotDay]
+		if ok {
+			currentDay := GetCurrentTime().Weekday()
+			// force full backup on the specified day
+			if currentDay == scheduledDay {
+				options[utils.PXIncrementalCountAnnotation] = "0"
+				logrus.Infof("Forcing full snapshot for the daily snapshot schedule policy on %s", schedulePolicy.Policy.Daily.ForceFullSnapshotDay)
+			}
+		}
+		return options, nil
 	case stork_api.SchedulePolicyTypeWeekly:
 		return schedulePolicy.Policy.Weekly.Options, nil
 	case stork_api.SchedulePolicyTypeMonthly:
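To make the new daily-policy behavior concrete: when ForceFullSnapshotDay matches the current weekday, GetOptions pins the incremental-count annotation to 0, which forces a full cloud snapshot; on all other days the policy's options pass through unchanged. A rough, self-contained sketch of that decision follows; dailyOptions is a hypothetical stand-in with the current time injected for testability, and the annotation key matches the one added to pkg/utils.

package main

import (
	"fmt"
	"time"
)

const pxIncrementalCountAnnotation = "portworx.io/cloudsnap-incremental-count"

func dailyOptions(opts map[string]string, forceFullDay time.Weekday, now time.Time) map[string]string {
	if len(opts) == 0 {
		opts = make(map[string]string)
	}
	if now.Weekday() == forceFullDay {
		// An incremental count of 0 makes the driver take a full cloud backup.
		opts[pxIncrementalCountAnnotation] = "0"
	}
	return opts
}

func main() {
	sunday := time.Date(2022, time.July, 3, 1, 0, 0, 0, time.UTC) // a Sunday
	fmt.Println(dailyOptions(nil, time.Sunday, sunday))
	// map[portworx.io/cloudsnap-incremental-count:0]
	fmt.Println(dailyOptions(nil, time.Friday, sunday))
	// map[] -- any other day keeps the policy's incremental behavior
}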
diff --git a/pkg/snapshot/controllers/snapshot.go b/pkg/snapshot/controllers/snapshot.go
index 3e232a0bdc..8351d3291b 100644
--- a/pkg/snapshot/controllers/snapshot.go
+++ b/pkg/snapshot/controllers/snapshot.go
@@ -69,22 +69,17 @@ func (s *Snapshotter) Start(stopChannel <-chan struct{}) error {
 		return err
 	}
 	if ok {
-		err = client.CreateCRDV1(aeclientset)
+		err = client.CreateCRDV1(aeclientset, validateCRDInterval, validateCRDTimeout)
 		if err != nil {
 			return err
 		}
 	} else {
-		err = client.CreateCRD(aeclientset)
+		err = client.CreateCRD(aeclientset, validateCRDInterval, validateCRDTimeout)
 		if err != nil {
 			return err
 		}
 	}
 
-	err = client.WaitForSnapshotResource(snapshotClient)
-	if err != nil {
-		return err
-	}
-
 	plugins := make(map[string]snapshotvolume.Plugin)
 	plugins[s.Driver.String()] = s.Driver.GetSnapshotPlugin()
diff --git a/pkg/snapshot/controllers/snapshotschedule.go b/pkg/snapshot/controllers/snapshotschedule.go
index d1bf175c8d..6c5e22db06 100644
--- a/pkg/snapshot/controllers/snapshotschedule.go
+++ b/pkg/snapshot/controllers/snapshotschedule.go
@@ -42,6 +42,7 @@ const (
 	storkRuleAnnotationPrefix = "stork.libopenstorage.org"
 	preSnapRuleAnnotationKey  = storkRuleAnnotationPrefix + "/pre-snapshot-rule"
 	postSnapRuleAnnotationKey = storkRuleAnnotationPrefix + "/post-snapshot-rule"
+	StorkSnapshotNameLabel    = "stork.libopenstorage.org/snapshotName"
 )
 
 // NewSnapshotScheduleController creates a new instance of SnapshotScheduleController.
@@ -317,6 +318,10 @@ func (s *SnapshotScheduleController) startVolumeSnapshot(snapshotSchedule *stork
 	}
 	snapshot.Metadata.Annotations[SnapshotScheduleNameAnnotation] = snapshotSchedule.Name
 	snapshot.Metadata.Annotations[SnapshotSchedulePolicyTypeAnnotation] = string(policyType)
+	if snapshot.Metadata.Labels == nil {
+		snapshot.Metadata.Labels = make(map[string]string)
+	}
+	snapshot.Metadata.Labels[StorkSnapshotNameLabel] = snapshotName
 	if snapshotSchedule.Spec.PreExecRule != "" {
 		_, err := storkops.Instance().GetRule(snapshotSchedule.Spec.PreExecRule, snapshotSchedule.Namespace)
 		if err != nil {
diff --git a/pkg/storkctl/clusterpair.go b/pkg/storkctl/clusterpair.go
index 64043a483c..90a9f27307 100644
--- a/pkg/storkctl/clusterpair.go
+++ b/pkg/storkctl/clusterpair.go
@@ -3,13 +3,18 @@ package storkctl
 import (
 	"bufio"
 	"fmt"
-	"github.com/libopenstorage/stork/pkg/utils"
 	"io/ioutil"
+	"net"
 	"os"
 	"reflect"
+	"strconv"
 	"strings"
 
+	"github.com/libopenstorage/stork/pkg/utils"
+
+	clusterclient "github.com/libopenstorage/openstorage/api/client/cluster"
 	storkv1 "github.com/libopenstorage/stork/pkg/apis/stork/v1alpha1"
+	"github.com/portworx/sched-ops/k8s/core"
 	storkops "github.com/portworx/sched-ops/k8s/stork"
 	"github.com/spf13/cobra"
 	"k8s.io/apimachinery/pkg/api/validation"
@@ -248,14 +253,22 @@ func newCreateClusterPairCommand(cmdFactory Factory, ioStreams genericclioptions
 				util.CheckErr(err)
 				return
 			}
-
-			srcClusterPair, err := generateClusterPair(clusterPairName, cmdFactory.GetNamespace(), dIP, dPort, destToken, dFile, projectMappingsStr, false)
+			printMsg("Using the PX-Service endpoint of the DR cluster to create the clusterpair...\n", ioStreams.Out)
+			ip, port, token, err := getClusterPairParams(dFile, dIP)
 			if err != nil {
+				err := fmt.Errorf("unable to create clusterpair from source to DR cluster. Err: %v", err)
 				util.CheckErr(err)
 				return
 			}
+			dIP = ip
+			if dPort == "" {
+				dPort = port
+			}
+			if destToken == "" {
+				destToken = token
+			}
 
-			destClusterPair, err := generateClusterPair(clusterPairName, cmdFactory.GetNamespace(), sIP, sPort, srcToken, sFile, projectMappingsStr, true)
+			srcClusterPair, err := generateClusterPair(clusterPairName, cmdFactory.GetNamespace(), dIP, dPort, destToken, dFile, projectMappingsStr, false)
 			if err != nil {
 				util.CheckErr(err)
 				return
@@ -274,6 +287,28 @@ func newCreateClusterPairCommand(cmdFactory Factory, ioStreams genericclioptions
 				return
 			}
 			printMsg("ClusterPair "+clusterPairName+" created successfully on source cluster", ioStreams.Out)
+			if sFile == "" {
+				return
+			}
+			printMsg("Using the PX-Service endpoint of the source cluster to create the clusterpair...\n", ioStreams.Out)
+			ip, port, token, err = getClusterPairParams(sFile, sIP)
+			if err != nil {
+				err := fmt.Errorf("unable to create clusterpair from DR to source cluster. Err: %v", err)
+				util.CheckErr(err)
+				return
+			}
+			sIP = ip
+			if sPort == "" {
+				sPort = port
+			}
+			if srcToken == "" {
+				srcToken = token
+			}
+			destClusterPair, err := generateClusterPair(clusterPairName, cmdFactory.GetNamespace(), sIP, sPort, srcToken, sFile, projectMappingsStr, true)
+			if err != nil {
+				util.CheckErr(err)
+				return
+			}
 			// Create cluster-pair on dest cluster
 			conf, err = getConfig(dFile).ClientConfig()
 			if err != nil {
@@ -297,8 +332,8 @@ func newCreateClusterPairCommand(cmdFactory Factory, ioStreams genericclioptions
 	createClusterPairCommand.Flags().StringVarP(&dIP, "dest-ip", "", "", "IP of the destination cluster")
 	createClusterPairCommand.Flags().StringVarP(&dPort, "dest-port", "", "9001", "port of the storage node on the destination cluster")
 	createClusterPairCommand.Flags().StringVarP(&dFile, "dest-kube-file", "", "", "kube-config of destination cluster")
-	createClusterPairCommand.Flags().StringVarP(&srcToken, "src-token", "", "", "source cluster token for cluster pairing")
-	createClusterPairCommand.Flags().StringVarP(&destToken, "dest-token", "", "", "destination cluster token for cluster pairing")
+	createClusterPairCommand.Flags().StringVarP(&srcToken, "src-token", "", "", "(optional) source cluster token for cluster pairing")
+	createClusterPairCommand.Flags().StringVarP(&destToken, "dest-token", "", "", "(optional) destination cluster token for cluster pairing")
 	createClusterPairCommand.Flags().StringVarP(&projectMappingsStr, "project-mappings", "", "",
 		"project mappings between source and destination clusters, use comma-separated <source-project-id>=<dest-project-id> pairs (Currently supported only for Rancher)")
@@ -436,3 +471,48 @@ func getConfig(configFile string) clientcmd.ClientConfig {
 	configOverrides := &clientcmd.ConfigOverrides{}
 	return clientcmd.NewNonInteractiveDeferredLoadingClientConfig(configLoadingRules, configOverrides)
 }
+
+func getClusterPairParams(config, endpoint string) (string, string, string, error) {
+	var ip, port, token string
+	client, err := core.NewInstanceFromConfigFile(config)
+	if err != nil {
+		return ip, port, token, err
+	}
+
+	services, err := client.ListServices("", meta.ListOptions{LabelSelector: "name=portworx-api"})
+	if err != nil || len(services.Items) == 0 {
+		err := fmt.Errorf("unable to retrieve portworx-api service from the cluster. Err: %v", err)
+		return ip, port, token, err
+	}
+	// TODO: in case of setting up async-DR over cloud,
+	// users may set up a different load-balancer service in front of the PX APIs;
+	// accept the PX service name as an env variable
+	svc := services.Items[0]
+	ip = endpoint
+	if ip == "" {
+		// this works only if the PX service is of type LoadBalancer
+		// TODO: for two clusters whose worker nodes are reachable, figure out
+		// any one worker IP by looking at the px/enabled label
+		ip = svc.Spec.LoadBalancerIP
+	}
+	pxToken := os.Getenv("PX_AUTH_TOKEN")
+	for _, svcPort := range svc.Spec.Ports {
+		if svcPort.Name == "px-api" {
+			port = strconv.Itoa(int(svcPort.Port))
+			break
+		}
+	}
+	pxEndpoint := net.JoinHostPort(ip, port)
+	// TODO: support https as well
+	clnt, err := clusterclient.NewAuthClusterClient("http://"+pxEndpoint, "v1", pxToken, "")
+	if err != nil {
+		return ip, port, token, err
+	}
+	mgr := clusterclient.ClusterManager(clnt)
+	resp, err := mgr.GetPairToken(false)
+	if err != nil {
+		return ip, port, token, err
+	}
+	token = resp.GetToken()
+	return ip, port, token, nil
+}
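A condensed, illustrative version of the discovery flow implemented by getClusterPairParams above: look up the portworx-api service through the given kubeconfig, build host:port from the px-api port, and ask the cluster for a pairing token. The API calls are the same ones used in the diff; the kubeconfig path and IP in main are hypothetical, and the LoadBalancer fallback plus most error context are omitted for brevity.

package main

import (
	"fmt"
	"net"
	"os"
	"strconv"

	clusterclient "github.com/libopenstorage/openstorage/api/client/cluster"
	"github.com/portworx/sched-ops/k8s/core"
	meta "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func pairToken(kubeconfig, ip string) (string, error) {
	client, err := core.NewInstanceFromConfigFile(kubeconfig)
	if err != nil {
		return "", err
	}
	svcs, err := client.ListServices("", meta.ListOptions{LabelSelector: "name=portworx-api"})
	if err != nil || len(svcs.Items) == 0 {
		return "", fmt.Errorf("portworx-api service not found: %v", err)
	}
	// Find the px-api port on the service.
	var port string
	for _, p := range svcs.Items[0].Spec.Ports {
		if p.Name == "px-api" {
			port = strconv.Itoa(int(p.Port))
			break
		}
	}
	// Ask the PX cluster for a pairing token (PX_AUTH_TOKEN may be empty).
	clnt, err := clusterclient.NewAuthClusterClient("http://"+net.JoinHostPort(ip, port), "v1", os.Getenv("PX_AUTH_TOKEN"), "")
	if err != nil {
		return "", err
	}
	resp, err := clusterclient.ClusterManager(clnt).GetPairToken(false)
	if err != nil {
		return "", err
	}
	return resp.GetToken(), nil
}

func main() {
	// Hypothetical inputs: a kubeconfig for the remote cluster and a
	// reachable node/LB IP (in the real code an empty IP falls back to
	// the service's LoadBalancerIP).
	token, err := pairToken("/tmp/dest-kubeconfig", "10.0.0.5")
	fmt.Println(token, err)
}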
diff --git a/pkg/utils/utils.go b/pkg/utils/utils.go
index e30de7a197..396cf40ce1 100644
--- a/pkg/utils/utils.go
+++ b/pkg/utils/utils.go
@@ -2,6 +2,12 @@ package utils
 
 import (
 	"fmt"
+	"github.com/aquilax/truncate"
+	"github.com/libopenstorage/stork/drivers"
+	"github.com/portworx/sched-ops/k8s/core"
+	"github.com/sirupsen/logrus"
+	v1 "k8s.io/api/core/v1"
+	"k8s.io/apimachinery/pkg/util/validation"
 	"strings"
 )
 
@@ -10,6 +16,57 @@ const (
 	CattlePrefix = "cattle.io"
 	// CattleProjectPrefix is the prefix used in all Rancher project related annotations and labels
 	CattleProjectPrefix = "cattle.io/projectId"
+	// PXIncrementalCountAnnotation is the annotation used to set the cloud backup incremental count
+	// for a volume
+	PXIncrementalCountAnnotation = "portworx.io/cloudsnap-incremental-count"
+	// trimCRDGroupNameKey - group names containing a string from this configmap field will be trimmed
+	trimCRDGroupNameKey = "TRIM_CRD_GROUP_NAME"
+
+	// PrefixBackup - prefix string that will be used for the kdmp backup job
+	PrefixBackup = "backup"
+	// PrefixNFSBackup - prefix string that will be used for the nfs backup job
+	PrefixNFSBackup = "nfs-backup"
+	// PrefixRestore - prefix string that will be used for the kdmp restore job
+	PrefixRestore = "nfs-restore-resource"
+	// PrefixNFSRestorePVC - prefix string that will be used for pvc creation during nfs volume restore
+	PrefixNFSRestorePVC = "nfs-restore-pvc"
+
+	// KdmpAnnotationPrefix - KDMP annotation prefix
+	KdmpAnnotationPrefix = "kdmp.portworx.com/"
+	// ApplicationBackupCRNameKey - key name to store the applicationbackup CR name with KDMP annotation prefix
+	ApplicationBackupCRNameKey = KdmpAnnotationPrefix + "applicationbackup-cr-name"
+	// ApplicationBackupCRUIDKey - key name to store the applicationbackup CR UID with KDMP annotation prefix
+	ApplicationBackupCRUIDKey = KdmpAnnotationPrefix + "applicationbackup-cr-uid"
+	// BackupObjectNameKey - annotation key value for backup object name with KDMP annotation prefix
+	BackupObjectNameKey = KdmpAnnotationPrefix + "backupobject-name"
+	// BackupObjectUIDKey - annotation key value for backup object UID with KDMP annotation prefix
+	BackupObjectUIDKey = KdmpAnnotationPrefix + "backupobject-uid"
+	// ApplicationRestoreCRNameKey - key name to store the applicationrestore CR name with KDMP annotation prefix
+	ApplicationRestoreCRNameKey = KdmpAnnotationPrefix + "applicationrestore-cr-name"
+	// ApplicationRestoreCRUIDKey - key name to store the applicationrestore CR UID with KDMP annotation prefix
+	ApplicationRestoreCRUIDKey = KdmpAnnotationPrefix + "applicationrestore-cr-uid"
+	// RestoreObjectNameKey - key name to store the restore object name with KDMP annotation prefix
+	RestoreObjectNameKey = KdmpAnnotationPrefix + "restoreobject-name"
+	// RestoreObjectUIDKey - key name to store the restore object UID with KDMP annotation prefix
+	RestoreObjectUIDKey = KdmpAnnotationPrefix + "restoreobject-uid"
+
+	// PxbackupAnnotationPrefix - px-backup annotation prefix
+	PxbackupAnnotationPrefix = "portworx.io/"
+	// PxbackupAnnotationCreateByKey - annotation key name to indicate whether the CR was created by px-backup or stork
+	PxbackupAnnotationCreateByKey = PxbackupAnnotationPrefix + "created-by"
+	// PxbackupAnnotationCreateByValue - annotation key value for the created-by key for px-backup
+	PxbackupAnnotationCreateByValue = "px-backup"
+
+	// PxbackupObjectUIDKey - annotation key name for backup object UID with px-backup prefix
+	PxbackupObjectUIDKey = PxbackupAnnotationPrefix + "backup-uid"
+	// PxbackupObjectNameKey - annotation key name for backup object name with px-backup prefix
+	PxbackupObjectNameKey = PxbackupAnnotationPrefix + "backup-name"
+	// SkipResourceAnnotation - annotation value to skip a resource during resource collection
+	SkipResourceAnnotation = "stork.libopenstorage.org/skip-resource"
+	// StorkAPIVersion API version
+	StorkAPIVersion = "stork.libopenstorage.org/v1alpha1"
+	// BackupLocationKind CR kind
+	BackupLocationKind = "BackupLocation"
 )
 
 // ParseKeyValueList parses a list of key=values string into a map
@@ -27,3 +84,70 @@ func ParseKeyValueList(expressions []string) (map[string]string, error) {
 
 	return matchLabels, nil
 }
+
+// GetTrimmedGroupName - get the trimmed group name
+// Usually the CRDs that belong to a common operator share the same group name.
+// For example:
+// keycloakbackups.keycloak.org, keycloakclients.keycloak.org, keycloakrealms.keycloak.org
+// keycloaks.keycloak.org, keycloakusers.keycloak.org
+// Here the group name is "keycloak.org"
+// In some cases, the CRD names are as follows:
+// agents.agent.k8s.elastic.co - group name: agent.k8s.elastic.co
+// apmservers.apm.k8s.elastic.co - group name: apm.k8s.elastic.co
+// beats.beat.k8s.elastic.co - group name: beat.k8s.elastic.co
+// Here the group names are different even though they belong to the same operator,
+// but they share the last three parts, like "k8s.elastic.co".
+// So logic was added to combine such CRDs when their group names share the same
+// last three parts and the group has more than three parts.
+func GetTrimmedGroupName(group string) string {
+	kdmpData, err := core.Instance().GetConfigMap(drivers.KdmpConfigmapName, drivers.KdmpConfigmapNamespace)
+	if err != nil {
+		logrus.Warnf("error in reading configMap [%v/%v]: %v",
+			drivers.KdmpConfigmapName, drivers.KdmpConfigmapNamespace, err)
+		return group
+	}
+	if len(kdmpData.Data[trimCRDGroupNameKey]) != 0 {
+		groupNameList := strings.Split(kdmpData.Data[trimCRDGroupNameKey], ",")
+		for _, groupName := range groupNameList {
+			if strings.Contains(group, groupName) {
+				return groupName
+			}
+		}
+	}
+	return group
+}
+
+// GetStorageClassNameForPVC - Get the storageClass name from the PVC spec
+func GetStorageClassNameForPVC(pvc *v1.PersistentVolumeClaim) (string, error) {
+	var scName string
+	if pvc.Spec.StorageClassName != nil && len(*pvc.Spec.StorageClassName) > 0 {
+		scName = *pvc.Spec.StorageClassName
+	} else {
+		scName = pvc.Annotations[v1.BetaStorageClassAnnotation]
+	}
+
+	if len(scName) == 0 {
+		return "", fmt.Errorf("PVC: %s does not have a storage class", pvc.Name)
+	}
+	return scName, nil
+}
+
+// GetValidLabel - validates the label value to make sure its length is less than 63
+// characters and that it is in a valid label format.
+// If the length is greater than 63, it is truncated to 63 characters.
+func GetValidLabel(labelVal string) string {
+	if len(labelVal) > validation.LabelValueMaxLength {
+		labelVal = truncate.Truncate(labelVal, validation.LabelValueMaxLength, "", truncate.PositionEnd)
+		// make sure the truncated value does not end with a hyphen
+		labelVal = strings.Trim(labelVal, "-")
+		// make sure the truncated value does not end with a dot
+		labelVal = strings.Trim(labelVal, ".")
+	}
+	return labelVal
+}
+
+// GetShortUID returns the first part of the UID
+func GetShortUID(uid string) string {
+	if len(uid) < 8 {
+		return ""
+	}
+	return uid[0:7]
+}
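A tiny usage sketch (not part of the change) for the two label helpers added above; the UID is a made-up sample value.

package main

import (
	"fmt"
	"strings"

	"github.com/libopenstorage/stork/pkg/utils"
)

func main() {
	// 71 characters in, 63 out: truncated to the Kubernetes label value limit,
	// with any trailing hyphen or dot stripped afterwards.
	long := strings.Repeat("x", 70) + "-"
	fmt.Println(len(utils.GetValidLabel(long))) // 63

	// Only the first 7 characters of the UID are kept.
	fmt.Println(utils.GetShortUID("3c9f1a7e-0c2d-4e8f-9a1b-6d7e8f9a0b1c")) // 3c9f1a7
}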
diff --git a/pkg/version/version.go b/pkg/version/version.go
index 9a9e17a343..bb28106aa6 100644
--- a/pkg/version/version.go
+++ b/pkg/version/version.go
@@ -24,6 +24,7 @@ var (
 const (
 	k8sMinVersionCSIDriverV1      = "1.22"
 	k8sMinVersionVolumeSnapshotV1 = "1.20"
+	K8sMinVersionWebhookv1        = "1.22"
 )
 
 // RequiresV1Registration returns true if crd needs to be registered as apiVersion V1
@@ -43,6 +44,23 @@ func RequiresV1Registration() (bool, error) {
 	return false, nil
 }
 
+// RequiresV1Webhooks returns true if the V1 version of the webhook objects is needed
+func RequiresV1Webhooks() (bool, error) {
+	clusterK8sVersion, _, err := GetFullVersion()
+	if err != nil {
+		return false, err
+	}
+	requiredK8sVer, err := version.NewVersion(K8sMinVersionWebhookv1)
+	if err != nil {
+		return false, err
+	}
+	if clusterK8sVersion.GreaterThanOrEqual(requiredK8sVer) {
+		return true, nil
+	}
+	return false, nil
+}
+
 // RequiresV1CSIdriver returns true if V1 version of CSIdriver APIs need to be called
 func RequiresV1CSIdriver() (bool, error) {
 	clusterK8sVersion, _, err := GetFullVersion()
diff --git a/test/integration_test/Dockerfile b/test/integration_test/Dockerfile
index 6d21b84f56..21ec3a9936 100644
--- a/test/integration_test/Dockerfile
+++ b/test/integration_test/Dockerfile
@@ -1,8 +1,8 @@
-FROM golang:1.15.1
+FROM golang:1.16.1
 
 # Install dependencies
 RUN apt-get update && \
-    /usr/local/go/bin/go get -u gotest.tools/gotestsum
+    /usr/local/go/bin/go install gotest.tools/gotestsum@latest
 
 RUN apt-get update && apt-get install -y python3-pip
diff --git a/test/integration_test/migration_test.go b/test/integration_test/migration_test.go
index bde0b911be..de47a4249b 100644
--- a/test/integration_test/migration_test.go
+++ b/test/integration_test/migration_test.go
@@ -160,7 +160,6 @@ func triggerMigration(
 	return ctxs, preMigrationCtx
 }
 
-//
 // validateMigrationSummary validates the migration summary
 // currently we don't have an automated way to find out how many resources got deployed
 // through torpedo specs. For ex. a statefulset can have an inline PVC and that should
diff --git a/vendor/github.com/kubernetes-incubator/external-storage/snapshot/pkg/client/client.go b/vendor/github.com/kubernetes-incubator/external-storage/snapshot/pkg/client/client.go
index bf945fc60c..b934abea57 100644
--- a/vendor/github.com/kubernetes-incubator/external-storage/snapshot/pkg/client/client.go
+++ b/vendor/github.com/kubernetes-incubator/external-storage/snapshot/pkg/client/client.go
@@ -18,6 +18,7 @@ package client
 
 import (
 	"context"
+	"fmt"
 	"reflect"
 	"time"
 
@@ -68,7 +69,7 @@ func NewClient(cfg *rest.Config) (*rest.RESTClient, *runtime.Scheme, error) {
 }
 
 // CreateCRD creates CustomResourceDefinition
-func CreateCRD(clientset apiextensionsclient.Interface) error {
+func CreateCRD(clientset apiextensionsclient.Interface, retryInterval, timeout time.Duration) error {
 	crd := &apiextensionsv1beta1.CustomResourceDefinition{
 		ObjectMeta: metav1.ObjectMeta{
 			Name: crdv1.VolumeSnapshotDataResourcePlural + "." + crdv1.GroupName,
@@ -78,8 +79,8 @@ func CreateCRD(clientset apiextensionsclient.Interface) error {
 			Version: crdv1.SchemeGroupVersion.Version,
 			Scope:   apiextensionsv1beta1.ClusterScoped,
 			Names: apiextensionsv1beta1.CustomResourceDefinitionNames{
-				Plural: crdv1.VolumeSnapshotDataResourcePlural,
-				Kind:   reflect.TypeOf(crdv1.VolumeSnapshotData{}).Name(),
+				Plural:     crdv1.VolumeSnapshotDataResourcePlural,
+				Kind:       reflect.TypeOf(crdv1.VolumeSnapshotData{}).Name(),
 				ShortNames: storkVolumeSnapshotDataShortNames,
 			},
 		},
@@ -100,8 +101,8 @@ func CreateCRD(clientset apiextensionsclient.Interface) error {
 			Version: crdv1.SchemeGroupVersion.Version,
 			Scope:   apiextensionsv1beta1.NamespaceScoped,
 			Names: apiextensionsv1beta1.CustomResourceDefinitionNames{
-				Plural: crdv1.VolumeSnapshotResourcePlural,
-				Kind:   reflect.TypeOf(crdv1.VolumeSnapshot{}).Name(),
+				Plural:     crdv1.VolumeSnapshotResourcePlural,
+				Kind:       reflect.TypeOf(crdv1.VolumeSnapshot{}).Name(),
 				ShortNames: storkVolumeSnapshotShortNames,
 			},
 		},
@@ -111,11 +112,31 @@ func CreateCRD(clientset apiextensionsclient.Interface) error {
 		glog.Fatalf("failed to create VolumeSnapshotResource: %#v, err: %#v", res, err)
 	}
 
-	return nil
+	return wait.PollImmediate(retryInterval, timeout, func() (bool, error) {
+		crd, err := clientset.ApiextensionsV1beta1().CustomResourceDefinitions().Get(context.TODO(), crd.Name, metav1.GetOptions{})
+		if apierrors.IsNotFound(err) {
+			return false, nil
+		} else if err != nil {
+			return false, err
+		}
+		for _, cond := range crd.Status.Conditions {
+			switch cond.Type {
+			case apiextensionsv1beta1.Established:
+				if cond.Status == apiextensionsv1beta1.ConditionTrue {
+					return true, nil
+				}
+			case apiextensionsv1beta1.NamesAccepted:
+				if cond.Status == apiextensionsv1beta1.ConditionFalse {
+					return false, fmt.Errorf("name conflict: %v", cond.Reason)
+				}
+			}
+		}
+		return false, nil
+	})
 }
 
 // CreateCRDV1 creates CustomResourceDefinition for v1 apiVersion
-func CreateCRDV1(clientset apiextensionsclient.Interface) error {
+func CreateCRDV1(clientset apiextensionsclient.Interface, retryInterval, timeout time.Duration) error {
 	setSchema := true
 	annot := make(map[string]string)
 	annot["api-approved.kubernetes.io"] = "https://github.com/kubernetes-csi/external-snapshotter/pull/419"
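Caller-side view of the new CreateCRD/CreateCRDV1 contract changed in the hunks above and below: the caller now supplies the poll interval and timeout used to wait for the CRDs to reach the Established condition, replacing the removed WaitForSnapshotResource call. A minimal sketch follows; the 5s/1m values are illustrative, and the snapshot controller passes its own validateCRDInterval/validateCRDTimeout.

package main

import (
	"time"

	snapclient "github.com/kubernetes-incubator/external-storage/snapshot/pkg/client"
	apiextensionsclient "k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset"
	"k8s.io/client-go/rest"
)

func ensureSnapshotCRDs(cfg *rest.Config) error {
	cs, err := apiextensionsclient.NewForConfig(cfg)
	if err != nil {
		return err
	}
	// Retry every 5s, give up after 1m if the CRDs never become Established.
	return snapclient.CreateCRDV1(cs, 5*time.Second, time.Minute)
}

func main() {
	cfg, err := rest.InClusterConfig()
	if err != nil {
		panic(err)
	}
	if err := ensureSnapshotCRDs(cfg); err != nil {
		panic(err)
	}
}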
@@ -139,8 +160,8 @@ func CreateCRDV1(clientset apiextensionsclient.Interface) error { }, Scope: apiextensionsv1.ClusterScoped, Names: apiextensionsv1.CustomResourceDefinitionNames{ - Plural: crdv1.VolumeSnapshotDataResourcePlural, - Kind: reflect.TypeOf(crdv1.VolumeSnapshotData{}).Name(), + Plural: crdv1.VolumeSnapshotDataResourcePlural, + Kind: reflect.TypeOf(crdv1.VolumeSnapshotData{}).Name(), ShortNames: storkVolumeSnapshotDataShortNames, }, }, @@ -172,8 +193,8 @@ func CreateCRDV1(clientset apiextensionsclient.Interface) error { }, Scope: apiextensionsv1.NamespaceScoped, Names: apiextensionsv1.CustomResourceDefinitionNames{ - Plural: crdv1.VolumeSnapshotResourcePlural, - Kind: reflect.TypeOf(crdv1.VolumeSnapshot{}).Name(), + Plural: crdv1.VolumeSnapshotResourcePlural, + Kind: reflect.TypeOf(crdv1.VolumeSnapshot{}).Name(), ShortNames: storkVolumeSnapshotShortNames, }, }, @@ -183,7 +204,27 @@ func CreateCRDV1(clientset apiextensionsclient.Interface) error { glog.Fatalf("failed to create VolumeSnapshotResource: %#v, err: %#v", res, err) } - return nil + return wait.PollImmediate(retryInterval, timeout, func() (bool, error) { + crd, err := clientset.ApiextensionsV1().CustomResourceDefinitions().Get(context.TODO(), crd.Name, metav1.GetOptions{}) + if apierrors.IsNotFound(err) { + return false, nil + } else if err != nil { + return false, err + } + for _, cond := range crd.Status.Conditions { + switch cond.Type { + case apiextensionsv1.Established: + if cond.Status == apiextensionsv1.ConditionTrue { + return true, nil + } + case apiextensionsv1.NamesAccepted: + if cond.Status == apiextensionsv1.ConditionFalse { + return false, fmt.Errorf("name conflict: %v", cond.Reason) + } + } + } + return false, nil + }) } // WaitForSnapshotResource waits for the snapshot resource diff --git a/vendor/github.com/kubernetes-incubator/external-storage/snapshot/pkg/controller/snapshotter/snapshotter.go b/vendor/github.com/kubernetes-incubator/external-storage/snapshot/pkg/controller/snapshotter/snapshotter.go index eced203898..75da61ca04 100644 --- a/vendor/github.com/kubernetes-incubator/external-storage/snapshot/pkg/controller/snapshotter/snapshotter.go +++ b/vendor/github.com/kubernetes-incubator/external-storage/snapshot/pkg/controller/snapshotter/snapshotter.go @@ -42,6 +42,7 @@ const ( snapshotMetadataPVName = "SnapshotMetadata-PVName" snapshotDataNamePrefix = "k8s-volume-snapshot" pvNameLabel = "pvName" + StorkSnapshotNameLabel = "stork.libopenstorage.org/snapshotName" defaultExponentialBackOffOnError = true // volumeSnapshot* is configuration of exponential backoff for @@ -376,6 +377,14 @@ func (vs *volumeSnapshotter) updateSnapshotIfExists(uniqueSnapshotName string, s glog.Infof("No tag can be found in snapshot metadata %s", uniqueSnapshotName) return statusNew, snapshot, nil } + + // Find snapshot through cloud provider by existing tags, and create VolumeSnapshotData if such snapshot is found + snapshotDataSource, conditions, err = vs.findSnapshotByTags(snapshotName, snapshot) + if err != nil { + glog.Infof("unable to find snapshot by looking at tags %s, err: %v", uniqueSnapshotName, err) + return statusNew, snapshot, nil + } + // Check whether snapshotData object is already created or not. 
If yes, snapshot is already // triggered through cloud provider, bind it and return pending state if snapshotDataObj = vs.getSnapshotDataFromSnapshotName(uniqueSnapshotName); snapshotDataObj != nil { @@ -386,11 +395,6 @@ func (vs *volumeSnapshotter) updateSnapshotIfExists(uniqueSnapshotName string, s } return statusPending, snapshotObj, nil } - // Find snapshot through cloud provider by existing tags, and create VolumeSnapshotData if such snapshot is found - snapshotDataSource, conditions, err = vs.findSnapshotByTags(snapshotName, snapshot) - if err != nil { - return statusNew, snapshot, nil - } // Snapshot is found. Create VolumeSnapshotData, bind VolumeSnapshotData to VolumeSnapshot, and update VolumeSnapshot status glog.Infof("updateSnapshotIfExists: create VolumeSnapshotData object for VolumeSnapshot %s.", uniqueSnapshotName) pvName, ok := snapshot.Metadata.Labels[pvNameLabel] diff --git a/vendor/github.com/portworx/kdmp/pkg/apis/kdmp/v1alpha1/register.go b/vendor/github.com/portworx/kdmp/pkg/apis/kdmp/v1alpha1/register.go index 078420de43..d6cb8d39d9 100644 --- a/vendor/github.com/portworx/kdmp/pkg/apis/kdmp/v1alpha1/register.go +++ b/vendor/github.com/portworx/kdmp/pkg/apis/kdmp/v1alpha1/register.go @@ -38,6 +38,10 @@ func addKnownTypes(scheme *runtime.Scheme) error { &VolumeBackupDeleteList{}, &BackupLocationMaintenance{}, &BackupLocationMaintenanceList{}, + &ResourceExport{}, + &ResourceExportList{}, + &ResourceBackup{}, + &ResourceBackupList{}, ) metav1.AddToGroupVersion(scheme, SchemeGroupVersion) diff --git a/vendor/github.com/portworx/kdmp/pkg/apis/kdmp/v1alpha1/resourcebackup.go b/vendor/github.com/portworx/kdmp/pkg/apis/kdmp/v1alpha1/resourcebackup.go new file mode 100644 index 0000000000..e1aa7fd764 --- /dev/null +++ b/vendor/github.com/portworx/kdmp/pkg/apis/kdmp/v1alpha1/resourcebackup.go @@ -0,0 +1,131 @@ +package v1alpha1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +const ( + // ResourceBackupResourceName is name for the ResourceBackup resource. + ResourceBackupResourceName = "resourcebackup" + // ResourceBackupResourcePlural is the name for list of ResourceBackup resources. + ResourceBackupResourcePlural = "resourcebackups" +) + +// ResourceBackupType defines a method of achieving Resource transfer. +type ResourceBackupType string + +// ResourceBackupStatus defines a status of ResourceBackup. +type ResourceBackupStatus string + +const ( + // ResourceBackupStatusInitial is the initial status of ResourceBackup. It indicates + // that a volume Backup request has been received. + ResourceBackupStatusInitial ResourceBackupStatus = "Initial" + // ResourceBackupStatusPending when Resource Backup is pending and not started yet. + ResourceBackupStatusPending ResourceBackupStatus = "Pending" + // ResourceBackupStatusInProgress when Resource is being transferred. + ResourceBackupStatusInProgress ResourceBackupStatus = "InProgress" + // ResourceBackupStatusFailed when Resource transfer is failed. + ResourceBackupStatusFailed ResourceBackupStatus = "Failed" + // ResourceBackupStatusSuccessful when Resource has been transferred. 
+	ResourceBackupStatusSuccessful ResourceBackupStatus = "Successful"
+	// ResourceBackupStatusPartialSuccess when Resource was partially successful
+	ResourceBackupStatusPartialSuccess ResourceBackupStatus = "PartialSuccess"
+)
+
+// ResourceBackupProgressStatus overall resource backup/restore progress
+type ResourceBackupProgressStatus struct {
+	// ProgressPercentage is the progress of the command in percentage
+	ProgressPercentage float64
+	// Status status of resource export
+	Status ResourceBackupStatus `json:"status,omitempty"`
+	// Reason status reason
+	Reason string `json:"reason,omitempty"`
+	// Resources status of each resource being restored
+	Resources []*ResourceRestoreResourceInfo `json:"resources"`
+}
+
+// +genclient
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// ResourceBackup defines a spec for holding the status of a resource restore, updated by the NFS executor job
+type ResourceBackup struct {
+	metav1.TypeMeta   `json:",inline"`
+	metav1.ObjectMeta `json:"metadata,omitempty"`
+	Spec ResourceBackupSpec `json:"spec"`
+	// Type - Backup or Restore
+	Type ResourceBackupType `json:"type,omitempty"`
+	// Status Overall status
+	Status ResourceBackupProgressStatus `json:"status,omitempty"`
+	// VolumesInfo contains the list of volumes to be restored. Filled in by the NFS executor job
+	VolumesInfo []*ResourceBackupVolumeInfo `json:"volumesInfo,omitempty"`
+	// ExistingVolumesInfo existing volumes which are not to be restored
+	ExistingVolumesInfo []*ResourceRestoreVolumeInfo `json:"existingVolumesInfo,omitempty"`
+}
+
+// ResourceBackupSpec configuration parameters for ResourceBackup
+type ResourceBackupSpec struct {
+	// ObjRef here is the backuplocation CR
+	ObjRef ResourceBackupObjectReference `json:"source,omitempty"`
+	// PVCObjRef - during restore of volumes, stores the ref of the PVC
+	PVCObjRef ResourceBackupObjectReference `json:"pvcobj,omitempty"`
+}
+
+// ResourceBackupObjectReference contains enough information to let you inspect the referred object.
+type ResourceBackupObjectReference struct {
+	// API version of the referent.
+	APIVersion string `json:"apiVersion,omitempty"`
+	// Kind of the referent.
+	// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
+	Kind string `json:"kind,omitempty"`
+	// Namespace of the referent.
+	// More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/
+	Namespace string `json:"namespace,omitempty"`
+	// Name of the referent.
+	// More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
+	Name string `json:"name,omitempty"`
+}
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// ResourceBackupList is a list of ResourceBackup resources.
+type ResourceBackupList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metaResource,omitempty"` + + Items []ResourceBackup `json:"items"` +} + +// ResourceBackupVolumeInfo is the info for the backup of a volume +type ResourceBackupVolumeInfo struct { + PersistentVolumeClaim string `json:"persistentVolumeClaim"` + PersistentVolumeClaimUID string `json:"persistentVolumeClaimUID"` + Namespace string `json:"namespace"` + Volume string `json:"volume"` + BackupID string `json:"backupID"` + DriverName string `json:"driverName"` + Zones []string `json:"zones"` + Status ResourceBackupStatus `json:"status"` + Reason string `json:"reason"` + Options map[string]string `json:"options"` + TotalSize uint64 `json:"totalSize"` + ActualSize uint64 `json:"actualSize"` + StorageClass string `json:"storageClass"` + Provisioner string `json:"provisioner"` + VolumeSnapshot string `json:"volumeSnapshot"` +} + +// ResourceRestoreVolumeInfo is the info for the restore of a volume +type ResourceRestoreVolumeInfo struct { + PersistentVolumeClaim string `json:"persistentVolumeClaim"` + PersistentVolumeClaimUID string `json:"persistentVolumeClaimUID"` + SourceNamespace string `json:"sourceNamespace"` + SourceVolume string `json:"sourceVolume"` + RestoreVolume string `json:"restoreVolume"` + DriverName string `json:"driverName"` + Zones []string `json:"zones"` + Status ResourceBackupStatus `json:"status"` + Reason string `json:"reason"` + TotalSize uint64 `json:"totalSize"` + Options map[string]string `json:"options"` +} diff --git a/vendor/github.com/portworx/kdmp/pkg/apis/kdmp/v1alpha1/resourceexport.go b/vendor/github.com/portworx/kdmp/pkg/apis/kdmp/v1alpha1/resourceexport.go new file mode 100644 index 0000000000..3d9ac555f2 --- /dev/null +++ b/vendor/github.com/portworx/kdmp/pkg/apis/kdmp/v1alpha1/resourceexport.go @@ -0,0 +1,152 @@ +package v1alpha1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +const ( + // ResourceExportResourceName is name for the ResourceExport resource. + ResourceExportResourceName = "resourceexport" + // ResourceExportResourcePlural is the name for list of ResourceExport resources. + ResourceExportResourcePlural = "resourceexports" + // ResourceExportNFS resource export provided by nfs path + ResourceExportNFS ResourceExportType = "nfs" +) + +// ResourceExportType defines a method of achieving Resource transfer. +type ResourceExportType string + +// ResourceExportStatus defines a status of ResourceExport. +type ResourceExportStatus string + +// ResourceExportStage is the stage of the ResourceExport +type ResourceExportStage string + +// ResourceRestoreStatus defines the status of Resource after applying the spec during restore. 
+type ResourceRestoreStatus string
+
+// ObjectInfo contains info about an object being backed up or restored
+type ObjectInfo struct {
+	Name                    string `json:"name"`
+	Namespace               string `json:"namespace"`
+	metav1.GroupVersionKind `json:",inline"`
+}
+
+// ResourceRestoreResourceInfo is the info for the restore of a resource
+type ResourceRestoreResourceInfo struct {
+	ObjectInfo `json:",inline"`
+	Status     ResourceRestoreStatus `json:"status"`
+	Reason     string                `json:"reason"`
+}
+
+const (
+	// ResourceRestoreStatusFailed Restore Failed
+	ResourceRestoreStatusFailed ResourceRestoreStatus = "Failed"
+	// ResourceRestoreStatusRetained Restore Retained
+	ResourceRestoreStatusRetained ResourceRestoreStatus = "Retained"
+	// ResourceRestoreStatusSuccessful Restore Successful
+	ResourceRestoreStatusSuccessful ResourceRestoreStatus = "Successful"
+	// ResourceRestoreStatusInProgress Restore InProgress
+	ResourceRestoreStatusInProgress ResourceRestoreStatus = "InProgress"
+)
+
+const (
+	// ResourceExportStatusInitial is the initial status of ResourceExport. It indicates
+	// that a volume export request has been received.
+	ResourceExportStatusInitial ResourceExportStatus = "Initial"
+	// ResourceExportStatusPending when Resource export is pending and not started yet.
+	ResourceExportStatusPending ResourceExportStatus = "Pending"
+	// ResourceExportStatusInProgress when Resource is being transferred.
+	ResourceExportStatusInProgress ResourceExportStatus = "InProgress"
+	// ResourceExportStatusFailed when Resource transfer has failed.
+	ResourceExportStatusFailed ResourceExportStatus = "Failed"
+	// ResourceExportStatusSuccessful when Resource has been transferred.
+	ResourceExportStatusSuccessful ResourceExportStatus = "Successful"
+)
+
+const (
+	// ResourceExportBackup backup op for resource upload
+	ResourceExportBackup ResourceExportType = "nfs"
+)
+
+const (
+	// ResourceExportStageInitial is the initial stage for ResourceExport
+	ResourceExportStageInitial ResourceExportStage = "Initial"
+	// ResourceExportStageInProgress is the InProgress stage for ResourceExport
+	ResourceExportStageInProgress ResourceExportStage = "InProgress"
+	// ResourceExportStageFailed is the Failed stage for ResourceExport
+	ResourceExportStageFailed ResourceExportStage = "Failed"
+	// ResourceExportStageSuccessful is the Successful stage for ResourceExport
+	ResourceExportStageSuccessful ResourceExportStage = "Successful"
+	// ResourceExportStageFinal is the Final stage for ResourceExport
+	ResourceExportStageFinal ResourceExportStage = "Final"
+)
+
+// +genclient
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// ResourceExport defines a spec for restoring resources to an NFS target
+type ResourceExport struct {
+	metav1.TypeMeta   `json:",inline"`
+	metav1.ObjectMeta `json:"metadata,omitempty"`
+	Spec ResourceExportSpec `json:"spec"`
+	// Status Overall status
+	Status ResourceStatus `json:"status,omitempty"`
+	// VolumesInfo contains the list of volumes to be restored. Filled in by the NFS executor job
+	VolumesInfo []*ResourceBackupVolumeInfo `json:"volumesInfo"`
+	// ExistingVolumesInfo existing volumes which are not to be restored
+	ExistingVolumesInfo []*ResourceRestoreVolumeInfo `json:"existingVolumesInfo,omitempty"`
+}
+
+// ResourceExportSpec configuration parameters for ResourceExport
+type ResourceExportSpec struct {
+	// Type - Backup or Restore
+	Type ResourceExportType `json:"type,omitempty"`
+	// TriggeredFrom indicates which module created the ResourceExport CR.
+	// The intention is to know from where to get the nfs executor image
+	TriggeredFrom   string `json:"triggerFrom,omitempty"`
+	TriggeredFromNs string `json:"triggerFromNs,omitempty"`
+	// Source here is the applicationBackup CR for backup
+	Source ResourceExportObjectReference `json:"source,omitempty"`
+	// Destination is the ref to the BL CR
+	Destination ResourceExportObjectReference `json:"destination,omitempty"`
+}
+
+// ResourceStatus overall resource backup/restore progress
+type ResourceStatus struct {
+	// Status status of resource export
+	Status ResourceExportStatus `json:"status,omitempty"`
+	// Reason status reason
+	Reason string `json:"reason,omitempty"`
+	// TransferID job transfer ID
+	TransferID string `json:"transferID,omitempty"`
+	// Stage resource export stage
+	Stage ResourceExportStage `json:"stage,omitempty"`
+	// Resources status of each resource being restored
+	Resources []*ResourceRestoreResourceInfo `json:"resources"`
+}
+
+// ResourceExportObjectReference contains enough information to let you inspect the referred object.
+type ResourceExportObjectReference struct {
+	// API version of the referent.
+	APIVersion string `json:"apiVersion,omitempty"`
+	// Kind of the referent.
+	// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
+	Kind string `json:"kind,omitempty"`
+	// Namespace of the referent.
+	// More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/
+	Namespace string `json:"namespace,omitempty"`
+	// Name of the referent.
+	// More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
+	Name string `json:"name,omitempty"`
+}
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// ResourceExportList is a list of ResourceExport resources.
+type ResourceExportList struct {
+	metav1.TypeMeta `json:",inline"`
+	metav1.ListMeta `json:"metaResource,omitempty"`
+
+	Items []ResourceExport `json:"items"`
+}
diff --git a/vendor/github.com/portworx/kdmp/pkg/apis/kdmp/v1alpha1/zz_generated.deepcopy.go b/vendor/github.com/portworx/kdmp/pkg/apis/kdmp/v1alpha1/zz_generated.deepcopy.go
index 357b1784f2..23d1027b30 100644
--- a/vendor/github.com/portworx/kdmp/pkg/apis/kdmp/v1alpha1/zz_generated.deepcopy.go
+++ b/vendor/github.com/portworx/kdmp/pkg/apis/kdmp/v1alpha1/zz_generated.deepcopy.go
@@ -1,3 +1,4 @@
+//go:build !ignore_autogenerated
 // +build !ignore_autogenerated
 
 /*
 
 LICENSE
 
 */
@@ -11,6 +12,7 @@ LICENSE
 package v1alpha1
 
 import (
+	v1 "k8s.io/api/core/v1"
 	runtime "k8s.io/apimachinery/pkg/runtime"
 )
 
@@ -127,7 +129,7 @@ func (in *DataExport) DeepCopyInto(out *DataExport) {
 	out.TypeMeta = in.TypeMeta
 	in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
 	out.Spec = in.Spec
-	out.Status = in.Status
+	in.Status.DeepCopyInto(&out.Status)
 	return
 }
 
@@ -219,6 +221,11 @@ func (in *DataExportSpec) DeepCopy() *DataExportSpec {
 // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
 func (in *ExportStatus) DeepCopyInto(out *ExportStatus) {
 	*out = *in
+	if in.RestorePVC != nil {
+		in, out := &in.RestorePVC, &out.RestorePVC
+		*out = new(v1.PersistentVolumeClaim)
+		(*in).DeepCopyInto(*out)
+	}
 	return
 }
 
@@ -232,6 +239,23 @@ func (in *ExportStatus) DeepCopy() *ExportStatus {
 	return out
 }
 
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ObjectInfo) DeepCopyInto(out *ObjectInfo) { + *out = *in + out.GroupVersionKind = in.GroupVersionKind + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ObjectInfo. +func (in *ObjectInfo) DeepCopy() *ObjectInfo { + if in == nil { + return nil + } + out := new(ObjectInfo) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *RepoMaintenanceStatus) DeepCopyInto(out *RepoMaintenanceStatus) { *out = *in @@ -249,6 +273,367 @@ func (in *RepoMaintenanceStatus) DeepCopy() *RepoMaintenanceStatus { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ResourceBackup) DeepCopyInto(out *ResourceBackup) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + out.Spec = in.Spec + in.Status.DeepCopyInto(&out.Status) + if in.VolumesInfo != nil { + in, out := &in.VolumesInfo, &out.VolumesInfo + *out = make([]*ResourceBackupVolumeInfo, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(ResourceBackupVolumeInfo) + (*in).DeepCopyInto(*out) + } + } + } + if in.ExistingVolumesInfo != nil { + in, out := &in.ExistingVolumesInfo, &out.ExistingVolumesInfo + *out = make([]*ResourceRestoreVolumeInfo, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(ResourceRestoreVolumeInfo) + (*in).DeepCopyInto(*out) + } + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceBackup. +func (in *ResourceBackup) DeepCopy() *ResourceBackup { + if in == nil { + return nil + } + out := new(ResourceBackup) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ResourceBackup) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ResourceBackupList) DeepCopyInto(out *ResourceBackupList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]ResourceBackup, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceBackupList. +func (in *ResourceBackupList) DeepCopy() *ResourceBackupList { + if in == nil { + return nil + } + out := new(ResourceBackupList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ResourceBackupList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ResourceBackupObjectReference) DeepCopyInto(out *ResourceBackupObjectReference) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceBackupObjectReference. 
+func (in *ResourceBackupObjectReference) DeepCopy() *ResourceBackupObjectReference { + if in == nil { + return nil + } + out := new(ResourceBackupObjectReference) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ResourceBackupProgressStatus) DeepCopyInto(out *ResourceBackupProgressStatus) { + *out = *in + if in.Resources != nil { + in, out := &in.Resources, &out.Resources + *out = make([]*ResourceRestoreResourceInfo, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(ResourceRestoreResourceInfo) + **out = **in + } + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceBackupProgressStatus. +func (in *ResourceBackupProgressStatus) DeepCopy() *ResourceBackupProgressStatus { + if in == nil { + return nil + } + out := new(ResourceBackupProgressStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ResourceBackupSpec) DeepCopyInto(out *ResourceBackupSpec) { + *out = *in + out.ObjRef = in.ObjRef + out.PVCObjRef = in.PVCObjRef + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceBackupSpec. +func (in *ResourceBackupSpec) DeepCopy() *ResourceBackupSpec { + if in == nil { + return nil + } + out := new(ResourceBackupSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ResourceBackupVolumeInfo) DeepCopyInto(out *ResourceBackupVolumeInfo) { + *out = *in + if in.Zones != nil { + in, out := &in.Zones, &out.Zones + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.Options != nil { + in, out := &in.Options, &out.Options + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceBackupVolumeInfo. +func (in *ResourceBackupVolumeInfo) DeepCopy() *ResourceBackupVolumeInfo { + if in == nil { + return nil + } + out := new(ResourceBackupVolumeInfo) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ResourceExport) DeepCopyInto(out *ResourceExport) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + out.Spec = in.Spec + in.Status.DeepCopyInto(&out.Status) + if in.VolumesInfo != nil { + in, out := &in.VolumesInfo, &out.VolumesInfo + *out = make([]*ResourceBackupVolumeInfo, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(ResourceBackupVolumeInfo) + (*in).DeepCopyInto(*out) + } + } + } + if in.ExistingVolumesInfo != nil { + in, out := &in.ExistingVolumesInfo, &out.ExistingVolumesInfo + *out = make([]*ResourceRestoreVolumeInfo, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(ResourceRestoreVolumeInfo) + (*in).DeepCopyInto(*out) + } + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceExport. 
+func (in *ResourceExport) DeepCopy() *ResourceExport { + if in == nil { + return nil + } + out := new(ResourceExport) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ResourceExport) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ResourceExportList) DeepCopyInto(out *ResourceExportList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]ResourceExport, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceExportList. +func (in *ResourceExportList) DeepCopy() *ResourceExportList { + if in == nil { + return nil + } + out := new(ResourceExportList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ResourceExportList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ResourceExportObjectReference) DeepCopyInto(out *ResourceExportObjectReference) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceExportObjectReference. +func (in *ResourceExportObjectReference) DeepCopy() *ResourceExportObjectReference { + if in == nil { + return nil + } + out := new(ResourceExportObjectReference) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ResourceExportSpec) DeepCopyInto(out *ResourceExportSpec) { + *out = *in + out.Source = in.Source + out.Destination = in.Destination + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceExportSpec. +func (in *ResourceExportSpec) DeepCopy() *ResourceExportSpec { + if in == nil { + return nil + } + out := new(ResourceExportSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ResourceRestoreResourceInfo) DeepCopyInto(out *ResourceRestoreResourceInfo) { + *out = *in + out.ObjectInfo = in.ObjectInfo + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceRestoreResourceInfo. +func (in *ResourceRestoreResourceInfo) DeepCopy() *ResourceRestoreResourceInfo { + if in == nil { + return nil + } + out := new(ResourceRestoreResourceInfo) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ResourceRestoreVolumeInfo) DeepCopyInto(out *ResourceRestoreVolumeInfo) { + *out = *in + if in.Zones != nil { + in, out := &in.Zones, &out.Zones + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.Options != nil { + in, out := &in.Options, &out.Options + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceRestoreVolumeInfo. +func (in *ResourceRestoreVolumeInfo) DeepCopy() *ResourceRestoreVolumeInfo { + if in == nil { + return nil + } + out := new(ResourceRestoreVolumeInfo) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ResourceStatus) DeepCopyInto(out *ResourceStatus) { + *out = *in + if in.Resources != nil { + in, out := &in.Resources, &out.Resources + *out = make([]*ResourceRestoreResourceInfo, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(ResourceRestoreResourceInfo) + **out = **in + } + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceStatus. +func (in *ResourceStatus) DeepCopy() *ResourceStatus { + if in == nil { + return nil + } + out := new(ResourceStatus) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *VolumeBackup) DeepCopyInto(out *VolumeBackup) { *out = *in diff --git a/vendor/github.com/portworx/kdmp/pkg/client/clientset/versioned/typed/kdmp/v1alpha1/generated_expansion.go b/vendor/github.com/portworx/kdmp/pkg/client/clientset/versioned/typed/kdmp/v1alpha1/generated_expansion.go index c3dfe990e3..653ba63330 100644 --- a/vendor/github.com/portworx/kdmp/pkg/client/clientset/versioned/typed/kdmp/v1alpha1/generated_expansion.go +++ b/vendor/github.com/portworx/kdmp/pkg/client/clientset/versioned/typed/kdmp/v1alpha1/generated_expansion.go @@ -12,6 +12,10 @@ type BackupLocationMaintenanceExpansion interface{} type DataExportExpansion interface{} +type ResourceBackupExpansion interface{} + +type ResourceExportExpansion interface{} + type VolumeBackupExpansion interface{} type VolumeBackupDeleteExpansion interface{} diff --git a/vendor/github.com/portworx/kdmp/pkg/client/clientset/versioned/typed/kdmp/v1alpha1/kdmp_client.go b/vendor/github.com/portworx/kdmp/pkg/client/clientset/versioned/typed/kdmp/v1alpha1/kdmp_client.go index a1421c000d..83c5912c2b 100644 --- a/vendor/github.com/portworx/kdmp/pkg/client/clientset/versioned/typed/kdmp/v1alpha1/kdmp_client.go +++ b/vendor/github.com/portworx/kdmp/pkg/client/clientset/versioned/typed/kdmp/v1alpha1/kdmp_client.go @@ -18,6 +18,8 @@ type KdmpV1alpha1Interface interface { RESTClient() rest.Interface BackupLocationMaintenancesGetter DataExportsGetter + ResourceBackupsGetter + ResourceExportsGetter VolumeBackupsGetter VolumeBackupDeletesGetter } @@ -35,6 +37,14 @@ func (c *KdmpV1alpha1Client) DataExports(namespace string) DataExportInterface { return newDataExports(c, namespace) } +func (c *KdmpV1alpha1Client) ResourceBackups(namespace string) ResourceBackupInterface { + return newResourceBackups(c, namespace) +} + +func (c *KdmpV1alpha1Client) ResourceExports(namespace string) ResourceExportInterface { + return newResourceExports(c, namespace) +} + func (c *KdmpV1alpha1Client) VolumeBackups(namespace 
string) VolumeBackupInterface { return newVolumeBackups(c, namespace) } diff --git a/vendor/github.com/portworx/kdmp/pkg/client/clientset/versioned/typed/kdmp/v1alpha1/resourcebackup.go b/vendor/github.com/portworx/kdmp/pkg/client/clientset/versioned/typed/kdmp/v1alpha1/resourcebackup.go new file mode 100644 index 0000000000..5d26d37417 --- /dev/null +++ b/vendor/github.com/portworx/kdmp/pkg/client/clientset/versioned/typed/kdmp/v1alpha1/resourcebackup.go @@ -0,0 +1,185 @@ +/* + +LICENSE + +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package v1alpha1 + +import ( + "context" + "time" + + v1alpha1 "github.com/portworx/kdmp/pkg/apis/kdmp/v1alpha1" + scheme "github.com/portworx/kdmp/pkg/client/clientset/versioned/scheme" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + rest "k8s.io/client-go/rest" +) + +// ResourceBackupsGetter has a method to return a ResourceBackupInterface. +// A group's client should implement this interface. +type ResourceBackupsGetter interface { + ResourceBackups(namespace string) ResourceBackupInterface +} + +// ResourceBackupInterface has methods to work with ResourceBackup resources. +type ResourceBackupInterface interface { + Create(ctx context.Context, resourceBackup *v1alpha1.ResourceBackup, opts v1.CreateOptions) (*v1alpha1.ResourceBackup, error) + Update(ctx context.Context, resourceBackup *v1alpha1.ResourceBackup, opts v1.UpdateOptions) (*v1alpha1.ResourceBackup, error) + UpdateStatus(ctx context.Context, resourceBackup *v1alpha1.ResourceBackup, opts v1.UpdateOptions) (*v1alpha1.ResourceBackup, error) + Delete(ctx context.Context, name string, opts v1.DeleteOptions) error + DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error + Get(ctx context.Context, name string, opts v1.GetOptions) (*v1alpha1.ResourceBackup, error) + List(ctx context.Context, opts v1.ListOptions) (*v1alpha1.ResourceBackupList, error) + Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.ResourceBackup, err error) + ResourceBackupExpansion +} + +// resourceBackups implements ResourceBackupInterface +type resourceBackups struct { + client rest.Interface + ns string +} + +// newResourceBackups returns a ResourceBackups +func newResourceBackups(c *KdmpV1alpha1Client, namespace string) *resourceBackups { + return &resourceBackups{ + client: c.RESTClient(), + ns: namespace, + } +} + +// Get takes name of the resourceBackup, and returns the corresponding resourceBackup object, and an error if there is any. +func (c *resourceBackups) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha1.ResourceBackup, err error) { + result = &v1alpha1.ResourceBackup{} + err = c.client.Get(). + Namespace(c.ns). + Resource("resourcebackups"). + Name(name). + VersionedParams(&options, scheme.ParameterCodec). + Do(ctx). + Into(result) + return +} + +// List takes label and field selectors, and returns the list of ResourceBackups that match those selectors. +func (c *resourceBackups) List(ctx context.Context, opts v1.ListOptions) (result *v1alpha1.ResourceBackupList, err error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + result = &v1alpha1.ResourceBackupList{} + err = c.client.Get(). + Namespace(c.ns). 
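+		// (For reference, this chain issues:
+		// GET /apis/kdmp.portworx.com/v1alpha1/namespaces/{ns}/resourcebackups)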
+ Resource("resourcebackups"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Do(ctx). + Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested resourceBackups. +func (c *resourceBackups) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + opts.Watch = true + return c.client.Get(). + Namespace(c.ns). + Resource("resourcebackups"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Watch(ctx) +} + +// Create takes the representation of a resourceBackup and creates it. Returns the server's representation of the resourceBackup, and an error, if there is any. +func (c *resourceBackups) Create(ctx context.Context, resourceBackup *v1alpha1.ResourceBackup, opts v1.CreateOptions) (result *v1alpha1.ResourceBackup, err error) { + result = &v1alpha1.ResourceBackup{} + err = c.client.Post(). + Namespace(c.ns). + Resource("resourcebackups"). + VersionedParams(&opts, scheme.ParameterCodec). + Body(resourceBackup). + Do(ctx). + Into(result) + return +} + +// Update takes the representation of a resourceBackup and updates it. Returns the server's representation of the resourceBackup, and an error, if there is any. +func (c *resourceBackups) Update(ctx context.Context, resourceBackup *v1alpha1.ResourceBackup, opts v1.UpdateOptions) (result *v1alpha1.ResourceBackup, err error) { + result = &v1alpha1.ResourceBackup{} + err = c.client.Put(). + Namespace(c.ns). + Resource("resourcebackups"). + Name(resourceBackup.Name). + VersionedParams(&opts, scheme.ParameterCodec). + Body(resourceBackup). + Do(ctx). + Into(result) + return +} + +// UpdateStatus was generated because the type contains a Status member. +// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). +func (c *resourceBackups) UpdateStatus(ctx context.Context, resourceBackup *v1alpha1.ResourceBackup, opts v1.UpdateOptions) (result *v1alpha1.ResourceBackup, err error) { + result = &v1alpha1.ResourceBackup{} + err = c.client.Put(). + Namespace(c.ns). + Resource("resourcebackups"). + Name(resourceBackup.Name). + SubResource("status"). + VersionedParams(&opts, scheme.ParameterCodec). + Body(resourceBackup). + Do(ctx). + Into(result) + return +} + +// Delete takes name of the resourceBackup and deletes it. Returns an error if one occurs. +func (c *resourceBackups) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { + return c.client.Delete(). + Namespace(c.ns). + Resource("resourcebackups"). + Name(name). + Body(&opts). + Do(ctx). + Error() +} + +// DeleteCollection deletes a collection of objects. +func (c *resourceBackups) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { + var timeout time.Duration + if listOpts.TimeoutSeconds != nil { + timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second + } + return c.client.Delete(). + Namespace(c.ns). + Resource("resourcebackups"). + VersionedParams(&listOpts, scheme.ParameterCodec). + Timeout(timeout). + Body(&opts). + Do(ctx). + Error() +} + +// Patch applies the patch and returns the patched resourceBackup. +func (c *resourceBackups) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.ResourceBackup, err error) { + result = &v1alpha1.ResourceBackup{} + err = c.client.Patch(pt). + Namespace(c.ns). 
+ Resource("resourcebackups"). + Name(name). + SubResource(subresources...). + VersionedParams(&opts, scheme.ParameterCodec). + Body(data). + Do(ctx). + Into(result) + return +} diff --git a/vendor/github.com/portworx/kdmp/pkg/client/clientset/versioned/typed/kdmp/v1alpha1/resourceexport.go b/vendor/github.com/portworx/kdmp/pkg/client/clientset/versioned/typed/kdmp/v1alpha1/resourceexport.go new file mode 100644 index 0000000000..7d51a89566 --- /dev/null +++ b/vendor/github.com/portworx/kdmp/pkg/client/clientset/versioned/typed/kdmp/v1alpha1/resourceexport.go @@ -0,0 +1,185 @@ +/* + +LICENSE + +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package v1alpha1 + +import ( + "context" + "time" + + v1alpha1 "github.com/portworx/kdmp/pkg/apis/kdmp/v1alpha1" + scheme "github.com/portworx/kdmp/pkg/client/clientset/versioned/scheme" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + rest "k8s.io/client-go/rest" +) + +// ResourceExportsGetter has a method to return a ResourceExportInterface. +// A group's client should implement this interface. +type ResourceExportsGetter interface { + ResourceExports(namespace string) ResourceExportInterface +} + +// ResourceExportInterface has methods to work with ResourceExport resources. +type ResourceExportInterface interface { + Create(ctx context.Context, resourceExport *v1alpha1.ResourceExport, opts v1.CreateOptions) (*v1alpha1.ResourceExport, error) + Update(ctx context.Context, resourceExport *v1alpha1.ResourceExport, opts v1.UpdateOptions) (*v1alpha1.ResourceExport, error) + UpdateStatus(ctx context.Context, resourceExport *v1alpha1.ResourceExport, opts v1.UpdateOptions) (*v1alpha1.ResourceExport, error) + Delete(ctx context.Context, name string, opts v1.DeleteOptions) error + DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error + Get(ctx context.Context, name string, opts v1.GetOptions) (*v1alpha1.ResourceExport, error) + List(ctx context.Context, opts v1.ListOptions) (*v1alpha1.ResourceExportList, error) + Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.ResourceExport, err error) + ResourceExportExpansion +} + +// resourceExports implements ResourceExportInterface +type resourceExports struct { + client rest.Interface + ns string +} + +// newResourceExports returns a ResourceExports +func newResourceExports(c *KdmpV1alpha1Client, namespace string) *resourceExports { + return &resourceExports{ + client: c.RESTClient(), + ns: namespace, + } +} + +// Get takes name of the resourceExport, and returns the corresponding resourceExport object, and an error if there is any. +func (c *resourceExports) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha1.ResourceExport, err error) { + result = &v1alpha1.ResourceExport{} + err = c.client.Get(). + Namespace(c.ns). + Resource("resourceexports"). + Name(name). + VersionedParams(&options, scheme.ParameterCodec). + Do(ctx). + Into(result) + return +} + +// List takes label and field selectors, and returns the list of ResourceExports that match those selectors. 
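+// A hypothetical caller, assuming a clientset built for this group:
+//
+//	exports, err := cs.KdmpV1alpha1().ResourceExports("portworx").List(ctx, v1.ListOptions{})
+//	// iterate exports.Items on success
+//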
+func (c *resourceExports) List(ctx context.Context, opts v1.ListOptions) (result *v1alpha1.ResourceExportList, err error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + result = &v1alpha1.ResourceExportList{} + err = c.client.Get(). + Namespace(c.ns). + Resource("resourceexports"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Do(ctx). + Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested resourceExports. +func (c *resourceExports) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + opts.Watch = true + return c.client.Get(). + Namespace(c.ns). + Resource("resourceexports"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Watch(ctx) +} + +// Create takes the representation of a resourceExport and creates it. Returns the server's representation of the resourceExport, and an error, if there is any. +func (c *resourceExports) Create(ctx context.Context, resourceExport *v1alpha1.ResourceExport, opts v1.CreateOptions) (result *v1alpha1.ResourceExport, err error) { + result = &v1alpha1.ResourceExport{} + err = c.client.Post(). + Namespace(c.ns). + Resource("resourceexports"). + VersionedParams(&opts, scheme.ParameterCodec). + Body(resourceExport). + Do(ctx). + Into(result) + return +} + +// Update takes the representation of a resourceExport and updates it. Returns the server's representation of the resourceExport, and an error, if there is any. +func (c *resourceExports) Update(ctx context.Context, resourceExport *v1alpha1.ResourceExport, opts v1.UpdateOptions) (result *v1alpha1.ResourceExport, err error) { + result = &v1alpha1.ResourceExport{} + err = c.client.Put(). + Namespace(c.ns). + Resource("resourceexports"). + Name(resourceExport.Name). + VersionedParams(&opts, scheme.ParameterCodec). + Body(resourceExport). + Do(ctx). + Into(result) + return +} + +// UpdateStatus was generated because the type contains a Status member. +// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). +func (c *resourceExports) UpdateStatus(ctx context.Context, resourceExport *v1alpha1.ResourceExport, opts v1.UpdateOptions) (result *v1alpha1.ResourceExport, err error) { + result = &v1alpha1.ResourceExport{} + err = c.client.Put(). + Namespace(c.ns). + Resource("resourceexports"). + Name(resourceExport.Name). + SubResource("status"). + VersionedParams(&opts, scheme.ParameterCodec). + Body(resourceExport). + Do(ctx). + Into(result) + return +} + +// Delete takes name of the resourceExport and deletes it. Returns an error if one occurs. +func (c *resourceExports) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { + return c.client.Delete(). + Namespace(c.ns). + Resource("resourceexports"). + Name(name). + Body(&opts). + Do(ctx). + Error() +} + +// DeleteCollection deletes a collection of objects. +func (c *resourceExports) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { + var timeout time.Duration + if listOpts.TimeoutSeconds != nil { + timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second + } + return c.client.Delete(). + Namespace(c.ns). + Resource("resourceexports"). + VersionedParams(&listOpts, scheme.ParameterCodec). + Timeout(timeout). + Body(&opts). + Do(ctx). 
+		Error()
+}
+
+// Patch applies the patch and returns the patched resourceExport.
+func (c *resourceExports) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.ResourceExport, err error) {
+	result = &v1alpha1.ResourceExport{}
+	err = c.client.Patch(pt).
+		Namespace(c.ns).
+		Resource("resourceexports").
+		Name(name).
+		SubResource(subresources...).
+		VersionedParams(&opts, scheme.ParameterCodec).
+		Body(data).
+		Do(ctx).
+		Into(result)
+	return
+}
diff --git a/vendor/github.com/portworx/kdmp/pkg/controllers/common.go b/vendor/github.com/portworx/kdmp/pkg/controllers/common.go
new file mode 100644
index 0000000000..a3fc3d4a7b
--- /dev/null
+++ b/vendor/github.com/portworx/kdmp/pkg/controllers/common.go
@@ -0,0 +1,50 @@
+package controllers
+
+import (
+	"os"
+	"time"
+
+	storkapi "github.com/libopenstorage/stork/pkg/apis/stork/v1alpha1"
+	"github.com/portworx/sched-ops/k8s/stork"
+	"k8s.io/apimachinery/pkg/util/yaml"
+)
+
+var (
+	// ResyncPeriod is the controller resync period
+	ResyncPeriod = 10 * time.Second
+	// RequeuePeriod is the controller requeue period
+	RequeuePeriod = 5 * time.Second
+	// ValidateCRDInterval is the CRD validation interval
+	ValidateCRDInterval time.Duration = 10 * time.Second
+	// ValidateCRDTimeout is the CRD validation timeout
+	ValidateCRDTimeout time.Duration = 2 * time.Minute
+	// CleanupFinalizer is the cleanup finalizer
+	CleanupFinalizer = "kdmp.portworx.com/finalizer-cleanup"
+	// TaskDefaultTimeout is the timeout for a retried task
+	TaskDefaultTimeout = 1 * time.Minute
+	// TaskProgressCheckInterval is the interval at which task progress is checked
+	TaskProgressCheckInterval = 5 * time.Second
+)
+
+// ReadBackupLocation fetches the BackupLocation CR, or decodes it from filePath when name is empty
+func ReadBackupLocation(name, namespace, filePath string) (*storkapi.BackupLocation, error) {
+	if name != "" {
+		if namespace == "" {
+			namespace = "default"
+		}
+		return stork.Instance().GetBackupLocation(name, namespace)
+	}
+
+	// TODO: This is needed for restic, we can think of removing it later
+	f, err := os.Open(filePath)
+	if err != nil {
+		return nil, err
+	}
+
+	out := &storkapi.BackupLocation{}
+	if err = yaml.NewYAMLOrJSONDecoder(f, 1024).Decode(out); err != nil {
+		return nil, err
+	}
+
+	return out, nil
+}
diff --git a/vendor/github.com/portworx/kdmp/pkg/controllers/dataexport/dataexport.go b/vendor/github.com/portworx/kdmp/pkg/controllers/dataexport/dataexport.go
index e60315078c..1ad93b2e66 100644
--- a/vendor/github.com/portworx/kdmp/pkg/controllers/dataexport/dataexport.go
+++ b/vendor/github.com/portworx/kdmp/pkg/controllers/dataexport/dataexport.go
@@ -3,11 +3,11 @@ package dataexport
 import (
 	"context"
 	"reflect"
-	"time"
 
 	"github.com/libopenstorage/stork/pkg/controllers"
 	"github.com/libopenstorage/stork/pkg/snapshotter"
 	kdmpapi "github.com/portworx/kdmp/pkg/apis/kdmp/v1alpha1"
+	kdmpcontroller "github.com/portworx/kdmp/pkg/controllers"
 	"github.com/portworx/kdmp/pkg/utils"
 	"github.com/portworx/kdmp/pkg/version"
 	"github.com/portworx/sched-ops/k8s/apiextensions"
@@ -23,15 +23,6 @@ import (
 	"sigs.k8s.io/controller-runtime/pkg/source"
 )
 
-var (
-	resyncPeriod                      = 10 * time.Second
-	requeuePeriod                     = 5 * time.Second
-	validateCRDInterval time.Duration = 10 * time.Second
-	validateCRDTimeout  time.Duration = 2 * time.Minute
-
-	cleanupFinalizer = "kdmp.portworx.com/finalizer-cleanup"
-)
-
 // Controller is a k8s controller that handles DataExport resources.
type Controller struct { client runtimeclient.Client @@ -71,7 +62,6 @@ func (c *Controller) Init(mgr manager.Manager) error { // // The Controller will requeue the Request to be processed again if the returned error is non-nil or // Result.Requeue is true, otherwise upon completion it will remove the work from the queue. -// func (c *Controller) Reconcile(ctx context.Context, request reconcile.Request) (reconcile.Result, error) { logrus.Tracef("Reconciling DataExport %s/%s", request.Namespace, request.Name) @@ -84,24 +74,24 @@ func (c *Controller) Reconcile(ctx context.Context, request reconcile.Request) ( return reconcile.Result{}, nil } // Error reading the object - requeue the request. - return reconcile.Result{RequeueAfter: requeuePeriod}, nil + return reconcile.Result{RequeueAfter: kdmpcontroller.RequeuePeriod}, nil } - if !controllers.ContainsFinalizer(dataExport, cleanupFinalizer) { - controllers.SetFinalizer(dataExport, cleanupFinalizer) + if !controllers.ContainsFinalizer(dataExport, kdmpcontroller.CleanupFinalizer) { + controllers.SetFinalizer(dataExport, kdmpcontroller.CleanupFinalizer) return reconcile.Result{Requeue: true}, c.client.Update(context.TODO(), dataExport) } requeue, err := c.sync(context.TODO(), dataExport) if err != nil { logrus.Errorf("kdmp controller: %s/%s: %s", request.Namespace, request.Name, err) - return reconcile.Result{RequeueAfter: requeuePeriod}, nil + return reconcile.Result{RequeueAfter: kdmpcontroller.RequeuePeriod}, nil } if requeue { - return reconcile.Result{RequeueAfter: requeuePeriod}, nil + return reconcile.Result{RequeueAfter: kdmpcontroller.RequeuePeriod}, nil } - return reconcile.Result{RequeueAfter: resyncPeriod}, nil + return reconcile.Result{RequeueAfter: kdmpcontroller.ResyncPeriod}, nil } func (c *Controller) createCRD() error { @@ -124,7 +114,7 @@ func (c *Controller) createCRD() error { if err != nil && !errors.IsAlreadyExists(err) { return err } - if err := apiextensions.Instance().ValidateCRD(vb.Plural+"."+vb.Group, validateCRDTimeout, validateCRDInterval); err != nil { + if err := apiextensions.Instance().ValidateCRD(vb.Plural+"."+vb.Group, kdmpcontroller.ValidateCRDTimeout, kdmpcontroller.ValidateCRDInterval); err != nil { return err } } else { @@ -132,7 +122,7 @@ func (c *Controller) createCRD() error { if err != nil && !errors.IsAlreadyExists(err) { return err } - if err := apiextensions.Instance().ValidateCRDV1beta1(vb, validateCRDTimeout, validateCRDInterval); err != nil { + if err := apiextensions.Instance().ValidateCRDV1beta1(vb, kdmpcontroller.ValidateCRDTimeout, kdmpcontroller.ValidateCRDInterval); err != nil { return err } } @@ -151,7 +141,7 @@ func (c *Controller) createCRD() error { if err != nil && !errors.IsAlreadyExists(err) { return err } - if err := apiextensions.Instance().ValidateCRD(resource.Plural+"."+vb.Group, validateCRDTimeout, validateCRDInterval); err != nil { + if err := apiextensions.Instance().ValidateCRD(resource.Plural+"."+vb.Group, kdmpcontroller.ValidateCRDTimeout, kdmpcontroller.ValidateCRDInterval); err != nil { return err } } else { @@ -159,7 +149,7 @@ func (c *Controller) createCRD() error { if err != nil && !errors.IsAlreadyExists(err) { return err } - if err := apiextensions.Instance().ValidateCRDV1beta1(resource, validateCRDTimeout, validateCRDInterval); err != nil { + if err := apiextensions.Instance().ValidateCRDV1beta1(resource, kdmpcontroller.ValidateCRDTimeout, kdmpcontroller.ValidateCRDInterval); err != nil { return err } } diff --git 
a/vendor/github.com/portworx/kdmp/pkg/controllers/dataexport/reconcile.go b/vendor/github.com/portworx/kdmp/pkg/controllers/dataexport/reconcile.go
index f588c013d4..87658ab457 100644
--- a/vendor/github.com/portworx/kdmp/pkg/controllers/dataexport/reconcile.go
+++ b/vendor/github.com/portworx/kdmp/pkg/controllers/dataexport/reconcile.go
@@ -3,7 +3,6 @@ package dataexport
 import (
 	"context"
 	"fmt"
-	"io/ioutil"
 	"os"
 	"path/filepath"
 	"reflect"
@@ -18,10 +17,12 @@ import (
 	"github.com/libopenstorage/stork/pkg/controllers"
 	"github.com/libopenstorage/stork/pkg/snapshotter"
 	kdmpapi "github.com/portworx/kdmp/pkg/apis/kdmp/v1alpha1"
+	kdmpcontroller "github.com/portworx/kdmp/pkg/controllers"
 	"github.com/portworx/kdmp/pkg/drivers"
 	"github.com/portworx/kdmp/pkg/drivers/driversinstance"
 	"github.com/portworx/kdmp/pkg/drivers/utils"
 	kdmpopts "github.com/portworx/kdmp/pkg/util/ops"
+	"github.com/portworx/kdmp/pkg/version"
+
 	"github.com/portworx/sched-ops/k8s/batch"
 	"github.com/portworx/sched-ops/k8s/core"
@@ -35,7 +36,6 @@ import (
 	k8sErrors "k8s.io/apimachinery/pkg/api/errors"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/types"
-	"k8s.io/apimachinery/pkg/util/wait"
 	"k8s.io/apimachinery/pkg/util/yaml"
 	"k8s.io/client-go/rest"
 	k8shelper "k8s.io/component-helpers/storage/volume"
@@ -72,9 +72,7 @@ const (
 	// pvcNameLenLimitForJob is the max length of PVC name that the bound job
 	// will incorporate in their names
 	pvcNameLenLimitForJob = 48
-	volumeinitialDelay    = 2 * time.Second
-	volumeFactor          = 1.5
-	volumeSteps           = 15
+	defaultTimeout        = 1 * time.Minute
 	progressCheckInterval = 5 * time.Second
 
 	compressionKey = "KDMP_COMPRESSION"
@@ -96,12 +94,6 @@ type updateDataExportDetail struct {
 	volumeSnapshot    string
 }
 
-var volumeAPICallBackoff = wait.Backoff{
-	Duration: volumeinitialDelay,
-	Factor:   volumeFactor,
-	Steps:    volumeSteps,
-}
-
 func (c *Controller) sync(ctx context.Context, in *kdmpapi.DataExport) (bool, error) {
 	if in == nil {
 		return false, nil
 	}
@@ -123,7 +115,7 @@ func (c *Controller) sync(ctx context.Context, in *kdmpapi.DataExport) (bool, er
 	// delete an object on the init stage without cleanup
 	if dataExport.DeletionTimestamp != nil && dataExport.Status.Stage == kdmpapi.DataExportStageInitial {
-		if !controllers.ContainsFinalizer(dataExport, cleanupFinalizer) {
+		if !controllers.ContainsFinalizer(dataExport, kdmpcontroller.CleanupFinalizer) {
 			return false, nil
 		}
 
@@ -158,7 +150,7 @@ func (c *Controller) sync(ctx context.Context, in *kdmpapi.DataExport) (bool, er
 	}
 
 	if dataExport.DeletionTimestamp != nil {
-		if !controllers.ContainsFinalizer(dataExport, cleanupFinalizer) {
+		if !controllers.ContainsFinalizer(dataExport, kdmpcontroller.CleanupFinalizer) {
 			return false, nil
 		}
 		if err = c.cleanUp(driver, dataExport); err != nil {
@@ -232,15 +224,23 @@ func (c *Controller) sync(ctx context.Context, in *kdmpapi.DataExport) (bool, er
 
 		// Create the pvc from the spec provided in the dataexport CR
 		pvcSpec := dataExport.Status.RestorePVC
-		_, err = c.createPVC(dataExport)
+		// For NFS, the PVC is created upfront, so createPVC() would fail here during
+		// volume restore: the PVC ref in the DataExport CR carries only the PVC name
+		// and namespace, not the full spec. The PVC already exists, so check for it first.
+		_, err = core.Instance().GetPersistentVolumeClaim(pvcSpec.Name, pvcSpec.Namespace)
 		if err != nil {
-			msg := fmt.Sprintf("Error creating pvc %s/%s for restore: %v", pvcSpec.Namespace, pvcSpec.Name, err)
-			logrus.Errorf(msg)
-			data := updateDataExportDetail{
-				status: kdmpapi.DataExportStatusFailed,
-				reason: msg,
+			if k8sErrors.IsNotFound(err) {
+				_, err = c.createPVC(dataExport)
+				if err != nil {
+					msg := fmt.Sprintf("Error creating pvc %s/%s for restore: %v", pvcSpec.Namespace, pvcSpec.Name, err)
+					logrus.Errorf(msg)
+					data := updateDataExportDetail{
+						status: kdmpapi.DataExportStatusFailed,
+						reason: msg,
+					}
+					return false, c.updateStatus(dataExport, data)
+				}
 			}
-			return false, c.updateStatus(dataExport, data)
 		}
 
 		_, err = checkPVCIgnoringJobMounts(dataExport.Spec.Destination, dataExport.Name)
@@ -273,7 +273,28 @@ func (c *Controller) sync(ctx context.Context, in *kdmpapi.DataExport) (bool, er
 			compressionType = kdmpData.Data[compressionKey]
 			podDataPath = kdmpData.Data[backupPath]
 		}
+		blName := dataExport.Spec.Destination.Name
+		blNamespace := dataExport.Spec.Destination.Namespace
+		if driverName == drivers.KopiaRestore {
+			blName = vb.Spec.BackupLocation.Name
+			blNamespace = vb.Spec.BackupLocation.Namespace
+		}
+
+		backupLocation, err := readBackupLocation(blName, blNamespace, "")
+		if err != nil {
+			msg := fmt.Sprintf("reading of backuplocation [%v/%v] failed: %v", blNamespace, blName, err)
+			logrus.Errorf(msg)
+			data := updateDataExportDetail{
+				status: kdmpapi.DataExportStatusFailed,
+				reason: msg,
+			}
+			return false, c.updateStatus(dataExport, data)
+		}
+
+		// For non-NFS backup locations, stub out NfsConfig so that the NFS-specific
+		// fields passed to startTransferJob below dereference safely as empty values.
+		if backupLocation.Location.Type != storkapi.BackupLocationNFS {
+			backupLocation.Location.NfsConfig = &storkapi.NfsConfig{}
+		}
 		// start data transfer
 		id, err := startTransferJob(
 			driver,
@@ -283,6 +304,9 @@ func (c *Controller) sync(ctx context.Context, in *kdmpapi.DataExport) (bool, er
 			podDataPath,
 			utils.KdmpConfigmapName,
 			utils.KdmpConfigmapNamespace,
+			backupLocation.Location.NfsConfig.ServerAddr,
+			backupLocation.Location.Path,
+			backupLocation.Location.NfsConfig.MountOption,
 		)
 		if err != nil && err != utils.ErrJobAlreadyRunning && err != utils.ErrOutOfJobResources {
 			msg := fmt.Sprintf("failed to start a data transfer job, dataexport [%v]: %v", dataExport.Name, err)
@@ -429,6 +453,17 @@ func (c *Controller) sync(ctx context.Context, in *kdmpapi.DataExport) (bool, er
 		data := updateDataExportDetail{
 			stage: kdmpapi.DataExportStageFinal,
 		}
+		// Append the job-pod log to stork's pod log in case of failure.
+		// This is a best-effort approach, hence errors are ignored.
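+		// The TransferID recorded on the CR is assumed to encode the job's
+		// namespace and name; utils.ParseJobID below splits it back apart so
+		// the failed job's pod can be located and its log tailed.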
+		if dataExport.Status.Status == kdmpapi.DataExportStatusFailed {
+			if dataExport.Status.TransferID != "" {
+				namespace, name, err := utils.ParseJobID(dataExport.Status.TransferID)
+				if err != nil {
+					logrus.Infof("job-pod name and namespace extraction failed: %v", err)
+				} else {
+					appendPodLogToStork(name, namespace)
+				}
+			}
+		}
 		cleanupTask := func() (interface{}, bool, error) {
 			cleanupErr := c.cleanUp(driver, dataExport)
 			if cleanupErr != nil {
@@ -452,6 +487,34 @@ func (c *Controller) sync(ctx context.Context, in *kdmpapi.DataExport) (bool, er
 	return false, nil
 }
 
+func appendPodLogToStork(jobName string, namespace string) {
+	// Get the job and check whether it has a live pod attached to it;
+	// without the job we cannot resolve its pods, so bail out on any error.
+	job, err := batch.Instance().GetJob(jobName, namespace)
+	if err != nil {
+		logrus.Infof("failed in getting job %v/%v with err: %v", namespace, jobName, err)
+		return
+	}
+	pods, err := core.Instance().GetPods(
+		job.Namespace,
+		map[string]string{
+			"job-name": job.Name,
+		},
+	)
+	if err != nil {
+		logrus.Infof("failed in fetching job pods %s/%s: %v", namespace, jobName, err)
+		return
+	}
+	for _, pod := range pods.Items {
+		numLogLines := int64(50)
+		podLog, err := core.Instance().GetPodLog(pod.Name, pod.Namespace, &corev1.PodLogOptions{TailLines: &numLogLines})
+		if err != nil {
+			logrus.Infof("error fetching log of job-pod %s: %v", pod.Name, err)
+		} else {
+			logrus.Infof("start of job-pod [%s]'s log...", pod.Name)
+			logrus.Infof("%s", podLog)
+			logrus.Infof("end of job-pod [%s]'s log...", pod.Name)
+		}
+	}
+}
+
 func (c *Controller) createJobCredCertSecrets(
 	dataExport *kdmpapi.DataExport,
 	vb *kdmpapi.VolumeBackup,
@@ -624,7 +687,7 @@ func (c *Controller) stageSnapshotScheduled(ctx context.Context, dataExport *kdm
 	snapName := toSnapName(dataExport.Spec.Source.Name, string(dataExport.UID))
 	annotations := make(map[string]string)
 	annotations[dataExportUIDAnnotation] = string(dataExport.UID)
-	annotations[dataExportNameAnnotation] = trimLabel(dataExport.Name)
+	annotations[dataExportNameAnnotation] = utils.GetValidLabel(dataExport.Name)
 	annotations[backupObjectUIDKey] = backupUID
 	annotations[pvcUIDKey] = pvcUID
 	labels := make(map[string]string)
@@ -1190,6 +1253,13 @@ func (c *Controller) stageLocalSnapshotRestoreInProgress(ctx context.Context, da
 
 func (c *Controller) cleanUp(driver drivers.Interface, de *kdmpapi.DataExport) error {
 	var bl *storkapi.BackupLocation
+	doCleanup, err := utils.DoCleanupResource()
+	if err != nil {
+		return err
+	}
+	if (de.Status.Status == kdmpapi.DataExportStatusFailed) && !doCleanup {
+		return nil
+	}
 	if driver == nil {
 		return fmt.Errorf("driver is nil")
 	}
@@ -1265,6 +1335,21 @@ func (c *Controller) cleanUp(driver drivers.Interface, de *kdmpapi.DataExport) e
 	if err != nil && !k8sErrors.IsNotFound(err) {
 		return fmt.Errorf("delete %s job: %s", de.Status.TransferID, err)
 	}
+	// TODO: Need a better way to find the BL type from the DE CR.
+	// For now, delete unconditionally for all BL types.
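+	// (The deletes below are effectively no-ops for non-NFS locations: the
+	// job's PVC/PV names are derived via utils.GetPvcNameForJob and
+	// utils.GetPvNameForJob, and missing objects fall through the
+	// IsNotFound checks.)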
+ namespace, jobName, err := utils.ParseJobID(de.Status.TransferID) + if err != nil { + return err + } + pvcName := utils.GetPvcNameForJob(jobName) + if err := core.Instance().DeletePersistentVolumeClaim(pvcName, namespace); err != nil && !k8sErrors.IsNotFound(err) { + return fmt.Errorf("delete %s/%s pvc: %s", namespace, pvcName, err) + } + + pvName := utils.GetPvNameForJob(jobName) + if err := core.Instance().DeletePersistentVolume(pvName); err != nil && !k8sErrors.IsNotFound(err) { + return fmt.Errorf("delete %s pv: %s", pvName, err) + } } if err := core.Instance().DeleteSecret(utils.GetCredSecretName(de.Name), namespace); err != nil && !k8sErrors.IsNotFound(err) { @@ -1389,7 +1474,7 @@ func (c *Controller) updateStatus(de *kdmpapi.DataExport, data updateDataExportD de.Status.SnapshotNamespace = data.snapshotNamespace } if data.removeFinalizer { - controllers.RemoveFinalizer(de, cleanupFinalizer) + controllers.RemoveFinalizer(de, kdmpcontroller.CleanupFinalizer) } if data.volumeSnapshot != "" { de.Status.VolumeSnapshot = data.volumeSnapshot @@ -1440,7 +1525,7 @@ func (c *Controller) restoreSnapshot(ctx context.Context, snapshotDriver snapsho pvc.Annotations = make(map[string]string) pvc.Annotations[skipResourceAnnotation] = "true" pvc.Annotations[dataExportUIDAnnotation] = string(de.UID) - pvc.Annotations[dataExportNameAnnotation] = trimLabel(de.Name) + pvc.Annotations[dataExportNameAnnotation] = utils.GetValidLabel(de.Name) // If storage class annotation is set , then put that annotation too in the temp pvc // Sometimes the spec.storageclass might be empty, in that case the temp pvc may get the sc as the default sc @@ -1557,7 +1642,11 @@ func startTransferJob( dataExport *kdmpapi.DataExport, podDataPath string, jobConfigMap string, - jobConfigMapNs string) (string, error) { + jobConfigMapNs string, + nfsServerAddr string, + nfsExportPath string, + nfsMountOption string, +) (string, error) { if drv == nil { return "", fmt.Errorf("data transfer driver is not set") } @@ -1605,6 +1694,9 @@ func startTransferJob( drivers.WithPodDatapathType(podDataPath), drivers.WithJobConfigMap(jobConfigMap), drivers.WithJobConfigMapNs(jobConfigMapNs), + drivers.WithNfsServer(nfsServerAddr), + drivers.WithNfsExportDir(nfsExportPath), + drivers.WithNfsMountOption(nfsMountOption), ) case drivers.KopiaRestore: return drv.StartJob( @@ -1621,6 +1713,8 @@ func startTransferJob( drivers.WithCertSecretNamespace(dataExport.Spec.Destination.Namespace), drivers.WithJobConfigMap(jobConfigMap), drivers.WithJobConfigMapNs(jobConfigMapNs), + drivers.WithNfsServer(nfsServerAddr), + drivers.WithNfsExportDir(nfsExportPath), ) } @@ -1632,7 +1726,7 @@ func checkPVC(in kdmpapi.DataExportObjectReference, checkMounts bool) (*corev1.P return nil, err } // wait for pvc to get bound - pvc, err := waitForPVCBound(in, checkMounts) + pvc, err := utils.WaitForPVCBound(in.Name, in.Namespace) if err != nil { return nil, err } @@ -1650,36 +1744,6 @@ func checkPVC(in kdmpapi.DataExportObjectReference, checkMounts bool) (*corev1.P return pvc, nil } -func waitForPVCBound(in kdmpapi.DataExportObjectReference, checkMounts bool) (*corev1.PersistentVolumeClaim, error) { - if err := checkNameNamespace(in); err != nil { - return nil, err - } - // wait for pvc to get bound - var pvc *corev1.PersistentVolumeClaim - var err error - var errMsg string - wErr := wait.ExponentialBackoff(volumeAPICallBackoff, func() (bool, error) { - pvc, err = core.Instance().GetPersistentVolumeClaim(in.Name, in.Namespace) - if err != nil { - return false, err - } - - if 
pvc.Status.Phase != corev1.ClaimBound { - errMsg = fmt.Sprintf("pvc status: expected %s, got %s", corev1.ClaimBound, pvc.Status.Phase) - logrus.Debugf("%v", errMsg) - return false, nil - } - - return true, nil - }) - - if wErr != nil { - logrus.Errorf("%v", wErr) - return nil, fmt.Errorf("%s:%s", wErr, errMsg) - } - return pvc, nil -} - func checkPVCIgnoringJobMounts(in kdmpapi.DataExportObjectReference, expectedMountJob string) (*corev1.PersistentVolumeClaim, error) { var pvc *corev1.PersistentVolumeClaim var checkErr error @@ -1701,7 +1765,7 @@ func checkPVCIgnoringJobMounts(in kdmpapi.DataExportObjectReference, expectedMou logrus.Debugf("checkPVCIgnoringJobMounts: pvc name %v - storage class VolumeBindingMode %v", pvc.Name, *sc.VolumeBindingMode) if *sc.VolumeBindingMode != storagev1.VolumeBindingWaitForFirstConsumer { // wait for pvc to get bound - pvc, checkErr = waitForPVCBound(in, true) + pvc, checkErr = utils.WaitForPVCBound(in.Name, in.Namespace) if checkErr != nil { return "", false, checkErr } @@ -1709,7 +1773,7 @@ func checkPVCIgnoringJobMounts(in kdmpapi.DataExportObjectReference, expectedMou } else { // If sc is not set, we will direct check the pvc status // wait for pvc to get bound - pvc, checkErr = waitForPVCBound(in, true) + pvc, checkErr = utils.WaitForPVCBound(in.Name, in.Namespace) if checkErr != nil { return "", false, checkErr } @@ -1899,6 +1963,8 @@ func CreateCredentialsSecret(secretName, blName, blNamespace, namespace string, return createGoogleSecret(secretName, backupLocation, namespace, labels) case storkapi.BackupLocationAzure: return createAzureSecret(secretName, backupLocation, namespace, labels) + case storkapi.BackupLocationNFS: + return utils.CreateNfsSecret(secretName, backupLocation, namespace, labels) } return fmt.Errorf("unsupported backup location: %v", backupLocation.Location.Type) @@ -1936,7 +2002,7 @@ func createS3Secret(secretName string, backupLocation *storkapi.BackupLocation, credentialData["type"] = []byte(backupLocation.Location.Type) credentialData["password"] = []byte(backupLocation.Location.RepositoryPassword) credentialData["disablessl"] = []byte(strconv.FormatBool(backupLocation.Location.S3Config.DisableSSL)) - err := createJobSecret(secretName, namespace, credentialData, labels) + err := utils.CreateJobSecret(secretName, namespace, credentialData, labels) return err } @@ -1948,7 +2014,7 @@ func createGoogleSecret(secretName string, backupLocation *storkapi.BackupLocati credentialData["accountkey"] = []byte(backupLocation.Location.GoogleConfig.AccountKey) credentialData["projectid"] = []byte(backupLocation.Location.GoogleConfig.ProjectID) credentialData["path"] = []byte(backupLocation.Location.Path) - err := createJobSecret(secretName, namespace, credentialData, labels) + err := utils.CreateJobSecret(secretName, namespace, credentialData, labels) return err } @@ -1960,7 +2026,7 @@ func createAzureSecret(secretName string, backupLocation *storkapi.BackupLocatio credentialData["path"] = []byte(backupLocation.Location.Path) credentialData["storageaccountname"] = []byte(backupLocation.Location.AzureConfig.StorageAccountName) credentialData["storageaccountkey"] = []byte(backupLocation.Location.AzureConfig.StorageAccountKey) - err := createJobSecret(secretName, namespace, credentialData, labels) + err := utils.CreateJobSecret(secretName, namespace, credentialData, labels) return err } @@ -1968,7 +2034,7 @@ func createAzureSecret(secretName string, backupLocation *storkapi.BackupLocatio func createCertificateSecret(secretName, namespace 
string, labels map[string]string) error {
 	drivers.CertFilePath = os.Getenv(drivers.CertDirPath)
 	if drivers.CertFilePath != "" {
-		certificateData, err := ioutil.ReadFile(filepath.Join(drivers.CertFilePath, drivers.CertFileName))
+		certificateData, err := os.ReadFile(filepath.Join(drivers.CertFilePath, drivers.CertFileName))
 		if err != nil {
 			errMsg := fmt.Sprintf("failed reading data from file %s : %s", drivers.CertFilePath, err)
 			logrus.Errorf("%v", errMsg)
@@ -1977,7 +2043,7 @@ func createCertificateSecret(secretName, namespace string, labels map[string]str
 
 		certData := make(map[string][]byte)
 		certData[drivers.CertFileName] = certificateData
-		err = createJobSecret(secretName, namespace, certData, labels)
+		err = utils.CreateJobSecret(secretName, namespace, certData, labels)
 		return err
 	}
 
@@ -1985,32 +2051,6 @@ func createCertificateSecret(secretName, namespace string, labels map[string]str
 	return nil
 }
 
-func createJobSecret(
-	secretName string,
-	namespace string,
-	credentialData map[string][]byte,
-	labels map[string]string,
-) error {
-	secret := &corev1.Secret{
-		ObjectMeta: metav1.ObjectMeta{
-			Name:      secretName,
-			Namespace: namespace,
-			Labels:    labels,
-			Annotations: map[string]string{
-				utils.SkipResourceAnnotation: "true",
-			},
-		},
-		Data: credentialData,
-		Type: corev1.SecretTypeOpaque,
-	}
-	_, err := core.Instance().CreateSecret(secret)
-	if err != nil && k8sErrors.IsAlreadyExists(err) {
-		return nil
-	}
-
-	return err
-}
-
 func toSnapName(pvcName, dataExportUID string) string {
 	truncatedPVCName := pvcName
 	if len(pvcName) > pvcNameLenLimit {
@@ -2038,13 +2078,6 @@ func toBoundJobPVCName(pvcName string, pvcUID string) string {
 	return fmt.Sprintf("%s-%s-%s", "bound", truncatedPVCName, uidToken[0])
 }
 
-func trimLabel(label string) string {
-	if len(label) > 63 {
-		return label[:63]
-	}
-	return label
-}
-
 func getRepoPVCName(de *kdmpapi.DataExport, pvcName string) string {
 	if hasSnapshotStage(de) {
 		subStrings := strings.Split(pvcName, "-")
diff --git a/vendor/github.com/portworx/kdmp/pkg/controllers/resourceexport/reconcile.go b/vendor/github.com/portworx/kdmp/pkg/controllers/resourceexport/reconcile.go
new file mode 100644
index 0000000000..71501b6ea7
--- /dev/null
+++ b/vendor/github.com/portworx/kdmp/pkg/controllers/resourceexport/reconcile.go
@@ -0,0 +1,430 @@
+package resourceexport
+
+import (
+	"context"
+	"fmt"
+	"reflect"
+
+	storkapi "github.com/libopenstorage/stork/pkg/apis/stork/v1alpha1"
+	"github.com/libopenstorage/stork/pkg/controllers"
+	kdmpapi "github.com/portworx/kdmp/pkg/apis/kdmp/v1alpha1"
+	kdmpcontroller "github.com/portworx/kdmp/pkg/controllers"
+	"github.com/portworx/kdmp/pkg/drivers"
+	"github.com/portworx/kdmp/pkg/drivers/driversinstance"
+	"github.com/portworx/kdmp/pkg/drivers/utils"
+	"github.com/portworx/sched-ops/k8s/batch"
+	"github.com/portworx/sched-ops/k8s/core"
+	"github.com/portworx/sched-ops/k8s/kdmp"
+	"github.com/portworx/sched-ops/task"
+	"github.com/sirupsen/logrus"
+	batchv1 "k8s.io/api/batch/v1"
+	k8sErrors "k8s.io/apimachinery/pkg/api/errors"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/types"
+)
+
+// updateResourceExportFields holds the fields that may be updated on a
+// ResourceExport; callers set the fields they need and pass it to updateStatus().
+type updateResourceExportFields struct {
+	stage       kdmpapi.ResourceExportStage
+	status      kdmpapi.ResourceExportStatus
+	reason      string
+	id          string
+	resources   []*kdmpapi.ResourceRestoreResourceInfo
+	VolumesInfo []*kdmpapi.ResourceBackupVolumeInfo
+
ExistingVolumesInfo []*kdmpapi.ResourceRestoreVolumeInfo +} + +func (c *Controller) process(ctx context.Context, in *kdmpapi.ResourceExport) (bool, error) { + funct := "re.process" + if in == nil { + return false, nil + } + resourceExport := in.DeepCopy() + if resourceExport.DeletionTimestamp != nil { + if controllers.ContainsFinalizer(resourceExport, kdmpcontroller.CleanupFinalizer) { + err := c.cleanupResources(resourceExport) + if err != nil { + return false, nil + } + } + if resourceExport.GetFinalizers() != nil { + controllers.RemoveFinalizer(resourceExport, kdmpcontroller.CleanupFinalizer) + err := c.client.Update(context.TODO(), resourceExport) + if err != nil { + errMsg := fmt.Sprintf("failed updating resourceExport CR %s: %v", resourceExport.Name, err) + logrus.Errorf("%v", errMsg) + return false, fmt.Errorf("%v", errMsg) + } + } + return true, nil + } + if resourceExport.Status.Stage == kdmpapi.ResourceExportStageFinal { + return true, nil + } + + // Set to initial status to start with + if resourceExport.Status.Status == "" { + updateData := updateResourceExportFields{ + stage: kdmpapi.ResourceExportStageInitial, + status: kdmpapi.ResourceExportStatusInitial, + reason: "", + } + return true, c.updateStatus(resourceExport, updateData) + } + // Get the driver type + opType, err := getDriverType(resourceExport) + if err != nil { + updateData := updateResourceExportFields{ + status: kdmpapi.ResourceExportStatusFailed, + reason: "fetching driver type failed", + } + return false, c.updateStatus(resourceExport, updateData) + } + + driver, err := driversinstance.Get(opType) + if err != nil { + updateData := updateResourceExportFields{ + status: kdmpapi.ResourceExportStatusFailed, + reason: "fetching driver instance failed", + } + return false, c.updateStatus(resourceExport, updateData) + } + blName := resourceExport.Spec.Destination.Name + blNamespace := resourceExport.Spec.Destination.Namespace + backupLocation, err := kdmpcontroller.ReadBackupLocation(blName, blNamespace, "") + + if err != nil { + msg := fmt.Sprintf("reading of backuplocation [%v/%v] failed: %v", blNamespace, blName, err) + logrus.Errorf(msg) + updateData := updateResourceExportFields{ + status: kdmpapi.ResourceExportStatusFailed, + reason: fmt.Sprintf("failed reading bl [%v/%v]: %v", blNamespace, blName, err), + } + return false, c.updateStatus(resourceExport, updateData) + } + + switch resourceExport.Status.Stage { + case kdmpapi.ResourceExportStageInitial: + // Create ResourceBackup CR + err = createResourceBackup(resourceExport.Name, resourceExport.Namespace) + if err != nil { + updateData := updateResourceExportFields{ + stage: kdmpapi.ResourceExportStageFinal, + status: kdmpapi.ResourceExportStatusFailed, + reason: fmt.Sprintf("failed to create ResourceBackup CR [%v/%v]", resourceExport.Namespace, resourceExport.Name), + } + return false, c.updateStatus(resourceExport, updateData) + } + // start data transfer + id, serr := startNfsResourceJob( + driver, + utils.KdmpConfigmapName, + utils.KdmpConfigmapNamespace, + resourceExport, + backupLocation, + ) + logrus.Tracef("%s: startNfsResourceJob id: %v", funct, id) + if serr != nil { + logrus.Errorf("%s: serr: %v", funct, serr) + updateData := updateResourceExportFields{ + stage: kdmpapi.ResourceExportStageFinal, + status: kdmpapi.ResourceExportStatusFailed, + reason: fmt.Sprintf("failed to create startNfsResourceJob job [%v/%v]", resourceExport.Namespace, resourceExport.Name), + } + return false, c.updateStatus(resourceExport, updateData) + } + updateData := 
updateResourceExportFields{
+			stage:  kdmpapi.ResourceExportStageInProgress,
+			status: kdmpapi.ResourceExportStatusInProgress,
+			id:     id,
+			reason: "",
+		}
+		return false, c.updateStatus(resourceExport, updateData)
+	case kdmpapi.ResourceExportStageInProgress:
+
+		// Read the job status and move the reconciler to the next state
+		progress, err := driver.JobStatus(resourceExport.Status.TransferID)
+		logrus.Tracef("%s job progress: %v", funct, progress)
+		if err != nil {
+			errMsg := fmt.Sprintf("failed to get %s job status: %s", resourceExport.Status.TransferID, err)
+			updateData := updateResourceExportFields{
+				status: kdmpapi.ResourceExportStatusFailed,
+				reason: errMsg,
+			}
+			return false, c.updateStatus(resourceExport, updateData)
+		}
+		if progress.Status == batchv1.JobFailed {
+			updateData := updateResourceExportFields{
+				stage:  kdmpapi.ResourceExportStageFinal,
+				status: kdmpapi.ResourceExportStatusFailed,
+				reason: fmt.Sprintf("transfer job failed for ResourceExport CR [%v/%v]", resourceExport.Namespace, resourceExport.Name),
+			}
+			if len(progress.Reason) == 0 {
+				// As we couldn't get the actual reason from the executor,
+				// mark it as an internal error
+				updateData.reason = "internal error from executor"
+				return true, c.updateStatus(resourceExport, updateData)
+			}
+			return true, c.updateStatus(resourceExport, updateData)
+		} else if progress.Status == batchv1.JobConditionType("") {
+			updateData := updateResourceExportFields{
+				stage:  kdmpapi.ResourceExportStageInProgress,
+				status: kdmpapi.ResourceExportStatusInProgress,
+				reason: "ResourceExport job in progress",
+			}
+			return true, c.updateStatus(resourceExport, updateData)
+		}
+
+		var rb *kdmpapi.ResourceBackup
+		// Get the resourcebackup
+		rb, err = kdmp.Instance().GetResourceBackup(resourceExport.Name, resourceExport.Namespace)
+		if err != nil {
+			errMsg := fmt.Sprintf("failed to get resourcebackup CR [%s/%s]: %s", resourceExport.Namespace, resourceExport.Name, err)
+			updateData := updateResourceExportFields{
+				status: kdmpapi.ResourceExportStatusFailed,
+				reason: errMsg,
+			}
+			return false, c.updateStatus(resourceExport, updateData)
+		}
+
+		switch progress.State {
+		case drivers.JobStateFailed:
+			errMsg := fmt.Sprintf("%s transfer job failed: %s", resourceExport.Status.TransferID, progress.Reason)
+			// If a job has failed it means it has tried all possible retries and given up.
+			// In such a scenario we need to fail the ResourceExport CR and move to the cleanup stage
+			updateData := updateResourceExportFields{
+				stage:     kdmpapi.ResourceExportStageFinal,
+				status:    kdmpapi.ResourceExportStatusFailed,
+				reason:    errMsg,
+				resources: rb.Status.Resources,
+			}
+			return true, c.updateStatus(resourceExport, updateData)
+		case drivers.JobStateCompleted:
+			// Go for clean up with success state
+			updateData := updateResourceExportFields{
+				stage:               kdmpapi.ResourceExportStageFinal,
+				status:              kdmpapi.ResourceExportStatusSuccessful,
+				reason:              "Job successful",
+				resources:           rb.Status.Resources,
+				VolumesInfo:         rb.VolumesInfo,
+				ExistingVolumesInfo: rb.ExistingVolumesInfo,
+			}
+
+			return true, c.updateStatus(resourceExport, updateData)
+		}
+	case kdmpapi.ResourceExportStageFinal:
+		// Do nothing
+	}
+
+	return true, nil
+}
+
+func (c *Controller) cleanupResources(resourceExport *kdmpapi.ResourceExport) error {
+	// clean up resources
+	rbNamespace, rbName, err := utils.ParseJobID(resourceExport.Status.TransferID)
+	if err != nil {
+		errMsg := fmt.Sprintf("failed to parse job ID %v from ResourceExport CR %v: %v",
+			resourceExport.Status.TransferID, resourceExport.Name, err)
+		logrus.Errorf("%v", errMsg)
+		return err
+	}
+	err = kdmp.Instance().DeleteResourceBackup(rbName, rbNamespace)
+	if err != nil && !k8sErrors.IsNotFound(err) {
+		errMsg := fmt.Sprintf("failed to delete ResourceBackup CR [%v/%v]: %v", rbNamespace, rbName, err)
+		logrus.Errorf("%v", errMsg)
+		return err
+	}
+	if err = batch.Instance().DeleteJob(resourceExport.Name, resourceExport.Namespace); err != nil && !k8sErrors.IsNotFound(err) {
+		return err
+	}
+	pvcName := utils.GetPvcNameForJob(rbName)
+	if err := core.Instance().DeletePersistentVolumeClaim(pvcName, rbNamespace); err != nil && !k8sErrors.IsNotFound(err) {
+		return fmt.Errorf("delete %s/%s pvc: %s", rbNamespace, pvcName, err)
+	}
+	pvName := utils.GetPvNameForJob(rbName)
+	if err := core.Instance().DeletePersistentVolume(pvName); err != nil && !k8sErrors.IsNotFound(err) {
+		return fmt.Errorf("delete %s pv: %s", pvName, err)
+	}
+	if err := utils.CleanServiceAccount(rbName, rbNamespace); err != nil {
+		errMsg := fmt.Sprintf("deletion of service account %s/%s failed: %v", rbNamespace, rbName, err)
+		logrus.Errorf("%s: %v", "cleanupResources", errMsg)
+		return fmt.Errorf("%v", errMsg)
+	}
+	if err := core.Instance().DeleteSecret(utils.GetCredSecretName(rbName), rbNamespace); err != nil && !k8sErrors.IsNotFound(err) {
+		errMsg := fmt.Sprintf("deletion of backup credential secret %s failed: %v", rbName, err)
+		logrus.Errorf("%v", errMsg)
+		return fmt.Errorf("%v", errMsg)
+	}
+	return nil
+}
+
+func (c *Controller) updateStatus(re *kdmpapi.ResourceExport, data updateResourceExportFields) error {
+	var updErr error
+	t := func() (interface{}, bool, error) {
+		logrus.Infof("updateStatus data: %+v", data)
+		namespacedName := types.NamespacedName{}
+		namespacedName.Name = re.Name
+		namespacedName.Namespace = re.Namespace
+		err := c.client.Get(context.TODO(), namespacedName, re)
+		if err != nil {
+			errMsg := fmt.Sprintf("failed in getting RE CR %v/%v: %v", re.Namespace, re.Name, err)
+			logrus.Infof("%v", errMsg)
+			return "", true, fmt.Errorf("%v", errMsg)
+		}
+
+		if data.status != "" {
+			re.Status.Status = data.status
+			re.Status.Reason = data.reason
+		}
+
+		if data.id != "" {
+			re.Status.TransferID = data.id
+		}
+
+		if data.stage != "" {
+			re.Status.Stage = data.stage
+		}
+
+		if len(data.resources) != 0 {
+			re.Status.Resources = data.resources
+		}
+		if len(data.VolumesInfo) != 0 {
+			re.VolumesInfo =
data.VolumesInfo
+		}
+
+		if len(data.ExistingVolumesInfo) != 0 {
+			re.ExistingVolumesInfo = data.ExistingVolumesInfo
+		}
+
+		updErr = c.client.Update(context.TODO(), re)
+		if updErr != nil {
+			errMsg := fmt.Sprintf("failed updating resourceExport CR %s: %v", re.Name, updErr)
+			logrus.Errorf("%v", errMsg)
+			return "", true, fmt.Errorf("%v", errMsg)
+		}
+		return "", false, nil
+	}
+	if _, err := task.DoRetryWithTimeout(t, kdmpcontroller.TaskDefaultTimeout, kdmpcontroller.TaskProgressCheckInterval); err != nil {
+		errMsg := fmt.Sprintf("max retries done, failed updating resourceExport CR %s: %v", re.Name, updErr)
+		logrus.Errorf("%v", errMsg)
+		// Exhausted all retries, fail the CR
+		return fmt.Errorf("%v", errMsg)
+	}
+
+	return nil
+}
+
+func getDriverType(re *kdmpapi.ResourceExport) (string, error) {
+	src := re.Spec.Source
+	doBackup := false
+	doRestore := false
+
+	if isApplicationBackupRef(src) {
+		doBackup = true
+	} else if isApplicationRestoreRef(src) {
+		doRestore = true
+	} else {
+		return "", fmt.Errorf("invalid source kind for ResourceExport: expected ApplicationBackup or ApplicationRestore")
+	}
+
+	switch re.Spec.Type {
+	case kdmpapi.ResourceExportNFS:
+		if doBackup {
+			return drivers.NFSBackup, nil
+		}
+		if doRestore {
+			return drivers.NFSRestore, nil
+		}
+		return "", fmt.Errorf("invalid kind for nfs source: expected nfs type")
+	}
+	return string(re.Spec.Type), nil
+}
+
+func isApplicationBackupRef(ref kdmpapi.ResourceExportObjectReference) bool {
+	return ref.Kind == "ApplicationBackup" && ref.APIVersion == "stork.libopenstorage.org/v1alpha1"
+}
+
+func isApplicationRestoreRef(ref kdmpapi.ResourceExportObjectReference) bool {
+	return ref.Kind == "ApplicationRestore" && ref.APIVersion == "stork.libopenstorage.org/v1alpha1"
+}
+
+func startNfsResourceJob(
+	drv drivers.Interface,
+	jobConfigMap string,
+	jobConfigMapNs string,
+	re *kdmpapi.ResourceExport,
+	bl *storkapi.BackupLocation,
+) (string, error) {
+	err := utils.CreateNfsSecret(utils.GetCredSecretName(re.Name), bl, re.Namespace, nil)
+	if err != nil {
+		logrus.Errorf("failed to create NFS cred secret: %v", err)
+		return "", fmt.Errorf("failed to create NFS cred secret: %v", err)
+	}
+	switch drv.Name() {
+	case drivers.NFSBackup:
+		return drv.StartJob(
+			// TODO: the two options below need to be generalized and changed in all the StartJob calls.
+			// For NFS they need to be populated in the ResourceExport CR and passed to the job via its reconciler.
+ drivers.WithNfsImageExecutorSource(re.Spec.TriggeredFrom), + drivers.WithNfsImageExecutorSourceNs(re.Spec.TriggeredFromNs), + drivers.WithRestoreExport(re.Name), + drivers.WithJobNamespace(re.Namespace), + drivers.WithNfsServer(bl.Location.NfsConfig.ServerAddr), + drivers.WithNfsExportDir(bl.Location.Path), + drivers.WithAppCRName(re.Spec.Source.Name), + drivers.WithAppCRNamespace(re.Spec.Source.Namespace), + drivers.WithNamespace(re.Namespace), + drivers.WithResoureBackupName(re.Name), + drivers.WithResoureBackupNamespace(re.Namespace), + drivers.WithNfsMountOption(bl.Location.NfsConfig.MountOption), + ) + case drivers.NFSRestore: + return drv.StartJob( + drivers.WithNfsImageExecutorSource(re.Spec.TriggeredFrom), + drivers.WithNfsImageExecutorSourceNs(re.Spec.TriggeredFromNs), + drivers.WithRestoreExport(re.Name), + drivers.WithJobNamespace(re.Namespace), + drivers.WithNfsServer(bl.Location.NfsConfig.ServerAddr), + drivers.WithNfsExportDir(bl.Location.Path), + drivers.WithAppCRName(re.Spec.Source.Name), + drivers.WithAppCRNamespace(re.Spec.Source.Namespace), + drivers.WithNamespace(re.Namespace), + drivers.WithResoureBackupName(re.Name), + drivers.WithResoureBackupNamespace(re.Namespace), + drivers.WithNfsMountOption(bl.Location.NfsConfig.MountOption), + ) + } + return "", fmt.Errorf("unknown data transfer driver: %s", drv.Name()) +} + +func createResourceBackup(name, namespace string) error { + funct := "createResourceBackup" + + rbCR := &kdmpapi.ResourceBackup{ + TypeMeta: metav1.TypeMeta{ + Kind: reflect.TypeOf(kdmpapi.ResourceBackup{}).Name(), + APIVersion: "kdmp.portworx.com/v1alpha1", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: name, + Namespace: namespace, + Annotations: map[string]string{ + utils.SkipResourceAnnotation: "true", + }, + }, + // TODO: As part of restore resources, prefill resources info + // so that job can update the same + Spec: kdmpapi.ResourceBackupSpec{}, + } + + _, err := kdmp.Instance().CreateResourceBackup(rbCR) + if err != nil { + logrus.Errorf("%s: %v", funct, err) + return err + } + + return nil +} diff --git a/vendor/github.com/portworx/kdmp/pkg/controllers/resourceexport/resourceexport.go b/vendor/github.com/portworx/kdmp/pkg/controllers/resourceexport/resourceexport.go new file mode 100644 index 0000000000..9021b452e1 --- /dev/null +++ b/vendor/github.com/portworx/kdmp/pkg/controllers/resourceexport/resourceexport.go @@ -0,0 +1,134 @@ +package resourceexport + +import ( + "context" + "reflect" + + "github.com/libopenstorage/stork/pkg/controllers" + kdmpapi "github.com/portworx/kdmp/pkg/apis/kdmp/v1alpha1" + kdmpcontroller "github.com/portworx/kdmp/pkg/controllers" + "github.com/portworx/kdmp/pkg/utils" + "github.com/portworx/kdmp/pkg/version" + "github.com/portworx/sched-ops/k8s/apiextensions" + "github.com/portworx/sched-ops/k8s/kdmp" + "github.com/sirupsen/logrus" + apiextensionsv1beta1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1" + + "k8s.io/apimachinery/pkg/api/errors" + runtimeclient "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/controller" + "sigs.k8s.io/controller-runtime/pkg/handler" + "sigs.k8s.io/controller-runtime/pkg/manager" + "sigs.k8s.io/controller-runtime/pkg/reconcile" + "sigs.k8s.io/controller-runtime/pkg/source" +) + +// Controller is a k8s controller that handles ResourceExport resources. +type Controller struct { + client runtimeclient.Client +} + +// NewController returns a new instance of the controller. 
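+//
+// A wiring sketch (hypothetical; mirrors how stork's main is expected to
+// register kdmp controllers against the shared manager):
+//
+//	rc, err := resourceexport.NewController(mgr)
+//	if err == nil {
+//		err = rc.Init(mgr)
+//	}
+//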
+func NewController(mgr manager.Manager) (*Controller, error) {
+	return &Controller{
+		client: mgr.GetClient(),
+	}, nil
+}
+
+// Init initializes the resource export controller
+func (c *Controller) Init(mgr manager.Manager) error {
+	err := c.createCRD()
+	if err != nil {
+		return err
+	}
+
+	// Create a new controller
+	ctrl, err := controller.New("resource-export-controller", mgr, controller.Options{
+		Reconciler:              c,
+		MaxConcurrentReconciles: 10,
+	})
+	if err != nil {
+		return err
+	}
+
+	// Watch for changes to the primary resource
+	return ctrl.Watch(&source.Kind{Type: &kdmpapi.ResourceExport{}}, &handler.EnqueueRequestForObject{})
+}
+
+// Reconcile reads the state of the cluster for an object and makes changes based on the state read
+// and what is in the Spec.
+func (c *Controller) Reconcile(ctx context.Context, request reconcile.Request) (reconcile.Result, error) {
+	restoreExport, err := kdmp.Instance().GetResourceExport(request.Name, request.Namespace)
+	if err != nil {
+		if errors.IsNotFound(err) {
+			// Request object not found, could have been deleted after reconcile request.
+			// Owned objects are automatically garbage collected. For additional cleanup logic use finalizers.
+			// Return and don't requeue
+			return reconcile.Result{}, nil
+		}
+		// Error reading the object - requeue the request.
+		return reconcile.Result{RequeueAfter: kdmpcontroller.RequeuePeriod}, nil
+	}
+	if !controllers.ContainsFinalizer(restoreExport, kdmpcontroller.CleanupFinalizer) {
+		controllers.SetFinalizer(restoreExport, kdmpcontroller.CleanupFinalizer)
+		return reconcile.Result{Requeue: true}, c.client.Update(context.TODO(), restoreExport)
+	}
+
+	requeue, err := c.process(context.TODO(), restoreExport)
+	if err != nil {
+		logrus.Errorf("failed to execute the process function for ResourceExport CR %v: %v", restoreExport.Name, err)
+		return reconcile.Result{RequeueAfter: kdmpcontroller.RequeuePeriod}, nil
+	}
+	if requeue {
+		return reconcile.Result{RequeueAfter: kdmpcontroller.RequeuePeriod}, nil
+	}
+
+	return reconcile.Result{RequeueAfter: kdmpcontroller.ResyncPeriod}, nil
+}
+
+func (c *Controller) createCRD() error {
+	requiresV1, err := version.RequiresV1Registration()
+	if err != nil {
+		return err
+	}
+	resources := []apiextensions.CustomResource{
+		{
+			Name:    kdmpapi.ResourceExportResourceName,
+			Plural:  kdmpapi.ResourceExportResourcePlural,
+			Group:   kdmpapi.SchemeGroupVersion.Group,
+			Version: kdmpapi.SchemeGroupVersion.Version,
+			Scope:   apiextensionsv1beta1.NamespaceScoped,
+			Kind:    reflect.TypeOf(kdmpapi.ResourceExport{}).Name(),
+		},
+		{
+			Name:    kdmpapi.ResourceBackupResourceName,
+			Plural:  kdmpapi.ResourceBackupResourcePlural,
+			Group:   kdmpapi.SchemeGroupVersion.Group,
+			Version: kdmpapi.SchemeGroupVersion.Version,
+			Scope:   apiextensionsv1beta1.NamespaceScoped,
+			Kind:    reflect.TypeOf(kdmpapi.ResourceBackup{}).Name(),
+		},
+	}
+
+	for _, res := range resources {
+		if requiresV1 {
+			err := utils.CreateCRD(res)
+			if err != nil && !errors.IsAlreadyExists(err) {
+				return err
+			}
+			if err := apiextensions.Instance().ValidateCRD(res.Plural+"."+res.Group, kdmpcontroller.ValidateCRDTimeout, kdmpcontroller.ValidateCRDInterval); err != nil {
+				return err
+			}
+		} else {
+			err = apiextensions.Instance().CreateCRDV1beta1(res)
+			if err != nil && !errors.IsAlreadyExists(err) {
+				return err
+			}
+			if err := apiextensions.Instance().ValidateCRDV1beta1(res, kdmpcontroller.ValidateCRDTimeout, kdmpcontroller.ValidateCRDInterval); err != nil {
+				return err
+			}
+		}
+	}
+
+	return nil
+}
diff --git
a/vendor/github.com/portworx/kdmp/pkg/drivers/drivers.go b/vendor/github.com/portworx/kdmp/pkg/drivers/drivers.go index 75849ee94d..e4f4a25271 100644 --- a/vendor/github.com/portworx/kdmp/pkg/drivers/drivers.go +++ b/vendor/github.com/portworx/kdmp/pkg/drivers/drivers.go @@ -2,6 +2,7 @@ package drivers import ( "fmt" + batchv1 "k8s.io/api/batch/v1" ) @@ -14,6 +15,9 @@ const ( KopiaRestore = "kopiarestore" KopiaDelete = "kopiadelete" KopiaMaintenance = "kopiamaintenance" + NFSBackup = "nfsbackup" + NFSRestore = "nfsrestore" + NFSDelete = "nfsdelete" ) // Docker images. @@ -21,6 +25,7 @@ const ( ResticExecutorImage = "portworx/resticexecutor" KopiaExecutorImage = "kopiaexecutor" RsyncImage = "eeacms/rsync" + NfsExecutorImage = "nfsexecutor" ) // Driver labels. @@ -46,6 +51,7 @@ const ( CertFileName = "public.crt" CertSecretName = "tls-s3-cert" CertMount = "/etc/tls-s3-cert" + NfsMount = "/tmp/nfs-target/" ) // Driver job options. @@ -70,6 +76,10 @@ const ( KopiaExecutorRequestCPU = "KDMP_KOPIAEXECUTOR_REQUEST_CPU" KopiaExecutorRequestMemory = "KDMP_KOPIAEXECUTOR_REQUEST_MEMORY" KopiaExecutorLimitCPU = "KDMP_KOPIAEXECUTOR_LIMIT_CPU" KopiaExecutorLimitMemory = "KDMP_KOPIAEXECUTOR_LIMIT_MEMORY" + NFSExecutorRequestCPU = "KDMP_NFSEXECUTOR_REQUEST_CPU" + NFSExecutorRequestMemory = "KDMP_NFSEXECUTOR_REQUEST_MEMORY" + NFSExecutorLimitCPU = "KDMP_NFSEXECUTOR_LIMIT_CPU" + NFSExecutorLimitMemory = "KDMP_NFSEXECUTOR_LIMIT_MEMORY" ) // Default parameters for job options. @@ -86,6 +96,10 @@ const ( DefaultKopiaExecutorRequestMemory = "700Mi" DefaultKopiaExecutorLimitCPU = "0.2" DefaultKopiaExecutorLimitMemory = "1Gi" + DefaultNFSExecutorRequestCPU = "0.1" + DefaultNFSExecutorRequestMemory = "700Mi" + DefaultNFSExecutorLimitCPU = "0.2" + DefaultNFSExecutorLimitMemory = "1Gi" ) var ( diff --git a/vendor/github.com/portworx/kdmp/pkg/drivers/driversinstance/driversinstance.go b/vendor/github.com/portworx/kdmp/pkg/drivers/driversinstance/driversinstance.go index 44998fb49b..886d422f58 100644 --- a/vendor/github.com/portworx/kdmp/pkg/drivers/driversinstance/driversinstance.go +++ b/vendor/github.com/portworx/kdmp/pkg/drivers/driversinstance/driversinstance.go @@ -9,6 +9,9 @@ import ( "github.com/portworx/kdmp/pkg/drivers/kopiadelete" "github.com/portworx/kdmp/pkg/drivers/kopiamaintenance" "github.com/portworx/kdmp/pkg/drivers/kopiarestore" + "github.com/portworx/kdmp/pkg/drivers/nfsbackup" + "github.com/portworx/kdmp/pkg/drivers/nfsdelete" + "github.com/portworx/kdmp/pkg/drivers/nfsrestore" "github.com/portworx/kdmp/pkg/drivers/resticbackup" "github.com/portworx/kdmp/pkg/drivers/resticrestore" "github.com/portworx/kdmp/pkg/drivers/rsync" @@ -24,6 +27,9 @@ var ( drivers.KopiaRestore: kopiarestore.Driver{}, drivers.KopiaDelete: kopiadelete.Driver{}, drivers.KopiaMaintenance: kopiamaintenance.Driver{}, + drivers.NFSBackup: nfsbackup.Driver{}, + drivers.NFSRestore: nfsrestore.Driver{}, + drivers.NFSDelete: nfsdelete.Driver{}, } ) diff --git a/vendor/github.com/portworx/kdmp/pkg/drivers/kopiabackup/kopiabackup.go b/vendor/github.com/portworx/kdmp/pkg/drivers/kopiabackup/kopiabackup.go index 5884bfb830..c21902b623 100644 --- a/vendor/github.com/portworx/kdmp/pkg/drivers/kopiabackup/kopiabackup.go +++ b/vendor/github.com/portworx/kdmp/pkg/drivers/kopiabackup/kopiabackup.go @@ -93,6 +93,15 @@ func (d Driver) StartJob(opts ...drivers.JobOption) (id string, err error) { logrus.Errorf("%s: %v", fn, errMsg) return "", fmt.Errorf(errMsg) } + + // Create PV & PVC only in case of NFS.
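+ // When the backup target is an NFS backuplocation, the export is surfaced + // to the job pod as a per-job PV/PVC pair (the PVC name is derived from the + // job name via utils.GetPvcNameForJob) and mounted at drivers.NfsMount, + // rather than the pod mounting the NFS share directly.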
+ if o.NfsServer != "" { + err := utils.CreateNFSPvPvcForJob(jobName, job.ObjectMeta.Namespace, o) + if err != nil { + return "", err + } + } + if _, err = batch.Instance().CreateJob(job); err != nil && !apierrors.IsAlreadyExists(err) { errMsg := fmt.Sprintf("creation of backup job %s failed: %v", jobName, err) logrus.Errorf("%s: %v", fn, errMsg) @@ -274,28 +283,22 @@ func jobFor( splitCmd = append(splitCmd, "--compression", jobOption.Compression) cmd = strings.Join(splitCmd, " ") } - imageRegistry, imageRegistrySecret, err := utils.GetKopiaExecutorImageRegistryAndSecret( + kopiaExecutorImage, _, err := utils.GetExecutorImageAndSecret(drivers.KopiaExecutorImage, jobOption.KopiaImageExecutorSource, jobOption.KopiaImageExecutorSourceNs, - ) + jobName, + jobOption) if err != nil { - logrus.Errorf("jobFor: getting kopia image registry and image secret failed during backup: %v", err) - return nil, err + errMsg := fmt.Errorf("failed to get the executor image details for job %s", jobName) + logrus.Errorf("%v", errMsg) + return nil, errMsg } - if len(imageRegistrySecret) != 0 { - err = utils.CreateImageRegistrySecret(imageRegistrySecret, jobName, jobOption.KopiaImageExecutorSourceNs, jobOption.Namespace) - if err != nil { - return nil, err - } - - } - var kopiaExecutorImage string - if len(imageRegistry) != 0 { - kopiaExecutorImage = fmt.Sprintf("%s/%s", imageRegistry, utils.GetKopiaExecutorImageName()) - } else { - kopiaExecutorImage = utils.GetKopiaExecutorImageName() + tolerations, err := utils.GetTolerationsFromDeployment(jobOption.KopiaImageExecutorSource, + jobOption.KopiaImageExecutorSourceNs) + if err != nil { + logrus.Errorf("failed to get the toleration details: %v", err) + return nil, fmt.Errorf("failed to get the toleration details for job [%s/%s]", jobOption.Namespace, jobName) } - job := &batchv1.Job{ ObjectMeta: metav1.ObjectMeta{ Name: jobName, @@ -340,6 +343,7 @@ func jobFor( }, }, }, + Tolerations: tolerations, Volumes: []corev1.Volume{ { Name: "vol", @@ -362,7 +366,25 @@ func jobFor( }, }, } - + if len(jobOption.NfsServer) != 0 { + volumeMount := corev1.VolumeMount{ + Name: utils.NfsVolumeName, + MountPath: drivers.NfsMount, + } + job.Spec.Template.Spec.Containers[0].VolumeMounts = append( + job.Spec.Template.Spec.Containers[0].VolumeMounts, + volumeMount, + ) + volume := corev1.Volume{ + Name: utils.NfsVolumeName, + VolumeSource: corev1.VolumeSource{ + PersistentVolumeClaim: &corev1.PersistentVolumeClaimVolumeSource{ + ClaimName: utils.GetPvcNameForJob(jobName), + }, + }, + } + job.Spec.Template.Spec.Volumes = append(job.Spec.Template.Spec.Volumes, volume) + } if drivers.CertFilePath != "" { volumeMount := corev1.VolumeMount{ Name: utils.TLSCertMountVol, diff --git a/vendor/github.com/portworx/kdmp/pkg/drivers/kopiabackup/kopiabackuplive.go b/vendor/github.com/portworx/kdmp/pkg/drivers/kopiabackup/kopiabackuplive.go index 48f40d57de..876f37c476 100644 --- a/vendor/github.com/portworx/kdmp/pkg/drivers/kopiabackup/kopiabackuplive.go +++ b/vendor/github.com/portworx/kdmp/pkg/drivers/kopiabackup/kopiabackuplive.go @@ -69,29 +69,22 @@ func jobForLiveBackup( } privileged := true - - imageRegistry, imageRegistrySecret, err := utils.GetKopiaExecutorImageRegistryAndSecret( + kopiaExecutorImage, _, err := utils.GetExecutorImageAndSecret(drivers.KopiaExecutorImage, jobOption.KopiaImageExecutorSource, jobOption.KopiaImageExecutorSourceNs, - ) + jobName, + jobOption) if err != nil { - logrus.Errorf("jobFor: getting kopia image registry and image secret failed during live backup: %v", err) - 
return nil, err - } - if len(imageRegistrySecret) != 0 { - err = utils.CreateImageRegistrySecret(imageRegistrySecret, jobName, jobOption.KopiaImageExecutorSourceNs, jobOption.Namespace) - if err != nil { - return nil, err - } + errMsg := fmt.Errorf("failed to get the executor image details for job %s", jobName) + logrus.Errorf("%v", errMsg) + return nil, errMsg } - - var kopiaExecutorImage string - if len(imageRegistry) != 0 { - kopiaExecutorImage = fmt.Sprintf("%s/%s", imageRegistry, utils.GetKopiaExecutorImageName()) - } else { - kopiaExecutorImage = utils.GetKopiaExecutorImageName() + tolerations, err := utils.GetTolerationsFromDeployment(jobOption.KopiaImageExecutorSource, + jobOption.KopiaImageExecutorSourceNs) + if err != nil { + logrus.Errorf("failed to get the toleration details: %v", err) + return nil, fmt.Errorf("failed to get the toleration details for job [%s/%s]", jobOption.Namespace, jobName) } - job := &batchv1.Job{ ObjectMeta: metav1.ObjectMeta{ Name: jobName, @@ -140,6 +133,7 @@ func jobForLiveBackup( }, }, }, + Tolerations: tolerations, Volumes: []corev1.Volume{ { Name: "vol", @@ -163,6 +157,26 @@ func jobForLiveBackup( }, } + if len(jobOption.NfsServer) != 0 { + volumeMount := corev1.VolumeMount{ + Name: utils.NfsVolumeName, + MountPath: drivers.NfsMount, + } + job.Spec.Template.Spec.Containers[0].VolumeMounts = append( + job.Spec.Template.Spec.Containers[0].VolumeMounts, + volumeMount, + ) + volume := corev1.Volume{ + Name: utils.NfsVolumeName, + VolumeSource: corev1.VolumeSource{ + PersistentVolumeClaim: &corev1.PersistentVolumeClaimVolumeSource{ + ClaimName: utils.GetPvcNameForJob(jobName), + }, + }, + } + job.Spec.Template.Spec.Volumes = append(job.Spec.Template.Spec.Volumes, volume) + } + if drivers.CertFilePath != "" { volumeMount := corev1.VolumeMount{ Name: utils.TLSCertMountVol, diff --git a/vendor/github.com/portworx/kdmp/pkg/drivers/kopiadelete/kopiadelete.go b/vendor/github.com/portworx/kdmp/pkg/drivers/kopiadelete/kopiadelete.go index ab6dcfd574..da51c02938 100644 --- a/vendor/github.com/portworx/kdmp/pkg/drivers/kopiadelete/kopiadelete.go +++ b/vendor/github.com/portworx/kdmp/pkg/drivers/kopiadelete/kopiadelete.go @@ -84,6 +84,15 @@ func (d Driver) StartJob(opts ...drivers.JobOption) (id string, err error) { logrus.Errorf("%s %v", fn, errMsg) return "", fmt.Errorf(errMsg) } + + // Create PV & PVC only in case of NFS. 
+ if o.NfsServer != "" { + err := utils.CreateNFSPvPvcForJob(jobName, job.ObjectMeta.Namespace, o) + if err != nil { + return "", err + } + } + if _, err = batch.Instance().CreateJob(job); err != nil && !apierrors.IsAlreadyExists(err) { errMsg := fmt.Sprintf("creation of backup snapshot delete job [%s] failed: %v", jobName, err) logrus.Errorf("%s %v", fn, errMsg) @@ -184,21 +193,22 @@ func jobFor( jobOption.VolumeBackupDeleteNamespace, }, " ") - imageRegistry, imageRegistrySecret, err := utils.GetKopiaExecutorImageRegistryAndSecret( + kopiaExecutorImage, imageRegistrySecret, err := utils.GetExecutorImageAndSecret(drivers.KopiaExecutorImage, jobOption.KopiaImageExecutorSource, jobOption.KopiaImageExecutorSourceNs, - ) + jobName, + jobOption) if err != nil { - logrus.Errorf("jobFor: getting kopia image registry and image secret failed during delete: %v", err) - return nil, err + errMsg := fmt.Errorf("failed to get the executor image details for job %s", jobName) + logrus.Errorf("%v", errMsg) + return nil, errMsg } - var kopiaExecutorImage string - if len(imageRegistry) != 0 { - kopiaExecutorImage = fmt.Sprintf("%s/%s", imageRegistry, utils.GetKopiaExecutorImageName()) - } else { - kopiaExecutorImage = utils.GetKopiaExecutorImageName() + tolerations, err := utils.GetTolerationsFromDeployment(jobOption.KopiaImageExecutorSource, + jobOption.KopiaImageExecutorSourceNs) + if err != nil { + logrus.Errorf("failed to get the toleration details: %v", err) + return nil, fmt.Errorf("failed to get the toleration details for job [%s/%s]", jobOption.Namespace, jobName) } - job := &batchv1.Job{ ObjectMeta: metav1.ObjectMeta{ Name: jobName, @@ -240,6 +250,7 @@ func jobFor( }, }, }, + Tolerations: tolerations, Volumes: []corev1.Volume{ { Name: "cred-secret", @@ -255,6 +266,27 @@ func jobFor( }, } + if len(jobOption.NfsServer) != 0 { + volumeMount := corev1.VolumeMount{ + Name: utils.NfsVolumeName, + MountPath: drivers.NfsMount, + } + job.Spec.Template.Spec.Containers[0].VolumeMounts = append( + job.Spec.Template.Spec.Containers[0].VolumeMounts, + volumeMount, + ) + volume := corev1.Volume{ + Name: utils.NfsVolumeName, + VolumeSource: corev1.VolumeSource{ + PersistentVolumeClaim: &corev1.PersistentVolumeClaimVolumeSource{ + ClaimName: utils.GetPvcNameForJob(jobName), + }, + }, + } + + job.Spec.Template.Spec.Volumes = append(job.Spec.Template.Spec.Volumes, volume) + } + if drivers.CertFilePath != "" { volumeMount := corev1.VolumeMount{ Name: utils.TLSCertMountVol, @@ -309,6 +341,16 @@ func jobFor( }, }, } + } else { + nodeAffinity, err := utils.GetNodeAffinityFromDeployment(jobOption.KopiaImageExecutorSource, + jobOption.KopiaImageExecutorSourceNs) + if err != nil { + logrus.Errorf("failed to get the node affinity details: %v", err) + return nil, fmt.Errorf("failed to get the node affinity details for job [%s/%s]", jobOption.Namespace, jobName) + } + job.Spec.Template.Spec.Affinity = &corev1.Affinity{ + NodeAffinity: nodeAffinity, + } } job.Spec.Template.Spec.Containers[0].Env = env @@ -330,7 +372,7 @@ func toRepoName(pvcName, pvcNamespace string) string { func addVolumeBackupDeleteLabels(jobOpts drivers.JobOpts) map[string]string { labels := make(map[string]string) - labels[utils.BackupObjectNameKey] = jobOpts.BackupObjectName + labels[utils.BackupObjectNameKey] = utils.GetValidLabel(jobOpts.BackupObjectName) labels[utils.BackupObjectUIDKey] = jobOpts.BackupObjectUID return labels } @@ -341,7 +383,7 @@ func addJobLabels(labels map[string]string, jobOpts drivers.JobOpts) map[string] } 
labels[drivers.DriverNameLabel] = drivers.KopiaDelete - labels[utils.BackupObjectNameKey] = jobOpts.BackupObjectName + labels[utils.BackupObjectNameKey] = utils.GetValidLabel(jobOpts.BackupObjectName) labels[utils.BackupObjectUIDKey] = jobOpts.BackupObjectUID return labels } diff --git a/vendor/github.com/portworx/kdmp/pkg/drivers/kopiamaintenance/kopiamaintenance.go b/vendor/github.com/portworx/kdmp/pkg/drivers/kopiamaintenance/kopiamaintenance.go index 0457394876..47e4bd7f28 100644 --- a/vendor/github.com/portworx/kdmp/pkg/drivers/kopiamaintenance/kopiamaintenance.go +++ b/vendor/github.com/portworx/kdmp/pkg/drivers/kopiamaintenance/kopiamaintenance.go @@ -70,6 +70,14 @@ func (d Driver) StartJob(opts ...drivers.JobOption) (id string, err error) { return "", fmt.Errorf(errMsg) } + // Create PV & PVC only in case of NFS. + if o.NfsServer != "" { + err := utils.CreateNFSPvPvcForJob(jobName, o.JobNamespace, o) + if err != nil { + return "", err + } + } + if requiresV1 { jobV1 := job.(*batchv1.CronJob) _, err = batch.Instance().CreateCronJob(jobV1) @@ -196,19 +204,22 @@ func jobFor( jobOption.MaintenanceType, }, " ") - imageRegistry, imageRegistrySecret, err := utils.GetKopiaExecutorImageRegistryAndSecret( + kopiaExecutorImage, imageRegistrySecret, err := utils.GetExecutorImageAndSecret(drivers.KopiaExecutorImage, jobOption.KopiaImageExecutorSource, jobOption.KopiaImageExecutorSourceNs, - ) + jobName, + jobOption) if err != nil { - logrus.Errorf("jobFor: getting kopia image registry and image secret failed during maintenance: %v", err) - return nil, err + errMsg := fmt.Errorf("failed to get the executor image details for job %s", jobName) + logrus.Errorf("%v", errMsg) + return nil, errMsg } - var kopiaExecutorImage string - if len(imageRegistry) != 0 { - kopiaExecutorImage = fmt.Sprintf("%s/%s", imageRegistry, utils.GetKopiaExecutorImageName()) - } else { - kopiaExecutorImage = utils.GetKopiaExecutorImageName() + + tolerations, err := utils.GetTolerationsFromDeployment(jobOption.KopiaImageExecutorSource, + jobOption.KopiaImageExecutorSourceNs) + if err != nil { + logrus.Errorf("failed to get the toleration details: %v", err) + return nil, fmt.Errorf("failed to get the toleration details for job [%s/%s]", jobOption.Namespace, jobName) } jobObjectMeta := metav1.ObjectMeta{ @@ -246,6 +257,7 @@ func jobFor( }, }, }, + Tolerations: tolerations, Volumes: []corev1.Volume{ { Name: "cred-secret", @@ -257,10 +269,30 @@ func jobFor( }, }, } - var volumeMount corev1.VolumeMount var volume corev1.Volume var env []corev1.EnvVar + + if len(jobOption.NfsServer) != 0 { + volumeMount = corev1.VolumeMount{ + Name: utils.NfsVolumeName, + MountPath: drivers.NfsMount, + } + jobSpec.Containers[0].VolumeMounts = append( + jobSpec.Containers[0].VolumeMounts, + volumeMount, + ) + volume = corev1.Volume{ + Name: utils.NfsVolumeName, + VolumeSource: corev1.VolumeSource{ + PersistentVolumeClaim: &corev1.PersistentVolumeClaimVolumeSource{ + ClaimName: utils.GetPvcNameForJob(jobName), + }, + }, + } + jobSpec.Volumes = append(jobSpec.Volumes, volume) + } + if drivers.CertFilePath != "" { volumeMount = corev1.VolumeMount{ Name: utils.TLSCertMountVol, @@ -309,6 +341,16 @@ func jobFor( }, }, } + } else { + nodeAffinity, err := utils.GetNodeAffinityFromDeployment(jobOption.KopiaImageExecutorSource, + jobOption.KopiaImageExecutorSourceNs) + if err != nil { + logrus.Errorf("failed to get the node affinity details: %v", err) + return nil, fmt.Errorf("failed to get the node affinity details for job [%s/%s]", jobOption.Namespace, 
jobName) + } + jobSpec.Affinity = &corev1.Affinity{ + NodeAffinity: nodeAffinity, + } } if requiresV1 { diff --git a/vendor/github.com/portworx/kdmp/pkg/drivers/kopiarestore/kopiarestore.go b/vendor/github.com/portworx/kdmp/pkg/drivers/kopiarestore/kopiarestore.go index ed5beddff1..23f1fd2fe1 100644 --- a/vendor/github.com/portworx/kdmp/pkg/drivers/kopiarestore/kopiarestore.go +++ b/vendor/github.com/portworx/kdmp/pkg/drivers/kopiarestore/kopiarestore.go @@ -70,6 +70,15 @@ func (d Driver) StartJob(opts ...drivers.JobOption) (id string, err error) { if err != nil { return "", err } + + // Create PV & PVC only in case of NFS. + if o.NfsServer != "" { + err := utils.CreateNFSPvPvcForJob(jobName, job.ObjectMeta.Namespace, o) + if err != nil { + return "", err + } + } + if _, err = batch.Instance().CreateJob(job); err != nil && !apierrors.IsAlreadyExists(err) { return "", err } @@ -191,28 +200,22 @@ func jobFor( vb.Status.SnapshotID, }, " ") - imageRegistry, imageRegistrySecret, err := utils.GetKopiaExecutorImageRegistryAndSecret( + kopiaExecutorImage, _, err := utils.GetExecutorImageAndSecret(drivers.KopiaExecutorImage, jobOption.KopiaImageExecutorSource, jobOption.KopiaImageExecutorSourceNs, - ) + jobName, + jobOption) if err != nil { - logrus.Errorf("jobFor: getting kopia image registry and image secret failed during restore: %v", err) - return nil, err - } - if len(imageRegistrySecret) != 0 { - err = utils.CreateImageRegistrySecret(imageRegistrySecret, jobName, jobOption.KopiaImageExecutorSourceNs, jobOption.Namespace) - if err != nil { - return nil, err - } + errMsg := fmt.Errorf("failed to get the executor image details for job %s", jobName) + logrus.Errorf("%v", errMsg) + return nil, errMsg } - - var kopiaExecutorImage string - if len(imageRegistry) != 0 { - kopiaExecutorImage = fmt.Sprintf("%s/%s", imageRegistry, utils.GetKopiaExecutorImageName()) - } else { - kopiaExecutorImage = utils.GetKopiaExecutorImageName() + tolerations, err := utils.GetTolerationsFromDeployment(jobOption.KopiaImageExecutorSource, + jobOption.KopiaImageExecutorSourceNs) + if err != nil { + logrus.Errorf("failed to get the toleration details: %v", err) + return nil, fmt.Errorf("failed to get the toleration details for job [%s/%s]", jobOption.Namespace, jobName) } - job := &batchv1.Job{ ObjectMeta: metav1.ObjectMeta{ Name: jobName, @@ -257,6 +260,7 @@ func jobFor( }, }, }, + Tolerations: tolerations, Volumes: []corev1.Volume{ { Name: "vol", @@ -313,6 +317,26 @@ func jobFor( job.Spec.Template.Spec.Containers[0].Env = env } + if len(jobOption.NfsServer) != 0 { + volumeMount := corev1.VolumeMount{ + Name: utils.NfsVolumeName, + MountPath: drivers.NfsMount, + } + job.Spec.Template.Spec.Containers[0].VolumeMounts = append( + job.Spec.Template.Spec.Containers[0].VolumeMounts, + volumeMount, + ) + volume := corev1.Volume{ + Name: utils.NfsVolumeName, + VolumeSource: corev1.VolumeSource{ + PersistentVolumeClaim: &corev1.PersistentVolumeClaimVolumeSource{ + ClaimName: utils.GetPvcNameForJob(jobName), + }, + }, + } + job.Spec.Template.Spec.Volumes = append(job.Spec.Template.Spec.Volumes, volume) + } + return job, nil } diff --git a/vendor/github.com/portworx/kdmp/pkg/drivers/nfsbackup/nfsbackup.go b/vendor/github.com/portworx/kdmp/pkg/drivers/nfsbackup/nfsbackup.go new file mode 100644 index 0000000000..0e5453631c --- /dev/null +++ b/vendor/github.com/portworx/kdmp/pkg/drivers/nfsbackup/nfsbackup.go @@ -0,0 +1,284 @@ +package nfsbackup + +import ( + "fmt" + "strings" + + "github.com/portworx/kdmp/pkg/drivers" + 
"github.com/portworx/kdmp/pkg/drivers/utils" + "github.com/portworx/sched-ops/k8s/batch" + "github.com/portworx/sched-ops/k8s/kdmp" + + "github.com/sirupsen/logrus" + batchv1 "k8s.io/api/batch/v1" + corev1 "k8s.io/api/core/v1" + rbacv1 "k8s.io/api/rbac/v1" + apierrors "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// Driver is a nfsbackup implementation of the data export interface. +type Driver struct{} + +// Name returns a name of the driver. +func (d Driver) Name() string { + return drivers.NFSBackup +} + +// StartJob creates a job for data transfer between volumes. +func (d Driver) StartJob(opts ...drivers.JobOption) (id string, err error) { + // FOr every ns to be backed up a new job should be created + funct := "NfsStartJob" + logrus.Infof("Inside function %s", funct) + o := drivers.JobOpts{} + for _, opt := range opts { + if opt != nil { + if err := opt(&o); err != nil { + return "", err + } + } + } + + job, err := buildJob(o) + if err != nil { + return "", err + } + + // Create PV & PVC only in case of NFS. + jobName := o.RestoreExportName + if o.NfsServer != "" { + err := utils.CreateNFSPvPvcForJob(jobName, job.ObjectMeta.Namespace, o) + if err != nil { + return "", err + } + } + + if _, err = batch.Instance().CreateJob(job); err != nil && !apierrors.IsAlreadyExists(err) { + errMsg := fmt.Sprintf("creation of nfs backup job %s failed: %v", o.RestoreExportName, err) + logrus.Errorf("%s: %v", funct, errMsg) + return "", fmt.Errorf(errMsg) + } + + return utils.NamespacedName(job.Namespace, job.Name), nil +} + +// DeleteJob stops data transfer between volumes. +func (d Driver) DeleteJob(id string) error { + + return nil +} + +// JobStatus returns a progress status for a data transfer. +func (d Driver) JobStatus(id string) (*drivers.JobStatus, error) { + fn := "JobStatus" + namespace, name, err := utils.ParseJobID(id) + if err != nil { + return utils.ToJobStatus(0, err.Error(), batchv1.JobConditionType("")), nil + } + + job, err := batch.Instance().GetJob(name, namespace) + if err != nil { + errMsg := fmt.Sprintf("failed to fetch backup %s/%s job: %v", namespace, name, err) + logrus.Errorf("%s: %v", fn, errMsg) + return nil, fmt.Errorf(errMsg) + } + var jobStatus batchv1.JobConditionType + if len(job.Status.Conditions) != 0 { + jobStatus = job.Status.Conditions[0].Type + + } + err = utils.JobNodeExists(job) + if err != nil { + errMsg := fmt.Sprintf("failed to fetch the node info tied to the job %s/%s: %v", namespace, name, err) + logrus.Errorf("%s: %v", fn, errMsg) + return nil, fmt.Errorf(errMsg) + } + jobErr, nodeErr := utils.IsJobOrNodeFailed(job) + var errMsg string + if jobErr { + errMsg = fmt.Sprintf("check %s/%s job for details: %s", namespace, name, drivers.ErrJobFailed) + return utils.ToJobStatus(0, errMsg, jobStatus), nil + } + if nodeErr { + errMsg = fmt.Sprintf("Node [%v] on which job [%v/%v] schedules is NotReady", job.Spec.Template.Spec.NodeName, namespace, name) + return utils.ToJobStatus(0, errMsg, jobStatus), nil + } + + res, err := kdmp.Instance().GetResourceBackup(name, namespace) + if err != nil { + if apierrors.IsNotFound(err) { + if utils.IsJobPending(job) { + logrus.Warnf("backup job %s is in pending state", job.Name) + return utils.ToJobStatus(0, err.Error(), jobStatus), nil + } + } + } + logrus.Tracef("res.Status: %v", res.Status) + return utils.ToJobStatus(res.Status.ProgressPercentage, res.Status.Reason, jobStatus), nil +} + +func buildJob( + jobOptions drivers.JobOpts, +) (*batchv1.Job, error) { + funct := 
"NfsbuildJob" + // Setup service account using same role permission as stork role + logrus.Infof("Inside %s function", funct) + if err := utils.SetupNFSServiceAccount(jobOptions.RestoreExportName, jobOptions.Namespace, roleFor()); err != nil { + errMsg := fmt.Sprintf("error creating service account %s/%s: %v", jobOptions.Namespace, jobOptions.RestoreExportName, err) + logrus.Errorf("%s: %v", funct, errMsg) + return nil, fmt.Errorf(errMsg) + } + + resources, err := utils.NFSResourceRequirements(jobOptions.JobConfigMap, jobOptions.JobConfigMapNs) + if err != nil { + return nil, err + } + + job, err := jobForBackupResource(jobOptions, resources) + if err != nil { + errMsg := fmt.Sprintf("building resource backup job %s failed: %v", jobOptions.RestoreExportName, err) + logrus.Errorf("%s: %v", funct, errMsg) + return nil, fmt.Errorf(errMsg) + } + + return job, nil +} + +func roleFor() *rbacv1.ClusterRole { + role := &rbacv1.ClusterRole{ + Rules: []rbacv1.PolicyRule{ + { + APIGroups: []string{"*"}, + Resources: []string{"*"}, + Verbs: []string{rbacv1.VerbAll}, + }, + }, + } + + return role +} + +func addJobLabels(labels map[string]string) map[string]string { + if labels == nil { + labels = make(map[string]string) + } + + labels[drivers.DriverNameLabel] = drivers.NFSBackup + return labels +} + +func jobForBackupResource( + jobOption drivers.JobOpts, + resources corev1.ResourceRequirements, +) (*batchv1.Job, error) { + cmd := strings.Join([]string{ + "/nfsexecutor", + "backup", + "--app-cr-name", + jobOption.AppCRName, + "--backup-namespace", + jobOption.AppCRNamespace, + // resourcebackup CR name + "--rb-cr-name", + jobOption.ResoureBackupName, + // resourcebackup CR namespace + "--rb-cr-namespace", + jobOption.ResoureBackupNamespace, + }, " ") + + labels := addJobLabels(jobOption.Labels) + + nfsExecutorImage, _, err := utils.GetExecutorImageAndSecret(drivers.NfsExecutorImage, + jobOption.NfsImageExecutorSource, + jobOption.NfsImageExecutorSourceNs, + jobOption.RestoreExportName, + jobOption) + + if err != nil { + logrus.Errorf("failed to get the executor image details") + return nil, fmt.Errorf("failed to get the executor image details for job %s", jobOption.JobName) + } + tolerations, err := utils.GetTolerationsFromDeployment(jobOption.NfsImageExecutorSource, + jobOption.NfsImageExecutorSourceNs) + if err != nil { + logrus.Errorf("failed to get the toleration details: %v", err) + return nil, fmt.Errorf("failed to get the toleration details for job [%s/%s]", jobOption.Namespace, jobOption.RestoreExportName) + } + job := &batchv1.Job{ + ObjectMeta: metav1.ObjectMeta{ + Name: jobOption.RestoreExportName, + Namespace: jobOption.Namespace, + Annotations: map[string]string{ + utils.SkipResourceAnnotation: "true", + }, + Labels: labels, + }, + Spec: batchv1.JobSpec{ + BackoffLimit: &utils.JobPodBackOffLimit, + Template: corev1.PodTemplateSpec{ + ObjectMeta: metav1.ObjectMeta{ + Labels: labels, + }, + Spec: corev1.PodSpec{ + RestartPolicy: corev1.RestartPolicyOnFailure, + ImagePullSecrets: utils.ToImagePullSecret(utils.GetImageSecretName(jobOption.RestoreExportName)), + ServiceAccountName: jobOption.RestoreExportName, + Containers: []corev1.Container{ + { + Name: drivers.NfsExecutorImage, + Image: nfsExecutorImage, + ImagePullPolicy: corev1.PullAlways, + Command: []string{ + "/bin/sh", + "-x", + "-c", + cmd, + }, + Resources: resources, + VolumeMounts: []corev1.VolumeMount{ + { + Name: "cred-secret", + MountPath: drivers.KopiaCredSecretMount, + ReadOnly: true, + }, + }, + }, + }, + Tolerations: 
tolerations, + Volumes: []corev1.Volume{ + { + Name: "cred-secret", + VolumeSource: corev1.VolumeSource{ + Secret: &corev1.SecretVolumeSource{ + SecretName: utils.GetCredSecretName(jobOption.RestoreExportName), + }, + }, + }, + }, + }, + }, + }, + } + if len(jobOption.NfsServer) != 0 { + volumeMount := corev1.VolumeMount{ + Name: utils.NfsVolumeName, + MountPath: drivers.NfsMount, + } + job.Spec.Template.Spec.Containers[0].VolumeMounts = append( + job.Spec.Template.Spec.Containers[0].VolumeMounts, + volumeMount, + ) + volume := corev1.Volume{ + Name: utils.NfsVolumeName, + VolumeSource: corev1.VolumeSource{ + PersistentVolumeClaim: &corev1.PersistentVolumeClaimVolumeSource{ + ClaimName: utils.GetPvcNameForJob(jobOption.RestoreExportName), + }, + }, + } + + job.Spec.Template.Spec.Volumes = append(job.Spec.Template.Spec.Volumes, volume) + } + + return job, nil +} diff --git a/vendor/github.com/portworx/kdmp/pkg/drivers/nfsdelete/nfsdelete.go b/vendor/github.com/portworx/kdmp/pkg/drivers/nfsdelete/nfsdelete.go new file mode 100644 index 0000000000..a4964a5a7e --- /dev/null +++ b/vendor/github.com/portworx/kdmp/pkg/drivers/nfsdelete/nfsdelete.go @@ -0,0 +1,269 @@ +package nfsdelete + +import ( + "fmt" + "strings" + "sync" + + "github.com/portworx/kdmp/pkg/drivers" + "github.com/portworx/kdmp/pkg/drivers/utils" + "github.com/portworx/kdmp/pkg/jobratelimit" + "github.com/portworx/sched-ops/k8s/batch" + "github.com/sirupsen/logrus" + batchv1 "k8s.io/api/batch/v1" + corev1 "k8s.io/api/core/v1" + apierrors "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// Driver is an implementation of resource delete in NFS. +type Driver struct{} + +// Name returns a name of the driver. +func (d Driver) Name() string { + return drivers.NFSDelete +} + +var deleteJobLock sync.Mutex + +// StartJob creates a job for resource delete. +func (d Driver) StartJob(opts ...drivers.JobOption) (id string, err error) { + fn := "StartJob" + deleteJobLock.Lock() + defer deleteJobLock.Unlock() + o := drivers.JobOpts{} + for _, opt := range opts { + if opt != nil { + if err := opt(&o); err != nil { + return "", err + } + } + } + // Check whether there is slot to schedule delete job. + available, err := jobratelimit.CanJobBeScheduled(d.Name()) + if err != nil { + logrus.Errorf("%v", err) + return "", err + } + if !available { + return "", utils.ErrOutOfJobResources + } + + job, err := buildJob(o) + if err != nil { + errMsg := fmt.Sprintf("building of resource delete job [%s] failed: %v", o.JobName, err) + logrus.Errorf("%s %v", fn, errMsg) + return "", fmt.Errorf(errMsg) + } + + // Create PV & PVC only in case of NFS. + if o.NfsServer != "" { + err := utils.CreateNFSPvPvcForJob(o.JobName, job.ObjectMeta.Namespace, o) + if err != nil { + return "", err + } + } + + if _, err = batch.Instance().CreateJob(job); err != nil && !apierrors.IsAlreadyExists(err) { + errMsg := fmt.Sprintf("creation of resource delete job %s failed: %v", job.Name, err) + logrus.Errorf("%s: %v", fn, errMsg) + return "", fmt.Errorf(errMsg) + } + + logrus.Infof("%s created resource delete job [%s] successfully", fn, job.Name) + return utils.NamespacedName(job.Namespace, job.Name), nil + +} + +// DeleteJob deletes the resource delete job.
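+// The id below is the "<namespace>/<name>" string returned by StartJob via +// utils.NamespacedName; utils.ParseJobID splits it back into its parts before +// the batch Job is removed.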
+func (d Driver) DeleteJob(id string) error { + fn := "DeleteJob:" + namespace, name, err := utils.ParseJobID(id) + if err != nil { + logrus.Errorf("%s %v", fn, err) + return err + } + if err = batch.Instance().DeleteJob(name, namespace); err != nil && !apierrors.IsNotFound(err) { + errMsg := fmt.Sprintf("deletion of resource delete job [%s/%s] failed: %v", namespace, name, err) + logrus.Errorf("%s: %v", fn, errMsg) + return fmt.Errorf(errMsg) + } + + return nil +} + +// JobStatus fetches job status +func (d Driver) JobStatus(id string) (*drivers.JobStatus, error) { + fn := "JobStatus" + namespace, name, err := utils.ParseJobID(id) + if err != nil { + return utils.ToJobStatus(0, err.Error(), batchv1.JobConditionType("")), nil + } + + job, err := batch.Instance().GetJob(name, namespace) + if err != nil { + errMsg := fmt.Sprintf("failed to fetch resource delete %s/%s job: %v", namespace, name, err) + logrus.Errorf("%s: %v", fn, errMsg) + return nil, fmt.Errorf(errMsg) + } + err = utils.JobNodeExists(job) + if err != nil { + errMsg := fmt.Sprintf("failed to fetch the node info tied to the job %s/%s: %v", namespace, name, err) + logrus.Errorf("%s: %v", fn, errMsg) + return nil, fmt.Errorf(errMsg) + } + var jobStatus batchv1.JobConditionType + if len(job.Status.Conditions) != 0 { + jobStatus = job.Status.Conditions[0].Type + + } + + if utils.IsJobFailed(job) { + errMsg := fmt.Sprintf("check %s/%s job for details: %s", namespace, name, drivers.ErrJobFailed) + return utils.ToJobStatus(0, errMsg, jobStatus), nil + } + if utils.IsJobCompleted(job) { + return utils.ToJobStatus(drivers.TransferProgressCompleted, "", jobStatus), nil + } + return utils.ToJobStatus(0, "", jobStatus), nil +} + +func buildJob( + jobOptions drivers.JobOpts, +) (*batchv1.Job, error) { + resources, err := utils.NFSResourceRequirements(jobOptions.JobConfigMap, jobOptions.JobConfigMapNs) + if err != nil { + return nil, err + } + labels := addJobLabels(jobOptions.Labels, jobOptions) + return jobForDeleteResource(jobOptions, resources, labels) +} + +func addJobLabels(labels map[string]string, jobOpts drivers.JobOpts) map[string]string { + if labels == nil { + labels = make(map[string]string) + } + + labels[drivers.DriverNameLabel] = drivers.NFSDelete + labels[utils.BackupObjectNameKey] = jobOpts.BackupObjectName + labels[utils.BackupObjectUIDKey] = jobOpts.BackupObjectUID + return labels +} + +func jobForDeleteResource( + jobOption drivers.JobOpts, + resources corev1.ResourceRequirements, + labels map[string]string, +) (*batchv1.Job, error) { + cmd := strings.Join([]string{ + "/nfsexecutor", + "delete", + "--app-cr-name", + jobOption.AppCRName, + "--namespace", + jobOption.AppCRNamespace, + }, " ") + + nfsExecutorImage, _, err := utils.GetExecutorImageAndSecret(drivers.NfsExecutorImage, + jobOption.NfsImageExecutorSource, + jobOption.NfsImageExecutorSourceNs, + jobOption.JobName, + jobOption) + if err != nil { + logrus.Errorf("failed to get the executor image details") + return nil, fmt.Errorf("failed to get the executor image details for job %s", jobOption.JobName) + } + tolerations, err := utils.GetTolerationsFromDeployment(jobOption.NfsImageExecutorSource, + jobOption.NfsImageExecutorSourceNs) + if err != nil { + logrus.Errorf("failed to get the toleration details: %v", err) + return nil, fmt.Errorf("failed to get the toleration details for job [%s/%s]", jobOption.Namespace, jobOption.JobName) + } + job := &batchv1.Job{ + ObjectMeta: metav1.ObjectMeta{ + Name: jobOption.JobName, + Namespace: jobOption.JobNamespace, + 
Annotations: map[string]string{ + utils.SkipResourceAnnotation: "true", + }, + Labels: labels, + }, + Spec: batchv1.JobSpec{ + BackoffLimit: &utils.JobPodBackOffLimit, + Template: corev1.PodTemplateSpec{ + ObjectMeta: metav1.ObjectMeta{ + Labels: labels, + }, + Spec: corev1.PodSpec{ + RestartPolicy: corev1.RestartPolicyOnFailure, + ImagePullSecrets: utils.ToImagePullSecret(utils.GetImageSecretName(jobOption.JobName)), + ServiceAccountName: jobOption.ServiceAccountName, + Containers: []corev1.Container{ + { + Name: "nfsexecutor", + Image: nfsExecutorImage, + ImagePullPolicy: corev1.PullAlways, + Command: []string{ + "/bin/sh", + "-x", + "-c", + cmd, + }, + Resources: resources, + VolumeMounts: []corev1.VolumeMount{ + { + Name: "cred-secret", + MountPath: drivers.KopiaCredSecretMount, + ReadOnly: true, + }, + }, + }, + }, + Tolerations: tolerations, + Volumes: []corev1.Volume{ + { + Name: "cred-secret", + VolumeSource: corev1.VolumeSource{ + Secret: &corev1.SecretVolumeSource{ + SecretName: jobOption.CredSecretName, + }, + }, + }, + }, + }, + }, + }, + } + if len(jobOption.NfsServer) != 0 { + volumeMount := corev1.VolumeMount{ + Name: utils.NfsVolumeName, + MountPath: drivers.NfsMount, + } + job.Spec.Template.Spec.Containers[0].VolumeMounts = append( + job.Spec.Template.Spec.Containers[0].VolumeMounts, + volumeMount, + ) + volume := corev1.Volume{ + Name: utils.NfsVolumeName, + VolumeSource: corev1.VolumeSource{ + PersistentVolumeClaim: &corev1.PersistentVolumeClaimVolumeSource{ + ClaimName: utils.GetPvcNameForJob(jobOption.JobName), + }, + }, + } + + job.Spec.Template.Spec.Volumes = append(job.Spec.Template.Spec.Volumes, volume) + } + + nodeAffinity, err := utils.GetNodeAffinityFromDeployment(jobOption.NfsImageExecutorSource, + jobOption.NfsImageExecutorSourceNs) + if err != nil { + logrus.Errorf("failed to get the node affinity details: %v", err) + return nil, fmt.Errorf("failed to get the node affinity details for job [%s/%s]", jobOption.Namespace, jobOption.JobName) + } + job.Spec.Template.Spec.Affinity = &corev1.Affinity{ + NodeAffinity: nodeAffinity, + } + + return job, nil +} diff --git a/vendor/github.com/portworx/kdmp/pkg/drivers/nfsrestore/nfsrestore.go b/vendor/github.com/portworx/kdmp/pkg/drivers/nfsrestore/nfsrestore.go new file mode 100644 index 0000000000..12409df44b --- /dev/null +++ b/vendor/github.com/portworx/kdmp/pkg/drivers/nfsrestore/nfsrestore.go @@ -0,0 +1,303 @@ +package nfsrestore + +import ( + "fmt" + "strings" + + storkapi "github.com/libopenstorage/stork/pkg/apis/stork/v1alpha1" + "github.com/portworx/kdmp/pkg/drivers" + "github.com/portworx/kdmp/pkg/drivers/utils" + "github.com/portworx/sched-ops/k8s/batch" + "github.com/portworx/sched-ops/k8s/kdmp" + storkops "github.com/portworx/sched-ops/k8s/stork" + + "github.com/sirupsen/logrus" + batchv1 "k8s.io/api/batch/v1" + corev1 "k8s.io/api/core/v1" + rbacv1 "k8s.io/api/rbac/v1" + apierrors "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// Driver is an nfsrestore implementation of the data export interface. +type Driver struct{} + +// Name returns a name of the driver. +func (d Driver) Name() string { + return drivers.NFSRestore +} + +// StartJob creates a job that restores resources from the NFS backup location. 
+func (d Driver) StartJob(opts ...drivers.JobOption) (id string, err error) { + // For every namespace to be restored, a new job should be created. + funct := "NfsStartJob" + logrus.Infof("Inside function %s", funct) + o := drivers.JobOpts{} + for _, opt := range opts { + if opt != nil { + if err := opt(&o); err != nil { + return "", err + } + } + } + + job, err := buildJob(o) + if err != nil { + return "", err + } + // Create PV & PVC only in case of NFS. + if o.NfsServer != "" { + err := utils.CreateNFSPvPvcForJob(o.RestoreExportName, job.ObjectMeta.Namespace, o) + if err != nil { + return "", err + } + } + + if _, err = batch.Instance().CreateJob(job); err != nil && !apierrors.IsAlreadyExists(err) { + errMsg := fmt.Sprintf("creation of restore job %s failed: %v", o.RestoreExportName, err) + logrus.Errorf("%s: %v", funct, errMsg) + return "", fmt.Errorf(errMsg) + } + + return utils.NamespacedName(job.Namespace, job.Name), nil +} + +// DeleteJob is a no-op for the NFS restore driver. +func (d Driver) DeleteJob(id string) error { + return nil +} + +// JobStatus returns a progress status for a data transfer. +func (d Driver) JobStatus(id string) (*drivers.JobStatus, error) { + fn := "JobStatus" + namespace, name, err := utils.ParseJobID(id) + if err != nil { + return utils.ToJobStatus(0, err.Error(), batchv1.JobConditionType("")), nil + } + + job, err := batch.Instance().GetJob(name, namespace) + if err != nil { + errMsg := fmt.Sprintf("failed to fetch restore %s/%s job: %v", namespace, name, err) + logrus.Errorf("%s: %v", fn, errMsg) + return nil, fmt.Errorf(errMsg) + } + var jobStatus batchv1.JobConditionType + if len(job.Status.Conditions) != 0 { + jobStatus = job.Status.Conditions[0].Type + } + err = utils.JobNodeExists(job) + if err != nil { + errMsg := fmt.Sprintf("failed to fetch the node info tied to the job %s/%s: %v", namespace, name, err) + logrus.Errorf("%s: %v", fn, errMsg) + return nil, fmt.Errorf(errMsg) + } + jobErr, nodeErr := utils.IsJobOrNodeFailed(job) + + var errMsg string + if jobErr { + errMsg = fmt.Sprintf("check %s/%s job for details: %s", namespace, name, drivers.ErrJobFailed) + return utils.ToJobStatus(0, errMsg, jobStatus), nil + } + if nodeErr { + errMsg = fmt.Sprintf("Node [%v] on which job [%v/%v] schedules is NotReady", job.Spec.Template.Spec.NodeName, namespace, name) + return utils.ToJobStatus(0, errMsg, jobStatus), nil + } + + res, err := kdmp.Instance().GetResourceBackup(name, namespace) + if err != nil { + if apierrors.IsNotFound(err) && utils.IsJobPending(job) { + logrus.Warnf("restore job %s is in pending state", job.Name) + return utils.ToJobStatus(0, err.Error(), jobStatus), nil + } + // res is nil on error; return instead of dereferencing it below. + return nil, err + } + logrus.Tracef("%s jobStatus:%v", fn, jobStatus) + return utils.ToJobStatus(res.Status.ProgressPercentage, res.Status.Reason, jobStatus), nil +} + +func buildJob( + jobOptions drivers.JobOpts, +) (*batchv1.Job, error) { + funct := "NfsbuildJob" + // Setup service account using same role permission as stork role + logrus.Infof("Inside %s function", funct) + if err := utils.SetupNFSServiceAccount(jobOptions.RestoreExportName, jobOptions.Namespace, roleFor()); err != nil { + errMsg := fmt.Sprintf("error creating service account %s/%s: %v", jobOptions.Namespace, jobOptions.RestoreExportName, err) + logrus.Errorf("%s: %v", funct, errMsg) + return nil, fmt.Errorf(errMsg) + } + + resources, err := utils.NFSResourceRequirements(jobOptions.JobConfigMap, jobOptions.JobConfigMapNs) + if err != nil { + return nil, err + } + job, err := jobForRestoreResource(jobOptions, 
resources) + if err != nil { + errMsg := fmt.Sprintf("building resource backup job %s failed: %v", jobOptions.RestoreExportName, err) + logrus.Errorf("%s: %v", funct, errMsg) + return nil, fmt.Errorf(errMsg) + } + + return job, nil +} + +func roleFor() *rbacv1.ClusterRole { + role := &rbacv1.ClusterRole{ + Rules: []rbacv1.PolicyRule{ + { + APIGroups: []string{"*"}, + Resources: []string{"*"}, + Verbs: []string{rbacv1.VerbAll}, + }, + }, + } + + return role +} + +func addJobLabels(labels map[string]string) map[string]string { + if labels == nil { + labels = make(map[string]string) + } + + labels[drivers.DriverNameLabel] = drivers.NFSRestore + return labels +} + +func jobForRestoreResource( + jobOption drivers.JobOpts, + resources corev1.ResourceRequirements, +) (*batchv1.Job, error) { + funct := "jobForRestoreResource" + // Read the ApplicationRestore stage and decide which restore operation to perform + restoreCR, err := storkops.Instance().GetApplicationRestore(jobOption.AppCRName, jobOption.AppCRNamespace) + if err != nil { + logrus.Errorf("%s: Error getting restore cr[%v/%v]: %v", funct, jobOption.AppCRNamespace, jobOption.AppCRName, err) + return nil, err + } + var opType string + switch restoreCR.Status.Stage { + case storkapi.ApplicationRestoreStageVolumes: + opType = "restore-vol" + case storkapi.ApplicationRestoreStageApplications: + opType = "restore" + default: + errMsg := fmt.Sprintf("invalid stage %v in applicationRestore CR[%v/%v]:", + restoreCR.Status.Stage, jobOption.AppCRNamespace, jobOption.AppCRName) + logrus.Errorf("%v", errMsg) + return nil, fmt.Errorf(errMsg) + } + + cmd := strings.Join([]string{ + "/nfsexecutor", + opType, + "--app-cr-name", + jobOption.AppCRName, + "--restore-namespace", + jobOption.AppCRNamespace, + // resourcebackup CR name + "--rb-cr-name", + jobOption.ResoureBackupName, + // resourcebackup CR namespace + "--rb-cr-namespace", + jobOption.ResoureBackupNamespace, + }, " ") + + labels := addJobLabels(jobOption.Labels) + + nfsExecutorImage, _, err := utils.GetExecutorImageAndSecret(drivers.NfsExecutorImage, + jobOption.NfsImageExecutorSource, + jobOption.NfsImageExecutorSourceNs, + jobOption.RestoreExportName, + jobOption) + if err != nil { + logrus.Errorf("failed to get the executor image details") + return nil, fmt.Errorf("failed to get the executor image details for job %s", jobOption.JobName) + } + tolerations, err := utils.GetTolerationsFromDeployment(jobOption.NfsImageExecutorSource, + jobOption.NfsImageExecutorSourceNs) + if err != nil { + logrus.Errorf("failed to get the toleration details: %v", err) + return nil, fmt.Errorf("failed to get the toleration details for job [%s/%s]", jobOption.Namespace, jobOption.RestoreExportName) + } + job := &batchv1.Job{ + ObjectMeta: metav1.ObjectMeta{ + Name: jobOption.RestoreExportName, + Namespace: jobOption.Namespace, + Annotations: map[string]string{ + utils.SkipResourceAnnotation: "true", + }, + Labels: labels, + }, + Spec: batchv1.JobSpec{ + BackoffLimit: &utils.JobPodBackOffLimit, + Template: corev1.PodTemplateSpec{ + ObjectMeta: metav1.ObjectMeta{ + Labels: labels, + }, + Spec: corev1.PodSpec{ + RestartPolicy: corev1.RestartPolicyOnFailure, + ImagePullSecrets: utils.ToImagePullSecret(utils.GetImageSecretName(jobOption.RestoreExportName)), + ServiceAccountName: jobOption.RestoreExportName, + Containers: []corev1.Container{ + { + Name: drivers.NfsExecutorImage, + Image: nfsExecutorImage, + ImagePullPolicy: corev1.PullAlways, + Command: []string{ + "/bin/sh", + "-x", + "-c", + cmd, + }, + Resources: 
resources, + VolumeMounts: []corev1.VolumeMount{ + { + Name: "cred-secret", + MountPath: drivers.KopiaCredSecretMount, + ReadOnly: true, + }, + }, + }, + }, + Tolerations: tolerations, + Volumes: []corev1.Volume{ + { + Name: "cred-secret", + VolumeSource: corev1.VolumeSource{ + Secret: &corev1.SecretVolumeSource{ + SecretName: utils.GetCredSecretName(jobOption.RestoreExportName), + }, + }, + }, + }, + }, + }, + }, + } + if len(jobOption.NfsServer) != 0 { + volumeMount := corev1.VolumeMount{ + Name: utils.NfsVolumeName, + MountPath: drivers.NfsMount, + } + job.Spec.Template.Spec.Containers[0].VolumeMounts = append( + job.Spec.Template.Spec.Containers[0].VolumeMounts, + volumeMount, + ) + volume := corev1.Volume{ + Name: utils.NfsVolumeName, + VolumeSource: corev1.VolumeSource{ + PersistentVolumeClaim: &corev1.PersistentVolumeClaimVolumeSource{ + ClaimName: utils.GetPvcNameForJob(jobOption.RestoreExportName), + }, + }, + } + + job.Spec.Template.Spec.Volumes = append(job.Spec.Template.Spec.Volumes, volume) + } + + return job, nil +} diff --git a/vendor/github.com/portworx/kdmp/pkg/drivers/options.go b/vendor/github.com/portworx/kdmp/pkg/drivers/options.go index e5041b48db..ec7ed018a7 100644 --- a/vendor/github.com/portworx/kdmp/pkg/drivers/options.go +++ b/vendor/github.com/portworx/kdmp/pkg/drivers/options.go @@ -44,7 +44,112 @@ type JobOpts struct { JobConfigMapNs string KopiaImageExecutorSource string KopiaImageExecutorSourceNs string + NfsImageExecutorSource string + NfsImageExecutorSourceNs string NodeAffinity map[string]string + NfsServer string + NfsMountOption string + NfsSubPath string + NfsExportDir string + RestoreExportName string + AppCRName string + AppCRNamespace string + ResoureBackupName string + ResoureBackupNamespace string +} + +// WithResoureBackupName is job parameter +func WithResoureBackupName(name string) JobOption { + return func(opts *JobOpts) error { + opts.ResoureBackupName = strings.TrimSpace(name) + return nil + } +} + +// WithResoureBackupNamespace is job parameter +func WithResoureBackupNamespace(namespace string) JobOption { + return func(opts *JobOpts) error { + opts.ResoureBackupNamespace = strings.TrimSpace(namespace) + return nil + } +} + +// WithAppCRName is job parameter +func WithAppCRName(name string) JobOption { + return func(opts *JobOpts) error { + opts.AppCRName = strings.TrimSpace(name) + return nil + } +} + +// WithAppCRNamespace is job parameter +func WithAppCRNamespace(namespace string) JobOption { + return func(opts *JobOpts) error { + opts.AppCRNamespace = strings.TrimSpace(namespace) + return nil + } +} + +// WithRestoreExport is job parameter +func WithRestoreExport(name string) JobOption { + return func(opts *JobOpts) error { + opts.RestoreExportName = strings.TrimSpace(name) + return nil + } +} + +// WithNfsServer is job parameter. +func WithNfsServer(server string) JobOption { + return func(opts *JobOpts) error { + opts.NfsServer = strings.TrimSpace(server) + return nil + } +} + +// WithNfsMountOption is job parameter. +func WithNfsMountOption(server string) JobOption { + return func(opts *JobOpts) error { + opts.NfsMountOption = strings.TrimSpace(server) + return nil + } +} + +// WithNfsSubPath is job parameter. +func WithNfsSubPath(server string) JobOption { + return func(opts *JobOpts) error { + opts.NfsSubPath = strings.TrimSpace(server) + return nil + } +} + +// WithNfsExportDir is job parameter. 
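+// The export directory typically comes from the backuplocation's Path and, +// together with the address set via WithNfsServer, identifies the NFS mount +// source for the per-job PV.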
+func WithNfsExportDir(exportDir string) JobOption { + return func(opts *JobOpts) error { + opts.NfsExportDir = strings.TrimSpace(exportDir) + return nil + } +} + +// WithNfsImageExecutorSource is job parameter. +func WithNfsImageExecutorSource(source string) JobOption { + return func(opts *JobOpts) error { + if strings.TrimSpace(source) == "" { + return fmt.Errorf("nfs image executor source should be set") + } + opts.NfsImageExecutorSource = strings.TrimSpace(source) + return nil + } +} + +// WithNfsImageExecutorSourceNs is job parameter. +func WithNfsImageExecutorSourceNs(namespace string) JobOption { + return func(opts *JobOpts) error { + if strings.TrimSpace(namespace) == "" { + return fmt.Errorf("nfs image executor source namespace should be set") + } + opts.NfsImageExecutorSourceNs = strings.TrimSpace(namespace) + return nil + } } // WithKopiaImageExecutorSource is job parameter. diff --git a/vendor/github.com/portworx/kdmp/pkg/drivers/utils/common.go b/vendor/github.com/portworx/kdmp/pkg/drivers/utils/common.go index cb564524e2..b3990c9b48 100644 --- a/vendor/github.com/portworx/kdmp/pkg/drivers/utils/common.go +++ b/vendor/github.com/portworx/kdmp/pkg/drivers/utils/common.go @@ -4,6 +4,8 @@ import ( "fmt" "time" + version "github.com/hashicorp/go-version" + storkversion "github.com/libopenstorage/stork/pkg/version" coreops "github.com/portworx/sched-ops/k8s/core" rbacops "github.com/portworx/sched-ops/k8s/rbac" "github.com/portworx/sched-ops/task" @@ -26,15 +28,28 @@ const ( // BackupObjectUIDKey - label key to store backup object uid BackupObjectUIDKey = "backup-object-uid" // TLSCertMountVol mount vol name for tls certificate secret - TLSCertMountVol = "tls-secret" - defaultTimeout = 1 * time.Minute - progressCheckInterval = 5 * time.Second + TLSCertMountVol = "tls-secret" + // NfsVolumeName is the Volume spec's name to be used in kopia Job Spec + NfsVolumeName = "nfs-target" + // DefaultTimeout default timeout for tasks retry + DefaultTimeout = 1 * time.Minute + // ProgressCheckInterval regular interval at which task does a retry + ProgressCheckInterval = 5 * time.Second // KdmpConfigmapName kdmp config map name KdmpConfigmapName = "kdmp-config" // KdmpConfigmapNamespace kdmp config map ns KdmpConfigmapNamespace = "kube-system" // DefaultCompresion default compression type DefaultCompresion = "s2-parallel-8" + // DefaultQPS - default qps value for k8s apis + DefaultQPS = 100 + // DefaultBurst - default burst value for k8s apis + DefaultBurst = 100 + // QPSKey - configmap QPS key name + QPSKey = "K8S_QPS" + // BurstKey - configmap burst key name + BurstKey = "K8S_BURST" + k8sMinVersionSASecretTokenNotSupport = "1.24" ) var ( @@ -42,6 +57,23 @@ var ( JobPodBackOffLimit = int32(10) ) +// isServiceAccountSecretMissing returns true, if the K8s version does not support secret token for the service account. +func isServiceAccountSecretMissing() (bool, error) { + k8sVersion, _, err := storkversion.GetFullVersion() + if err != nil { + return false, err + } + VersionTokenNotSupported, err := version.NewVersion(k8sMinVersionSASecretTokenNotSupport) + if err != nil { + return false, err + + } + if k8sVersion.GreaterThanOrEqual(VersionTokenNotSupported) { + return true, nil + } + return false, nil +} + // SetupServiceAccount create a service account and bind it to a provided role. 
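+// Kubernetes stopped auto-generating ServiceAccount token Secrets in 1.24 +// (LegacyServiceAccountTokenNoAutoGeneration), so on 1.24+ clusters there is +// no sa.Secrets entry to wait for and the token-fetch retry below is skipped.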
func SetupServiceAccount(name, namespace string, role *rbacv1.Role) error { if role != nil { @@ -61,35 +93,39 @@ func SetupServiceAccount(name, namespace string, role *rbacv1.Role) error { if sa, err = coreops.Instance().CreateServiceAccount(serviceAccountFor(name, namespace)); err != nil && !errors.IsAlreadyExists(err) { return fmt.Errorf("create %s/%s serviceaccount: %s", namespace, name, err) } - t := func() (interface{}, bool, error) { - sa, err = coreops.Instance().GetServiceAccount(name, namespace) - if err != nil { - errMsg := fmt.Sprintf("failed fetching sa [%v/%v]: %v", name, namespace, err) - logrus.Tracef("%v", errMsg) - return "", true, fmt.Errorf("%v", errMsg) + // From 1.24.0 onwards service token does not support default secret token + tokenSupported, err := isServiceAccountSecretMissing() + if !tokenSupported { + t := func() (interface{}, bool, error) { + sa, err = coreops.Instance().GetServiceAccount(name, namespace) + if err != nil { + errMsg := fmt.Sprintf("failed fetching sa [%v/%v]: %v", name, namespace, err) + logrus.Tracef("%v", errMsg) + return "", true, fmt.Errorf("%v", errMsg) + } + if sa.Secrets == nil { + errMsg := fmt.Sprintf("secret token is missing in sa [%v/%v]", name, namespace) + return "", true, fmt.Errorf("%v", errMsg) + } + return "", false, nil } - if sa.Secrets == nil { - errMsg := fmt.Sprintf("secret token is missing in sa [%v/%v]", name, namespace) - return "", true, fmt.Errorf("%v", errMsg) + if _, err := task.DoRetryWithTimeout(t, DefaultTimeout, ProgressCheckInterval); err != nil { + errMsg := fmt.Sprintf("max retries done, failed in fetching secret token of sa [%v/%v]: %v ", name, namespace, err) + logrus.Errorf("%v", errMsg) + // Exhausted all retries + return err } - return "", false, nil - } - if _, err := task.DoRetryWithTimeout(t, defaultTimeout, progressCheckInterval); err != nil { - errMsg := fmt.Sprintf("max retries done, failed in fetching secret token of sa [%v/%v]: %v ", name, namespace, err) - logrus.Errorf("%v", errMsg) - // Exhausted all retries - return err - } - tokenName := sa.Secrets[0].Name - secretToken, err := coreops.Instance().GetSecret(tokenName, namespace) - if err != nil { - return fmt.Errorf("failed in getting secretToken [%v] of service account [%v/%v]: %v", tokenName, name, namespace, err) - } - secretToken.Annotations[SkipResourceAnnotation] = "true" - _, err = coreops.Instance().UpdateSecret(secretToken) - if err != nil { - return fmt.Errorf("failed in updating the secretToken [%v] of service account [%v/%v]: %v", tokenName, name, namespace, err) + tokenName := sa.Secrets[0].Name + secretToken, err := coreops.Instance().GetSecret(tokenName, namespace) + if err != nil { + return fmt.Errorf("failed in getting secretToken [%v] of service account [%v/%v]: %v", tokenName, name, namespace, err) + } + secretToken.Annotations[SkipResourceAnnotation] = "true" + _, err = coreops.Instance().UpdateSecret(secretToken) + if err != nil { + return fmt.Errorf("failed in updating the secretToken [%v] of service account [%v/%v]: %v", tokenName, name, namespace, err) + } } return nil } @@ -108,6 +144,68 @@ func CleanServiceAccount(name, namespace string) error { return nil } +// SetupNFSServiceAccount create a service account and bind it to a provided role. 
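+// Unlike SetupServiceAccount above, this variant creates a ClusterRole and +// ClusterRoleBinding, since the NFS executor job reads and writes resources +// across namespaces during backup and restore.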
+func SetupNFSServiceAccount(name, namespace string, role *rbacv1.ClusterRole) error { + if role != nil { + role.Name, role.Namespace = name, namespace + role.Annotations = map[string]string{ + SkipResourceAnnotation: "true", + } + if _, err := rbacops.Instance().CreateClusterRole(role); err != nil && !errors.IsAlreadyExists(err) { + return fmt.Errorf("create %s/%s cluster role: %s", namespace, name, err) + } + if _, err := rbacops.Instance().CreateClusterRoleBinding(clusterRoleBindingFor(name, namespace)); err != nil && !errors.IsAlreadyExists(err) { + return fmt.Errorf("create %s/%s cluster rolebinding: %s", namespace, name, err) + } + } + var sa *corev1.ServiceAccount + var err error + if sa, err = coreops.Instance().CreateServiceAccount(serviceAccountFor(name, namespace)); err != nil && !errors.IsAlreadyExists(err) { + return fmt.Errorf("create %s/%s serviceaccount: %s", namespace, name, err) + } + var errMsg error + // From 1.24.0 onwards service token does not support default secret token + tokenSupported, err := isServiceAccountSecretMissing() + if !tokenSupported { + t := func() (interface{}, bool, error) { + sa, err = coreops.Instance().GetServiceAccount(name, namespace) + if err != nil { + errMsg = fmt.Errorf("failed fetching sa [%v/%v]: %v", name, namespace, err) + logrus.Errorf("%v", errMsg) + return "", true, fmt.Errorf("%v", errMsg) + } + if sa.Secrets == nil { + logrus.Infof("Returned sa-secret null") + errMsg = fmt.Errorf("secret token is missing in sa [%v/%v]", name, namespace) + return "", true, fmt.Errorf("%v", errMsg) + } + return "", false, nil + } + if _, err := task.DoRetryWithTimeout(t, DefaultTimeout, ProgressCheckInterval); err != nil { + eMsg := fmt.Errorf("max retries done, failed in fetching secret token of sa [%v/%v]: %v ", name, namespace, errMsg) + logrus.Errorf("%v", eMsg) + // Exhausted all retries + return eMsg + } + + tokenName := sa.Secrets[0].Name + secretToken, err := coreops.Instance().GetSecret(tokenName, namespace) + if err != nil { + errMsg := fmt.Errorf("failed in getting secretToken [%v] of service account [%v/%v]: %v", tokenName, name, namespace, err) + logrus.Errorf("%v", errMsg) + return errMsg + } + secretToken.Annotations[SkipResourceAnnotation] = "true" + _, err = coreops.Instance().UpdateSecret(secretToken) + if err != nil { + errMsg := fmt.Errorf("failed in updating the secretToken [%v] of service account [%v/%v]: %v", tokenName, name, namespace, err) + logrus.Errorf("%v", errMsg) + return errMsg + } + } + return nil +} + func roleBindingFor(name, namespace string) *rbacv1.RoleBinding { return &rbacv1.RoleBinding{ ObjectMeta: metav1.ObjectMeta{ @@ -132,6 +230,29 @@ func roleBindingFor(name, namespace string) *rbacv1.RoleBinding { } } +func clusterRoleBindingFor(name, namespace string) *rbacv1.ClusterRoleBinding { + return &rbacv1.ClusterRoleBinding{ + ObjectMeta: metav1.ObjectMeta{ + Name: name, + Annotations: map[string]string{ + SkipResourceAnnotation: "true", + }, + }, + Subjects: []rbacv1.Subject{ + { + Kind: rbacv1.ServiceAccountKind, + Name: name, + Namespace: namespace, + }, + }, + RoleRef: rbacv1.RoleRef{ + Name: name, + Kind: "ClusterRole", + APIGroup: rbacv1.GroupName, + }, + } +} + func serviceAccountFor(name, namespace string) *corev1.ServiceAccount { return &corev1.ServiceAccount{ ObjectMeta: metav1.ObjectMeta{ diff --git a/vendor/github.com/portworx/kdmp/pkg/drivers/utils/utils.go b/vendor/github.com/portworx/kdmp/pkg/drivers/utils/utils.go index 2ba0f708ae..07e9bae4e5 100644 --- 
a/vendor/github.com/portworx/kdmp/pkg/drivers/utils/utils.go
+++ b/vendor/github.com/portworx/kdmp/pkg/drivers/utils/utils.go
@@ -4,9 +4,13 @@ import (
	"errors"
	"fmt"
	"os"
+	"strconv"
	"strings"
+	"time"

	"github.com/aquilax/truncate"
+	storkapi "github.com/libopenstorage/stork/pkg/apis/stork/v1alpha1"
+	"github.com/libopenstorage/stork/pkg/k8sutils"
	"github.com/portworx/kdmp/pkg/drivers"
	"github.com/portworx/kdmp/pkg/version"
	"github.com/portworx/sched-ops/k8s/apps"
@@ -19,17 +23,23 @@ import (
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/types"
	"k8s.io/apimachinery/pkg/util/validation"
+	"k8s.io/apimachinery/pkg/util/wait"
)

const (
	defaultPXNamespace = "kube-system"
-	kdmpConfig         = "kdmp-config"
+	// KdmpConfig defines the config map name of the kdmp module
+	KdmpConfig = "kdmp-config"
	// TriggeredFromStork - denotes the kopia job is triggered from stork module
	TriggeredFromStork = "stork"
	// TriggeredFromPxBackup - denotes the kopia job is triggered from px-backup module
	TriggeredFromPxBackup = "px-backup"
	kopiaExecutorImageRegistryEnvVar = "KOPIA-EXECUTOR-IMAGE-REGISTRY"
	kopiaExecutorImageRegistrySecretEnvVar = "KOPIA-EXECUTOR-IMAGE-REGISTRY-SECRET"
+	// NfsExecutorImageRegistryEnvVar is the OS environment variable for the nfs executor image registry
+	NfsExecutorImageRegistryEnvVar = "NFS-EXECUTOR-IMAGE-REGISTRY"
+	// NfsExecutorImageRegistrySecretEnvVar is the OS environment variable for the nfs executor image registry secret
+	NfsExecutorImageRegistrySecretEnvVar = "NFS-EXECUTOR-IMAGE-REGISTRY-SECRET"
	// AdminNamespace - kube-system namespace, where privileged pods will be deployed for live kopiabackup.
	AdminNamespace = "kube-system"
	imageSecretPrefix = "image-secret"
@@ -39,6 +49,20 @@ const (
	ImageSecret = "image-secret"
	// CertSecret - cert secret prefix
	CertSecret = "cert-secret"
+	// ResourceCleanupKey - this key enables or disables the resource cleanup process.
+	ResourceCleanupKey = "RESOURCE_CLEANUP"
+	// ResourceCleanupDefaultValue is "true": resource cleanup is enabled by default; for debugging, users can set it to "false".
+	ResourceCleanupDefaultValue = "true"
+	volumeInitialDelay = 2 * time.Second
+	volumeFactor       = 1.5
+	volumeSteps        = 15
+	nfsVolumeSize      = "10Gi"
+	// ResourceUploadSuccessMsg - resource upload success message
+	ResourceUploadSuccessMsg = "upload resource Successfully"
+	// PvcBoundSuccessMsg - pvc bound success message
+	PvcBoundSuccessMsg = "pvc bounded successfully"
+	// PvcBoundFailedMsg - pvc not bound message
+	PvcBoundFailedMsg = "pvc not bounded"
)

var (
@@ -47,6 +71,11 @@
	// ErrJobAlreadyRunning - Already a job is running for the given instance of PVC
	ErrJobAlreadyRunning = errors.New("job Already Running")
)
+var volumeAPICallBackoff = wait.Backoff{
+	Duration: volumeInitialDelay,
+	Factor:   volumeFactor,
+	Steps:    volumeSteps,
+}

// NamespacedName returns a name in form "<namespace>/<name>".
func NamespacedName(namespace, name string) string {
@@ -184,19 +213,20 @@ func FetchJobContainerRestartCount(j *batchv1.Job) (int32, error) {
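For context on the polling added in this file: volumeAPICallBackoff starts at a 2s delay and grows by a factor of 1.5 over 15 steps, so a caller can block for roughly 29 minutes in the worst case. An illustrative computation, not part of the change (assumes the fmt and time imports):

	// Illustrative only: worst-case cumulative wait implied by volumeAPICallBackoff.
	total, d := time.Duration(0), 2*time.Second
	for i := 0; i < 15; i++ {
		total += d
		d = time.Duration(float64(d) * 1.5)
	}
	fmt.Println(total) // ~29m7s

// ToJobStatus returns a job status for provided parameters.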
func ToJobStatus(progress float64, errMsg string, jobStatus batchv1.JobConditionType) *drivers.JobStatus {
-	if len(errMsg) > 0 {
-		return &drivers.JobStatus{
-			State:  drivers.JobStateFailed,
-			Reason: errMsg,
-			Status: jobStatus,
-		}
-	}
-
	if drivers.IsTransferCompleted(progress) {
		return &drivers.JobStatus{
			State:            drivers.JobStateCompleted,
			ProgressPercents: progress,
			Status:           jobStatus,
+			Reason:           errMsg,
+		}
+	}
+
+	if len(errMsg) > 0 {
+		return &drivers.JobStatus{
+			State:  drivers.JobStateFailed,
+			Reason: errMsg,
+			Status: jobStatus,
		}
	}
@@ -215,13 +245,30 @@ func GetConfigValue(cm, ns, key string) string {
		ns,
	)
	if err != nil {
-		logrus.Warnf("Failed in getting value for key [%v] from configmap[%v]", key, kdmpConfig)
+		logrus.Warnf("Failed in getting value for key [%v] from configmap [%v]", key, KdmpConfig)
		// try reading from the Env variable
		return os.Getenv(key)
	}
	return configMap.Data[key]
}

+// DoCleanupResource returns whether to clean up the CRs & other resources.
+func DoCleanupResource() (bool, error) {
+	doCleanup := true
+	cleanupResourceVal, err := k8sutils.GetConfigValue(KdmpConfig, defaultPXNamespace, ResourceCleanupKey)
+	if err != nil {
+		logrus.Errorf("Failed to get %s key from the kdmp-config configmap: %v", ResourceCleanupKey, err)
+		return true, err
+	}
+	if cleanupResourceVal != "" {
+		doCleanup, err = strconv.ParseBool(cleanupResourceVal)
+		if err != nil {
+			return true, err
+		}
+	}
+	return doCleanup, nil
+}
+
// ResticExecutorImage returns a docker image that contains resticexecutor binary.
func ResticExecutorImage() string {
	if customImage := strings.TrimSpace(os.Getenv(drivers.ResticExecutorImageKey)); customImage != "" {
@@ -255,7 +302,66 @@ func GetImageRegistryFromDeployment(name, namespace string) (string, string, err
	return registry, "", nil
}

+// GetExecutorImageAndSecret returns the image name and secret to use in the job pod
+func GetExecutorImageAndSecret(executorImageType, deploymentName, deploymentNs,
+	jobName string, jobOption drivers.JobOpts) (string, string, error) {
+	var imageRegistry, imageRegistrySecret string
+	var err error
+	if executorImageType == drivers.KopiaExecutorImage {
+		if len(os.Getenv(kopiaExecutorImageRegistryEnvVar)) != 0 {
+			imageRegistry = os.Getenv(kopiaExecutorImageRegistryEnvVar)
+			imageRegistrySecret = os.Getenv(kopiaExecutorImageRegistrySecretEnvVar)
+		}
+	} else if executorImageType == drivers.NfsExecutorImage {
+		if len(os.Getenv(NfsExecutorImageRegistryEnvVar)) != 0 {
+			imageRegistry = os.Getenv(NfsExecutorImageRegistryEnvVar)
+			imageRegistrySecret = os.Getenv(NfsExecutorImageRegistrySecretEnvVar)
+		}
+	}
+	// If we still didn't get the image registry from the environment variables, fall back to the deployment spec
+	if imageRegistry == "" {
+		imageRegistry, imageRegistrySecret, err = GetImageRegistryFromDeployment(deploymentName, deploymentNs)
+		if err != nil {
+			logrus.Errorf("GetExecutorImageAndSecret: error in getting image registry from %v:%v deployment", deploymentNs, deploymentName)
+			return "", "", err
+		}
+	}
+	if len(imageRegistrySecret) != 0 {
+		var secretSourceNs string
+		if executorImageType == drivers.NfsExecutorImage {
+			secretSourceNs = jobOption.NfsImageExecutorSourceNs
+		} else {
+			secretSourceNs = jobOption.KopiaImageExecutorSourceNs
+		}
+		err = CreateImageRegistrySecret(imageRegistrySecret, jobName, secretSourceNs, jobOption.Namespace)
+		if err != nil {
+			return "", "", err
+		}
+	}
+	// TODO: needs optimization; too many if/else branches :-)
+	var executorImage string
+	if len(imageRegistry) != 0 {
+		if executorImageType == drivers.KopiaExecutorImage {
+			executorImage = fmt.Sprintf("%s/%s", imageRegistry, GetKopiaExecutorImageName())
+		} else if executorImageType == drivers.NfsExecutorImage {
+			executorImage = fmt.Sprintf("%s/%s", imageRegistry, GetNfsExecutorImageName())
+		}
+	} else {
+		if executorImageType == drivers.KopiaExecutorImage {
+			executorImage = GetKopiaExecutorImageName()
+		} else if executorImageType == drivers.NfsExecutorImage {
+			executorImage = GetNfsExecutorImageName()
+		}
+	}
+	logrus.Infof("The returned image and secret are %v and %v", executorImage, imageRegistrySecret)
+	return executorImage, imageRegistrySecret, nil
+}
+
// GetKopiaExecutorImageRegistryAndSecret - will return the kopia image registry and image secret
+// TODO: This is a duplicate of GetExecutorImageAndSecret(), but the CSI snapshotter path does not have
+// jobOption available, so we keep this intact for now. Since changes to CSI are deferred, this function
+// will be removed later by passing "nil" for jobOption from the CSI snapshotter path.
func GetKopiaExecutorImageRegistryAndSecret(source, sourceNs string) (string, string, error) {
	var registry, registrySecret string
	var err error
@@ -273,11 +379,57 @@ func GetKopiaExecutorImageRegistryAndSecret(source, sourceNs string) (string, st
}

+// CreateNfsSecret creates the NFS secret which will be mounted and accessed by the job pod
+func CreateNfsSecret(secretName string, backupLocation *storkapi.BackupLocation, namespace string, labels map[string]string) error {
+	credentialData := make(map[string][]byte)
+	credentialData["type"] = []byte(backupLocation.Location.Type)
+	credentialData["serverAddr"] = []byte(backupLocation.Location.NfsConfig.ServerAddr)
+	credentialData["password"] = []byte(backupLocation.Location.RepositoryPassword)
+	credentialData["path"] = []byte(backupLocation.Location.Path)
+	credentialData["subPath"] = []byte(backupLocation.Location.NfsConfig.SubPath)
+
+	err := CreateJobSecret(secretName, namespace, credentialData, labels)
+
+	return err
+}
+
+// CreateJobSecret creates a job secret resource in k8s
+func CreateJobSecret(
+	secretName string,
+	namespace string,
+	credentialData map[string][]byte,
+	labels map[string]string,
+) error {
+	secret := &corev1.Secret{
+		ObjectMeta: metav1.ObjectMeta{
+			Name:      secretName,
+			Namespace: namespace,
+			Labels:    labels,
+			Annotations: map[string]string{
+				SkipResourceAnnotation: "true",
+			},
+		},
+		Data: credentialData,
+		Type: corev1.SecretTypeOpaque,
+	}
+	_, err := core.Instance().CreateSecret(secret)
+	if err != nil && apierrors.IsAlreadyExists(err) {
+		return nil
+	}
+
+	return err
+}
+
// GetKopiaExecutorImageName - will return the default kopia executor image
func GetKopiaExecutorImageName() string {
	return strings.Join([]string{drivers.KopiaExecutorImage, version.Get().GitVersion}, ":")
}

+// GetNfsExecutorImageName - will return the default nfs executor image
+func GetNfsExecutorImageName() string {
+	return strings.Join([]string{drivers.NfsExecutorImage, version.Get().GitVersion}, ":")
+}
+
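One way to act on the TODO above would be a table keyed by executor image type. A hedged sketch, not part of the change, using only names defined in this file; executorImageFor and imageNameFor are hypothetical helpers:

	// Sketch of a table-driven replacement for the if/else ladder in
	// GetExecutorImageAndSecret: map executor type to its image-name function.
	var imageNameFor = map[string]func() string{
		drivers.KopiaExecutorImage: GetKopiaExecutorImageName,
		drivers.NfsExecutorImage:   GetNfsExecutorImageName,
	}

	func executorImageFor(executorImageType, imageRegistry string) string {
		name := imageNameFor[executorImageType]()
		if imageRegistry != "" {
			return fmt.Sprintf("%s/%s", imageRegistry, name)
		}
		return name
	}

// RsyncImage returns a docker image that contains rsync binary.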
func RsyncImage() string {
	if customImage := strings.TrimSpace(os.Getenv(drivers.RsyncImageKey)); customImage != "" {
@@ -339,6 +491,31 @@ func KopiaResourceRequirements(configMap, ns string) (corev1.ResourceRequirement
	return toResourceRequirements(requestCPU, requestMem, limitCPU, limitMem)
}

+// NFSResourceRequirements returns ResourceRequirements for the nfs executor container.
+func NFSResourceRequirements(configMap, ns string) (corev1.ResourceRequirements, error) {
+	requestCPU := strings.TrimSpace(GetConfigValue(configMap, ns, drivers.NFSExecutorRequestCPU))
+	if requestCPU == "" {
+		requestCPU = drivers.DefaultNFSExecutorRequestCPU
+	}
+
+	requestMem := strings.TrimSpace(GetConfigValue(configMap, ns, drivers.NFSExecutorRequestMemory))
+	if requestMem == "" {
+		requestMem = drivers.DefaultNFSExecutorRequestMemory
+	}
+
+	limitCPU := strings.TrimSpace(GetConfigValue(configMap, ns, drivers.NFSExecutorLimitCPU))
+	if limitCPU == "" {
+		limitCPU = drivers.DefaultNFSExecutorLimitCPU
+	}
+
+	limitMem := strings.TrimSpace(GetConfigValue(configMap, ns, drivers.NFSExecutorLimitMemory))
+	if limitMem == "" {
+		limitMem = drivers.DefaultNFSExecutorLimitMemory
+	}
+
+	return toResourceRequirements(requestCPU, requestMem, limitCPU, limitMem)
+}
+
// ResticResourceRequirements returns JobResourceRequirements for the executor container.
func ResticResourceRequirements() (corev1.ResourceRequirements, error) {
	requestCPU := drivers.DefaultResticExecutorRequestCPU
@@ -429,6 +606,10 @@ func CreateImageRegistrySecret(sourceName, destName, sourceNamespace, destNamesp
	// and create one in the current job's namespace
	secret, err := core.Instance().GetSecret(sourceName, sourceNamespace)
	if err != nil {
+		// Safely exit if the image registry secret is not found.
+		if apierrors.IsNotFound(err) {
+			return nil
+		}
		logrus.Errorf("failed in getting secret [%v/%v]: %v", sourceNamespace, sourceName, err)
		return err
	}
@@ -451,17 +632,213 @@ func CreateImageRegistrySecret(sourceName, destName, sourceNamespace, destNamesp
	return nil
}

-//GetCredSecretName - get credential secret name
+// GetCredSecretName - get credential secret name
func GetCredSecretName(name string) string {
	return CredSecret + "-" + name
}

-//GetImageSecretName - get image secret name
+// GetImageSecretName - get image secret name
func GetImageSecretName(name string) string {
	return ImageSecret + "-" + name
}

-//GetCertSecretName - get cert secret name
+// GetCertSecretName - get cert secret name
func GetCertSecretName(name string) string {
	return CertSecret + "-" + name
}
+
+// CreateNfsPv - Create a persistent volume for NFS specific jobs
+func CreateNfsPv(pvName string,
+	nfsServerAddr string,
+	nfsExportDir string,
+	nfsMountOption string) error {
+
+	fn := "CreateNfsPv"
+	// Create the PV and PVC before creating the job
+	pv := &corev1.PersistentVolume{
+		TypeMeta: metav1.TypeMeta{Kind: "PersistentVolume"},
+		ObjectMeta: metav1.ObjectMeta{
+			Name: pvName,
+			Annotations: map[string]string{
+				SkipResourceAnnotation: "true",
+			},
+		},
+		Spec: corev1.PersistentVolumeSpec{
+			// Setting it to an empty string so that the default storage class is not selected.
+			StorageClassName: "",
+			AccessModes: []corev1.PersistentVolumeAccessMode{
+				"ReadWriteMany",
+			},
+			Capacity: corev1.ResourceList{
+				corev1.ResourceName(corev1.ResourceStorage): resource.MustParse(nfsVolumeSize),
+			},
+			PersistentVolumeSource: corev1.PersistentVolumeSource{
+				NFS: &corev1.NFSVolumeSource{
+					Server:   nfsServerAddr,
+					Path:     nfsExportDir,
+					ReadOnly: false,
+				},
+			},
+			MountOptions: []string{nfsMountOption},
+		},
+	}
+
+	if _, err := core.Instance().CreatePersistentVolume(pv); err != nil && !apierrors.IsAlreadyExists(err) {
+		errMsg := fmt.Sprintf("creation of pv name [%s] failed: %v", pvName, err)
+		logrus.Errorf("%s: %v", fn, errMsg)
+		return fmt.Errorf("%s", errMsg)
+	}
+
+	// wait for pv to be available
+	_, err := WaitForPVAvailable(pvName)
+	if err != nil {
+		return err
+	}
+
+	return nil
+}
+
+// CreateNfsPvc - Create a persistent volume claim for NFS specific jobs
+func CreateNfsPvc(pvcName string, pvName string, namespace string) error {
+	fn := "CreateNfsPvc"
+	emptyStorageClass := ""
+	pvc := &corev1.PersistentVolumeClaim{
+		ObjectMeta: metav1.ObjectMeta{
+			Name:      pvcName,
+			Namespace: namespace,
+			Annotations: map[string]string{
+				SkipResourceAnnotation: "true",
+			},
+		},
+		Spec: corev1.PersistentVolumeClaimSpec{
+			// Setting it to an empty string so that the default storage class is not selected.
+			StorageClassName: &emptyStorageClass,
+			AccessModes:      []corev1.PersistentVolumeAccessMode{corev1.ReadWriteMany},
+			Resources: corev1.ResourceRequirements{
+				Requests: corev1.ResourceList{
+					corev1.ResourceName(corev1.ResourceStorage): resource.MustParse(nfsVolumeSize),
+				},
+			},
+			VolumeName: pvName,
+		},
+	}
+
+	_, err := core.Instance().CreatePersistentVolumeClaim(pvc)
+	if err != nil && !apierrors.IsAlreadyExists(err) {
+		errMsg := fmt.Sprintf("creation of pvc name [%s] failed: %v", pvcName, err)
+		logrus.Errorf("%s: %v", fn, errMsg)
+		return fmt.Errorf("%s", errMsg)
+	}
+
+	// wait for pvc to get bound
+	_, err = WaitForPVCBound(pvcName, namespace)
+	if err != nil {
+		return err
+	}
+
+	return nil
+}
+
+// CreateNFSPvPvcForJob - this function creates the PV and PVC for an NFS job.
+func CreateNFSPvPvcForJob(jobName string, namespace string, o drivers.JobOpts) error {
+	// create PV before creating job
+	nfsPvName := GetPvNameForJob(jobName)
+	if err := CreateNfsPv(nfsPvName, o.NfsServer, o.NfsExportDir, o.NfsMountOption); err != nil {
+		return err
+	}
+	logrus.Debugf("Created NFS PV successfully %s", nfsPvName)
+	// create PVC before creating job
+	nfsPvcName := GetPvcNameForJob(jobName)
+	if err := CreateNfsPvc(nfsPvcName, nfsPvName, namespace); err != nil {
+		return err
+	}
+	logrus.Debugf("Created NFS PVC successfully %s/%s", namespace, nfsPvcName)
+	return nil
+}
+
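The pair created above is statically bound: the PVC pins spec.VolumeName to the job's PV and sets an empty storage class so no dynamic provisioner interferes. Illustrative usage with hypothetical values; the field names mirror the drivers.JobOpts fields referenced above:

	// Hypothetical values for illustration only.
	o := drivers.JobOpts{
		Namespace:      "kube-system",
		NfsServer:      "10.0.0.5",
		NfsExportDir:   "/exports/px-backups",
		NfsMountOption: "vers=4.1",
	}
	if err := CreateNFSPvPvcForJob("nfs-backup-job-1", o.Namespace, o); err != nil {
		logrus.Errorf("provisioning NFS PV/PVC for job: %v", err)
	}

+// WaitForPVCBound - waits till the PVC moves to Bound state, else returns a timeout error.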
+func WaitForPVCBound(pvcName string, namespace string) (*corev1.PersistentVolumeClaim, error) {
+	if namespace == "" {
+		return nil, fmt.Errorf("namespace has to be set")
+	}
+	// wait for pvc to get bound
+	var pvc *corev1.PersistentVolumeClaim
+	var err error
+	var errMsg string
+	wErr := wait.ExponentialBackoff(volumeAPICallBackoff, func() (bool, error) {
+		pvc, err = core.Instance().GetPersistentVolumeClaim(pvcName, namespace)
+		if err != nil {
+			return false, err
+		}
+
+		if pvc.Status.Phase != corev1.ClaimBound {
+			errMsg = fmt.Sprintf("nfs pvc status: expected %s, got %s for pvc %s/%s", corev1.ClaimBound, pvc.Status.Phase, namespace, pvcName)
+			logrus.Debugf("%v", errMsg)
+			return false, nil
+		}
+
+		return true, nil
+	})
+
+	if wErr != nil {
+		logrus.Errorf("%v", wErr)
+		return nil, fmt.Errorf("%s: %s", wErr, errMsg)
+	}
+	return pvc, nil
+}
+
+// WaitForPVAvailable - waits till the PV becomes Available (or Bound), else returns a timeout error.
+func WaitForPVAvailable(pvName string) (*corev1.PersistentVolume, error) {
+	// wait for pv to be available
+	var pv *corev1.PersistentVolume
+	var err error
+	var errMsg string
+	wErr := wait.ExponentialBackoff(volumeAPICallBackoff, func() (bool, error) {
+		pv, err = core.Instance().GetPersistentVolume(pvName)
+		if err != nil {
+			return false, err
+		}
+		// If the pv is not in Available or Bound state, wait for it.
+		if !(pv.Status.Phase == corev1.VolumeAvailable || pv.Status.Phase == corev1.VolumeBound) {
+			errMsg = fmt.Sprintf("nfs pv [%v] status: expected %s, got %s", pvName, corev1.VolumeAvailable, pv.Status.Phase)
+			logrus.Debugf("%v", errMsg)
+			return false, nil
+		}
+
+		return true, nil
+	})
+
+	if wErr != nil {
+		logrus.Errorf("%v: %v", wErr, errMsg)
+		return nil, fmt.Errorf("%s: %s", wErr, errMsg)
+	}
+	return pv, nil
+}
+
+// GetPvcNameForJob - returns the PVC name for a job
+func GetPvcNameForJob(jobName string) string {
+	return "pvc-" + jobName
+}
+
+// GetPvNameForJob - returns pv name for a job
+func GetPvNameForJob(jobName string) string {
+	return "pv-" + jobName
+}
+
+// GetTolerationsFromDeployment - extract tolerations from deployment spec
+func GetTolerationsFromDeployment(name, namespace string) ([]corev1.Toleration, error) {
+	deploy, err := apps.Instance().GetDeployment(name, namespace)
+	if err != nil {
+		return nil, err
+	}
+	return deploy.Spec.Template.Spec.Tolerations, nil
+}
+
+// GetNodeAffinityFromDeployment - extract NodeAffinity from deployment spec
+func GetNodeAffinityFromDeployment(name, namespace string) (*corev1.NodeAffinity, error) {
+	deploy, err := apps.Instance().GetDeployment(name, namespace)
+	if err != nil {
+		return nil, err
+	}
+	// Guard against deployments without an affinity block to avoid a nil-pointer dereference.
+	if deploy.Spec.Template.Spec.Affinity == nil {
+		return nil, nil
+	}
+	return deploy.Spec.Template.Spec.Affinity.NodeAffinity, nil
+}
diff --git a/vendor/github.com/portworx/kdmp/pkg/jobratelimit/jobratelimit.go b/vendor/github.com/portworx/kdmp/pkg/jobratelimit/jobratelimit.go
index abd9e11d43..eea8956a27 100644
--- a/vendor/github.com/portworx/kdmp/pkg/jobratelimit/jobratelimit.go
+++ b/vendor/github.com/portworx/kdmp/pkg/jobratelimit/jobratelimit.go
@@ -48,6 +48,8 @@ func getJobLimitConfigmapKey(driverName string) (string, error) {
		return RestoreJobLimitKey, nil
	case drivers.KopiaDelete:
		return DeleteJobLimitKey, nil
+	case drivers.NFSDelete:
+		return DeleteJobLimitKey, nil
	case drivers.KopiaMaintenance:
		return MaintenanceJobLimitKey, nil
	default:
@@ -105,6 +107,8 @@ func getDefaultJobLimit(jobType string) int {
		return DefaultRestoreJobLimit
	case drivers.KopiaDelete:
		return DefaultDeleteJobLimit
+	case
drivers.NFSDelete:
+		return DefaultDeleteJobLimit
	case drivers.KopiaMaintenance:
		return DefaultMaintenanceJobLimit
	default:
diff --git a/vendor/github.com/portworx/kdmp/pkg/version/version.go b/vendor/github.com/portworx/kdmp/pkg/version/version.go
index 11242843d1..e488487015 100644
--- a/vendor/github.com/portworx/kdmp/pkg/version/version.go
+++ b/vendor/github.com/portworx/kdmp/pkg/version/version.go
@@ -24,7 +24,7 @@ const (
//
// These variables typically come from -ldflags settings.
var (
-	gitVersion = "master"
+	gitVersion = "1.2.3-dev"
	gitCommit  = ""                     // sha1 from git, output of $(git rev-parse HEAD)
	buildDate  = "1970-01-01T00:00:00Z" // build date in ISO8601 format, output of $(date -u +'%Y-%m-%dT%H:%M:%SZ')
	kbVerRegex = regexp.MustCompile(`^(v\d+\.\d+\.\d+)(.*)`)
diff --git a/vendor/github.com/portworx/px-object-controller/pkg/controller/controller.go b/vendor/github.com/portworx/px-object-controller/pkg/controller/controller.go
index 2aa2ed3c9c..ec0096e8ca 100644
--- a/vendor/github.com/portworx/px-object-controller/pkg/controller/controller.go
+++ b/vendor/github.com/portworx/px-object-controller/pkg/controller/controller.go
@@ -19,6 +19,7 @@ import (
	v1 "k8s.io/api/core/v1"
	k8s_errors "k8s.io/apimachinery/pkg/api/errors"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/labels"
	"k8s.io/apimachinery/pkg/util/wait"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/rest"
@@ -144,6 +145,14 @@ func New(cfg *Config) (*Controller, error) {
func (ctrl *Controller) Run(workers int, stopCh chan struct{}) {
	ctrl.objectFactory.Start(stopCh)

+	informers := []cache.InformerSynced{ctrl.accessListerSynced, ctrl.bucketListerSynced}
+	if !cache.WaitForCacheSync(stopCh, informers...) {
+		logrus.Errorf("Cannot sync caches")
+		return
+	}
+
+	ctrl.loadCaches(ctrl.bucketLister, ctrl.accessLister)
+
	for i := 0; i < workers; i++ {
		go wait.Until(ctrl.bucketWorker, 0, stopCh)
		go wait.Until(ctrl.accessWorker, 0, stopCh)
@@ -235,9 +244,7 @@ func (ctrl *Controller) processBucket(ctx context.Context, key string) error {
		ctx = ctrl.setupContextFromValue(ctx, bucketclaim.Status.BackendType)
		logrus.WithContext(ctx).Infof("deleting bucketclaim %q with driver %s", key, bucketclaim.Status.BackendType)
-		ctrl.deleteBucket(ctx, bucketclaim)
-
-		return nil
+		return ctrl.deleteBucket(ctx, bucketclaim)
	}

// enqueueBucketClaimWork adds bucketclaim to given work queue.
@@ -375,3 +382,34 @@ func (ctrl *Controller) enqueueAccessWork(obj interface{}) {
		ctrl.accessQueue.Add(objName)
	}
}
+
+// loadCaches fills all controller caches with initial data.
+// Without this, the caches would be empty and unable to process
+// any new requests when the controller is restarted.
+func (ctrl *Controller) loadCaches(bucketLister bucketlisters.PXBucketClaimLister, accessLister bucketlisters.PXBucketAccessLister) {
+	bucketList, err := bucketLister.List(labels.Everything())
+	if err != nil {
+		logrus.Errorf("Controller can't initialize caches: %v", err)
+		return
+	}
+	for _, bucket := range bucketList {
+		bucketClone := bucket.DeepCopy()
+		if _, err = ctrl.storeBucketUpdate(bucketClone); err != nil {
+			logrus.Errorf("error updating bucket cache: %v", err)
+		}
+	}
+
+	accessList, err := accessLister.List(labels.Everything())
+	if err != nil {
+		logrus.Errorf("Controller can't initialize caches: %v", err)
+		return
+	}
+	for _, access := range accessList {
+		accessClone := access.DeepCopy()
+		if _, err = ctrl.storeAccessUpdate(accessClone); err != nil {
+			logrus.Errorf("error updating bucket access cache: %v", err)
+		}
+	}
+
+	logrus.Info("controller initialized for PXBucketClaims and PXBucketAccesses")
+}
diff --git a/vendor/github.com/portworx/px-object-controller/pkg/controller/operation.go b/vendor/github.com/portworx/px-object-controller/pkg/controller/operation.go
index 00f121a62f..de85351219 100644
--- a/vendor/github.com/portworx/px-object-controller/pkg/controller/operation.go
+++ b/vendor/github.com/portworx/px-object-controller/pkg/controller/operation.go
@@ -3,6 +3,7 @@ package controller
import (
	"context"
	"fmt"
+	"strconv"

	"github.com/libopenstorage/openstorage/api"
	"github.com/libopenstorage/openstorage/api/server/sdk"
@@ -21,6 +22,7 @@ const (
	commonObjectServiceKeyPrefix = "object.portworx.io/"
	backendTypeKey               = commonObjectServiceKeyPrefix + "backend-type"
	endpointKey                  = commonObjectServiceKeyPrefix + "endpoint"
+	clearBucketKey               = commonObjectServiceKeyPrefix + "clear-bucket"

	commonObjectServiceFinalizerKeyPrefix = "finalizers.object.portworx.io/"
	accessGrantedFinalizer                = commonObjectServiceFinalizerKeyPrefix + "access-granted"
@@ -57,16 +59,27 @@ func (ctrl *Controller) deleteBucket(ctx context.Context, pbc *crdv1alpha1.PXBuc
		return nil
	}

+	clearBucket := false
+	if clearBucketVal, ok := pbc.Annotations[clearBucketKey]; ok {
+		var err error
+		clearBucket, err = strconv.ParseBool(clearBucketVal)
+		if err != nil {
+			logrus.Errorf("invalid value %s for %s, defaulting to false: %v", clearBucketVal, clearBucketKey, err)
+		}
+	}
+
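For orientation: the clear-bucket flag originates as a PXBucketClass parameter, is copied onto the claim's annotations at create time (see createBucket below), and is parsed here at delete time. A hedged illustration, not part of the change; the class name and backend-type value are hypothetical:

	// Hypothetical PXBucketClass requesting that bucket contents be purged on delete.
	pbclass := &crdv1alpha1.PXBucketClass{
		ObjectMeta: metav1.ObjectMeta{Name: "px-object-class"}, // illustrative
		Parameters: map[string]string{
			"object.portworx.io/backend-type": "S3Driver", // illustrative backend
			"object.portworx.io/clear-bucket": "true",
		},
	}

	// Provisioned and deletionPolicy is delete. Delete the bucket here.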
_, err := ctrl.bucketClient.DeleteBucket(ctx, &api.BucketDeleteRequest{ - BucketId: pbc.Status.BucketID, - Region: pbc.Status.Region, - Endpoint: pbc.Status.Endpoint, + BucketId: pbc.Status.BucketID, + Region: pbc.Status.Region, + Endpoint: pbc.Status.Endpoint, + ClearBucket: clearBucket, }) if err != nil { errMsg := fmt.Sprintf("delete bucket %s failed: %v", pbc.Name, err) logrus.WithContext(ctx).Errorf(errMsg) ctrl.eventRecorder.Event(pbc, v1.EventTypeWarning, "DeleteBucketError", errMsg) + return err } err = ctrl.removeBucketFinalizers(ctx, pbc) @@ -108,6 +121,12 @@ func (ctrl *Controller) createBucket(ctx context.Context, pbc *crdv1alpha1.PXBuc pbc.Status.BackendType = pbclass.Parameters[backendTypeKey] pbc.Status.Endpoint = pbclass.Parameters[endpointKey] pbc.Finalizers = append(pbc.Finalizers, bucketProvisionedFinalizer) + if pbc.Annotations == nil { + pbc.Annotations = make(map[string]string) + } + if clearBucketVal, ok := pbclass.Parameters[clearBucketKey]; ok { + pbc.Annotations[clearBucketKey] = clearBucketVal + } pbc, err = ctrl.k8sBucketClient.ObjectV1alpha1().PXBucketClaims(pbc.Namespace).Update(ctx, pbc, metav1.UpdateOptions{}) if err != nil { ctrl.eventRecorder.Event(pbc, v1.EventTypeWarning, "CreateBucketError", fmt.Sprintf("failed to update bucket: %v", err)) @@ -147,8 +166,8 @@ func (ctrl *Controller) setupContextFromClass(ctx context.Context, pbclass *crdv return grpcserver.AddMetadataToContext(ctx, sdk.ContextDriverKey, backendTypeValue), nil } -func getAccountName(pbclass *crdv1alpha1.PXBucketClass) string { - return fmt.Sprintf("px-os-account-%v", pbclass.ObjectMeta.UID) +func getAccountName(namespace *v1.Namespace) string { + return fmt.Sprintf("px-os-account-%v", namespace.GetUID()) } func getCredentialsSecretName(pba *crdv1alpha1.PXBucketAccess) string { @@ -159,9 +178,18 @@ func getCredentialsSecretName(pba *crdv1alpha1.PXBucketAccess) string { } func (ctrl *Controller) createAccess(ctx context.Context, pba *crdv1alpha1.PXBucketAccess, pbclass *crdv1alpha1.PXBucketClass, bucketID string) error { + // Get namespace UID for multitenancy + namespace, err := ctrl.k8sClient.CoreV1().Namespaces().Get(ctx, pba.Namespace, metav1.GetOptions{}) + if err != nil { + errMsg := fmt.Sprintf("failed to get namespace during grant bucket access %s: %v", pba.Name, err) + logrus.WithContext(ctx).Errorf(errMsg) + ctrl.eventRecorder.Event(pba, v1.EventTypeWarning, "GrantAccessError", errMsg) + return err + } + resp, err := ctrl.bucketClient.AccessBucket(ctx, &api.BucketGrantAccessRequest{ BucketId: bucketID, - AccountName: getAccountName(pbclass), + AccountName: getAccountName(namespace), }) if err != nil { errMsg := fmt.Sprintf("create bucket access %s failed: %v", pba.Name, err) diff --git a/vendor/github.com/portworx/sched-ops/k8s/admissionregistration/admissionregistration.go b/vendor/github.com/portworx/sched-ops/k8s/admissionregistration/admissionregistration.go index 6478ea09c5..edaf7338b6 100644 --- a/vendor/github.com/portworx/sched-ops/k8s/admissionregistration/admissionregistration.go +++ b/vendor/github.com/portworx/sched-ops/k8s/admissionregistration/admissionregistration.go @@ -5,6 +5,7 @@ import ( "os" "sync" + "github.com/portworx/sched-ops/k8s/common" apiadmissionsclientv1 "k8s.io/client-go/kubernetes/typed/admissionregistration/v1" apiadmissionsclient "k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1" @@ -146,7 +147,10 @@ func (c *Client) loadClient() error { } var err error - + err = common.SetRateLimiter(c.config) + if err != nil { + return err + } 
c.admission, err = apiadmissionsclient.NewForConfig(c.config) if err != nil { return err diff --git a/vendor/github.com/portworx/sched-ops/k8s/apiextensions/apiextensions.go b/vendor/github.com/portworx/sched-ops/k8s/apiextensions/apiextensions.go index 3704406441..b26a6f29c0 100644 --- a/vendor/github.com/portworx/sched-ops/k8s/apiextensions/apiextensions.go +++ b/vendor/github.com/portworx/sched-ops/k8s/apiextensions/apiextensions.go @@ -5,6 +5,7 @@ import ( "os" "sync" + "github.com/portworx/sched-ops/k8s/common" apiextensionsclient "k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/client-go/rest" @@ -139,7 +140,10 @@ func (c *Client) loadClient() error { } var err error - + err = common.SetRateLimiter(c.config) + if err != nil { + return err + } c.extension, err = apiextensionsclient.NewForConfig(c.config) if err != nil { return err diff --git a/vendor/github.com/portworx/sched-ops/k8s/apps/apps.go b/vendor/github.com/portworx/sched-ops/k8s/apps/apps.go index 33bfedb0c4..8d09ad38a4 100644 --- a/vendor/github.com/portworx/sched-ops/k8s/apps/apps.go +++ b/vendor/github.com/portworx/sched-ops/k8s/apps/apps.go @@ -5,6 +5,7 @@ import ( "os" "sync" + "github.com/portworx/sched-ops/k8s/common" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" appsv1client "k8s.io/client-go/kubernetes/typed/apps/v1" corev1client "k8s.io/client-go/kubernetes/typed/core/v1" @@ -153,7 +154,10 @@ func (c *Client) loadClient() error { } var err error - + err = common.SetRateLimiter(c.config) + if err != nil { + return err + } c.apps, err = appsv1client.NewForConfig(c.config) if err != nil { return err diff --git a/vendor/github.com/portworx/sched-ops/k8s/autopilot/autopilot.go b/vendor/github.com/portworx/sched-ops/k8s/autopilot/autopilot.go index ef843b3e8d..a5e60ef919 100644 --- a/vendor/github.com/portworx/sched-ops/k8s/autopilot/autopilot.go +++ b/vendor/github.com/portworx/sched-ops/k8s/autopilot/autopilot.go @@ -6,6 +6,7 @@ import ( "sync" autopilotclientset "github.com/libopenstorage/autopilot-api/pkg/client/clientset/versioned" + "github.com/portworx/sched-ops/k8s/common" "k8s.io/client-go/rest" "k8s.io/client-go/tools/clientcmd" ) @@ -136,7 +137,10 @@ func (c *Client) loadClient() error { } var err error - + err = common.SetRateLimiter(c.config) + if err != nil { + return err + } c.autopilot, err = autopilotclientset.NewForConfig(c.config) if err != nil { return err diff --git a/vendor/github.com/portworx/sched-ops/k8s/batch/batch.go b/vendor/github.com/portworx/sched-ops/k8s/batch/batch.go index a0fc2490ef..0c90ce452c 100644 --- a/vendor/github.com/portworx/sched-ops/k8s/batch/batch.go +++ b/vendor/github.com/portworx/sched-ops/k8s/batch/batch.go @@ -5,6 +5,7 @@ import ( "os" "sync" + "github.com/portworx/sched-ops/k8s/common" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" batchv1client "k8s.io/client-go/kubernetes/typed/batch/v1" batchv1beta1client "k8s.io/client-go/kubernetes/typed/batch/v1beta1" @@ -144,7 +145,10 @@ func (c *Client) loadClient() error { } var err error - + err = common.SetRateLimiter(c.config) + if err != nil { + return err + } c.batch, err = batchv1client.NewForConfig(c.config) if err != nil { return err diff --git a/vendor/github.com/portworx/sched-ops/k8s/common/utils.go b/vendor/github.com/portworx/sched-ops/k8s/common/utils.go new file mode 100644 index 0000000000..9f56023ba2 --- /dev/null +++ b/vendor/github.com/portworx/sched-ops/k8s/common/utils.go @@ -0,0 +1,32 @@ +package common + +import ( + "fmt" + "os" + 
"strconv" + + "k8s.io/client-go/rest" +) + +const ( + QPSRate = "KUBERNETES_OPS_QPS_RATE" + BurstRate = "KUBERNETES_OPS_BURST_RATE" +) + +func SetRateLimiter(config *rest.Config) error { + if val := os.Getenv(QPSRate); val != "" { + qps, err := strconv.Atoi(val) + if err != nil { + return fmt.Errorf("invalid qps count specified %v: %v", val, err) + } + config.QPS = float32(qps) + } + if val := os.Getenv(BurstRate); val != "" { + burst, err := strconv.Atoi(val) + if err != nil { + return fmt.Errorf("invalid burst count specified %v: %v", val, err) + } + config.Burst = int(burst) + } + return nil +} diff --git a/vendor/github.com/portworx/sched-ops/k8s/core/core.go b/vendor/github.com/portworx/sched-ops/k8s/core/core.go index 30eb90acf5..f8472aa237 100644 --- a/vendor/github.com/portworx/sched-ops/k8s/core/core.go +++ b/vendor/github.com/portworx/sched-ops/k8s/core/core.go @@ -6,6 +6,7 @@ import ( "sync" "time" + "github.com/portworx/sched-ops/k8s/common" "github.com/portworx/sched-ops/task" "github.com/sirupsen/logrus" corev1 "k8s.io/api/core/v1" @@ -23,6 +24,7 @@ import ( const ( masterLabelKey = "node-role.kubernetes.io/master" controlplaneLabelKey = "node-role.kubernetes.io/controlplane" + controlDashPlaneLabelKey = "node-role.kubernetes.io/control-plane" pvcStorageProvisionerKey = "volume.beta.kubernetes.io/storage-provisioner" labelUpdateMaxRetries = 5 ) @@ -203,7 +205,10 @@ func (c *Client) loadClient() error { } var err error - + err = common.SetRateLimiter(c.config) + if err != nil { + return err + } c.kubernetes, err = kubernetes.NewForConfig(c.config) if err != nil { return err diff --git a/vendor/github.com/portworx/sched-ops/k8s/core/nodes.go b/vendor/github.com/portworx/sched-ops/k8s/core/nodes.go index 55fd5806f7..e266dff2f0 100644 --- a/vendor/github.com/portworx/sched-ops/k8s/core/nodes.go +++ b/vendor/github.com/portworx/sched-ops/k8s/core/nodes.go @@ -138,7 +138,8 @@ func (c *Client) IsNodeMaster(node corev1.Node) bool { // for newer k8s these fields exist but they are empty _, hasMasterLabel := node.Labels[masterLabelKey] _, hasControlPlaneLabel := node.Labels[controlplaneLabelKey] - if hasMasterLabel || hasControlPlaneLabel { + _, hasControlDashPlaneLabel := node.Labels[controlDashPlaneLabelKey] + if hasMasterLabel || hasControlPlaneLabel || hasControlDashPlaneLabel { return true } return false diff --git a/vendor/github.com/portworx/sched-ops/k8s/dynamic/dynamic.go b/vendor/github.com/portworx/sched-ops/k8s/dynamic/dynamic.go index 5605f75940..c958635f42 100644 --- a/vendor/github.com/portworx/sched-ops/k8s/dynamic/dynamic.go +++ b/vendor/github.com/portworx/sched-ops/k8s/dynamic/dynamic.go @@ -7,6 +7,7 @@ import ( "strings" "sync" + "github.com/portworx/sched-ops/k8s/common" "k8s.io/apimachinery/pkg/api/meta" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" @@ -223,7 +224,10 @@ func (c *Client) loadClient() error { } var err error - + err = common.SetRateLimiter(c.config) + if err != nil { + return err + } c.client, err = dynamic.NewForConfig(c.config) if err != nil { return err diff --git a/vendor/github.com/portworx/sched-ops/k8s/externalsnapshotter/externalsnapshotter.go b/vendor/github.com/portworx/sched-ops/k8s/externalsnapshotter/externalsnapshotter.go index b4b13c5bd0..81a0ce8bb1 100644 --- a/vendor/github.com/portworx/sched-ops/k8s/externalsnapshotter/externalsnapshotter.go +++ b/vendor/github.com/portworx/sched-ops/k8s/externalsnapshotter/externalsnapshotter.go @@ -6,6 +6,7 @@ import ( "sync" 
"github.com/kubernetes-csi/external-snapshotter/client/v4/clientset/versioned/typed/volumesnapshot/v1beta1" + "github.com/portworx/sched-ops/k8s/common" "k8s.io/client-go/rest" "k8s.io/client-go/tools/clientcmd" ) @@ -138,7 +139,10 @@ func (c *Client) loadClient() error { } var err error - + err = common.SetRateLimiter(c.config) + if err != nil { + return err + } c.client, err = v1beta1.NewForConfig(c.config) if err != nil { return err diff --git a/vendor/github.com/portworx/sched-ops/k8s/externalstorage/externalstorage.go b/vendor/github.com/portworx/sched-ops/k8s/externalstorage/externalstorage.go index d038ce200c..4d4a232114 100644 --- a/vendor/github.com/portworx/sched-ops/k8s/externalstorage/externalstorage.go +++ b/vendor/github.com/portworx/sched-ops/k8s/externalstorage/externalstorage.go @@ -6,6 +6,7 @@ import ( "sync" snapclient "github.com/kubernetes-incubator/external-storage/snapshot/pkg/client" + "github.com/portworx/sched-ops/k8s/common" "k8s.io/client-go/rest" "k8s.io/client-go/tools/clientcmd" ) @@ -135,7 +136,10 @@ func (c *Client) loadClient() error { } var err error - + err = common.SetRateLimiter(c.config) + if err != nil { + return err + } c.snap, _, err = snapclient.NewClient(c.config) if err != nil { return err diff --git a/vendor/github.com/portworx/sched-ops/k8s/kdmp/kdmp.go b/vendor/github.com/portworx/sched-ops/k8s/kdmp/kdmp.go index 449c66372c..d822497826 100644 --- a/vendor/github.com/portworx/sched-ops/k8s/kdmp/kdmp.go +++ b/vendor/github.com/portworx/sched-ops/k8s/kdmp/kdmp.go @@ -6,6 +6,7 @@ import ( "sync" kdmpclientset "github.com/portworx/kdmp/pkg/client/clientset/versioned" + "github.com/portworx/sched-ops/k8s/common" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/client-go/kubernetes" "k8s.io/client-go/rest" @@ -25,7 +26,9 @@ type Ops interface { VolumeBackupOps VolumeBackupDeleteOps BackupLocationMaintenanceOps - + ResourceExportOps + ResourceBackupOps + // SetConfig sets the config and resets the client SetConfig(config *rest.Config) } @@ -152,7 +155,10 @@ func (c *Client) loadClient() error { } var err error - + err = common.SetRateLimiter(c.config) + if err != nil { + return err + } c.kube, err = kubernetes.NewForConfig(c.config) if err != nil { return err diff --git a/vendor/github.com/portworx/sched-ops/k8s/kdmp/resourcebackup.go b/vendor/github.com/portworx/sched-ops/k8s/kdmp/resourcebackup.go new file mode 100644 index 0000000000..76d6b9ae1a --- /dev/null +++ b/vendor/github.com/portworx/sched-ops/k8s/kdmp/resourcebackup.go @@ -0,0 +1,64 @@ +package kdmp + +import ( + "context" + + kdmpv1alpha1 "github.com/portworx/kdmp/pkg/apis/kdmp/v1alpha1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// ResourceBackupOps is an interface to perform k8s ResourceExport CR crud operations +type ResourceBackupOps interface { + // CreateResourceExport creates the ResourceExport CR + CreateResourceBackup(*kdmpv1alpha1.ResourceBackup) (*kdmpv1alpha1.ResourceBackup, error) + // GetResourceBackup gets the ResourceBackup CR + GetResourceBackup(string, string) (*kdmpv1alpha1.ResourceBackup, error) + // ListResourceBackup lists all the ResourceBackup CRs + ListResourceBackup(namespace string, filterOptions metav1.ListOptions) (*kdmpv1alpha1.ResourceBackupList, error) + // UpdateResourceBackup updates the ResourceBackup CR + UpdateResourceBackup(*kdmpv1alpha1.ResourceBackup) (*kdmpv1alpha1.ResourceBackup, error) + // DeleteResourceBackup deletes the ResourceBackup CR + DeleteResourceBackup(string, string) error +} + +// CreateResourceBackup creates the 
+func (c *Client) CreateResourceBackup(backup *kdmpv1alpha1.ResourceBackup) (*kdmpv1alpha1.ResourceBackup, error) {
+	if err := c.initClient(); err != nil {
+		return nil, err
+	}
+	return c.kdmp.KdmpV1alpha1().ResourceBackups(backup.Namespace).Create(context.TODO(), backup, metav1.CreateOptions{})
+}
+
+// GetResourceBackup gets the ResourceBackup CR
+func (c *Client) GetResourceBackup(name, namespace string) (*kdmpv1alpha1.ResourceBackup, error) {
+	if err := c.initClient(); err != nil {
+		return nil, err
+	}
+	return c.kdmp.KdmpV1alpha1().ResourceBackups(namespace).Get(context.TODO(), name, metav1.GetOptions{})
+}
+
+// ListResourceBackup lists all the ResourceBackup CRs
+func (c *Client) ListResourceBackup(namespace string, filterOptions metav1.ListOptions) (*kdmpv1alpha1.ResourceBackupList, error) {
+	if err := c.initClient(); err != nil {
+		return nil, err
+	}
+	return c.kdmp.KdmpV1alpha1().ResourceBackups(namespace).List(context.TODO(), filterOptions)
+}
+
+// DeleteResourceBackup deletes the ResourceBackup CR
+func (c *Client) DeleteResourceBackup(name string, namespace string) error {
+	if err := c.initClient(); err != nil {
+		return err
+	}
+	return c.kdmp.KdmpV1alpha1().ResourceBackups(namespace).Delete(context.TODO(), name, metav1.DeleteOptions{
+		PropagationPolicy: &deleteForegroundPolicy,
+	})
+}
+
+// UpdateResourceBackup updates the ResourceBackup CR
+func (c *Client) UpdateResourceBackup(backup *kdmpv1alpha1.ResourceBackup) (*kdmpv1alpha1.ResourceBackup, error) {
+	if err := c.initClient(); err != nil {
+		return nil, err
+	}
+	return c.kdmp.KdmpV1alpha1().ResourceBackups(backup.Namespace).Update(context.TODO(), backup, metav1.UpdateOptions{})
+}
diff --git a/vendor/github.com/portworx/sched-ops/k8s/kdmp/resourceexport.go b/vendor/github.com/portworx/sched-ops/k8s/kdmp/resourceexport.go
new file mode 100644
index 0000000000..1a5610c0a3
--- /dev/null
+++ b/vendor/github.com/portworx/sched-ops/k8s/kdmp/resourceexport.go
@@ -0,0 +1,64 @@
+package kdmp
+
+import (
+	"context"
+
+	kdmpv1alpha1 "github.com/portworx/kdmp/pkg/apis/kdmp/v1alpha1"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+)
+
+// ResourceExportOps is an interface to perform k8s ResourceExport CR CRUD operations
+type ResourceExportOps interface {
+	// CreateResourceExport creates the ResourceExport CR
+	CreateResourceExport(*kdmpv1alpha1.ResourceExport) (*kdmpv1alpha1.ResourceExport, error)
+	// GetResourceExport gets the ResourceExport CR
+	GetResourceExport(string, string) (*kdmpv1alpha1.ResourceExport, error)
+	// ListResourceExport lists all the ResourceExport CRs
+	ListResourceExport(namespace string, filterOptions metav1.ListOptions) (*kdmpv1alpha1.ResourceExportList, error)
+	// UpdateResourceExport updates the ResourceExport CR
+	UpdateResourceExport(*kdmpv1alpha1.ResourceExport) (*kdmpv1alpha1.ResourceExport, error)
+	// DeleteResourceExport deletes the ResourceExport CR
+	DeleteResourceExport(string, string) error
+}
+
+// CreateResourceExport creates the ResourceExport CR
+func (c *Client) CreateResourceExport(export *kdmpv1alpha1.ResourceExport) (*kdmpv1alpha1.ResourceExport, error) {
+	if err := c.initClient(); err != nil {
+		return nil, err
+	}
+	return c.kdmp.KdmpV1alpha1().ResourceExports(export.Namespace).Create(context.TODO(), export, metav1.CreateOptions{})
+}
+
+// GetResourceExport gets the ResourceExport CR
+func (c *Client) GetResourceExport(name, namespace string) (*kdmpv1alpha1.ResourceExport, error) {
+	if err := c.initClient(); err != nil {
+		return nil, err
+	}
+	return c.kdmp.KdmpV1alpha1().ResourceExports(namespace).Get(context.TODO(), name, metav1.GetOptions{})
+}
+
+// ListResourceExport lists all the ResourceExport CRs
+func (c *Client) ListResourceExport(namespace string, filterOptions metav1.ListOptions) (*kdmpv1alpha1.ResourceExportList, error) {
+	if err := c.initClient(); err != nil {
+		return nil, err
+	}
+	return c.kdmp.KdmpV1alpha1().ResourceExports(namespace).List(context.TODO(), filterOptions)
+}
+
+// DeleteResourceExport deletes the ResourceExport CR
+func (c *Client) DeleteResourceExport(name string, namespace string) error {
+	if err := c.initClient(); err != nil {
+		return err
+	}
+	return c.kdmp.KdmpV1alpha1().ResourceExports(namespace).Delete(context.TODO(), name, metav1.DeleteOptions{
+		PropagationPolicy: &deleteForegroundPolicy,
+	})
+}
+
+// UpdateResourceExport updates the ResourceExport CR
+func (c *Client) UpdateResourceExport(export *kdmpv1alpha1.ResourceExport) (*kdmpv1alpha1.ResourceExport, error) {
+	if err := c.initClient(); err != nil {
+		return nil, err
+	}
+	return c.kdmp.KdmpV1alpha1().ResourceExports(export.Namespace).Update(context.TODO(), export, metav1.UpdateOptions{})
+}
diff --git a/vendor/github.com/portworx/sched-ops/k8s/networking/networking.go b/vendor/github.com/portworx/sched-ops/k8s/networking/networking.go
index 3cdb6205e2..0a40740bb1 100644
--- a/vendor/github.com/portworx/sched-ops/k8s/networking/networking.go
+++ b/vendor/github.com/portworx/sched-ops/k8s/networking/networking.go
@@ -5,6 +5,7 @@ import (
	"os"
	"sync"

+	"github.com/portworx/sched-ops/k8s/common"
	networkingv1betaclient "k8s.io/client-go/kubernetes/typed/networking/v1beta1"
	"k8s.io/client-go/rest"
	"k8s.io/client-go/tools/clientcmd"
@@ -135,7 +136,10 @@ func (c *Client) loadClient() error {
	}

	var err error
-
+	err = common.SetRateLimiter(c.config)
+	if err != nil {
+		return err
+	}
	c.networking, err = networkingv1betaclient.NewForConfig(c.config)
	if err != nil {
		return err
diff --git a/vendor/github.com/portworx/sched-ops/k8s/openshift/openshift.go b/vendor/github.com/portworx/sched-ops/k8s/openshift/openshift.go
index 8aeb1a561e..7c165a78b3 100644
--- a/vendor/github.com/portworx/sched-ops/k8s/openshift/openshift.go
+++ b/vendor/github.com/portworx/sched-ops/k8s/openshift/openshift.go
@@ -8,6 +8,7 @@ import (
	ocpclientset "github.com/openshift/client-go/apps/clientset/versioned"
	ocpconfigclientset "github.com/openshift/client-go/config/clientset/versioned"
	ocpsecurityclientset "github.com/openshift/client-go/security/clientset/versioned"
+	"github.com/portworx/sched-ops/k8s/common"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/rest"
@@ -172,7 +173,10 @@ func (c *Client) loadClient() error {
	}

	var err error
-
+	err = common.SetRateLimiter(c.config)
+	if err != nil {
+		return err
+	}
	c.kube, err = kubernetes.NewForConfig(c.config)
	if err != nil {
		return err
diff --git a/vendor/github.com/portworx/sched-ops/k8s/operator/operator.go b/vendor/github.com/portworx/sched-ops/k8s/operator/operator.go
index 5106e1ef01..84704a1560 100644
--- a/vendor/github.com/portworx/sched-ops/k8s/operator/operator.go
+++ b/vendor/github.com/portworx/sched-ops/k8s/operator/operator.go
@@ -6,6 +6,7 @@ import (
	"sync"

	ostclientset "github.com/libopenstorage/operator/pkg/client/clientset/versioned"
+	"github.com/portworx/sched-ops/k8s/common"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/rest"
	"k8s.io/client-go/tools/clientcmd"
@@ -139,7 +140,10 @@ func (c *Client) loadClient() error {
	}

	var err
error - + err = common.SetRateLimiter(c.config) + if err != nil { + return err + } c.ost, err = ostclientset.NewForConfig(c.config) if err != nil { return err diff --git a/vendor/github.com/portworx/sched-ops/k8s/policy/policy.go b/vendor/github.com/portworx/sched-ops/k8s/policy/policy.go index 46b3a5d4ca..ee188c5218 100644 --- a/vendor/github.com/portworx/sched-ops/k8s/policy/policy.go +++ b/vendor/github.com/portworx/sched-ops/k8s/policy/policy.go @@ -5,6 +5,7 @@ import ( "os" "sync" + "github.com/portworx/sched-ops/k8s/common" policyv1beta1client "k8s.io/client-go/kubernetes/typed/policy/v1beta1" "k8s.io/client-go/rest" "k8s.io/client-go/tools/clientcmd" @@ -136,7 +137,10 @@ func (c *Client) loadClient() error { } var err error - + err = common.SetRateLimiter(c.config) + if err != nil { + return err + } c.policy, err = policyv1beta1client.NewForConfig(c.config) if err != nil { return err diff --git a/vendor/github.com/portworx/sched-ops/k8s/prometheus/prometheus.go b/vendor/github.com/portworx/sched-ops/k8s/prometheus/prometheus.go index b88a98a52a..4d8b42d557 100644 --- a/vendor/github.com/portworx/sched-ops/k8s/prometheus/prometheus.go +++ b/vendor/github.com/portworx/sched-ops/k8s/prometheus/prometheus.go @@ -5,6 +5,7 @@ import ( "os" "sync" + "github.com/portworx/sched-ops/k8s/common" prometheusclient "github.com/prometheus-operator/prometheus-operator/pkg/client/versioned" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/client-go/rest" @@ -143,7 +144,10 @@ func (c *Client) loadClient() error { } var err error - + err = common.SetRateLimiter(c.config) + if err != nil { + return err + } c.prometheus, err = prometheusclient.NewForConfig(c.config) if err != nil { return err diff --git a/vendor/github.com/portworx/sched-ops/k8s/rbac/rbac.go b/vendor/github.com/portworx/sched-ops/k8s/rbac/rbac.go index 1b1784a3f8..aac939c6fb 100644 --- a/vendor/github.com/portworx/sched-ops/k8s/rbac/rbac.go +++ b/vendor/github.com/portworx/sched-ops/k8s/rbac/rbac.go @@ -5,6 +5,7 @@ import ( "os" "sync" + "github.com/portworx/sched-ops/k8s/common" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" rbacv1client "k8s.io/client-go/kubernetes/typed/rbac/v1" "k8s.io/client-go/rest" @@ -141,7 +142,10 @@ func (c *Client) loadClient() error { } var err error - + err = common.SetRateLimiter(c.config) + if err != nil { + return err + } c.rbac, err = rbacv1client.NewForConfig(c.config) if err != nil { return err diff --git a/vendor/github.com/portworx/sched-ops/k8s/storage/storage.go b/vendor/github.com/portworx/sched-ops/k8s/storage/storage.go index f62584b4cc..740853443e 100644 --- a/vendor/github.com/portworx/sched-ops/k8s/storage/storage.go +++ b/vendor/github.com/portworx/sched-ops/k8s/storage/storage.go @@ -5,6 +5,7 @@ import ( "os" "sync" + "github.com/portworx/sched-ops/k8s/common" storagev1client "k8s.io/client-go/kubernetes/typed/storage/v1" "k8s.io/client-go/rest" "k8s.io/client-go/tools/clientcmd" @@ -136,6 +137,10 @@ func (c *Client) loadClient() error { } var err error + err = common.SetRateLimiter(c.config) + if err != nil { + return err + } c.storage, err = storagev1client.NewForConfig(c.config) if err != nil { return err diff --git a/vendor/github.com/portworx/sched-ops/k8s/stork/resourcetransformation.go b/vendor/github.com/portworx/sched-ops/k8s/stork/resourcetransformation.go new file mode 100644 index 0000000000..22ef5bfcb3 --- /dev/null +++ b/vendor/github.com/portworx/sched-ops/k8s/stork/resourcetransformation.go @@ -0,0 +1,113 @@ +package stork + +import ( + "context" + "fmt" + "time" + + 
storkv1alpha1 "github.com/libopenstorage/stork/pkg/apis/stork/v1alpha1"
+	"github.com/portworx/sched-ops/k8s/errors"
+	"github.com/portworx/sched-ops/task"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+)
+
+// ResourceTransformOps is an interface to perform k8s ResourceTransformation CR operations
+type ResourceTransformOps interface {
+	// CreateResourceTransformation creates the ResourceTransformation
+	CreateResourceTransformation(*storkv1alpha1.ResourceTransformation) (*storkv1alpha1.ResourceTransformation, error)
+	// GetResourceTransformation gets the ResourceTransformation
+	GetResourceTransformation(string, string) (*storkv1alpha1.ResourceTransformation, error)
+	// ListResourceTransformations lists all the ResourceTransformations
+	ListResourceTransformations(namespace string, filterOptions metav1.ListOptions) (*storkv1alpha1.ResourceTransformationList, error)
+	// UpdateResourceTransformation updates the ResourceTransformation
+	UpdateResourceTransformation(*storkv1alpha1.ResourceTransformation) (*storkv1alpha1.ResourceTransformation, error)
+	// DeleteResourceTransformation deletes the ResourceTransformation
+	DeleteResourceTransformation(string, string) error
+	// ValidateResourceTransformation validates resource transformation status
+	ValidateResourceTransformation(string, string, time.Duration, time.Duration) error
+}
+
+// CreateResourceTransformation creates the ResourceTransformation CR
+func (c *Client) CreateResourceTransformation(transformation *storkv1alpha1.ResourceTransformation) (*storkv1alpha1.ResourceTransformation, error) {
+	if err := c.initClient(); err != nil {
+		return nil, err
+	}
+	return c.stork.StorkV1alpha1().ResourceTransformations(transformation.Namespace).Create(context.TODO(), transformation, metav1.CreateOptions{})
+}
+
+// GetResourceTransformation gets the ResourceTransformation CR
+func (c *Client) GetResourceTransformation(name string, namespace string) (*storkv1alpha1.ResourceTransformation, error) {
+	if err := c.initClient(); err != nil {
+		return nil, err
+	}
+	transformation, err := c.stork.StorkV1alpha1().ResourceTransformations(namespace).Get(context.TODO(), name, metav1.GetOptions{})
+	if err != nil {
+		return nil, err
+	}
+	return transformation, nil
+}
+
+// ListResourceTransformations lists all the ResourceTransformation CRs
+func (c *Client) ListResourceTransformations(namespace string, filterOptions metav1.ListOptions) (*storkv1alpha1.ResourceTransformationList, error) {
+	if err := c.initClient(); err != nil {
+		return nil, err
+	}
+	transformations, err := c.stork.StorkV1alpha1().ResourceTransformations(namespace).List(context.TODO(), filterOptions)
+	if err != nil {
+		return nil, err
+	}
+	return transformations, nil
+}
+
+// UpdateResourceTransformation updates the ResourceTransformation CR
+func (c *Client) UpdateResourceTransformation(transformation *storkv1alpha1.ResourceTransformation) (*storkv1alpha1.ResourceTransformation, error) {
+	if err := c.initClient(); err != nil {
+		return nil, err
+	}
+	return c.stork.StorkV1alpha1().ResourceTransformations(transformation.Namespace).Update(context.TODO(), transformation, metav1.UpdateOptions{})
+}
+
+// DeleteResourceTransformation deletes the ResourceTransformation CR
+func (c *Client) DeleteResourceTransformation(name string, namespace string) error {
+	if err := c.initClient(); err != nil {
+		return err
+	}
+	return c.stork.StorkV1alpha1().ResourceTransformations(namespace).Delete(context.TODO(),
name, metav1.DeleteOptions{ + PropagationPolicy: &deleteForegroundPolicy, + }) +} + +// ValidateResourceTransformation validates ResourceTransformation CR status +func (c *Client) ValidateResourceTransformation(name string, namespace string, timeout, retryInterval time.Duration) error { + if err := c.initClient(); err != nil { + return err + } + t := func() (interface{}, bool, error) { + transform, err := c.GetResourceTransformation(name, namespace) + if err != nil { + return "", true, err + } + + if transform.Status.Status == storkv1alpha1.ResourceTransformationStatusReady { + return "", false, nil + } else if transform.Status.Status == storkv1alpha1.ResourceTransformationStatusFailed { + return "", true, &errors.ErrFailedToValidateCustomSpec{ + Name: name, + Cause: fmt.Sprintf("Status: %v \t Resource Spec: %v", transform.Status.Status, transform.Status.Resources), + Type: transform, + } + } + + return "", true, &errors.ErrFailedToValidateCustomSpec{ + Name: name, + Cause: fmt.Sprintf("Status: %v", transform.Status.Status), + Type: transform, + } + } + + if _, err := task.DoRetryWithTimeout(t, timeout, retryInterval); err != nil { + return err + } + + return nil +} diff --git a/vendor/github.com/portworx/sched-ops/k8s/stork/stork.go b/vendor/github.com/portworx/sched-ops/k8s/stork/stork.go index 61e1127dbb..ec58e8465b 100644 --- a/vendor/github.com/portworx/sched-ops/k8s/stork/stork.go +++ b/vendor/github.com/portworx/sched-ops/k8s/stork/stork.go @@ -10,6 +10,7 @@ import ( snapclient "github.com/kubernetes-incubator/external-storage/snapshot/pkg/client" storkv1 "github.com/libopenstorage/stork/pkg/apis/stork/v1alpha1" storkclientset "github.com/libopenstorage/stork/pkg/client/clientset/versioned" + "github.com/portworx/sched-ops/k8s/common" "github.com/portworx/sched-ops/task" "github.com/sirupsen/logrus" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -42,6 +43,7 @@ type Ops interface { ApplicationCloneOps VolumeSnapshotRestoreOps ApplicationRegistrationOps + ResourceTransformOps // SetConfig sets the config and resets the client SetConfig(config *rest.Config) @@ -174,7 +176,10 @@ func (c *Client) loadClient() error { } var err error - + err = common.SetRateLimiter(c.config) + if err != nil { + return err + } c.kube, err = kubernetes.NewForConfig(c.config) if err != nil { return err diff --git a/vendor/github.com/prometheus/client_golang/prometheus/collectors/collectors.go b/vendor/github.com/prometheus/client_golang/prometheus/collectors/collectors.go new file mode 100644 index 0000000000..c4d0f5c35b --- /dev/null +++ b/vendor/github.com/prometheus/client_golang/prometheus/collectors/collectors.go @@ -0,0 +1,16 @@ +// Copyright 2021 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package collectors provides implementations of prometheus.Collector to +// conveniently collect process and Go-related metrics. 
+package collectors diff --git a/vendor/github.com/prometheus/client_golang/prometheus/collectors/dbstats_collector.go b/vendor/github.com/prometheus/client_golang/prometheus/collectors/dbstats_collector.go new file mode 100644 index 0000000000..e09f149d76 --- /dev/null +++ b/vendor/github.com/prometheus/client_golang/prometheus/collectors/dbstats_collector.go @@ -0,0 +1,119 @@ +// Copyright 2021 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package collectors + +import ( + "database/sql" + + "github.com/prometheus/client_golang/prometheus" +) + +type dbStatsCollector struct { + db *sql.DB + + maxOpenConnections *prometheus.Desc + + openConnections *prometheus.Desc + inUseConnections *prometheus.Desc + idleConnections *prometheus.Desc + + waitCount *prometheus.Desc + waitDuration *prometheus.Desc + maxIdleClosed *prometheus.Desc + maxIdleTimeClosed *prometheus.Desc + maxLifetimeClosed *prometheus.Desc +} + +// NewDBStatsCollector returns a collector that exports metrics about the given *sql.DB. +// See https://golang.org/pkg/database/sql/#DBStats for more information on stats. +func NewDBStatsCollector(db *sql.DB, dbName string) prometheus.Collector { + fqName := func(name string) string { + return "go_sql_" + name + } + return &dbStatsCollector{ + db: db, + maxOpenConnections: prometheus.NewDesc( + fqName("max_open_connections"), + "Maximum number of open connections to the database.", + nil, prometheus.Labels{"db_name": dbName}, + ), + openConnections: prometheus.NewDesc( + fqName("open_connections"), + "The number of established connections both in use and idle.", + nil, prometheus.Labels{"db_name": dbName}, + ), + inUseConnections: prometheus.NewDesc( + fqName("in_use_connections"), + "The number of connections currently in use.", + nil, prometheus.Labels{"db_name": dbName}, + ), + idleConnections: prometheus.NewDesc( + fqName("idle_connections"), + "The number of idle connections.", + nil, prometheus.Labels{"db_name": dbName}, + ), + waitCount: prometheus.NewDesc( + fqName("wait_count_total"), + "The total number of connections waited for.", + nil, prometheus.Labels{"db_name": dbName}, + ), + waitDuration: prometheus.NewDesc( + fqName("wait_duration_seconds_total"), + "The total time blocked waiting for a new connection.", + nil, prometheus.Labels{"db_name": dbName}, + ), + maxIdleClosed: prometheus.NewDesc( + fqName("max_idle_closed_total"), + "The total number of connections closed due to SetMaxIdleConns.", + nil, prometheus.Labels{"db_name": dbName}, + ), + maxIdleTimeClosed: prometheus.NewDesc( + fqName("max_idle_time_closed_total"), + "The total number of connections closed due to SetConnMaxIdleTime.", + nil, prometheus.Labels{"db_name": dbName}, + ), + maxLifetimeClosed: prometheus.NewDesc( + fqName("max_lifetime_closed_total"), + "The total number of connections closed due to SetConnMaxLifetime.", + nil, prometheus.Labels{"db_name": dbName}, + ), + } +} + +// Describe implements Collector. 
+func (c *dbStatsCollector) Describe(ch chan<- *prometheus.Desc) { + ch <- c.maxOpenConnections + ch <- c.openConnections + ch <- c.inUseConnections + ch <- c.idleConnections + ch <- c.waitCount + ch <- c.waitDuration + ch <- c.maxIdleClosed + ch <- c.maxLifetimeClosed + c.describeNewInGo115(ch) +} + +// Collect implements Collector. +func (c *dbStatsCollector) Collect(ch chan<- prometheus.Metric) { + stats := c.db.Stats() + ch <- prometheus.MustNewConstMetric(c.maxOpenConnections, prometheus.GaugeValue, float64(stats.MaxOpenConnections)) + ch <- prometheus.MustNewConstMetric(c.openConnections, prometheus.GaugeValue, float64(stats.OpenConnections)) + ch <- prometheus.MustNewConstMetric(c.inUseConnections, prometheus.GaugeValue, float64(stats.InUse)) + ch <- prometheus.MustNewConstMetric(c.idleConnections, prometheus.GaugeValue, float64(stats.Idle)) + ch <- prometheus.MustNewConstMetric(c.waitCount, prometheus.CounterValue, float64(stats.WaitCount)) + ch <- prometheus.MustNewConstMetric(c.waitDuration, prometheus.CounterValue, stats.WaitDuration.Seconds()) + ch <- prometheus.MustNewConstMetric(c.maxIdleClosed, prometheus.CounterValue, float64(stats.MaxIdleClosed)) + ch <- prometheus.MustNewConstMetric(c.maxLifetimeClosed, prometheus.CounterValue, float64(stats.MaxLifetimeClosed)) + c.collectNewInGo115(ch, stats) +} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/collectors/dbstats_collector_go115.go b/vendor/github.com/prometheus/client_golang/prometheus/collectors/dbstats_collector_go115.go new file mode 100644 index 0000000000..a6e6268ce3 --- /dev/null +++ b/vendor/github.com/prometheus/client_golang/prometheus/collectors/dbstats_collector_go115.go @@ -0,0 +1,30 @@ +// Copyright 2021 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// +build go1.15 + +package collectors + +import ( + "database/sql" + + "github.com/prometheus/client_golang/prometheus" +) + +func (c *dbStatsCollector) describeNewInGo115(ch chan<- *prometheus.Desc) { + ch <- c.maxIdleTimeClosed +} + +func (c *dbStatsCollector) collectNewInGo115(ch chan<- prometheus.Metric, stats sql.DBStats) { + ch <- prometheus.MustNewConstMetric(c.maxIdleTimeClosed, prometheus.CounterValue, float64(stats.MaxIdleTimeClosed)) +} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/collectors/dbstats_collector_pre_go115.go b/vendor/github.com/prometheus/client_golang/prometheus/collectors/dbstats_collector_pre_go115.go new file mode 100644 index 0000000000..0568affe29 --- /dev/null +++ b/vendor/github.com/prometheus/client_golang/prometheus/collectors/dbstats_collector_pre_go115.go @@ -0,0 +1,26 @@ +// Copyright 2021 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
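The dbstats collector above (with its Go 1.15 build-tag split) is wired up with a single registration call. A minimal sketch against the default registry; the driver name and DSN are placeholders, and a real program would need to import a matching SQL driver:

```go
package main

import (
	"database/sql"
	"log"
	"net/http"

	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/collectors"
	"github.com/prometheus/client_golang/prometheus/promhttp"
)

func main() {
	// Placeholder driver/DSN; sql.Open fails here unless a driver package
	// registering "postgres" is imported elsewhere.
	db, err := sql.Open("postgres", "postgres://localhost/app?sslmode=disable")
	if err != nil {
		log.Fatal(err)
	}
	// Every metric carries a db_name="app" label, so one collector per
	// connection pool can be registered side by side.
	prometheus.MustRegister(collectors.NewDBStatsCollector(db, "app"))

	http.Handle("/metrics", promhttp.Handler())
	log.Fatal(http.ListenAndServe(":8080", nil))
}
```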
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// +build !go1.15 + +package collectors + +import ( + "database/sql" + + "github.com/prometheus/client_golang/prometheus" +) + +func (c *dbStatsCollector) describeNewInGo115(ch chan<- *prometheus.Desc) {} + +func (c *dbStatsCollector) collectNewInGo115(ch chan<- prometheus.Metric, stats sql.DBStats) {} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/collectors/expvar_collector.go b/vendor/github.com/prometheus/client_golang/prometheus/collectors/expvar_collector.go new file mode 100644 index 0000000000..3aa8d0590b --- /dev/null +++ b/vendor/github.com/prometheus/client_golang/prometheus/collectors/expvar_collector.go @@ -0,0 +1,57 @@ +// Copyright 2021 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package collectors + +import "github.com/prometheus/client_golang/prometheus" + +// NewExpvarCollector returns a newly allocated expvar Collector. +// +// An expvar Collector collects metrics from the expvar interface. It provides a +// quick way to expose numeric values that are already exported via expvar as +// Prometheus metrics. Note that the data models of expvar and Prometheus are +// fundamentally different, and that the expvar Collector is inherently slower +// than native Prometheus metrics. Thus, the expvar Collector is probably great +// for experiments and prototyping, but you should seriously consider a more +// direct implementation of Prometheus metrics for monitoring production +// systems. +// +// The exports map has the following meaning: +// +// The keys in the map correspond to expvar keys, i.e. for every expvar key you +// want to export as Prometheus metric, you need an entry in the exports +// map. The descriptor mapped to each key describes how to export the expvar +// value. It defines the name and the help string of the Prometheus metric +// proxying the expvar value. The type will always be Untyped. +// +// For descriptors without variable labels, the expvar value must be a number or +// a bool. The number is then directly exported as the Prometheus sample +// value. (For a bool, 'false' translates to 0 and 'true' to 1). Expvar values +// that are not numbers or bools are silently ignored. +// +// If the descriptor has one variable label, the expvar value must be an expvar +// map. The keys in the expvar map become the various values of the one +// Prometheus label. The values in the expvar map must be numbers or bools again +// as above. +// +// For descriptors with more than one variable label, the expvar must be a +// nested expvar map, i.e.
where the values of the topmost map are maps again +// etc. until a depth is reached that corresponds to the number of labels. The +// leaves of that structure must be numbers or bools as above to serve as the +// sample values. +// +// Anything that does not fit into the scheme above is silently ignored. +func NewExpvarCollector(exports map[string]*prometheus.Desc) prometheus.Collector { + //nolint:staticcheck // Ignore SA1019 until v2. + return prometheus.NewExpvarCollector(exports) +} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/collectors/go_collector.go b/vendor/github.com/prometheus/client_golang/prometheus/collectors/go_collector.go new file mode 100644 index 0000000000..edaa4e50b7 --- /dev/null +++ b/vendor/github.com/prometheus/client_golang/prometheus/collectors/go_collector.go @@ -0,0 +1,69 @@ +// Copyright 2021 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package collectors + +import "github.com/prometheus/client_golang/prometheus" + +// NewGoCollector returns a collector that exports metrics about the current Go +// process. This includes memory stats. To collect those, runtime.ReadMemStats +// is called. This requires a “stop the world” pause, which usually only happens for +// garbage collection (GC). Take the following implications into account when +// deciding whether to use the Go collector: +// +// 1. The performance impact of stopping the world grows with how frequently +// metrics are collected. However, with Go1.9 or later the +// stop-the-world time per metrics collection is very short (~25µs) so that the +// performance impact will only matter in rare cases. However, with older Go +// versions, the stop-the-world duration depends on the heap size and can be +// quite significant (~1.7 ms/GiB as per +// https://go-review.googlesource.com/c/go/+/34937). +// +// 2. During an ongoing GC, nothing else can stop the world. Therefore, if the +// metrics collection happens to coincide with GC, it will only complete after +// GC has finished. Usually, GC is fast enough to not cause problems. However, +// with a very large heap, GC might take multiple seconds, which is enough to +// cause scrape timeouts in common setups. To avoid this problem, the Go +// collector will use the memstats from a previous collection if +// runtime.ReadMemStats takes more than 1s. However, if there are no previously +// collected memstats, or their collection is more than 5m ago, the collection +// will block until runtime.ReadMemStats succeeds. +// +// NOTE: The problem is solved in Go 1.15, see +// https://github.com/golang/go/issues/19812 for the related Go issue. +func NewGoCollector() prometheus.Collector { + //nolint:staticcheck // Ignore SA1019 until v2. + return prometheus.NewGoCollector() +} + +// NewBuildInfoCollector returns a collector collecting a single metric +// "go_build_info" with the constant value 1 and three labels "path", "version", +// and "checksum".
Their label values contain the main module path, version, and +// checksum, respectively. The labels will only have meaningful values if the +// binary is built with Go module support and from source code retrieved from +// the source repository (rather than the local file system). This is usually +// accomplished by building from outside of GOPATH, specifying the full address +// of the main package, e.g. "GO111MODULE=on go run +// github.com/prometheus/client_golang/examples/random". If built without Go +// module support, all label values will be "unknown". If built with Go module +// support but using the source code from the local file system, the "path" will +// be set appropriately, but "checksum" will be empty and "version" will be +// "(devel)". +// +// This collector uses only the build information for the main module. See +// https://github.com/povilasv/prommod for an example of a collector for the +// module dependencies. +func NewBuildInfoCollector() prometheus.Collector { + //nolint:staticcheck // Ignore SA1019 until v2. + return prometheus.NewBuildInfoCollector() +} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/collectors/process_collector.go b/vendor/github.com/prometheus/client_golang/prometheus/collectors/process_collector.go new file mode 100644 index 0000000000..24558f50a7 --- /dev/null +++ b/vendor/github.com/prometheus/client_golang/prometheus/collectors/process_collector.go @@ -0,0 +1,56 @@ +// Copyright 2021 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package collectors + +import "github.com/prometheus/client_golang/prometheus" + +// ProcessCollectorOpts defines the behavior of a process metrics collector +// created with NewProcessCollector. +type ProcessCollectorOpts struct { + // PidFn returns the PID of the process the collector collects metrics + // for. It is called upon each collection. By default, the PID of the + // current process is used, as determined on construction time by + // calling os.Getpid(). + PidFn func() (int, error) + // If non-empty, each of the collected metrics is prefixed by the + // provided string and an underscore ("_"). + Namespace string + // If true, any error encountered during collection is reported as an + // invalid metric (see NewInvalidMetric). Otherwise, errors are ignored + // and the collected metrics will be incomplete. (Possibly, no metrics + // will be collected at all.) While that's usually not desired, it is + // appropriate for the common "mix-in" of process metrics, where process + // metrics are nice to have, but failing to collect them should not + // disrupt the collection of the remaining metrics. + ReportErrors bool +} + +// NewProcessCollector returns a collector which exports the current state of +// process metrics including CPU, memory and file descriptor usage as well as +// the process start time. The detailed behavior is defined by the provided +// ProcessCollectorOpts. 
The zero value of ProcessCollectorOpts creates a +// collector for the current process with an empty namespace string and no error +// reporting. +// +// The collector only works on operating systems with a Linux-style proc +// filesystem and on Microsoft Windows. On other operating systems, it will not +// collect any metrics. +func NewProcessCollector(opts ProcessCollectorOpts) prometheus.Collector { + //nolint:staticcheck // Ignore SA1019 until v2. + return prometheus.NewProcessCollector(prometheus.ProcessCollectorOpts{ + PidFn: opts.PidFn, + Namespace: opts.Namespace, + ReportErrors: opts.ReportErrors, + }) +} diff --git a/vendor/golang.org/x/time/rate/rate.go b/vendor/golang.org/x/time/rate/rate.go index a98fe77827..0cfcc8463c 100644 --- a/vendor/golang.org/x/time/rate/rate.go +++ b/vendor/golang.org/x/time/rate/rate.go @@ -145,7 +145,6 @@ func (r *Reservation) DelayFrom(now time.Time) time.Duration { // Cancel is shorthand for CancelAt(time.Now()). func (r *Reservation) Cancel() { r.CancelAt(time.Now()) - return } // CancelAt indicates that the reservation holder will not perform the reserved action @@ -186,8 +185,6 @@ func (r *Reservation) CancelAt(now time.Time) { r.lim.lastEvent = prevEvent } } - - return } // Reserve is shorthand for ReserveN(time.Now(), 1). @@ -367,20 +364,13 @@ func (lim *Limiter) advance(now time.Time) (newNow time.Time, newLast time.Time, last = now } - // Avoid making delta overflow below when last is very old. - maxElapsed := lim.limit.durationFromTokens(float64(lim.burst) - lim.tokens) - elapsed := now.Sub(last) - if elapsed > maxElapsed { - elapsed = maxElapsed - } - // Calculate the new number of tokens, due to time that passed. + elapsed := now.Sub(last) delta := lim.limit.tokensFromDuration(elapsed) tokens := lim.tokens + delta if burst := float64(lim.burst); tokens > burst { tokens = burst } - return now, last, tokens } @@ -388,15 +378,11 @@ func (lim *Limiter) advance(now time.Time) (newNow time.Time, newLast time.Time, // of time it takes to accumulate them at a rate of limit tokens per second. func (limit Limit) durationFromTokens(tokens float64) time.Duration { seconds := tokens / float64(limit) - return time.Nanosecond * time.Duration(1e9*seconds) + return time.Duration(float64(time.Second) * seconds) } // tokensFromDuration is a unit conversion function from a time duration to the number of tokens // which could be accumulated during that duration at a rate of limit tokens per second. func (limit Limit) tokensFromDuration(d time.Duration) float64 { - // Split the integer and fractional parts ourself to minimize rounding errors. - // See golang.org/issues/34861. - sec := float64(d/time.Second) * float64(limit) - nsec := float64(d%time.Second) * float64(limit) - return sec + nsec/1e9 + return d.Seconds() * float64(limit) } diff --git a/vendor/google.golang.org/grpc/CONTRIBUTING.md b/vendor/google.golang.org/grpc/CONTRIBUTING.md index cd03f8c768..52338d004c 100644 --- a/vendor/google.golang.org/grpc/CONTRIBUTING.md +++ b/vendor/google.golang.org/grpc/CONTRIBUTING.md @@ -53,9 +53,8 @@ How to get your contributions merged smoothly and quickly. - **All tests need to be passing** before your change can be merged. We recommend you **run tests locally** before creating your PR to catch breakages early on. 
- - `make all` to test everything, OR - - `make vet` to catch vet errors - - `make test` to run the tests - - `make testrace` to run tests in race mode + - `VET_SKIP_PROTO=1 ./vet.sh` to catch vet errors + - `go test -cpu 1,4 -timeout 7m ./...` to run the tests + - `go test -race -cpu 1,4 -timeout 7m ./...` to run tests in race mode - Exceptions to the rules can be made if there's a compelling reason for doing so. diff --git a/vendor/google.golang.org/grpc/attributes/attributes.go b/vendor/google.golang.org/grpc/attributes/attributes.go index 6ff2792ee4..ae13ddac14 100644 --- a/vendor/google.golang.org/grpc/attributes/attributes.go +++ b/vendor/google.golang.org/grpc/attributes/attributes.go @@ -69,7 +69,9 @@ func (a *Attributes) Value(key interface{}) interface{} { // bool' is implemented for a value in the attributes, it is called to // determine if the value matches the one stored in the other attributes. If // Equal is not implemented, standard equality is used to determine if the two -// values are equal. +// values are equal. Note that some types (e.g. maps) aren't comparable by +// default, so they must be wrapped in a struct, or in an alias type, with Equal +// defined. func (a *Attributes) Equal(o *Attributes) bool { if a == nil && o == nil { return true diff --git a/vendor/google.golang.org/grpc/balancer/balancer.go b/vendor/google.golang.org/grpc/balancer/balancer.go index bcc6f5451c..f7a7697cad 100644 --- a/vendor/google.golang.org/grpc/balancer/balancer.go +++ b/vendor/google.golang.org/grpc/balancer/balancer.go @@ -27,6 +27,7 @@ import ( "net" "strings" + "google.golang.org/grpc/channelz" "google.golang.org/grpc/connectivity" "google.golang.org/grpc/credentials" "google.golang.org/grpc/internal" @@ -192,7 +193,7 @@ type BuildOptions struct { // server can ignore this field. Authority string // ChannelzParentID is the parent ClientConn's channelz ID. - ChannelzParentID int64 + ChannelzParentID *channelz.Identifier // CustomUserAgent is the custom user agent set on the parent ClientConn. // The balancer should set the same custom user agent if it creates a // ClientConn. diff --git a/vendor/google.golang.org/grpc/balancer/base/balancer.go b/vendor/google.golang.org/grpc/balancer/base/balancer.go index a67074a3ad..e8dfc828aa 100644 --- a/vendor/google.golang.org/grpc/balancer/base/balancer.go +++ b/vendor/google.golang.org/grpc/balancer/base/balancer.go @@ -45,6 +45,7 @@ func (bb *baseBuilder) Build(cc balancer.ClientConn, opt balancer.BuildOptions) scStates: make(map[balancer.SubConn]connectivity.State), csEvltr: &balancer.ConnectivityStateEvaluator{}, config: bb.config, + state: connectivity.Connecting, } // Initialize picker to a picker that always returns // ErrNoSubConnAvailable, because when state of a SubConn changes, we @@ -134,6 +135,9 @@ func (b *baseBalancer) UpdateClientConnState(s balancer.ClientConnState) error { b.ResolverError(errors.New("produced zero addresses")) return balancer.ErrBadResolverState } + + b.regeneratePicker() + b.cc.UpdateState(balancer.State{ConnectivityState: b.state, Picker: b.picker}) return nil } diff --git a/vendor/google.golang.org/grpc/balancer/grpclb/grpc_lb_v1/load_balancer_grpc.pb.go b/vendor/google.golang.org/grpc/balancer/grpclb/grpc_lb_v1/load_balancer_grpc.pb.go index 50cc9da4a9..cb4b3c203c 100644 --- a/vendor/google.golang.org/grpc/balancer/grpclb/grpc_lb_v1/load_balancer_grpc.pb.go +++ b/vendor/google.golang.org/grpc/balancer/grpclb/grpc_lb_v1/load_balancer_grpc.pb.go @@ -1,6 +1,6 @@ // Code generated by protoc-gen-go-grpc. 
DO NOT EDIT. // versions: -// - protoc-gen-go-grpc v1.1.0 +// - protoc-gen-go-grpc v1.2.0 // - protoc v3.14.0 // source: grpc/lb/v1/load_balancer.proto diff --git a/vendor/google.golang.org/grpc/balancer/grpclb/grpclb.go b/vendor/google.golang.org/grpc/balancer/grpclb/grpclb.go index fe423af182..6c3402e36c 100644 --- a/vendor/google.golang.org/grpc/balancer/grpclb/grpclb.go +++ b/vendor/google.golang.org/grpc/balancer/grpclb/grpclb.go @@ -413,8 +413,8 @@ func (lb *lbBalancer) handleServiceConfig(gc *grpclbServiceConfig) { // this target is sent in the first message on the stream. if gc != nil { target := lb.dialTarget - if gc.TargetName != "" { - target = gc.TargetName + if gc.ServiceName != "" { + target = gc.ServiceName } if target != lb.target { lb.target = target diff --git a/vendor/google.golang.org/grpc/balancer/grpclb/grpclb_config.go b/vendor/google.golang.org/grpc/balancer/grpclb/grpclb_config.go index b4e23dee01..8942c31310 100644 --- a/vendor/google.golang.org/grpc/balancer/grpclb/grpclb_config.go +++ b/vendor/google.golang.org/grpc/balancer/grpclb/grpclb_config.go @@ -34,7 +34,7 @@ const ( type grpclbServiceConfig struct { serviceconfig.LoadBalancingConfig ChildPolicy *[]map[string]json.RawMessage - TargetName string + ServiceName string } func (b *lbBuilder) ParseConfig(lbConfig json.RawMessage) (serviceconfig.LoadBalancingConfig, error) { diff --git a/vendor/google.golang.org/grpc/balancer/grpclb/grpclb_remote_balancer.go b/vendor/google.golang.org/grpc/balancer/grpclb/grpclb_remote_balancer.go index 330df4baa2..dab1959418 100644 --- a/vendor/google.golang.org/grpc/balancer/grpclb/grpclb_remote_balancer.go +++ b/vendor/google.golang.org/grpc/balancer/grpclb/grpclb_remote_balancer.go @@ -33,8 +33,8 @@ import ( "google.golang.org/grpc/balancer" lbpb "google.golang.org/grpc/balancer/grpclb/grpc_lb_v1" "google.golang.org/grpc/connectivity" + "google.golang.org/grpc/credentials/insecure" "google.golang.org/grpc/internal/backoff" - "google.golang.org/grpc/internal/channelz" imetadata "google.golang.org/grpc/internal/metadata" "google.golang.org/grpc/keepalive" "google.golang.org/grpc/metadata" @@ -228,7 +228,7 @@ func (lb *lbBalancer) newRemoteBalancerCCWrapper() { } else if bundle := lb.grpclbClientConnCreds; bundle != nil { dopts = append(dopts, grpc.WithCredentialsBundle(bundle)) } else { - dopts = append(dopts, grpc.WithInsecure()) + dopts = append(dopts, grpc.WithTransportCredentials(insecure.NewCredentials())) } if lb.opt.Dialer != nil { dopts = append(dopts, grpc.WithContextDialer(lb.opt.Dialer)) @@ -239,9 +239,7 @@ func (lb *lbBalancer) newRemoteBalancerCCWrapper() { // Explicitly set pickfirst as the balancer. dopts = append(dopts, grpc.WithDefaultServiceConfig(`{"loadBalancingPolicy":"pick_first"}`)) dopts = append(dopts, grpc.WithResolvers(lb.manualResolver)) - if channelz.IsOn() { - dopts = append(dopts, grpc.WithChannelzParentID(lb.opt.ChannelzParentID)) - } + dopts = append(dopts, grpc.WithChannelzParentID(lb.opt.ChannelzParentID)) // Enable Keepalive for grpclb client. 
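The grpclb config rename above (TargetName to ServiceName) is fed from service config JSON. A hedged dialing sketch: the target and service name are placeholders, and the JSON key serviceName is an assumption based on Go's case-insensitive field matching for the renamed struct field:

```go
package main

import (
	"log"

	"google.golang.org/grpc"
	_ "google.golang.org/grpc/balancer/grpclb" // registers the "grpclb" policy
	"google.golang.org/grpc/credentials/insecure"
)

func main() {
	conn, err := grpc.Dial(
		"dns:///lb.example.com:443", // placeholder target
		grpc.WithTransportCredentials(insecure.NewCredentials()),
		grpc.WithDefaultServiceConfig(
			`{"loadBalancingConfig": [{"grpclb": {"serviceName": "my-service"}}]}`),
	)
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()
}
```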
dopts = append(dopts, grpc.WithKeepaliveParams(keepalive.ClientParameters{ diff --git a/vendor/google.golang.org/grpc/balancer_conn_wrappers.go b/vendor/google.golang.org/grpc/balancer_conn_wrappers.go index f4ea617468..b1c23eaae0 100644 --- a/vendor/google.golang.org/grpc/balancer_conn_wrappers.go +++ b/vendor/google.golang.org/grpc/balancer_conn_wrappers.go @@ -20,130 +20,178 @@ package grpc import ( "fmt" + "strings" "sync" "google.golang.org/grpc/balancer" "google.golang.org/grpc/connectivity" + "google.golang.org/grpc/internal/balancer/gracefulswitch" "google.golang.org/grpc/internal/buffer" "google.golang.org/grpc/internal/channelz" "google.golang.org/grpc/internal/grpcsync" "google.golang.org/grpc/resolver" ) -// scStateUpdate contains the subConn and the new state it changed to. -type scStateUpdate struct { - sc balancer.SubConn - state connectivity.State - err error -} +// ccBalancerWrapper sits between the ClientConn and the Balancer. +// +// ccBalancerWrapper implements methods corresponding to the ones on the +// balancer.Balancer interface. The ClientConn is free to call these methods +// concurrently and the ccBalancerWrapper ensures that calls from the ClientConn +// to the Balancer happen synchronously and in order. +// +// ccBalancerWrapper also implements the balancer.ClientConn interface and is +// passed to the Balancer implementations. It invokes unexported methods on the +// ClientConn to handle these calls from the Balancer. +// +// It uses the gracefulswitch.Balancer internally to ensure that balancer +// switches happen in a graceful manner. +type ccBalancerWrapper struct { + cc *ClientConn -// exitIdle contains no data and is just a signal sent on the updateCh in -// ccBalancerWrapper to instruct the balancer to exit idle. -type exitIdle struct{} + // Since these fields are accessed only from handleXxx() methods which are + // synchronized by the watcher goroutine, we do not need a mutex to protect + // these fields. + balancer *gracefulswitch.Balancer + curBalancerName string -// ccBalancerWrapper is a wrapper on top of cc for balancers. -// It implements balancer.ClientConn interface. -type ccBalancerWrapper struct { - cc *ClientConn - balancerMu sync.Mutex // synchronizes calls to the balancer - balancer balancer.Balancer - hasExitIdle bool - updateCh *buffer.Unbounded - closed *grpcsync.Event - done *grpcsync.Event - - mu sync.Mutex - subConns map[*acBalancerWrapper]struct{} + updateCh *buffer.Unbounded // Updates written on this channel are processed by watcher(). + resultCh *buffer.Unbounded // Results of calls to UpdateClientConnState() are pushed here. + closed *grpcsync.Event // Indicates if close has been called. + done *grpcsync.Event // Indicates if close has completed its work. } -func newCCBalancerWrapper(cc *ClientConn, b balancer.Builder, bopts balancer.BuildOptions) *ccBalancerWrapper { +// newCCBalancerWrapper creates a new balancer wrapper. The underlying balancer +// is not created until the switchTo() method is invoked. 
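The serialization contract described above hinges on gRPC's internal buffer.Unbounded. The sketch below reimplements just enough of that contract (Put/Get/Load, single consumer, no locking; the real type lives in internal/buffer and is mutex-protected) to show why watcher() pairs every receive from Get() with a call to Load():

```go
package main

import "fmt"

// minimalUnbounded approximates the Get/Load contract of gRPC's internal
// buffer.Unbounded: Get exposes a 1-element channel holding the oldest
// item; Load stages the next backlogged item after each receive.
type minimalUnbounded struct {
	c       chan interface{}
	backlog []interface{}
}

func newMinimalUnbounded() *minimalUnbounded {
	return &minimalUnbounded{c: make(chan interface{}, 1)}
}

func (b *minimalUnbounded) Put(t interface{}) {
	if len(b.backlog) == 0 {
		select {
		case b.c <- t: // fast path: channel slot is free
			return
		default:
		}
	}
	b.backlog = append(b.backlog, t)
}

func (b *minimalUnbounded) Load() {
	if len(b.backlog) > 0 {
		select {
		case b.c <- b.backlog[0]:
			b.backlog = b.backlog[1:]
		default:
		}
	}
}

func (b *minimalUnbounded) Get() <-chan interface{} { return b.c }

func main() {
	b := newMinimalUnbounded()
	for i := 0; i < 3; i++ {
		b.Put(i) // producers never block
	}
	for i := 0; i < 3; i++ {
		u := <-b.Get() // receive the oldest update
		b.Load()       // stage the next one, exactly as watcher() does
		fmt.Println(u)
	}
}
```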
+func newCCBalancerWrapper(cc *ClientConn, bopts balancer.BuildOptions) *ccBalancerWrapper { ccb := &ccBalancerWrapper{ cc: cc, updateCh: buffer.NewUnbounded(), + resultCh: buffer.NewUnbounded(), closed: grpcsync.NewEvent(), done: grpcsync.NewEvent(), - subConns: make(map[*acBalancerWrapper]struct{}), } go ccb.watcher() - ccb.balancer = b.Build(ccb, bopts) - _, ccb.hasExitIdle = ccb.balancer.(balancer.ExitIdler) + ccb.balancer = gracefulswitch.NewBalancer(ccb, bopts) return ccb } -// watcher balancer functions sequentially, so the balancer can be implemented -// lock-free. +// The following xxxUpdate structs wrap the arguments received as part of the +// corresponding update. The watcher goroutine uses the 'type' of the update to +// invoke the appropriate handler routine to handle the update. + +type ccStateUpdate struct { + ccs *balancer.ClientConnState +} + +type scStateUpdate struct { + sc balancer.SubConn + state connectivity.State + err error +} + +type exitIdleUpdate struct{} + +type resolverErrorUpdate struct { + err error +} + +type switchToUpdate struct { + name string +} + +type subConnUpdate struct { + acbw *acBalancerWrapper +} + +// watcher is a long-running goroutine which reads updates from a channel and +// invokes corresponding methods on the underlying balancer. It ensures that +// these methods are invoked in a synchronous fashion. It also ensures that +// these methods are invoked in the order in which the updates were received. func (ccb *ccBalancerWrapper) watcher() { for { select { - case t := <-ccb.updateCh.Get(): + case u := <-ccb.updateCh.Get(): ccb.updateCh.Load() if ccb.closed.HasFired() { break } - switch u := t.(type) { + switch update := u.(type) { + case *ccStateUpdate: + ccb.handleClientConnStateChange(update.ccs) case *scStateUpdate: - ccb.balancerMu.Lock() - ccb.balancer.UpdateSubConnState(u.sc, balancer.SubConnState{ConnectivityState: u.state, ConnectionError: u.err}) - ccb.balancerMu.Unlock() - case *acBalancerWrapper: - ccb.mu.Lock() - if ccb.subConns != nil { - delete(ccb.subConns, u) - ccb.cc.removeAddrConn(u.getAddrConn(), errConnDrain) - } - ccb.mu.Unlock() - case exitIdle: - if ccb.cc.GetState() == connectivity.Idle { - if ei, ok := ccb.balancer.(balancer.ExitIdler); ok { - // We already checked that the balancer implements - // ExitIdle before pushing the event to updateCh, but - // check conditionally again as defensive programming. - ccb.balancerMu.Lock() - ei.ExitIdle() - ccb.balancerMu.Unlock() - } - } + ccb.handleSubConnStateChange(update) + case *exitIdleUpdate: + ccb.handleExitIdle() + case *resolverErrorUpdate: + ccb.handleResolverError(update.err) + case *switchToUpdate: + ccb.handleSwitchTo(update.name) + case *subConnUpdate: + ccb.handleRemoveSubConn(update.acbw) default: - logger.Errorf("ccBalancerWrapper.watcher: unknown update %+v, type %T", t, t) + logger.Errorf("ccBalancerWrapper.watcher: unknown update %+v, type %T", update, update) } case <-ccb.closed.Done(): } if ccb.closed.HasFired() { - ccb.balancerMu.Lock() - ccb.balancer.Close() - ccb.balancerMu.Unlock() - ccb.mu.Lock() - scs := ccb.subConns - ccb.subConns = nil - ccb.mu.Unlock() - ccb.UpdateState(balancer.State{ConnectivityState: connectivity.Connecting, Picker: nil}) - ccb.done.Fire() - // Fire done before removing the addr conns. We can safely unblock - // ccb.close and allow the removeAddrConns to happen - // asynchronously. 
- for acbw := range scs { - ccb.cc.removeAddrConn(acbw.getAddrConn(), errConnDrain) - } + ccb.handleClose() return } } } -func (ccb *ccBalancerWrapper) close() { - ccb.closed.Fire() - <-ccb.done.Done() +// updateClientConnState is invoked by grpc to push a ClientConnState update to +// the underlying balancer. +// +// Unlike other methods invoked by grpc to push updates to the underlying +// balancer, this method cannot simply push the update onto the update channel +// and return. It needs to return the error returned by the underlying balancer +// back to grpc which propagates that to the resolver. +func (ccb *ccBalancerWrapper) updateClientConnState(ccs *balancer.ClientConnState) error { + ccb.updateCh.Put(&ccStateUpdate{ccs: ccs}) + + var res interface{} + select { + case res = <-ccb.resultCh.Get(): + ccb.resultCh.Load() + case <-ccb.closed.Done(): + // Return early if the balancer wrapper is closed while we are waiting for + // the underlying balancer to process a ClientConnState update. + return nil + } + // If the returned error is nil, attempting to type assert to error leads to + // panic. So, this needs to be handled separately. + if res == nil { + return nil + } + return res.(error) } -func (ccb *ccBalancerWrapper) exitIdle() bool { - if !ccb.hasExitIdle { - return false +// handleClientConnStateChange handles a ClientConnState update from the update +// channel and invokes the appropriate method on the underlying balancer. +// +// If the addresses specified in the update contain addresses of type "grpclb" +// and the selected LB policy is not "grpclb", these addresses will be filtered +// out and ccs will be modified with the updated address list. +func (ccb *ccBalancerWrapper) handleClientConnStateChange(ccs *balancer.ClientConnState) { + if ccb.curBalancerName != grpclbName { + // Filter any grpclb addresses since we don't have the grpclb balancer. + var addrs []resolver.Address + for _, addr := range ccs.ResolverState.Addresses { + if addr.Type == resolver.GRPCLB { + continue + } + addrs = append(addrs, addr) + } + ccs.ResolverState.Addresses = addrs } - ccb.updateCh.Put(exitIdle{}) - return true + ccb.resultCh.Put(ccb.balancer.UpdateClientConnState(*ccs)) } -func (ccb *ccBalancerWrapper) handleSubConnStateChange(sc balancer.SubConn, s connectivity.State, err error) { +// updateSubConnState is invoked by grpc to push a subConn state update to the +// underlying balancer. +func (ccb *ccBalancerWrapper) updateSubConnState(sc balancer.SubConn, s connectivity.State, err error) { // When updating addresses for a SubConn, if the address in use is not in // the new addresses, the old ac will be tearDown() and a new ac will be // created. tearDown() generates a state change with Shutdown state, we @@ -161,44 +209,125 @@ func (ccb *ccBalancerWrapper) handleSubConnStateChange(sc balancer.SubConn, s co }) } -func (ccb *ccBalancerWrapper) updateClientConnState(ccs *balancer.ClientConnState) error { - ccb.balancerMu.Lock() - defer ccb.balancerMu.Unlock() - return ccb.balancer.UpdateClientConnState(*ccs) +// handleSubConnStateChange handles a SubConnState update from the update +// channel and invokes the appropriate method on the underlying balancer.
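The nil check in updateClientConnState above guards a classic Go pitfall: a one-value type assertion on a nil interface panics. A self-contained illustration of exactly that round-trip:

```go
package main

import "fmt"

func main() {
	results := make(chan interface{}, 1)

	var err error // a nil error, as returned by a successful balancer update
	results <- err

	res := <-results
	// res is a nil interface value here, and the one-value assertion
	// res.(error) on a nil interface panics at runtime, which is why the
	// wrapper checks res == nil before asserting.
	if res == nil {
		fmt.Println("balancer reported success")
		return
	}
	fmt.Println("balancer error:", res.(error))
}
```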
+func (ccb *ccBalancerWrapper) handleSubConnStateChange(update *scStateUpdate) { + ccb.balancer.UpdateSubConnState(update.sc, balancer.SubConnState{ConnectivityState: update.state, ConnectionError: update.err}) +} + +func (ccb *ccBalancerWrapper) exitIdle() { + ccb.updateCh.Put(&exitIdleUpdate{}) +} + +func (ccb *ccBalancerWrapper) handleExitIdle() { + if ccb.cc.GetState() != connectivity.Idle { + return + } + ccb.balancer.ExitIdle() } func (ccb *ccBalancerWrapper) resolverError(err error) { - ccb.balancerMu.Lock() - defer ccb.balancerMu.Unlock() + ccb.updateCh.Put(&resolverErrorUpdate{err: err}) +} + +func (ccb *ccBalancerWrapper) handleResolverError(err error) { ccb.balancer.ResolverError(err) } +// switchTo is invoked by grpc to instruct the balancer wrapper to switch to the +// LB policy identified by name. +// +// ClientConn calls newCCBalancerWrapper() at creation time. Upon receipt of the +// first good update from the name resolver, it determines the LB policy to use +// and invokes the switchTo() method. Upon receipt of every subsequent update +// from the name resolver, it invokes this method. +// +// The ccBalancerWrapper keeps track of the current LB policy name, and skips +// the graceful balancer switching process if the name does not change. +func (ccb *ccBalancerWrapper) switchTo(name string) { + ccb.updateCh.Put(&switchToUpdate{name: name}) +} + +// handleSwitchTo handles a balancer switch update from the update channel. It +// calls the SwitchTo() method on the gracefulswitch.Balancer with a +// balancer.Builder corresponding to name. If no balancer.Builder is registered +// for the given name, it uses the default LB policy which is "pick_first". +func (ccb *ccBalancerWrapper) handleSwitchTo(name string) { + // TODO: Other languages use case-insensitive balancer registries. We should + // switch as well. See: https://github.com/grpc/grpc-go/issues/5288. + if strings.EqualFold(ccb.curBalancerName, name) { + return + } + + // TODO: Ensure that name is a registered LB policy when we get here. + // We currently only validate the `loadBalancingConfig` field. We need to do + // the same for the `loadBalancingPolicy` field and reject the service config + // if the specified policy is not registered. + builder := balancer.Get(name) + if builder == nil { + channelz.Warningf(logger, ccb.cc.channelzID, "Channel switches to new LB policy %q, since the specified LB policy %q was not registered", PickFirstBalancerName, name) + builder = newPickfirstBuilder() + } else { + channelz.Infof(logger, ccb.cc.channelzID, "Channel switches to new LB policy %q", name) + } + + if err := ccb.balancer.SwitchTo(builder); err != nil { + channelz.Errorf(logger, ccb.cc.channelzID, "Channel failed to build new LB policy %q: %v", name, err) + return + } + ccb.curBalancerName = builder.Name() +} + +// handleRemoveSubConn handles a request from the underlying balancer to remove +// a subConn. +// +// See comments in RemoveSubConn() for more details.
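handleSwitchTo above resolves policy names against the global balancer registry, falling back to pick_first; applications normally choose the name through service config. A hedged dialing sketch selecting the built-in round_robin policy (the target is a placeholder):

```go
package main

import (
	"log"

	"google.golang.org/grpc"
	_ "google.golang.org/grpc/balancer/roundrobin" // registers "round_robin" (grpc links it in by default)
	"google.golang.org/grpc/credentials/insecure"
)

func main() {
	conn, err := grpc.Dial(
		"localhost:50051", // placeholder target
		grpc.WithTransportCredentials(insecure.NewCredentials()),
		grpc.WithDefaultServiceConfig(`{"loadBalancingConfig": [{"round_robin":{}}]}`),
	)
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()
}
```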
+func (ccb *ccBalancerWrapper) handleRemoveSubConn(acbw *acBalancerWrapper) { + ccb.cc.removeAddrConn(acbw.getAddrConn(), errConnDrain) +} + +func (ccb *ccBalancerWrapper) close() { + ccb.closed.Fire() + <-ccb.done.Done() +} + +func (ccb *ccBalancerWrapper) handleClose() { + ccb.balancer.Close() + ccb.done.Fire() +} + func (ccb *ccBalancerWrapper) NewSubConn(addrs []resolver.Address, opts balancer.NewSubConnOptions) (balancer.SubConn, error) { if len(addrs) <= 0 { return nil, fmt.Errorf("grpc: cannot create SubConn with empty address list") } - ccb.mu.Lock() - defer ccb.mu.Unlock() - if ccb.subConns == nil { - return nil, fmt.Errorf("grpc: ClientConn balancer wrapper was closed") - } ac, err := ccb.cc.newAddrConn(addrs, opts) if err != nil { + channelz.Warningf(logger, ccb.cc.channelzID, "acBalancerWrapper: NewSubConn: failed to newAddrConn: %v", err) return nil, err } acbw := &acBalancerWrapper{ac: ac} acbw.ac.mu.Lock() ac.acbw = acbw acbw.ac.mu.Unlock() - ccb.subConns[acbw] = struct{}{} return acbw, nil } func (ccb *ccBalancerWrapper) RemoveSubConn(sc balancer.SubConn) { - // The RemoveSubConn() is handled in the run() goroutine, to avoid deadlock - // during switchBalancer() if the old balancer calls RemoveSubConn() in its - // Close(). - ccb.updateCh.Put(sc) + // Before we switched the ccBalancerWrapper to use gracefulswitch.Balancer, it + // was required to handle the RemoveSubConn() method asynchronously by pushing + // the update onto the update channel. This was done to avoid a deadlock as + // switchBalancer() was holding cc.mu when calling Close() on the old + // balancer, which would in turn call RemoveSubConn(). + // + // With the use of gracefulswitch.Balancer in ccBalancerWrapper, handling this + // asynchronously is probably not required anymore since the switchTo() method + // handles the balancer switch by pushing the update onto the channel. + // TODO(easwars): Handle this inline. + acbw, ok := sc.(*acBalancerWrapper) + if !ok { + return + } + ccb.updateCh.Put(&subConnUpdate{acbw: acbw}) } func (ccb *ccBalancerWrapper) UpdateAddresses(sc balancer.SubConn, addrs []resolver.Address) { @@ -210,11 +339,6 @@ func (ccb *ccBalancerWrapper) UpdateAddresses(sc balancer.SubConn, addrs []resol } func (ccb *ccBalancerWrapper) UpdateState(s balancer.State) { - ccb.mu.Lock() - defer ccb.mu.Unlock() - if ccb.subConns == nil { - return - } // Update picker before updating state. Even though the ordering here does // not matter, it can lead to multiple calls of Pick in the common start-up // case where we wait for ready and then perform an RPC. If the picker is diff --git a/vendor/google.golang.org/grpc/channelz/channelz.go b/vendor/google.golang.org/grpc/channelz/channelz.go new file mode 100644 index 0000000000..a220c47c59 --- /dev/null +++ b/vendor/google.golang.org/grpc/channelz/channelz.go @@ -0,0 +1,36 @@ +/* + * + * Copyright 2020 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + */ + +// Package channelz exports internals of the channelz implementation as required +// by other gRPC packages. +// +// The implementation of the channelz spec as defined in +// https://github.com/grpc/proposal/blob/master/A14-channelz.md, is provided by +// the `internal/channelz` package. +// +// Experimental +// +// Notice: All APIs in this package are experimental and may be removed in a +// later release. +package channelz + +import "google.golang.org/grpc/internal/channelz" + +// Identifier is an opaque identifier which uniquely identifies an entity in the +// channelz database. +type Identifier = channelz.Identifier diff --git a/vendor/google.golang.org/grpc/clientconn.go b/vendor/google.golang.org/grpc/clientconn.go index 28f09dc870..0d21f2210b 100644 --- a/vendor/google.golang.org/grpc/clientconn.go +++ b/vendor/google.golang.org/grpc/clientconn.go @@ -79,7 +79,7 @@ var ( // errNoTransportSecurity indicates that there is no transport security // being set for ClientConn. Users should either set one or explicitly // call WithInsecure DialOption to disable security. - errNoTransportSecurity = errors.New("grpc: no transport security set (use grpc.WithInsecure() explicitly or set credentials)") + errNoTransportSecurity = errors.New("grpc: no transport security set (use grpc.WithTransportCredentials(insecure.NewCredentials()) explicitly or set credentials)") // errTransportCredsAndBundle indicates that creds bundle is used together // with other individual Transport Credentials. errTransportCredsAndBundle = errors.New("grpc: credentials.Bundle may not be used with individual TransportCredentials") @@ -146,6 +146,10 @@ func DialContext(ctx context.Context, target string, opts ...DialOption) (conn * cc.safeConfigSelector.UpdateConfigSelector(&defaultConfigSelector{nil}) cc.ctx, cc.cancel = context.WithCancel(context.Background()) + for _, opt := range extraDialOptions { + opt.apply(&cc.dopts) + } + for _, opt := range opts { opt.apply(&cc.dopts) } @@ -159,23 +163,20 @@ func DialContext(ctx context.Context, target string, opts ...DialOption) (conn * } }() - if channelz.IsOn() { - if cc.dopts.channelzParentID != 0 { - cc.channelzID = channelz.RegisterChannel(&channelzChannel{cc}, cc.dopts.channelzParentID, target) - channelz.AddTraceEvent(logger, cc.channelzID, 0, &channelz.TraceEventDesc{ - Desc: "Channel Created", - Severity: channelz.CtInfo, - Parent: &channelz.TraceEventDesc{ - Desc: fmt.Sprintf("Nested Channel(id:%d) created", cc.channelzID), - Severity: channelz.CtInfo, - }, - }) - } else { - cc.channelzID = channelz.RegisterChannel(&channelzChannel{cc}, 0, target) - channelz.Info(logger, cc.channelzID, "Channel Created") + pid := cc.dopts.channelzParentID + cc.channelzID = channelz.RegisterChannel(&channelzChannel{cc}, pid, target) + ted := &channelz.TraceEventDesc{ + Desc: "Channel created", + Severity: channelz.CtInfo, + } + if cc.dopts.channelzParentID != nil { + ted.Parent = &channelz.TraceEventDesc{ + Desc: fmt.Sprintf("Nested Channel(id:%d) created", cc.channelzID.Int()), + Severity: channelz.CtInfo, } - cc.csMgr.channelzID = cc.channelzID } + channelz.AddTraceEvent(logger, cc.channelzID, 1, ted) + cc.csMgr.channelzID = cc.channelzID if cc.dopts.copts.TransportCredentials == nil && cc.dopts.copts.CredsBundle == nil { return nil, errNoTransportSecurity @@ -281,7 +282,7 @@ func DialContext(ctx context.Context, target string, opts ...DialOption) (conn * if creds := cc.dopts.copts.TransportCredentials; creds != nil { credsClone = creds.Clone() } - cc.balancerBuildOpts = 
balancer.BuildOptions{ + cc.balancerWrapper = newCCBalancerWrapper(cc, balancer.BuildOptions{ DialCreds: credsClone, CredsBundle: cc.dopts.copts.CredsBundle, Dialer: cc.dopts.copts.Dialer, @@ -289,7 +290,7 @@ func DialContext(ctx context.Context, target string, opts ...DialOption) (conn * CustomUserAgent: cc.dopts.copts.UserAgent, ChannelzParentID: cc.channelzID, Target: cc.parsedTarget, - } + }) // Build the resolver. rWrapper, err := newCCResolverWrapper(cc, resolverBuilder) @@ -398,7 +399,7 @@ type connectivityStateManager struct { mu sync.Mutex state connectivity.State notifyChan chan struct{} - channelzID int64 + channelzID *channelz.Identifier } // updateState updates the connectivity.State of ClientConn. @@ -464,34 +465,36 @@ var _ ClientConnInterface = (*ClientConn)(nil) // handshakes. It also handles errors on established connections by // re-resolving the name and reconnecting. type ClientConn struct { - ctx context.Context - cancel context.CancelFunc - - target string - parsedTarget resolver.Target - authority string - dopts dialOptions - csMgr *connectivityStateManager - - balancerBuildOpts balancer.BuildOptions - blockingpicker *pickerWrapper - + ctx context.Context // Initialized using the background context at dial time. + cancel context.CancelFunc // Cancelled on close. + + // The following are initialized at dial time, and are read-only after that. + target string // User's dial target. + parsedTarget resolver.Target // See parseTargetAndFindResolver(). + authority string // See determineAuthority(). + dopts dialOptions // Default and user specified dial options. + channelzID *channelz.Identifier // Channelz identifier for the channel. + balancerWrapper *ccBalancerWrapper // Uses gracefulswitch.balancer underneath. + + // The following provide their own synchronization, and therefore don't + // require cc.mu to be held to access them. + csMgr *connectivityStateManager + blockingpicker *pickerWrapper safeConfigSelector iresolver.SafeConfigSelector + czData *channelzData + retryThrottler atomic.Value // Updated from service config. - mu sync.RWMutex - resolverWrapper *ccResolverWrapper - sc *ServiceConfig - conns map[*addrConn]struct{} - // Keepalive parameter can be updated if a GoAway is received. - mkp keepalive.ClientParameters - curBalancerName string - balancerWrapper *ccBalancerWrapper - retryThrottler atomic.Value - + // firstResolveEvent is used to track whether the name resolver sent us at + // least one update. RPCs block on this event. firstResolveEvent *grpcsync.Event - channelzID int64 // channelz unique identification number - czData *channelzData + // mu protects the following fields. + // TODO: split mu so the same mutex isn't used for everything. + mu sync.RWMutex + resolverWrapper *ccResolverWrapper // Initialized in Dial; cleared in Close. + sc *ServiceConfig // Latest service config received from the resolver. + conns map[*addrConn]struct{} // Set to nil on close. + mkp keepalive.ClientParameters // May be updated upon receipt of a GoAway. lceMu sync.Mutex // protects lastConnectionError lastConnectionError error @@ -536,14 +539,7 @@ func (cc *ClientConn) GetState() connectivity.State { // Notice: This API is EXPERIMENTAL and may be changed or removed in a later // release. 
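Connect(), defined just below, now simply asks the balancer wrapper to exit idle instead of iterating over addrConns itself. A caller can pair it with the connectivity API to wait for readiness; a sketch with a placeholder target:

```go
package main

import (
	"context"
	"log"
	"time"

	"google.golang.org/grpc"
	"google.golang.org/grpc/connectivity"
	"google.golang.org/grpc/credentials/insecure"
)

func main() {
	conn, err := grpc.Dial("localhost:50051", // placeholder target
		grpc.WithTransportCredentials(insecure.NewCredentials()))
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()

	conn.Connect() // nudge the channel out of IDLE without blocking

	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
	defer cancel()
	for s := conn.GetState(); s != connectivity.Ready; s = conn.GetState() {
		if !conn.WaitForStateChange(ctx, s) {
			log.Fatal("timed out waiting for READY")
		}
	}
	log.Println("channel is READY")
}
```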
func (cc *ClientConn) Connect() { - cc.mu.Lock() - defer cc.mu.Unlock() - if cc.balancerWrapper != nil && cc.balancerWrapper.exitIdle() { - return - } - for ac := range cc.conns { - go ac.connect() - } + cc.balancerWrapper.exitIdle() } func (cc *ClientConn) scWatcher() { @@ -623,9 +619,7 @@ func (cc *ClientConn) updateResolverState(s resolver.State, err error) error { // with the new addresses. cc.maybeApplyDefaultServiceConfig(nil) - if cc.balancerWrapper != nil { - cc.balancerWrapper.resolverError(err) - } + cc.balancerWrapper.resolverError(err) // No addresses are valid with err set; return early. cc.mu.Unlock() @@ -653,16 +647,10 @@ func (cc *ClientConn) updateResolverState(s resolver.State, err error) error { cc.applyServiceConfigAndBalancer(sc, configSelector, s.Addresses) } else { ret = balancer.ErrBadResolverState - if cc.balancerWrapper == nil { - var err error - if s.ServiceConfig.Err != nil { - err = status.Errorf(codes.Unavailable, "error parsing service config: %v", s.ServiceConfig.Err) - } else { - err = status.Errorf(codes.Unavailable, "illegal service config type: %T", s.ServiceConfig.Config) - } - cc.safeConfigSelector.UpdateConfigSelector(&defaultConfigSelector{cc.sc}) - cc.blockingpicker.updatePicker(base.NewErrPicker(err)) - cc.csMgr.updateState(connectivity.TransientFailure) + if cc.sc == nil { + // Apply the failing LB only if we haven't received valid service config + // from the name resolver in the past. + cc.applyFailingLB(s.ServiceConfig) cc.mu.Unlock() return ret } @@ -670,24 +658,12 @@ func (cc *ClientConn) updateResolverState(s resolver.State, err error) error { } var balCfg serviceconfig.LoadBalancingConfig - if cc.dopts.balancerBuilder == nil && cc.sc != nil && cc.sc.lbConfig != nil { + if cc.sc != nil && cc.sc.lbConfig != nil { balCfg = cc.sc.lbConfig.cfg } - - cbn := cc.curBalancerName bw := cc.balancerWrapper cc.mu.Unlock() - if cbn != grpclbName { - // Filter any grpclb addresses since we don't have the grpclb balancer. - for i := 0; i < len(s.Addresses); { - if s.Addresses[i].Type == resolver.GRPCLB { - copy(s.Addresses[i:], s.Addresses[i+1:]) - s.Addresses = s.Addresses[:len(s.Addresses)-1] - continue - } - i++ - } - } + uccsErr := bw.updateClientConnState(&balancer.ClientConnState{ResolverState: s, BalancerConfig: balCfg}) if ret == nil { ret = uccsErr // prefer ErrBadResolver state since any other error is @@ -696,56 +672,28 @@ func (cc *ClientConn) updateResolverState(s resolver.State, err error) error { return ret } -// switchBalancer starts the switching from current balancer to the balancer -// with the given name. -// -// It will NOT send the current address list to the new balancer. If needed, -// caller of this function should send address list to the new balancer after -// this function returns. +// applyFailingLB is akin to configuring an LB policy on the channel which +// always fails RPCs. Here, an actual LB policy is not configured, but an always +// erroring picker is configured, which returns errors with information about +// what was invalid in the received service config. A config selector with no +// service config is configured, and the connectivity state of the channel is +// set to TransientFailure. // // Caller must hold cc.mu. 
-func (cc *ClientConn) switchBalancer(name string) { - if strings.EqualFold(cc.curBalancerName, name) { - return - } - - channelz.Infof(logger, cc.channelzID, "ClientConn switching balancer to %q", name) - if cc.dopts.balancerBuilder != nil { - channelz.Info(logger, cc.channelzID, "ignoring balancer switching: Balancer DialOption used instead") - return - } - if cc.balancerWrapper != nil { - // Don't hold cc.mu while closing the balancers. The balancers may call - // methods that require cc.mu (e.g. cc.NewSubConn()). Holding the mutex - // would cause a deadlock in that case. - cc.mu.Unlock() - cc.balancerWrapper.close() - cc.mu.Lock() - } - - builder := balancer.Get(name) - if builder == nil { - channelz.Warningf(logger, cc.channelzID, "Channel switches to new LB policy %q due to fallback from invalid balancer name", PickFirstBalancerName) - channelz.Infof(logger, cc.channelzID, "failed to get balancer builder for: %v, using pick_first instead", name) - builder = newPickfirstBuilder() +func (cc *ClientConn) applyFailingLB(sc *serviceconfig.ParseResult) { + var err error + if sc.Err != nil { + err = status.Errorf(codes.Unavailable, "error parsing service config: %v", sc.Err) } else { - channelz.Infof(logger, cc.channelzID, "Channel switches to new LB policy %q", name) + err = status.Errorf(codes.Unavailable, "illegal service config type: %T", sc.Config) } - - cc.curBalancerName = builder.Name() - cc.balancerWrapper = newCCBalancerWrapper(cc, builder, cc.balancerBuildOpts) + cc.safeConfigSelector.UpdateConfigSelector(&defaultConfigSelector{nil}) + cc.blockingpicker.updatePicker(base.NewErrPicker(err)) + cc.csMgr.updateState(connectivity.TransientFailure) } func (cc *ClientConn) handleSubConnStateChange(sc balancer.SubConn, s connectivity.State, err error) { - cc.mu.Lock() - if cc.conns == nil { - cc.mu.Unlock() - return - } - // TODO(bar switching) send updates to all balancer wrappers when balancer - // gracefully switching is supported. - cc.balancerWrapper.handleSubConnStateChange(sc, s, err) - cc.mu.Unlock() + cc.balancerWrapper.updateSubConnState(sc, s, err) } // newAddrConn creates an addrConn for addrs and adds it to cc.conns. @@ -768,17 +716,21 @@ func (cc *ClientConn) newAddrConn(addrs []resolver.Address, opts balancer.NewSub cc.mu.Unlock() return nil, ErrClientConnClosing } - if channelz.IsOn() { - ac.channelzID = channelz.RegisterSubChannel(ac, cc.channelzID, "") - channelz.AddTraceEvent(logger, ac.channelzID, 0, &channelz.TraceEventDesc{ - Desc: "Subchannel Created", - Severity: channelz.CtInfo, - Parent: &channelz.TraceEventDesc{ - Desc: fmt.Sprintf("Subchannel(id:%d) created", ac.channelzID), - Severity: channelz.CtInfo, - }, - }) + + var err error + ac.channelzID, err = channelz.RegisterSubChannel(ac, cc.channelzID, "") + if err != nil { + return nil, err } + channelz.AddTraceEvent(logger, ac.channelzID, 0, &channelz.TraceEventDesc{ + Desc: "Subchannel created", + Severity: channelz.CtInfo, + Parent: &channelz.TraceEventDesc{ + Desc: fmt.Sprintf("Subchannel(id:%d) created", ac.channelzID.Int()), + Severity: channelz.CtInfo, + }, + }) + cc.conns[ac] = struct{}{} cc.mu.Unlock() return ac, nil @@ -853,16 +805,31 @@ func (ac *addrConn) connect() error { return nil } +func equalAddresses(a, b []resolver.Address) bool { + if len(a) != len(b) { + return false + } + for i, v := range a { + if !v.Equal(b[i]) { + return false + } + } + return true +} + // tryUpdateAddrs tries to update ac.addrs with the new addresses list. // -// If ac is Connecting, it returns false. 
The caller should tear down the ac and -create a new one. Note that the backoff will be reset when this happens. -// // If ac is TransientFailure, it updates ac.addrs and returns true. The updated // addresses will be picked up by retry in the next iteration after backoff. // // If ac is Shutdown or Idle, it updates ac.addrs and returns true. // +// If the new address list is the same as the old one, it does nothing and +// returns true. +// +// If ac is Connecting, it returns false. The caller should tear down the ac and +// create a new one. Note that the backoff will be reset when this happens. +// // If ac is Ready, it checks whether current connected address of ac is in the // new addrs list. // - If true, it updates ac.addrs and returns true. The ac will keep using // the existing connection. @@ -879,6 +846,10 @@ func (ac *addrConn) tryUpdateAddrs(addrs []resolver.Address) bool { return true } + if equalAddresses(ac.addrs, addrs) { + return true + } + if ac.state == connectivity.Connecting { return false } @@ -959,14 +930,10 @@ func (cc *ClientConn) healthCheckConfig() *healthCheckConfig { } func (cc *ClientConn) getTransport(ctx context.Context, failfast bool, method string) (transport.ClientTransport, func(balancer.DoneInfo), error) { - t, done, err := cc.blockingpicker.pick(ctx, failfast, balancer.PickInfo{ + return cc.blockingpicker.pick(ctx, failfast, balancer.PickInfo{ Ctx: ctx, FullMethodName: method, }) - if err != nil { - return nil, nil, toRPCErr(err) - } - return t, done, nil } func (cc *ClientConn) applyServiceConfigAndBalancer(sc *ServiceConfig, configSelector iresolver.ConfigSelector, addrs []resolver.Address) { @@ -991,35 +958,26 @@ func (cc *ClientConn) applyServiceConfigAndBalancer(sc *ServiceConfig, configSel cc.retryThrottler.Store((*retryThrottler)(nil)) } - if cc.dopts.balancerBuilder == nil { - // Only look at balancer types and switch balancer if balancer dial - // option is not set. - var newBalancerName string - if cc.sc != nil && cc.sc.lbConfig != nil { - newBalancerName = cc.sc.lbConfig.name - } else { - var isGRPCLB bool - for _, a := range addrs { - if a.Type == resolver.GRPCLB { - isGRPCLB = true - break - } - } - if isGRPCLB { - newBalancerName = grpclbName - } else if cc.sc != nil && cc.sc.LB != nil { - newBalancerName = *cc.sc.LB - } else { - newBalancerName = PickFirstBalancerName + var newBalancerName string + if cc.sc != nil && cc.sc.lbConfig != nil { + newBalancerName = cc.sc.lbConfig.name + } else { + var isGRPCLB bool + for _, a := range addrs { + if a.Type == resolver.GRPCLB { + isGRPCLB = true + break } } - cc.switchBalancer(newBalancerName) - } else if cc.balancerWrapper == nil { - // Balancer dial option was set, and this is the first time handling - // resolved addresses. Build a balancer with dopts.balancerBuilder. - cc.curBalancerName = cc.dopts.balancerBuilder.Name() - cc.balancerWrapper = newCCBalancerWrapper(cc, cc.dopts.balancerBuilder, cc.balancerBuildOpts) + if isGRPCLB { + newBalancerName = grpclbName + } else if cc.sc != nil && cc.sc.LB != nil { + newBalancerName = *cc.sc.LB + } else { + newBalancerName = PickFirstBalancerName + } } + cc.balancerWrapper.switchTo(newBalancerName) } func (cc *ClientConn) resolveNow(o resolver.ResolveNowOptions) { @@ -1070,11 +1028,11 @@ func (cc *ClientConn) Close() error { rWrapper := cc.resolverWrapper cc.resolverWrapper = nil bWrapper := cc.balancerWrapper - cc.balancerWrapper = nil cc.mu.Unlock() + // The order of closing matters here since the balancer wrapper assumes the + // picker is closed before the wrapper itself is closed.
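Stepping back to equalAddresses, added above for tryUpdateAddrs: it is an exact, order-sensitive comparison built on resolver.Address.Equal. A small illustration of what short-circuits the update versus what falls through to the Connecting-state teardown rules (the helper is duplicated here so the snippet runs standalone; addresses are placeholders):

```go
package main

import (
	"fmt"

	"google.golang.org/grpc/resolver"
)

// Mirrors the helper added in this patch, duplicated for a standalone run.
func equalAddresses(a, b []resolver.Address) bool {
	if len(a) != len(b) {
		return false
	}
	for i, v := range a {
		if !v.Equal(b[i]) {
			return false
		}
	}
	return true
}

func main() {
	old := []resolver.Address{{Addr: "10.0.0.1:443"}, {Addr: "10.0.0.2:443"}}
	same := []resolver.Address{{Addr: "10.0.0.1:443"}, {Addr: "10.0.0.2:443"}}
	reordered := []resolver.Address{{Addr: "10.0.0.2:443"}, {Addr: "10.0.0.1:443"}}

	fmt.Println(equalAddresses(old, same))      // true: update is a no-op
	fmt.Println(equalAddresses(old, reordered)) // false: order matters
}
```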
cc.blockingpicker.close() - if bWrapper != nil { bWrapper.close() } @@ -1085,22 +1043,22 @@ func (cc *ClientConn) Close() error { for ac := range conns { ac.tearDown(ErrClientConnClosing) } - if channelz.IsOn() { - ted := &channelz.TraceEventDesc{ - Desc: "Channel Deleted", + ted := &channelz.TraceEventDesc{ + Desc: "Channel deleted", + Severity: channelz.CtInfo, + } + if cc.dopts.channelzParentID != nil { + ted.Parent = &channelz.TraceEventDesc{ + Desc: fmt.Sprintf("Nested channel(id:%d) deleted", cc.channelzID.Int()), Severity: channelz.CtInfo, } - if cc.dopts.channelzParentID != 0 { - ted.Parent = &channelz.TraceEventDesc{ - Desc: fmt.Sprintf("Nested channel(id:%d) deleted", cc.channelzID), - Severity: channelz.CtInfo, - } - } - channelz.AddTraceEvent(logger, cc.channelzID, 0, ted) - // TraceEvent needs to be called before RemoveEntry, as TraceEvent may add trace reference to - // the entity being deleted, and thus prevent it from being deleted right away. - channelz.RemoveEntry(cc.channelzID) } + channelz.AddTraceEvent(logger, cc.channelzID, 0, ted) + // TraceEvent needs to be called before RemoveEntry, as TraceEvent may add + // trace reference to the entity being deleted, and thus prevent it from being + // deleted right away. + channelz.RemoveEntry(cc.channelzID) + return nil } @@ -1130,7 +1088,7 @@ type addrConn struct { backoffIdx int // Needs to be stateful for resetConnectBackoff. resetBackoff chan struct{} - channelzID int64 // channelz unique identification number. + channelzID *channelz.Identifier czData *channelzData } @@ -1284,6 +1242,7 @@ func (ac *addrConn) createTransport(addr resolver.Address, copts transport.Conne ac.mu.Lock() defer ac.mu.Unlock() defer connClosed.Fire() + defer hcancel() if !hcStarted || hctx.Err() != nil { // We didn't start the health check or set the state to READY, so // no need to do anything else here. @@ -1294,7 +1253,6 @@ func (ac *addrConn) createTransport(addr resolver.Address, copts transport.Conne // state, since there may be a new transport in this addrConn. return } - hcancel() ac.transport = nil // Refresh the name resolver ac.cc.resolveNow(resolver.ResolveNowOptions{}) @@ -1312,14 +1270,13 @@ func (ac *addrConn) createTransport(addr resolver.Address, copts transport.Conne connectCtx, cancel := context.WithDeadline(ac.ctx, connectDeadline) defer cancel() - if channelz.IsOn() { - copts.ChannelzParentID = ac.channelzID - } + copts.ChannelzParentID = ac.channelzID newTr, err := transport.NewClientTransport(connectCtx, ac.cc.ctx, addr, copts, func() { prefaceReceived.Fire() }, onGoAway, onClose) if err != nil { // newTr is either nil, or closed. - channelz.Warningf(logger, ac.channelzID, "grpc: addrConn.createTransport failed to connect to %v. Err: %v", addr, err) + hcancel() + channelz.Warningf(logger, ac.channelzID, "grpc: addrConn.createTransport failed to connect to %s. 
Err: %v", addr, err) return err } @@ -1332,7 +1289,7 @@ func (ac *addrConn) createTransport(addr resolver.Address, copts transport.Conne newTr.Close(transport.ErrConnClosing) if connectCtx.Err() == context.DeadlineExceeded { err := errors.New("failed to receive server preface within timeout") - channelz.Warningf(logger, ac.channelzID, "grpc: addrConn.createTransport failed to connect to %v: %v", addr, err) + channelz.Warningf(logger, ac.channelzID, "grpc: addrConn.createTransport failed to connect to %s: %v", addr, err) return err } return nil @@ -1497,19 +1454,18 @@ func (ac *addrConn) tearDown(err error) { curTr.GracefulClose() ac.mu.Lock() } - if channelz.IsOn() { - channelz.AddTraceEvent(logger, ac.channelzID, 0, &channelz.TraceEventDesc{ - Desc: "Subchannel Deleted", + channelz.AddTraceEvent(logger, ac.channelzID, 0, &channelz.TraceEventDesc{ + Desc: "Subchannel deleted", + Severity: channelz.CtInfo, + Parent: &channelz.TraceEventDesc{ + Desc: fmt.Sprintf("Subchannel(id:%d) deleted", ac.channelzID.Int()), Severity: channelz.CtInfo, - Parent: &channelz.TraceEventDesc{ - Desc: fmt.Sprintf("Subchanel(id:%d) deleted", ac.channelzID), - Severity: channelz.CtInfo, - }, - }) - // TraceEvent needs to be called before RemoveEntry, as TraceEvent may add trace reference to - // the entity being deleted, and thus prevent it from being deleted right away. - channelz.RemoveEntry(ac.channelzID) - } + }, + }) + // TraceEvent needs to be called before RemoveEntry, as TraceEvent may add + // trace reference to the entity being deleted, and thus prevent it from + // being deleted right away. + channelz.RemoveEntry(ac.channelzID) ac.mu.Unlock() } diff --git a/vendor/google.golang.org/grpc/credentials/alts/internal/handshaker/service/service.go b/vendor/google.golang.org/grpc/credentials/alts/internal/handshaker/service/service.go index 77d759cd95..2de2c4affd 100644 --- a/vendor/google.golang.org/grpc/credentials/alts/internal/handshaker/service/service.go +++ b/vendor/google.golang.org/grpc/credentials/alts/internal/handshaker/service/service.go @@ -24,6 +24,7 @@ import ( "sync" grpc "google.golang.org/grpc" + "google.golang.org/grpc/credentials/insecure" ) var ( @@ -49,7 +50,7 @@ func Dial(hsAddress string) (*grpc.ClientConn, error) { // Create a new connection to the handshaker service. Note that // this connection stays open until the application is closed. var err error - hsConn, err = hsDialer(hsAddress, grpc.WithInsecure()) + hsConn, err = hsDialer(hsAddress, grpc.WithTransportCredentials(insecure.NewCredentials())) if err != nil { return nil, err } diff --git a/vendor/google.golang.org/grpc/credentials/alts/internal/proto/grpc_gcp/handshaker_grpc.pb.go b/vendor/google.golang.org/grpc/credentials/alts/internal/proto/grpc_gcp/handshaker_grpc.pb.go index a02c458281..fd55176b9b 100644 --- a/vendor/google.golang.org/grpc/credentials/alts/internal/proto/grpc_gcp/handshaker_grpc.pb.go +++ b/vendor/google.golang.org/grpc/credentials/alts/internal/proto/grpc_gcp/handshaker_grpc.pb.go @@ -1,6 +1,6 @@ // Code generated by protoc-gen-go-grpc. DO NOT EDIT. 
// versions: -// - protoc-gen-go-grpc v1.1.0 +// - protoc-gen-go-grpc v1.2.0 // - protoc v3.14.0 // source: grpc/gcp/handshaker.proto diff --git a/vendor/google.golang.org/grpc/credentials/google/google.go b/vendor/google.golang.org/grpc/credentials/google/google.go index 63625a4b68..fbdf7dc299 100644 --- a/vendor/google.golang.org/grpc/credentials/google/google.go +++ b/vendor/google.golang.org/grpc/credentials/google/google.go @@ -50,7 +50,7 @@ func NewDefaultCredentialsWithOptions(opts DefaultCredentialsOptions) credential ctx, cancel := context.WithTimeout(context.Background(), tokenRequestTimeout) defer cancel() var err error - opts.PerRPCCreds, err = oauth.NewApplicationDefault(ctx) + opts.PerRPCCreds, err = newADC(ctx) if err != nil { logger.Warningf("NewDefaultCredentialsWithOptions: failed to create application oauth: %v", err) } @@ -112,6 +112,9 @@ var ( newALTS = func() credentials.TransportCredentials { return alts.NewClientCreds(alts.DefaultClientOptions()) } + newADC = func(ctx context.Context) (credentials.PerRPCCredentials, error) { + return oauth.NewApplicationDefault(ctx) + } ) // NewWithMode should make a copy of Bundle, and switch mode. Modifying the diff --git a/vendor/google.golang.org/grpc/credentials/google/xds.go b/vendor/google.golang.org/grpc/credentials/google/xds.go index b8c2e8f920..e32edc0421 100644 --- a/vendor/google.golang.org/grpc/credentials/google/xds.go +++ b/vendor/google.golang.org/grpc/credentials/google/xds.go @@ -21,6 +21,7 @@ package google import ( "context" "net" + "net/url" "strings" "google.golang.org/grpc/credentials" @@ -28,12 +29,16 @@ import ( ) const cfeClusterNamePrefix = "google_cfe_" +const cfeClusterResourceNamePrefix = "/envoy.config.cluster.v3.Cluster/google_cfe_" +const cfeClusterAuthorityName = "traffic-director-c2p.xds.googleapis.com" // clusterTransportCreds is a combo of TLS + ALTS. // // On the client, ClientHandshake picks TLS or ALTS based on address attributes. // - if attributes has cluster name -// - if cluster name has prefix "google_cfe_", use TLS +// - if cluster name has prefix "google_cfe_", or +// "xdstp://traffic-director-c2p.xds.googleapis.com/envoy.config.cluster.v3.Cluster/google_cfe_", +// use TLS // - otherwise, use ALTS // - else, do TLS // @@ -50,18 +55,49 @@ func newClusterTransportCreds(tls, alts credentials.TransportCredentials) *clust } } -func (c *clusterTransportCreds) ClientHandshake(ctx context.Context, authority string, rawConn net.Conn) (net.Conn, credentials.AuthInfo, error) { +// clusterName returns the xDS cluster name stored in the attributes in the +// context. +func clusterName(ctx context.Context) string { chi := credentials.ClientHandshakeInfoFromContext(ctx) if chi.Attributes == nil { - return c.tls.ClientHandshake(ctx, authority, rawConn) + return "" + } + cluster, _ := internal.GetXDSHandshakeClusterName(chi.Attributes) + return cluster +} + +// isDirectPathCluster returns true if the cluster in the context is a +// directpath cluster, meaning ALTS should be used. +func isDirectPathCluster(ctx context.Context) bool { + cluster := clusterName(ctx) + if cluster == "" { + // No cluster; not xDS; use TLS. + return false + } + if strings.HasPrefix(cluster, cfeClusterNamePrefix) { + // xDS cluster prefixed by "google_cfe_"; use TLS. 
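+		// (For example, a cluster named "google_cfe_my-service" takes this
+		// branch and is handshaked with TLS; the name is a stand-in.)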
+ return false } - cn, ok := internal.GetXDSHandshakeClusterName(chi.Attributes) - if !ok || strings.HasPrefix(cn, cfeClusterNamePrefix) { - return c.tls.ClientHandshake(ctx, authority, rawConn) + if !strings.HasPrefix(cluster, "xdstp:") { + // Other xDS cluster name; use ALTS. + return true + } + u, err := url.Parse(cluster) + if err != nil { + // Shouldn't happen, but assume ALTS. + return true + } + // If authority AND path match our CFE checks, use TLS; otherwise use ALTS. + return u.Host != cfeClusterAuthorityName || !strings.HasPrefix(u.Path, cfeClusterResourceNamePrefix) +} + +func (c *clusterTransportCreds) ClientHandshake(ctx context.Context, authority string, rawConn net.Conn) (net.Conn, credentials.AuthInfo, error) { + if isDirectPathCluster(ctx) { + // If attributes have cluster name, and cluster name is not cfe, it's a + // backend address, use ALTS. + return c.alts.ClientHandshake(ctx, authority, rawConn) } - // If attributes have cluster name, and cluster name is not cfe, it's a - // backend address, use ALTS. - return c.alts.ClientHandshake(ctx, authority, rawConn) + return c.tls.ClientHandshake(ctx, authority, rawConn) } func (c *clusterTransportCreds) ServerHandshake(conn net.Conn) (net.Conn, credentials.AuthInfo, error) { diff --git a/vendor/google.golang.org/grpc/credentials/insecure/insecure.go b/vendor/google.golang.org/grpc/credentials/insecure/insecure.go index 22a8f996a6..82bee1443b 100644 --- a/vendor/google.golang.org/grpc/credentials/insecure/insecure.go +++ b/vendor/google.golang.org/grpc/credentials/insecure/insecure.go @@ -18,11 +18,6 @@ // Package insecure provides an implementation of the // credentials.TransportCredentials interface which disables transport security. -// -// Experimental -// -// Notice: This package is EXPERIMENTAL and may be changed or removed in a -// later release. package insecure import ( @@ -75,3 +70,29 @@ type info struct { func (info) AuthType() string { return "insecure" } + +// insecureBundle implements an insecure bundle. +// An insecure bundle provides a thin wrapper around insecureTC to support +// the credentials.Bundle interface. +type insecureBundle struct{} + +// NewBundle returns a bundle with disabled transport security and no per rpc credential. +func NewBundle() credentials.Bundle { + return insecureBundle{} +} + +// NewWithMode returns a new insecure Bundle. The mode is ignored. +func (insecureBundle) NewWithMode(string) (credentials.Bundle, error) { + return insecureBundle{}, nil +} + +// PerRPCCredentials returns an nil implementation as insecure +// bundle does not support a per rpc credential. +func (insecureBundle) PerRPCCredentials() credentials.PerRPCCredentials { + return nil +} + +// TransportCredentials returns the underlying insecure transport credential. 
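+//
+// Dialing with the bundle is therefore equivalent, transport-security-wise,
+// to using WithTransportCredentials(insecure.NewCredentials()) directly; for
+// example (illustrative, target is a placeholder):
+//
+//	conn, err := grpc.Dial(target, grpc.WithCredentialsBundle(insecure.NewBundle()))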
+func (insecureBundle) TransportCredentials() credentials.TransportCredentials { + return NewCredentials() +} diff --git a/vendor/google.golang.org/grpc/dialoptions.go b/vendor/google.golang.org/grpc/dialoptions.go index 063f1e903c..75d01ba777 100644 --- a/vendor/google.golang.org/grpc/dialoptions.go +++ b/vendor/google.golang.org/grpc/dialoptions.go @@ -20,12 +20,11 @@ package grpc import ( "context" - "fmt" "net" "time" "google.golang.org/grpc/backoff" - "google.golang.org/grpc/balancer" + "google.golang.org/grpc/channelz" "google.golang.org/grpc/credentials" "google.golang.org/grpc/credentials/insecure" "google.golang.org/grpc/internal" @@ -36,6 +35,15 @@ import ( "google.golang.org/grpc/stats" ) +func init() { + internal.AddExtraDialOptions = func(opt ...DialOption) { + extraDialOptions = append(extraDialOptions, opt...) + } + internal.ClearExtraDialOptions = func() { + extraDialOptions = nil + } +} + // dialOptions configure a Dial call. dialOptions are set by the DialOption // values passed to Dial. type dialOptions struct { @@ -45,19 +53,17 @@ type dialOptions struct { chainUnaryInts []UnaryClientInterceptor chainStreamInts []StreamClientInterceptor - cp Compressor - dc Decompressor - bs internalbackoff.Strategy - block bool - returnLastError bool - timeout time.Duration - scChan <-chan ServiceConfig - authority string - copts transport.ConnectOptions - callOptions []CallOption - // This is used by WithBalancerName dial option. - balancerBuilder balancer.Builder - channelzParentID int64 + cp Compressor + dc Decompressor + bs internalbackoff.Strategy + block bool + returnLastError bool + timeout time.Duration + scChan <-chan ServiceConfig + authority string + copts transport.ConnectOptions + callOptions []CallOption + channelzParentID *channelz.Identifier disableServiceConfig bool disableRetry bool disableHealthCheck bool @@ -73,6 +79,8 @@ type DialOption interface { apply(*dialOptions) } +var extraDialOptions []DialOption + // EmptyDialOption does not alter the dial configuration. It can be embedded in // another structure to build custom dial options. // @@ -195,25 +203,6 @@ func WithDecompressor(dc Decompressor) DialOption { }) } -// WithBalancerName sets the balancer that the ClientConn will be initialized -// with. Balancer registered with balancerName will be used. This function -// panics if no balancer was registered by balancerName. -// -// The balancer cannot be overridden by balancer option specified by service -// config. -// -// Deprecated: use WithDefaultServiceConfig and WithDisableServiceConfig -// instead. Will be removed in a future 1.x release. -func WithBalancerName(balancerName string) DialOption { - builder := balancer.Get(balancerName) - if builder == nil { - panic(fmt.Sprintf("grpc.WithBalancerName: no balancer is registered for name %v", balancerName)) - } - return newFuncDialOption(func(o *dialOptions) { - o.balancerBuilder = builder - }) -} - // WithServiceConfig returns a DialOption which has a channel to read the // service configuration. // @@ -272,7 +261,7 @@ func withBackoff(bs internalbackoff.Strategy) DialOption { }) } -// WithBlock returns a DialOption which makes caller of Dial blocks until the +// WithBlock returns a DialOption which makes callers of Dial block until the // underlying connection is up. Without this, Dial returns immediately and // connecting the server happens in background. 
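//
// A typical pairing (illustrative) bounds the wait with a context deadline:
//
//	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
//	defer cancel()
//	conn, err := grpc.DialContext(ctx, target, grpc.WithBlock(),
//		grpc.WithTransportCredentials(insecure.NewCredentials()))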
func WithBlock() DialOption { @@ -304,8 +293,8 @@ func WithReturnConnectionError() DialOption { // WithCredentialsBundle or WithPerRPCCredentials) which require transport // security is incompatible and will cause grpc.Dial() to fail. // -// Deprecated: use insecure.NewCredentials() instead. -// Will be supported throughout 1.x. +// Deprecated: use WithTransportCredentials and insecure.NewCredentials() +// instead. Will be supported throughout 1.x. func WithInsecure() DialOption { return newFuncDialOption(func(o *dialOptions) { o.copts.TransportCredentials = insecure.NewCredentials() @@ -402,7 +391,7 @@ func WithDialer(f func(string, time.Duration) (net.Conn, error)) DialOption { // all the RPCs and underlying network connections in this ClientConn. func WithStatsHandler(h stats.Handler) DialOption { return newFuncDialOption(func(o *dialOptions) { - o.copts.StatsHandler = h + o.copts.StatsHandlers = append(o.copts.StatsHandlers, h) }) } @@ -498,7 +487,7 @@ func WithAuthority(a string) DialOption { // // Notice: This API is EXPERIMENTAL and may be changed or removed in a // later release. -func WithChannelzParentID(id int64) DialOption { +func WithChannelzParentID(id *channelz.Identifier) DialOption { return newFuncDialOption(func(o *dialOptions) { o.channelzParentID = id }) diff --git a/vendor/google.golang.org/grpc/encoding/encoding.go b/vendor/google.golang.org/grpc/encoding/encoding.go index 6d84f74c7d..18e530fc90 100644 --- a/vendor/google.golang.org/grpc/encoding/encoding.go +++ b/vendor/google.golang.org/grpc/encoding/encoding.go @@ -108,7 +108,7 @@ var registeredCodecs = make(map[string]Codec) // more details. // // NOTE: this function must only be called during initialization time (i.e. in -// an init() function), and is not thread-safe. If multiple Compressors are +// an init() function), and is not thread-safe. If multiple Codecs are // registered with the same name, the one registered last will take effect. 
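//
// For example (illustrative; myCodec stands in for a user-defined type that
// implements Codec):
//
//	func init() {
//		encoding.RegisterCodec(myCodec{})
//	}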
func RegisterCodec(codec Codec) { if codec == nil { diff --git a/vendor/google.golang.org/grpc/go.mod b/vendor/google.golang.org/grpc/go.mod index fcffdceef2..6a760ed743 100644 --- a/vendor/google.golang.org/grpc/go.mod +++ b/vendor/google.golang.org/grpc/go.mod @@ -6,14 +6,14 @@ require ( github.com/cespare/xxhash/v2 v2.1.1 github.com/cncf/udpa/go v0.0.0-20210930031921-04548b0d99d4 github.com/cncf/xds/go v0.0.0-20211011173535-cb28da3451f1 - github.com/envoyproxy/go-control-plane v0.9.10-0.20210907150352-cf90f659a021 + github.com/envoyproxy/go-control-plane v0.10.2-0.20220325020618-49ff273808a1 github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b - github.com/golang/protobuf v1.4.3 - github.com/google/go-cmp v0.5.0 + github.com/golang/protobuf v1.5.2 + github.com/google/go-cmp v0.5.6 github.com/google/uuid v1.1.2 - golang.org/x/net v0.0.0-20200822124328-c89045814202 + golang.org/x/net v0.0.0-20201021035429-f5854403a974 golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d - golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd + golang.org/x/sys v0.0.0-20210119212857-b64e53b001e4 google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013 - google.golang.org/protobuf v1.25.0 + google.golang.org/protobuf v1.27.1 ) diff --git a/vendor/google.golang.org/grpc/go.sum b/vendor/google.golang.org/grpc/go.sum index 8b542e0beb..5f418dba1b 100644 --- a/vendor/google.golang.org/grpc/go.sum +++ b/vendor/google.golang.org/grpc/go.sum @@ -12,8 +12,8 @@ github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGX github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= github.com/cncf/udpa/go v0.0.0-20210930031921-04548b0d99d4 h1:hzAQntlaYRkVSFEfj9OTWlVV1H155FMD8BTKktLv0QI= github.com/cncf/udpa/go v0.0.0-20210930031921-04548b0d99d4/go.mod h1:6pvJx4me5XPnfI9Z40ddWsdw2W/uZgQLFXToKeRcDiI= -github.com/cncf/xds/go v0.0.0-20210805033703-aa0b78936158/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= github.com/cncf/xds/go v0.0.0-20210922020428-25de7278fc84/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= +github.com/cncf/xds/go v0.0.0-20211001041855-01bcc9b48dfe/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= github.com/cncf/xds/go v0.0.0-20211011173535-cb28da3451f1 h1:zH8ljVhhq7yC0MIeUL/IviMtY8hx2mK8cN9wEYb8ggw= github.com/cncf/xds/go v0.0.0-20211011173535-cb28da3451f1/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= github.com/davecgh/go-spew v1.1.0 h1:ZDRjVQ15GmhC3fiQ8ni8+OwkZQO4DARzQgrnXU1Liz8= @@ -22,8 +22,8 @@ github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymF github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= github.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk= -github.com/envoyproxy/go-control-plane v0.9.10-0.20210907150352-cf90f659a021 h1:fP+fF0up6oPY49OrjPrhIJ8yQfdIM85NXMLkMg1EXVs= -github.com/envoyproxy/go-control-plane v0.9.10-0.20210907150352-cf90f659a021/go.mod h1:AFq3mo9L8Lqqiid3OhADV3RfLJnjiw63cSpi+fDTRC0= +github.com/envoyproxy/go-control-plane v0.10.2-0.20220325020618-49ff273808a1 h1:xvqufLtNVwAhN8NMyWklVgxnWohi+wtMGQMhtxexlm0= +github.com/envoyproxy/go-control-plane v0.10.2-0.20220325020618-49ff273808a1/go.mod h1:KJwIaB5Mv44NWtYuAOFCVOjcI94vtpEz2JU/D2v6IjE= github.com/envoyproxy/protoc-gen-validate v0.1.0 
h1:EQciDnbrYxy13PgWoY8AqoxGiPrpgBZ1R8UNe3ddc+A= github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= @@ -40,14 +40,18 @@ github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:W github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0= github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8= github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= -github.com/golang/protobuf v1.4.3 h1:JjCZWpVbqXDqFVmTfYWEVTMIYrL/NPdPSCHPJ0T/raM= github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= +github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= +github.com/golang/protobuf v1.5.2 h1:ROPKBNFfQgOUMifHyP+KYbvpjbdoFNs+aK7DXlji0Tw= +github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.0 h1:/QaMHBdZ26BB3SSst0Iwl10Epc+xhTquomWX0oZEB6w= github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.6 h1:BKbKCqvP6I+rmFHt06ZmyQtvB8xAkWdhFyr0ZUNZcxQ= +github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/uuid v1.1.2 h1:EVhdT+1Kseyi1/pUmXKaFxYsDNy9RQYkMWRH68J/W7Y= github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw= @@ -72,8 +76,9 @@ golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73r golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= -golang.org/x/net v0.0.0-20200822124328-c89045814202 h1:VvcQYSHwXgi7W+TpUR6A9g6Up98WAHf3f/ulnJ62IyA= golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= +golang.org/x/net v0.0.0-20201021035429-f5854403a974 h1:IX6qOQeG5uLjB/hjjwjedwfjND0hgjPMMyO1RoIXQNI= +golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d h1:TzXSXBo42m9gQenoE3b9BGiEpg5IG2JkU5FkPIawgtw= golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= @@ -84,10 +89,14 @@ golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJ golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= 
golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd h1:xhmwyvizuTgC2qz7ZlMluP20uW+C3Rm0FD/WLDX8884= golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/text v0.3.0 h1:g61tztE5qeGQ89tm6NTjjM9VPIm088od1l6aSorWRWg= +golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210119212857-b64e53b001e4 h1:myAQVi0cGEoqQVR5POX+8RR2mrocKqNN1hmeMqhX27k= +golang.org/x/sys v0.0.0-20210119212857-b64e53b001e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.3 h1:cokOdA+Jmi5PJGXLlLllQSgYigAEfHXJAERHVMaCc2k= +golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= @@ -117,8 +126,11 @@ google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzi google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= -google.golang.org/protobuf v1.25.0 h1:Ejskq+SyPohKW+1uil0JJMtmHCgJPJ/qWTxr8qp+R4c= google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= +google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= +google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= +google.golang.org/protobuf v1.27.1 h1:SnqbnDw1V7RiZcXPx5MEeqPv2s79L9i7BJUlG/+RurQ= +google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= diff --git a/vendor/google.golang.org/grpc/grpclog/loggerv2.go b/vendor/google.golang.org/grpc/grpclog/loggerv2.go index 34098bb8eb..7c1f664090 100644 --- a/vendor/google.golang.org/grpc/grpclog/loggerv2.go +++ b/vendor/google.golang.org/grpc/grpclog/loggerv2.go @@ -248,12 +248,12 @@ func (g *loggerT) V(l int) bool { // later release. type DepthLoggerV2 interface { LoggerV2 - // InfoDepth logs to INFO log at the specified depth. Arguments are handled in the manner of fmt.Print. + // InfoDepth logs to INFO log at the specified depth. Arguments are handled in the manner of fmt.Println. InfoDepth(depth int, args ...interface{}) - // WarningDepth logs to WARNING log at the specified depth. Arguments are handled in the manner of fmt.Print. + // WarningDepth logs to WARNING log at the specified depth. Arguments are handled in the manner of fmt.Println. 
WarningDepth(depth int, args ...interface{}) - // ErrorDetph logs to ERROR log at the specified depth. Arguments are handled in the manner of fmt.Print. + // ErrorDepth logs to ERROR log at the specified depth. Arguments are handled in the manner of fmt.Println. ErrorDepth(depth int, args ...interface{}) - // FatalDepth logs to FATAL log at the specified depth. Arguments are handled in the manner of fmt.Print. + // FatalDepth logs to FATAL log at the specified depth. Arguments are handled in the manner of fmt.Println. FatalDepth(depth int, args ...interface{}) } diff --git a/vendor/google.golang.org/grpc/interceptor.go b/vendor/google.golang.org/grpc/interceptor.go index 668e0adcf0..bb96ef57be 100644 --- a/vendor/google.golang.org/grpc/interceptor.go +++ b/vendor/google.golang.org/grpc/interceptor.go @@ -72,9 +72,12 @@ type UnaryServerInfo struct { } // UnaryHandler defines the handler invoked by UnaryServerInterceptor to complete the normal -// execution of a unary RPC. If a UnaryHandler returns an error, it should be produced by the -// status package, or else gRPC will use codes.Unknown as the status code and err.Error() as -// the status message of the RPC. +// execution of a unary RPC. +// +// If a UnaryHandler returns an error, it should either be produced by the +// status package, or be one of the context errors. Otherwise, gRPC will use +// codes.Unknown as the status code and err.Error() as the status message of the +// RPC. type UnaryHandler func(ctx context.Context, req interface{}) (interface{}, error) // UnaryServerInterceptor provides a hook to intercept the execution of a unary RPC on the server. info diff --git a/vendor/google.golang.org/grpc/internal/balancer/gracefulswitch/gracefulswitch.go b/vendor/google.golang.org/grpc/internal/balancer/gracefulswitch/gracefulswitch.go new file mode 100644 index 0000000000..08666f62a7 --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/balancer/gracefulswitch/gracefulswitch.go @@ -0,0 +1,384 @@ +/* + * + * Copyright 2022 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package gracefulswitch implements a graceful switch load balancer. +package gracefulswitch + +import ( + "errors" + "fmt" + "sync" + + "google.golang.org/grpc/balancer" + "google.golang.org/grpc/balancer/base" + "google.golang.org/grpc/connectivity" + "google.golang.org/grpc/resolver" +) + +var errBalancerClosed = errors.New("gracefulSwitchBalancer is closed") +var _ balancer.Balancer = (*Balancer)(nil) + +// NewBalancer returns a graceful switch Balancer. +func NewBalancer(cc balancer.ClientConn, opts balancer.BuildOptions) *Balancer { + return &Balancer{ + cc: cc, + bOpts: opts, + } +} + +// Balancer is a utility to gracefully switch from one balancer to +// a new balancer. It implements the balancer.Balancer interface. +type Balancer struct { + bOpts balancer.BuildOptions + cc balancer.ClientConn + + // mu protects the following fields and all fields within balancerCurrent + // and balancerPending. 
mu does not need to be held when calling into the + // child balancers, as all calls into these children happen only as a direct + // result of a call into the gracefulSwitchBalancer, which are also + // guaranteed to be synchronous. There is one exception: an UpdateState call + // from a child balancer when current and pending are populated can lead to + // calling Close() on the current. To prevent that racing with an + // UpdateSubConnState from the channel, we hold currentMu during Close and + // UpdateSubConnState calls. + mu sync.Mutex + balancerCurrent *balancerWrapper + balancerPending *balancerWrapper + closed bool // set to true when this balancer is closed + + // currentMu must be locked before mu. This mutex guards against this + // sequence of events: UpdateSubConnState() called, finds the + // balancerCurrent, gives up lock, updateState comes in, causes Close() on + // balancerCurrent before the UpdateSubConnState is called on the + // balancerCurrent. + currentMu sync.Mutex +} + +// swap swaps out the current lb with the pending lb and updates the ClientConn. +// The caller must hold gsb.mu. +func (gsb *Balancer) swap() { + gsb.cc.UpdateState(gsb.balancerPending.lastState) + cur := gsb.balancerCurrent + gsb.balancerCurrent = gsb.balancerPending + gsb.balancerPending = nil + go func() { + gsb.currentMu.Lock() + defer gsb.currentMu.Unlock() + cur.Close() + }() +} + +// Helper function that checks if the balancer passed in is current or pending. +// The caller must hold gsb.mu. +func (gsb *Balancer) balancerCurrentOrPending(bw *balancerWrapper) bool { + return bw == gsb.balancerCurrent || bw == gsb.balancerPending +} + +// SwitchTo initializes the graceful switch process, which completes based on +// connectivity state changes on the current/pending balancer. Thus, the switch +// process is not complete when this method returns. This method must be called +// synchronously alongside the rest of the balancer.Balancer methods this +// Graceful Switch Balancer implements. +func (gsb *Balancer) SwitchTo(builder balancer.Builder) error { + gsb.mu.Lock() + if gsb.closed { + gsb.mu.Unlock() + return errBalancerClosed + } + bw := &balancerWrapper{ + gsb: gsb, + lastState: balancer.State{ + ConnectivityState: connectivity.Connecting, + Picker: base.NewErrPicker(balancer.ErrNoSubConnAvailable), + }, + subconns: make(map[balancer.SubConn]bool), + } + balToClose := gsb.balancerPending // nil if there is no pending balancer + if gsb.balancerCurrent == nil { + gsb.balancerCurrent = bw + } else { + gsb.balancerPending = bw + } + gsb.mu.Unlock() + balToClose.Close() + // This function takes a builder instead of a balancer because builder.Build + // can call back inline, and this utility needs to handle the callbacks. + newBalancer := builder.Build(bw, gsb.bOpts) + if newBalancer == nil { + // This is illegal and should never happen; we clear the balancerWrapper + // we were constructing if it happens to avoid a potential panic. + gsb.mu.Lock() + if gsb.balancerPending != nil { + gsb.balancerPending = nil + } else { + gsb.balancerCurrent = nil + } + gsb.mu.Unlock() + return balancer.ErrBadResolverState + } + + // This write doesn't need to take gsb.mu because this field never gets read + // or written to on any calls from the current or pending. Calls from grpc + // to this balancer are guaranteed to be called synchronously, so this + // bw.Balancer field will never be forwarded to until this SwitchTo() + // function returns. 
+ bw.Balancer = newBalancer + return nil +} + +// Returns nil if the graceful switch balancer is closed. +func (gsb *Balancer) latestBalancer() *balancerWrapper { + gsb.mu.Lock() + defer gsb.mu.Unlock() + if gsb.balancerPending != nil { + return gsb.balancerPending + } + return gsb.balancerCurrent +} + +// UpdateClientConnState forwards the update to the latest balancer created. +func (gsb *Balancer) UpdateClientConnState(state balancer.ClientConnState) error { + // The resolver data is only relevant to the most recent LB Policy. + balToUpdate := gsb.latestBalancer() + if balToUpdate == nil { + return errBalancerClosed + } + // Perform this call without gsb.mu to prevent deadlocks if the child calls + // back into the channel. The latest balancer can never be closed during a + // call from the channel, even without gsb.mu held. + return balToUpdate.UpdateClientConnState(state) +} + +// ResolverError forwards the error to the latest balancer created. +func (gsb *Balancer) ResolverError(err error) { + // The resolver data is only relevant to the most recent LB Policy. + balToUpdate := gsb.latestBalancer() + if balToUpdate == nil { + return + } + // Perform this call without gsb.mu to prevent deadlocks if the child calls + // back into the channel. The latest balancer can never be closed during a + // call from the channel, even without gsb.mu held. + balToUpdate.ResolverError(err) +} + +// ExitIdle forwards the call to the latest balancer created. +// +// If the latest balancer does not support ExitIdle, the subConns are +// re-connected to manually. +func (gsb *Balancer) ExitIdle() { + balToUpdate := gsb.latestBalancer() + if balToUpdate == nil { + return + } + // There is no need to protect this read with a mutex, as the write to the + // Balancer field happens in SwitchTo, which completes before this can be + // called. + if ei, ok := balToUpdate.Balancer.(balancer.ExitIdler); ok { + ei.ExitIdle() + return + } + gsb.mu.Lock() + defer gsb.mu.Unlock() + for sc := range balToUpdate.subconns { + sc.Connect() + } +} + +// UpdateSubConnState forwards the update to the appropriate child. +func (gsb *Balancer) UpdateSubConnState(sc balancer.SubConn, state balancer.SubConnState) { + gsb.currentMu.Lock() + defer gsb.currentMu.Unlock() + gsb.mu.Lock() + // Forward update to the appropriate child. Even if there is a pending + // balancer, the current balancer should continue to get SubConn updates to + // maintain the proper state while the pending is still connecting. + var balToUpdate *balancerWrapper + if gsb.balancerCurrent != nil && gsb.balancerCurrent.subconns[sc] { + balToUpdate = gsb.balancerCurrent + } else if gsb.balancerPending != nil && gsb.balancerPending.subconns[sc] { + balToUpdate = gsb.balancerPending + } + gsb.mu.Unlock() + if balToUpdate == nil { + // SubConn belonged to a stale lb policy that has not yet fully closed, + // or the balancer was already closed. + return + } + balToUpdate.UpdateSubConnState(sc, state) +} + +// Close closes any active child balancers. +func (gsb *Balancer) Close() { + gsb.mu.Lock() + gsb.closed = true + currentBalancerToClose := gsb.balancerCurrent + gsb.balancerCurrent = nil + pendingBalancerToClose := gsb.balancerPending + gsb.balancerPending = nil + gsb.mu.Unlock() + + currentBalancerToClose.Close() + pendingBalancerToClose.Close() +} + +// balancerWrapper wraps a balancer.Balancer, and overrides some Balancer +// methods to help cleanup SubConns created by the wrapped balancer. 
+// +// It implements the balancer.ClientConn interface and is passed down in that +// capacity to the wrapped balancer. It maintains a set of subConns created by +// the wrapped balancer and calls from the latter to create/update/remove +// SubConns update this set before being forwarded to the parent ClientConn. +// State updates from the wrapped balancer can result in invocation of the +// graceful switch logic. +type balancerWrapper struct { + balancer.Balancer + gsb *Balancer + + lastState balancer.State + subconns map[balancer.SubConn]bool // subconns created by this balancer +} + +func (bw *balancerWrapper) UpdateSubConnState(sc balancer.SubConn, state balancer.SubConnState) { + if state.ConnectivityState == connectivity.Shutdown { + bw.gsb.mu.Lock() + delete(bw.subconns, sc) + bw.gsb.mu.Unlock() + } + // There is no need to protect this read with a mutex, as the write to the + // Balancer field happens in SwitchTo, which completes before this can be + // called. + bw.Balancer.UpdateSubConnState(sc, state) +} + +// Close closes the underlying LB policy and removes the subconns it created. bw +// must not be referenced via balancerCurrent or balancerPending in gsb when +// called. gsb.mu must not be held. Does not panic with a nil receiver. +func (bw *balancerWrapper) Close() { + // before Close is called. + if bw == nil { + return + } + // There is no need to protect this read with a mutex, as Close() is + // impossible to be called concurrently with the write in SwitchTo(). The + // callsites of Close() for this balancer in Graceful Switch Balancer will + // never be called until SwitchTo() returns. + bw.Balancer.Close() + bw.gsb.mu.Lock() + for sc := range bw.subconns { + bw.gsb.cc.RemoveSubConn(sc) + } + bw.gsb.mu.Unlock() +} + +func (bw *balancerWrapper) UpdateState(state balancer.State) { + // Hold the mutex for this entire call to ensure it cannot occur + // concurrently with other updateState() calls. This causes updates to + // lastState and calls to cc.UpdateState to happen atomically. + bw.gsb.mu.Lock() + defer bw.gsb.mu.Unlock() + bw.lastState = state + + if !bw.gsb.balancerCurrentOrPending(bw) { + return + } + + if bw == bw.gsb.balancerCurrent { + // In the case that the current balancer exits READY, and there is a pending + // balancer, you can forward the pending balancer's cached State up to + // ClientConn and swap the pending into the current. This is because there + // is no reason to gracefully switch from and keep using the old policy as + // the ClientConn is not connected to any backends. + if state.ConnectivityState != connectivity.Ready && bw.gsb.balancerPending != nil { + bw.gsb.swap() + return + } + // Even if there is a pending balancer waiting to be gracefully switched to, + // continue to forward current balancer updates to the Client Conn. Ignoring + // state + picker from the current would cause undefined behavior/cause the + // system to behave incorrectly from the current LB policies perspective. + // Also, the current LB is still being used by grpc to choose SubConns per + // RPC, and thus should use the most updated form of the current balancer. + bw.gsb.cc.UpdateState(state) + return + } + // This method is now dealing with a state update from the pending balancer. + // If the current balancer is currently in a state other than READY, the new + // policy can be swapped into place immediately. This is because there is no + // reason to gracefully switch from and keep using the old policy as the + // ClientConn is not connected to any backends. 
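+	// (Equivalently: the swap is deferred only while the pending balancer is
+	// still Connecting and the current balancer is still Ready.)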
+ if state.ConnectivityState != connectivity.Connecting || bw.gsb.balancerCurrent.lastState.ConnectivityState != connectivity.Ready { + bw.gsb.swap() + } +} + +func (bw *balancerWrapper) NewSubConn(addrs []resolver.Address, opts balancer.NewSubConnOptions) (balancer.SubConn, error) { + bw.gsb.mu.Lock() + if !bw.gsb.balancerCurrentOrPending(bw) { + bw.gsb.mu.Unlock() + return nil, fmt.Errorf("%T at address %p that called NewSubConn is deleted", bw, bw) + } + bw.gsb.mu.Unlock() + + sc, err := bw.gsb.cc.NewSubConn(addrs, opts) + if err != nil { + return nil, err + } + bw.gsb.mu.Lock() + if !bw.gsb.balancerCurrentOrPending(bw) { // balancer was closed during this call + bw.gsb.cc.RemoveSubConn(sc) + bw.gsb.mu.Unlock() + return nil, fmt.Errorf("%T at address %p that called NewSubConn is deleted", bw, bw) + } + bw.subconns[sc] = true + bw.gsb.mu.Unlock() + return sc, nil +} + +func (bw *balancerWrapper) ResolveNow(opts resolver.ResolveNowOptions) { + // Ignore ResolveNow requests from anything other than the most recent + // balancer, because older balancers were already removed from the config. + if bw != bw.gsb.latestBalancer() { + return + } + bw.gsb.cc.ResolveNow(opts) +} + +func (bw *balancerWrapper) RemoveSubConn(sc balancer.SubConn) { + bw.gsb.mu.Lock() + if !bw.gsb.balancerCurrentOrPending(bw) { + bw.gsb.mu.Unlock() + return + } + bw.gsb.mu.Unlock() + bw.gsb.cc.RemoveSubConn(sc) +} + +func (bw *balancerWrapper) UpdateAddresses(sc balancer.SubConn, addrs []resolver.Address) { + bw.gsb.mu.Lock() + if !bw.gsb.balancerCurrentOrPending(bw) { + bw.gsb.mu.Unlock() + return + } + bw.gsb.mu.Unlock() + bw.gsb.cc.UpdateAddresses(sc, addrs) +} + +func (bw *balancerWrapper) Target() string { + return bw.gsb.cc.Target() +} diff --git a/vendor/google.golang.org/grpc/internal/binarylog/binarylog.go b/vendor/google.golang.org/grpc/internal/binarylog/binarylog.go index 5cc3aeddb2..e3dfe204f9 100644 --- a/vendor/google.golang.org/grpc/internal/binarylog/binarylog.go +++ b/vendor/google.golang.org/grpc/internal/binarylog/binarylog.go @@ -31,7 +31,7 @@ import ( // Logger is the global binary logger. It can be used to get binary logger for // each method. type Logger interface { - getMethodLogger(methodName string) *MethodLogger + GetMethodLogger(methodName string) MethodLogger } // binLogger is the global binary logger for the binary. One of this should be @@ -42,24 +42,31 @@ var binLogger Logger var grpclogLogger = grpclog.Component("binarylog") -// SetLogger sets the binarg logger. +// SetLogger sets the binary logger. // // Only call this at init time. func SetLogger(l Logger) { binLogger = l } +// GetLogger gets the binary logger. +// +// Only call this at init time. +func GetLogger() Logger { + return binLogger +} + // GetMethodLogger returns the methodLogger for the given methodName. // // methodName should be in the format of "/service/method". // // Each methodLogger returned by this method is a new instance. This is to // generate sequence id within the call. -func GetMethodLogger(methodName string) *MethodLogger { +func GetMethodLogger(methodName string) MethodLogger { if binLogger == nil { return nil } - return binLogger.getMethodLogger(methodName) + return binLogger.GetMethodLogger(methodName) } func init() { @@ -68,17 +75,29 @@ func init() { binLogger = NewLoggerFromConfigString(configStr) } -type methodLoggerConfig struct { +// MethodLoggerConfig contains the setting for logging behavior of a method +// logger. Currently, it contains the max length of header and message. 
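+//
+// For example (illustrative), MethodLoggerConfig{Header: 1024, Message: 4096}
+// caps logged headers at 1 KiB and logged messages at 4 KiB; longer payloads
+// are truncated and flagged.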
+type MethodLoggerConfig struct { // Max length of header and message. - hdr, msg uint64 + Header, Message uint64 +} + +// LoggerConfig contains the config for loggers to create method loggers. +type LoggerConfig struct { + All *MethodLoggerConfig + Services map[string]*MethodLoggerConfig + Methods map[string]*MethodLoggerConfig + + Blacklist map[string]struct{} } type logger struct { - all *methodLoggerConfig - services map[string]*methodLoggerConfig - methods map[string]*methodLoggerConfig + config LoggerConfig +} - blacklist map[string]struct{} +// NewLoggerFromConfig builds a logger with the given LoggerConfig. +func NewLoggerFromConfig(config LoggerConfig) Logger { + return &logger{config: config} } // newEmptyLogger creates an empty logger. The map fields need to be filled in @@ -88,57 +107,57 @@ func newEmptyLogger() *logger { } // Set method logger for "*". -func (l *logger) setDefaultMethodLogger(ml *methodLoggerConfig) error { - if l.all != nil { +func (l *logger) setDefaultMethodLogger(ml *MethodLoggerConfig) error { + if l.config.All != nil { return fmt.Errorf("conflicting global rules found") } - l.all = ml + l.config.All = ml return nil } // Set method logger for "service/*". // // New methodLogger with same service overrides the old one. -func (l *logger) setServiceMethodLogger(service string, ml *methodLoggerConfig) error { - if _, ok := l.services[service]; ok { +func (l *logger) setServiceMethodLogger(service string, ml *MethodLoggerConfig) error { + if _, ok := l.config.Services[service]; ok { return fmt.Errorf("conflicting service rules for service %v found", service) } - if l.services == nil { - l.services = make(map[string]*methodLoggerConfig) + if l.config.Services == nil { + l.config.Services = make(map[string]*MethodLoggerConfig) } - l.services[service] = ml + l.config.Services[service] = ml return nil } // Set method logger for "service/method". // // New methodLogger with same method overrides the old one. -func (l *logger) setMethodMethodLogger(method string, ml *methodLoggerConfig) error { - if _, ok := l.blacklist[method]; ok { +func (l *logger) setMethodMethodLogger(method string, ml *MethodLoggerConfig) error { + if _, ok := l.config.Blacklist[method]; ok { return fmt.Errorf("conflicting blacklist rules for method %v found", method) } - if _, ok := l.methods[method]; ok { + if _, ok := l.config.Methods[method]; ok { return fmt.Errorf("conflicting method rules for method %v found", method) } - if l.methods == nil { - l.methods = make(map[string]*methodLoggerConfig) + if l.config.Methods == nil { + l.config.Methods = make(map[string]*MethodLoggerConfig) } - l.methods[method] = ml + l.config.Methods[method] = ml return nil } // Set blacklist method for "-service/method". func (l *logger) setBlacklist(method string) error { - if _, ok := l.blacklist[method]; ok { + if _, ok := l.config.Blacklist[method]; ok { return fmt.Errorf("conflicting blacklist rules for method %v found", method) } - if _, ok := l.methods[method]; ok { + if _, ok := l.config.Methods[method]; ok { return fmt.Errorf("conflicting method rules for method %v found", method) } - if l.blacklist == nil { - l.blacklist = make(map[string]struct{}) + if l.config.Blacklist == nil { + l.config.Blacklist = make(map[string]struct{}) } - l.blacklist[method] = struct{}{} + l.config.Blacklist[method] = struct{}{} return nil } @@ -148,23 +167,23 @@ func (l *logger) setBlacklist(method string) error { // // Each methodLogger returned by this method is a new instance. 
This is to // generate sequence id within the call. -func (l *logger) getMethodLogger(methodName string) *MethodLogger { +func (l *logger) GetMethodLogger(methodName string) MethodLogger { s, m, err := grpcutil.ParseMethod(methodName) if err != nil { grpclogLogger.Infof("binarylogging: failed to parse %q: %v", methodName, err) return nil } - if ml, ok := l.methods[s+"/"+m]; ok { - return newMethodLogger(ml.hdr, ml.msg) + if ml, ok := l.config.Methods[s+"/"+m]; ok { + return newMethodLogger(ml.Header, ml.Message) } - if _, ok := l.blacklist[s+"/"+m]; ok { + if _, ok := l.config.Blacklist[s+"/"+m]; ok { return nil } - if ml, ok := l.services[s]; ok { - return newMethodLogger(ml.hdr, ml.msg) + if ml, ok := l.config.Services[s]; ok { + return newMethodLogger(ml.Header, ml.Message) } - if l.all == nil { + if l.config.All == nil { return nil } - return newMethodLogger(l.all.hdr, l.all.msg) + return newMethodLogger(l.config.All.Header, l.config.All.Message) } diff --git a/vendor/google.golang.org/grpc/internal/binarylog/env_config.go b/vendor/google.golang.org/grpc/internal/binarylog/env_config.go index d8f4e7602f..ab589a76bf 100644 --- a/vendor/google.golang.org/grpc/internal/binarylog/env_config.go +++ b/vendor/google.golang.org/grpc/internal/binarylog/env_config.go @@ -89,7 +89,7 @@ func (l *logger) fillMethodLoggerWithConfigString(config string) error { if err != nil { return fmt.Errorf("invalid config: %q, %v", config, err) } - if err := l.setDefaultMethodLogger(&methodLoggerConfig{hdr: hdr, msg: msg}); err != nil { + if err := l.setDefaultMethodLogger(&MethodLoggerConfig{Header: hdr, Message: msg}); err != nil { return fmt.Errorf("invalid config: %v", err) } return nil @@ -104,11 +104,11 @@ func (l *logger) fillMethodLoggerWithConfigString(config string) error { return fmt.Errorf("invalid header/message length config: %q, %v", suffix, err) } if m == "*" { - if err := l.setServiceMethodLogger(s, &methodLoggerConfig{hdr: hdr, msg: msg}); err != nil { + if err := l.setServiceMethodLogger(s, &MethodLoggerConfig{Header: hdr, Message: msg}); err != nil { return fmt.Errorf("invalid config: %v", err) } } else { - if err := l.setMethodMethodLogger(s+"/"+m, &methodLoggerConfig{hdr: hdr, msg: msg}); err != nil { + if err := l.setMethodMethodLogger(s+"/"+m, &MethodLoggerConfig{Header: hdr, Message: msg}); err != nil { return fmt.Errorf("invalid config: %v", err) } } diff --git a/vendor/google.golang.org/grpc/internal/binarylog/method_logger.go b/vendor/google.golang.org/grpc/internal/binarylog/method_logger.go index 0cdb418315..24df0a1a0c 100644 --- a/vendor/google.golang.org/grpc/internal/binarylog/method_logger.go +++ b/vendor/google.golang.org/grpc/internal/binarylog/method_logger.go @@ -48,7 +48,11 @@ func (g *callIDGenerator) reset() { var idGen callIDGenerator // MethodLogger is the sub-logger for each method. -type MethodLogger struct { +type MethodLogger interface { + Log(LogEntryConfig) +} + +type methodLogger struct { headerMaxLen, messageMaxLen uint64 callID uint64 @@ -57,8 +61,8 @@ type MethodLogger struct { sink Sink // TODO(blog): make this plugable. } -func newMethodLogger(h, m uint64) *MethodLogger { - return &MethodLogger{ +func newMethodLogger(h, m uint64) *methodLogger { + return &methodLogger{ headerMaxLen: h, messageMaxLen: m, @@ -69,8 +73,10 @@ func newMethodLogger(h, m uint64) *MethodLogger { } } -// Log creates a proto binary log entry, and logs it to the sink. 
-func (ml *MethodLogger) Log(c LogEntryConfig) { +// Build is an internal only method for building the proto message out of the +// input event. It's made public to enable other library to reuse as much logic +// in methodLogger as possible. +func (ml *methodLogger) Build(c LogEntryConfig) *pb.GrpcLogEntry { m := c.toProto() timestamp, _ := ptypes.TimestampProto(time.Now()) m.Timestamp = timestamp @@ -85,11 +91,15 @@ func (ml *MethodLogger) Log(c LogEntryConfig) { case *pb.GrpcLogEntry_Message: m.PayloadTruncated = ml.truncateMessage(pay.Message) } + return m +} - ml.sink.Write(m) +// Log creates a proto binary log entry, and logs it to the sink. +func (ml *methodLogger) Log(c LogEntryConfig) { + ml.sink.Write(ml.Build(c)) } -func (ml *MethodLogger) truncateMetadata(mdPb *pb.Metadata) (truncated bool) { +func (ml *methodLogger) truncateMetadata(mdPb *pb.Metadata) (truncated bool) { if ml.headerMaxLen == maxUInt { return false } @@ -119,7 +129,7 @@ func (ml *MethodLogger) truncateMetadata(mdPb *pb.Metadata) (truncated bool) { return truncated } -func (ml *MethodLogger) truncateMessage(msgPb *pb.Message) (truncated bool) { +func (ml *methodLogger) truncateMessage(msgPb *pb.Message) (truncated bool) { if ml.messageMaxLen == maxUInt { return false } diff --git a/vendor/google.golang.org/grpc/internal/channelz/funcs.go b/vendor/google.golang.org/grpc/internal/channelz/funcs.go index cd1807543e..777cbcd792 100644 --- a/vendor/google.golang.org/grpc/internal/channelz/funcs.go +++ b/vendor/google.golang.org/grpc/internal/channelz/funcs.go @@ -24,6 +24,8 @@ package channelz import ( + "context" + "errors" "fmt" "sort" "sync" @@ -49,7 +51,8 @@ var ( // TurnOn turns on channelz data collection. func TurnOn() { if !IsOn() { - NewChannelzStorage() + db.set(newChannelMap()) + idGen.reset() atomic.StoreInt32(&curState, 1) } } @@ -94,46 +97,40 @@ func (d *dbWrapper) get() *channelMap { return d.DB } -// NewChannelzStorage initializes channelz data storage and id generator. +// NewChannelzStorageForTesting initializes channelz data storage and id +// generator for testing purposes. // -// This function returns a cleanup function to wait for all channelz state to be reset by the -// grpc goroutines when those entities get closed. By using this cleanup function, we make sure tests -// don't mess up each other, i.e. lingering goroutine from previous test doing entity removal happen -// to remove some entity just register by the new test, since the id space is the same. -// -// Note: This function is exported for testing purpose only. User should not call -// it in most cases. -func NewChannelzStorage() (cleanup func() error) { - db.set(&channelMap{ - topLevelChannels: make(map[int64]struct{}), - channels: make(map[int64]*channel), - listenSockets: make(map[int64]*listenSocket), - normalSockets: make(map[int64]*normalSocket), - servers: make(map[int64]*server), - subChannels: make(map[int64]*subChannel), - }) +// Returns a cleanup function to be invoked by the test, which waits for up to +// 10s for all channelz state to be reset by the grpc goroutines when those +// entities get closed. This cleanup function helps with ensuring that tests +// don't mess up each other. 
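+//
+// Typical use in a test (illustrative):
+//
+//	cleanup := channelz.NewChannelzStorageForTesting()
+//	defer func() {
+//		if err := cleanup(); err != nil {
+//			t.Error(err)
+//		}
+//	}()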
+func NewChannelzStorageForTesting() (cleanup func() error) { + db.set(newChannelMap()) idGen.reset() + return func() error { - var err error cm := db.get() if cm == nil { return nil } - for i := 0; i < 1000; i++ { - cm.mu.Lock() - if len(cm.topLevelChannels) == 0 && len(cm.servers) == 0 && len(cm.channels) == 0 && len(cm.subChannels) == 0 && len(cm.listenSockets) == 0 && len(cm.normalSockets) == 0 { - cm.mu.Unlock() - // all things stored in the channelz map have been cleared. + + ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) + defer cancel() + ticker := time.NewTicker(10 * time.Millisecond) + defer ticker.Stop() + for { + cm.mu.RLock() + topLevelChannels, servers, channels, subChannels, listenSockets, normalSockets := len(cm.topLevelChannels), len(cm.servers), len(cm.channels), len(cm.subChannels), len(cm.listenSockets), len(cm.normalSockets) + cm.mu.RUnlock() + + if err := ctx.Err(); err != nil { + return fmt.Errorf("after 10s the channelz map has not been cleaned up yet, topchannels: %d, servers: %d, channels: %d, subchannels: %d, listen sockets: %d, normal sockets: %d", topLevelChannels, servers, channels, subChannels, listenSockets, normalSockets) + } + if topLevelChannels == 0 && servers == 0 && channels == 0 && subChannels == 0 && listenSockets == 0 && normalSockets == 0 { return nil } - cm.mu.Unlock() - time.Sleep(10 * time.Millisecond) + <-ticker.C } - - cm.mu.Lock() - err = fmt.Errorf("after 10s the channelz map has not been cleaned up yet, topchannels: %d, servers: %d, channels: %d, subchannels: %d, listen sockets: %d, normal sockets: %d", len(cm.topLevelChannels), len(cm.servers), len(cm.channels), len(cm.subChannels), len(cm.listenSockets), len(cm.normalSockets)) - cm.mu.Unlock() - return err } } @@ -188,54 +185,77 @@ func GetServer(id int64) *ServerMetric { return db.get().GetServer(id) } -// RegisterChannel registers the given channel c in channelz database with ref -// as its reference name, and add it to the child list of its parent (identified -// by pid). pid = 0 means no parent. It returns the unique channelz tracking id -// assigned to this channel. -func RegisterChannel(c Channel, pid int64, ref string) int64 { +// RegisterChannel registers the given channel c in the channelz database with +// ref as its reference name, and adds it to the child list of its parent +// (identified by pid). pid == nil means no parent. +// +// Returns a unique channelz identifier assigned to this channel. +// +// If channelz is not turned ON, the channelz database is not mutated. +func RegisterChannel(c Channel, pid *Identifier, ref string) *Identifier { id := idGen.genID() + var parent int64 + isTopChannel := true + if pid != nil { + isTopChannel = false + parent = pid.Int() + } + + if !IsOn() { + return newIdentifer(RefChannel, id, pid) + } + cn := &channel{ refName: ref, c: c, subChans: make(map[int64]string), nestedChans: make(map[int64]string), id: id, - pid: pid, + pid: parent, trace: &channelTrace{createdTime: time.Now(), events: make([]*TraceEvent, 0, getMaxTraceEntry())}, } - if pid == 0 { - db.get().addChannel(id, cn, true, pid) - } else { - db.get().addChannel(id, cn, false, pid) - } - return id + db.get().addChannel(id, cn, isTopChannel, parent) + return newIdentifer(RefChannel, id, pid) } -// RegisterSubChannel registers the given channel c in channelz database with ref -// as its reference name, and add it to the child list of its parent (identified -// by pid). It returns the unique channelz tracking id assigned to this subchannel. 
-func RegisterSubChannel(c Channel, pid int64, ref string) int64 {
-	if pid == 0 {
-		logger.Error("a SubChannel's parent id cannot be 0")
-		return 0
+// RegisterSubChannel registers the given subChannel c in the channelz database
+// with ref as its reference name, and adds it to the child list of its parent
+// (identified by pid).
+//
+// Returns a unique channelz identifier assigned to this subChannel.
+//
+// If channelz is not turned ON, the channelz database is not mutated.
+func RegisterSubChannel(c Channel, pid *Identifier, ref string) (*Identifier, error) {
+	if pid == nil {
+		return nil, errors.New("a SubChannel's parent id cannot be nil")
 	}
 	id := idGen.genID()
+	if !IsOn() {
+		return newIdentifer(RefSubChannel, id, pid), nil
+	}
+
 	sc := &subChannel{
 		refName: ref,
 		c:       c,
 		sockets: make(map[int64]string),
 		id:      id,
-		pid:     pid,
+		pid:     pid.Int(),
 		trace:   &channelTrace{createdTime: time.Now(), events: make([]*TraceEvent, 0, getMaxTraceEntry())},
 	}
-	db.get().addSubChannel(id, sc, pid)
-	return id
+	db.get().addSubChannel(id, sc, pid.Int())
+	return newIdentifer(RefSubChannel, id, pid), nil
 }
 
 // RegisterServer registers the given server s in channelz database. It returns
 // the unique channelz tracking id assigned to this server.
-func RegisterServer(s Server, ref string) int64 {
+//
+// If channelz is not turned ON, the channelz database is not mutated.
+func RegisterServer(s Server, ref string) *Identifier {
 	id := idGen.genID()
+	if !IsOn() {
+		return newIdentifer(RefServer, id, nil)
+	}
+
 	svr := &server{
 		refName: ref,
 		s:       s,
@@ -244,71 +264,92 @@ func RegisterServer(s Server, ref string) int64 {
 		id:      id,
 	}
 	db.get().addServer(id, svr)
-	return id
+	return newIdentifer(RefServer, id, nil)
 }
 
 // RegisterListenSocket registers the given listen socket s in channelz database
 // with ref as its reference name, and add it to the child list of its parent
 // (identified by pid). It returns the unique channelz tracking id assigned to
 // this listen socket.
-func RegisterListenSocket(s Socket, pid int64, ref string) int64 {
-	if pid == 0 {
-		logger.Error("a ListenSocket's parent id cannot be 0")
-		return 0
+//
+// If channelz is not turned ON, the channelz database is not mutated.
+func RegisterListenSocket(s Socket, pid *Identifier, ref string) (*Identifier, error) {
+	if pid == nil {
+		return nil, errors.New("a ListenSocket's parent id cannot be nil")
 	}
 	id := idGen.genID()
-	ls := &listenSocket{refName: ref, s: s, id: id, pid: pid}
-	db.get().addListenSocket(id, ls, pid)
-	return id
+	if !IsOn() {
+		return newIdentifer(RefListenSocket, id, pid), nil
+	}
+
+	ls := &listenSocket{refName: ref, s: s, id: id, pid: pid.Int()}
+	db.get().addListenSocket(id, ls, pid.Int())
+	return newIdentifer(RefListenSocket, id, pid), nil
 }
 
 // RegisterNormalSocket registers the given normal socket s in channelz database
-// with ref as its reference name, and add it to the child list of its parent
+// with ref as its reference name, and adds it to the child list of its parent
 // (identified by pid). It returns the unique channelz tracking id assigned to
 // this normal socket.
-func RegisterNormalSocket(s Socket, pid int64, ref string) int64 {
-	if pid == 0 {
-		logger.Error("a NormalSocket's parent id cannot be 0")
-		return 0
+//
+// If channelz is not turned ON, the channelz database is not mutated.
+func RegisterNormalSocket(s Socket, pid *Identifier, ref string) (*Identifier, error) {
+	if pid == nil {
+		return nil, errors.New("a NormalSocket's parent id cannot be nil")
 	}
 	id := idGen.genID()
-	ns := &normalSocket{refName: ref, s: s, id: id, pid: pid}
-	db.get().addNormalSocket(id, ns, pid)
-	return id
+	if !IsOn() {
+		return newIdentifer(RefNormalSocket, id, pid), nil
+	}
+
+	ns := &normalSocket{refName: ref, s: s, id: id, pid: pid.Int()}
+	db.get().addNormalSocket(id, ns, pid.Int())
+	return newIdentifer(RefNormalSocket, id, pid), nil
 }
 
 // RemoveEntry removes an entry with unique channelz tracking id to be id from
 // channelz database.
-func RemoveEntry(id int64) {
-	db.get().removeEntry(id)
+//
+// If channelz is not turned ON, this function is a no-op.
+func RemoveEntry(id *Identifier) {
+	if !IsOn() {
+		return
+	}
+	db.get().removeEntry(id.Int())
 }
 
-// TraceEventDesc is what the caller of AddTraceEvent should provide to describe the event to be added
-// to the channel trace.
-// The Parent field is optional. It is used for event that will be recorded in the entity's parent
-// trace also.
+// TraceEventDesc is what the caller of AddTraceEvent should provide to describe
+// the event to be added to the channel trace.
+//
+// The Parent field is optional. It is used for an event that will be recorded
+// in the entity's parent trace.
 type TraceEventDesc struct {
 	Desc     string
 	Severity Severity
 	Parent   *TraceEventDesc
 }
 
-// AddTraceEvent adds trace related to the entity with specified id, using the provided TraceEventDesc.
-func AddTraceEvent(l grpclog.DepthLoggerV2, id int64, depth int, desc *TraceEventDesc) {
-	for d := desc; d != nil; d = d.Parent {
-		switch d.Severity {
-		case CtUnknown, CtInfo:
-			l.InfoDepth(depth+1, d.Desc)
-		case CtWarning:
-			l.WarningDepth(depth+1, d.Desc)
-		case CtError:
-			l.ErrorDepth(depth+1, d.Desc)
-		}
+// AddTraceEvent adds trace related to the entity with specified id, using the
+// provided TraceEventDesc.
+//
+// If channelz is not turned ON, this will simply log the event descriptions.
+func AddTraceEvent(l grpclog.DepthLoggerV2, id *Identifier, depth int, desc *TraceEventDesc) {
+	// Log only the trace description associated with the bottom-most entity.
+	switch desc.Severity {
+	case CtUnknown, CtInfo:
+		l.InfoDepth(depth+1, withParens(id)+desc.Desc)
+	case CtWarning:
+		l.WarningDepth(depth+1, withParens(id)+desc.Desc)
+	case CtError:
+		l.ErrorDepth(depth+1, withParens(id)+desc.Desc)
 	}
+
 	if getMaxTraceEntry() == 0 {
 		return
 	}
-	db.get().traceEvent(id, desc)
+	if IsOn() {
+		db.get().traceEvent(id.Int(), desc)
+	}
 }
 
 // channelMap is the storage data structure for channelz.
@@ -326,6 +367,17 @@ type channelMap struct {
 	normalSockets    map[int64]*normalSocket
 }
 
+func newChannelMap() *channelMap {
+	return &channelMap{
+		topLevelChannels: make(map[int64]struct{}),
+		channels:         make(map[int64]*channel),
+		listenSockets:    make(map[int64]*listenSocket),
+		normalSockets:    make(map[int64]*normalSocket),
+		servers:          make(map[int64]*server),
+		subChannels:      make(map[int64]*subChannel),
+	}
+}
+
 func (c *channelMap) addServer(id int64, s *server) {
 	c.mu.Lock()
 	s.cm = c
diff --git a/vendor/google.golang.org/grpc/internal/channelz/id.go b/vendor/google.golang.org/grpc/internal/channelz/id.go
new file mode 100644
index 0000000000..c9a27acd37
--- /dev/null
+++ b/vendor/google.golang.org/grpc/internal/channelz/id.go
@@ -0,0 +1,75 @@
+/*
+ *
+ * Copyright 2022 gRPC authors.
+ * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package channelz + +import "fmt" + +// Identifier is an opaque identifier which uniquely identifies an entity in the +// channelz database. +type Identifier struct { + typ RefChannelType + id int64 + str string + pid *Identifier +} + +// Type returns the entity type corresponding to id. +func (id *Identifier) Type() RefChannelType { + return id.typ +} + +// Int returns the integer identifier corresponding to id. +func (id *Identifier) Int() int64 { + return id.id +} + +// String returns a string representation of the entity corresponding to id. +// +// This includes some information about the parent as well. Examples: +// Top-level channel: [Channel #channel-number] +// Nested channel: [Channel #parent-channel-number Channel #channel-number] +// Sub channel: [Channel #parent-channel SubChannel #subchannel-number] +func (id *Identifier) String() string { + return id.str +} + +// Equal returns true if other is the same as id. +func (id *Identifier) Equal(other *Identifier) bool { + if (id != nil) != (other != nil) { + return false + } + if id == nil && other == nil { + return true + } + return id.typ == other.typ && id.id == other.id && id.pid == other.pid +} + +// NewIdentifierForTesting returns a new opaque identifier to be used only for +// testing purposes. +func NewIdentifierForTesting(typ RefChannelType, id int64, pid *Identifier) *Identifier { + return newIdentifer(typ, id, pid) +} + +func newIdentifer(typ RefChannelType, id int64, pid *Identifier) *Identifier { + str := fmt.Sprintf("%s #%d", typ, id) + if pid != nil { + str = fmt.Sprintf("%s %s", pid, str) + } + return &Identifier{typ: typ, id: id, str: str, pid: pid} +} diff --git a/vendor/google.golang.org/grpc/internal/channelz/logging.go b/vendor/google.golang.org/grpc/internal/channelz/logging.go index b0013f9c88..8e13a3d2ce 100644 --- a/vendor/google.golang.org/grpc/internal/channelz/logging.go +++ b/vendor/google.golang.org/grpc/internal/channelz/logging.go @@ -26,77 +26,54 @@ import ( var logger = grpclog.Component("channelz") +func withParens(id *Identifier) string { + return "[" + id.String() + "] " +} + // Info logs and adds a trace event if channelz is on. -func Info(l grpclog.DepthLoggerV2, id int64, args ...interface{}) { - if IsOn() { - AddTraceEvent(l, id, 1, &TraceEventDesc{ - Desc: fmt.Sprint(args...), - Severity: CtInfo, - }) - } else { - l.InfoDepth(1, args...) - } +func Info(l grpclog.DepthLoggerV2, id *Identifier, args ...interface{}) { + AddTraceEvent(l, id, 1, &TraceEventDesc{ + Desc: fmt.Sprint(args...), + Severity: CtInfo, + }) } // Infof logs and adds a trace event if channelz is on. -func Infof(l grpclog.DepthLoggerV2, id int64, format string, args ...interface{}) { - msg := fmt.Sprintf(format, args...) 
- if IsOn() { - AddTraceEvent(l, id, 1, &TraceEventDesc{ - Desc: msg, - Severity: CtInfo, - }) - } else { - l.InfoDepth(1, msg) - } +func Infof(l grpclog.DepthLoggerV2, id *Identifier, format string, args ...interface{}) { + AddTraceEvent(l, id, 1, &TraceEventDesc{ + Desc: fmt.Sprintf(format, args...), + Severity: CtInfo, + }) } // Warning logs and adds a trace event if channelz is on. -func Warning(l grpclog.DepthLoggerV2, id int64, args ...interface{}) { - if IsOn() { - AddTraceEvent(l, id, 1, &TraceEventDesc{ - Desc: fmt.Sprint(args...), - Severity: CtWarning, - }) - } else { - l.WarningDepth(1, args...) - } +func Warning(l grpclog.DepthLoggerV2, id *Identifier, args ...interface{}) { + AddTraceEvent(l, id, 1, &TraceEventDesc{ + Desc: fmt.Sprint(args...), + Severity: CtWarning, + }) } // Warningf logs and adds a trace event if channelz is on. -func Warningf(l grpclog.DepthLoggerV2, id int64, format string, args ...interface{}) { - msg := fmt.Sprintf(format, args...) - if IsOn() { - AddTraceEvent(l, id, 1, &TraceEventDesc{ - Desc: msg, - Severity: CtWarning, - }) - } else { - l.WarningDepth(1, msg) - } +func Warningf(l grpclog.DepthLoggerV2, id *Identifier, format string, args ...interface{}) { + AddTraceEvent(l, id, 1, &TraceEventDesc{ + Desc: fmt.Sprintf(format, args...), + Severity: CtWarning, + }) } // Error logs and adds a trace event if channelz is on. -func Error(l grpclog.DepthLoggerV2, id int64, args ...interface{}) { - if IsOn() { - AddTraceEvent(l, id, 1, &TraceEventDesc{ - Desc: fmt.Sprint(args...), - Severity: CtError, - }) - } else { - l.ErrorDepth(1, args...) - } +func Error(l grpclog.DepthLoggerV2, id *Identifier, args ...interface{}) { + AddTraceEvent(l, id, 1, &TraceEventDesc{ + Desc: fmt.Sprint(args...), + Severity: CtError, + }) } // Errorf logs and adds a trace event if channelz is on. -func Errorf(l grpclog.DepthLoggerV2, id int64, format string, args ...interface{}) { - msg := fmt.Sprintf(format, args...) - if IsOn() { - AddTraceEvent(l, id, 1, &TraceEventDesc{ - Desc: msg, - Severity: CtError, - }) - } else { - l.ErrorDepth(1, msg) - } +func Errorf(l grpclog.DepthLoggerV2, id *Identifier, format string, args ...interface{}) { + AddTraceEvent(l, id, 1, &TraceEventDesc{ + Desc: fmt.Sprintf(format, args...), + Severity: CtError, + }) } diff --git a/vendor/google.golang.org/grpc/internal/channelz/types.go b/vendor/google.golang.org/grpc/internal/channelz/types.go index 3c595d154b..ad0ce4dabf 100644 --- a/vendor/google.golang.org/grpc/internal/channelz/types.go +++ b/vendor/google.golang.org/grpc/internal/channelz/types.go @@ -686,12 +686,33 @@ const ( type RefChannelType int const ( + // RefUnknown indicates an unknown entity type, the zero value for this type. + RefUnknown RefChannelType = iota // RefChannel indicates the referenced entity is a Channel. - RefChannel RefChannelType = iota + RefChannel // RefSubChannel indicates the referenced entity is a SubChannel. RefSubChannel + // RefServer indicates the referenced entity is a Server. + RefServer + // RefListenSocket indicates the referenced entity is a ListenSocket. + RefListenSocket + // RefNormalSocket indicates the referenced entity is a NormalSocket. 
+	RefNormalSocket
 )
 
+var refChannelTypeToString = map[RefChannelType]string{
+	RefUnknown:      "Unknown",
+	RefChannel:      "Channel",
+	RefSubChannel:   "SubChannel",
+	RefServer:       "Server",
+	RefListenSocket: "ListenSocket",
+	RefNormalSocket: "NormalSocket",
+}
+
+func (r RefChannelType) String() string {
+	return refChannelTypeToString[r]
+}
+
 func (c *channelTrace) dumpData() *ChannelTrace {
 	c.mu.Lock()
 	ct := &ChannelTrace{EventNum: c.eventCount, CreationTime: c.createdTime}
diff --git a/vendor/google.golang.org/grpc/internal/envconfig/xds.go b/vendor/google.golang.org/grpc/internal/envconfig/xds.go
index 93522d716d..55aaeea8b4 100644
--- a/vendor/google.golang.org/grpc/internal/envconfig/xds.go
+++ b/vendor/google.golang.org/grpc/internal/envconfig/xds.go
@@ -26,13 +26,13 @@ import (
 const (
 	// XDSBootstrapFileNameEnv is the env variable to set bootstrap file name.
 	// Do not use this and read from env directly. Its value is read and kept in
-	// variable BootstrapFileName.
+	// variable XDSBootstrapFileName.
 	//
 	// When both bootstrap FileName and FileContent are set, FileName is used.
 	XDSBootstrapFileNameEnv = "GRPC_XDS_BOOTSTRAP"
-	// XDSBootstrapFileContentEnv is the env variable to set bootstrapp file
+	// XDSBootstrapFileContentEnv is the env variable to set bootstrap file
 	// content. Do not use this and read from env directly. Its value is read
-	// and kept in variable BootstrapFileName.
+	// and kept in variable XDSBootstrapFileContent.
 	//
 	// When both bootstrap FileName and FileContent are set, FileName is used.
 	XDSBootstrapFileContentEnv = "GRPC_XDS_BOOTSTRAP_CONFIG"
@@ -41,7 +41,9 @@ const (
 	clientSideSecuritySupportEnv = "GRPC_XDS_EXPERIMENTAL_SECURITY_SUPPORT"
 	aggregateAndDNSSupportEnv    = "GRPC_XDS_EXPERIMENTAL_ENABLE_AGGREGATE_AND_LOGICAL_DNS_CLUSTER"
 	rbacSupportEnv               = "GRPC_XDS_EXPERIMENTAL_RBAC"
+	outlierDetectionSupportEnv   = "GRPC_EXPERIMENTAL_ENABLE_OUTLIER_DETECTION"
 	federationEnv                = "GRPC_EXPERIMENTAL_XDS_FEDERATION"
+	rlsInXDSEnv                  = "GRPC_EXPERIMENTAL_XDS_RLS_LB"
 
 	c2pResolverTestOnlyTrafficDirectorURIEnv = "GRPC_TEST_ONLY_GOOGLE_C2P_RESOLVER_TRAFFIC_DIRECTOR_URI"
 )
@@ -75,16 +77,25 @@ var (
 	// environment variable
 	// "GRPC_XDS_EXPERIMENTAL_ENABLE_AGGREGATE_AND_LOGICAL_DNS_CLUSTER" to
 	// "true".
-	XDSAggregateAndDNS = strings.EqualFold(os.Getenv(aggregateAndDNSSupportEnv), "true")
+	XDSAggregateAndDNS = !strings.EqualFold(os.Getenv(aggregateAndDNSSupportEnv), "false")
 
 	// XDSRBAC indicates whether xDS configured RBAC HTTP Filter is enabled,
 	// which can be disabled by setting the environment variable
 	// "GRPC_XDS_EXPERIMENTAL_RBAC" to "false".
 	XDSRBAC = !strings.EqualFold(os.Getenv(rbacSupportEnv), "false")
-
+	// XDSOutlierDetection indicates whether outlier detection support is
+	// enabled, which can be enabled by setting the environment variable
+	// "GRPC_EXPERIMENTAL_ENABLE_OUTLIER_DETECTION" to "true".
+	XDSOutlierDetection = strings.EqualFold(os.Getenv(outlierDetectionSupportEnv), "true")
 	// XDSFederation indicates whether federation support is enabled.
 	XDSFederation = strings.EqualFold(os.Getenv(federationEnv), "true")
 
+	// XDSRLS indicates whether processing of Cluster Specifier plugins and
+	// support for the RLS Cluster Specifier is enabled, which can be enabled by
+	// setting the environment variable "GRPC_EXPERIMENTAL_XDS_RLS_LB" to
+	// "true".
+	XDSRLS = strings.EqualFold(os.Getenv(rlsInXDSEnv), "true")
+
+	// C2PResolverTestOnlyTrafficDirectorURI is the TD URI for testing.
 	C2PResolverTestOnlyTrafficDirectorURI = os.Getenv(c2pResolverTestOnlyTrafficDirectorURIEnv)
 )
diff --git a/vendor/google.golang.org/grpc/internal/googlecloud/googlecloud.go b/vendor/google.golang.org/grpc/internal/googlecloud/googlecloud.go
index d6c9e03fc4..6717b757f8 100644
--- a/vendor/google.golang.org/grpc/internal/googlecloud/googlecloud.go
+++ b/vendor/google.golang.org/grpc/internal/googlecloud/googlecloud.go
@@ -20,13 +20,6 @@
 package googlecloud
 
 import (
-	"errors"
-	"fmt"
-	"io"
-	"io/ioutil"
-	"os"
-	"os/exec"
-	"regexp"
 	"runtime"
 	"strings"
 	"sync"
@@ -35,43 +28,9 @@ import (
 	internalgrpclog "google.golang.org/grpc/internal/grpclog"
 )
 
-const (
-	linuxProductNameFile     = "/sys/class/dmi/id/product_name"
-	windowsCheckCommand      = "powershell.exe"
-	windowsCheckCommandArgs  = "Get-WmiObject -Class Win32_BIOS"
-	powershellOutputFilter   = "Manufacturer"
-	windowsManufacturerRegex = ":(.*)"
-
-	logPrefix = "[googlecloud]"
-)
+const logPrefix = "[googlecloud]"
 
 var (
-	// The following two variables will be reassigned in tests.
-	runningOS          = runtime.GOOS
-	manufacturerReader = func() (io.Reader, error) {
-		switch runningOS {
-		case "linux":
-			return os.Open(linuxProductNameFile)
-		case "windows":
-			cmd := exec.Command(windowsCheckCommand, windowsCheckCommandArgs)
-			out, err := cmd.Output()
-			if err != nil {
-				return nil, err
-			}
-			for _, line := range strings.Split(strings.TrimSuffix(string(out), "\n"), "\n") {
-				if strings.HasPrefix(line, powershellOutputFilter) {
-					re := regexp.MustCompile(windowsManufacturerRegex)
-					name := re.FindString(line)
-					name = strings.TrimLeft(name, ":")
-					return strings.NewReader(name), nil
-				}
-			}
-			return nil, errors.New("cannot determine the machine's manufacturer")
-		default:
-			return nil, fmt.Errorf("%s is not supported", runningOS)
-		}
-	}
-
 	vmOnGCEOnce sync.Once
 	vmOnGCE     bool
 
@@ -84,21 +43,21 @@ var (
 // package. We keep this to avoid depending on the cloud library module.
 func OnGCE() bool {
 	vmOnGCEOnce.Do(func() {
-		vmOnGCE = isRunningOnGCE()
+		mf, err := manufacturer()
+		if err != nil {
+			logger.Infof("failed to read manufacturer, setting onGCE=false: %v", err)
+			return
+		}
+		vmOnGCE = isRunningOnGCE(mf, runtime.GOOS)
 	})
 	return vmOnGCE
 }
 
-// isRunningOnGCE checks whether the local system, without doing a network request is
+// isRunningOnGCE checks whether the local system, without doing a network request, is
 // running on GCP.
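
Splitting manufacturer detection out of isRunningOnGCE makes the classifier a pure function of its inputs, so it can be table-tested without stubbing readers. A sketch of such a test, using only the comparisons visible in the linux branch shown just below (the test itself is hypothetical, not part of this diff):

    package googlecloud

    import "testing"

    func TestIsRunningOnGCE(t *testing.T) {
        cases := []struct {
            manufacturer string
            goos         string
            want         bool
        }{
            {"Google\n", "linux", true}, // linux trims whitespace before comparing
            {"Google Compute Engine", "linux", true},
            {"Acme Corp", "linux", false},
            {"Google", "darwin", false}, // unsupported OS is always false
        }
        for _, c := range cases {
            if got := isRunningOnGCE([]byte(c.manufacturer), c.goos); got != c.want {
                t.Errorf("isRunningOnGCE(%q, %q) = %v, want %v", c.manufacturer, c.goos, got, c.want)
            }
        }
    }
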
-func isRunningOnGCE() bool { - manufacturer, err := readManufacturer() - if err != nil { - logger.Infof("failed to read manufacturer %v, returning OnGCE=false", err) - return false - } +func isRunningOnGCE(manufacturer []byte, goos string) bool { name := string(manufacturer) - switch runningOS { + switch goos { case "linux": name = strings.TrimSpace(name) return name == "Google" || name == "Google Compute Engine" @@ -111,18 +70,3 @@ func isRunningOnGCE() bool { return false } } - -func readManufacturer() ([]byte, error) { - reader, err := manufacturerReader() - if err != nil { - return nil, err - } - if reader == nil { - return nil, errors.New("got nil reader") - } - manufacturer, err := ioutil.ReadAll(reader) - if err != nil { - return nil, fmt.Errorf("failed reading %v: %v", linuxProductNameFile, err) - } - return manufacturer, nil -} diff --git a/vendor/google.golang.org/grpc/internal/googlecloud/manufacturer.go b/vendor/google.golang.org/grpc/internal/googlecloud/manufacturer.go new file mode 100644 index 0000000000..ffa0f1ddee --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/googlecloud/manufacturer.go @@ -0,0 +1,26 @@ +//go:build !(linux || windows) +// +build !linux,!windows + +/* + * + * Copyright 2022 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package googlecloud + +func manufacturer() ([]byte, error) { + return nil, nil +} diff --git a/vendor/google.golang.org/grpc/internal/googlecloud/manufacturer_linux.go b/vendor/google.golang.org/grpc/internal/googlecloud/manufacturer_linux.go new file mode 100644 index 0000000000..e53b8ffc83 --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/googlecloud/manufacturer_linux.go @@ -0,0 +1,27 @@ +/* + * + * Copyright 2022 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package googlecloud + +import "io/ioutil" + +const linuxProductNameFile = "/sys/class/dmi/id/product_name" + +func manufacturer() ([]byte, error) { + return ioutil.ReadFile(linuxProductNameFile) +} diff --git a/vendor/google.golang.org/grpc/internal/googlecloud/manufacturer_windows.go b/vendor/google.golang.org/grpc/internal/googlecloud/manufacturer_windows.go new file mode 100644 index 0000000000..2d7aaaaa70 --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/googlecloud/manufacturer_windows.go @@ -0,0 +1,50 @@ +/* + * + * Copyright 2022 gRPC authors. 
+ * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package googlecloud + +import ( + "errors" + "os/exec" + "regexp" + "strings" +) + +const ( + windowsCheckCommand = "powershell.exe" + windowsCheckCommandArgs = "Get-WmiObject -Class Win32_BIOS" + powershellOutputFilter = "Manufacturer" + windowsManufacturerRegex = ":(.*)" +) + +func manufacturer() ([]byte, error) { + cmd := exec.Command(windowsCheckCommand, windowsCheckCommandArgs) + out, err := cmd.Output() + if err != nil { + return nil, err + } + for _, line := range strings.Split(strings.TrimSuffix(string(out), "\n"), "\n") { + if strings.HasPrefix(line, powershellOutputFilter) { + re := regexp.MustCompile(windowsManufacturerRegex) + name := re.FindString(line) + name = strings.TrimLeft(name, ":") + return []byte(name), nil + } + } + return nil, errors.New("cannot determine the machine's manufacturer") +} diff --git a/vendor/google.golang.org/grpc/internal/grpclog/grpclog.go b/vendor/google.golang.org/grpc/internal/grpclog/grpclog.go index e6f975cbf6..30a3b4258f 100644 --- a/vendor/google.golang.org/grpc/internal/grpclog/grpclog.go +++ b/vendor/google.golang.org/grpc/internal/grpclog/grpclog.go @@ -115,12 +115,12 @@ type LoggerV2 interface { // Notice: This type is EXPERIMENTAL and may be changed or removed in a // later release. type DepthLoggerV2 interface { - // InfoDepth logs to INFO log at the specified depth. Arguments are handled in the manner of fmt.Print. + // InfoDepth logs to INFO log at the specified depth. Arguments are handled in the manner of fmt.Println. InfoDepth(depth int, args ...interface{}) - // WarningDepth logs to WARNING log at the specified depth. Arguments are handled in the manner of fmt.Print. + // WarningDepth logs to WARNING log at the specified depth. Arguments are handled in the manner of fmt.Println. WarningDepth(depth int, args ...interface{}) - // ErrorDetph logs to ERROR log at the specified depth. Arguments are handled in the manner of fmt.Print. + // ErrorDepth logs to ERROR log at the specified depth. Arguments are handled in the manner of fmt.Println. ErrorDepth(depth int, args ...interface{}) - // FatalDepth logs to FATAL log at the specified depth. Arguments are handled in the manner of fmt.Print. + // FatalDepth logs to FATAL log at the specified depth. Arguments are handled in the manner of fmt.Println. FatalDepth(depth int, args ...interface{}) } diff --git a/vendor/google.golang.org/grpc/internal/grpcutil/regex.go b/vendor/google.golang.org/grpc/internal/grpcutil/regex.go index 2810a8ba2f..7a092b2b80 100644 --- a/vendor/google.golang.org/grpc/internal/grpcutil/regex.go +++ b/vendor/google.golang.org/grpc/internal/grpcutil/regex.go @@ -20,9 +20,12 @@ package grpcutil import "regexp" -// FullMatchWithRegex returns whether the full string matches the regex provided. -func FullMatchWithRegex(re *regexp.Regexp, string string) bool { +// FullMatchWithRegex returns whether the full text matches the regex provided. 
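
The tightened FullMatchWithRegex below guards the empty-text case: without the guard, FindString returns "" for empty input, so the length comparison reports a match even for patterns that cannot match the empty string. A standalone sketch of the logic and that edge case (the fullMatch name is illustrative):

    package main

    import (
        "fmt"
        "regexp"
    )

    // fullMatch mirrors the fixed logic: in leftmost-longest mode, the found
    // match has the same length as text exactly when the whole text matches.
    func fullMatch(re *regexp.Regexp, text string) bool {
        if len(text) == 0 {
            return re.MatchString(text)
        }
        re.Longest()
        return len(re.FindString(text)) == len(text)
    }

    func main() {
        re := regexp.MustCompile("a+b?")
        fmt.Println(fullMatch(re, "aaab"))  // true: the whole input matches
        fmt.Println(fullMatch(re, "aaabc")) // false: only a prefix matches
        fmt.Println(fullMatch(re, ""))      // false: "a+b?" cannot match ""
        // Without the empty-text guard, len("") == len("") would report true here.
    }
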
+func FullMatchWithRegex(re *regexp.Regexp, text string) bool { + if len(text) == 0 { + return re.MatchString(text) + } re.Longest() - rem := re.FindString(string) - return len(rem) == len(string) + rem := re.FindString(text) + return len(rem) == len(text) } diff --git a/vendor/google.golang.org/grpc/internal/internal.go b/vendor/google.golang.org/grpc/internal/internal.go index 1b596bf357..83018be7c7 100644 --- a/vendor/google.golang.org/grpc/internal/internal.go +++ b/vendor/google.golang.org/grpc/internal/internal.go @@ -38,11 +38,10 @@ var ( // KeepaliveMinPingTime is the minimum ping interval. This must be 10s by // default, but tests may wish to set it lower for convenience. KeepaliveMinPingTime = 10 * time.Second - // ParseServiceConfigForTesting is for creating a fake - // ClientConn for resolver testing only - ParseServiceConfigForTesting interface{} // func(string) *serviceconfig.ParseResult + // ParseServiceConfig parses a JSON representation of the service config. + ParseServiceConfig interface{} // func(string) *serviceconfig.ParseResult // EqualServiceConfigForTesting is for testing service config generation and - // parsing. Both a and b should be returned by ParseServiceConfigForTesting. + // parsing. Both a and b should be returned by ParseServiceConfig. // This function compares the config without rawJSON stripped, in case the // there's difference in white space. EqualServiceConfigForTesting func(a, b serviceconfig.Config) bool @@ -64,6 +63,76 @@ var ( // xDS-enabled server invokes this method on a grpc.Server when a particular // listener moves to "not-serving" mode. DrainServerTransports interface{} // func(*grpc.Server, string) + // AddExtraServerOptions adds an array of ServerOption that will be + // effective globally for newly created servers. The priority will be: 1. + // user-provided; 2. this method; 3. default values. + AddExtraServerOptions interface{} // func(opt ...ServerOption) + // ClearExtraServerOptions clears the array of extra ServerOption. This + // method is useful in testing and benchmarking. + ClearExtraServerOptions func() + // AddExtraDialOptions adds an array of DialOption that will be effective + // globally for newly created client channels. The priority will be: 1. + // user-provided; 2. this method; 3. default values. + AddExtraDialOptions interface{} // func(opt ...DialOption) + // ClearExtraDialOptions clears the array of extra DialOption. This + // method is useful in testing and benchmarking. + ClearExtraDialOptions func() + + // NewXDSResolverWithConfigForTesting creates a new xds resolver builder using + // the provided xds bootstrap config instead of the global configuration from + // the supported environment variables. The resolver.Builder is meant to be + // used in conjunction with the grpc.WithResolvers DialOption. + // + // Testing Only + // + // This function should ONLY be used for testing and may not work with some + // other features, including the CSDS service. + NewXDSResolverWithConfigForTesting interface{} // func([]byte) (resolver.Builder, error) + + // RegisterRLSClusterSpecifierPluginForTesting registers the RLS Cluster + // Specifier Plugin for testing purposes, regardless of the XDSRLS environment + // variable. + // + // TODO: Remove this function once the RLS env var is removed. + RegisterRLSClusterSpecifierPluginForTesting func() + + // UnregisterRLSClusterSpecifierPluginForTesting unregisters the RLS Cluster + // Specifier Plugin for testing purposes. 
This is needed because there is no way + // to unregister the RLS Cluster Specifier Plugin after registering it solely + // for testing purposes using RegisterRLSClusterSpecifierPluginForTesting(). + // + // TODO: Remove this function once the RLS env var is removed. + UnregisterRLSClusterSpecifierPluginForTesting func() + + // RegisterRBACHTTPFilterForTesting registers the RBAC HTTP Filter for testing + // purposes, regardless of the RBAC environment variable. + // + // TODO: Remove this function once the RBAC env var is removed. + RegisterRBACHTTPFilterForTesting func() + + // UnregisterRBACHTTPFilterForTesting unregisters the RBAC HTTP Filter for + // testing purposes. This is needed because there is no way to unregister the + // HTTP Filter after registering it solely for testing purposes using + // RegisterRBACHTTPFilterForTesting(). + // + // TODO: Remove this function once the RBAC env var is removed. + UnregisterRBACHTTPFilterForTesting func() + + // RegisterOutlierDetectionBalancerForTesting registers the Outlier + // Detection Balancer for testing purposes, regardless of the Outlier + // Detection environment variable. + // + // TODO: Remove this function once the Outlier Detection env var is removed. + RegisterOutlierDetectionBalancerForTesting func() + + // UnregisterOutlierDetectionBalancerForTesting unregisters the Outlier + // Detection Balancer for testing purposes. This is needed because there is + // no way to unregister the Outlier Detection Balancer after registering it + // solely for testing purposes using + // RegisterOutlierDetectionBalancerForTesting(). + // + // TODO: Remove this function once the Outlier Detection env var is removed. + UnregisterOutlierDetectionBalancerForTesting func() ) // HealthChecker defines the signature of the client-side LB channel health checking function. @@ -86,3 +155,9 @@ const ( // that supports backend returned by grpclb balancer. CredsBundleModeBackendFromBalancer = "backend-from-balancer" ) + +// RLSLoadBalancingPolicyName is the name of the RLS LB policy. +// +// It currently has an experimental suffix which would be removed once +// end-to-end testing of the policy is completed. +const RLSLoadBalancingPolicyName = "rls_experimental" diff --git a/vendor/google.golang.org/grpc/internal/metadata/metadata.go b/vendor/google.golang.org/grpc/internal/metadata/metadata.go index b8733dbf34..b2980f8ac4 100644 --- a/vendor/google.golang.org/grpc/internal/metadata/metadata.go +++ b/vendor/google.golang.org/grpc/internal/metadata/metadata.go @@ -22,6 +22,9 @@ package metadata import ( + "fmt" + "strings" + "google.golang.org/grpc/metadata" "google.golang.org/grpc/resolver" ) @@ -72,3 +75,46 @@ func Set(addr resolver.Address, md metadata.MD) resolver.Address { addr.Attributes = addr.Attributes.WithValue(mdKey, mdValue(md)) return addr } + +// Validate returns an error if the input md contains invalid keys or values. +// +// If the header is not a pseudo-header, the following items are checked: +// - header names must contain one or more characters from this set [0-9 a-z _ - .]. +// - if the header-name ends with a "-bin" suffix, no validation of the header value is performed. +// - otherwise, the header value must contain one or more characters from the set [%x20-%x7E]. 
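
The Validate function that follows enforces the rules listed above. A standalone restatement of the two checks with concrete accepted and rejected inputs (the helper names are illustrative, not the package's API):

    package main

    import (
        "fmt"
        "strings"
    )

    // validKey reports whether a non-pseudo metadata key stays within
    // [0-9 a-z _ - .], the same byte-level check Validate performs.
    func validKey(k string) bool {
        for i := 0; i < len(k); i++ {
            c := k[i]
            if !(c >= 'a' && c <= 'z') && !(c >= '0' && c <= '9') && c != '.' && c != '-' && c != '_' {
                return false
            }
        }
        return true
    }

    // validValue reports whether a value is printable ASCII (%x20-%x7E);
    // values under "-bin" keys are exempt, as in Validate.
    func validValue(k, v string) bool {
        if strings.HasSuffix(k, "-bin") {
            return true
        }
        for i := 0; i < len(v); i++ {
            if v[i] < 0x20 || v[i] > 0x7E {
                return false
            }
        }
        return true
    }

    func main() {
        fmt.Println(validKey("grpc-trace-id"))          // true
        fmt.Println(validKey("Upper-Case"))             // false: uppercase is rejected
        fmt.Println(validValue("data-bin", "\x00\x01")) // true: binary allowed under -bin
        fmt.Println(validValue("note", "caf\u00e9"))    // false: non-ASCII value
    }
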
+func Validate(md metadata.MD) error {
+	for k, vals := range md {
+		// Pseudo-headers are ignored.
+		if k[0] == ':' {
+			continue
+		}
+		// Check the key; index the string to read raw bytes and avoid the
+		// rune conversion a for-range loop over a string would perform.
+		for i := 0; i < len(k); i++ {
+			r := k[i]
+			if !(r >= 'a' && r <= 'z') && !(r >= '0' && r <= '9') && r != '.' && r != '-' && r != '_' {
+				return fmt.Errorf("header key %q contains illegal characters not in [0-9a-z-_.]", k)
+			}
+		}
+		if strings.HasSuffix(k, "-bin") {
+			continue
+		}
+		// Check the values.
+		for _, val := range vals {
+			if hasNotPrintable(val) {
+				return fmt.Errorf("header key %q contains value with non-printable ASCII characters", k)
+			}
+		}
+	}
+	return nil
+}
+
+// hasNotPrintable returns true if msg contains any characters which are not in %x20-%x7E.
+func hasNotPrintable(msg string) bool {
+	// Index the string to read raw bytes and avoid the rune conversion a
+	// for-range loop over a string would perform.
+	for i := 0; i < len(msg); i++ {
+		if msg[i] < 0x20 || msg[i] > 0x7E {
+			return true
+		}
+	}
+	return false
+}
diff --git a/vendor/google.golang.org/grpc/internal/pretty/pretty.go b/vendor/google.golang.org/grpc/internal/pretty/pretty.go
new file mode 100644
index 0000000000..0177af4b51
--- /dev/null
+++ b/vendor/google.golang.org/grpc/internal/pretty/pretty.go
@@ -0,0 +1,82 @@
+/*
+ *
+ * Copyright 2021 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+// Package pretty defines helper functions to pretty-print structs for logging.
+package pretty
+
+import (
+	"bytes"
+	"encoding/json"
+	"fmt"
+
+	"github.com/golang/protobuf/jsonpb"
+	protov1 "github.com/golang/protobuf/proto"
+	"google.golang.org/protobuf/encoding/protojson"
+	protov2 "google.golang.org/protobuf/proto"
+)
+
+const jsonIndent = "  "
+
+// ToJSON marshals the input into a json string.
+//
+// If marshal fails, it falls back to fmt.Sprintf("%+v").
+func ToJSON(e interface{}) string {
+	switch ee := e.(type) {
+	case protov1.Message:
+		mm := jsonpb.Marshaler{Indent: jsonIndent}
+		ret, err := mm.MarshalToString(ee)
+		if err != nil {
+			// This may fail for proto.Anys, e.g. for xDS v2, LDS, the v2
+			// messages are not imported, and this will fail because the message
+			// is not found.
+			return fmt.Sprintf("%+v", ee)
+		}
+		return ret
+	case protov2.Message:
+		mm := protojson.MarshalOptions{
+			Multiline: true,
+			Indent:    jsonIndent,
+		}
+		ret, err := mm.Marshal(ee)
+		if err != nil {
+			// This may fail for proto.Anys, e.g. for xDS v2, LDS, the v2
+			// messages are not imported, and this will fail because the message
+			// is not found.
+			return fmt.Sprintf("%+v", ee)
+		}
+		return string(ret)
+	default:
+		ret, err := json.MarshalIndent(ee, "", jsonIndent)
+		if err != nil {
+			return fmt.Sprintf("%+v", ee)
+		}
+		return string(ret)
+	}
+}
+
+// FormatJSON formats the input json bytes with indentation.
+//
+// If Indent fails, it returns the unchanged input as string.
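
The new pretty package picks a marshaler by type: jsonpb for protobuf v1 messages, protojson for v2, and encoding/json for everything else. A usage sketch for the plain-struct path follows; note the package is internal, so it is importable only from within the grpc module, and the config type here is illustrative:

    package main

    import (
        "fmt"

        "google.golang.org/grpc/internal/pretty"
    )

    type config struct {
        Name    string   `json:"name"`
        Targets []string `json:"targets"`
    }

    func main() {
        // Non-proto values fall through to json.MarshalIndent.
        fmt.Println(pretty.ToJSON(&config{Name: "demo", Targets: []string{"a", "b"}}))

        // FormatJSON re-indents bytes that are already JSON; on bad input it
        // returns them unchanged.
        fmt.Println(pretty.FormatJSON([]byte(`{"name":"demo"}`)))
    }
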
+func FormatJSON(b []byte) string { + var out bytes.Buffer + err := json.Indent(&out, b, "", jsonIndent) + if err != nil { + return string(b) + } + return out.String() +} diff --git a/vendor/google.golang.org/grpc/internal/transport/controlbuf.go b/vendor/google.golang.org/grpc/internal/transport/controlbuf.go index 8394d252df..244f4b081d 100644 --- a/vendor/google.golang.org/grpc/internal/transport/controlbuf.go +++ b/vendor/google.golang.org/grpc/internal/transport/controlbuf.go @@ -137,6 +137,7 @@ type earlyAbortStream struct { streamID uint32 contentSubtype string status *status.Status + rst bool } func (*earlyAbortStream) isTransportResponseFrame() bool { return false } @@ -786,6 +787,11 @@ func (l *loopyWriter) earlyAbortStreamHandler(eas *earlyAbortStream) error { if err := l.writeHeader(eas.streamID, true, headerFields, nil); err != nil { return err } + if eas.rst { + if err := l.framer.fr.WriteRSTStream(eas.streamID, http2.ErrCodeNo); err != nil { + return err + } + } return nil } diff --git a/vendor/google.golang.org/grpc/internal/transport/handler_server.go b/vendor/google.golang.org/grpc/internal/transport/handler_server.go index 1c3459c2b4..090120925b 100644 --- a/vendor/google.golang.org/grpc/internal/transport/handler_server.go +++ b/vendor/google.golang.org/grpc/internal/transport/handler_server.go @@ -49,7 +49,7 @@ import ( // NewServerHandlerTransport returns a ServerTransport handling gRPC // from inside an http.Handler. It requires that the http Server // supports HTTP/2. -func NewServerHandlerTransport(w http.ResponseWriter, r *http.Request, stats stats.Handler) (ServerTransport, error) { +func NewServerHandlerTransport(w http.ResponseWriter, r *http.Request, stats []stats.Handler) (ServerTransport, error) { if r.ProtoMajor != 2 { return nil, errors.New("gRPC requires HTTP/2") } @@ -138,7 +138,7 @@ type serverHandlerTransport struct { // TODO make sure this is consistent across handler_server and http2_server contentSubtype string - stats stats.Handler + stats []stats.Handler } func (ht *serverHandlerTransport) Close() { @@ -228,10 +228,10 @@ func (ht *serverHandlerTransport) WriteStatus(s *Stream, st *status.Status) erro }) if err == nil { // transport has not been closed - if ht.stats != nil { - // Note: The trailer fields are compressed with hpack after this call returns. - // No WireLength field is set here. - ht.stats.HandleRPC(s.Context(), &stats.OutTrailer{ + // Note: The trailer fields are compressed with hpack after this call returns. + // No WireLength field is set here. + for _, sh := range ht.stats { + sh.HandleRPC(s.Context(), &stats.OutTrailer{ Trailer: s.trailer.Copy(), }) } @@ -314,10 +314,10 @@ func (ht *serverHandlerTransport) WriteHeader(s *Stream, md metadata.MD) error { }) if err == nil { - if ht.stats != nil { + for _, sh := range ht.stats { // Note: The header fields are compressed with hpack after this call returns. // No WireLength field is set here. 
- ht.stats.HandleRPC(s.Context(), &stats.OutHeader{ + sh.HandleRPC(s.Context(), &stats.OutHeader{ Header: md.Copy(), Compression: s.sendCompress, }) @@ -369,14 +369,14 @@ func (ht *serverHandlerTransport) HandleStreams(startStream func(*Stream), trace } ctx = metadata.NewIncomingContext(ctx, ht.headerMD) s.ctx = peer.NewContext(ctx, pr) - if ht.stats != nil { - s.ctx = ht.stats.TagRPC(s.ctx, &stats.RPCTagInfo{FullMethodName: s.method}) + for _, sh := range ht.stats { + s.ctx = sh.TagRPC(s.ctx, &stats.RPCTagInfo{FullMethodName: s.method}) inHeader := &stats.InHeader{ FullMethod: s.method, RemoteAddr: ht.RemoteAddr(), Compression: s.recvCompress, } - ht.stats.HandleRPC(s.ctx, inHeader) + sh.HandleRPC(s.ctx, inHeader) } s.trReader = &transportReader{ reader: &recvBufferReader{ctx: s.ctx, ctxDone: s.ctx.Done(), recv: s.buf, freeBuffer: func(*bytes.Buffer) {}}, diff --git a/vendor/google.golang.org/grpc/internal/transport/http2_client.go b/vendor/google.golang.org/grpc/internal/transport/http2_client.go index f0c72d3371..be371c6e0f 100644 --- a/vendor/google.golang.org/grpc/internal/transport/http2_client.go +++ b/vendor/google.golang.org/grpc/internal/transport/http2_client.go @@ -90,7 +90,7 @@ type http2Client struct { kp keepalive.ClientParameters keepaliveEnabled bool - statsHandler stats.Handler + statsHandlers []stats.Handler initialWindowSize int32 @@ -132,7 +132,7 @@ type http2Client struct { kpDormant bool // Fields below are for channelz metric collection. - channelzID int64 // channelz unique identification number + channelzID *channelz.Identifier czData *channelzData onGoAway func(GoAwayReason) @@ -311,7 +311,7 @@ func newHTTP2Client(connectCtx, ctx context.Context, addr resolver.Address, opts isSecure: isSecure, perRPCCreds: perRPCCreds, kp: kp, - statsHandler: opts.StatsHandler, + statsHandlers: opts.StatsHandlers, initialWindowSize: initialWindowSize, onPrefaceReceipt: onPrefaceReceipt, nextID: 1, @@ -341,18 +341,19 @@ func newHTTP2Client(connectCtx, ctx context.Context, addr resolver.Address, opts updateFlowControl: t.updateFlowControl, } } - if t.statsHandler != nil { - t.ctx = t.statsHandler.TagConn(t.ctx, &stats.ConnTagInfo{ + for _, sh := range t.statsHandlers { + t.ctx = sh.TagConn(t.ctx, &stats.ConnTagInfo{ RemoteAddr: t.remoteAddr, LocalAddr: t.localAddr, }) connBegin := &stats.ConnBegin{ Client: true, } - t.statsHandler.HandleConn(t.ctx, connBegin) + sh.HandleConn(t.ctx, connBegin) } - if channelz.IsOn() { - t.channelzID = channelz.RegisterNormalSocket(t, opts.ChannelzParentID, fmt.Sprintf("%s -> %s", t.localAddr, t.remoteAddr)) + t.channelzID, err = channelz.RegisterNormalSocket(t, opts.ChannelzParentID, fmt.Sprintf("%s -> %s", t.localAddr, t.remoteAddr)) + if err != nil { + return nil, err } if t.keepaliveEnabled { t.kpDormancyCond = sync.NewCond(&t.mu) @@ -630,8 +631,8 @@ func (t *http2Client) getCallAuthData(ctx context.Context, audience string, call // the wire. However, there are two notable exceptions: // // 1. If the stream headers violate the max header list size allowed by the -// server. In this case there is no reason to retry at all, as it is -// assumed the RPC would continue to fail on subsequent attempts. +// server. It's possible this could succeed on another transport, even if +// it's unlikely, but do not transparently retry. // 2. If the credentials errored when requesting their headers. 
In this case, // it's possible a retry can fix the problem, but indefinitely transparently // retrying is not appropriate as it is likely the credentials, if they can @@ -639,8 +640,7 @@ func (t *http2Client) getCallAuthData(ctx context.Context, audience string, call type NewStreamError struct { Err error - DoNotRetry bool - DoNotTransparentRetry bool + AllowTransparentRetry bool } func (e NewStreamError) Error() string { @@ -649,11 +649,11 @@ func (e NewStreamError) Error() string { // NewStream creates a stream and registers it into the transport as "active" // streams. All non-nil errors returned will be *NewStreamError. -func (t *http2Client) NewStream(ctx context.Context, callHdr *CallHdr) (_ *Stream, err error) { +func (t *http2Client) NewStream(ctx context.Context, callHdr *CallHdr) (*Stream, error) { ctx = peer.NewContext(ctx, t.getPeer()) headerFields, err := t.createHeaderFields(ctx, callHdr) if err != nil { - return nil, &NewStreamError{Err: err, DoNotTransparentRetry: true} + return nil, &NewStreamError{Err: err, AllowTransparentRetry: false} } s := t.newStream(ctx, callHdr) cleanup := func(err error) { @@ -753,13 +753,14 @@ func (t *http2Client) NewStream(ctx context.Context, callHdr *CallHdr) (_ *Strea return true }, hdr) if err != nil { - return nil, &NewStreamError{Err: err} + // Connection closed. + return nil, &NewStreamError{Err: err, AllowTransparentRetry: true} } if success { break } if hdrListSizeErr != nil { - return nil, &NewStreamError{Err: hdrListSizeErr, DoNotRetry: true} + return nil, &NewStreamError{Err: hdrListSizeErr} } firstTry = false select { @@ -767,29 +768,32 @@ func (t *http2Client) NewStream(ctx context.Context, callHdr *CallHdr) (_ *Strea case <-ctx.Done(): return nil, &NewStreamError{Err: ContextErr(ctx.Err())} case <-t.goAway: - return nil, &NewStreamError{Err: errStreamDrain} + return nil, &NewStreamError{Err: errStreamDrain, AllowTransparentRetry: true} case <-t.ctx.Done(): - return nil, &NewStreamError{Err: ErrConnClosing} + return nil, &NewStreamError{Err: ErrConnClosing, AllowTransparentRetry: true} } } - if t.statsHandler != nil { + if len(t.statsHandlers) != 0 { header, ok := metadata.FromOutgoingContext(ctx) if ok { header.Set("user-agent", t.userAgent) } else { header = metadata.Pairs("user-agent", t.userAgent) } - // Note: The header fields are compressed with hpack after this call returns. - // No WireLength field is set here. - outHeader := &stats.OutHeader{ - Client: true, - FullMethod: callHdr.Method, - RemoteAddr: t.remoteAddr, - LocalAddr: t.localAddr, - Compression: callHdr.SendCompress, - Header: header, + for _, sh := range t.statsHandlers { + // Note: The header fields are compressed with hpack after this call returns. + // No WireLength field is set here. + // Note: Creating a new stats object to prevent pollution. + outHeader := &stats.OutHeader{ + Client: true, + FullMethod: callHdr.Method, + RemoteAddr: t.remoteAddr, + LocalAddr: t.localAddr, + Compression: callHdr.SendCompress, + Header: header, + } + sh.HandleRPC(s.ctx, outHeader) } - t.statsHandler.HandleRPC(s.ctx, outHeader) } return s, nil } @@ -898,9 +902,7 @@ func (t *http2Client) Close(err error) { t.controlBuf.finish() t.cancel() t.conn.Close() - if channelz.IsOn() { - channelz.RemoveEntry(t.channelzID) - } + channelz.RemoveEntry(t.channelzID) // Append info about previous goaways if there were any, since this may be important // for understanding the root cause for this connection to be closed. 
_, goAwayDebugMessage := t.GetGoAwayReason() @@ -917,11 +919,11 @@ func (t *http2Client) Close(err error) { for _, s := range streams { t.closeStream(s, err, false, http2.ErrCodeNo, st, nil, false) } - if t.statsHandler != nil { + for _, sh := range t.statsHandlers { connEnd := &stats.ConnEnd{ Client: true, } - t.statsHandler.HandleConn(t.ctx, connEnd) + sh.HandleConn(t.ctx, connEnd) } } @@ -1433,7 +1435,7 @@ func (t *http2Client) operateHeaders(frame *http2.MetaHeadersFrame) { close(s.headerChan) } - if t.statsHandler != nil { + for _, sh := range t.statsHandlers { if isHeader { inHeader := &stats.InHeader{ Client: true, @@ -1441,14 +1443,14 @@ func (t *http2Client) operateHeaders(frame *http2.MetaHeadersFrame) { Header: metadata.MD(mdata).Copy(), Compression: s.recvCompress, } - t.statsHandler.HandleRPC(s.ctx, inHeader) + sh.HandleRPC(s.ctx, inHeader) } else { inTrailer := &stats.InTrailer{ Client: true, WireLength: int(frame.Header().Length), Trailer: metadata.MD(mdata).Copy(), } - t.statsHandler.HandleRPC(s.ctx, inTrailer) + sh.HandleRPC(s.ctx, inTrailer) } } diff --git a/vendor/google.golang.org/grpc/internal/transport/http2_server.go b/vendor/google.golang.org/grpc/internal/transport/http2_server.go index 2c6eaf0e59..2b0fde334c 100644 --- a/vendor/google.golang.org/grpc/internal/transport/http2_server.go +++ b/vendor/google.golang.org/grpc/internal/transport/http2_server.go @@ -21,7 +21,6 @@ package transport import ( "bytes" "context" - "errors" "fmt" "io" "math" @@ -36,6 +35,7 @@ import ( "golang.org/x/net/http2" "golang.org/x/net/http2/hpack" "google.golang.org/grpc/internal/grpcutil" + "google.golang.org/grpc/internal/syscall" "google.golang.org/grpc/codes" "google.golang.org/grpc/credentials" @@ -52,10 +52,10 @@ import ( var ( // ErrIllegalHeaderWrite indicates that setting header is illegal because of // the stream's state. - ErrIllegalHeaderWrite = errors.New("transport: the stream is done or WriteHeader was already called") + ErrIllegalHeaderWrite = status.Error(codes.Internal, "transport: SendHeader called multiple times") // ErrHeaderListSizeLimitViolation indicates that the header list size is larger // than the limit set by peer. - ErrHeaderListSizeLimitViolation = errors.New("transport: trying to send header list size larger than the limit set by peer") + ErrHeaderListSizeLimitViolation = status.Error(codes.Internal, "transport: trying to send header list size larger than the limit set by peer") ) // serverConnectionCounter counts the number of connections a server has seen @@ -82,7 +82,7 @@ type http2Server struct { // updates, reset streams, and various settings) to the controller. controlBuf *controlBuffer fc *trInFlow - stats stats.Handler + stats []stats.Handler // Keepalive and max-age parameters for the server. kp keepalive.ServerParameters // Keepalive enforcement policy. @@ -117,7 +117,7 @@ type http2Server struct { idle time.Time // Fields below are for channelz metric collection. 
- channelzID int64 // channelz unique identification number + channelzID *channelz.Identifier czData *channelzData bufferPool *bufferPool @@ -231,6 +231,11 @@ func NewServerTransport(conn net.Conn, config *ServerConfig) (_ ServerTransport, if kp.Timeout == 0 { kp.Timeout = defaultServerKeepaliveTimeout } + if kp.Time != infinity { + if err = syscall.SetTCPUserTimeout(conn, kp.Timeout); err != nil { + return nil, connectionErrorf(false, err, "transport: failed to set TCP_USER_TIMEOUT: %v", err) + } + } kep := config.KeepalivePolicy if kep.MinTime == 0 { kep.MinTime = defaultKeepalivePolicyMinTime @@ -252,7 +257,7 @@ func NewServerTransport(conn net.Conn, config *ServerConfig) (_ ServerTransport, fc: &trInFlow{limit: uint32(icwz)}, state: reachable, activeStreams: make(map[uint32]*Stream), - stats: config.StatsHandler, + stats: config.StatsHandlers, kp: kp, idle: time.Now(), kep: kep, @@ -267,20 +272,20 @@ func NewServerTransport(conn net.Conn, config *ServerConfig) (_ ServerTransport, updateFlowControl: t.updateFlowControl, } } - if t.stats != nil { - t.ctx = t.stats.TagConn(t.ctx, &stats.ConnTagInfo{ + for _, sh := range t.stats { + t.ctx = sh.TagConn(t.ctx, &stats.ConnTagInfo{ RemoteAddr: t.remoteAddr, LocalAddr: t.localAddr, }) connBegin := &stats.ConnBegin{} - t.stats.HandleConn(t.ctx, connBegin) + sh.HandleConn(t.ctx, connBegin) } - if channelz.IsOn() { - t.channelzID = channelz.RegisterNormalSocket(t, config.ChannelzParentID, fmt.Sprintf("%s -> %s", t.remoteAddr, t.localAddr)) + t.channelzID, err = channelz.RegisterNormalSocket(t, config.ChannelzParentID, fmt.Sprintf("%s -> %s", t.remoteAddr, t.localAddr)) + if err != nil { + return nil, err } t.connectionID = atomic.AddUint64(&serverConnectionCounter, 1) - t.framer.writer.Flush() defer func() { @@ -443,6 +448,7 @@ func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func( streamID: streamID, contentSubtype: s.contentSubtype, status: status.New(codes.Internal, errMsg), + rst: !frame.StreamEnded(), }) return false } @@ -516,14 +522,16 @@ func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func( } if httpMethod != http.MethodPost { t.mu.Unlock() + errMsg := fmt.Sprintf("http2Server.operateHeaders parsed a :method field: %v which should be POST", httpMethod) if logger.V(logLevel) { - logger.Infof("transport: http2Server.operateHeaders parsed a :method field: %v which should be POST", httpMethod) + logger.Infof("transport: %v", errMsg) } - t.controlBuf.put(&cleanupStream{ - streamID: streamID, - rst: true, - rstCode: http2.ErrCodeProtocol, - onWrite: func() {}, + t.controlBuf.put(&earlyAbortStream{ + httpStatus: 405, + streamID: streamID, + contentSubtype: s.contentSubtype, + status: status.New(codes.Internal, errMsg), + rst: !frame.StreamEnded(), }) s.cancel() return false @@ -544,6 +552,7 @@ func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func( streamID: s.id, contentSubtype: s.contentSubtype, status: stat, + rst: !frame.StreamEnded(), }) return false } @@ -561,8 +570,8 @@ func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func( t.adjustWindow(s, uint32(n)) } s.ctx = traceCtx(s.ctx, s.method) - if t.stats != nil { - s.ctx = t.stats.TagRPC(s.ctx, &stats.RPCTagInfo{FullMethodName: s.method}) + for _, sh := range t.stats { + s.ctx = sh.TagRPC(s.ctx, &stats.RPCTagInfo{FullMethodName: s.method}) inHeader := &stats.InHeader{ FullMethod: s.method, RemoteAddr: t.remoteAddr, @@ -571,7 +580,7 @@ func (t *http2Server) operateHeaders(frame 
*http2.MetaHeadersFrame, handle func( WireLength: int(frame.Header().Length), Header: metadata.MD(mdata).Copy(), } - t.stats.HandleRPC(s.ctx, inHeader) + sh.HandleRPC(s.ctx, inHeader) } s.ctxDone = s.ctx.Done() s.wq = newWriteQuota(defaultWriteQuota, s.ctxDone) @@ -925,11 +934,25 @@ func (t *http2Server) checkForHeaderListSize(it interface{}) bool { return true } +func (t *http2Server) streamContextErr(s *Stream) error { + select { + case <-t.done: + return ErrConnClosing + default: + } + return ContextErr(s.ctx.Err()) +} + // WriteHeader sends the header metadata md back to the client. func (t *http2Server) WriteHeader(s *Stream, md metadata.MD) error { - if s.updateHeaderSent() || s.getState() == streamDone { + if s.updateHeaderSent() { return ErrIllegalHeaderWrite } + + if s.getState() == streamDone { + return t.streamContextErr(s) + } + s.hdrMu.Lock() if md.Len() > 0 { if s.header.Len() > 0 { @@ -940,7 +963,7 @@ func (t *http2Server) WriteHeader(s *Stream, md metadata.MD) error { } if err := t.writeHeaderLocked(s); err != nil { s.hdrMu.Unlock() - return err + return status.Convert(err).Err() } s.hdrMu.Unlock() return nil @@ -973,14 +996,14 @@ func (t *http2Server) writeHeaderLocked(s *Stream) error { t.closeStream(s, true, http2.ErrCodeInternal, false) return ErrHeaderListSizeLimitViolation } - if t.stats != nil { + for _, sh := range t.stats { // Note: Headers are compressed with hpack after this call returns. // No WireLength field is set here. outHeader := &stats.OutHeader{ Header: s.header.Copy(), Compression: s.sendCompress, } - t.stats.HandleRPC(s.Context(), outHeader) + sh.HandleRPC(s.Context(), outHeader) } return nil } @@ -1041,10 +1064,10 @@ func (t *http2Server) WriteStatus(s *Stream, st *status.Status) error { // Send a RST_STREAM after the trailers if the client has not already half-closed. rst := s.getState() == streamActive t.finishStream(s, rst, http2.ErrCodeNo, trailingHeader, true) - if t.stats != nil { + for _, sh := range t.stats { // Note: The trailer fields are compressed with hpack after this call returns. // No WireLength field is set here. - t.stats.HandleRPC(s.Context(), &stats.OutTrailer{ + sh.HandleRPC(s.Context(), &stats.OutTrailer{ Trailer: s.trailer.Copy(), }) } @@ -1056,23 +1079,12 @@ func (t *http2Server) WriteStatus(s *Stream, st *status.Status) error { func (t *http2Server) Write(s *Stream, hdr []byte, data []byte, opts *Options) error { if !s.isHeaderSent() { // Headers haven't been written yet. if err := t.WriteHeader(s, nil); err != nil { - if _, ok := err.(ConnectionError); ok { - return err - } - // TODO(mmukhi, dfawley): Make sure this is the right code to return. - return status.Errorf(codes.Internal, "transport: %v", err) + return err } } else { // Writing headers checks for this condition. if s.getState() == streamDone { - // TODO(mmukhi, dfawley): Should the server write also return io.EOF? 
- s.cancel() - select { - case <-t.done: - return ErrConnClosing - default: - } - return ContextErr(s.ctx.Err()) + return t.streamContextErr(s) } } df := &dataFrame{ @@ -1082,12 +1094,7 @@ func (t *http2Server) Write(s *Stream, hdr []byte, data []byte, opts *Options) e onEachWrite: t.setResetPingStrikes, } if err := s.wq.get(int32(len(hdr) + len(data))); err != nil { - select { - case <-t.done: - return ErrConnClosing - default: - } - return ContextErr(s.ctx.Err()) + return t.streamContextErr(s) } return t.controlBuf.put(df) } @@ -1210,25 +1217,19 @@ func (t *http2Server) Close() { if err := t.conn.Close(); err != nil && logger.V(logLevel) { logger.Infof("transport: error closing conn during Close: %v", err) } - if channelz.IsOn() { - channelz.RemoveEntry(t.channelzID) - } + channelz.RemoveEntry(t.channelzID) // Cancel all active streams. for _, s := range streams { s.cancel() } - if t.stats != nil { + for _, sh := range t.stats { connEnd := &stats.ConnEnd{} - t.stats.HandleConn(t.ctx, connEnd) + sh.HandleConn(t.ctx, connEnd) } } // deleteStream deletes the stream s from transport's active streams. func (t *http2Server) deleteStream(s *Stream, eosReceived bool) { - // In case stream sending and receiving are invoked in separate - // goroutines (e.g., bi-directional streaming), cancel needs to be - // called to interrupt the potential blocking on other goroutines. - s.cancel() t.mu.Lock() if _, ok := t.activeStreams[s.id]; ok { @@ -1250,6 +1251,11 @@ func (t *http2Server) deleteStream(s *Stream, eosReceived bool) { // finishStream closes the stream and puts the trailing headerFrame into controlbuf. func (t *http2Server) finishStream(s *Stream, rst bool, rstCode http2.ErrCode, hdr *headerFrame, eosReceived bool) { + // In case stream sending and receiving are invoked in separate + // goroutines (e.g., bi-directional streaming), cancel needs to be + // called to interrupt the potential blocking on other goroutines. + s.cancel() + oldState := s.swapState(streamDone) if oldState == streamDone { // If the stream was already done, return. @@ -1269,6 +1275,11 @@ func (t *http2Server) finishStream(s *Stream, rst bool, rstCode http2.ErrCode, h // closeStream clears the footprint of a stream when the stream is not needed any more. func (t *http2Server) closeStream(s *Stream, rst bool, rstCode http2.ErrCode, eosReceived bool) { + // In case stream sending and receiving are invoked in separate + // goroutines (e.g., bi-directional streaming), cancel needs to be + // called to interrupt the potential blocking on other goroutines. 
+ s.cancel() + s.swapState(streamDone) t.deleteStream(s, eosReceived) diff --git a/vendor/google.golang.org/grpc/internal/transport/http_util.go b/vendor/google.golang.org/grpc/internal/transport/http_util.go index d8247bcdf6..b775130686 100644 --- a/vendor/google.golang.org/grpc/internal/transport/http_util.go +++ b/vendor/google.golang.org/grpc/internal/transport/http_util.go @@ -322,8 +322,6 @@ type bufWriter struct { batchSize int conn net.Conn err error - - onFlush func() } func newBufWriter(conn net.Conn, batchSize int) *bufWriter { @@ -360,9 +358,6 @@ func (w *bufWriter) Flush() error { if w.offset == 0 { return nil } - if w.onFlush != nil { - w.onFlush() - } _, w.err = w.conn.Write(w.buf[:w.offset]) w.offset = 0 return w.err diff --git a/vendor/google.golang.org/grpc/internal/transport/transport.go b/vendor/google.golang.org/grpc/internal/transport/transport.go index d3bf65b2bd..6c3ba85159 100644 --- a/vendor/google.golang.org/grpc/internal/transport/transport.go +++ b/vendor/google.golang.org/grpc/internal/transport/transport.go @@ -34,6 +34,7 @@ import ( "google.golang.org/grpc/codes" "google.golang.org/grpc/credentials" + "google.golang.org/grpc/internal/channelz" "google.golang.org/grpc/keepalive" "google.golang.org/grpc/metadata" "google.golang.org/grpc/resolver" @@ -522,14 +523,14 @@ type ServerConfig struct { ConnectionTimeout time.Duration Credentials credentials.TransportCredentials InTapHandle tap.ServerInHandle - StatsHandler stats.Handler + StatsHandlers []stats.Handler KeepaliveParams keepalive.ServerParameters KeepalivePolicy keepalive.EnforcementPolicy InitialWindowSize int32 InitialConnWindowSize int32 WriteBufferSize int ReadBufferSize int - ChannelzParentID int64 + ChannelzParentID *channelz.Identifier MaxHeaderListSize *uint32 HeaderTableSize *uint32 } @@ -552,8 +553,8 @@ type ConnectOptions struct { CredsBundle credentials.Bundle // KeepaliveParams stores the keepalive parameters. KeepaliveParams keepalive.ClientParameters - // StatsHandler stores the handler for stats. - StatsHandler stats.Handler + // StatsHandlers stores the handler for stats. + StatsHandlers []stats.Handler // InitialWindowSize sets the initial window size for a stream. InitialWindowSize int32 // InitialConnWindowSize sets the initial window size for a connection. @@ -563,7 +564,7 @@ type ConnectOptions struct { // ReadBufferSize sets the size of read buffer, which in turn determines how much data can be read at most for one read syscall. ReadBufferSize int // ChannelzParentID sets the addrConn id which initiate the creation of this client transport. - ChannelzParentID int64 + ChannelzParentID *channelz.Identifier // MaxHeaderListSize sets the max (uncompressed) size of header list that is prepared to be received. MaxHeaderListSize *uint32 // UseProxy specifies if a proxy should be used. @@ -741,6 +742,12 @@ func (e ConnectionError) Origin() error { return e.err } +// Unwrap returns the original error of this connection error or nil when the +// origin is nil. +func (e ConnectionError) Unwrap() error { + return e.err +} + var ( // ErrConnClosing indicates that the transport is closing. 
ErrConnClosing = connectionErrorf(true, nil, "transport is closing") diff --git a/vendor/google.golang.org/grpc/metadata/metadata.go b/vendor/google.golang.org/grpc/metadata/metadata.go index 3604c7819f..8e0f6abe89 100644 --- a/vendor/google.golang.org/grpc/metadata/metadata.go +++ b/vendor/google.golang.org/grpc/metadata/metadata.go @@ -188,7 +188,9 @@ func FromIncomingContext(ctx context.Context) (MD, bool) { // map, and there's no guarantee that the MD attached to the context is // created using our helper functions. key := strings.ToLower(k) - out[key] = v + s := make([]string, len(v)) + copy(s, v) + out[key] = s } return out, true } @@ -226,7 +228,9 @@ func FromOutgoingContext(ctx context.Context) (MD, bool) { // map, and there's no guarantee that the MD attached to the context is // created using our helper functions. key := strings.ToLower(k) - out[key] = v + s := make([]string, len(v)) + copy(s, v) + out[key] = s } for _, added := range raw.added { if len(added)%2 == 1 { diff --git a/vendor/google.golang.org/grpc/picker_wrapper.go b/vendor/google.golang.org/grpc/picker_wrapper.go index e8367cb899..843633c910 100644 --- a/vendor/google.golang.org/grpc/picker_wrapper.go +++ b/vendor/google.golang.org/grpc/picker_wrapper.go @@ -131,7 +131,7 @@ func (pw *pickerWrapper) pick(ctx context.Context, failfast bool, info balancer. } if _, ok := status.FromError(err); ok { // Status error: end the RPC unconditionally with this status. - return nil, nil, err + return nil, nil, dropError{error: err} } // For all other errors, wait for ready RPCs should block and other // RPCs should fail with unavailable. @@ -175,3 +175,9 @@ func (pw *pickerWrapper) close() { pw.done = true close(pw.blockingCh) } + +// dropError is a wrapper error that indicates the LB policy wishes to drop the +// RPC and not retry it. +type dropError struct { + error +} diff --git a/vendor/google.golang.org/grpc/pickfirst.go b/vendor/google.golang.org/grpc/pickfirst.go index 5168b62b07..fb7a99e0a2 100644 --- a/vendor/google.golang.org/grpc/pickfirst.go +++ b/vendor/google.golang.org/grpc/pickfirst.go @@ -44,79 +44,107 @@ func (*pickfirstBuilder) Name() string { } type pickfirstBalancer struct { - state connectivity.State - cc balancer.ClientConn - sc balancer.SubConn + state connectivity.State + cc balancer.ClientConn + subConn balancer.SubConn } func (b *pickfirstBalancer) ResolverError(err error) { - switch b.state { - case connectivity.TransientFailure, connectivity.Idle, connectivity.Connecting: - // Set a failing picker if we don't have a good picker. - b.cc.UpdateState(balancer.State{ConnectivityState: connectivity.TransientFailure, - Picker: &picker{err: fmt.Errorf("name resolver error: %v", err)}, - }) - } if logger.V(2) { logger.Infof("pickfirstBalancer: ResolverError called with error %v", err) } + if b.subConn == nil { + b.state = connectivity.TransientFailure + } + + if b.state != connectivity.TransientFailure { + // The picker will not change since the balancer does not currently + // report an error. + return + } + b.cc.UpdateState(balancer.State{ + ConnectivityState: connectivity.TransientFailure, + Picker: &picker{err: fmt.Errorf("name resolver error: %v", err)}, + }) } -func (b *pickfirstBalancer) UpdateClientConnState(cs balancer.ClientConnState) error { - if len(cs.ResolverState.Addresses) == 0 { +func (b *pickfirstBalancer) UpdateClientConnState(state balancer.ClientConnState) error { + if len(state.ResolverState.Addresses) == 0 { + // The resolver reported an empty address list. 
Treat it like an error by + // calling b.ResolverError. + if b.subConn != nil { + // Remove the old subConn. All addresses were removed, so it is no longer + // valid. + b.cc.RemoveSubConn(b.subConn) + b.subConn = nil + } b.ResolverError(errors.New("produced zero addresses")) return balancer.ErrBadResolverState } - if b.sc == nil { - var err error - b.sc, err = b.cc.NewSubConn(cs.ResolverState.Addresses, balancer.NewSubConnOptions{}) - if err != nil { - if logger.V(2) { - logger.Errorf("pickfirstBalancer: failed to NewSubConn: %v", err) - } - b.state = connectivity.TransientFailure - b.cc.UpdateState(balancer.State{ConnectivityState: connectivity.TransientFailure, - Picker: &picker{err: fmt.Errorf("error creating connection: %v", err)}, - }) - return balancer.ErrBadResolverState + + if b.subConn != nil { + b.cc.UpdateAddresses(b.subConn, state.ResolverState.Addresses) + return nil + } + + subConn, err := b.cc.NewSubConn(state.ResolverState.Addresses, balancer.NewSubConnOptions{}) + if err != nil { + if logger.V(2) { + logger.Errorf("pickfirstBalancer: failed to NewSubConn: %v", err) } - b.state = connectivity.Idle - b.cc.UpdateState(balancer.State{ConnectivityState: connectivity.Idle, Picker: &picker{result: balancer.PickResult{SubConn: b.sc}}}) - b.sc.Connect() - } else { - b.cc.UpdateAddresses(b.sc, cs.ResolverState.Addresses) - b.sc.Connect() + b.state = connectivity.TransientFailure + b.cc.UpdateState(balancer.State{ + ConnectivityState: connectivity.TransientFailure, + Picker: &picker{err: fmt.Errorf("error creating connection: %v", err)}, + }) + return balancer.ErrBadResolverState } + b.subConn = subConn + b.state = connectivity.Idle + b.cc.UpdateState(balancer.State{ + ConnectivityState: connectivity.Idle, + Picker: &picker{result: balancer.PickResult{SubConn: b.subConn}}, + }) + b.subConn.Connect() return nil } -func (b *pickfirstBalancer) UpdateSubConnState(sc balancer.SubConn, s balancer.SubConnState) { +func (b *pickfirstBalancer) UpdateSubConnState(subConn balancer.SubConn, state balancer.SubConnState) { if logger.V(2) { - logger.Infof("pickfirstBalancer: UpdateSubConnState: %p, %v", sc, s) + logger.Infof("pickfirstBalancer: UpdateSubConnState: %p, %v", subConn, state) } - if b.sc != sc { + if b.subConn != subConn { if logger.V(2) { - logger.Infof("pickfirstBalancer: ignored state change because sc is not recognized") + logger.Infof("pickfirstBalancer: ignored state change because subConn is not recognized") } return } - b.state = s.ConnectivityState - if s.ConnectivityState == connectivity.Shutdown { - b.sc = nil + b.state = state.ConnectivityState + if state.ConnectivityState == connectivity.Shutdown { + b.subConn = nil return } - switch s.ConnectivityState { + switch state.ConnectivityState { case connectivity.Ready: - b.cc.UpdateState(balancer.State{ConnectivityState: s.ConnectivityState, Picker: &picker{result: balancer.PickResult{SubConn: sc}}}) + b.cc.UpdateState(balancer.State{ + ConnectivityState: state.ConnectivityState, + Picker: &picker{result: balancer.PickResult{SubConn: subConn}}, + }) case connectivity.Connecting: - b.cc.UpdateState(balancer.State{ConnectivityState: s.ConnectivityState, Picker: &picker{err: balancer.ErrNoSubConnAvailable}}) + b.cc.UpdateState(balancer.State{ + ConnectivityState: state.ConnectivityState, + Picker: &picker{err: balancer.ErrNoSubConnAvailable}, + }) case connectivity.Idle: - b.cc.UpdateState(balancer.State{ConnectivityState: s.ConnectivityState, Picker: &idlePicker{sc: sc}}) + b.cc.UpdateState(balancer.State{ + ConnectivityState: 
state.ConnectivityState, + Picker: &idlePicker{subConn: subConn}, + }) case connectivity.TransientFailure: b.cc.UpdateState(balancer.State{ - ConnectivityState: s.ConnectivityState, - Picker: &picker{err: s.ConnectionError}, + ConnectivityState: state.ConnectivityState, + Picker: &picker{err: state.ConnectionError}, }) } } @@ -125,8 +153,8 @@ func (b *pickfirstBalancer) Close() { } func (b *pickfirstBalancer) ExitIdle() { - if b.sc != nil && b.state == connectivity.Idle { - b.sc.Connect() + if b.subConn != nil && b.state == connectivity.Idle { + b.subConn.Connect() } } @@ -135,18 +163,18 @@ type picker struct { err error } -func (p *picker) Pick(info balancer.PickInfo) (balancer.PickResult, error) { +func (p *picker) Pick(balancer.PickInfo) (balancer.PickResult, error) { return p.result, p.err } // idlePicker is used when the SubConn is IDLE and kicks the SubConn into // CONNECTING when Pick is called. type idlePicker struct { - sc balancer.SubConn + subConn balancer.SubConn } -func (i *idlePicker) Pick(info balancer.PickInfo) (balancer.PickResult, error) { - i.sc.Connect() +func (i *idlePicker) Pick(balancer.PickInfo) (balancer.PickResult, error) { + i.subConn.Connect() return balancer.PickResult{}, balancer.ErrNoSubConnAvailable } diff --git a/vendor/google.golang.org/grpc/reflection/grpc_reflection_v1alpha/reflection_grpc.pb.go b/vendor/google.golang.org/grpc/reflection/grpc_reflection_v1alpha/reflection_grpc.pb.go index 7d05c14ebd..4e6a6b1a85 100644 --- a/vendor/google.golang.org/grpc/reflection/grpc_reflection_v1alpha/reflection_grpc.pb.go +++ b/vendor/google.golang.org/grpc/reflection/grpc_reflection_v1alpha/reflection_grpc.pb.go @@ -1,6 +1,6 @@ // Code generated by protoc-gen-go-grpc. DO NOT EDIT. // versions: -// - protoc-gen-go-grpc v1.1.0 +// - protoc-gen-go-grpc v1.2.0 // - protoc v3.14.0 // source: reflection/grpc_reflection_v1alpha/reflection.proto diff --git a/vendor/google.golang.org/grpc/reflection/serverreflection.go b/vendor/google.golang.org/grpc/reflection/serverreflection.go index 82a5ba7f24..81344abd77 100644 --- a/vendor/google.golang.org/grpc/reflection/serverreflection.go +++ b/vendor/google.golang.org/grpc/reflection/serverreflection.go @@ -37,21 +37,17 @@ To register server reflection on a gRPC server: package reflection // import "google.golang.org/grpc/reflection" import ( - "bytes" - "compress/gzip" - "fmt" "io" - "io/ioutil" - "reflect" "sort" - "sync" - "github.com/golang/protobuf/proto" - dpb "github.com/golang/protobuf/protoc-gen-go/descriptor" "google.golang.org/grpc" "google.golang.org/grpc/codes" rpb "google.golang.org/grpc/reflection/grpc_reflection_v1alpha" "google.golang.org/grpc/status" + "google.golang.org/protobuf/proto" + "google.golang.org/protobuf/reflect/protodesc" + "google.golang.org/protobuf/reflect/protoreflect" + "google.golang.org/protobuf/reflect/protoregistry" ) // GRPCServer is the interface provided by a gRPC server. It is implemented by @@ -59,339 +55,174 @@ import ( // as a registry, for accumulating the services exposed by the server. type GRPCServer interface { grpc.ServiceRegistrar - GetServiceInfo() map[string]grpc.ServiceInfo + ServiceInfoProvider } var _ GRPCServer = (*grpc.Server)(nil) -type serverReflectionServer struct { - rpb.UnimplementedServerReflectionServer - s GRPCServer - - initSymbols sync.Once - serviceNames []string - symbols map[string]*dpb.FileDescriptorProto // map of fully-qualified names to files -} - // Register registers the server reflection service on the given gRPC server. 
func Register(s GRPCServer) { - rpb.RegisterServerReflectionServer(s, &serverReflectionServer{ - s: s, - }) + svr := NewServer(ServerOptions{Services: s}) + rpb.RegisterServerReflectionServer(s, svr) } -// protoMessage is used for type assertion on proto messages. -// Generated proto message implements function Descriptor(), but Descriptor() -// is not part of interface proto.Message. This interface is needed to -// call Descriptor(). -type protoMessage interface { - Descriptor() ([]byte, []int) -} - -func (s *serverReflectionServer) getSymbols() (svcNames []string, symbolIndex map[string]*dpb.FileDescriptorProto) { - s.initSymbols.Do(func() { - serviceInfo := s.s.GetServiceInfo() - - s.symbols = map[string]*dpb.FileDescriptorProto{} - s.serviceNames = make([]string, 0, len(serviceInfo)) - processed := map[string]struct{}{} - for svc, info := range serviceInfo { - s.serviceNames = append(s.serviceNames, svc) - fdenc, ok := parseMetadata(info.Metadata) - if !ok { - continue - } - fd, err := decodeFileDesc(fdenc) - if err != nil { - continue - } - s.processFile(fd, processed) - } - sort.Strings(s.serviceNames) - }) - - return s.serviceNames, s.symbols -} - -func (s *serverReflectionServer) processFile(fd *dpb.FileDescriptorProto, processed map[string]struct{}) { - filename := fd.GetName() - if _, ok := processed[filename]; ok { - return - } - processed[filename] = struct{}{} - - prefix := fd.GetPackage() - - for _, msg := range fd.MessageType { - s.processMessage(fd, prefix, msg) - } - for _, en := range fd.EnumType { - s.processEnum(fd, prefix, en) - } - for _, ext := range fd.Extension { - s.processField(fd, prefix, ext) - } - for _, svc := range fd.Service { - svcName := fqn(prefix, svc.GetName()) - s.symbols[svcName] = fd - for _, meth := range svc.Method { - name := fqn(svcName, meth.GetName()) - s.symbols[name] = fd - } - } - - for _, dep := range fd.Dependency { - fdenc := proto.FileDescriptor(dep) - fdDep, err := decodeFileDesc(fdenc) - if err != nil { - continue - } - s.processFile(fdDep, processed) - } -} - -func (s *serverReflectionServer) processMessage(fd *dpb.FileDescriptorProto, prefix string, msg *dpb.DescriptorProto) { - msgName := fqn(prefix, msg.GetName()) - s.symbols[msgName] = fd - - for _, nested := range msg.NestedType { - s.processMessage(fd, msgName, nested) - } - for _, en := range msg.EnumType { - s.processEnum(fd, msgName, en) - } - for _, ext := range msg.Extension { - s.processField(fd, msgName, ext) - } - for _, fld := range msg.Field { - s.processField(fd, msgName, fld) - } - for _, oneof := range msg.OneofDecl { - oneofName := fqn(msgName, oneof.GetName()) - s.symbols[oneofName] = fd - } -} - -func (s *serverReflectionServer) processEnum(fd *dpb.FileDescriptorProto, prefix string, en *dpb.EnumDescriptorProto) { - enName := fqn(prefix, en.GetName()) - s.symbols[enName] = fd - - for _, val := range en.Value { - valName := fqn(enName, val.GetName()) - s.symbols[valName] = fd - } -} - -func (s *serverReflectionServer) processField(fd *dpb.FileDescriptorProto, prefix string, fld *dpb.FieldDescriptorProto) { - fldName := fqn(prefix, fld.GetName()) - s.symbols[fldName] = fd -} - -func fqn(prefix, name string) string { - if prefix == "" { - return name - } - return prefix + "." + name -} - -// fileDescForType gets the file descriptor for the given type. -// The given type should be a proto message. 
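The code being removed here (and below) maintained its own symbol index by gunzipping the descriptor blobs embedded by protoc-gen-go and unmarshalling them by hand. The replacement leans on the protobuf runtime's registries instead. A small sketch of that lookup path, assuming descriptor.proto is linked into the binary via the blank import:

package main

import (
    "fmt"

    "google.golang.org/protobuf/reflect/protodesc"
    "google.golang.org/protobuf/reflect/protoregistry"
    _ "google.golang.org/protobuf/types/descriptorpb" // registers descriptor.proto with GlobalFiles
)

func main() {
    // Where the deleted code decompressed proto.FileDescriptor blobs, the
    // new implementation asks the runtime registry directly and converts
    // to a FileDescriptorProto only when wire bytes are needed.
    fd, err := protoregistry.GlobalFiles.FindFileByPath("google/protobuf/descriptor.proto")
    if err != nil {
        fmt.Println("not linked in:", err)
        return
    }
    fdp := protodesc.ToFileDescriptorProto(fd)
    fmt.Println(fdp.GetName(), "with", len(fdp.GetMessageType()), "top-level messages")
}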
-func (s *serverReflectionServer) fileDescForType(st reflect.Type) (*dpb.FileDescriptorProto, error) { - m, ok := reflect.Zero(reflect.PtrTo(st)).Interface().(protoMessage) - if !ok { - return nil, fmt.Errorf("failed to create message from type: %v", st) - } - enc, _ := m.Descriptor() - - return decodeFileDesc(enc) -} - -// decodeFileDesc does decompression and unmarshalling on the given -// file descriptor byte slice. -func decodeFileDesc(enc []byte) (*dpb.FileDescriptorProto, error) { - raw, err := decompress(enc) - if err != nil { - return nil, fmt.Errorf("failed to decompress enc: %v", err) - } - - fd := new(dpb.FileDescriptorProto) - if err := proto.Unmarshal(raw, fd); err != nil { - return nil, fmt.Errorf("bad descriptor: %v", err) - } - return fd, nil +// ServiceInfoProvider is an interface used to retrieve metadata about the +// services to expose. +// +// The reflection service is only interested in the service names, but the +// signature is this way so that *grpc.Server implements it. So it is okay +// for a custom implementation to return zero values for the +// grpc.ServiceInfo values in the map. +// +// Experimental +// +// Notice: This type is EXPERIMENTAL and may be changed or removed in a +// later release. +type ServiceInfoProvider interface { + GetServiceInfo() map[string]grpc.ServiceInfo } -// decompress does gzip decompression. -func decompress(b []byte) ([]byte, error) { - r, err := gzip.NewReader(bytes.NewReader(b)) - if err != nil { - return nil, fmt.Errorf("bad gzipped descriptor: %v", err) - } - out, err := ioutil.ReadAll(r) - if err != nil { - return nil, fmt.Errorf("bad gzipped descriptor: %v", err) - } - return out, nil +// ExtensionResolver is the interface used to query details about extensions. +// This interface is satisfied by protoregistry.GlobalTypes. +// +// Experimental +// +// Notice: This type is EXPERIMENTAL and may be changed or removed in a +// later release. +type ExtensionResolver interface { + protoregistry.ExtensionTypeResolver + RangeExtensionsByMessage(message protoreflect.FullName, f func(protoreflect.ExtensionType) bool) } -func typeForName(name string) (reflect.Type, error) { - pt := proto.MessageType(name) - if pt == nil { - return nil, fmt.Errorf("unknown type: %q", name) - } - st := pt.Elem() - - return st, nil +// ServerOptions represents the options used to construct a reflection server. +// +// Experimental +// +// Notice: This type is EXPERIMENTAL and may be changed or removed in a +// later release. +type ServerOptions struct { + // The source of advertised RPC services. If not specified, the reflection + // server will report an empty list when asked to list services. + // + // This value will typically be a *grpc.Server. But the set of advertised + // services can be customized by wrapping a *grpc.Server or using an + // alternate implementation that returns a custom set of service names. + Services ServiceInfoProvider + // Optional resolver used to load descriptors. If not specified, + // protoregistry.GlobalFiles will be used. + DescriptorResolver protodesc.Resolver + // Optional resolver used to query for known extensions. If not specified, + // protoregistry.GlobalTypes will be used. 
+ ExtensionResolver ExtensionResolver } -func fileDescContainingExtension(st reflect.Type, ext int32) (*dpb.FileDescriptorProto, error) { - m, ok := reflect.Zero(reflect.PtrTo(st)).Interface().(proto.Message) - if !ok { - return nil, fmt.Errorf("failed to create message from type: %v", st) +// NewServer returns a reflection server implementation using the given options. +// This can be used to customize behavior of the reflection service. Most usages +// should prefer to use Register instead. +// +// Experimental +// +// Notice: This function is EXPERIMENTAL and may be changed or removed in a +// later release. +func NewServer(opts ServerOptions) rpb.ServerReflectionServer { + if opts.DescriptorResolver == nil { + opts.DescriptorResolver = protoregistry.GlobalFiles + } + if opts.ExtensionResolver == nil { + opts.ExtensionResolver = protoregistry.GlobalTypes + } + return &serverReflectionServer{ + s: opts.Services, + descResolver: opts.DescriptorResolver, + extResolver: opts.ExtensionResolver, } - - var extDesc *proto.ExtensionDesc - for id, desc := range proto.RegisteredExtensions(m) { - if id == ext { - extDesc = desc - break - } - } - - if extDesc == nil { - return nil, fmt.Errorf("failed to find registered extension for extension number %v", ext) - } - - return decodeFileDesc(proto.FileDescriptor(extDesc.Filename)) } -func (s *serverReflectionServer) allExtensionNumbersForType(st reflect.Type) ([]int32, error) { - m, ok := reflect.Zero(reflect.PtrTo(st)).Interface().(proto.Message) - if !ok { - return nil, fmt.Errorf("failed to create message from type: %v", st) - } - - exts := proto.RegisteredExtensions(m) - out := make([]int32, 0, len(exts)) - for id := range exts { - out = append(out, id) - } - return out, nil +type serverReflectionServer struct { + rpb.UnimplementedServerReflectionServer + s ServiceInfoProvider + descResolver protodesc.Resolver + extResolver ExtensionResolver } // fileDescWithDependencies returns a slice of serialized fileDescriptors in // wire format ([]byte). The fileDescriptors will include fd and all the // transitive dependencies of fd with names not in sentFileDescriptors. 
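With NewServer and ServerOptions in place above, Register becomes a thin wrapper. A sketch of the equivalent explicit registration, useful when swapping in a custom ServiceInfoProvider or resolvers (assumes the reflection package at the version introduced by this diff):

package main

import (
    "google.golang.org/grpc"
    "google.golang.org/grpc/reflection"
    rpb "google.golang.org/grpc/reflection/grpc_reflection_v1alpha"
)

func main() {
    s := grpc.NewServer()

    // reflection.Register(s) is now shorthand for the two lines below.
    // Going through NewServer directly lets a caller substitute a custom
    // ServiceInfoProvider or descriptor/extension resolvers.
    svr := reflection.NewServer(reflection.ServerOptions{Services: s})
    rpb.RegisterServerReflectionServer(s, svr)

    // s.Serve(lis) would follow in a real server.
}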
-func fileDescWithDependencies(fd *dpb.FileDescriptorProto, sentFileDescriptors map[string]bool) ([][]byte, error) { - r := [][]byte{} - queue := []*dpb.FileDescriptorProto{fd} +func (s *serverReflectionServer) fileDescWithDependencies(fd protoreflect.FileDescriptor, sentFileDescriptors map[string]bool) ([][]byte, error) { + var r [][]byte + queue := []protoreflect.FileDescriptor{fd} for len(queue) > 0 { currentfd := queue[0] queue = queue[1:] - if sent := sentFileDescriptors[currentfd.GetName()]; len(r) == 0 || !sent { - sentFileDescriptors[currentfd.GetName()] = true - currentfdEncoded, err := proto.Marshal(currentfd) + if sent := sentFileDescriptors[currentfd.Path()]; len(r) == 0 || !sent { + sentFileDescriptors[currentfd.Path()] = true + fdProto := protodesc.ToFileDescriptorProto(currentfd) + currentfdEncoded, err := proto.Marshal(fdProto) if err != nil { return nil, err } r = append(r, currentfdEncoded) } - for _, dep := range currentfd.Dependency { - fdenc := proto.FileDescriptor(dep) - fdDep, err := decodeFileDesc(fdenc) - if err != nil { - continue - } - queue = append(queue, fdDep) + for i := 0; i < currentfd.Imports().Len(); i++ { + queue = append(queue, currentfd.Imports().Get(i)) } } return r, nil } -// fileDescEncodingByFilename finds the file descriptor for given filename, -// finds all of its previously unsent transitive dependencies, does marshalling -// on them, and returns the marshalled result. -func (s *serverReflectionServer) fileDescEncodingByFilename(name string, sentFileDescriptors map[string]bool) ([][]byte, error) { - enc := proto.FileDescriptor(name) - if enc == nil { - return nil, fmt.Errorf("unknown file: %v", name) - } - fd, err := decodeFileDesc(enc) - if err != nil { - return nil, err - } - return fileDescWithDependencies(fd, sentFileDescriptors) -} - -// parseMetadata finds the file descriptor bytes specified meta. -// For SupportPackageIsVersion4, m is the name of the proto file, we -// call proto.FileDescriptor to get the byte slice. -// For SupportPackageIsVersion3, m is a byte slice itself. -func parseMetadata(meta interface{}) ([]byte, bool) { - // Check if meta is the file name. - if fileNameForMeta, ok := meta.(string); ok { - return proto.FileDescriptor(fileNameForMeta), true - } - - // Check if meta is the byte slice. - if enc, ok := meta.([]byte); ok { - return enc, true - } - - return nil, false -} - // fileDescEncodingContainingSymbol finds the file descriptor containing the // given symbol, finds all of its previously unsent transitive dependencies, // does marshalling on them, and returns the marshalled result. The given symbol // can be a type, a service or a method. func (s *serverReflectionServer) fileDescEncodingContainingSymbol(name string, sentFileDescriptors map[string]bool) ([][]byte, error) { - _, symbols := s.getSymbols() - fd := symbols[name] - if fd == nil { - // Check if it's a type name that was not present in the - // transitive dependencies of the registered services. 
- if st, err := typeForName(name); err == nil { - fd, err = s.fileDescForType(st) - if err != nil { - return nil, err - } - } - } - - if fd == nil { - return nil, fmt.Errorf("unknown symbol: %v", name) + d, err := s.descResolver.FindDescriptorByName(protoreflect.FullName(name)) + if err != nil { + return nil, err } - - return fileDescWithDependencies(fd, sentFileDescriptors) + return s.fileDescWithDependencies(d.ParentFile(), sentFileDescriptors) } // fileDescEncodingContainingExtension finds the file descriptor containing // given extension, finds all of its previously unsent transitive dependencies, // does marshalling on them, and returns the marshalled result. func (s *serverReflectionServer) fileDescEncodingContainingExtension(typeName string, extNum int32, sentFileDescriptors map[string]bool) ([][]byte, error) { - st, err := typeForName(typeName) - if err != nil { - return nil, err - } - fd, err := fileDescContainingExtension(st, extNum) + xt, err := s.extResolver.FindExtensionByNumber(protoreflect.FullName(typeName), protoreflect.FieldNumber(extNum)) if err != nil { return nil, err } - return fileDescWithDependencies(fd, sentFileDescriptors) + return s.fileDescWithDependencies(xt.TypeDescriptor().ParentFile(), sentFileDescriptors) } // allExtensionNumbersForTypeName returns all extension numbers for the given type. func (s *serverReflectionServer) allExtensionNumbersForTypeName(name string) ([]int32, error) { - st, err := typeForName(name) - if err != nil { - return nil, err + var numbers []int32 + s.extResolver.RangeExtensionsByMessage(protoreflect.FullName(name), func(xt protoreflect.ExtensionType) bool { + numbers = append(numbers, int32(xt.TypeDescriptor().Number())) + return true + }) + sort.Slice(numbers, func(i, j int) bool { + return numbers[i] < numbers[j] + }) + if len(numbers) == 0 { + // maybe return an error if given type name is not known + if _, err := s.descResolver.FindDescriptorByName(protoreflect.FullName(name)); err != nil { + return nil, err + } } - extNums, err := s.allExtensionNumbersForType(st) - if err != nil { - return nil, err + return numbers, nil +} + +// listServices returns the names of services this server exposes. +func (s *serverReflectionServer) listServices() []*rpb.ServiceResponse { + serviceInfo := s.s.GetServiceInfo() + resp := make([]*rpb.ServiceResponse, 0, len(serviceInfo)) + for svc := range serviceInfo { + resp = append(resp, &rpb.ServiceResponse{Name: svc}) } - return extNums, nil + sort.Slice(resp, func(i, j int) bool { + return resp[i].Name < resp[j].Name + }) + return resp } // ServerReflectionInfo is the reflection service handler. 
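Stepping back to the metadata.go hunks earlier in this section: FromIncomingContext and FromOutgoingContext now deep-copy each value slice, so callers can no longer corrupt the MD stored in the context by mutating the returned map. A small demonstration (the key and values are made up):

package main

import (
    "context"
    "fmt"

    "google.golang.org/grpc/metadata"
)

func main() {
    ctx := metadata.NewIncomingContext(context.Background(),
        metadata.Pairs("user-key", "original"))

    md, _ := metadata.FromIncomingContext(ctx)
    md["user-key"][0] = "mutated" // mutating the returned copy...

    again, _ := metadata.FromIncomingContext(ctx)
    fmt.Println(again["user-key"][0]) // ...still prints "original" after this change
}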
@@ -412,7 +243,11 @@ func (s *serverReflectionServer) ServerReflectionInfo(stream rpb.ServerReflectio } switch req := in.MessageRequest.(type) { case *rpb.ServerReflectionRequest_FileByFilename: - b, err := s.fileDescEncodingByFilename(req.FileByFilename, sentFileDescriptors) + var b [][]byte + fd, err := s.descResolver.FindFileByPath(req.FileByFilename) + if err == nil { + b, err = s.fileDescWithDependencies(fd, sentFileDescriptors) + } if err != nil { out.MessageResponse = &rpb.ServerReflectionResponse_ErrorResponse{ ErrorResponse: &rpb.ErrorResponse{ @@ -473,16 +308,9 @@ func (s *serverReflectionServer) ServerReflectionInfo(stream rpb.ServerReflectio } } case *rpb.ServerReflectionRequest_ListServices: - svcNames, _ := s.getSymbols() - serviceResponses := make([]*rpb.ServiceResponse, len(svcNames)) - for i, n := range svcNames { - serviceResponses[i] = &rpb.ServiceResponse{ - Name: n, - } - } out.MessageResponse = &rpb.ServerReflectionResponse_ListServicesResponse{ ListServicesResponse: &rpb.ListServiceResponse{ - Service: serviceResponses, + Service: s.listServices(), }, } default: diff --git a/vendor/google.golang.org/grpc/regenerate.sh b/vendor/google.golang.org/grpc/regenerate.sh index a0a71aae96..99db79fafc 100644 --- a/vendor/google.golang.org/grpc/regenerate.sh +++ b/vendor/google.golang.org/grpc/regenerate.sh @@ -27,9 +27,9 @@ export PATH=${GOBIN}:${PATH} mkdir -p ${GOBIN} echo "remove existing generated files" -# grpc_testingv3/testv3.pb.go is not re-generated because it was -# intentionally generated by an older version of protoc-gen-go. -rm -f $(find . -name '*.pb.go' | grep -v 'grpc_testingv3/testv3.pb.go') +# grpc_testing_not_regenerate/*.pb.go is not re-generated, +# see grpc_testing_not_regenerate/README.md for details. +rm -f $(find . -name '*.pb.go' | grep -v 'grpc_testing_not_regenerate') echo "go install google.golang.org/protobuf/cmd/protoc-gen-go" (cd test/tools && go install google.golang.org/protobuf/cmd/protoc-gen-go) @@ -68,7 +68,6 @@ SOURCES=( ${WORKDIR}/grpc-proto/grpc/gcp/transport_security_common.proto ${WORKDIR}/grpc-proto/grpc/lookup/v1/rls.proto ${WORKDIR}/grpc-proto/grpc/lookup/v1/rls_config.proto - ${WORKDIR}/grpc-proto/grpc/service_config/service_config.proto ${WORKDIR}/grpc-proto/grpc/testing/*.proto ${WORKDIR}/grpc-proto/grpc/core/*.proto ) @@ -76,7 +75,20 @@ SOURCES=( # These options of the form 'Mfoo.proto=bar' instruct the codegen to use an # import path of 'bar' in the generated code when 'foo.proto' is imported in # one of the sources. -OPTS=Mgrpc/service_config/service_config.proto=/internal/proto/grpc_service_config,Mgrpc/core/stats.proto=google.golang.org/grpc/interop/grpc_testing/core +# +# Note that the protos listed here are all for testing purposes. All protos to +# be used externally should have a go_package option (and they don't need to be +# listed here). 
+OPTS=Mgrpc/core/stats.proto=google.golang.org/grpc/interop/grpc_testing/core,\ +Mgrpc/testing/benchmark_service.proto=google.golang.org/grpc/interop/grpc_testing,\ +Mgrpc/testing/stats.proto=google.golang.org/grpc/interop/grpc_testing,\ +Mgrpc/testing/report_qps_scenario_service.proto=google.golang.org/grpc/interop/grpc_testing,\ +Mgrpc/testing/messages.proto=google.golang.org/grpc/interop/grpc_testing,\ +Mgrpc/testing/worker_service.proto=google.golang.org/grpc/interop/grpc_testing,\ +Mgrpc/testing/control.proto=google.golang.org/grpc/interop/grpc_testing,\ +Mgrpc/testing/test.proto=google.golang.org/grpc/interop/grpc_testing,\ +Mgrpc/testing/payloads.proto=google.golang.org/grpc/interop/grpc_testing,\ +Mgrpc/testing/empty.proto=google.golang.org/grpc/interop/grpc_testing for src in ${SOURCES[@]}; do echo "protoc ${src}" @@ -85,7 +97,6 @@ for src in ${SOURCES[@]}; do -I${WORKDIR}/grpc-proto \ -I${WORKDIR}/googleapis \ -I${WORKDIR}/protobuf/src \ - -I${WORKDIR}/istio \ ${src} done @@ -96,7 +107,6 @@ for src in ${LEGACY_SOURCES[@]}; do -I${WORKDIR}/grpc-proto \ -I${WORKDIR}/googleapis \ -I${WORKDIR}/protobuf/src \ - -I${WORKDIR}/istio \ ${src} done @@ -105,12 +115,9 @@ done mkdir -p ${WORKDIR}/out/google.golang.org/grpc/internal/proto/grpc_lookup_v1 mv ${WORKDIR}/out/google.golang.org/grpc/lookup/grpc_lookup_v1/* ${WORKDIR}/out/google.golang.org/grpc/internal/proto/grpc_lookup_v1 -# grpc_testingv3/testv3.pb.go is not re-generated because it was -# intentionally generated by an older version of protoc-gen-go. -rm ${WORKDIR}/out/google.golang.org/grpc/reflection/grpc_testingv3/*.pb.go - -# grpc/service_config/service_config.proto does not have a go_package option. -mv ${WORKDIR}/out/grpc/service_config/service_config.pb.go internal/proto/grpc_service_config +# grpc_testing_not_regenerate/*.pb.go are not re-generated, +# see grpc_testing_not_regenerate/README.md for details. +rm ${WORKDIR}/out/google.golang.org/grpc/reflection/grpc_testing_not_regenerate/*.pb.go # grpc/testing does not have a go_package option. mv ${WORKDIR}/out/grpc/testing/*.pb.go interop/grpc_testing/ diff --git a/vendor/google.golang.org/grpc/resolver/map.go b/vendor/google.golang.org/grpc/resolver/map.go index e87ecd0eeb..efcb7f3efd 100644 --- a/vendor/google.golang.org/grpc/resolver/map.go +++ b/vendor/google.golang.org/grpc/resolver/map.go @@ -28,25 +28,40 @@ type addressMapEntry struct { // Multiple accesses may not be performed concurrently. Must be created via // NewAddressMap; do not construct directly. type AddressMap struct { - m map[string]addressMapEntryList + // The underlying map is keyed by an Address with fields that we don't care + // about being set to their zero values. The only fields that we care about + // are `Addr`, `ServerName` and `Attributes`. Since we need to be able to + // distinguish between addresses with same `Addr` and `ServerName`, but + // different `Attributes`, we cannot store the `Attributes` in the map key. + // + // The comparison operation for structs work as follows: + // Struct values are comparable if all their fields are comparable. Two + // struct values are equal if their corresponding non-blank fields are equal. + // + // The value type of the map contains a slice of addresses which match the key + // in their `Addr` and `ServerName` fields and contain the corresponding value + // associated with them. 
+ m map[Address]addressMapEntryList +} + +func toMapKey(addr *Address) Address { + return Address{Addr: addr.Addr, ServerName: addr.ServerName} } type addressMapEntryList []*addressMapEntry // NewAddressMap creates a new AddressMap. func NewAddressMap() *AddressMap { - return &AddressMap{m: make(map[string]addressMapEntryList)} + return &AddressMap{m: make(map[Address]addressMapEntryList)} } // find returns the index of addr in the addressMapEntry slice, or -1 if not // present. func (l addressMapEntryList) find(addr Address) int { - if len(l) == 0 { - return -1 - } for i, entry := range l { - if entry.addr.ServerName == addr.ServerName && - entry.addr.Attributes.Equal(addr.Attributes) { + // Attributes are the only thing to match on here, since `Addr` and + // `ServerName` are already equal. + if entry.addr.Attributes.Equal(addr.Attributes) { return i } } @@ -55,7 +70,8 @@ func (l addressMapEntryList) find(addr Address) int { // Get returns the value for the address in the map, if present. func (a *AddressMap) Get(addr Address) (value interface{}, ok bool) { - entryList := a.m[addr.Addr] + addrKey := toMapKey(&addr) + entryList := a.m[addrKey] if entry := entryList.find(addr); entry != -1 { return entryList[entry].value, true } @@ -64,17 +80,19 @@ func (a *AddressMap) Get(addr Address) (value interface{}, ok bool) { // Set updates or adds the value to the address in the map. func (a *AddressMap) Set(addr Address, value interface{}) { - entryList := a.m[addr.Addr] + addrKey := toMapKey(&addr) + entryList := a.m[addrKey] if entry := entryList.find(addr); entry != -1 { - a.m[addr.Addr][entry].value = value + entryList[entry].value = value return } - a.m[addr.Addr] = append(a.m[addr.Addr], &addressMapEntry{addr: addr, value: value}) + a.m[addrKey] = append(entryList, &addressMapEntry{addr: addr, value: value}) } // Delete removes addr from the map. func (a *AddressMap) Delete(addr Address) { - entryList := a.m[addr.Addr] + addrKey := toMapKey(&addr) + entryList := a.m[addrKey] entry := entryList.find(addr) if entry == -1 { return @@ -85,7 +103,7 @@ func (a *AddressMap) Delete(addr Address) { copy(entryList[entry:], entryList[entry+1:]) entryList = entryList[:len(entryList)-1] } - a.m[addr.Addr] = entryList + a.m[addrKey] = entryList } // Len returns the number of entries in the map. @@ -107,3 +125,14 @@ func (a *AddressMap) Keys() []Address { } return ret } + +// Values returns a slice of all current map values. +func (a *AddressMap) Values() []interface{} { + ret := make([]interface{}, 0, a.Len()) + for _, entryList := range a.m { + for _, entry := range entryList { + ret = append(ret, entry.value) + } + } + return ret +} diff --git a/vendor/google.golang.org/grpc/resolver/resolver.go b/vendor/google.golang.org/grpc/resolver/resolver.go index e28b680260..ca2e35a359 100644 --- a/vendor/google.golang.org/grpc/resolver/resolver.go +++ b/vendor/google.golang.org/grpc/resolver/resolver.go @@ -27,6 +27,7 @@ import ( "google.golang.org/grpc/attributes" "google.golang.org/grpc/credentials" + "google.golang.org/grpc/internal/pretty" "google.golang.org/grpc/serviceconfig" ) @@ -139,13 +140,18 @@ type Address struct { // Equal returns whether a and o are identical. Metadata is compared directly, // not with any recursive introspection. 
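The resolver/map.go hunks above re-key AddressMap by an Address struct holding only Addr and ServerName, with Attributes still compared entry-by-entry inside each bucket, and add a Values accessor. A sketch of the resulting semantics, assuming the attributes package's key/value pair form of New:

package main

import (
    "fmt"

    "google.golang.org/grpc/attributes"
    "google.golang.org/grpc/resolver"
)

func main() {
    m := resolver.NewAddressMap()

    plain := resolver.Address{Addr: "10.0.0.1:443", ServerName: "backend"}
    sharded := resolver.Address{Addr: "10.0.0.1:443", ServerName: "backend",
        Attributes: attributes.New("shard", 1)}

    // Same Addr and ServerName, different Attributes: two distinct entries.
    m.Set(plain, "plain")
    m.Set(sharded, "shard-1")

    fmt.Println(m.Len())    // 2
    fmt.Println(m.Values()) // both values, order unspecified
}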
-func (a *Address) Equal(o Address) bool { +func (a Address) Equal(o Address) bool { return a.Addr == o.Addr && a.ServerName == o.ServerName && a.Attributes.Equal(o.Attributes) && a.BalancerAttributes.Equal(o.BalancerAttributes) && a.Type == o.Type && a.Metadata == o.Metadata } +// String returns JSON formatted string representation of the address. +func (a Address) String() string { + return pretty.ToJSON(a) +} + // BuildOptions includes additional information for the builder to create // the resolver. type BuildOptions struct { diff --git a/vendor/google.golang.org/grpc/resolver_conn_wrapper.go b/vendor/google.golang.org/grpc/resolver_conn_wrapper.go index 2c47cd54f0..05a9d4e0ba 100644 --- a/vendor/google.golang.org/grpc/resolver_conn_wrapper.go +++ b/vendor/google.golang.org/grpc/resolver_conn_wrapper.go @@ -19,7 +19,6 @@ package grpc import ( - "fmt" "strings" "sync" @@ -27,6 +26,7 @@ import ( "google.golang.org/grpc/credentials" "google.golang.org/grpc/internal/channelz" "google.golang.org/grpc/internal/grpcsync" + "google.golang.org/grpc/internal/pretty" "google.golang.org/grpc/resolver" "google.golang.org/grpc/serviceconfig" ) @@ -97,10 +97,7 @@ func (ccr *ccResolverWrapper) UpdateState(s resolver.State) error { if ccr.done.HasFired() { return nil } - channelz.Infof(logger, ccr.cc.channelzID, "ccResolverWrapper: sending update to cc: %v", s) - if channelz.IsOn() { - ccr.addChannelzTraceEvent(s) - } + ccr.addChannelzTraceEvent(s) ccr.curState = s if err := ccr.cc.updateResolverState(ccr.curState, nil); err == balancer.ErrBadResolverState { return balancer.ErrBadResolverState @@ -125,10 +122,7 @@ func (ccr *ccResolverWrapper) NewAddress(addrs []resolver.Address) { if ccr.done.HasFired() { return } - channelz.Infof(logger, ccr.cc.channelzID, "ccResolverWrapper: sending new addresses to cc: %v", addrs) - if channelz.IsOn() { - ccr.addChannelzTraceEvent(resolver.State{Addresses: addrs, ServiceConfig: ccr.curState.ServiceConfig}) - } + ccr.addChannelzTraceEvent(resolver.State{Addresses: addrs, ServiceConfig: ccr.curState.ServiceConfig}) ccr.curState.Addresses = addrs ccr.cc.updateResolverState(ccr.curState, nil) } @@ -141,7 +135,7 @@ func (ccr *ccResolverWrapper) NewServiceConfig(sc string) { if ccr.done.HasFired() { return } - channelz.Infof(logger, ccr.cc.channelzID, "ccResolverWrapper: got new service config: %v", sc) + channelz.Infof(logger, ccr.cc.channelzID, "ccResolverWrapper: got new service config: %s", sc) if ccr.cc.dopts.disableServiceConfig { channelz.Info(logger, ccr.cc.channelzID, "Service config lookups disabled; ignoring config") return @@ -151,9 +145,7 @@ func (ccr *ccResolverWrapper) NewServiceConfig(sc string) { channelz.Warningf(logger, ccr.cc.channelzID, "ccResolverWrapper: error parsing service config: %v", scpr.Err) return } - if channelz.IsOn() { - ccr.addChannelzTraceEvent(resolver.State{Addresses: ccr.curState.Addresses, ServiceConfig: scpr}) - } + ccr.addChannelzTraceEvent(resolver.State{Addresses: ccr.curState.Addresses, ServiceConfig: scpr}) ccr.curState.ServiceConfig = scpr ccr.cc.updateResolverState(ccr.curState, nil) } @@ -180,8 +172,5 @@ func (ccr *ccResolverWrapper) addChannelzTraceEvent(s resolver.State) { } else if len(ccr.curState.Addresses) == 0 && len(s.Addresses) > 0 { updates = append(updates, "resolver returned new addresses") } - channelz.AddTraceEvent(logger, ccr.cc.channelzID, 0, &channelz.TraceEventDesc{ - Desc: fmt.Sprintf("Resolver state updated: %+v (%v)", s, strings.Join(updates, "; ")), - Severity: channelz.CtInfo, - }) + 
channelz.Infof(logger, ccr.cc.channelzID, "Resolver state updated: %s (%v)", pretty.ToJSON(s), strings.Join(updates, "; ")) } diff --git a/vendor/google.golang.org/grpc/server.go b/vendor/google.golang.org/grpc/server.go index eadf9e05fd..b54f5bb572 100644 --- a/vendor/google.golang.org/grpc/server.go +++ b/vendor/google.golang.org/grpc/server.go @@ -73,6 +73,12 @@ func init() { internal.DrainServerTransports = func(srv *Server, addr string) { srv.drainServerTransports(addr) } + internal.AddExtraServerOptions = func(opt ...ServerOption) { + extraServerOptions = opt + } + internal.ClearExtraServerOptions = func() { + extraServerOptions = nil + } } var statusOK = status.New(codes.OK, "") @@ -134,7 +140,7 @@ type Server struct { channelzRemoveOnce sync.Once serveWG sync.WaitGroup // counts active Serve goroutines for GracefulStop - channelzID int64 // channelz unique identification number + channelzID *channelz.Identifier czData *channelzData serverWorkerChannels []chan *serverWorkerData @@ -150,7 +156,7 @@ type serverOptions struct { chainUnaryInts []UnaryServerInterceptor chainStreamInts []StreamServerInterceptor inTapHandle tap.ServerInHandle - statsHandler stats.Handler + statsHandlers []stats.Handler maxConcurrentStreams uint32 maxReceiveMessageSize int maxSendMessageSize int @@ -174,6 +180,7 @@ var defaultServerOptions = serverOptions{ writeBufferSize: defaultWriteBufSize, readBufferSize: defaultReadBufSize, } +var extraServerOptions []ServerOption // A ServerOption sets options such as credentials, codec and keepalive parameters, etc. type ServerOption interface { @@ -435,7 +442,7 @@ func InTapHandle(h tap.ServerInHandle) ServerOption { // StatsHandler returns a ServerOption that sets the stats handler for the server. func StatsHandler(h stats.Handler) ServerOption { return newFuncServerOption(func(o *serverOptions) { - o.statsHandler = h + o.statsHandlers = append(o.statsHandlers, h) }) } @@ -560,6 +567,9 @@ func (s *Server) stopServerWorkers() { // started to accept requests yet. 
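grpc.StatsHandler above now appends to serverOptions.statsHandlers rather than overwriting a single field, so several handlers can observe the same server. A sketch with a hypothetical printHandler; the registration pattern is the point, not the handler:

package main

import (
    "context"
    "fmt"

    "google.golang.org/grpc"
    "google.golang.org/grpc/stats"
)

// printHandler is a hypothetical stats.Handler that only prints event types.
type printHandler struct{ name string }

func (h *printHandler) TagRPC(ctx context.Context, _ *stats.RPCTagInfo) context.Context {
    return ctx
}

func (h *printHandler) HandleRPC(_ context.Context, s stats.RPCStats) {
    fmt.Printf("%s saw %T\n", h.name, s)
}

func (h *printHandler) TagConn(ctx context.Context, _ *stats.ConnTagInfo) context.Context {
    return ctx
}

func (h *printHandler) HandleConn(_ context.Context, s stats.ConnStats) {
    fmt.Printf("%s saw %T\n", h.name, s)
}

func main() {
    // Both options are now retained; before this change the second
    // grpc.StatsHandler silently replaced the first.
    s := grpc.NewServer(
        grpc.StatsHandler(&printHandler{name: "metrics"}),
        grpc.StatsHandler(&printHandler{name: "tracing"}),
    )
    s.Stop()
}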
func NewServer(opt ...ServerOption) *Server { opts := defaultServerOptions + for _, o := range extraServerOptions { + o.apply(&opts) + } for _, o := range opt { o.apply(&opts) } @@ -584,9 +594,8 @@ func NewServer(opt ...ServerOption) *Server { s.initServerWorkers() } - if channelz.IsOn() { - s.channelzID = channelz.RegisterServer(&channelzServer{s}, "") - } + s.channelzID = channelz.RegisterServer(&channelzServer{s}, "") + channelz.Info(logger, s.channelzID, "Server created") return s } @@ -712,7 +721,7 @@ var ErrServerStopped = errors.New("grpc: the server has been stopped") type listenSocket struct { net.Listener - channelzID int64 + channelzID *channelz.Identifier } func (l *listenSocket) ChannelzMetric() *channelz.SocketInternalMetric { @@ -724,9 +733,8 @@ func (l *listenSocket) ChannelzMetric() *channelz.SocketInternalMetric { func (l *listenSocket) Close() error { err := l.Listener.Close() - if channelz.IsOn() { - channelz.RemoveEntry(l.channelzID) - } + channelz.RemoveEntry(l.channelzID) + channelz.Info(logger, l.channelzID, "ListenSocket deleted") return err } @@ -759,11 +767,6 @@ func (s *Server) Serve(lis net.Listener) error { ls := &listenSocket{Listener: lis} s.lis[ls] = true - if channelz.IsOn() { - ls.channelzID = channelz.RegisterListenSocket(ls, s.channelzID, lis.Addr().String()) - } - s.mu.Unlock() - defer func() { s.mu.Lock() if s.lis != nil && s.lis[ls] { @@ -773,8 +776,16 @@ func (s *Server) Serve(lis net.Listener) error { s.mu.Unlock() }() - var tempDelay time.Duration // how long to sleep on accept failure + var err error + ls.channelzID, err = channelz.RegisterListenSocket(ls, s.channelzID, lis.Addr().String()) + if err != nil { + s.mu.Unlock() + return err + } + s.mu.Unlock() + channelz.Info(logger, ls.channelzID, "ListenSocket created") + var tempDelay time.Duration // how long to sleep on accept failure for { rawConn, err := lis.Accept() if err != nil { @@ -866,7 +877,7 @@ func (s *Server) newHTTP2Transport(c net.Conn) transport.ServerTransport { ConnectionTimeout: s.opts.connectionTimeout, Credentials: s.opts.creds, InTapHandle: s.opts.inTapHandle, - StatsHandler: s.opts.statsHandler, + StatsHandlers: s.opts.statsHandlers, KeepaliveParams: s.opts.keepaliveParams, KeepalivePolicy: s.opts.keepalivePolicy, InitialWindowSize: s.opts.initialWindowSize, @@ -962,7 +973,7 @@ var _ http.Handler = (*Server)(nil) // Notice: This API is EXPERIMENTAL and may be changed or removed in a // later release. func (s *Server) ServeHTTP(w http.ResponseWriter, r *http.Request) { - st, err := transport.NewServerHandlerTransport(w, r, s.opts.statsHandler) + st, err := transport.NewServerHandlerTransport(w, r, s.opts.statsHandlers) if err != nil { http.Error(w, err.Error(), http.StatusInternalServerError) return @@ -1075,8 +1086,10 @@ func (s *Server) sendResponse(t transport.ServerTransport, stream *transport.Str return status.Errorf(codes.ResourceExhausted, "grpc: trying to send message larger than max (%d vs. 
%d)", len(payload), s.opts.maxSendMessageSize) } err = t.Write(stream, hdr, payload, opts) - if err == nil && s.opts.statsHandler != nil { - s.opts.statsHandler.HandleRPC(stream.Context(), outPayload(false, msg, data, payload, time.Now())) + if err == nil { + for _, sh := range s.opts.statsHandlers { + sh.HandleRPC(stream.Context(), outPayload(false, msg, data, payload, time.Now())) + } } return err } @@ -1123,13 +1136,13 @@ func chainUnaryInterceptors(interceptors []UnaryServerInterceptor) UnaryServerIn } func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport.Stream, info *serviceInfo, md *MethodDesc, trInfo *traceInfo) (err error) { - sh := s.opts.statsHandler - if sh != nil || trInfo != nil || channelz.IsOn() { + shs := s.opts.statsHandlers + if len(shs) != 0 || trInfo != nil || channelz.IsOn() { if channelz.IsOn() { s.incrCallsStarted() } var statsBegin *stats.Begin - if sh != nil { + for _, sh := range shs { beginTime := time.Now() statsBegin = &stats.Begin{ BeginTime: beginTime, @@ -1160,7 +1173,7 @@ func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport. trInfo.tr.Finish() } - if sh != nil { + for _, sh := range shs { end := &stats.End{ BeginTime: statsBegin.BeginTime, EndTime: time.Now(), @@ -1242,7 +1255,7 @@ func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport. } var payInfo *payloadInfo - if sh != nil || binlog != nil { + if len(shs) != 0 || binlog != nil { payInfo = &payloadInfo{} } d, err := recvAndDecompress(&parser{r: stream}, stream, dc, s.opts.maxReceiveMessageSize, payInfo, decomp) @@ -1259,7 +1272,7 @@ func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport. if err := s.getCodec(stream.ContentSubtype()).Unmarshal(d, v); err != nil { return status.Errorf(codes.Internal, "grpc: error unmarshalling request: %v", err) } - if sh != nil { + for _, sh := range shs { sh.HandleRPC(stream.Context(), &stats.InPayload{ RecvTime: time.Now(), Payload: v, @@ -1283,9 +1296,10 @@ func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport. if appErr != nil { appStatus, ok := status.FromError(appErr) if !ok { - // Convert appErr if it is not a grpc status error. - appErr = status.Error(codes.Unknown, appErr.Error()) - appStatus, _ = status.FromError(appErr) + // Convert non-status application error to a status error with code + // Unknown, but handle context errors specifically. 
+ appStatus = status.FromContextError(appErr) + appErr = appStatus.Err() } if trInfo != nil { trInfo.tr.LazyLog(stringer(appStatus.Message()), true) @@ -1416,16 +1430,18 @@ func (s *Server) processStreamingRPC(t transport.ServerTransport, stream *transp if channelz.IsOn() { s.incrCallsStarted() } - sh := s.opts.statsHandler + shs := s.opts.statsHandlers var statsBegin *stats.Begin - if sh != nil { + if len(shs) != 0 { beginTime := time.Now() statsBegin = &stats.Begin{ BeginTime: beginTime, IsClientStream: sd.ClientStreams, IsServerStream: sd.ServerStreams, } - sh.HandleRPC(stream.Context(), statsBegin) + for _, sh := range shs { + sh.HandleRPC(stream.Context(), statsBegin) + } } ctx := NewContextWithServerTransportStream(stream.Context(), stream) ss := &serverStream{ @@ -1437,10 +1453,10 @@ func (s *Server) processStreamingRPC(t transport.ServerTransport, stream *transp maxReceiveMessageSize: s.opts.maxReceiveMessageSize, maxSendMessageSize: s.opts.maxSendMessageSize, trInfo: trInfo, - statsHandler: sh, + statsHandler: shs, } - if sh != nil || trInfo != nil || channelz.IsOn() { + if len(shs) != 0 || trInfo != nil || channelz.IsOn() { // See comment in processUnaryRPC on defers. defer func() { if trInfo != nil { @@ -1454,7 +1470,7 @@ func (s *Server) processStreamingRPC(t transport.ServerTransport, stream *transp ss.mu.Unlock() } - if sh != nil { + if len(shs) != 0 { end := &stats.End{ BeginTime: statsBegin.BeginTime, EndTime: time.Now(), @@ -1462,7 +1478,9 @@ func (s *Server) processStreamingRPC(t transport.ServerTransport, stream *transp if err != nil && err != io.EOF { end.Error = toRPCErr(err) } - sh.HandleRPC(stream.Context(), end) + for _, sh := range shs { + sh.HandleRPC(stream.Context(), end) + } } if channelz.IsOn() { @@ -1549,7 +1567,9 @@ func (s *Server) processStreamingRPC(t transport.ServerTransport, stream *transp if appErr != nil { appStatus, ok := status.FromError(appErr) if !ok { - appStatus = status.New(codes.Unknown, appErr.Error()) + // Convert non-status application error to a status error with code + // Unknown, but handle context errors specifically. + appStatus = status.FromContextError(appErr) appErr = appStatus.Err() } if trInfo != nil { @@ -1706,11 +1726,7 @@ func (s *Server) Stop() { s.done.Fire() }() - s.channelzRemoveOnce.Do(func() { - if channelz.IsOn() { - channelz.RemoveEntry(s.channelzID) - } - }) + s.channelzRemoveOnce.Do(func() { channelz.RemoveEntry(s.channelzID) }) s.mu.Lock() listeners := s.lis @@ -1748,11 +1764,7 @@ func (s *Server) GracefulStop() { s.quit.Fire() defer s.done.Fire() - s.channelzRemoveOnce.Do(func() { - if channelz.IsOn() { - channelz.RemoveEntry(s.channelzID) - } - }) + s.channelzRemoveOnce.Do(func() { channelz.RemoveEntry(s.channelzID) }) s.mu.Lock() if s.conns == nil { s.mu.Unlock() @@ -1805,12 +1817,26 @@ func (s *Server) getCodec(contentSubtype string) baseCodec { return codec } -// SetHeader sets the header metadata. -// When called multiple times, all the provided metadata will be merged. -// All the metadata will be sent out when one of the following happens: -// - grpc.SendHeader() is called; -// - The first response is sent out; -// - An RPC status is sent out (error or success). +// SetHeader sets the header metadata to be sent from the server to the client. +// The context provided must be the context passed to the server's handler. +// +// Streaming RPCs should prefer the SetHeader method of the ServerStream. +// +// When called multiple times, all the provided metadata will be merged. 
All +// the metadata will be sent out when one of the following happens: +// +// - grpc.SendHeader is called, or for streaming handlers, stream.SendHeader. +// - The first response message is sent. For unary handlers, this occurs when +// the handler returns; for streaming handlers, this can happen when stream's +// SendMsg method is called. +// - An RPC status is sent out (error or success). This occurs when the handler +// returns. +// +// SetHeader will fail if called after any of the events above. +// +// The error returned is compatible with the status package. However, the +// status code will often not match the RPC status as seen by the client +// application, and therefore, should not be relied upon for this purpose. func SetHeader(ctx context.Context, md metadata.MD) error { if md.Len() == 0 { return nil @@ -1822,8 +1848,14 @@ func SetHeader(ctx context.Context, md metadata.MD) error { return stream.SetHeader(md) } -// SendHeader sends header metadata. It may be called at most once. -// The provided md and headers set by SetHeader() will be sent. +// SendHeader sends header metadata. It may be called at most once, and may not +// be called after any event that causes headers to be sent (see SetHeader for +// a complete list). The provided md and headers set by SetHeader() will be +// sent. +// +// The error returned is compatible with the status package. However, the +// status code will often not match the RPC status as seen by the client +// application, and therefore, should not be relied upon for this purpose. func SendHeader(ctx context.Context, md metadata.MD) error { stream := ServerTransportStreamFromContext(ctx) if stream == nil { @@ -1837,6 +1869,10 @@ func SendHeader(ctx context.Context, md metadata.MD) error { // SetTrailer sets the trailer metadata that will be sent when an RPC returns. // When called more than once, all the provided metadata will be merged. +// +// The error returned is compatible with the status package. However, the +// status code will often not match the RPC status as seen by the client +// application, and therefore, should not be relied upon for this purpose. func SetTrailer(ctx context.Context, md metadata.MD) error { if md.Len() == 0 { return nil diff --git a/vendor/google.golang.org/grpc/service_config.go b/vendor/google.golang.org/grpc/service_config.go index 22c4240cf7..b01c548bb9 100644 --- a/vendor/google.golang.org/grpc/service_config.go +++ b/vendor/google.golang.org/grpc/service_config.go @@ -218,7 +218,7 @@ type jsonSC struct { } func init() { - internal.ParseServiceConfigForTesting = parseServiceConfig + internal.ParseServiceConfig = parseServiceConfig } func parseServiceConfig(js string) *serviceconfig.ParseResult { if len(js) == 0 { @@ -381,6 +381,9 @@ func init() { // // If any of them is NOT *ServiceConfig, return false. 
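The nil fast path added to equalServiceConfig below matters because a nil serviceconfig.Config fails the *ServiceConfig type assertion, so two nil configs used to compare unequal. A standalone analogue of the logic (the types here are illustrative, not the real ServiceConfig):

package main

import "fmt"

type config interface{ isConfig() }

type serviceConfig struct{ raw string }

func (*serviceConfig) isConfig() {}

// equal mirrors equalServiceConfig: the nil/nil fast path short-circuits
// before the type assertions, which would otherwise fail and report false.
func equal(a, b config) bool {
    if a == nil && b == nil {
        return true
    }
    aa, ok := a.(*serviceConfig)
    if !ok {
        return false
    }
    bb, ok := b.(*serviceConfig)
    if !ok {
        return false
    }
    return aa.raw == bb.raw
}

func main() {
    fmt.Println(equal(nil, nil))                                             // true (previously false)
    fmt.Println(equal(nil, &serviceConfig{raw: "{}"}))                       // false
    fmt.Println(equal(&serviceConfig{raw: "{}"}, &serviceConfig{raw: "{}"})) // true
}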
func equalServiceConfig(a, b serviceconfig.Config) bool { + if a == nil && b == nil { + return true + } aa, ok := a.(*ServiceConfig) if !ok { return false diff --git a/vendor/google.golang.org/grpc/stream.go b/vendor/google.golang.org/grpc/stream.go index 625d47b34e..6d82e0d7cc 100644 --- a/vendor/google.golang.org/grpc/stream.go +++ b/vendor/google.golang.org/grpc/stream.go @@ -36,6 +36,7 @@ import ( "google.golang.org/grpc/internal/channelz" "google.golang.org/grpc/internal/grpcrand" "google.golang.org/grpc/internal/grpcutil" + imetadata "google.golang.org/grpc/internal/metadata" iresolver "google.golang.org/grpc/internal/resolver" "google.golang.org/grpc/internal/serviceconfig" "google.golang.org/grpc/internal/transport" @@ -46,10 +47,12 @@ import ( ) // StreamHandler defines the handler called by gRPC server to complete the -// execution of a streaming RPC. If a StreamHandler returns an error, it -// should be produced by the status package, or else gRPC will use -// codes.Unknown as the status code and err.Error() as the status message -// of the RPC. +// execution of a streaming RPC. +// +// If a StreamHandler returns an error, it should either be produced by the +// status package, or be one of the context errors. Otherwise, gRPC will use +// codes.Unknown as the status code and err.Error() as the status message of the +// RPC. type StreamHandler func(srv interface{}, stream ServerStream) error // StreamDesc represents a streaming RPC service's method specification. Used @@ -164,6 +167,11 @@ func NewClientStream(ctx context.Context, desc *StreamDesc, cc *ClientConn, meth } func newClientStream(ctx context.Context, desc *StreamDesc, cc *ClientConn, method string, opts ...CallOption) (_ ClientStream, err error) { + if md, _, ok := metadata.FromOutgoingContextRaw(ctx); ok { + if err := imetadata.Validate(md); err != nil { + return nil, status.Error(codes.Internal, err.Error()) + } + } if channelz.IsOn() { cc.incrCallsStarted() defer func() { @@ -295,14 +303,28 @@ func newClientStreamWithParams(ctx context.Context, desc *StreamDesc, cc *Client } cs.binlog = binarylog.GetMethodLogger(method) - if err := cs.newAttemptLocked(false /* isTransparent */); err != nil { + cs.attempt, err = cs.newAttemptLocked(false /* isTransparent */) + if err != nil { cs.finish(err) return nil, err } - op := func(a *csAttempt) error { return a.newStream() } + // Pick the transport to use and create a new stream on the transport. + // Assign cs.attempt upon success. + op := func(a *csAttempt) error { + if err := a.getTransport(); err != nil { + return err + } + if err := a.newStream(); err != nil { + return err + } + // Because this operation is always called either here (while creating + // the clientStream) or by the retry code while locked when replaying + // the operation, it is safe to access cs.attempt directly. + cs.attempt = a + return nil + } if err := cs.withRetry(op, func() { cs.bufferForRetryLocked(0, op) }); err != nil { - cs.finish(err) return nil, err } @@ -341,14 +363,20 @@ func newClientStreamWithParams(ctx context.Context, desc *StreamDesc, cc *Client return cs, nil } -// newAttemptLocked creates a new attempt with a transport. -// If it succeeds, then it replaces clientStream's attempt with this new attempt. -func (cs *clientStream) newAttemptLocked(isTransparent bool) (retErr error) { +// newAttemptLocked creates a new csAttempt without a transport or stream. 
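Per the StreamHandler doc update above and the matching server.go hunks, handler errors that are context errors are now converted with status.FromContextError instead of being lumped into codes.Unknown. The mapping itself is easy to see in isolation:

package main

import (
    "context"
    "fmt"

    "google.golang.org/grpc/status"
)

func main() {
    ctx, cancel := context.WithCancel(context.Background())
    cancel()
    // status.FromContextError is what the server now applies to non-status
    // handler errors; context errors get their proper codes.
    fmt.Println(status.FromContextError(ctx.Err()).Code()) // Canceled

    ctx2, cancel2 := context.WithTimeout(context.Background(), 0)
    defer cancel2()
    <-ctx2.Done()
    fmt.Println(status.FromContextError(ctx2.Err()).Code()) // DeadlineExceeded
}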
+func (cs *clientStream) newAttemptLocked(isTransparent bool) (*csAttempt, error) { + if err := cs.ctx.Err(); err != nil { + return nil, toRPCErr(err) + } + if err := cs.cc.ctx.Err(); err != nil { + return nil, ErrClientConnClosing + } + ctx := newContextWithRPCInfo(cs.ctx, cs.callInfo.failFast, cs.callInfo.codec, cs.cp, cs.comp) method := cs.callHdr.Method - sh := cs.cc.dopts.copts.StatsHandler var beginTime time.Time - if sh != nil { + shs := cs.cc.dopts.copts.StatsHandlers + for _, sh := range shs { ctx = sh.TagRPC(ctx, &stats.RPCTagInfo{FullMethodName: method, FailFast: cs.callInfo.failFast}) beginTime = time.Now() begin := &stats.Begin{ @@ -377,27 +405,6 @@ func (cs *clientStream) newAttemptLocked(isTransparent bool) (retErr error) { ctx = trace.NewContext(ctx, trInfo.tr) } - newAttempt := &csAttempt{ - ctx: ctx, - beginTime: beginTime, - cs: cs, - dc: cs.cc.dopts.dc, - statsHandler: sh, - trInfo: trInfo, - } - defer func() { - if retErr != nil { - // This attempt is not set in the clientStream, so it's finish won't - // be called. Call it here for stats and trace in case they are not - // nil. - newAttempt.finish(retErr) - } - }() - - if err := ctx.Err(); err != nil { - return toRPCErr(err) - } - if cs.cc.parsedTarget.Scheme == "xds" { // Add extra metadata (metadata that will be added by transport) to context // so the balancer can see them. @@ -405,16 +412,32 @@ func (cs *clientStream) newAttemptLocked(isTransparent bool) (retErr error) { "content-type", grpcutil.ContentType(cs.callHdr.ContentSubtype), )) } - t, done, err := cs.cc.getTransport(ctx, cs.callInfo.failFast, cs.callHdr.Method) + + return &csAttempt{ + ctx: ctx, + beginTime: beginTime, + cs: cs, + dc: cs.cc.dopts.dc, + statsHandlers: shs, + trInfo: trInfo, + }, nil +} + +func (a *csAttempt) getTransport() error { + cs := a.cs + + var err error + a.t, a.done, err = cs.cc.getTransport(a.ctx, cs.callInfo.failFast, cs.callHdr.Method) if err != nil { + if de, ok := err.(dropError); ok { + err = de.error + a.drop = true + } return err } - if trInfo != nil { - trInfo.firstLine.SetRemoteAddr(t.RemoteAddr()) + if a.trInfo != nil { + a.trInfo.firstLine.SetRemoteAddr(a.t.RemoteAddr()) } - newAttempt.t = t - newAttempt.done = done - cs.attempt = newAttempt return nil } @@ -423,12 +446,21 @@ func (a *csAttempt) newStream() error { cs.callHdr.PreviousAttempts = cs.numRetries s, err := a.t.NewStream(a.ctx, cs.callHdr) if err != nil { - // Return without converting to an RPC error so retry code can - // inspect. - return err + nse, ok := err.(*transport.NewStreamError) + if !ok { + // Unexpected. + return err + } + + if nse.AllowTransparentRetry { + a.allowTransparentRetry = true + } + + // Unwrap and convert error. + return toRPCErr(nse.Err) } - cs.attempt.s = s - cs.attempt.p = &parser{r: s} + a.s = s + a.p = &parser{r: s} return nil } @@ -454,7 +486,7 @@ type clientStream struct { retryThrottler *retryThrottler // The throttler active when the RPC began. - binlog *binarylog.MethodLogger // Binary logger, can be nil. + binlog binarylog.MethodLogger // Binary logger, can be nil. // serverHeaderBinlogged is a boolean for whether server header has been // logged. Server header will be logged when the first time one of those // happens: stream.Header(), stream.Recv(). @@ -504,8 +536,13 @@ type csAttempt struct { // and cleared when the finish method is called. 
trInfo *traceInfo - statsHandler stats.Handler - beginTime time.Time + statsHandlers []stats.Handler + beginTime time.Time + + // set for newStream errors that may be transparently retried + allowTransparentRetry bool + // set for pick errors that are returned as a status + drop bool } func (cs *clientStream) commitAttemptLocked() { @@ -525,41 +562,21 @@ func (cs *clientStream) commitAttempt() { // shouldRetry returns nil if the RPC should be retried; otherwise it returns // the error that should be returned by the operation. If the RPC should be // retried, the bool indicates whether it is being retried transparently. -func (cs *clientStream) shouldRetry(err error) (bool, error) { - if cs.attempt.s == nil { - // Error from NewClientStream. - nse, ok := err.(*transport.NewStreamError) - if !ok { - // Unexpected, but assume no I/O was performed and the RPC is not - // fatal, so retry indefinitely. - return true, nil - } - - // Unwrap and convert error. - err = toRPCErr(nse.Err) - - // Never retry DoNotRetry errors, which indicate the RPC should not be - // retried due to max header list size violation, etc. - if nse.DoNotRetry { - return false, err - } +func (a *csAttempt) shouldRetry(err error) (bool, error) { + cs := a.cs - // In the event of a non-IO operation error from NewStream, we never - // attempted to write anything to the wire, so we can retry - // indefinitely. - if !nse.DoNotTransparentRetry { - return true, nil - } - } - if cs.finished || cs.committed { - // RPC is finished or committed; cannot retry. + if cs.finished || cs.committed || a.drop { + // RPC is finished or committed or was dropped by the picker; cannot retry. return false, err } + if a.s == nil && a.allowTransparentRetry { + return true, nil + } // Wait for the trailers. unprocessed := false - if cs.attempt.s != nil { - <-cs.attempt.s.Done() - unprocessed = cs.attempt.s.Unprocessed() + if a.s != nil { + <-a.s.Done() + unprocessed = a.s.Unprocessed() } if cs.firstAttempt && unprocessed { // First attempt, stream unprocessed: transparently retry. @@ -571,14 +588,14 @@ func (cs *clientStream) shouldRetry(err error) (bool, error) { pushback := 0 hasPushback := false - if cs.attempt.s != nil { - if !cs.attempt.s.TrailersOnly() { + if a.s != nil { + if !a.s.TrailersOnly() { return false, err } // TODO(retry): Move down if the spec changes to not check server pushback // before considering this a failure for throttling. - sps := cs.attempt.s.Trailer()["grpc-retry-pushback-ms"] + sps := a.s.Trailer()["grpc-retry-pushback-ms"] if len(sps) == 1 { var e error if pushback, e = strconv.Atoi(sps[0]); e != nil || pushback < 0 { @@ -595,10 +612,10 @@ func (cs *clientStream) shouldRetry(err error) (bool, error) { } var code codes.Code - if cs.attempt.s != nil { - code = cs.attempt.s.Status().Code() + if a.s != nil { + code = a.s.Status().Code() } else { - code = status.Convert(err).Code() + code = status.Code(err) } rp := cs.methodConfig.RetryPolicy @@ -643,19 +660,24 @@ func (cs *clientStream) shouldRetry(err error) (bool, error) { } // Returns nil if a retry was performed and succeeded; error otherwise. 
-func (cs *clientStream) retryLocked(lastErr error) error { +func (cs *clientStream) retryLocked(attempt *csAttempt, lastErr error) error { for { - cs.attempt.finish(toRPCErr(lastErr)) - isTransparent, err := cs.shouldRetry(lastErr) + attempt.finish(toRPCErr(lastErr)) + isTransparent, err := attempt.shouldRetry(lastErr) if err != nil { cs.commitAttemptLocked() return err } cs.firstAttempt = false - if err := cs.newAttemptLocked(isTransparent); err != nil { + attempt, err = cs.newAttemptLocked(isTransparent) + if err != nil { + // Only returns error if the clientconn is closed or the context of + // the stream is canceled. return err } - if lastErr = cs.replayBufferLocked(); lastErr == nil { + // Note that the first op in the replay buffer always sets cs.attempt + // if it is able to pick a transport and create a stream. + if lastErr = cs.replayBufferLocked(attempt); lastErr == nil { return nil } } @@ -665,7 +687,10 @@ func (cs *clientStream) Context() context.Context { cs.commitAttempt() // No need to lock before using attempt, since we know it is committed and // cannot change. - return cs.attempt.s.Context() + if cs.attempt.s != nil { + return cs.attempt.s.Context() + } + return cs.ctx } func (cs *clientStream) withRetry(op func(a *csAttempt) error, onSuccess func()) error { @@ -695,7 +720,7 @@ func (cs *clientStream) withRetry(op func(a *csAttempt) error, onSuccess func()) cs.mu.Unlock() return err } - if err := cs.retryLocked(err); err != nil { + if err := cs.retryLocked(a, err); err != nil { cs.mu.Unlock() return err } @@ -726,7 +751,7 @@ func (cs *clientStream) Header() (metadata.MD, error) { cs.binlog.Log(logEntry) cs.serverHeaderBinlogged = true } - return m, err + return m, nil } func (cs *clientStream) Trailer() metadata.MD { @@ -744,10 +769,9 @@ func (cs *clientStream) Trailer() metadata.MD { return cs.attempt.s.Trailer() } -func (cs *clientStream) replayBufferLocked() error { - a := cs.attempt +func (cs *clientStream) replayBufferLocked(attempt *csAttempt) error { for _, f := range cs.buffer { - if err := f(a); err != nil { + if err := f(attempt); err != nil { return err } } @@ -795,22 +819,17 @@ func (cs *clientStream) SendMsg(m interface{}) (err error) { if len(payload) > *cs.callInfo.maxSendMessageSize { return status.Errorf(codes.ResourceExhausted, "trying to send message larger than max (%d vs. %d)", len(payload), *cs.callInfo.maxSendMessageSize) } - msgBytes := data // Store the pointer before setting to nil. For binary logging. op := func(a *csAttempt) error { - err := a.sendMsg(m, hdr, payload, data) - // nil out the message and uncomp when replaying; they are only needed for - // stats which is disabled for subsequent attempts. 
- m, data = nil, nil - return err + return a.sendMsg(m, hdr, payload, data) } err = cs.withRetry(op, func() { cs.bufferForRetryLocked(len(hdr)+len(payload), op) }) if cs.binlog != nil && err == nil { cs.binlog.Log(&binarylog.ClientMessage{ OnClientSide: true, - Message: msgBytes, + Message: data, }) } - return + return err } func (cs *clientStream) RecvMsg(m interface{}) error { @@ -941,8 +960,8 @@ func (a *csAttempt) sendMsg(m interface{}, hdr, payld, data []byte) error { } return io.EOF } - if a.statsHandler != nil { - a.statsHandler.HandleRPC(a.ctx, outPayload(true, m, data, payld, time.Now())) + for _, sh := range a.statsHandlers { + sh.HandleRPC(a.ctx, outPayload(true, m, data, payld, time.Now())) } if channelz.IsOn() { a.t.IncrMsgSent() @@ -952,7 +971,7 @@ func (a *csAttempt) sendMsg(m interface{}, hdr, payld, data []byte) error { func (a *csAttempt) recvMsg(m interface{}, payInfo *payloadInfo) (err error) { cs := a.cs - if a.statsHandler != nil && payInfo == nil { + if len(a.statsHandlers) != 0 && payInfo == nil { payInfo = &payloadInfo{} } @@ -989,8 +1008,8 @@ func (a *csAttempt) recvMsg(m interface{}, payInfo *payloadInfo) (err error) { } a.mu.Unlock() } - if a.statsHandler != nil { - a.statsHandler.HandleRPC(a.ctx, &stats.InPayload{ + for _, sh := range a.statsHandlers { + sh.HandleRPC(a.ctx, &stats.InPayload{ Client: true, RecvTime: time.Now(), Payload: m, @@ -1049,7 +1068,7 @@ func (a *csAttempt) finish(err error) { ServerLoad: balancerload.Parse(tr), }) } - if a.statsHandler != nil { + for _, sh := range a.statsHandlers { end := &stats.End{ Client: true, BeginTime: a.beginTime, @@ -1057,7 +1076,7 @@ func (a *csAttempt) finish(err error) { Trailer: tr, Error: err, } - a.statsHandler.HandleRPC(a.ctx, end) + sh.HandleRPC(a.ctx, end) } if a.trInfo != nil && a.trInfo.tr != nil { if err == nil { @@ -1362,8 +1381,10 @@ func (as *addrConnStream) finish(err error) { // ServerStream defines the server-side behavior of a streaming RPC. // -// All errors returned from ServerStream methods are compatible with the -// status package. +// Errors returned from ServerStream methods are compatible with the status +// package. However, the status code will often not match the RPC status as +// seen by the client application, and therefore, should not be relied upon for +// this purpose. type ServerStream interface { // SetHeader sets the header metadata. It may be called multiple times. // When call multiple times, all the provided metadata will be merged. @@ -1424,9 +1445,9 @@ type serverStream struct { maxSendMessageSize int trInfo *traceInfo - statsHandler stats.Handler + statsHandler []stats.Handler - binlog *binarylog.MethodLogger + binlog binarylog.MethodLogger // serverHeaderBinlogged indicates whether server header has been logged. It // will happen when one of the following two happens: stream.SendHeader(), // stream.Send(). 
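The serverStream hunks that follow make SetHeader and SendHeader reject metadata that fails internal validation with a codes.Internal status, while SetTrailer only logs the failure. A minimal sketch of a unary handler exercising these calls, assuming a hypothetical pb package and server type generated from a Greeter-style service (not part of this patch):

import (
	"context"

	"google.golang.org/grpc"
	"google.golang.org/grpc/metadata"
)

// server and pb are hypothetical stand-ins for a generated gRPC service.
func (s *server) SayHello(ctx context.Context, in *pb.HelloRequest) (*pb.HelloReply, error) {
	// Header keys must be valid metadata keys; with this patch an illegal
	// key or value surfaces as a codes.Internal error on the streaming
	// server instead of corrupting the HTTP/2 stream.
	if err := grpc.SetHeader(ctx, metadata.Pairs("x-request-id", "42")); err != nil {
		return nil, err
	}
	// Trailer metadata is merged across calls and sent when the RPC
	// returns. (On the streaming side, the patch logs validation failures
	// rather than returning them.)
	if err := grpc.SetTrailer(ctx, metadata.Pairs("x-served-by", "stork")); err != nil {
		return nil, err
	}
	return &pb.HelloReply{Message: "hello " + in.GetName()}, nil
}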
@@ -1446,11 +1467,20 @@ func (ss *serverStream) SetHeader(md metadata.MD) error { if md.Len() == 0 { return nil } + err := imetadata.Validate(md) + if err != nil { + return status.Error(codes.Internal, err.Error()) + } return ss.s.SetHeader(md) } func (ss *serverStream) SendHeader(md metadata.MD) error { - err := ss.t.WriteHeader(ss.s, md) + err := imetadata.Validate(md) + if err != nil { + return status.Error(codes.Internal, err.Error()) + } + + err = ss.t.WriteHeader(ss.s, md) if ss.binlog != nil && !ss.serverHeaderBinlogged { h, _ := ss.s.Header() ss.binlog.Log(&binarylog.ServerHeader{ @@ -1465,6 +1495,9 @@ func (ss *serverStream) SetTrailer(md metadata.MD) { if md.Len() == 0 { return } + if err := imetadata.Validate(md); err != nil { + logger.Errorf("stream: failed to validate md when setting trailer, err: %v", err) + } ss.s.SetTrailer(md) } @@ -1522,8 +1555,10 @@ func (ss *serverStream) SendMsg(m interface{}) (err error) { Message: data, }) } - if ss.statsHandler != nil { - ss.statsHandler.HandleRPC(ss.s.Context(), outPayload(false, m, data, payload, time.Now())) + if len(ss.statsHandler) != 0 { + for _, sh := range ss.statsHandler { + sh.HandleRPC(ss.s.Context(), outPayload(false, m, data, payload, time.Now())) + } } return nil } @@ -1557,7 +1592,7 @@ func (ss *serverStream) RecvMsg(m interface{}) (err error) { } }() var payInfo *payloadInfo - if ss.statsHandler != nil || ss.binlog != nil { + if len(ss.statsHandler) != 0 || ss.binlog != nil { payInfo = &payloadInfo{} } if err := recv(ss.p, ss.codec, ss.s, ss.dc, m, ss.maxReceiveMessageSize, payInfo, ss.decomp); err != nil { @@ -1572,15 +1607,17 @@ func (ss *serverStream) RecvMsg(m interface{}) (err error) { } return toRPCErr(err) } - if ss.statsHandler != nil { - ss.statsHandler.HandleRPC(ss.s.Context(), &stats.InPayload{ - RecvTime: time.Now(), - Payload: m, - // TODO truncate large payload. - Data: payInfo.uncompressedBytes, - WireLength: payInfo.wireLength + headerLen, - Length: len(payInfo.uncompressedBytes), - }) + if len(ss.statsHandler) != 0 { + for _, sh := range ss.statsHandler { + sh.HandleRPC(ss.s.Context(), &stats.InPayload{ + RecvTime: time.Now(), + Payload: m, + // TODO truncate large payload. + Data: payInfo.uncompressedBytes, + WireLength: payInfo.wireLength + headerLen, + Length: len(payInfo.uncompressedBytes), + }) + } } if ss.binlog != nil { ss.binlog.Log(&binarylog.ClientMessage{ diff --git a/vendor/google.golang.org/grpc/version.go b/vendor/google.golang.org/grpc/version.go index 8ef0958797..0eb2998cbe 100644 --- a/vendor/google.golang.org/grpc/version.go +++ b/vendor/google.golang.org/grpc/version.go @@ -19,4 +19,4 @@ package grpc // Version is the current grpc version. -const Version = "1.43.0" +const Version = "1.48.0" diff --git a/vendor/google.golang.org/grpc/vet.sh b/vendor/google.golang.org/grpc/vet.sh index d923187a7b..ceb436c6ce 100644 --- a/vendor/google.golang.org/grpc/vet.sh +++ b/vendor/google.golang.org/grpc/vet.sh @@ -107,7 +107,7 @@ for MOD_FILE in $(find . -name 'go.mod'); do go vet -all ./... | fail_on_output gofmt -s -d -l . 2>&1 | fail_on_output goimports -l . 2>&1 | not grep -vE "\.pb\.go" - golint ./... 2>&1 | not grep -vE "/testv3\.pb\.go:" + golint ./... 
2>&1 | not grep -vE "/grpc_testing_not_regenerate/.*\.pb\.go:" go mod tidy git status --porcelain 2>&1 | fail_on_output || \ diff --git a/vendor/gopkg.in/yaml.v3/decode.go b/vendor/gopkg.in/yaml.v3/decode.go index df36e3a30f..0173b6982e 100644 --- a/vendor/gopkg.in/yaml.v3/decode.go +++ b/vendor/gopkg.in/yaml.v3/decode.go @@ -100,7 +100,10 @@ func (p *parser) peek() yaml_event_type_t { if p.event.typ != yaml_NO_EVENT { return p.event.typ } - if !yaml_parser_parse(&p.parser, &p.event) { + // It's curious choice from the underlying API to generally return a + // positive result on success, but on this case return true in an error + // scenario. This was the source of bugs in the past (issue #666). + if !yaml_parser_parse(&p.parser, &p.event) || p.parser.error != yaml_NO_ERROR { p.fail() } return p.event.typ @@ -320,6 +323,8 @@ type decoder struct { decodeCount int aliasCount int aliasDepth int + + mergedFields map[interface{}]bool } var ( @@ -808,6 +813,11 @@ func (d *decoder) mapping(n *Node, out reflect.Value) (good bool) { } } + mergedFields := d.mergedFields + d.mergedFields = nil + + var mergeNode *Node + mapIsNew := false if out.IsNil() { out.Set(reflect.MakeMap(outt)) @@ -815,11 +825,18 @@ func (d *decoder) mapping(n *Node, out reflect.Value) (good bool) { } for i := 0; i < l; i += 2 { if isMerge(n.Content[i]) { - d.merge(n.Content[i+1], out) + mergeNode = n.Content[i+1] continue } k := reflect.New(kt).Elem() if d.unmarshal(n.Content[i], k) { + if mergedFields != nil { + ki := k.Interface() + if mergedFields[ki] { + continue + } + mergedFields[ki] = true + } kkind := k.Kind() if kkind == reflect.Interface { kkind = k.Elem().Kind() @@ -833,6 +850,12 @@ func (d *decoder) mapping(n *Node, out reflect.Value) (good bool) { } } } + + d.mergedFields = mergedFields + if mergeNode != nil { + d.merge(n, mergeNode, out) + } + d.stringMapType = stringMapType d.generalMapType = generalMapType return true @@ -844,7 +867,8 @@ func isStringMap(n *Node) bool { } l := len(n.Content) for i := 0; i < l; i += 2 { - if n.Content[i].ShortTag() != strTag { + shortTag := n.Content[i].ShortTag() + if shortTag != strTag && shortTag != mergeTag { return false } } @@ -861,7 +885,6 @@ func (d *decoder) mappingStruct(n *Node, out reflect.Value) (good bool) { var elemType reflect.Type if sinfo.InlineMap != -1 { inlineMap = out.Field(sinfo.InlineMap) - inlineMap.Set(reflect.New(inlineMap.Type()).Elem()) elemType = inlineMap.Type().Elem() } @@ -870,6 +893,9 @@ func (d *decoder) mappingStruct(n *Node, out reflect.Value) (good bool) { d.prepare(n, field) } + mergedFields := d.mergedFields + d.mergedFields = nil + var mergeNode *Node var doneFields []bool if d.uniqueKeys { doneFields = make([]bool, len(sinfo.FieldsList)) @@ -879,13 +905,20 @@ func (d *decoder) mappingStruct(n *Node, out reflect.Value) (good bool) { for i := 0; i < l; i += 2 { ni := n.Content[i] if isMerge(ni) { - d.merge(n.Content[i+1], out) + mergeNode = n.Content[i+1] continue } if !d.unmarshal(ni, name) { continue } - if info, ok := sinfo.FieldsMap[name.String()]; ok { + sname := name.String() + if mergedFields != nil { + if mergedFields[sname] { + continue + } + mergedFields[sname] = true + } + if info, ok := sinfo.FieldsMap[sname]; ok { if d.uniqueKeys { if doneFields[info.Id] { d.terrors = append(d.terrors, fmt.Sprintf("line %d: field %s already set in type %s", ni.Line, name.String(), out.Type())) @@ -911,6 +944,11 @@ func (d *decoder) mappingStruct(n *Node, out reflect.Value) (good bool) { d.terrors = append(d.terrors, fmt.Sprintf("line %d: field 
%s not found in type %s", ni.Line, name.String(), out.Type())) } } + + d.mergedFields = mergedFields + if mergeNode != nil { + d.merge(n, mergeNode, out) + } return true } @@ -918,19 +956,29 @@ func failWantMap() { failf("map merge requires map or sequence of maps as the value") } -func (d *decoder) merge(n *Node, out reflect.Value) { - switch n.Kind { +func (d *decoder) merge(parent *Node, merge *Node, out reflect.Value) { + mergedFields := d.mergedFields + if mergedFields == nil { + d.mergedFields = make(map[interface{}]bool) + for i := 0; i < len(parent.Content); i += 2 { + k := reflect.New(ifaceType).Elem() + if d.unmarshal(parent.Content[i], k) { + d.mergedFields[k.Interface()] = true + } + } + } + + switch merge.Kind { case MappingNode: - d.unmarshal(n, out) + d.unmarshal(merge, out) case AliasNode: - if n.Alias != nil && n.Alias.Kind != MappingNode { + if merge.Alias != nil && merge.Alias.Kind != MappingNode { failWantMap() } - d.unmarshal(n, out) + d.unmarshal(merge, out) case SequenceNode: - // Step backwards as earlier nodes take precedence. - for i := len(n.Content) - 1; i >= 0; i-- { - ni := n.Content[i] + for i := 0; i < len(merge.Content); i++ { + ni := merge.Content[i] if ni.Kind == AliasNode { if ni.Alias != nil && ni.Alias.Kind != MappingNode { failWantMap() @@ -943,6 +991,8 @@ func (d *decoder) merge(n *Node, out reflect.Value) { default: failWantMap() } + + d.mergedFields = mergedFields } func isMerge(n *Node) bool { diff --git a/vendor/gopkg.in/yaml.v3/parserc.go b/vendor/gopkg.in/yaml.v3/parserc.go index ac66fccc05..268558a0d6 100644 --- a/vendor/gopkg.in/yaml.v3/parserc.go +++ b/vendor/gopkg.in/yaml.v3/parserc.go @@ -687,6 +687,9 @@ func yaml_parser_parse_node(parser *yaml_parser_t, event *yaml_event_t, block, i func yaml_parser_parse_block_sequence_entry(parser *yaml_parser_t, event *yaml_event_t, first bool) bool { if first { token := peek_token(parser) + if token == nil { + return false + } parser.marks = append(parser.marks, token.start_mark) skip_token(parser) } @@ -786,7 +789,7 @@ func yaml_parser_split_stem_comment(parser *yaml_parser_t, stem_len int) { } token := peek_token(parser) - if token.typ != yaml_BLOCK_SEQUENCE_START_TOKEN && token.typ != yaml_BLOCK_MAPPING_START_TOKEN { + if token == nil || token.typ != yaml_BLOCK_SEQUENCE_START_TOKEN && token.typ != yaml_BLOCK_MAPPING_START_TOKEN { return } @@ -813,6 +816,9 @@ func yaml_parser_split_stem_comment(parser *yaml_parser_t, stem_len int) { func yaml_parser_parse_block_mapping_key(parser *yaml_parser_t, event *yaml_event_t, first bool) bool { if first { token := peek_token(parser) + if token == nil { + return false + } parser.marks = append(parser.marks, token.start_mark) skip_token(parser) } @@ -922,6 +928,9 @@ func yaml_parser_parse_block_mapping_value(parser *yaml_parser_t, event *yaml_ev func yaml_parser_parse_flow_sequence_entry(parser *yaml_parser_t, event *yaml_event_t, first bool) bool { if first { token := peek_token(parser) + if token == nil { + return false + } parser.marks = append(parser.marks, token.start_mark) skip_token(parser) } diff --git a/vendor/helm.sh/helm/v3/pkg/action/install.go b/vendor/helm.sh/helm/v3/pkg/action/install.go index af99717d13..25274fcd26 100644 --- a/vendor/helm.sh/helm/v3/pkg/action/install.go +++ b/vendor/helm.sh/helm/v3/pkg/action/install.go @@ -20,6 +20,7 @@ import ( "bytes" "fmt" "io/ioutil" + "net/url" "os" "path" "path/filepath" @@ -113,6 +114,7 @@ type ChartPathOptions struct { InsecureSkipTLSverify bool // --insecure-skip-verify Keyring string // --keyring 
Password string // --password + PassCredentialsAll bool // --pass-credentials RepoURL string // --repo Username string // --username Verify bool // --verify @@ -654,7 +656,7 @@ func (c *ChartPathOptions) LocateChart(name string, settings *cli.EnvSettings) ( Keyring: c.Keyring, Getters: getter.All(settings), Options: []getter.Option{ - getter.WithBasicAuth(c.Username, c.Password), + getter.WithPassCredentialsAll(c.PassCredentialsAll), getter.WithTLSClientConfig(c.CertFile, c.KeyFile, c.CaFile), getter.WithInsecureSkipVerifyTLS(c.InsecureSkipTLSverify), }, @@ -665,12 +667,34 @@ func (c *ChartPathOptions) LocateChart(name string, settings *cli.EnvSettings) ( dl.Verify = downloader.VerifyAlways } if c.RepoURL != "" { - chartURL, err := repo.FindChartInAuthAndTLSRepoURL(c.RepoURL, c.Username, c.Password, name, version, - c.CertFile, c.KeyFile, c.CaFile, c.InsecureSkipTLSverify, getter.All(settings)) + chartURL, err := repo.FindChartInAuthAndTLSAndPassRepoURL(c.RepoURL, c.Username, c.Password, name, version, + c.CertFile, c.KeyFile, c.CaFile, c.InsecureSkipTLSverify, c.PassCredentialsAll, getter.All(settings)) if err != nil { return "", err } name = chartURL + + // Only pass the user/pass on when the user has said to or when the + // location of the chart repo and the chart are the same domain. + u1, err := url.Parse(c.RepoURL) + if err != nil { + return "", err + } + u2, err := url.Parse(chartURL) + if err != nil { + return "", err + } + + // Host on URL (returned from url.Parse) contains the port if present. + // This check ensures credentials are not passed between different + // services on different ports. + if c.PassCredentialsAll || (u1.Scheme == u2.Scheme && u1.Host == u2.Host) { + dl.Options = append(dl.Options, getter.WithBasicAuth(c.Username, c.Password)) + } else { + dl.Options = append(dl.Options, getter.WithBasicAuth("", "")) + } + } else { + dl.Options = append(dl.Options, getter.WithBasicAuth(c.Username, c.Password)) } if err := os.MkdirAll(settings.RepositoryCache, 0755); err != nil { diff --git a/vendor/helm.sh/helm/v3/pkg/action/pull.go b/vendor/helm.sh/helm/v3/pkg/action/pull.go index 04faa3b6bb..fa1247054b 100644 --- a/vendor/helm.sh/helm/v3/pkg/action/pull.go +++ b/vendor/helm.sh/helm/v3/pkg/action/pull.go @@ -82,6 +82,7 @@ func (p *Pull) Run(chartRef string) (string, error) { Getters: getter.All(p.Settings), Options: []getter.Option{ getter.WithBasicAuth(p.Username, p.Password), + getter.WithPassCredentialsAll(p.PassCredentialsAll), getter.WithTLSClientConfig(p.CertFile, p.KeyFile, p.CaFile), getter.WithInsecureSkipVerifyTLS(p.InsecureSkipTLSverify), }, @@ -118,7 +119,7 @@ func (p *Pull) Run(chartRef string) (string, error) { } if p.RepoURL != "" { - chartURL, err := repo.FindChartInAuthAndTLSRepoURL(p.RepoURL, p.Username, p.Password, chartRef, p.Version, p.CertFile, p.KeyFile, p.CaFile, p.InsecureSkipTLSverify, getter.All(p.Settings)) + chartURL, err := repo.FindChartInAuthAndTLSAndPassRepoURL(p.RepoURL, p.Username, p.Password, chartRef, p.Version, p.CertFile, p.KeyFile, p.CaFile, p.InsecureSkipTLSverify, p.PassCredentialsAll, getter.All(p.Settings)) if err != nil { return out.String(), err } diff --git a/vendor/helm.sh/helm/v3/pkg/downloader/chart_downloader.go b/vendor/helm.sh/helm/v3/pkg/downloader/chart_downloader.go index 6c600bebb7..2c0d55a556 100644 --- a/vendor/helm.sh/helm/v3/pkg/downloader/chart_downloader.go +++ b/vendor/helm.sh/helm/v3/pkg/downloader/chart_downloader.go @@ -195,6 +195,7 @@ func (c *ChartDownloader) ResolveChartVersion(ref, version string) 
(*url.URL, er c.Options = append( c.Options, getter.WithBasicAuth(rc.Username, rc.Password), + getter.WithPassCredentialsAll(rc.PassCredentialsAll), ) } return u, nil @@ -224,7 +225,10 @@ func (c *ChartDownloader) ResolveChartVersion(ref, version string) (*url.URL, er c.Options = append(c.Options, getter.WithTLSClientConfig(r.Config.CertFile, r.Config.KeyFile, r.Config.CAFile)) } if r.Config.Username != "" && r.Config.Password != "" { - c.Options = append(c.Options, getter.WithBasicAuth(r.Config.Username, r.Config.Password)) + c.Options = append(c.Options, + getter.WithBasicAuth(r.Config.Username, r.Config.Password), + getter.WithPassCredentialsAll(r.Config.PassCredentialsAll), + ) } } diff --git a/vendor/helm.sh/helm/v3/pkg/downloader/manager.go b/vendor/helm.sh/helm/v3/pkg/downloader/manager.go index e89ac7c024..e0fd8a4220 100644 --- a/vendor/helm.sh/helm/v3/pkg/downloader/manager.go +++ b/vendor/helm.sh/helm/v3/pkg/downloader/manager.go @@ -310,7 +310,7 @@ func (m *Manager) downloadAll(deps []*chart.Dependency) error { // Any failure to resolve/download a chart should fail: // https://github.com/helm/helm/issues/1439 - churl, username, password, err := m.findChartURL(dep.Name, dep.Version, dep.Repository, repos) + churl, username, password, passcredentialsall, err := m.findChartURL(dep.Name, dep.Version, dep.Repository, repos) if err != nil { saveError = errors.Wrapf(err, "could not find %s", churl) break @@ -332,6 +332,7 @@ func (m *Manager) downloadAll(deps []*chart.Dependency) error { Getters: m.Getters, Options: []getter.Option{ getter.WithBasicAuth(username, password), + getter.WithPassCredentialsAll(passcredentialsall), }, } @@ -685,9 +686,9 @@ func (m *Manager) parallelRepoUpdate(repos []*repo.Entry) error { // repoURL is the repository to search // // If it finds a URL that is "relative", it will prepend the repoURL. -func (m *Manager) findChartURL(name, version, repoURL string, repos map[string]*repo.ChartRepository) (url, username, password string, err error) { +func (m *Manager) findChartURL(name, version, repoURL string, repos map[string]*repo.ChartRepository) (url, username, password string, passcredentialsall bool, err error) { if strings.HasPrefix(repoURL, "oci://") { - return fmt.Sprintf("%s/%s:%s", repoURL, name, version), "", "", nil + return fmt.Sprintf("%s/%s:%s", repoURL, name, version), "", "", false, nil } for _, cr := range repos { @@ -709,15 +710,16 @@ func (m *Manager) findChartURL(name, version, repoURL string, repos map[string]* } username = cr.Config.Username password = cr.Config.Password + passcredentialsall = cr.Config.PassCredentialsAll return } } url, err = repo.FindChartInRepoURL(repoURL, name, version, "", "", "", m.Getters) if err == nil { - return url, username, password, err + return url, username, password, false, err } err = errors.Errorf("chart %s not found in %s: %s", name, repoURL, err) - return url, username, password, err + return url, username, password, false, err } // findEntryByName finds an entry in the chart repository whose name matches the given name. 
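Before the getter.go and httpgetter.go diffs below, a self-contained sketch of the credential-scoping rule these Helm hunks introduce: basic-auth credentials configured for a repository are attached only when the chart URL shares the repo URL's scheme and host (and url.URL.Host retains the port), unless PassCredentialsAll/--pass-credentials opts out. The helper name is illustrative, not part of the patch:

package main

import (
	"fmt"
	"net/url"
)

// sameCredentialDomain reports whether basic-auth credentials configured for
// repoURL may be sent with a request for chartURL, mirroring the check the
// patch adds in install.go and httpgetter.go.
func sameCredentialDomain(repoURL, chartURL string) (bool, error) {
	u1, err := url.Parse(repoURL)
	if err != nil {
		return false, err
	}
	u2, err := url.Parse(chartURL)
	if err != nil {
		return false, err
	}
	// Host includes the port, so services on different ports of the same
	// hostname are treated as different credential domains.
	return u1.Scheme == u2.Scheme && u1.Host == u2.Host, nil
}

func main() {
	ok, _ := sameCredentialDomain("https://charts.example.com", "https://cdn.other.io/chart.tgz")
	fmt.Println(ok) // false: credentials are withheld unless --pass-credentials is set
}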
diff --git a/vendor/helm.sh/helm/v3/pkg/getter/getter.go b/vendor/helm.sh/helm/v3/pkg/getter/getter.go index 4653484560..78add728ad 100644 --- a/vendor/helm.sh/helm/v3/pkg/getter/getter.go +++ b/vendor/helm.sh/helm/v3/pkg/getter/getter.go @@ -38,6 +38,7 @@ type options struct { insecureSkipVerifyTLS bool username string password string + passCredentialsAll bool userAgent string version string registryClient *registry.Client @@ -64,6 +65,12 @@ func WithBasicAuth(username, password string) Option { } } +func WithPassCredentialsAll(pass bool) Option { + return func(opts *options) { + opts.passCredentialsAll = pass + } +} + // WithUserAgent sets the request's User-Agent header to use the provided agent name. func WithUserAgent(userAgent string) Option { return func(opts *options) { diff --git a/vendor/helm.sh/helm/v3/pkg/getter/httpgetter.go b/vendor/helm.sh/helm/v3/pkg/getter/httpgetter.go index bd60629ae8..822abad2ef 100644 --- a/vendor/helm.sh/helm/v3/pkg/getter/httpgetter.go +++ b/vendor/helm.sh/helm/v3/pkg/getter/httpgetter.go @@ -20,6 +20,7 @@ import ( "crypto/tls" "io" "net/http" + "net/url" "github.com/pkg/errors" @@ -56,8 +57,24 @@ func (g *HTTPGetter) get(href string) (*bytes.Buffer, error) { req.Header.Set("User-Agent", g.opts.userAgent) } - if g.opts.username != "" && g.opts.password != "" { - req.SetBasicAuth(g.opts.username, g.opts.password) + // Before setting the basic auth credentials, make sure the URL associated + // with the basic auth is the one being fetched. + u1, err := url.Parse(g.opts.url) + if err != nil { + return buf, errors.Wrap(err, "Unable to parse getter URL") + } + u2, err := url.Parse(href) + if err != nil { + return buf, errors.Wrap(err, "Unable to parse URL getting from") + } + + // Host on URL (returned from url.Parse) contains the port if present. + // This check ensures credentials are not passed between different + // services on different ports. + if g.opts.passCredentialsAll || (u1.Scheme == u2.Scheme && u1.Host == u2.Host) { + if g.opts.username != "" && g.opts.password != "" { + req.SetBasicAuth(g.opts.username, g.opts.password) + } } client, err := g.httpClient() diff --git a/vendor/helm.sh/helm/v3/pkg/repo/chartrepo.go b/vendor/helm.sh/helm/v3/pkg/repo/chartrepo.go index 09b94fd42b..67ede93fd8 100644 --- a/vendor/helm.sh/helm/v3/pkg/repo/chartrepo.go +++ b/vendor/helm.sh/helm/v3/pkg/repo/chartrepo.go @@ -48,6 +48,7 @@ type Entry struct { KeyFile string `json:"keyFile"` CAFile string `json:"caFile"` InsecureSkipTLSverify bool `json:"insecure_skip_tls_verify"` + PassCredentialsAll bool `json:"pass_credentials_all"` } // ChartRepository represents a chart repository @@ -129,6 +130,7 @@ func (r *ChartRepository) DownloadIndexFile() (string, error) { getter.WithInsecureSkipVerifyTLS(r.Config.InsecureSkipTLSverify), getter.WithTLSClientConfig(r.Config.CertFile, r.Config.KeyFile, r.Config.CAFile), getter.WithBasicAuth(r.Config.Username, r.Config.Password), + getter.WithPassCredentialsAll(r.Config.PassCredentialsAll), ) if err != nil { return "", err @@ -217,6 +219,15 @@ func FindChartInAuthRepoURL(repoURL, username, password, chartName, chartVersion // but it also receives credentials and TLS verify flag for the chart repository. // TODO Helm 4, FindChartInAuthAndTLSRepoURL should be integrated into FindChartInAuthRepoURL. 
func FindChartInAuthAndTLSRepoURL(repoURL, username, password, chartName, chartVersion, certFile, keyFile, caFile string, insecureSkipTLSverify bool, getters getter.Providers) (string, error) { + return FindChartInAuthAndTLSAndPassRepoURL(repoURL, username, password, chartName, chartVersion, certFile, keyFile, caFile, false, false, getters) +} + +// FindChartInAuthAndTLSAndPassRepoURL finds chart in chart repository pointed by repoURL +// without adding repo to repositories, like FindChartInRepoURL, +// but it also receives credentials, TLS verify flag, and if credentials should +// be passed on to other domains. +// TODO Helm 4, FindChartInAuthAndTLSAndPassRepoURL should be integrated into FindChartInAuthRepoURL. +func FindChartInAuthAndTLSAndPassRepoURL(repoURL, username, password, chartName, chartVersion, certFile, keyFile, caFile string, insecureSkipTLSverify, passCredentialsAll bool, getters getter.Providers) (string, error) { // Download and write the index file to a temporary location buf := make([]byte, 20) @@ -227,6 +238,7 @@ func FindChartInAuthAndTLSRepoURL(repoURL, username, password, chartName, chartV URL: repoURL, Username: username, Password: password, + PassCredentialsAll: passCredentialsAll, CertFile: certFile, KeyFile: keyFile, CAFile: caFile, diff --git a/vendor/k8s.io/apimachinery/third_party/forked/golang/LICENSE b/vendor/k8s.io/apimachinery/third_party/forked/golang/LICENSE new file mode 100644 index 0000000000..6a66aea5ea --- /dev/null +++ b/vendor/k8s.io/apimachinery/third_party/forked/golang/LICENSE @@ -0,0 +1,27 @@ +Copyright (c) 2009 The Go Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/k8s.io/apimachinery/third_party/forked/golang/PATENTS b/vendor/k8s.io/apimachinery/third_party/forked/golang/PATENTS new file mode 100644 index 0000000000..733099041f --- /dev/null +++ b/vendor/k8s.io/apimachinery/third_party/forked/golang/PATENTS @@ -0,0 +1,22 @@ +Additional IP Rights Grant (Patents) + +"This implementation" means the copyrightable works distributed by +Google as part of the Go project. 
+ +Google hereby grants to You a perpetual, worldwide, non-exclusive, +no-charge, royalty-free, irrevocable (except as stated in this section) +patent license to make, have made, use, offer to sell, sell, import, +transfer and otherwise run, modify and propagate the contents of this +implementation of Go, where such license applies only to those patent +claims, both currently owned or controlled by Google and acquired in +the future, licensable by Google that are necessarily infringed by this +implementation of Go. This grant does not include claims that would be +infringed only as a consequence of further modification of this +implementation. If you or your agent or exclusive licensee institute or +order or agree to the institution of patent litigation against any +entity (including a cross-claim or counterclaim in a lawsuit) alleging +that this implementation of Go or any code incorporated within this +implementation of Go constitutes direct or contributory patent +infringement, or inducement of patent infringement, then any patent +rights granted to you under this License for this implementation of Go +shall terminate as of the date such litigation is filed. diff --git a/vendor/k8s.io/client-go/third_party/forked/golang/LICENSE b/vendor/k8s.io/client-go/third_party/forked/golang/LICENSE new file mode 100644 index 0000000000..6a66aea5ea --- /dev/null +++ b/vendor/k8s.io/client-go/third_party/forked/golang/LICENSE @@ -0,0 +1,27 @@ +Copyright (c) 2009 The Go Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/k8s.io/client-go/third_party/forked/golang/PATENTS b/vendor/k8s.io/client-go/third_party/forked/golang/PATENTS new file mode 100644 index 0000000000..733099041f --- /dev/null +++ b/vendor/k8s.io/client-go/third_party/forked/golang/PATENTS @@ -0,0 +1,22 @@ +Additional IP Rights Grant (Patents) + +"This implementation" means the copyrightable works distributed by +Google as part of the Go project. 
+ +Google hereby grants to You a perpetual, worldwide, non-exclusive, +no-charge, royalty-free, irrevocable (except as stated in this section) +patent license to make, have made, use, offer to sell, sell, import, +transfer and otherwise run, modify and propagate the contents of this +implementation of Go, where such license applies only to those patent +claims, both currently owned or controlled by Google and acquired in +the future, licensable by Google that are necessarily infringed by this +implementation of Go. This grant does not include claims that would be +infringed only as a consequence of further modification of this +implementation. If you or your agent or exclusive licensee institute or +order or agree to the institution of patent litigation against any +entity (including a cross-claim or counterclaim in a lawsuit) alleging +that this implementation of Go or any code incorporated within this +implementation of Go constitutes direct or contributory patent +infringement, or inducement of patent infringement, then any patent +rights granted to you under this License for this implementation of Go +shall terminate as of the date such litigation is filed. diff --git a/vendor/k8s.io/cloud-provider/go.mod b/vendor/k8s.io/cloud-provider/go.mod index c89e3fbc58..12681e47a4 100644 --- a/vendor/k8s.io/cloud-provider/go.mod +++ b/vendor/k8s.io/cloud-provider/go.mod @@ -5,25 +5,25 @@ module k8s.io/cloud-provider go 1.16 require ( - github.com/google/go-cmp v0.5.4 + github.com/google/go-cmp v0.5.5 github.com/spf13/cobra v1.1.1 github.com/spf13/pflag v1.0.5 github.com/stretchr/testify v1.6.1 - k8s.io/api v0.21.4 - k8s.io/apimachinery v0.21.4 - k8s.io/apiserver v0.21.4 - k8s.io/client-go v0.21.4 - k8s.io/component-base v0.21.4 - k8s.io/controller-manager v0.21.4 + k8s.io/api v0.21.5 + k8s.io/apimachinery v0.21.5 + k8s.io/apiserver v0.21.5 + k8s.io/client-go v0.21.5 + k8s.io/component-base v0.21.5 + k8s.io/controller-manager v0.21.5 k8s.io/klog/v2 v2.8.0 k8s.io/utils v0.0.0-20201110183641-67b214c5f920 ) replace ( - k8s.io/api => k8s.io/api v0.21.4 - k8s.io/apimachinery => k8s.io/apimachinery v0.21.4 - k8s.io/apiserver => k8s.io/apiserver v0.21.4 - k8s.io/client-go => k8s.io/client-go v0.21.4 - k8s.io/component-base => k8s.io/component-base v0.21.4 - k8s.io/controller-manager => k8s.io/controller-manager v0.21.4 + k8s.io/api => k8s.io/api v0.21.5 + k8s.io/apimachinery => k8s.io/apimachinery v0.21.5 + k8s.io/apiserver => k8s.io/apiserver v0.21.5 + k8s.io/client-go => k8s.io/client-go v0.21.5 + k8s.io/component-base => k8s.io/component-base v0.21.5 + k8s.io/controller-manager => k8s.io/controller-manager v0.21.5 ) diff --git a/vendor/k8s.io/cloud-provider/go.sum b/vendor/k8s.io/cloud-provider/go.sum index 3580e1c154..515a784e90 100644 --- a/vendor/k8s.io/cloud-provider/go.sum +++ b/vendor/k8s.io/cloud-provider/go.sum @@ -160,8 +160,8 @@ github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:W github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0= github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8= github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= -github.com/golang/protobuf v1.4.3 h1:JjCZWpVbqXDqFVmTfYWEVTMIYrL/NPdPSCHPJ0T/raM= -github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= +github.com/golang/protobuf v1.5.0 h1:LUVKkCeviFUMKqHa4tXIIij/lbhnMbP7Fn5wKdKkRh4= +github.com/golang/protobuf v1.5.0/go.mod 
h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= github.com/google/btree v1.0.0 h1:0udJVsspx3VBr5FwtLhQQtuAsVc79tTq0ocGIPAU6qo= github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= @@ -169,9 +169,8 @@ github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5a github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.4 h1:L8R9j+yAqZuZjsqh/z+F1NCffTKKLShY6zXTItVIZ8M= -github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.5 h1:Khx7svrCpmxxtHBq5j2mp/xVjsi8hQMfNLvJFAlrGgU= +github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= github.com/google/gofuzz v1.1.0 h1:Hsa8mG0dQ46ij8Sl2AYJDUv1oA9/d6Vk+3LG99Oe02g= github.com/google/gofuzz v1.1.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= @@ -340,8 +339,8 @@ github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529/go.mod h1:DxrIzT+xaE7yg github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc= github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= -github.com/sirupsen/logrus v1.7.0 h1:ShrD1U9pZB12TX0cVy0DtePoCH97K8EtX+mg7ZARUtM= -github.com/sirupsen/logrus v1.7.0/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0= +github.com/sirupsen/logrus v1.8.1 h1:dJKuHgqk1NNQlqoA6BTlM1Wf9DOH3NBjQyu0h9+AZZE= +github.com/sirupsen/logrus v1.8.1/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0= github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc= github.com/smartystreets/goconvey v1.6.4/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA= github.com/soheilhy/cmux v0.1.4 h1:0HKaf1o97UwFjHH9o5XsHUOF+tqmdA7KEzXLpiyaw0E= @@ -628,8 +627,9 @@ google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2 google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGjtUeSXeh4= -google.golang.org/protobuf v1.25.0 h1:Ejskq+SyPohKW+1uil0JJMtmHCgJPJ/qWTxr8qp+R4c= -google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= +google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= +google.golang.org/protobuf v1.26.0 h1:bxAC2xTBsZGibn2RTntX0oH50xLsqy1OxA9tTL3p/lk= +google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod 
h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= @@ -668,18 +668,18 @@ honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWh honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg= honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= -k8s.io/api v0.21.4 h1:WtDkzTAuI31WZKDPeIYpEUA+WeUfXAmA7gwj6nzFfbc= -k8s.io/api v0.21.4/go.mod h1:fTVGP+M4D8+00FN2cMnJqk/eb/GH53bvmNs2SVTmpFk= -k8s.io/apimachinery v0.21.4 h1:KDq0lWZVslHkuE5I7iGAQHwpK0aDTlar1E7IWEc4CNw= -k8s.io/apimachinery v0.21.4/go.mod h1:H/IM+5vH9kZRNJ4l3x/fXP/5bOPJaVP/guptnZPeCFI= -k8s.io/apiserver v0.21.4 h1:egJgdhW0ueq5iJSY0c5YedPvRM2Ft/D3dcXOgwvs9jY= -k8s.io/apiserver v0.21.4/go.mod h1:SErUuFBBPZUcD2nsUU8hItxoYheqyYr2o/pCINEPW8g= -k8s.io/client-go v0.21.4 h1:tcwj167If+v+pIGrCjaPG7hFo6SqFPFCCgMJy+Vm8Jc= -k8s.io/client-go v0.21.4/go.mod h1:t0/eMKyUAq/DoQ7vW8NVVA00/nomlwC+eInsS8PxSew= -k8s.io/component-base v0.21.4 h1:Bc0AttSyhJFVXEIHz+VX+D11j/5z7SPPhl6whiXaRzs= -k8s.io/component-base v0.21.4/go.mod h1:ZKG0eHVX+tUDcaoIGpU3Vtk4TIjMddN9uhEWDmW6Nyg= -k8s.io/controller-manager v0.21.4 h1:XzQn1SnU0rMYQN91CiKuhXTKMYOmp9yqmECZ/uPRlgs= -k8s.io/controller-manager v0.21.4/go.mod h1:a/iL7W19zkyirHDaupk9cyC11nejVznGwZI6I8tbyQY= +k8s.io/api v0.21.5 h1:9zp3SslPRB+rqxhGKqqTo6VsN3HX0Ype1nWV6UQQ+Sk= +k8s.io/api v0.21.5/go.mod h1:Un8C5Hemo2r3MfPOjZvwQQ9KkBbiTBUCGrjlivo9uJ0= +k8s.io/apimachinery v0.21.5 h1:56bnsHcUNboSCbD779GGi4Lh5kHTDFUoDrnHbhLTiaw= +k8s.io/apimachinery v0.21.5/go.mod h1:3PfBV+4PPXNs0aueD+7fHcGyhdkFFYqXeshQtsKCi+4= +k8s.io/apiserver v0.21.5 h1:iEPvJ2uwmyb7C4eScOj1fgPKCyCUGgMQU5+UREE87vE= +k8s.io/apiserver v0.21.5/go.mod h1:0bWmrAx3dxUUFSEw71U91Si5obhIvBcAmf8oVZUO58E= +k8s.io/client-go v0.21.5 h1:zkVidiWVgciPKYqWpMFMjCUF+4rRXcfkKoyQS1Ue21k= +k8s.io/client-go v0.21.5/go.mod h1:EUornVlr3rBrPKXUoMPNggJdEQmvFNMpYO3Kb6432kw= +k8s.io/component-base v0.21.5 h1:icFqcFDrO9S+FQpGohzVm6qce9vlo131K0r3NhElxiQ= +k8s.io/component-base v0.21.5/go.mod h1:UyRaqQfPkBL/haEFaMWgVQvtom5TqAT+jqlFGlh6LuU= +k8s.io/controller-manager v0.21.5 h1:4BECve2i35C9lSV7Eyg8jAtakcbPsJNOGT7iGyyWc4Y= +k8s.io/controller-manager v0.21.5/go.mod h1:65guJmOoIEklTPwwYOmiHOsbEoAnQ7xKSZ9v+RdJC6g= k8s.io/gengo v0.0.0-20200413195148-3a45101e95ac/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0= k8s.io/klog/v2 v2.0.0/go.mod h1:PBfzABfn139FHAV07az/IF9Wp1bkk3vpT2XSJ76fSDE= k8s.io/klog/v2 v2.8.0 h1:Q3gmuM9hKEjefWFFYF0Mat+YyFJvsUyYuwyNNJ5C9Ts= diff --git a/vendor/k8s.io/code-generator/go.mod b/vendor/k8s.io/code-generator/go.mod index 59b6c8a156..d6b20bec4b 100644 --- a/vendor/k8s.io/code-generator/go.mod +++ b/vendor/k8s.io/code-generator/go.mod @@ -8,8 +8,6 @@ require ( github.com/emicklei/go-restful v2.9.5+incompatible // indirect github.com/go-openapi/spec v0.19.5 github.com/gogo/protobuf v1.3.2 - github.com/golang/protobuf v1.4.3 // indirect - github.com/google/go-cmp v0.5.4 // indirect github.com/googleapis/gnostic v0.4.1 github.com/json-iterator/go v1.1.10 // indirect github.com/kr/text v0.2.0 // indirect @@ -24,7 +22,7 @@ require ( golang.org/x/sys v0.0.0-20210426230700-d19ff857e887 // indirect golang.org/x/text v0.3.4 // indirect golang.org/x/tools v0.1.0 // indirect - google.golang.org/protobuf v1.25.0 // indirect + google.golang.org/protobuf v1.26.0 // indirect gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f // indirect gopkg.in/yaml.v2 v2.4.0 k8s.io/gengo 
v0.0.0-20201214224949-b6c5ce23f027 diff --git a/vendor/k8s.io/code-generator/go.sum b/vendor/k8s.io/code-generator/go.sum index 50afc6f97a..bea8fa8872 100644 --- a/vendor/k8s.io/code-generator/go.sum +++ b/vendor/k8s.io/code-generator/go.sum @@ -1,13 +1,9 @@ -cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= -github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= github.com/NYTimes/gziphandler v0.0.0-20170623195520-56545f4a5d46/go.mod h1:3wb06e3pkSAbeQ52E9H9iFoQsEEwGN64994WTCIhntQ= github.com/PuerkitoBio/purell v1.1.1 h1:WEQqlqaGbrPkxLJWfBwQmfEAE1Z7ONdDLqrN38tNFfI= github.com/PuerkitoBio/purell v1.1.1/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0= github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578 h1:d+Bc7a5rLufV/sSk/8dngufqelfh6jnri85riMAaF/M= github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE= github.com/asaskevich/govalidator v0.0.0-20190424111038-f61b66f89f4a/go.mod h1:lB+ZfQJz7igIIfQNfa7Ml4HSf2uFQQRzpGGRXenZAgY= -github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= -github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= @@ -16,8 +12,6 @@ github.com/docopt/docopt-go v0.0.0-20180111231733-ee0de3bc6815/go.mod h1:WwZ+bS3 github.com/emicklei/go-restful v0.0.0-20170410110728-ff4f55a20633/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs= github.com/emicklei/go-restful v2.9.5+incompatible h1:spTtZBk5DYEvbxMVutUuTyh1Ao2r4iyvLdACqsl/Ljk= github.com/emicklei/go-restful v2.9.5+incompatible/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs= -github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= -github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= github.com/go-logr/logr v0.1.0/go.mod h1:ixOQHD9gLJUVQQ2ZOR7zLEifBX6tGkNJF4QyIY7sIas= github.com/go-logr/logr v0.2.0/go.mod h1:z6/tIYblkpsD+a4lm/fGIIU9mZ+XfAiaFtq7xTgseGU= @@ -37,26 +31,13 @@ github.com/go-openapi/swag v0.19.5 h1:lTz6Ys4CmqqCQmZPBlbQENR1/GucA2bzYTE12Pw4tF github.com/go-openapi/swag v0.19.5/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk= github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= -github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= -github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= -github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= -github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8= -github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA= -github.com/golang/protobuf 
v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs= -github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w= -github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0= -github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8= -github.com/golang/protobuf v1.4.3 h1:JjCZWpVbqXDqFVmTfYWEVTMIYrL/NPdPSCHPJ0T/raM= -github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= -github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= -github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= -github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= +github.com/golang/protobuf v1.5.0 h1:LUVKkCeviFUMKqHa4tXIIij/lbhnMbP7Fn5wKdKkRh4= +github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.4 h1:L8R9j+yAqZuZjsqh/z+F1NCffTKKLShY6zXTItVIZ8M= -github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.5 h1:Khx7svrCpmxxtHBq5j2mp/xVjsi8hQMfNLvJFAlrGgU= +github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= github.com/google/gofuzz v1.1.0 h1:Hsa8mG0dQ46ij8Sl2AYJDUv1oA9/d6Vk+3LG99Oe02g= github.com/google/gofuzz v1.1.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= @@ -100,7 +81,6 @@ github.com/onsi/gomega v1.7.0 h1:XPnZz8VVBHjVsy1vzJmRwIcSwiUO+JFfrv/xGiigmME= github.com/onsi/gomega v1.7.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= -github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/spf13/pflag v0.0.0-20170130214245-9ff6c6923cff/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= @@ -115,18 +95,11 @@ golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACk golang.org/x/crypto v0.0.0-20190611184440-5c40567a22f8/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= -golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= -golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= -golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= -golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod 
v0.3.1-0.20200828183125-ce943fd02449 h1:xUIPaMhvROX9dhPvRCenIJtU78+lbEenGbgqB5hfHCQ= golang.org/x/mod v0.3.1-0.20200828183125-ce943fd02449/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= -golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= @@ -137,13 +110,10 @@ golang.org/x/net v0.0.0-20200324143707-d3edc9973b7e/go.mod h1:qpuaurCH72eLCgpAm/ golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20210224082022-3d97a244fca7 h1:OgUuv8lsRpBibGNbSizVwKWlysjaNzmC9gYMhPVfqFM= golang.org/x/net v0.0.0-20210224082022-3d97a244fca7/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= -golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -161,10 +131,6 @@ golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.4 h1:0YWbFKbhXG/wIiuHDSKpS0Iy7FSA+u45VtBMfQcFTTc= golang.org/x/text v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= -golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= -golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= golang.org/x/tools v0.0.0-20190614205625-5aca471b1d59/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20200505023115-26f46d2f7ef8/go.mod 
h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= @@ -177,24 +143,9 @@ golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8T golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 h1:go1bK/D/BFZV2I8cIQd1NKEZ+0owSTG1fDTci4IqFcE= golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= -google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= -google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= -google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= -google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= -google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= -google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= -google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= -google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= -google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= -google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= -google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE= -google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo= -google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= -google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= -google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= -google.golang.org/protobuf v1.25.0 h1:Ejskq+SyPohKW+1uil0JJMtmHCgJPJ/qWTxr8qp+R4c= -google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= +google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= +google.golang.org/protobuf v1.26.0 h1:bxAC2xTBsZGibn2RTntX0oH50xLsqy1OxA9tTL3p/lk= +google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= @@ -212,8 +163,6 @@ gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c h1:dUUwHk2QECo/6vqA44rthZ8ie2QXMNeKRTHCNY2nXvo= gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= -honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= -honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= k8s.io/gengo 
v0.0.0-20200413195148-3a45101e95ac/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0= k8s.io/gengo v0.0.0-20201214224949-b6c5ce23f027 h1:Uusb3oh8XcdzDF/ndlI4ToKTYVlkCSJP39SRY2mfRAw= k8s.io/gengo v0.0.0-20201214224949-b6c5ce23f027/go.mod h1:FiNAH4ZV3gBg2Kwh89tzAEV2be7d5xI0vBa/VySYy3E= diff --git a/vendor/k8s.io/code-generator/third_party/forked/golang/LICENSE b/vendor/k8s.io/code-generator/third_party/forked/golang/LICENSE new file mode 100644 index 0000000000..6a66aea5ea --- /dev/null +++ b/vendor/k8s.io/code-generator/third_party/forked/golang/LICENSE @@ -0,0 +1,27 @@ +Copyright (c) 2009 The Go Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/k8s.io/code-generator/third_party/forked/golang/PATENTS b/vendor/k8s.io/code-generator/third_party/forked/golang/PATENTS new file mode 100644 index 0000000000..733099041f --- /dev/null +++ b/vendor/k8s.io/code-generator/third_party/forked/golang/PATENTS @@ -0,0 +1,22 @@ +Additional IP Rights Grant (Patents) + +"This implementation" means the copyrightable works distributed by +Google as part of the Go project. + +Google hereby grants to You a perpetual, worldwide, non-exclusive, +no-charge, royalty-free, irrevocable (except as stated in this section) +patent license to make, have made, use, offer to sell, sell, import, +transfer and otherwise run, modify and propagate the contents of this +implementation of Go, where such license applies only to those patent +claims, both currently owned or controlled by Google and acquired in +the future, licensable by Google that are necessarily infringed by this +implementation of Go. This grant does not include claims that would be +infringed only as a consequence of further modification of this +implementation. 
If you or your agent or exclusive licensee institute or +order or agree to the institution of patent litigation against any +entity (including a cross-claim or counterclaim in a lawsuit) alleging +that this implementation of Go or any code incorporated within this +implementation of Go constitutes direct or contributory patent +infringement, or inducement of patent infringement, then any patent +rights granted to you under this License for this implementation of Go +shall terminate as of the date such litigation is filed. diff --git a/vendor/k8s.io/component-helpers/storage/volume/helpers.go b/vendor/k8s.io/component-helpers/storage/volume/helpers.go index a76e933448..7ec376f34a 100644 --- a/vendor/k8s.io/component-helpers/storage/volume/helpers.go +++ b/vendor/k8s.io/component-helpers/storage/volume/helpers.go @@ -16,7 +16,13 @@ limitations under the License. package volume -import v1 "k8s.io/api/core/v1" +import ( + "fmt" + + v1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/component-helpers/scheduling/corev1" +) // GetPersistentVolumeClaimClass returns StorageClassName. If no storage class was // requested, it returns "". @@ -42,3 +48,23 @@ func GetPersistentVolumeClass(volume *v1.PersistentVolume) string { return volume.Spec.StorageClassName } + +// CheckNodeAffinity looks at the PV node affinity, and checks if the node has the same corresponding labels +// This ensures that we don't mount a volume that doesn't belong to this node +func CheckNodeAffinity(pv *v1.PersistentVolume, nodeLabels map[string]string) error { + if pv.Spec.NodeAffinity == nil { + return nil + } + + if pv.Spec.NodeAffinity.Required != nil { + node := &v1.Node{ObjectMeta: metav1.ObjectMeta{Labels: nodeLabels}} + terms := pv.Spec.NodeAffinity.Required + if matches, err := corev1.MatchNodeSelectorTerms(node, terms); err != nil { + return err + } else if !matches { + return fmt.Errorf("no matching NodeSelectorTerms") + } + } + + return nil +} diff --git a/vendor/k8s.io/component-helpers/storage/volume/pv_helpers.go b/vendor/k8s.io/component-helpers/storage/volume/pv_helpers.go new file mode 100644 index 0000000000..f927b72314 --- /dev/null +++ b/vendor/k8s.io/component-helpers/storage/volume/pv_helpers.go @@ -0,0 +1,342 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package volume + +import ( + "fmt" + + v1 "k8s.io/api/core/v1" + storage "k8s.io/api/storage/v1" + apierrors "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/api/resource" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/client-go/kubernetes/scheme" + storagelisters "k8s.io/client-go/listers/storage/v1" + "k8s.io/client-go/tools/reference" +) + +const ( + // AnnBindCompleted Annotation applies to PVCs. It indicates that the lifecycle + // of the PVC has passed through the initial setup. This information changes how + // we interpret some observations of the state of the objects. Value of this + // Annotation does not matter. 
+	AnnBindCompleted = "pv.kubernetes.io/bind-completed" + + // AnnBoundByController annotation applies to PVs and PVCs. It indicates that + // the binding (PV->PVC or PVC->PV) was installed by the controller. The + // absence of this annotation means the binding was done by the user (i.e. + // pre-bound). Value of this annotation does not matter. + // External PV binders must bind PV the same way as PV controller, otherwise PV + // controller may not handle it correctly. + AnnBoundByController = "pv.kubernetes.io/bound-by-controller" + + // AnnSelectedNode annotation is added to a PVC that has been triggered by the scheduler to + // be dynamically provisioned. Its value is the name of the selected node. + AnnSelectedNode = "volume.kubernetes.io/selected-node" + + // NotSupportedProvisioner is a special provisioner name which can be set + // in a storage class to indicate dynamic provisioning is not supported by + // the storage. + NotSupportedProvisioner = "kubernetes.io/no-provisioner" + + // AnnDynamicallyProvisioned annotation is added to a PV that has been dynamically provisioned by + // Kubernetes. Its value is the name of the volume plugin that created the volume. + // It serves both the user (to show where a PV comes from) and Kubernetes (to + // recognize dynamically provisioned PVs in its decisions). + AnnDynamicallyProvisioned = "pv.kubernetes.io/provisioned-by" + + // AnnMigratedTo annotation is added to a PVC and PV that is supposed to be + // dynamically provisioned/deleted by its corresponding CSI driver + // through the CSIMigration feature flags. When this annotation is set the + // Kubernetes components will "stand-down" and the external-provisioner will + // act on the objects. + AnnMigratedTo = "pv.kubernetes.io/migrated-to" + + // AnnStorageProvisioner annotation is added to a PVC that is supposed to be dynamically + // provisioned. Its value is the name of the volume plugin that is supposed to provision + // a volume for this PVC. + // TODO: remove beta anno once deprecation period ends + AnnStorageProvisioner = "volume.kubernetes.io/storage-provisioner" + AnnBetaStorageProvisioner = "volume.beta.kubernetes.io/storage-provisioner" + + // PVDeletionProtectionFinalizer is the finalizer added by the external-provisioner on the PV + PVDeletionProtectionFinalizer = "external-provisioner.volume.kubernetes.io/finalizer" + + // PVDeletionInTreeProtectionFinalizer is the finalizer added to protect PV deletion for in-tree volumes. + PVDeletionInTreeProtectionFinalizer = "kubernetes.io/pv-controller" +) + +// IsDelayBindingProvisioning checks if provisioning of the claim was triggered via the selected-node annotation +func IsDelayBindingProvisioning(claim *v1.PersistentVolumeClaim) bool { + // When the VolumeScheduling feature is enabled, + // the scheduler signals the PV controller to start dynamic + // provisioning by setting the "AnnSelectedNode" annotation + // in the PVC + _, ok := claim.Annotations[AnnSelectedNode] + return ok +} + +// IsDelayBindingMode checks if the claim is in delay binding mode.
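
A minimal sketch of how a caller might combine IsDelayBindingProvisioning with IsDelayBindingMode, whose body follows; the shouldProvisionNow wrapper and its wiring are illustrative assumptions, not part of the vendored package:

    package provisioncheck // hypothetical caller package

    import (
    	v1 "k8s.io/api/core/v1"
    	storagelisters "k8s.io/client-go/listers/storage/v1"
    	"k8s.io/component-helpers/storage/volume"
    )

    // shouldProvisionNow is a hypothetical caller: immediate-binding classes
    // provision right away, while WaitForFirstConsumer classes wait until the
    // scheduler has stamped the selected-node annotation on the claim.
    func shouldProvisionNow(claim *v1.PersistentVolumeClaim, classLister storagelisters.StorageClassLister) (bool, error) {
    	delayBinding, err := volume.IsDelayBindingMode(claim, classLister)
    	if err != nil {
    		return false, err
    	}
    	if !delayBinding {
    		return true, nil
    	}
    	return volume.IsDelayBindingProvisioning(claim), nil
    }
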
+func IsDelayBindingMode(claim *v1.PersistentVolumeClaim, classLister storagelisters.StorageClassLister) (bool, error) { + className := GetPersistentVolumeClaimClass(claim) + if className == "" { + return false, nil + } + + class, err := classLister.Get(className) + if err != nil { + if apierrors.IsNotFound(err) { + return false, nil + } + return false, err + } + + if class.VolumeBindingMode == nil { + return false, fmt.Errorf("VolumeBindingMode not set for StorageClass %q", className) + } + + return *class.VolumeBindingMode == storage.VolumeBindingWaitForFirstConsumer, nil +} + +// GetBindVolumeToClaim returns a new volume which is bound to given claim. In +// addition, it returns a bool which indicates whether we made modification on +// original volume. +func GetBindVolumeToClaim(volume *v1.PersistentVolume, claim *v1.PersistentVolumeClaim) (*v1.PersistentVolume, bool, error) { + dirty := false + + // Check if the volume was already bound (either by user or by controller) + shouldSetBoundByController := false + if !IsVolumeBoundToClaim(volume, claim) { + shouldSetBoundByController = true + } + + // The volume from method args can be pointing to watcher cache. We must not + // modify these, therefore create a copy. + volumeClone := volume.DeepCopy() + + // Bind the volume to the claim if it is not bound yet + if volume.Spec.ClaimRef == nil || + volume.Spec.ClaimRef.Name != claim.Name || + volume.Spec.ClaimRef.Namespace != claim.Namespace || + volume.Spec.ClaimRef.UID != claim.UID { + + claimRef, err := reference.GetReference(scheme.Scheme, claim) + if err != nil { + return nil, false, fmt.Errorf("unexpected error getting claim reference: %w", err) + } + volumeClone.Spec.ClaimRef = claimRef + dirty = true + } + + // Set AnnBoundByController if it is not set yet + if shouldSetBoundByController && !metav1.HasAnnotation(volumeClone.ObjectMeta, AnnBoundByController) { + metav1.SetMetaDataAnnotation(&volumeClone.ObjectMeta, AnnBoundByController, "yes") + dirty = true + } + + return volumeClone, dirty, nil +} + +// IsVolumeBoundToClaim returns true, if given volume is pre-bound or bound +// to specific claim. Both claim.Name and claim.Namespace must be equal. +// If claim.UID is present in volume.Spec.ClaimRef, it must be equal too. +func IsVolumeBoundToClaim(volume *v1.PersistentVolume, claim *v1.PersistentVolumeClaim) bool { + if volume.Spec.ClaimRef == nil { + return false + } + if claim.Name != volume.Spec.ClaimRef.Name || claim.Namespace != volume.Spec.ClaimRef.Namespace { + return false + } + if volume.Spec.ClaimRef.UID != "" && claim.UID != volume.Spec.ClaimRef.UID { + return false + } + return true +} + +// FindMatchingVolume goes through the list of volumes to find the best matching volume +// for the claim. +// +// This function is used by both the PV controller and scheduler. +// +// delayBinding is true only in the PV controller path. When set, prebound PVs are still returned +// as a match for the claim, but unbound PVs are skipped. +// +// node is set only in the scheduler path. When set, the PV node affinity is checked against +// the node's labels. +// +// excludedVolumes is only used in the scheduler path, and is needed for evaluating multiple +// unbound PVCs for a single Pod at one time. As each PVC finds a matching PV, the chosen +// PV needs to be excluded from future matching. 
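
Before the implementation below, a hedged sketch of the scheduler-side contract just described: each PV chosen for one claim is fed back through excludedVolumes when matching the next claim of the same Pod. The matchPodClaims helper and its names are illustrative assumptions:

    package matching // hypothetical

    import (
    	"fmt"

    	v1 "k8s.io/api/core/v1"
    	"k8s.io/component-helpers/storage/volume"
    )

    // matchPodClaims matches several unbound PVCs of one Pod in sequence,
    // excluding already-chosen PVs so two claims never land on the same PV.
    func matchPodClaims(claims []*v1.PersistentVolumeClaim, pvs []*v1.PersistentVolume, node *v1.Node) (map[string]*v1.PersistentVolume, error) {
    	chosen := map[string]*v1.PersistentVolume{}
    	for _, claim := range claims {
    		pv, err := volume.FindMatchingVolume(claim, pvs, node, chosen, true /* delayBinding */)
    		if err != nil {
    			return nil, err
    		}
    		if pv == nil {
    			return nil, fmt.Errorf("no matching PV for claim %s/%s", claim.Namespace, claim.Name)
    		}
    		chosen[pv.Name] = pv
    	}
    	return chosen, nil
    }
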
+func FindMatchingVolume( + claim *v1.PersistentVolumeClaim, + volumes []*v1.PersistentVolume, + node *v1.Node, + excludedVolumes map[string]*v1.PersistentVolume, + delayBinding bool) (*v1.PersistentVolume, error) { + + var smallestVolume *v1.PersistentVolume + var smallestVolumeQty resource.Quantity + requestedQty := claim.Spec.Resources.Requests[v1.ResourceName(v1.ResourceStorage)] + requestedClass := GetPersistentVolumeClaimClass(claim) + + var selector labels.Selector + if claim.Spec.Selector != nil { + internalSelector, err := metav1.LabelSelectorAsSelector(claim.Spec.Selector) + if err != nil { + return nil, fmt.Errorf("error creating internal label selector for claim: %v: %v", claimToClaimKey(claim), err) + } + selector = internalSelector + } + + // Go through all available volumes with two goals: + // - find a volume that is either pre-bound by the user or dynamically + // provisioned for this claim. Because of this we need to loop through + // all volumes. + // - find the smallest matching one if there is no volume pre-bound to + // the claim. + for _, volume := range volumes { + if _, ok := excludedVolumes[volume.Name]; ok { + // Skip volumes in the excluded list + continue + } + if volume.Spec.ClaimRef != nil && !IsVolumeBoundToClaim(volume, claim) { + continue + } + + volumeQty := volume.Spec.Capacity[v1.ResourceStorage] + if volumeQty.Cmp(requestedQty) < 0 { + continue + } + // filter out mismatching volumeModes + if CheckVolumeModeMismatches(&claim.Spec, &volume.Spec) { + continue + } + + // check if the PV's DeletionTimestamp is set; if so, skip this volume. + if volume.ObjectMeta.DeletionTimestamp != nil { + continue + } + + nodeAffinityValid := true + if node != nil { + // Scheduler path, check that the PV NodeAffinity + // is satisfied by the node. + // CheckNodeAffinity is the most expensive call in this loop. + // We should check cheaper conditions first or consider optimizing this function. + err := CheckNodeAffinity(volume, node.Labels) + if err != nil { + nodeAffinityValid = false + } + } + + if IsVolumeBoundToClaim(volume, claim) { + // If PV node affinity is invalid, return no match. + // This means the prebound PV (and therefore PVC) + // is not suitable for this node. + if !nodeAffinityValid { + return nil, nil + } + + return volume, nil + } + + if node == nil && delayBinding { + // The PV controller does not bind this claim. + // The scheduler will handle binding unbound volumes; + // the scheduler path will have node != nil. + continue + } + + // filter out: + // - volumes in non-available phase + // - volumes whose labels don't match the claim's selector, if specified + // - volumes in a Class that is not requested + // - volumes whose NodeAffinity does not match the node + if volume.Status.Phase != v1.VolumeAvailable { + // We ignore volumes in non-available phase, because volumes that + // satisfy the matching criteria will be updated to available, and binding + // them now has a high chance of encountering unnecessary failures + // due to API conflicts.
+ continue + } else if selector != nil && !selector.Matches(labels.Set(volume.Labels)) { + continue + } + if GetPersistentVolumeClass(volume) != requestedClass { + continue + } + if !nodeAffinityValid { + continue + } + + if node != nil { + // Scheduler path + // Check that the access modes match + if !CheckAccessModes(claim, volume) { + continue + } + } + + if smallestVolume == nil || smallestVolumeQty.Cmp(volumeQty) > 0 { + smallestVolume = volume + smallestVolumeQty = volumeQty + } + } + + if smallestVolume != nil { + // Found a matching volume + return smallestVolume, nil + } + + return nil, nil +} + +// CheckVolumeModeMismatches is a convenience method that checks volumeMode for PersistentVolume +// and PersistentVolumeClaims +func CheckVolumeModeMismatches(pvcSpec *v1.PersistentVolumeClaimSpec, pvSpec *v1.PersistentVolumeSpec) bool { + // In HA upgrades, we cannot guarantee that the apiserver is on a version >= controller-manager. + // So we default a nil volumeMode to filesystem + requestedVolumeMode := v1.PersistentVolumeFilesystem + if pvcSpec.VolumeMode != nil { + requestedVolumeMode = *pvcSpec.VolumeMode + } + pvVolumeMode := v1.PersistentVolumeFilesystem + if pvSpec.VolumeMode != nil { + pvVolumeMode = *pvSpec.VolumeMode + } + return requestedVolumeMode != pvVolumeMode +} + +// CheckAccessModes returns true if PV satisfies all the PVC's requested AccessModes +func CheckAccessModes(claim *v1.PersistentVolumeClaim, volume *v1.PersistentVolume) bool { + pvModesMap := map[v1.PersistentVolumeAccessMode]bool{} + for _, mode := range volume.Spec.AccessModes { + pvModesMap[mode] = true + } + + for _, mode := range claim.Spec.AccessModes { + _, ok := pvModesMap[mode] + if !ok { + return false + } + } + return true +} + +func claimToClaimKey(claim *v1.PersistentVolumeClaim) string { + return fmt.Sprintf("%s/%s", claim.Namespace, claim.Name) +} diff --git a/vendor/k8s.io/klog/v2/go.mod b/vendor/k8s.io/klog/v2/go.mod index e396e31c06..eb297b6a1e 100644 --- a/vendor/k8s.io/klog/v2/go.mod +++ b/vendor/k8s.io/klog/v2/go.mod @@ -2,4 +2,4 @@ module k8s.io/klog/v2 go 1.13 -require github.com/go-logr/logr v0.2.0 +require github.com/go-logr/logr v0.4.0 diff --git a/vendor/k8s.io/klog/v2/go.sum b/vendor/k8s.io/klog/v2/go.sum index 8dfa785428..5778f81742 100644 --- a/vendor/k8s.io/klog/v2/go.sum +++ b/vendor/k8s.io/klog/v2/go.sum @@ -1,2 +1,2 @@ -github.com/go-logr/logr v0.2.0 h1:QvGt2nLcHH0WK9orKa+ppBPAxREcH364nPUedEpK0TY= -github.com/go-logr/logr v0.2.0/go.mod h1:z6/tIYblkpsD+a4lm/fGIIU9mZ+XfAiaFtq7xTgseGU= +github.com/go-logr/logr v0.4.0 h1:K7/B1jt6fIBQVd4Owv2MqGQClcgf0R266+7C/QjRcLc= +github.com/go-logr/logr v0.4.0/go.mod h1:z6/tIYblkpsD+a4lm/fGIIU9mZ+XfAiaFtq7xTgseGU= diff --git a/vendor/k8s.io/klog/v2/klog.go b/vendor/k8s.io/klog/v2/klog.go index 23cced6250..25483fad13 100644 --- a/vendor/k8s.io/klog/v2/klog.go +++ b/vendor/k8s.io/klog/v2/klog.go @@ -81,6 +81,7 @@ import ( "math" "os" "path/filepath" + "reflect" "runtime" "strconv" "strings" @@ -433,7 +434,7 @@ func InitFlags(flagset *flag.FlagSet) { flagset.Var(&logging.verbosity, "v", "number for the log level verbosity") flagset.BoolVar(&logging.addDirHeader, "add_dir_header", logging.addDirHeader, "If true, adds the file directory to the header of the log messages") flagset.BoolVar(&logging.skipHeaders, "skip_headers", logging.skipHeaders, "If true, avoid header prefixes in the log messages") - flagset.BoolVar(&logging.oneOutput, "one_output", logging.oneOutput, "If true, only write logs to their native severity level (vs also writing to 
each lower severity level") + flagset.BoolVar(&logging.oneOutput, "one_output", logging.oneOutput, "If true, only write logs to their native severity level (vs also writing to each lower severity level)") flagset.BoolVar(&logging.skipLogHeaders, "skip_log_headers", logging.skipLogHeaders, "If true, avoid headers when opening log files") flagset.Var(&logging.stderrThreshold, "stderrthreshold", "logs at or above this threshold go to stderr") flagset.Var(&logging.vmodule, "vmodule", "comma-separated list of pattern=N settings for file-filtered logging") @@ -772,7 +773,7 @@ func (l *loggingT) printWithFileLine(s severity, logr logr.Logger, filter LogFil } // if loggr is specified, will call loggr.Error, otherwise output with logging module. -func (l *loggingT) errorS(err error, loggr logr.Logger, filter LogFilter, msg string, keysAndValues ...interface{}) { +func (l *loggingT) errorS(err error, loggr logr.Logger, filter LogFilter, depth int, msg string, keysAndValues ...interface{}) { if filter != nil { msg, keysAndValues = filter.FilterS(msg, keysAndValues) } @@ -780,11 +781,11 @@ func (l *loggingT) errorS(err error, loggr logr.Logger, filter LogFilter, msg st loggr.Error(err, msg, keysAndValues...) return } - l.printS(err, msg, keysAndValues...) + l.printS(err, errorLog, depth+1, msg, keysAndValues...) } // if loggr is specified, will call loggr.Info, otherwise output with logging module. -func (l *loggingT) infoS(loggr logr.Logger, filter LogFilter, msg string, keysAndValues ...interface{}) { +func (l *loggingT) infoS(loggr logr.Logger, filter LogFilter, depth int, msg string, keysAndValues ...interface{}) { if filter != nil { msg, keysAndValues = filter.FilterS(msg, keysAndValues) } @@ -792,12 +793,12 @@ func (l *loggingT) infoS(loggr logr.Logger, filter LogFilter, msg string, keysAn loggr.Info(msg, keysAndValues...) return } - l.printS(nil, msg, keysAndValues...) + l.printS(nil, infoLog, depth+1, msg, keysAndValues...) } // printS is called from infoS and errorS if loggr is not specified. -// if err arguments is specified, will output to errorLog severity -func (l *loggingT) printS(err error, msg string, keysAndValues ...interface{}) { +// set log severity by s +func (l *loggingT) printS(err error, s severity, depth int, msg string, keysAndValues ...interface{}) { b := &bytes.Buffer{} b.WriteString(fmt.Sprintf("%q", msg)) if err != nil { @@ -805,13 +806,7 @@ func (l *loggingT) printS(err error, msg string, keysAndValues ...interface{}) { b.WriteString(fmt.Sprintf("err=%q", err.Error())) } kvListFormat(b, keysAndValues...) - var s severity - if err == nil { - s = infoLog - } else { - s = errorLog - } - l.printDepth(s, logging.logr, nil, 2, b) + l.printDepth(s, logging.logr, nil, depth+1, b) } const missingValue = "(MISSING)" @@ -1359,14 +1354,20 @@ func (v Verbose) Infof(format string, args ...interface{}) { // See the documentation of V for usage. func (v Verbose) InfoS(msg string, keysAndValues ...interface{}) { if v.enabled { - logging.infoS(v.logr, v.filter, msg, keysAndValues...) + logging.infoS(v.logr, v.filter, 0, msg, keysAndValues...) } } +// InfoSDepth acts as InfoS but uses depth to determine which call frame to log. +// InfoSDepth(0, "msg") is the same as InfoS("msg"). +func InfoSDepth(depth int, msg string, keysAndValues ...interface{}) { + logging.infoS(logging.logr, logging.filter, depth, msg, keysAndValues...) +} + // Deprecated: Use ErrorS instead. 
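
The new InfoSDepth and ErrorSDepth entry points let logging wrappers attribute records to their caller's frame instead of their own; a minimal hypothetical wrapper (the mylog package and Audit function are assumptions):

    package mylog // hypothetical wrapper around klog

    import "k8s.io/klog/v2"

    // Audit forwards to klog with depth 1, skipping this wrapper's own frame,
    // so the emitted file:line points at the code that called Audit.
    func Audit(msg string, keysAndValues ...interface{}) {
    	klog.InfoSDepth(1, msg, keysAndValues...)
    }
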
func (v Verbose) Error(err error, msg string, args ...interface{}) { if v.enabled { - logging.errorS(err, v.logr, v.filter, msg, args...) + logging.errorS(err, v.logr, v.filter, 0, msg, args...) } } @@ -1374,7 +1375,7 @@ func (v Verbose) Error(err error, msg string, args ...interface{}) { // See the documentation of V for usage. func (v Verbose) ErrorS(err error, msg string, keysAndValues ...interface{}) { if v.enabled { - logging.errorS(err, v.logr, v.filter, msg, keysAndValues...) + logging.errorS(err, v.logr, v.filter, 0, msg, keysAndValues...) } } @@ -1411,7 +1412,7 @@ func Infof(format string, args ...interface{}) { // output: // >> I1025 00:15:15.525108 1 controller_utils.go:116] "Pod status updated" pod="kubedns" status="ready" func InfoS(msg string, keysAndValues ...interface{}) { - logging.infoS(logging.logr, logging.filter, msg, keysAndValues...) + logging.infoS(logging.logr, logging.filter, 0, msg, keysAndValues...) } // Warning logs to the WARNING and INFO logs. @@ -1472,7 +1473,13 @@ func Errorf(format string, args ...interface{}) { // output: // >> E1025 00:15:15.525108 1 controller_utils.go:114] "Failed to update pod status" err="timeout" func ErrorS(err error, msg string, keysAndValues ...interface{}) { - logging.errorS(err, logging.logr, logging.filter, msg, keysAndValues...) + logging.errorS(err, logging.logr, logging.filter, 0, msg, keysAndValues...) +} + +// ErrorSDepth acts as ErrorS but uses depth to determine which call frame to log. +// ErrorSDepth(0, "msg") is the same as ErrorS("msg"). +func ErrorSDepth(depth int, err error, msg string, keysAndValues ...interface{}) { + logging.errorS(err, logging.logr, logging.filter, depth, msg, keysAndValues...) } // Fatal logs to the FATAL, ERROR, WARNING, and INFO logs, @@ -1571,6 +1578,13 @@ type KMetadata interface { // KObj returns ObjectRef from ObjectMeta func KObj(obj KMetadata) ObjectRef { + if obj == nil { + return ObjectRef{} + } + if val := reflect.ValueOf(obj); val.Kind() == reflect.Ptr && val.IsNil() { + return ObjectRef{} + } + return ObjectRef{ Name: obj.GetName(), Namespace: obj.GetNamespace(), diff --git a/vendor/k8s.io/kubernetes/pkg/volume/util/subpath/subpath_linux.go b/vendor/k8s.io/kubernetes/pkg/volume/util/subpath/subpath_linux.go index 1140f75ce5..84cdf5e105 100644 --- a/vendor/k8s.io/kubernetes/pkg/volume/util/subpath/subpath_linux.go +++ b/vendor/k8s.io/kubernetes/pkg/volume/util/subpath/subpath_linux.go @@ -209,8 +209,9 @@ func doBindSubPath(mounter mount.Interface, subpath Subpath) (hostPath string, e // Do the bind mount options := []string{"bind"} + mountFlags := []string{"--no-canonicalize"} klog.V(5).Infof("bind mounting %q at %q", mountSource, bindPathTarget) - if err = mounter.MountSensitiveWithoutSystemd(mountSource, bindPathTarget, "" /*fstype*/, options, nil); err != nil { + if err = mounter.MountSensitiveWithoutSystemdWithMountFlags(mountSource, bindPathTarget, "" /*fstype*/, options, nil /* sensitiveOptions */, mountFlags); err != nil { return "", fmt.Errorf("error mounting %s: %s", subpath.Path, err) } success = true diff --git a/vendor/k8s.io/mount-utils/fake_mounter.go b/vendor/k8s.io/mount-utils/fake_mounter.go index 393ed043ba..55ea5e2986 100644 --- a/vendor/k8s.io/mount-utils/fake_mounter.go +++ b/vendor/k8s.io/mount-utils/fake_mounter.go @@ -136,6 +136,10 @@ func (f *FakeMounter) MountSensitiveWithoutSystemd(source string, target string, return f.MountSensitive(source, target, fstype, options, nil /* sensitiveOptions */) } +func (f *FakeMounter) 
MountSensitiveWithoutSystemdWithMountFlags(source string, target string, fstype string, options []string, sensitiveOptions []string, mountFlags []string) error { + return f.MountSensitive(source, target, fstype, options, nil /* sensitiveOptions */) +} + // Unmount records the unmount event and updates the in-memory mount points for FakeMounter func (f *FakeMounter) Unmount(target string) error { f.mutex.Lock() diff --git a/vendor/k8s.io/mount-utils/mount.go b/vendor/k8s.io/mount-utils/mount.go index 93b60d3f92..a882fcc739 100644 --- a/vendor/k8s.io/mount-utils/mount.go +++ b/vendor/k8s.io/mount-utils/mount.go @@ -49,6 +49,8 @@ type Interface interface { MountSensitive(source string, target string, fstype string, options []string, sensitiveOptions []string) error // MountSensitiveWithoutSystemd is the same as MountSensitive() but this method disable using systemd mount. MountSensitiveWithoutSystemd(source string, target string, fstype string, options []string, sensitiveOptions []string) error + // MountSensitiveWithoutSystemdWithMountFlags is the same as MountSensitiveWithoutSystemd() with additional mount flags + MountSensitiveWithoutSystemdWithMountFlags(source string, target string, fstype string, options []string, sensitiveOptions []string, mountFlags []string) error // Unmount unmounts given target. Unmount(target string) error // List returns a list of all mounted filesystems. This can be large. diff --git a/vendor/k8s.io/mount-utils/mount_linux.go b/vendor/k8s.io/mount-utils/mount_linux.go index 10a1c3f010..7097eae087 100644 --- a/vendor/k8s.io/mount-utils/mount_linux.go +++ b/vendor/k8s.io/mount-utils/mount_linux.go @@ -87,11 +87,11 @@ func (mounter *Mounter) MountSensitive(source string, target string, fstype stri mounterPath := "" bind, bindOpts, bindRemountOpts, bindRemountOptsSensitive := MakeBindOptsSensitive(options, sensitiveOptions) if bind { - err := mounter.doMount(mounterPath, defaultMountCommand, source, target, fstype, bindOpts, bindRemountOptsSensitive, true) + err := mounter.doMount(mounterPath, defaultMountCommand, source, target, fstype, bindOpts, bindRemountOptsSensitive, nil /* mountFlags */, true) if err != nil { return err } - return mounter.doMount(mounterPath, defaultMountCommand, source, target, fstype, bindRemountOpts, bindRemountOptsSensitive, true) + return mounter.doMount(mounterPath, defaultMountCommand, source, target, fstype, bindRemountOpts, bindRemountOptsSensitive, nil /* mountFlags */, true) } // The list of filesystems that require containerized mounter on GCI image cluster fsTypesNeedMounter := map[string]struct{}{ @@ -103,19 +103,24 @@ func (mounter *Mounter) MountSensitive(source string, target string, fstype stri if _, ok := fsTypesNeedMounter[fstype]; ok { mounterPath = mounter.mounterPath } - return mounter.doMount(mounterPath, defaultMountCommand, source, target, fstype, options, sensitiveOptions, true) + return mounter.doMount(mounterPath, defaultMountCommand, source, target, fstype, options, sensitiveOptions, nil /* mountFlags */, true) } // MountSensitiveWithoutSystemd is the same as MountSensitive() but disable using systemd mount. func (mounter *Mounter) MountSensitiveWithoutSystemd(source string, target string, fstype string, options []string, sensitiveOptions []string) error { + return mounter.MountSensitiveWithoutSystemdWithMountFlags(source, target, fstype, options, sensitiveOptions, nil /* mountFlags */) +} + +// MountSensitiveWithoutSystemdWithMountFlags is the same as MountSensitiveWithoutSystemd with additional mount flags. 
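
The mountFlags parameter threaded through this interface carries arguments for the mount(8) binary itself, as opposed to -o filesystem options; before the implementation below, a hedged sketch mirroring the subpath change earlier in this diff (the helper name is an assumption):

    package mounting // hypothetical

    import mount "k8s.io/mount-utils"

    // bindMountNoCanonicalize passes --no-canonicalize via mountFlags, where it
    // lands before -t/-o on the command line, while "bind" stays in the -o options.
    func bindMountNoCanonicalize(m mount.Interface, source, target string) error {
    	return m.MountSensitiveWithoutSystemdWithMountFlags(
    		source, target,
    		"",                            // fstype: empty for a bind mount
    		[]string{"bind"},              // options passed via -o
    		nil,                           // sensitiveOptions: nothing to redact
    		[]string{"--no-canonicalize"}, // mountFlags: flags for mount(8) itself
    	)
    }
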
+func (mounter *Mounter) MountSensitiveWithoutSystemdWithMountFlags(source string, target string, fstype string, options []string, sensitiveOptions []string, mountFlags []string) error { mounterPath := "" bind, bindOpts, bindRemountOpts, bindRemountOptsSensitive := MakeBindOptsSensitive(options, sensitiveOptions) if bind { - err := mounter.doMount(mounterPath, defaultMountCommand, source, target, fstype, bindOpts, bindRemountOptsSensitive, false) + err := mounter.doMount(mounterPath, defaultMountCommand, source, target, fstype, bindOpts, bindRemountOptsSensitive, mountFlags, false) if err != nil { return err } - return mounter.doMount(mounterPath, defaultMountCommand, source, target, fstype, bindRemountOpts, bindRemountOptsSensitive, false) + return mounter.doMount(mounterPath, defaultMountCommand, source, target, fstype, bindRemountOpts, bindRemountOptsSensitive, mountFlags, false) } // The list of filesystems that require containerized mounter on GCI image cluster fsTypesNeedMounter := map[string]struct{}{ @@ -127,14 +132,14 @@ func (mounter *Mounter) MountSensitiveWithoutSystemd(source string, target strin if _, ok := fsTypesNeedMounter[fstype]; ok { mounterPath = mounter.mounterPath } - return mounter.doMount(mounterPath, defaultMountCommand, source, target, fstype, options, sensitiveOptions, false) + return mounter.doMount(mounterPath, defaultMountCommand, source, target, fstype, options, sensitiveOptions, mountFlags, false) } // doMount runs the mount command. mounterPath is the path to mounter binary if containerized mounter is used. // sensitiveOptions is an extension of options except they will not be logged (because they may contain sensitive material) // systemdMountRequired is an extension of option to decide whether uses systemd mount. -func (mounter *Mounter) doMount(mounterPath string, mountCmd string, source string, target string, fstype string, options []string, sensitiveOptions []string, systemdMountRequired bool) error { - mountArgs, mountArgsLogStr := MakeMountArgsSensitive(source, target, fstype, options, sensitiveOptions) +func (mounter *Mounter) doMount(mounterPath string, mountCmd string, source string, target string, fstype string, options []string, sensitiveOptions []string, mountFlags []string, systemdMountRequired bool) error { + mountArgs, mountArgsLogStr := MakeMountArgsSensitiveWithMountFlags(source, target, fstype, options, sensitiveOptions, mountFlags) if len(mounterPath) > 0 { mountArgs = append([]string{mountCmd}, mountArgs...) mountArgsLogStr = mountCmd + " " + mountArgsLogStr @@ -217,10 +222,22 @@ func MakeMountArgs(source, target, fstype string, options []string) (mountArgs [ // MakeMountArgsSensitive makes the arguments to the mount(8) command. // sensitiveOptions is an extension of options except they will not be logged (because they may contain sensitive material) func MakeMountArgsSensitive(source, target, fstype string, options []string, sensitiveOptions []string) (mountArgs []string, mountArgsLogStr string) { + return MakeMountArgsSensitiveWithMountFlags(source, target, fstype, options, sensitiveOptions, nil /* mountFlags */) +} + +// MakeMountArgsSensitiveWithMountFlags makes the arguments to the mount(8) command. 
+// sensitiveOptions is an extension of options except they will not be logged (because they may contain sensitive material) +// mountFlags are additional mount flags that are not related to the fstype +// and mount options +func MakeMountArgsSensitiveWithMountFlags(source, target, fstype string, options []string, sensitiveOptions []string, mountFlags []string) (mountArgs []string, mountArgsLogStr string) { + // Build mount command as follows: + // mount [$mountFlags] [-t $fstype] [-o $options] [$source] $target + mountArgs = []string{} + mountArgsLogStr = "" + + mountArgs = append(mountArgs, mountFlags...) + mountArgsLogStr += strings.Join(mountFlags, " ") + + if len(fstype) > 0 { + mountArgs = append(mountArgs, "-t", fstype) + mountArgsLogStr += strings.Join(mountArgs, " ") diff --git a/vendor/k8s.io/mount-utils/mount_unsupported.go b/vendor/k8s.io/mount-utils/mount_unsupported.go index 0e8e683ae3..d2aac9a748 100644 --- a/vendor/k8s.io/mount-utils/mount_unsupported.go +++ b/vendor/k8s.io/mount-utils/mount_unsupported.go @@ -53,6 +53,11 @@ func (mounter *Mounter) MountSensitiveWithoutSystemd(source string, target strin return errUnsupported } + +// MountSensitiveWithoutSystemdWithMountFlags always returns an error on unsupported platforms +func (mounter *Mounter) MountSensitiveWithoutSystemdWithMountFlags(source string, target string, fstype string, options []string, sensitiveOptions []string, mountFlags []string) error { + return errUnsupported +} + // Unmount always returns an error on unsupported platforms func (mounter *Mounter) Unmount(target string) error { return errUnsupported diff --git a/vendor/k8s.io/mount-utils/mount_windows.go b/vendor/k8s.io/mount-utils/mount_windows.go index 29d3bbbd37..a893f52131 100644 --- a/vendor/k8s.io/mount-utils/mount_windows.go +++ b/vendor/k8s.io/mount-utils/mount_windows.go @@ -64,6 +64,12 @@ func (mounter *Mounter) MountSensitiveWithoutSystemd(source string, target strin return mounter.MountSensitive(source, target, fstype, options, sensitiveOptions /* sensitiveOptions */) } + +// MountSensitiveWithoutSystemdWithMountFlags is the same as MountSensitiveWithoutSystemd with additional mount flags. +// Windows does not support systemd mounts, so this function degrades to MountSensitive(). +func (mounter *Mounter) MountSensitiveWithoutSystemdWithMountFlags(source string, target string, fstype string, options []string, sensitiveOptions []string, mountFlags []string) error { + return mounter.MountSensitive(source, target, fstype, options, sensitiveOptions /* sensitiveOptions */) +} + // MountSensitive is the same as Mount() but this method allows // sensitiveOptions to be passed in a separate parameter from the normal // mount options and ensures the sensitiveOptions are never logged. This diff --git a/vendor/k8s.io/utils/internal/third_party/forked/golang/LICENSE b/vendor/k8s.io/utils/internal/third_party/forked/golang/LICENSE new file mode 100644 index 0000000000..7448756763 --- /dev/null +++ b/vendor/k8s.io/utils/internal/third_party/forked/golang/LICENSE @@ -0,0 +1,27 @@ +Copyright (c) 2012 The Go Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/k8s.io/utils/internal/third_party/forked/golang/PATENTS b/vendor/k8s.io/utils/internal/third_party/forked/golang/PATENTS new file mode 100644 index 0000000000..733099041f --- /dev/null +++ b/vendor/k8s.io/utils/internal/third_party/forked/golang/PATENTS @@ -0,0 +1,22 @@ +Additional IP Rights Grant (Patents) + +"This implementation" means the copyrightable works distributed by +Google as part of the Go project. + +Google hereby grants to You a perpetual, worldwide, non-exclusive, +no-charge, royalty-free, irrevocable (except as stated in this section) +patent license to make, have made, use, offer to sell, sell, import, +transfer and otherwise run, modify and propagate the contents of this +implementation of Go, where such license applies only to those patent +claims, both currently owned or controlled by Google and acquired in +the future, licensable by Google that are necessarily infringed by this +implementation of Go. This grant does not include claims that would be +infringed only as a consequence of further modification of this +implementation. If you or your agent or exclusive licensee institute or +order or agree to the institution of patent litigation against any +entity (including a cross-claim or counterclaim in a lawsuit) alleging +that this implementation of Go or any code incorporated within this +implementation of Go constitutes direct or contributory patent +infringement, or inducement of patent infringement, then any patent +rights granted to you under this License for this implementation of Go +shall terminate as of the date such litigation is filed. diff --git a/vendor/k8s.io/utils/internal/third_party/forked/golang/net/ip.go b/vendor/k8s.io/utils/internal/third_party/forked/golang/net/ip.go new file mode 100644 index 0000000000..4340b6e748 --- /dev/null +++ b/vendor/k8s.io/utils/internal/third_party/forked/golang/net/ip.go @@ -0,0 +1,236 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// IP address manipulations +// +// IPv4 addresses are 4 bytes; IPv6 addresses are 16 bytes. +// An IPv4 address can be converted to an IPv6 address by +// adding a canonical prefix (10 zeros, 2 0xFFs). +// This library accepts either size of byte slice but always +// returns 16-byte addresses. 
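
The compatibility gap this fork bridges is easiest to see with a leading-zero octet, which the standard library rejects from Go 1.17 on. Since the forked package is internal, callers go through the ParseIPSloppy/ParseCIDRSloppy wrappers added later in this diff; a quick hedged comparison:

    package main

    import (
    	"fmt"
    	"net"

    	netutils "k8s.io/utils/net"
    )

    func main() {
    	fmt.Println(net.ParseIP("010.1.2.3"))            // <nil> on Go >= 1.17
    	fmt.Println(netutils.ParseIPSloppy("010.1.2.3")) // 10.1.2.3, the pre-1.17 behavior
    }
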
+ +package net + +/////////////////////////////////////////////////////////////////////////////// +// NOTE: This file was forked because we need to maintain backwards-compatible +// IP parsing logic, which was changed in a correct but incompatible way in +// go-1.17. +// +// See https://issue.k8s.io/100895 +/////////////////////////////////////////////////////////////////////////////// + +import ( + stdnet "net" +) + +// +// Lean on the standard net lib as much as possible. +// + +type IP = stdnet.IP +type IPNet = stdnet.IPNet +type ParseError = stdnet.ParseError + +const IPv4len = stdnet.IPv4len +const IPv6len = stdnet.IPv6len + +var CIDRMask = stdnet.CIDRMask +var IPv4 = stdnet.IPv4 + +// Parse IPv4 address (d.d.d.d). +func parseIPv4(s string) IP { + var p [IPv4len]byte + for i := 0; i < IPv4len; i++ { + if len(s) == 0 { + // Missing octets. + return nil + } + if i > 0 { + if s[0] != '.' { + return nil + } + s = s[1:] + } + n, c, ok := dtoi(s) + if !ok || n > 0xFF { + return nil + } + // + // NOTE: This correct check was added for go-1.17, but is a + // backwards-incompatible change for kubernetes users, who might have + // stored data which uses these leading zeroes already. + // + // See https://issue.k8s.io/100895 + // + //if c > 1 && s[0] == '0' { + // // Reject non-zero components with leading zeroes. + // return nil + //} + s = s[c:] + p[i] = byte(n) + } + if len(s) != 0 { + return nil + } + return IPv4(p[0], p[1], p[2], p[3]) +} + +// parseIPv6 parses s as a literal IPv6 address described in RFC 4291 +// and RFC 5952. +func parseIPv6(s string) (ip IP) { + ip = make(IP, IPv6len) + ellipsis := -1 // position of ellipsis in ip + + // Might have leading ellipsis + if len(s) >= 2 && s[0] == ':' && s[1] == ':' { + ellipsis = 0 + s = s[2:] + // Might be only ellipsis + if len(s) == 0 { + return ip + } + } + + // Loop, parsing hex numbers followed by colon. + i := 0 + for i < IPv6len { + // Hex number. + n, c, ok := xtoi(s) + if !ok || n > 0xFFFF { + return nil + } + + // If followed by dot, might be in trailing IPv4. + if c < len(s) && s[c] == '.' { + if ellipsis < 0 && i != IPv6len-IPv4len { + // Not the right place. + return nil + } + if i+IPv4len > IPv6len { + // Not enough room. + return nil + } + ip4 := parseIPv4(s) + if ip4 == nil { + return nil + } + ip[i] = ip4[12] + ip[i+1] = ip4[13] + ip[i+2] = ip4[14] + ip[i+3] = ip4[15] + s = "" + i += IPv4len + break + } + + // Save this 16-bit chunk. + ip[i] = byte(n >> 8) + ip[i+1] = byte(n) + i += 2 + + // Stop at end of string. + s = s[c:] + if len(s) == 0 { + break + } + + // Otherwise must be followed by colon and more. + if s[0] != ':' || len(s) == 1 { + return nil + } + s = s[1:] + + // Look for ellipsis. + if s[0] == ':' { + if ellipsis >= 0 { // already have one + return nil + } + ellipsis = i + s = s[1:] + if len(s) == 0 { // can be at end + break + } + } + } + + // Must have used entire string. + if len(s) != 0 { + return nil + } + + // If didn't parse enough, expand ellipsis. + if i < IPv6len { + if ellipsis < 0 { + return nil + } + n := IPv6len - i + for j := i - 1; j >= ellipsis; j-- { + ip[j+n] = ip[j] + } + for j := ellipsis + n - 1; j >= ellipsis; j-- { + ip[j] = 0 + } + } else if ellipsis >= 0 { + // Ellipsis must represent at least one 0 group. + return nil + } + return ip +} + +// ParseIP parses s as an IP address, returning the result. +// The string s can be in IPv4 dotted decimal ("192.0.2.1"), IPv6 +// ("2001:db8::68"), or IPv4-mapped IPv6 ("::ffff:192.0.2.1") form. 
+// If s is not a valid textual representation of an IP address, +// ParseIP returns nil. +func ParseIP(s string) IP { + for i := 0; i < len(s); i++ { + switch s[i] { + case '.': + return parseIPv4(s) + case ':': + return parseIPv6(s) + } + } + return nil +} + +// ParseCIDR parses s as a CIDR notation IP address and prefix length, +// like "192.0.2.0/24" or "2001:db8::/32", as defined in +// RFC 4632 and RFC 4291. +// +// It returns the IP address and the network implied by the IP and +// prefix length. +// For example, ParseCIDR("192.0.2.1/24") returns the IP address +// 192.0.2.1 and the network 192.0.2.0/24. +func ParseCIDR(s string) (IP, *IPNet, error) { + i := indexByteString(s, '/') + if i < 0 { + return nil, nil, &ParseError{Type: "CIDR address", Text: s} + } + addr, mask := s[:i], s[i+1:] + iplen := IPv4len + ip := parseIPv4(addr) + if ip == nil { + iplen = IPv6len + ip = parseIPv6(addr) + } + n, i, ok := dtoi(mask) + if ip == nil || !ok || i != len(mask) || n < 0 || n > 8*iplen { + return nil, nil, &ParseError{Type: "CIDR address", Text: s} + } + m := CIDRMask(n, 8*iplen) + return ip, &IPNet{IP: ip.Mask(m), Mask: m}, nil +} + +// This is copied from go/src/internal/bytealg, which includes versions +// optimized for various platforms. Those optimizations are elided here so we +// don't have to maintain them. +func indexByteString(s string, c byte) int { + for i := 0; i < len(s); i++ { + if s[i] == c { + return i + } + } + return -1 +} diff --git a/vendor/k8s.io/utils/internal/third_party/forked/golang/net/parse.go b/vendor/k8s.io/utils/internal/third_party/forked/golang/net/parse.go new file mode 100644 index 0000000000..cc2fdcb958 --- /dev/null +++ b/vendor/k8s.io/utils/internal/third_party/forked/golang/net/parse.go @@ -0,0 +1,59 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Simple file i/o and string manipulation, to avoid +// depending on strconv and bufio and strings. + +package net + +/////////////////////////////////////////////////////////////////////////////// +// NOTE: This file was forked because it is used by other code that needed to +// be forked, not because it is used on its own. +/////////////////////////////////////////////////////////////////////////////// + +// Bigger than we need, not too big to worry about overflow +const big = 0xFFFFFF + +// Decimal to integer. +// Returns number, characters consumed, success. +func dtoi(s string) (n int, i int, ok bool) { + n = 0 + for i = 0; i < len(s) && '0' <= s[i] && s[i] <= '9'; i++ { + n = n*10 + int(s[i]-'0') + if n >= big { + return big, i, false + } + } + if i == 0 { + return 0, 0, false + } + return n, i, true +} + +// Hexadecimal to integer. +// Returns number, characters consumed, success. 
+func xtoi(s string) (n int, i int, ok bool) { + n = 0 + for i = 0; i < len(s); i++ { + if '0' <= s[i] && s[i] <= '9' { + n *= 16 + n += int(s[i] - '0') + } else if 'a' <= s[i] && s[i] <= 'f' { + n *= 16 + n += int(s[i]-'a') + 10 + } else if 'A' <= s[i] && s[i] <= 'F' { + n *= 16 + n += int(s[i]-'A') + 10 + } else { + break + } + if n >= big { + return 0, i, false + } + } + if i == 0 { + return 0, i, false + } + return n, i, true +} diff --git a/vendor/k8s.io/utils/net/ipnet.go b/vendor/k8s.io/utils/net/ipnet.go index c2e844bf5d..2f3ee37f0b 100644 --- a/vendor/k8s.io/utils/net/ipnet.go +++ b/vendor/k8s.io/utils/net/ipnet.go @@ -30,7 +30,7 @@ func ParseIPNets(specs ...string) (IPNetSet, error) { ipnetset := make(IPNetSet) for _, spec := range specs { spec = strings.TrimSpace(spec) - _, ipnet, err := net.ParseCIDR(spec) + _, ipnet, err := ParseCIDRSloppy(spec) if err != nil { return nil, err } @@ -128,7 +128,7 @@ type IPSet map[string]net.IP func ParseIPSet(items ...string) (IPSet, error) { ipset := make(IPSet) for _, item := range items { - ip := net.ParseIP(strings.TrimSpace(item)) + ip := ParseIPSloppy(strings.TrimSpace(item)) if ip == nil { return nil, fmt.Errorf("error parsing IP %q", item) } diff --git a/vendor/k8s.io/utils/net/net.go b/vendor/k8s.io/utils/net/net.go index 077e447276..b7c08e2e00 100644 --- a/vendor/k8s.io/utils/net/net.go +++ b/vendor/k8s.io/utils/net/net.go @@ -30,7 +30,7 @@ import ( func ParseCIDRs(cidrsString []string) ([]*net.IPNet, error) { cidrs := make([]*net.IPNet, 0, len(cidrsString)) for _, cidrString := range cidrsString { - _, cidr, err := net.ParseCIDR(cidrString) + _, cidr, err := ParseCIDRSloppy(cidrString) if err != nil { return nil, fmt.Errorf("failed to parse cidr value:%q with error:%v", cidrString, err) } @@ -71,7 +71,7 @@ func IsDualStackIPs(ips []net.IP) (bool, error) { func IsDualStackIPStrings(ips []string) (bool, error) { parsedIPs := make([]net.IP, 0, len(ips)) for _, ip := range ips { - parsedIP := net.ParseIP(ip) + parsedIP := ParseIPSloppy(ip) parsedIPs = append(parsedIPs, parsedIP) } return IsDualStackIPs(parsedIPs) @@ -120,14 +120,14 @@ func IsIPv6(netIP net.IP) bool { // IsIPv6String returns if ip is IPv6. func IsIPv6String(ip string) bool { - netIP := net.ParseIP(ip) + netIP := ParseIPSloppy(ip) return IsIPv6(netIP) } // IsIPv6CIDRString returns if cidr is IPv6. // This assumes cidr is a valid CIDR. func IsIPv6CIDRString(cidr string) bool { - ip, _, _ := net.ParseCIDR(cidr) + ip, _, _ := ParseCIDRSloppy(cidr) return IsIPv6(ip) } @@ -144,7 +144,7 @@ func IsIPv4(netIP net.IP) bool { // IsIPv4String returns if ip is IPv4. func IsIPv4String(ip string) bool { - netIP := net.ParseIP(ip) + netIP := ParseIPSloppy(ip) return IsIPv4(netIP) } @@ -157,7 +157,7 @@ func IsIPv4CIDR(cidr *net.IPNet) bool { // IsIPv4CIDRString returns if cidr is IPv4. // This assumes cidr is a valid CIDR. func IsIPv4CIDRString(cidr string) bool { - ip, _, _ := net.ParseCIDR(cidr) + ip, _, _ := ParseCIDRSloppy(cidr) return IsIPv4(ip) } diff --git a/vendor/k8s.io/utils/net/parse.go b/vendor/k8s.io/utils/net/parse.go new file mode 100644 index 0000000000..400d364d89 --- /dev/null +++ b/vendor/k8s.io/utils/net/parse.go @@ -0,0 +1,33 @@ +/* +Copyright 2021 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package net + +import ( + forkednet "k8s.io/utils/internal/third_party/forked/golang/net" +) + +// ParseIPSloppy is identical to Go's standard net.ParseIP, except that it allows +// leading '0' characters on numbers. Go used to allow this and then changed +// the behavior in 1.17. We're choosing to keep it for compat with potential +// stored values. +var ParseIPSloppy = forkednet.ParseIP + +// ParseCIDRSloppy is identical to Go's standard net.ParseCIDR, except that it allows +// leading '0' characters on numbers. Go used to allow this and then changed +// the behavior in 1.17. We're choosing to keep it for compat with potential +// stored values. +var ParseCIDRSloppy = forkednet.ParseCIDR diff --git a/vendor/k8s.io/utils/net/port.go b/vendor/k8s.io/utils/net/port.go index b4ff128e0d..7ac04f0dc9 100644 --- a/vendor/k8s.io/utils/net/port.go +++ b/vendor/k8s.io/utils/net/port.go @@ -71,7 +71,7 @@ func NewLocalPort(desc, ip string, ipFamily IPFamily, port int, protocol Protoco return nil, fmt.Errorf("Invalid IP family %s", ipFamily) } if ip != "" { - parsedIP := net.ParseIP(ip) + parsedIP := ParseIPSloppy(ip) if parsedIP == nil { return nil, fmt.Errorf("invalid ip address %s", ip) } diff --git a/vendor/k8s.io/utils/pointer/pointer.go b/vendor/k8s.io/utils/pointer/pointer.go index 1da6f6664a..f5802d2e81 100644 --- a/vendor/k8s.io/utils/pointer/pointer.go +++ b/vendor/k8s.io/utils/pointer/pointer.go @@ -19,6 +19,7 @@ package pointer import ( "fmt" "reflect" + "time" ) // AllPtrFieldsNil tests whether all pointer fields in a struct are nil. This is useful when, @@ -46,6 +47,24 @@ func AllPtrFieldsNil(obj interface{}) bool { return true } +// Int returns a pointer to an int +func Int(i int) *int { + return &i +} + +var IntPtr = Int // for back-compat + +// IntDeref dereferences the int ptr and returns it if not nil, or else +// returns def. +func IntDeref(ptr *int, def int) int { + if ptr != nil { + return *ptr + } + return def +} + +var IntPtrDerefOr = IntDeref // for back-compat + // Int32 returns a pointer to an int32. func Int32(i int32) *int32 { return &i @@ -166,7 +185,7 @@ func StringEqual(a, b *string) bool { return *a == *b } -// Float32 returns a pointer to the a float32. +// Float32 returns a pointer to a float32. func Float32(i float32) *float32 { return &i } @@ -196,7 +215,7 @@ func Float32Equal(a, b *float32) bool { return *a == *b } -// Float64 returns a pointer to the a float64. +// Float64 returns a pointer to a float64. func Float64(i float64) *float64 { return &i } @@ -225,3 +244,29 @@ func Float64Equal(a, b *float64) bool { } return *a == *b } + +// Duration returns a pointer to a time.Duration. +func Duration(d time.Duration) *time.Duration { + return &d +} + +// DurationDeref dereferences the time.Duration ptr and returns it if not nil, or else +// returns def. +func DurationDeref(ptr *time.Duration, def time.Duration) time.Duration { + if ptr != nil { + return *ptr + } + return def +} + +// DurationEqual returns true if both arguments are nil or both arguments +// dereference to the same value. 
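
The new Int and Duration helpers round out the pointer package's existing pattern: a constructor to take the address of a literal, a Deref to read an optional field with a default, and an Equal for nil-safe comparison (DurationEqual below). A short hedged sketch, where the config type is an assumption:

    package main

    import (
    	"fmt"
    	"time"

    	"k8s.io/utils/pointer"
    )

    // config is a hypothetical options struct; nil fields mean "use the default".
    type config struct {
    	workers *int
    	timeout *time.Duration
    }

    func main() {
    	c := config{workers: pointer.Int(4)} // timeout intentionally left nil
    	fmt.Println(pointer.IntDeref(c.workers, 1))                   // 4
    	fmt.Println(pointer.DurationDeref(c.timeout, 30*time.Second)) // 30s
    }
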
+func DurationEqual(a, b *time.Duration) bool { + if (a == nil) != (b == nil) { + return false + } + if a == nil { + return true + } + return *a == *b +} diff --git a/vendor/modules.txt b/vendor/modules.txt index 16ce1e126b..a72da997b5 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -481,7 +481,7 @@ github.com/kubernetes-csi/external-snapshotter/client/v4/clientset/versioned github.com/kubernetes-csi/external-snapshotter/client/v4/clientset/versioned/scheme github.com/kubernetes-csi/external-snapshotter/client/v4/clientset/versioned/typed/volumesnapshot/v1 github.com/kubernetes-csi/external-snapshotter/client/v4/clientset/versioned/typed/volumesnapshot/v1beta1 -# github.com/kubernetes-incubator/external-storage v0.20.4-openstorage-rc7 => github.com/libopenstorage/external-storage v0.20.4-openstorage-rc7 +# github.com/kubernetes-incubator/external-storage v0.20.4-openstorage-rc7 => github.com/libopenstorage/external-storage v0.20.4-openstorage-rc10 ## explicit github.com/kubernetes-incubator/external-storage/snapshot/pkg/apis/crd/v1 github.com/kubernetes-incubator/external-storage/snapshot/pkg/client @@ -691,20 +691,25 @@ github.com/pierrec/lz4/internal/xxh32 github.com/pkg/errors # github.com/pmezard/go-difflib v1.0.0 github.com/pmezard/go-difflib/difflib -# github.com/portworx/kdmp v0.4.1-0.20220710173715-5d42efc7d149 +# github.com/portworx/kdmp v0.4.1-0.20230523125352-aae43abbbd80 ## explicit github.com/portworx/kdmp/pkg/apis/kdmp github.com/portworx/kdmp/pkg/apis/kdmp/v1alpha1 github.com/portworx/kdmp/pkg/client/clientset/versioned github.com/portworx/kdmp/pkg/client/clientset/versioned/scheme github.com/portworx/kdmp/pkg/client/clientset/versioned/typed/kdmp/v1alpha1 +github.com/portworx/kdmp/pkg/controllers github.com/portworx/kdmp/pkg/controllers/dataexport +github.com/portworx/kdmp/pkg/controllers/resourceexport github.com/portworx/kdmp/pkg/drivers github.com/portworx/kdmp/pkg/drivers/driversinstance github.com/portworx/kdmp/pkg/drivers/kopiabackup github.com/portworx/kdmp/pkg/drivers/kopiadelete github.com/portworx/kdmp/pkg/drivers/kopiamaintenance github.com/portworx/kdmp/pkg/drivers/kopiarestore +github.com/portworx/kdmp/pkg/drivers/nfsbackup +github.com/portworx/kdmp/pkg/drivers/nfsdelete +github.com/portworx/kdmp/pkg/drivers/nfsrestore github.com/portworx/kdmp/pkg/drivers/resticbackup github.com/portworx/kdmp/pkg/drivers/resticrestore github.com/portworx/kdmp/pkg/drivers/rsync @@ -717,7 +722,7 @@ github.com/portworx/kdmp/pkg/version github.com/portworx/kvdb github.com/portworx/kvdb/common github.com/portworx/kvdb/mem -# github.com/portworx/px-object-controller v0.0.0-20220727220448-306ddfd28652 +# github.com/portworx/px-object-controller v0.0.0-20220804234424-40d3b8a84987 ## explicit github.com/portworx/px-object-controller/client/apis/objectservice/v1alpha1 github.com/portworx/px-object-controller/client/clientset/versioned @@ -731,7 +736,7 @@ github.com/portworx/px-object-controller/client/listers/objectservice/v1alpha1 github.com/portworx/px-object-controller/pkg/client github.com/portworx/px-object-controller/pkg/controller github.com/portworx/px-object-controller/pkg/utils -# github.com/portworx/sched-ops v1.20.4-rc1.0.20220401024625-dbc61a336f65 => github.com/portworx/sched-ops v1.20.4-rc1.0.20220714042759-8f183fe386ca +# github.com/portworx/sched-ops v1.20.4-rc1.0.20221102055014-b3a55a3df5c8 => github.com/portworx/sched-ops v1.20.4-rc1.0.20221102055014-b3a55a3df5c8 ## explicit github.com/portworx/sched-ops/k8s/admissionregistration 
github.com/portworx/sched-ops/k8s/apiextensions @@ -792,6 +797,7 @@ github.com/prometheus-operator/prometheus-operator/pkg/client/versioned/typed/mo # github.com/prometheus/client_golang v1.11.0 ## explicit github.com/prometheus/client_golang/prometheus +github.com/prometheus/client_golang/prometheus/collectors github.com/prometheus/client_golang/prometheus/internal github.com/prometheus/client_golang/prometheus/promhttp github.com/prometheus/client_golang/prometheus/testutil @@ -989,7 +995,7 @@ golang.org/x/text/transform golang.org/x/text/unicode/bidi golang.org/x/text/unicode/norm golang.org/x/text/width -# golang.org/x/time v0.0.0-20210220033141-f8bda1e9f3ba +# golang.org/x/time v0.0.0-20210723032227-1f47c861a9ac golang.org/x/time/rate # golang.org/x/tools v0.1.11-0.20220316014157-77aa08bb151a golang.org/x/tools/cmd/goimports @@ -1051,7 +1057,7 @@ google.golang.org/genproto/googleapis/rpc/code google.golang.org/genproto/googleapis/rpc/status google.golang.org/genproto/googleapis/type/expr google.golang.org/genproto/protobuf/field_mask -# google.golang.org/grpc v1.43.0 +# google.golang.org/grpc v1.48.0 ## explicit google.golang.org/grpc google.golang.org/grpc/attributes @@ -1063,6 +1069,7 @@ google.golang.org/grpc/balancer/grpclb/grpc_lb_v1 google.golang.org/grpc/balancer/grpclb/state google.golang.org/grpc/balancer/roundrobin google.golang.org/grpc/binarylog/grpc_binarylog_v1 +google.golang.org/grpc/channelz google.golang.org/grpc/codes google.golang.org/grpc/connectivity google.golang.org/grpc/credentials @@ -1081,6 +1088,7 @@ google.golang.org/grpc/encoding/proto google.golang.org/grpc/grpclog google.golang.org/grpc/internal google.golang.org/grpc/internal/backoff +google.golang.org/grpc/internal/balancer/gracefulswitch google.golang.org/grpc/internal/balancerload google.golang.org/grpc/internal/binarylog google.golang.org/grpc/internal/buffer @@ -1093,6 +1101,7 @@ google.golang.org/grpc/internal/grpcrand google.golang.org/grpc/internal/grpcsync google.golang.org/grpc/internal/grpcutil google.golang.org/grpc/internal/metadata +google.golang.org/grpc/internal/pretty google.golang.org/grpc/internal/resolver google.golang.org/grpc/internal/resolver/dns google.golang.org/grpc/internal/resolver/passthrough @@ -1112,8 +1121,6 @@ google.golang.org/grpc/serviceconfig google.golang.org/grpc/stats google.golang.org/grpc/status google.golang.org/grpc/tap -# google.golang.org/grpc/examples v0.0.0-20220715232852-f601dfac73c9 -## explicit # google.golang.org/protobuf v1.28.0 google.golang.org/protobuf/cmd/protoc-gen-go/internal_gengo google.golang.org/protobuf/compiler/protogen @@ -1166,9 +1173,9 @@ gopkg.in/square/go-jose.v2/jwt # gopkg.in/yaml.v2 v2.4.0 ## explicit gopkg.in/yaml.v2 -# gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b +# gopkg.in/yaml.v3 v3.0.1 gopkg.in/yaml.v3 -# helm.sh/helm/v3 v3.0.0-00010101000000-000000000000 => helm.sh/helm/v3 v3.6.0 +# helm.sh/helm/v3 v3.0.0-00010101000000-000000000000 => helm.sh/helm/v3 v3.6.1 helm.sh/helm/v3/internal/experimental/registry helm.sh/helm/v3/internal/fileutil helm.sh/helm/v3/internal/ignore @@ -1206,7 +1213,7 @@ helm.sh/helm/v3/pkg/storage helm.sh/helm/v3/pkg/storage/driver helm.sh/helm/v3/pkg/strvals helm.sh/helm/v3/pkg/time -# k8s.io/api v0.24.0 => k8s.io/api v0.21.4 +# k8s.io/api v0.24.0 => k8s.io/api v0.21.5 ## explicit k8s.io/api/admission/v1 k8s.io/api/admission/v1beta1 @@ -1254,7 +1261,7 @@ k8s.io/api/scheduling/v1beta1 k8s.io/api/storage/v1 k8s.io/api/storage/v1alpha1 k8s.io/api/storage/v1beta1 -# k8s.io/apiextensions-apiserver 
v0.21.4 => k8s.io/apiextensions-apiserver v0.21.4 +# k8s.io/apiextensions-apiserver v0.21.5 => k8s.io/apiextensions-apiserver v0.21.5 ## explicit k8s.io/apiextensions-apiserver/pkg/apis/apiextensions k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1 @@ -1266,7 +1273,7 @@ k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/typed/apiextension k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/typed/apiextensions/v1/fake k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/typed/apiextensions/v1beta1 k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/typed/apiextensions/v1beta1/fake -# k8s.io/apimachinery v0.24.3 => k8s.io/apimachinery v0.21.4 +# k8s.io/apimachinery v0.24.3 => k8s.io/apimachinery v0.21.5 ## explicit k8s.io/apimachinery/pkg/api/equality k8s.io/apimachinery/pkg/api/errors @@ -1326,19 +1333,19 @@ k8s.io/apimachinery/pkg/watch k8s.io/apimachinery/third_party/forked/golang/json k8s.io/apimachinery/third_party/forked/golang/netutil k8s.io/apimachinery/third_party/forked/golang/reflect -# k8s.io/apiserver v0.21.4 => k8s.io/apiserver v0.21.4 +# k8s.io/apiserver v0.21.5 => k8s.io/apiserver v0.21.5 ## explicit k8s.io/apiserver/pkg/authentication/serviceaccount k8s.io/apiserver/pkg/authentication/user k8s.io/apiserver/pkg/endpoints/deprecation k8s.io/apiserver/pkg/features k8s.io/apiserver/pkg/util/feature -# k8s.io/cli-runtime v0.21.4 => k8s.io/cli-runtime v0.21.4 +# k8s.io/cli-runtime v0.21.5 => k8s.io/cli-runtime v0.21.5 ## explicit k8s.io/cli-runtime/pkg/genericclioptions k8s.io/cli-runtime/pkg/printers k8s.io/cli-runtime/pkg/resource -# k8s.io/client-go v12.0.0+incompatible => k8s.io/client-go v0.21.4 +# k8s.io/client-go v12.0.0+incompatible => k8s.io/client-go v0.21.5 ## explicit k8s.io/client-go/applyconfigurations/admissionregistration/v1 k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1 @@ -1624,9 +1631,9 @@ k8s.io/client-go/util/homedir k8s.io/client-go/util/jsonpath k8s.io/client-go/util/keyutil k8s.io/client-go/util/workqueue -# k8s.io/cloud-provider v0.21.4 => k8s.io/cloud-provider v0.21.4 +# k8s.io/cloud-provider v0.21.5 => k8s.io/cloud-provider v0.21.5 k8s.io/cloud-provider -# k8s.io/code-generator v0.21.4 => k8s.io/code-generator v0.21.4 +# k8s.io/code-generator v0.22.1 => k8s.io/code-generator v0.21.5 ## explicit k8s.io/code-generator k8s.io/code-generator/cmd/client-gen @@ -1661,14 +1668,14 @@ k8s.io/code-generator/cmd/set-gen k8s.io/code-generator/pkg/namer k8s.io/code-generator/pkg/util k8s.io/code-generator/third_party/forked/golang/reflect -# k8s.io/component-base v0.22.0 => k8s.io/component-base v0.21.4 +# k8s.io/component-base v0.22.0 => k8s.io/component-base v0.21.5 k8s.io/component-base/config k8s.io/component-base/config/v1alpha1 k8s.io/component-base/featuregate k8s.io/component-base/metrics k8s.io/component-base/metrics/legacyregistry k8s.io/component-base/version -# k8s.io/component-helpers v0.21.4 => k8s.io/component-helpers v0.21.4 +# k8s.io/component-helpers v0.24.0 => k8s.io/component-helpers v0.24.0 ## explicit k8s.io/component-helpers/scheduling/corev1 k8s.io/component-helpers/scheduling/corev1/nodeaffinity @@ -1686,7 +1693,7 @@ k8s.io/gengo/parser k8s.io/gengo/types # k8s.io/klog v1.0.0 k8s.io/klog -# k8s.io/klog/v2 v2.60.1 => k8s.io/klog/v2 v2.4.0 +# k8s.io/klog/v2 v2.60.1 => k8s.io/klog/v2 v2.8.0 k8s.io/klog/v2 # k8s.io/kube-openapi v0.0.0-20210305001622-591a79e4bda7 k8s.io/kube-openapi/cmd/openapi-gen/args @@ -1697,10 +1704,10 @@ k8s.io/kube-openapi/pkg/util/proto 
k8s.io/kube-openapi/pkg/util/proto/testing k8s.io/kube-openapi/pkg/util/proto/validation k8s.io/kube-openapi/pkg/util/sets -# k8s.io/kube-scheduler v0.0.0 => k8s.io/kube-scheduler v0.21.4 +# k8s.io/kube-scheduler v0.0.0 => k8s.io/kube-scheduler v0.21.5 ## explicit k8s.io/kube-scheduler/extender/v1 -# k8s.io/kubectl v0.21.4 => k8s.io/kubectl v0.21.4 +# k8s.io/kubectl v0.21.5 => k8s.io/kubectl v0.21.5 ## explicit k8s.io/kubectl/pkg/cmd/testing k8s.io/kubectl/pkg/cmd/util @@ -1712,7 +1719,7 @@ k8s.io/kubectl/pkg/util/openapi/validation k8s.io/kubectl/pkg/util/templates k8s.io/kubectl/pkg/util/term k8s.io/kubectl/pkg/validation -# k8s.io/kubernetes v1.21.4 => k8s.io/kubernetes v1.21.4 +# k8s.io/kubernetes v1.21.5 => k8s.io/kubernetes v1.21.5 ## explicit k8s.io/kubernetes/pkg/api/legacyscheme k8s.io/kubernetes/pkg/api/v1/pod @@ -1750,12 +1757,13 @@ k8s.io/kubernetes/pkg/volume/util/recyclerclient k8s.io/kubernetes/pkg/volume/util/subpath k8s.io/kubernetes/pkg/volume/util/types k8s.io/kubernetes/pkg/volume/util/volumepathhandler -# k8s.io/mount-utils v0.0.0 => k8s.io/mount-utils v0.21.4 +# k8s.io/mount-utils v0.0.0 => k8s.io/mount-utils v0.21.5 k8s.io/mount-utils -# k8s.io/utils v0.0.0-20210527160623-6fdb442a123b +# k8s.io/utils v0.0.0-20220210201930-3a6ce19ff2f9 k8s.io/utils/buffer k8s.io/utils/exec k8s.io/utils/integer +k8s.io/utils/internal/third_party/forked/golang/net k8s.io/utils/io k8s.io/utils/keymutex k8s.io/utils/net @@ -1767,7 +1775,7 @@ k8s.io/utils/trace # sigs.k8s.io/cluster-api v0.2.11 sigs.k8s.io/cluster-api/errors sigs.k8s.io/cluster-api/pkg/apis/deprecated/v1alpha1 -# sigs.k8s.io/controller-runtime v0.9.0 => sigs.k8s.io/controller-runtime v0.9.0 +# sigs.k8s.io/controller-runtime v0.9.7 => sigs.k8s.io/controller-runtime v0.9.6 ## explicit sigs.k8s.io/controller-runtime/pkg/cache sigs.k8s.io/controller-runtime/pkg/cache/internal @@ -1900,38 +1908,39 @@ sigs.k8s.io/yaml # github.com/googleapis/gnostic => github.com/googleapis/gnostic v0.4.1 # github.com/heptio/velero => github.com/heptio/velero v1.0.0 # github.com/kubernetes-csi/external-snapshotter/client/v4 => github.com/kubernetes-csi/external-snapshotter/client/v4 v4.0.0 -# github.com/kubernetes-incubator/external-storage => github.com/libopenstorage/external-storage v0.20.4-openstorage-rc7 +# github.com/kubernetes-incubator/external-storage => github.com/libopenstorage/external-storage v0.20.4-openstorage-rc10 +# github.com/kubernetes-incubator/external-storage v0.20.4-openstorage-rc10 => github.com/libopenstorage/external-storage v0.20.4-openstorage-rc10 # github.com/libopenstorage/autopilot-api => github.com/libopenstorage/autopilot-api v0.6.1-0.20210301232050-ca2633c6e114 # github.com/libopenstorage/openstorage => github.com/libopenstorage/openstorage v1.0.1-0.20220707215604-afbea03c04c5 -# github.com/portworx/sched-ops => github.com/portworx/sched-ops v1.20.4-rc1.0.20220714042759-8f183fe386ca +# github.com/portworx/sched-ops => github.com/portworx/sched-ops v1.20.4-rc1.0.20221102055014-b3a55a3df5c8 # github.com/portworx/torpedo => github.com/portworx/torpedo v0.0.0-20220714042817-25f6ab6dc5d1 # gopkg.in/fsnotify.v1 v1.4.7 => github.com/fsnotify/fsnotify v1.4.7 -# helm.sh/helm/v3 => helm.sh/helm/v3 v3.6.0 -# k8s.io/api => k8s.io/api v0.21.4 -# k8s.io/apiextensions-apiserver => k8s.io/apiextensions-apiserver v0.21.4 -# k8s.io/apimachinery => k8s.io/apimachinery v0.21.4 -# k8s.io/apiserver => k8s.io/apiserver v0.21.4 -# k8s.io/cli-runtime => k8s.io/cli-runtime v0.21.4 -# k8s.io/client-go => k8s.io/client-go v0.21.4 -# 
k8s.io/cloud-provider => k8s.io/cloud-provider v0.21.4 -# k8s.io/cluster-bootstrap => k8s.io/cluster-bootstrap v0.21.4 -# k8s.io/code-generator => k8s.io/code-generator v0.21.4 -# k8s.io/component-base => k8s.io/component-base v0.21.4 -# k8s.io/component-helpers => k8s.io/component-helpers v0.21.4 -# k8s.io/controller-manager => k8s.io/controller-manager v0.21.4 -# k8s.io/cri-api => k8s.io/cri-api v0.21.4 -# k8s.io/csi-translation-lib => k8s.io/csi-translation-lib v0.21.4 -# k8s.io/klog/v2 => k8s.io/klog/v2 v2.4.0 -# k8s.io/kube-aggregator => k8s.io/kube-aggregator v0.21.4 -# k8s.io/kube-controller-manager => k8s.io/kube-controller-manager v0.21.4 -# k8s.io/kube-proxy => k8s.io/kube-proxy v0.21.4 -# k8s.io/kube-scheduler => k8s.io/kube-scheduler v0.21.4 -# k8s.io/kubectl => k8s.io/kubectl v0.21.4 -# k8s.io/kubelet => k8s.io/kubelet v0.21.4 -# k8s.io/kubernetes => k8s.io/kubernetes v1.21.4 -# k8s.io/legacy-cloud-providers => k8s.io/legacy-cloud-providers v0.21.4 -# k8s.io/metrics => k8s.io/metrics v0.21.4 -# k8s.io/mount-utils => k8s.io/mount-utils v0.21.4 -# k8s.io/sample-apiserver => k8s.io/sample-apiserver v0.21.4 -# sigs.k8s.io/controller-runtime => sigs.k8s.io/controller-runtime v0.9.0 +# helm.sh/helm/v3 => helm.sh/helm/v3 v3.6.1 +# k8s.io/api => k8s.io/api v0.21.5 +# k8s.io/apiextensions-apiserver => k8s.io/apiextensions-apiserver v0.21.5 +# k8s.io/apimachinery => k8s.io/apimachinery v0.21.5 +# k8s.io/apiserver => k8s.io/apiserver v0.21.5 +# k8s.io/cli-runtime => k8s.io/cli-runtime v0.21.5 +# k8s.io/client-go => k8s.io/client-go v0.21.5 +# k8s.io/cloud-provider => k8s.io/cloud-provider v0.21.5 +# k8s.io/cluster-bootstrap => k8s.io/cluster-bootstrap v0.21.5 +# k8s.io/code-generator => k8s.io/code-generator v0.21.5 +# k8s.io/component-base => k8s.io/component-base v0.21.5 +# k8s.io/component-helpers => k8s.io/component-helpers v0.24.0 +# k8s.io/controller-manager => k8s.io/controller-manager v0.21.5 +# k8s.io/cri-api => k8s.io/cri-api v0.21.5 +# k8s.io/csi-translation-lib => k8s.io/csi-translation-lib v0.21.5 +# k8s.io/klog/v2 => k8s.io/klog/v2 v2.8.0 +# k8s.io/kube-aggregator => k8s.io/kube-aggregator v0.21.5 +# k8s.io/kube-controller-manager => k8s.io/kube-controller-manager v0.21.5 +# k8s.io/kube-proxy => k8s.io/kube-proxy v0.21.5 +# k8s.io/kube-scheduler => k8s.io/kube-scheduler v0.21.5 +# k8s.io/kubectl => k8s.io/kubectl v0.21.5 +# k8s.io/kubelet => k8s.io/kubelet v0.21.5 +# k8s.io/kubernetes => k8s.io/kubernetes v1.21.5 +# k8s.io/legacy-cloud-providers => k8s.io/legacy-cloud-providers v0.21.5 +# k8s.io/metrics => k8s.io/metrics v0.21.5 +# k8s.io/mount-utils => k8s.io/mount-utils v0.21.5 +# k8s.io/sample-apiserver => k8s.io/sample-apiserver v0.21.5 +# sigs.k8s.io/controller-runtime => sigs.k8s.io/controller-runtime v0.9.6 # sigs.k8s.io/sig-storage-lib-external-provisioner/v6 => sigs.k8s.io/sig-storage-lib-external-provisioner/v6 v6.3.0 diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/cache/cache.go b/vendor/sigs.k8s.io/controller-runtime/pkg/cache/cache.go index dee523fe23..6862fd62bd 100644 --- a/vendor/sigs.k8s.io/controller-runtime/pkg/cache/cache.go +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/cache/cache.go @@ -69,7 +69,7 @@ type Informers interface { client.FieldIndexer } -// Informer - informer allows you interact with the underlying informer +// Informer - informer allows you interact with the underlying informer. type Informer interface { // AddEventHandler adds an event handler to the shared informer using the shared informer's resync // period. 
Events to a single handler are delivered sequentially, but there is no coordination @@ -82,14 +82,14 @@ type Informer interface { // AddIndexers adds more indexers to this store. If you call this after you already have data // in the store, the results are undefined. AddIndexers(indexers toolscache.Indexers) error - //HasSynced return true if the informers underlying store has synced + // HasSynced return true if the informers underlying store has synced. HasSynced() bool } -// SelectorsByObject associate a client.Object's GVK to a field/label selector +// SelectorsByObject associate a client.Object's GVK to a field/label selector. type SelectorsByObject map[client.Object]internal.Selector -// Options are the optional arguments for creating a new InformersMap object +// Options are the optional arguments for creating a new InformersMap object. type Options struct { // Scheme is the scheme to use for mapping objects to GroupVersionKinds Scheme *runtime.Scheme diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/cache/informer_cache.go b/vendor/sigs.k8s.io/controller-runtime/pkg/cache/informer_cache.go index 8ec3b921d9..90647c8e33 100644 --- a/vendor/sigs.k8s.io/controller-runtime/pkg/cache/informer_cache.go +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/cache/informer_cache.go @@ -50,7 +50,7 @@ type informerCache struct { *internal.InformersMap } -// Get implements Reader +// Get implements Reader. func (ip *informerCache) Get(ctx context.Context, key client.ObjectKey, out client.Object) error { gvk, err := apiutil.GVKForObject(out, ip.Scheme) if err != nil { @@ -68,9 +68,8 @@ func (ip *informerCache) Get(ctx context.Context, key client.ObjectKey, out clie return cache.Reader.Get(ctx, key, out) } -// List implements Reader +// List implements Reader. func (ip *informerCache) List(ctx context.Context, out client.ObjectList, opts ...client.ListOption) error { - gvk, cacheTypeObj, err := ip.objectTypeForListObject(out) if err != nil { return err @@ -130,7 +129,7 @@ func (ip *informerCache) objectTypeForListObject(list client.ObjectList) (*schem return &gvk, cacheTypeObj, nil } -// GetInformerForKind returns the informer for the GroupVersionKind +// GetInformerForKind returns the informer for the GroupVersionKind. func (ip *informerCache) GetInformerForKind(ctx context.Context, gvk schema.GroupVersionKind) (Informer, error) { // Map the gvk to an object obj, err := ip.Scheme.New(gvk) @@ -145,7 +144,7 @@ func (ip *informerCache) GetInformerForKind(ctx context.Context, gvk schema.Grou return i.Informer, err } -// GetInformer returns the informer for the obj +// GetInformer returns the informer for the obj. func (ip *informerCache) GetInformer(ctx context.Context, obj client.Object) (Informer, error) { gvk, err := apiutil.GVKForObject(obj, ip.Scheme) if err != nil { @@ -160,7 +159,7 @@ func (ip *informerCache) GetInformer(ctx context.Context, obj client.Object) (In } // NeedLeaderElection implements the LeaderElectionRunnable interface -// to indicate that this can be started without requiring the leader lock +// to indicate that this can be started without requiring the leader lock. 
func (ip *informerCache) NeedLeaderElection() bool { return false } diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/cache/internal/cache_reader.go b/vendor/sigs.k8s.io/controller-runtime/pkg/cache/internal/cache_reader.go index bd546b934a..5a495693ed 100644 --- a/vendor/sigs.k8s.io/controller-runtime/pkg/cache/internal/cache_reader.go +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/cache/internal/cache_reader.go @@ -21,7 +21,7 @@ import ( "fmt" "reflect" - "k8s.io/apimachinery/pkg/api/errors" + apierrors "k8s.io/apimachinery/pkg/api/errors" apimeta "k8s.io/apimachinery/pkg/api/meta" "k8s.io/apimachinery/pkg/fields" "k8s.io/apimachinery/pkg/labels" @@ -33,10 +33,10 @@ import ( "sigs.k8s.io/controller-runtime/pkg/client" ) -// CacheReader is a client.Reader +// CacheReader is a client.Reader. var _ client.Reader = &CacheReader{} -// CacheReader wraps a cache.Index to implement the client.CacheReader interface for a single type +// CacheReader wraps a cache.Index to implement the client.CacheReader interface for a single type. type CacheReader struct { // indexer is the underlying indexer wrapped by this cache. indexer cache.Indexer @@ -48,7 +48,7 @@ type CacheReader struct { scopeName apimeta.RESTScopeName } -// Get checks the indexer for the object and writes a copy of it if found +// Get checks the indexer for the object and writes a copy of it if found. func (c *CacheReader) Get(_ context.Context, key client.ObjectKey, out client.Object) error { if c.scopeName == apimeta.RESTScopeNameRoot { key.Namespace = "" @@ -64,7 +64,7 @@ func (c *CacheReader) Get(_ context.Context, key client.ObjectKey, out client.Ob // Not found, return an error if !exists { // Resource gets transformed into Kind in the error anyway, so this is fine - return errors.NewNotFound(schema.GroupResource{ + return apierrors.NewNotFound(schema.GroupResource{ Group: c.groupVersionKind.Group, Resource: c.groupVersionKind.Kind, }, key.Name) @@ -93,7 +93,7 @@ func (c *CacheReader) Get(_ context.Context, key client.ObjectKey, out client.Ob return nil } -// List lists items out of the indexer and writes them to out +// List lists items out of the indexer and writes them to out. func (c *CacheReader) List(_ context.Context, out client.ObjectList, opts ...client.ListOption) error { var objs []interface{} var err error @@ -101,7 +101,8 @@ func (c *CacheReader) List(_ context.Context, out client.ObjectList, opts ...cli listOpts := client.ListOptions{} listOpts.ApplyOptions(opts) - if listOpts.FieldSelector != nil { + switch { + case listOpts.FieldSelector != nil: // TODO(directxman12): support more complicated field selectors by // combining multiple indices, GetIndexers, etc field, val, requiresExact := requiresExactMatch(listOpts.FieldSelector) @@ -112,9 +113,9 @@ func (c *CacheReader) List(_ context.Context, out client.ObjectList, opts ...cli // namespaced index key. Otherwise, ask for the non-namespaced variant by using the fake "all namespaces" // namespace. 
objs, err = c.indexer.ByIndex(FieldIndexName(field), KeyToNamespacedKey(listOpts.Namespace, val)) - } else if listOpts.Namespace != "" { + case listOpts.Namespace != "": objs, err = c.indexer.ByIndex(cache.NamespaceIndex, listOpts.Namespace) - } else { + default: objs = c.indexer.List() } if err != nil { @@ -186,7 +187,7 @@ func FieldIndexName(field string) string { return "field:" + field } -// noNamespaceNamespace is used as the "namespace" when we want to list across all namespaces +// noNamespaceNamespace is used as the "namespace" when we want to list across all namespaces. const allNamespacesNamespace = "__all_namespaces" // KeyToNamespacedKey prefixes the given index key with a namespace diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/cache/internal/deleg_map.go b/vendor/sigs.k8s.io/controller-runtime/pkg/cache/internal/deleg_map.go index 2242d9b674..841f1657eb 100644 --- a/vendor/sigs.k8s.io/controller-runtime/pkg/cache/internal/deleg_map.go +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/cache/internal/deleg_map.go @@ -52,7 +52,6 @@ func NewInformersMap(config *rest.Config, namespace string, selectors SelectorsByGVK, ) *InformersMap { - return &InformersMap{ structured: newStructuredInformersMap(config, scheme, mapper, resync, namespace, selectors), unstructured: newUnstructuredInformersMap(config, scheme, mapper, resync, namespace, selectors), diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/cache/internal/informers_map.go b/vendor/sigs.k8s.io/controller-runtime/pkg/cache/internal/informers_map.go index 5c9bd0b0a0..007a28e727 100644 --- a/vendor/sigs.k8s.io/controller-runtime/pkg/cache/internal/informers_map.go +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/cache/internal/informers_map.go @@ -37,7 +37,11 @@ import ( "sigs.k8s.io/controller-runtime/pkg/client/apiutil" ) -// clientListWatcherFunc knows how to create a ListWatcher +func init() { + rand.Seed(time.Now().UnixNano()) +} + +// clientListWatcherFunc knows how to create a ListWatcher. type createListWatcherFunc func(gvk schema.GroupVersionKind, ip *specificInformersMap) (*cache.ListWatch, error) // newSpecificInformersMap returns a new specificInformersMap (like @@ -65,7 +69,7 @@ func newSpecificInformersMap(config *rest.Config, return ip } -// MapEntry contains the cached data for an Informer +// MapEntry contains the cached data for an Informer. 
type MapEntry struct { // Informer is the cached informer Informer cache.SharedIndexInformer @@ -270,8 +274,9 @@ func createStructuredListWatch(gvk schema.GroupVersionKind, ip *specificInformer ListFunc: func(opts metav1.ListOptions) (runtime.Object, error) { ip.selectors[gvk].ApplyToList(&opts) res := listObj.DeepCopyObject() - isNamespaceScoped := ip.namespace != "" && mapping.Scope.Name() != meta.RESTScopeNameRoot - err := client.Get().NamespaceIfScoped(ip.namespace, isNamespaceScoped).Resource(mapping.Resource.Resource).VersionedParams(&opts, ip.paramCodec).Do(ctx).Into(res) + namespace := restrictNamespaceBySelector(ip.namespace, ip.selectors[gvk]) + isNamespaceScoped := namespace != "" && mapping.Scope.Name() != meta.RESTScopeNameRoot + err := client.Get().NamespaceIfScoped(namespace, isNamespaceScoped).Resource(mapping.Resource.Resource).VersionedParams(&opts, ip.paramCodec).Do(ctx).Into(res) return res, err }, // Setup the watch function @@ -279,8 +284,9 @@ func createStructuredListWatch(gvk schema.GroupVersionKind, ip *specificInformer ip.selectors[gvk].ApplyToList(&opts) // Watch needs to be set to true separately opts.Watch = true - isNamespaceScoped := ip.namespace != "" && mapping.Scope.Name() != meta.RESTScopeNameRoot - return client.Get().NamespaceIfScoped(ip.namespace, isNamespaceScoped).Resource(mapping.Resource.Resource).VersionedParams(&opts, ip.paramCodec).Watch(ctx) + namespace := restrictNamespaceBySelector(ip.namespace, ip.selectors[gvk]) + isNamespaceScoped := namespace != "" && mapping.Scope.Name() != meta.RESTScopeNameRoot + return client.Get().NamespaceIfScoped(namespace, isNamespaceScoped).Resource(mapping.Resource.Resource).VersionedParams(&opts, ip.paramCodec).Watch(ctx) }, }, nil } @@ -309,8 +315,9 @@ func createUnstructuredListWatch(gvk schema.GroupVersionKind, ip *specificInform return &cache.ListWatch{ ListFunc: func(opts metav1.ListOptions) (runtime.Object, error) { ip.selectors[gvk].ApplyToList(&opts) - if ip.namespace != "" && mapping.Scope.Name() != meta.RESTScopeNameRoot { - return dynamicClient.Resource(mapping.Resource).Namespace(ip.namespace).List(ctx, opts) + namespace := restrictNamespaceBySelector(ip.namespace, ip.selectors[gvk]) + if namespace != "" && mapping.Scope.Name() != meta.RESTScopeNameRoot { + return dynamicClient.Resource(mapping.Resource).Namespace(namespace).List(ctx, opts) } return dynamicClient.Resource(mapping.Resource).List(ctx, opts) }, @@ -319,8 +326,9 @@ func createUnstructuredListWatch(gvk schema.GroupVersionKind, ip *specificInform ip.selectors[gvk].ApplyToList(&opts) // Watch needs to be set to true separately opts.Watch = true - if ip.namespace != "" && mapping.Scope.Name() != meta.RESTScopeNameRoot { - return dynamicClient.Resource(mapping.Resource).Namespace(ip.namespace).Watch(ctx, opts) + namespace := restrictNamespaceBySelector(ip.namespace, ip.selectors[gvk]) + if namespace != "" && mapping.Scope.Name() != meta.RESTScopeNameRoot { + return dynamicClient.Resource(mapping.Resource).Namespace(namespace).Watch(ctx, opts) } return dynamicClient.Resource(mapping.Resource).Watch(ctx, opts) }, @@ -354,8 +362,9 @@ func createMetadataListWatch(gvk schema.GroupVersionKind, ip *specificInformersM return &cache.ListWatch{ ListFunc: func(opts metav1.ListOptions) (runtime.Object, error) { ip.selectors[gvk].ApplyToList(&opts) - if ip.namespace != "" && mapping.Scope.Name() != meta.RESTScopeNameRoot { - return client.Resource(mapping.Resource).Namespace(ip.namespace).List(ctx, opts) + namespace := 
restrictNamespaceBySelector(ip.namespace, ip.selectors[gvk]) + if namespace != "" && mapping.Scope.Name() != meta.RESTScopeNameRoot { + return client.Resource(mapping.Resource).Namespace(namespace).List(ctx, opts) } return client.Resource(mapping.Resource).List(ctx, opts) }, @@ -364,8 +373,9 @@ func createMetadataListWatch(gvk schema.GroupVersionKind, ip *specificInformersM ip.selectors[gvk].ApplyToList(&opts) // Watch needs to be set to true separately opts.Watch = true - if ip.namespace != "" && mapping.Scope.Name() != meta.RESTScopeNameRoot { - return client.Resource(mapping.Resource).Namespace(ip.namespace).Watch(ctx, opts) + namespace := restrictNamespaceBySelector(ip.namespace, ip.selectors[gvk]) + if namespace != "" && mapping.Scope.Name() != meta.RESTScopeNameRoot { + return client.Resource(mapping.Resource).Namespace(namespace).Watch(ctx, opts) } return client.Resource(mapping.Resource).Watch(ctx, opts) }, @@ -378,7 +388,27 @@ func createMetadataListWatch(gvk schema.GroupVersionKind, ip *specificInformersM func resyncPeriod(resync time.Duration) func() time.Duration { return func() time.Duration { // the factor will fall into [0.9, 1.1) - factor := rand.Float64()/5.0 + 0.9 + factor := rand.Float64()/5.0 + 0.9 //nolint:gosec return time.Duration(float64(resync.Nanoseconds()) * factor) } } + +// restrictNamespaceBySelector returns either a global restriction for all ListWatches +// if not default/empty, or the namespace that a ListWatch for the specific resource +// is restricted to, based on a specified field selector for metadata.namespace field. +func restrictNamespaceBySelector(namespaceOpt string, s Selector) string { + if namespaceOpt != "" { + // namespace is already restricted + return namespaceOpt + } + fieldSelector := s.Field + if fieldSelector == nil || fieldSelector.Empty() { + return "" + } + // check whether a selector includes the namespace field + value, found := fieldSelector.RequiresExactMatch("metadata.namespace") + if found { + return value + } + return "" +} diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/cache/internal/selector.go b/vendor/sigs.k8s.io/controller-runtime/pkg/cache/internal/selector.go index 0e872eaf02..cd9c580008 100644 --- a/vendor/sigs.k8s.io/controller-runtime/pkg/cache/internal/selector.go +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/cache/internal/selector.go @@ -23,16 +23,16 @@ import ( "k8s.io/apimachinery/pkg/runtime/schema" ) -// SelectorsByGVK associate a GroupVersionKind to a field/label selector +// SelectorsByGVK associate a GroupVersionKind to a field/label selector. type SelectorsByGVK map[schema.GroupVersionKind]Selector -// Selector specify the label/field selector to fill in ListOptions +// Selector specify the label/field selector to fill in ListOptions. type Selector struct { Label labels.Selector Field fields.Selector } -// ApplyToList fill in ListOptions LabelSelector and FieldSelector if needed +// ApplyToList fill in ListOptions LabelSelector and FieldSelector if needed. 
func (s Selector) ApplyToList(listOpts *metav1.ListOptions) { if s.Label != nil { listOpts.LabelSelector = s.Label.String() diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/cache/multi_namespace_cache.go b/vendor/sigs.k8s.io/controller-runtime/pkg/cache/multi_namespace_cache.go index f3520bf8d7..dc29651b01 100644 --- a/vendor/sigs.k8s.io/controller-runtime/pkg/cache/multi_namespace_cache.go +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/cache/multi_namespace_cache.go @@ -22,7 +22,6 @@ import ( "time" corev1 "k8s.io/api/core/v1" - "k8s.io/apimachinery/pkg/api/meta" apimeta "k8s.io/apimachinery/pkg/api/meta" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime/schema" @@ -32,10 +31,10 @@ import ( "sigs.k8s.io/controller-runtime/pkg/internal/objectutil" ) -// NewCacheFunc - Function for creating a new cache from the options and a rest config +// NewCacheFunc - Function for creating a new cache from the options and a rest config. type NewCacheFunc func(config *rest.Config, opts Options) (Cache, error) -// a new global namespaced cache to handle cluster scoped resources +// a new global namespaced cache to handle cluster scoped resources. const globalCache = "_cluster-scope" // MultiNamespacedCacheBuilder - Builder function to create a new multi-namespaced cache. @@ -78,13 +77,13 @@ func MultiNamespacedCacheBuilder(namespaces []string) NewCacheFunc { type multiNamespaceCache struct { namespaceToCache map[string]Cache Scheme *runtime.Scheme - RESTMapper meta.RESTMapper + RESTMapper apimeta.RESTMapper clusterCache Cache } var _ Cache = &multiNamespaceCache{} -// Methods for multiNamespaceCache to conform to the Informers interface +// Methods for multiNamespaceCache to conform to the Informers interface. func (c *multiNamespaceCache) GetInformer(ctx context.Context, obj client.Object) (Informer, error) { informers := map[string]Informer{} @@ -186,7 +185,7 @@ func (c *multiNamespaceCache) WaitForCacheSync(ctx context.Context) bool { func (c *multiNamespaceCache) IndexField(ctx context.Context, obj client.Object, field string, extractValue client.IndexerFunc) error { isNamespaced, err := objectutil.IsAPINamespaced(obj, c.Scheme, c.RESTMapper) if err != nil { - return nil + return nil //nolint:nilerr } if !isNamespaced { @@ -242,7 +241,7 @@ func (c *multiNamespaceCache) List(ctx context.Context, list client.ObjectList, return cache.List(ctx, list, opts...) } - listAccessor, err := meta.ListAccessor(list) + listAccessor, err := apimeta.ListAccessor(list) if err != nil { return err } @@ -265,7 +264,7 @@ func (c *multiNamespaceCache) List(ctx context.Context, list client.ObjectList, if err != nil { return err } - accessor, err := meta.ListAccessor(listObj) + accessor, err := apimeta.ListAccessor(listObj) if err != nil { return fmt.Errorf("object: %T must be a list type", list) } @@ -289,28 +288,28 @@ func (c *multiNamespaceCache) List(ctx context.Context, list client.ObjectList, return apimeta.SetList(list, allItems) } -// multiNamespaceInformer knows how to handle interacting with the underlying informer across multiple namespaces +// multiNamespaceInformer knows how to handle interacting with the underlying informer across multiple namespaces. type multiNamespaceInformer struct { namespaceToInformer map[string]Informer } var _ Informer = &multiNamespaceInformer{} -// AddEventHandler adds the handler to each namespaced informer +// AddEventHandler adds the handler to each namespaced informer. 
func (i *multiNamespaceInformer) AddEventHandler(handler toolscache.ResourceEventHandler) { for _, informer := range i.namespaceToInformer { informer.AddEventHandler(handler) } } -// AddEventHandlerWithResyncPeriod adds the handler with a resync period to each namespaced informer +// AddEventHandlerWithResyncPeriod adds the handler with a resync period to each namespaced informer. func (i *multiNamespaceInformer) AddEventHandlerWithResyncPeriod(handler toolscache.ResourceEventHandler, resyncPeriod time.Duration) { for _, informer := range i.namespaceToInformer { informer.AddEventHandlerWithResyncPeriod(handler, resyncPeriod) } } -// AddIndexers adds the indexer for each namespaced informer +// AddIndexers adds the indexer for each namespaced informer. func (i *multiNamespaceInformer) AddIndexers(indexers toolscache.Indexers) error { for _, informer := range i.namespaceToInformer { err := informer.AddIndexers(indexers) @@ -321,7 +320,7 @@ func (i *multiNamespaceInformer) AddIndexers(indexers toolscache.Indexers) error return nil } -// HasSynced checks if each namespaced informer has synced +// HasSynced checks if each namespaced informer has synced. func (i *multiNamespaceInformer) HasSynced() bool { for _, informer := range i.namespaceToInformer { if ok := informer.HasSynced(); !ok { diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/client/apiutil/apimachinery.go b/vendor/sigs.k8s.io/controller-runtime/pkg/client/apiutil/apimachinery.go index bb66a6dfdd..2611a20c64 100644 --- a/vendor/sigs.k8s.io/controller-runtime/pkg/client/apiutil/apimachinery.go +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/client/apiutil/apimachinery.go @@ -80,7 +80,7 @@ func GVKForObject(obj runtime.Object, scheme *runtime.Scheme) (schema.GroupVersi // (unstructured, partial, etc) // check for PartialObjectMetadata, which is analogous to unstructured, but isn't handled by ObjectKinds - _, isPartial := obj.(*metav1.PartialObjectMetadata) + _, isPartial := obj.(*metav1.PartialObjectMetadata) //nolint:ifshort _, isPartialList := obj.(*metav1.PartialObjectMetadataList) if isPartial || isPartialList { // we require that the GVK be populated in order to recognize the object @@ -134,7 +134,7 @@ func (f serializerWithDecodedGVK) DecoderToVersion(serializer runtime.Decoder, _ return serializer } -//createRestConfig copies the base config and updates needed fields for a new rest config +// createRestConfig copies the base config and updates needed fields for a new rest config. func createRestConfig(gvk schema.GroupVersionKind, isUnstructured bool, baseConfig *rest.Config, codecs serializer.CodecFactory) *rest.Config { gv := gvk.GroupVersion() diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/client/apiutil/dynamicrestmapper.go b/vendor/sigs.k8s.io/controller-runtime/pkg/client/apiutil/dynamicrestmapper.go index 5e9a7b5f53..56a00371ff 100644 --- a/vendor/sigs.k8s.io/controller-runtime/pkg/client/apiutil/dynamicrestmapper.go +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/client/apiutil/dynamicrestmapper.go @@ -41,7 +41,7 @@ type dynamicRESTMapper struct { initOnce sync.Once } -// DynamicRESTMapperOption is a functional option on the dynamicRESTMapper +// DynamicRESTMapperOption is a functional option on the dynamicRESTMapper. type DynamicRESTMapperOption func(*dynamicRESTMapper) error // WithLimiter sets the RESTMapper's underlying limiter to lim. 
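Review note on the controller-runtime cache changes above (not part of the patch): the new restrictNamespaceBySelector helper in pkg/cache/internal/informers_map.go lets a cache built without a namespace option still issue namespaced LIST and WATCH calls whenever the per-GVK field selector pins metadata.namespace to a single value, instead of watching cluster-wide and filtering client-side. A minimal sketch of the same decision logic, assuming only k8s.io/apimachinery/pkg/fields; narrowNamespace is a hypothetical name for illustration, since the real helper is unexported:

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/fields"
)

// narrowNamespace mirrors restrictNamespaceBySelector: an explicit namespace
// option always wins; otherwise an exact-match field selector on
// metadata.namespace narrows the ListWatch to that namespace; otherwise the
// watch stays cluster-wide (empty string).
func narrowNamespace(namespaceOpt string, sel fields.Selector) string {
	if namespaceOpt != "" {
		return namespaceOpt
	}
	if sel == nil || sel.Empty() {
		return ""
	}
	if value, found := sel.RequiresExactMatch("metadata.namespace"); found {
		return value
	}
	return ""
}

func main() {
	sel := fields.OneTermEqualSelector("metadata.namespace", "kube-system")
	fmt.Println(narrowNamespace("", sel))        // "kube-system": selector narrows the watch
	fmt.Println(narrowNamespace("default", sel)) // "default": explicit option wins
	fmt.Println(narrowNamespace("", nil))        // "": cluster-wide
}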
diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/client/client.go b/vendor/sigs.k8s.io/controller-runtime/pkg/client/client.go index 3444ab52b4..bbe36c4673 100644 --- a/vendor/sigs.k8s.io/controller-runtime/pkg/client/client.go +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/client/client.go @@ -49,7 +49,7 @@ type WarningHandlerOptions struct { AllowDuplicateLogs bool } -// Options are creation options for a Client +// Options are creation options for a Client. type Options struct { // Scheme, if provided, will be used to map go structs to GroupVersionKinds Scheme *runtime.Scheme @@ -178,7 +178,7 @@ func (c *client) RESTMapper() meta.RESTMapper { return c.mapper } -// Create implements client.Client +// Create implements client.Client. func (c *client) Create(ctx context.Context, obj Object, opts ...CreateOption) error { switch obj.(type) { case *unstructured.Unstructured: @@ -190,7 +190,7 @@ func (c *client) Create(ctx context.Context, obj Object, opts ...CreateOption) e } } -// Update implements client.Client +// Update implements client.Client. func (c *client) Update(ctx context.Context, obj Object, opts ...UpdateOption) error { defer c.resetGroupVersionKind(obj, obj.GetObjectKind().GroupVersionKind()) switch obj.(type) { @@ -203,7 +203,7 @@ func (c *client) Update(ctx context.Context, obj Object, opts ...UpdateOption) e } } -// Delete implements client.Client +// Delete implements client.Client. func (c *client) Delete(ctx context.Context, obj Object, opts ...DeleteOption) error { switch obj.(type) { case *unstructured.Unstructured: @@ -215,7 +215,7 @@ func (c *client) Delete(ctx context.Context, obj Object, opts ...DeleteOption) e } } -// DeleteAllOf implements client.Client +// DeleteAllOf implements client.Client. func (c *client) DeleteAllOf(ctx context.Context, obj Object, opts ...DeleteAllOfOption) error { switch obj.(type) { case *unstructured.Unstructured: @@ -227,7 +227,7 @@ func (c *client) DeleteAllOf(ctx context.Context, obj Object, opts ...DeleteAllO } } -// Patch implements client.Client +// Patch implements client.Client. func (c *client) Patch(ctx context.Context, obj Object, patch Patch, opts ...PatchOption) error { defer c.resetGroupVersionKind(obj, obj.GetObjectKind().GroupVersionKind()) switch obj.(type) { @@ -240,7 +240,7 @@ func (c *client) Patch(ctx context.Context, obj Object, patch Patch, opts ...Pat } } -// Get implements client.Client +// Get implements client.Client. func (c *client) Get(ctx context.Context, key ObjectKey, obj Object) error { switch obj.(type) { case *unstructured.Unstructured: @@ -254,7 +254,7 @@ func (c *client) Get(ctx context.Context, key ObjectKey, obj Object) error { } } -// List implements client.Client +// List implements client.Client. func (c *client) List(ctx context.Context, obj ObjectList, opts ...ListOption) error { switch x := obj.(type) { case *unstructured.UnstructuredList: @@ -288,20 +288,20 @@ func (c *client) List(ctx context.Context, obj ObjectList, opts ...ListOption) e } } -// Status implements client.StatusClient +// Status implements client.StatusClient. func (c *client) Status() StatusWriter { return &statusWriter{client: c} } -// statusWriter is client.StatusWriter that writes status subresource +// statusWriter is client.StatusWriter that writes status subresource. type statusWriter struct { client *client } -// ensure statusWriter implements client.StatusWriter +// ensure statusWriter implements client.StatusWriter. 
var _ StatusWriter = &statusWriter{} -// Update implements client.StatusWriter +// Update implements client.StatusWriter. func (sw *statusWriter) Update(ctx context.Context, obj Object, opts ...UpdateOption) error { defer sw.client.resetGroupVersionKind(obj, obj.GetObjectKind().GroupVersionKind()) switch obj.(type) { @@ -314,7 +314,7 @@ func (sw *statusWriter) Update(ctx context.Context, obj Object, opts ...UpdateOp } } -// Patch implements client.Client +// Patch implements client.Client. func (sw *statusWriter) Patch(ctx context.Context, obj Object, patch Patch, opts ...PatchOption) error { defer sw.client.resetGroupVersionKind(obj, obj.GetObjectKind().GroupVersionKind()) switch obj.(type) { diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/client/client_cache.go b/vendor/sigs.k8s.io/controller-runtime/pkg/client/client_cache.go index b3493cb025..857a0b38a7 100644 --- a/vendor/sigs.k8s.io/controller-runtime/pkg/client/client_cache.go +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/client/client_cache.go @@ -30,7 +30,7 @@ import ( "sigs.k8s.io/controller-runtime/pkg/client/apiutil" ) -// clientCache creates and caches rest clients and metadata for Kubernetes types +// clientCache creates and caches rest clients and metadata for Kubernetes types. type clientCache struct { // config is the rest.Config to talk to an apiserver config *rest.Config @@ -107,7 +107,7 @@ func (c *clientCache) getResource(obj runtime.Object) (*resourceMeta, error) { return r, err } -// getObjMeta returns objMeta containing both type and object metadata and state +// getObjMeta returns objMeta containing both type and object metadata and state. func (c *clientCache) getObjMeta(obj runtime.Object) (*objMeta, error) { r, err := c.getResource(obj) if err != nil { @@ -130,17 +130,17 @@ type resourceMeta struct { mapping *meta.RESTMapping } -// isNamespaced returns true if the type is namespaced +// isNamespaced returns true if the type is namespaced. func (r *resourceMeta) isNamespaced() bool { return r.mapping.Scope.Name() != meta.RESTScopeNameRoot } -// resource returns the resource name of the type +// resource returns the resource name of the type. func (r *resourceMeta) resource() string { return r.mapping.Resource.Resource } -// objMeta stores type and object information about a Kubernetes type +// objMeta stores type and object information about a Kubernetes type. type objMeta struct { // resourceMeta contains type information for the object *resourceMeta diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/client/dryrun.go b/vendor/sigs.k8s.io/controller-runtime/pkg/client/dryrun.go index 67e80e0551..ea25ea2530 100644 --- a/vendor/sigs.k8s.io/controller-runtime/pkg/client/dryrun.go +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/client/dryrun.go @@ -46,47 +46,47 @@ func (c *dryRunClient) RESTMapper() meta.RESTMapper { return c.client.RESTMapper() } -// Create implements client.Client +// Create implements client.Client. func (c *dryRunClient) Create(ctx context.Context, obj Object, opts ...CreateOption) error { return c.client.Create(ctx, obj, append(opts, DryRunAll)...) } -// Update implements client.Client +// Update implements client.Client. func (c *dryRunClient) Update(ctx context.Context, obj Object, opts ...UpdateOption) error { return c.client.Update(ctx, obj, append(opts, DryRunAll)...) } -// Delete implements client.Client +// Delete implements client.Client. 
func (c *dryRunClient) Delete(ctx context.Context, obj Object, opts ...DeleteOption) error { return c.client.Delete(ctx, obj, append(opts, DryRunAll)...) } -// DeleteAllOf implements client.Client +// DeleteAllOf implements client.Client. func (c *dryRunClient) DeleteAllOf(ctx context.Context, obj Object, opts ...DeleteAllOfOption) error { return c.client.DeleteAllOf(ctx, obj, append(opts, DryRunAll)...) } -// Patch implements client.Client +// Patch implements client.Client. func (c *dryRunClient) Patch(ctx context.Context, obj Object, patch Patch, opts ...PatchOption) error { return c.client.Patch(ctx, obj, patch, append(opts, DryRunAll)...) } -// Get implements client.Client +// Get implements client.Client. func (c *dryRunClient) Get(ctx context.Context, key ObjectKey, obj Object) error { return c.client.Get(ctx, key, obj) } -// List implements client.Client +// List implements client.Client. func (c *dryRunClient) List(ctx context.Context, obj ObjectList, opts ...ListOption) error { return c.client.List(ctx, obj, opts...) } -// Status implements client.StatusClient +// Status implements client.StatusClient. func (c *dryRunClient) Status() StatusWriter { return &dryRunStatusWriter{client: c.client.Status()} } -// ensure dryRunStatusWriter implements client.StatusWriter +// ensure dryRunStatusWriter implements client.StatusWriter. var _ StatusWriter = &dryRunStatusWriter{} // dryRunStatusWriter is client.StatusWriter that writes status subresource with dryRun mode @@ -95,12 +95,12 @@ type dryRunStatusWriter struct { client StatusWriter } -// Update implements client.StatusWriter +// Update implements client.StatusWriter. func (sw *dryRunStatusWriter) Update(ctx context.Context, obj Object, opts ...UpdateOption) error { return sw.client.Update(ctx, obj, append(opts, DryRunAll)...) } -// Patch implements client.StatusWriter +// Patch implements client.StatusWriter. func (sw *dryRunStatusWriter) Patch(ctx context.Context, obj Object, patch Patch, opts ...PatchOption) error { return sw.client.Patch(ctx, obj, patch, append(opts, DryRunAll)...) } diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/client/interfaces.go b/vendor/sigs.k8s.io/controller-runtime/pkg/client/interfaces.go index 0dfea4d6c5..58c2ece15b 100644 --- a/vendor/sigs.k8s.io/controller-runtime/pkg/client/interfaces.go +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/client/interfaces.go @@ -30,7 +30,7 @@ import ( // ObjectKey identifies a Kubernetes Object. type ObjectKey = types.NamespacedName -// ObjectKeyFromObject returns the ObjectKey given a runtime.Object +// ObjectKeyFromObject returns the ObjectKey given a runtime.Object. func ObjectKeyFromObject(obj Object) ObjectKey { return ObjectKey{Namespace: obj.GetNamespace(), Name: obj.GetName()} } diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/client/metadata_client.go b/vendor/sigs.k8s.io/controller-runtime/pkg/client/metadata_client.go index c0fc72c5b7..59747463a4 100644 --- a/vendor/sigs.k8s.io/controller-runtime/pkg/client/metadata_client.go +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/client/metadata_client.go @@ -49,7 +49,7 @@ func (mc *metadataClient) getResourceInterface(gvk schema.GroupVersionKind, ns s return mc.client.Resource(mapping.Resource).Namespace(ns), nil } -// Delete implements client.Client +// Delete implements client.Client. 
func (mc *metadataClient) Delete(ctx context.Context, obj Object, opts ...DeleteOption) error { metadata, ok := obj.(*metav1.PartialObjectMetadata) if !ok { @@ -67,7 +67,7 @@ func (mc *metadataClient) Delete(ctx context.Context, obj Object, opts ...Delete return resInt.Delete(ctx, metadata.Name, *deleteOpts.AsDeleteOptions()) } -// DeleteAllOf implements client.Client +// DeleteAllOf implements client.Client. func (mc *metadataClient) DeleteAllOf(ctx context.Context, obj Object, opts ...DeleteAllOfOption) error { metadata, ok := obj.(*metav1.PartialObjectMetadata) if !ok { @@ -85,7 +85,7 @@ func (mc *metadataClient) DeleteAllOf(ctx context.Context, obj Object, opts ...D return resInt.DeleteCollection(ctx, *deleteAllOfOpts.AsDeleteOptions(), *deleteAllOfOpts.AsListOptions()) } -// Patch implements client.Client +// Patch implements client.Client. func (mc *metadataClient) Patch(ctx context.Context, obj Object, patch Patch, opts ...PatchOption) error { metadata, ok := obj.(*metav1.PartialObjectMetadata) if !ok { @@ -115,7 +115,7 @@ func (mc *metadataClient) Patch(ctx context.Context, obj Object, patch Patch, op return nil } -// Get implements client.Client +// Get implements client.Client. func (mc *metadataClient) Get(ctx context.Context, key ObjectKey, obj Object) error { metadata, ok := obj.(*metav1.PartialObjectMetadata) if !ok { @@ -138,7 +138,7 @@ func (mc *metadataClient) Get(ctx context.Context, key ObjectKey, obj Object) er return nil } -// List implements client.Client +// List implements client.Client. func (mc *metadataClient) List(ctx context.Context, obj ObjectList, opts ...ListOption) error { metadata, ok := obj.(*metav1.PartialObjectMetadataList) if !ok { diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/client/namespaced_client.go b/vendor/sigs.k8s.io/controller-runtime/pkg/client/namespaced_client.go index cedcfb5961..d73cc5135a 100644 --- a/vendor/sigs.k8s.io/controller-runtime/pkg/client/namespaced_client.go +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/client/namespaced_client.go @@ -86,7 +86,7 @@ func isNamespaced(c Client, obj runtime.Object) (bool, error) { scope := restmapping.Scope.Name() if scope == "" { - return false, errors.New("Scope cannot be identified. Empty scope returned") + return false, errors.New("scope cannot be identified, empty scope returned") } if scope != meta.RESTScopeNameRoot { @@ -95,7 +95,7 @@ func isNamespaced(c Client, obj runtime.Object) (bool, error) { return false, nil } -// Create implements clinet.Client +// Create implements clinet.Client. func (n *namespacedClient) Create(ctx context.Context, obj Object, opts ...CreateOption) error { isNamespaceScoped, err := isNamespaced(n.client, obj) if err != nil { @@ -104,7 +104,7 @@ func (n *namespacedClient) Create(ctx context.Context, obj Object, opts ...Creat objectNamespace := obj.GetNamespace() if objectNamespace != n.namespace && objectNamespace != "" { - return fmt.Errorf("Namespace %s of the object %s does not match the namespace %s on the client", objectNamespace, obj.GetName(), n.namespace) + return fmt.Errorf("namespace %s of the object %s does not match the namespace %s on the client", objectNamespace, obj.GetName(), n.namespace) } if isNamespaceScoped && objectNamespace == "" { @@ -113,7 +113,7 @@ func (n *namespacedClient) Create(ctx context.Context, obj Object, opts ...Creat return n.client.Create(ctx, obj, opts...) } -// Update implements client.Client +// Update implements client.Client. 
func (n *namespacedClient) Update(ctx context.Context, obj Object, opts ...UpdateOption) error { isNamespaceScoped, err := isNamespaced(n.client, obj) if err != nil { @@ -122,7 +122,7 @@ func (n *namespacedClient) Update(ctx context.Context, obj Object, opts ...Updat objectNamespace := obj.GetNamespace() if objectNamespace != n.namespace && objectNamespace != "" { - return fmt.Errorf("Namespace %s of the object %s does not match the namespace %s on the client", objectNamespace, obj.GetName(), n.namespace) + return fmt.Errorf("namespace %s of the object %s does not match the namespace %s on the client", objectNamespace, obj.GetName(), n.namespace) } if isNamespaceScoped && objectNamespace == "" { @@ -131,7 +131,7 @@ func (n *namespacedClient) Update(ctx context.Context, obj Object, opts ...Updat return n.client.Update(ctx, obj, opts...) } -// Delete implements client.Client +// Delete implements client.Client. func (n *namespacedClient) Delete(ctx context.Context, obj Object, opts ...DeleteOption) error { isNamespaceScoped, err := isNamespaced(n.client, obj) if err != nil { @@ -140,7 +140,7 @@ func (n *namespacedClient) Delete(ctx context.Context, obj Object, opts ...Delet objectNamespace := obj.GetNamespace() if objectNamespace != n.namespace && objectNamespace != "" { - return fmt.Errorf("Namespace %s of the object %s does not match the namespace %s on the client", objectNamespace, obj.GetName(), n.namespace) + return fmt.Errorf("namespace %s of the object %s does not match the namespace %s on the client", objectNamespace, obj.GetName(), n.namespace) } if isNamespaceScoped && objectNamespace == "" { @@ -149,7 +149,7 @@ func (n *namespacedClient) Delete(ctx context.Context, obj Object, opts ...Delet return n.client.Delete(ctx, obj, opts...) } -// DeleteAllOf implements client.Client +// DeleteAllOf implements client.Client. func (n *namespacedClient) DeleteAllOf(ctx context.Context, obj Object, opts ...DeleteAllOfOption) error { isNamespaceScoped, err := isNamespaced(n.client, obj) if err != nil { @@ -162,7 +162,7 @@ func (n *namespacedClient) DeleteAllOf(ctx context.Context, obj Object, opts ... return n.client.DeleteAllOf(ctx, obj, opts...) } -// Patch implements client.Client +// Patch implements client.Client. func (n *namespacedClient) Patch(ctx context.Context, obj Object, patch Patch, opts ...PatchOption) error { isNamespaceScoped, err := isNamespaced(n.client, obj) if err != nil { @@ -171,7 +171,7 @@ func (n *namespacedClient) Patch(ctx context.Context, obj Object, patch Patch, o objectNamespace := obj.GetNamespace() if objectNamespace != n.namespace && objectNamespace != "" { - return fmt.Errorf("Namespace %s of the object %s does not match the namespace %s on the client", objectNamespace, obj.GetName(), n.namespace) + return fmt.Errorf("namespace %s of the object %s does not match the namespace %s on the client", objectNamespace, obj.GetName(), n.namespace) } if isNamespaceScoped && objectNamespace == "" { @@ -180,7 +180,7 @@ func (n *namespacedClient) Patch(ctx context.Context, obj Object, patch Patch, o return n.client.Patch(ctx, obj, patch, opts...) } -// Get implements client.Client +// Get implements client.Client. 
func (n *namespacedClient) Get(ctx context.Context, key ObjectKey, obj Object) error { isNamespaceScoped, err := isNamespaced(n.client, obj) if err != nil { @@ -188,14 +188,14 @@ func (n *namespacedClient) Get(ctx context.Context, key ObjectKey, obj Object) e } if isNamespaceScoped { if key.Namespace != "" && key.Namespace != n.namespace { - return fmt.Errorf("Namespace %s provided for the object %s does not match the namesapce %s on the client", key.Namespace, obj.GetName(), n.namespace) + return fmt.Errorf("namespace %s provided for the object %s does not match the namespace %s on the client", key.Namespace, obj.GetName(), n.namespace) } key.Namespace = n.namespace } return n.client.Get(ctx, key, obj) } -// List implements client.Client +// List implements client.Client. func (n *namespacedClient) List(ctx context.Context, obj ObjectList, opts ...ListOption) error { if n.namespace != "" { opts = append(opts, InNamespace(n.namespace)) @@ -203,12 +203,12 @@ func (n *namespacedClient) List(ctx context.Context, obj ObjectList, opts ...Lis } return n.client.List(ctx, obj, opts...) } -// Status implements client.StatusClient +// Status implements client.StatusClient. func (n *namespacedClient) Status() StatusWriter { return &namespacedClientStatusWriter{StatusClient: n.client.Status(), namespace: n.namespace, namespacedclient: n} } -// ensure namespacedClientStatusWriter implements client.StatusWriter +// ensure namespacedClientStatusWriter implements client.StatusWriter. var _ StatusWriter = &namespacedClientStatusWriter{} type namespacedClientStatusWriter struct { @@ -217,7 +217,7 @@ type namespacedClientStatusWriter struct { namespacedclient Client } -// Update implements client.StatusWriter +// Update implements client.StatusWriter. func (nsw *namespacedClientStatusWriter) Update(ctx context.Context, obj Object, opts ...UpdateOption) error { isNamespaceScoped, err := isNamespaced(nsw.namespacedclient, obj) if err != nil { @@ -226,7 +226,7 @@ func (nsw *namespacedClientStatusWriter) Update(ctx context.Context, obj Object, objectNamespace := obj.GetNamespace() if objectNamespace != nsw.namespace && objectNamespace != "" { - return fmt.Errorf("Namespace %s of the object %s does not match the namespace %s on the client", objectNamespace, obj.GetName(), nsw.namespace) + return fmt.Errorf("namespace %s of the object %s does not match the namespace %s on the client", objectNamespace, obj.GetName(), nsw.namespace) } if isNamespaceScoped && objectNamespace == "" { @@ -235,7 +235,7 @@ func (nsw *namespacedClientStatusWriter) Update(ctx context.Context, obj Object, return nsw.StatusClient.Update(ctx, obj, opts...) } -// Patch implements client.StatusWriter +// Patch implements client.StatusWriter.
func (nsw *namespacedClientStatusWriter) Patch(ctx context.Context, obj Object, patch Patch, opts ...PatchOption) error { isNamespaceScoped, err := isNamespaced(nsw.namespacedclient, obj) if err != nil { @@ -244,7 +244,7 @@ func (nsw *namespacedClientStatusWriter) Patch(ctx context.Context, obj Object, objectNamespace := obj.GetNamespace() if objectNamespace != nsw.namespace && objectNamespace != "" { - return fmt.Errorf("Namespace %s of the object %s does not match the namespace %s on the client", objectNamespace, obj.GetName(), nsw.namespace) + return fmt.Errorf("namespace %s of the object %s does not match the namespace %s on the client", objectNamespace, obj.GetName(), nsw.namespace) } if isNamespaceScoped && objectNamespace == "" { diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/client/options.go b/vendor/sigs.k8s.io/controller-runtime/pkg/client/options.go index f253276466..aa2299eac0 100644 --- a/vendor/sigs.k8s.io/controller-runtime/pkg/client/options.go +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/client/options.go @@ -158,7 +158,7 @@ func (o *CreateOptions) ApplyOptions(opts []CreateOption) *CreateOptions { return o } -// ApplyToCreate implements CreateOption +// ApplyToCreate implements CreateOption. func (o *CreateOptions) ApplyToCreate(co *CreateOptions) { if o.DryRun != nil { co.DryRun = o.DryRun @@ -239,7 +239,7 @@ func (o *DeleteOptions) ApplyOptions(opts []DeleteOption) *DeleteOptions { var _ DeleteOption = &DeleteOptions{} -// ApplyToDelete implements DeleteOption +// ApplyToDelete implements DeleteOption. func (o *DeleteOptions) ApplyToDelete(do *DeleteOptions) { if o.GracePeriodSeconds != nil { do.GracePeriodSeconds = o.GracePeriodSeconds @@ -349,7 +349,7 @@ type ListOptions struct { var _ ListOption = &ListOptions{} -// ApplyToList implements ListOption for ListOptions +// ApplyToList implements ListOption for ListOptions. func (o *ListOptions) ApplyToList(lo *ListOptions) { if o.LabelSelector != nil { lo.LabelSelector = o.LabelSelector @@ -569,7 +569,7 @@ func (o *UpdateOptions) ApplyOptions(opts []UpdateOption) *UpdateOptions { var _ UpdateOption = &UpdateOptions{} -// ApplyToUpdate implements UpdateOption +// ApplyToUpdate implements UpdateOption. func (o *UpdateOptions) ApplyToUpdate(uo *UpdateOptions) { if o.DryRun != nil { uo.DryRun = o.DryRun @@ -636,7 +636,7 @@ func (o *PatchOptions) AsPatchOptions() *metav1.PatchOptions { var _ PatchOption = &PatchOptions{} -// ApplyToPatch implements PatchOptions +// ApplyToPatch implements PatchOptions. func (o *PatchOptions) ApplyToPatch(po *PatchOptions) { if o.DryRun != nil { po.DryRun = o.DryRun @@ -688,7 +688,7 @@ func (o *DeleteAllOfOptions) ApplyOptions(opts []DeleteAllOfOption) *DeleteAllOf var _ DeleteAllOfOption = &DeleteAllOfOptions{} -// ApplyToDeleteAllOf implements DeleteAllOfOption +// ApplyToDeleteAllOf implements DeleteAllOfOption. func (o *DeleteAllOfOptions) ApplyToDeleteAllOf(do *DeleteAllOfOptions) { o.ApplyToList(&do.ListOptions) o.ApplyToDelete(&do.DeleteOptions) diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/client/typed_client.go b/vendor/sigs.k8s.io/controller-runtime/pkg/client/typed_client.go index a1b32653ca..dde7b21f25 100644 --- a/vendor/sigs.k8s.io/controller-runtime/pkg/client/typed_client.go +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/client/typed_client.go @@ -33,7 +33,7 @@ type typedClient struct { paramCodec runtime.ParameterCodec } -// Create implements client.Client +// Create implements client.Client. 
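Reviewer note: the namespaced_client.go hunks above pin a client to one namespace, fill in empty namespaces, and reject mismatches with the now-lowercased error; List gets an implicit InNamespace option. A minimal usage sketch, not part of the patch; it assumes a reachable kubeconfig, and "team-a", "team-b", and "web-0" are illustrative names.

package main

import (
	"context"
	"fmt"

	corev1 "k8s.io/api/core/v1"
	"sigs.k8s.io/controller-runtime/pkg/client"
	"sigs.k8s.io/controller-runtime/pkg/client/config"
)

func main() {
	cfg, err := config.GetConfig() // assumes kubeconfig or in-cluster config
	if err != nil {
		panic(err)
	}
	c, err := client.New(cfg, client.Options{})
	if err != nil {
		panic(err)
	}

	// Every call through nsc is scoped to "team-a".
	nsc := client.NewNamespacedClient(c, "team-a")

	pod := &corev1.Pod{}
	// A key naming a different namespace is rejected with the
	// "namespace ... does not match the namespace ... on the client" error.
	err = nsc.Get(context.TODO(), client.ObjectKey{Namespace: "team-b", Name: "web-0"}, pod)
	fmt.Println(err)
}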
func (c *typedClient) Create(ctx context.Context, obj Object, opts ...CreateOption) error { o, err := c.cache.getObjMeta(obj) if err != nil { @@ -51,7 +51,7 @@ func (c *typedClient) Create(ctx context.Context, obj Object, opts ...CreateOpti Into(obj) } -// Update implements client.Client +// Update implements client.Client. func (c *typedClient) Update(ctx context.Context, obj Object, opts ...UpdateOption) error { o, err := c.cache.getObjMeta(obj) if err != nil { @@ -70,7 +70,7 @@ func (c *typedClient) Update(ctx context.Context, obj Object, opts ...UpdateOpti Into(obj) } -// Delete implements client.Client +// Delete implements client.Client. func (c *typedClient) Delete(ctx context.Context, obj Object, opts ...DeleteOption) error { o, err := c.cache.getObjMeta(obj) if err != nil { @@ -89,7 +89,7 @@ func (c *typedClient) Delete(ctx context.Context, obj Object, opts ...DeleteOpti Error() } -// DeleteAllOf implements client.Client +// DeleteAllOf implements client.Client. func (c *typedClient) DeleteAllOf(ctx context.Context, obj Object, opts ...DeleteAllOfOption) error { o, err := c.cache.getObjMeta(obj) if err != nil { @@ -108,7 +108,7 @@ func (c *typedClient) DeleteAllOf(ctx context.Context, obj Object, opts ...Delet Error() } -// Patch implements client.Client +// Patch implements client.Client. func (c *typedClient) Patch(ctx context.Context, obj Object, patch Patch, opts ...PatchOption) error { o, err := c.cache.getObjMeta(obj) if err != nil { @@ -131,7 +131,7 @@ func (c *typedClient) Patch(ctx context.Context, obj Object, patch Patch, opts . Into(obj) } -// Get implements client.Client +// Get implements client.Client. func (c *typedClient) Get(ctx context.Context, key ObjectKey, obj Object) error { r, err := c.cache.getResource(obj) if err != nil { @@ -143,7 +143,7 @@ func (c *typedClient) Get(ctx context.Context, key ObjectKey, obj Object) error Name(key.Name).Do(ctx).Into(obj) } -// List implements client.Client +// List implements client.Client. func (c *typedClient) List(ctx context.Context, obj ObjectList, opts ...ListOption) error { r, err := c.cache.getResource(obj) if err != nil { diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/client/unstructured_client.go b/vendor/sigs.k8s.io/controller-runtime/pkg/client/unstructured_client.go index f8fb3ccec1..dcf15be275 100644 --- a/vendor/sigs.k8s.io/controller-runtime/pkg/client/unstructured_client.go +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/client/unstructured_client.go @@ -36,7 +36,7 @@ type unstructuredClient struct { paramCodec runtime.ParameterCodec } -// Create implements client.Client +// Create implements client.Client. func (uc *unstructuredClient) Create(ctx context.Context, obj Object, opts ...CreateOption) error { u, ok := obj.(*unstructured.Unstructured) if !ok { @@ -64,7 +64,7 @@ func (uc *unstructuredClient) Create(ctx context.Context, obj Object, opts ...Cr return result } -// Update implements client.Client +// Update implements client.Client. func (uc *unstructuredClient) Update(ctx context.Context, obj Object, opts ...UpdateOption) error { u, ok := obj.(*unstructured.Unstructured) if !ok { @@ -93,7 +93,7 @@ func (uc *unstructuredClient) Update(ctx context.Context, obj Object, opts ...Up return result } -// Delete implements client.Client +// Delete implements client.Client. 
func (uc *unstructuredClient) Delete(ctx context.Context, obj Object, opts ...DeleteOption) error { _, ok := obj.(*unstructured.Unstructured) if !ok { @@ -116,7 +116,7 @@ func (uc *unstructuredClient) Delete(ctx context.Context, obj Object, opts ...De Error() } -// DeleteAllOf implements client.Client +// DeleteAllOf implements client.Client. func (uc *unstructuredClient) DeleteAllOf(ctx context.Context, obj Object, opts ...DeleteAllOfOption) error { _, ok := obj.(*unstructured.Unstructured) if !ok { @@ -139,7 +139,7 @@ func (uc *unstructuredClient) DeleteAllOf(ctx context.Context, obj Object, opts Error() } -// Patch implements client.Client +// Patch implements client.Client. func (uc *unstructuredClient) Patch(ctx context.Context, obj Object, patch Patch, opts ...PatchOption) error { _, ok := obj.(*unstructured.Unstructured) if !ok { @@ -167,7 +167,7 @@ func (uc *unstructuredClient) Patch(ctx context.Context, obj Object, patch Patch Into(obj) } -// Get implements client.Client +// Get implements client.Client. func (uc *unstructuredClient) Get(ctx context.Context, key ObjectKey, obj Object) error { u, ok := obj.(*unstructured.Unstructured) if !ok { @@ -193,7 +193,7 @@ func (uc *unstructuredClient) Get(ctx context.Context, key ObjectKey, obj Object return result } -// List implements client.Client +// List implements client.Client. func (uc *unstructuredClient) List(ctx context.Context, obj ObjectList, opts ...ListOption) error { u, ok := obj.(*unstructured.UnstructuredList) if !ok { diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/cluster/cluster.go b/vendor/sigs.k8s.io/controller-runtime/pkg/cluster/cluster.go index 76fa72ad76..dfd0fa9dd8 100644 --- a/vendor/sigs.k8s.io/controller-runtime/pkg/cluster/cluster.go +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/cluster/cluster.go @@ -139,10 +139,10 @@ type Options struct { newRecorderProvider func(config *rest.Config, scheme *runtime.Scheme, logger logr.Logger, makeBroadcaster intrec.EventBroadcasterProducer) (*intrec.Provider, error) } -// Option can be used to manipulate Options +// Option can be used to manipulate Options. type Option func(*Options) -// New constructs a brand new cluster +// New constructs a brand new cluster. func New(config *rest.Config, opts ...Option) (Cluster, error) { if config == nil { return nil, errors.New("must specify Config") @@ -204,7 +204,7 @@ func New(config *rest.Config, opts ...Option) (Cluster, error) { }, nil } -// setOptionsDefaults set default values for Options fields +// setOptionsDefaults set default values for Options fields. func setOptionsDefaults(options Options) Options { // Use the Kubernetes client-go scheme if none is specified if options.Scheme == nil { @@ -252,10 +252,10 @@ func setOptionsDefaults(options Options) Options { return options } -// NewClientFunc allows a user to define how to create a client +// NewClientFunc allows a user to define how to create a client. type NewClientFunc func(cache cache.Cache, config *rest.Config, options client.Options, uncachedObjects ...client.Object) (client.Client, error) -// DefaultNewClient creates the default caching client +// DefaultNewClient creates the default caching client. 
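Reviewer note: the unstructured_client.go hunks above only engage when the caller passes *unstructured.Unstructured, and the object needs a GVK so the client can resolve a REST mapping. A sketch, not from the patch; "default" and "demo" are illustrative.

import (
	"context"

	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
	"k8s.io/apimachinery/pkg/runtime/schema"
	"sigs.k8s.io/controller-runtime/pkg/client"
)

func getDeployment(ctx context.Context, c client.Client) (*unstructured.Unstructured, error) {
	u := &unstructured.Unstructured{}
	// Without a GVK the unstructured paths cannot route the request.
	u.SetGroupVersionKind(schema.GroupVersionKind{
		Group: "apps", Version: "v1", Kind: "Deployment",
	})
	err := c.Get(ctx, client.ObjectKey{Namespace: "default", Name: "demo"}, u)
	return u, err
}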
func DefaultNewClient(cache cache.Cache, config *rest.Config, options client.Options, uncachedObjects ...client.Object) (client.Client, error) { c, err := client.New(config, options) if err != nil { diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/config/config.go b/vendor/sigs.k8s.io/controller-runtime/pkg/config/config.go index fce75d7bfb..f23b02df00 100644 --- a/vendor/sigs.k8s.io/controller-runtime/pkg/config/config.go +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/config/config.go @@ -28,7 +28,7 @@ import ( ) // ControllerManagerConfiguration defines the functions necessary to parse a config file -// and to configure the Options struct for the ctrl.Manager +// and to configure the Options struct for the ctrl.Manager. type ControllerManagerConfiguration interface { runtime.Object @@ -37,7 +37,7 @@ type ControllerManagerConfiguration interface { } // DeferredFileLoader is used to configure the decoder for loading controller -// runtime component config types +// runtime component config types. type DeferredFileLoader struct { ControllerManagerConfiguration path string @@ -62,7 +62,7 @@ func File() *DeferredFileLoader { } } -// Complete will use sync.Once to set the scheme +// Complete will use sync.Once to set the scheme. func (d *DeferredFileLoader) Complete() (v1alpha1.ControllerManagerConfigurationSpec, error) { d.once.Do(d.loadFile) if d.err != nil { @@ -71,25 +71,25 @@ func (d *DeferredFileLoader) Complete() (v1alpha1.ControllerManagerConfiguration return d.ControllerManagerConfiguration.Complete() } -// AtPath will set the path to load the file for the decoder +// AtPath will set the path to load the file for the decoder. func (d *DeferredFileLoader) AtPath(path string) *DeferredFileLoader { d.path = path return d } -// OfKind will set the type to be used for decoding the file into +// OfKind will set the type to be used for decoding the file into. func (d *DeferredFileLoader) OfKind(obj ControllerManagerConfiguration) *DeferredFileLoader { d.ControllerManagerConfiguration = obj return d } -// InjectScheme will configure the scheme to be used for decoding the file +// InjectScheme will configure the scheme to be used for decoding the file. func (d *DeferredFileLoader) InjectScheme(scheme *runtime.Scheme) error { d.scheme = scheme return nil } -// loadFile is used from the mutex.Once to load the file +// loadFile is used from the mutex.Once to load the file. func (d *DeferredFileLoader) loadFile() { if d.scheme == nil { d.err = fmt.Errorf("scheme not supplied to controller configuration loader") @@ -109,6 +109,4 @@ func (d *DeferredFileLoader) loadFile() { if err = runtime.DecodeInto(codecs.UniversalDecoder(), content, d.ControllerManagerConfiguration); err != nil { d.err = fmt.Errorf("could not decode file into runtime.Object") } - - return } diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/config/v1alpha1/register.go b/vendor/sigs.k8s.io/controller-runtime/pkg/config/v1alpha1/register.go index 72baa27f19..9efdbc0668 100644 --- a/vendor/sigs.k8s.io/controller-runtime/pkg/config/v1alpha1/register.go +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/config/v1alpha1/register.go @@ -22,10 +22,10 @@ import ( ) var ( - // GroupVersion is group version used to register these objects + // GroupVersion is group version used to register these objects. 
GroupVersion = schema.GroupVersion{Group: "controller-runtime.sigs.k8s.io", Version: "v1alpha1"} - // SchemeBuilder is used to add go types to the GroupVersionKind scheme + // SchemeBuilder is used to add go types to the GroupVersionKind scheme. SchemeBuilder = &scheme.Builder{GroupVersion: GroupVersion} // AddToScheme adds the types in this group-version to the given scheme. diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/config/v1alpha1/types.go b/vendor/sigs.k8s.io/controller-runtime/pkg/config/v1alpha1/types.go index e13f1c0090..e67b62e514 100644 --- a/vendor/sigs.k8s.io/controller-runtime/pkg/config/v1alpha1/types.go +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/config/v1alpha1/types.go @@ -24,7 +24,7 @@ import ( configv1alpha1 "k8s.io/component-base/config/v1alpha1" ) -// ControllerManagerConfigurationSpec defines the desired state of GenericControllerManagerConfiguration +// ControllerManagerConfigurationSpec defines the desired state of GenericControllerManagerConfiguration. type ControllerManagerConfigurationSpec struct { // SyncPeriod determines the minimum frequency at which watched resources are // reconciled. A lower period will correct entropy more quickly, but reduce @@ -96,7 +96,7 @@ type ControllerConfigurationSpec struct { CacheSyncTimeout *time.Duration `json:"cacheSyncTimeout,omitempty"` } -// ControllerMetrics defines the metrics configs +// ControllerMetrics defines the metrics configs. type ControllerMetrics struct { // BindAddress is the TCP address that the controller should bind to // for serving prometheus metrics. @@ -105,7 +105,7 @@ type ControllerMetrics struct { BindAddress string `json:"bindAddress,omitempty"` } -// ControllerHealth defines the health configs +// ControllerHealth defines the health configs. type ControllerHealth struct { // HealthProbeBindAddress is the TCP address that the controller should bind to // for serving health probes @@ -121,7 +121,7 @@ type ControllerHealth struct { LivenessEndpointName string `json:"livenessEndpointName,omitempty"` } -// ControllerWebhook defines the webhook server for the controller +// ControllerWebhook defines the webhook server for the controller. type ControllerWebhook struct { // Port is the port that the webhook server serves at. // It is used to set webhook.Server.Port. @@ -143,7 +143,7 @@ type ControllerWebhook struct { // +kubebuilder:object:root=true -// ControllerManagerConfiguration is the Schema for the GenericControllerManagerConfigurations API +// ControllerManagerConfiguration is the Schema for the GenericControllerManagerConfigurations API. type ControllerManagerConfiguration struct { metav1.TypeMeta `json:",inline"` @@ -151,7 +151,7 @@ type ControllerManagerConfiguration struct { ControllerManagerConfigurationSpec `json:",inline"` } -// Complete returns the configuration for controller-runtime +// Complete returns the configuration for controller-runtime. 
func (c *ControllerManagerConfigurationSpec) Complete() (ControllerManagerConfigurationSpec, error) { return *c, nil } diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/controller/controller.go b/vendor/sigs.k8s.io/controller-runtime/pkg/controller/controller.go index 85d8d6d54c..c9e07562a3 100644 --- a/vendor/sigs.k8s.io/controller-runtime/pkg/controller/controller.go +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/controller/controller.go @@ -32,7 +32,7 @@ import ( "sigs.k8s.io/controller-runtime/pkg/source" ) -// Options are the arguments for creating a new Controller +// Options are the arguments for creating a new Controller. type Options struct { // MaxConcurrentReconciles is the maximum number of concurrent Reconciles which can be run. Defaults to 1. MaxConcurrentReconciles int diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/handler/enqueue.go b/vendor/sigs.k8s.io/controller-runtime/pkg/handler/enqueue.go index fb8987cfe9..e6d3a4eaab 100644 --- a/vendor/sigs.k8s.io/controller-runtime/pkg/handler/enqueue.go +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/handler/enqueue.go @@ -35,7 +35,7 @@ var _ EventHandler = &EnqueueRequestForObject{} // Controllers that have associated Resources (e.g. CRDs) to reconcile the associated Resource. type EnqueueRequestForObject struct{} -// Create implements EventHandler +// Create implements EventHandler. func (e *EnqueueRequestForObject) Create(evt event.CreateEvent, q workqueue.RateLimitingInterface) { if evt.Object == nil { enqueueLog.Error(nil, "CreateEvent received with no metadata", "event", evt) @@ -47,24 +47,25 @@ func (e *EnqueueRequestForObject) Create(evt event.CreateEvent, q workqueue.Rate }}) } -// Update implements EventHandler +// Update implements EventHandler. func (e *EnqueueRequestForObject) Update(evt event.UpdateEvent, q workqueue.RateLimitingInterface) { - if evt.ObjectNew != nil { + switch { + case evt.ObjectNew != nil: q.Add(reconcile.Request{NamespacedName: types.NamespacedName{ Name: evt.ObjectNew.GetName(), Namespace: evt.ObjectNew.GetNamespace(), }}) - } else if evt.ObjectOld != nil { + case evt.ObjectOld != nil: q.Add(reconcile.Request{NamespacedName: types.NamespacedName{ Name: evt.ObjectOld.GetName(), Namespace: evt.ObjectOld.GetNamespace(), }}) - } else { + default: enqueueLog.Error(nil, "UpdateEvent received with no metadata", "event", evt) } } -// Delete implements EventHandler +// Delete implements EventHandler. func (e *EnqueueRequestForObject) Delete(evt event.DeleteEvent, q workqueue.RateLimitingInterface) { if evt.Object == nil { enqueueLog.Error(nil, "DeleteEvent received with no metadata", "event", evt) @@ -76,7 +77,7 @@ func (e *EnqueueRequestForObject) Delete(evt event.DeleteEvent, q workqueue.Rate }}) } -// Generic implements EventHandler +// Generic implements EventHandler. func (e *EnqueueRequestForObject) Generic(evt event.GenericEvent, q workqueue.RateLimitingInterface) { if evt.Object == nil { enqueueLog.Error(nil, "GenericEvent received with no metadata", "event", evt) diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/handler/enqueue_mapped.go b/vendor/sigs.k8s.io/controller-runtime/pkg/handler/enqueue_mapped.go index 585c21e718..17401b1fdb 100644 --- a/vendor/sigs.k8s.io/controller-runtime/pkg/handler/enqueue_mapped.go +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/handler/enqueue_mapped.go @@ -51,26 +51,26 @@ type enqueueRequestsFromMapFunc struct { toRequests MapFunc } -// Create implements EventHandler +// Create implements EventHandler. 
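Reviewer note: EnqueueRequestForObject above (its Update logic reshaped into a switch) is the stock "reconcile the changed object itself" handler. A wiring sketch, not from the patch; the controller name is illustrative.

import (
	corev1 "k8s.io/api/core/v1"
	ctrl "sigs.k8s.io/controller-runtime"
	"sigs.k8s.io/controller-runtime/pkg/controller"
	"sigs.k8s.io/controller-runtime/pkg/handler"
	"sigs.k8s.io/controller-runtime/pkg/reconcile"
	"sigs.k8s.io/controller-runtime/pkg/source"
)

func wire(mgr ctrl.Manager, r reconcile.Reconciler) error {
	c, err := controller.New("pod-controller", mgr, controller.Options{Reconciler: r})
	if err != nil {
		return err
	}
	// Each Pod create/update/delete/generic event enqueues a request keyed
	// by that Pod's namespace/name, per the switch logic above.
	return c.Watch(&source.Kind{Type: &corev1.Pod{}}, &handler.EnqueueRequestForObject{})
}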
func (e *enqueueRequestsFromMapFunc) Create(evt event.CreateEvent, q workqueue.RateLimitingInterface) { reqs := map[reconcile.Request]empty{} e.mapAndEnqueue(q, evt.Object, reqs) } -// Update implements EventHandler +// Update implements EventHandler. func (e *enqueueRequestsFromMapFunc) Update(evt event.UpdateEvent, q workqueue.RateLimitingInterface) { reqs := map[reconcile.Request]empty{} e.mapAndEnqueue(q, evt.ObjectOld, reqs) e.mapAndEnqueue(q, evt.ObjectNew, reqs) } -// Delete implements EventHandler +// Delete implements EventHandler. func (e *enqueueRequestsFromMapFunc) Delete(evt event.DeleteEvent, q workqueue.RateLimitingInterface) { reqs := map[reconcile.Request]empty{} e.mapAndEnqueue(q, evt.Object, reqs) } -// Generic implements EventHandler +// Generic implements EventHandler. func (e *enqueueRequestsFromMapFunc) Generic(evt event.GenericEvent, q workqueue.RateLimitingInterface) { reqs := map[reconcile.Request]empty{} e.mapAndEnqueue(q, evt.Object, reqs) diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/handler/enqueue_owner.go b/vendor/sigs.k8s.io/controller-runtime/pkg/handler/enqueue_owner.go index 8aa4ec52b2..63699893fc 100644 --- a/vendor/sigs.k8s.io/controller-runtime/pkg/handler/enqueue_owner.go +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/handler/enqueue_owner.go @@ -57,7 +57,7 @@ type EnqueueRequestForOwner struct { mapper meta.RESTMapper } -// Create implements EventHandler +// Create implements EventHandler. func (e *EnqueueRequestForOwner) Create(evt event.CreateEvent, q workqueue.RateLimitingInterface) { reqs := map[reconcile.Request]empty{} e.getOwnerReconcileRequest(evt.Object, reqs) @@ -66,7 +66,7 @@ func (e *EnqueueRequestForOwner) Create(evt event.CreateEvent, q workqueue.RateL } } -// Update implements EventHandler +// Update implements EventHandler. func (e *EnqueueRequestForOwner) Update(evt event.UpdateEvent, q workqueue.RateLimitingInterface) { reqs := map[reconcile.Request]empty{} e.getOwnerReconcileRequest(evt.ObjectOld, reqs) @@ -76,7 +76,7 @@ func (e *EnqueueRequestForOwner) Update(evt event.UpdateEvent, q workqueue.RateL } } -// Delete implements EventHandler +// Delete implements EventHandler. func (e *EnqueueRequestForOwner) Delete(evt event.DeleteEvent, q workqueue.RateLimitingInterface) { reqs := map[reconcile.Request]empty{} e.getOwnerReconcileRequest(evt.Object, reqs) @@ -85,7 +85,7 @@ func (e *EnqueueRequestForOwner) Delete(evt event.DeleteEvent, q workqueue.RateL } } -// Generic implements EventHandler +// Generic implements EventHandler. func (e *EnqueueRequestForOwner) Generic(evt event.GenericEvent, q workqueue.RateLimitingInterface) { reqs := map[reconcile.Request]empty{} e.getOwnerReconcileRequest(evt.Object, reqs) @@ -105,10 +105,9 @@ func (e *EnqueueRequestForOwner) parseOwnerTypeGroupKind(scheme *runtime.Scheme) } // Expect only 1 kind. If there is more than one kind this is probably an edge case such as ListOptions. 
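Reviewer note: the mapped and owner handlers above cover the other two common wirings; the kind check that continues just below enforces that OwnerType resolves to exactly one GVK. A sketch with illustrative names, not part of the patch.

import (
	appsv1 "k8s.io/api/apps/v1"
	corev1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/types"
	"sigs.k8s.io/controller-runtime/pkg/client"
	"sigs.k8s.io/controller-runtime/pkg/controller"
	"sigs.k8s.io/controller-runtime/pkg/handler"
	"sigs.k8s.io/controller-runtime/pkg/reconcile"
	"sigs.k8s.io/controller-runtime/pkg/source"
)

func watchSecondaries(c controller.Controller) error {
	// Map every ConfigMap event to one fixed request; on Update both the old
	// and the new object are mapped, deduplicated via the reqs set above.
	if err := c.Watch(&source.Kind{Type: &corev1.ConfigMap{}},
		handler.EnqueueRequestsFromMapFunc(func(obj client.Object) []reconcile.Request {
			return []reconcile.Request{{NamespacedName: types.NamespacedName{
				Namespace: obj.GetNamespace(), Name: "config-owner",
			}}}
		})); err != nil {
		return err
	}
	// Enqueue the controller-owner of each ReplicaSet; OwnerType must map to
	// exactly one kind or parseOwnerTypeGroupKind returns an error.
	return c.Watch(&source.Kind{Type: &appsv1.ReplicaSet{}},
		&handler.EnqueueRequestForOwner{OwnerType: &appsv1.Deployment{}, IsController: true})
}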
if len(kinds) != 1 { - err := fmt.Errorf("Expected exactly 1 kind for OwnerType %T, but found %s kinds", e.OwnerType, kinds) - log.Error(nil, "Expected exactly 1 kind for OwnerType", "owner type", fmt.Sprintf("%T", e.OwnerType), "kinds", kinds) + err := fmt.Errorf("expected exactly 1 kind for OwnerType %T, but found %s kinds", e.OwnerType, kinds) + log.Error(nil, "expected exactly 1 kind for OwnerType", "owner type", fmt.Sprintf("%T", e.OwnerType), "kinds", kinds) return err - } // Cache the Group and Kind for the OwnerType e.groupKind = schema.GroupKind{Group: kinds[0].Group, Kind: kinds[0].Kind} @@ -156,7 +155,7 @@ func (e *EnqueueRequestForOwner) getOwnerReconcileRequest(object metav1.Object, // getOwnersReferences returns the OwnerReferences for an object as specified by the EnqueueRequestForOwner // - if IsController is true: only take the Controller OwnerReference (if found) -// - if IsController is false: take all OwnerReferences +// - if IsController is false: take all OwnerReferences. func (e *EnqueueRequestForOwner) getOwnersReferences(object metav1.Object) []metav1.OwnerReference { if object == nil { return nil diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/handler/eventhandler.go b/vendor/sigs.k8s.io/controller-runtime/pkg/handler/eventhandler.go index c9b93f8b97..8652d22d72 100644 --- a/vendor/sigs.k8s.io/controller-runtime/pkg/handler/eventhandler.go +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/handler/eventhandler.go @@ -75,28 +75,28 @@ type Funcs struct { GenericFunc func(event.GenericEvent, workqueue.RateLimitingInterface) } -// Create implements EventHandler +// Create implements EventHandler. func (h Funcs) Create(e event.CreateEvent, q workqueue.RateLimitingInterface) { if h.CreateFunc != nil { h.CreateFunc(e, q) } } -// Delete implements EventHandler +// Delete implements EventHandler. func (h Funcs) Delete(e event.DeleteEvent, q workqueue.RateLimitingInterface) { if h.DeleteFunc != nil { h.DeleteFunc(e, q) } } -// Update implements EventHandler +// Update implements EventHandler. func (h Funcs) Update(e event.UpdateEvent, q workqueue.RateLimitingInterface) { if h.UpdateFunc != nil { h.UpdateFunc(e, q) } } -// Generic implements EventHandler +// Generic implements EventHandler. func (h Funcs) Generic(e event.GenericEvent, q workqueue.RateLimitingInterface) { if h.GenericFunc != nil { h.GenericFunc(e, q) diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/healthz/healthz.go b/vendor/sigs.k8s.io/controller-runtime/pkg/healthz/healthz.go index e7f4b1c279..bd1cc151af 100644 --- a/vendor/sigs.k8s.io/controller-runtime/pkg/healthz/healthz.go +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/healthz/healthz.go @@ -35,7 +35,7 @@ type Handler struct { Checks map[string]Checker } -// checkStatus holds the output of a particular check +// checkStatus holds the output of a particular check. type checkStatus struct { name string healthy bool @@ -173,8 +173,7 @@ type CheckHandler struct { } func (h CheckHandler) ServeHTTP(resp http.ResponseWriter, req *http.Request) { - err := h.Checker(req) - if err != nil { + if err := h.Checker(req); err != nil { http.Error(resp, fmt.Sprintf("internal server error: %v", err), http.StatusInternalServerError) } else { fmt.Fprint(resp, "ok") @@ -184,10 +183,10 @@ func (h CheckHandler) ServeHTTP(resp http.ResponseWriter, req *http.Request) { // Checker knows how to perform a health check. type Checker func(req *http.Request) error -// Ping returns true automatically when checked +// Ping returns true automatically when checked. 
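Reviewer note: healthz.Handler above serves named Checkers (Ping, just below, always passes), and getExcludedChecks lets a probe skip checks via the ?exclude= query parameter. A registration sketch assuming a manager; the check names are illustrative.

import (
	"fmt"
	"net/http"

	ctrl "sigs.k8s.io/controller-runtime"
	"sigs.k8s.io/controller-runtime/pkg/healthz"
)

func addChecks(mgr ctrl.Manager, ready func() bool) error {
	// "ping" always succeeds; useful as a liveness probe.
	if err := mgr.AddHealthzCheck("ping", healthz.Ping); err != nil {
		return err
	}
	// A custom checker: a non-nil error marks the endpoint unhealthy (HTTP 500).
	// A probe can skip it with GET /readyz?exclude=cache-synced.
	return mgr.AddReadyzCheck("cache-synced", func(_ *http.Request) error {
		if !ready() {
			return fmt.Errorf("caches not synced yet")
		}
		return nil
	})
}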
var Ping Checker = func(_ *http.Request) error { return nil } -// getExcludedChecks extracts the health check names to be excluded from the query param +// getExcludedChecks extracts the health check names to be excluded from the query param. func getExcludedChecks(r *http.Request) sets.String { checks, found := r.URL.Query()["exclude"] if found { diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/internal/controller/controller.go b/vendor/sigs.k8s.io/controller-runtime/pkg/internal/controller/controller.go index f5024502d9..224d300b89 100644 --- a/vendor/sigs.k8s.io/controller-runtime/pkg/internal/controller/controller.go +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/internal/controller/controller.go @@ -37,7 +37,7 @@ import ( var _ inject.Injector = &Controller{} -// Controller implements controller.Controller +// Controller implements controller.Controller. type Controller struct { // Name is used to uniquely identify a Controller in tracing, logging and monitoring. Name is required. Name string @@ -94,14 +94,14 @@ type watchDescription struct { predicates []predicate.Predicate } -// Reconcile implements reconcile.Reconciler +// Reconcile implements reconcile.Reconciler. func (c *Controller) Reconcile(ctx context.Context, req reconcile.Request) (reconcile.Result, error) { log := c.Log.WithValues("name", req.Name, "namespace", req.Namespace) ctx = logf.IntoContext(ctx, log) return c.Do.Reconcile(ctx, req) } -// Watch implements controller.Controller +// Watch implements controller.Controller. func (c *Controller) Watch(src source.Source, evthdler handler.EventHandler, prct ...predicate.Predicate) error { c.mu.Lock() defer c.mu.Unlock() @@ -131,7 +131,7 @@ func (c *Controller) Watch(src source.Source, evthdler handler.EventHandler, prc return src.Start(c.ctx, evthdler, c.Queue, prct...) } -// Start implements controller.Controller +// Start implements controller.Controller. func (c *Controller) Start(ctx context.Context) error { // use an IIFE to get proper lock handling // but lock outside to get proper handling of the queue shutdown @@ -295,13 +295,14 @@ func (c *Controller) reconcileHandler(ctx context.Context, obj interface{}) { // RunInformersAndControllers the syncHandler, passing it the Namespace/Name string of the // resource to be synced. - if result, err := c.Do.Reconcile(ctx, req); err != nil { + result, err := c.Do.Reconcile(ctx, req) + switch { + case err != nil: c.Queue.AddRateLimited(req) ctrlmetrics.ReconcileErrors.WithLabelValues(c.Name).Inc() ctrlmetrics.ReconcileTotal.WithLabelValues(c.Name, labelError).Inc() log.Error(err, "Reconciler error") - return - } else if result.RequeueAfter > 0 { + case result.RequeueAfter > 0: // The result.RequeueAfter request will be lost, if it is returned // along with a non-nil error. But this is intended as // We need to drive to stable reconcile loops before queuing due @@ -309,18 +310,15 @@ func (c *Controller) reconcileHandler(ctx context.Context, obj interface{}) { c.Queue.Forget(obj) c.Queue.AddAfter(req, result.RequeueAfter) ctrlmetrics.ReconcileTotal.WithLabelValues(c.Name, labelRequeueAfter).Inc() - return - } else if result.Requeue { + case result.Requeue: c.Queue.AddRateLimited(req) ctrlmetrics.ReconcileTotal.WithLabelValues(c.Name, labelRequeue).Inc() - return + default: + // Finally, if no error occurs we Forget this item so it does not + // get queued again until another change happens. 
+ c.Queue.Forget(obj) + ctrlmetrics.ReconcileTotal.WithLabelValues(c.Name, labelSuccess).Inc() } - - // Finally, if no error occurs we Forget this item so it does not - // get queued again until another change happens. - c.Queue.Forget(obj) - - ctrlmetrics.ReconcileTotal.WithLabelValues(c.Name, labelSuccess).Inc() } // GetLogger returns this controller's logger. @@ -328,13 +326,13 @@ func (c *Controller) GetLogger() logr.Logger { return c.Log } -// InjectFunc implement SetFields.Injector +// InjectFunc implement SetFields.Injector. func (c *Controller) InjectFunc(f inject.Func) error { c.SetFields = f return nil } -// updateMetrics updates prometheus metrics within the controller +// updateMetrics updates prometheus metrics within the controller. func (c *Controller) updateMetrics(reconcileTime time.Duration) { ctrlmetrics.ReconcileTime.WithLabelValues(c.Name).Observe(reconcileTime.Seconds()) } diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/internal/controller/metrics/metrics.go b/vendor/sigs.k8s.io/controller-runtime/pkg/internal/controller/metrics/metrics.go index 126ded6609..baec669277 100644 --- a/vendor/sigs.k8s.io/controller-runtime/pkg/internal/controller/metrics/metrics.go +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/internal/controller/metrics/metrics.go @@ -18,6 +18,7 @@ package metrics import ( "github.com/prometheus/client_golang/prometheus" + "github.com/prometheus/client_golang/prometheus/collectors" "sigs.k8s.io/controller-runtime/pkg/metrics" ) @@ -25,21 +26,21 @@ var ( // ReconcileTotal is a prometheus counter metrics which holds the total // number of reconciliations per controller. It has two labels. controller label refers // to the controller name and result label refers to the reconcile result i.e - // success, error, requeue, requeue_after + // success, error, requeue, requeue_after. ReconcileTotal = prometheus.NewCounterVec(prometheus.CounterOpts{ Name: "controller_runtime_reconcile_total", Help: "Total number of reconciliations per controller", }, []string{"controller", "result"}) // ReconcileErrors is a prometheus counter metrics which holds the total - // number of errors from the Reconciler + // number of errors from the Reconciler. ReconcileErrors = prometheus.NewCounterVec(prometheus.CounterOpts{ Name: "controller_runtime_reconcile_errors_total", Help: "Total number of reconciliation errors per controller", }, []string{"controller"}) // ReconcileTime is a prometheus metric which keeps track of the duration - // of reconciliations + // of reconciliations. ReconcileTime = prometheus.NewHistogramVec(prometheus.HistogramOpts{ Name: "controller_runtime_reconcile_time_seconds", Help: "Length of time per reconciliation per controller", @@ -48,14 +49,14 @@ var ( }, []string{"controller"}) // WorkerCount is a prometheus metric which holds the number of - // concurrent reconciles per controller + // concurrent reconciles per controller. WorkerCount = prometheus.NewGaugeVec(prometheus.GaugeOpts{ Name: "controller_runtime_max_concurrent_reconciles", Help: "Maximum number of concurrent reconciles per controller", }, []string{"controller"}) // ActiveWorkers is a prometheus metric which holds the number - // of active workers per controller + // of active workers per controller. 
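Reviewer note: the reconcileHandler switch above maps a Reconciler's return value to queue actions. A sketch of what each branch corresponds to from the Reconciler's side, not part of the patch.

import (
	"context"
	"time"

	ctrl "sigs.k8s.io/controller-runtime"
)

type demoReconciler struct{}

func (r demoReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) {
	// err != nil          -> AddRateLimited plus error metrics
	// RequeueAfter > 0    -> Forget, then AddAfter(req, d); any error returned
	//                        alongside it is intentionally dropped
	// Requeue == true     -> AddRateLimited
	// zero Result, no err -> Forget plus the "success" metric
	return ctrl.Result{RequeueAfter: 30 * time.Second}, nil
}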
ActiveWorkers = prometheus.NewGaugeVec(prometheus.GaugeOpts{ Name: "controller_runtime_active_workers", Help: "Number of currently used workers per controller", @@ -70,8 +71,8 @@ func init() { WorkerCount, ActiveWorkers, // expose process metrics like CPU, Memory, file descriptor usage etc. - prometheus.NewProcessCollector(prometheus.ProcessCollectorOpts{}), + collectors.NewProcessCollector(collectors.ProcessCollectorOpts{}), // expose Go runtime metrics like GC stats, memory stats etc. - prometheus.NewGoCollector(), + collectors.NewGoCollector(), ) } diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/internal/objectutil/objectutil.go b/vendor/sigs.k8s.io/controller-runtime/pkg/internal/objectutil/objectutil.go index 5264da3cc1..7057f3dbe4 100644 --- a/vendor/sigs.k8s.io/controller-runtime/pkg/internal/objectutil/objectutil.go +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/internal/objectutil/objectutil.go @@ -20,7 +20,6 @@ import ( "errors" "fmt" - "k8s.io/apimachinery/pkg/api/meta" apimeta "k8s.io/apimachinery/pkg/api/meta" "k8s.io/apimachinery/pkg/labels" "k8s.io/apimachinery/pkg/runtime" @@ -28,7 +27,7 @@ import ( "sigs.k8s.io/controller-runtime/pkg/client/apiutil" ) -// FilterWithLabels returns a copy of the items in objs matching labelSel +// FilterWithLabels returns a copy of the items in objs matching labelSel. func FilterWithLabels(objs []runtime.Object, labelSel labels.Selector) ([]runtime.Object, error) { outItems := make([]runtime.Object, 0, len(objs)) for _, obj := range objs { @@ -69,10 +68,10 @@ func IsAPINamespacedWithGVK(gk schema.GroupVersionKind, scheme *runtime.Scheme, scope := restmapping.Scope.Name() if scope == "" { - return false, errors.New("Scope cannot be identified. Empty scope returned") + return false, errors.New("scope cannot be identified, empty scope returned") } - if scope != meta.RESTScopeNameRoot { + if scope != apimeta.RESTScopeNameRoot { return true, nil } return false, nil diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/leaderelection/leader_election.go b/vendor/sigs.k8s.io/controller-runtime/pkg/leaderelection/leader_election.go index 0173f6e2f4..55fd228690 100644 --- a/vendor/sigs.k8s.io/controller-runtime/pkg/leaderelection/leader_election.go +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/leaderelection/leader_election.go @@ -31,7 +31,7 @@ import ( const inClusterNamespacePath = "/var/run/secrets/kubernetes.io/serviceaccount/namespace" -// Options provides the required configuration to create a new resource lock +// Options provides the required configuration to create a new resource lock. type Options struct { // LeaderElection determines whether or not to use leader election when // starting the manager. @@ -104,8 +104,7 @@ func NewResourceLock(config *rest.Config, recorderProvider recorder.Provider, op func getInClusterNamespace() (string, error) { // Check whether the namespace file exists. // If not, we are not running in cluster so can't guess the namespace. 
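Reviewer note: the init block above switches to the collectors package because the prometheus.NewProcessCollector/NewGoCollector constructors are deprecated in recent client_golang. Custom metrics share the same registry and /metrics endpoint; the metric name below is illustrative.

import (
	"github.com/prometheus/client_golang/prometheus"
	"sigs.k8s.io/controller-runtime/pkg/metrics"
)

var backupsTotal = prometheus.NewCounterVec(prometheus.CounterOpts{
	Name: "stork_backups_total",
	Help: "Total number of backups processed, partitioned by result.",
}, []string{"result"})

func init() {
	// Served alongside the controller metrics and collectors registered above.
	metrics.Registry.MustRegister(backupsTotal)
}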
- _, err := os.Stat(inClusterNamespacePath) - if os.IsNotExist(err) { + if _, err := os.Stat(inClusterNamespacePath); os.IsNotExist(err) { return "", fmt.Errorf("not running in-cluster, please specify LeaderElectionNamespace") } else if err != nil { return "", fmt.Errorf("error checking namespace file: %w", err) diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/log/deleg.go b/vendor/sigs.k8s.io/controller-runtime/pkg/log/deleg.go index ed18ae6d11..bbd9c9c756 100644 --- a/vendor/sigs.k8s.io/controller-runtime/pkg/log/deleg.go +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/log/deleg.go @@ -47,7 +47,7 @@ func (p *loggerPromise) WithName(l *DelegatingLogger, name string) *loggerPromis return res } -// WithValues provides a new Logger with the tags appended +// WithValues provides a new Logger with the tags appended. func (p *loggerPromise) WithValues(l *DelegatingLogger, tags ...interface{}) *loggerPromise { res := &loggerPromise{ logger: l, @@ -74,7 +74,7 @@ func (p *loggerPromise) V(l *DelegatingLogger, level int) *loggerPromise { return res } -// Fulfill instantiates the Logger with the provided logger +// Fulfill instantiates the Logger with the provided logger. func (p *loggerPromise) Fulfill(parentLogger logr.Logger) { var logger = parentLogger if p.name != nil { @@ -163,7 +163,7 @@ func (l *DelegatingLogger) V(level int) logr.Logger { return res } -// WithName provides a new Logger with the name appended +// WithName provides a new Logger with the name appended. func (l *DelegatingLogger) WithName(name string) logr.Logger { l.lock.RLock() defer l.lock.RUnlock() @@ -179,7 +179,7 @@ func (l *DelegatingLogger) WithName(name string) logr.Logger { return res } -// WithValues provides a new Logger with the tags appended +// WithValues provides a new Logger with the tags appended. func (l *DelegatingLogger) WithValues(tags ...interface{}) logr.Logger { l.lock.RLock() defer l.lock.RUnlock() diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/log/null.go b/vendor/sigs.k8s.io/controller-runtime/pkg/log/null.go index 4c56f3427b..09a5a02eb6 100644 --- a/vendor/sigs.k8s.io/controller-runtime/pkg/log/null.go +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/log/null.go @@ -29,32 +29,32 @@ type NullLogger struct{} var _ logr.Logger = NullLogger{} -// Info implements logr.InfoLogger +// Info implements logr.InfoLogger. func (NullLogger) Info(_ string, _ ...interface{}) { // Do nothing. } -// Enabled implements logr.InfoLogger +// Enabled implements logr.InfoLogger. func (NullLogger) Enabled() bool { return false } -// Error implements logr.Logger +// Error implements logr.Logger. func (NullLogger) Error(_ error, _ string, _ ...interface{}) { // Do nothing. } -// V implements logr.Logger -func (log NullLogger) V(_ int) logr.InfoLogger { +// V implements logr.Logger. +func (log NullLogger) V(_ int) logr.Logger { return log } -// WithName implements logr.Logger +// WithName implements logr.Logger. func (log NullLogger) WithName(_ string) logr.Logger { return log } -// WithValues implements logr.Logger +// WithValues implements logr.Logger. 
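Reviewer note: getInClusterNamespace above reads the serviceaccount namespace file, so a manager run outside a cluster must set LeaderElectionNamespace itself or hit the "not running in-cluster" error. A sketch; the lock ID and namespace are illustrative.

import (
	"k8s.io/client-go/rest"
	ctrl "sigs.k8s.io/controller-runtime"
)

func newManager(cfg *rest.Config) (ctrl.Manager, error) {
	return ctrl.NewManager(cfg, ctrl.Options{
		LeaderElection:   true,
		LeaderElectionID: "stork-controller-lock",
		// Required when /var/run/secrets/kubernetes.io/serviceaccount/namespace
		// does not exist, i.e. when running outside the cluster.
		LeaderElectionNamespace: "kube-system",
	})
}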
func (log NullLogger) WithValues(_ ...interface{}) logr.Logger { return log } diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/log/warning_handler.go b/vendor/sigs.k8s.io/controller-runtime/pkg/log/warning_handler.go index d4ea12cebf..3012fdd411 100644 --- a/vendor/sigs.k8s.io/controller-runtime/pkg/log/warning_handler.go +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/log/warning_handler.go @@ -23,7 +23,7 @@ import ( ) // KubeAPIWarningLoggerOptions controls the behavior -// of a rest.WarningHandler constructed using NewKubeAPIWarningLogger() +// of a rest.WarningHandler constructed using NewKubeAPIWarningLogger(). type KubeAPIWarningLoggerOptions struct { // Deduplicate indicates a given warning message should only be written once. // Setting this to true in a long-running process handling many warnings can diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/manager/internal.go b/vendor/sigs.k8s.io/controller-runtime/pkg/manager/internal.go index c16a5bb5f3..5f85e10c90 100644 --- a/vendor/sigs.k8s.io/controller-runtime/pkg/manager/internal.go +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/manager/internal.go @@ -29,7 +29,7 @@ import ( "github.com/prometheus/client_golang/prometheus/promhttp" "k8s.io/apimachinery/pkg/api/meta" "k8s.io/apimachinery/pkg/runtime" - utilerrors "k8s.io/apimachinery/pkg/util/errors" + kerrors "k8s.io/apimachinery/pkg/util/errors" "k8s.io/client-go/rest" "k8s.io/client-go/tools/leaderelection" "k8s.io/client-go/tools/leaderelection/resourcelock" @@ -251,8 +251,7 @@ func (cm *controllerManager) AddMetricsExtraHandler(path string, handler http.Ha cm.mu.Lock() defer cm.mu.Unlock() - _, found := cm.metricsExtraHandlers[path] - if found { + if _, found := cm.metricsExtraHandlers[path]; found { return fmt.Errorf("can't register extra handler by duplicate path %q on metrics http server", path) } @@ -261,7 +260,7 @@ func (cm *controllerManager) AddMetricsExtraHandler(path string, handler http.Ha return nil } -// AddHealthzCheck allows you to add Healthz checker +// AddHealthzCheck allows you to add Healthz checker. func (cm *controllerManager) AddHealthzCheck(name string, check healthz.Checker) error { cm.mu.Lock() defer cm.mu.Unlock() @@ -282,7 +281,7 @@ func (cm *controllerManager) AddHealthzCheck(name string, check healthz.Checker) return nil } -// AddReadyzCheck allows you to add Readyz checker +// AddReadyzCheck allows you to add Readyz checker. func (cm *controllerManager) AddReadyzCheck(name string, check healthz.Checker) error { cm.mu.Lock() defer cm.mu.Unlock() @@ -451,7 +450,7 @@ func (cm *controllerManager) Start(ctx context.Context) (err error) { // Utilerrors.Aggregate allows to use errors.Is for all contained errors // whereas fmt.Errorf allows wrapping at most one error which means the // other one can not be found anymore. - err = utilerrors.NewAggregate([]error{err, stopErr}) + err = kerrors.NewAggregate([]error{err, stopErr}) } else { err = stopErr } diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/manager/manager.go b/vendor/sigs.k8s.io/controller-runtime/pkg/manager/manager.go index 843919427d..903e3e47f9 100644 --- a/vendor/sigs.k8s.io/controller-runtime/pkg/manager/manager.go +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/manager/manager.go @@ -95,7 +95,7 @@ type Manager interface { GetControllerOptions() v1alpha1.ControllerConfigurationSpec } -// Options are the arguments for creating a new Manager +// Options are the arguments for creating a new Manager. 
type Options struct { // Scheme is the scheme used to resolve runtime.Objects to GroupVersionKinds / Resources // Defaults to the kubernetes/client-go scheme.Scheme, but it's almost always better @@ -292,7 +292,7 @@ type Runnable interface { // until it's done running. type RunnableFunc func(context.Context) error -// Start implements Runnable +// Start implements Runnable. func (r RunnableFunc) Start(ctx context.Context) error { return r(ctx) } @@ -319,7 +319,7 @@ func New(config *rest.Config, options Options) (Manager, error) { clusterOptions.NewClient = options.NewClient clusterOptions.ClientDisableCacheFor = options.ClientDisableCacheFor clusterOptions.DryRunClient = options.DryRunClient - clusterOptions.EventBroadcaster = options.EventBroadcaster + clusterOptions.EventBroadcaster = options.EventBroadcaster //nolint:staticcheck }) if err != nil { return nil, err @@ -393,7 +393,7 @@ func New(config *rest.Config, options Options) (Manager, error) { // AndFrom will use a supplied type and convert to Options // any options already set on Options will be ignored, this is used to allow -// cli flags to override anything specified in the config file +// cli flags to override anything specified in the config file. func (o Options) AndFrom(loader config.ControllerManagerConfiguration) (Options, error) { if inj, wantsScheme := loader.(inject.Scheme); wantsScheme { err := inj.InjectScheme(o.Scheme) @@ -458,7 +458,7 @@ func (o Options) AndFrom(loader config.ControllerManagerConfiguration) (Options, return o, nil } -// AndFromOrDie will use options.AndFrom() and will panic if there are errors +// AndFromOrDie will use options.AndFrom() and will panic if there are errors. func (o Options) AndFromOrDie(loader config.ControllerManagerConfiguration) Options { o, err := o.AndFrom(loader) if err != nil { @@ -468,7 +468,7 @@ func (o Options) AndFromOrDie(loader config.ControllerManagerConfiguration) Opti } func (o Options) setLeaderElectionConfig(obj v1alpha1.ControllerManagerConfigurationSpec) Options { - if o.LeaderElection == false && obj.LeaderElection.LeaderElect != nil { + if !o.LeaderElection && obj.LeaderElection.LeaderElect != nil { o.LeaderElection = *obj.LeaderElection.LeaderElect } @@ -499,7 +499,7 @@ func (o Options) setLeaderElectionConfig(obj v1alpha1.ControllerManagerConfigura return o } -// defaultHealthProbeListener creates the default health probes listener bound to the given address +// defaultHealthProbeListener creates the default health probes listener bound to the given address. func defaultHealthProbeListener(addr string) (net.Listener, error) { if addr == "" || addr == "0" { return nil, nil @@ -512,9 +512,8 @@ func defaultHealthProbeListener(addr string) (net.Listener, error) { return ln, nil } -// setOptionsDefaults set default values for Options fields +// setOptionsDefaults set default values for Options fields. func setOptionsDefaults(options Options) Options { - // Allow newResourceLock to be mocked if options.newResourceLock == nil { options.newResourceLock = leaderelection.NewResourceLock diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/metrics/client_go_adapter.go b/vendor/sigs.k8s.io/controller-runtime/pkg/metrics/client_go_adapter.go index 3df9b0b0b0..90754269dd 100644 --- a/vendor/sigs.k8s.io/controller-runtime/pkg/metrics/client_go_adapter.go +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/metrics/client_go_adapter.go @@ -51,7 +51,7 @@ const ( ) var ( - // client metrics + // client metrics. 
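Reviewer note: AndFrom above layers a component-config file under Options, with fields already set on Options (typically from CLI flags) taking precedence, per its doc comment. A load sketch, assuming config.File() defaults to decoding a ControllerManagerConfiguration; the file path is illustrative.

import (
	"k8s.io/client-go/rest"
	ctrl "sigs.k8s.io/controller-runtime"
	ctrlcfg "sigs.k8s.io/controller-runtime/pkg/config"
)

func managerFromFile(cfg *rest.Config) (ctrl.Manager, error) {
	options := ctrl.Options{}
	// AndFromOrDie panics on a bad file; AndFrom returns the error instead.
	options, err := options.AndFrom(ctrlcfg.File().AtPath("/etc/stork/controller_manager_config.yaml"))
	if err != nil {
		return nil, err
	}
	return ctrl.NewManager(cfg, options)
}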
requestLatency = prometheus.NewHistogramVec(prometheus.HistogramOpts{ Subsystem: RestClientSubsystem, Name: LatencyKey, @@ -65,7 +65,7 @@ var ( Help: "Number of HTTP requests, partitioned by status code, method, and host.", }, []string{"code", "method", "host"}) - // reflector metrics + // reflector metrics. // TODO(directxman12): update these to be histograms once the metrics overhaul KEP // PRs start landing. @@ -124,7 +124,7 @@ func init() { registerReflectorMetrics() } -// registerClientMetrics sets up the client latency metrics from client-go +// registerClientMetrics sets up the client latency metrics from client-go. func registerClientMetrics() { // register the metrics with our registry Registry.MustRegister(requestLatency) @@ -137,7 +137,7 @@ func registerClientMetrics() { }) } -// registerReflectorMetrics sets up reflector (reconcile) loop metrics +// registerReflectorMetrics sets up reflector (reconcile) loop metrics. func registerReflectorMetrics() { Registry.MustRegister(listsTotal) Registry.MustRegister(listsDuration) diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/metrics/registry.go b/vendor/sigs.k8s.io/controller-runtime/pkg/metrics/registry.go index 9ba3d600ea..ce17124d53 100644 --- a/vendor/sigs.k8s.io/controller-runtime/pkg/metrics/registry.go +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/metrics/registry.go @@ -26,5 +26,5 @@ type RegistererGatherer interface { } // Registry is a prometheus registry for storing metrics within the -// controller-runtime +// controller-runtime. var Registry RegistererGatherer = prometheus.NewRegistry() diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/predicate/predicate.go b/vendor/sigs.k8s.io/controller-runtime/pkg/predicate/predicate.go index bab2ce346e..fc59d89ba3 100644 --- a/vendor/sigs.k8s.io/controller-runtime/pkg/predicate/predicate.go +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/predicate/predicate.go @@ -65,7 +65,7 @@ type Funcs struct { GenericFunc func(event.GenericEvent) bool } -// Create implements Predicate +// Create implements Predicate. func (p Funcs) Create(e event.CreateEvent) bool { if p.CreateFunc != nil { return p.CreateFunc(e) @@ -73,7 +73,7 @@ func (p Funcs) Create(e event.CreateEvent) bool { return true } -// Delete implements Predicate +// Delete implements Predicate. func (p Funcs) Delete(e event.DeleteEvent) bool { if p.DeleteFunc != nil { return p.DeleteFunc(e) @@ -81,7 +81,7 @@ func (p Funcs) Delete(e event.DeleteEvent) bool { return true } -// Update implements Predicate +// Update implements Predicate. func (p Funcs) Update(e event.UpdateEvent) bool { if p.UpdateFunc != nil { return p.UpdateFunc(e) @@ -89,7 +89,7 @@ func (p Funcs) Update(e event.UpdateEvent) bool { return true } -// Generic implements Predicate +// Generic implements Predicate. func (p Funcs) Generic(e event.GenericEvent) bool { if p.GenericFunc != nil { return p.GenericFunc(e) @@ -117,12 +117,12 @@ func NewPredicateFuncs(filter func(object client.Object) bool) Funcs { } } -// ResourceVersionChangedPredicate implements a default update predicate function on resource version change +// ResourceVersionChangedPredicate implements a default update predicate function on resource version change. type ResourceVersionChangedPredicate struct { Funcs } -// Update implements default UpdateEvent filter for validating resource version change +// Update implements default UpdateEvent filter for validating resource version change. 
func (ResourceVersionChangedPredicate) Update(e event.UpdateEvent) bool { if e.ObjectOld == nil { log.Error(nil, "Update event has no old object to update", "event", e) @@ -156,7 +156,7 @@ type GenerationChangedPredicate struct { Funcs } -// Update implements default UpdateEvent filter for validating generation change +// Update implements default UpdateEvent filter for validating generation change. func (GenerationChangedPredicate) Update(e event.UpdateEvent) bool { if e.ObjectOld == nil { log.Error(nil, "Update event has no old object to update", "event", e) @@ -186,7 +186,7 @@ type AnnotationChangedPredicate struct { Funcs } -// Update implements default UpdateEvent filter for validating annotation change +// Update implements default UpdateEvent filter for validating annotation change. func (AnnotationChangedPredicate) Update(e event.UpdateEvent) bool { if e.ObjectOld == nil { log.Error(nil, "Update event has no old object to update", "event", e) @@ -216,7 +216,7 @@ type LabelChangedPredicate struct { Funcs } -// Update implements default UpdateEvent filter for checking label change +// Update implements default UpdateEvent filter for checking label change. func (LabelChangedPredicate) Update(e event.UpdateEvent) bool { if e.ObjectOld == nil { log.Error(nil, "Update event has no old object to update", "event", e) diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/runtime/inject/inject.go b/vendor/sigs.k8s.io/controller-runtime/pkg/runtime/inject/inject.go index 8874c75727..c8c56ba817 100644 --- a/vendor/sigs.k8s.io/controller-runtime/pkg/runtime/inject/inject.go +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/runtime/inject/inject.go @@ -29,7 +29,7 @@ import ( ) // Cache is used by the ControllerManager to inject Cache into Sources, EventHandlers, Predicates, and -// Reconciles +// Reconciles. type Cache interface { InjectCache(cache cache.Cache) error } @@ -49,7 +49,7 @@ type APIReader interface { } // APIReaderInto will set APIReader on i and return the result if it implements APIReaderInto. -// Returns false if i does not implement APIReader +// Returns false if i does not implement APIReader. func APIReaderInto(reader client.Reader, i interface{}) (bool, error) { if s, ok := i.(APIReader); ok { return true, s.InjectAPIReader(reader) @@ -58,7 +58,7 @@ func APIReaderInto(reader client.Reader, i interface{}) (bool, error) { } // Config is used by the ControllerManager to inject Config into Sources, EventHandlers, Predicates, and -// Reconciles +// Reconciles. type Config interface { InjectConfig(*rest.Config) error } @@ -73,7 +73,7 @@ func ConfigInto(config *rest.Config, i interface{}) (bool, error) { } // Client is used by the ControllerManager to inject client into Sources, EventHandlers, Predicates, and -// Reconciles +// Reconciles. type Client interface { InjectClient(client.Client) error } @@ -88,7 +88,7 @@ func ClientInto(client client.Client, i interface{}) (bool, error) { } // Scheme is used by the ControllerManager to inject Scheme into Sources, EventHandlers, Predicates, and -// Reconciles +// Reconciles. type Scheme interface { InjectScheme(scheme *runtime.Scheme) error } @@ -117,7 +117,7 @@ func StopChannelInto(stop <-chan struct{}, i interface{}) (bool, error) { return false, nil } -// Mapper is used to inject the rest mapper to components that may need it +// Mapper is used to inject the rest mapper to components that may need it. 
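Reviewer note: the predicates above filter events before a handler ever runs. Two common ones as a sketch, not from the patch; "team-a" is an illustrative namespace.

import (
	"sigs.k8s.io/controller-runtime/pkg/client"
	"sigs.k8s.io/controller-runtime/pkg/predicate"
)

// Spec edits bump metadata.generation; status-only writes do not, so this
// predicate suppresses reconciles triggered by status updates.
var specChanged = predicate.GenerationChangedPredicate{}

// NewPredicateFuncs applies one filter to all four event types.
var inTeamA = predicate.NewPredicateFuncs(func(obj client.Object) bool {
	return obj.GetNamespace() == "team-a"
})

// Both can be passed at Watch time:
//   c.Watch(src, hdl, specChanged, inTeamA)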
type Mapper interface { InjectMapper(meta.RESTMapper) error } @@ -134,7 +134,7 @@ func MapperInto(mapper meta.RESTMapper, i interface{}) (bool, error) { // Func injects dependencies into i. type Func func(i interface{}) error -// Injector is used by the ControllerManager to inject Func into Controllers +// Injector is used by the ControllerManager to inject Func into Controllers. type Injector interface { InjectFunc(f Func) error } diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/source/internal/eventsource.go b/vendor/sigs.k8s.io/controller-runtime/pkg/source/internal/eventsource.go index 33c4c41348..f0cfe212ed 100644 --- a/vendor/sigs.k8s.io/controller-runtime/pkg/source/internal/eventsource.go +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/source/internal/eventsource.go @@ -33,14 +33,14 @@ var log = logf.RuntimeLog.WithName("source").WithName("EventHandler") var _ cache.ResourceEventHandler = EventHandler{} -// EventHandler adapts a handler.EventHandler interface to a cache.ResourceEventHandler interface +// EventHandler adapts a handler.EventHandler interface to a cache.ResourceEventHandler interface. type EventHandler struct { EventHandler handler.EventHandler Queue workqueue.RateLimitingInterface Predicates []predicate.Predicate } -// OnAdd creates CreateEvent and calls Create on EventHandler +// OnAdd creates CreateEvent and calls Create on EventHandler. func (e EventHandler) OnAdd(obj interface{}) { c := event.CreateEvent{} @@ -63,7 +63,7 @@ func (e EventHandler) OnAdd(obj interface{}) { e.EventHandler.Create(c, e.Queue) } -// OnUpdate creates UpdateEvent and calls Update on EventHandler +// OnUpdate creates UpdateEvent and calls Update on EventHandler. func (e EventHandler) OnUpdate(oldObj, newObj interface{}) { u := event.UpdateEvent{} @@ -94,7 +94,7 @@ func (e EventHandler) OnUpdate(oldObj, newObj interface{}) { e.EventHandler.Update(u, e.Queue) } -// OnDelete creates DeleteEvent and calls Delete on EventHandler +// OnDelete creates DeleteEvent and calls Delete on EventHandler. func (e EventHandler) OnDelete(obj interface{}) { d := event.DeleteEvent{} diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/source/source.go b/vendor/sigs.k8s.io/controller-runtime/pkg/source/source.go index adabbaf917..a63b37c443 100644 --- a/vendor/sigs.k8s.io/controller-runtime/pkg/source/source.go +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/source/source.go @@ -66,7 +66,7 @@ type SyncingSource interface { // NewKindWithCache creates a Source without InjectCache, so that it is assured that the given cache is used // and not overwritten. It can be used to watch objects in a different cluster by passing the cache -// from that other cluster +// from that other cluster. func NewKindWithCache(object client.Object, cache cache.Cache) SyncingSource { return &kindWithCache{kind: Kind{Type: object, cache: cache}} } @@ -84,7 +84,7 @@ func (ks *kindWithCache) WaitForSync(ctx context.Context) error { return ks.kind.WaitForSync(ctx) } -// Kind is used to provide a source of events originating inside the cluster from Watches (e.g. Pod Create) +// Kind is used to provide a source of events originating inside the cluster from Watches (e.g. Pod Create). type Kind struct { // Type is the type of object to watch. e.g. &v1.Pod{} Type client.Object @@ -104,7 +104,6 @@ var _ SyncingSource = &Kind{} // to enqueue reconcile.Requests. 
func (ks *Kind) Start(ctx context.Context, handler handler.EventHandler, queue workqueue.RateLimitingInterface, prct ...predicate.Predicate) error { - // Type should have been specified by the user. if ks.Type == nil { return fmt.Errorf("must specify Kind.Type") @@ -146,7 +145,7 @@ func (ks *Kind) String() string { if ks.Type != nil && ks.Type.GetObjectKind() != nil { return fmt.Sprintf("kind source: %v", ks.Type.GetObjectKind().GroupVersionKind().String()) } - return fmt.Sprintf("kind source: unknown GVK") + return "kind source: unknown GVK" } // WaitForSync implements SyncingSource to allow controllers to wait with starting @@ -307,7 +306,7 @@ func (cs *Channel) syncLoop(ctx context.Context) { } } -// Informer is used to provide a source of events originating inside the cluster from Watches (e.g. Pod Create) +// Informer is used to provide a source of events originating inside the cluster from Watches (e.g. Pod Create). type Informer struct { // Informer is the controller-runtime Informer Informer cache.Informer @@ -319,7 +318,6 @@ var _ Source = &Informer{} // to enqueue reconcile.Requests. func (is *Informer) Start(ctx context.Context, handler handler.EventHandler, queue workqueue.RateLimitingInterface, prct ...predicate.Predicate) error { - // Informer should have been specified by the user. if is.Informer == nil { return fmt.Errorf("must specify Informer.Informer") @@ -335,10 +333,10 @@ func (is *Informer) String() string { var _ Source = Func(nil) -// Func is a function that implements Source +// Func is a function that implements Source. type Func func(context.Context, handler.EventHandler, workqueue.RateLimitingInterface, ...predicate.Predicate) error -// Start implements Source +// Start implements Source. func (f Func) Start(ctx context.Context, evt handler.EventHandler, queue workqueue.RateLimitingInterface, pr ...predicate.Predicate) error { return f(ctx, evt, queue, pr...) diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/webhook/admission/decode.go b/vendor/sigs.k8s.io/controller-runtime/pkg/webhook/admission/decode.go index 9583b5e9ac..c7cb71b755 100644 --- a/vendor/sigs.k8s.io/controller-runtime/pkg/webhook/admission/decode.go +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/webhook/admission/decode.go @@ -31,7 +31,7 @@ type Decoder struct { codecs serializer.CodecFactory } -// NewDecoder creates a Decoder given the runtime.Scheme +// NewDecoder creates a Decoder given the runtime.Scheme. 
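Reviewer note: NewKindWithCache above fixes a source to a given cache so injection cannot replace it, which is the hook for cross-cluster watches. A sketch, assuming a second cluster.Cluster has already been created and added to the manager.

import (
	corev1 "k8s.io/api/core/v1"
	"sigs.k8s.io/controller-runtime/pkg/cluster"
	"sigs.k8s.io/controller-runtime/pkg/controller"
	"sigs.k8s.io/controller-runtime/pkg/handler"
	"sigs.k8s.io/controller-runtime/pkg/source"
)

func watchRemote(remote cluster.Cluster, c controller.Controller) error {
	// Events come from the remote cluster's cache, not the manager's own.
	src := source.NewKindWithCache(&corev1.Pod{}, remote.GetCache())
	return c.Watch(src, &handler.EnqueueRequestForObject{})
}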
diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/webhook/admission/decode.go b/vendor/sigs.k8s.io/controller-runtime/pkg/webhook/admission/decode.go
index 9583b5e9ac..c7cb71b755 100644
--- a/vendor/sigs.k8s.io/controller-runtime/pkg/webhook/admission/decode.go
+++ b/vendor/sigs.k8s.io/controller-runtime/pkg/webhook/admission/decode.go
@@ -31,7 +31,7 @@ type Decoder struct {
 	codecs serializer.CodecFactory
 }
 
-// NewDecoder creates a Decoder given the runtime.Scheme
+// NewDecoder creates a Decoder given the runtime.Scheme.
 func NewDecoder(scheme *runtime.Scheme) (*Decoder, error) {
 	return &Decoder{codecs: serializer.NewCodecFactory(scheme)}, nil
 }
@@ -64,11 +64,7 @@ func (d *Decoder) DecodeRaw(rawObj runtime.RawExtension, into runtime.Object) er
 	}
 	if unstructuredInto, isUnstructured := into.(*unstructured.Unstructured); isUnstructured {
 		// unmarshal into unstructured's underlying object to avoid calling the decoder
-		if err := json.Unmarshal(rawObj.Raw, &unstructuredInto.Object); err != nil {
-			return err
-		}
-
-		return nil
+		return json.Unmarshal(rawObj.Raw, &unstructuredInto.Object)
 	}
 
 	deserializer := d.codecs.UniversalDeserializer()
diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/webhook/admission/defaulter.go b/vendor/sigs.k8s.io/controller-runtime/pkg/webhook/admission/defaulter.go
index 8b255894ba..0d9aa7a838 100644
--- a/vendor/sigs.k8s.io/controller-runtime/pkg/webhook/admission/defaulter.go
+++ b/vendor/sigs.k8s.io/controller-runtime/pkg/webhook/admission/defaulter.go
@@ -24,7 +24,7 @@ import (
 	"k8s.io/apimachinery/pkg/runtime"
 )
 
-// Defaulter defines functions for setting defaults on resources
+// Defaulter defines functions for setting defaults on resources.
 type Defaulter interface {
 	runtime.Object
 	Default()
@@ -58,8 +58,7 @@ func (h *mutatingHandler) Handle(ctx context.Context, req Request) Response {
 
 	// Get the object in the request
 	obj := h.defaulter.DeepCopyObject().(Defaulter)
-	err := h.decoder.Decode(req, obj)
-	if err != nil {
+	if err := h.decoder.Decode(req, obj); err != nil {
 		return Errored(http.StatusBadRequest, err)
 	}
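The Defaulter contract above is what admission.DefaultingWebhookFor builds a mutating webhook from: the handler decodes the incoming object (through the Decoder also touched in this diff), calls Default(), and responds with a patch of the difference. A sketch under assumptions: MyApp is a hypothetical API type with a generated DeepCopyObject, Spec.Replicas is an assumed field, and the ctrl and admission imports match the examples above.

// Default implements webhook.Defaulter; the mutatingHandler above invokes it
// on the decoded copy of the incoming object.
func (a *MyApp) Default() {
	if a.Spec.Replicas == nil {
		one := int32(1)
		a.Spec.Replicas = &one // assumed field; fill in a sane default
	}
}

// setupMutatingWebhook registers the webhook built from the Defaulter.
func setupMutatingWebhook(mgr ctrl.Manager) {
	mgr.GetWebhookServer().Register(
		"/mutate-myapp",
		admission.DefaultingWebhookFor(&MyApp{}),
	)
}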
diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/webhook/admission/http.go b/vendor/sigs.k8s.io/controller-runtime/pkg/webhook/admission/http.go
index 052f803161..3fa8872ff2 100644
--- a/vendor/sigs.k8s.io/controller-runtime/pkg/webhook/admission/http.go
+++ b/vendor/sigs.k8s.io/controller-runtime/pkg/webhook/admission/http.go
@@ -68,8 +68,7 @@ func (wh *Webhook) ServeHTTP(w http.ResponseWriter, r *http.Request) {
 	}
 
 	// verify the content type is accurate
-	contentType := r.Header.Get("Content-Type")
-	if contentType != "application/json" {
+	if contentType := r.Header.Get("Content-Type"); contentType != "application/json" {
 		err = fmt.Errorf("contentType=%s, expected application/json", contentType)
 		wh.log.Error(err, "unable to process a request with an unknown content type", "content type", contentType)
 		reviewResponse = Errored(http.StatusBadRequest, err)
@@ -125,8 +124,7 @@ func (wh *Webhook) writeResponseTyped(w io.Writer, response Response, admRevGVK
 
 // writeAdmissionResponse writes ar to w.
 func (wh *Webhook) writeAdmissionResponse(w io.Writer, ar v1.AdmissionReview) {
-	err := json.NewEncoder(w).Encode(ar)
-	if err != nil {
+	if err := json.NewEncoder(w).Encode(ar); err != nil {
 		wh.log.Error(err, "unable to encode the response")
 		wh.writeResponse(w, Errored(http.StatusInternalServerError, err))
 	} else {
diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/webhook/admission/validator.go b/vendor/sigs.k8s.io/controller-runtime/pkg/webhook/admission/validator.go
index 926d4a5bd1..4b27e75ede 100644
--- a/vendor/sigs.k8s.io/controller-runtime/pkg/webhook/admission/validator.go
+++ b/vendor/sigs.k8s.io/controller-runtime/pkg/webhook/admission/validator.go
@@ -22,11 +22,11 @@ import (
 	"net/http"
 
 	v1 "k8s.io/api/admission/v1"
-	"k8s.io/apimachinery/pkg/api/errors"
+	apierrors "k8s.io/apimachinery/pkg/api/errors"
 	"k8s.io/apimachinery/pkg/runtime"
 )
 
-// Validator defines functions for validating an operation
+// Validator defines functions for validating an operation.
 type Validator interface {
 	runtime.Object
 	ValidateCreate() error
@@ -70,7 +70,7 @@ func (h *validatingHandler) Handle(ctx context.Context, req Request) Response {
 
 		err = obj.ValidateCreate()
 		if err != nil {
-			var apiStatus errors.APIStatus
+			var apiStatus apierrors.APIStatus
 			if goerrors.As(err, &apiStatus) {
 				return validationResponseFromStatus(false, apiStatus.Status())
 			}
@@ -92,7 +92,7 @@ func (h *validatingHandler) Handle(ctx context.Context, req Request) Response {
 
 		err = obj.ValidateUpdate(oldObj)
 		if err != nil {
-			var apiStatus errors.APIStatus
+			var apiStatus apierrors.APIStatus
 			if goerrors.As(err, &apiStatus) {
 				return validationResponseFromStatus(false, apiStatus.Status())
 			}
@@ -110,7 +110,7 @@ func (h *validatingHandler) Handle(ctx context.Context, req Request) Response {
 
 		err = obj.ValidateDelete()
 		if err != nil {
-			var apiStatus errors.APIStatus
+			var apiStatus apierrors.APIStatus
 			if goerrors.As(err, &apiStatus) {
 				return validationResponseFromStatus(false, apiStatus.Status())
 			}
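Correspondingly, the Validator interface is what admission.ValidatingWebhookFor wraps, and the hunks above show that a returned error satisfying apierrors.APIStatus is forwarded as a full Status rather than flattened to a plain message. A sketch against the same hypothetical MyApp type and assumed Spec.Replicas field:

// ValidateCreate implements webhook.Validator.
func (a *MyApp) ValidateCreate() error {
	if a.Spec.Replicas != nil && *a.Spec.Replicas < 0 {
		// An apierrors-constructed error hits the APIStatus branch above, so
		// the API server receives the structured Status (code and reason)
		// instead of a generic Forbidden message.
		return apierrors.NewBadRequest("spec.replicas must not be negative")
	}
	return nil
}

// ValidateUpdate implements webhook.Validator.
func (a *MyApp) ValidateUpdate(old runtime.Object) error { return a.ValidateCreate() }

// ValidateDelete implements webhook.Validator.
func (a *MyApp) ValidateDelete() error { return nil }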
diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/webhook/admission/webhook.go b/vendor/sigs.k8s.io/controller-runtime/pkg/webhook/admission/webhook.go
index d8c7721501..cf7dbcf68d 100644
--- a/vendor/sigs.k8s.io/controller-runtime/pkg/webhook/admission/webhook.go
+++ b/vendor/sigs.k8s.io/controller-runtime/pkg/webhook/admission/webhook.go
@@ -133,8 +133,8 @@ type Webhook struct {
 }
 
 // InjectLogger gets a handle to a logging instance, hopefully with more info about this particular webhook.
-func (w *Webhook) InjectLogger(l logr.Logger) error {
-	w.log = l
+func (wh *Webhook) InjectLogger(l logr.Logger) error {
+	wh.log = l
 	return nil
 }
 
@@ -142,10 +142,10 @@ func (w *Webhook) InjectLogger(l logr.Logger) error {
 // If the webhook is mutating type, it delegates the AdmissionRequest to each handler and merge the patches.
 // If the webhook is validating type, it delegates the AdmissionRequest to each handler and
 // deny the request if anyone denies.
-func (w *Webhook) Handle(ctx context.Context, req Request) Response {
-	resp := w.Handler.Handle(ctx, req)
+func (wh *Webhook) Handle(ctx context.Context, req Request) Response {
+	resp := wh.Handler.Handle(ctx, req)
 	if err := resp.Complete(req); err != nil {
-		w.log.Error(err, "unable to encode response")
+		wh.log.Error(err, "unable to encode response")
 		return Errored(http.StatusInternalServerError, errUnableToEncodeResponse)
 	}
 
@@ -153,19 +153,19 @@ func (w *Webhook) Handle(ctx context.Context, req Request) Response {
 }
 
 // InjectScheme injects a scheme into the webhook, in order to construct a Decoder.
-func (w *Webhook) InjectScheme(s *runtime.Scheme) error {
+func (wh *Webhook) InjectScheme(s *runtime.Scheme) error {
 	// TODO(directxman12): we should have a better way to pass this down
 	var err error
-	w.decoder, err = NewDecoder(s)
+	wh.decoder, err = NewDecoder(s)
 	if err != nil {
 		return err
 	}
 
 	// inject the decoder here too, just in case the order of calling this is not
 	// scheme first, then inject func
-	if w.Handler != nil {
-		if _, err := InjectDecoderInto(w.GetDecoder(), w.Handler); err != nil {
+	if wh.Handler != nil {
+		if _, err := InjectDecoderInto(wh.GetDecoder(), wh.Handler); err != nil {
 			return err
 		}
 	}
@@ -175,12 +175,12 @@ func (w *Webhook) InjectScheme(s *runtime.Scheme) error {
 
 // GetDecoder returns a decoder to decode the objects embedded in admission requests.
 // It may be nil if we haven't received a scheme to use to determine object types yet.
-func (w *Webhook) GetDecoder() *Decoder {
-	return w.decoder
+func (wh *Webhook) GetDecoder() *Decoder {
+	return wh.decoder
 }
 
 // InjectFunc injects the field setter into the webhook.
-func (w *Webhook) InjectFunc(f inject.Func) error {
+func (wh *Webhook) InjectFunc(f inject.Func) error {
 	// inject directly into the handlers. It would be more correct
 	// to do this in a sync.Once in Handle (since we don't have some
 	// other start/finalize-type method), but it's more efficient to
@@ -200,14 +200,14 @@ func (w *Webhook) InjectFunc(f inject.Func) error {
 			return err
 		}
 
-		if _, err := InjectDecoderInto(w.GetDecoder(), target); err != nil {
+		if _, err := InjectDecoderInto(wh.GetDecoder(), target); err != nil {
 			return err
 		}
 
 		return nil
 	}
 
-	return setFields(w.Handler)
+	return setFields(wh.Handler)
 }
 
 // StandaloneOptions let you configure a StandaloneWebhook.
diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/webhook/alias.go b/vendor/sigs.k8s.io/controller-runtime/pkg/webhook/alias.go
index 276784efb2..1a831016af 100644
--- a/vendor/sigs.k8s.io/controller-runtime/pkg/webhook/alias.go
+++ b/vendor/sigs.k8s.io/controller-runtime/pkg/webhook/alias.go
@@ -23,10 +23,10 @@ import (
 
 // define some aliases for common bits of the webhook functionality
 
-// Defaulter defines functions for setting defaults on resources
+// Defaulter defines functions for setting defaults on resources.
 type Defaulter = admission.Defaulter
 
-// Validator defines functions for validating an operation
+// Validator defines functions for validating an operation.
 type Validator = admission.Validator
 
 // AdmissionRequest defines the input for an admission handler.
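These aliases let callers write webhook.Defaulter and webhook.Validator without importing the admission package, and the renamed Webhook type above can also carry a hand-rolled handler. A sketch; the path and the deny-everything policy are illustrative, and webhook.Admission is assumed to be the alias for admission.Webhook defined alongside the ones shown:

// denyAll is a bare admission handler: admission.HandlerFunc adapts a plain
// function to the Handler interface that admission.Webhook delegates to.
var denyAll admission.HandlerFunc = func(ctx context.Context, req admission.Request) admission.Response {
	return admission.Denied("cluster is frozen; rejecting " + string(req.Operation))
}

func registerFreezeWebhook(mgr ctrl.Manager) {
	mgr.GetWebhookServer().Register("/freeze", &webhook.Admission{Handler: denyAll})
}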
diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/webhook/server.go b/vendor/sigs.k8s.io/controller-runtime/pkg/webhook/server.go
index cdd34c9660..d2338d0b77 100644
--- a/vendor/sigs.k8s.io/controller-runtime/pkg/webhook/server.go
+++ b/vendor/sigs.k8s.io/controller-runtime/pkg/webhook/server.go
@@ -28,10 +28,12 @@ import (
 	"path/filepath"
 	"strconv"
 	"sync"
+	"time"
 
 	"k8s.io/apimachinery/pkg/runtime"
 	kscheme "k8s.io/client-go/kubernetes/scheme"
 	"sigs.k8s.io/controller-runtime/pkg/certwatcher"
+	"sigs.k8s.io/controller-runtime/pkg/healthz"
 	"sigs.k8s.io/controller-runtime/pkg/runtime/inject"
 	"sigs.k8s.io/controller-runtime/pkg/webhook/internal/metrics"
 )
@@ -70,6 +72,10 @@ type Server struct {
 	// Defaults to "", which means server does not verify client's certificate.
 	ClientCAName string
 
+	// TLSMinVersion is the minimum version of TLS supported. Accepts
+	// "", "1.0", "1.1", "1.2" and "1.3" only ("" is equivalent to "1.0" for backwards compatibility)
+	TLSMinVersion string
+
 	// WebhookMux is the multiplexer that handles different webhooks.
 	WebhookMux *http.ServeMux
 
@@ -83,6 +89,10 @@ type Server struct {
 	// defaultingOnce ensures that the default fields are only ever set once.
 	defaultingOnce sync.Once
 
+	// started is set to true immediately before the server is started
+	// and thus can be used to check if the server has been started
+	started bool
+
 	// mu protects access to the webhook map & setFields for Start, Register, etc
 	mu sync.Mutex
 }
@@ -124,8 +134,7 @@ func (s *Server) Register(path string, hook http.Handler) {
 	defer s.mu.Unlock()
 
 	s.defaultingOnce.Do(s.setDefaults)
-	_, found := s.webhooks[path]
-	if found {
+	if _, found := s.webhooks[path]; found {
 		panic(fmt.Errorf("can't register duplicate path: %v", path))
 	}
 	// TODO(directxman12): call setfields if we've already started the server
@@ -175,6 +184,26 @@ func (s *Server) StartStandalone(ctx context.Context, scheme *runtime.Scheme) er
 	return s.Start(ctx)
 }
 
+// tlsVersion converts from human-readable TLS version (for example "1.1")
+// to the values accepted by tls.Config (for example 0x301).
+func tlsVersion(version string) (uint16, error) {
+	switch version {
+	// default is previous behaviour
+	case "":
+		return tls.VersionTLS10, nil
+	case "1.0":
+		return tls.VersionTLS10, nil
+	case "1.1":
+		return tls.VersionTLS11, nil
+	case "1.2":
+		return tls.VersionTLS12, nil
+	case "1.3":
+		return tls.VersionTLS13, nil
+	default:
+		return 0, fmt.Errorf("invalid TLSMinVersion %v: expects 1.0, 1.1, 1.2, 1.3 or empty", version)
+	}
+}
+
 // Start runs the server.
 // It will install the webhook related resources depend on the server configuration.
 func (s *Server) Start(ctx context.Context) error {
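The new TLSMinVersion field and tlsVersion helper let operators raise the TLS floor without building a tls.Config by hand; an empty string deliberately preserves the historical TLS 1.0 minimum. A sketch of setting it from operator setup code (the function name is illustrative):

// hardenWebhookServer raises the webhook server's minimum TLS version to 1.2.
// An invalid string is rejected later, when Start calls tlsVersion.
func hardenWebhookServer(mgr ctrl.Manager) {
	srv := mgr.GetWebhookServer()
	srv.TLSMinVersion = "1.2" // "" would keep the old TLS 1.0 behaviour
}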
@@ -197,9 +226,15 @@ func (s *Server) Start(ctx context.Context) error {
 		}
 	}()
 
-	cfg := &tls.Config{
+	tlsMinVersion, err := tlsVersion(s.TLSMinVersion)
+	if err != nil {
+		return err
+	}
+
+	cfg := &tls.Config{ //nolint:gosec
 		NextProtos:     []string{"h2"},
 		GetCertificate: certWatcher.GetCertificate,
+		MinVersion:     tlsMinVersion,
 	}
 
 	// load CA to verify client certificate
@@ -219,7 +254,7 @@ func (s *Server) Start(ctx context.Context) error {
 		cfg.ClientAuth = tls.RequireAndVerifyClientCert
 	}
 
-	listener, err := tls.Listen("tcp", net.JoinHostPort(s.Host, strconv.Itoa(int(s.Port))), cfg)
+	listener, err := tls.Listen("tcp", net.JoinHostPort(s.Host, strconv.Itoa(s.Port)), cfg)
 	if err != nil {
 		return err
 	}
@@ -243,6 +278,9 @@ func (s *Server) Start(ctx context.Context) error {
 		close(idleConnsClosed)
 	}()
 
+	s.mu.Lock()
+	s.started = true
+	s.mu.Unlock()
 	if err := srv.Serve(listener); err != nil && err != http.ErrServerClosed {
 		return err
 	}
@@ -251,6 +289,34 @@ func (s *Server) Start(ctx context.Context) error {
 	return nil
 }
 
+// StartedChecker returns a healthz.Checker which is healthy after the
+// server has been started.
+func (s *Server) StartedChecker() healthz.Checker {
+	config := &tls.Config{
+		InsecureSkipVerify: true, // nolint:gosec // config is used to connect to our own webhook port.
+	}
+	return func(req *http.Request) error {
+		s.mu.Lock()
+		defer s.mu.Unlock()
+
+		if !s.started {
+			return fmt.Errorf("webhook server has not been started yet")
+		}
+
+		d := &net.Dialer{Timeout: 10 * time.Second}
+		conn, err := tls.DialWithDialer(d, "tcp", net.JoinHostPort(s.Host, strconv.Itoa(s.Port)), config)
+		if err != nil {
+			return fmt.Errorf("webhook server is not reachable: %v", err)
+		}
+
+		if err := conn.Close(); err != nil {
+			return fmt.Errorf("webhook server is not reachable: closing connection: %v", err)
+		}
+
+		return nil
+	}
+}
+
 // InjectFunc injects the field setter into the server.
 func (s *Server) InjectFunc(f inject.Func) error {
 	s.setFields = f
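The started flag plus StartedChecker close a readiness gap: a readyz endpoint can now report ready only once the webhook listener actually accepts TLS connections. A sketch of wiring it into a manager; the check name and function name are illustrative:

// addWebhookReadiness gates the manager's readyz endpoint on the webhook
// server: the checker fails until Start has run and the TLS port dials.
func addWebhookReadiness(mgr ctrl.Manager) error {
	return mgr.AddReadyzCheck("webhook-server", mgr.GetWebhookServer().StartedChecker())
}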