diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml
index fd9e8cf8..6ad5b320 100644
--- a/.github/workflows/build.yml
+++ b/.github/workflows/build.yml
@@ -56,11 +56,10 @@ jobs:
           go-version: 1.14.7
 
       - name: Setup Minikube-Kubernetes
-        uses: manusa/actions-setup-minikube@v2.3.0
+        uses: medyagh/setup-minikube@latest
         with:
-          minikube version: v1.16.0
-          kubernetes version: v1.20.1
-          github token: ${{ secrets.GITHUB_TOKEN }}
+          minikube-version: 1.24.0
+          kubernetes-version: v1.22.3
 
       - name: Build images locally
         run: make build && make container || exit 1;
@@ -69,7 +68,7 @@
         run: |
           kubectl cluster-info
           echo "KUBECONFIG=$HOME/.kube/config" >> $GITHUB_ENV
-          echo "VELERO_RELEASE=v1.6.0" >> $GITHUB_ENV
+          echo "VELERO_RELEASE=v1.13.2" >> $GITHUB_ENV
           echo "OPENEBS_RELEASE=master" >> $GITHUB_ENV
 
       - name: Installation
diff --git a/.github/workflows/pull_request.yml b/.github/workflows/pull_request.yml
index d1666ce1..962d4f4e 100644
--- a/.github/workflows/pull_request.yml
+++ b/.github/workflows/pull_request.yml
@@ -48,19 +48,35 @@ jobs:
     needs: ['lint']
     steps:
       - name: Checkout
-        uses: actions/checkout@v2
+        uses: actions/checkout@v4
+
+      - name: Set up Go 1.20
+        uses: actions/setup-go@v4
+        with:
+          go-version: 1.20.14
+          cache: false
 
-      - name: Set up Go 1.14
-        uses: actions/setup-go@v2
+      - name: Set up Helm
+        uses: azure/setup-helm@v3
         with:
-          go-version: 1.14.7
+          version: v3.10.2
+
+      - name: Setup zfs pools
+        run: |
+          sudo apt-get install zfsutils-linux -y
+          truncate -s 100G /tmp/disk.img
+          sudo zpool create zfspv-pool $(sudo losetup -f /tmp/disk.img --show)
+          sudo zpool status
 
       - name: Setup Minikube-Kubernetes
-        uses: manusa/actions-setup-minikube@v2.3.0
+        uses: medyagh/setup-minikube@latest
         with:
-          minikube version: v1.16.0
-          kubernetes version: v1.20.1
-          github token: ${{ secrets.GITHUB_TOKEN }}
+          cache: false
+          minikube-version: 1.31.1
+          driver: none
+          kubernetes-version: ${{ matrix.kubernetes }}
+          cni: calico
+          start-args: "--install-addons=false"
 
       - name: Build images locally
         run: make build && make container || exit 1;
@@ -69,7 +85,7 @@
         run: |
           kubectl cluster-info
           echo "KUBECONFIG=$HOME/.kube/config" >> $GITHUB_ENV
-          echo "VELERO_RELEASE=v1.6.0" >> $GITHUB_ENV
+          echo "VELERO_RELEASE=v1.13.2" >> $GITHUB_ENV
           echo "OPENEBS_RELEASE=master" >> $GITHUB_ENV
 
       - name: Installation
diff --git a/go.mod b/go.mod
index 8bcd5e9a..a7386a33 100644
--- a/go.mod
+++ b/go.mod
@@ -1,21 +1,13 @@
 module github.com/openebs/velero-plugin
 
-go 1.13
+go 1.20
 
 require (
-	cloud.google.com/go v0.58.0 // indirect
-	cloud.google.com/go/storage v1.9.0 // indirect
-	github.com/Azure/azure-pipeline-go v0.2.2 // indirect
-	github.com/Azure/azure-storage-blob-go v0.8.0 // indirect
 	github.com/aws/aws-sdk-go v1.35.24
 	github.com/ghodss/yaml v1.0.0
 	github.com/gofrs/uuid v3.2.0+incompatible
-	github.com/google/wire v0.4.0 // indirect
-	github.com/hashicorp/go-plugin v1.0.1-0.20190610192547-a1bc61569a26 // indirect
-	github.com/mattn/go-ieproxy v0.0.1 // indirect
 	github.com/onsi/ginkgo v1.15.2
 	github.com/onsi/gomega v1.10.2
-	github.com/openebs/api/v2 v2.3.0
 	github.com/openebs/maya v1.12.1-0.20210416090832-ad9c32f086d5
 	github.com/openebs/zfs-localpv v1.6.1-0.20210504173514-62b3a0b7fe5d
 	github.com/pkg/errors v0.9.1
@@ -29,6 +21,74 @@ require (
 	k8s.io/client-go v11.0.1-0.20190409021438-1a26190bd76a+incompatible
 )
 
+require (
+	cloud.google.com/go v0.58.0 // indirect
+	cloud.google.com/go/storage v1.9.0 // indirect
+	github.com/Azure/azure-pipeline-go v0.2.2 // indirect
+	github.com/Azure/azure-storage-blob-go v0.8.0 // indirect
+	github.com/BurntSushi/toml v0.3.1 // indirect
+	github.com/beorn7/perks v1.0.1 // indirect
+	github.com/cespare/xxhash/v2 v2.1.1 // indirect
+	github.com/davecgh/go-spew v1.1.1 // indirect
+	github.com/docker/spdystream v0.0.0-20181023171402-6480d4af844c // indirect
+	github.com/fsnotify/fsnotify v1.4.9 // indirect
+	github.com/go-logr/logr v0.2.0 // indirect
+	github.com/gogo/protobuf v1.3.1 // indirect
+	github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e // indirect
+	github.com/golang/protobuf v1.4.3 // indirect
+	github.com/google/gofuzz v1.1.0 // indirect
+	github.com/google/wire v0.4.0 // indirect
+	github.com/googleapis/gax-go v2.0.2+incompatible // indirect
+	github.com/googleapis/gax-go/v2 v2.0.5 // indirect
+	github.com/googleapis/gnostic v0.4.1 // indirect
+	github.com/hashicorp/go-hclog v0.0.0-20180709165350-ff2cf002a8dd // indirect
+	github.com/hashicorp/go-plugin v1.0.1-0.20190610192547-a1bc61569a26 // indirect
+	github.com/hashicorp/yamux v0.0.0-20180604194846-3520598351bb // indirect
+	github.com/imdario/mergo v0.3.9 // indirect
+	github.com/inconshreveable/mousetrap v1.0.0 // indirect
+	github.com/jmespath/go-jmespath v0.4.0 // indirect
+	github.com/json-iterator/go v1.1.10 // indirect
+	github.com/jstemmer/go-junit-report v0.9.1 // indirect
+	github.com/konsorten/go-windows-terminal-sequences v1.0.3 // indirect
+	github.com/mattn/go-ieproxy v0.0.1 // indirect
+	github.com/matttproud/golang_protobuf_extensions v1.0.2-0.20181231171920-c182affec369 // indirect
+	github.com/mitchellh/go-testing-interface v1.0.0 // indirect
+	github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect
+	github.com/modern-go/reflect2 v1.0.1 // indirect
+	github.com/nxadm/tail v1.4.8 // indirect
+	github.com/oklog/run v1.0.0 // indirect
+	github.com/openebs/lib-csi v0.3.0 // indirect
+	github.com/prometheus/client_golang v1.7.1 // indirect
+	github.com/prometheus/client_model v0.2.0 // indirect
+	github.com/prometheus/common v0.10.0 // indirect
+	github.com/prometheus/procfs v0.2.0 // indirect
+	github.com/spf13/cobra v1.1.1 // indirect
+	go.opencensus.io v0.22.3 // indirect
+	golang.org/x/crypto v0.0.0-20201002170205-7f63de1d35b0 // indirect
+	golang.org/x/lint v0.0.0-20200302205851-738671d3881b // indirect
+	golang.org/x/mod v0.3.0 // indirect
+	golang.org/x/net v0.0.0-20201110031124-69a78807bb2b // indirect
+	golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d // indirect
+	golang.org/x/sys v0.0.0-20210112080510-489259a85091 // indirect
+	golang.org/x/text v0.3.4 // indirect
+	golang.org/x/time v0.0.0-20200630173020-3af7569d3a1e // indirect
+	golang.org/x/tools v0.0.0-20201224043029-2b0845dc783e // indirect
+	golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 // indirect
+	google.golang.org/appengine v1.6.6 // indirect
+	google.golang.org/genproto v0.0.0-20201110150050-8816d57aaa9a // indirect
+	google.golang.org/grpc v1.29.1 // indirect
+	google.golang.org/protobuf v1.25.0 // indirect
+	gopkg.in/inf.v0 v0.9.1 // indirect
+	gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 // indirect
+	gopkg.in/yaml.v2 v2.3.0 // indirect
+	honnef.co/go/tools v0.0.1-2020.1.4 // indirect
+	k8s.io/klog v1.0.0 // indirect
+	k8s.io/klog/v2 v2.4.0 // indirect
+	k8s.io/utils v0.0.0-20201110183641-67b214c5f920 // indirect
+	sigs.k8s.io/structured-merge-diff/v4 v4.0.2 // indirect
+	sigs.k8s.io/yaml v1.2.0 // indirect
+)
+
 replace (
 	k8s.io/api => k8s.io/api v0.20.2
 	k8s.io/apiextensions-apiserver => k8s.io/apiextensions-apiserver v0.20.2
diff --git a/go.sum b/go.sum
index 10733f9d..f710317c 100644 --- a/go.sum +++ b/go.sum @@ -90,7 +90,6 @@ github.com/Azure/go-autorest/autorest/mocks v0.1.0/go.mod h1:OTyCOPRA2IgIlWxVYxB github.com/Azure/go-autorest/autorest/mocks v0.2.0/go.mod h1:OTyCOPRA2IgIlWxVYxBee2F5Gr4kF2zd2J5cFRaIDN0= github.com/Azure/go-autorest/autorest/mocks v0.3.0/go.mod h1:a8FDP3DYzQ4RYfVAxAN3SVSiiO77gL2j2ronKKP0syM= github.com/Azure/go-autorest/autorest/mocks v0.4.0/go.mod h1:LTp+uSrOhSkaKrUy935gNZuuIPPVsHlr9DSOxSayd+k= -github.com/Azure/go-autorest/autorest/mocks v0.4.1 h1:K0laFcLE6VLTOwNgSxaGbUcLPuGXlNkbVvq4cW4nIHk= github.com/Azure/go-autorest/autorest/mocks v0.4.1/go.mod h1:LTp+uSrOhSkaKrUy935gNZuuIPPVsHlr9DSOxSayd+k= github.com/Azure/go-autorest/autorest/to v0.2.0/go.mod h1:GunWKJp1AEqgMaGLV+iocmRAJWqST1wQYhyyjXJ3SJc= github.com/Azure/go-autorest/autorest/to v0.3.0/go.mod h1:MgwOyqaIuKdG4TL/2ywSsIWKAfJfgHDo8ObuUk3t5sA= @@ -172,7 +171,6 @@ github.com/cenkalti/backoff v2.1.1+incompatible/go.mod h1:90ReRw6GdpyfrHakVjL/QH github.com/census-instrumentation/opencensus-proto v0.2.0/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= github.com/cespare/prettybench v0.0.0-20150116022406-03b8cfe5406c/go.mod h1:Xe6ZsFhtM8HrDku0pxJ3/Lr51rwykrzgFwpmTzleatY= -github.com/cespare/xxhash v1.1.0 h1:a6HrQnmkObjyL+Gs60czilIUGqrzKutQD6XZog3p+ko= github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc= github.com/cespare/xxhash/v2 v2.1.1 h1:6MnRN8NT7+YBpUIWxHtefFZOKTAPgGjpQSxqLNn0+qY= github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= @@ -284,7 +282,6 @@ github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V github.com/go-logr/logr v0.1.0/go.mod h1:ixOQHD9gLJUVQQ2ZOR7zLEifBX6tGkNJF4QyIY7sIas= github.com/go-logr/logr v0.2.0 h1:QvGt2nLcHH0WK9orKa+ppBPAxREcH364nPUedEpK0TY= github.com/go-logr/logr v0.2.0/go.mod h1:z6/tIYblkpsD+a4lm/fGIIU9mZ+XfAiaFtq7xTgseGU= -github.com/go-logr/zapr v0.1.0 h1:h+WVe9j6HAA01niTJPA/kKH0i7e0rLZBCwauQFcRE54= github.com/go-logr/zapr v0.1.0/go.mod h1:tabnROwaDl0UNxkVeFRbY8bwB37GwRv0P8lg6aAiEnk= github.com/go-ole/go-ole v1.2.1/go.mod h1:7FAglXiTm7HKlQRDeOQ6ZNUHidzCWXuZWq/1dTyBNF8= github.com/go-openapi/analysis v0.0.0-20180825180245-b006789cd277/go.mod h1:k70tL6pCuVxPJOHXQ+wIac1FUrvNkHolPie/cLEU6hI= @@ -440,7 +437,6 @@ github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm4 github.com/google/subcommands v1.0.1/go.mod h1:ZjhPrFU+Olkh9WazFPsl27BQ4UPiG37m3yTrtFlrHVk= github.com/google/uuid v1.0.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/google/uuid v1.1.2 h1:EVhdT+1Kseyi1/pUmXKaFxYsDNy9RQYkMWRH68J/W7Y= github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/wire v0.2.2/go.mod h1:7FHVg6mFpFQrjeUZrm+BaD50N5jnDKm50uVPTpyYOmU= github.com/google/wire v0.4.0 h1:kXcsA/rIGzJImVqPdhfnr6q0xsS9gU0515q1EPpJ9fE= @@ -495,7 +491,6 @@ github.com/hashicorp/golang-lru v0.0.0-20180201235237-0fb14efe8c47/go.mod h1:/m3 github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= github.com/hashicorp/golang-lru v0.5.3/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4= -github.com/hashicorp/golang-lru v0.5.4 
h1:YDjusn29QI/Das2iO9M0BHnIbxPeyuCHsjMW+lJfyTc= github.com/hashicorp/golang-lru v0.5.4/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4= github.com/hashicorp/hcl v0.0.0-20180404174102-ef8a98b0bbce/go.mod h1:oZtUIOe8dh44I2q6ScRibXws4Ajl+d+nod3AaR9vL5w= github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ= @@ -677,8 +672,6 @@ github.com/opencontainers/image-spec v1.0.1/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zM github.com/opencontainers/runc v1.0.0-rc10/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rmF59PVA73FjuZG0U= github.com/opencontainers/runtime-spec v1.0.0/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= github.com/opencontainers/selinux v1.3.1-0.20190929122143-5215b1806f52/go.mod h1:+BLncwf63G4dgOzykXAxcmnFlUaOlkDdmw/CqsW6pjs= -github.com/openebs/api/v2 v2.3.0 h1:tkgysm2FnxkkEiC9RxxZ5rTbN4W6iA4qXspcmKRMzPk= -github.com/openebs/api/v2 v2.3.0/go.mod h1:nLCaNvVjgjkjeD2a+n1fMbv5HjoEYP4XB8OAbwmIXtY= github.com/openebs/lib-csi v0.3.0 h1:0H7B2lhTw9+qNK2H+a+z3bnYfe+9gG0eFl023SciZ3k= github.com/openebs/lib-csi v0.3.0/go.mod h1:uruyzJiTwRoytQPQXOf4spaezn1cjkiAXjvFGw6aY/8= github.com/openebs/maya v1.12.1-0.20210416090832-ad9c32f086d5 h1:bCf0XvhkN0XcXIciqu0BFfJUc7fPi6Iwp9oBEz1pbMU= @@ -746,7 +739,6 @@ github.com/russross/blackfriday v0.0.0-20170610170232-067529f716f4/go.mod h1:JO/ github.com/russross/blackfriday v1.5.2/go.mod h1:JO/DiYxRf+HjHt06OyowR9PTA263kcR/rfWxYHBV53g= github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= github.com/ryanuber/columnize v0.0.0-20160712163229-9b3edd62028f/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts= -github.com/ryanuber/columnize v2.1.0+incompatible h1:j1Wcmh8OrK4Q7GXY+V7SVSY8nUWQxHW5TkBe7YUl+2s= github.com/ryanuber/columnize v2.1.0+incompatible/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts= github.com/ryanuber/go-glob v0.0.0-20170128012129-256dc444b735/go.mod h1:807d1WSdnB0XRJzKNil9Om6lcp/3a0v4qIHxIXzX/Yc= github.com/satori/go.uuid v1.2.0/go.mod h1:dA0hQrYB0VpLJoorglMZABFdXlWrHn1NEOzdhQKdks0= @@ -857,16 +849,12 @@ go.opencensus.io v0.22.3 h1:8sGtKOrtQqkN1bp2AtX+misvLIlOmsEsNd+9NIcPEm8= go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= go.uber.org/atomic v1.3.2/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= -go.uber.org/atomic v1.5.0 h1:OI5t8sDa1Or+q8AeE+yKeB/SDYioSHAgcVljj9JIETY= go.uber.org/atomic v1.5.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ= go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0= -go.uber.org/multierr v1.3.0 h1:sFPn2GLc3poCkfrpIXGhBD2X0CMIo4Q/zSULXrj/+uc= go.uber.org/multierr v1.3.0/go.mod h1:VgVr7evmIr6uPjLBxg28wmKNXyqE9akIJ5XnfpiKl+4= -go.uber.org/tools v0.0.0-20190618225709-2cfd321de3ee h1:0mgffUl7nfd+FpvXMVz4IDEaUSmT1ysygQC7qYo7sG4= go.uber.org/tools v0.0.0-20190618225709-2cfd321de3ee/go.mod h1:vJERXedbb3MVM5f9Ejo0C68/HhF8uaILCdgjnY+goOA= go.uber.org/zap v1.9.1/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= -go.uber.org/zap v1.13.0 h1:nR6NoDBgAf67s68NhaXbsojM+2gxp3S1hWkHDl27pVU= go.uber.org/zap v1.13.0/go.mod h1:zwrFLgMcdUuIBviXEYEH1YKNaOBnKXsx2IPda5bBwHM= go4.org v0.0.0-20180809161055-417644f6feb5/go.mod h1:MkTOUMDaeVYJUOUsaDXIhWPZYa1yOyC1qaOBpL57BhE= gocloud.dev v0.15.0 h1:Tl8dkOHWVZiYBYPxG2ouhpfmluoQGt3mY323DaAHaC8= @@ -1147,7 +1135,6 @@ golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod 
h1:I/5z698sn9Ka8T golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 h1:go1bK/D/BFZV2I8cIQd1NKEZ+0owSTG1fDTci4IqFcE= golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -gomodules.xyz/jsonpatch/v2 v2.0.1 h1:xyiBuvkD2g5n7cYzx6u2sxQvsAy4QJsZFCzGVdzOXZ0= gomodules.xyz/jsonpatch/v2 v2.0.1/go.mod h1:IhYNNY4jnS53ZnfE4PAmpKtDpTCj1JFXc+3mwe7XcUU= gonum.org/v1/gonum v0.0.0-20180816165407-929014505bf4/go.mod h1:Y+Yx5eoAFn32cQvJDxZx5Dpnq+c3wtXuadVZAcxbbBo= gonum.org/v1/gonum v0.0.0-20190331200053-3d26580ed485/go.mod h1:2ltnJ7xHfj0zHS40VVPYEAAMTa3ZGguvHGBSJeRWqE0= @@ -1289,7 +1276,6 @@ honnef.co/go/tools v0.0.1-2020.1.4 h1:UoveltGrhghAA7ePc+e+QYDHXrBps2PqFZiHkGR/xK honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= k8s.io/api v0.20.2 h1:y/HR22XDZY3pniu9hIFDLpUCPq2w5eQ6aV/VFQ7uJMw= k8s.io/api v0.20.2/go.mod h1:d7n6Ehyzx+S+cE3VhTGfVNNqtGc/oL9DCdYYahlurV8= -k8s.io/apiextensions-apiserver v0.20.2 h1:rfrMWQ87lhd8EzQWRnbQ4gXrniL/yTRBgYH1x1+BLlo= k8s.io/apiextensions-apiserver v0.20.2/go.mod h1:F6TXp389Xntt+LUq3vw6HFOLttPa0V8821ogLGwb6Zs= k8s.io/apimachinery v0.20.2 h1:hFx6Sbt1oG0n6DZ+g4bFt5f6BoMkOjKWsQFu077M3Vg= k8s.io/apimachinery v0.20.2/go.mod h1:WlLqWAHZGg07AeltaI0MV5uk1Omp8xaN0JGLY6gkRpU= diff --git a/pkg/cstor/api_service.go b/pkg/cstor/api_service.go deleted file mode 100644 index fe5f90c2..00000000 --- a/pkg/cstor/api_service.go +++ /dev/null @@ -1,333 +0,0 @@ -/* -Copyright 2019 The OpenEBS Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package cstor - -import ( - "bytes" - "context" - "encoding/json" - "io/ioutil" - "net/http" - "strconv" - - v1alpha1 "github.com/openebs/maya/pkg/apis/openebs.io/v1alpha1" - "github.com/pkg/errors" - k8serrors "k8s.io/apimachinery/pkg/api/errors" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" -) - -// httpRestCall execute REST API over HTTP -func (p *Plugin) httpRestCall(url, reqtype string, data []byte) ([]byte, error) { - req, err := http.NewRequest(reqtype, url, bytes.NewBuffer(data)) - if err != nil { - return nil, err - } - req.Header.Add("Content-Type", "application/json") - - c := &http.Client{ - Timeout: p.restTimeout, - } - - resp, err := c.Do(req) - if err != nil { - return nil, errors.Errorf("Error when connecting to maya-apiserver : %s", err.Error()) - } - - defer func() { - if err = resp.Body.Close(); err != nil { - p.Log.Warnf("Failed to close response : %s", err.Error()) - } - }() - - respdata, err := ioutil.ReadAll(resp.Body) - if err != nil { - return nil, errors.Errorf( - "Unable to read response from maya-apiserver, err=%s data=%s", - err.Error(), string(respdata)) - } - - code := resp.StatusCode - if code != http.StatusOK { - return nil, errors.Errorf("Status error{%v}, response=%s", http.StatusText(code), string(respdata)) - } - return respdata, nil -} - -// getMapiAddr return maya API server's ip address -func (p *Plugin) getMapiAddr() (string, error) { - var openebsNs string - - // check if user has provided openebs namespace - if p.namespace != "" { - openebsNs = p.namespace - } else { - openebsNs = metav1.NamespaceAll - } - - svclist, err := p.K8sClient. - CoreV1(). - Services(openebsNs). - List( - context.TODO(), - metav1.ListOptions{ - LabelSelector: mayaAPIServiceLabel, - }, - ) - - if err != nil { - if k8serrors.IsNotFound(err) { - return "", nil - } - p.Log.Errorf("Error getting maya-apiservice : %v", err.Error()) - return "", err - } - - if len(svclist.Items) != 0 { - goto fetchip - } - - // There are no any services having MayaApiService Label - // Let's try to find by name only.. - svclist, err = p.K8sClient. - CoreV1(). - Services(openebsNs). - List( - context.TODO(), - metav1.ListOptions{ - FieldSelector: "metadata.name=" + mayaAPIServiceName, - }) - if err != nil { - if k8serrors.IsNotFound(err) { - return "", nil - } - p.Log.Errorf("Error getting IP Address for service{%s} : %v", mayaAPIServiceName, err.Error()) - return "", err - } - -fetchip: - for _, s := range svclist.Items { - if s.Spec.ClusterIP != "" { - // update the namespace - p.namespace = s.Namespace - return "http://" + s.Spec.ClusterIP + ":" + strconv.FormatInt(int64(s.Spec.Ports[0].Port), 10), nil - } - } - - return "", nil -} - -// getCVCAddr return CVC server's ip address -func (p *Plugin) getCVCAddr() (string, error) { - var openebsNs string - - // check if user has provided openebs namespace - if p.namespace != "" { - openebsNs = p.namespace - } else { - openebsNs = metav1.NamespaceAll - } - - svclist, err := p.K8sClient. - CoreV1(). - Services(openebsNs). 
- List( - context.TODO(), - metav1.ListOptions{ - LabelSelector: cvcAPIServiceLabel, - }, - ) - - if err != nil { - if k8serrors.IsNotFound(err) { - return "", nil - } - p.Log.Errorf("Error getting cvc service: %v", err.Error()) - return "", err - } - - if len(svclist.Items) == 0 { - return "", nil - } - - for _, s := range svclist.Items { - if s.Spec.ClusterIP != "" { - // update the namespace - p.namespace = s.Namespace - return "http://" + s.Spec.ClusterIP + ":" + strconv.FormatInt(int64(s.Spec.Ports[0].Port), 10), nil - } - } - - return "", nil -} - -func (p *Plugin) sendBackupRequest(vol *Volume) (*v1alpha1.CStorBackup, error) { - var url string - - scheduleName := p.getScheduleName(vol.backupName) // This will be backup/schedule name - - serverAddr := p.cstorServerAddr + ":" + strconv.Itoa(CstorBackupPort) - - bkpSpec := &v1alpha1.CStorBackupSpec{ - BackupName: scheduleName, - VolumeName: vol.volname, - SnapName: vol.backupName, - BackupDest: serverAddr, - LocalSnap: p.local, - } - - bkp := &v1alpha1.CStorBackup{ - ObjectMeta: metav1.ObjectMeta{ - Namespace: vol.namespace, - }, - Spec: *bkpSpec, - } - - if vol.isCSIVolume { - url = p.cvcAddr + backupEndpoint - } else { - url = p.mayaAddr + backupEndpoint - } - - bkpData, err := json.Marshal(bkp) - if err != nil { - return nil, errors.Wrapf(err, "Error parsing json") - } - - _, err = p.httpRestCall(url, "POST", bkpData) - if err != nil { - return nil, errors.Wrapf(err, "Error calling REST api") - } - - return bkp, nil -} - -func (p *Plugin) sendRestoreRequest(vol *Volume) (*v1alpha1.CStorRestore, error) { - var url string - - restoreSrc := p.cstorServerAddr + ":" + strconv.Itoa(CstorRestorePort) - - if p.local { - restoreSrc = vol.srcVolname - } - - restore := &v1alpha1.CStorRestore{ - ObjectMeta: metav1.ObjectMeta{ - Namespace: p.namespace, - }, - Spec: v1alpha1.CStorRestoreSpec{ - RestoreName: vol.backupName, - VolumeName: vol.volname, - RestoreSrc: restoreSrc, - StorageClass: vol.storageClass, - Size: vol.size, - Local: p.local, - }, - } - - if vol.isCSIVolume { - url = p.cvcAddr + restorePath - } else { - url = p.mayaAddr + restorePath - } - - restoreData, err := json.Marshal(restore) - if err != nil { - return nil, err - } - - data, err := p.httpRestCall(url, "POST", restoreData) - if err != nil { - return nil, errors.Wrapf(err, "Error executing REST api for restore") - } - - // if apiserver is having version <=1.8 then it will return empty response - ok, err := isEmptyRestResponse(data) - if !ok && err == nil { - err = p.updateVolCASInfo(data, vol.volname) - if err != nil { - err = errors.Wrapf(err, "Error parsing restore API response") - } - } - - return restore, err -} - -func isEmptyRestResponse(data []byte) (bool, error) { - var obj interface{} - - dec := json.NewDecoder(bytes.NewReader(data)) - err := dec.Decode(&obj) - if err != nil { - return false, err - } - - res, ok := obj.(string) - if ok && res == "" { - return true, nil - } - - return false, nil -} - -func (p *Plugin) sendDeleteRequest(backup, volume, namespace, schedule string, isCSIVolume bool) error { - var url string - - if isCSIVolume { - url = p.cvcAddr + backupEndpoint + backup - } else { - url = p.mayaAddr + backupEndpoint + backup - } - - req, err := http.NewRequest("DELETE", url, nil) - if err != nil { - return errors.Wrapf(err, "failed to create HTTP request") - } - - q := req.URL.Query() - q.Add("volume", volume) - q.Add("namespace", namespace) - q.Add("schedule", schedule) - - req.URL.RawQuery = q.Encode() - - c := &http.Client{ - Timeout: 
p.restTimeout, - } - - resp, err := c.Do(req) - if err != nil { - return errors.Wrapf(err, "failed to connect maya-apiserver") - } - - defer func() { - if err = resp.Body.Close(); err != nil { - p.Log.Warnf("Failed to close response err=%s", err) - } - }() - - respdata, err := ioutil.ReadAll(resp.Body) - if err != nil { - return errors.Wrapf(err, "failed to read response from maya-apiserver, response=%s", string(respdata)) - } - - code := resp.StatusCode - if code != http.StatusOK { - return errors.Errorf("HTTP Status error{%v} from maya-apiserver, response=%s", code, string(respdata)) - } - - return nil -} diff --git a/pkg/cstor/cstor.go b/pkg/cstor/cstor.go deleted file mode 100644 index e9dcd812..00000000 --- a/pkg/cstor/cstor.go +++ /dev/null @@ -1,660 +0,0 @@ -/* -Copyright 2019 The OpenEBS Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package cstor - -import ( - "context" - "fmt" - "net" - "strings" - "time" - - cloud "github.com/openebs/velero-plugin/pkg/clouduploader" - "github.com/pkg/errors" - - /* Due to dependency conflict, please ensure openebs - * dependency manually instead of using dep - */ - openebsapis "github.com/openebs/api/v2/pkg/client/clientset/versioned" - "github.com/openebs/maya/pkg/apis/openebs.io/v1alpha1" - openebs "github.com/openebs/maya/pkg/client/generated/clientset/versioned" - "github.com/openebs/velero-plugin/pkg/velero" - "github.com/sirupsen/logrus" - v1 "k8s.io/api/core/v1" - "k8s.io/apimachinery/pkg/api/resource" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" - "k8s.io/apimachinery/pkg/runtime" - "k8s.io/client-go/kubernetes" - "k8s.io/client-go/rest" -) - -const ( - mayaAPIServiceName = "maya-apiserver-service" - mayaAPIServiceLabel = "openebs.io/component-name=maya-apiserver-svc" - cvcAPIServiceLabel = "openebs.io/component-name=cvc-operator-svc" - backupEndpoint = "/latest/backups/" - restorePath = "/latest/restore/" - casTypeCStor = "cstor" - backupStatusInterval = 5 - restoreStatusInterval = 5 - openebsVolumeLabel = "openebs.io/cas-type" - openebsCSIName = "cstor.csi.openebs.io" - trueStr = "true" -) - -const ( - // NAMESPACE config key for OpenEBS namespace - NAMESPACE = "namespace" - - // LocalSnapshot config key for local snapshot - LocalSnapshot = "local" - - // RestoreAllIncrementalSnapshots config key for restoring all incremental snapshots - RestoreAllIncrementalSnapshots = "restoreAllIncrementalSnapshots" - - // AutoSetTargetIP config key for setting the targetip automatically after successful restore - AutoSetTargetIP = "autoSetTargetIP" - - // SnapshotIDIdentifier is a word to generate snapshotID from volume name and backup name - SnapshotIDIdentifier = "-velero-bkp-" - - // port to connect for restoring the data - CstorRestorePort = 9000 - - // port to connect for backup - CstorBackupPort = 9001 - - // RestTimeOut config key for REST API timeout value - RestTimeOut = "restApiTimeout" -) - -// Plugin defines snapshot plugin for CStor volume -type Plugin struct { - // Log is used for 
logging - Log logrus.FieldLogger - - // K8sClient is used for kubernetes CR operation - K8sClient *kubernetes.Clientset - - // OpenEBSClient is used for openEBS CR operation - OpenEBSClient *openebs.Clientset - - // OpenEBSAPIsClient clientset for OpenEBS CR operations - /* - Note: This client comes from openebs/api ( github repo ) - and this client has the latest cstor v1 APIs. - For compatibility this client has also some (not all) v1alpha1 APIs - that is present in above OpenEBSClient(this client comes - from openebs/maya github repo) - Finally, we will migrate to client based on openebs/api. - */ - OpenEBSAPIsClient openebsapis.Interface - - // config to store parameters from velero server - config map[string]string - - // namespace in which openebs is installed, default is openebs - namespace string - - // cl stores cloud connection information - cl *cloud.Conn - - // mayaAddr is maya API server address - mayaAddr string - - // cvcAddr is cvc API server address - cvcAddr string - - // cstorServerAddr is network address used for CStor volume operation - // on this address cloud server will perform data operation(backup/restore) - cstorServerAddr string - - // volumes list of volume - volumes map[string]*Volume - - // snapshots list of snapshot - snapshots map[string]*Snapshot - - // if only local snapshot enabled - local bool - - // if set then restore will restore from base snapshot to given snapshot, including incremental snapshots - restoreAllSnapshots bool - - // if set then targetip will be set after successful restore - autoSetTargetIP bool - - // restTimeout defines timeout for REST API calls - restTimeout time.Duration -} - -// Snapshot describes snapshot object information -type Snapshot struct { - // Volume name on which snapshot should be taken - volID string - - // backupName is name of a snapshot - backupName string - - // namespace is volume's namespace - namespace string - - // isCSIVolume is true for cStor based CSI volume - isCSIVolume bool -} - -// Volume describes volume object information -type Volume struct { - // volname is volume name - volname string - - // srcVolname is source volume name in case of local restore - srcVolname string - - // namespace is volume claim's namespace - namespace string - - // backupName is snapshot name for given volume - backupName string - - // backupStatus is backup progress status for given volume - backupStatus v1alpha1.CStorBackupStatus - - // restoreStatus is restore progress status for given volume - restoreStatus v1alpha1.CStorRestoreStatus - - // size is volume size in string - size resource.Quantity - - // snapshotTag is cloud snapshot file identifier.. 
It will be same as volume name from backup - snapshotTag string - - // storageClass is volume's storageclass - storageClass string - - iscsi v1.ISCSIPersistentVolumeSource - - // isCSIVolume is true for cStor based CSI volume - isCSIVolume bool -} - -func (p *Plugin) getServerAddress() string { - netInterfaceAddresses, err := net.InterfaceAddrs() - - if err != nil { - p.Log.Errorf("Failed to get interface Address for velero server : %s", err.Error()) - return "" - } - - for _, netInterfaceAddress := range netInterfaceAddresses { - networkIP, ok := netInterfaceAddress.(*net.IPNet) - if ok && !networkIP.IP.IsLoopback() && networkIP.IP.To4() != nil { - ip := networkIP.IP.String() - p.Log.Infof("Ip address of velero-plugin server: %s", ip) - return ip - } - } - return "" -} - -// Init CStor snapshot plugin -func (p *Plugin) Init(config map[string]string) error { - if ns, ok := config[NAMESPACE]; ok { - p.namespace = ns - } - - conf, err := rest.InClusterConfig() - if err != nil { - p.Log.Errorf("Failed to get cluster config : %s", err.Error()) - return errors.New("error fetching cluster config") - } - - clientset, err := kubernetes.NewForConfig(conf) - if err != nil { - p.Log.Errorf("Error creating clientset : %s", err.Error()) - return errors.New("error creating k8s client") - } - - p.K8sClient = clientset - - openEBSClient, err := openebs.NewForConfig(conf) - if err != nil { - p.Log.Errorf("Failed to create openEBS client. %s", err) - return err - } - p.OpenEBSClient = openEBSClient - - // Set client from openebs apis - err = p.SetOpenEBSAPIClient(conf) - if err != nil { - return err - } - - p.mayaAddr, err = p.getMapiAddr() - if err != nil { - return errors.Wrapf(err, "error fetching Maya-ApiServer rest client address") - } - - p.cvcAddr, err = p.getCVCAddr() - if err != nil { - return errors.Wrapf(err, "error fetching CVC rest client address") - } - - if p.mayaAddr == "" && p.cvcAddr == "" { - return errors.New("failed to get address for maya-apiserver/cvc-server service") - } - - p.cstorServerAddr = p.getServerAddress() - if p.cstorServerAddr == "" { - return errors.New("error fetching cstorVeleroServer address") - } - p.config = config - - if p.volumes == nil { - p.volumes = make(map[string]*Volume) - } - if p.snapshots == nil { - p.snapshots = make(map[string]*Snapshot) - } - - // check for user-provided timeout values - if timeoutStr, ok := config[RestTimeOut]; ok { - timeout, err := time.ParseDuration(timeoutStr) - if err != nil { - return errors.Wrapf(err, "failed to parse restApiTimeout") - } - p.restTimeout = timeout - } else { - p.restTimeout = 60 * time.Second - } - - p.Log.Infof("Setting restApiTimeout to %v", p.restTimeout) - - if local, ok := config[LocalSnapshot]; ok && isTrue(local) { - p.local = true - return nil - } - - if err := velero.InitializeClientSet(conf); err != nil { - return errors.Wrapf(err, "failed to initialize velero clientSet") - } - - if restoreAllSnapshots, ok := config[RestoreAllIncrementalSnapshots]; ok && isTrue(restoreAllSnapshots) { - p.restoreAllSnapshots = true - p.autoSetTargetIP = true - } - - if autoSetTargetIP, ok := config[AutoSetTargetIP]; ok { - p.autoSetTargetIP = isTrue(autoSetTargetIP) - } - - p.cl = &cloud.Conn{Log: p.Log} - return p.cl.Init(config) -} - -// SetOpenEBSAPIClient sets openebs client from openebs/apis -// Ref: https://github.com/openebs/api/tree/HEAD/pkg/apis -func (p *Plugin) SetOpenEBSAPIClient(c *rest.Config) error { - OpenEBSAPIClient, err := openebsapis.NewForConfig(c) - if err != nil { - p.Log.Errorf("Failed to create 
OpenEBS client from openebs apis. %s", err) - return err - } - p.OpenEBSAPIsClient = OpenEBSAPIClient - return nil -} - -// GetVolumeID return volume name for given PV -func (p *Plugin) GetVolumeID(unstructuredPV runtime.Unstructured) (string, error) { - var isCSIVolume bool - pv := new(v1.PersistentVolume) - - if err := runtime.DefaultUnstructuredConverter.FromUnstructured(unstructuredPV.UnstructuredContent(), pv); err != nil { - return "", errors.WithStack(err) - } - - // If PV doesn't have sufficient info to consider as CStor Volume - // then we will return empty volumeId and error as nil. - if pv.Name == "" || - pv.Spec.StorageClassName == "" || - (pv.Spec.ClaimRef != nil && pv.Spec.ClaimRef.Namespace == "") { - return "", nil - } - - volType, ok := pv.Labels[openebsVolumeLabel] - if ok { - if volType != casTypeCStor { - return "", nil - } - } else { - // check if PV is created by CSI driver - if isCSIVolume = isCSIPv(*pv); !isCSIVolume { - return "", nil - } - } - - if pv.Status.Phase == v1.VolumeReleased || - pv.Status.Phase == v1.VolumeFailed { - return "", errors.New("pv is in released state") - } - - if _, exists := p.volumes[pv.Name]; !exists { - p.volumes[pv.Name] = &Volume{ - volname: pv.Name, - snapshotTag: pv.Name, - storageClass: pv.Spec.StorageClassName, - namespace: pv.Spec.ClaimRef.Namespace, - size: pv.Spec.Capacity[v1.ResourceStorage], - isCSIVolume: isCSIVolume, - } - } - - return pv.Name, nil -} - -// DeleteSnapshot delete CStor volume snapshot -func (p *Plugin) DeleteSnapshot(snapshotID string) error { - var snapInfo *Snapshot - var err error - - if snapshotID == "" { - p.Log.Warning("Empty snapshotID") - return nil - } - - p.Log.Infof("Deleting snapshot %v", snapshotID) - if _, exists := p.snapshots[snapshotID]; !exists { - snapInfo, err = p.getSnapInfo(snapshotID) - if err != nil { - return err - } - p.snapshots[snapshotID] = snapInfo - } else { - snapInfo = p.snapshots[snapshotID] - } - - scheduleName := p.getScheduleName(snapInfo.backupName) - - if snapInfo.volID == "" || snapInfo.backupName == "" || snapInfo.namespace == "" || scheduleName == "" { - return errors.Errorf("Got insufficient info vol:{%s} snap:{%s} ns:{%s} schedule:{%s}", - snapInfo.volID, - snapInfo.backupName, - snapInfo.namespace, - scheduleName) - } - - err = p.sendDeleteRequest(snapInfo.backupName, - snapInfo.volID, - snapInfo.namespace, - scheduleName, snapInfo.isCSIVolume) - if err != nil { - return errors.Wrapf(err, "failed to execute maya-apiserver DELETE API") - } - - if p.local { - // volumesnapshotlocation is configured for local snapshot - return nil - } - - filename := p.cl.GenerateRemoteFilename(snapInfo.volID, snapInfo.backupName) - if filename == "" { - return errors.Errorf("Error creating remote file name for backup") - } - - ret := p.cl.Delete(filename) - if !ret { - return errors.New("failed to remove snapshot") - } - - return nil -} - -// CreateSnapshot creates snapshot for CStor volume and upload it to cloud storage -func (p *Plugin) CreateSnapshot(volumeID, volumeAZ string, tags map[string]string) (string, error) { - if !p.local { - p.cl.ExitServer = false - } - - bkpname, ok := tags["velero.io/backup"] - if !ok { - return "", errors.New("failed to get backup name") - } - - vol, ok := p.volumes[volumeID] - if !ok { - return "", errors.New("volume not found") - } - vol.backupName = bkpname - size, ok := vol.size.AsInt64() - if !ok { - return "", errors.Errorf("Failed to parse volume size %v", vol.size) - } - - if !p.local { - // If cloud snapshot is configured then we need to 
backup PVC also - err := p.backupPVC(volumeID) - if err != nil { - return "", errors.Wrapf(err, "failed to create backup for PVC") - } - } - - p.Log.Infof("creating snapshot{%s}", bkpname) - - bkp, err := p.sendBackupRequest(vol) - if err != nil { - return "", errors.Wrapf(err, "Failed to send backup request") - } - - p.Log.Infof("Snapshot Successfully Created") - - if p.local { - // local snapshot - return generateSnapshotID(volumeID, bkpname), nil - } - - filename := p.cl.GenerateRemoteFilename(vol.snapshotTag, vol.backupName) - if filename == "" { - return "", errors.Errorf("Error creating remote file name for backup") - } - - go p.checkBackupStatus(bkp, vol.isCSIVolume) - - ok = p.cl.Upload(filename, size, CstorBackupPort) - if !ok { - return "", errors.New("failed to upload snapshot") - } - - if vol.backupStatus == v1alpha1.BKPCStorStatusDone { - return generateSnapshotID(volumeID, bkpname), nil - } - - return "", errors.Errorf("Failed to upload snapshot, status:{%v}", vol.backupStatus) -} - -func (p *Plugin) getSnapInfo(snapshotID string) (*Snapshot, error) { - volumeID, bkpName, err := getInfoFromSnapshotID(snapshotID) - if err != nil { - return nil, err - } - - pv, err := p.K8sClient. - CoreV1(). - PersistentVolumes(). - Get(context.TODO(), volumeID, metav1.GetOptions{}) - if err != nil { - return nil, errors.Errorf("Error fetching volume{%s} : %s", volumeID, err.Error()) - } - - // TODO - if pv.Spec.ClaimRef.Namespace == "" { - return nil, errors.Errorf("No namespace in pv.spec.claimref for PV{%s}", snapshotID) - } - isCSIVolume := isCSIPv(*pv) - return &Snapshot{ - volID: volumeID, - backupName: bkpName, - namespace: pv.Spec.ClaimRef.Namespace, - isCSIVolume: isCSIVolume, - }, nil -} - -// CreateVolumeFromSnapshot create CStor volume for given -// snapshotID and perform restore operation on it -func (p *Plugin) CreateVolumeFromSnapshot(snapshotID, volumeType, volumeAZ string, iops *int64) (string, error) { - var ( - newVol *Volume - err error - ) - - if volumeType != "cstor-snapshot" { - return "", errors.Errorf("Invalid volume type{%s}", volumeType) - } - - volumeID, snapName, err := getInfoFromSnapshotID(snapshotID) - if err != nil { - return "", err - } - - snapType := "remote" - if p.local { - snapType = "local" - } - - p.Log.Infof("Restoring %s snapshot{%s} for volume:%s", snapType, snapName, volumeID) - - if p.local { - newVol, err = p.getVolumeForLocalRestore(volumeID, snapName) - if err != nil { - return "", errors.Wrapf(err, "Failed to read PVC for volumeID=%s snap=%s", volumeID, snapName) - } - - err = p.restoreVolumeFromLocal(newVol) - } else { - newVol, err = p.getVolumeForRemoteRestore(volumeID, snapName) - if err != nil { - return "", errors.Wrapf(err, "Failed to read PVC for volumeID=%s snap=%s", volumeID, snapName) - } - - err = p.restoreVolumeFromCloud(newVol, snapName) - } - - if err != nil { - p.Log.Errorf("Failed to restore volume : %s", err) - return "", errors.Wrapf(err, "Failed to restore volume") - } - - if newVol.restoreStatus == v1alpha1.RSTCStorStatusDone { - if p.autoSetTargetIP { - if err := p.markCVRsAsRestoreCompleted(newVol); err != nil { - readmeUrl := "https://github.com/openebs/velero-plugin#setting-targetip-in-replica" - errMsg := fmt.Sprintf( - "Error setting targetip on CVR, need to set it manually. 
Refer: %s", - readmeUrl, - ) - return newVol.volname, errors.Wrapf(err, errMsg) - } - } - - p.Log.Infof("Restore completed for CStor volume:%s snapshot:%s", volumeID, snapName) - return newVol.volname, nil - } - - return "", errors.New("failed to restore snapshot") -} - -// GetVolumeInfo return volume information for given volume name -func (p *Plugin) GetVolumeInfo(volumeID, volumeAZ string) (string, *int64, error) { - return "cstor-snapshot", nil, nil -} - -// SetVolumeID set volumeID for given PV -func (p *Plugin) SetVolumeID(unstructuredPV runtime.Unstructured, volumeID string) (runtime.Unstructured, error) { - pv := new(v1.PersistentVolume) - - if err := runtime.DefaultUnstructuredConverter.FromUnstructured(unstructuredPV.UnstructuredContent(), pv); err != nil { - return nil, errors.WithStack(err) - } - - vol := p.volumes[volumeID] - - if p.local { - if !vol.isCSIVolume { - fsType := pv.Spec.PersistentVolumeSource.ISCSI.FSType - pv.Spec.PersistentVolumeSource = v1.PersistentVolumeSource{ - ISCSI: &vol.iscsi, - } - // Set Old PV fsType - pv.Spec.PersistentVolumeSource.ISCSI.FSType = fsType - } else { - pv.Spec.PersistentVolumeSource.CSI.VolumeHandle = vol.volname - } - } - pv.Name = vol.volname - - res, err := runtime.DefaultUnstructuredConverter.ToUnstructured(pv) - if err != nil { - return nil, errors.WithStack(err) - } - - return &unstructured.Unstructured{Object: res}, nil -} - -// getScheduleName return the schedule name for the given backup -// It will check if backup name have 'bkp-20060102150405' format -func (p *Plugin) getScheduleName(backupName string) string { - // for non-scheduled backup, we are considering backup name as schedule name only - scheduleOrBackupName := backupName - - // If it is scheduled backup then we need to get the schedule name - splitName := strings.Split(backupName, "-") - if len(splitName) >= 2 { - _, err := time.Parse("20060102150405", splitName[len(splitName)-1]) - if err != nil { - // last substring is not timestamp, so it is not generated from schedule - return scheduleOrBackupName - } - scheduleOrBackupName = strings.Join(splitName[0:len(splitName)-1], "-") - } - return scheduleOrBackupName -} - -// getInfoFromSnapshotID return backup name and volume id from the given snapshotID -func getInfoFromSnapshotID(snapshotID string) (volumeID, backupName string, err error) { - s := strings.Split(snapshotID, SnapshotIDIdentifier) - if len(s) != 2 { - err = errors.New("invalid snapshot id") - return - } - - volumeID = s[0] - backupName = s[1] - - if volumeID == "" || backupName == "" { - err = errors.Errorf("invalid volumeID=%s backupName=%s", volumeID, backupName) - } - return -} - -func generateSnapshotID(volumeID, backupName string) string { - return volumeID + SnapshotIDIdentifier + backupName -} - -func isTrue(str string) bool { - str = strings.ToLower(str) - return str == trueStr || str == "yes" || str == "1" -} diff --git a/pkg/cstor/cvr_operation.go b/pkg/cstor/cvr_operation.go deleted file mode 100644 index 0240bf10..00000000 --- a/pkg/cstor/cvr_operation.go +++ /dev/null @@ -1,250 +0,0 @@ -/* -Copyright 2019 The OpenEBS Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package cstor - -import ( - "context" - "fmt" - "strings" - "time" - - cstorv1 "github.com/openebs/api/v2/pkg/apis/cstor/v1" - "github.com/pkg/errors" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" -) - -const ( - cVRPVLabel = "openebs.io/persistent-volume" - restoreCompletedAnnotation = "openebs.io/restore-completed" -) - -var validCvrStatuses = []string{ - string(cstorv1.CVRStatusOnline), - string(cstorv1.CVRStatusError), - string(cstorv1.CVRStatusDegraded), -} - -// CVRWaitCount control time limit for waitForAllCVR -var CVRWaitCount = 100 - -// CVRCheckInterval defines amount of delay for CVR check -var CVRCheckInterval = 5 * time.Second - -// waitForAllCVRs will ensure that all CVR related to -// the given volume is created -func (p *Plugin) waitForAllCVRs(vol *Volume) error { - return p.waitForAllCVRsToBeInValidStatus(vol, validCvrStatuses) -} - -func (p *Plugin) waitForAllCVRsToBeInValidStatus(vol *Volume, statuses []string) error { - replicaCount := p.getCVRCount(vol.volname, vol.isCSIVolume) - if replicaCount == -1 { - return errors.Errorf("Failed to fetch replicaCount for volume{%s}", vol.volname) - } - - if vol.isCSIVolume { - return p.waitForCSIBasedCVRs(vol, replicaCount, statuses) - } - return p.waitFoNonCSIBasedCVRs(vol, replicaCount, statuses) -} - -// waitFoNonCSIBasedCVRs will ensure that all CVRs related to -// given non CSI based volume is created -func (p *Plugin) waitFoNonCSIBasedCVRs(vol *Volume, replicaCount int, statuses []string) error { - for cnt := 0; cnt < CVRWaitCount; cnt++ { - cvrList, err := p.OpenEBSClient. - OpenebsV1alpha1(). - CStorVolumeReplicas(p.namespace). - List(context.TODO(), metav1.ListOptions{ - LabelSelector: cVRPVLabel + "=" + vol.volname, - }) - if err != nil { - return errors.Errorf("Failed to fetch CVR for volume=%s %s", vol.volname, err) - } - if len(cvrList.Items) != replicaCount { - time.Sleep(CVRCheckInterval) - continue - } - cvrCount := 0 - for _, cvr := range cvrList.Items { - if contains(statuses, string(cvr.Status.Phase)) { - cvrCount++ - } - } - if cvrCount == replicaCount { - return nil - } - time.Sleep(CVRCheckInterval) - } - return errors.Errorf("CVR for volume{%s} are not ready!", vol.volname) -} - -// waitForCSIBasedCVRs will ensure that all CVRs related to -// the given CSI volume is created. -func (p *Plugin) waitForCSIBasedCVRs(vol *Volume, replicaCount int, statuses []string) error { - for cnt := 0; cnt < CVRWaitCount; cnt++ { - cvrList, err := p.OpenEBSAPIsClient. - CstorV1(). - CStorVolumeReplicas(p.namespace). 
- List(context.TODO(), metav1.ListOptions{ - LabelSelector: cVRPVLabel + "=" + vol.volname, - }) - if err != nil { - return errors.Errorf("Failed to fetch CVR for volume=%s %s", vol.volname, err) - } - - if len(cvrList.Items) != replicaCount { - time.Sleep(CVRCheckInterval) - continue - } - - cvrCount := 0 - for _, cvr := range cvrList.Items { - if contains(statuses, string(cvr.Status.Phase)) { - cvrCount++ - } - } - if cvrCount == replicaCount { - return nil - } - time.Sleep(CVRCheckInterval) - } - return errors.Errorf("CVR for volume{%s} are not ready!", vol.volname) -} - -// getCVRCount returns the number of CVR for a given volume -func (p *Plugin) getCVRCount(volname string, isCSIVolume bool) int { - // For CSI based volume, CVR of v1 is used. - if isCSIVolume { - // If the volume is CSI based, then CVR V1 is used. - obj, err := p.OpenEBSAPIsClient. - CstorV1(). - CStorVolumes(p.namespace). - Get(context.TODO(), volname, metav1.GetOptions{}) - if err != nil { - p.Log.Errorf("Failed to fetch cstorVolume.. %s", err) - return -1 - } - - return obj.Spec.ReplicationFactor - } - // For non CSI based volume, CVR of v1alpha1 is used. - obj, err := p.OpenEBSClient. - OpenebsV1alpha1(). - CStorVolumes(p.namespace). - Get(context.TODO(), volname, metav1.GetOptions{}) - if err != nil { - p.Log.Errorf("Failed to fetch cstorVolume.. %s", err) - return -1 - } - return obj.Spec.ReplicationFactor -} - -// markCVRsAsRestoreCompleted annotate relevant CVR with restoreCompletedAnnotation -// Note: It will not wait for CVR to become healthy. This is mainly to avoid the scenarios -// where target-affinity is used. -func (p *Plugin) markCVRsAsRestoreCompleted(vol *Volume) error { - p.Log.Infof("Waiting for all CVRs to be ready") - if err := p.waitForAllCVRs(vol); err != nil { - return err - } - - p.Log.Infof("Marking restore as completed on CVRs") - if err := p.markRestoreAsCompleted(vol); err != nil { - p.Log.Errorf("Failed to mark restore as completed : %s", err) - return err - } - - p.Log.Infof("Successfully annotated all CVRs") - return nil -} - -// markRestoreAsCompleted will mark CVRs that the restore was completed -func (p *Plugin) markRestoreAsCompleted(vol *Volume) error { - if vol.isCSIVolume { - return p.markRestoreAsCompletedForCSIBasedCVRs(vol) - } - return p.markRestoreAsCompletedForNonCSIBasedCVRs(vol) -} - -func (p *Plugin) markRestoreAsCompletedForCSIBasedCVRs(vol *Volume) error { - replicas := p.OpenEBSAPIsClient.CstorV1(). - CStorVolumeReplicas(p.namespace) - - cvrList, err := replicas. - List(context.TODO(), metav1.ListOptions{ - LabelSelector: cVRPVLabel + "=" + vol.volname, - }) - - if err != nil { - return errors.Errorf("Failed to fetch CVR for volume=%s %s", vol.volname, err) - } - - var errs []string - for i := range cvrList.Items { - cvr := cvrList.Items[i] - p.Log.Infof("Updating CVRs %s", cvr.Name) - - cvr.Annotations[restoreCompletedAnnotation] = trueStr - _, err := replicas.Update(context.TODO(), &cvr, metav1.UpdateOptions{}) - - if err != nil { - p.Log.Warnf("could not update CVR %s", cvr.Name) - errs = append(errs, err.Error()) - } - } - - if len(errs) > 0 { - return fmt.Errorf(strings.Join(errs, "; ")) - } - - return nil -} - -func (p *Plugin) markRestoreAsCompletedForNonCSIBasedCVRs(vol *Volume) error { - replicas := p.OpenEBSClient.OpenebsV1alpha1(). - CStorVolumeReplicas(p.namespace) - - cvrList, err := replicas. 
- List(context.TODO(), metav1.ListOptions{ - LabelSelector: cVRPVLabel + "=" + vol.volname, - }) - - if err != nil { - return errors.Errorf("Failed to fetch CVR for volume=%s %s", vol.volname, err) - } - - var errs []string - for i := range cvrList.Items { - cvr := cvrList.Items[i] - p.Log.Infof("Updating CVRs %s", cvr.Name) - - cvr.Annotations[restoreCompletedAnnotation] = trueStr - _, err := replicas.Update(context.TODO(), &cvr, metav1.UpdateOptions{}) - - if err != nil { - p.Log.Warnf("could not update CVR %s", cvr.Name) - errs = append(errs, err.Error()) - } - } - - if len(errs) > 0 { - return fmt.Errorf(strings.Join(errs, "; ")) - } - - return nil -} diff --git a/pkg/cstor/pv_operation.go b/pkg/cstor/pv_operation.go deleted file mode 100644 index 3fed81cf..00000000 --- a/pkg/cstor/pv_operation.go +++ /dev/null @@ -1,242 +0,0 @@ -/* -Copyright 2020 The OpenEBS Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package cstor - -import ( - "context" - "encoding/json" - "sort" - - uuid "github.com/gofrs/uuid" - v1alpha1 "github.com/openebs/maya/pkg/apis/openebs.io/v1alpha1" - "github.com/pkg/errors" - v1 "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" -) - -const ( - // PvClonePrefix prefix for clone volume in case restore from local backup - PvClonePrefix = "cstor-clone-" -) - -func (p *Plugin) updateVolCASInfo(data []byte, volumeID string) error { - var cas v1alpha1.CASVolume - - vol := p.volumes[volumeID] - if vol == nil { - return errors.Errorf("Volume{%s} not found in volume list", volumeID) - } - - if !vol.isCSIVolume { - err := json.Unmarshal(data, &cas) - if err != nil { - return err - } - - vol.iscsi = v1.ISCSIPersistentVolumeSource{ - TargetPortal: cas.Spec.TargetPortal, - IQN: cas.Spec.Iqn, - Lun: cas.Spec.Lun, - FSType: cas.Spec.FSType, - ReadOnly: false, - } - return nil - } - //NOTE: As of now no need to handle restore response for cStor CSI volumes - return nil -} - -// restoreVolumeFromCloud restore remote snapshot for the given volume -// Note: cstor snapshots are incremental in nature, so restore will be executed -// from base snapshot to incremental snapshot 'vol.backupName' if p.restoreAllSnapshots is set -// else restore will be performed for the given backup only. -func (p *Plugin) restoreVolumeFromCloud(vol *Volume, targetBackupName string) error { - var ( - snapshotList []string - err error - ) - - if p.restoreAllSnapshots { - // We are restoring from base backup to targeted Backup - snapshotList, err = p.cl.GetSnapListFromCloud(vol.snapshotTag, p.getScheduleName(targetBackupName)) - if err != nil { - return err - } - } else { - // We are restoring only given backup - snapshotList = []string{targetBackupName} - } - - if !contains(snapshotList, targetBackupName) { - return errors.Errorf("Targeted backup=%s not found in snapshot list", targetBackupName) - } - - // snapshots are created using timestamp, we need to sort it in ascending order - sort.Strings(snapshotList) - - for _, snap := range snapshotList { - // Check if snapshot file exists or not. 
- // There is a possibility where only PVC file exists, - // in case of failed/partially-failed backup, but not snapshot file. - exists, err := p.cl.FileExists(vol.snapshotTag, snap) - if err != nil { - p.Log.Errorf("Failed to check remote snapshot=%s, skipping restore of this snapshot, err=%s", snap, err) - continue - } - - // If the snapshot doesn't exist, skip the restore for that snapshot. - // Since the snapshots are incremental, we need to continue to restore for the next snapshot. - if !exists { - p.Log.Warningf("Remote snapshot=%s doesn't exist, skipping restore of this snapshot", snap) - continue - } - - p.Log.Infof("Restoring snapshot=%s", snap) - - vol.backupName = snap - - err = p.restoreSnapshotFromCloud(vol) - if err != nil { - return errors.Wrapf(err, "failed to restor snapshot=%s", snap) - } - p.Log.Infof("Restore of snapshot=%s completed", snap) - - if snap == targetBackupName { - // we restored till the targetBackupName, no need to restore next snapshot - break - } - } - return nil -} - -// restoreSnapshotFromCloud restore snapshot 'vol.backupName` to volume 'vol.volname' -func (p *Plugin) restoreSnapshotFromCloud(vol *Volume) error { - p.cl.ExitServer = false - - restore, err := p.sendRestoreRequest(vol) - if err != nil { - return errors.Wrapf(err, "Restore request to apiServer failed") - } - - filename := p.cl.GenerateRemoteFilename(vol.snapshotTag, vol.backupName) - if filename == "" { - return errors.Errorf("Error creating remote file name for restore") - } - - go p.checkRestoreStatus(restore, vol) - - ret := p.cl.Download(filename, CstorRestorePort) - if !ret { - return errors.New("failed to restore snapshot") - } - - if vol.restoreStatus != v1alpha1.RSTCStorStatusDone { - return errors.Errorf("failed to restore.. status {%s}", vol.restoreStatus) - } - - return nil -} - -func (p *Plugin) getPV(volumeID string) (*v1.PersistentVolume, error) { - return p.K8sClient. - CoreV1(). - PersistentVolumes(). 
- Get(context.TODO(), volumeID, metav1.GetOptions{}) -} - -func (p *Plugin) restoreVolumeFromLocal(vol *Volume) error { - _, err := p.sendRestoreRequest(vol) - if err != nil { - return errors.Wrapf(err, "Restore request to apiServer failed") - } - vol.restoreStatus = v1alpha1.RSTCStorStatusDone - return nil -} - -// getVolumeForLocalRestore return volume information to restore locally for the given volumeID and snapName -// volumeID : pv name from backup -// snapName : snapshot name from where new volume will be created -func (p *Plugin) getVolumeForLocalRestore(volumeID, snapName string) (*Volume, error) { - pv, err := p.getPV(volumeID) - if err != nil { - return nil, errors.Wrapf(err, "error fetching PV=%s", volumeID) - } - - clonePvName, err := generateClonePVName() - if err != nil { - return nil, err - } - p.Log.Infof("Renaming PV %s to %s", pv.Name, clonePvName) - - isCSIVolume := isCSIPv(*pv) - - vol := &Volume{ - volname: clonePvName, - srcVolname: pv.Name, - backupName: snapName, - storageClass: pv.Spec.StorageClassName, - size: pv.Spec.Capacity[v1.ResourceStorage], - isCSIVolume: isCSIVolume, - } - p.volumes[vol.volname] = vol - return vol, nil -} - -// getVolumeForRemoteRestore return volume information to restore from remote backup for the given volumeID and snapName -// volumeID : pv name from backup -// snapName : snapshot name from where new volume will be created -func (p *Plugin) getVolumeForRemoteRestore(volumeID, snapName string) (*Volume, error) { - vol, err := p.createPVC(volumeID, snapName) - if err != nil { - p.Log.Errorf("CreatePVC returned error=%s", err) - return nil, err - } - - p.Log.Infof("Generated PV name is %s", vol.volname) - - return vol, nil -} - -// generateClonePVName return new name for clone pv for the given pv -func generateClonePVName() (string, error) { - nuuid, err := uuid.NewV4() - if err != nil { - return "", errors.Wrapf(err, "Error generating uuid for PV rename") - } - - return PvClonePrefix + nuuid.String(), nil -} - -// isCSIPv returns true if given PV is created by cstor CSI driver -func isCSIPv(pv v1.PersistentVolume) bool { - if pv.Spec.CSI != nil && - pv.Spec.CSI.Driver == openebsCSIName { - return true - } - return false -} - -// contains return true if given target string exists in slice s -func contains(s []string, target string) bool { - for _, v := range s { - if v == target { - return true - } - } - - return false -} diff --git a/pkg/cstor/pvc_operation.go b/pkg/cstor/pvc_operation.go deleted file mode 100644 index 1da66f8b..00000000 --- a/pkg/cstor/pvc_operation.go +++ /dev/null @@ -1,346 +0,0 @@ -/* -Copyright 2019 The OpenEBS Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package cstor - -import ( - "context" - "encoding/json" - "time" - - "github.com/openebs/maya/pkg/apis/openebs.io/v1alpha1" - "github.com/openebs/velero-plugin/pkg/velero" - "github.com/pkg/errors" - v1 "k8s.io/api/core/v1" - k8serrors "k8s.io/apimachinery/pkg/api/errors" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/util/wait" -) - -const ( - // PVCWaitCount control time limit for createPVC - PVCWaitCount = 100 - - // NamespaceCreateTimeout defines timeout for namespace creation - NamespaceCreateTimeout = 5 * time.Minute - - // PVCCheckInterval defines amount of delay for PVC bound check - PVCCheckInterval = 5 * time.Second -) - -// backupPVC perform backup for given volume's PVC -func (p *Plugin) backupPVC(volumeID string) error { - vol := p.volumes[volumeID] - var bkpPvc *v1.PersistentVolumeClaim - - pvcs, err := p.K8sClient. - CoreV1(). - PersistentVolumeClaims(vol.namespace). - List(context.TODO(), metav1.ListOptions{}) - if err != nil { - p.Log.Errorf("Error fetching PVC list : %s", err.Error()) - return errors.New("failed to fetch PVC list") - } - - for _, pvc := range pvcs.Items { - if pvc.Spec.VolumeName == vol.volname { - fPVC := pvc - bkpPvc = &fPVC - break - } - } - - if bkpPvc == nil { - p.Log.Errorf("Failed to find PVC for PV{%s}", vol.volname) - return errors.Errorf("Failed to find PVC for volume{%s}", vol.volname) - } - - bkpPvc.ResourceVersion = "" - bkpPvc.SelfLink = "" - if bkpPvc.Spec.StorageClassName == nil || *bkpPvc.Spec.StorageClassName == "" { - sc := bkpPvc.Annotations[v1.BetaStorageClassAnnotation] - bkpPvc.Spec.StorageClassName = &sc - } - - bkpPvc.Annotations = nil - bkpPvc.UID = "" - bkpPvc.Spec.VolumeName = "" - - data, err := json.MarshalIndent(bkpPvc, "", "\t") - if err != nil { - return errors.New("error doing json parsing") - } - - filename := p.cl.GenerateRemoteFilename(vol.volname, vol.backupName) - if filename == "" { - return errors.New("error creating remote file name for pvc backup") - } - - if ok := p.cl.Write(data, filename+".pvc"); !ok { - return errors.New("failed to upload PVC") - } - - return nil -} - -// createPVC create PVC for given volume name -func (p *Plugin) createPVC(volumeID, snapName string) (*Volume, error) { - var vol *Volume - - pvc, err := p.downloadPVC(volumeID, snapName) - if err != nil { - return nil, errors.Wrapf(err, "failed to download pvc") - } - - targetedNs, err := velero.GetRestoreNamespace(pvc.Namespace, snapName, p.Log) - if err != nil { - return nil, err - } - - err = p.EnsureNamespaceOrCreate(targetedNs) - if err != nil { - return nil, errors.Wrapf(err, "error verifying namespace") - } - - pvc.Namespace = targetedNs - - newVol, err := p.getVolumeFromPVC(*pvc) - if err != nil { - return nil, err - } - - if newVol != nil { - newVol.backupName = snapName - newVol.snapshotTag = volumeID - return newVol, nil - } - - p.Log.Infof("Creating PVC for volumeID:%s snapshot:%s in namespace=%s", volumeID, snapName, targetedNs) - - pvc.Annotations = make(map[string]string) - // Add annotation PVCreatedByKey, with value 'restore' to PVC - // So that Maya-APIServer skip updating target IPAddress in CVR - pvc.Annotations[v1alpha1.PVCreatedByKey] = "restore" - rpvc, err := p.K8sClient. - CoreV1(). - PersistentVolumeClaims(pvc.Namespace). - Create(context.TODO(), pvc, metav1.CreateOptions{}) - if err != nil { - return nil, errors.Wrapf(err, "failed to create PVC=%s/%s", pvc.Namespace, pvc.Name) - } - - for cnt := 0; cnt < PVCWaitCount; cnt++ { - pvc, err = p.K8sClient. - CoreV1(). 
- PersistentVolumeClaims(rpvc.Namespace). - Get(context.TODO(), rpvc.Name, metav1.GetOptions{}) - if err != nil || pvc.Status.Phase == v1.ClaimLost { - if err = p.K8sClient. - CoreV1(). - PersistentVolumeClaims(rpvc.Namespace). - Delete(context.TODO(), rpvc.Name, metav1.DeleteOptions{}); err != nil { - p.Log.Warnf("Failed to delete pvc {%s/%s} : %s", rpvc.Namespace, rpvc.Name, err.Error()) - } - return nil, errors.Wrapf(err, "failed to create PVC=%s/%s", rpvc.Namespace, rpvc.Name) - } - if pvc.Status.Phase == v1.ClaimBound { - p.Log.Infof("PVC(%v) created..", pvc.Name) - vol = &Volume{ - volname: pvc.Spec.VolumeName, - snapshotTag: volumeID, - namespace: pvc.Namespace, - backupName: snapName, - storageClass: *pvc.Spec.StorageClassName, - } - p.volumes[vol.volname] = vol - break - } - time.Sleep(PVCCheckInterval) - } - - if vol == nil { - return nil, errors.Errorf("PVC{%s/%s} is not bounded!", rpvc.Namespace, rpvc.Name) - } - - // check for created volume type - pv, err := p.getPV(vol.volname) - - if err != nil { - p.Log.Errorf("Failed to get PV{%s}", vol.volname) - return nil, errors.Wrapf(err, "failed to get pv=%s", vol.volname) - } - - vol.isCSIVolume = isCSIPv(*pv) - if err = p.waitForAllCVRs(vol); err != nil { - return nil, err - } - - // CVRs are created and updated, now we can remove the annotation 'PVCreatedByKey' from PVC - if err = p.removePVCAnnotationKey(pvc, v1alpha1.PVCreatedByKey); err != nil { - p.Log.Warningf("Failed to remove restore annotation from PVC=%s/%s err=%s", pvc.Namespace, pvc.Name, err) - return nil, errors.Wrapf(err, - "failed to clear restore-annotation=%s from PVC=%s/%s", - v1alpha1.PVCreatedByKey, pvc.Namespace, pvc.Name, - ) - } - return vol, nil -} - -// getVolumeFromPVC returns volume info for given PVC if PVC is in bound state -func (p *Plugin) getVolumeFromPVC(pvc v1.PersistentVolumeClaim) (*Volume, error) { - rpvc, err := p.K8sClient. - CoreV1(). - PersistentVolumeClaims(pvc.Namespace). 
- Get(context.TODO(), pvc.Name, metav1.GetOptions{}) - if err != nil { - if k8serrors.IsNotFound(err) { - return nil, nil - } - return nil, errors.Wrapf(err, "failed to fetch PVC{%s}", pvc.Name) - } - - if rpvc.Status.Phase == v1.ClaimLost { - p.Log.Errorf("PVC{%s} is not bound yet!", rpvc.Name) - return nil, errors.Errorf("pvc{%s} is not bound", rpvc.Name) - } - // check for created volume type - pv, err := p.getPV(rpvc.Spec.VolumeName) - if err != nil { - p.Log.Errorf("Failed to get PV{%s}", rpvc.Spec.VolumeName) - return nil, errors.Wrapf(err, "failed to get pv=%s", rpvc.Spec.VolumeName) - } - isCSIVolume := isCSIPv(*pv) - vol := &Volume{ - volname: rpvc.Spec.VolumeName, - snapshotTag: rpvc.Spec.VolumeName, - namespace: rpvc.Namespace, - storageClass: *rpvc.Spec.StorageClassName, - isCSIVolume: isCSIVolume, - } - p.volumes[vol.volname] = vol - - if err = p.waitForAllCVRs(vol); err != nil { - return nil, errors.Wrapf(err, "cvr not ready") - } - - // remove the annotation 'PVCreatedByKey' from PVC - // There might be chances of stale PVCreatedByKey annotation in PVC - if err = p.removePVCAnnotationKey(rpvc, v1alpha1.PVCreatedByKey); err != nil { - p.Log.Warningf("Failed to remove restore annotation from PVC=%s/%s err=%s", rpvc.Namespace, rpvc.Name, err) - return nil, errors.Wrapf(err, - "failed to clear restore-annotation=%s from PVC=%s/%s", - v1alpha1.PVCreatedByKey, rpvc.Namespace, rpvc.Name, - ) - } - - return vol, nil -} - -func (p *Plugin) downloadPVC(volumeID, snapName string) (*v1.PersistentVolumeClaim, error) { - pvc := &v1.PersistentVolumeClaim{} - - filename := p.cl.GenerateRemoteFilename(volumeID, snapName) - - data, ok := p.cl.Read(filename + ".pvc") - if !ok { - return nil, errors.Errorf("failed to download PVC file=%s", filename+".pvc") - } - - if err := json.Unmarshal(data, pvc); err != nil { - return nil, errors.Errorf("failed to decode pvc file=%s", filename+".pvc") - } - - return pvc, nil -} - -// removePVCAnnotationKey remove the given annotation key from the PVC and update it -func (p *Plugin) removePVCAnnotationKey(pvc *v1.PersistentVolumeClaim, annotationKey string) error { - var err error - - if pvc.Annotations == nil { - return nil - } - - delete(pvc.Annotations, annotationKey) - - _, err = p.K8sClient. - CoreV1(). - PersistentVolumeClaims(pvc.Namespace). 
- Update(context.TODO(), pvc, metav1.UpdateOptions{}) - return err -} - -// EnsureNamespaceOrCreate ensure that given namespace exists and ready -// - If namespace exists and ready to use then it will return nil -// - If namespace is in terminating state then function will wait for ns removal and re-create it -// - If namespace doesn't exist then function will create it -func (p *Plugin) EnsureNamespaceOrCreate(ns string) error { - checkNs := func(namespace string) (bool, error) { - var isNsReady bool - - err := wait.PollImmediate(time.Second, NamespaceCreateTimeout, func() (bool, error) { - p.Log.Debugf("Checking namespace=%s", namespace) - - obj, err := p.K8sClient.CoreV1().Namespaces().Get(context.TODO(), namespace, metav1.GetOptions{}) - if err != nil { - if !k8serrors.IsNotFound(err) { - return false, err - } - - // namespace doesn't exist - return true, nil - } - - if obj.GetDeletionTimestamp() != nil || obj.Status.Phase == v1.NamespaceTerminating { - // will wait till namespace get deleted - return false, nil - } - - if obj.Status.Phase == v1.NamespaceActive { - isNsReady = true - } - - return isNsReady, nil - }) - - return isNsReady, err - } - - isReady, err := checkNs(ns) - if err != nil { - return errors.Wrapf(err, "failed to check namespace") - } - - if isReady { - return nil - } - - // namespace doesn't exist, create it - nsObj := &v1.Namespace{ - ObjectMeta: metav1.ObjectMeta{ - Name: ns, - }, - } - - p.Log.Infof("Creating namespace=%s", ns) - - _, err = p.K8sClient.CoreV1().Namespaces().Create(context.TODO(), nsObj, metav1.CreateOptions{}) - if err != nil { - return errors.Wrapf(err, "failed to create namespace") - } - - return nil -} diff --git a/pkg/cstor/status.go b/pkg/cstor/status.go deleted file mode 100644 index 9ab951f3..00000000 --- a/pkg/cstor/status.go +++ /dev/null @@ -1,179 +0,0 @@ -/* -Copyright 2019 The OpenEBS Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package cstor - -import ( - "encoding/json" - "time" - - "github.com/openebs/maya/pkg/apis/openebs.io/v1alpha1" -) - -// checkBackupStatus queries MayaAPI server for given backup status -// and wait until backup completes -func (p *Plugin) checkBackupStatus(bkp *v1alpha1.CStorBackup, isCSIVolume bool) { - var ( - bkpDone bool - url string - ) - - if isCSIVolume { - url = p.cvcAddr + backupEndpoint - } else { - url = p.mayaAddr + backupEndpoint - } - - bkpvolume, exists := p.volumes[bkp.Spec.VolumeName] - if !exists { - p.Log.Errorf("Failed to fetch volume info for {%s}", bkp.Spec.VolumeName) - p.cl.ExitServer = true - bkpvolume.backupStatus = v1alpha1.BKPCStorStatusInvalid - return - } - - bkpData, err := json.Marshal(bkp) - if err != nil { - p.Log.Errorf("JSON marshal failed : %s", err.Error()) - p.cl.ExitServer = true - bkpvolume.backupStatus = v1alpha1.BKPCStorStatusInvalid - return - } - - for !bkpDone { - var bs v1alpha1.CStorBackup - - time.Sleep(backupStatusInterval * time.Second) - resp, err := p.httpRestCall(url, "GET", bkpData) - if err != nil { - p.Log.Warnf("Failed to fetch backup status : %s", err.Error()) - continue - } - - err = json.Unmarshal(resp, &bs) - if err != nil { - p.Log.Warnf("Unmarshal failed : %s", err.Error()) - continue - } - - bkpvolume.backupStatus = bs.Status - - switch bs.Status { - case v1alpha1.BKPCStorStatusDone, v1alpha1.BKPCStorStatusFailed, v1alpha1.BKPCStorStatusInvalid: - bkpDone = true - p.cl.ExitServer = true - if err = p.cleanupCompletedBackup(bs, isCSIVolume); err != nil { - p.Log.Warningf("failed to execute clean-up request for backup=%s err=%s", bs.Name, err) - } - } - } -} - -// checkRestoreStatus queries MayaAPI server for given restore status -// and wait until restore completes -func (p *Plugin) checkRestoreStatus(rst *v1alpha1.CStorRestore, vol *Volume) { - var ( - rstDone bool - url string - ) - - if vol.isCSIVolume { - url = p.cvcAddr + restorePath - } else { - url = p.mayaAddr + restorePath - } - - rstData, err := json.Marshal(rst) - if err != nil { - p.Log.Errorf("JSON marshal failed : %s", err.Error()) - vol.restoreStatus = v1alpha1.RSTCStorStatusInvalid - p.cl.ExitServer = true - } - - for !rstDone { - var rs v1alpha1.CStorRestore - - time.Sleep(restoreStatusInterval * time.Second) - resp, err := p.httpRestCall(url, "GET", rstData) - if err != nil { - p.Log.Warnf("Failed to fetch backup status : %s", err.Error()) - continue - } - - err = json.Unmarshal(resp, &rs.Status) - if err != nil { - p.Log.Warnf("Unmarshal failed : %s", err.Error()) - continue - } - - vol.restoreStatus = rs.Status - - switch rs.Status { - case v1alpha1.RSTCStorStatusDone, v1alpha1.RSTCStorStatusFailed, v1alpha1.RSTCStorStatusInvalid: - rstDone = true - p.cl.ExitServer = true - } - } -} - -// cleanupCompletedBackup send the delete request to apiserver -// to cleanup backup resources -// If it is normal backup then it will delete the current backup, it can be failed or succeeded backup -// If it is scheduled backup then -// - if current backup is base backup, not incremental one, then it will not perform any clean-up -// - if current backup is incremental backup and failed one then it will delete that(current) backup -// - if current backup is incremental backup and completed successfully then -// it will delete the last completed or previous backup -func (p *Plugin) cleanupCompletedBackup(bkp v1alpha1.CStorBackup, isCSIVolume bool) error { - targetedSnapName := bkp.Spec.SnapName - - // In case of scheduled backup we are using the last completed backup to 
send - // differential snapshot. So We don't need to delete the last completed backup. - if isScheduledBackup(bkp) && isBackupSucceeded(bkp) { - // For incremental backup We are using PrevSnapName to send the differential snapshot - // Since Given backup is completed successfully We can delete the 2nd last completed backup - if bkp.Spec.PrevSnapName == "" { - // PrevSnapName will be empty if the given backup is base backup - // clean-up is not required for base backup - return nil - } - targetedSnapName = bkp.Spec.PrevSnapName - } - - p.Log.Infof("executing clean-up request.. snapshot=%s volume=%s ns=%s backup=%s", - targetedSnapName, - bkp.Spec.VolumeName, - bkp.Namespace, - bkp.Spec.BackupName, - ) - - return p.sendDeleteRequest(targetedSnapName, - bkp.Spec.VolumeName, - bkp.Namespace, - bkp.Spec.BackupName, - isCSIVolume) -} - -// return true if given backup is part of schedule -func isScheduledBackup(bkp v1alpha1.CStorBackup) bool { - // if backup is scheduled backup then snapshot name and backup name are different - return bkp.Spec.SnapName != bkp.Spec.BackupName -} - -// isBackupSucceeded returns true if backup completed successfully -func isBackupSucceeded(bkp v1alpha1.CStorBackup) bool { - return bkp.Status == v1alpha1.BKPCStorStatusDone -} diff --git a/pkg/snapshot/snap.go b/pkg/snapshot/snap.go index 6db540fb..62c14d66 100644 --- a/pkg/snapshot/snap.go +++ b/pkg/snapshot/snap.go @@ -17,7 +17,7 @@ limitations under the License. package snapshot import ( - "github.com/openebs/velero-plugin/pkg/cstor" + zfs "github.com/openebs/velero-plugin/pkg/zfs/plugin" "github.com/sirupsen/logrus" "github.com/vmware-tanzu/velero/pkg/plugin/velero" "k8s.io/apimachinery/pkg/runtime" @@ -33,9 +33,9 @@ var _ velero.VolumeSnapshotter = (*BlockStore)(nil) // Init the plugin func (p *BlockStore) Init(config map[string]string) error { - p.Log.Infof("Initializing velero plugin for CStor") + p.Log.Infof("Initializing velero plugin for Zfs") - p.plugin = &cstor.Plugin{Log: p.Log} + p.plugin = &zfs.Plugin{Log: p.Log} return p.plugin.Init(config) } diff --git a/script/install-openebs.sh b/script/install-openebs.sh index 9c5a7003..d88b3e72 100755 --- a/script/install-openebs.sh +++ b/script/install-openebs.sh @@ -24,9 +24,10 @@ sudo service iscsid start sudo systemctl status iscsid --no-pager echo "Installation complete" -#TODO add openebs release -kubectl apply -f https://raw.githubusercontent.com/openebs/openebs/master/k8s/openebs-operator.yaml +helm repo add openebs https://openebs.github.io/openebs +helm repo update +helm install openebs --namespace openebs openebs/openebs --set engines.replicated.mayastor.enabled=false --set engines.local.lvm.enabled=false --create-namespace function waitForDeployment() { DEPLOY=$1 NS=$2 @@ -37,7 +38,7 @@ function waitForDeployment() { fi for i in $(seq 1 50) ; do - kubectl get deployment -n ${NS} ${DEPLOY} + kubectl get deployment -n "${NS}" "${DEPLOY}" kstat=$? if [ $kstat -ne 0 ] && ! 
$CREATE ; then return @@ -62,12 +63,10 @@ function waitForDeployment() { function dumpMayaAPIServerLogs() { LC=$1 MAPIPOD=$(kubectl get pods -o jsonpath='{.items[?(@.spec.containers[0].name=="maya-apiserver")].metadata.name}' -n openebs) - kubectl logs --tail=${LC} $MAPIPOD -n openebs + kubectl logs --tail="${LC}" "$MAPIPOD" -n openebs printf "\n\n" } -waitForDeployment maya-apiserver openebs -waitForDeployment openebs-provisioner openebs -waitForDeployment openebs-ndm-operator openebs +waitForDeployment openebs-zfs-localpv-controller openebs kubectl get pods --all-namespaces diff --git a/script/volumesnapshotlocation.yaml b/script/volumesnapshotlocation.yaml index 6f4351ce..7d5a7ad2 100644 --- a/script/volumesnapshotlocation.yaml +++ b/script/volumesnapshotlocation.yaml @@ -19,7 +19,7 @@ metadata: name: default namespace: velero spec: - provider: openebs.io/cstor-blockstore + provider: openebs.io/zfspv-blockstore config: bucket: velero provider: aws diff --git a/tests/app/application_yaml.go b/tests/app/application_yaml.go index c0582dec..10760986 100644 --- a/tests/app/application_yaml.go +++ b/tests/app/application_yaml.go @@ -21,7 +21,7 @@ const ( BusyboxYaml = `apiVersion: v1 kind: Pod metadata: - name: busybox-cstor + name: busybox-zfs namespace: default spec: containers: @@ -38,6 +38,6 @@ spec: volumes: - name: demo-vol1 persistentVolumeClaim: - claimName: cstor-vol1-1r-claim + claimName: csi-zfspv ` ) diff --git a/tests/openebs/storage_install.go b/tests/openebs/storage_install.go index 4a4e8e48..8aa44a79 100644 --- a/tests/openebs/storage_install.go +++ b/tests/openebs/storage_install.go @@ -97,7 +97,6 @@ func (c *ClientSet) CreateSPC(spcYAML string) error { func (c *ClientSet) CreateVolume(pvcYAML, pvcNs string, wait bool) error { var pvc corev1.PersistentVolumeClaim var err error - if err = yaml.Unmarshal([]byte(pvcYAML), &pvc); err != nil { return err } @@ -109,12 +108,11 @@ func (c *ClientSet) CreateVolume(pvcYAML, pvcNs string, wait bool) error { time.Sleep(5 * time.Second) PVCName = pvc.Name - if wait { - err = c.WaitForHealthyCVR(&pvc) - } + if err == nil { AppPVC = &pvc } + return err } diff --git a/tests/openebs/storage_status.go b/tests/openebs/storage_status.go index 040dcf38..8f2a1d13 100644 --- a/tests/openebs/storage_status.go +++ b/tests/openebs/storage_status.go @@ -18,9 +18,7 @@ package openebs import ( "context" - "fmt" "strings" - "time" v1alpha1 "github.com/openebs/maya/pkg/apis/openebs.io/v1alpha1" k8s "github.com/openebs/velero-plugin/tests/k8s" @@ -38,54 +36,6 @@ const ( CVRMaxRetry = 5 ) -// WaitForHealthyCVR wait till CVR for given PVC becomes healthy -func (c *ClientSet) WaitForHealthyCVR(pvc *v1.PersistentVolumeClaim) error { - dumpLog := 0 - for { - if healthy := c.CheckCVRStatus(pvc.Name, - pvc.Namespace, - v1alpha1.CVRStatusOnline); healthy { - break - } - time.Sleep(5 * time.Second) - if dumpLog > 6 { - fmt.Printf("Waiting for %s/%s's CVR\n", pvc.Namespace, pvc.Name) - dumpLog = 0 - } - dumpLog++ - } - return nil -} - -// CheckCVRStatus check CVR status for given PVC -func (c *ClientSet) CheckCVRStatus(pvc, ns string, status v1alpha1.CStorVolumeReplicaPhase) bool { - var match bool - - for i := 0; i < CVRMaxRetry; i++ { - cvrlist, err := c.getPVCCVRList(pvc, ns) - if err != nil { - return match - } - - match = true - if len(cvrlist.Items) == 0 { - match = false - } - - for _, v := range cvrlist.Items { - if v.Status.Phase != status { - match = false - } - } - - if match { - break - } - time.Sleep(5 * time.Second) - } - - return match -} func (c 
*ClientSet) getPVCCVRList(pvc, ns string) (*v1alpha1.CStorVolumeReplicaList, error) { vol, err := c.getPVCVolumeName(pvc, ns) diff --git a/tests/openebs/storage_yaml.go b/tests/openebs/storage_yaml.go index 0949267f..e17c63e7 100644 --- a/tests/openebs/storage_yaml.go +++ b/tests/openebs/storage_yaml.go @@ -36,23 +36,22 @@ spec: SCYaml = `apiVersion: storage.k8s.io/v1 kind: StorageClass metadata: - name: openebs-cstor-sparse-auto - annotations: - openebs.io/cas-type: cstor - cas.openebs.io/config: | - - name: StoragePoolClaim - value: "sparse-claim-auto" - - name: ReplicaCount - value: "1" -provisioner: openebs.io/provisioner-iscsi + name: openebs-zfspv +parameters: + recordsize: "128k" + compression: "off" + dedup: "off" + fstype: "zfs" + poolname: "zfspv-pool" +provisioner: zfs.csi.openebs.io ` // PVCYaml for PVC CR PVCYaml = `kind: PersistentVolumeClaim apiVersion: v1 metadata: - name: cstor-vol1-1r-claim + name: csi-zfspv spec: - storageClassName: openebs-cstor-sparse-auto + storageClassName: openebs-zfspv accessModes: - ReadWriteOnce resources: diff --git a/tests/sanity/backup_test.go b/tests/sanity/backup_test.go index ec391919..b769b6f1 100644 --- a/tests/sanity/backup_test.go +++ b/tests/sanity/backup_test.go @@ -18,11 +18,10 @@ package sanity import ( "testing" - "time" + "fmt" . "github.com/onsi/ginkgo" . "github.com/onsi/gomega" - "github.com/openebs/maya/pkg/apis/openebs.io/v1alpha1" v1 "github.com/vmware-tanzu/velero/pkg/apis/velero/v1" corev1 "k8s.io/api/core/v1" @@ -57,18 +56,19 @@ var ( var _ = BeforeSuite(func() { err = app.CreateNamespace(AppNs) Expect(err).NotTo(HaveOccurred()) + fmt.Printf("Namespace : %s created\n", AppNs) err = k8s.Client.CreateStorageClass(openebs.SCYaml) Expect(err).NotTo(HaveOccurred()) - - err = openebs.Client.CreateSPC(openebs.SPCYaml) - Expect(err).NotTo(HaveOccurred()) + fmt.Println("Storageclass created") err = openebs.Client.CreateVolume(openebs.PVCYaml, AppNs, true) Expect(err).NotTo(HaveOccurred()) + fmt.Println("Volume created") err = app.DeployApplication(app.BusyboxYaml, AppNs) Expect(err).NotTo(HaveOccurred()) + fmt.Println("Application deployed") velero.BackupLocation = BackupLocation velero.SnapshotLocation = SnapshotLocation @@ -82,10 +82,10 @@ var _ = Describe("Backup/Restore Test", func() { By("Creating a backup") - err = openebs.Client.WaitForHealthyCVR(openebs.AppPVC) - Expect(err).NotTo(HaveOccurred(), "No healthy CVR for %s", openebs.AppPVC) - // There are chances that istgt is not updated, but replica is healthy - time.Sleep(30 * time.Second) + // err = openebs.Client.WaitForHealthyCVR(openebs.AppPVC) + // Expect(err).NotTo(HaveOccurred(), "No healthy CVR for %s", openebs.AppPVC) + // // There are chances that istgt is not updated, but replica is healthy + // time.Sleep(30 * time.Second) backupName, status, err = velero.Client.CreateBackup(AppNs) if (err != nil) || status != v1.BackupPhaseCompleted { @@ -156,12 +156,6 @@ var _ = Describe("Backup/Restore Test", func() { Expect(perr).NotTo(HaveOccurred(), "Failed to verify PVC=%s bound status for namespace=%s", app.PVCName, AppNs) Expect(phase).To(Equal(corev1.ClaimBound), "PVC=%s not bound", app.PVCName) - By("Checking if restored CVR are in healthy state") - ok := openebs.Client.CheckCVRStatus(app.PVCName, AppNs, v1alpha1.CVRStatusOnline) - if !ok { - dumpLogs() - } - Expect(ok).To(BeTrue(), "CVR for PVC=%s are not in healthy state", app.PVCName) }) It("Restore from scheduled backup", func() { @@ -183,18 +177,12 @@ var _ = Describe("Backup/Restore Test", func() { 
 			Expect(err).NotTo(HaveOccurred(), "Failed to verify PVC=%s bound status for namespace=%s", app.PVCName, AppNs)
 			Expect(phase).To(Equal(corev1.ClaimBound), "PVC=%s not bound", app.PVCName)
-			By("Checking if restored CVR are in healthy state")
-			ok := openebs.Client.CheckCVRStatus(app.PVCName, AppNs, v1alpha1.CVRStatusOnline)
-			if !ok {
-				dumpLogs()
-			}
-			Expect(ok).To(BeTrue(), "CVR for PVC=%s are not in healthy state", app.PVCName)
 			By("Checking if restore has created snapshot or not")
 			snapshotList, serr := velero.Client.GetRestoredSnapshotFromSchedule(scheduleName)
 			Expect(serr).NotTo(HaveOccurred())
 			for snapshot := range snapshotList {
-				ok, err = openebs.Client.CheckSnapshot(app.PVCName, AppNs, snapshot)
+				ok, err := openebs.Client.CheckSnapshot(app.PVCName, AppNs, snapshot)
 				if err != nil {
 					dumpLogs()
 				}
@@ -234,12 +222,6 @@ var _ = Describe("Backup/Restore Test", func() {
 			Expect(perr).NotTo(HaveOccurred(), "Failed to verify PVC=%s bound status for namespace=%s", app.PVCName, TargetedNs)
 			Expect(phase).To(Equal(corev1.ClaimBound), "PVC=%s not bound", app.PVCName)
-			By("Checking if restored CVR are in healthy state")
-			ok := openebs.Client.CheckCVRStatus(app.PVCName, TargetedNs, v1alpha1.CVRStatusOnline)
-			if !ok {
-				dumpLogs()
-			}
-			Expect(ok).To(BeTrue(), "CVR for PVC=%s is not in healthy state", app.PVCName)
 		})
 
 		It("Restore from scheduled backup to different Namespace", func() {
@@ -258,12 +240,6 @@ var _ = Describe("Backup/Restore Test", func() {
 			Expect(err).NotTo(HaveOccurred(), "Failed to verify PVC=%s bound status for namespace=%s", app.PVCName, TargetedNs)
 			Expect(phase).To(Equal(corev1.ClaimBound), "PVC=%s not bound", app.PVCName)
-			By("Checking if restored CVR are in healthy state")
-			ok := openebs.Client.CheckCVRStatus(app.PVCName, TargetedNs, v1alpha1.CVRStatusOnline)
-			if !ok {
-				dumpLogs()
-			}
-			Expect(ok).To(BeTrue(), "CVR for PVC=%s is not in healthy state", app.PVCName)
 			By("Checking if restore has created snapshot or not")
 			snapshotList, err := velero.Client.GetRestoredSnapshotFromSchedule(scheduleName)
diff --git a/velero-blockstore-openebs/main.go b/velero-blockstore-openebs/main.go
index 2502db67..27658b1f 100644
--- a/velero-blockstore-openebs/main.go
+++ b/velero-blockstore-openebs/main.go
@@ -17,7 +17,6 @@ limitations under the License.
 package main
 
 import (
-	snap "github.com/openebs/velero-plugin/pkg/snapshot"
 	zfssnap "github.com/openebs/velero-plugin/pkg/zfs/snapshot"
 	"github.com/sirupsen/logrus"
 	"github.com/spf13/pflag"
@@ -27,15 +26,10 @@ import (
 func main() {
 	veleroplugin.NewServer().
 		BindFlags(pflag.CommandLine).
-		RegisterVolumeSnapshotter("openebs.io/cstor-blockstore", openebsSnapPlugin).
 		RegisterVolumeSnapshotter("openebs.io/zfspv-blockstore", zfsSnapPlugin).
 		Serve()
 }
 
-func openebsSnapPlugin(logger logrus.FieldLogger) (interface{}, error) {
-	return &snap.BlockStore{Log: logger}, nil
-}
-
func zfsSnapPlugin(logger logrus.FieldLogger) (interface{}, error) {
 	return &zfssnap.BlockStore{Log: logger}, nil
 }
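Note (not part of the diff above): with the cstor snapshotter deleted, velero-blockstore-openebs/main.go registers only the openebs.io/zfspv-blockstore provider. As a rough reference for anyone re-adding a provider later, the sketch below shows the shape of a Velero VolumeSnapshotter that such a registration expects. The ExampleBlockStore type, its no-op method bodies, and the openebs.io/example-blockstore name are hypothetical, and the plugin-framework import path is assumed to match the one this plugin's main.go already aliases as veleroplugin.

```go
package main

import (
	"github.com/sirupsen/logrus"
	"github.com/spf13/pflag"
	"k8s.io/apimachinery/pkg/runtime"

	// assumed: the same framework package main.go imports as veleroplugin
	veleroplugin "github.com/vmware-tanzu/velero/pkg/plugin/framework"
)

// ExampleBlockStore is a hypothetical snapshotter; the method set below is the
// velero.VolumeSnapshotter contract that RegisterVolumeSnapshotter expects.
type ExampleBlockStore struct {
	Log logrus.FieldLogger
}

// Init receives the VolumeSnapshotLocation config (bucket, provider, region, ...).
func (b *ExampleBlockStore) Init(config map[string]string) error { return nil }

// CreateSnapshot snapshots volumeID and returns a snapshot identifier.
func (b *ExampleBlockStore) CreateSnapshot(volumeID, volumeAZ string, tags map[string]string) (string, error) {
	return "example-snap-id", nil
}

// DeleteSnapshot removes a previously created snapshot.
func (b *ExampleBlockStore) DeleteSnapshot(snapshotID string) error { return nil }

// CreateVolumeFromSnapshot provisions a new volume from snapshotID and returns its ID.
func (b *ExampleBlockStore) CreateVolumeFromSnapshot(snapshotID, volumeType, volumeAZ string, iops *int64) (string, error) {
	return "example-vol-id", nil
}

// GetVolumeInfo reports the volume type (and optional IOPS) for volumeID.
func (b *ExampleBlockStore) GetVolumeInfo(volumeID, volumeAZ string) (string, *int64, error) {
	return "example-type", nil, nil
}

// GetVolumeID and SetVolumeID map between a PersistentVolume object and the plugin's volume ID.
func (b *ExampleBlockStore) GetVolumeID(pv runtime.Unstructured) (string, error) { return "", nil }

func (b *ExampleBlockStore) SetVolumeID(pv runtime.Unstructured, volumeID string) (runtime.Unstructured, error) {
	return pv, nil
}

func main() {
	veleroplugin.NewServer().
		BindFlags(pflag.CommandLine).
		RegisterVolumeSnapshotter("openebs.io/example-blockstore", func(logger logrus.FieldLogger) (interface{}, error) {
			return &ExampleBlockStore{Log: logger}, nil
		}).
		Serve()
}
```

The registration name doubles as the provider value of a VolumeSnapshotLocation, which is why script/volumesnapshotlocation.yaml in this change now carries openebs.io/zfspv-blockstore.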