From 939971a3ea3f1bb1306cdd3292edc512a4fb4b99 Mon Sep 17 00:00:00 2001
From: Travis Nielsen
Date: Thu, 18 Nov 2021 14:21:10 -0700
Subject: [PATCH] build: set the release version to 1.6.11

With the patch release, we update the example manifest version to v1.6.11.

Signed-off-by: Travis Nielsen
---
 Documentation/cassandra.md                     |  2 +-
 Documentation/ceph-common-issues.md            |  6 ++--
 Documentation/ceph-monitoring.md               |  2 +-
 Documentation/ceph-quickstart.md               |  2 +-
 Documentation/ceph-toolbox.md                  |  6 ++--
 Documentation/ceph-upgrade.md                  | 30 +++++++++----------
 Documentation/nfs.md                           |  2 +-
 .../kubernetes/cassandra/operator.yaml         |  2 +-
 .../kubernetes/ceph/direct-mount.yaml          |  2 +-
 .../kubernetes/ceph/operator-openshift.yaml    |  2 +-
 .../examples/kubernetes/ceph/operator.yaml     |  2 +-
 .../examples/kubernetes/ceph/osd-purge.yaml    |  2 +-
 .../examples/kubernetes/ceph/toolbox-job.yaml  |  4 +--
 cluster/examples/kubernetes/ceph/toolbox.yaml  |  2 +-
 cluster/examples/kubernetes/nfs/operator.yaml  |  2 +-
 cluster/examples/kubernetes/nfs/webhook.yaml   |  2 +-
 16 files changed, 35 insertions(+), 35 deletions(-)

diff --git a/Documentation/cassandra.md b/Documentation/cassandra.md
index b5725aa46913..072f9465556b 100644
--- a/Documentation/cassandra.md
+++ b/Documentation/cassandra.md
@@ -21,7 +21,7 @@ To make sure you have a Kubernetes cluster that is ready for `Rook`, you can [fo
 First deploy the Rook Cassandra Operator using the following commands:
 
 ```console
-$ git clone --single-branch --branch v1.6.10 https://github.com/rook/rook.git
+$ git clone --single-branch --branch v1.6.11 https://github.com/rook/rook.git
 cd rook/cluster/examples/kubernetes/cassandra
 kubectl apply -f operator.yaml
 ```
diff --git a/Documentation/ceph-common-issues.md b/Documentation/ceph-common-issues.md
index fe5115459843..baae988ea3c1 100644
--- a/Documentation/ceph-common-issues.md
+++ b/Documentation/ceph-common-issues.md
@@ -995,7 +995,7 @@ You can see https://github.com/rook/rook/issues/7940 for more detailed informati
 ### Solution
 
 #### Recover from corruption (v1.6.0-v1.6.7)
-If you are using Rook v1.6, you must first update to v1.6.10 or higher to avoid further incidents of
+If you are using Rook v1.6, you must first update to v1.6.11 or higher to avoid further incidents of
 OSD corruption caused by these Atari partitions.
 
 An old workaround suggested using `deviceFilter: ^sd[a-z]+$`, but this still results in unexpected
@@ -1003,7 +1003,7 @@ partitions. Rook will merely stop creating new OSDs on the partitions. It does n
 issue that `ceph-volume` is unaware of the Atari partition problem. Users who used this
 workaround are still at risk for OSD failures in the future.
 
-To resolve the issue, immediately update to v1.6.10 or higher. After the update, no corruption should
+To resolve the issue, immediately update to v1.6.11 or higher. After the update, no corruption should
 occur on OSDs created in the future. Next, to get back to a healthy Ceph cluster state, focus on one
 corrupted disk at a time and [remove all OSDs on each corrupted disk](ceph-osd-mgmt.md#remove-an-osd)
 one disk at a time.
@@ -1024,4 +1024,4 @@ as well as a second corrupted disk `/dev/sde` with one unexpected partition (`/d
 5. Now repeat steps 1-4 for `/dev/sde` and `/dev/sde2`, and continue for any other corrupted disks.
 
 If your Rook-Ceph cluster does not have any critical data stored in it, it may be simpler to
-uninstall Rook completely and redeploy with v1.6.10 or higher.
+uninstall Rook completely and redeploy with v1.6.11 or higher.
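The recovery procedure in the `ceph-common-issues.md` hunk above is easier to follow with a concrete command sequence. Below is a minimal sketch, not part of the patch itself, assuming the example device names `/dev/sde` and `/dev/sde2` from the docs and a checkout of this repo; adjust both to your cluster.

```sh
# Inspect the suspect disk: unexpected partitions alongside the OSD are the
# symptom of the Atari partition issue described above (device name assumed).
lsblk /dev/sde

# Purge the OSDs backed by that disk using the purge job shipped in this repo.
# Fill in the --osd-ids placeholder in osd-purge.yaml first (e.g. "0" or "0,2").
kubectl create -f cluster/examples/kubernetes/ceph/osd-purge.yaml
```

Repeat the sequence one disk at a time, as the docs above emphasize, before returning each wiped disk to the cluster as a fresh OSD.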
diff --git a/Documentation/ceph-monitoring.md b/Documentation/ceph-monitoring.md
index a90f54c8769d..0f0690c0e292 100644
--- a/Documentation/ceph-monitoring.md
+++ b/Documentation/ceph-monitoring.md
@@ -38,7 +38,7 @@ With the Prometheus operator running, we can create a service monitor that will
 From the root of your locally cloned Rook repo, go to the monitoring directory:
 
 ```console
-$ git clone --single-branch --branch v1.6.10 https://github.com/rook/rook.git
+$ git clone --single-branch --branch v1.6.11 https://github.com/rook/rook.git
 cd rook/cluster/examples/kubernetes/ceph/monitoring
 ```
 
diff --git a/Documentation/ceph-quickstart.md b/Documentation/ceph-quickstart.md
index ffd54d8eca0d..23fb3093a6a7 100644
--- a/Documentation/ceph-quickstart.md
+++ b/Documentation/ceph-quickstart.md
@@ -50,7 +50,7 @@ If the `FSTYPE` field is not empty, there is a filesystem on top of the correspo
 If you're feeling lucky, a simple Rook cluster can be created with the following kubectl commands and [example yaml files](https://github.com/rook/rook/blob/{{ branchName }}/cluster/examples/kubernetes/ceph). For the more detailed install, skip to the next section to [deploy the Rook operator](#deploy-the-rook-operator).
 
 ```console
-$ git clone --single-branch --branch v1.6.10 https://github.com/rook/rook.git
+$ git clone --single-branch --branch v1.6.11 https://github.com/rook/rook.git
 cd rook/cluster/examples/kubernetes/ceph
 kubectl create -f crds.yaml -f common.yaml -f operator.yaml
 kubectl create -f cluster.yaml
diff --git a/Documentation/ceph-toolbox.md b/Documentation/ceph-toolbox.md
index 86b778b8f7c7..ee9981a26907 100644
--- a/Documentation/ceph-toolbox.md
+++ b/Documentation/ceph-toolbox.md
@@ -43,7 +43,7 @@ spec:
       dnsPolicy: ClusterFirstWithHostNet
       containers:
         - name: rook-ceph-tools
-          image: rook/ceph:v1.6.10
+          image: rook/ceph:v1.6.11
           command: ["/tini"]
           args: ["-g", "--", "/usr/local/bin/toolbox.sh"]
           imagePullPolicy: IfNotPresent
@@ -133,7 +133,7 @@ spec:
     spec:
       initContainers:
         - name: config-init
-          image: rook/ceph:v1.6.10
+          image: rook/ceph:v1.6.11
           command: ["/usr/local/bin/toolbox.sh"]
           args: ["--skip-watch"]
           imagePullPolicy: IfNotPresent
@@ -155,7 +155,7 @@ spec:
           mountPath: /etc/rook
       containers:
         - name: script
-          image: rook/ceph:v1.6.10
+          image: rook/ceph:v1.6.11
           volumeMounts:
             - mountPath: /etc/ceph
               name: ceph-config
diff --git a/Documentation/ceph-upgrade.md b/Documentation/ceph-upgrade.md
index 58d2ff83cac8..ba13ea754f40 100644
--- a/Documentation/ceph-upgrade.md
+++ b/Documentation/ceph-upgrade.md
@@ -52,12 +52,12 @@ With this upgrade guide, there are a few notes to consider:
 Unless otherwise noted due to extenuating requirements, upgrades from one patch release of Rook to
 another are as simple as updating the common resources and the image of the Rook operator. For
-example, when Rook v1.6.10 is released, the process of updating from v1.6.0 is as simple as running
+example, when Rook v1.6.11 is released, the process of updating from v1.6.0 is as simple as running
 the following:
 
 First get the latest common resources manifests that contain the latest changes for Rook v1.6.
 
 ```sh
-git clone --single-branch --depth=1 --branch v1.6.10 https://github.com/rook/rook.git
+git clone --single-branch --depth=1 --branch v1.6.11 https://github.com/rook/rook.git
 cd rook/cluster/examples/kubernetes/ceph
 ```
@@ -68,7 +68,7 @@ section for instructions on how to change the default namespaces in `common.yaml
 Then apply the latest changes from v1.6 and update the Rook Operator image.
 
 ```console
 kubectl apply -f common.yaml -f crds.yaml
-kubectl -n rook-ceph set image deploy/rook-ceph-operator rook-ceph-operator=rook/ceph:v1.6.10
+kubectl -n rook-ceph set image deploy/rook-ceph-operator rook-ceph-operator=rook/ceph:v1.6.11
 ```
 
 As exemplified above, it is a good practice to update Rook-Ceph common resources from the example
@@ -248,7 +248,7 @@ Any pod that is using a Rook volume should also remain healthy:
 ## Rook Operator Upgrade Process
 
 In the examples given in this guide, we will be upgrading a live Rook cluster running `v1.5.9` to
-the version `v1.6.10`. This upgrade should work from any official patch release of Rook v1.5 to any
+the version `v1.6.11`. This upgrade should work from any official patch release of Rook v1.5 to any
 official patch release of v1.6.
 
 **Rook releases from `master` are expressly unsupported.** It is strongly recommended that you use
@@ -282,7 +282,7 @@ needed by the Operator. Also update the Custom Resource Definitions (CRDs).
 First get the latest common resources manifests that contain the latest changes for Rook v1.6.
 
 ```sh
-git clone --single-branch --depth=1 --branch v1.6.10 https://github.com/rook/rook.git
+git clone --single-branch --depth=1 --branch v1.6.11 https://github.com/rook/rook.git
 cd rook/cluster/examples/kubernetes/ceph
 ```
@@ -338,7 +338,7 @@ The largest portion of the upgrade is triggered when the operator's image is upd
 When the operator is updated, it will proceed to update all of the Ceph daemons.
 
 ```sh
-kubectl -n $ROOK_OPERATOR_NAMESPACE set image deploy/rook-ceph-operator rook-ceph-operator=rook/ceph:v1.6.10
+kubectl -n $ROOK_OPERATOR_NAMESPACE set image deploy/rook-ceph-operator rook-ceph-operator=rook/ceph:v1.6.11
 ```
 
 ## 4. Wait for the upgrade to complete
@@ -354,17 +354,17 @@ watch --exec kubectl -n $ROOK_CLUSTER_NAMESPACE get deployments -l rook_cluster=
 ```
 
 As an example, this cluster is midway through updating the OSDs from v1.5 to v1.6. When all
-deployments report `1/1/1` availability and `rook-version=v1.6.10`, the Ceph cluster's core
+deployments report `1/1/1` availability and `rook-version=v1.6.11`, the Ceph cluster's core
 components are fully updated.
 
 >```
 >Every 2.0s: kubectl -n rook-ceph get deployment -o j...
 >
->rook-ceph-mgr-a         req/upd/avl: 1/1/1      rook-version=v1.6.10
->rook-ceph-mon-a         req/upd/avl: 1/1/1      rook-version=v1.6.10
->rook-ceph-mon-b         req/upd/avl: 1/1/1      rook-version=v1.6.10
->rook-ceph-mon-c         req/upd/avl: 1/1/1      rook-version=v1.6.10
->rook-ceph-osd-0         req/upd/avl: 1//        rook-version=v1.6.10
+>rook-ceph-mgr-a         req/upd/avl: 1/1/1      rook-version=v1.6.11
+>rook-ceph-mon-a         req/upd/avl: 1/1/1      rook-version=v1.6.11
+>rook-ceph-mon-b         req/upd/avl: 1/1/1      rook-version=v1.6.11
+>rook-ceph-mon-c         req/upd/avl: 1/1/1      rook-version=v1.6.11
+>rook-ceph-osd-0         req/upd/avl: 1//        rook-version=v1.6.11
 >rook-ceph-osd-1         req/upd/avl: 1/1/1      rook-version=v1.5.9
 >rook-ceph-osd-2         req/upd/avl: 1/1/1      rook-version=v1.5.9
 >```
@@ -376,14 +376,14 @@ An easy check to see if the upgrade is totally finished is to check that there i
 # kubectl -n $ROOK_CLUSTER_NAMESPACE get deployment -l rook_cluster=$ROOK_CLUSTER_NAMESPACE -o jsonpath='{range .items[*]}{"rook-version="}{.metadata.labels.rook-version}{"\n"}{end}' | sort | uniq
 This cluster is not yet finished:
   rook-version=v1.5.9
-  rook-version=v1.6.10
+  rook-version=v1.6.11
 This cluster is finished:
-  rook-version=v1.6.10
+  rook-version=v1.6.11
 ```
 
 ## 5. Verify the updated cluster
 
-At this point, your Rook operator should be running version `rook/ceph:v1.6.10`.
+At this point, your Rook operator should be running version `rook/ceph:v1.6.11`.
 
 Verify the Ceph cluster's health using the [health verification section](#health-verification).
diff --git a/Documentation/nfs.md b/Documentation/nfs.md
index 422613ac0088..c43e9558bf12 100644
--- a/Documentation/nfs.md
+++ b/Documentation/nfs.md
@@ -23,7 +23,7 @@ You can read further about the details and limitations of these volumes in the [
 First deploy the Rook NFS operator using the following commands:
 
 ```console
-$ git clone --single-branch --branch v1.6.10 https://github.com/rook/rook.git
+$ git clone --single-branch --branch v1.6.11 https://github.com/rook/rook.git
 cd rook/cluster/examples/kubernetes/nfs
 kubectl create -f common.yaml
 kubectl create -f operator.yaml
diff --git a/cluster/examples/kubernetes/cassandra/operator.yaml b/cluster/examples/kubernetes/cassandra/operator.yaml
index 72cc83c293ad..25cdfb38e1c7 100644
--- a/cluster/examples/kubernetes/cassandra/operator.yaml
+++ b/cluster/examples/kubernetes/cassandra/operator.yaml
@@ -185,7 +185,7 @@ spec:
       serviceAccountName: rook-cassandra-operator
       containers:
         - name: rook-cassandra-operator
-          image: rook/cassandra:v1.6.10
+          image: rook/cassandra:v1.6.11
           imagePullPolicy: "Always"
           args: ["cassandra", "operator"]
           env:
diff --git a/cluster/examples/kubernetes/ceph/direct-mount.yaml b/cluster/examples/kubernetes/ceph/direct-mount.yaml
index 6f4258c08412..4cf0ef8027a3 100644
--- a/cluster/examples/kubernetes/ceph/direct-mount.yaml
+++ b/cluster/examples/kubernetes/ceph/direct-mount.yaml
@@ -18,7 +18,7 @@ spec:
       dnsPolicy: ClusterFirstWithHostNet
       containers:
         - name: rook-direct-mount
-          image: rook/ceph:v1.6.10
+          image: rook/ceph:v1.6.11
           command: ["/tini"]
           args: ["-g", "--", "/usr/local/bin/toolbox.sh"]
           imagePullPolicy: IfNotPresent
diff --git a/cluster/examples/kubernetes/ceph/operator-openshift.yaml b/cluster/examples/kubernetes/ceph/operator-openshift.yaml
index 27a719e06760..3611da42524c 100644
--- a/cluster/examples/kubernetes/ceph/operator-openshift.yaml
+++ b/cluster/examples/kubernetes/ceph/operator-openshift.yaml
@@ -439,7 +439,7 @@ spec:
       serviceAccountName: rook-ceph-system
       containers:
         - name: rook-ceph-operator
-          image: rook/ceph:v1.6.10
+          image: rook/ceph:v1.6.11
           args: ["ceph", "operator"]
           volumeMounts:
             - mountPath: /var/lib/rook
diff --git a/cluster/examples/kubernetes/ceph/operator.yaml b/cluster/examples/kubernetes/ceph/operator.yaml
index c4b408c71e2a..adfaf864ea21 100644
--- a/cluster/examples/kubernetes/ceph/operator.yaml
+++ b/cluster/examples/kubernetes/ceph/operator.yaml
@@ -362,7 +362,7 @@ spec:
       serviceAccountName: rook-ceph-system
       containers:
         - name: rook-ceph-operator
-          image: rook/ceph:v1.6.10
+          image: rook/ceph:v1.6.11
           args: ["ceph", "operator"]
           volumeMounts:
             - mountPath: /var/lib/rook
diff --git a/cluster/examples/kubernetes/ceph/osd-purge.yaml b/cluster/examples/kubernetes/ceph/osd-purge.yaml
index 9b4491b8f89c..c70a0109ed41 100644
--- a/cluster/examples/kubernetes/ceph/osd-purge.yaml
+++ b/cluster/examples/kubernetes/ceph/osd-purge.yaml
@@ -25,7 +25,7 @@ spec:
       serviceAccountName: rook-ceph-system
       containers:
         - name: osd-removal
-          image: rook/ceph:v1.6.10
+          image: rook/ceph:v1.6.11
           # TODO: Insert the OSD ID in the last parameter that is to be removed
           # The OSD IDs are a comma-separated list. For example: "0" or "0,2".
args: ["ceph", "osd", "remove", "--osd-ids", ""] diff --git a/cluster/examples/kubernetes/ceph/toolbox-job.yaml b/cluster/examples/kubernetes/ceph/toolbox-job.yaml index 1ef9ccae872a..d62f6511e250 100644 --- a/cluster/examples/kubernetes/ceph/toolbox-job.yaml +++ b/cluster/examples/kubernetes/ceph/toolbox-job.yaml @@ -10,7 +10,7 @@ spec: spec: initContainers: - name: config-init - image: rook/ceph:v1.6.10 + image: rook/ceph:v1.6.11 command: ["/usr/local/bin/toolbox.sh"] args: ["--skip-watch"] imagePullPolicy: IfNotPresent @@ -32,7 +32,7 @@ spec: mountPath: /etc/rook containers: - name: script - image: rook/ceph:v1.6.10 + image: rook/ceph:v1.6.11 volumeMounts: - mountPath: /etc/ceph name: ceph-config diff --git a/cluster/examples/kubernetes/ceph/toolbox.yaml b/cluster/examples/kubernetes/ceph/toolbox.yaml index 477e3a590271..7da0933a31be 100644 --- a/cluster/examples/kubernetes/ceph/toolbox.yaml +++ b/cluster/examples/kubernetes/ceph/toolbox.yaml @@ -18,7 +18,7 @@ spec: dnsPolicy: ClusterFirstWithHostNet containers: - name: rook-ceph-tools - image: rook/ceph:v1.6.10 + image: rook/ceph:v1.6.11 command: ["/tini"] args: ["-g", "--", "/usr/local/bin/toolbox.sh"] imagePullPolicy: IfNotPresent diff --git a/cluster/examples/kubernetes/nfs/operator.yaml b/cluster/examples/kubernetes/nfs/operator.yaml index a42447eb69b9..1c1f47befd37 100644 --- a/cluster/examples/kubernetes/nfs/operator.yaml +++ b/cluster/examples/kubernetes/nfs/operator.yaml @@ -117,7 +117,7 @@ spec: serviceAccountName: rook-nfs-operator containers: - name: rook-nfs-operator - image: rook/nfs:v1.6.10 + image: rook/nfs:v1.6.11 imagePullPolicy: IfNotPresent args: ["nfs", "operator"] env: diff --git a/cluster/examples/kubernetes/nfs/webhook.yaml b/cluster/examples/kubernetes/nfs/webhook.yaml index 913a759a63a9..3e3ffcae8e42 100644 --- a/cluster/examples/kubernetes/nfs/webhook.yaml +++ b/cluster/examples/kubernetes/nfs/webhook.yaml @@ -111,7 +111,7 @@ spec: spec: containers: - name: rook-nfs-webhook - image: rook/nfs:v1.6.10 + image: rook/nfs:v1.6.11 imagePullPolicy: IfNotPresent args: ["nfs", "webhook"] ports: