Skip to content

Commit

Permalink
opt: skip transfer leader if only one PD; update some CRD comments (#…
Browse files Browse the repository at this point in the history
  • Loading branch information
csuzhangxc authored Jan 23, 2025
1 parent c75ea80 commit 3d2a1b5
Show file tree
Hide file tree
Showing 7 changed files with 101 additions and 13 deletions.
8 changes: 4 additions & 4 deletions apis/core/v1alpha1/cluster_types.go
Original file line number Diff line number Diff line change
Expand Up @@ -113,10 +113,10 @@ type TLSCluster struct {
// - use the K8s built-in certificate signing system signed certificates: https://kubernetes.io/docs/tasks/tls/managing-tls-in-a-cluster/
// - or use cert-manager signed certificates: https://cert-manager.io/
// 2. Create one secret object for one component group which contains the certificates created above.
// The name of this Secret must be: <clusterName>-<groupName>-cluster-secret.
// For PD: kubectl create secret generic <clusterName>-<pd-groupName>-cluster-secret --namespace=<namespace> --from-file=tls.crt=<path/to/tls.crt> --from-file=tls.key=<path/to/tls.key> --from-file=ca.crt=<path/to/ca.crt>
// For TiKV: kubectl create secret generic <clusterName>-<tikv-groupName>-cluster-secret --namespace=<namespace> --from-file=tls.crt=<path/to/tls.crt> --from-file=tls.key=<path/to/tls.key> --from-file=ca.crt=<path/to/ca.crt>
// For TiDB: kubectl create secret generic <clusterName>-<tidb-groupName>-cluster-secret --namespace=<namespace> --from-file=tls.crt=<path/to/tls.crt> --from-file=tls.key=<path/to/tls.key> --from-file=ca.crt=<path/to/ca.crt>
// The name of this Secret must be: <groupName>-<componentName>-cluster-secret.
// For PD: kubectl create secret generic <groupName>-pd-cluster-secret --namespace=<namespace> --from-file=tls.crt=<path/to/tls.crt> --from-file=tls.key=<path/to/tls.key> --from-file=ca.crt=<path/to/ca.crt>
// For TiKV: kubectl create secret generic <groupName>-tikv-cluster-secret --namespace=<namespace> --from-file=tls.crt=<path/to/tls.crt> --from-file=tls.key=<path/to/tls.key> --from-file=ca.crt=<path/to/ca.crt>
// For TiDB: kubectl create secret generic <groupName>-tidb-cluster-secret --namespace=<namespace> --from-file=tls.crt=<path/to/tls.crt> --from-file=tls.key=<path/to/tls.key> --from-file=ca.crt=<path/to/ca.crt>
// For Client: kubectl create secret generic <clusterName>-cluster-client-secret --namespace=<namespace> --from-file=tls.crt=<path/to/tls.crt> --from-file=tls.key=<path/to/tls.key> --from-file=ca.crt=<path/to/ca.crt>
// Same for other components.
// +optional
Expand Down
5 changes: 3 additions & 2 deletions apis/core/v1alpha1/pd_types.go
Original file line number Diff line number Diff line change
Expand Up @@ -278,8 +278,9 @@ type PDGroupSpec struct {
Replicas *int32 `json:"replicas"`
Version string `json:"version"`

// Bootstrapped means that pd cluster has been bootstrapped
// It's no need to initialize a new cluster
// Bootstrapped means that pd cluster has been bootstrapped,
// and there is no need to initialize a new cluster.
// In other words, this PD group will just join an existing cluster.
// Normally, this field is automatically changed by operator.
// If it's true, it cannot be set back to false, for security reasons.
Bootstrapped bool `json:"bootstrapped,omitempty"`
Expand Down
8 changes: 4 additions & 4 deletions manifests/crd/core.pingcap.com_clusters.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -113,10 +113,10 @@ spec:
- use the K8s built-in certificate signing system signed certificates: https://kubernetes.io/docs/tasks/tls/managing-tls-in-a-cluster/
- or use cert-manager signed certificates: https://cert-manager.io/
2. Create one secret object for one component group which contains the certificates created above.
The name of this Secret must be: <clusterName>-<groupName>-cluster-secret.
For PD: kubectl create secret generic <clusterName>-<pd-groupName>-cluster-secret --namespace=<namespace> --from-file=tls.crt=<path/to/tls.crt> --from-file=tls.key=<path/to/tls.key> --from-file=ca.crt=<path/to/ca.crt>
For TiKV: kubectl create secret generic <clusterName>-<tikv-groupName>-cluster-secret --namespace=<namespace> --from-file=tls.crt=<path/to/tls.crt> --from-file=tls.key=<path/to/tls.key> --from-file=ca.crt=<path/to/ca.crt>
For TiDB: kubectl create secret generic <clusterName>-<tidb-groupName>-cluster-secret --namespace=<namespace> --from-file=tls.crt=<path/to/tls.crt> --from-file=tls.key=<path/to/tls.key> --from-file=ca.crt=<path/to/ca.crt>
The name of this Secret must be: <groupName>-<componentName>-cluster-secret.
For PD: kubectl create secret generic <groupName>-pd-cluster-secret --namespace=<namespace> --from-file=tls.crt=<path/to/tls.crt> --from-file=tls.key=<path/to/tls.key> --from-file=ca.crt=<path/to/ca.crt>
For TiKV: kubectl create secret generic <groupName>-tikv-cluster-secret --namespace=<namespace> --from-file=tls.crt=<path/to/tls.crt> --from-file=tls.key=<path/to/tls.key> --from-file=ca.crt=<path/to/ca.crt>
For TiDB: kubectl create secret generic <groupName>-tidb-cluster-secret --namespace=<namespace> --from-file=tls.crt=<path/to/tls.crt> --from-file=tls.key=<path/to/tls.key> --from-file=ca.crt=<path/to/ca.crt>
For Client: kubectl create secret generic <clusterName>-cluster-client-secret --namespace=<namespace> --from-file=tls.crt=<path/to/tls.crt> --from-file=tls.key=<path/to/tls.key> --from-file=ca.crt=<path/to/ca.crt>
Same for other components.
type: boolean
Expand Down
5 changes: 3 additions & 2 deletions manifests/crd/core.pingcap.com_pdgroups.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -53,8 +53,9 @@ spec:
properties:
bootstrapped:
description: |-
Bootstrapped means that pd cluster has been bootstrapped
It's no need to initialize a new cluster
Bootstrapped means that pd cluster has been bootstrapped,
and there is no need to initialize a new cluster.
In other words, this PD group will just join an existing cluster.
Normally, this field is automatically changed by operator.
If it's true, it cannot be set back to false, for security reasons.
type: boolean
Expand Down
5 changes: 5 additions & 0 deletions pkg/controllers/pd/tasks/pod.go
Original file line number Diff line number Diff line change
Expand Up @@ -112,6 +112,11 @@ func preDeleteCheck(
) (bool, error) {
// TODO: add quorum check. After stopping this pd, quorum should not be lost

if len(peers) == 1 {
logger.Info("no need to transfer leader because there is only one pd")
return false, nil
}

if isLeader {
peer := LongestHealthPeer(pd, peers)
if peer == "" {
Expand Down
51 changes: 51 additions & 0 deletions pkg/controllers/pd/tasks/pod_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -216,6 +216,17 @@ func TestTaskPod(t *testing.T) {
}
return obj
}),
fake.FakeObj("aaa-yyy", func(obj *v1alpha1.PD) *v1alpha1.PD {
obj.Spec.Version = fakeVersion
obj.Status.Conditions = []metav1.Condition{
{
Type: v1alpha1.CondHealth,
Status: metav1.ConditionFalse,
LastTransitionTime: metav1.Now(),
},
}
return obj
}),
},
},
Healthy: true,
Expand All @@ -224,6 +235,46 @@ func TestTaskPod(t *testing.T) {

expectedStatus: task.SFail,
},
{
desc: "pod spec changed, pod is healthy, pod is leader, only one pd",
state: &ReconcileContext{
State: &state{
pd: fake.FakeObj("aaa-xxx", func(obj *v1alpha1.PD) *v1alpha1.PD {
obj.Spec.Version = fakeVersion
obj.Status.Conditions = []metav1.Condition{
{
Type: v1alpha1.CondHealth,
Status: metav1.ConditionTrue,
},
}
return obj
}),
cluster: fake.FakeObj[v1alpha1.Cluster]("aaa"),
pod: fake.FakeObj("aaa-pd-xxx", func(obj *corev1.Pod) *corev1.Pod {
return obj
}),
pds: []*v1alpha1.PD{
fake.FakeObj("aaa-xxx", func(obj *v1alpha1.PD) *v1alpha1.PD {
obj.Spec.Version = fakeVersion
obj.Status.Conditions = []metav1.Condition{
{
Type: v1alpha1.CondHealth,
Status: metav1.ConditionTrue,
LastTransitionTime: metav1.Now(),
},
}
return obj
}),
},
},
Healthy: true,
IsLeader: true,
},

expectUpdatedPod: false,
expectedPodIsTerminating: true,
expectedStatus: task.SWait,
},
{
desc: "pod spec changed, pod is healthy, pod is not leader",
state: &ReconcileContext{
Expand Down
32 changes: 31 additions & 1 deletion tests/e2e/pd/pd.go
Original file line number Diff line number Diff line change
Expand Up @@ -99,7 +99,7 @@ var _ = ginkgo.Describe("PD", label.PD, func() {
})

ginkgo.Context("Update", label.P0, label.Update, func() {
ginkgo.It("support rolling update PD by change config file", func(ctx context.Context) {
ginkgo.It("support rolling update PD by change config file with 3 replicas", func(ctx context.Context) {
pdg := data.NewPDGroup(
f.Namespace.Name,
data.WithReplicas[*runtime.PDGroup](3),
Expand Down Expand Up @@ -128,6 +128,36 @@ var _ = ginkgo.Describe("PD", label.PD, func() {
cancel()
<-ch
})

ginkgo.It("support update PD by change config file with 1 replica", func(ctx context.Context) {
pdg := data.NewPDGroup(
f.Namespace.Name,
data.WithReplicas[*runtime.PDGroup](1),
)

ginkgo.By("Create PDGroup")
f.Must(f.Client.Create(ctx, pdg))
f.WaitForPDGroupReady(ctx, pdg)

patch := client.MergeFrom(pdg.DeepCopy())
pdg.Spec.Template.Spec.Config = `log.level = 'warn'`

nctx, cancel := context.WithCancel(ctx)
ch := make(chan struct{})
go func() {
defer close(ch)
defer ginkgo.GinkgoRecover()
f.Must(waiter.WaitPodsRollingUpdateOnce(nctx, f.Client, runtime.FromPDGroup(pdg), waiter.LongTaskTimeout))
}()

changeTime := time.Now()
ginkgo.By("Change config of the PDGroup")
f.Must(f.Client.Patch(ctx, pdg, patch))
f.Must(waiter.WaitForPodsRecreated(ctx, f.Client, runtime.FromPDGroup(pdg), changeTime, waiter.LongTaskTimeout))
f.WaitForPDGroupReady(ctx, pdg)
cancel()
<-ch
})
})

ginkgo.Context("Suspend", label.P0, label.Suspend, func() {
Expand Down

0 comments on commit 3d2a1b5

Please sign in to comment.