From 676660f4a41f388881e507d32cceea6f2cb8a0e2 Mon Sep 17 00:00:00 2001 From: xfwduke Date: Thu, 16 Jan 2025 16:09:06 +0800 Subject: [PATCH] =?UTF-8?q?feat(mysql):=20=E9=83=A8=E7=BD=B2=E5=91=A8?= =?UTF-8?q?=E8=BE=B9=E5=AD=90=E6=B5=81=E7=A8=8B=20#7639?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- .../subcmd/mysqlcmd/deploy_mysql_crond.go | 213 +++---- .../subcmd/mysqlcmd/install_backup_client.go | 181 +++--- .../subcmd/mysqlcmd/install_checkusm.go | 182 +++--- .../subcmd/mysqlcmd/install_dba_toolkit.go | 178 +++--- .../subcmd/mysqlcmd/install_monitor.go | 188 +++--- .../subcmd/mysqlcmd/install_new_dbbackup.go | 212 +++---- .../install_new_mysql_rotatebinlog.go | 192 +++--- .../internal/subcmd/mysqlcmd/mysqlcmd.go | 17 +- .../prepare_peripheraltools_binary.go | 69 ++ .../mysqlcmd/push_backup_client_config.go | 136 ++-- ...hecksum_config.go => push_checksum_cnf.go} | 0 ...bbackup_config.go => push_dbbackup_cnf.go} | 0 .../subcmd/mysqlcmd/push_exporter_cnf.go | 68 ++ ..._monitor_config.go => push_monitor_cnf.go} | 4 - ...rond_config.go => push_mysql_crond_cnf.go} | 0 ...nfig.go => push_mysql_rotatebinlog_cnf.go} | 0 .../internal/subcmd/proxycmd/cmd.go | 1 + .../subcmd/proxycmd/standardize_proxy.go | 72 +++ .../pkg/components/crontab/clear_crontab.go | 1 - .../components/mysql/clear_instance_config.go | 12 +- .../components/mysql/install_dba_toolkit.go | 138 ++-- .../pkg/components/mysql/install_mysql.go | 19 + .../mysql_proxy/standardize_proxy.go | 80 +++ .../peripheraltools/checksum/add_crond.go | 5 +- .../peripheraltools/checksum/binary.go | 5 +- .../peripheraltools/checksum/example.go | 34 +- .../peripheraltools/checksum/init.go | 27 +- .../checksum/runtime_config.go | 26 +- .../peripheraltools/crond/binary.go | 9 +- .../peripheraltools/crond/example.go | 7 +- .../components/peripheraltools/crond/init.go | 2 + .../peripheraltools/dba_toolkit/init.go | 30 + .../peripheraltools/dbbackup/add_crond.go | 38 +- 
.../peripheraltools/dbbackup/backup_dir.go | 8 - .../peripheraltools/dbbackup/binary.go | 28 +- .../peripheraltools/dbbackup/example.go | 28 +- .../peripheraltools/dbbackup/init.go | 83 +-- .../peripheraltools/dbbackup/legacy.go | 7 +- .../peripheraltools/dbbackup/render_data.go | 43 +- .../dbbackup/runtime_config.go | 13 +- .../peripheraltools/exporter/init.go | 111 ++++ .../peripheraltools/monitor/add_crond.go | 4 +- .../peripheraltools/monitor/binary.go | 9 +- .../peripheraltools/monitor/example.go | 24 +- .../monitor/exporter_config.go | 133 ++-- .../peripheraltools/monitor/init.go | 38 +- .../peripheraltools/monitor/items_config.go | 10 +- .../peripheraltools/monitor/runtime_config.go | 87 +-- .../peripheraltools/prepare_binary.go | 81 +++ .../peripheraltools/rotatebinlog/binary.go | 18 +- .../peripheraltools/rotatebinlog/example.go | 20 +- .../peripheraltools/rotatebinlog/init.go | 46 +- .../pkg/checker/summary.go | 5 +- .../api/cluster/tendbha/decommission.py | 10 +- dbm-ui/backend/db_meta/enums/type_maps.py | 2 +- .../migrations/0043_auto_20241106_1704.py | 42 ++ .../migrations/0046_merge_20250117_0804.py | 13 + .../reverse_api/mysql/impl/__init__.py | 1 + .../mysql/impl/list_instance_info.py | 6 + .../impl/list_instance_monitor_config.py | 81 +++ .../db_proxy/reverse_api/mysql/views.py | 19 +- .../db_services/meta_import/serializers.py | 34 +- .../backend/db_services/meta_import/views.py | 232 +++---- dbm-ui/backend/flow/consts.py | 6 +- .../mysql/deploy_peripheraltools/__init__.py} | 20 - .../deploy_peripheraltools/cc_trans_module.py | 97 +++ .../deploy_peripheraltools/collect_sysinfo.py | 37 ++ .../mysql/deploy_peripheraltools/departs.py | 42 ++ .../mysql/deploy_peripheraltools/flow.py | 59 ++ .../mysql/deploy_peripheraltools/group_ips.py | 46 ++ .../instance_standardize.py | 83 +++ .../prepare_departs_binary.py | 128 ++++ .../deploy_peripheraltools/push_config.py | 306 +++++++++ .../mysql/deploy_peripheraltools/subflow.py | 152 +++++ 
.../deploy_peripheraltools/trans_files.py | 98 +++ .../bamboo/scene/mysql/mysql_ha_apply_flow.py | 55 +- .../scene/mysql/mysql_ha_standardize_flow.py | 379 ----------- .../scene/mysql/mysql_proxy_cluster_add.py | 60 +- .../scene/mysql/mysql_proxy_cluster_switch.py | 40 +- .../mysql/mysql_push_peripheral_config.py | 294 --------- .../spider/spider_cluster_standardize_flow.py | 377 ----------- .../bamboo/scene/tendbsingle/standardize.py | 271 -------- .../backend/flow/engine/controller/mysql.py | 19 +- .../backend/flow/engine/controller/spider.py | 7 +- .../flow/engine/controller/tendbsingle.py | 7 +- .../common/create_random_job_user.py | 1 - .../mysql/cluster_standardize_trans_module.py | 6 +- ...generate_mysql_cluster_standardize_flow.py | 57 ++ dbm-ui/backend/flow/urls.py | 3 +- .../flow/utils/mysql/mysql_act_playload.py | 591 ++++++++++-------- .../flow/views/spider_cluster_standardize.py | 38 -- .../flow/views/tendb_ha_standardize.py | 38 -- .../mysql/mysql_cluster_standardize.py | 46 ++ .../builders/mysql/mysql_ha_standardize.py | 75 --- .../mysql/mysql_push_peripheral_config.py | 40 -- .../spider/mysql_spider_standardize.py | 74 --- .../builders/tendbsingle/standardize.py | 53 -- dbm-ui/backend/ticket/constants.py | 6 +- 98 files changed, 3603 insertions(+), 3390 deletions(-) create mode 100644 dbm-services/mysql/db-tools/dbactuator/internal/subcmd/mysqlcmd/prepare_peripheraltools_binary.go rename dbm-services/mysql/db-tools/dbactuator/internal/subcmd/mysqlcmd/{push_checksum_config.go => push_checksum_cnf.go} (100%) rename dbm-services/mysql/db-tools/dbactuator/internal/subcmd/mysqlcmd/{push_dbbackup_config.go => push_dbbackup_cnf.go} (100%) create mode 100644 dbm-services/mysql/db-tools/dbactuator/internal/subcmd/mysqlcmd/push_exporter_cnf.go rename dbm-services/mysql/db-tools/dbactuator/internal/subcmd/mysqlcmd/{push_monitor_config.go => push_monitor_cnf.go} (95%) rename 
dbm-services/mysql/db-tools/dbactuator/internal/subcmd/mysqlcmd/{push_mysql_crond_config.go => push_mysql_crond_cnf.go} (100%) rename dbm-services/mysql/db-tools/dbactuator/internal/subcmd/mysqlcmd/{push_mysql_rotatebinlog_config.go => push_mysql_rotatebinlog_cnf.go} (100%) create mode 100644 dbm-services/mysql/db-tools/dbactuator/internal/subcmd/proxycmd/standardize_proxy.go create mode 100644 dbm-services/mysql/db-tools/dbactuator/pkg/components/mysql_proxy/standardize_proxy.go create mode 100644 dbm-services/mysql/db-tools/dbactuator/pkg/components/peripheraltools/dba_toolkit/init.go create mode 100644 dbm-services/mysql/db-tools/dbactuator/pkg/components/peripheraltools/exporter/init.go create mode 100644 dbm-services/mysql/db-tools/dbactuator/pkg/components/peripheraltools/prepare_binary.go create mode 100644 dbm-ui/backend/db_meta/migrations/0043_auto_20241106_1704.py create mode 100644 dbm-ui/backend/db_meta/migrations/0046_merge_20250117_0804.py create mode 100644 dbm-ui/backend/db_proxy/reverse_api/mysql/impl/list_instance_monitor_config.py rename dbm-ui/backend/flow/{views/mysql_push_peripheral_config.py => engine/bamboo/scene/mysql/deploy_peripheraltools/__init__.py} (54%) create mode 100644 dbm-ui/backend/flow/engine/bamboo/scene/mysql/deploy_peripheraltools/cc_trans_module.py create mode 100644 dbm-ui/backend/flow/engine/bamboo/scene/mysql/deploy_peripheraltools/collect_sysinfo.py create mode 100644 dbm-ui/backend/flow/engine/bamboo/scene/mysql/deploy_peripheraltools/departs.py create mode 100644 dbm-ui/backend/flow/engine/bamboo/scene/mysql/deploy_peripheraltools/flow.py create mode 100644 dbm-ui/backend/flow/engine/bamboo/scene/mysql/deploy_peripheraltools/group_ips.py create mode 100644 dbm-ui/backend/flow/engine/bamboo/scene/mysql/deploy_peripheraltools/instance_standardize.py create mode 100644 dbm-ui/backend/flow/engine/bamboo/scene/mysql/deploy_peripheraltools/prepare_departs_binary.py create mode 100644 
dbm-ui/backend/flow/engine/bamboo/scene/mysql/deploy_peripheraltools/push_config.py create mode 100644 dbm-ui/backend/flow/engine/bamboo/scene/mysql/deploy_peripheraltools/subflow.py create mode 100644 dbm-ui/backend/flow/engine/bamboo/scene/mysql/deploy_peripheraltools/trans_files.py delete mode 100644 dbm-ui/backend/flow/engine/bamboo/scene/mysql/mysql_ha_standardize_flow.py delete mode 100644 dbm-ui/backend/flow/engine/bamboo/scene/mysql/mysql_push_peripheral_config.py delete mode 100644 dbm-ui/backend/flow/engine/bamboo/scene/spider/spider_cluster_standardize_flow.py delete mode 100644 dbm-ui/backend/flow/engine/bamboo/scene/tendbsingle/standardize.py create mode 100644 dbm-ui/backend/flow/plugins/components/collections/mysql/generate_mysql_cluster_standardize_flow.py delete mode 100644 dbm-ui/backend/flow/views/spider_cluster_standardize.py delete mode 100644 dbm-ui/backend/flow/views/tendb_ha_standardize.py create mode 100644 dbm-ui/backend/ticket/builders/mysql/mysql_cluster_standardize.py delete mode 100644 dbm-ui/backend/ticket/builders/mysql/mysql_ha_standardize.py delete mode 100644 dbm-ui/backend/ticket/builders/mysql/mysql_push_peripheral_config.py delete mode 100644 dbm-ui/backend/ticket/builders/spider/mysql_spider_standardize.py delete mode 100644 dbm-ui/backend/ticket/builders/tendbsingle/standardize.py diff --git a/dbm-services/mysql/db-tools/dbactuator/internal/subcmd/mysqlcmd/deploy_mysql_crond.go b/dbm-services/mysql/db-tools/dbactuator/internal/subcmd/mysqlcmd/deploy_mysql_crond.go index 05a49353e9..ed3fcad793 100644 --- a/dbm-services/mysql/db-tools/dbactuator/internal/subcmd/mysqlcmd/deploy_mysql_crond.go +++ b/dbm-services/mysql/db-tools/dbactuator/internal/subcmd/mysqlcmd/deploy_mysql_crond.go @@ -1,108 +1,109 @@ package mysqlcmd -import ( - "dbm-services/mysql/db-tools/dbactuator/pkg/components/peripheraltools/crond" - "fmt" - - "dbm-services/common/go-pubpkg/logger" - "dbm-services/mysql/db-tools/dbactuator/internal/subcmd" - 
"dbm-services/mysql/db-tools/dbactuator/pkg/util" - - "github.com/spf13/cobra" -) - -// DeployMysqlCrondAct 部署 -type DeployMysqlCrondAct struct { - *subcmd.BaseOptions - Service crond.MySQLCrondComp -} - -// DeployMySQLCrond 命令常量 -const DeployMySQLCrond = "deploy-mysql-crond" - -// NewDeployMySQLCrondCommand 实现 -func NewDeployMySQLCrondCommand() *cobra.Command { - act := DeployMysqlCrondAct{ - BaseOptions: subcmd.GBaseOptions, - } - cmd := &cobra.Command{ - Use: DeployMySQLCrond, - Short: "部署 mysql-crond", - Example: fmt.Sprintf( - `dbactuator mysql %s %s %s`, - DeployMySQLCrond, - subcmd.CmdBaseExampleStr, - subcmd.ToPrettyJson(act.Service.Example()), - ), - Run: func(cmd *cobra.Command, args []string) { - util.CheckErr(act.Validate()) - util.CheckErr(act.Init()) - util.CheckErr(act.Run()) - }, - } - return cmd -} - -// Validate 校验参数 -func (c *DeployMysqlCrondAct) Validate() (err error) { - return c.BaseOptions.Validate() -} - -// Init 初始化 -func (c *DeployMysqlCrondAct) Init() (err error) { - if err = c.Deserialize(&c.Service.Params); err != nil { - logger.Error("DeserializeAndValidate err %s", err.Error()) - return err - } - c.Service.GeneralParam = subcmd.GeneralRuntimeParam - logger.Info("extend params: %s", c.Service.Params) - return nil -} - -// Run 执行 -func (c *DeployMysqlCrondAct) Run() (err error) { - steps := subcmd.Steps{ - { - FunName: "初始化", - Func: c.Service.Init, - }, - { - FunName: "部署二进制", - Func: c.Service.DeployBinary, - }, - { - FunName: "生成配置文件", - Func: c.Service.GenerateRuntimeConfig, - }, - { - FunName: "生成空任务配置", - Func: c.Service.TouchJobsConfig, - }, - { - FunName: "移除保活监控", - Func: c.Service.RemoveKeepAlive, - }, - { - FunName: "停止进程", - Func: c.Service.Stop, - }, - { - FunName: "启动进程", - Func: c.Service.Start, - }, - { - FunName: "启动后检查", - Func: c.Service.CheckStart, - }, - { - FunName: "添加保活监控", - Func: c.Service.AddKeepAlive, - }, - } - if err := steps.Run(); err != nil { - logger.Error("部署 mysql-crond 失败: %s", err.Error()) - return 
err - } - logger.Info("部署 mysql-crond 完成") - return nil -} +// +//import ( +// "dbm-services/mysql/db-tools/dbactuator/pkg/components/peripheraltools/crond" +// "fmt" +// +// "dbm-services/common/go-pubpkg/logger" +// "dbm-services/mysql/db-tools/dbactuator/internal/subcmd" +// "dbm-services/mysql/db-tools/dbactuator/pkg/util" +// +// "github.com/spf13/cobra" +//) +// +//// DeployMysqlCrondAct 部署 +//type DeployMysqlCrondAct struct { +// *subcmd.BaseOptions +// Service crond.MySQLCrondComp +//} +// +//// DeployMySQLCrond 命令常量 +//const DeployMySQLCrond = "deploy-mysql-crond" +// +//// NewDeployMySQLCrondCommand 实现 +//func NewDeployMySQLCrondCommand() *cobra.Command { +// act := DeployMysqlCrondAct{ +// BaseOptions: subcmd.GBaseOptions, +// } +// cmd := &cobra.Command{ +// Use: DeployMySQLCrond, +// Short: "部署 mysql-crond", +// Example: fmt.Sprintf( +// `dbactuator mysql %s %s %s`, +// DeployMySQLCrond, +// subcmd.CmdBaseExampleStr, +// subcmd.ToPrettyJson(act.Service.Example()), +// ), +// Run: func(cmd *cobra.Command, args []string) { +// util.CheckErr(act.Validate()) +// util.CheckErr(act.Init()) +// util.CheckErr(act.Run()) +// }, +// } +// return cmd +//} +// +//// Validate 校验参数 +//func (c *DeployMysqlCrondAct) Validate() (err error) { +// return c.BaseOptions.Validate() +//} +// +//// Init 初始化 +//func (c *DeployMysqlCrondAct) Init() (err error) { +// if err = c.Deserialize(&c.Service.Params); err != nil { +// logger.Error("DeserializeAndValidate err %s", err.Error()) +// return err +// } +// c.Service.GeneralParam = subcmd.GeneralRuntimeParam +// logger.Info("extend params: %s", c.Service.Params) +// return nil +//} +// +//// Run 执行 +//func (c *DeployMysqlCrondAct) Run() (err error) { +// steps := subcmd.Steps{ +// { +// FunName: "初始化", +// Func: c.Service.Init, +// }, +// { +// FunName: "部署二进制", +// Func: c.Service.DeployBinary, +// }, +// { +// FunName: "生成配置文件", +// Func: c.Service.GenerateRuntimeConfig, +// }, +// { +// FunName: "生成空任务配置", +// Func: 
c.Service.TouchJobsConfig, +// }, +// { +// FunName: "移除保活监控", +// Func: c.Service.RemoveKeepAlive, +// }, +// { +// FunName: "停止进程", +// Func: c.Service.Stop, +// }, +// { +// FunName: "启动进程", +// Func: c.Service.Start, +// }, +// { +// FunName: "启动后检查", +// Func: c.Service.CheckStart, +// }, +// { +// FunName: "添加保活监控", +// Func: c.Service.AddKeepAlive, +// }, +// } +// if err := steps.Run(); err != nil { +// logger.Error("部署 mysql-crond 失败: %s", err.Error()) +// return err +// } +// logger.Info("部署 mysql-crond 完成") +// return nil +//} diff --git a/dbm-services/mysql/db-tools/dbactuator/internal/subcmd/mysqlcmd/install_backup_client.go b/dbm-services/mysql/db-tools/dbactuator/internal/subcmd/mysqlcmd/install_backup_client.go index dd8c482922..17436b339e 100644 --- a/dbm-services/mysql/db-tools/dbactuator/internal/subcmd/mysqlcmd/install_backup_client.go +++ b/dbm-services/mysql/db-tools/dbactuator/internal/subcmd/mysqlcmd/install_backup_client.go @@ -1,98 +1,89 @@ package mysqlcmd -import ( - "dbm-services/mysql/db-tools/dbactuator/pkg/components/peripheraltools/backup_client" - "fmt" +// 这个子命令在 python 里面没人调用 - "dbm-services/common/go-pubpkg/logger" - "dbm-services/mysql/db-tools/dbactuator/internal/subcmd" - "dbm-services/mysql/db-tools/dbactuator/pkg/util" - - "github.com/spf13/cobra" -) - -// InstallBackupClientAct TODO -type InstallBackupClientAct struct { - *subcmd.BaseOptions - Service backup_client.BackupClientComp -} - -// CommandInstallBackupClient TODO -const CommandInstallBackupClient = "install-backup-client" - -// InstallBackupClientCommand TODO -func InstallBackupClientCommand() *cobra.Command { - act := InstallBackupClientAct{ - BaseOptions: subcmd.GBaseOptions, - } - cmd := &cobra.Command{ - Use: CommandInstallBackupClient, - Short: "部署 backup_client", - Example: fmt.Sprintf( - `dbactuator mysql %s %s %s`, CommandInstallBackupClient, - subcmd.CmdBaseExampleStr, - subcmd.ToPrettyJson(act.Service.Example()), - ), - Run: func(cmd *cobra.Command, args 
[]string) { - util.CheckErr(act.Validate()) - if act.RollBack { - util.CheckErr(act.Rollback()) - return - } - util.CheckErr(act.Init()) - util.CheckErr(act.Run()) - }, - } - return cmd -} - -// Init TODO -func (d *InstallBackupClientAct) Init() (err error) { - if err = d.Deserialize(&d.Service.Params); err != nil { - logger.Error("DeserializeAndValidate err %s", err.Error()) - return err - } - d.Service.GeneralParam = subcmd.GeneralRuntimeParam - return nil -} - -// Run TODO -func (d *InstallBackupClientAct) Run() (err error) { - steps := subcmd.Steps{ - { - FunName: "init", - Func: d.Service.Init, - }, - { - FunName: "预检查", - Func: d.Service.PreCheck, - }, - { - FunName: "部署二进制", - Func: d.Service.DeployBinary, - }, - { - FunName: "渲染 config.toml", - Func: d.Service.GenerateBinaryConfig, - }, - { - FunName: "生成 cosinfo.toml", - Func: d.Service.GenerateBucketConfig, - }, - { - FunName: "添加 upload crontab", - Func: d.Service.AddCrond, - }, - } - - if err := steps.Run(); err != nil { - return err - } - logger.Info("install backup_client successfully~") - return nil -} - -// Rollback TODO -func (d *InstallBackupClientAct) Rollback() (err error) { - return -} +//// InstallBackupClientAct TODO +//type InstallBackupClientAct struct { +// *subcmd.BaseOptions +// Service backup_client.BackupClientComp +//} +// +//// CommandInstallBackupClient TODO +//const CommandInstallBackupClient = "install-backup-client" +// +//// InstallBackupClientCommand TODO +//func InstallBackupClientCommand() *cobra.Command { +// act := InstallBackupClientAct{ +// BaseOptions: subcmd.GBaseOptions, +// } +// cmd := &cobra.Command{ +// Use: CommandInstallBackupClient, +// Short: "部署 backup_client", +// Example: fmt.Sprintf( +// `dbactuator mysql %s %s %s`, CommandInstallBackupClient, +// subcmd.CmdBaseExampleStr, +// subcmd.ToPrettyJson(act.Service.Example()), +// ), +// Run: func(cmd *cobra.Command, args []string) { +// util.CheckErr(act.Validate()) +// if act.RollBack { +// 
util.CheckErr(act.Rollback()) +// return +// } +// util.CheckErr(act.Init()) +// util.CheckErr(act.Run()) +// }, +// } +// return cmd +//} +// +//// Init TODO +//func (d *InstallBackupClientAct) Init() (err error) { +// if err = d.Deserialize(&d.Service.Params); err != nil { +// logger.Error("DeserializeAndValidate err %s", err.Error()) +// return err +// } +// d.Service.GeneralParam = subcmd.GeneralRuntimeParam +// return nil +//} +// +//// Run TODO +//func (d *InstallBackupClientAct) Run() (err error) { +// steps := subcmd.Steps{ +// { +// FunName: "init", +// Func: d.Service.Init, +// }, +// { +// FunName: "预检查", +// Func: d.Service.PreCheck, +// }, +// { +// FunName: "部署二进制", +// Func: d.Service.DeployBinary, +// }, +// { +// FunName: "渲染 config.toml", +// Func: d.Service.GenerateBinaryConfig, +// }, +// { +// FunName: "生成 cosinfo.toml", +// Func: d.Service.GenerateBucketConfig, +// }, +// { +// FunName: "添加 upload crontab", +// Func: d.Service.AddCrond, +// }, +// } +// +// if err := steps.Run(); err != nil { +// return err +// } +// logger.Info("install backup_client successfully~") +// return nil +//} +// +//// Rollback TODO +//func (d *InstallBackupClientAct) Rollback() (err error) { +// return +//} diff --git a/dbm-services/mysql/db-tools/dbactuator/internal/subcmd/mysqlcmd/install_checkusm.go b/dbm-services/mysql/db-tools/dbactuator/internal/subcmd/mysqlcmd/install_checkusm.go index 813b57f5bf..7cb17984e8 100644 --- a/dbm-services/mysql/db-tools/dbactuator/internal/subcmd/mysqlcmd/install_checkusm.go +++ b/dbm-services/mysql/db-tools/dbactuator/internal/subcmd/mysqlcmd/install_checkusm.go @@ -1,94 +1,94 @@ package mysqlcmd -import ( - "dbm-services/mysql/db-tools/dbactuator/pkg/components/peripheraltools/checksum" - "fmt" - - "dbm-services/common/go-pubpkg/logger" - "dbm-services/mysql/db-tools/dbactuator/internal/subcmd" - "dbm-services/mysql/db-tools/dbactuator/pkg/util" - - "github.com/spf13/cobra" -) - -// InstallMySQLChecksumAct 安装数据校验 -type 
InstallMySQLChecksumAct struct { - *subcmd.BaseOptions - Service checksum.MySQLChecksumComp -} - -// InstallMySQLChecksum 安装数据校验子命令名称 -const InstallMySQLChecksum = "install-checksum" - -// NewInstallMySQLChecksumCommand godoc +//import ( +// "dbm-services/mysql/db-tools/dbactuator/pkg/components/peripheraltools/checksum" +// "fmt" // -// @Summary 安装mysql校验 -// @Description 安装mysql校验 -// @Tags mysql -// @Accept json -// @Param body body mysql.InstallMySQLChecksumComp true "short description" -// @Router /mysql/install-checksum [post] -func NewInstallMySQLChecksumCommand() *cobra.Command { - act := InstallMySQLChecksumAct{ - BaseOptions: subcmd.GBaseOptions, - } - cmd := &cobra.Command{ - Use: InstallMySQLChecksum, - Short: "安装mysql校验", - Example: fmt.Sprintf( - `dbactuator mysql %s %s %s`, - InstallMySQLChecksum, - subcmd.CmdBaseExampleStr, - subcmd.ToPrettyJson(act.Service.Example()), - ), - Run: func(cmd *cobra.Command, args []string) { - util.CheckErr(act.Validate()) - util.CheckErr(act.Init()) - util.CheckErr(act.Run()) - }, - } - return cmd -} - -// Validate 基本校验 -func (c *InstallMySQLChecksumAct) Validate() (err error) { - return c.BaseOptions.Validate() -} - -// Init 初始化子命令 -func (c *InstallMySQLChecksumAct) Init() (err error) { - if err = c.Deserialize(&c.Service.Params); err != nil { - logger.Error("DeserializeAndValidate err %s", err.Error()) - return err - } - c.Service.GeneralParam = subcmd.GeneralRuntimeParam - logger.Info("extend params: %s", c.Service.Params) - return nil -} - -// Run 执行子命令 -func (c *InstallMySQLChecksumAct) Run() (err error) { - steps := subcmd.Steps{ - { - FunName: "初始化", - Func: c.Service.Init, - }, - { - FunName: "部署二进制程序", - Func: c.Service.DeployBinary, - }, - { - FunName: "生成二进制程序配置", - Func: c.Service.GenerateRuntimeConfig, - }, - { - FunName: "注册 crond 任务", - Func: c.Service.AddToCrond, - }, - } - if err := steps.Run(); err != nil { - return err - } - logger.Info("部署mysql校验完成") - return nil -} +// 
"dbm-services/common/go-pubpkg/logger" +// "dbm-services/mysql/db-tools/dbactuator/internal/subcmd" +// "dbm-services/mysql/db-tools/dbactuator/pkg/util" +// +// "github.com/spf13/cobra" +//) +// +//// InstallMySQLChecksumAct 安装数据校验 +//type InstallMySQLChecksumAct struct { +// *subcmd.BaseOptions +// Service checksum.MySQLChecksumComp +//} +// +//// InstallMySQLChecksum 安装数据校验子命令名称 +//const InstallMySQLChecksum = "install-checksum" +// +//// NewInstallMySQLChecksumCommand godoc +//// +//// @Summary 安装mysql校验 +//// @Description 安装mysql校验 +//// @Tags mysql +//// @Accept json +//// @Param body body mysql.InstallMySQLChecksumComp true "short description" +//// @Router /mysql/install-checksum [post] +//func NewInstallMySQLChecksumCommand() *cobra.Command { +// act := InstallMySQLChecksumAct{ +// BaseOptions: subcmd.GBaseOptions, +// } +// cmd := &cobra.Command{ +// Use: InstallMySQLChecksum, +// Short: "安装mysql校验", +// Example: fmt.Sprintf( +// `dbactuator mysql %s %s %s`, +// InstallMySQLChecksum, +// subcmd.CmdBaseExampleStr, +// subcmd.ToPrettyJson(act.Service.Example()), +// ), +// Run: func(cmd *cobra.Command, args []string) { +// util.CheckErr(act.Validate()) +// util.CheckErr(act.Init()) +// util.CheckErr(act.Run()) +// }, +// } +// return cmd +//} +// +//// Validate 基本校验 +//func (c *InstallMySQLChecksumAct) Validate() (err error) { +// return c.BaseOptions.Validate() +//} +// +//// Init 初始化子命令 +//func (c *InstallMySQLChecksumAct) Init() (err error) { +// if err = c.Deserialize(&c.Service.Params); err != nil { +// logger.Error("DeserializeAndValidate err %s", err.Error()) +// return err +// } +// c.Service.GeneralParam = subcmd.GeneralRuntimeParam +// logger.Info("extend params: %s", c.Service.Params) +// return nil +//} +// +//// Run 执行子命令 +//func (c *InstallMySQLChecksumAct) Run() (err error) { +// steps := subcmd.Steps{ +// { +// FunName: "初始化", +// Func: c.Service.Init, +// }, +// { +// FunName: "部署二进制程序", +// Func: c.Service.DeployBinary, +// }, +// { +// 
FunName: "生成二进制程序配置", +// Func: c.Service.GenerateRuntimeConfig, +// }, +// { +// FunName: "注册 crond 任务", +// Func: c.Service.AddToCrond, +// }, +// } +// if err := steps.Run(); err != nil { +// return err +// } +// logger.Info("部署mysql校验完成") +// return nil +//} diff --git a/dbm-services/mysql/db-tools/dbactuator/internal/subcmd/mysqlcmd/install_dba_toolkit.go b/dbm-services/mysql/db-tools/dbactuator/internal/subcmd/mysqlcmd/install_dba_toolkit.go index 991a70391f..fd40bff5d7 100644 --- a/dbm-services/mysql/db-tools/dbactuator/internal/subcmd/mysqlcmd/install_dba_toolkit.go +++ b/dbm-services/mysql/db-tools/dbactuator/internal/subcmd/mysqlcmd/install_dba_toolkit.go @@ -1,92 +1,92 @@ package mysqlcmd -import ( - "fmt" - - "dbm-services/common/go-pubpkg/logger" - "dbm-services/mysql/db-tools/dbactuator/internal/subcmd" - "dbm-services/mysql/db-tools/dbactuator/pkg/components/mysql" - "dbm-services/mysql/db-tools/dbactuator/pkg/util" - - "github.com/spf13/cobra" -) - -// InstallDBAToolkitAct TODO -type InstallDBAToolkitAct struct { - *subcmd.BaseOptions - Service mysql.InstallDBAToolkitComp -} - -// CommandInstallDBAToolkit TODO -const CommandInstallDBAToolkit = "install-dbatoolkit" - -// NewInstallDBAToolkitCommand godoc +//import ( +// "fmt" // -// @Summary 部署DBA工具箱 -// @Description 部署 /home/mysql/dba_toolkit,覆盖 -// @Tags mysql -// @Accept json -// @Param body body mysql.InstallDBAToolkitComp true "short description" -// @Router /mysql/install-dbatoolkit [post] -func NewInstallDBAToolkitCommand() *cobra.Command { - act := InstallDBAToolkitAct{ - BaseOptions: subcmd.GBaseOptions, - } - cmd := &cobra.Command{ - Use: CommandInstallDBAToolkit, - Short: "部署 rotate_binlog", - Example: fmt.Sprintf( - `dbactuator mysql %s %s %s`, CommandInstallDBAToolkit, - subcmd.CmdBaseExampleStr, - subcmd.ToPrettyJson(act.Service.Example()), - ), - Run: func(cmd *cobra.Command, args []string) { - util.CheckErr(act.Validate()) - if act.RollBack { - util.CheckErr(act.Rollback()) - return - 
} - util.CheckErr(act.Init()) - util.CheckErr(act.Run()) - }, - } - return cmd -} - -// Init TODO -func (d *InstallDBAToolkitAct) Init() (err error) { - if err = d.Deserialize(&d.Service.Params); err != nil { - logger.Error("DeserializeAndValidate err %s", err.Error()) - return err - } - return nil -} - -// Run TODO -func (d *InstallDBAToolkitAct) Run() (err error) { - steps := subcmd.Steps{ - { - FunName: "init", - Func: d.Service.Init, - }, - { - FunName: "预检查", - Func: d.Service.PreCheck, - }, - { - FunName: "部署二进制", - Func: d.Service.DeployBinary, - }, - } - - if err := steps.Run(); err != nil { - return err - } - logger.Info("install dba-toolkit successfully~") - return nil -} - -// Rollback TODO -func (d *InstallDBAToolkitAct) Rollback() (err error) { - return -} +// "dbm-services/common/go-pubpkg/logger" +// "dbm-services/mysql/db-tools/dbactuator/internal/subcmd" +// "dbm-services/mysql/db-tools/dbactuator/pkg/components/mysql" +// "dbm-services/mysql/db-tools/dbactuator/pkg/util" +// +// "github.com/spf13/cobra" +//) +// +//// InstallDBAToolkitAct TODO +//type InstallDBAToolkitAct struct { +// *subcmd.BaseOptions +// Service mysql.InstallDBAToolkitComp +//} +// +//// CommandInstallDBAToolkit TODO +//const CommandInstallDBAToolkit = "install-dbatoolkit" +// +//// NewInstallDBAToolkitCommand godoc +//// +//// @Summary 部署DBA工具箱 +//// @Description 部署 /home/mysql/dba_toolkit,覆盖 +//// @Tags mysql +//// @Accept json +//// @Param body body mysql.InstallDBAToolkitComp true "short description" +//// @Router /mysql/install-dbatoolkit [post] +//func NewInstallDBAToolkitCommand() *cobra.Command { +// act := InstallDBAToolkitAct{ +// BaseOptions: subcmd.GBaseOptions, +// } +// cmd := &cobra.Command{ +// Use: CommandInstallDBAToolkit, +// Short: "部署 rotate_binlog", +// Example: fmt.Sprintf( +// `dbactuator mysql %s %s %s`, CommandInstallDBAToolkit, +// subcmd.CmdBaseExampleStr, +// subcmd.ToPrettyJson(act.Service.Example()), +// ), +// Run: func(cmd *cobra.Command, args 
[]string) { +// util.CheckErr(act.Validate()) +// if act.RollBack { +// util.CheckErr(act.Rollback()) +// return +// } +// util.CheckErr(act.Init()) +// util.CheckErr(act.Run()) +// }, +// } +// return cmd +//} +// +//// Init TODO +//func (d *InstallDBAToolkitAct) Init() (err error) { +// if err = d.Deserialize(&d.Service.Params); err != nil { +// logger.Error("DeserializeAndValidate err %s", err.Error()) +// return err +// } +// return nil +//} +// +//// Run TODO +//func (d *InstallDBAToolkitAct) Run() (err error) { +// steps := subcmd.Steps{ +// { +// FunName: "init", +// Func: d.Service.Init, +// }, +// { +// FunName: "预检查", +// Func: d.Service.PreCheck, +// }, +// { +// FunName: "部署二进制", +// Func: d.Service.DeployBinary, +// }, +// } +// +// if err := steps.Run(); err != nil { +// return err +// } +// logger.Info("install dba-toolkit successfully~") +// return nil +//} +// +//// Rollback TODO +//func (d *InstallDBAToolkitAct) Rollback() (err error) { +// return +//} diff --git a/dbm-services/mysql/db-tools/dbactuator/internal/subcmd/mysqlcmd/install_monitor.go b/dbm-services/mysql/db-tools/dbactuator/internal/subcmd/mysqlcmd/install_monitor.go index 51a11e4345..1ed1d7ed4a 100644 --- a/dbm-services/mysql/db-tools/dbactuator/internal/subcmd/mysqlcmd/install_monitor.go +++ b/dbm-services/mysql/db-tools/dbactuator/internal/subcmd/mysqlcmd/install_monitor.go @@ -1,96 +1,96 @@ package mysqlcmd -import ( - "dbm-services/mysql/db-tools/dbactuator/pkg/components/peripheraltools/monitor" - "fmt" - - "dbm-services/common/go-pubpkg/logger" - "dbm-services/mysql/db-tools/dbactuator/internal/subcmd" - "dbm-services/mysql/db-tools/dbactuator/pkg/util" - - "github.com/spf13/cobra" -) - -// InstallMonitorAct 安装 mysql monitor -type InstallMonitorAct struct { - *subcmd.BaseOptions - Service monitor.MySQLMonitorComp -} - -// InstallMySQLMonitor 安装 mysql monitor -const InstallMySQLMonitor = "install-monitor" - -// NewInstallMySQLMonitorCommand 安装 mysql monitor 子命令 -func 
NewInstallMySQLMonitorCommand() *cobra.Command { - act := InstallMonitorAct{ - BaseOptions: subcmd.GBaseOptions, - } - cmd := &cobra.Command{ - Use: InstallMySQLMonitor, - Short: "安装mysql监控", - Example: fmt.Sprintf( - `dbactuator mysql %s %s %s`, - InstallMySQLMonitor, - subcmd.CmdBaseExampleStr, - subcmd.ToPrettyJson(act.Service.Example()), - ), - Run: func(cmd *cobra.Command, args []string) { - util.CheckErr(act.Validate()) - util.CheckErr(act.Init()) - util.CheckErr(act.Run()) - }, - } - return cmd -} - -// Validate 参数验证 -func (c *InstallMonitorAct) Validate() (err error) { - return c.BaseOptions.Validate() -} - -// Init 初始化 -func (c *InstallMonitorAct) Init() (err error) { - if err = c.Deserialize(&c.Service.Params); err != nil { - logger.Error("DeserializeAndValidate err %s", err.Error()) - return err - } - c.Service.GeneralParam = subcmd.GeneralRuntimeParam - logger.Info("extend params: %s", c.Service.Params) - return nil -} - -// Run 执行入口 -func (c *InstallMonitorAct) Run() (err error) { - steps := subcmd.Steps{ - { - FunName: "初始化", - Func: c.Service.Init, - }, - - { - FunName: "部署二进制程序", - Func: c.Service.DeployBinary, - }, - { - FunName: "生成二进制程序配置", - Func: c.Service.GenerateRuntimeConfig, - }, - { - FunName: "生成监控项配置", - Func: c.Service.GenerateItemsConfig, - }, - { - FunName: "生成exporter配置文件", - Func: c.Service.GenerateExporterConfig, - }, - { - FunName: "注册crond任务", - Func: c.Service.AddToCrond, - }, - } - if err := steps.Run(); err != nil { - return err - } - logger.Info("部署mysql监控完成") - return nil -} +//import ( +// "dbm-services/mysql/db-tools/dbactuator/pkg/components/peripheraltools/monitor" +// "fmt" +// +// "dbm-services/common/go-pubpkg/logger" +// "dbm-services/mysql/db-tools/dbactuator/internal/subcmd" +// "dbm-services/mysql/db-tools/dbactuator/pkg/util" +// +// "github.com/spf13/cobra" +//) +// +//// InstallMonitorAct 安装 mysql monitor +//type InstallMonitorAct struct { +// *subcmd.BaseOptions +// Service monitor.MySQLMonitorComp +//} +// 
+//// InstallMySQLMonitor 安装 mysql monitor +//const InstallMySQLMonitor = "install-monitor" +// +//// NewInstallMySQLMonitorCommand 安装 mysql monitor 子命令 +//func NewInstallMySQLMonitorCommand() *cobra.Command { +// act := InstallMonitorAct{ +// BaseOptions: subcmd.GBaseOptions, +// } +// cmd := &cobra.Command{ +// Use: InstallMySQLMonitor, +// Short: "安装mysql监控", +// Example: fmt.Sprintf( +// `dbactuator mysql %s %s %s`, +// InstallMySQLMonitor, +// subcmd.CmdBaseExampleStr, +// subcmd.ToPrettyJson(act.Service.Example()), +// ), +// Run: func(cmd *cobra.Command, args []string) { +// util.CheckErr(act.Validate()) +// util.CheckErr(act.Init()) +// util.CheckErr(act.Run()) +// }, +// } +// return cmd +//} +// +//// Validate 参数验证 +//func (c *InstallMonitorAct) Validate() (err error) { +// return c.BaseOptions.Validate() +//} +// +//// Init 初始化 +//func (c *InstallMonitorAct) Init() (err error) { +// if err = c.Deserialize(&c.Service.Params); err != nil { +// logger.Error("DeserializeAndValidate err %s", err.Error()) +// return err +// } +// c.Service.GeneralParam = subcmd.GeneralRuntimeParam +// logger.Info("extend params: %s", c.Service.Params) +// return nil +//} +// +//// Run 执行入口 +//func (c *InstallMonitorAct) Run() (err error) { +// steps := subcmd.Steps{ +// { +// FunName: "初始化", +// Func: c.Service.Init, +// }, +// +// //{ +// // FunName: "部署二进制程序", +// // Func: c.Service.DeployBinary, +// //}, +// { +// FunName: "生成二进制程序配置", +// Func: c.Service.GenerateRuntimeConfig, +// }, +// { +// FunName: "生成监控项配置", +// Func: c.Service.GenerateItemsConfig, +// }, +// //{ +// // FunName: "生成exporter配置文件", +// // Func: c.Service.GenerateExporterConfig, +// //}, +// { +// FunName: "注册crond任务", +// Func: c.Service.AddToCrond, +// }, +// } +// if err := steps.Run(); err != nil { +// return err +// } +// logger.Info("部署mysql监控完成") +// return nil +//} diff --git a/dbm-services/mysql/db-tools/dbactuator/internal/subcmd/mysqlcmd/install_new_dbbackup.go 
b/dbm-services/mysql/db-tools/dbactuator/internal/subcmd/mysqlcmd/install_new_dbbackup.go index 9f7ac20845..019e06efea 100644 --- a/dbm-services/mysql/db-tools/dbactuator/internal/subcmd/mysqlcmd/install_new_dbbackup.go +++ b/dbm-services/mysql/db-tools/dbactuator/internal/subcmd/mysqlcmd/install_new_dbbackup.go @@ -1,109 +1,109 @@ package mysqlcmd -import ( - "dbm-services/mysql/db-tools/dbactuator/pkg/components/peripheraltools/dbbackup" - "fmt" - - "dbm-services/common/go-pubpkg/logger" - "dbm-services/mysql/db-tools/dbactuator/internal/subcmd" - "dbm-services/mysql/db-tools/dbactuator/pkg/util" - - "github.com/spf13/cobra" -) - -// InstallNewDbBackupAct TODO -type InstallNewDbBackupAct struct { - *subcmd.BaseOptions - Service dbbackup.NewDbBackupComp -} - -// NewInstallNewDbBackupCommand godoc +//import ( +// "dbm-services/mysql/db-tools/dbactuator/pkg/components/peripheraltools/dbbackup" +// "fmt" // -// @Summary 部署备份程序 -// @Description 部署GO版本备份程序 -// @Tags mysql -// @Accept json -// @Param body body mysql.InstallNewDbBackupComp true "short description" -// @Router /mysql/deploy-dbbackup [post] -func NewInstallNewDbBackupCommand() *cobra.Command { - act := InstallNewDbBackupAct{ - BaseOptions: subcmd.GBaseOptions, - } - cmd := &cobra.Command{ - Use: "deploy-dbbackup", - Short: "部署GO版本备份程序", - Example: fmt.Sprintf( - `dbactuator mysql deploy-dbbackup %s %s`, subcmd.CmdBaseExampleStr, - subcmd.ToPrettyJson(act.Service.Example()), - ), - Run: func(cmd *cobra.Command, args []string) { - util.CheckErr(act.Validate()) - if act.RollBack { - util.CheckErr(act.Rollback()) - return - } - util.CheckErr(act.Init()) - util.CheckErr(act.Run()) - }, - } - return cmd -} - -// Init TODO -func (d *InstallNewDbBackupAct) Init() (err error) { - if err = d.Deserialize(&d.Service.Params); err != nil { - logger.Error("DeserializeAndValidate err %s", err.Error()) - return err - } - d.Service.GeneralParam = subcmd.GeneralRuntimeParam - return nil -} - -// Run TODO -func (d 
*InstallNewDbBackupAct) Run() (err error) { - steps := subcmd.Steps{ - { - FunName: "init", - Func: d.Service.Init, - }, - { - FunName: "初始化待渲染配置", - Func: d.Service.InitRenderData, - }, - { - FunName: "初始化备份数据目录", - Func: d.Service.InitBackupDir, - }, - { - FunName: "备份原备份程序", - Func: d.Service.StageLegacyBackup, - }, - { - FunName: "解压备份程序压缩包", - Func: d.Service.DeployBinary, - }, - { - FunName: "生成配置", - Func: d.Service.GenerateRuntimeConfig, - }, - { - FunName: "更改安装路径所属用户组", - Func: d.Service.ChownGroup, - }, - { - FunName: "添加系统crontab", - Func: d.Service.AddCrontab, - }, - } - - if err := steps.Run(); err != nil { - return err - } - logger.Info("install new dbbackup successfully~") - return nil -} - -// Rollback TODO -func (d *InstallNewDbBackupAct) Rollback() (err error) { - return -} +// "dbm-services/common/go-pubpkg/logger" +// "dbm-services/mysql/db-tools/dbactuator/internal/subcmd" +// "dbm-services/mysql/db-tools/dbactuator/pkg/util" +// +// "github.com/spf13/cobra" +//) +// +//// InstallNewDbBackupAct TODO +//type InstallNewDbBackupAct struct { +// *subcmd.BaseOptions +// Service dbbackup.NewDbBackupComp +//} +// +//// NewInstallNewDbBackupCommand godoc +//// +//// @Summary 部署备份程序 +//// @Description 部署GO版本备份程序 +//// @Tags mysql +//// @Accept json +//// @Param body body mysql.InstallNewDbBackupComp true "short description" +//// @Router /mysql/deploy-dbbackup [post] +//func NewInstallNewDbBackupCommand() *cobra.Command { +// act := InstallNewDbBackupAct{ +// BaseOptions: subcmd.GBaseOptions, +// } +// cmd := &cobra.Command{ +// Use: "deploy-dbbackup", +// Short: "部署GO版本备份程序", +// Example: fmt.Sprintf( +// `dbactuator mysql deploy-dbbackup %s %s`, subcmd.CmdBaseExampleStr, +// subcmd.ToPrettyJson(act.Service.Example()), +// ), +// Run: func(cmd *cobra.Command, args []string) { +// util.CheckErr(act.Validate()) +// if act.RollBack { +// util.CheckErr(act.Rollback()) +// return +// } +// util.CheckErr(act.Init()) +// util.CheckErr(act.Run()) +// }, +// } 
+// return cmd +//} +// +//// Init TODO +//func (d *InstallNewDbBackupAct) Init() (err error) { +// if err = d.Deserialize(&d.Service.Params); err != nil { +// logger.Error("DeserializeAndValidate err %s", err.Error()) +// return err +// } +// d.Service.GeneralParam = subcmd.GeneralRuntimeParam +// return nil +//} +// +//// Run TODO +//func (d *InstallNewDbBackupAct) Run() (err error) { +// steps := subcmd.Steps{ +// { +// FunName: "init", +// Func: d.Service.Init, +// }, +// { +// FunName: "初始化待渲染配置", +// Func: d.Service.InitRenderData, +// }, +// { +// FunName: "初始化备份数据目录", +// Func: d.Service.InitBackupDir, +// }, +// { +// FunName: "备份原备份程序", +// Func: d.Service.StageLegacyBackup, +// }, +// { +// FunName: "解压备份程序压缩包", +// Func: d.Service.DeployBinary, +// }, +// { +// FunName: "生成配置", +// Func: d.Service.GenerateRuntimeConfig, +// }, +// { +// FunName: "更改安装路径所属用户组", +// Func: d.Service.ChownGroup, +// }, +// { +// FunName: "添加系统crontab", +// Func: d.Service.AddCrontab, +// }, +// } +// +// if err := steps.Run(); err != nil { +// return err +// } +// logger.Info("install new dbbackup successfully~") +// return nil +//} +// +//// Rollback TODO +//func (d *InstallNewDbBackupAct) Rollback() (err error) { +// return +//} diff --git a/dbm-services/mysql/db-tools/dbactuator/internal/subcmd/mysqlcmd/install_new_mysql_rotatebinlog.go b/dbm-services/mysql/db-tools/dbactuator/internal/subcmd/mysqlcmd/install_new_mysql_rotatebinlog.go index 6815510dae..7571f0b43d 100644 --- a/dbm-services/mysql/db-tools/dbactuator/internal/subcmd/mysqlcmd/install_new_mysql_rotatebinlog.go +++ b/dbm-services/mysql/db-tools/dbactuator/internal/subcmd/mysqlcmd/install_new_mysql_rotatebinlog.go @@ -1,98 +1,98 @@ package mysqlcmd -import ( - "dbm-services/mysql/db-tools/dbactuator/pkg/components/peripheraltools/rotatebinlog" - "fmt" - - "dbm-services/common/go-pubpkg/logger" - "dbm-services/mysql/db-tools/dbactuator/internal/subcmd" - "dbm-services/mysql/db-tools/dbactuator/pkg/util" - - 
"github.com/spf13/cobra" -) - -// InstallMysqlRotateBinlogAct TODO -type InstallMysqlRotateBinlogAct struct { - *subcmd.BaseOptions - Service rotatebinlog.MySQLRotateBinlogComp -} - -// CommandDeployMysqlRotatebinlog TODO -const CommandDeployMysqlRotatebinlog = "deploy-mysql-rotatebinlog" - -// NewInstallRotateBinlogCommand TODO -func NewInstallRotateBinlogCommand() *cobra.Command { - act := InstallMysqlRotateBinlogAct{ - BaseOptions: subcmd.GBaseOptions, - } - cmd := &cobra.Command{ - Use: CommandDeployMysqlRotatebinlog, - Short: "部署 mysql rotate binlog", - Example: fmt.Sprintf( - `dbactuator mysql %s %s %s`, CommandDeployMysqlRotatebinlog, - subcmd.CmdBaseExampleStr, - subcmd.ToPrettyJson(act.Service.Example()), - ), - Run: func(cmd *cobra.Command, args []string) { - util.CheckErr(act.Validate()) - if act.RollBack { - util.CheckErr(act.Rollback()) - return - } - util.CheckErr(act.Init()) - util.CheckErr(act.Run()) - }, - } - return cmd -} - -// Init TODO -func (d *InstallMysqlRotateBinlogAct) Init() (err error) { - if err = d.Deserialize(&d.Service.Params); err != nil { - logger.Error("DeserializeAndValidate err %s", err.Error()) - return err - } - d.Service.GeneralParam = subcmd.GeneralRuntimeParam - return nil -} - -// Run TODO -func (d *InstallMysqlRotateBinlogAct) Run() (err error) { - steps := subcmd.Steps{ - { - FunName: "init", - Func: d.Service.Init, - }, - { - FunName: "预检查", - Func: d.Service.PreCheck, - }, - { - FunName: "部署二进制", - Func: d.Service.DeployBinary, - }, - { - FunName: "渲染 config.yaml", - Func: d.Service.GenerateRuntimeConfig, - }, - { - FunName: "添加系统crontab", - Func: d.Service.AddCrond, - }, - { - FunName: "迁移旧rotate_logbin", - Func: d.Service.RunMigrateOld, - }, - } - - if err := steps.Run(); err != nil { - return err - } - logger.Info("install new rotate_binlog successfully~") - return nil -} - -// Rollback TODO -func (d *InstallMysqlRotateBinlogAct) Rollback() (err error) { - return -} +//import ( +// 
"dbm-services/mysql/db-tools/dbactuator/pkg/components/peripheraltools/rotatebinlog" +// "fmt" +// +// "dbm-services/common/go-pubpkg/logger" +// "dbm-services/mysql/db-tools/dbactuator/internal/subcmd" +// "dbm-services/mysql/db-tools/dbactuator/pkg/util" +// +// "github.com/spf13/cobra" +//) +// +//// InstallMysqlRotateBinlogAct TODO +//type InstallMysqlRotateBinlogAct struct { +// *subcmd.BaseOptions +// Service rotatebinlog.MySQLRotateBinlogComp +//} +// +//// CommandDeployMysqlRotatebinlog TODO +//const CommandDeployMysqlRotatebinlog = "deploy-mysql-rotatebinlog" +// +//// NewInstallRotateBinlogCommand TODO +//func NewInstallRotateBinlogCommand() *cobra.Command { +// act := InstallMysqlRotateBinlogAct{ +// BaseOptions: subcmd.GBaseOptions, +// } +// cmd := &cobra.Command{ +// Use: CommandDeployMysqlRotatebinlog, +// Short: "部署 mysql rotate binlog", +// Example: fmt.Sprintf( +// `dbactuator mysql %s %s %s`, CommandDeployMysqlRotatebinlog, +// subcmd.CmdBaseExampleStr, +// subcmd.ToPrettyJson(act.Service.Example()), +// ), +// Run: func(cmd *cobra.Command, args []string) { +// util.CheckErr(act.Validate()) +// if act.RollBack { +// util.CheckErr(act.Rollback()) +// return +// } +// util.CheckErr(act.Init()) +// util.CheckErr(act.Run()) +// }, +// } +// return cmd +//} +// +//// Init TODO +//func (d *InstallMysqlRotateBinlogAct) Init() (err error) { +// if err = d.Deserialize(&d.Service.Params); err != nil { +// logger.Error("DeserializeAndValidate err %s", err.Error()) +// return err +// } +// d.Service.GeneralParam = subcmd.GeneralRuntimeParam +// return nil +//} +// +//// Run TODO +//func (d *InstallMysqlRotateBinlogAct) Run() (err error) { +// steps := subcmd.Steps{ +// { +// FunName: "init", +// Func: d.Service.Init, +// }, +// { +// FunName: "预检查", +// Func: d.Service.PreCheck, +// }, +// { +// FunName: "部署二进制", +// Func: d.Service.DeployBinary, +// }, +// { +// FunName: "渲染 config.yaml", +// Func: d.Service.GenerateRuntimeConfig, +// }, +// { +// FunName: 
"添加系统crontab", +// Func: d.Service.AddCrond, +// }, +// { +// FunName: "迁移旧rotate_logbin", +// Func: d.Service.RunMigrateOld, +// }, +// } +// +// if err := steps.Run(); err != nil { +// return err +// } +// logger.Info("install new rotate_binlog successfully~") +// return nil +//} +// +//// Rollback TODO +//func (d *InstallMysqlRotateBinlogAct) Rollback() (err error) { +// return +//} diff --git a/dbm-services/mysql/db-tools/dbactuator/internal/subcmd/mysqlcmd/mysqlcmd.go b/dbm-services/mysql/db-tools/dbactuator/internal/subcmd/mysqlcmd/mysqlcmd.go index 54cba52708..1b7b32c4e4 100644 --- a/dbm-services/mysql/db-tools/dbactuator/internal/subcmd/mysqlcmd/mysqlcmd.go +++ b/dbm-services/mysql/db-tools/dbactuator/internal/subcmd/mysqlcmd/mysqlcmd.go @@ -46,18 +46,18 @@ func NewMysqlCommand() *cobra.Command { ParseBinlogTimeCommand(), FlashbackBinlogCommand(), NewPtTableChecksumCommand(), - NewInstallMySQLChecksumCommand(), - NewInstallNewDbBackupCommand(), + //NewInstallMySQLChecksumCommand(), + //NewInstallNewDbBackupCommand(), // NewFullBackupCommand(), - NewInstallRotateBinlogCommand(), - NewInstallDBAToolkitCommand(), - NewDeployMySQLCrondCommand(), + //NewInstallRotateBinlogCommand(), + //NewInstallDBAToolkitCommand(), + //NewDeployMySQLCrondCommand(), ClearInstanceConfigCommand(), - NewInstallMySQLMonitorCommand(), + //NewInstallMySQLMonitorCommand(), NewExecPartitionSQLCommand(), NewBackupDemandCommand(), NewDropTableCommand(), - InstallBackupClientCommand(), + //InstallBackupClientCommand(), NewEnableTokudbPluginCommand(), NewOpenAreaDumpSchemaCommand(), NewOpenAreaImportSchemaCommand(), @@ -84,6 +84,9 @@ func NewMysqlCommand() *cobra.Command { NewPushMySQLCrondConfigCommand(), ChangeServerIdCommand(), GoFlashbackBinlogCommand(), + // --- + NewPushExporterCnfCommand(), + NewPreparePeripheralToolsBinaryCommand(), NewFastExecuteSqlActCommand(), }, }, diff --git a/dbm-services/mysql/db-tools/dbactuator/internal/subcmd/mysqlcmd/prepare_peripheraltools_binary.go 
b/dbm-services/mysql/db-tools/dbactuator/internal/subcmd/mysqlcmd/prepare_peripheraltools_binary.go new file mode 100644 index 0000000000..0a07f8e691 --- /dev/null +++ b/dbm-services/mysql/db-tools/dbactuator/internal/subcmd/mysqlcmd/prepare_peripheraltools_binary.go @@ -0,0 +1,69 @@ +package mysqlcmd + +import ( + "dbm-services/common/go-pubpkg/logger" + "dbm-services/mysql/db-tools/dbactuator/internal/subcmd" + "dbm-services/mysql/db-tools/dbactuator/pkg/components/peripheraltools" + "dbm-services/mysql/db-tools/dbactuator/pkg/util" + "fmt" + + "github.com/spf13/cobra" +) + +type PreparePeripheralToolsBinaryAct struct { + *subcmd.BaseOptions + Service peripheraltools.PrepareBinary +} + +const PreparePeripheralToolsBinary = `prepare-peripheraltools-binary` + +func NewPreparePeripheralToolsBinaryCommand() *cobra.Command { + act := PreparePeripheralToolsBinaryAct{ + BaseOptions: subcmd.GBaseOptions, + } + cmd := &cobra.Command{ + Use: PreparePeripheralToolsBinary, + Short: "部署周边工具二进制", + Example: fmt.Sprintf( + `dbactuator mysql %s %s %s`, + PreparePeripheralToolsBinary, + subcmd.CmdBaseExampleStr, + subcmd.ToPrettyJson(act.Service.Example()), + ), + Run: func(cmd *cobra.Command, args []string) { + util.CheckErr(act.Validate()) + util.CheckErr(act.Init()) + util.CheckErr(act.Run()) + }, + } + + return cmd +} + +func (c *PreparePeripheralToolsBinaryAct) Validate() (err error) { + return c.BaseOptions.Validate() +} + +func (c *PreparePeripheralToolsBinaryAct) Init() (err error) { + if err = c.Deserialize(&c.Service.Params); err != nil { + logger.Error("DeserializeAndValidate err %s", err.Error()) + return err + } + c.Service.GeneralParam = subcmd.GeneralRuntimeParam + logger.Info("extend params: %s", c.Service.Params) + return nil +} + +func (c *PreparePeripheralToolsBinaryAct) Run() (err error) { + steps := subcmd.Steps{ + { + FunName: "准备周边工具二进制", + Func: c.Service.Run, + }, + } + if err := steps.Run(); err != nil { + return err + } + logger.Info("准备周边工具二进制完成") + 
return nil +} diff --git a/dbm-services/mysql/db-tools/dbactuator/internal/subcmd/mysqlcmd/push_backup_client_config.go b/dbm-services/mysql/db-tools/dbactuator/internal/subcmd/mysqlcmd/push_backup_client_config.go index 33771e09cc..a4e5f386e9 100644 --- a/dbm-services/mysql/db-tools/dbactuator/internal/subcmd/mysqlcmd/push_backup_client_config.go +++ b/dbm-services/mysql/db-tools/dbactuator/internal/subcmd/mysqlcmd/push_backup_client_config.go @@ -1,75 +1,67 @@ package mysqlcmd -import ( - "dbm-services/common/go-pubpkg/logger" - "dbm-services/mysql/db-tools/dbactuator/internal/subcmd" - "dbm-services/mysql/db-tools/dbactuator/pkg/components/peripheraltools/backup_client" - "dbm-services/mysql/db-tools/dbactuator/pkg/util" - "fmt" +// 没人用 - "github.com/spf13/cobra" -) - -type PushBackupClientConfigAct struct { - *subcmd.BaseOptions - Service backup_client.BackupClientComp -} - -const CommandPushBackupClientConfig = `push-backup-client-config` - -func NewPushBackupClientConfigCommand() *cobra.Command { - act := PushBackupClientConfigAct{ - BaseOptions: subcmd.GBaseOptions, - } - cmd := &cobra.Command{ - Use: CommandPushBackupClientConfig, - Short: "推送 backup_client 配置", - Example: fmt.Sprintf( - `dbactuator mysql %s %s %s`, CommandPushBackupClientConfig, - subcmd.CmdBaseExampleStr, - subcmd.ToPrettyJson(act.Service.Example()), - ), - Run: func(cmd *cobra.Command, args []string) { - util.CheckErr(act.Validate()) - util.CheckErr(act.Init()) - util.CheckErr(act.Run()) - }, - } - return cmd -} - -func (d *PushBackupClientConfigAct) Init() (err error) { - if err = d.Deserialize(&d.Service.Params); err != nil { - logger.Error("DeserializeAndValidate err %s", err.Error()) - return err - } - d.Service.GeneralParam = subcmd.GeneralRuntimeParam - return nil -} - -func (d *PushBackupClientConfigAct) Run() (err error) { - steps := subcmd.Steps{ - { - FunName: "init", - Func: d.Service.Init, - }, - { - FunName: "预检查", - Func: d.Service.PreCheck, - }, - { - FunName: "渲染 
config.toml", - Func: d.Service.GenerateBinaryConfig, - }, - { - FunName: "生成 cosinfo.toml", - Func: d.Service.GenerateBucketConfig, - }, - } - - if err := steps.Run(); err != nil { - return err - } - logger.Info("push backup_client config successfully") - return nil -} +//type PushBackupClientConfigAct struct { +// *subcmd.BaseOptions +// Service backup_client.BackupClientComp +//} +// +//const CommandPushBackupClientConfig = `push-backup-client-config` +// +//func NewPushBackupClientConfigCommand() *cobra.Command { +// act := PushBackupClientConfigAct{ +// BaseOptions: subcmd.GBaseOptions, +// } +// cmd := &cobra.Command{ +// Use: CommandPushBackupClientConfig, +// Short: "推送 backup_client 配置", +// Example: fmt.Sprintf( +// `dbactuator mysql %s %s %s`, CommandPushBackupClientConfig, +// subcmd.CmdBaseExampleStr, +// subcmd.ToPrettyJson(act.Service.Example()), +// ), +// Run: func(cmd *cobra.Command, args []string) { +// util.CheckErr(act.Validate()) +// util.CheckErr(act.Init()) +// util.CheckErr(act.Run()) +// }, +// } +// return cmd +//} +// +//func (d *PushBackupClientConfigAct) Init() (err error) { +// if err = d.Deserialize(&d.Service.Params); err != nil { +// logger.Error("DeserializeAndValidate err %s", err.Error()) +// return err +// } +// d.Service.GeneralParam = subcmd.GeneralRuntimeParam +// return nil +//} +// +//func (d *PushBackupClientConfigAct) Run() (err error) { +// steps := subcmd.Steps{ +// { +// FunName: "init", +// Func: d.Service.Init, +// }, +// { +// FunName: "预检查", +// Func: d.Service.PreCheck, +// }, +// { +// FunName: "渲染 config.toml", +// Func: d.Service.GenerateBinaryConfig, +// }, +// { +// FunName: "生成 cosinfo.toml", +// Func: d.Service.GenerateBucketConfig, +// }, +// } +// +// if err := steps.Run(); err != nil { +// return err +// } +// logger.Info("push backup_client config successfully") +// return nil +//} diff --git a/dbm-services/mysql/db-tools/dbactuator/internal/subcmd/mysqlcmd/push_checksum_config.go 
b/dbm-services/mysql/db-tools/dbactuator/internal/subcmd/mysqlcmd/push_checksum_cnf.go similarity index 100% rename from dbm-services/mysql/db-tools/dbactuator/internal/subcmd/mysqlcmd/push_checksum_config.go rename to dbm-services/mysql/db-tools/dbactuator/internal/subcmd/mysqlcmd/push_checksum_cnf.go diff --git a/dbm-services/mysql/db-tools/dbactuator/internal/subcmd/mysqlcmd/push_dbbackup_config.go b/dbm-services/mysql/db-tools/dbactuator/internal/subcmd/mysqlcmd/push_dbbackup_cnf.go similarity index 100% rename from dbm-services/mysql/db-tools/dbactuator/internal/subcmd/mysqlcmd/push_dbbackup_config.go rename to dbm-services/mysql/db-tools/dbactuator/internal/subcmd/mysqlcmd/push_dbbackup_cnf.go diff --git a/dbm-services/mysql/db-tools/dbactuator/internal/subcmd/mysqlcmd/push_exporter_cnf.go b/dbm-services/mysql/db-tools/dbactuator/internal/subcmd/mysqlcmd/push_exporter_cnf.go new file mode 100644 index 0000000000..5644af06bb --- /dev/null +++ b/dbm-services/mysql/db-tools/dbactuator/internal/subcmd/mysqlcmd/push_exporter_cnf.go @@ -0,0 +1,68 @@ +package mysqlcmd + +import ( + "dbm-services/common/go-pubpkg/logger" + "dbm-services/mysql/db-tools/dbactuator/internal/subcmd" + "dbm-services/mysql/db-tools/dbactuator/pkg/components/peripheraltools/exporter" + "dbm-services/mysql/db-tools/dbactuator/pkg/util" + "fmt" + + "github.com/spf13/cobra" +) + +type PushExporterCnfAct struct { + *subcmd.BaseOptions + Service exporter.PushCnfComp +} + +const PushExporterCnf = `push-exporter-cnf` + +func NewPushExporterCnfCommand() *cobra.Command { + act := PushExporterCnfAct{ + BaseOptions: subcmd.GBaseOptions, + } + cmd := &cobra.Command{ + Use: PushExporterCnf, + Short: "push exporter cnf", + Example: fmt.Sprintf( + `dbactuator mysql %s %s %s`, + PushExporterCnf, + subcmd.CmdBaseExampleStr, + subcmd.ToPrettyJson(act.Service.Example()), + ), + Run: func(cmd *cobra.Command, args []string) { + util.CheckErr(act.Validate()) + util.CheckErr(act.Init()) + util.CheckErr(act.Run()) 
+ }, + } + return cmd +} + +func (c *PushExporterCnfAct) Validate() (err error) { + return c.BaseOptions.Validate() +} + +func (c *PushExporterCnfAct) Init() (err error) { + if err = c.Deserialize(&c.Service.Params); err != nil { + logger.Error("DeserializeAndValidate err %s", err.Error()) + return err + } + c.Service.GeneralParam = subcmd.GeneralRuntimeParam + logger.Info("extend params: %s", c.Service.Params) + return nil +} + +func (c *PushExporterCnfAct) Run() (err error) { + steps := subcmd.Steps{ + { + FunName: "推送配置", + Func: c.Service.Run, + }, + } + if err := steps.Run(); err != nil { + return err + } + logger.Info("推送 exporter 配置完成") + return nil +} diff --git a/dbm-services/mysql/db-tools/dbactuator/internal/subcmd/mysqlcmd/push_monitor_config.go b/dbm-services/mysql/db-tools/dbactuator/internal/subcmd/mysqlcmd/push_monitor_cnf.go similarity index 95% rename from dbm-services/mysql/db-tools/dbactuator/internal/subcmd/mysqlcmd/push_monitor_config.go rename to dbm-services/mysql/db-tools/dbactuator/internal/subcmd/mysqlcmd/push_monitor_cnf.go index b957dad3d5..bd5b0a0551 100644 --- a/dbm-services/mysql/db-tools/dbactuator/internal/subcmd/mysqlcmd/push_monitor_config.go +++ b/dbm-services/mysql/db-tools/dbactuator/internal/subcmd/mysqlcmd/push_monitor_cnf.go @@ -68,10 +68,6 @@ func (c *PushMySQLMonitorConfigAct) Run() (err error) { FunName: "生成监控项配置", Func: c.Service.GenerateItemsConfig, }, - { - FunName: "生成exporter配置文件", - Func: c.Service.GenerateExporterConfig, - }, { FunName: "重载配置", Func: c.Service.AddToCrond, diff --git a/dbm-services/mysql/db-tools/dbactuator/internal/subcmd/mysqlcmd/push_mysql_crond_config.go b/dbm-services/mysql/db-tools/dbactuator/internal/subcmd/mysqlcmd/push_mysql_crond_cnf.go similarity index 100% rename from dbm-services/mysql/db-tools/dbactuator/internal/subcmd/mysqlcmd/push_mysql_crond_config.go rename to dbm-services/mysql/db-tools/dbactuator/internal/subcmd/mysqlcmd/push_mysql_crond_cnf.go diff --git 
a/dbm-services/mysql/db-tools/dbactuator/internal/subcmd/mysqlcmd/push_mysql_rotatebinlog_config.go b/dbm-services/mysql/db-tools/dbactuator/internal/subcmd/mysqlcmd/push_mysql_rotatebinlog_cnf.go similarity index 100% rename from dbm-services/mysql/db-tools/dbactuator/internal/subcmd/mysqlcmd/push_mysql_rotatebinlog_config.go rename to dbm-services/mysql/db-tools/dbactuator/internal/subcmd/mysqlcmd/push_mysql_rotatebinlog_cnf.go diff --git a/dbm-services/mysql/db-tools/dbactuator/internal/subcmd/proxycmd/cmd.go b/dbm-services/mysql/db-tools/dbactuator/internal/subcmd/proxycmd/cmd.go index 48eef92790..18351ca6c7 100644 --- a/dbm-services/mysql/db-tools/dbactuator/internal/subcmd/proxycmd/cmd.go +++ b/dbm-services/mysql/db-tools/dbactuator/internal/subcmd/proxycmd/cmd.go @@ -28,6 +28,7 @@ func NewMysqlProxyCommand() *cobra.Command { NewCloneProxyUserCommand(), NewRestartProxyCommand(), NewMySQLProxyUpgradeAct(), + NewStandardizeProxyCommand(), }, }, } diff --git a/dbm-services/mysql/db-tools/dbactuator/internal/subcmd/proxycmd/standardize_proxy.go b/dbm-services/mysql/db-tools/dbactuator/internal/subcmd/proxycmd/standardize_proxy.go new file mode 100644 index 0000000000..1e930c57ea --- /dev/null +++ b/dbm-services/mysql/db-tools/dbactuator/internal/subcmd/proxycmd/standardize_proxy.go @@ -0,0 +1,72 @@ +package proxycmd + +import ( + "dbm-services/common/go-pubpkg/logger" + "dbm-services/mysql/db-tools/dbactuator/internal/subcmd" + "dbm-services/mysql/db-tools/dbactuator/pkg/components/mysql_proxy" + "dbm-services/mysql/db-tools/dbactuator/pkg/util" + "fmt" + + "github.com/spf13/cobra" +) + +type StandardizeProxyAct struct { + *subcmd.BaseOptions + Service mysql_proxy.StandardizeProxyComp +} + +const StandardizeProxy = `standardize-proxy` + +func NewStandardizeProxyCommand() *cobra.Command { + act := StandardizeProxyAct{ + BaseOptions: subcmd.GBaseOptions, + } + cmd := &cobra.Command{ + Use: StandardizeProxy, + Short: "standard proxy commands", + Example: 
fmt.Sprintf( + `dbactuator proxy %s %s %s`, + StandardizeProxy, + subcmd.CmdBaseExampleStr, + subcmd.ToPrettyJson(act.Service.Example()), + ), + Run: func(cmd *cobra.Command, args []string) { + util.CheckErr(act.Validate()) + util.CheckErr(act.Init()) + util.CheckErr(act.Run()) + }, + } + return cmd +} + +func (c *StandardizeProxyAct) Validate() (err error) { + return c.BaseOptions.Validate() +} + +func (c *StandardizeProxyAct) Init() (err error) { + if err = c.Deserialize(&c.Service.Params); err != nil { + logger.Error("DeserializeAndValidate err %s", err.Error()) + return err + } + c.Service.GeneralParam = subcmd.GeneralRuntimeParam + logger.Info("extend params: %s", c.Service.Params) + return nil +} + +func (c *StandardizeProxyAct) Run() (err error) { + steps := subcmd.Steps{ + { + FunName: "清理旧 crontab", + Func: c.Service.ClearOldCrontab, + }, + { + FunName: "标准化白名单", + Func: c.Service.AddUser, + }, + } + if err := steps.Run(); err != nil { + return err + } + logger.Info("标准化 proxy 完成") + return nil +} diff --git a/dbm-services/mysql/db-tools/dbactuator/pkg/components/crontab/clear_crontab.go b/dbm-services/mysql/db-tools/dbactuator/pkg/components/crontab/clear_crontab.go index ac1b82e94d..063ba31a1e 100644 --- a/dbm-services/mysql/db-tools/dbactuator/pkg/components/crontab/clear_crontab.go +++ b/dbm-services/mysql/db-tools/dbactuator/pkg/components/crontab/clear_crontab.go @@ -43,7 +43,6 @@ func (u *ClearCrontabParam) CleanCrontab() (err error) { err = cmd.Run() if err != nil { logger.Error("remove mysql-crond keep alive crontab failed: %s", err.Error()) - return err } logger.Info("remove mysql-crond keep alive crontab success") diff --git a/dbm-services/mysql/db-tools/dbactuator/pkg/components/mysql/clear_instance_config.go b/dbm-services/mysql/db-tools/dbactuator/pkg/components/mysql/clear_instance_config.go index b5577f9c28..095312d1b5 100644 --- a/dbm-services/mysql/db-tools/dbactuator/pkg/components/mysql/clear_instance_config.go +++ 
b/dbm-services/mysql/db-tools/dbactuator/pkg/components/mysql/clear_instance_config.go @@ -175,11 +175,11 @@ func (c *ClearInstanceConfigComp) clearChecksum() (err error) { err = unInstallTableChecksum.Run() if err != nil { - logger.Error( + logger.Warn( "run %s failed: %s, %s", unInstallTableChecksum, err.Error(), stderr.String(), ) - return err + //return err } logger.Info("run %s success: %s", unInstallTableChecksum, stdout.String()) } @@ -203,8 +203,8 @@ func (c *ClearInstanceConfigComp) clearRotateBinlog() (err error) { _, err = osutil.ExecShellCommand(false, cmd) if err != nil { - logger.Error("remove rotate binlog config failed: %s", err.Error()) - return err + logger.Warn("remove rotate binlog config failed: %s", err.Error()) + //return err } logger.Info("remove rotate binlog config success [%s]", clearPortString) return nil @@ -232,11 +232,11 @@ func (c *ClearInstanceConfigComp) clearMySQLMonitor() (err error) { err = unInstallMySQLMonitorCmd.Run() if err != nil { - logger.Error( + logger.Warn( "run %s failed: %s, %s", unInstallMySQLMonitorCmd, err.Error(), stderr.String(), ) - return err + //return err } logger.Info("run %s success: %s", unInstallMySQLMonitorCmd, stdout.String()) } diff --git a/dbm-services/mysql/db-tools/dbactuator/pkg/components/mysql/install_dba_toolkit.go b/dbm-services/mysql/db-tools/dbactuator/pkg/components/mysql/install_dba_toolkit.go index 3e9a370797..699e83c3e0 100644 --- a/dbm-services/mysql/db-tools/dbactuator/pkg/components/mysql/install_dba_toolkit.go +++ b/dbm-services/mysql/db-tools/dbactuator/pkg/components/mysql/install_dba_toolkit.go @@ -1,71 +1,71 @@ package mysql -import ( - "fmt" - - "dbm-services/common/go-pubpkg/logger" - "dbm-services/mysql/db-tools/dbactuator/pkg/components" - "dbm-services/mysql/db-tools/dbactuator/pkg/core/cst" - "dbm-services/mysql/db-tools/dbactuator/pkg/util/osutil" -) - -// InstallDBAToolkitComp 基本结构 -type InstallDBAToolkitComp struct { - Params InstallDBAToolkitParam `json:"extend"` -} - 
-// InstallDBAToolkitParam 输入参数 -type InstallDBAToolkitParam struct { - components.Medium - // 发起执行actor的用户,仅用于审计 - ExecUser string `json:"exec_user"` -} - -// Init 初始化 -func (c *InstallDBAToolkitComp) Init() (err error) { - return nil -} - -// PreCheck 预检查 -func (c *InstallDBAToolkitComp) PreCheck() (err error) { - if err = c.Params.Medium.Check(); err != nil { - logger.Error("check dbatoolkit pkg failed: %s", err.Error()) - return err - } - return nil -} - -// DeployBinary 部署 rotate_binlog -func (c *InstallDBAToolkitComp) DeployBinary() (err error) { - decompressCmd := fmt.Sprintf( - `tar zxf %s -C %s`, - c.Params.Medium.GetAbsolutePath(), cst.MYSQL_TOOL_INSTALL_PATH, - ) - _, err = osutil.ExecShellCommand(false, decompressCmd) - if err != nil { - logger.Error("decompress dbatoolkit pkg failed: %s", err.Error()) - return err - } - - chownCmd := fmt.Sprintf(`chown -R mysql %s`, cst.DBAToolkitPath) - _, err = osutil.ExecShellCommand(false, chownCmd) - if err != nil { - logger.Error("chown %s to mysql failed: %s", cst.DBAToolkitPath, err.Error()) - return err - } - - return nil -} - -// Example 样例 -func (c *InstallDBAToolkitComp) Example() interface{} { - return InstallDBAToolkitComp{ - Params: InstallDBAToolkitParam{ - Medium: components.Medium{ - Pkg: "dba-toolkit.tar.gz", - PkgMd5: "12345", - }, - ExecUser: "sys", - }, - } -} +//import ( +// "fmt" +// +// "dbm-services/common/go-pubpkg/logger" +// "dbm-services/mysql/db-tools/dbactuator/pkg/components" +// "dbm-services/mysql/db-tools/dbactuator/pkg/core/cst" +// "dbm-services/mysql/db-tools/dbactuator/pkg/util/osutil" +//) +// +//// InstallDBAToolkitComp 基本结构 +//type InstallDBAToolkitComp struct { +// Params InstallDBAToolkitParam `json:"extend"` +//} +// +//// InstallDBAToolkitParam 输入参数 +//type InstallDBAToolkitParam struct { +// components.Medium +// // 发起执行actor的用户,仅用于审计 +// ExecUser string `json:"exec_user"` +//} +// +//// Init 初始化 +//func (c *InstallDBAToolkitComp) Init() (err error) { +// return nil +//} 
+// +//// PreCheck 预检查 +//func (c *InstallDBAToolkitComp) PreCheck() (err error) { +// if err = c.Params.Medium.Check(); err != nil { +// logger.Error("check dbatoolkit pkg failed: %s", err.Error()) +// return err +// } +// return nil +//} +// +//// DeployBinary 部署 rotate_binlog +//func (c *InstallDBAToolkitComp) DeployBinary() (err error) { +// decompressCmd := fmt.Sprintf( +// `tar zxf %s -C %s`, +// c.Params.Medium.GetAbsolutePath(), cst.MYSQL_TOOL_INSTALL_PATH, +// ) +// _, err = osutil.ExecShellCommand(false, decompressCmd) +// if err != nil { +// logger.Error("decompress dbatoolkit pkg failed: %s", err.Error()) +// return err +// } +// +// chownCmd := fmt.Sprintf(`chown -R mysql %s`, cst.DBAToolkitPath) +// _, err = osutil.ExecShellCommand(false, chownCmd) +// if err != nil { +// logger.Error("chown %s to mysql failed: %s", cst.DBAToolkitPath, err.Error()) +// return err +// } +// +// return nil +//} +// +//// Example 样例 +//func (c *InstallDBAToolkitComp) Example() interface{} { +// return InstallDBAToolkitComp{ +// Params: InstallDBAToolkitParam{ +// Medium: components.Medium{ +// Pkg: "dba-toolkit.tar.gz", +// PkgMd5: "12345", +// }, +// ExecUser: "sys", +// }, +// } +//} diff --git a/dbm-services/mysql/db-tools/dbactuator/pkg/components/mysql/install_mysql.go b/dbm-services/mysql/db-tools/dbactuator/pkg/components/mysql/install_mysql.go index 01871bfad1..335b367d26 100644 --- a/dbm-services/mysql/db-tools/dbactuator/pkg/components/mysql/install_mysql.go +++ b/dbm-services/mysql/db-tools/dbactuator/pkg/components/mysql/install_mysql.go @@ -518,6 +518,23 @@ func (i *InstallMySQLComp) generateMycnfOnePort(port Port, tmplFileName string) return err } + // 先暂时不返回错误 + if port == 3306 { + logger.Info("try to make link") + if _, err := osutil.ExecShellCommand( + false, + fmt.Sprintf("rm -f /etc/my.cnf"), + ); err != nil { + logger.Error("rm -f /etc/my.cnf failed: %s", err.Error()) + } + logger.Info("try to rm /etc/my.cnf success") + + if _, err := 
osutil.ExecShellCommand( + false, fmt.Sprintf("ln -s %s /etc/my.cnf", cnf)); err != nil { + logger.Error("ln -s %s /etc/my.cnf failed: ", cnf, err.Error()) + } + logger.Info("make link success") + } return nil } @@ -962,6 +979,7 @@ func (i *InstallMySQLComp) InitDefaultPrivAndSchemaWithResetMaster() (err error) logger.Info("tdbctl port %d need tc_admin=0, binlog_format=off", port) initAccountSqls = append(initAccountSqls, "set session tc_admin=0;", "set session sql_log_bin=off;") initAccountSqls = append(initAccountSqls, i.generateDefaultMysqlAccount(version)...) + logger.Info("tdbctl init account sqls: ", initAccountSqls) default: // 默认按照mysql的初始化权限的方式 initAccountSqls = i.generateDefaultMysqlAccount(version) @@ -972,6 +990,7 @@ func (i *InstallMySQLComp) InitDefaultPrivAndSchemaWithResetMaster() (err error) initAccountSqls = append(initAccountSqls, "reset master;") } + logger.Info("init account sqls: ", initAccountSqls) if _, err := dbWork.ExecMore(initAccountSqls); err != nil { logger.Error("flush privileges failed for %d %v", port, err) return err diff --git a/dbm-services/mysql/db-tools/dbactuator/pkg/components/mysql_proxy/standardize_proxy.go b/dbm-services/mysql/db-tools/dbactuator/pkg/components/mysql_proxy/standardize_proxy.go new file mode 100644 index 0000000000..210d543639 --- /dev/null +++ b/dbm-services/mysql/db-tools/dbactuator/pkg/components/mysql_proxy/standardize_proxy.go @@ -0,0 +1,80 @@ +package mysql_proxy + +import ( + "dbm-services/common/go-pubpkg/logger" + "dbm-services/mysql/db-tools/dbactuator/pkg/components" + "dbm-services/mysql/db-tools/dbactuator/pkg/components/mysql/common" + "dbm-services/mysql/db-tools/dbactuator/pkg/native" + "dbm-services/mysql/db-tools/dbactuator/pkg/util/osutil" + "fmt" +) + +type StandardizeProxyComp struct { + GeneralParam *components.GeneralParam `json:"general"` + Params *StandardizeProxyParam `json:"extend"` +} + +type StandardizeProxyParam struct { + DBHAAccount string `json:"dbha_account"` + IP string 
`json:"ip"` + PortList []int `json:"port_list"` +} + +func (c *StandardizeProxyComp) ClearOldCrontab() error { + err := osutil.CleanLocalCrontab() + if err != nil { + logger.Error("clear mysql crontab failed: %s", err.Error()) + return err + } else { + logger.Info("clear mysql crontab success") + } + return nil +} + +func (c *StandardizeProxyComp) AddUser() error { + for _, port := range c.Params.PortList { + err := c.addOnePort(port) + if err != nil { + return err + } + } + return nil +} + +func (c *StandardizeProxyComp) addOnePort(port int) error { + pc, err := native.NewDbWorkerNoPing( + fmt.Sprintf(`%s:%d`, c.Params.IP, native.GetProxyAdminPort(port)), + c.GeneralParam.RuntimeAccountParam.ProxyAdminUser, + c.GeneralParam.RuntimeAccountParam.ProxyAdminPwd, + ) + if err != nil { + logger.Error(err.Error()) + return err + } + defer func() { + pc.Stop() + }() + + _, err = pc.Exec(fmt.Sprintf(`refresh_users('%s@%%', '+')`, c.Params.DBHAAccount)) + if err != nil { + logger.Error("add dbha account failed %s", err.Error()) + return err + } + + return nil +} + +func (c *StandardizeProxyComp) Example() interface{} { + return StandardizeProxyComp{ + GeneralParam: &components.GeneralParam{ + RuntimeAccountParam: components.RuntimeAccountParam{ + MySQLAccountParam: common.AccountMonitorExample, + }, + }, + Params: &StandardizeProxyParam{ + DBHAAccount: "dbha_account", + IP: "127.0.0.1", + PortList: []int{1, 2, 3}, + }, + } +} diff --git a/dbm-services/mysql/db-tools/dbactuator/pkg/components/peripheraltools/checksum/add_crond.go b/dbm-services/mysql/db-tools/dbactuator/pkg/components/peripheraltools/checksum/add_crond.go index eb91079417..b2a07c7895 100644 --- a/dbm-services/mysql/db-tools/dbactuator/pkg/components/peripheraltools/checksum/add_crond.go +++ b/dbm-services/mysql/db-tools/dbactuator/pkg/components/peripheraltools/checksum/add_crond.go @@ -16,10 +16,10 @@ func (c *MySQLChecksumComp) AddToCrond() (err error) { return err } - for _, inst := range 
c.Params.InstancesInfo { + for _, port := range c.Params.Ports { configPath := filepath.Join( cst.ChecksumInstallPath, - fmt.Sprintf("checksum_%d.yaml", inst.Port), + fmt.Sprintf("checksum_%d.yaml", port), ) err = internal.RegisterCrond(mysqlTableChecksum, configPath, c.Params.ExecUser) @@ -28,5 +28,6 @@ func (c *MySQLChecksumComp) AddToCrond() (err error) { return err } } + return nil } diff --git a/dbm-services/mysql/db-tools/dbactuator/pkg/components/peripheraltools/checksum/binary.go b/dbm-services/mysql/db-tools/dbactuator/pkg/components/peripheraltools/checksum/binary.go index 9baf465ce2..6f00235491 100644 --- a/dbm-services/mysql/db-tools/dbactuator/pkg/components/peripheraltools/checksum/binary.go +++ b/dbm-services/mysql/db-tools/dbactuator/pkg/components/peripheraltools/checksum/binary.go @@ -2,6 +2,7 @@ package checksum import ( "dbm-services/common/go-pubpkg/logger" + "dbm-services/mysql/db-tools/dbactuator/pkg/components" "dbm-services/mysql/db-tools/dbactuator/pkg/core/cst" "dbm-services/mysql/db-tools/dbactuator/pkg/util/osutil" "fmt" @@ -9,7 +10,7 @@ import ( "path/filepath" ) -func (c *MySQLChecksumComp) DeployBinary() (err error) { +func DeployBinary(medium *components.Medium) (err error) { err = os.MkdirAll(cst.ChecksumInstallPath, 0755) if err != nil { logger.Error("mkdir %s failed: %s", cst.ChecksumInstallPath, err.Error()) @@ -18,7 +19,7 @@ func (c *MySQLChecksumComp) DeployBinary() (err error) { decompressCmd := fmt.Sprintf( `tar zxf %s -C %s`, - c.Params.Medium.GetAbsolutePath(), cst.ChecksumInstallPath, + medium.GetAbsolutePath(), cst.ChecksumInstallPath, ) _, err = osutil.ExecShellCommand(false, decompressCmd) if err != nil { diff --git a/dbm-services/mysql/db-tools/dbactuator/pkg/components/peripheraltools/checksum/example.go b/dbm-services/mysql/db-tools/dbactuator/pkg/components/peripheraltools/checksum/example.go index 213e1292fb..5350973c8b 100644 --- 
a/dbm-services/mysql/db-tools/dbactuator/pkg/components/peripheraltools/checksum/example.go +++ b/dbm-services/mysql/db-tools/dbactuator/pkg/components/peripheraltools/checksum/example.go @@ -3,7 +3,6 @@ package checksum import ( "dbm-services/mysql/db-tools/dbactuator/pkg/components" "dbm-services/mysql/db-tools/dbactuator/pkg/components/mysql/common" - "dbm-services/mysql/db-tools/dbactuator/pkg/components/peripheraltools/internal" "dbm-services/mysql/db-tools/dbactuator/pkg/native" ) @@ -15,28 +14,17 @@ func (c *MySQLChecksumComp) Example() interface{} { }, }, Params: &MySQLChecksumParam{ - Medium: components.Medium{ - Pkg: "mysql-table-checksum.tar.gz", - PkgMd5: "12345", - }, - SystemDbs: native.DBSys, - ExecUser: "whoru", - ApiUrl: "http://x.x.x.x:yyyy", - InstancesInfo: []*instanceInfo{ - { - internal.InstanceInfo{ - BkBizId: 0, - Ip: "", - Port: 0, - Role: "", - ClusterId: 0, - ImmuteDomain: "", - BkInstanceId: 0, - DBModuleId: 0, - }, - "", - }, - }, + SystemDbs: native.DBSys, + ExecUser: "whoru", + ApiUrl: "http://x.x.x.x:yyyy", + BkBizId: 0, + IP: "127.0.0.1", + Ports: []int{3306, 3307}, + Role: "", + ClusterId: 0, + ImmuteDomain: "", + DBModuleId: 0, + Schedule: "0 5 2 * * * 1-5", StageDBHeader: "stage_header", RollbackDBTail: "rollback", }, diff --git a/dbm-services/mysql/db-tools/dbactuator/pkg/components/peripheraltools/checksum/init.go b/dbm-services/mysql/db-tools/dbactuator/pkg/components/peripheraltools/checksum/init.go index b424720318..7367c8a1c7 100644 --- a/dbm-services/mysql/db-tools/dbactuator/pkg/components/peripheraltools/checksum/init.go +++ b/dbm-services/mysql/db-tools/dbactuator/pkg/components/peripheraltools/checksum/init.go @@ -22,23 +22,26 @@ type MySQLChecksumComp struct { func (c *MySQLChecksumComp) Init() (err error) { c.tools = tools.NewToolSetWithPickNoValidate(tools.ToolMysqlTableChecksum, tools.ToolPtTableChecksum) - err = c.Params.Medium.Check() - if err != nil { - logger.Error(err.Error()) - return err - } 
logger.Info("install checksum init success") return nil } type MySQLChecksumParam struct { - components.Medium - SystemDbs []string `json:"system_dbs"` - ExecUser string `json:"exec_user"` - ApiUrl string `json:"api_url"` - InstancesInfo []*instanceInfo `json:"instances_info"` - StageDBHeader string `json:"stage_db_header"` - RollbackDBTail string `json:"rollback_db_tail"` + //components.Medium + BkBizId int `json:"bk_biz_id"` + IP string `json:"ip"` + Ports []int `json:"port_list"` + Role string `json:"role"` + ClusterId int `json:"cluster_id"` + ImmuteDomain string `json:"immute_domain"` + DBModuleId int `json:"db_module_id"` + Schedule string `json:"schedule"` + SystemDbs []string `json:"system_dbs"` + ExecUser string `json:"exec_user"` + ApiUrl string `json:"api_url"` + //InstancesInfo []*instanceInfo `json:"instances_info"` + StageDBHeader string `json:"stage_db_header"` + RollbackDBTail string `json:"rollback_db_tail"` } type instanceInfo struct { diff --git a/dbm-services/mysql/db-tools/dbactuator/pkg/components/peripheraltools/checksum/runtime_config.go b/dbm-services/mysql/db-tools/dbactuator/pkg/components/peripheraltools/checksum/runtime_config.go index d632c8c5e6..7c17ab85e9 100644 --- a/dbm-services/mysql/db-tools/dbactuator/pkg/components/peripheraltools/checksum/runtime_config.go +++ b/dbm-services/mysql/db-tools/dbactuator/pkg/components/peripheraltools/checksum/runtime_config.go @@ -5,18 +5,16 @@ import ( "path/filepath" "dbm-services/common/go-pubpkg/logger" - "dbm-services/mysql/db-tools/dbactuator/pkg/components" "dbm-services/mysql/db-tools/dbactuator/pkg/components/peripheraltools/internal" "dbm-services/mysql/db-tools/dbactuator/pkg/core/cst" - "dbm-services/mysql/db-tools/dbactuator/pkg/tools" "gopkg.in/yaml.v2" ) func (c *MySQLChecksumComp) GenerateRuntimeConfig() (err error) { - for _, inst := range c.Params.InstancesInfo { - logger.Info("generating runtime config on %v", inst) - err = generateRuntimeConfigIns(c.Params, inst, 
&c.GeneralParam.RuntimeAccountParam, c.tools) + for _, port := range c.Params.Ports { + logger.Info("generating runtime config on %v", port) + err = c.generateRuntimeConfigIns(port) if err != nil { return err } @@ -24,19 +22,21 @@ func (c *MySQLChecksumComp) GenerateRuntimeConfig() (err error) { return nil } -func generateRuntimeConfigIns(mcp *MySQLChecksumParam, instance *instanceInfo, rtap *components.RuntimeAccountParam, tl *tools.ToolSet) (err error) { +func (c *MySQLChecksumComp) generateRuntimeConfigIns(port int) (err error) { logDir := filepath.Join(cst.ChecksumInstallPath, "logs") var ignoreDbs []string - ignoreDbs = append(ignoreDbs, mcp.SystemDbs...) - ignoreDbs = append(ignoreDbs, fmt.Sprintf(`%s%%`, mcp.StageDBHeader)) + ignoreDbs = append(ignoreDbs, c.Params.SystemDbs...) + ignoreDbs = append(ignoreDbs, fmt.Sprintf(`%s%%`, c.Params.StageDBHeader)) ignoreDbs = append(ignoreDbs, `bak_%`) // gcs/scr truncate header - ignoreDbs = append(ignoreDbs, fmt.Sprintf(`%%%s`, mcp.RollbackDBTail)) + ignoreDbs = append(ignoreDbs, fmt.Sprintf(`%%%s`, c.Params.RollbackDBTail)) cfg := NewRuntimeConfig( - instance.BkBizId, instance.ClusterId, instance.Port, - instance.Role, instance.Schedule, instance.ImmuteDomain, instance.Ip, - rtap.MonitorUser, rtap.MonitorPwd, mcp.ApiUrl, logDir, 2, tl) + c.Params.BkBizId, c.Params.ClusterId, port, + c.Params.Role, c.Params.Schedule, c.Params.ImmuteDomain, c.Params.IP, + c.GeneralParam.RuntimeAccountParam.MonitorUser, c.GeneralParam.RuntimeAccountParam.MonitorPwd, + c.Params.ApiUrl, logDir, 2, c.tools, + ) cfg.SetFilter(nil, ignoreDbs, nil, nil) b, err := yaml.Marshal(&cfg) @@ -47,7 +47,7 @@ func generateRuntimeConfigIns(mcp *MySQLChecksumParam, instance *instanceInfo, r logger.Info(string(b)) - cfgFilePath := filepath.Join(cst.ChecksumInstallPath, fmt.Sprintf("checksum_%d.yaml", instance.Port)) + cfgFilePath := filepath.Join(cst.ChecksumInstallPath, fmt.Sprintf("checksum_%d.yaml", port)) logger.Info(cfgFilePath) return 
internal.WriteConfig(cfgFilePath, b) diff --git a/dbm-services/mysql/db-tools/dbactuator/pkg/components/peripheraltools/crond/binary.go b/dbm-services/mysql/db-tools/dbactuator/pkg/components/peripheraltools/crond/binary.go index ec630133a7..493aedf93f 100644 --- a/dbm-services/mysql/db-tools/dbactuator/pkg/components/peripheraltools/crond/binary.go +++ b/dbm-services/mysql/db-tools/dbactuator/pkg/components/peripheraltools/crond/binary.go @@ -2,13 +2,18 @@ package crond import ( "dbm-services/common/go-pubpkg/logger" + "dbm-services/mysql/db-tools/dbactuator/pkg/components" "dbm-services/mysql/db-tools/dbactuator/pkg/core/cst" "dbm-services/mysql/db-tools/dbactuator/pkg/util/osutil" "fmt" "os" ) -func (c *MySQLCrondComp) DeployBinary() (err error) { +//func (c *MySQLCrondComp) DeployBinary() (err error) { +// return DeployBinary(&c.Params.Medium) +//} + +func DeployBinary(medium *components.Medium) (err error) { err = os.MkdirAll(cst.MySQLCrondInstallPath, 0755) if err != nil { logger.Error("mkdir %s failed: %s", cst.MySQLCrondInstallPath, err.Error()) @@ -17,7 +22,7 @@ func (c *MySQLCrondComp) DeployBinary() (err error) { decompressCmd := fmt.Sprintf( `tar zxf %s -C %s`, - c.Params.Medium.GetAbsolutePath(), cst.MySQLCrondInstallPath, + medium.GetAbsolutePath(), cst.MySQLCrondInstallPath, ) _, err = osutil.ExecShellCommand(false, decompressCmd) if err != nil { diff --git a/dbm-services/mysql/db-tools/dbactuator/pkg/components/peripheraltools/crond/example.go b/dbm-services/mysql/db-tools/dbactuator/pkg/components/peripheraltools/crond/example.go index dde36f2ae2..ba6f4064c8 100644 --- a/dbm-services/mysql/db-tools/dbactuator/pkg/components/peripheraltools/crond/example.go +++ b/dbm-services/mysql/db-tools/dbactuator/pkg/components/peripheraltools/crond/example.go @@ -13,16 +13,15 @@ func (c *MySQLCrondComp) Example() interface{} { }, }, Params: &MySQLCrondParam{ - Medium: components.Medium{ - Pkg: "mysql-crond.tar.gz", - PkgMd5: "12345", - }, Ip: "127.0.0.1", 
BkCloudId: 0, EventDataId: 123, EventDataToken: "abc", MetricsDataId: 456, MetricsDataToken: "xyz", + BeatPath: "/a/bc", + AgentAddress: "127.0.0.1", + BkBizId: 123, }, } } diff --git a/dbm-services/mysql/db-tools/dbactuator/pkg/components/peripheraltools/crond/init.go b/dbm-services/mysql/db-tools/dbactuator/pkg/components/peripheraltools/crond/init.go index 04d975e0f7..635af505c1 100644 --- a/dbm-services/mysql/db-tools/dbactuator/pkg/components/peripheraltools/crond/init.go +++ b/dbm-services/mysql/db-tools/dbactuator/pkg/components/peripheraltools/crond/init.go @@ -24,6 +24,8 @@ func (c *MySQLCrondComp) Init() error { return err } + // 这里不 return err + // 就是没下发nginx ip只打印一行日志, 不影响crond部署过程 if c.Params.NginxAddrs == nil || len(c.Params.NginxAddrs) <= 0 { err := fmt.Errorf("nginx addresses are required") logger.Error(err.Error()) diff --git a/dbm-services/mysql/db-tools/dbactuator/pkg/components/peripheraltools/dba_toolkit/init.go b/dbm-services/mysql/db-tools/dbactuator/pkg/components/peripheraltools/dba_toolkit/init.go new file mode 100644 index 0000000000..5e0cdbb43f --- /dev/null +++ b/dbm-services/mysql/db-tools/dbactuator/pkg/components/peripheraltools/dba_toolkit/init.go @@ -0,0 +1,30 @@ +package dba_toolkit + +import ( + "dbm-services/common/go-pubpkg/logger" + "dbm-services/mysql/db-tools/dbactuator/pkg/components" + "dbm-services/mysql/db-tools/dbactuator/pkg/core/cst" + "dbm-services/mysql/db-tools/dbactuator/pkg/util/osutil" + "fmt" +) + +func DeployBinary(medium *components.Medium) (err error) { + decompressCmd := fmt.Sprintf( + `tar zxf %s -C %s`, + medium.GetAbsolutePath(), cst.MYSQL_TOOL_INSTALL_PATH, + ) + _, err = osutil.ExecShellCommand(false, decompressCmd) + if err != nil { + logger.Error("decompress dbatoolkit pkg failed: %s", err.Error()) + return err + } + + chownCmd := fmt.Sprintf(`chown -R mysql %s`, cst.DBAToolkitPath) + _, err = osutil.ExecShellCommand(false, chownCmd) + if err != nil { + logger.Error("chown %s to mysql failed: %s", 
cst.DBAToolkitPath, err.Error()) + return err + } + + return nil +} diff --git a/dbm-services/mysql/db-tools/dbactuator/pkg/components/peripheraltools/dbbackup/add_crond.go b/dbm-services/mysql/db-tools/dbactuator/pkg/components/peripheraltools/dbbackup/add_crond.go index d4cc6a0195..7eb1d8524c 100644 --- a/dbm-services/mysql/db-tools/dbactuator/pkg/components/peripheraltools/dbbackup/add_crond.go +++ b/dbm-services/mysql/db-tools/dbactuator/pkg/components/peripheraltools/dbbackup/add_crond.go @@ -13,10 +13,6 @@ import ( ) func (c *NewDbBackupComp) AddCrontab() error { - if c.Params.UntarOnly { - logger.Info("untar_only=true do not need AddCrontab") - return nil - } if c.Params.ClusterType == cst.TendbCluster { return c.addCrontabSpider() } else { @@ -27,13 +23,13 @@ func (c *NewDbBackupComp) AddCrontab() error { func (c *NewDbBackupComp) addCrontabLegacy() (err error) { crondManager := ma.NewManager("http://127.0.0.1:9999") var jobItem ma.JobDefine - logFile := path.Join(c.installPath, "logs/main.log") + logFile := path.Join(cst.DbbackupGoInstallPath, "logs/main.log") jobItem = ma.JobDefine{ Name: "dbbackup-schedule", - Command: filepath.Join(c.installPath, "dbbackup_main.sh"), - WorkDir: c.installPath, + Command: filepath.Join(cst.DbbackupGoInstallPath, "dbbackup_main.sh"), + WorkDir: cst.DbbackupGoInstallPath, Args: []string{">", logFile, "2>&1"}, - Schedule: c.getInsHostCrontabTime(), + Schedule: c.Params.Options.CrontabTime, Creator: c.Params.ExecUser, Enable: true, } @@ -51,10 +47,10 @@ func (c *NewDbBackupComp) addCrontabSpider() (err error) { dbbackupConfFile := fmt.Sprintf("dbbackup.%d.ini", c.Params.Ports[0]) jobItem = ma.JobDefine{ Name: "spiderbackup-schedule", - Command: filepath.Join(c.installPath, "dbbackup"), - WorkDir: c.installPath, + Command: filepath.Join(cst.DbbackupGoInstallPath, "dbbackup"), + WorkDir: cst.DbbackupGoInstallPath, Args: []string{"spiderbackup", "schedule", "--config", dbbackupConfFile}, - Schedule: c.getInsHostCrontabTime(), + 
Schedule: c.Params.Options.CrontabTime, //c.getInsHostCrontabTime(), Creator: c.Params.ExecUser, Enable: true, } @@ -66,8 +62,8 @@ func (c *NewDbBackupComp) addCrontabSpider() (err error) { if !(c.Params.Role == cst.BackupRoleSpiderMnt || c.Params.Role == cst.BackupRoleSpiderSlave) { // MASTER,SLAVE,REPEATER jobItem = ma.JobDefine{ Name: "spiderbackup-check", - Command: filepath.Join(c.installPath, "dbbackup"), - WorkDir: c.installPath, + Command: filepath.Join(cst.DbbackupGoInstallPath, "dbbackup"), + WorkDir: cst.DbbackupGoInstallPath, Args: []string{"spiderbackup", "check", "--run"}, Schedule: "*/1 * * * *", Creator: c.Params.ExecUser, @@ -87,8 +83,8 @@ func (c *NewDbBackupComp) addCrontabOld() (err error) { if err != nil { return fmt.Errorf(`删除原备份crontab任务失败("dbbackup") get an error:%w`, err) } - entryShell := path.Join(c.installPath, "dbbackup_main.sh") - logfile := path.Join(c.installPath, "dbbackup.log") + entryShell := path.Join(cst.DbbackupGoInstallPath, "dbbackup_main.sh") + logfile := path.Join(cst.DbbackupGoInstallPath, "dbbackup.log") newCrontab = append( newCrontab, fmt.Sprintf( @@ -100,19 +96,9 @@ func (c *NewDbBackupComp) addCrontabOld() (err error) { newCrontab, fmt.Sprintf( "%s %s 1>>%s 2>&1\n", - c.getInsHostCrontabTime(), entryShell, logfile, + c.Params.Options.CrontabTime, entryShell, logfile, ), ) crontabStr := strings.Join(newCrontab, "\n") return osutil.AddCrontab(crontabStr) } - -func (c *NewDbBackupComp) getInsHostCrontabTime() string { - cronTime := "" - for _, opt := range c.Params.Options { - if opt.CrontabTime > cronTime { - cronTime = opt.CrontabTime - } - } - return cronTime -} diff --git a/dbm-services/mysql/db-tools/dbactuator/pkg/components/peripheraltools/dbbackup/backup_dir.go b/dbm-services/mysql/db-tools/dbactuator/pkg/components/peripheraltools/dbbackup/backup_dir.go index efe3b3b5e4..c95ab0f32d 100644 --- a/dbm-services/mysql/db-tools/dbactuator/pkg/components/peripheraltools/dbbackup/backup_dir.go +++ 
b/dbm-services/mysql/db-tools/dbactuator/pkg/components/peripheraltools/dbbackup/backup_dir.go @@ -13,10 +13,6 @@ import ( ) func (c *NewDbBackupComp) InitBackupDir() (err error) { - if c.Params.UntarOnly { - logger.Info("untar_only=true do not need InitBackupDir") - return nil - } backupdir := c.Params.Configs["Public"]["BackupDir"] if _, err := os.Stat(backupdir); os.IsNotExist(err) { logger.Warn("backup dir %s is not exist. will make it", backupdir) @@ -35,10 +31,6 @@ func (c *NewDbBackupComp) InitBackupDir() (err error) { } func (c *NewDbBackupComp) initReportDir() (err error) { - if c.Params.UntarOnly { - logger.Info("untar_only=true do not need initReportDir") - return nil - } // redis 会污染 /home/mysql/dbareport,建立成软连 if isLink, _ := cmutil.IsSymLinkFile(cst.DBAReportBase); isLink { _ = os.Remove(cst.DBAReportBase) diff --git a/dbm-services/mysql/db-tools/dbactuator/pkg/components/peripheraltools/dbbackup/binary.go b/dbm-services/mysql/db-tools/dbactuator/pkg/components/peripheraltools/dbbackup/binary.go index 06ca5edb54..2a4bc2e444 100644 --- a/dbm-services/mysql/db-tools/dbactuator/pkg/components/peripheraltools/dbbackup/binary.go +++ b/dbm-services/mysql/db-tools/dbactuator/pkg/components/peripheraltools/dbbackup/binary.go @@ -3,33 +3,39 @@ package dbbackup import ( "dbm-services/common/go-pubpkg/cmutil" "dbm-services/common/go-pubpkg/logger" + "dbm-services/mysql/db-tools/dbactuator/pkg/components" + "dbm-services/mysql/db-tools/dbactuator/pkg/core/cst" "dbm-services/mysql/db-tools/dbactuator/pkg/util/osutil" "fmt" "path/filepath" ) -func (c *NewDbBackupComp) DeployBinary() (err error) { - if err = c.Params.Medium.Check(); err != nil { +func DeployBinary(medium *components.Medium) (err error) { + if err = medium.Check(); err != nil { return err } + cmd := fmt.Sprintf( - "tar zxf %s -C %s && mkdir -p %s && chown -R mysql.mysql %s", c.Params.Medium.GetAbsolutePath(), - filepath.Dir(c.installPath), filepath.Join(c.installPath, "logs"), c.installPath, + "tar 
zxf %s -C %s && mkdir -p %s && chown -R mysql.mysql %s", + medium.GetAbsolutePath(), + filepath.Dir(cst.DbbackupGoInstallPath), + filepath.Join(cst.DbbackupGoInstallPath, "logs"), + cst.DbbackupGoInstallPath, ) output, err := osutil.ExecShellCommand(false, cmd) if err != nil { err = fmt.Errorf("execute %s error:%w,%s", cmd, err, output) return err } - return nil + return ChownGroup() } -func (c *NewDbBackupComp) ChownGroup() (err error) { +func ChownGroup() (err error) { // run dbbackup migrateold _, errStr, err := cmutil.ExecCommandReturnBytes( false, - c.installPath, - filepath.Join(c.installPath, "dbbackup"), + cst.DbbackupGoInstallPath, + filepath.Join(cst.DbbackupGoInstallPath, "dbbackup"), "migrateold", ) if err != nil { @@ -41,7 +47,7 @@ func (c *NewDbBackupComp) ChownGroup() (err error) { cmd := fmt.Sprintf( " chown -R mysql.mysql %s ; chmod +x %s/*.sh ; chmod +x %s/dbbackup", - filepath.Dir(c.installPath), c.installPath, c.installPath, + filepath.Dir(cst.DbbackupGoInstallPath), cst.DbbackupGoInstallPath, cst.DbbackupGoInstallPath, ) output, err := osutil.ExecShellCommand(false, cmd) if err != nil { @@ -50,3 +56,7 @@ func (c *NewDbBackupComp) ChownGroup() (err error) { } return nil } + +func (c *NewDbBackupComp) ChownGroup() (err error) { + return ChownGroup() +} diff --git a/dbm-services/mysql/db-tools/dbactuator/pkg/components/peripheraltools/dbbackup/example.go b/dbm-services/mysql/db-tools/dbactuator/pkg/components/peripheraltools/dbbackup/example.go index 09ff7d14d6..b7306bf9c9 100644 --- a/dbm-services/mysql/db-tools/dbactuator/pkg/components/peripheraltools/dbbackup/example.go +++ b/dbm-services/mysql/db-tools/dbactuator/pkg/components/peripheraltools/dbbackup/example.go @@ -1,26 +1,16 @@ package dbbackup -import "dbm-services/mysql/db-tools/dbactuator/pkg/components" - func (c *NewDbBackupComp) Example() interface{} { comp := NewDbBackupComp{ Params: &NewDbBackupParam{ - Medium: components.Medium{ - Pkg: "dbbackup-go.tar.gz", - PkgMd5: 
"90e5be347c606218b055a61f990ecdf4", - }, Host: "127.0.0.1", Ports: []int{20000, 20001}, - Options: map[int]BackupOptions{ - 20000: { - CrontabTime: "09:00:00", - BackupType: "logical", - Master: logicBackupDataOption{DataSchemaGrant: "grant"}, - Slave: logicBackupDataOption{DataSchemaGrant: "grant"}, - }, - 20001: { - BackupType: "physical", - }, + Options: &BackupOptions{ + + CrontabTime: "09:00:00", + BackupType: "logical", + Master: logicBackupDataOption{DataSchemaGrant: "grant"}, + Slave: logicBackupDataOption{DataSchemaGrant: "grant"}, }, Configs: map[string]map[string]string{ "Public": { @@ -38,9 +28,9 @@ func (c *NewDbBackupComp) Example() interface{} { "Throttle": "100", }, }, - Role: "slave", - ClusterAddress: map[int]string{20000: "testdb1.xx.a1.db", 20001: "testdb2.xx.a1.db"}, - ClusterId: map[int]int{20000: 111, 20001: 112}, + Role: "slave", + ImmuteDomain: "testdb1.xx.a1.db", + ClusterId: 112, }, } return comp diff --git a/dbm-services/mysql/db-tools/dbactuator/pkg/components/peripheraltools/dbbackup/init.go b/dbm-services/mysql/db-tools/dbactuator/pkg/components/peripheraltools/dbbackup/init.go index 642033deb2..1e1d9fbc18 100644 --- a/dbm-services/mysql/db-tools/dbactuator/pkg/components/peripheraltools/dbbackup/init.go +++ b/dbm-services/mysql/db-tools/dbactuator/pkg/components/peripheraltools/dbbackup/init.go @@ -4,11 +4,9 @@ import ( "dbm-services/common/go-pubpkg/cmutil" "dbm-services/common/go-pubpkg/logger" "dbm-services/mysql/db-tools/dbactuator/pkg/components" - "dbm-services/mysql/db-tools/dbactuator/pkg/core/cst" "dbm-services/mysql/db-tools/dbactuator/pkg/native" "dbm-services/mysql/db-tools/dbactuator/pkg/util" "dbm-services/mysql/db-tools/mysql-dbbackup/pkg/config" - "path/filepath" "strings" ) @@ -20,74 +18,53 @@ type NewDbBackupComp struct { func (c *NewDbBackupComp) Init() (err error) { c.Params.Role = strings.ToUpper(c.Params.Role) - c.initBackupOptions() - c.installPath = filepath.Join(cst.MYSQL_TOOL_INSTALL_PATH, cst.BackupDir) - 
c.versionMap = make(map[int]string) // not use c.renderCnf = make(map[int]config.BackupConfig) - if c.Params.UntarOnly { - logger.Info("untar_only=true do not try to connect") - return nil - } logger.Info("config %v", c.Params.Configs) return nil } func (c *NewDbBackupComp) initBackupOptions() { - c.backupOpt = c.Params.Options + c.ignoreDbs = make([]string, 0) + c.ignoreTbls = make([]string, 0) + + var ignoreTbls, ignoreDbs []string + ignoreDbs = strings.Split(c.Params.Options.IgnoreObjs.IgnoreDatabases, ",") + ignoreDbs = append(ignoreDbs, native.DBSys...) + // 默认备份需要 infodba_schema 库 + ignoreDbs = cmutil.StringsRemove(ignoreDbs, native.INFODBA_SCHEMA) + ignoreTbls = strings.Split(c.Params.Options.IgnoreObjs.IgnoreTables, ",") - c.ignoreDbs = make(map[int][]string) - c.ignoreTbls = make(map[int][]string) - for _, port := range c.Params.Ports { - opt, ok := c.Params.Options[port] - if !ok { - c.Params.Options[port] = BackupOptions{} // unknown - continue - } - logger.Info("options %v", opt) - var ignoreTbls, ignoreDbs []string - ignoreDbs = strings.Split(opt.IgnoreObjs.IgnoreDatabases, ",") - ignoreDbs = append(ignoreDbs, native.DBSys...) 
- // 默认备份需要 infodba_schema 库 - ignoreDbs = cmutil.StringsRemove(ignoreDbs, native.INFODBA_SCHEMA) - ignoreTbls = strings.Split(opt.IgnoreObjs.IgnoreTables, ",") + c.ignoreDbs = util.UniqueStrings(cmutil.RemoveEmpty(ignoreDbs)) + c.ignoreTbls = util.UniqueStrings(cmutil.RemoveEmpty(ignoreTbls)) + + logger.Info("ignore dbs %v", c.ignoreDbs) + logger.Info("ignore tables %v", c.ignoreTbls) - c.ignoreDbs[port] = util.UniqueStrings(cmutil.RemoveEmpty(ignoreDbs)) - c.ignoreTbls[port] = util.UniqueStrings(cmutil.RemoveEmpty(ignoreTbls)) - //if len(c.ignoreTbls[port]) <= 0 { - // c.ignoreTbls[port] = []string{"*"} - //} - logger.Info("port %d ignore dbs %v", port, c.ignoreDbs[port]) - logger.Info("port %d ignore tables %v", port, c.ignoreTbls[port]) - } } type NewDbBackupParam struct { - components.Medium // Configs BackupConfig - Configs map[string]map[string]string `json:"configs" validate:"required"` // 模板配置 - Options map[int]BackupOptions `json:"options" validate:"required"` // 选项参数配置 - Host string `json:"host" validate:"required,ip"` // 当前实例的主机地址 - Ports []int `json:"ports" validate:"required,gt=0,dive"` // 被监控机器的上所有需要监控的端口 - Role string `json:"role" validate:"required"` // 当前主机安装的mysqld的角色 - ClusterType string `json:"cluster_type"` - BkBizId int `json:"bk_biz_id" validate:"required"` // bkbizid - BkCloudId int `json:"bk_cloud_id"` // bk_cloud_id - ClusterAddress map[int]string `json:"cluster_address"` // cluster addresss - ClusterId map[int]int `json:"cluster_id"` // cluster id - ShardValue map[int]int `json:"shard_value"` // shard value for spider - ExecUser string `json:"exec_user"` // 执行Job的用户 - UntarOnly bool `json:"untar_only"` // 只解压,不校验不渲染配置,不连接 db + Configs map[string]map[string]string `json:"configs" validate:"required"` // 模板配置 + Options *BackupOptions `json:"options" validate:"required"` // 选项参数配置 + Host string `json:"host" validate:"required,ip"` // 当前实例的主机地址 + Ports []int `json:"ports" validate:"required,gt=0,dive"` // 被监控机器的上所有需要监控的端口 + Role string 
`json:"role" validate:"required"` // 当前主机安装的mysqld的角色 + ClusterType string `json:"cluster_type"` + BkBizId int `json:"bk_biz_id" validate:"required"` + BkCloudId int `json:"bk_cloud_id"` + ImmuteDomain string `json:"immute_domain"` + ClusterId int `json:"cluster_id"` // cluster id + ShardValue map[int]int `json:"shard_value"` // shard value for spider + ExecUser string `json:"exec_user"` // 执行Job的用户 + } type newDbBackupContext struct { - installPath string // dbbackupInstallPath - versionMap map[int]string // 当前机器数据库实例版本 - renderCnf map[int]config.BackupConfig // 绝对不能改成指针数组 - backupOpt map[int]BackupOptions - ignoreDbs map[int][]string - ignoreTbls map[int][]string + renderCnf map[int]config.BackupConfig // 绝对不能改成指针数组 + ignoreDbs []string + ignoreTbls []string } type BackupOptions struct { diff --git a/dbm-services/mysql/db-tools/dbactuator/pkg/components/peripheraltools/dbbackup/legacy.go b/dbm-services/mysql/db-tools/dbactuator/pkg/components/peripheraltools/dbbackup/legacy.go index ac74647afb..ac26eac3d4 100644 --- a/dbm-services/mysql/db-tools/dbactuator/pkg/components/peripheraltools/dbbackup/legacy.go +++ b/dbm-services/mysql/db-tools/dbactuator/pkg/components/peripheraltools/dbbackup/legacy.go @@ -1,15 +1,16 @@ package dbbackup import ( + "dbm-services/mysql/db-tools/dbactuator/pkg/core/cst" "dbm-services/mysql/db-tools/dbactuator/pkg/util/osutil" "fmt" "os" ) func (c *NewDbBackupComp) StageLegacyBackup() (err error) { - bakInstallPath := c.installPath + "-backup" - if _, err := os.Stat(c.installPath); !os.IsNotExist(err) { - cmd := fmt.Sprintf("rm -rf %s; mv %s %s", bakInstallPath, c.installPath, bakInstallPath) + bakInstallPath := cst.DbbackupGoInstallPath + "-backup" + if _, err := os.Stat(cst.DbbackupGoInstallPath); !os.IsNotExist(err) { + cmd := fmt.Sprintf("rm -rf %s; mv %s %s", bakInstallPath, cst.DbbackupGoInstallPath, bakInstallPath) output, err := osutil.ExecShellCommand(false, cmd) if err != nil { err = fmt.Errorf("execute %s get an error:%s,%w", 
cmd, output, err) diff --git a/dbm-services/mysql/db-tools/dbactuator/pkg/components/peripheraltools/dbbackup/render_data.go b/dbm-services/mysql/db-tools/dbactuator/pkg/components/peripheraltools/dbbackup/render_data.go index d3550c8bdc..c1bd302aa3 100644 --- a/dbm-services/mysql/db-tools/dbactuator/pkg/components/peripheraltools/dbbackup/render_data.go +++ b/dbm-services/mysql/db-tools/dbactuator/pkg/components/peripheraltools/dbbackup/render_data.go @@ -12,16 +12,11 @@ import ( ) func (c *NewDbBackupComp) InitRenderData() (err error) { - if c.Params.UntarOnly { - logger.Info("untar_only=true do not need InitRenderData") - return nil - } - bkuser := c.GeneralParam.RuntimeAccountParam.DbBackupUser bkpwd := c.GeneralParam.RuntimeAccountParam.DbBackupPwd for _, port := range c.Params.Ports { - pf, err := db_table_filter.NewFilter([]string{"*"}, []string{"*"}, c.ignoreDbs[port], c.ignoreTbls[port]) + pf, err := db_table_filter.NewFilter([]string{"*"}, []string{"*"}, c.ignoreDbs, c.ignoreTbls) if err != nil { return err } @@ -33,12 +28,12 @@ func (c *NewDbBackupComp) InitRenderData() (err error) { var dsg string switch c.Params.Role { case cst.BackupRoleMaster, cst.BackupRoleRepeater: - dsg = c.backupOpt[port].Master.DataSchemaGrant + dsg = c.Params.Options.Master.DataSchemaGrant case cst.BackupRoleSlave: - dsg = c.backupOpt[port].Slave.DataSchemaGrant + dsg = c.Params.Options.Slave.DataSchemaGrant case cst.BackupRoleOrphan: // orphan 使用的是 tendbsingle Master.DataSchemaGrant - dsg = c.backupOpt[port].Master.DataSchemaGrant + dsg = c.Params.Options.Master.DataSchemaGrant case cst.BackupRoleSpiderMaster, cst.BackupRoleSpiderSlave, cst.BackupRoleSpiderMnt: // spider 只在 spider_master and tdbctl_master 上,备份schema,grant dsg = "schema,grant" @@ -54,10 +49,10 @@ func (c *NewDbBackupComp) InitRenderData() (err error) { MysqlRole: strings.ToLower(c.Params.Role), BkBizId: c.Params.BkBizId, BkCloudId: c.Params.BkCloudId, - ClusterAddress: c.getInsDomainAddr(port), - ClusterId: 
c.getInsClusterId(port), + ClusterAddress: c.Params.ImmuteDomain, + ClusterId: c.Params.ClusterId, ShardValue: c.getInsShardValue(port), - BackupType: c.backupOpt[port].BackupType, + BackupType: c.Params.Options.BackupType, DataSchemaGrant: dsg, }, BackupClient: config.BackupClient{}, @@ -84,30 +79,6 @@ func (c *NewDbBackupComp) InitRenderData() (err error) { return nil } -func (c *NewDbBackupComp) getInsDomainAddr(port int) string { - if c.Params.ClusterAddress == nil { - return "" - } - if len(c.Params.ClusterAddress) == 0 { - return "" - } - if v, ok := c.Params.ClusterAddress[port]; ok { - return v - } - return "" -} -func (c *NewDbBackupComp) getInsClusterId(port int) int { - if c.Params.ClusterId == nil { - return 0 - } - if len(c.Params.ClusterId) == 0 { - return 0 - } - if v, ok := c.Params.ClusterId[port]; ok { - return v - } - return 0 -} func (c *NewDbBackupComp) getInsShardValue(port int) int { if c.Params.ShardValue == nil { return 0 diff --git a/dbm-services/mysql/db-tools/dbactuator/pkg/components/peripheraltools/dbbackup/runtime_config.go b/dbm-services/mysql/db-tools/dbactuator/pkg/components/peripheraltools/dbbackup/runtime_config.go index 2422caffea..c2810bd5a9 100644 --- a/dbm-services/mysql/db-tools/dbactuator/pkg/components/peripheraltools/dbbackup/runtime_config.go +++ b/dbm-services/mysql/db-tools/dbactuator/pkg/components/peripheraltools/dbbackup/runtime_config.go @@ -5,7 +5,6 @@ import ( "path/filepath" "text/template" - "dbm-services/common/go-pubpkg/logger" "dbm-services/common/go-pubpkg/mysqlcomm" "dbm-services/mysql/db-tools/dbactuator/pkg/core/cst" "dbm-services/mysql/db-tools/mysql-dbbackup/pkg/config" @@ -15,12 +14,8 @@ import ( ) func (c *NewDbBackupComp) GenerateRuntimeConfig() (err error) { - if c.Params.UntarOnly { - logger.Info("untar_only=true do not need GenerateDbbackupConfig") - return nil - } // 先渲染模版配置文件 - templatePath := filepath.Join(c.installPath, fmt.Sprintf("%s.tpl", cst.BackupFile)) + templatePath := 
filepath.Join(cst.DbbackupGoInstallPath, fmt.Sprintf("%s.tpl", cst.BackupFile)) if err := saveTplConfigfile(c.Params.Configs, templatePath); err != nil { return err } @@ -31,12 +26,12 @@ func (c *NewDbBackupComp) GenerateRuntimeConfig() (err error) { } for _, port := range c.Params.Ports { - _, err := writeCnf(port, c.installPath, c.renderCnf, cnfTemp) + _, err := writeCnf(port, cst.DbbackupGoInstallPath, c.renderCnf, cnfTemp) if err != nil { return err } if c.Params.Role == cst.BackupRoleSpiderMaster { - cnfPath, err := writeCnf(mysqlcomm.GetTdbctlPortBySpider(port), c.installPath, c.renderCnf, cnfTemp) + cnfPath, err := writeCnf(mysqlcomm.GetTdbctlPortBySpider(port), cst.DbbackupGoInstallPath, c.renderCnf, cnfTemp) if err != nil { return err } @@ -52,7 +47,7 @@ func (c *NewDbBackupComp) GenerateRuntimeConfig() (err error) { return err } - tdbCtlCnf.LogicalBackup.DefaultsFile = filepath.Join(c.installPath, "mydumper_for_tdbctl.cnf") + tdbCtlCnf.LogicalBackup.DefaultsFile = filepath.Join(cst.DbbackupGoInstallPath, "mydumper_for_tdbctl.cnf") err = tdbCtlCnfIni.ReflectFrom(&tdbCtlCnf) if err != nil { return err diff --git a/dbm-services/mysql/db-tools/dbactuator/pkg/components/peripheraltools/exporter/init.go b/dbm-services/mysql/db-tools/dbactuator/pkg/components/peripheraltools/exporter/init.go new file mode 100644 index 0000000000..3ecec9daa6 --- /dev/null +++ b/dbm-services/mysql/db-tools/dbactuator/pkg/components/peripheraltools/exporter/init.go @@ -0,0 +1,111 @@ +package exporter + +import ( + "dbm-services/common/go-pubpkg/logger" + "dbm-services/mysql/db-tools/dbactuator/pkg/components" + "dbm-services/mysql/db-tools/dbactuator/pkg/components/mysql/common" + "dbm-services/mysql/db-tools/dbactuator/pkg/native" + "dbm-services/mysql/db-tools/dbactuator/pkg/util" + "fmt" + "os" + "path/filepath" +) + +type PushCnfComp struct { + GeneralParam *components.GeneralParam `json:"general"` + Params *PushCnfParams `json:"extend"` +} + +type PushCnfParams struct { + IP 
string `json:"ip"` + PortList []int `json:"port_list"` + MachineType string `json:"machine_type"` +} + +func (c *PushCnfComp) Run() (err error) { + if c.Params.MachineType == "proxy" { + for _, port := range c.Params.PortList { + err = c.generateProxyExporterCnf(c.Params.IP, port) + if err != nil { + return err + } + } + return nil + } + + for _, port := range c.Params.PortList { + err = c.generateMySQLExporterCnf(c.Params.IP, port) + if err != nil { + return err + } + } + return nil +} + +func (c *PushCnfComp) generateProxyExporterCnf(ip string, port int) (err error) { + f, err := makeCnfFile(port) + if err != nil { + return err + } + defer func() { + _ = f.Close() + }() + + content := fmt.Sprintf( + `%s:%d,,,%s:%d,%s,%s`, + ip, port, + ip, native.GetProxyAdminPort(port), + c.GeneralParam.RuntimeAccountParam.ProxyAdminUser, c.GeneralParam.RuntimeAccountParam.ProxyAdminPwd, + ) + + _, err = f.WriteString(content) + if err != nil { + logger.Error(err.Error()) + return err + } + return nil +} + +func (c *PushCnfComp) generateMySQLExporterCnf(ip string, port int) (err error) { + err = util.CreateExporterConf( + makeCnfFilePath(port), + ip, port, + c.GeneralParam.RuntimeAccountParam.MonitorUser, + c.GeneralParam.RuntimeAccountParam.MonitorPwd) + if err != nil { + logger.Error(err.Error()) + return err + } + return nil +} + +func makeCnfFile(port int) (*os.File, error) { + f, err := os.OpenFile(makeCnfFilePath(port), os.O_CREATE|os.O_TRUNC|os.O_WRONLY, 0644) + if err != nil { + logger.Error(err.Error()) + return nil, err + } + return f, nil +} + +func makeCnfFilePath(port int) string { + return filepath.Join( + "/etc/", + fmt.Sprintf("exporter_%d.cnf", port), + ) +} + +func (c *PushCnfComp) Example() interface{} { + return PushCnfComp{ + GeneralParam: &components.GeneralParam{ + RuntimeAccountParam: components.RuntimeAccountParam{ + MySQLAccountParam: common.AccountMonitorExample, + }, + }, + Params: &PushCnfParams{ + IP: "1.2.3.4", + PortList: []int{1, 2, 3}, + 
MachineType: "proxy", + }, + } +} diff --git a/dbm-services/mysql/db-tools/dbactuator/pkg/components/peripheraltools/monitor/add_crond.go b/dbm-services/mysql/db-tools/dbactuator/pkg/components/peripheraltools/monitor/add_crond.go index 8d1b2b551f..70315fa82e 100644 --- a/dbm-services/mysql/db-tools/dbactuator/pkg/components/peripheraltools/monitor/add_crond.go +++ b/dbm-services/mysql/db-tools/dbactuator/pkg/components/peripheraltools/monitor/add_crond.go @@ -16,10 +16,10 @@ func (c *MySQLMonitorComp) AddToCrond() (err error) { return err } - for _, inst := range c.Params.InstancesInfo { + for _, ele := range c.Params.PortBkInstanceList { configPath := filepath.Join( cst.MySQLMonitorInstallPath, - fmt.Sprintf("monitor-config_%d.yaml", inst.Port), + fmt.Sprintf("monitor-config_%d.yaml", ele.Port), ) err = internal.RegisterCrond(mysqlMonitor, configPath, c.Params.ExecUser) diff --git a/dbm-services/mysql/db-tools/dbactuator/pkg/components/peripheraltools/monitor/binary.go b/dbm-services/mysql/db-tools/dbactuator/pkg/components/peripheraltools/monitor/binary.go index 935f0b1fe9..864883d28a 100644 --- a/dbm-services/mysql/db-tools/dbactuator/pkg/components/peripheraltools/monitor/binary.go +++ b/dbm-services/mysql/db-tools/dbactuator/pkg/components/peripheraltools/monitor/binary.go @@ -2,6 +2,7 @@ package monitor import ( "dbm-services/common/go-pubpkg/logger" + "dbm-services/mysql/db-tools/dbactuator/pkg/components" "dbm-services/mysql/db-tools/dbactuator/pkg/core/cst" "dbm-services/mysql/db-tools/dbactuator/pkg/util/osutil" "fmt" @@ -9,7 +10,7 @@ import ( "path/filepath" ) -func (c *MySQLMonitorComp) DeployBinary() (err error) { +func DeployBinary(medium *components.Medium) (err error) { err = os.MkdirAll(cst.MySQLMonitorInstallPath, 0755) if err != nil { logger.Error("mkdir %s failed: %s", cst.MySQLCrondInstallPath, err.Error()) @@ -34,7 +35,7 @@ func (c *MySQLMonitorComp) DeployBinary() (err error) { decompressCmd := fmt.Sprintf( `tar zxf %s -C %s`, - 
c.Params.Medium.GetAbsolutePath(), cst.MySQLMonitorInstallPath, + medium.GetAbsolutePath(), cst.MySQLMonitorInstallPath, ) _, err = osutil.ExecShellCommand(false, decompressCmd) if err != nil { @@ -85,3 +86,7 @@ func (c *MySQLMonitorComp) DeployBinary() (err error) { } return nil } + +//func (c *MySQLMonitorComp) DeployBinary() (err error) { +// return DeployBinary(&c.Params.Medium) +//} diff --git a/dbm-services/mysql/db-tools/dbactuator/pkg/components/peripheraltools/monitor/example.go b/dbm-services/mysql/db-tools/dbactuator/pkg/components/peripheraltools/monitor/example.go index 678948a173..b8d8ba2bd9 100644 --- a/dbm-services/mysql/db-tools/dbactuator/pkg/components/peripheraltools/monitor/example.go +++ b/dbm-services/mysql/db-tools/dbactuator/pkg/components/peripheraltools/monitor/example.go @@ -3,7 +3,6 @@ package monitor import ( "dbm-services/mysql/db-tools/dbactuator/pkg/components" "dbm-services/mysql/db-tools/dbactuator/pkg/components/mysql/common" - "dbm-services/mysql/db-tools/dbactuator/pkg/components/peripheraltools/internal" "dbm-services/mysql/db-tools/dbactuator/pkg/native" "dbm-services/mysql/db-tools/mysql-monitor/pkg/config" ) @@ -16,10 +15,6 @@ func (c *MySQLMonitorComp) Example() interface{} { }, }, Params: &MySQLMonitorParam{ - Medium: components.Medium{ - Pkg: "mysql-monitor.tar.gz", - PkgMd5: "12345", - }, SystemDbs: native.DBSys, ExecUser: "whoru", ApiUrl: `http://x.x.x.x:yyyy`, @@ -32,18 +27,19 @@ func (c *MySQLMonitorComp) Example() interface{} { Role: nil, }, }, - InstancesInfo: []*internal.InstanceInfo{ + + MachineType: "backend", + BkCloudId: 0, + PortBkInstanceList: []portBkInstancePair{ { - BkBizId: 1, - Ip: "127.0.0.1", - Port: 123, - Role: "master", - ClusterId: 12, - ImmuteDomain: "aaa.bbb.com", + Port: 20000, BkInstanceId: 123, }, }, - MachineType: "backend", - BkCloudId: 0, + IP: "127.0.0.1", + Role: "master", + ImmuteDomain: "db.local", + ClusterId: 123, + DBModuleId: 234, }, } } diff --git 
a/dbm-services/mysql/db-tools/dbactuator/pkg/components/peripheraltools/monitor/exporter_config.go b/dbm-services/mysql/db-tools/dbactuator/pkg/components/peripheraltools/monitor/exporter_config.go index 27b735e4dc..3d3bb7532d 100644 --- a/dbm-services/mysql/db-tools/dbactuator/pkg/components/peripheraltools/monitor/exporter_config.go +++ b/dbm-services/mysql/db-tools/dbactuator/pkg/components/peripheraltools/monitor/exporter_config.go @@ -1,76 +1,63 @@ package monitor -import ( - "fmt" - "os" - "path/filepath" +//func (c *MySQLMonitorComp) GenerateExporterConfig() (err error) { +// for _, inst := range c.Params.InstancesInfo { +// err = generateExporterConfigIns(c.Params, inst, &c.GeneralParam.RuntimeAccountParam) +// if err != nil { +// return err +// } +// } +// return nil +//} - "dbm-services/common/go-pubpkg/logger" - "dbm-services/mysql/db-tools/dbactuator/pkg/components" - "dbm-services/mysql/db-tools/dbactuator/pkg/components/peripheraltools/internal" - "dbm-services/mysql/db-tools/dbactuator/pkg/native" - "dbm-services/mysql/db-tools/dbactuator/pkg/util" - "dbm-services/mysql/db-tools/dbactuator/pkg/util/osutil" -) - -func (c *MySQLMonitorComp) GenerateExporterConfig() (err error) { - for _, inst := range c.Params.InstancesInfo { - err = generateExporterConfigIns(c.Params, inst, &c.GeneralParam.RuntimeAccountParam) - if err != nil { - return err - } - } - return nil -} - -func generateExporterConfigIns(mmp *MySQLMonitorParam, instance *internal.InstanceInfo, rtap *components.RuntimeAccountParam) (err error) { - exporterConfigPath := filepath.Join( - "/etc", - fmt.Sprintf("exporter_%d.cnf", instance.Port), - ) - - if mmp.MachineType == "proxy" { - f, err := os.OpenFile(exporterConfigPath, os.O_CREATE|os.O_TRUNC|os.O_WRONLY, 0644) - if err != nil { - logger.Error(err.Error()) - return err - } - defer func() { - _ = f.Close() - }() - - proxyContent := fmt.Sprintf( - "%s:%d,,,%s:%d,%s,%s", - instance.Ip, instance.Port, - instance.Ip, 
native.GetProxyAdminPort(instance.Port), - rtap.ProxyAdminUser, rtap.ProxyAdminPwd, - ) - _, err = f.WriteString(proxyContent) - if err != nil { - logger.Error(err.Error()) - return err - } - } else { - err = util.CreateExporterConf( - exporterConfigPath, - instance.Ip, - instance.Port, - rtap.MonitorUser, - rtap.MonitorPwd, - ) - if err != nil { - logger.Error(err.Error()) - return err - } - } - - _, err = osutil.ExecShellCommand( - false, - fmt.Sprintf("chown mysql %s", exporterConfigPath), - ) - if err != nil { - logger.Error(err.Error()) - return err - } - return nil -} +//func generateExporterConfigIns(mmp *MySQLMonitorParam, instance *internal.InstanceInfo, rtap *components.RuntimeAccountParam) (err error) { +// exporterConfigPath := filepath.Join( +// "/etc", +// fmt.Sprintf("exporter_%d.cnf", instance.Port), +// ) +// +// if mmp.MachineType == "proxy" { +// f, err := os.OpenFile(exporterConfigPath, os.O_CREATE|os.O_TRUNC|os.O_WRONLY, 0644) +// if err != nil { +// logger.Error(err.Error()) +// return err +// } +// defer func() { +// _ = f.Close() +// }() +// +// proxyContent := fmt.Sprintf( +// "%s:%d,,,%s:%d,%s,%s", +// instance.Ip, instance.Port, +// instance.Ip, native.GetProxyAdminPort(instance.Port), +// rtap.ProxyAdminUser, rtap.ProxyAdminPwd, +// ) +// _, err = f.WriteString(proxyContent) +// if err != nil { +// logger.Error(err.Error()) +// return err +// } +// } else { +// err = util.CreateExporterConf( +// exporterConfigPath, +// instance.Ip, +// instance.Port, +// rtap.MonitorUser, +// rtap.MonitorPwd, +// ) +// if err != nil { +// logger.Error(err.Error()) +// return err +// } +// } +// +// _, err = osutil.ExecShellCommand( +// false, +// fmt.Sprintf("chown mysql %s", exporterConfigPath), +// ) +// if err != nil { +// logger.Error(err.Error()) +// return err +// } +// return nil +//} diff --git a/dbm-services/mysql/db-tools/dbactuator/pkg/components/peripheraltools/monitor/init.go 
b/dbm-services/mysql/db-tools/dbactuator/pkg/components/peripheraltools/monitor/init.go index 2bd5f3d131..a3cfe76098 100644 --- a/dbm-services/mysql/db-tools/dbactuator/pkg/components/peripheraltools/monitor/init.go +++ b/dbm-services/mysql/db-tools/dbactuator/pkg/components/peripheraltools/monitor/init.go @@ -1,9 +1,7 @@ package monitor import ( - "dbm-services/common/go-pubpkg/logger" "dbm-services/mysql/db-tools/dbactuator/pkg/components" - "dbm-services/mysql/db-tools/dbactuator/pkg/components/peripheraltools/internal" "dbm-services/mysql/db-tools/dbactuator/pkg/tools" "dbm-services/mysql/db-tools/mysql-monitor/pkg/config" ) @@ -17,21 +15,31 @@ type MySQLMonitorComp struct { func (c *MySQLMonitorComp) Init() (err error) { c.tools = tools.NewToolSetWithPickNoValidate(tools.ToolMySQLMonitor) - err = c.Params.Medium.Check() - if err != nil { - logger.Error(err.Error()) - return err - } + //err = c.Params.Medium.Check() + //if err != nil { + // logger.Error(err.Error()) + // return err + //} return nil } +type portBkInstancePair struct { + Port int `json:"port"` + BkInstanceId int64 `json:"bk_instance_id"` +} + type MySQLMonitorParam struct { - components.Medium - SystemDbs []string `json:"system_dbs"` - ExecUser string `json:"exec_user"` - ApiUrl string `json:"api_url"` - InstancesInfo []*internal.InstanceInfo `json:"instances_info"` - MachineType string `json:"machine_type"` - BkCloudId int `json:"bk_cloud_id"` - ItemsConfig map[string]*config.MonitorItem `json:"items_config" yaml:"items_config"` + SystemDbs []string `json:"system_dbs"` + ExecUser string `json:"exec_user"` + ApiUrl string `json:"api_url"` + MachineType string `json:"machine_type"` + BkCloudId int `json:"bk_cloud_id"` + BKBizId int `json:"bk_biz_id"` + PortBkInstanceList []portBkInstancePair `json:"port_bk_instance_list"` + IP string `json:"ip"` + Role string `json:"role"` + ImmuteDomain string `json:"immute_domain"` + ClusterId int `json:"cluster_id"` + ItemsConfig map[string]*config.MonitorItem 
`json:"items_config" yaml:"items_config"` + DBModuleId int `json:"db_module_id"` } diff --git a/dbm-services/mysql/db-tools/dbactuator/pkg/components/peripheraltools/monitor/items_config.go b/dbm-services/mysql/db-tools/dbactuator/pkg/components/peripheraltools/monitor/items_config.go index 5ccfe7f47a..f281104778 100644 --- a/dbm-services/mysql/db-tools/dbactuator/pkg/components/peripheraltools/monitor/items_config.go +++ b/dbm-services/mysql/db-tools/dbactuator/pkg/components/peripheraltools/monitor/items_config.go @@ -15,8 +15,8 @@ import ( ) func (c *MySQLMonitorComp) GenerateItemsConfig() (err error) { - for _, inst := range c.Params.InstancesInfo { - err = generateItemsConfigIns(inst, c.Params.ItemsConfig) + for _, ele := range c.Params.PortBkInstanceList { + err = c.generateItemsConfigIns(ele.Port) if err != nil { return err } @@ -24,8 +24,8 @@ func (c *MySQLMonitorComp) GenerateItemsConfig() (err error) { return nil } -func generateItemsConfigIns(instance *internal.InstanceInfo, itemsConfig map[string]*config.MonitorItem) (err error) { - itemList := maps.Values(itemsConfig) +func (c *MySQLMonitorComp) generateItemsConfigIns(port int) (err error) { + itemList := maps.Values(c.Params.ItemsConfig) slices.SortFunc(itemList, func(a, b *config.MonitorItem) int { return strings.Compare(a.Name, b.Name) }) @@ -38,7 +38,7 @@ func generateItemsConfigIns(instance *internal.InstanceInfo, itemsConfig map[str itemConfigPath := filepath.Join( cst.MySQLMonitorInstallPath, - fmt.Sprintf(`items-config_%d.yaml`, instance.Port), + fmt.Sprintf(`items-config_%d.yaml`, port), ) return internal.WriteConfig(itemConfigPath, append(b, []byte("\n")...)) diff --git a/dbm-services/mysql/db-tools/dbactuator/pkg/components/peripheraltools/monitor/runtime_config.go b/dbm-services/mysql/db-tools/dbactuator/pkg/components/peripheraltools/monitor/runtime_config.go index 7104426a47..59259e74f8 100644 --- 
a/dbm-services/mysql/db-tools/dbactuator/pkg/components/peripheraltools/monitor/runtime_config.go +++ b/dbm-services/mysql/db-tools/dbactuator/pkg/components/peripheraltools/monitor/runtime_config.go @@ -3,7 +3,6 @@ package monitor import ( "context" "dbm-services/common/go-pubpkg/logger" - "dbm-services/mysql/db-tools/dbactuator/pkg/components" "dbm-services/mysql/db-tools/dbactuator/pkg/components/peripheraltools/internal" "dbm-services/mysql/db-tools/mysql-monitor/pkg/config" @@ -18,14 +17,14 @@ import ( ) func (c *MySQLMonitorComp) GenerateRuntimeConfig() (err error) { - for _, inst := range c.Params.InstancesInfo { - err = generateRuntimeConfigIns(c.Params, inst, &c.GeneralParam.RuntimeAccountParam) + for _, ele := range c.Params.PortBkInstanceList { + err = c.generateRuntimeConfigIns(ele.Port, ele.BkInstanceId) if err != nil { return err } if c.Params.MachineType == "backend" { - err = createUserListBackupTable(inst, &c.GeneralParam.RuntimeAccountParam) + err = c.createUserListBackupTable(ele.Port) if err != nil { return err } @@ -34,12 +33,12 @@ func (c *MySQLMonitorComp) GenerateRuntimeConfig() (err error) { return nil } -func createUserListBackupTable(instance *internal.InstanceInfo, rtap *components.RuntimeAccountParam) (err error) { +func (c *MySQLMonitorComp) createUserListBackupTable(port int) (err error) { db, err := sqlx.Connect( "mysql", fmt.Sprintf("%s:%s@tcp(%s:%d)/", - rtap.MonitorUser, rtap.MonitorPwd, - instance.Ip, instance.Port, + c.GeneralParam.RuntimeAccountParam.MonitorUser, c.GeneralParam.RuntimeAccountParam.MonitorPwd, + c.Params.IP, port, )) if err != nil { return err @@ -67,35 +66,33 @@ func createUserListBackupTable(instance *internal.InstanceInfo, rtap *components return nil } -func generateRuntimeConfigIns(mmp *MySQLMonitorParam, instance *internal.InstanceInfo, rtap *components.RuntimeAccountParam) (err error) { - if instance.BkInstanceId <= 0 { +func (c *MySQLMonitorComp) generateRuntimeConfigIns(port int, bkInstanceId int64) (err 
error) { + logDir := filepath.Join(cst.MySQLMonitorInstallPath, "logs") + + ac, err := c.authByMachineType() + if err != nil { + return err + } + + if bkInstanceId <= 0 { err = errors.Errorf( "%s:%d invalid bk_instance_id: %d", - instance.Ip, - instance.Port, - instance.BkInstanceId, + c.Params.IP, port, bkInstanceId, ) logger.Error(err.Error()) return err } - logDir := filepath.Join(cst.MySQLMonitorInstallPath, "logs") - - ac, err := authByMachineType(mmp.MachineType, rtap) - if err != nil { - return err - } - cfg := config.Config{ - BkBizId: instance.BkBizId, - Ip: instance.Ip, - Port: instance.Port, - BkInstanceId: instance.BkInstanceId, - ImmuteDomain: instance.ImmuteDomain, - MachineType: mmp.MachineType, - Role: &instance.Role, - BkCloudID: &mmp.BkCloudId, - DBModuleID: &instance.DBModuleId, + BkBizId: c.Params.BKBizId, + Ip: c.Params.IP, + Port: port, + BkInstanceId: bkInstanceId, + ImmuteDomain: c.Params.ImmuteDomain, + MachineType: c.Params.MachineType, + Role: &c.Params.Role, + BkCloudID: &c.Params.BkCloudId, + DBModuleID: &c.Params.DBModuleId, Log: &config.LogConfig{ Console: false, LogFileDir: &logDir, @@ -105,11 +102,11 @@ func generateRuntimeConfigIns(mmp *MySQLMonitorParam, instance *internal.Instanc }, ItemsConfigFile: filepath.Join( cst.MySQLMonitorInstallPath, - fmt.Sprintf("items-config_%d.yaml", instance.Port), + fmt.Sprintf("items-config_%d.yaml", port), ), Auth: *ac, - ApiUrl: mmp.ApiUrl, - DBASysDbs: mmp.SystemDbs, + ApiUrl: c.Params.ApiUrl, + DBASysDbs: c.Params.SystemDbs, InteractTimeout: 5 * time.Second, DefaultSchedule: "@every 1m", } @@ -122,34 +119,40 @@ func generateRuntimeConfigIns(mmp *MySQLMonitorParam, instance *internal.Instanc cfgFilePath := filepath.Join( filepath.Join(cst.MySQLMonitorInstallPath, - fmt.Sprintf("monitor-config_%d.yaml", instance.Port)), + fmt.Sprintf("monitor-config_%d.yaml", port)), ) - return internal.WriteConfig(cfgFilePath, b) + err = internal.WriteConfig(cfgFilePath, b) + if err != nil { + 
logger.Error(err.Error()) + return err + } + //} + return nil } -func authByMachineType(machineType string, rtap *components.RuntimeAccountParam) (ac *config.AuthCollect, err error) { - switch machineType { +func (c *MySQLMonitorComp) authByMachineType() (ac *config.AuthCollect, err error) { + switch c.Params.MachineType { case "proxy": ac = &config.AuthCollect{ Proxy: &config.ConnectAuth{ - User: rtap.MonitorAccessAllUser, - Password: rtap.MonitorAccessAllPwd, + User: c.GeneralParam.RuntimeAccountParam.MonitorAccessAllUser, + Password: c.GeneralParam.RuntimeAccountParam.MonitorAccessAllPwd, }, ProxyAdmin: &config.ConnectAuth{ - User: rtap.ProxyAdminUser, - Password: rtap.ProxyAdminPwd, + User: c.GeneralParam.RuntimeAccountParam.ProxyAdminUser, + Password: c.GeneralParam.RuntimeAccountParam.ProxyAdminPwd, }, } case "backend", "single", "remote", "spider": ac = &config.AuthCollect{ Mysql: &config.ConnectAuth{ - User: rtap.MonitorUser, - Password: rtap.MonitorPwd, + User: c.GeneralParam.RuntimeAccountParam.MonitorUser, + Password: c.GeneralParam.RuntimeAccountParam.MonitorPwd, }, } default: - err = errors.Errorf("not support machine type: %s", machineType) + err = errors.Errorf("not support machine type: %s", c.Params.MachineType) logger.Error(err.Error()) return nil, err } diff --git a/dbm-services/mysql/db-tools/dbactuator/pkg/components/peripheraltools/prepare_binary.go b/dbm-services/mysql/db-tools/dbactuator/pkg/components/peripheraltools/prepare_binary.go new file mode 100644 index 0000000000..50213aab3f --- /dev/null +++ b/dbm-services/mysql/db-tools/dbactuator/pkg/components/peripheraltools/prepare_binary.go @@ -0,0 +1,81 @@ +package peripheraltools + +import ( + "dbm-services/common/go-pubpkg/logger" + "dbm-services/mysql/db-tools/dbactuator/pkg/components" + "dbm-services/mysql/db-tools/dbactuator/pkg/components/peripheraltools/checksum" + "dbm-services/mysql/db-tools/dbactuator/pkg/components/peripheraltools/crond" + 
"dbm-services/mysql/db-tools/dbactuator/pkg/components/peripheraltools/dba_toolkit" + "dbm-services/mysql/db-tools/dbactuator/pkg/components/peripheraltools/dbbackup" + "dbm-services/mysql/db-tools/dbactuator/pkg/components/peripheraltools/monitor" + "dbm-services/mysql/db-tools/dbactuator/pkg/components/peripheraltools/rotatebinlog" + + "github.com/pkg/errors" +) + +const ( + DepartDBAToolKit = "dba-toolkit" + DepartMySQLCrond = "mysql-crond" + DepartMySQLMonitor = "mysql-monitor" + DepartMySQLDBBackup = "mysql-dbbackup" + DepartMySQLRotateBinlog = "rotate-binlog" + DepartMySQLTableChecksum = "mysql-checksum" +) + +type PrepareBinary struct { + GeneralParam *components.GeneralParam `json:"general"` + Params *PrepareBinaryParams `json:"extend"` +} + +type PrepareBinaryParams struct { + /* + key 是 + MySQLCrond = EnumField("mysql-crond", _("mysql-rond")) + MySQLMonitor = EnumField("mysql-monitor", _("mysql-monitor")) + MySQLDBBackup = EnumField("mysql-dbbackup", _("mysql-dbbackup")) + MySQLRotateBinlog = EnumField("rotate-binlog", _("rotate-binlog")) + MySQLTableChecksum = EnumField("mysql-checksum", _("mysql-checksum")) + */ + Departs map[string]*components.Medium `json:"departs"` +} + +func (c *PrepareBinary) Run() (err error) { + for k, _ := range c.Params.Departs { + err = c.prepareOne(k) + if err != nil { + logger.Error(err.Error()) + return err + } + } + return nil +} + +func (c *PrepareBinary) prepareOne(depart string) (err error) { + switch depart { + case DepartMySQLCrond: + return crond.DeployBinary(c.Params.Departs[DepartMySQLCrond]) + case DepartMySQLMonitor: + return monitor.DeployBinary(c.Params.Departs[DepartMySQLMonitor]) + case DepartMySQLDBBackup: + return dbbackup.DeployBinary(c.Params.Departs[DepartMySQLDBBackup]) + case DepartMySQLRotateBinlog: + return rotatebinlog.DeployBinary(c.Params.Departs[DepartMySQLRotateBinlog]) + case DepartMySQLTableChecksum: + return checksum.DeployBinary(c.Params.Departs[DepartMySQLTableChecksum]) + case 
DepartDBAToolKit: + return dba_toolkit.DeployBinary(c.Params.Departs[DepartDBAToolKit]) + default: + err = errors.New("unknown depart " + depart) + logger.Error(err.Error()) + return err + } +} + +func (c *PrepareBinary) Example() interface{} { + return PrepareBinaryParams{Departs: map[string]*components.Medium{ + DepartMySQLCrond: { + Pkg: "mysql-crond", + PkgMd5: "12346", + }, + }} +} diff --git a/dbm-services/mysql/db-tools/dbactuator/pkg/components/peripheraltools/rotatebinlog/binary.go b/dbm-services/mysql/db-tools/dbactuator/pkg/components/peripheraltools/rotatebinlog/binary.go index b1262874f7..7b414909e0 100644 --- a/dbm-services/mysql/db-tools/dbactuator/pkg/components/peripheraltools/rotatebinlog/binary.go +++ b/dbm-services/mysql/db-tools/dbactuator/pkg/components/peripheraltools/rotatebinlog/binary.go @@ -2,23 +2,25 @@ package rotatebinlog import ( "dbm-services/common/go-pubpkg/logger" + "dbm-services/mysql/db-tools/dbactuator/pkg/components" "dbm-services/mysql/db-tools/dbactuator/pkg/core/cst" + "dbm-services/mysql/db-tools/dbactuator/pkg/tools" "dbm-services/mysql/db-tools/dbactuator/pkg/util/osutil" "fmt" "os" "path/filepath" ) -func (c *MySQLRotateBinlogComp) DeployBinary() (err error) { - err = os.MkdirAll(filepath.Join(c.installPath, "logs"), 0755) +func DeployBinary(medium *components.Medium) (err error) { + err = os.MkdirAll(filepath.Join(cst.MysqlRotateBinlogInstallPath, "logs"), 0755) if err != nil { - logger.Error("mkdir %s failed: %s", c.installPath, err.Error()) + logger.Error("mkdir %s failed: %s", cst.MysqlRotateBinlogInstallPath, err.Error()) return err } decompressCmd := fmt.Sprintf( `tar zxf %s -C %s`, - c.Params.Medium.GetAbsolutePath(), cst.MYSQL_TOOL_INSTALL_PATH, + medium.GetAbsolutePath(), cst.MYSQL_TOOL_INSTALL_PATH, ) _, err = osutil.ExecShellCommand(false, decompressCmd) if err != nil { @@ -26,10 +28,14 @@ func (c *MySQLRotateBinlogComp) DeployBinary() (err error) { return err } - chownCmd := fmt.Sprintf(`chown -R mysql.mysql 
%s && chmod +x %s`, c.installPath, c.binPath) + chownCmd := fmt.Sprintf( + `chown -R mysql.mysql %s && chmod +x %s`, + cst.MysqlRotateBinlogInstallPath, + filepath.Join(cst.MysqlRotateBinlogInstallPath, string(tools.ToolMysqlRotatebinlog)), + ) _, err = osutil.ExecShellCommand(false, chownCmd) if err != nil { - logger.Error("chown %s to mysql failed: %s", c.installPath, err.Error()) + logger.Error("chown %s to mysql failed: %s", cst.MysqlRotateBinlogInstallPath, err.Error()) return err } diff --git a/dbm-services/mysql/db-tools/dbactuator/pkg/components/peripheraltools/rotatebinlog/example.go b/dbm-services/mysql/db-tools/dbactuator/pkg/components/peripheraltools/rotatebinlog/example.go index 01cf4c6205..1eb89de6d6 100644 --- a/dbm-services/mysql/db-tools/dbactuator/pkg/components/peripheraltools/rotatebinlog/example.go +++ b/dbm-services/mysql/db-tools/dbactuator/pkg/components/peripheraltools/rotatebinlog/example.go @@ -24,10 +24,6 @@ func (c *MySQLRotateBinlogComp) Example() interface{} { }, }, Params: MySQLRotateBinlogParam{ - Medium: components.Medium{ - Pkg: "mysql-rotatebinlog.tar.gz", - PkgMd5: "12345", - }, Configs: rotate.Config{ Public: rotate.PublicCfg{ KeepPolicy: "most", @@ -54,15 +50,13 @@ func (c *MySQLRotateBinlogComp) Example() interface{} { "ibs": json.RawMessage(ibsExample), }, }, - Instances: []*rotate.ServerObj{ - { - Host: "1.1.1.1", Port: 3306, - Tags: rotate.InstanceMeta{ - BkBizId: 100, ClusterId: 10, ClusterDomain: "a.b.c", DBRole: "master", - }, - }, - }, - ExecUser: "sys", + IP: "127.0.0.1", + Ports: []int{20000, 20001}, + Role: "master", + BkBizId: 1, + ClusterDomain: "cluster.local", + ClusterId: 123, + ExecUser: "sys", }, } } diff --git a/dbm-services/mysql/db-tools/dbactuator/pkg/components/peripheraltools/rotatebinlog/init.go b/dbm-services/mysql/db-tools/dbactuator/pkg/components/peripheraltools/rotatebinlog/init.go index 6a667e18d5..8336c7503d 100644 --- 
a/dbm-services/mysql/db-tools/dbactuator/pkg/components/peripheraltools/rotatebinlog/init.go +++ b/dbm-services/mysql/db-tools/dbactuator/pkg/components/peripheraltools/rotatebinlog/init.go @@ -23,23 +23,43 @@ type MySQLRotateBinlogComp struct { } type MySQLRotateBinlogParam struct { - components.Medium - Configs rotate.Config `json:"configs" validate:"required"` - Instances []*rotate.ServerObj `json:"instances"` - ExecUser string `json:"exec_user"` + Configs rotate.Config `json:"configs" validate:"required"` + IP string `json:"ip"` + Ports []int `json:"port_list"` + Role string `json:"role"` + BkBizId int `json:"bk_biz_id"` + ClusterDomain string `json:"cluster_domain"` + ClusterId int `json:"cluster_id"` + ExecUser string `json:"exec_user"` } func (c *MySQLRotateBinlogComp) Init() (err error) { - c.Params.Configs.Servers = c.Params.Instances - for _, s := range c.Params.Configs.Servers { - s.Username = c.GeneralParam.RuntimeAccountParam.MonitorUser - s.Password = c.GeneralParam.RuntimeAccountParam.MonitorPwd + for _, port := range c.Params.Ports { + c.Params.Configs.Servers = append(c.Params.Configs.Servers, &rotate.ServerObj{ + Host: c.Params.IP, + Port: port, + Username: c.GeneralParam.RuntimeAccountParam.MonitorUser, + Password: c.GeneralParam.RuntimeAccountParam.MonitorPwd, + Socket: "", + Tags: rotate.InstanceMeta{ + BkBizId: c.Params.BkBizId, + ClusterId: c.Params.ClusterId, + ClusterDomain: c.Params.ClusterDomain, + DBRole: c.Params.Role, + }, + }) + var instObj = native.InsObject{ - Host: s.Host, Port: s.Port, User: s.Username, Pwd: s.Password, Socket: s.Socket, + Host: c.Params.IP, Port: port, + User: c.GeneralParam.RuntimeAccountParam.MonitorUser, Pwd: c.GeneralParam.RuntimeAccountParam.MonitorPwd, + Socket: "", } if dbw, err := instObj.Conn(); err != nil { - logger.Error("install mysql-rotatebinlog test connect failed: %s. instance:%+v", err.Error(), *s) - // return err + logger.Error( + "install mysql-rotatebinlog test connect failed: %s. 
instance:%s:%d", + err.Error(), c.Params.IP, port, + ) + } else { dbw.Stop() } @@ -55,9 +75,5 @@ func (c *MySQLRotateBinlogComp) Init() (err error) { } func (c *MySQLRotateBinlogComp) PreCheck() (err error) { - if err = c.Params.Medium.Check(); err != nil { - logger.Error("check mysql-rotatebinlog pkg failed: %s", err.Error()) - return err - } return nil } diff --git a/dbm-services/mysql/db-tools/mysql-table-checksum/pkg/checker/summary.go b/dbm-services/mysql/db-tools/mysql-table-checksum/pkg/checker/summary.go index a3d764f6e6..17799d2226 100644 --- a/dbm-services/mysql/db-tools/mysql-table-checksum/pkg/checker/summary.go +++ b/dbm-services/mysql/db-tools/mysql-table-checksum/pkg/checker/summary.go @@ -63,7 +63,10 @@ func summary(stdout string) (summaries []ChecksumSummary, err error) { cs.Time, _ = strconv.Atoi(splitRow[6]) cs.Table = splitRow[7] - summaries = append(summaries, cs) + if cs.Errors != 0 || cs.Diffs != 0 || cs.Chunks == 0 || cs.Skipped != 0 { + summaries = append(summaries, cs) + } + } } return summaries, nil diff --git a/dbm-ui/backend/db_meta/api/cluster/tendbha/decommission.py b/dbm-ui/backend/db_meta/api/cluster/tendbha/decommission.py index 2e0eedc7b1..425a47200d 100644 --- a/dbm-ui/backend/db_meta/api/cluster/tendbha/decommission.py +++ b/dbm-ui/backend/db_meta/api/cluster/tendbha/decommission.py @@ -68,9 +68,13 @@ def decommission(cluster: Cluster): ClusterDBHAExt.objects.filter(cluster=cluster).delete() # 删除集群相关的配置模板 TendbOpenAreaConfig.objects.filter(source_cluster_id=cluster.id).delete() - DBPartitionApi.cluster_del_conf( - params={"cluster_type": cluster.cluster_type, "bk_biz_id": cluster.bk_biz_id, "cluster_ids": [cluster.id]} - ) + try: + DBPartitionApi.cluster_del_conf( + params={"cluster_type": cluster.cluster_type, "bk_biz_id": cluster.bk_biz_id, "cluster_ids": [cluster.id]} + ) + except Exception as e: # noqa + logger.error(e) + # 删除集群在bkcc对应的模块 # TODO CC 目前没有把主机移出当前模块的接口,主机还在模块下,无法删除 # 
cc_manage.delete_cluster_modules(db_type=DBType.MySQL.value, cluster=cluster) diff --git a/dbm-ui/backend/db_meta/enums/type_maps.py b/dbm-ui/backend/db_meta/enums/type_maps.py index 5cb66a9baa..21487604bb 100644 --- a/dbm-ui/backend/db_meta/enums/type_maps.py +++ b/dbm-ui/backend/db_meta/enums/type_maps.py @@ -126,7 +126,7 @@ AccessLayer.STORAGE: MachineType.BACKEND, }, ClusterType.TenDBCluster: { - AccessLayer.PROXY: MachineType.SINGLE, + AccessLayer.PROXY: MachineType.SPIDER, AccessLayer.STORAGE: MachineType.REMOTE, }, ClusterType.Riak: { diff --git a/dbm-ui/backend/db_meta/migrations/0043_auto_20241106_1704.py b/dbm-ui/backend/db_meta/migrations/0043_auto_20241106_1704.py new file mode 100644 index 0000000000..f4db403b47 --- /dev/null +++ b/dbm-ui/backend/db_meta/migrations/0043_auto_20241106_1704.py @@ -0,0 +1,42 @@ +# Generated by Django 3.2.25 on 2024-11-06 09:04 + +from django.db import migrations, models + + +class Migration(migrations.Migration): + + dependencies = [ + ("db_meta", "0042_auto_20240903_1138"), + ] + + operations = [ + migrations.AlterModelOptions( + name="bksubzone", + options={"verbose_name": "蓝鲸园区表(BKSubzone)", "verbose_name_plural": "蓝鲸园区表(BKSubzone)"}, + ), + migrations.AlterField( + model_name="spec", + name="cpu", + field=models.JSONField(help_text='cpu规格描述:{"min":1,"max":10}', null=True), + ), + migrations.AlterField( + model_name="spec", + name="device_class", + field=models.JSONField(help_text='实际机器机型: ["class1","class2"]', null=True), + ), + migrations.AlterField( + model_name="spec", + name="mem", + field=models.JSONField(help_text='mem规格描述:{"min":100,"max":1000}', null=True), + ), + migrations.AlterField( + model_name="spec", + name="qps", + field=models.JSONField(default=dict, help_text='qps规格描述:{"min": 1, "max": 100}'), + ), + migrations.AlterField( + model_name="spec", + name="storage_spec", + field=models.JSONField(help_text='存储磁盘需求配置:[{"mount_point":"/data","size":500,"type":"ssd"}]', null=True), + ), + ] diff --git 
a/dbm-ui/backend/db_meta/migrations/0046_merge_20250117_0804.py b/dbm-ui/backend/db_meta/migrations/0046_merge_20250117_0804.py new file mode 100644 index 0000000000..9ddcdfcede --- /dev/null +++ b/dbm-ui/backend/db_meta/migrations/0046_merge_20250117_0804.py @@ -0,0 +1,13 @@ +# Generated by Django 3.2.25 on 2025-01-17 00:04 + +from django.db import migrations + + +class Migration(migrations.Migration): + + dependencies = [ + ("db_meta", "0043_auto_20241106_1704"), + ("db_meta", "0045_alter_polarisentrydetail_polaris_name"), + ] + + operations = [] diff --git a/dbm-ui/backend/db_proxy/reverse_api/mysql/impl/__init__.py b/dbm-ui/backend/db_proxy/reverse_api/mysql/impl/__init__.py index 73d80986e6..cccfd9bee0 100644 --- a/dbm-ui/backend/db_proxy/reverse_api/mysql/impl/__init__.py +++ b/dbm-ui/backend/db_proxy/reverse_api/mysql/impl/__init__.py @@ -10,3 +10,4 @@ """ from .get_instance_admin_password import get_instance_admin_password from .list_instance_info import list_instance_info +from .list_instance_monitor_config import list_instance_monitor_config diff --git a/dbm-ui/backend/db_proxy/reverse_api/mysql/impl/list_instance_info.py b/dbm-ui/backend/db_proxy/reverse_api/mysql/impl/list_instance_info.py index bdc1f13917..aaefeffda7 100644 --- a/dbm-ui/backend/db_proxy/reverse_api/mysql/impl/list_instance_info.py +++ b/dbm-ui/backend/db_proxy/reverse_api/mysql/impl/list_instance_info.py @@ -59,6 +59,9 @@ def list_storageinstance_info(q: Q) -> List: "ip": i.machine.ip, "port": i.port, "immute_domain": i.cluster.all()[0].immute_domain, + "cluster_type": i.cluster_type, + "db_module_id": i.db_module_id, + "bk_instance_id": i.bk_instance_id, "phase": i.phase, "status": i.status, "access_layer": i.access_layer, @@ -91,6 +94,9 @@ def list_proxyinstance_info(q: Q) -> List: "ip": i.machine.ip, "port": i.port, "immute_domain": i.cluster.all()[0].immute_domain, + "cluster_type": i.cluster_type, + "db_module_id": i.db_module_id, + "bk_instance_id": i.bk_instance_id, "phase": 
i.phase, "status": i.status, "access_layer": i.access_layer, diff --git a/dbm-ui/backend/db_proxy/reverse_api/mysql/impl/list_instance_monitor_config.py b/dbm-ui/backend/db_proxy/reverse_api/mysql/impl/list_instance_monitor_config.py new file mode 100644 index 0000000000..22488bc135 --- /dev/null +++ b/dbm-ui/backend/db_proxy/reverse_api/mysql/impl/list_instance_monitor_config.py @@ -0,0 +1,81 @@ +# -*- coding: utf-8 -*- +""" +TencentBlueKing is pleased to support the open source community by making 蓝鲸智云-DB管理系统(BlueKing-BK-DBM) available. +Copyright (C) 2017-2023 THL A29 Limited, a Tencent company. All rights reserved. +Licensed under the MIT License (the "License"); you may not use this file except in compliance with the License. +You may obtain a copy of the License at https://opensource.org/licenses/MIT +Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on +an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the +specific language governing permissions and limitations under the License. 
+""" +from typing import List, Optional + +from django.db.models import Q, QuerySet + +from backend.components import DBConfigApi +from backend.db_meta.enums import MachineType +from backend.db_meta.models import Machine, ProxyInstance, StorageInstance +from backend.flow.consts import SYSTEM_DBS + + +def list_instance_monitor_config(bk_cloud_id: int, ip: str, port_list: Optional[List[int]] = None) -> List[dict]: + m = Machine.objects.get(bk_cloud_id=bk_cloud_id, ip=ip) + + q = Q() + q |= Q(**{"machine": m}) + + if port_list: + q &= Q(**{"port__in": port_list}) + + res = [] + if m.machine_type in [MachineType.PROXY, MachineType.SPIDER]: + qs = ProxyInstance.objects.filter(q).prefetch_related("cluster", "machine") + res = generate_from_qs(bk_cloud_id=bk_cloud_id, qs=qs, has_role=False) + elif m.machine_type in [MachineType.BACKEND, MachineType.SINGLE, MachineType.REMOTE]: + qs = StorageInstance.objects.filter(q).prefetch_related("cluster", "machine") + res = generate_from_qs(bk_cloud_id=bk_cloud_id, qs=qs, has_role=True) + + # zip_str = zlib.compress(json.dumps(res).encode("utf-8")) + # print(len(zip_str), len(json.dumps(res).encode("utf-8"))) + + return res + + +def generate_from_qs(bk_cloud_id: int, qs: QuerySet, has_role: bool) -> List[dict]: + res = [] + for i in qs.all(): + cluster = i.cluster.all()[0] + role = "" + if has_role: + role = i.instance_inner_role + + res.append( + { + "system_dbs": SYSTEM_DBS, + "api_urs": "http://127.0.0.1:9999", + "machine_type": i.machine_type, + "bk_cloud_id": bk_cloud_id, + "bk_biz_id": i.bk_biz_id, + "ip": i.machine.ip, + "port": i.port, + "role": role, + "bk_instance_id": i.bk_instance_id, + "immute_domain": cluster.immute_domain, + "db_module_id": cluster.db_module_id, + "cluster_id": cluster.id, + "items_config": DBConfigApi.query_conf_item( + { + "bk_biz_id": f"{cluster.bk_cloud_id}", + "level_name": "cluster", + "level_value": cluster.immute_domain, + "conf_file": "items-config.yaml", + "conf_type": "mysql_monitor", + 
"namespace": cluster.cluster_type, + "level_info": {"module": f"{cluster.db_module_id}"}, + "format": "map", + } + )["content"], + } + ) + + return res diff --git a/dbm-ui/backend/db_proxy/reverse_api/mysql/views.py b/dbm-ui/backend/db_proxy/reverse_api/mysql/views.py index 85e73383bb..8efe4c73f4 100644 --- a/dbm-ui/backend/db_proxy/reverse_api/mysql/views.py +++ b/dbm-ui/backend/db_proxy/reverse_api/mysql/views.py @@ -16,7 +16,7 @@ from backend.bk_web.swagger import common_swagger_auto_schema from backend.db_proxy.reverse_api.base_reverse_api_view import BaseReverseApiView from backend.db_proxy.reverse_api.decorators import reverse_api -from backend.db_proxy.reverse_api.mysql.impl import list_instance_info +from backend.db_proxy.reverse_api.mysql.impl import list_instance_info, list_instance_monitor_config logger = logging.getLogger("root") @@ -38,3 +38,20 @@ def list_instance_info(self, request, *args, **kwargs): "errors": None, } ) + + @common_swagger_auto_schema(operation_summary=_("生成监控配置")) + @reverse_api(url_path="list_instance_monitor_config") + def list_instance_monitor_config(self, request, *args, **kwargs): + bk_cloud_id, ip, port_list = self.get_api_params() + logger.info(f"bk_cloud_id: {bk_cloud_id}, ip: {ip}, port:{port_list}") + res = list_instance_monitor_config(bk_cloud_id=bk_cloud_id, ip=ip, port_list=port_list) + logger.info(f"instance monitor config: {res}") + return JsonResponse( + { + "result": True, + "code": 0, + "data": res, + "message": "", + "errors": None, + } + ) diff --git a/dbm-ui/backend/db_services/meta_import/serializers.py b/dbm-ui/backend/db_services/meta_import/serializers.py index 743cd1dc29..3960df87ad 100644 --- a/dbm-ui/backend/db_services/meta_import/serializers.py +++ b/dbm-ui/backend/db_services/meta_import/serializers.py @@ -15,6 +15,7 @@ from backend.db_meta.enums import ClusterType from backend.db_meta.enums.spec import SpecClusterType, SpecMachineType from backend.db_meta.models import AppCache, DBModule, Spec +from 
backend.flow.engine.bamboo.scene.mysql.deploy_peripheraltools.departs import ALLDEPARTS class CustomChoiceField(serializers.ChoiceField): @@ -68,20 +69,21 @@ def validate(self, attrs): return attrs -class TenDBHAStandardizeSerializer(serializers.Serializer): - bk_biz_id = BizChoiceField(help_text=_("业务ID")) - file = serializers.FileField(help_text=_("域名列表文件")) - - def validate(self, attrs): - return attrs - - -class TenDBClusterStandardizeSerializer(serializers.Serializer): +class MySQLClusterStandardizeSerializer(serializers.Serializer): bk_biz_id = BizChoiceField(help_text=_("业务ID")) + cluster_type = serializers.ChoiceField( + choices=[ + (ClusterType.TenDBSingle.value, ClusterType.TenDBSingle.name), + (ClusterType.TenDBHA.value, ClusterType.TenDBHA.name), + (ClusterType.TenDBCluster.value, ClusterType.TenDBCluster.name), + ] + ) + # immute_domains = serializers.CharField(allow_null=False, allow_blank=False) file = serializers.FileField(help_text=_("域名列表文件")) - - def validate(self, attrs): - return attrs + departs = serializers.MultipleChoiceField(choices=[(c.value, c.name) for c in ALLDEPARTS], default=ALLDEPARTS) + deploy_binary = serializers.BooleanField(default=True) + push_config = serializers.BooleanField(default=True) + collect_sysinfo = serializers.BooleanField(default=True) class TenDBClusterMetadataImportSerializer(serializers.Serializer): @@ -126,11 +128,3 @@ class TenDBSingleMetadataImportSerializer(serializers.Serializer): def validate(self, attrs): return attrs - - -class TenDBSingleStandardizeSerializer(serializers.Serializer): - bk_biz_id = BizChoiceField(help_text=_("业务ID")) - file = serializers.FileField(help_text=_("域名列表文件")) - - def validate(self, attrs): - return attrs diff --git a/dbm-ui/backend/db_services/meta_import/views.py b/dbm-ui/backend/db_services/meta_import/views.py index 294ceb61eb..392c90e6a8 100644 --- a/dbm-ui/backend/db_services/meta_import/views.py +++ b/dbm-ui/backend/db_services/meta_import/views.py @@ -25,22 +25,17 @@ 
from backend.db_meta.models import Cluster from backend.db_services.meta_import.constants import SWAGGER_TAG from backend.db_services.meta_import.serializers import ( + MySQLClusterStandardizeSerializer, TenDBClusterAppendCTLSerializer, TenDBClusterMetadataImportSerializer, - TenDBClusterStandardizeSerializer, TenDBHAMetadataImportSerializer, - TenDBHAStandardizeSerializer, TenDBSingleMetadataImportSerializer, - TenDBSingleStandardizeSerializer, ) from backend.iam_app.handlers.drf_perm.base import RejectPermission from backend.ticket.builders.mysql.mysql_ha_metadata_import import TenDBHAMetadataImportDetailSerializer -from backend.ticket.builders.mysql.mysql_ha_standardize import TenDBHAStandardizeDetailSerializer from backend.ticket.builders.spider.metadata_import import TenDBClusterMetadataImportDetailSerializer -from backend.ticket.builders.spider.mysql_spider_standardize import TenDBClusterStandardizeDetailSerializer from backend.ticket.builders.tendbcluster.append_deploy_ctl import TenDBClusterAppendDeployCTLDetailSerializer from backend.ticket.builders.tendbsingle.metadata_import import TenDBSingleMetadataImportDetailSerializer -from backend.ticket.builders.tendbsingle.standardize import TenDBSingleStandardizeDetailSerializer from backend.ticket.constants import TicketType from backend.ticket.models import Ticket @@ -48,7 +43,6 @@ class DBMetadataImportViewSet(viewsets.SystemViewSet): - pagination_class = None def _get_custom_permissions(self): @@ -82,52 +76,52 @@ def tendbha_metadata_import(self, request, *args, **kwargs): ) return Response(data) - @common_swagger_auto_schema( - operation_summary=_("TenDB HA 标准化接入"), - tags=[SWAGGER_TAG], - ) - @action( - methods=["POST"], - detail=False, - serializer_class=TenDBHAStandardizeSerializer, - parser_classes=[MultiPartParser], - ) - def tendbha_standardize(self, request, *args, **kwargs): - data = self.params_validate(self.get_serializer_class()) - - domain_list = [] - for line in data.pop("file").readlines(): - 
domain_list.append(line.decode("utf-8").strip().rstrip(".")) - - cluster_ids = list( - Cluster.objects.filter(immute_domain__in=domain_list, cluster_type=ClusterType.TenDBHA.value).values_list( - "id", flat=True - ) - ) - logger.info("domains: {}, ids: {}".format(domain_list, cluster_ids)) - - exists_domains = list( - Cluster.objects.filter(immute_domain__in=domain_list, cluster_type=ClusterType.TenDBHA.value).values_list( - "immute_domain", flat=True - ) - ) - diff = list(set(domain_list) - set(exists_domains)) - if diff: - raise serializers.ValidationError(_("cluster {} not found".format(diff))) - - data["cluster_ids"] = cluster_ids - - data["infos"] = {"cluster_ids": data["cluster_ids"]} - # 创建标准化ticket - TenDBHAStandardizeDetailSerializer(data=data).is_valid(raise_exception=True) - Ticket.create_ticket( - ticket_type=TicketType.MYSQL_HA_STANDARDIZE, - creator=request.user.username, - bk_biz_id=data["bk_biz_id"], - remark=self.tendbha_standardize.__name__, - details=data, - ) - return Response(data) + # @common_swagger_auto_schema( + # operation_summary=_("TenDB HA 标准化接入"), + # tags=[SWAGGER_TAG], + # ) + # @action( + # methods=["POST"], + # detail=False, + # serializer_class=TenDBHAStandardizeSerializer, + # parser_classes=[MultiPartParser], + # ) + # def tendbha_standardize(self, request, *args, **kwargs): + # data = self.params_validate(self.get_serializer_class()) + # + # domain_list = [] + # for line in data.pop("file").readlines(): + # domain_list.append(line.decode("utf-8").strip().rstrip(".")) + # + # cluster_ids = list( + # Cluster.objects.filter(immute_domain__in=domain_list, cluster_type=ClusterType.TenDBHA.value).values_list( + # "id", flat=True + # ) + # ) + # logger.info("domains: {}, ids: {}".format(domain_list, cluster_ids)) + # + # exists_domains = list( + # Cluster.objects.filter(immute_domain__in=domain_list, cluster_type=ClusterType.TenDBHA.value).values_list( + # "immute_domain", flat=True + # ) + # ) + # diff = list(set(domain_list) - 
set(exists_domains)) + # if diff: + # raise serializers.ValidationError(_("cluster {} not found".format(diff))) + # + # data["cluster_ids"] = cluster_ids + # + # data["infos"] = {"cluster_ids": data["cluster_ids"]} + # # 创建标准化ticket + # TenDBHAStandardizeDetailSerializer(data=data).is_valid(raise_exception=True) + # Ticket.create_ticket( + # ticket_type=TicketType.MYSQL_HA_STANDARDIZE, + # creator=request.user.username, + # bk_biz_id=data["bk_biz_id"], + # remark=self.tendbha_standardize.__name__, + # details=data, + # ) + # return Response(data) @common_swagger_auto_schema( operation_summary=_("TenDB Cluster 元数据导入"), @@ -152,52 +146,52 @@ def tendbcluster_metadata_import(self, request, *args, **kwargs): ) return Response(data) - @common_swagger_auto_schema( - operation_summary=_("TenDB Cluster 集群标准化"), - tags=[SWAGGER_TAG], - ) - @action( - methods=["POST"], - detail=False, - serializer_class=TenDBClusterStandardizeSerializer, - parser_classes=[MultiPartParser], - ) - def tendbcluster_standardize(self, request, *args, **kwargs): - data = self.params_validate(self.get_serializer_class()) - - domain_list = [] - for line in data.pop("file").readlines(): - domain_list.append(line.decode("utf-8").strip().rstrip(".")) - - cluster_ids = list( - Cluster.objects.filter( - bk_biz_id=data["bk_biz_id"], immute_domain__in=domain_list, cluster_type=ClusterType.TenDBCluster.value - ).values_list("id", flat=True) - ) - logger.info("domains: {}, ids: {}".format(domain_list, cluster_ids)) - - exists_domains = list( - Cluster.objects.filter( - bk_biz_id=data["bk_biz_id"], immute_domain__in=domain_list, cluster_type=ClusterType.TenDBCluster.value - ).values_list("immute_domain", flat=True) - ) - diff = list(set(domain_list) - set(exists_domains)) - if diff: - raise serializers.ValidationError(_("cluster {} not found".format(diff))) - - data["cluster_ids"] = cluster_ids - - data["infos"] = {"cluster_ids": data["cluster_ids"]} - # 创建标准化ticket - 
TenDBClusterStandardizeDetailSerializer(data=data).is_valid(raise_exception=True) - Ticket.create_ticket( - ticket_type=TicketType.TENDBCLUSTER_STANDARDIZE, - creator=request.user.username, - bk_biz_id=data["bk_biz_id"], - remark=self.tendbcluster_standardize.__name__, - details=data, - ) - return Response(data) + # @common_swagger_auto_schema( + # operation_summary=_("TenDB Cluster 集群标准化"), + # tags=[SWAGGER_TAG], + # ) + # @action( + # methods=["POST"], + # detail=False, + # serializer_class=TenDBClusterStandardizeSerializer, + # parser_classes=[MultiPartParser], + # ) + # def tendbcluster_standardize(self, request, *args, **kwargs): + # data = self.params_validate(self.get_serializer_class()) + # + # domain_list = [] + # for line in data.pop("file").readlines(): + # domain_list.append(line.decode("utf-8").strip().rstrip(".")) + # + # cluster_ids = list( + # Cluster.objects.filter( + # bk_biz_id=data["bk_biz_id"], immute_domain__in=domain_list, cluster_type=ClusterType.TenDBCluster.value + # ).values_list("id", flat=True) + # ) + # logger.info("domains: {}, ids: {}".format(domain_list, cluster_ids)) + # + # exists_domains = list( + # Cluster.objects.filter( + # bk_biz_id=data["bk_biz_id"], immute_domain__in=domain_list, cluster_type=ClusterType.TenDBCluster.value + # ).values_list("immute_domain", flat=True) + # ) + # diff = list(set(domain_list) - set(exists_domains)) + # if diff: + # raise serializers.ValidationError(_("cluster {} not found".format(diff))) + # + # data["cluster_ids"] = cluster_ids + # + # data["infos"] = {"cluster_ids": data["cluster_ids"]} + # # 创建标准化ticket + # TenDBClusterStandardizeDetailSerializer(data=data).is_valid(raise_exception=True) + # Ticket.create_ticket( + # ticket_type=TicketType.TENDBCLUSTER_STANDARDIZE, + # creator=request.user.username, + # bk_biz_id=data["bk_biz_id"], + # remark=self.tendbcluster_standardize.__name__, + # details=data, + # ) + # return Response(data) @common_swagger_auto_schema( operation_summary=_("TenDB 
Cluster 追加部署中控"), @@ -269,48 +263,54 @@ def tendbsingle_metadata_import(self, request, *args, **kwargs): return Response(data) @common_swagger_auto_schema( - operation_summary=_("TenDB Single 集群标准化"), + operation_summary=_("MySQL 集群标准化"), tags=[SWAGGER_TAG], ) @action( methods=["POST"], detail=False, - serializer_class=TenDBSingleStandardizeSerializer, + serializer_class=MySQLClusterStandardizeSerializer, parser_classes=[MultiPartParser], ) - def tendbsingle_standardize(self, request, *args, **kwargs): + def mysql_standardize(self, request, *args, **kwargs): data = self.params_validate(self.get_serializer_class()) + # domain_list = [line.strip() for line in data["immute_domains"].splitlines()] domain_list = [] for line in data.pop("file").readlines(): domain_list.append(line.decode("utf-8").strip().rstrip(".")) cluster_ids = list( - Cluster.objects.filter( - immute_domain__in=domain_list, cluster_type=ClusterType.TenDBSingle.value - ).values_list("id", flat=True) + Cluster.objects.filter(immute_domain__in=domain_list, cluster_type=data["cluster_type"]).values_list( + "id", flat=True + ) ) logger.info("domains: {}, ids: {}".format(domain_list, cluster_ids)) - + logger.info("departs: {}", data["departs"]) + logger.info("cluster_type: {}", data["cluster_type"]) exists_domains = list( Cluster.objects.filter( - immute_domain__in=domain_list, cluster_type=ClusterType.TenDBSingle.value + immute_domain__in=domain_list, cluster_type=data["cluster_type"], bk_biz_id=data["bk_biz_id"] ).values_list("immute_domain", flat=True) ) diff = list(set(domain_list) - set(exists_domains)) if diff: raise serializers.ValidationError(_("cluster {} not found".format(diff))) - data["cluster_ids"] = cluster_ids - - data["infos"] = {"cluster_ids": data["cluster_ids"]} - - TenDBSingleStandardizeDetailSerializer(data=data).is_valid(raise_exception=True) + body = { + "bk_biz_id": data["bk_biz_id"], + "cluster_type": data["cluster_type"], + "cluster_ids": cluster_ids, + "departs": 
list(data["departs"]), + "with_deploy_binary": data["deploy_binary"], + "with_push_config": data["push_config"], + "with_collect_sysinfo": data["collect_sysinfo"], + } Ticket.create_ticket( - ticket_type=TicketType.TENDBSINGLE_STANDARDIZE, + ticket_type=TicketType.MYSQL_CLUSTER_STANDARDIZE, creator=request.user.username, bk_biz_id=data["bk_biz_id"], - remark=self.tendbsingle_standardize.__name__, - details=data, + remark=self.mysql_standardize.__name__, + details=body, ) - return Response(data) + return Response(body) diff --git a/dbm-ui/backend/flow/consts.py b/dbm-ui/backend/flow/consts.py index 9f1e5b0b58..67db9c23de 100644 --- a/dbm-ui/backend/flow/consts.py +++ b/dbm-ui/backend/flow/consts.py @@ -450,12 +450,16 @@ class DBActuatorActionEnum(str, StructuredEnum): CreateToDBViaCtl = EnumField("create-to-db-via-ctl", _("重命名在中控建立目标库")) RenamePreDropToOnRemote = EnumField("rename-pre-drop-to-on-remote", _("TenDBCluster 重命名在remote预清理目标库")) RenameDropFromViaCtl = EnumField("rename-drop-from-via-ctl", _("TenDBCluster 重命名在中控删除源库")) - PushMySQLCrondConfig = EnumField("push-mysql-crond-config", _("推送mysql-crond配置")) + PushMySQLMonitorConfig = EnumField("push-mysql-monitor-config", _("推送mysql-monitor配置")) PushChecksumConfig = EnumField("push-checksum-config", _("推送mysql-table-checksum配置")) PushNewDbBackupConfig = EnumField("push-new-db-backup-config", _("推送备份配置")) PushMySQLRotatebinlogConfig = EnumField("push-mysql-rotatebinlog-config", _("推送rotatebinlog配置")) ChangeServerId = EnumField("change-server-id", _("change-server-id")) + # --- + PushExporterCnf = EnumField("push-exporter-cnf", _("push-exporter-cnf")) + PreparePeripheraltoolsBinary = EnumField("prepare-peripheraltools-binary", _("prepare-peripheraltools-binary")) + PushMySQLCrondConfig = EnumField("push-mysql-crond-config", _("推送mysql-crond配置")) class RedisActuatorActionEnum(str, StructuredEnum): diff --git a/dbm-ui/backend/flow/views/mysql_push_peripheral_config.py 
b/dbm-ui/backend/flow/engine/bamboo/scene/mysql/deploy_peripheraltools/__init__.py similarity index 54% rename from dbm-ui/backend/flow/views/mysql_push_peripheral_config.py rename to dbm-ui/backend/flow/engine/bamboo/scene/mysql/deploy_peripheraltools/__init__.py index cddcbd4e6c..aa5085c628 100644 --- a/dbm-ui/backend/flow/views/mysql_push_peripheral_config.py +++ b/dbm-ui/backend/flow/engine/bamboo/scene/mysql/deploy_peripheraltools/__init__.py @@ -8,23 +8,3 @@ an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. """ -import logging - -from django.utils.translation import ugettext as _ - -from backend.flow.engine.controller.mysql import MySQLController -from backend.flow.views.base import FlowTestView -from backend.utils.basic import generate_root_id - -logger = logging.getLogger("root") - - -class MySQLPushPeripheralConfigView(FlowTestView): - @staticmethod - def post(request): - logger.info(_("开始下发周边配置场景")) - root_id = generate_root_id() - logger.info("define root_id: {}".format(root_id)) - - c = MySQLController(root_id=root_id, ticket_data=request.data) - c.push_peripheral_config_scene() diff --git a/dbm-ui/backend/flow/engine/bamboo/scene/mysql/deploy_peripheraltools/cc_trans_module.py b/dbm-ui/backend/flow/engine/bamboo/scene/mysql/deploy_peripheraltools/cc_trans_module.py new file mode 100644 index 0000000000..6e12ef999e --- /dev/null +++ b/dbm-ui/backend/flow/engine/bamboo/scene/mysql/deploy_peripheraltools/cc_trans_module.py @@ -0,0 +1,97 @@ +# -*- coding: utf-8 -*- +""" +TencentBlueKing is pleased to support the open source community by making 蓝鲸智云-DB管理系统(BlueKing-BK-DBM) available. +Copyright (C) 2017-2023 THL A29 Limited, a Tencent company. All rights reserved. +Licensed under the MIT License (the "License"); you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at https://opensource.org/licenses/MIT +Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on +an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the +specific language governing permissions and limitations under the License. +""" +from dataclasses import asdict +from typing import Dict, List + +from bamboo_engine.builder import SubProcess +from django.utils.translation import ugettext as _ + +from backend.db_meta.enums import AccessLayer, ClusterMachineAccessTypeDefine, ClusterType +from backend.db_meta.models import Cluster +from backend.flow.consts import DBA_ROOT_USER +from backend.flow.engine.bamboo.scene.common.builder import SubBuilder +from backend.flow.plugins.components.collections.mysql.cluster_standardize_trans_module import ( + ClusterStandardizeTransModuleComponent, +) +from backend.flow.plugins.components.collections.mysql.exec_actuator_script import ExecuteDBActuatorScriptComponent +from backend.flow.utils.mysql.mysql_act_dataclass import ExecActuatorKwargs +from backend.flow.utils.mysql.mysql_act_playload import MysqlActPayload + + +def cc_trans_module( + root_id: str, data: Dict, cluster_type: ClusterType, cluster_objects: List[Cluster], proxy_group, storage_group +) -> SubProcess: + """ + 1. 按实例下发 exporter 配置 + 2. 
cc 模块移动 + """ + sp = SubBuilder(root_id=root_id, data=data) + + sub_flow_list = [ + push_exporter_cnf( + root_id=root_id, + data=data, + cloud_ip_group=storage_group, + machine_type=ClusterMachineAccessTypeDefine[cluster_type][AccessLayer.STORAGE], + ) + ] + if cluster_type != ClusterType.TenDBSingle: + sub_flow_list.append( + push_exporter_cnf( + root_id=root_id, + data=data, + cloud_ip_group=proxy_group, + machine_type=ClusterMachineAccessTypeDefine[cluster_type][AccessLayer.PROXY], + ) + ) + + sp.add_parallel_sub_pipeline(sub_flow_list=sub_flow_list) + + acts = [] + for cluster_obj in cluster_objects: + acts.append( + { + "act_name": _("{} CC 标准化".format(cluster_obj.immute_domain)), + "act_component_code": ClusterStandardizeTransModuleComponent.code, + "kwargs": { + "cluster_id": cluster_obj.id, + }, + } + ) + + sp.add_parallel_acts(acts_list=acts) + + return sp.build_sub_process(sub_name=_("CC 标准化")) + + +def push_exporter_cnf(root_id: str, data: Dict, cloud_ip_group, machine_type): + acts = [] + for bk_cloud_id, ip_dicts in cloud_ip_group.items(): + for ip, port_list in ip_dicts.items(): + acts.append( + { + "act_name": _(f"{ip}:{port_list}"), + "act_component_code": ExecuteDBActuatorScriptComponent.code, + "kwargs": asdict( + ExecActuatorKwargs( + exec_ip=ip, + run_as_system_user=DBA_ROOT_USER, + get_mysql_payload_func=MysqlActPayload.push_exporter_cnf.__name__, + cluster={"port_list": port_list, "machine_type": machine_type}, + bk_cloud_id=bk_cloud_id, + ) + ), + } + ) + + sp = SubBuilder(root_id=root_id, data=data) + sp.add_parallel_acts(acts_list=acts) + return sp.build_sub_process(sub_name=_("{} 生成 exporter 配置".format(machine_type))) diff --git a/dbm-ui/backend/flow/engine/bamboo/scene/mysql/deploy_peripheraltools/collect_sysinfo.py b/dbm-ui/backend/flow/engine/bamboo/scene/mysql/deploy_peripheraltools/collect_sysinfo.py new file mode 100644 index 0000000000..b8b40abbc0 --- /dev/null +++ 
b/dbm-ui/backend/flow/engine/bamboo/scene/mysql/deploy_peripheraltools/collect_sysinfo.py @@ -0,0 +1,37 @@ +# -*- coding: utf-8 -*- +""" +TencentBlueKing is pleased to support the open source community by making 蓝鲸智云-DB管理系统(BlueKing-BK-DBM) available. +Copyright (C) 2017-2023 THL A29 Limited, a Tencent company. All rights reserved. +Licensed under the MIT License (the "License"); you may not use this file except in compliance with the License. +You may obtain a copy of the License at https://opensource.org/licenses/MIT +Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on +an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the +specific language governing permissions and limitations under the License. +""" +from typing import Dict + +from bamboo_engine.builder import SubProcess +from django.utils.translation import ugettext as _ + +from backend.flow.engine.bamboo.scene.common.builder import SubBuilder +from backend.flow.engine.bamboo.scene.mysql.common.common_sub_flow import update_machine_system_info_flow + + +def collect_sysinfo(root_id: str, data: Dict, proxy_group, storage_group) -> SubProcess: + """ + 牵扯到上下文嵌套, 标准化流程不合适搞这个 + """ + pipes = [] + for bk_cloud_id, ip_dicts in { + k: {**proxy_group[k], **storage_group[k]} for k in set(list(proxy_group.keys()) + list(storage_group.keys())) + }.items(): + ips = list(ip_dicts.keys()) + pipes.append( + update_machine_system_info_flow( + root_id=root_id, bk_cloud_id=bk_cloud_id, parent_global_data=data, ip_list=ips + ) + ) + + sp = SubBuilder(root_id=root_id, data=data) + sp.add_parallel_sub_pipeline(sub_flow_list=pipes) + return sp.build_sub_process(sub_name=_("收集系统信息")) diff --git a/dbm-ui/backend/flow/engine/bamboo/scene/mysql/deploy_peripheraltools/departs.py b/dbm-ui/backend/flow/engine/bamboo/scene/mysql/deploy_peripheraltools/departs.py new file mode 100644 index 0000000000..822cab1cc3 --- 
/dev/null +++ b/dbm-ui/backend/flow/engine/bamboo/scene/mysql/deploy_peripheraltools/departs.py @@ -0,0 +1,42 @@ +# -*- coding: utf-8 -*- +""" +TencentBlueKing is pleased to support the open source community by making 蓝鲸智云-DB管理系统(BlueKing-BK-DBM) available. +Copyright (C) 2017-2023 THL A29 Limited, a Tencent company. All rights reserved. +Licensed under the MIT License (the "License"); you may not use this file except in compliance with the License. +You may obtain a copy of the License at https://opensource.org/licenses/MIT +Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on +an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the +specific language governing permissions and limitations under the License. +""" +from typing import List + +from django.utils.translation import ugettext as _ + +from blue_krill.data_types.enum import EnumField, StructuredEnum + + +class DeployPeripheralToolsDepart(str, StructuredEnum): + BackupClient = EnumField("backup-client", _("backup-client")) + MySQLDBBackup = EnumField("mysql-dbbackup", _("mysql-dbbackup")) + # 下面这些要保证和介质命名一致 + DBAToolKit = EnumField("dba-toolkit", _("dba-toolkit")) + MySQLCrond = EnumField("mysql-crond", _("mysql-rond")) + MySQLMonitor = EnumField("mysql-monitor", _("mysql-monitor")) + MySQLRotateBinlog = EnumField("rotate-binlog", _("rotate-binlog")) + MySQLTableChecksum = EnumField("mysql-checksum", _("mysql-checksum")) + + +ALLDEPARTS = [ + DeployPeripheralToolsDepart.BackupClient, + DeployPeripheralToolsDepart.MySQLDBBackup, + DeployPeripheralToolsDepart.DBAToolKit, + DeployPeripheralToolsDepart.MySQLCrond, + DeployPeripheralToolsDepart.MySQLMonitor, + DeployPeripheralToolsDepart.MySQLRotateBinlog, + DeployPeripheralToolsDepart.MySQLTableChecksum, +] + + +def remove_depart(d: DeployPeripheralToolsDepart, departs: List[DeployPeripheralToolsDepart]): + if d in departs: + 
departs.remove(d) diff --git a/dbm-ui/backend/flow/engine/bamboo/scene/mysql/deploy_peripheraltools/flow.py b/dbm-ui/backend/flow/engine/bamboo/scene/mysql/deploy_peripheraltools/flow.py new file mode 100644 index 0000000000..b01b805543 --- /dev/null +++ b/dbm-ui/backend/flow/engine/bamboo/scene/mysql/deploy_peripheraltools/flow.py @@ -0,0 +1,59 @@ +# -*- coding: utf-8 -*- +""" +TencentBlueKing is pleased to support the open source community by making 蓝鲸智云-DB管理系统(BlueKing-BK-DBM) available. +Copyright (C) 2017-2023 THL A29 Limited, a Tencent company. All rights reserved. +Licensed under the MIT License (the "License"); you may not use this file except in compliance with the License. +You may obtain a copy of the License at https://opensource.org/licenses/MIT +Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on +an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the +specific language governing permissions and limitations under the License. 
+""" +import logging +from copy import deepcopy +from typing import Dict, Optional + +from django.utils.translation import ugettext as _ + +from backend.flow.engine.bamboo.scene.common.builder import Builder +from backend.flow.engine.bamboo.scene.mysql.deploy_peripheraltools.subflow import standardize_mysql_cluster_subflow +from backend.flow.utils.mysql.mysql_context_dataclass import SystemInfoContext + +logger = logging.getLogger("flow") + + +class MySQLStandardizeFlow(object): + def __init__(self, root_id: str, data: Optional[Dict]): + self.root_id = root_id + self.data = deepcopy(data) + + def doit(self): + bk_biz_id = self.data.get("bk_biz_id") + cluster_type = self.data.get("cluster_type") + cluster_ids = list(set(self.data.get("cluster_ids"))) + departs = self.data.get("departs") + with_deploy_binary = self.data.get("with_deploy_binary") + with_push_config = self.data.get("with_push_config") + with_collect_sysinfo = self.data.get("with_collect_sysinfo") + + pipe = Builder( + root_id=self.root_id, + data=self.data, + need_random_pass_cluster_ids=cluster_ids, + ) + + pipe.add_sub_pipeline( + sub_flow=standardize_mysql_cluster_subflow( + root_id=self.root_id, + data=self.data, + bk_biz_id=bk_biz_id, + cluster_type=cluster_type, + cluster_ids=cluster_ids, + departs=departs, + with_deploy_binary=with_deploy_binary, + with_push_config=with_push_config, + with_collect_sysinfo=with_collect_sysinfo, + ) + ) + + logger.info(_("构建MySQL标准化流程成功")) + pipe.run_pipeline(is_drop_random_user=True, init_trans_data_class=SystemInfoContext()) diff --git a/dbm-ui/backend/flow/engine/bamboo/scene/mysql/deploy_peripheraltools/group_ips.py b/dbm-ui/backend/flow/engine/bamboo/scene/mysql/deploy_peripheraltools/group_ips.py new file mode 100644 index 0000000000..3209fe79bf --- /dev/null +++ b/dbm-ui/backend/flow/engine/bamboo/scene/mysql/deploy_peripheraltools/group_ips.py @@ -0,0 +1,46 @@ +# -*- coding: utf-8 -*- +""" +TencentBlueKing is pleased to support the open source community by 
making 蓝鲸智云-DB管理系统(BlueKing-BK-DBM) available. +Copyright (C) 2017-2023 THL A29 Limited, a Tencent company. All rights reserved. +Licensed under the MIT License (the "License"); you may not use this file except in compliance with the License. +You may obtain a copy of the License at https://opensource.org/licenses/MIT +Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on +an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the +specific language governing permissions and limitations under the License. +""" +from collections import defaultdict +from typing import List, Tuple + +from backend.db_meta.models import Cluster + + +def group_ips(cluster_objects: List[Cluster]) -> Tuple: + """ + 聚合集群 ip + proxy_group: { + bk_cloud_id(int): { + ip(str): [ports] + } + } + storage_group: { + bk_cloud_id(int): { + ip(str): [ports] + } + } + """ + + proxy_group = defaultdict(lambda: defaultdict(list)) + storage_group = defaultdict(lambda: defaultdict(list)) + + for cluster in cluster_objects: + cluster.proxyinstance_set.values("machine__ip") + for i in cluster.proxyinstance_set.all(): + ip = i.machine.ip + bk_cloud_id = i.machine.bk_cloud_id + proxy_group[bk_cloud_id][ip].append(i.port) + for i in cluster.storageinstance_set.all(): + ip = i.machine.ip + bk_cloud_id = i.machine.bk_cloud_id + storage_group[bk_cloud_id][ip].append(i.port) + + return proxy_group, storage_group diff --git a/dbm-ui/backend/flow/engine/bamboo/scene/mysql/deploy_peripheraltools/instance_standardize.py b/dbm-ui/backend/flow/engine/bamboo/scene/mysql/deploy_peripheraltools/instance_standardize.py new file mode 100644 index 0000000000..b8bf129b04 --- /dev/null +++ b/dbm-ui/backend/flow/engine/bamboo/scene/mysql/deploy_peripheraltools/instance_standardize.py @@ -0,0 +1,83 @@ +# -*- coding: utf-8 -*- +""" +TencentBlueKing is pleased to support the open source community by making 
蓝鲸智云-DB管理系统(BlueKing-BK-DBM) available. +Copyright (C) 2017-2023 THL A29 Limited, a Tencent company. All rights reserved. +Licensed under the MIT License (the "License"); you may not use this file except in compliance with the License. +You may obtain a copy of the License at https://opensource.org/licenses/MIT +Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on +an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the +specific language governing permissions and limitations under the License. +""" +from dataclasses import asdict +from typing import Dict, List + +from bamboo_engine.builder import SubProcess +from django.utils.translation import ugettext as _ + +from backend.db_meta.enums import AccessLayer, ClusterMachineAccessTypeDefine, ClusterType, MachineType +from backend.flow.consts import DBA_ROOT_USER +from backend.flow.engine.bamboo.scene.common.builder import SubBuilder +from backend.flow.plugins.components.collections.mysql.exec_actuator_script import ExecuteDBActuatorScriptComponent +from backend.flow.utils.mysql.mysql_act_dataclass import ExecActuatorKwargs +from backend.flow.utils.mysql.mysql_act_playload import MysqlActPayload + + +def instance_standardize( + root_id: str, data: Dict, cluster_type: ClusterType, proxy_group, storage_group +) -> SubProcess: + acts = make_mysql_standardize_acts( + storage_group, machine_type=ClusterMachineAccessTypeDefine[cluster_type][AccessLayer.STORAGE] + ) + + if cluster_type == ClusterType.TenDBCluster: + acts.extend(make_mysql_standardize_acts(ip_group=proxy_group, machine_type=MachineType.SPIDER)) + elif cluster_type == ClusterType.TenDBHA: + acts.extend(make_proxy_standardize_acts(proxy_group)) + + sp = SubBuilder(root_id=root_id, data=data) + sp.add_parallel_acts(acts_list=acts) + return sp.build_sub_process(sub_name=_("实例标准化")) + + +def make_proxy_standardize_acts(ip_group) -> List: + acts 
= [] + for bk_cloud_id, ip_dicts in ip_group.items(): + for ip, port_list in ip_dicts.items(): + acts.append( + { + "act_name": _(f"{ip}"), + "act_component_code": ExecuteDBActuatorScriptComponent.code, + "kwargs": asdict( + ExecActuatorKwargs( + exec_ip=ip, + run_as_system_user=DBA_ROOT_USER, + get_mysql_payload_func=MysqlActPayload.standardize_proxy.__name__, + cluster={"port_list": port_list}, + bk_cloud_id=bk_cloud_id, + ) + ), + } + ) + return acts + + +def make_mysql_standardize_acts(ip_group, machine_type: MachineType) -> List: + acts = [] + for bk_cloud_id, ip_group in ip_group.items(): + for ip, port_list in ip_group.items(): + acts.append( + { + "act_name": _(f"{ip}"), + "act_component_code": ExecuteDBActuatorScriptComponent.code, + "kwargs": asdict( + ExecActuatorKwargs( + exec_ip=ip, + run_as_system_user=DBA_ROOT_USER, + get_mysql_payload_func=MysqlActPayload.standardize_mysql.__name__, + cluster={"port_list": port_list, "machine_type": machine_type}, + bk_cloud_id=bk_cloud_id, + ) + ), + } + ) + return acts diff --git a/dbm-ui/backend/flow/engine/bamboo/scene/mysql/deploy_peripheraltools/prepare_departs_binary.py b/dbm-ui/backend/flow/engine/bamboo/scene/mysql/deploy_peripheraltools/prepare_departs_binary.py new file mode 100644 index 0000000000..76b7ba4c03 --- /dev/null +++ b/dbm-ui/backend/flow/engine/bamboo/scene/mysql/deploy_peripheraltools/prepare_departs_binary.py @@ -0,0 +1,128 @@ +# -*- coding: utf-8 -*- +""" +TencentBlueKing is pleased to support the open source community by making 蓝鲸智云-DB管理系统(BlueKing-BK-DBM) available. +Copyright (C) 2017-2023 THL A29 Limited, a Tencent company. All rights reserved. +Licensed under the MIT License (the "License"); you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at https://opensource.org/licenses/MIT +Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on +an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the +specific language governing permissions and limitations under the License. +""" +from copy import deepcopy +from dataclasses import asdict +from typing import Dict, List + +from bamboo_engine.builder import SubProcess +from django.utils.translation import ugettext as _ + +from backend.configuration.constants import DBType +from backend.db_meta.enums import AccessLayer, ClusterMachineAccessTypeDefine, ClusterType, MachineType +from backend.flow.consts import DBA_ROOT_USER +from backend.flow.engine.bamboo.scene.common.builder import SubBuilder +from backend.flow.engine.bamboo.scene.common.get_file_list import GetFileList +from backend.flow.engine.bamboo.scene.mysql.deploy_peripheraltools.departs import ( + DeployPeripheralToolsDepart, + remove_depart, +) +from backend.flow.plugins.components.collections.mysql.exec_actuator_script import ExecuteDBActuatorScriptComponent +from backend.flow.plugins.components.collections.mysql.trans_flies import TransFileComponent +from backend.flow.utils.mysql.mysql_act_dataclass import DownloadMediaKwargs, ExecActuatorKwargs +from backend.flow.utils.mysql.mysql_act_playload import MysqlActPayload + + +def prepare_departs_binary( + root_id: str, + data: Dict, + cluster_type: ClusterType, + departs: List[DeployPeripheralToolsDepart], + proxy_cloud_ip_list: Dict[int, List[str]], + storage_cloud_ip_list: Dict[int, List[str]], +) -> SubProcess: + """ + { + 0: [1.1.1.1, 2.2.2.2], 云区域对应 ip + 1: [11.11.11] + } + """ + sp = SubBuilder(root_id=root_id, data=data) + + # 周边工具下发放在这里可以增加这个 subflow 的独立性 + acts = [] + for bk_cloud_id, ips in proxy_cloud_ip_list.items(): + acts.append( + { + "act_name": _("下发 MySQL 周边程序介质"), + 
"act_component_code": TransFileComponent.code, + "kwargs": asdict( + DownloadMediaKwargs( + bk_cloud_id=bk_cloud_id, + exec_ip=ips, + file_list=GetFileList(db_type=DBType.MySQL).get_mysql_surrounding_apps_package(), + ) + ), + } + ) + # for bk_cloud_id, ips in storage_cloud_ip_list.items(): + # acts.append( + # { + # "act_name": _("下发 actuator"), + # "act_component_code": TransFileComponent.code, + # "kwargs": asdict( + # DownloadMediaKwargs( + # bk_cloud_id=bk_cloud_id, + # exec_ip=ips, + # file_list=GetFileList(db_type=DBType.MySQL).get_db_actuator_package(), + # ) + # ), + # }) + sp.add_parallel_acts(acts_list=acts) + + acts = make_prepare_departs_binary_acts( + machine_type=ClusterMachineAccessTypeDefine[cluster_type][AccessLayer.STORAGE], + departs=departs, + cloud_ip_list=storage_cloud_ip_list, + ) + + if cluster_type != ClusterType.TenDBSingle: + departs_on_proxy = deepcopy(departs) + remove_depart(DeployPeripheralToolsDepart.MySQLTableChecksum, departs_on_proxy) + + if cluster_type == ClusterType.TenDBHA: + remove_depart(DeployPeripheralToolsDepart.MySQLRotateBinlog, departs_on_proxy) + remove_depart(DeployPeripheralToolsDepart.MySQLDBBackup, departs_on_proxy) + + acts.extend( + make_prepare_departs_binary_acts( + machine_type=ClusterMachineAccessTypeDefine[cluster_type][AccessLayer.PROXY], + departs=departs_on_proxy, + cloud_ip_list=proxy_cloud_ip_list, + ) + ) + + sp.add_parallel_acts(acts_list=acts) + return sp.build_sub_process(sub_name=_("准备周边组件二进制")) + + +def make_prepare_departs_binary_acts( + machine_type: MachineType, departs: List[DeployPeripheralToolsDepart], cloud_ip_list +) -> List[Dict]: + acts = [] + for bk_cloud_id, ip_list in cloud_ip_list.items(): + for ip in ip_list: + acts.append( + { + "act_name": _(f"{ip}"), + "act_component_code": ExecuteDBActuatorScriptComponent.code, + "kwargs": asdict( + ExecActuatorKwargs( + exec_ip=ip, + run_as_system_user=DBA_ROOT_USER, + 
get_mysql_payload_func=MysqlActPayload.prepare_peripheraltools_binary.__name__, + cluster={"departs": departs, "machine_type": machine_type}, + bk_cloud_id=bk_cloud_id, + ) + ), + } + ) + + return acts diff --git a/dbm-ui/backend/flow/engine/bamboo/scene/mysql/deploy_peripheraltools/push_config.py b/dbm-ui/backend/flow/engine/bamboo/scene/mysql/deploy_peripheraltools/push_config.py new file mode 100644 index 0000000000..b1a2649b20 --- /dev/null +++ b/dbm-ui/backend/flow/engine/bamboo/scene/mysql/deploy_peripheraltools/push_config.py @@ -0,0 +1,306 @@ +# -*- coding: utf-8 -*- +""" +TencentBlueKing is pleased to support the open source community by making 蓝鲸智云-DB管理系统(BlueKing-BK-DBM) available. +Copyright (C) 2017-2023 THL A29 Limited, a Tencent company. All rights reserved. +Licensed under the MIT License (the "License"); you may not use this file except in compliance with the License. +You may obtain a copy of the License at https://opensource.org/licenses/MIT +Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on +an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the +specific language governing permissions and limitations under the License. 
+""" +from collections import defaultdict +from copy import deepcopy +from dataclasses import asdict +from typing import Dict, List + +from bamboo_engine.builder import SubProcess +from django.utils.translation import ugettext as _ + +from backend.db_meta.enums import AccessLayer, ClusterMachineAccessTypeDefine, ClusterType, MachineType +from backend.db_meta.models import Cluster +from backend.flow.consts import DBA_ROOT_USER +from backend.flow.engine.bamboo.scene.common.builder import SubBuilder +from backend.flow.engine.bamboo.scene.mysql.deploy_peripheraltools.departs import ( + DeployPeripheralToolsDepart, + remove_depart, +) +from backend.flow.plugins.components.collections.mysql.exec_actuator_script import ExecuteDBActuatorScriptComponent +from backend.flow.utils.mysql.mysql_act_dataclass import ExecActuatorKwargs +from backend.flow.utils.mysql.mysql_act_playload import MysqlActPayload + + +def push_mysql_crond_config( + root_id: str, data: Dict, bk_biz_id: int, proxy_group: Dict, storage_group: Dict +) -> SubProcess: + """ + 按机器独立推送 mysql-crond 配置 + """ + acts = [] + for bk_cloud_id, ip_dicts in { + k: {**proxy_group[k], **storage_group[k]} for k in set(list(proxy_group.keys()) + list(storage_group.keys())) + }.items(): + ips = list(ip_dicts.keys()) + for ip in ips: + acts.append( + { + "act_name": _(f"{ip}"), + "act_component_code": ExecuteDBActuatorScriptComponent.code, + "kwargs": asdict( + ExecActuatorKwargs( + exec_ip=ip, + run_as_system_user=DBA_ROOT_USER, + get_mysql_payload_func=MysqlActPayload.push_mysql_crond_config.__name__, + cluster={"bk_biz_id": bk_biz_id}, + bk_cloud_id=bk_cloud_id, + ) + ), + } + ) + + sp = SubBuilder(root_id=root_id, data=data) + sp.add_parallel_acts(acts_list=acts) + return sp.build_sub_process(sub_name=_("推送 mysql-crond 配置")) + + +def push_departs_config( + root_id: str, + data: Dict, + cluster_objects: List[Cluster], + departs: List[DeployPeripheralToolsDepart], +) -> SubProcess: + """ + 按集群推送配置 + """ + pipes = [] + for 
cluster_obj in cluster_objects: + pipes.append( + push_departs_config_for_cluster(root_id=root_id, data=data, cluster_obj=cluster_obj, departs=departs) + ) + + sp = SubBuilder(root_id=root_id, data=data) + sp.add_parallel_sub_pipeline(sub_flow_list=pipes) + return sp.build_sub_process(sub_name=_("推送周边工具配置")) + + +def push_departs_config_for_cluster( + root_id: str, + data: Dict, + cluster_obj: Cluster, + departs: List[DeployPeripheralToolsDepart], +) -> SubProcess: + """ + 集群内同机器上的多实例按机器推送 + """ + # 聚合机器端口 + proxy_ip_ports = defaultdict(list) + storage_ip_ports = defaultdict(list) + for i in cluster_obj.proxyinstance_set.all(): + proxy_ip_ports[i.machine.ip].append(i.port) + for i in cluster_obj.storageinstance_set.all(): + storage_ip_ports[i.machine.ip].append(i.port) + + pipes = [ + # 存储机器推送所有组件配置 + push_departs_config_for_cluster_ips( + root_id=root_id, + data=data, + cluster_obj=cluster_obj, + ip_ports=storage_ip_ports, + departs=departs, + machine_type=ClusterMachineAccessTypeDefine[cluster_obj.cluster_type][AccessLayer.STORAGE], + ) + ] + + # TenDBSingle 没有 proxy, 不用跑这个分支 + # 但是有人提过想要有 proxy 的 TenDBSingle + if cluster_obj.cluster_type != ClusterType.TenDBSingle: + departs_on_proxy = deepcopy(departs) + # 接入层不跑校验, 强制删除 + remove_depart(DeployPeripheralToolsDepart.MySQLTableChecksum, departs_on_proxy) + if cluster_obj.cluster_type == ClusterType.TenDBHA: + # proxy 不 rotate 和 备份 + # spider 要, 所以不会进入这里 + remove_depart(DeployPeripheralToolsDepart.MySQLRotateBinlog, departs_on_proxy) + remove_depart(DeployPeripheralToolsDepart.MySQLDBBackup, departs_on_proxy) + + # 接入层组件配置推送 + if { + DeployPeripheralToolsDepart.MySQLDBBackup, + DeployPeripheralToolsDepart.MySQLRotateBinlog, + DeployPeripheralToolsDepart.MySQLMonitor, + } & set(departs_on_proxy): + pipes.append( + push_departs_config_for_cluster_ips( + root_id=root_id, + data=data, + cluster_obj=cluster_obj, + ip_ports=proxy_ip_ports, + departs=departs_on_proxy, + 
machine_type=ClusterMachineAccessTypeDefine[cluster_obj.cluster_type][AccessLayer.PROXY], + ) + ) + + sp = SubBuilder(root_id=root_id, data=data) + sp.add_parallel_sub_pipeline(sub_flow_list=pipes) + return sp.build_sub_process(sub_name=_(f"{cluster_obj.immute_domain}")) + + +def push_departs_config_for_cluster_ips( + root_id: str, + data: Dict, + cluster_obj: Cluster, + ip_ports: Dict[str, List[int]], + departs: List[DeployPeripheralToolsDepart], + machine_type: MachineType, +): + pipes = [] + for ip, port_list in ip_ports.items(): + pipe = SubBuilder(root_id=root_id, data=data) + # 不能并行, 不然有可能 mysql-crond 没起来导致其他任务失败 + for act in make_push_departs_config_for_ip( + ip=ip, port_list=port_list, departs=departs, cluster_obj=cluster_obj, machine_type=machine_type + ): + pipe.add_act(**act) + + pipes.append(pipe.build_sub_process(sub_name=_(f"{ip}"))) + + sp = SubBuilder(root_id=root_id, data=data) + sp.add_parallel_sub_pipeline(sub_flow_list=pipes) + return sp.build_sub_process(sub_name=_(f"{machine_type}")) + + +def make_push_departs_config_for_ip( + cluster_obj: Cluster, + ip: str, + port_list: List[int], + departs: List[DeployPeripheralToolsDepart], + machine_type: MachineType, +) -> List: + """ + 这肯定是同一个集群的, 所以配置只会有端口差异 + """ + acts = [] + if DeployPeripheralToolsDepart.MySQLMonitor in departs: + acts.append( + make_push_mysql_monitor_config_act( + cluster_obj=cluster_obj, ip=ip, port_list=port_list, machine_type=machine_type + ) + ), + if DeployPeripheralToolsDepart.MySQLDBBackup in departs: + acts.append( + make_push_mysql_dbbackup_config_act( + cluster_obj=cluster_obj, ip=ip, port_list=port_list, machine_type=machine_type + ) + ), + if DeployPeripheralToolsDepart.MySQLTableChecksum in departs: + acts.append(make_push_mysql_table_checksum_config_act(cluster_obj=cluster_obj, ip=ip, port_list=port_list)) + if DeployPeripheralToolsDepart.MySQLRotateBinlog in departs: + acts.append(make_push_mysql_rotatebinlog_config_act(cluster_obj=cluster_obj, ip=ip, 
port_list=port_list)) + + return acts + + +def make_push_mysql_monitor_config_act( + cluster_obj: Cluster, ip: str, port_list: List[int], machine_type: MachineType +) -> Dict: + """ + 每个端口都有独立配置, 需要端口信息 + 这些端口肯定属于同一个集群 + """ + return { + "act_name": DeployPeripheralToolsDepart.MySQLMonitor, + "act_component_code": ExecuteDBActuatorScriptComponent.code, + "kwargs": asdict( + ExecActuatorKwargs( + exec_ip=ip, + run_as_system_user=DBA_ROOT_USER, + get_mysql_payload_func=MysqlActPayload.push_mysql_monitor_config.__name__, + cluster={ + "port_list": port_list, + "bk_biz_id": cluster_obj.bk_biz_id, + "immute_domain": cluster_obj.immute_domain, + "machine_type": machine_type, + "db_module_id": cluster_obj.db_module_id, + "cluster_id": cluster_obj.pk, + }, + bk_cloud_id=cluster_obj.bk_cloud_id, + ) + ), + } + + +def make_push_mysql_dbbackup_config_act( + cluster_obj: Cluster, ip: str, port_list: List[int], machine_type: MachineType +) -> Dict: + """ + 每个端口都有独立配置, 需要端口信息 + """ + return { + "act_name": DeployPeripheralToolsDepart.MySQLDBBackup, + "act_component_code": ExecuteDBActuatorScriptComponent.code, + "kwargs": asdict( + ExecActuatorKwargs( + exec_ip=ip, + run_as_system_user=DBA_ROOT_USER, + get_mysql_payload_func=MysqlActPayload.push_mysql_dbbackup_config.__name__, + cluster={ + "port_list": port_list, + "bk_biz_id": cluster_obj.bk_biz_id, + "immute_domain": cluster_obj.immute_domain, + "machine_type": machine_type, + "cluster_type": cluster_obj.cluster_type, + "db_module_id": cluster_obj.db_module_id, + "cluster_id": cluster_obj.pk, + }, + bk_cloud_id=cluster_obj.bk_cloud_id, + ) + ), + } + + +def make_push_mysql_rotatebinlog_config_act(cluster_obj: Cluster, ip: str, port_list: List[int]) -> Dict: + """ + 每个端口都有独立配置, 需要端口信息 + """ + return { + "act_name": DeployPeripheralToolsDepart.MySQLRotateBinlog, + "act_component_code": ExecuteDBActuatorScriptComponent.code, + "kwargs": asdict( + ExecActuatorKwargs( + exec_ip=ip, + run_as_system_user=DBA_ROOT_USER, + 
get_mysql_payload_func=MysqlActPayload.push_mysql_rotatebinlog_config.__name__, + cluster={ + "port_list": port_list, + "bk_biz_id": cluster_obj.bk_biz_id, + "immute_domain": cluster_obj.immute_domain, + "cluster_type": cluster_obj.cluster_type, + }, + bk_cloud_id=cluster_obj.bk_cloud_id, + ) + ), + } + + +def make_push_mysql_table_checksum_config_act(cluster_obj: Cluster, ip: str, port_list: List[int]) -> Dict: + """ + 每个端口都有独立配置, 需要端口信息 + """ + return { + "act_name": DeployPeripheralToolsDepart.MySQLTableChecksum, + "act_component_code": ExecuteDBActuatorScriptComponent.code, + "kwargs": asdict( + ExecActuatorKwargs( + exec_ip=ip, + run_as_system_user=DBA_ROOT_USER, + get_mysql_payload_func=MysqlActPayload.push_mysql_table_checksum_config.__name__, + cluster={ + "port_list": port_list, + "bk_biz_id": cluster_obj.bk_biz_id, + "immute_domain": cluster_obj.immute_domain, + "cluster_type": cluster_obj.cluster_type, + }, + bk_cloud_id=cluster_obj.bk_cloud_id, + ) + ), + } diff --git a/dbm-ui/backend/flow/engine/bamboo/scene/mysql/deploy_peripheraltools/subflow.py b/dbm-ui/backend/flow/engine/bamboo/scene/mysql/deploy_peripheraltools/subflow.py new file mode 100644 index 0000000000..a23f483fc2 --- /dev/null +++ b/dbm-ui/backend/flow/engine/bamboo/scene/mysql/deploy_peripheraltools/subflow.py @@ -0,0 +1,152 @@ +# -*- coding: utf-8 -*- +""" +TencentBlueKing is pleased to support the open source community by making 蓝鲸智云-DB管理系统(BlueKing-BK-DBM) available. +Copyright (C) 2017-2023 THL A29 Limited, a Tencent company. All rights reserved. +Licensed under the MIT License (the "License"); you may not use this file except in compliance with the License. +You may obtain a copy of the License at https://opensource.org/licenses/MIT +Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on +an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the License for the +specific language governing permissions and limitations under the License. +""" +from typing import Dict, List + +from django.utils.translation import ugettext as _ + +from backend.db_meta.enums import ClusterType +from backend.db_meta.models import Cluster +from backend.flow.engine.bamboo.scene.common.builder import SubBuilder, SubProcess +from backend.flow.engine.bamboo.scene.mysql.deploy_peripheraltools.cc_trans_module import cc_trans_module +from backend.flow.engine.bamboo.scene.mysql.deploy_peripheraltools.collect_sysinfo import collect_sysinfo +from backend.flow.engine.bamboo.scene.mysql.deploy_peripheraltools.departs import ( + DeployPeripheralToolsDepart, + remove_depart, +) +from backend.flow.engine.bamboo.scene.mysql.deploy_peripheraltools.group_ips import group_ips +from backend.flow.engine.bamboo.scene.mysql.deploy_peripheraltools.instance_standardize import instance_standardize +from backend.flow.engine.bamboo.scene.mysql.deploy_peripheraltools.prepare_departs_binary import prepare_departs_binary +from backend.flow.engine.bamboo.scene.mysql.deploy_peripheraltools.push_config import ( + push_departs_config, + push_mysql_crond_config, +) +from backend.flow.engine.bamboo.scene.mysql.deploy_peripheraltools.trans_files import trans_common_files + + +def standardize_mysql_cluster_subflow( + root_id: str, + data: Dict, + bk_biz_id: int, + cluster_type: ClusterType, + cluster_ids: List[int], + departs: List[DeployPeripheralToolsDepart], + with_deploy_binary: bool = True, + with_push_config: bool = True, + with_collect_sysinfo: bool = True, + with_actuator: bool = True, +) -> SubProcess: + """ + ToDo dbm-ui/backend/flow/views/mysql_push_peripheral_config.py 和这个相关的代码废弃 + proxy_group, storage_group 的结构是 + { + bk_cloud_id: { + ip: [port list] + } + } + """ + # TenDBSingle 不需要校验 + if cluster_type == ClusterType.TenDBSingle: + remove_depart(DeployPeripheralToolsDepart.MySQLTableChecksum, departs) + + cluster_objects = Cluster.objects.filter( 
+ pk__in=cluster_ids, cluster_type=cluster_type, bk_biz_id=bk_biz_id + ).prefetch_related( + "proxyinstance_set", "storageinstance_set", "proxyinstance_set__machine", "storageinstance_set__machine" + ) + + proxy_group, storage_group = group_ips(cluster_objects=cluster_objects) + + pipe = SubBuilder(root_id=root_id, data=data) + + pipe.add_sub_pipeline( + sub_flow=trans_common_files( + root_id=root_id, + data=data, + bk_biz_id=bk_biz_id, + proxy_group=proxy_group, + storage_group=storage_group, + with_actuator=with_actuator, + with_backup_client=DeployPeripheralToolsDepart.BackupClient in departs, + ) + ) + + if with_collect_sysinfo: + pipe.add_sub_pipeline( + sub_flow=collect_sysinfo( + root_id=root_id, + data=data, + proxy_group=proxy_group, + storage_group=storage_group, + ) + ) + + remove_depart(DeployPeripheralToolsDepart.BackupClient, departs) + + # 如果是 TenDBSingle, proxy_group 为空, cc_trans_module 内部也不会构造 proxy 子流程 + pipe.add_sub_pipeline( + sub_flow=cc_trans_module( + root_id=root_id, + data=data, + cluster_type=cluster_type, + cluster_objects=cluster_objects, + proxy_group=proxy_group, + storage_group=storage_group, + ) + ) + + if with_deploy_binary: + # 如果是 TenDBSingle, proxy_group 为空, prepare_departs_binary 内部也不会构造 proxy 子流程 + pipe.add_sub_pipeline( + sub_flow=prepare_departs_binary( + root_id=root_id, + data=data, + cluster_type=cluster_type, + departs=departs, + proxy_cloud_ip_list={k: list(v.keys()) for k, v in proxy_group.items()}, + storage_cloud_ip_list={k: list(v.keys()) for k, v in storage_group.items()}, + ) + ) + + # 实例标准化 + # TenDBHA proxy 会 1) 清理旧 crontab, 2) 确认添加 DBHA 白名单 + # 其他实例会 1) 清理旧 crontab, 2) 清理旧系统账号, 3) 新系统库表初始化 + # 这个不需要按集群来, 每台机器把端口下发下去执行就行 + pipe.add_sub_pipeline( + sub_flow=instance_standardize( + root_id=root_id, data=data, cluster_type=cluster_type, proxy_group=proxy_group, storage_group=storage_group + ) + ) + + if with_push_config and { + DeployPeripheralToolsDepart.MySQLDBBackup, + 
DeployPeripheralToolsDepart.MySQLRotateBinlog, + DeployPeripheralToolsDepart.MySQLMonitor, + DeployPeripheralToolsDepart.MySQLTableChecksum, + DeployPeripheralToolsDepart.MySQLCrond, + } & set(departs): + # mysql-crond 要提前独立做, 按机器 + if DeployPeripheralToolsDepart.MySQLCrond in departs: + remove_depart(DeployPeripheralToolsDepart.MySQLCrond, departs) + pipe.add_sub_pipeline( + sub_flow=push_mysql_crond_config( + root_id=root_id, + data=data, + bk_biz_id=bk_biz_id, + proxy_group=proxy_group, + storage_group=storage_group, + ) + ) + # 如果是 TenDBSingle, proxy_group 为空, push_departs_config 内部也不会构造 proxy 子流程 + pipe.add_sub_pipeline( + sub_flow=push_departs_config(root_id=root_id, data=data, cluster_objects=cluster_objects, departs=departs) + ) + + return pipe.build_sub_process(sub_name=_("{} 集群标准化".format(cluster_type))) diff --git a/dbm-ui/backend/flow/engine/bamboo/scene/mysql/deploy_peripheraltools/trans_files.py b/dbm-ui/backend/flow/engine/bamboo/scene/mysql/deploy_peripheraltools/trans_files.py new file mode 100644 index 0000000000..4c937130e3 --- /dev/null +++ b/dbm-ui/backend/flow/engine/bamboo/scene/mysql/deploy_peripheraltools/trans_files.py @@ -0,0 +1,98 @@ +# -*- coding: utf-8 -*- +""" +TencentBlueKing is pleased to support the open source community by making 蓝鲸智云-DB管理系统(BlueKing-BK-DBM) available. +Copyright (C) 2017-2023 THL A29 Limited, a Tencent company. All rights reserved. +Licensed under the MIT License (the "License"); you may not use this file except in compliance with the License. +You may obtain a copy of the License at https://opensource.org/licenses/MIT +Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on +an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the +specific language governing permissions and limitations under the License. 
+""" +from dataclasses import asdict +from typing import Dict, List + +from bamboo_engine.builder import SubProcess +from django.utils.translation import ugettext as _ + +from backend.configuration.constants import DBType +from backend.flow.consts import DEPENDENCIES_PLUGINS +from backend.flow.engine.bamboo.scene.common.builder import SubBuilder +from backend.flow.engine.bamboo.scene.common.get_file_list import GetFileList +from backend.flow.plugins.components.collections.common.download_backup_client import DownloadBackupClientComponent +from backend.flow.plugins.components.collections.common.install_nodeman_plugin import ( + InstallNodemanPluginServiceComponent, +) +from backend.flow.plugins.components.collections.mysql.trans_flies import TransFileComponent +from backend.flow.utils.common_act_dataclass import DownloadBackupClientKwargs, InstallNodemanPluginKwargs +from backend.flow.utils.mysql.mysql_act_dataclass import DownloadMediaKwargs + + +def trans_common_files( + root_id: str, + data: Dict, + bk_biz_id: int, + proxy_group: Dict[int, Dict[str, List[int]]], + storage_group: Dict[int, Dict[str, List[int]]], + with_backup_client: bool, + with_actuator: bool, +) -> SubProcess: + """ + 下发公共文件 + 1. actuator, 某些复用场景不需要下发 + 2. 
安装蓝鲸插件 + """ + pipes = [] + + # 这样合并 proxy 和 storage group 的前提是机器不会混用 + for bk_cloud_id, ip_dicts in { + k: {**proxy_group[k], **storage_group[k]} for k in set(list(proxy_group.keys()) + list(storage_group.keys())) + }.items(): + ips = list(ip_dicts.keys()) + acts = [] + if with_actuator: + acts.append( + { + "act_name": _("下发 actuator"), + "act_component_code": TransFileComponent.code, + "kwargs": asdict( + DownloadMediaKwargs( + bk_cloud_id=bk_cloud_id, + exec_ip=ips, + file_list=GetFileList(db_type=DBType.MySQL).get_db_actuator_package(), + ) + ), + }, + ) + if with_backup_client: + acts.append( + { + "act_name": _("安装 backup client"), + "act_component_code": DownloadBackupClientComponent.code, + "kwargs": asdict( + DownloadBackupClientKwargs( + bk_cloud_id=bk_cloud_id, + bk_biz_id=bk_biz_id, + download_host_list=ips, + ) + ), + } + ) + + for plugin_name in DEPENDENCIES_PLUGINS: + acts.append( + { + "act_name": _("安装 {}".format(plugin_name)), + "act_component_code": InstallNodemanPluginServiceComponent.code, + "kwargs": asdict( + InstallNodemanPluginKwargs(ips=ips, plugin_name=plugin_name, bk_cloud_id=bk_cloud_id) + ), + } + ) + + subpipe = SubBuilder(root_id=root_id, data=data) + subpipe.add_parallel_acts(acts_list=acts) + pipes.append(subpipe.build_sub_process(sub_name=_("cloud_{}".format(bk_cloud_id)))) + + sp = SubBuilder(root_id=root_id, data=data) + sp.add_parallel_sub_pipeline(sub_flow_list=pipes) + return sp.build_sub_process(sub_name=_("下发公共文件")) diff --git a/dbm-ui/backend/flow/engine/bamboo/scene/mysql/mysql_ha_apply_flow.py b/dbm-ui/backend/flow/engine/bamboo/scene/mysql/mysql_ha_apply_flow.py index a440af30be..6350c4a4ee 100644 --- a/dbm-ui/backend/flow/engine/bamboo/scene/mysql/mysql_ha_apply_flow.py +++ b/dbm-ui/backend/flow/engine/bamboo/scene/mysql/mysql_ha_apply_flow.py @@ -19,12 +19,13 @@ from backend.db_meta.enums import ClusterType from backend.flow.engine.bamboo.scene.common.builder import Builder, SubBuilder from 
backend.flow.engine.bamboo.scene.common.get_file_list import GetFileList -from backend.flow.engine.bamboo.scene.mysql.common.common_sub_flow import ( - build_surrounding_apps_sub_flow, - init_machine_sub_flow, -) +from backend.flow.engine.bamboo.scene.mysql.common.common_sub_flow import init_machine_sub_flow from backend.flow.plugins.components.collections.mysql.dns_manage import MySQLDnsManageComponent from backend.flow.plugins.components.collections.mysql.exec_actuator_script import ExecuteDBActuatorScriptComponent +from backend.flow.plugins.components.collections.mysql.generate_mysql_cluster_standardize_flow import ( + GenerateMySQLClusterStandardizeFlowComponent, + GenerateMySQLClusterStandardizeFlowService, +) from backend.flow.plugins.components.collections.mysql.mysql_db_meta import MySQLDBMetaComponent from backend.flow.plugins.components.collections.mysql.trans_flies import TransFileComponent from backend.flow.utils.mysql.mysql_act_dataclass import ( @@ -155,19 +156,6 @@ def deploy_mysql_ha_flow_with_manual(self): ] ) - acts_list = [] - for ip_info in info["mysql_ip_list"] + info["proxy_ip_list"]: - exec_act_kwargs.exec_ip = ip_info["ip"] - exec_act_kwargs.get_mysql_payload_func = MysqlActPayload.get_deploy_mysql_crond_payload.__name__ - acts_list.append( - { - "act_name": _("部署mysql-crond"), - "act_component_code": ExecuteDBActuatorScriptComponent.code, - "kwargs": asdict(exec_act_kwargs), - } - ) - sub_pipeline.add_parallel_acts(acts_list=acts_list) - # 阶段3 并发安装mysql、proxy 实例(一个活动节点部署多实例) acts_list = [] for proxy_ip in info["proxy_ip_list"]: @@ -292,22 +280,25 @@ def deploy_mysql_ha_flow_with_manual(self): ), ) - # 阶段7 部署周边组件 - sub_pipeline.add_sub_pipeline( - sub_flow=build_surrounding_apps_sub_flow( - bk_cloud_id=int(self.data["bk_cloud_id"]), - master_ip_list=[info["mysql_ip_list"][0]["ip"]], - slave_ip_list=[info["mysql_ip_list"][1]["ip"]], - proxy_ip_list=[ip_info["ip"] for ip_info in info["proxy_ip_list"]], - root_id=self.root_id, - 
parent_global_data=copy.deepcopy(sub_flow_context), - is_init=True, - collect_sysinfo=True, - cluster_type=ClusterType.TenDBHA.value, - ) - ) - sub_pipelines.append(sub_pipeline.build_sub_process(sub_name=_("部署MySQL高可用集群"))) mysql_ha_pipeline.add_parallel_sub_pipeline(sub_flow_list=sub_pipelines) + + # 集群域名输入 + immute_domains = [] + for ele in self.data["apply_infos"]: + immute_domains.extend([c["master"] for c in ele["clusters"]]) + + gp = SubBuilder(root_id=self.root_id, data=copy.deepcopy(self.data)) + gp.add_act( + act_name=_("生成标准化单据"), + act_component_code=GenerateMySQLClusterStandardizeFlowComponent.code, + kwargs={ + "trans_func": GenerateMySQLClusterStandardizeFlowService.generate_from_immute_domains.__name__, + "immute_domains": immute_domains, + }, + ) + + mysql_ha_pipeline.add_sub_pipeline(sub_flow=gp.build_sub_process(sub_name=_("生成标准化单据"))) + mysql_ha_pipeline.run_pipeline(init_trans_data_class=HaApplyManualContext()) diff --git a/dbm-ui/backend/flow/engine/bamboo/scene/mysql/mysql_ha_standardize_flow.py b/dbm-ui/backend/flow/engine/bamboo/scene/mysql/mysql_ha_standardize_flow.py deleted file mode 100644 index feffbe75c7..0000000000 --- a/dbm-ui/backend/flow/engine/bamboo/scene/mysql/mysql_ha_standardize_flow.py +++ /dev/null @@ -1,379 +0,0 @@ -# -*- coding: utf-8 -*- -""" -TencentBlueKing is pleased to support the open source community by making 蓝鲸智云-DB管理系统(BlueKing-BK-DBM) available. -Copyright (C) 2017-2023 THL A29 Limited, a Tencent company. All rights reserved. -Licensed under the MIT License (the "License"); you may not use this file except in compliance with the License. -You may obtain a copy of the License at https://opensource.org/licenses/MIT -Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on -an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the License for the -specific language governing permissions and limitations under the License. -""" - -import copy -import logging -from collections import defaultdict -from dataclasses import asdict -from typing import Dict, List, Optional - -from django.utils.translation import ugettext as _ - -from backend.configuration.constants import DBType -from backend.db_meta.enums import ClusterType -from backend.db_meta.exceptions import DBMetaException -from backend.db_meta.models import Cluster, StorageInstance -from backend.db_package.models import Package -from backend.flow.consts import DBA_ROOT_USER, DEPENDENCIES_PLUGINS, MediumEnum -from backend.flow.engine.bamboo.scene.common.builder import Builder, SubBuilder, SubProcess -from backend.flow.engine.bamboo.scene.common.get_file_list import GetFileList -from backend.flow.plugins.components.collections.common.download_backup_client import DownloadBackupClientComponent -from backend.flow.plugins.components.collections.common.install_nodeman_plugin import ( - InstallNodemanPluginServiceComponent, -) -from backend.flow.plugins.components.collections.mysql.cluster_standardize_trans_module import ( - ClusterStandardizeTransModuleComponent, -) -from backend.flow.plugins.components.collections.mysql.exec_actuator_script import ExecuteDBActuatorScriptComponent -from backend.flow.plugins.components.collections.mysql.mysql_cluster_instantiate_config import ( - MySQLClusterInstantiateConfigComponent, -) -from backend.flow.plugins.components.collections.mysql.trans_flies import TransFileComponent -from backend.flow.utils.common_act_dataclass import DownloadBackupClientKwargs, InstallNodemanPluginKwargs -from backend.flow.utils.mysql.mysql_act_dataclass import DownloadMediaKwargs, ExecActuatorKwargs -from backend.flow.utils.mysql.mysql_act_playload import MysqlActPayload - -logger = logging.getLogger("flow") - - -class MySQLHAStandardizeFlow(object): - def __init__(self, root_id: str, data: Optional[Dict]): - self.root_id = 
root_id - self.data = data - - def standardize(self): - """ - self.data = { - "uid": "20230830", - "created_by": "xxx", - "bk_biz_id": "11", - "ticket_type": "MYSQL_HA_STANDARDIZE", - "infos": { - "cluster_ids": [1, 2, 3], - } - } - 增加单据临时ADMIN账号的添加和删除逻辑 - """ - cluster_ids = self.data["infos"]["cluster_ids"] - bk_biz_id = self.data["bk_biz_id"] - - cluster_objects = Cluster.objects.filter( - pk__in=cluster_ids, bk_biz_id=bk_biz_id, cluster_type=ClusterType.TenDBHA.value - ).prefetch_related( - "proxyinstance_set", "storageinstance_set", "proxyinstance_set__machine", "storageinstance_set__machine" - ) - if cluster_objects.count() != len(cluster_ids): - raise DBMetaException( - message="input {} clusters, but found {}".format(len(cluster_ids), cluster_objects.count()) - ) - - standardize_pipe = Builder( - root_id=self.root_id, - data=self.data, - need_random_pass_cluster_ids=list(set(self.data["infos"]["cluster_ids"])), - ) - - standardize_pipe.add_sub_pipeline(self._build_trans_module_sub(clusters=cluster_objects)) - - standardize_pipe.add_sub_pipeline(self._build_instantiate_mysql_config_sub(clusters=cluster_objects)) - - # 为了代码方便这里稍微特殊点 - # 这两个字典的 key 是 ip 地址 - # value 是 bk cloud id - # 省得要搞字典去重 - proxy_ips = {} - storage_ips = {} - # 为了方便下发文件, ip 还要按 bk_cloud_id 分组 - ip_group_by_cloud = defaultdict(list) - for cluster_obj in cluster_objects: - for ins in cluster_obj.proxyinstance_set.all(): - ip = ins.machine.ip - bk_cloud_id = ins.machine.bk_cloud_id - proxy_ips[ip] = bk_cloud_id - ip_group_by_cloud[bk_cloud_id].append(ip) - - for ins in cluster_obj.storageinstance_set.all(): - ip = ins.machine.ip - bk_cloud_id = ins.machine.bk_cloud_id - storage_ips[ip] = bk_cloud_id - ip_group_by_cloud[bk_cloud_id].append(ip) - - # 按 bk_cloud_id 批量下发文件 - standardize_pipe.add_sub_pipeline(self._trans_file(ips_group=ip_group_by_cloud)) - - standardize_pipe.add_parallel_sub_pipeline( - sub_flow_list=[ - self._build_proxy_sub(ips=proxy_ips), - 
self._build_storage_sub(ips=storage_ips), - ] - ) - - logger.info(_("构建TenDBHA集群标准化流程成功")) - standardize_pipe.run_pipeline(is_drop_random_user=True) - - def _trans_file(self, ips_group: Dict) -> SubProcess: - trans_file_pipes = [] - for bk_cloud_id, ips in ips_group.items(): - unique_ips = list(set(ips)) - - cloud_trans_file_pipe = SubBuilder(root_id=self.root_id, data=self.data) - - cloud_trans_file_pipe.add_act( - act_name=_("下发MySQL周边程序介质"), - act_component_code=TransFileComponent.code, - kwargs=asdict( - DownloadMediaKwargs( - bk_cloud_id=bk_cloud_id, - exec_ip=unique_ips, - file_list=GetFileList(db_type=DBType.MySQL).get_mysql_surrounding_apps_package(), - ) - ), - ) - cloud_trans_file_pipe.add_act( - act_name=_("下发db-actuator介质"), - act_component_code=TransFileComponent.code, - kwargs=asdict( - DownloadMediaKwargs( - bk_cloud_id=bk_cloud_id, - exec_ip=unique_ips, - file_list=GetFileList(db_type=DBType.MySQL).get_db_actuator_package(), - ) - ), - ) - - for plugin_name in DEPENDENCIES_PLUGINS: - cloud_trans_file_pipe.add_act( - act_name=_("安装{}插件".format(plugin_name)), - act_component_code=InstallNodemanPluginServiceComponent.code, - kwargs=asdict( - InstallNodemanPluginKwargs(ips=unique_ips, plugin_name=plugin_name, bk_cloud_id=bk_cloud_id) - ), - ) - - cloud_trans_file_pipe.add_act( - act_name=_("安装backup-client工具"), - act_component_code=DownloadBackupClientComponent.code, - kwargs=asdict( - DownloadBackupClientKwargs( - bk_cloud_id=bk_cloud_id, - bk_biz_id=self.data["bk_biz_id"], - download_host_list=unique_ips, - ) - ), - ) - - trans_file_pipes.append( - cloud_trans_file_pipe.build_sub_process(sub_name=_("cloud {} 下发文件".format(bk_cloud_id))) - ) - - p = SubBuilder(root_id=self.root_id, data=self.data) - p.add_parallel_sub_pipeline(sub_flow_list=trans_file_pipes) - return p.build_sub_process(sub_name=_("下发文件")) - - def _build_trans_module_sub(self, clusters: List[Cluster]) -> SubProcess: - pipes = [] - for cluster in clusters: - cluster_pipe = SubBuilder( - 
root_id=self.root_id, data={**copy.deepcopy(self.data), "cluster_id": cluster.id} - ) - cluster_pipe.add_act( - act_name=_("模块标准化"), act_component_code=ClusterStandardizeTransModuleComponent.code, kwargs={} - ) - - pipes.append(cluster_pipe.build_sub_process(sub_name=_("{} CC 模块标准化".format(cluster.immute_domain)))) - - p = SubBuilder(root_id=self.root_id, data=self.data) - p.add_parallel_sub_pipeline(sub_flow_list=pipes) - return p.build_sub_process(sub_name=_("CC标准化")) - - def _build_instantiate_mysql_config_sub(self, clusters: List[Cluster]) -> SubProcess: - pipes = [] - for cluster in clusters: - cluster_pipe = SubBuilder( - root_id=self.root_id, data={**copy.deepcopy(self.data), "cluster_id": cluster.id} - ) - cluster_pipe.add_act( - act_name=_("实例化配置"), act_component_code=MySQLClusterInstantiateConfigComponent.code, kwargs={} - ) - pipes.append(cluster_pipe.build_sub_process(sub_name=_("实例化 {} 配置".format(cluster.immute_domain)))) - - p = SubBuilder(root_id=self.root_id, data=self.data) - p.add_parallel_sub_pipeline(sub_flow_list=pipes) - return p.build_sub_process(sub_name=_("实例化集群配置")) - - def _build_proxy_sub(self, ips: Dict) -> SubProcess: - pipes = [] - for ip, bk_cloud_id in ips.items(): - single_pipe = SubBuilder(root_id=self.root_id, data=self.data) - - single_pipe.add_act( - act_name=_("标准化proxy"), - act_component_code=ExecuteDBActuatorScriptComponent.code, - kwargs=asdict( - ExecActuatorKwargs( - exec_ip=ip, - run_as_system_user=DBA_ROOT_USER, - get_mysql_payload_func=MysqlActPayload.get_standardize_tendbha_proxy_payload.__name__, - bk_cloud_id=bk_cloud_id, - ) - ), - ) - - single_pipe.add_act( - act_name=_("部署mysql-crond"), - act_component_code=ExecuteDBActuatorScriptComponent.code, - kwargs=asdict( - ExecActuatorKwargs( - exec_ip=ip, - bk_cloud_id=bk_cloud_id, - get_mysql_payload_func=MysqlActPayload.get_deploy_mysql_crond_payload.__name__, - cluster_type=ClusterType.TenDBHA.value, - ) - ), - ) - - single_pipe.add_act( - act_name=_("部署监控程序"), - 
act_component_code=ExecuteDBActuatorScriptComponent.code, - kwargs=asdict( - ExecActuatorKwargs( - exec_ip=ip, - bk_cloud_id=bk_cloud_id, - get_mysql_payload_func=MysqlActPayload.get_deploy_mysql_monitor_payload.__name__, - cluster={"cluster_ids": self.data["infos"]["cluster_ids"]}, - cluster_type=ClusterType.TenDBHA.value, - ) - ), - ) - - pipes.append(single_pipe.build_sub_process(sub_name=_("{} 部署dba工具".format(ip)))) - - p = SubBuilder(root_id=self.root_id, data=self.data) - p.add_parallel_sub_pipeline(sub_flow_list=pipes) - - return p.build_sub_process(sub_name=_("接入层标准化")) - - def _build_storage_sub(self, ips: Dict) -> SubProcess: - pipes = [] - for ip, bk_cloud_id in ips.items(): - single_pipe = SubBuilder(root_id=self.root_id, data=self.data) - - # 同一机器所有集群的 major version 应该是一样的 - major_version = Cluster.objects.filter(storageinstance__machine__ip=ip).first().major_version - mysql_pkg = Package.get_latest_package(version=major_version, pkg_type=MediumEnum.MySQL) - - ports = StorageInstance.objects.filter(machine__ip=ip, bk_biz_id=self.data["bk_biz_id"]).values_list( - "port", flat=True - ) - - single_pipe.add_act( - act_name=_("实例标准化"), - act_component_code=ExecuteDBActuatorScriptComponent.code, - kwargs=asdict( - ExecActuatorKwargs( - exec_ip=ip, - run_as_system_user=DBA_ROOT_USER, - cluster_type=ClusterType.TenDBHA.value, - cluster={ - "ports": list(ports), - "mysql_pkg": {"name": mysql_pkg.name, "md5": mysql_pkg.md5}, - "version": major_version, - }, - bk_cloud_id=bk_cloud_id, - get_mysql_payload_func=MysqlActPayload.get_standardize_mysql_instance_payload.__name__, - ) - ), - ) - - single_pipe.add_act( - act_name=_("部署mysql-crond"), - act_component_code=ExecuteDBActuatorScriptComponent.code, - kwargs=asdict( - ExecActuatorKwargs( - exec_ip=ip, - bk_cloud_id=bk_cloud_id, - get_mysql_payload_func=MysqlActPayload.get_deploy_mysql_crond_payload.__name__, - cluster_type=ClusterType.TenDBHA.value, - ) - ), - ) - - single_pipe.add_act( - act_name=_("部署监控程序"), - 
act_component_code=ExecuteDBActuatorScriptComponent.code, - kwargs=asdict( - ExecActuatorKwargs( - exec_ip=ip, - bk_cloud_id=bk_cloud_id, - get_mysql_payload_func=MysqlActPayload.get_deploy_mysql_monitor_payload.__name__, - cluster={"cluster_ids": self.data["infos"]["cluster_ids"]}, - cluster_type=ClusterType.TenDBHA.value, - ) - ), - ) - - single_pipe.add_act( - act_name=_("部署备份程序"), - act_component_code=ExecuteDBActuatorScriptComponent.code, - kwargs=asdict( - ExecActuatorKwargs( - exec_ip=ip, - bk_cloud_id=bk_cloud_id, - get_mysql_payload_func=MysqlActPayload.get_install_db_backup_payload.__name__, - cluster_type=ClusterType.TenDBHA.value, - ) - ), - ) - - single_pipe.add_act( - act_name=_("部署rotate binlog"), - act_component_code=ExecuteDBActuatorScriptComponent.code, - kwargs=asdict( - ExecActuatorKwargs( - exec_ip=ip, - bk_cloud_id=bk_cloud_id, - get_mysql_payload_func=MysqlActPayload.get_install_mysql_rotatebinlog_payload.__name__, - cluster_type=ClusterType.TenDBHA.value, - ) - ), - ) - - single_pipe.add_act( - act_name=_("部署数据校验程序"), - act_component_code=ExecuteDBActuatorScriptComponent.code, - kwargs=asdict( - ExecActuatorKwargs( - exec_ip=ip, - bk_cloud_id=bk_cloud_id, - get_mysql_payload_func=MysqlActPayload.get_install_mysql_checksum_payload.__name__, - cluster_type=ClusterType.TenDBHA.value, - ) - ), - ) - - single_pipe.add_act( - act_name=_("部署DBA工具箱"), - act_component_code=ExecuteDBActuatorScriptComponent.code, - kwargs=asdict( - ExecActuatorKwargs( - bk_cloud_id=bk_cloud_id, - exec_ip=ip, - get_mysql_payload_func=MysqlActPayload.get_install_dba_toolkit_payload.__name__, - cluster_type=ClusterType.TenDBHA.value, - ) - ), - ) - - pipes.append(single_pipe.build_sub_process(sub_name=_("{} 标准化".format(ip)))) - - p = SubBuilder(root_id=self.root_id, data=self.data) - p.add_parallel_sub_pipeline(sub_flow_list=pipes) - return p.build_sub_process(sub_name=_("存储层标准化")) diff --git a/dbm-ui/backend/flow/engine/bamboo/scene/mysql/mysql_proxy_cluster_add.py 
b/dbm-ui/backend/flow/engine/bamboo/scene/mysql/mysql_proxy_cluster_add.py index a11bc04181..055c5f44b5 100644 --- a/dbm-ui/backend/flow/engine/bamboo/scene/mysql/mysql_proxy_cluster_add.py +++ b/dbm-ui/backend/flow/engine/bamboo/scene/mysql/mysql_proxy_cluster_add.py @@ -21,6 +21,8 @@ from backend.flow.engine.bamboo.scene.common.builder import Builder, SubBuilder from backend.flow.engine.bamboo.scene.common.get_file_list import GetFileList from backend.flow.engine.bamboo.scene.mysql.common.common_sub_flow import init_machine_sub_flow +from backend.flow.engine.bamboo.scene.mysql.deploy_peripheraltools.departs import ALLDEPARTS +from backend.flow.engine.bamboo.scene.mysql.deploy_peripheraltools.subflow import standardize_mysql_cluster_subflow from backend.flow.plugins.components.collections.mysql.clone_proxy_client_in_backend import ( CloneProxyUsersInBackendComponent, ) @@ -29,6 +31,10 @@ ) from backend.flow.plugins.components.collections.mysql.dns_manage import MySQLDnsManageComponent from backend.flow.plugins.components.collections.mysql.exec_actuator_script import ExecuteDBActuatorScriptComponent +from backend.flow.plugins.components.collections.mysql.generate_mysql_cluster_standardize_flow import ( + GenerateMySQLClusterStandardizeFlowComponent, + GenerateMySQLClusterStandardizeFlowService, +) from backend.flow.plugins.components.collections.mysql.mysql_db_meta import MySQLDBMetaComponent from backend.flow.plugins.components.collections.mysql.trans_flies import TransFileComponent from backend.flow.utils.mysql.mysql_act_dataclass import ( @@ -161,12 +167,12 @@ def add_mysql_cluster_proxy_flow(self): ), ) - exec_act_kwargs.get_mysql_payload_func = MysqlActPayload.get_deploy_mysql_crond_payload.__name__ - sub_pipeline.add_act( - act_name=_("部署mysql-crond"), - act_component_code=ExecuteDBActuatorScriptComponent.code, - kwargs=asdict(exec_act_kwargs), - ) + # exec_act_kwargs.get_mysql_payload_func = MysqlActPayload.get_deploy_mysql_crond_payload.__name__ + # 
sub_pipeline.add_act( + # act_name=_("部署mysql-crond"), + # act_component_code=ExecuteDBActuatorScriptComponent.code, + # kwargs=asdict(exec_act_kwargs), + # ) exec_act_kwargs.get_mysql_payload_func = MysqlActPayload.get_install_proxy_payload.__name__ sub_pipeline.add_act( @@ -262,17 +268,45 @@ def add_mysql_cluster_proxy_flow(self): ), ) - exec_act_kwargs.exec_ip = info["proxy_ip"]["ip"] - exec_act_kwargs.get_mysql_payload_func = MysqlActPayload.get_deploy_mysql_monitor_payload.__name__ - sub_pipeline.add_act( - act_name=_("Proxy安装mysql-monitor"), - act_component_code=ExecuteDBActuatorScriptComponent.code, - kwargs=asdict(exec_act_kwargs), - ) + # exec_act_kwargs.exec_ip = info["proxy_ip"]["ip"] + # exec_act_kwargs.get_mysql_payload_func = MysqlActPayload.get_deploy_mysql_monitor_payload.__name__ + # sub_pipeline.add_act( + # act_name=_("Proxy安装mysql-monitor"), + # act_component_code=ExecuteDBActuatorScriptComponent.code, + # kwargs=asdict(exec_act_kwargs), + # ) sub_pipelines.append( sub_pipeline.build_sub_process(sub_name=_("添加proxy子流程[{}]".format(info["proxy_ip"]["ip"]))) ) mysql_proxy_cluster_add_pipeline.add_parallel_sub_pipeline(sub_flow_list=sub_pipelines) + mysql_proxy_cluster_add_pipeline.add_sub_pipeline( + sub_flow=standardize_mysql_cluster_subflow( + root_id=self.root_id, + data=copy.deepcopy(self.data), + bk_biz_id=self.data["bk_biz_id"], + cluster_type=ClusterType.TenDBHA, + cluster_ids=cluster_ids, + departs=ALLDEPARTS, + with_deploy_binary=True, + with_push_config=True, + with_collect_sysinfo=False, + with_actuator=False, + ) + ) + + gp = SubBuilder(root_id=self.root_id, data=copy.deepcopy(self.data)) + gp.add_act( + act_name=_("生成标准化单据"), + act_component_code=GenerateMySQLClusterStandardizeFlowComponent.code, + kwargs={ + "trans_func": GenerateMySQLClusterStandardizeFlowService.generate_from_immute_domains.__name__, + "immute_domains": list( + Cluster.objects.filter(pk__in=cluster_ids).values_list("immute_domain", flat=True) + ), + }, + ) + 
mysql_proxy_cluster_add_pipeline.add_sub_pipeline(sub_flow=gp.build_sub_process(sub_name=_("生成标准化单据"))) + mysql_proxy_cluster_add_pipeline.run_pipeline() diff --git a/dbm-ui/backend/flow/engine/bamboo/scene/mysql/mysql_proxy_cluster_switch.py b/dbm-ui/backend/flow/engine/bamboo/scene/mysql/mysql_proxy_cluster_switch.py index 15849286c3..29a08375d3 100644 --- a/dbm-ui/backend/flow/engine/bamboo/scene/mysql/mysql_proxy_cluster_switch.py +++ b/dbm-ui/backend/flow/engine/bamboo/scene/mysql/mysql_proxy_cluster_switch.py @@ -35,6 +35,10 @@ DropProxyUsersInBackendComponent, ) from backend.flow.plugins.components.collections.mysql.exec_actuator_script import ExecuteDBActuatorScriptComponent +from backend.flow.plugins.components.collections.mysql.generate_mysql_cluster_standardize_flow import ( + GenerateMySQLClusterStandardizeFlowComponent, + GenerateMySQLClusterStandardizeFlowService, +) from backend.flow.plugins.components.collections.mysql.mysql_db_meta import MySQLDBMetaComponent from backend.flow.plugins.components.collections.mysql.trans_flies import TransFileComponent from backend.flow.utils.mysql.mysql_act_dataclass import ( @@ -122,6 +126,8 @@ def switch_mysql_cluster_proxy_flow(self): mysql_proxy_cluster_add_pipeline = Builder(root_id=self.root_id, data=self.data) sub_pipelines = [] + all_cluster_ids = [] + # 多集群操作时循环加入集群proxy替换子流程 for info in self.data["infos"]: # 拼接子流程需要全局参数 @@ -164,14 +170,6 @@ def switch_mysql_cluster_proxy_flow(self): ), ) - # 阶段2 部署mysql-crond - exec_act_kwargs.get_mysql_payload_func = MysqlActPayload.get_deploy_mysql_crond_payload.__name__ - sub_pipeline.add_act( - act_name=_("部署mysql-crond"), - act_component_code=ExecuteDBActuatorScriptComponent.code, - kwargs=asdict(exec_act_kwargs), - ) - exec_act_kwargs.get_mysql_payload_func = MysqlActPayload.get_install_proxy_payload.__name__ sub_pipeline.add_act( act_name=_("部署proxy实例"), @@ -183,6 +181,8 @@ def switch_mysql_cluster_proxy_flow(self): # 阶段2 根据需要替换的proxy的集群,依次添加 switch_proxy_sub_list = 
[] + + all_cluster_ids.extend(info["cluster_ids"]) for cluster_id in info["cluster_ids"]: # 拼接子流程需要全局参数 @@ -295,14 +295,6 @@ def switch_mysql_cluster_proxy_flow(self): ), ) - # 阶段3 新的proxy添加事件监控 - exec_act_kwargs.get_mysql_payload_func = MysqlActPayload.get_deploy_mysql_monitor_payload.__name__ - sub_pipeline.add_act( - act_name=_("Proxy安装mysql-monitor"), - act_component_code=ExecuteDBActuatorScriptComponent.code, - kwargs=asdict(exec_act_kwargs), - ) - # 阶段4 后续流程需要在这里加一个暂停节点,让用户在合适的时间执行下架旧实例操作 sub_pipeline.add_act(act_name=_("人工确认"), act_component_code=PauseComponent.code, kwargs={}) @@ -350,7 +342,21 @@ def switch_mysql_cluster_proxy_flow(self): ) mysql_proxy_cluster_add_pipeline.add_parallel_sub_pipeline(sub_flow_list=sub_pipelines) - mysql_proxy_cluster_add_pipeline.run_pipeline() + + gp = SubBuilder(root_id=self.root_id, data=copy.deepcopy(self.data)) + gp.add_act( + act_name=_("生成标准化单据"), + act_component_code=GenerateMySQLClusterStandardizeFlowComponent.code, + kwargs={ + "trans_func": GenerateMySQLClusterStandardizeFlowService.generate_from_immute_domains.__name__, + "immute_domains": list( + Cluster.objects.filter(pk__in=all_cluster_ids).values_list("immute_domain", flat=True) + ), + }, + ) + mysql_proxy_cluster_add_pipeline.add_sub_pipeline(sub_flow=gp.build_sub_process(sub_name=_("生成标准化单据"))) + + mysql_proxy_cluster_add_pipeline.run_pipeline(is_drop_random_user=True) def proxy_reduce_sub_flow(self, cluster_id: int, bk_cloud_id: int, origin_proxy_ip: str, origin_proxy_port: int): """ diff --git a/dbm-ui/backend/flow/engine/bamboo/scene/mysql/mysql_push_peripheral_config.py b/dbm-ui/backend/flow/engine/bamboo/scene/mysql/mysql_push_peripheral_config.py deleted file mode 100644 index 88e02fb87e..0000000000 --- a/dbm-ui/backend/flow/engine/bamboo/scene/mysql/mysql_push_peripheral_config.py +++ /dev/null @@ -1,294 +0,0 @@ -# -*- coding: utf-8 -*- -""" -TencentBlueKing is pleased to support the open source community by making 蓝鲸智云-DB管理系统(BlueKing-BK-DBM) 
available. -Copyright (C) 2017-2023 THL A29 Limited, a Tencent company. All rights reserved. -Licensed under the MIT License (the "License"); you may not use this file except in compliance with the License. -You may obtain a copy of the License at https://opensource.org/licenses/MIT -Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on -an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the -specific language governing permissions and limitations under the License. -""" - -import collections -import logging -from dataclasses import asdict -from typing import Dict, Optional - -from django.db.models import Q -from django.utils.translation import ugettext as _ - -from backend.configuration.constants import DBType -from backend.db_meta.enums import AccessLayer, ClusterType -from backend.db_meta.exceptions import DBMetaException -from backend.db_meta.models import Cluster, Machine -from backend.flow.engine.bamboo.scene.common.builder import Builder, SubBuilder -from backend.flow.engine.bamboo.scene.common.get_file_list import GetFileList -from backend.flow.plugins.components.collections.mysql.exec_actuator_script import ExecuteDBActuatorScriptComponent -from backend.flow.plugins.components.collections.mysql.trans_flies import TransFileComponent -from backend.flow.utils.mysql.mysql_act_dataclass import DownloadMediaKwargs, ExecActuatorKwargs -from backend.flow.utils.mysql.mysql_act_playload import MysqlActPayload - -logger = logging.getLogger("flow") - - -class MySQLPushPeripheralConfigFlow(object): - def __init__(self, root_id: str, data: Optional[Dict]): - self.root_id = root_id - self.data = data - - def push_config(self): - """ - self.data = { - "uid": 12345, - "created_by": "xxx", - "bk_biz_id": 12345, - "ticket_type": "MYSQL_PUSH_PERIPHERAL_CONFIG", - "cluster_ids": [1, 2, 3], - } - """ - cluster_ids = list(set(self.data["cluster_ids"])) - 
bk_biz_id = self.data["bk_biz_id"] - - cluster_objects = Cluster.objects.filter( - pk__in=cluster_ids, - bk_biz_id=bk_biz_id, - cluster_type__in=[ClusterType.TenDBSingle, ClusterType.TenDBHA, ClusterType.TenDBCluster], - ) - if cluster_objects.count() != len(cluster_ids): - raise DBMetaException( - message="input {} clusters, but found {}".format(len(cluster_ids), cluster_objects.count()) - ) - - mysql_ips_by_cloud = collections.defaultdict(list) - ips_by_cloud = collections.defaultdict(list) - for cluster_object in cluster_objects: - ips_by_cloud[cluster_object.bk_cloud_id].extend( - list(cluster_object.storageinstance_set.values_list("machine__ip", flat=True)) - ) - ips_by_cloud[cluster_object.bk_cloud_id].extend( - list(cluster_object.proxyinstance_set.values_list("machine__ip", flat=True)) - ) - - mysql_ips_by_cloud[cluster_object.bk_cloud_id].extend( - list(cluster_object.storageinstance_set.values_list("machine__ip", flat=True)) - ) - if cluster_object.cluster_type == ClusterType.TenDBCluster: - # tendbcluster 接入层按存储处理 - mysql_ips_by_cloud[cluster_object.bk_cloud_id].extend( - list(cluster_object.proxyinstance_set.values_list("machine__ip", flat=True)) - ) - - trans_file_acts = [] - push_mysql_crond_config_acts = [] - for k, v in ips_by_cloud.items(): - trans_file_acts.append( - { - "act_name": _("下发actuator介质 云区域ID: {}".format(k)), - "act_component_code": TransFileComponent.code, - "kwargs": asdict( - DownloadMediaKwargs( - bk_cloud_id=k, - exec_ip=list(set(v)), - file_list=GetFileList(db_type=DBType.MySQL).get_db_actuator_package(), - ) - ), - } - ) - trans_file_acts.append( - { - "act_name": _("下发MySQL周边程序介质"), - "act_component_code": TransFileComponent.code, - "kwargs": asdict( - DownloadMediaKwargs( - bk_cloud_id=k, - exec_ip=list(set(v)), - file_list=GetFileList(db_type=DBType.MySQL).get_mysql_surrounding_apps_package(), - ) - ), - } - ) - - for ip in list(set(v)): - push_mysql_crond_config_acts.append( - { - "act_name": _("下发mysql-crond配置 
{}".format(ip)), - "act_component_code": ExecuteDBActuatorScriptComponent.code, - "kwargs": asdict( - ExecActuatorKwargs( - bk_cloud_id=k, - exec_ip=ip, - get_mysql_payload_func=MysqlActPayload.push_mysql_crond_config_payload.__name__, - ) - ), - } - ) - - push_mysql_rotatebinlog_config_acts = [] - for k, v in mysql_ips_by_cloud.items(): - for ip in list(set(v)): - push_mysql_rotatebinlog_config_acts.append( - { - "act_name": _("下发MySQL rotatebinlog配置"), - "act_component_code": ExecuteDBActuatorScriptComponent.code, - "kwargs": asdict( - ExecActuatorKwargs( - bk_cloud_id=k, - exec_ip=ip, - cluster_type=Cluster.objects.filter( - Q(storageinstance__machine__ip=ip) | Q(proxyinstance__machine__ip=ip) - ) - .first() - .cluster_type, - get_mysql_payload_func=MysqlActPayload.push_mysql_rotatebinlog_config_payload.__name__, - ) - ), - } - ) - - pipeline = Builder(root_id=self.root_id, data=self.data, need_random_pass_cluster_ids=cluster_ids) - push_config_pipeline = SubBuilder(root_id=self.root_id, data=self.data) - - push_config_pipeline.add_parallel_acts(acts_list=trans_file_acts) - push_config_pipeline.add_parallel_acts(acts_list=push_mysql_crond_config_acts) - push_config_pipeline.add_parallel_acts(acts_list=push_mysql_rotatebinlog_config_acts) - - cluster_pipes = [] - for cluster_obj in cluster_objects: - cluster_pipe = self.__foo(cluster_obj) - cluster_pipes.append(cluster_pipe) - - push_config_pipeline.add_parallel_sub_pipeline(sub_flow_list=cluster_pipes) - pipeline.add_sub_pipeline(sub_flow=push_config_pipeline.build_sub_process(sub_name=_("配置推送"))) - logger.info(_("构建配置推送流程完成")) - pipeline.run_pipeline(is_drop_random_user=True) - - def __foo(self, cluster_obj: Cluster): - proxy_ip_ports = collections.defaultdict(list) - backend_ip_ports = collections.defaultdict(list) - - for ins in cluster_obj.proxyinstance_set.all(): - proxy_ip_ports[ins.machine.ip].append(ins.port) - - for ins in cluster_obj.storageinstance_set.all(): - 
backend_ip_ports[ins.machine.ip].append(ins.port) - - push_mysql_monitor_config_acts = [] - push_mysql_checksum_config_acts = [] - push_dbbackup_config_acts = [] - - for ip, ports in proxy_ip_ports.items(): - push_mysql_monitor_config_acts.append( - { - "act_name": _("{}下发mysql-monitor配置".format(ip)), - "act_component_code": ExecuteDBActuatorScriptComponent.code, - "kwargs": asdict( - ExecActuatorKwargs( - bk_cloud_id=cluster_obj.bk_cloud_id, - exec_ip=ip, - cluster={ - "ports": ports, - "access_layer": AccessLayer.PROXY.value, - "machine_type": Machine.objects.get(ip=ip).machine_type, - "cluster_id": cluster_obj.id, - "immute_domain": cluster_obj.immute_domain, - "db_module_id": cluster_obj.db_module_id, - }, - get_mysql_payload_func=MysqlActPayload.push_mysql_monitor_config_payload.__name__, - ) - ), - } - ) - if cluster_obj.cluster_type == ClusterType.TenDBCluster.value: - push_dbbackup_config_acts.append( - { - "act_name": _("{}下发备份配置".format(ip)), - "act_component_code": ExecuteDBActuatorScriptComponent.code, - "kwargs": asdict( - ExecActuatorKwargs( - bk_cloud_id=cluster_obj.bk_cloud_id, - exec_ip=ip, - cluster={ - "ports": ports, - "machine_type": Machine.objects.get(ip=ip).machine_type, - "cluster_id": cluster_obj.id, - "immute_domain": cluster_obj.immute_domain, - "db_module_id": cluster_obj.db_module_id, - "cluster_type": cluster_obj.cluster_type, - }, - get_mysql_payload_func=MysqlActPayload.push_dbbackup_config_payload.__name__, - ) - ), - } - ) - - for ip, ports in backend_ip_ports.items(): - push_mysql_checksum_config_acts.append( - { - "act_name": _("{}下发mysql-monitor配置".format(ip)), - "act_component_code": ExecuteDBActuatorScriptComponent.code, - "kwargs": asdict( - ExecActuatorKwargs( - bk_cloud_id=cluster_obj.bk_cloud_id, - exec_ip=ip, - cluster={ - "ports": ports, - "access_layer": AccessLayer.STORAGE.value, - "machine_type": Machine.objects.get(ip=ip).machine_type, - "cluster_id": cluster_obj.id, - "immute_domain": cluster_obj.immute_domain, - 
"db_module_id": cluster_obj.db_module_id, - }, - get_mysql_payload_func=MysqlActPayload.push_mysql_monitor_config_payload.__name__, - ) - ), - } - ) - push_mysql_checksum_config_acts.append( - { - "act_name": _("{}下发mysql-table-checksum配置".format(ip)), - "act_component_code": ExecuteDBActuatorScriptComponent.code, - "kwargs": asdict( - ExecActuatorKwargs( - bk_cloud_id=cluster_obj.bk_cloud_id, - exec_ip=ip, - cluster={ - "ports": ports, - # "access_layer": AccessLayer.STORAGE.value, - "machine_type": Machine.objects.get(ip=ip).machine_type, - "cluster_id": cluster_obj.id, - "immute_domain": cluster_obj.immute_domain, - "db_module_id": cluster_obj.db_module_id, - }, - get_mysql_payload_func=MysqlActPayload.push_mysql_checksum_config_payload.__name__, - ) - ), - } - ) - push_dbbackup_config_acts.append( - { - "act_name": _("{}下发备份配置".format(ip)), - "act_component_code": ExecuteDBActuatorScriptComponent.code, - "kwargs": asdict( - ExecActuatorKwargs( - bk_cloud_id=cluster_obj.bk_cloud_id, - exec_ip=ip, - cluster={ - "ports": ports, - "machine_type": Machine.objects.get(ip=ip).machine_type, - "cluster_id": cluster_obj.id, - "immute_domain": cluster_obj.immute_domain, - "db_module_id": cluster_obj.db_module_id, - "cluster_type": cluster_obj.cluster_type, - }, - get_mysql_payload_func=MysqlActPayload.push_dbbackup_config_payload.__name__, - ) - ), - } - ) - - p = SubBuilder(root_id=self.root_id, data=self.data) - p.add_parallel_acts(acts_list=push_mysql_monitor_config_acts) - p.add_parallel_acts(acts_list=push_mysql_checksum_config_acts) - p.add_parallel_acts(acts_list=push_dbbackup_config_acts) - - return p.build_sub_process(sub_name=_("{} 推送周边配置".format(cluster_obj.immute_domain))) diff --git a/dbm-ui/backend/flow/engine/bamboo/scene/spider/spider_cluster_standardize_flow.py b/dbm-ui/backend/flow/engine/bamboo/scene/spider/spider_cluster_standardize_flow.py deleted file mode 100644 index 5040e2f9df..0000000000 --- 
a/dbm-ui/backend/flow/engine/bamboo/scene/spider/spider_cluster_standardize_flow.py +++ /dev/null @@ -1,377 +0,0 @@ -# -*- coding: utf-8 -*- -""" -TencentBlueKing is pleased to support the open source community by making 蓝鲸智云-DB管理系统(BlueKing-BK-DBM) available. -Copyright (C) 2017-2023 THL A29 Limited, a Tencent company. All rights reserved. -Licensed under the MIT License (the "License"); you may not use this file except in compliance with the License. -You may obtain a copy of the License at https://opensource.org/licenses/MIT -Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on -an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the -specific language governing permissions and limitations under the License. -""" -import copy -import logging -from collections import defaultdict -from dataclasses import asdict -from typing import Dict, List, Optional - -from django.utils.translation import ugettext as _ - -from backend.configuration.constants import DBType -from backend.db_meta.enums import ClusterType, TenDBClusterSpiderRole -from backend.db_meta.exceptions import DBMetaException -from backend.db_meta.models import Cluster, ProxyInstance, StorageInstance -from backend.db_package.models import Package -from backend.flow.consts import DBA_ROOT_USER, DEPENDENCIES_PLUGINS, MediumEnum -from backend.flow.engine.bamboo.scene.common.builder import Builder, SubBuilder, SubProcess -from backend.flow.engine.bamboo.scene.common.get_file_list import GetFileList -from backend.flow.plugins.components.collections.common.download_backup_client import DownloadBackupClientComponent -from backend.flow.plugins.components.collections.common.install_nodeman_plugin import ( - InstallNodemanPluginServiceComponent, -) -from backend.flow.plugins.components.collections.mysql.cluster_standardize_trans_module import ( - ClusterStandardizeTransModuleComponent, -) -from 
backend.flow.plugins.components.collections.mysql.exec_actuator_script import ExecuteDBActuatorScriptComponent -from backend.flow.plugins.components.collections.mysql.mysql_cluster_instantiate_config import ( - MySQLClusterInstantiateConfigComponent, -) -from backend.flow.plugins.components.collections.mysql.trans_flies import TransFileComponent -from backend.flow.utils.common_act_dataclass import DownloadBackupClientKwargs, InstallNodemanPluginKwargs -from backend.flow.utils.mysql.mysql_act_dataclass import DownloadMediaKwargs, ExecActuatorKwargs -from backend.flow.utils.mysql.mysql_act_playload import MysqlActPayload - -logger = logging.getLogger("flow") - - -class SpiderClusterStandardizeFlow(object): - def __init__(self, root_id: str, data: Optional[Dict]): - self.root_id = root_id - self.data = data - - def standardize(self): - """ - self.data = { - "uid": "20230830", - "created_by": "xxx", - "bk_biz_id": "11", - "ticket_type": "TENDBCLUSTER_STANDARDIZE", - "infos": { - "cluster_ids": [1, 2, 3], - } - } - """ - cluster_ids = self.data["infos"]["cluster_ids"] - bk_biz_id = self.data["bk_biz_id"] - - cluster_objects = Cluster.objects.filter( - pk__in=cluster_ids, bk_biz_id=bk_biz_id, cluster_type=ClusterType.TenDBCluster.value - ).prefetch_related( - "proxyinstance_set", - "storageinstance_set", - "proxyinstance_set__machine", - "storageinstance_set__machine", - "proxyinstance_set__tendbclusterspiderext", - ) - if cluster_objects.count() != len(cluster_ids): - raise DBMetaException( - message="input {} clusters, but found {}".format(len(cluster_ids), cluster_objects.count()) - ) - - standardize_pipe = Builder( - root_id=self.root_id, - data=self.data, - need_random_pass_cluster_ids=list(set(self.data["infos"]["cluster_ids"])), - ) - standardize_pipe.add_sub_pipeline(self._build_trans_module_sub(clusters=cluster_objects)) - standardize_pipe.add_sub_pipeline(self._build_instantiate_config_sub(clusters=cluster_objects)) - - spider_master_ips = {} - spider_slave_ips 
= {} - spider_mnt_ips = {} - storage_ips = {} - - ip_group_by_cloud = defaultdict(list) - for cluster_obj in cluster_objects: - for ins in cluster_obj.proxyinstance_set.all(): - ip = ins.machine.ip - bk_cloud_id = ins.machine.bk_cloud_id - ip_group_by_cloud[bk_cloud_id].append(ip) - - if ins.tendbclusterspiderext.spider_role == TenDBClusterSpiderRole.SPIDER_MASTER.value: - spider_master_ips[ip] = bk_cloud_id - elif ins.tendbclusterspiderext.spider_role == TenDBClusterSpiderRole.SPIDER_SLAVE.value: - spider_slave_ips[ip] = bk_cloud_id - elif ins.tendbclusterspiderext.spider_role == TenDBClusterSpiderRole.SPIDER_MNT.value: - spider_mnt_ips[ip] = bk_cloud_id - else: - raise Exception # ToDo - - for ins in cluster_obj.storageinstance_set.all(): - ip = ins.machine.ip - bk_cloud_id = ins.machine.bk_cloud_id - ip_group_by_cloud[bk_cloud_id].append(ip) - storage_ips[ip] = bk_cloud_id - - # 按 bk_cloud_id 批量下发文件 - standardize_pipe.add_sub_pipeline(self._trans_file(ips_group=ip_group_by_cloud)) - - standardize_pipe.add_parallel_sub_pipeline( - sub_flow_list=[ - self._build_spider_master_sub(ips=spider_master_ips), - self._build_remote_sub(ips=storage_ips), - ] - ) - - if spider_slave_ips: - standardize_pipe.add_sub_pipeline(self._build_spider_slave_sub(ips=spider_slave_ips)) - - if spider_mnt_ips: - standardize_pipe.add_sub_pipeline(self._build_spider_mnt_sub(ips=spider_mnt_ips)) - - logger.info(_("构建TenDBCluster集群标准化流程成功")) - standardize_pipe.run_pipeline(is_drop_random_user=True) - - def _build_instantiate_config_sub(self, clusters: List[Cluster]) -> SubProcess: - pipes = [] - for cluster in clusters: - cluster_pipe = SubBuilder( - root_id=self.root_id, data={**copy.deepcopy(self.data), "cluster_id": cluster.id} - ) - cluster_pipe.add_act( - act_name=_("实例化配置"), act_component_code=MySQLClusterInstantiateConfigComponent.code, kwargs={} - ) - pipes.append(cluster_pipe.build_sub_process(sub_name=_("实例化 {} 配置".format(cluster.immute_domain)))) - - p = 
SubBuilder(root_id=self.root_id, data=self.data) - p.add_parallel_sub_pipeline(sub_flow_list=pipes) - return p.build_sub_process(sub_name=_("实例化集群配置")) - - def _build_trans_module_sub(self, clusters: List[Cluster]) -> SubProcess: - pipes = [] - for cluster in clusters: - cluster_pipe = SubBuilder( - root_id=self.root_id, data={**copy.deepcopy(self.data), "cluster_id": cluster.id} - ) - cluster_pipe.add_act( - act_name=_("模块标准化"), act_component_code=ClusterStandardizeTransModuleComponent.code, kwargs={} - ) - - pipes.append(cluster_pipe.build_sub_process(sub_name=_("{} CC 模块标准化".format(cluster.immute_domain)))) - - p = SubBuilder(root_id=self.root_id, data=self.data) - p.add_parallel_sub_pipeline(sub_flow_list=pipes) - return p.build_sub_process(sub_name=_("CC标准化")) - - def _trans_file(self, ips_group: Dict) -> SubProcess: - trans_file_pipes = [] - for bk_cloud_id, ips in ips_group.items(): - unique_ips = list(set(ips)) - - cloud_trans_file_pipe = SubBuilder(root_id=self.root_id, data=self.data) - - cloud_trans_file_pipe.add_act( - act_name=_("下发MySQL周边程序介质"), - act_component_code=TransFileComponent.code, - kwargs=asdict( - DownloadMediaKwargs( - bk_cloud_id=bk_cloud_id, - exec_ip=unique_ips, - file_list=GetFileList(db_type=DBType.MySQL).get_mysql_surrounding_apps_package(), - ) - ), - ) - cloud_trans_file_pipe.add_act( - act_name=_("下发db-actuator介质"), - act_component_code=TransFileComponent.code, - kwargs=asdict( - DownloadMediaKwargs( - bk_cloud_id=bk_cloud_id, - exec_ip=unique_ips, - file_list=GetFileList(db_type=DBType.MySQL).get_db_actuator_package(), - ) - ), - ) - - for plugin_name in DEPENDENCIES_PLUGINS: - cloud_trans_file_pipe.add_act( - act_name=_("安装{}插件".format(plugin_name)), - act_component_code=InstallNodemanPluginServiceComponent.code, - kwargs=asdict( - InstallNodemanPluginKwargs(ips=unique_ips, plugin_name=plugin_name, bk_cloud_id=bk_cloud_id) - ), - ) - - cloud_trans_file_pipe.add_act( - act_name=_("安装backup-client工具"), - 
act_component_code=DownloadBackupClientComponent.code, - kwargs=asdict( - DownloadBackupClientKwargs( - bk_cloud_id=bk_cloud_id, - bk_biz_id=self.data["bk_biz_id"], - download_host_list=unique_ips, - ) - ), - ) - - trans_file_pipes.append( - cloud_trans_file_pipe.build_sub_process(sub_name=_("cloud {} 下发文件".format(bk_cloud_id))) - ) - - p = SubBuilder(root_id=self.root_id, data=self.data) - p.add_parallel_sub_pipeline(sub_flow_list=trans_file_pipes) - return p.build_sub_process(sub_name=_("下发文件")) - - def _build_spider_master_sub(self, ips: Dict) -> SubProcess: - return self._build_spider_sub(ips=ips, name=_("spider master 标准化")) - - def _build_spider_slave_sub(self, ips: Dict) -> SubProcess: - return self._build_spider_sub(ips=ips, name=_("spider slave 标准化")) - - def _build_spider_mnt_sub(self, ips: Dict) -> SubProcess: - return self._build_spider_sub(ips=ips, name=_("spider mnt 标准化")) - - def _build_remote_sub(self, ips: Dict) -> SubProcess: - pipes = [] - for ip, bk_cloud_id in ips.items(): - # 同机器实例版本肯定一样 - major_version = Cluster.objects.filter(storageinstance__machine__ip=ip).first().major_version - mysql_pkg = Package.get_latest_package(version=major_version, pkg_type=MediumEnum.MySQL) - - ports = StorageInstance.objects.filter(machine__ip=ip, bk_biz_id=self.data["bk_biz_id"]).values_list( - "port", flat=True - ) - - single_pipe = self._build_single_instance_sub( - bk_cloud_id=bk_cloud_id, ip=ip, mysql_pkg=mysql_pkg, ports=ports, version=major_version - ) - - pipes.append(single_pipe.build_sub_process(sub_name=_("{} 标准化".format(ip)))) - - p = SubBuilder(root_id=self.root_id, data=self.data) - p.add_parallel_sub_pipeline(sub_flow_list=pipes) - return p.build_sub_process(sub_name=_("remote 标准化")) - - def _build_spider_sub(self, ips: Dict, name: str) -> SubProcess: - pipes = [] - for ip, bk_cloud_id in ips.items(): - # 同机器实例版本肯定一样 - qs = ProxyInstance.objects.filter(machine__ip=ip, bk_biz_id=self.data["bk_biz_id"]) - version = 
"Spider-{}".format(qs.first().version.split(".")[0]) - mysql_pkg = Package.get_latest_package(version=version, pkg_type=MediumEnum.Spider) - - ports = qs.values_list("port", flat=True) - - single_pipe = self._build_single_instance_sub( - bk_cloud_id=bk_cloud_id, ip=ip, mysql_pkg=mysql_pkg, ports=ports, version=version - ) - - pipes.append(single_pipe.build_sub_process(sub_name=_("{} 标准化".format(ip)))) - - p = SubBuilder(root_id=self.root_id, data=self.data) - p.add_parallel_sub_pipeline(sub_flow_list=pipes) - return p.build_sub_process(sub_name=_(name)) - - def _build_single_instance_sub( - self, bk_cloud_id: int, ip: str, mysql_pkg: Package, ports: List[int], version: str - ) -> SubBuilder: - single_pipe = SubBuilder(root_id=self.root_id, data=self.data) - - single_pipe.add_act( - act_name=_("系统库表权限标准化"), - act_component_code=ExecuteDBActuatorScriptComponent.code, - kwargs=asdict( - ExecActuatorKwargs( - exec_ip=ip, - run_as_system_user=DBA_ROOT_USER, - cluster_type=ClusterType.TenDBCluster.value, - cluster={ - "ports": list(ports), - "mysql_pkg": {"name": mysql_pkg.name, "md5": mysql_pkg.md5}, - "version": version, - }, - bk_cloud_id=bk_cloud_id, - get_mysql_payload_func=MysqlActPayload.get_standardize_mysql_instance_payload.__name__, - ) - ), - ) - - single_pipe.add_act( - act_name=_("部署mysql-crond"), - act_component_code=ExecuteDBActuatorScriptComponent.code, - kwargs=asdict( - ExecActuatorKwargs( - exec_ip=ip, - bk_cloud_id=bk_cloud_id, - get_mysql_payload_func=MysqlActPayload.get_deploy_mysql_crond_payload.__name__, - cluster_type=ClusterType.TenDBCluster.value, - ) - ), - ) - - single_pipe.add_act( - act_name=_("部署监控程序"), - act_component_code=ExecuteDBActuatorScriptComponent.code, - kwargs=asdict( - ExecActuatorKwargs( - exec_ip=ip, - bk_cloud_id=bk_cloud_id, - get_mysql_payload_func=MysqlActPayload.get_deploy_mysql_monitor_payload.__name__, - cluster={"cluster_ids": self.data["infos"]["cluster_ids"]}, - cluster_type=ClusterType.TenDBCluster.value, - ) - ), 
- ) - - single_pipe.add_act( - act_name=_("部署备份程序"), - act_component_code=ExecuteDBActuatorScriptComponent.code, - kwargs=asdict( - ExecActuatorKwargs( - exec_ip=ip, - bk_cloud_id=bk_cloud_id, - get_mysql_payload_func=MysqlActPayload.get_install_db_backup_payload.__name__, - cluster_type=ClusterType.TenDBCluster.value, - ) - ), - ) - - single_pipe.add_act( - act_name=_("部署rotate binlog"), - act_component_code=ExecuteDBActuatorScriptComponent.code, - kwargs=asdict( - ExecActuatorKwargs( - exec_ip=ip, - bk_cloud_id=bk_cloud_id, - get_mysql_payload_func=MysqlActPayload.get_install_mysql_rotatebinlog_payload.__name__, - cluster_type=ClusterType.TenDBCluster.value, - ) - ), - ) - - single_pipe.add_act( - act_name=_("部署数据校验程序"), - act_component_code=ExecuteDBActuatorScriptComponent.code, - kwargs=asdict( - ExecActuatorKwargs( - exec_ip=ip, - bk_cloud_id=bk_cloud_id, - get_mysql_payload_func=MysqlActPayload.get_install_mysql_checksum_payload.__name__, - cluster_type=ClusterType.TenDBCluster.value, - ) - ), - ) - - single_pipe.add_act( - act_name=_("部署DBA工具箱"), - act_component_code=ExecuteDBActuatorScriptComponent.code, - kwargs=asdict( - ExecActuatorKwargs( - bk_cloud_id=bk_cloud_id, - exec_ip=ip, - get_mysql_payload_func=MysqlActPayload.get_install_dba_toolkit_payload.__name__, - cluster_type=ClusterType.TenDBCluster.value, - ) - ), - ) - - return single_pipe diff --git a/dbm-ui/backend/flow/engine/bamboo/scene/tendbsingle/standardize.py b/dbm-ui/backend/flow/engine/bamboo/scene/tendbsingle/standardize.py deleted file mode 100644 index c3ffaceee1..0000000000 --- a/dbm-ui/backend/flow/engine/bamboo/scene/tendbsingle/standardize.py +++ /dev/null @@ -1,271 +0,0 @@ -# -*- coding: utf-8 -*- -""" -TencentBlueKing is pleased to support the open source community by making 蓝鲸智云-DB管理系统(BlueKing-BK-DBM) available. -Copyright (C) 2017-2023 THL A29 Limited, a Tencent company. All rights reserved. 
-Licensed under the MIT License (the "License"); you may not use this file except in compliance with the License. -You may obtain a copy of the License at https://opensource.org/licenses/MIT -Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on -an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the -specific language governing permissions and limitations under the License. -""" -import copy -import logging -from collections import defaultdict -from dataclasses import asdict -from typing import Dict, List, Optional - -from django.utils.translation import ugettext as _ - -from backend.configuration.constants import DBType -from backend.db_meta.enums import ClusterType -from backend.db_meta.exceptions import DBMetaException -from backend.db_meta.models import Cluster, StorageInstance -from backend.db_package.models import Package -from backend.flow.consts import DBA_ROOT_USER, DEPENDENCIES_PLUGINS, MediumEnum -from backend.flow.engine.bamboo.scene.common.builder import Builder, SubBuilder, SubProcess -from backend.flow.engine.bamboo.scene.common.get_file_list import GetFileList -from backend.flow.plugins.components.collections.common.download_backup_client import DownloadBackupClientComponent -from backend.flow.plugins.components.collections.common.install_nodeman_plugin import ( - InstallNodemanPluginServiceComponent, -) -from backend.flow.plugins.components.collections.mysql.cluster_standardize_trans_module import ( - ClusterStandardizeTransModuleComponent, -) -from backend.flow.plugins.components.collections.mysql.exec_actuator_script import ExecuteDBActuatorScriptComponent -from backend.flow.plugins.components.collections.mysql.mysql_cluster_instantiate_config import ( - MySQLClusterInstantiateConfigComponent, -) -from backend.flow.plugins.components.collections.mysql.trans_flies import TransFileComponent -from 
backend.flow.utils.common_act_dataclass import DownloadBackupClientKwargs, InstallNodemanPluginKwargs -from backend.flow.utils.mysql.mysql_act_dataclass import DownloadMediaKwargs, ExecActuatorKwargs -from backend.flow.utils.mysql.mysql_act_playload import MysqlActPayload - -logger = logging.getLogger("flow") - - -class TenDBSingleStandardizeFlow(object): - def __init__(self, root_id: str, data: Optional[Dict]): - self.root_id = root_id - self.data = data - - def standardize(self): - cluster_ids = self.data["infos"]["cluster_ids"] - bk_biz_id = self.data["bk_biz_id"] - - cluster_objects = Cluster.objects.filter( - pk__in=cluster_ids, bk_biz_id=bk_biz_id, cluster_type=ClusterType.TenDBSingle.value - ).prefetch_related("storageinstance_set", "storageinstance_set__machine") - if cluster_objects.count() != len(cluster_ids): - raise DBMetaException( - message="input {} clusters, but found {}".format(len(cluster_ids), cluster_objects.count()) - ) - - standardize_pipe = Builder( - root_id=self.root_id, - data=self.data, - need_random_pass_cluster_ids=list(set(self.data["infos"]["cluster_ids"])), - ) - - standardize_pipe.add_sub_pipeline(self._build_trans_module_sub(clusters=cluster_objects)) - standardize_pipe.add_sub_pipeline(self._build_instantiate_mysql_config_sub(clusters=cluster_objects)) - - storage_ips = {} - ip_group_by_cloud = defaultdict(list) - for cluster_object in cluster_objects: - for ins in cluster_object.storageinstance_set.all(): - ip = ins.machine.ip - bk_cloud_id = ins.machine.bk_cloud_id - storage_ips[ip] = bk_cloud_id - ip_group_by_cloud[bk_cloud_id].append(ip) - - standardize_pipe.add_sub_pipeline(self._trans_file(ips_group=ip_group_by_cloud)) - - standardize_pipe.add_parallel_sub_pipeline( - sub_flow_list=[ - self._build_storage_sub(ips=storage_ips), - ] - ) - - logger.info(_("构建TenDBSingle集群标准化流程成功")) - standardize_pipe.run_pipeline(is_drop_random_user=True) - - def _trans_file(self, ips_group: Dict) -> SubProcess: - trans_file_pipes = [] - for 
bk_cloud_id, ips in ips_group.items(): - unique_ips = list(set(ips)) - - cloud_trans_file_pipe = SubBuilder(root_id=self.root_id, data=self.data) - - cloud_trans_file_pipe.add_act( - act_name=_("下发MySQL周边程序介质"), - act_component_code=TransFileComponent.code, - kwargs=asdict( - DownloadMediaKwargs( - bk_cloud_id=bk_cloud_id, - exec_ip=unique_ips, - file_list=GetFileList(db_type=DBType.MySQL).get_mysql_surrounding_apps_package(), - ) - ), - ) - cloud_trans_file_pipe.add_act( - act_name=_("下发db-actuator介质"), - act_component_code=TransFileComponent.code, - kwargs=asdict( - DownloadMediaKwargs( - bk_cloud_id=bk_cloud_id, - exec_ip=unique_ips, - file_list=GetFileList(db_type=DBType.MySQL).get_db_actuator_package(), - ) - ), - ) - - for plugin_name in DEPENDENCIES_PLUGINS: - cloud_trans_file_pipe.add_act( - act_name=_("安装{}插件".format(plugin_name)), - act_component_code=InstallNodemanPluginServiceComponent.code, - kwargs=asdict( - InstallNodemanPluginKwargs(ips=unique_ips, plugin_name=plugin_name, bk_cloud_id=bk_cloud_id) - ), - ) - - cloud_trans_file_pipe.add_act( - act_name=_("安装backup-client工具"), - act_component_code=DownloadBackupClientComponent.code, - kwargs=asdict( - DownloadBackupClientKwargs( - bk_cloud_id=bk_cloud_id, - bk_biz_id=self.data["bk_biz_id"], - download_host_list=unique_ips, - ) - ), - ) - - trans_file_pipes.append( - cloud_trans_file_pipe.build_sub_process(sub_name=_("cloud {} 下发文件".format(bk_cloud_id))) - ) - - p = SubBuilder(root_id=self.root_id, data=self.data) - p.add_parallel_sub_pipeline(sub_flow_list=trans_file_pipes) - return p.build_sub_process(sub_name=_("下发文件")) - - def _build_trans_module_sub(self, clusters: List[Cluster]) -> SubProcess: - pipes = [] - for cluster in clusters: - cluster_pipe = SubBuilder( - root_id=self.root_id, data={**copy.deepcopy(self.data), "cluster_id": cluster.id} - ) - cluster_pipe.add_act( - act_name=_("模块标准化"), act_component_code=ClusterStandardizeTransModuleComponent.code, kwargs={} - ) - - 
pipes.append(cluster_pipe.build_sub_process(sub_name=_("{} CC 模块标准化".format(cluster.immute_domain)))) - - p = SubBuilder(root_id=self.root_id, data=self.data) - p.add_parallel_sub_pipeline(sub_flow_list=pipes) - return p.build_sub_process(sub_name=_("CC标准化")) - - def _build_instantiate_mysql_config_sub(self, clusters: List[Cluster]) -> SubProcess: - pipes = [] - for cluster in clusters: - cluster_pipe = SubBuilder( - root_id=self.root_id, data={**copy.deepcopy(self.data), "cluster_id": cluster.id} - ) - cluster_pipe.add_act( - act_name=_("实例化配置"), act_component_code=MySQLClusterInstantiateConfigComponent.code, kwargs={} - ) - pipes.append(cluster_pipe.build_sub_process(sub_name=_("实例化 {} 配置".format(cluster.immute_domain)))) - - p = SubBuilder(root_id=self.root_id, data=self.data) - p.add_parallel_sub_pipeline(sub_flow_list=pipes) - return p.build_sub_process(sub_name=_("实例化集群配置")) - - def _build_storage_sub(self, ips: Dict) -> SubProcess: - pipes = [] - for ip, bk_cloud_id in ips.items(): - single_pipe = SubBuilder(root_id=self.root_id, data=self.data) - - # 同一机器所有集群的 major version 应该是一样的 - major_version = Cluster.objects.filter(storageinstance__machine__ip=ip).first().major_version - mysql_pkg = Package.get_latest_package(version=major_version, pkg_type=MediumEnum.MySQL) - - ports = StorageInstance.objects.filter(machine__ip=ip, bk_biz_id=self.data["bk_biz_id"]).values_list( - "port", flat=True - ) - - single_pipe.add_act( - act_name=_("实例标准化"), - act_component_code=ExecuteDBActuatorScriptComponent.code, - kwargs=asdict( - ExecActuatorKwargs( - exec_ip=ip, - run_as_system_user=DBA_ROOT_USER, - cluster_type=ClusterType.TenDBSingle.value, - cluster={ - "ports": list(ports), - "mysql_pkg": {"name": mysql_pkg.name, "md5": mysql_pkg.md5}, - "version": major_version, - }, - bk_cloud_id=bk_cloud_id, - get_mysql_payload_func=MysqlActPayload.get_standardize_mysql_instance_payload.__name__, - ) - ), - ) - - single_pipe.add_act( - act_name=_("部署mysql-crond"), - 
act_component_code=ExecuteDBActuatorScriptComponent.code, - kwargs=asdict( - ExecActuatorKwargs( - exec_ip=ip, - bk_cloud_id=bk_cloud_id, - get_mysql_payload_func=MysqlActPayload.get_deploy_mysql_crond_payload.__name__, - cluster_type=ClusterType.TenDBSingle.value, - ) - ), - ) - - single_pipe.add_act( - act_name=_("部署监控程序"), - act_component_code=ExecuteDBActuatorScriptComponent.code, - kwargs=asdict( - ExecActuatorKwargs( - exec_ip=ip, - bk_cloud_id=bk_cloud_id, - get_mysql_payload_func=MysqlActPayload.get_deploy_mysql_monitor_payload.__name__, - cluster={"cluster_ids": self.data["infos"]["cluster_ids"]}, - cluster_type=ClusterType.TenDBSingle.value, - ) - ), - ) - - single_pipe.add_act( - act_name=_("部署备份程序"), - act_component_code=ExecuteDBActuatorScriptComponent.code, - kwargs=asdict( - ExecActuatorKwargs( - exec_ip=ip, - bk_cloud_id=bk_cloud_id, - get_mysql_payload_func=MysqlActPayload.get_install_db_backup_payload.__name__, - cluster_type=ClusterType.TenDBSingle.value, - ) - ), - ) - - single_pipe.add_act( - act_name=_("部署DBA工具箱"), - act_component_code=ExecuteDBActuatorScriptComponent.code, - kwargs=asdict( - ExecActuatorKwargs( - bk_cloud_id=bk_cloud_id, - exec_ip=ip, - get_mysql_payload_func=MysqlActPayload.get_install_dba_toolkit_payload.__name__, - cluster_type=ClusterType.TenDBSingle.value, - ) - ), - ) - - pipes.append(single_pipe.build_sub_process(sub_name=_("{} 标准化".format(ip)))) - - p = SubBuilder(root_id=self.root_id, data=self.data) - p.add_parallel_sub_pipeline(sub_flow_list=pipes) - return p.build_sub_process(sub_name=_("存储层标准化")) diff --git a/dbm-ui/backend/flow/engine/controller/mysql.py b/dbm-ui/backend/flow/engine/controller/mysql.py index 283a5f81ea..3f60d3cf2c 100644 --- a/dbm-ui/backend/flow/engine/controller/mysql.py +++ b/dbm-ui/backend/flow/engine/controller/mysql.py @@ -16,6 +16,7 @@ from backend.flow.engine.bamboo.scene.common.download_file import DownloadFileFlow from 
backend.flow.engine.bamboo.scene.common.transfer_cluster_to_other_biz import TransferMySQLClusterToOtherBizFlow from backend.flow.engine.bamboo.scene.mysql.dbconsole import DbConsoleDumpSqlFlow +from backend.flow.engine.bamboo.scene.mysql.deploy_peripheraltools.flow import MySQLStandardizeFlow from backend.flow.engine.bamboo.scene.mysql.import_sqlfile_flow import ImportSQLFlow from backend.flow.engine.bamboo.scene.mysql.mysql_authorize_rules import MySQLAuthorizeRulesFlows from backend.flow.engine.bamboo.scene.mysql.mysql_checksum import MysqlChecksumFlow @@ -30,7 +31,6 @@ from backend.flow.engine.bamboo.scene.mysql.mysql_ha_disable_flow import MySQLHADisableFlow from backend.flow.engine.bamboo.scene.mysql.mysql_ha_enable_flow import MySQLHAEnableFlow from backend.flow.engine.bamboo.scene.mysql.mysql_ha_metadata_import import TenDBHAMetadataImportFlow -from backend.flow.engine.bamboo.scene.mysql.mysql_ha_standardize_flow import MySQLHAStandardizeFlow from backend.flow.engine.bamboo.scene.mysql.mysql_ha_upgrade import ( DestroyNonStanbySlaveMySQLFlow, TendbClusterUpgradeFlow, @@ -44,7 +44,6 @@ from backend.flow.engine.bamboo.scene.mysql.mysql_proxy_cluster_add import MySQLProxyClusterAddFlow from backend.flow.engine.bamboo.scene.mysql.mysql_proxy_cluster_switch import MySQLProxyClusterSwitchFlow from backend.flow.engine.bamboo.scene.mysql.mysql_proxy_upgrade import MySQLProxyLocalUpgradeFlow -from backend.flow.engine.bamboo.scene.mysql.mysql_push_peripheral_config import MySQLPushPeripheralConfigFlow from backend.flow.engine.bamboo.scene.mysql.mysql_random_password import MySQLRandomizePassword from backend.flow.engine.bamboo.scene.mysql.mysql_rename_database_flow import MySQLRenameDatabaseFlow from backend.flow.engine.bamboo.scene.mysql.mysql_restore_slave_remote_flow import MySQLRestoreSlaveRemoteFlow @@ -547,10 +546,6 @@ def mysql_single_rename_database_scene(self): ) flow.rename_database() - def mysql_ha_standardize_scene(self): - flow = 
MySQLHAStandardizeFlow(root_id=self.root_id, data=self.ticket_data) - flow.standardize() - def mysql_randomize_password(self): flow = MySQLRandomizePassword(root_id=self.root_id, data=self.ticket_data) flow.mysql_randomize_password() @@ -659,14 +654,6 @@ def tranfer_biz_scene(self): flow = TransferMySQLClusterToOtherBizFlow(root_id=self.root_id, data=self.ticket_data) flow.transfer_to_other_biz_flow() - def push_peripheral_config_scene(self): - """ - 下发周边配置 - """ - - flow = MySQLPushPeripheralConfigFlow(root_id=self.root_id, data=self.ticket_data) - flow.push_config() - def non_standby_slaves_upgrade_scene(self): """ 非Standby从库升级 @@ -688,6 +675,10 @@ def non_standby_slaves_destroy_scene(self): flow = DestroyNonStanbySlaveMySQLFlow(root_id=self.root_id, ticket_data=self.ticket_data) flow.destroy() + def cluster_standardize(self): + flow = MySQLStandardizeFlow(root_id=self.root_id, data=self.ticket_data) + flow.doit() + def mysql_machine_clear_scene(self): """ 清理mysql机器 diff --git a/dbm-ui/backend/flow/engine/controller/spider.py b/dbm-ui/backend/flow/engine/controller/spider.py index 5a1c4868aa..522872cb12 100644 --- a/dbm-ui/backend/flow/engine/controller/spider.py +++ b/dbm-ui/backend/flow/engine/controller/spider.py @@ -28,7 +28,6 @@ from backend.flow.engine.bamboo.scene.spider.spider_cluster_flashback import TenDBClusterFlashbackFlow from backend.flow.engine.bamboo.scene.spider.spider_cluster_metadata_import_flow import SpiderClusterMetadataImportFlow from backend.flow.engine.bamboo.scene.spider.spider_cluster_rollback_flow import TenDBRollBackDataFlow -from backend.flow.engine.bamboo.scene.spider.spider_cluster_standardize_flow import SpiderClusterStandardizeFlow from backend.flow.engine.bamboo.scene.spider.spider_cluster_truncate_database import SpiderTruncateDatabaseFlow from backend.flow.engine.bamboo.scene.spider.spider_partition import SpiderPartitionFlow from backend.flow.engine.bamboo.scene.spider.spider_partition_cron import SpiderPartitionCronFlow @@ 
-234,9 +233,9 @@ def append_deploy_ctl_scene(self): flow = AppendDeployCTLFlow(root_id=self.root_id, data=self.ticket_data) flow.run() - def tendbcluster_standardize_scene(self): - flow = SpiderClusterStandardizeFlow(root_id=self.root_id, data=self.ticket_data) - flow.standardize() + # def tendbcluster_standardize_scene(self): + # flow = SpiderClusterStandardizeFlow(root_id=self.root_id, data=self.ticket_data) + # flow.standardize() def metadata_import_scene(self): flow = SpiderClusterMetadataImportFlow(root_id=self.root_id, data=self.ticket_data) diff --git a/dbm-ui/backend/flow/engine/controller/tendbsingle.py b/dbm-ui/backend/flow/engine/controller/tendbsingle.py index 49d256a0b4..149456ea94 100644 --- a/dbm-ui/backend/flow/engine/controller/tendbsingle.py +++ b/dbm-ui/backend/flow/engine/controller/tendbsingle.py @@ -9,7 +9,6 @@ specific language governing permissions and limitations under the License. """ from backend.flow.engine.bamboo.scene.tendbsingle.metadata_import import TenDBSingleMetadataImportFlow -from backend.flow.engine.bamboo.scene.tendbsingle.standardize import TenDBSingleStandardizeFlow from backend.flow.engine.controller.base import BaseController @@ -18,6 +17,6 @@ def metadata_import_scene(self): flow = TenDBSingleMetadataImportFlow(root_id=self.root_id, data=self.ticket_data) flow.import_meta() - def standardize_scene(self): - flow = TenDBSingleStandardizeFlow(root_id=self.root_id, data=self.ticket_data) - flow.standardize() + # def standardize_scene(self): + # flow = TenDBSingleStandardizeFlow(root_id=self.root_id, data=self.ticket_data) + # flow.standardize() diff --git a/dbm-ui/backend/flow/plugins/components/collections/common/create_random_job_user.py b/dbm-ui/backend/flow/plugins/components/collections/common/create_random_job_user.py index 3b90350021..ae64910ce4 100644 --- a/dbm-ui/backend/flow/plugins/components/collections/common/create_random_job_user.py +++ 
b/dbm-ui/backend/flow/plugins/components/collections/common/create_random_job_user.py @@ -48,7 +48,6 @@ def __add_priv(self, params): def _execute(self, data, parent_data, callback=None) -> bool: kwargs = data.get_one_of_inputs("kwargs") global_data = data.get_one_of_inputs("global_data") - encrypt_switch_pwd = global_data["job_root_id"] common_param = { "bk_cloud_id": -1, diff --git a/dbm-ui/backend/flow/plugins/components/collections/mysql/cluster_standardize_trans_module.py b/dbm-ui/backend/flow/plugins/components/collections/mysql/cluster_standardize_trans_module.py index 9cc4e05348..0a7f7f4076 100644 --- a/dbm-ui/backend/flow/plugins/components/collections/mysql/cluster_standardize_trans_module.py +++ b/dbm-ui/backend/flow/plugins/components/collections/mysql/cluster_standardize_trans_module.py @@ -22,7 +22,11 @@ def _execute(self, data, parent_data) -> bool: kwargs = data.get_one_of_inputs("kwargs") global_data = data.get_one_of_inputs("global_data") - cluster_id = global_data["cluster_id"] + if "cluster_id" in global_data: + cluster_id = global_data["cluster_id"] + else: + cluster_id = kwargs["cluster_id"] + cluster_obj = Cluster.objects.get(pk=cluster_id) MysqlCCTopoOperator(cluster_obj).transfer_instances_to_cluster_module( diff --git a/dbm-ui/backend/flow/plugins/components/collections/mysql/generate_mysql_cluster_standardize_flow.py b/dbm-ui/backend/flow/plugins/components/collections/mysql/generate_mysql_cluster_standardize_flow.py new file mode 100644 index 0000000000..f48dc49b55 --- /dev/null +++ b/dbm-ui/backend/flow/plugins/components/collections/mysql/generate_mysql_cluster_standardize_flow.py @@ -0,0 +1,57 @@ +# -*- coding: utf-8 -*- +""" +TencentBlueKing is pleased to support the open source community by making 蓝鲸智云-DB管理系统(BlueKing-BK-DBM) available. +Copyright (C) 2017-2023 THL A29 Limited, a Tencent company. All rights reserved. +Licensed under the MIT License (the "License"); you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at https://opensource.org/licenses/MIT +Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on +an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the +specific language governing permissions and limitations under the License. +""" +from django.utils.translation import gettext as _ +from pipeline.component_framework.component import Component + +from backend.db_meta.models import Cluster +from backend.flow.engine.bamboo.scene.mysql.deploy_peripheraltools.departs import ALLDEPARTS +from backend.flow.plugins.components.collections.common.base_service import BaseService +from backend.ticket.constants import TicketType +from backend.ticket.models import Ticket + + +class GenerateMySQLClusterStandardizeFlowService(BaseService): + def _execute(self, data, parent_data) -> bool: + global_data = data.get_one_of_inputs("global_data") + kwargs = data.get_one_of_inputs("kwargs") + + getattr(self, kwargs.get("trans_func"))(global_data, kwargs) + return True + + @staticmethod + def generate_from_immute_domains(global_data, kwargs): + immute_domains = kwargs.get("immute_domains") + cluster_objects = Cluster.objects.filter(immute_domain__in=immute_domains) + + ticket = Ticket.objects.get(id=global_data["uid"]) + bk_biz_id = global_data["bk_biz_id"] + + Ticket.create_ticket( + ticket_type=TicketType.MYSQL_CLUSTER_STANDARDIZE, + creator=global_data["created_by"], + bk_biz_id=bk_biz_id, + remark=_("集群标准化, 关联单据: {}".format(ticket.url)), + details={ + "bk_biz_id": bk_biz_id, + "cluster_type": cluster_objects.first().cluster_type, + "cluster_ids": list(cluster_objects.values_list("id", flat=True)), + "departs": kwargs.get("departs", ALLDEPARTS), + "with_deploy_binary": kwargs.get("with_deploy_binary", True), + "with_push_config": kwargs.get("with_push_config", True), + "with_collect_sysinfo": kwargs.get("with_collect_sysinfo", 
True), + }, + ) + + +class GenerateMySQLClusterStandardizeFlowComponent(Component): + name = __name__ + code = "generate_mysql_cluster_standardize_flow" + bound_service = GenerateMySQLClusterStandardizeFlowService diff --git a/dbm-ui/backend/flow/urls.py b/dbm-ui/backend/flow/urls.py index 6b87278e8c..77f6875cb2 100644 --- a/dbm-ui/backend/flow/urls.py +++ b/dbm-ui/backend/flow/urls.py @@ -265,7 +265,6 @@ from backend.flow.views.tendb_cluster_remote_slave_recover import RemoteSlaveRecoverSceneApiView from backend.flow.views.tendb_cluster_remote_switch import RemoteSwitchSceneApiView from backend.flow.views.tendb_cluster_rollback_data import TendbClusterRollbackDataSceneApiView -from backend.flow.views.tendb_ha_standardize import TenDBHAStandardizeView from backend.flow.views.vm_apply import InstallVmSceneApiView from backend.flow.views.vm_destroy import DestroyVmSceneApiView from backend.flow.views.vm_disable import DisableVmSceneApiView @@ -511,7 +510,7 @@ url("^scene/switch_tbinlogumper$", SwitchTBinlogDumperSceneApiView.as_view()), url("^scene/enable_tbinlogumper$", EnableTBinlogDumperSceneApiView.as_view()), url("^scene/disable_tbinlogumper$", DisableTBinlogDumperSceneApiView.as_view()), - url("^scene/tendbha_standardize$", TenDBHAStandardizeView.as_view()), + # url("^scene/tendbha_standardize$", TenDBHAStandardizeView.as_view()), url("^scene/mysql_open_area$", MysqlOpenAreaSceneApiView.as_view()), # migrate url("^scene/append_deploy_ctl$", AppendDeployCTLView.as_view()), diff --git a/dbm-ui/backend/flow/utils/mysql/mysql_act_playload.py b/dbm-ui/backend/flow/utils/mysql/mysql_act_playload.py index f71ac29cb5..097e46b3ef 100644 --- a/dbm-ui/backend/flow/utils/mysql/mysql_act_playload.py +++ b/dbm-ui/backend/flow/utils/mysql/mysql_act_playload.py @@ -25,7 +25,7 @@ from backend.core.consts import BK_PKG_INSTALL_PATH from backend.core.encrypt.constants import AsymmetricCipherConfigType from backend.core.encrypt.handlers import AsymmetricHandler -from 
backend.db_meta.enums import AccessLayer, InstanceInnerRole, MachineType +from backend.db_meta.enums import InstanceInnerRole, MachineType from backend.db_meta.exceptions import DBMetaException from backend.db_meta.models import Cluster, Machine, ProxyInstance, StorageInstance, StorageInstanceTuple from backend.db_package.models import Package @@ -47,6 +47,7 @@ MysqlVersionToDBBackupForMap, ) from backend.flow.engine.bamboo.scene.common.get_real_version import get_mysql_real_version, get_spider_real_version +from backend.flow.engine.bamboo.scene.mysql.deploy_peripheraltools.departs import DeployPeripheralToolsDepart from backend.flow.engine.bamboo.scene.spider.common.exceptions import TendbGetBackupInfoFailedException from backend.flow.utils.base.bkrepo import get_bk_repo_url from backend.flow.utils.base.payload_handler import PayloadHandler @@ -105,7 +106,7 @@ def __get_version_and_charset(self, db_module_id) -> Any: )["content"] return data["charset"], data["db_version"] - def __get_mysql_rotatebinlog_config(self) -> dict: + def __get_mysql_rotatebinlog_config(self, cluster_type) -> dict: """ 远程获取rotate_binlog配置 """ @@ -116,7 +117,7 @@ def __get_mysql_rotatebinlog_config(self) -> dict: "level_value": str(self.db_module_id), "conf_file": "binlog_rotate.yaml", "conf_type": "backup", - "namespace": self.cluster_type, + "namespace": cluster_type, "format": FormatType.MAP_LEVEL, } ) @@ -748,85 +749,6 @@ def get_install_db_backup_payload(self, **kwargs) -> dict: }, } - def push_dbbackup_config_payload(self, **kwargs) -> dict: - ini = get_backup_ini_config( - bk_biz_id=self.ticket_data["bk_biz_id"], - db_module_id=self.cluster["db_module_id"], - cluster_type=self.cluster["cluster_type"], - ) - - port_domain_map = {} - cluster_id_map = {} - shard_port_map = {} # port as key - options_map = {} - - if self.cluster["machine_type"] == MachineType.SPIDER.value: - ins_list = ProxyInstance.objects.filter(machine__ip=kwargs["ip"], port__in=self.cluster["ports"]) - role = 
ins_list[0].tendbclusterspiderext.spider_role - elif self.cluster["machine_type"] in [ - MachineType.REMOTE.value, - MachineType.BACKEND.value, - MachineType.SINGLE.value, - ]: - ins_list = StorageInstance.objects.filter(machine__ip=kwargs["ip"], port__in=self.cluster["ports"]) - role = ins_list[0].instance_inner_role - else: - raise DBMetaException(message=_("不支持的机器类型: {}".format(self.cluster["machine_type"]))) - - if self.cluster["machine_type"] == MachineType.REMOTE.value: - for ins in ins_list: - if ins.instance_inner_role == InstanceInnerRole.MASTER.value: - tp = StorageInstanceTuple.objects.filter(ejector=ins).first() - else: - tp = StorageInstanceTuple.objects.get(receiver=ins) - shard_port_map[ins.port] = tp.tendbclusterstorageset.shard_id - - for instance in ins_list: - port_domain_map[instance.port] = self.cluster["immute_domain"] - cluster_id_map[instance.port] = self.cluster["cluster_id"] - - shard_port_map[instance.port] = shard_port_map.get(instance.port, 0) - options_map[instance.port] = get_backup_options_config( - bk_biz_id=self.ticket_data["bk_biz_id"], - db_module_id=self.cluster["db_module_id"], - cluster_type=self.cluster["cluster_type"], - cluster_domain=self.cluster["immute_domain"], - ) - - db_backup_pkg_type = self.cluster.get("db_backup_pkg_type", MediumEnum.DbBackup) - if self.cluster["machine_type"] != MachineType.SPIDER.value: - db_version = ins_list[0].cluster.get().major_version - db_backup_pkg_type = MysqlVersionToDBBackupForMap[db_version] - - db_backup_pkg = Package.get_latest_package( - version=MediumEnum.Latest, - pkg_type=db_backup_pkg_type, - ) - - return { - "db_type": DBActuatorTypeEnum.MySQL.value, - "action": DBActuatorActionEnum.PushNewDbBackupConfig.value, - "payload": { - "general": {"runtime_account": self.account}, - "extend": { - "pkg": db_backup_pkg.name, - "pkg_md5": db_backup_pkg.md5, - "host": kwargs["ip"], - "ports": self.cluster["ports"], - "bk_cloud_id": int(self.bk_cloud_id), - "bk_biz_id": 
int(self.ticket_data["bk_biz_id"]), - "role": role, - "configs": ini, - "options": options_map, - "cluster_address": port_domain_map, - "cluster_id": cluster_id_map, - "cluster_type": self.cluster["cluster_type"], - "exec_user": self.ticket_data["created_by"], - "shard_value": shard_port_map, - }, - }, - } - def get_import_sqlfile_payload(self, **kwargs) -> dict: """ return import sqlfile payload @@ -1329,47 +1251,6 @@ def get_install_mysql_checksum_payload(self, **kwargs) -> dict: }, } - def push_mysql_checksum_config_payload(self, **kwargs) -> dict: - """ - ToDo - 和监控一样, 安装校验的 get_install_mysql_checksum_payload - 现在也是机器级别 - """ - checksum_pkg = Package.get_latest_package(version=MediumEnum.Latest, pkg_type=MediumEnum.MySQLChecksum) - - instances_info = [] - for ins_obj in StorageInstance.objects.filter(machine__ip=kwargs["ip"], port__in=self.cluster["ports"]): - instances_info.append( - { - "bk_biz_id": self.ticket_data["bk_biz_id"], - "ip": kwargs["ip"], - "port": ins_obj.port, - "role": ins_obj.instance_inner_role, - "cluster_id": self.cluster["cluster_id"], - "immute_domain": self.cluster["immute_domain"], - "db_module_id": self.cluster["db_module_id"], - "schedule": "0 5 2 * * 1-5", - } - ) - - return { - "db_type": DBActuatorTypeEnum.MySQL.value, - "action": DBActuatorActionEnum.PushChecksumConfig.value, - "payload": { - "general": {"runtime_account": self.account}, - "extend": { - "pkg": checksum_pkg.name, - "pkg_md5": checksum_pkg.md5, - "system_dbs": SYSTEM_DBS, - "stage_db_header": STAGE_DB_HEADER, - "rollback_db_tail": ROLLBACK_DB_TAIL, - "instances_info": instances_info, - "exec_user": self.ticket_data["created_by"], - "api_url": "http://127.0.0.1:9999", # 长时间可以写死 - }, - }, - } - def get_mysql_edit_config_payload(self, **kwargs) -> dict: """ mysql 配置修改 @@ -1416,6 +1297,7 @@ def get_install_mysql_rotatebinlog_payload(self, **kwargs): # 拼接主机需要安装实例备份配置关系 ins_list = StorageInstance.objects.filter(machine__ip=kwargs["ip"]) + cluster_type = "" for instance 
in ins_list: ins = { "host": kwargs["ip"], @@ -1423,6 +1305,7 @@ def get_install_mysql_rotatebinlog_payload(self, **kwargs): "tags": {"bk_biz_id": int(self.ticket_data["bk_biz_id"])}, } cluster = instance.cluster.get() + cluster_type = cluster.cluster_type ins["tags"]["cluster_domain"] = cluster.immute_domain ins["tags"]["cluster_id"] = cluster.id ins["tags"]["db_role"] = instance.instance_inner_role @@ -1436,17 +1319,12 @@ def get_install_mysql_rotatebinlog_payload(self, **kwargs): "extend": { "pkg": mysql_rotatebinlog.name, "pkg_md5": mysql_rotatebinlog.md5, - "configs": self.__get_mysql_rotatebinlog_config(), + "configs": self.__get_mysql_rotatebinlog_config(cluster_type=cluster_type), "instances": instances, }, }, } - def push_mysql_rotatebinlog_config_payload(self, **kwargs) -> dict: - res = self.get_install_mysql_rotatebinlog_payload(**kwargs) - res["action"] = DBActuatorActionEnum.PushMySQLRotatebinlogConfig.value - return res - def get_install_dba_toolkit_payload(self, **kwargs): """ 获取安装实例dba_toolkit工具的参数 @@ -1596,11 +1474,6 @@ def get_deploy_mysql_crond_payload(self, **kwargs) -> dict: }, } - def push_mysql_crond_config_payload(self, **kwargs) -> dict: - res = self.get_deploy_mysql_crond_payload(**kwargs) - res["action"] = DBActuatorActionEnum.PushMySQLCrondConfig.value - return res - def get_deploy_mysql_monitor_payload(self, **kwargs) -> dict: """ 部署mysql/proxy/spider事件监控程序 @@ -1708,91 +1581,6 @@ def get_deploy_mysql_monitor_payload(self, **kwargs) -> dict: }, } - def push_mysql_monitor_config_payload(self, **kwargs) -> dict: - """ - ToDo - 上面的get_deploy_mysql_monitor_payload有点问题 - 是基于机器级别生成配置的 - 在迁移完成后, 实际维护中应该是基于集群级别 - 所以 push 单独实现 - 以后应该都替换成这个函数 - """ - mysql_monitor_pkg = Package.get_latest_package(version=MediumEnum.Latest, pkg_type=MediumEnum.MySQLMonitor) - - instances_info = [] - - config_items = DBConfigApi.query_conf_item( - { - "bk_biz_id": "{}".format(self.ticket_data["bk_biz_id"]), - "level_name": "cluster", - "level_value": "act3", - 
"conf_file": "items-config.yaml", - "conf_type": "mysql_monitor", - "namespace": "tendbha", - "level_info": {"module": "act"}, - "format": "map", - } - ) - logger.info("config_items: {}".format(config_items)) - - # instance_info = { - # "bk_biz_id": self.ticket_data["bk_biz_id"], - # "ip": kwargs["ip"], - # "cluster_id": self.cluster["cluster_id"], - # "immute_domain": self.cluster["immute_domain"], - # "items_config": config_items["content"], - # } - if self.cluster["access_layer"] == AccessLayer.PROXY.value: - for ins_obj in ProxyInstance.objects.filter(machine__ip=kwargs["ip"], port__in=self.cluster["ports"]): - instance_info = { - "bk_biz_id": self.ticket_data["bk_biz_id"], - "ip": kwargs["ip"], - "cluster_id": self.cluster["cluster_id"], - "immute_domain": self.cluster["immute_domain"], - "items_config": config_items["content"], - "port": ins_obj.port, - "bk_instance_id": ins_obj.bk_instance_id, - "db_module_id": ins_obj.db_module_id, - } - - if self.cluster["machine_type"] == MachineType.SPIDER.value: - instance_info["role"] = ins_obj.tendbclusterspiderext.spider_role - - instances_info.append(instance_info) - else: - for ins_obj in StorageInstance.objects.filter(machine__ip=kwargs["ip"], port__in=self.cluster["ports"]): - instance_info = { - "bk_biz_id": self.ticket_data["bk_biz_id"], - "ip": kwargs["ip"], - "cluster_id": self.cluster["cluster_id"], - "immute_domain": self.cluster["immute_domain"], - "items_config": config_items["content"], - "port": ins_obj.port, - "bk_instance_id": ins_obj.bk_instance_id, - "db_module_id": ins_obj.db_module_id, - "role": ins_obj.instance_inner_role, - } - - instances_info.append(instance_info) - - return { - "db_type": DBActuatorTypeEnum.MySQL.value, - "action": DBActuatorActionEnum.PushMySQLMonitorConfig.value, - "payload": { - "general": {"runtime_account": {**self.account, **self.proxy_account}}, - "extend": { - "pkg": mysql_monitor_pkg.name, - "pkg_md5": mysql_monitor_pkg.md5, - "system_dbs": SYSTEM_DBS, - "exec_user": 
self.ticket_data["created_by"], - "api_url": "http://127.0.0.1:9999", - "machine_type": self.cluster["machine_type"], - "bk_cloud_id": int(self.bk_cloud_id), - "instances_info": instances_info, - }, - }, - } - def get_grant_repl_for_ctl_payload(self, **kwargs) -> dict: """ 针对spider中控集群部署场景(一主多从,基于GTID) @@ -2467,38 +2255,6 @@ def get_mysql_upgrade_payload(self, **kwargs) -> dict: }, } - def get_standardize_mysql_instance_payload(self, **kwargs): - # 这个包其实没有用, 所以只要传包名, 不需要下发 - # 是因为复用了 mysql install actor 需要包名做条件分支 - # self.mysql_pkg = Package.get_latest_package(version=db_version, pkg_type=MediumEnum.MySQL) - drs_account, dbha_account = self.get_super_account() - return { - "db_type": DBActuatorTypeEnum.MySQL.value, - "action": DBActuatorActionEnum.StandardizeMySQLInstance.value, - "payload": { - "general": {"runtime_account": self.account}, - "extend": { - "pkg": self.cluster["mysql_pkg"]["name"], - "pkg_md5": self.cluster["mysql_pkg"]["md5"], - "ip": kwargs["ip"], - "ports": self.cluster["ports"], - "mysql_version": self.cluster["version"], - "super_account": drs_account, - "dbha_account": dbha_account, - "webconsolers_account": self.get_webconsolers_account(), - "partition_yw_account": self.get_partition_yw_account(), - }, - }, - } - - @staticmethod - def get_standardize_tendbha_proxy_payload(**kwargs): - return { - "db_type": DBActuatorTypeEnum.MySQL.value, - "action": DBActuatorActionEnum.StandardizeTenDBHAProxy.value, - "payload": {"general": {}, "extend": {}}, # {"runtime_account": self.account}, - } - def get_data_migrate_dump_payload(self, **kwargs): """ 数据迁移导出库表结构与数据 @@ -2823,3 +2579,336 @@ def mysql_change_server_id(self, **kwargs): }, } return payload + + def push_exporter_cnf(self, **kwargs): + payload = { + "db_type": DBActuatorTypeEnum.MySQL.value, + "action": DBActuatorActionEnum.PushExporterCnf.value, + "payload": { + "general": {"runtime_account": self.account}, + "extend": { + "ip": kwargs["ip"], + "port_list": self.cluster["port_list"], + 
"machine_type": self.cluster["machine_type"], + }, + }, + } + return payload + + def prepare_peripheraltools_binary(self, **kwargs): + """ + 不要随意调用, 输入有依赖处理 + """ + departs = self.cluster["departs"] + machine_type = self.cluster["machine_type"] + ip = kwargs["ip"] + + depart_pkgs = {} + # 调用方已经决定了组件包含备份时, machine_type 肯定不是 proxy + + if DeployPeripheralToolsDepart.MySQLDBBackup in departs: + departs.remove(DeployPeripheralToolsDepart.MySQLDBBackup) + if machine_type == MachineType.SPIDER: + dbbackup_pkg_type = MediumEnum.DbBackup + else: + db_version = Cluster.objects.filter(storageinstance__machine__ip=ip).first().major_version + dbbackup_pkg_type = MysqlVersionToDBBackupForMap[db_version] + + dbbackup_pkg = Package.get_latest_package(version=MediumEnum.Latest, pkg_type=dbbackup_pkg_type) + depart_pkgs[DeployPeripheralToolsDepart.MySQLDBBackup] = { + "pkg": dbbackup_pkg.name, + "pkg_md5": dbbackup_pkg.md5, + } + + for depart in departs: + pkg = Package.get_latest_package(version=MediumEnum.Latest, pkg_type=depart) + depart_pkgs[depart] = { + "pkg": pkg.name, + "pkg_md5": pkg.md5, + } + + payload = { + "db_type": DBActuatorTypeEnum.MySQL.value, + "action": DBActuatorActionEnum.PreparePeripheraltoolsBinary.value, + "payload": { + "general": {"runtime_account": self.account}, + "extend": { + # "ip": kwargs["ip"], + "departs": depart_pkgs, + }, + }, + } + return payload + + def standardize_proxy(self, **kwargs) -> dict: + _, dbha_account = self.get_super_account() + return { + "db_type": DBActuatorTypeEnum.Proxy.value, + "action": DBActuatorActionEnum.StandardizeTenDBHAProxy.value, + "payload": { + "general": { + "runtime_account": self.proxy_account, + }, + "extend": { + "dbha_account": dbha_account["user"], + "port_list": self.cluster["port_list"], + "ip": kwargs["ip"], + }, + }, + } + + def standardize_mysql(self, **kwargs) -> dict: + ip = kwargs["ip"] + machine_type = self.cluster["machine_type"] + + if machine_type == MachineType.SPIDER: + major_version = 
Cluster.objects.filter(proxyinstance__machine__ip=ip).first().major_version + else: + major_version = Cluster.objects.filter(storageinstance__machine__ip=ip).first().major_version + + # 这个包其实没有用, 所以只要传包名, 不需要下发 + # 是因为复用了 mysql install actor 需要包名做条件分支 + pkg = Package.get_latest_package(version=major_version, pkg_type=MediumEnum.MySQL) + drs_account, dbha_account = self.get_super_account() + return { + "db_type": DBActuatorTypeEnum.MySQL.value, + "action": DBActuatorActionEnum.StandardizeMySQLInstance.value, + "payload": { + "general": {"runtime_account": self.account}, + "extend": { + "pkg": pkg.name, + "pkg_md5": pkg.md5, + "ip": kwargs["ip"], + "ports": self.cluster["port_list"], + "mysql_version": major_version, + "super_account": drs_account, + "dbha_account": dbha_account, + "webconsolers_account": self.get_webconsolers_account(), + "partition_yw_account": self.get_partition_yw_account(), + }, + }, + } + + def push_mysql_crond_config(self, **kwargs) -> dict: + """ + dup: get_deploy_mysql_crond_payload + """ + mysql_crond_pkg = Package.get_latest_package(version=MediumEnum.Latest, pkg_type=MediumEnum.MySQLCrond) + + bkm_dbm_report = SystemSettings.get_setting_value(key="BKM_DBM_REPORT") + event_data_id = bkm_dbm_report["event"]["data_id"] + event_data_token = bkm_dbm_report["event"]["token"] + metrics_data_id = bkm_dbm_report["metric"]["data_id"] + metrics_data_token = bkm_dbm_report["metric"]["token"] + + return { + "db_type": DBActuatorTypeEnum.MySQL.value, + "action": DBActuatorActionEnum.PushMySQLCrondConfig.value, + "payload": { + "general": {"runtime_account": self.account}, + "extend": { + "pkg": mysql_crond_pkg.name, + "pkg_md5": mysql_crond_pkg.md5, + "ip": kwargs["ip"], + "bk_cloud_id": int(self.bk_cloud_id), + "event_data_id": int(event_data_id), + "event_data_token": event_data_token, + "metrics_data_id": int(metrics_data_id), + "metrics_data_token": metrics_data_token, + "beat_path": env.MYSQL_CROND_BEAT_PATH, + "agent_address": 
env.MYSQL_CROND_AGENT_ADDRESS, + "bk_biz_id": int(self.cluster["bk_biz_id"]), + "nginx_addrs": list_nginx_addrs(bk_cloud_id=self.bk_cloud_id), + }, + }, + } + + def push_mysql_monitor_config(self, **kwargs) -> dict: + """ + dup: get_deploy_mysql_monitor_payload + """ + ip = kwargs["ip"] + bk_biz_id = self.cluster["bk_biz_id"] + immute_domain = self.cluster["immute_domain"] + port_list = self.cluster["port_list"] + machine_type = self.cluster["machine_type"] + db_module_id = self.cluster["db_module_id"] + cluster_id = self.cluster["cluster_id"] + + port_bk_instance_list = [] + if machine_type in [MachineType.PROXY, MachineType.SPIDER]: + ins_list = ProxyInstance.objects.filter(machine__ip=ip, port__in=port_list) + role = "" # ToDo spider role + else: + ins_list = StorageInstance.objects.filter(machine__ip=ip, port__in=port_list) + role = ins_list[0].instance_inner_role + + for ins in ins_list: + port_bk_instance_list.append( + { + "port": ins.port, + "bk_instance_id": ins.bk_instance_id, + } + ) + + cluster_items_config = DBConfigApi.query_conf_item( + { + "bk_biz_id": f"{bk_biz_id}", + "level_name": "cluster", + "level_value": immute_domain, # 集群域名 + "conf_file": "items-config.yaml", + "conf_type": "mysql_monitor", + "namespace": "tendbha", # 现在TenDBSingle, TenDBHA, TenDBCluster 监控配置公用的这个 + "level_info": {"module": f"{db_module_id}"}, # module id + "format": "map", + } + ) + + return { + "db_type": DBActuatorTypeEnum.MySQL.value, + "action": DBActuatorActionEnum.PushMySQLMonitorConfig.value, + "payload": { + "general": {"runtime_account": {**self.account, **self.proxy_account}}, + "extend": { + "system_dbs": SYSTEM_DBS, + "exec_user": self.ticket_data["created_by"], + "api_url": "http://127.0.0.1:9999", + "machine_type": machine_type, + "bk_cloud_id": int(self.bk_cloud_id), + "bk_biz_id": int(bk_biz_id), + "port_bk_instance_list": port_bk_instance_list, + "ip": ip, + "immute_domain": immute_domain, + "db_module_id": db_module_id, + "role": role, + "cluster_id": 
cluster_id, + "items_config": cluster_items_config["content"], + }, + }, + } + + def push_mysql_dbbackup_config(self, **kwargs) -> dict: + """ + dup: get_install_db_backup_payload + """ + ip = kwargs["ip"] + bk_biz_id = self.cluster["bk_biz_id"] + immute_domain = self.cluster["immute_domain"] + port_list = self.cluster["port_list"] + machine_type = self.cluster["machine_type"] + cluster_type = self.cluster["cluster_type"] + db_module_id = self.cluster["db_module_id"] + cluster_id = self.cluster["cluster_id"] + + ini = get_backup_ini_config( + bk_biz_id=bk_biz_id, + db_module_id=db_module_id, + cluster_type=cluster_type, + ) + + backup_options = get_backup_options_config( + bk_biz_id=bk_biz_id, db_module_id=db_module_id, cluster_type=cluster_type, cluster_domain=immute_domain + ) + + # 获取实例列表和决定角色信息 + if machine_type == MachineType.SPIDER.value: + ins_list = ProxyInstance.objects.filter(machine__ip=ip, port__in=port_list) + role = ins_list[0].tendbclusterspiderext.spider_role + elif machine_type in [MachineType.REMOTE.value, MachineType.BACKEND.value, MachineType.SINGLE.value]: + ins_list = StorageInstance.objects.filter(machine__ip=ip, port__in=port_list) + role = ins_list[0].instance_inner_role + else: + raise DBMetaException(message=_("不支持的机器类型: {}".format(machine_type))) + + port_shard_map = {} # port as key, TenDBCluster 端口-分片信息 + for ins in ins_list: + port_shard_map[ins.port] = 0 # 非 TenDBCluster 默认分片 0 + if machine_type == MachineType.REMOTE: + if ins.instance_inner_role == InstanceInnerRole.MASTER: + tp = StorageInstanceTuple.objects.filter(ejector=ins).first() + else: + tp = StorageInstanceTuple.objects.get(receiver=ins) + port_shard_map[ins.port] = tp.tendbclusterstorageset.shard_id + + return { + "db_type": DBActuatorTypeEnum.MySQL.value, + "action": DBActuatorActionEnum.PushNewDbBackupConfig.value, + "payload": { + "general": {"runtime_account": self.account}, + "extend": { + "configs": ini, + "options": backup_options, + "host": ip, + "ports": 
port_list, + "role": role, + "cluster_type": cluster_type, + "bk_biz_id": int(bk_biz_id), + "bk_cloud_id": int(self.bk_cloud_id), + "immute_domain": immute_domain, + "cluster_id": cluster_id, + "shard_value": port_shard_map, + "exec_user": self.ticket_data["created_by"], + }, + }, + } + + def push_mysql_rotatebinlog_config(self, **kwargs) -> dict: + """ + dup: get_install_mysql_rotatebinlog_payload + """ + ip = kwargs["ip"] + port_list = self.cluster["port_list"] + bk_biz_id = self.cluster["bk_biz_id"] + immute_domain = self.cluster["immute_domain"] + ins = StorageInstance.objects.filter(machine__ip=ip, port__in=port_list).first() + + return { + "db_type": DBActuatorTypeEnum.MySQL.value, + "action": DBActuatorActionEnum.PushMySQLRotatebinlogConfig.value, + "payload": { + "general": {"runtime_account": self.account}, + "extend": { + "configs": self.__get_mysql_rotatebinlog_config(self.cluster["cluster_type"]), + "ip": kwargs["ip"], + "port_list": self.cluster["port_list"], + "role": ins.instance_inner_role, + "bk_biz_id": int(bk_biz_id), + "cluster_domain": immute_domain, + "cluster_id": ins.cluster.first().pk, + "exec_user": self.ticket_data["created_by"], + }, + }, + } + + def push_mysql_table_checksum_config(self, **kwargs) -> dict: + """ + dup: get_install_mysql_checksum_payload + """ + bk_biz_id = self.cluster["bk_biz_id"] + ip = kwargs["ip"] + port_list = self.cluster["port_list"] + immute_domain = self.cluster["immute_domain"] + ins = StorageInstance.objects.filter(machine__ip=ip, port__in=port_list).first() + + return { + "db_type": DBActuatorTypeEnum.MySQL.value, + "action": DBActuatorActionEnum.PushChecksumConfig.value, + "payload": { + "general": {"runtime_account": self.account}, + "extend": { + "bk_biz_id": bk_biz_id, + "ip": ip, + "port_list": port_list, + "role": ins.instance_inner_role, + "cluster_id": ins.cluster.first().pk, + "immute_domain": immute_domain, + "db_module_id": ins.cluster.first().db_module_id, + "schedule": "0 5 2 * * 1-5", + 
"system_dbs": SYSTEM_DBS, + "stage_db_header": STAGE_DB_HEADER, + "rollback_db_tail": ROLLBACK_DB_TAIL, + "exec_user": self.ticket_data["created_by"], + "api_url": "http://127.0.0.1:9999", # 长时间可以写死 + }, + }, + } diff --git a/dbm-ui/backend/flow/views/spider_cluster_standardize.py b/dbm-ui/backend/flow/views/spider_cluster_standardize.py deleted file mode 100644 index 51d9335bfe..0000000000 --- a/dbm-ui/backend/flow/views/spider_cluster_standardize.py +++ /dev/null @@ -1,38 +0,0 @@ -# -*- coding: utf-8 -*- -""" -TencentBlueKing is pleased to support the open source community by making 蓝鲸智云-DB管理系统(BlueKing-BK-DBM) available. -Copyright (C) 2017-2023 THL A29 Limited, a Tencent company. All rights reserved. -Licensed under the MIT License (the "License"); you may not use this file except in compliance with the License. -You may obtain a copy of the License at https://opensource.org/licenses/MIT -Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on -an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the -specific language governing permissions and limitations under the License. 
-""" -import logging - -from django.utils.translation import ugettext as _ -from rest_framework.response import Response - -from backend.flow.engine.controller.spider import SpiderController -from backend.flow.views.base import FlowTestView -from backend.utils.basic import generate_root_id - -logger = logging.getLogger("root") - - -class TenDBClusterStandardizeView(FlowTestView): - """ - api: /apis/v1/flow/scene/tendbcluster_standardize - """ - - @staticmethod - def post(request): - logger.info(_("开始TenDBCluster标准化")) - - root_id = generate_root_id() - logger.info("define root_id: {}".format(root_id)) - - c = SpiderController(root_id=root_id, ticket_data=request.data) - c.tendbcluster_standardize_scene() - - return Response({"root_id": root_id}) diff --git a/dbm-ui/backend/flow/views/tendb_ha_standardize.py b/dbm-ui/backend/flow/views/tendb_ha_standardize.py deleted file mode 100644 index 655a672b65..0000000000 --- a/dbm-ui/backend/flow/views/tendb_ha_standardize.py +++ /dev/null @@ -1,38 +0,0 @@ -# -*- coding: utf-8 -*- -""" -TencentBlueKing is pleased to support the open source community by making 蓝鲸智云-DB管理系统(BlueKing-BK-DBM) available. -Copyright (C) 2017-2023 THL A29 Limited, a Tencent company. All rights reserved. -Licensed under the MIT License (the "License"); you may not use this file except in compliance with the License. -You may obtain a copy of the License at https://opensource.org/licenses/MIT -Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on -an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the -specific language governing permissions and limitations under the License. 
-""" -import logging - -from django.utils.translation import ugettext as _ -from rest_framework.response import Response - -from backend.flow.engine.controller.mysql import MySQLController -from backend.flow.views.base import FlowTestView -from backend.utils.basic import generate_root_id - -logger = logging.getLogger("root") - - -class TenDBHAStandardizeView(FlowTestView): - """ - api: /apis/v1/flow/scene/tendbha_standardize - """ - - @staticmethod - def post(request): - logger.info(_("开始TenDBHA标准化")) - - root_id = generate_root_id() - logger.info("define root_id: {}".format(root_id)) - - c = MySQLController(root_id=root_id, ticket_data=request.data) - c.mysql_ha_standardize_scene() - - return Response({"root_id": root_id}) diff --git a/dbm-ui/backend/ticket/builders/mysql/mysql_cluster_standardize.py b/dbm-ui/backend/ticket/builders/mysql/mysql_cluster_standardize.py new file mode 100644 index 0000000000..5a64fabed6 --- /dev/null +++ b/dbm-ui/backend/ticket/builders/mysql/mysql_cluster_standardize.py @@ -0,0 +1,46 @@ +# -*- coding: utf-8 -*- +""" +TencentBlueKing is pleased to support the open source community by making 蓝鲸智云-DB管理系统(BlueKing-BK-DBM) available. +Copyright (C) 2017-2023 THL A29 Limited, a Tencent company. All rights reserved. +Licensed under the MIT License (the "License"); you may not use this file except in compliance with the License. +You may obtain a copy of the License at https://opensource.org/licenses/MIT +Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on +an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the +specific language governing permissions and limitations under the License. 
+""" +from django.utils.translation import ugettext_lazy as _ +from rest_framework import serializers + +from backend.configuration.constants import DBType +from backend.db_meta.enums import ClusterType +from backend.flow.engine.bamboo.scene.mysql.deploy_peripheraltools.departs import DeployPeripheralToolsDepart +from backend.flow.engine.controller.mysql import MySQLController +from backend.ticket import builders +from backend.ticket.builders import TicketFlowBuilder +from backend.ticket.builders.mysql.base import MySQLBaseOperateDetailSerializer +from backend.ticket.constants import FlowRetryType, TicketType + + +class MySQLClusterStandardizeDetailSerializer(MySQLBaseOperateDetailSerializer): + bk_biz_id = serializers.IntegerField(help_text=_("业务ID")) + cluster_type = serializers.ChoiceField(choices=ClusterType.get_choices()) + cluster_ids = serializers.ListField(child=serializers.IntegerField()) + departs = serializers.ListField(child=serializers.ChoiceField(choices=DeployPeripheralToolsDepart.get_choices())) + with_deploy_binary = serializers.BooleanField() + with_deploy_config = serializers.BooleanField() + with_collect_sysinfo = serializers.BooleanField() + + +class MySQLClusterStandardizeFlowParamBuilder(builders.FlowParamBuilder): + controller = MySQLController.cluster_standardize + + +@builders.BuilderFactory.register(TicketType.MYSQL_CLUSTER_STANDARDIZE) +class MySQLClusterStandardizeFlowBuilder(TicketFlowBuilder): + default_need_itsm = False + default_need_manual_confirm = False + serializer = MySQLClusterStandardizeDetailSerializer + inner_flow_builder = MySQLClusterStandardizeFlowParamBuilder + inner_flow_name = _("MySQL集群标准化") + retry_type = FlowRetryType.MANUAL_RETRY + group = DBType.MySQL diff --git a/dbm-ui/backend/ticket/builders/mysql/mysql_ha_standardize.py b/dbm-ui/backend/ticket/builders/mysql/mysql_ha_standardize.py deleted file mode 100644 index 54479d298a..0000000000 --- a/dbm-ui/backend/ticket/builders/mysql/mysql_ha_standardize.py +++ 
/dev/null @@ -1,75 +0,0 @@ -# -*- coding: utf-8 -*- -""" -TencentBlueKing is pleased to support the open source community by making 蓝鲸智云-DB管理系统(BlueKing-BK-DBM) available. -Copyright (C) 2017-2023 THL A29 Limited, a Tencent company. All rights reserved. -Licensed under the MIT License (the "License"); you may not use this file except in compliance with the License. -You may obtain a copy of the License at https://opensource.org/licenses/MIT -Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on -an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the -specific language governing permissions and limitations under the License. -""" - - -from django.utils.translation import ugettext_lazy as _ -from rest_framework import serializers - -from backend.db_meta.enums import ClusterType, InstanceInnerRole -from backend.db_meta.models import AppCache, Cluster -from backend.flow.engine.controller.mysql import MySQLController -from backend.ticket import builders -from backend.ticket.builders.mysql.base import BaseMySQLHATicketFlowBuilder, MySQLBaseOperateDetailSerializer -from backend.ticket.constants import FlowRetryType, TicketType - - -class TenDBHAStandardizeDetailSerializer(MySQLBaseOperateDetailSerializer): - class HAStandardizeDetailSerializer(serializers.Serializer): - cluster_ids = serializers.ListField(help_text=_("集群ID列表")) - - bk_biz_id = serializers.IntegerField(help_text=_("业务ID")) - infos = HAStandardizeDetailSerializer(help_text=_("标准化信息")) - - def validate(self, attrs): - self.__validate_clusters(attrs=attrs) - return attrs - - def __validate_clusters(self, attrs): - AppCache.objects.get(bk_biz_id=attrs["bk_biz_id"]) - - for cluster_obj in Cluster.objects.filter(pk__in=attrs["infos"]["cluster_ids"]).all(): - if cluster_obj.cluster_type != ClusterType.TenDBHA.value: - raise serializers.ValidationError( - _("{} 不是 {} 
集群".format(cluster_obj.immute_domain, ClusterType.TenDBHA.value)) - ) - - self.__validate_cluster_proxy(cluster_obj=cluster_obj, attrs=attrs) - self.__validate_cluster_master_storage(cluster_obj=cluster_obj, attrs=attrs) - self.__validate_cluster_slave_storage(cluster_obj=cluster_obj, attrs=attrs) - - @staticmethod - def __validate_cluster_proxy(cluster_obj: Cluster, attrs): - if cluster_obj.proxyinstance_set.count() < 2: - raise serializers.ValidationError(_("{} proxy 数量异常".format(cluster_obj.immute_domain))) - - @staticmethod - def __validate_cluster_master_storage(cluster_obj: Cluster, attrs): - if cluster_obj.storageinstance_set.filter(instance_inner_role=InstanceInnerRole.MASTER.value).count() != 1: - raise serializers.ValidationError(_("{} 存储 master 数量异常".format(cluster_obj.immute_domain))) - - @staticmethod - def __validate_cluster_slave_storage(cluster_obj: Cluster, attrs): - if cluster_obj.storageinstance_set.filter(instance_inner_role=InstanceInnerRole.SLAVE.value).count() < 1: - raise serializers.ValidationError(_("{} 存储 slave 数量异常".format(cluster_obj.immute_domain))) - - -class MysqlHaStandardizeFlowParamBuilder(builders.FlowParamBuilder): - controller = MySQLController.mysql_ha_standardize_scene - - -@builders.BuilderFactory.register(TicketType.MYSQL_HA_STANDARDIZE) -class MysqlStandardizeFlowBuilder(BaseMySQLHATicketFlowBuilder): - """Mysql下架流程的构建基类""" - - serializer = TenDBHAStandardizeDetailSerializer - inner_flow_builder = MysqlHaStandardizeFlowParamBuilder - inner_flow_name = _("MySQL高可用标准化") - retry_type = FlowRetryType.MANUAL_RETRY diff --git a/dbm-ui/backend/ticket/builders/mysql/mysql_push_peripheral_config.py b/dbm-ui/backend/ticket/builders/mysql/mysql_push_peripheral_config.py deleted file mode 100644 index c5250b1e16..0000000000 --- a/dbm-ui/backend/ticket/builders/mysql/mysql_push_peripheral_config.py +++ /dev/null @@ -1,40 +0,0 @@ -# -*- coding: utf-8 -*- -""" -TencentBlueKing is pleased to support the open source community by making 
蓝鲸智云-DB管理系统(BlueKing-BK-DBM) available. -Copyright (C) 2017-2023 THL A29 Limited, a Tencent company. All rights reserved. -Licensed under the MIT License (the "License"); you may not use this file except in compliance with the License. -You may obtain a copy of the License at https://opensource.org/licenses/MIT -Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on -an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the -specific language governing permissions and limitations under the License. -""" -from django.utils.translation import ugettext_lazy as _ -from rest_framework import serializers - -from backend.flow.engine.controller.mysql import MySQLController -from backend.ticket import builders -from backend.ticket.builders.mysql.base import BaseMySQLTicketFlowBuilder, MySQLBaseOperateDetailSerializer -from backend.ticket.constants import FlowRetryType, TicketType - - -class MySQLPushPeripheralConfigSerializer(MySQLBaseOperateDetailSerializer): - class PushPeripheralConfigInfoSerializer(MySQLBaseOperateDetailSerializer): - cluster_ids = serializers.ListField(help_text=_("集群ID列表")) - - bk_biz_id = serializers.IntegerField(help_text=_("业务ID")) - infos = PushPeripheralConfigInfoSerializer(help_text=_("单据输入")) - - def validate(self, attrs): - return attrs - - -class MySQLPushPeripheralConfigFlowParamBuilder(builders.FlowParamBuilder): - controller = MySQLController.push_peripheral_config_scene - - -@builders.BuilderFactory.register(TicketType.MYSQL_PUSH_PERIPHERAL_CONFIG) -class MySQLPushPeripheralConfigFlowBuilder(BaseMySQLTicketFlowBuilder): - serializer = MySQLPushPeripheralConfigSerializer - inner_flow_builder = MySQLPushPeripheralConfigFlowParamBuilder - inner_flow_name = _("下发周边配置") - retry_type = FlowRetryType.MANUAL_RETRY diff --git a/dbm-ui/backend/ticket/builders/spider/mysql_spider_standardize.py 
b/dbm-ui/backend/ticket/builders/spider/mysql_spider_standardize.py deleted file mode 100644 index 990701d1eb..0000000000 --- a/dbm-ui/backend/ticket/builders/spider/mysql_spider_standardize.py +++ /dev/null @@ -1,74 +0,0 @@ -# -*- coding: utf-8 -*- -""" -TencentBlueKing is pleased to support the open source community by making 蓝鲸智云-DB管理系统(BlueKing-BK-DBM) available. -Copyright (C) 2017-2023 THL A29 Limited, a Tencent company. All rights reserved. -Licensed under the MIT License (the "License"); you may not use this file except in compliance with the License. -You may obtain a copy of the License at https://opensource.org/licenses/MIT -Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on -an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the -specific language governing permissions and limitations under the License. -""" - - -from django.utils.translation import ugettext_lazy as _ -from rest_framework import serializers - -from backend.db_meta.enums import ClusterType -from backend.db_meta.models import AppCache, Cluster -from backend.flow.engine.controller.spider import SpiderController -from backend.ticket import builders -from backend.ticket.builders.tendbcluster.base import BaseTendbTicketFlowBuilder, TendbBaseOperateDetailSerializer -from backend.ticket.constants import FlowRetryType, TicketType - - -class TenDBClusterStandardizeDetailSerializer(TendbBaseOperateDetailSerializer): - class InnerDetailSerializer(serializers.Serializer): - cluster_ids = serializers.ListField(help_text=_("集群ID列表")) - - bk_biz_id = serializers.IntegerField(help_text=_("业务ID")) - infos = InnerDetailSerializer(help_text=_("标准化信息")) - - def validate(self, attrs): - self.__validate_clusters(attrs=attrs) - return attrs - - def __validate_clusters(self, attrs): - AppCache.objects.get(bk_biz_id=attrs["bk_biz_id"]) - - for cluster_obj in 
Cluster.objects.filter(pk__in=attrs["infos"]["cluster_ids"]).all(): - if cluster_obj.cluster_type != ClusterType.TenDBCluster.value: - raise serializers.ValidationError( - _("{} 不是 {} 集群".format(cluster_obj.immute_domain, ClusterType.TenDBCluster.value)) - ) - - self.__validate_cluster_proxy(cluster_obj=cluster_obj, attrs=attrs) - - @staticmethod - def __validate_cluster_proxy(cluster_obj: Cluster, attrs): - if cluster_obj.proxyinstance_set.count() < 2: - raise serializers.ValidationError(_("{} proxy 数量异常".format(cluster_obj.immute_domain))) - - # - # @staticmethod - # def __validate_cluster_master_storage(cluster_obj: Cluster, attrs): - # if cluster_obj.storageinstance_set.filter(instance_inner_role=InstanceInnerRole.MASTER.value).count() < 1: - # raise serializers.ValidationError(_("{} 存储 master 数量异常".format(cluster_obj.immute_domain))) - # - # @staticmethod - # def __validate_cluster_slave_storage(cluster_obj: Cluster, attrs): - # if cluster_obj.storageinstance_set.filter(instance_inner_role=InstanceInnerRole.SLAVE.value).count() < 1: - # raise serializers.ValidationError(_("{} 存储 slave 数量异常".format(cluster_obj.immute_domain))) - - -class TenDBClusterStandardizeFlowParamBuilder(builders.FlowParamBuilder): - controller = SpiderController.tendbcluster_standardize_scene - - -@builders.BuilderFactory.register(TicketType.TENDBCLUSTER_STANDARDIZE) -class TenDBClusterStandardizeFlowBuilder(BaseTendbTicketFlowBuilder): - """Mysql下架流程的构建基类""" - - serializer = TenDBClusterStandardizeDetailSerializer - inner_flow_builder = TenDBClusterStandardizeFlowParamBuilder - inner_flow_name = _("TendbCluster 高可用标准化") - retry_type = FlowRetryType.MANUAL_RETRY diff --git a/dbm-ui/backend/ticket/builders/tendbsingle/standardize.py b/dbm-ui/backend/ticket/builders/tendbsingle/standardize.py deleted file mode 100644 index e2b5d35f1b..0000000000 --- a/dbm-ui/backend/ticket/builders/tendbsingle/standardize.py +++ /dev/null @@ -1,53 +0,0 @@ -# -*- coding: utf-8 -*- -""" -TencentBlueKing is 
pleased to support the open source community by making 蓝鲸智云-DB管理系统(BlueKing-BK-DBM) available. -Copyright (C) 2017-2023 THL A29 Limited, a Tencent company. All rights reserved. -Licensed under the MIT License (the "License"); you may not use this file except in compliance with the License. -You may obtain a copy of the License at https://opensource.org/licenses/MIT -Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on -an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the -specific language governing permissions and limitations under the License. -""" -from django.utils.translation import ugettext_lazy as _ -from rest_framework import serializers - -from backend.db_meta.enums import ClusterType -from backend.db_meta.models import AppCache, Cluster -from backend.flow.engine.controller.tendbsingle import TenDBSingleController -from backend.ticket import builders -from backend.ticket.builders.mysql.base import BaseMySQLTicketFlowBuilder, MySQLBaseOperateDetailSerializer -from backend.ticket.constants import FlowRetryType, TicketType - - -class TenDBSingleStandardizeDetailSerializer(MySQLBaseOperateDetailSerializer): - class InnerDetailSerializer(serializers.Serializer): - cluster_ids = serializers.ListField(help_text=_("集群ID列表")) - - bk_biz_id = serializers.IntegerField(help_text=_("业务ID")) - infos = InnerDetailSerializer(help_text=_("标准化信息")) - - def validate(self, attrs): - self.__validate_clusters(attrs=attrs) - return attrs - - @staticmethod - def __validate_clusters(attrs): - AppCache.objects.get(bk_biz_id=attrs["bk_biz_id"]) - - for cluster_obj in Cluster.objects.filter(pk__in=attrs["infos"]["cluster_ids"]).all(): - if cluster_obj.cluster_type != ClusterType.TenDBSingle.value: - raise serializers.ValidationError( - _("{} 不是 {} 集群".format(cluster_obj.immute_domain, ClusterType.TenDBSingle.value)) - ) - - -class 
TenDBSingleStandardizeFlowParamBuilder(builders.FlowParamBuilder): - controller = TenDBSingleController.standardize_scene - - -@builders.BuilderFactory.register(TicketType.TENDBSINGLE_STANDARDIZE) -class TenDBSingleStandardizeFlowBuilder(BaseMySQLTicketFlowBuilder): - serializer = TenDBSingleStandardizeDetailSerializer - inner_flow_builder = TenDBSingleStandardizeFlowParamBuilder - inner_flow_name = _("TenDB Single 标准化") - retry_type = FlowRetryType.MANUAL_RETRY diff --git a/dbm-ui/backend/ticket/constants.py b/dbm-ui/backend/ticket/constants.py index 666b2cd451..3a9740d930 100644 --- a/dbm-ui/backend/ticket/constants.py +++ b/dbm-ui/backend/ticket/constants.py @@ -255,7 +255,6 @@ def get_approve_mode_by_ticket(cls, ticket_type): MYSQL_SINGLE_TRUNCATE_DATA = TicketEnumField("MYSQL_SINGLE_TRUNCATE_DATA", _("MySQL 单节点清档"), _("数据处理")) MYSQL_SINGLE_RENAME_DATABASE = TicketEnumField("MYSQL_SINGLE_RENAME_DATABASE", _("MySQL 单节点DB重命名"), _("集群维护")) # noqa - MYSQL_HA_STANDARDIZE = TicketEnumField("MYSQL_HA_STANDARDIZE", _("TendbHA 标准化"), register_iam=False) MYSQL_HA_METADATA_IMPORT = TicketEnumField("MYSQL_HA_METADATA_IMPORT", _("TendbHA 元数据导入"), register_iam=False) MYSQL_OPEN_AREA = TicketEnumField("MYSQL_OPEN_AREA", _("MySQL 开区"), _("克隆开区"), register_iam=False) MYSQL_DATA_MIGRATE = TicketEnumField("MYSQL_DATA_MIGRATE", _("MySQL DB克隆"), _("数据处理")) @@ -272,6 +271,7 @@ def get_approve_mode_by_ticket(cls, ticket_type): register_iam=False) MYSQL_ACCOUNT_RULE_CHANGE = TicketEnumField("MYSQL_ACCOUNT_RULE_CHANGE", _("MySQL 授权规则变更"), register_iam=False) + MYSQL_CLUSTER_STANDARDIZE = TicketEnumField("MYSQL_CLUSTER_STANDARDIZE", _("MySQL 集群标准化"), register_iam=False) # SPIDER(TenDB Cluster) TENDBCLUSTER_OPEN_AREA = TicketEnumField("TENDBCLUSTER_OPEN_AREA", _("TenDB Cluster 开区"), _("克隆开区"), @@ -336,16 +336,12 @@ def get_approve_mode_by_ticket(cls, ticket_type): _("权限管理")) TENDBCLUSTER_EXCEL_AUTHORIZE_RULES = TicketEnumField("TENDBCLUSTER_EXCEL_AUTHORIZE_RULES", _("TenDB Cluster 
EXCEL授权"), _("权限管理")) # noqa - TENDBCLUSTER_STANDARDIZE = TicketEnumField("TENDBCLUSTER_STANDARDIZE", _("TenDB Cluster 集群标准化"), - register_iam=False) TENDBCLUSTER_METADATA_IMPORT = TicketEnumField("TENDBCLUSTER_METADATA_IMPORT", _("TenDB Cluster 元数据导入"), register_iam=False) # noqa TENDBCLUSTER_APPEND_DEPLOY_CTL = TicketEnumField("TENDBCLUSTER_APPEND_DEPLOY_CTL", _("TenDB Cluster 追加部署中控"), register_iam=False) # noqa TENDBSINGLE_METADATA_IMPORT = TicketEnumField("TENDBSINGLE_METADATA_IMPORT", _("TenDB Single 元数据导入"), register_iam=False) # noqa - TENDBSINGLE_STANDARDIZE = TicketEnumField("TENDBSINGLE_STANDARDIZE", _("TenDB Single 集群标准化"), - register_iam=False) # noqa TENDBCLUSTER_DATA_MIGRATE = TicketEnumField("TENDBCLUSTER_DATA_MIGRATE", _("TenDB Cluster DB克隆"), _("数据处理")) TENDBCLUSTER_DUMP_DATA = TicketEnumField("TENDBCLUSTER_DUMP_DATA", _("TenDB Cluster 数据导出"), _("数据处理")) TENDBCLUSTER_ACCOUNT_RULE_CHANGE = TicketEnumField("TENDBCLUSTER_ACCOUNT_RULE_CHANGE",