diff --git a/.gitignore b/.gitignore
index f6520ed87c..78d848eaf6 100644
--- a/.gitignore
+++ b/.gitignore
@@ -11,6 +11,6 @@ pre-*-bkcodeai
 bkcodeai.json
 package-lock.json
-
+*.swp
 ### PreCI ###
-.codecc
\ No newline at end of file
+.codecc
diff --git a/dbm-services/mysql/db-tools/mysql-dbbackup/cmd/subcmd_load.go b/dbm-services/mysql/db-tools/mysql-dbbackup/cmd/subcmd_load.go
index bcce956d98..b71e40568d 100644
--- a/dbm-services/mysql/db-tools/mysql-dbbackup/cmd/subcmd_load.go
+++ b/dbm-services/mysql/db-tools/mysql-dbbackup/cmd/subcmd_load.go
@@ -77,6 +77,7 @@ func loadData(cnf *config.BackupConfig, backupType string) error {
 	metaInfo, err := backupexe.ParseJsonFile(indexPath)
 	if err != nil {
+		logger.Log.Errorf("cannot parse index file:%s, errmsg:%s", indexPath, err)
 		return err
 	}
 	if backupType != "" && metaInfo.BackupType != backupType {
diff --git a/dbm-services/mysql/db-tools/mysql-dbbackup/pkg/cst/const.go b/dbm-services/mysql/db-tools/mysql-dbbackup/pkg/cst/const.go
index c9c86a7b90..ce99034b43 100644
--- a/dbm-services/mysql/db-tools/mysql-dbbackup/pkg/cst/const.go
+++ b/dbm-services/mysql/db-tools/mysql-dbbackup/pkg/cst/const.go
@@ -17,7 +17,7 @@ const (
 )
 
 const (
-	StorageEnginRocksdb = "rocksdb"
+	StorageEngineRocksdb = "rocksdb"
 )
 
 // backup role: dbm-services/mysql/db-tools/dbactuator/pkg/core/cst/mysql.go
diff --git a/dbm-services/mysql/db-tools/mysql-dbbackup/pkg/src/backupexe/dumper.go b/dbm-services/mysql/db-tools/mysql-dbbackup/pkg/src/backupexe/dumper.go
index b50bb2911b..4460e6e7cd 100644
--- a/dbm-services/mysql/db-tools/mysql-dbbackup/pkg/src/backupexe/dumper.go
+++ b/dbm-services/mysql/db-tools/mysql-dbbackup/pkg/src/backupexe/dumper.go
@@ -67,7 +67,7 @@ func BuildDumper(cnf *config.BackupConfig, storageEngine string) (dumper Dumper,
 		return nil, err
 	}
 
-	if cst.StorageEnginRocksdb == storageEngine {
+	if cst.StorageEngineRocksdb == storageEngine {
 		dumper = &PhysicalRocksdbDumper{
 			cfg: cnf,
 		}
diff --git a/dbm-services/mysql/db-tools/mysql-dbbackup/pkg/src/backupexe/dumper_physical_rocksdb.go b/dbm-services/mysql/db-tools/mysql-dbbackup/pkg/src/backupexe/dumper_physical_rocksdb.go
index 4274268ea3..1c58bb0f05 100644
--- a/dbm-services/mysql/db-tools/mysql-dbbackup/pkg/src/backupexe/dumper_physical_rocksdb.go
+++ b/dbm-services/mysql/db-tools/mysql-dbbackup/pkg/src/backupexe/dumper_physical_rocksdb.go
@@ -19,27 +19,33 @@ import (
 	"dbm-services/mysql/db-tools/mysql-dbbackup/pkg/util"
 )
 
+// PhysicalRocksdbDumper is the physical dumper for the rocksdb storage engine
 type PhysicalRocksdbDumper struct {
 	cfg             *config.BackupConfig
+	backupLogfile   string
 	dbbackupHome    string
 	checkpointDir   string
 	mysqlVersion    string
 	isOfficial      bool
 	rocksdbCmd      string
 	storageEngine   string
+	mysqlRole       string
+	masterHost      string
+	masterPort      int
 	backupStartTime time.Time
 	backupEndTime   time.Time
 }
 
+// buildArgs constructs the command-line arguments for the rocksdb physical backup
 func (p *PhysicalRocksdbDumper) buildArgs() []string {
 	targetPath := filepath.Join(p.cfg.Public.BackupDir, p.cfg.Public.TargetName())
 	args := []string{
-		fmt.Sprintf("--host=%s", p.cfg.Public.MysqlHost),
-		fmt.Sprintf("--port=%d", p.cfg.Public.MysqlPort),
 		fmt.Sprintf("--user=%s", p.cfg.Public.MysqlUser),
 		fmt.Sprintf("--password=%s", p.cfg.Public.MysqlPasswd),
+		fmt.Sprintf("--host=%s", p.cfg.Public.MysqlHost),
+		fmt.Sprintf("--port=%d", p.cfg.Public.MysqlPort),
 		fmt.Sprintf("--checkpoint_dir=%s", p.checkpointDir),
 		fmt.Sprintf("--backup_dir=%s", targetPath),
 		"--stream=disabled",
@@ -56,6 +62,7 @@ func (p *PhysicalRocksdbDumper) buildArgs() []string {
 	return args
 }
 
+// initConfig initializes the dumper configuration
 func (p *PhysicalRocksdbDumper) initConfig(mysqlVersion string) error {
 	if p.cfg == nil {
 		return errors.New("rocksdb physical dumper config missed")
 	}
@@ -68,55 +75,80 @@ func (p *PhysicalRocksdbDumper) initConfig(mysqlVersion string) error {
 	}
 	p.dbbackupHome = filepath.Dir(cmdPath)
 
-	db, err := mysqlconn.InitConn(&p.cfg.Public)
+	// connect to mysql and obtain the base information
+	db, err := mysqlconn.InitConn(&p.cfg.Public)
 	if err != nil {
+		logger.Log.Errorf("cannot connect to mysql, host:%s, port:%d, errmsg:%s",
+			p.cfg.Public.MysqlHost, p.cfg.Public.MysqlPort, err)
 		return err
 	}
+	defer func() {
+		_ = db.Close()
+	}()
+
 	p.mysqlVersion, p.isOfficial = util.VersionParser(mysqlVersion)
 	p.storageEngine, err = mysqlconn.GetStorageEngine(db)
 	if err != nil {
+		logger.Log.Errorf("cannot get the storage engine from mysql, host:%s, port:%d, errmsg:%s",
+			p.cfg.Public.MysqlHost, p.cfg.Public.MysqlPort, err)
 		return err
 	}
+
+	// keep the storage engine name in lower case
 	p.storageEngine = strings.ToLower(p.storageEngine)
+	p.mysqlRole = strings.ToLower(p.cfg.Public.MysqlRole)
 
-	defer func() {
-		_ = db.Close()
-	}()
+	// if the current node is a slave or repeater, obtain the master host and port
+	if p.mysqlRole == cst.RoleSlave || p.mysqlRole == cst.RoleRepeater {
+		p.masterHost, p.masterPort, err = mysqlconn.ShowMysqlSlaveStatus(db)
+		if err != nil {
+			logger.Log.Errorf("cannot get the master host and port from mysql, host:%s, port:%d, errmsg:%s",
+				p.cfg.Public.MysqlHost, p.cfg.Public.MysqlPort, err)
+			return err
+		}
+	}
 
-	p.checkpointDir = fmt.Sprintf("%s/MyRocks_checkpoint", p.cfg.Public.BackupDir)
-	p.rocksdbCmd = "/bin/" + cst.ToolMyrocksHotbackup
+	// set the base config
+	p.checkpointDir = filepath.Join(p.cfg.Public.BackupDir, "MyRocks_checkpoint")
+	p.rocksdbCmd = filepath.Join("bin", cst.ToolMyrocksHotbackup)
 	BackupTool = cst.ToolMyrocksHotbackup
 
 	return nil
 }
 
+// Execute runs the rocksdb physical backup
 func (p *PhysicalRocksdbDumper) Execute(enableTimeOut bool) error {
 	p.backupStartTime = time.Now()
 	defer func() {
 		p.backupEndTime = time.Now()
 	}()
 
-	if p.storageEngine != cst.StorageEnginRocksdb {
-		err := fmt.Errorf("%s engine not support", p.storageEngine)
+	// the storage engine must be rocksdb
+	if p.storageEngine != cst.StorageEngineRocksdb {
+		err := fmt.Errorf("unsupported engine:%s, host:%s, port:%d", p.storageEngine,
+			p.cfg.Public.MysqlHost, p.cfg.Public.MysqlPort)
 		logger.Log.Error(err)
 		return err
 	}
 
+	// pre-create the checkpoint dir
 	_, err := os.Stat(p.checkpointDir)
 	if os.IsNotExist(err) {
+		logger.Log.Infof("the checkpoint dir does not exist, will create it, checkpoint:%s", p.checkpointDir)
checkpoint:%s", p.checkpointDir) err = os.MkdirAll(p.checkpointDir, 0755) } if err != nil { - logger.Log.Errorf("failed to create checkpoint(%s), err-msg:%s", p.checkpointDir, err) + logger.Log.Errorf("can not create the checkpoint:%s, errmsg:%s", p.checkpointDir, err) return err } binPath := filepath.Join(p.dbbackupHome, p.rocksdbCmd) args := p.buildArgs() + // perform the dump operation var cmd *exec.Cmd backupCmd := fmt.Sprintf(`%s %s`, binPath, strings.Join(args, " ")) @@ -134,13 +166,17 @@ func (p *PhysicalRocksdbDumper) Execute(enableTimeOut bool) error { cmd = exec.Command("sh", "-c", backupCmd) } - backuplogFilename := fmt.Sprintf("%s_backup_%d_%d.log", p.storageEngine, p.cfg.Public.MysqlPort, int(time.Now().Weekday())) - rocksdbBackuplogFilename := filepath.Join(p.dbbackupHome, "logs", backuplogFilename) + // create a dumper log file to store the log of the dumper command + p.backupLogfile = fmt.Sprintf("dumper_%s_%s_%d_%d.log", p.storageEngine, + cst.ToolMyrocksHotbackup, p.cfg.Public.MysqlPort, int(time.Now().Weekday())) + + p.backupLogfile = filepath.Join(p.dbbackupHome, "logs", p.backupLogfile) - outFile, err := os.Create(rocksdbBackuplogFilename) + // pre-created dump log file + outFile, err := os.Create(p.backupLogfile) if err != nil { - logger.Log.Error("create log file failed: ", err) + logger.Log.Errorf("can not create the dumper log file, file name:%s, errmsg:%s", p.backupLogfile, err) return err } @@ -148,46 +184,32 @@ func (p *PhysicalRocksdbDumper) Execute(enableTimeOut bool) error { _ = outFile.Close() }() + // redirect standard output and error messages to a file cmd.Stdout = outFile cmd.Stderr = outFile - logger.Log.Info("rocksdb backup command: ", cmd.String()) + // perform the dump command err = cmd.Run() if err != nil { - logger.Log.Error("run rocksdb physical backup failed: ", err) + logger.Log.Errorf("can not run the rocksdb physical dumper command:%s, engine:%s, errmsg:%s", + backupCmd, p.storageEngine, err) return err } + logger.Log.Infof("dump rocksdb success, command:%s", cmd.String()) return nil } +// PrepareBackupMetaInfo generate the metadata of database backup func (p *PhysicalRocksdbDumper) PrepareBackupMetaInfo(cnf *config.BackupConfig) (*dbareport.IndexContent, error) { - db, err := mysqlconn.InitConn(&cnf.Public) - if err != nil { - return nil, errors.WithMessage(err, "IndexContent") - } - - defer func() { - _ = db.Close() - }() - - storageEngine, err := mysqlconn.GetStorageEngine(db) - if err != nil { - return nil, err - } - - storageEngine = strings.ToLower(storageEngine) - - if storageEngine != "rocksdb" { - logger.Log.Errorf("unknown storage engine(%s)", storageEngine) - return nil, nil - } + // parse the binglog position xtrabackupBinlogInfoFileName := filepath.Join(cnf.Public.BackupDir, cnf.Public.TargetName(), "xtrabackup_binlog_info") xtrabackupSlaveInfoFileName := filepath.Join(cnf.Public.BackupDir, cnf.Public.TargetName(), "xtrabackup_slave_info") tmpFileName := filepath.Join(cnf.Public.BackupDir, cnf.Public.TargetName(), "tmp_dbbackup_go.txt") + // obtain the qpress command path exepath, err := os.Executable() if err != nil { return nil, err @@ -200,30 +222,38 @@ func (p *PhysicalRocksdbDumper) PrepareBackupMetaInfo(cnf *config.BackupConfig) BinlogInfo: dbareport.BinlogStatusInfo{}, } - if masterStatus, err := parseXtraBinlogInfo(qpressPath, xtrabackupBinlogInfoFileName, tmpFileName); err != nil { + // parse the binlog + masterStatus, err := parseXtraBinlogInfo(qpressPath, xtrabackupBinlogInfoFileName, tmpFileName) + if err != nil { + 
logger.Log.Errorf("do not parse xtrabackup binlog file, file name:%s, errmsg:%s", + xtrabackupBinlogInfoFileName, err) return nil, err - } else { - metaInfo.BinlogInfo.ShowMasterStatus = masterStatus - metaInfo.BinlogInfo.ShowMasterStatus.MasterHost = cnf.Public.MysqlHost - metaInfo.BinlogInfo.ShowMasterStatus.MasterPort = cnf.Public.MysqlPort } - if mysqlRole := strings.ToLower(cnf.Public.MysqlRole); mysqlRole == cst.RoleSlave || mysqlRole == cst.RoleRepeater { - if slaveStatus, err := parseXtraSlaveInfo(qpressPath, xtrabackupSlaveInfoFileName, tmpFileName); err != nil { + // save the master node status + metaInfo.BinlogInfo.ShowMasterStatus = masterStatus + metaInfo.BinlogInfo.ShowMasterStatus.MasterHost = cnf.Public.MysqlHost + metaInfo.BinlogInfo.ShowMasterStatus.MasterPort = cnf.Public.MysqlPort + + // parse the information of the master node + if p.mysqlRole == cst.RoleSlave || p.mysqlRole == cst.RoleRepeater { + slaveStatus, err := parseXtraSlaveInfo(qpressPath, xtrabackupSlaveInfoFileName, tmpFileName) + + if err != nil { + logger.Log.Errorf("do not parse xtrabackup slave information, xtrabackup file:%s, errmsg:%s", + xtrabackupSlaveInfoFileName, err) return nil, err - } else { - metaInfo.BinlogInfo.ShowSlaveStatus = slaveStatus - masterHost, masterPort, err := mysqlconn.ShowMysqlSlaveStatus(db) - if err != nil { - return nil, err - } - metaInfo.BinlogInfo.ShowSlaveStatus.MasterHost = masterHost - metaInfo.BinlogInfo.ShowSlaveStatus.MasterPort = masterPort } + + metaInfo.BinlogInfo.ShowSlaveStatus = slaveStatus + metaInfo.BinlogInfo.ShowSlaveStatus.MasterHost = p.masterHost + metaInfo.BinlogInfo.ShowSlaveStatus.MasterPort = p.masterPort } + // teh mark indicating whether the update is a full backup or not metaInfo.JudgeIsFullBackup(&cnf.Public) if err = os.Remove(tmpFileName); err != nil { + logger.Log.Errorf("do not delete the tmp file, file name:%s, errmsg:%s", tmpFileName, err) return &metaInfo, err } diff --git a/dbm-services/mysql/db-tools/mysql-dbbackup/pkg/src/backupexe/execute_load.go b/dbm-services/mysql/db-tools/mysql-dbbackup/pkg/src/backupexe/execute_load.go index 38724870c8..5c065aba35 100644 --- a/dbm-services/mysql/db-tools/mysql-dbbackup/pkg/src/backupexe/execute_load.go +++ b/dbm-services/mysql/db-tools/mysql-dbbackup/pkg/src/backupexe/execute_load.go @@ -3,6 +3,7 @@ package backupexe import ( "dbm-services/mysql/db-tools/mysql-dbbackup/pkg/config" "dbm-services/mysql/db-tools/mysql-dbbackup/pkg/src/dbareport" + "strings" ) // ExecuteLoad execute load backup command @@ -11,7 +12,8 @@ func ExecuteLoad(cnf *config.BackupConfig, indexFileContent *dbareport.IndexCont return envErr } - loader, err := BuildLoader(cnf, indexFileContent.BackupType, indexFileContent.BackupTool) + backupStorageEngine := strings.ToLower(indexFileContent.StorageEngine) + loader, err := BuildLoader(cnf, indexFileContent.BackupType, indexFileContent.BackupTool, backupStorageEngine) if err != nil { return err } diff --git a/dbm-services/mysql/db-tools/mysql-dbbackup/pkg/src/backupexe/grant.go b/dbm-services/mysql/db-tools/mysql-dbbackup/pkg/src/backupexe/grant.go index a27f2a70f2..7def6d235c 100644 --- a/dbm-services/mysql/db-tools/mysql-dbbackup/pkg/src/backupexe/grant.go +++ b/dbm-services/mysql/db-tools/mysql-dbbackup/pkg/src/backupexe/grant.go @@ -25,7 +25,7 @@ func BackupGrant(cfg *config.Public) error { rows, err := db.Query("select user, host from mysql.user where user not in ('ADMIN','yw','dba_bak_all_sel')") if err != nil { - logger.Log.Error("can't send query to Mysql server %v\n", err) 
+ logger.Log.Errorf("can't send query to Mysql server %v\n", err) return err } defer rows.Close() diff --git a/dbm-services/mysql/db-tools/mysql-dbbackup/pkg/src/backupexe/loader.go b/dbm-services/mysql/db-tools/mysql-dbbackup/pkg/src/backupexe/loader.go index 3c8c53d547..688f146b1a 100644 --- a/dbm-services/mysql/db-tools/mysql-dbbackup/pkg/src/backupexe/loader.go +++ b/dbm-services/mysql/db-tools/mysql-dbbackup/pkg/src/backupexe/loader.go @@ -18,7 +18,8 @@ type Loader interface { } // BuildLoader TODO -func BuildLoader(cnf *config.BackupConfig, backupType string, backupTool string) (loader Loader, err error) { +func BuildLoader(cnf *config.BackupConfig, backupType string, backupTool string, storageEngine string) (loader Loader, err error) { + if strings.ToLower(backupType) == cst.BackupLogical { if backupTool == cst.ToolMysqldump { // mysqldump 共用 LogicalLoad 参数 @@ -46,8 +47,15 @@ func BuildLoader(cnf *config.BackupConfig, backupType string, backupTool string) if err := validate.GoValidateStruct(cnf.PhysicalLoad, false, false); err != nil { return nil, err } - loader = &PhysicalLoader{ - cnf: cnf, + + if cst.StorageEngineRocksdb == storageEngine { + loader = &PhysicalRocksdbLoader{ + cfg: cnf, + } + } else { + loader = &PhysicalLoader{ + cnf: cnf, + } } } else { logger.Log.Error(fmt.Sprintf("Unknown BackupType: %s", backupType)) diff --git a/dbm-services/mysql/db-tools/mysql-dbbackup/pkg/src/backupexe/loader_physical_rocksdb.go b/dbm-services/mysql/db-tools/mysql-dbbackup/pkg/src/backupexe/loader_physical_rocksdb.go new file mode 100644 index 0000000000..b92886d846 --- /dev/null +++ b/dbm-services/mysql/db-tools/mysql-dbbackup/pkg/src/backupexe/loader_physical_rocksdb.go @@ -0,0 +1,323 @@ +package backupexe + +import ( + "bufio" + "fmt" + "os" + "os/exec" + "os/user" + "path/filepath" + "strings" + "time" + + "dbm-services/common/go-pubpkg/cmutil" + "dbm-services/mysql/db-tools/mysql-dbbackup/pkg/config" + "dbm-services/mysql/db-tools/mysql-dbbackup/pkg/cst" + "dbm-services/mysql/db-tools/mysql-dbbackup/pkg/src/dbareport" + "dbm-services/mysql/db-tools/mysql-dbbackup/pkg/src/logger" + + "github.com/pkg/errors" +) + +// PhysicalRocksdbLoader physical rocksdb loader +type PhysicalRocksdbLoader struct { + cfg *config.BackupConfig + indexContent *dbareport.IndexContent + loaderLogfile string + targetName string + dataDir string + rocksdbDataDir string + innodbDataHomeDir string + innodbLogGroupHomeDir string + logbinDir string + relaylogDir string + tmpDir string + slowQueryLogFile string + dbbackupHome string + checkpointDir string + storageEngine string + rocksdbCmd string +} + +// buildArgs construct the instruction parameters for data recovery. 
+func (p *PhysicalRocksdbLoader) buildArgs() []string {
+
+	p.targetName = p.cfg.PhysicalLoad.MysqlLoadDir
+	p.rocksdbDataDir = filepath.Join(p.dataDir, ".rocksdb")
+
+	args := []string{
+		"--move_back",
+		fmt.Sprintf("--datadir=%s", p.dataDir),
+		fmt.Sprintf("--rocksdb_datadir=%s", p.rocksdbDataDir),
+		fmt.Sprintf("--rocksdb_waldir=%s", p.rocksdbDataDir),
+		fmt.Sprintf("--backup_dir=%s", p.targetName),
+		fmt.Sprintf("--defaults_file=%s", p.cfg.PhysicalLoad.DefaultsFile),
+	}
+
+	return args
+}
+
+// load restores the mysql data
+func (p *PhysicalRocksdbLoader) load() error {
+
+	if p.storageEngine != cst.StorageEngineRocksdb {
+		err := fmt.Errorf("unsupported engine:%s, host:%s, port:%d",
+			p.storageEngine, p.cfg.Public.MysqlHost, p.cfg.Public.MysqlPort)
+		return err
+	}
+
+	args := p.buildArgs()
+
+	binPath := filepath.Join(p.dbbackupHome, p.rocksdbCmd)
+	loaderCmd := fmt.Sprintf(`%s %s`, binPath, strings.Join(args, " "))
+	logger.Log.Infof("rocksdb physical loader command:%s", loaderCmd)
+
+	// delete *.pid, *.err files
+	errFiles := filepath.Join(p.targetName, "*.err")
+	pidFiles := filepath.Join(p.targetName, "*.pid")
+
+	logger.Log.Infof("delete the error files:%s", errFiles)
+	cmutil.ExecCommand(true, "", "rm", errFiles)
+	logger.Log.Infof("delete the pid files:%s", pidFiles)
+	cmutil.ExecCommand(true, "", "rm", pidFiles)
+
+	// run the loader command
+	cmd := exec.Command("sh", "-c", loaderCmd)
+	outFile, err := os.Create(p.loaderLogfile)
+	if err != nil {
+		logger.Log.Errorf("cannot create the loader log file:%s, errmsg:%s", p.loaderLogfile, err)
+		return err
+	}
+
+	defer func() {
+		_ = outFile.Close()
+	}()
+
+	// redirect standard output and error messages to a file
+	cmd.Stdout = outFile
+	cmd.Stderr = outFile
+
+	err = cmd.Run()
+	if err != nil {
+		logger.Log.Errorf("cannot run the rocksdb physical loader command:%s, engine:%s, errmsg:%s",
+			loaderCmd, p.storageEngine, err)
+		return err
+	}
+
+	logger.Log.Infof("rocksdb load succeeded, command:%s", cmd.String())
+
+	// change ownership to mysql.mysql, since the mysql server is started by the mysql user
+	cmutil.ExecCommand(false, "", "chown", "-R", "mysql.mysql", p.dataDir)
+	cmutil.ExecCommand(false, "", "chown", "-R", "mysql.mysql", p.innodbLogGroupHomeDir)
+	cmutil.ExecCommand(false, "", "chown", "-R", "mysql.mysql", p.innodbDataHomeDir)
+	cmutil.ExecCommand(false, "", "chown", "-R", "mysql.mysql", p.logbinDir)
+	cmutil.ExecCommand(false, "", "chown", "-R", "mysql.mysql", p.relaylogDir)
+	cmutil.ExecCommand(false, "", "chown", "-R", "mysql.mysql", p.slowQueryLogFile)
+	cmutil.ExecCommand(false, "", "chown", "-R", "mysql.mysql", p.tmpDir)
+	cmutil.ExecCommand(false, "", "chown", "-R", "mysql.mysql", p.rocksdbDataDir)
+
+	return nil
+}
+
+// initConfig initializes the loader configuration
+func (p *PhysicalRocksdbLoader) initConfig(indexContent *dbareport.IndexContent) error {
+	if p.cfg == nil {
+		return errors.New("rocksdb physical loader config missed")
+	}
+
+	// the mysql user is required
+	_, err := user.Lookup("mysql")
+	if err != nil {
+		logger.Log.Errorf("cannot lookup the user: mysql, errmsg:%s", err)
+		return err
+	}
+
+	// the mysql group is required
+	_, err = user.LookupGroup("mysql")
+	if err != nil {
+		logger.Log.Errorf("cannot lookup the group: mysql, errmsg:%s", err)
+		return err
+	}
+
+	pwd, err := os.Getwd()
+	if err != nil {
+		return err
+	}
+
+	// keep the storage engine name in lower case
+	p.storageEngine = strings.ToLower(indexContent.StorageEngine)
+	p.indexContent = indexContent
+
filepath.Join(pwd, "logs", fmt.Sprintf("loader_%s_%s_%d_%d.log", + p.storageEngine, cst.ToolMyrocksHotbackup, p.cfg.Public.MysqlPort, int(time.Now().Weekday()))) + + // obtain the directory where the loader log file is located + loaderLogDir := filepath.Dir(p.loaderLogfile) + err = os.MkdirAll(loaderLogDir, 0755) + if err != nil { + logger.Log.Errorf("do not create log dir:%s, errmsg:%s", loaderLogDir, err) + return err + } + + cmdPath, err := os.Executable() + if err != nil { + return err + } + + // DefaultsFile should be the mysql config(eg: /etc/my.cnf) + if p.cfg.PhysicalLoad.DefaultsFile == "" { + return fmt.Errorf("physical load defaults file is required, config file:%s", p.cfg.PhysicalLoad.DefaultsFile) + } + + if !cmutil.FileExists(p.cfg.PhysicalLoad.DefaultsFile) { + return fmt.Errorf("the default file no exist, config file:%s", p.cfg.PhysicalLoad.DefaultsFile) + } + + file, err := os.Open(p.cfg.PhysicalLoad.DefaultsFile) + if err != nil { + return fmt.Errorf("can not open the default file, config file:%s", p.cfg.PhysicalLoad.DefaultsFile) + } + + defer file.Close() + + // extract parameters from the configuration file. + scanner := bufio.NewScanner(file) + for scanner.Scan() { + line := scanner.Text() + if strings.HasPrefix(line, "datadir=") { + p.dataDir = strings.TrimPrefix(line, "datadir=") + p.dataDir = strings.TrimSpace(p.dataDir) + continue + } + + if strings.HasPrefix(line, "innodb_log_group_home_dir=") { + p.innodbLogGroupHomeDir = strings.TrimPrefix(line, "innodb_log_group_home_dir=") + p.innodbLogGroupHomeDir = strings.TrimSpace(p.innodbLogGroupHomeDir) + continue + } + + if strings.HasPrefix(line, "innodb_data_home_dir=") { + p.innodbDataHomeDir = strings.TrimPrefix(line, "innodb_data_home_dir=") + p.innodbDataHomeDir = strings.TrimSpace(p.innodbDataHomeDir) + continue + } + + if strings.HasPrefix(line, "log_bin=") { + p.logbinDir = filepath.Dir(strings.TrimPrefix(line, "log_bin=")) + p.logbinDir = strings.TrimSpace(p.logbinDir) + continue + } + + if strings.HasPrefix(line, "relay-log=") { + p.relaylogDir = filepath.Dir(strings.TrimPrefix(line, "relay-log=")) + p.relaylogDir = strings.TrimSpace(p.relaylogDir) + continue + } + + if strings.HasPrefix(line, "slow_query_log_file=") { + p.slowQueryLogFile = filepath.Dir(strings.TrimPrefix(line, "slow_query_log_file=")) + p.slowQueryLogFile = strings.TrimSpace(p.slowQueryLogFile) + continue + } + + if strings.HasPrefix(line, "tmpdir=") { + p.tmpDir = strings.TrimPrefix(line, "tmpdir=") + p.tmpDir = strings.TrimSpace(p.tmpDir) + continue + } + } + + // store the base parameters + p.dbbackupHome = filepath.Dir(cmdPath) + p.storageEngine = strings.ToLower(indexContent.StorageEngine) + p.rocksdbCmd = filepath.Join("bin", cst.ToolMyrocksHotbackup) + BackupTool = cst.ToolMyrocksHotbackup + + return nil +} + +// cleanDirs Before the database resotres the data, it cleans up the existing data. 
+func (p *PhysicalRocksdbLoader) cleanDirs() error {
+
+	logger.Log.Infof("delete the data dir:%s", p.dataDir)
+	if p.dataDir != "" && p.dataDir != "/" {
+		// delete the old directory
+		os.RemoveAll(p.dataDir)
+		// create the new directory
+		os.MkdirAll(p.dataDir, 0755)
+	}
+
+	logger.Log.Infof("delete the innodb log group home dir:%s", p.innodbLogGroupHomeDir)
+	if p.innodbLogGroupHomeDir != "" && p.innodbLogGroupHomeDir != "/" {
+		// delete the old directory
+		os.RemoveAll(p.innodbLogGroupHomeDir)
+		// create the new directory
+		os.MkdirAll(p.innodbLogGroupHomeDir, 0755)
+	}
+
+	logger.Log.Infof("delete the innodb data home dir:%s", p.innodbDataHomeDir)
+	if p.innodbDataHomeDir != "" && p.innodbDataHomeDir != "/" {
+		// delete the old directory
+		os.RemoveAll(p.innodbDataHomeDir)
+		// create the new directory
+		os.MkdirAll(p.innodbDataHomeDir, 0755)
+	}
+
+	logger.Log.Infof("delete the relay log dir:%s", p.relaylogDir)
+	if p.relaylogDir != "" && p.relaylogDir != "/" {
+		// delete the old directory
+		os.RemoveAll(p.relaylogDir)
+		// create the new directory
+		os.MkdirAll(p.relaylogDir, 0755)
+	}
+
+	logger.Log.Infof("delete the log bin dir:%s", p.logbinDir)
+	if p.logbinDir != "" && p.logbinDir != "/" {
+		// delete the old directory
+		os.RemoveAll(p.logbinDir)
+		// create the new directory
+		os.MkdirAll(p.logbinDir, 0755)
+	}
+
+	logger.Log.Infof("delete the slow query log file:%s", p.slowQueryLogFile)
+	if p.slowQueryLogFile != "" && p.slowQueryLogFile != "/" {
+		// delete the old directory
+		os.Remove(p.slowQueryLogFile)
+		// create the new directory
+		os.MkdirAll(p.slowQueryLogFile, 0755)
+	}
+
+	logger.Log.Infof("delete the tmp dir:%s", p.tmpDir)
+	if p.tmpDir != "" && p.tmpDir != "/" {
+		// delete the old directory
+		os.RemoveAll(p.tmpDir)
+		// create the new directory
+		os.MkdirAll(p.tmpDir, 0755)
+	}
+	return nil
+}
+
+// Execute performs the data recovery operation
+func (p *PhysicalRocksdbLoader) Execute() error {
+
+	// the storage engine must be rocksdb
+	if p.storageEngine != cst.StorageEngineRocksdb {
+		err := fmt.Errorf("unsupported engine:%s", p.storageEngine)
+		logger.Log.Error(err)
+		return err
+	}
+
+	// clean up the old directories before restoring the backup data
+	err := p.cleanDirs()
+	if err != nil {
+		return err
+	}
+
+	// restore the backup data
+	err = p.load()
+	if err != nil {
+		return err
+	}
+
+	return nil
+}
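The sketch below is not part of the patch; it is a minimal, self-contained illustration of the storage-engine dispatch this change introduces: `ExecuteLoad` lower-cases `indexFileContent.StorageEngine` and passes it to `BuildLoader`, which returns the rocksdb loader only for physical backups taken from a rocksdb instance and falls back to the regular physical loader otherwise. The names `buildLoader`, `physicalLoader`, and `physicalRocksdbLoader` are simplified stand-ins, not the real `backupexe` types.

```go
package main

import (
	"fmt"
	"strings"
)

// Loader is a simplified stand-in for the backupexe.Loader interface.
type Loader interface {
	Execute() error
}

type physicalLoader struct{}
type physicalRocksdbLoader struct{}

func (physicalLoader) Execute() error {
	fmt.Println("load with the xtrabackup-based physical loader")
	return nil
}

func (physicalRocksdbLoader) Execute() error {
	fmt.Println("load with the myrocks_hotbackup-based rocksdb loader")
	return nil
}

// buildLoader mirrors the dispatch added to BuildLoader: for a physical
// backup, the storage engine recorded in the backup index decides which
// loader is returned.
func buildLoader(backupType, storageEngine string) (Loader, error) {
	switch strings.ToLower(backupType) {
	case "physical":
		if strings.ToLower(storageEngine) == "rocksdb" {
			return physicalRocksdbLoader{}, nil
		}
		return physicalLoader{}, nil
	default:
		return nil, fmt.Errorf("unknown BackupType: %s", backupType)
	}
}

func main() {
	// "RocksDB" stands in for indexFileContent.StorageEngine, which
	// ExecuteLoad lower-cases before calling BuildLoader.
	loader, err := buildLoader("physical", "RocksDB")
	if err != nil {
		panic(err)
	}
	_ = loader.Execute()
}
```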