A customer asked us to back up a MySQL database, but the data volume is too large to dump everything, so only the important data gets a full backup and the rest is backed up incrementally. The approach: scan the information_schema tables; if a table is too large, back up only its newest n rows, otherwise dump it in full. Generate a mysqldump shell script from that scan, then execute the script to perform the backup.
# table_schema below is the database (schema) name
SELECT CONCAT( table_schema, '.', table_name ) AS 'Table Name', table_rows AS 'Number of Rows',
CONCAT( ROUND( data_length / (1024 * 1024 * 1024), 4 ), 'G' ) AS 'Data Size',
CONCAT( ROUND( index_length / (1024 * 1024 * 1024), 4 ), 'G' ) AS 'Index Size',
CONCAT( ROUND( (data_length + index_length) / (1024 * 1024 * 1024), 4 ), 'G' ) AS 'Total'
FROM information_schema.TABLES WHERE table_schema LIKE 'zx' ORDER BY table_name desc;
# table_schema is the database name, table_name is the table name
SELECT * FROM INFORMATION_SCHEMA.COLUMNS
WHERE table_schema = 'zx' AND table_name = 'anlian_host_result' AND extra = 'auto_increment';
# To list all columns of the table instead:
SELECT * FROM INFORMATION_SCHEMA.COLUMNS
WHERE table_schema = 'zx' AND table_name = 'anlian_host_result';
Incremental backup based on the auto-increment id
mysqldump -h -P3307 -u xx -pxx zx site_result --tables -w" 1=1 order by auto_id desc limit 100" --default-character-set=utf8 --skip-lock-tables > ./zx/site_result.sql
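mysqldump passes the value of -w/--where verbatim into the WHERE clause of the SELECT it issues for the table, so appending "order by ... limit" after the dummy condition 1=1 restricts the dump to the newest rows. Roughly the query it ends up running (illustrative):
SELECT * FROM site_result WHERE 1=1 ORDER BY auto_id DESC LIMIT 100;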
Full backup
mysqldump -h -P3307 -u xx -pxx zx site_result --default-character-set=utf8 --skip-lock-tables > ./site_result.sql
#!/bin/sh
source /etc/profile
# Remember the directory the script was started from; it is passed to DumpCreater as the output root
path=`pwd`/
# Move to the project root (one level above this script)
cd `dirname $0` && cd ../
# Generate the dump script for database "test" with a 100-row threshold, then run the generated script
nohup java -cp ./year_counter_2021.jar:lib/* com.isi.dbsyn.DumpCreater test 100 $path && cd dump_`date +"%Y_%m_%d"` && nohup bash ./test_dump.sh >/dev/null &
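The generated test_dump.sh is just one mysqldump line per table. An illustrative excerpt, assuming a placeholder host and example table names (the real content depends on the scanned schema):
# Table above the threshold with an auto-increment id: dump only the newest 100 rows
mysqldump -h 127.0.0.1 -P3307 -u xx -pxx test site_result --tables -w" 1=1 order by auto_id desc limit 100" --default-character-set=utf8 --skip-lock-tables > ./test/site_result.sql
# Small table: full dump
mysqldump -h 127.0.0.1 -P3307 -u xx -pxx test sys_user --default-character-set=utf8 --skip-lock-tables > ./test/sys_user.sql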
package com.isi.dbsyn;

import com.jfinal.plugin.activerecord.Db;
import com.jfinal.plugin.activerecord.Record;

import java.io.File;
import java.util.ArrayList;
import java.util.List;

// Scans information_schema for the target database and generates a mysqldump shell script:
// tables above the row threshold that have an auto-increment column get an incremental dump
// of the newest rows; everything else gets a full dump.
// JfinalDbUtil and MyIOUtils are the project's own helper classes.
public class DumpCreater {
    private String ip;
    private int port;
    private String uname;
    private String pwd;
    private String db_name;
    private String dump_sh_dir;
    private long dump_max_size;

    // Row count and size of every table in the database
    private static final String table_schema_tmp =
            "SELECT table_schema, table_name, CONCAT(table_schema, '.', table_name) AS 'Table Name', "
          + "table_rows AS 'Number of Rows', "
          + "CONCAT(ROUND(data_length / (1024 * 1024 * 1024), 4), 'G') AS 'Data Size', "
          + "CONCAT(ROUND(index_length / (1024 * 1024 * 1024), 4), 'G') AS 'Index Size', "
          + "CONCAT(ROUND((data_length + index_length) / (1024 * 1024 * 1024), 4), 'G') AS 'Total' "
          + "FROM information_schema.TABLES WHERE table_schema LIKE '[db]' ORDER BY table_name DESC";

    // Auto-increment column of a table, if any
    private static final String id_column_schema_tmp =
            "SELECT * FROM INFORMATION_SCHEMA.COLUMNS "
          + "WHERE table_schema = '[db]' AND table_name = '[table]' AND extra = 'auto_increment'";

    // Incremental dump: newest [dump_max_size] rows ordered by the auto-increment column
    private String part_dump_sh_tmp =
            "mysqldump -h [ip] -P[port] -u [uname] -p[pwd] [db] [table] --tables "
          + "-w\" 1=1 order by [id] desc limit [dump_max_size]\" "
          + "--default-character-set=utf8 --skip-lock-tables > [out_file]";

    // Full dump
    private String total_dump_sh_tmp =
            "mysqldump -h [ip] -P[port] -u [uname] -p[pwd] [db] [table] "
          + "--default-character-set=utf8 --skip-lock-tables > [out_file]";

    private List<Record> getTables() {
        return Db.use(JfinalDbUtil.source_name).find(table_schema_tmp.replace("[db]", db_name));
    }

    private String getIdColumn(String db, String table) {
        Record idColumn = Db.use(JfinalDbUtil.source_name)
                .findFirst(id_column_schema_tmp.replace("[db]", db).replace("[table]", table));
        if (idColumn == null) {
            return null;
        }
        return idColumn.getStr("column_name");
    }

    private String getDump(String table, Long dump_max_size, String idColumn, String out_file, boolean total) {
        if (total) {
            return total_dump_sh_tmp.replace("[ip]", ip).replace("[port]", port + "")
                    .replace("[uname]", uname).replace("[pwd]", pwd)
                    .replace("[db]", db_name).replace("[table]", table)
                    .replace("[out_file]", out_file);
        }
        return part_dump_sh_tmp.replace("[ip]", ip).replace("[port]", port + "")
                .replace("[uname]", uname).replace("[pwd]", pwd)
                .replace("[db]", db_name).replace("[table]", table)
                .replace("[id]", idColumn)
                .replace("[dump_max_size]", dump_max_size + "")
                .replace("[out_file]", out_file);
    }

    private List<String> getDumpShs() {
        List<String> dumpShs = new ArrayList<>();
        List<Record> tables = getTables();
        for (Record table : tables) {
            Integer rows = table.getInt("Number of Rows");
            String table_name = table.getStr("table_name");
            String idColumn = getIdColumn(db_name, table_name);
            File dbDirFile = new File(dump_sh_dir + "/" + db_name);
            if (!dbDirFile.exists()) {
                dbDirFile.mkdirs();
            }
            String out_path = "./" + db_name + "/" + table_name + ".sql";
            if (rows != null && rows > dump_max_size && idColumn != null) {
                // Above the threshold and has an auto-increment id: incremental dump
                dumpShs.add(getDump(table_name, dump_max_size, idColumn, out_path, false));
            } else {
                // Full dump
                dumpShs.add(getDump(table_name, dump_max_size, idColumn, out_path, true));
            }
        }
        return dumpShs;
    }

    public void exe() {
        List<String> dumpShs = getDumpShs();
        String shell_path = dump_sh_dir + File.separator + db_name + "_dump.sh";
        File dump_sh_file = new File(shell_path);
        if (dump_sh_file.exists()) {
            // Remove any script left over from a previous run before appending
            dump_sh_file.delete();
        }
        File pFile = dump_sh_file.getParentFile();
        if (!pFile.exists()) {
            pFile.mkdirs();
        }
        for (String dumpSh : dumpShs) {
            MyIOUtils.appendLine(shell_path, dumpSh, "utf-8");
        }
    }

    public DumpCreater(String ip, int port, String uname, String pwd, String db_name,
                       String dump_sh_dir, long dump_max_size) {
        this.ip = ip;
        this.port = port;
        this.uname = uname;
        this.pwd = pwd;
        this.db_name = db_name;
        this.dump_sh_dir = dump_sh_dir;
        this.dump_max_size = dump_max_size;
    }
}
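The post does not show the main entry point that the shell script invokes with "test 100 $path". A minimal sketch of such a method (to sit inside DumpCreater), assuming the three arguments are the database name, the row threshold and the working directory, and using placeholder connection details that are not from the original:

    // Hypothetical entry point matching: java -cp ... com.isi.dbsyn.DumpCreater test 100 $path
    public static void main(String[] args) {
        String dbName = args[0];                        // e.g. "test"
        long maxRows = Long.parseLong(args[1]);         // row threshold, e.g. 100
        String workDir = args[2];                       // directory passed by the shell script as $path (ends with '/')
        // Write the generated script into dump_<date>/ so the caller can cd into it afterwards
        String date = new java.text.SimpleDateFormat("yyyy_MM_dd").format(new java.util.Date());
        String shDir = workDir + "dump_" + date;
        // Host, port, user and password are placeholders for illustration only
        DumpCreater creater = new DumpCreater("127.0.0.1", 3307, "xx", "xx", dbName, shDir, maxRows);
        creater.exe();
    }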