Redis中redis-shake实现数据迁移同步
作者:炸鸡物料库
在业务环境中使用Redis进行跨区域数据迁移和同步的挑战,文中通过示例代码介绍的非常详细,对大家的学习或者工作具有一定的参考学习价值,需要的朋友们下面随着小编来一起学习学习吧
0 项目介绍
在当今快速发展的业务环境中,企业经常面临跨区域数据迁移和同步的挑战,以确保业务连续性和数据一致性。特别是在使用Redis作为关键数据存储解决方案时,如何高效、安全地进行数据迁移和同步成为了一个重要的问题。

1 初始化 Redis-shake 服务器
# ===================== 内核参数 ============================ 连接数 cat >> /etc/security/limits.conf << EOF * soft nofile 65535 * hard nofile 65535 * soft nproc 65535 * hard nproc 65535 EOF cat >> /etc/sysctl.conf << EOF vm.overcommit_memory = 1 net.ipv4.tcp_max_tw_buckets = 150000 net.ipv4.tcp_timestamps = 1 net.ipv4.tcp_tw_reuse = 1 net.ipv4.ip_local_port_range = 9000 65500 EOF # ====================== 基础配置 =================================== setenforce 0 && sed -i 's/^SELINUX=enforcing/SELINUX=disabled/g' /etc/selinux/config && getenforce systemctl disable firewalld && systemctl stop firewalld useradd -s /sbin/nologin redis
2 安装 Redis-shake
# 下载 redis-shake wget https://github.com/tair-opensource/RedisShake/releases/download/v4.2.0/redis-shake-linux-amd64.tar.gz # 解压 redis-shake tar xf redis-shake-linux-amd64.tar.gz # 创建目录 mkdir -pv /usr/local/redis-shake/config
3 配置 Redis-shake
vim /usr/local/redis-shake/config/shake.toml
主要调整参数:
- sync_reader.cluster:设置同步模式是否为 Cluster 模式
- sync_reader.address:源端 Redis Cluster IP (任意IP)
- sync_reader.password: 源端 Redis Cluster 访问密码
- sync_reader.sync_rdb: 同步方式,RDB 为 全量同步
- sync_reader.sync_aof: 同步方式, AOF 为增量同步 (即使Cluster没有开启AOF,也可以使用)
- redis_writer.cluster: 设置目标端同步模式,此处应与源端模式一样
- redis_writer.address :设置目标端同步 IP
- redis_writer.password: 设置目标端同步密码
[sync_reader] cluster = true # set to true if source is a redis cluster address = "192.168.1.1:6379" # when cluster is true, set address to one of the cluster node username = "" # keep empty if not using ACL password = "xxxxx" # keep empty if no authentication is required tls = false sync_rdb = true # set to false if you don't want to sync rdb sync_aof = true # set to false if you don't want to sync aof prefer_replica = false # set to true if you want to sync from replica node try_diskless = false # set to true if you want to sync by socket and source repl-diskless-sync=yes #[scan_reader] #cluster = false # set to true if source is a redis cluster #address = "127.0.0.1:6379" # when cluster is true, set address to one of the cluster node #username = "" # keep empty if not using ACL #password = "" # keep empty if no authentication is required #tls = false #dbs = [] # set you want to scan dbs such as [1,5,7], if you don't want to scan all #scan = true # set to false if you don't want to scan keys #ksn = false # set to true to enabled Redis keyspace notifications (KSN) subscription #count = 1 # number of keys to scan per iteration # [rdb_reader] # filepath = "/tmp/dump.rdb" # [aof_reader] # filepath = "/tmp/.aof" # timestamp = 0 # subsecond [redis_writer] cluster = true # set to true if target is a redis cluster sentinel = false # set to true if target is a redis sentinel master = "" # set to master name if target is a redis sentinel address = "192.168.1.2:6379" # when cluster is true, set address to one of the cluster node username = "" # keep empty if not using ACL password = "xxxxx" # keep empty if no authentication is required tls = false off_reply = false # turn off the server reply [filter] # Allow keys with specific prefixes or suffixes # Examples: # allow_key_prefix = ["user:", "product:"] # allow_key_suffix = [":active", ":valid"] # Leave empty to allow all keys allow_key_prefix = [] allow_key_suffix = [] # Block keys with specific prefixes or suffixes # 
Examples: # block_key_prefix = ["temp:", "cache:"] # block_key_suffix = [":tmp", ":old"] # Leave empty to block nothing block_key_prefix = [] block_key_suffix = [] # Specify allowed and blocked database numbers (e.g., allow_db = [0, 1, 2], block_db = [3, 4, 5]) # Leave empty to allow all databases allow_db = [] block_command = [] # Allow or block specific command groups # Available groups: # SERVER, STRING, CLUSTER, CONNECTION, BITMAP, LIST, SORTED_SET, # GENERIC, TRANSACTIONS, SCRIPTING, TAIRHASH, TAIRSTRING, TAIRZSET, # GEO, HASH, HYPERLOGLOG, PUBSUB, SET, SENTINEL, STREAM # Examples: # allow_command_group = ["STRING", "HASH"] # Only allow STRING and HASH commands # block_command_group = ["SCRIPTING", "PUBSUB"] # Block SCRIPTING and PUBSUB commands # Leave empty to allow all command groups allow_command_group = [] block_command_group = [] # Function for custom data processing # For best practices and examples, visit: # https://tair-opensource.github.io/RedisShake/zh/function/best_practices.html function = "" [advanced] dir = "/usr/local/redis-shake/data" ncpu = 0 # runtime.GOMAXPROCS, 0 means use runtime.NumCPU() cpu cores pprof_port = 0 # pprof port, 0 means disable status_port = 0 # status port, 0 means disable # log log_file = "shake.log" log_level = "info" # debug, info or warn log_interval = 5 # in seconds # redis-shake gets key and value from rdb file, and uses RESTORE command to # create the key in target redis. Redis RESTORE will return a "Target key name # is busy" error when key already exists. You can use this configuration item # to change the default behavior of restore: # panic: redis-shake will stop when meet "Target key name is busy" error. # rewrite: redis-shake will replace the key with new value. # skip: redis-shake will skip restore the key when meet "Target key name is busy" error. rdb_restore_command_behavior = "panic" # panic, rewrite or skip # redis-shake uses pipeline to improve sending performance. 
# Adjust this value based on the destination Redis performance: # - Higher values may improve performance for capable destinations. # - Lower values are recommended for destinations with poor performance. # 1024 is a good default value for most cases. pipeline_count_limit = 1024 # This setting corresponds to the 'client-query-buffer-limit' in Redis configuration. # The default value is typically 1GB. # It's recommended not to modify this value unless absolutely necessary. target_redis_client_max_querybuf_len = 1073741824 # 1GB in bytes # This setting corresponds to the 'proto-max-bulk-len' in Redis configuration. # It defines the maximum size of a single string element in the Redis protocol. # The value must be 1MB or greater. Default is 512MB. # It's recommended not to modify this value unless absolutely necessary. target_redis_proto_max_bulk_len = 512_000_000 # If the source is Elasticache, you can set this item. AWS ElastiCache has custom # psync command, which can be obtained through a ticket. aws_psync = "" # example: aws_psync = "10.0.0.1:6379@nmfu2sl5osync,10.0.0.1:6379@xhma21xfkssync" # destination will delete itself entire database before fetching files # from source during full synchronization. # This option is similar redis replicas RDB diskless load option: # repl-diskless-load on-empty-db empty_db_before_sync = true [module] # The data format for BF.LOADCHUNK is not compatible in different versions. v2.6.3 <=> 20603 target_mbbloom_version = 20603
4 配置 Redis-shake service
vim /usr/lib/systemd/system/redis-shake.service
[Unit] Description=Redis-shake After=data.mount [Service] Type=simple ExecStart=/usr/local/redis-shake/redis-shake /usr/local/redis-shake/config/shake.toml ExecStop=/bin/kill -SIGTERM $MAINPID PrivateTmp=true User=redis Group=redis [Install] WantedBy=multi-user.target
5 启动 & 开机自启
systemctl start redis-shake systemctl enable redis-shake
到此这篇关于Redis中redis-shake实现数据迁移同步的文章就介绍到这了,更多相关Redis 数据迁移同步内容请搜索脚本之家以前的文章或继续浏览下面的相关文章希望大家以后多多支持脚本之家!
