From 5ba1d8a65551651119fd7f73c13d8150f0323aa2 Mon Sep 17 00:00:00 2001
From: wuyun
Date: Sun, 29 Dec 2024 08:43:03 +0000
Subject: [PATCH 1/4] reset tools/pika_migrate folder

---
 tools/pika_migrate/.gitattributes | 1 - tools/pika_migrate/.gitignore | 49 - tools/pika_migrate/.travis.yml | 26 - tools/pika_migrate/CODE_OF_CONDUCT.md | 46 - tools/pika_migrate/CONTRIBUTING.md | 1 - tools/pika_migrate/Dockerfile | 21 - tools/pika_migrate/LICENSE | 10 - tools/pika_migrate/Makefile | 245 -- tools/pika_migrate/README.md | 76 - tools/pika_migrate/conf/pika.conf | 144 -- tools/pika_migrate/detect_environment | 112 - tools/pika_migrate/img.png | Bin 54270 -> 0 bytes tools/pika_migrate/include/build_version.h | 15 - tools/pika_migrate/include/migrator_thread.h | 64 - tools/pika_migrate/include/pika_admin.h | 459 ---- .../include/pika_auxiliary_thread.h | 25 - tools/pika_migrate/include/pika_binlog.h | 115 - .../pika_migrate/include/pika_binlog_reader.h | 48 - .../include/pika_binlog_transverter.h | 91 - tools/pika_migrate/include/pika_bit.h | 142 -- tools/pika_migrate/include/pika_client_conn.h | 81 - tools/pika_migrate/include/pika_cluster.h | 114 - .../include/pika_cmd_table_manager.h | 32 - tools/pika_migrate/include/pika_command.h | 486 ---- tools/pika_migrate/include/pika_conf.h | 340 --- .../include/pika_data_distribution.h | 40 - tools/pika_migrate/include/pika_define.h | 424 ---- .../include/pika_dispatch_thread.h | 57 - tools/pika_migrate/include/pika_geo.h | 176 -- tools/pika_migrate/include/pika_geohash.h | 118 - .../include/pika_geohash_helper.h | 70 - tools/pika_migrate/include/pika_hash.h | 370 --- tools/pika_migrate/include/pika_hyperloglog.h | 69 - tools/pika_migrate/include/pika_kv.h | 736 ------ tools/pika_migrate/include/pika_list.h | 289 --- tools/pika_migrate/include/pika_meta.h | 35 - .../include/pika_monitor_thread.h | 48 - tools/pika_migrate/include/pika_partition.h | 169 -- tools/pika_migrate/include/pika_pubsub.h | 93 - .../pika_migrate/include/pika_repl_bgworker.h | 43 - tools/pika_migrate/include/pika_repl_client.h | 126 - .../include/pika_repl_client_conn.h | 40 - .../include/pika_repl_client_thread.h | 63 - tools/pika_migrate/include/pika_repl_server.h | 48 - .../include/pika_repl_server_conn.h | 30 - .../include/pika_repl_server_thread.h | 55 - tools/pika_migrate/include/pika_rm.h | 359 --- .../pika_migrate/include/pika_rsync_service.h | 28 - tools/pika_migrate/include/pika_sender.h | 41 - tools/pika_migrate/include/pika_server.h | 425 ---- tools/pika_migrate/include/pika_set.h | 266 --- .../include/pika_slaveping_thread.h | 44 - tools/pika_migrate/include/pika_slot.h | 191 -- tools/pika_migrate/include/pika_table.h | 89 - tools/pika_migrate/include/pika_version.h | 13 - tools/pika_migrate/include/pika_zset.h | 516 ---- tools/pika_migrate/include/redis_sender.h | 47 - tools/pika_migrate/pikatests.sh | 10 - tools/pika_migrate/src/build_version.cc.in | 10 - tools/pika_migrate/src/migrator_thread.cc | 475 ---- tools/pika_migrate/src/pika.cc | 214 -- tools/pika_migrate/src/pika_admin.cc | 2106 ----------------- .../pika_migrate/src/pika_auxiliary_thread.cc | 55 - tools/pika_migrate/src/pika_binlog.cc | 357 --- tools/pika_migrate/src/pika_binlog_reader.cc | 267 --- .../src/pika_binlog_transverter.cc | 200 -- tools/pika_migrate/src/pika_bit.cc | 221 -- tools/pika_migrate/src/pika_client_conn.cc | 256 -- tools/pika_migrate/src/pika_cluster.cc | 495 ---- .../src/pika_cmd_table_manager.cc | 88 - tools/pika_migrate/src/pika_command.cc | 763 ------ tools/pika_migrate/src/pika_conf.cc 
| 502 ---- .../src/pika_data_distribution.cc | 48 - .../pika_migrate/src/pika_dispatch_thread.cc | 78 - tools/pika_migrate/src/pika_geo.cc | 551 ----- tools/pika_migrate/src/pika_geohash.cc | 295 --- tools/pika_migrate/src/pika_geohash_helper.cc | 235 -- tools/pika_migrate/src/pika_hash.cc | 609 ----- tools/pika_migrate/src/pika_hyperloglog.cc | 73 - .../pika_migrate/src/pika_inner_message.proto | 145 -- tools/pika_migrate/src/pika_kv.cc | 1447 ----------- tools/pika_migrate/src/pika_list.cc | 321 --- tools/pika_migrate/src/pika_meta.cc | 131 - tools/pika_migrate/src/pika_monitor_thread.cc | 203 -- tools/pika_migrate/src/pika_partition.cc | 679 ------ tools/pika_migrate/src/pika_pubsub.cc | 222 -- tools/pika_migrate/src/pika_repl_bgworker.cc | 301 --- tools/pika_migrate/src/pika_repl_client.cc | 271 --- .../pika_migrate/src/pika_repl_client_conn.cc | 261 -- .../src/pika_repl_client_thread.cc | 48 - tools/pika_migrate/src/pika_repl_server.cc | 126 - .../pika_migrate/src/pika_repl_server_conn.cc | 448 ---- .../src/pika_repl_server_thread.cc | 32 - tools/pika_migrate/src/pika_rm.cc | 1634 ------------- tools/pika_migrate/src/pika_rsync_service.cc | 105 - tools/pika_migrate/src/pika_sender.cc | 189 -- tools/pika_migrate/src/pika_server.cc | 1598 ------------- tools/pika_migrate/src/pika_set.cc | 391 --- tools/pika_migrate/src/pika_slot.cc | 436 ---- tools/pika_migrate/src/pika_table.cc | 261 -- tools/pika_migrate/src/pika_zset.cc | 946 -------- tools/pika_migrate/src/redis_sender.cc | 222 -- tools/pika_migrate/tests/README.md | 4 - tools/pika_migrate/tests/assets/default.conf | 79 - tools/pika_migrate/tests/assets/encodings.rdb | Bin 667 -> 0 bytes .../pika_migrate/tests/assets/hash-zipmap.rdb | Bin 35 -> 0 bytes .../tests/helpers/bg_complex_data.tcl | 10 - .../tests/helpers/gen_write_load.tcl | 15 - tools/pika_migrate/tests/instances.tcl | 407 ---- .../tests/integration/aof-race.tcl | 35 - tools/pika_migrate/tests/integration/aof.tcl | 236 -- .../convert-zipmap-hash-on-load.tcl | 35 - tools/pika_migrate/tests/integration/rdb.tcl | 98 - .../tests/integration/redis-cli.tcl | 208 -- .../tests/integration/replication-2.tcl | 87 - .../tests/integration/replication-3.tcl | 101 - .../tests/integration/replication-4.tcl | 136 -- .../tests/integration/replication-psync.tcl | 115 - .../tests/integration/replication.tcl | 215 -- tools/pika_migrate/tests/sentinel/run.tcl | 22 - .../tests/sentinel/tests/00-base.tcl | 126 - .../tests/sentinel/tests/01-conf-update.tcl | 39 - .../tests/sentinel/tests/02-slaves-reconf.tcl | 84 - .../sentinel/tests/03-runtime-reconf.tcl | 1 - .../sentinel/tests/04-slave-selection.tcl | 5 - .../tests/sentinel/tests/05-manual.tcl | 44 - .../sentinel/tests/includes/init-tests.tcl | 72 - .../tests/sentinel/tmp/.gitignore | 2 - tools/pika_migrate/tests/support/redis.tcl | 294 --- tools/pika_migrate/tests/support/server.tcl | 337 --- tools/pika_migrate/tests/support/test.tcl | 130 - tools/pika_migrate/tests/support/tmpfile.tcl | 15 - tools/pika_migrate/tests/support/util.tcl | 371 --- tools/pika_migrate/tests/test_helper.tcl | 545 ----- tools/pika_migrate/tests/unit/aofrw.tcl | 210 -- tools/pika_migrate/tests/unit/auth.tcl | 27 - tools/pika_migrate/tests/unit/basic.tcl | 783 ------ tools/pika_migrate/tests/unit/bitops.tcl | 341 --- tools/pika_migrate/tests/unit/dump.tcl | 142 -- tools/pika_migrate/tests/unit/expire.tcl | 201 -- tools/pika_migrate/tests/unit/geo.tcl | 311 --- tools/pika_migrate/tests/unit/hyperloglog.tcl | 250 -- .../pika_migrate/tests/unit/introspection.tcl | 59 - 
tools/pika_migrate/tests/unit/keys.tcl | 54 - .../tests/unit/latency-monitor.tcl | 50 - tools/pika_migrate/tests/unit/limits.tcl | 16 - tools/pika_migrate/tests/unit/maxmemory.tcl | 144 -- .../pika_migrate/tests/unit/memefficiency.tcl | 37 - tools/pika_migrate/tests/unit/multi.tcl | 309 --- tools/pika_migrate/tests/unit/obuf-limits.tcl | 73 - tools/pika_migrate/tests/unit/other.tcl | 245 -- tools/pika_migrate/tests/unit/printver.tcl | 6 - tools/pika_migrate/tests/unit/protocol.tcl | 117 - tools/pika_migrate/tests/unit/pubsub.tcl | 399 ---- tools/pika_migrate/tests/unit/quit.tcl | 40 - tools/pika_migrate/tests/unit/scan.tcl | 239 -- tools/pika_migrate/tests/unit/scripting.tcl | 606 ----- tools/pika_migrate/tests/unit/slowlog.tcl | 70 - tools/pika_migrate/tests/unit/sort.tcl | 311 --- tools/pika_migrate/tests/unit/type/hash.tcl | 470 ---- tools/pika_migrate/tests/unit/type/list-2.tcl | 44 - tools/pika_migrate/tests/unit/type/list-3.tcl | 79 - .../tests/unit/type/list-common.tcl | 5 - tools/pika_migrate/tests/unit/type/list.tcl | 896 ------- tools/pika_migrate/tests/unit/type/set.tcl | 531 ----- tools/pika_migrate/tests/unit/type/zset.tcl | 944 -------- 166 files changed, 38063 deletions(-) delete mode 100644 tools/pika_migrate/.gitattributes delete mode 100644 tools/pika_migrate/.gitignore delete mode 100644 tools/pika_migrate/.travis.yml delete mode 100644 tools/pika_migrate/CODE_OF_CONDUCT.md delete mode 100644 tools/pika_migrate/CONTRIBUTING.md delete mode 100644 tools/pika_migrate/Dockerfile delete mode 100644 tools/pika_migrate/LICENSE delete mode 100644 tools/pika_migrate/Makefile delete mode 100644 tools/pika_migrate/README.md delete mode 100644 tools/pika_migrate/conf/pika.conf delete mode 100755 tools/pika_migrate/detect_environment delete mode 100644 tools/pika_migrate/img.png delete mode 100644 tools/pika_migrate/include/build_version.h delete mode 100644 tools/pika_migrate/include/migrator_thread.h delete mode 100644 tools/pika_migrate/include/pika_admin.h delete mode 100644 tools/pika_migrate/include/pika_auxiliary_thread.h delete mode 100644 tools/pika_migrate/include/pika_binlog.h delete mode 100644 tools/pika_migrate/include/pika_binlog_reader.h delete mode 100644 tools/pika_migrate/include/pika_binlog_transverter.h delete mode 100644 tools/pika_migrate/include/pika_bit.h delete mode 100644 tools/pika_migrate/include/pika_client_conn.h delete mode 100644 tools/pika_migrate/include/pika_cluster.h delete mode 100644 tools/pika_migrate/include/pika_cmd_table_manager.h delete mode 100644 tools/pika_migrate/include/pika_command.h delete mode 100644 tools/pika_migrate/include/pika_conf.h delete mode 100644 tools/pika_migrate/include/pika_data_distribution.h delete mode 100644 tools/pika_migrate/include/pika_define.h delete mode 100644 tools/pika_migrate/include/pika_dispatch_thread.h delete mode 100644 tools/pika_migrate/include/pika_geo.h delete mode 100644 tools/pika_migrate/include/pika_geohash.h delete mode 100644 tools/pika_migrate/include/pika_geohash_helper.h delete mode 100644 tools/pika_migrate/include/pika_hash.h delete mode 100644 tools/pika_migrate/include/pika_hyperloglog.h delete mode 100644 tools/pika_migrate/include/pika_kv.h delete mode 100644 tools/pika_migrate/include/pika_list.h delete mode 100644 tools/pika_migrate/include/pika_meta.h delete mode 100644 tools/pika_migrate/include/pika_monitor_thread.h delete mode 100644 tools/pika_migrate/include/pika_partition.h delete mode 100644 tools/pika_migrate/include/pika_pubsub.h delete mode 100644 
tools/pika_migrate/include/pika_repl_bgworker.h delete mode 100644 tools/pika_migrate/include/pika_repl_client.h delete mode 100644 tools/pika_migrate/include/pika_repl_client_conn.h delete mode 100644 tools/pika_migrate/include/pika_repl_client_thread.h delete mode 100644 tools/pika_migrate/include/pika_repl_server.h delete mode 100644 tools/pika_migrate/include/pika_repl_server_conn.h delete mode 100644 tools/pika_migrate/include/pika_repl_server_thread.h delete mode 100644 tools/pika_migrate/include/pika_rm.h delete mode 100644 tools/pika_migrate/include/pika_rsync_service.h delete mode 100644 tools/pika_migrate/include/pika_sender.h delete mode 100644 tools/pika_migrate/include/pika_server.h delete mode 100644 tools/pika_migrate/include/pika_set.h delete mode 100644 tools/pika_migrate/include/pika_slaveping_thread.h delete mode 100644 tools/pika_migrate/include/pika_slot.h delete mode 100644 tools/pika_migrate/include/pika_table.h delete mode 100644 tools/pika_migrate/include/pika_version.h delete mode 100644 tools/pika_migrate/include/pika_zset.h delete mode 100644 tools/pika_migrate/include/redis_sender.h delete mode 100755 tools/pika_migrate/pikatests.sh delete mode 100644 tools/pika_migrate/src/build_version.cc.in delete mode 100644 tools/pika_migrate/src/migrator_thread.cc delete mode 100644 tools/pika_migrate/src/pika.cc delete mode 100644 tools/pika_migrate/src/pika_admin.cc delete mode 100644 tools/pika_migrate/src/pika_auxiliary_thread.cc delete mode 100644 tools/pika_migrate/src/pika_binlog.cc delete mode 100644 tools/pika_migrate/src/pika_binlog_reader.cc delete mode 100644 tools/pika_migrate/src/pika_binlog_transverter.cc delete mode 100644 tools/pika_migrate/src/pika_bit.cc delete mode 100644 tools/pika_migrate/src/pika_client_conn.cc delete mode 100644 tools/pika_migrate/src/pika_cluster.cc delete mode 100644 tools/pika_migrate/src/pika_cmd_table_manager.cc delete mode 100644 tools/pika_migrate/src/pika_command.cc delete mode 100644 tools/pika_migrate/src/pika_conf.cc delete mode 100644 tools/pika_migrate/src/pika_data_distribution.cc delete mode 100644 tools/pika_migrate/src/pika_dispatch_thread.cc delete mode 100644 tools/pika_migrate/src/pika_geo.cc delete mode 100644 tools/pika_migrate/src/pika_geohash.cc delete mode 100644 tools/pika_migrate/src/pika_geohash_helper.cc delete mode 100644 tools/pika_migrate/src/pika_hash.cc delete mode 100644 tools/pika_migrate/src/pika_hyperloglog.cc delete mode 100644 tools/pika_migrate/src/pika_inner_message.proto delete mode 100644 tools/pika_migrate/src/pika_kv.cc delete mode 100644 tools/pika_migrate/src/pika_list.cc delete mode 100644 tools/pika_migrate/src/pika_meta.cc delete mode 100644 tools/pika_migrate/src/pika_monitor_thread.cc delete mode 100644 tools/pika_migrate/src/pika_partition.cc delete mode 100644 tools/pika_migrate/src/pika_pubsub.cc delete mode 100644 tools/pika_migrate/src/pika_repl_bgworker.cc delete mode 100644 tools/pika_migrate/src/pika_repl_client.cc delete mode 100644 tools/pika_migrate/src/pika_repl_client_conn.cc delete mode 100644 tools/pika_migrate/src/pika_repl_client_thread.cc delete mode 100644 tools/pika_migrate/src/pika_repl_server.cc delete mode 100644 tools/pika_migrate/src/pika_repl_server_conn.cc delete mode 100644 tools/pika_migrate/src/pika_repl_server_thread.cc delete mode 100644 tools/pika_migrate/src/pika_rm.cc delete mode 100644 tools/pika_migrate/src/pika_rsync_service.cc delete mode 100644 tools/pika_migrate/src/pika_sender.cc delete mode 100644 tools/pika_migrate/src/pika_server.cc 
delete mode 100644 tools/pika_migrate/src/pika_set.cc delete mode 100644 tools/pika_migrate/src/pika_slot.cc delete mode 100644 tools/pika_migrate/src/pika_table.cc delete mode 100644 tools/pika_migrate/src/pika_zset.cc delete mode 100644 tools/pika_migrate/src/redis_sender.cc delete mode 100644 tools/pika_migrate/tests/README.md delete mode 100644 tools/pika_migrate/tests/assets/default.conf delete mode 100644 tools/pika_migrate/tests/assets/encodings.rdb delete mode 100644 tools/pika_migrate/tests/assets/hash-zipmap.rdb delete mode 100644 tools/pika_migrate/tests/helpers/bg_complex_data.tcl delete mode 100644 tools/pika_migrate/tests/helpers/gen_write_load.tcl delete mode 100644 tools/pika_migrate/tests/instances.tcl delete mode 100644 tools/pika_migrate/tests/integration/aof-race.tcl delete mode 100644 tools/pika_migrate/tests/integration/aof.tcl delete mode 100644 tools/pika_migrate/tests/integration/convert-zipmap-hash-on-load.tcl delete mode 100644 tools/pika_migrate/tests/integration/rdb.tcl delete mode 100644 tools/pika_migrate/tests/integration/redis-cli.tcl delete mode 100644 tools/pika_migrate/tests/integration/replication-2.tcl delete mode 100644 tools/pika_migrate/tests/integration/replication-3.tcl delete mode 100644 tools/pika_migrate/tests/integration/replication-4.tcl delete mode 100644 tools/pika_migrate/tests/integration/replication-psync.tcl delete mode 100644 tools/pika_migrate/tests/integration/replication.tcl delete mode 100644 tools/pika_migrate/tests/sentinel/run.tcl delete mode 100644 tools/pika_migrate/tests/sentinel/tests/00-base.tcl delete mode 100644 tools/pika_migrate/tests/sentinel/tests/01-conf-update.tcl delete mode 100644 tools/pika_migrate/tests/sentinel/tests/02-slaves-reconf.tcl delete mode 100644 tools/pika_migrate/tests/sentinel/tests/03-runtime-reconf.tcl delete mode 100644 tools/pika_migrate/tests/sentinel/tests/04-slave-selection.tcl delete mode 100644 tools/pika_migrate/tests/sentinel/tests/05-manual.tcl delete mode 100644 tools/pika_migrate/tests/sentinel/tests/includes/init-tests.tcl delete mode 100644 tools/pika_migrate/tests/sentinel/tmp/.gitignore delete mode 100644 tools/pika_migrate/tests/support/redis.tcl delete mode 100644 tools/pika_migrate/tests/support/server.tcl delete mode 100644 tools/pika_migrate/tests/support/test.tcl delete mode 100644 tools/pika_migrate/tests/support/tmpfile.tcl delete mode 100644 tools/pika_migrate/tests/support/util.tcl delete mode 100644 tools/pika_migrate/tests/test_helper.tcl delete mode 100644 tools/pika_migrate/tests/unit/aofrw.tcl delete mode 100644 tools/pika_migrate/tests/unit/auth.tcl delete mode 100644 tools/pika_migrate/tests/unit/basic.tcl delete mode 100644 tools/pika_migrate/tests/unit/bitops.tcl delete mode 100644 tools/pika_migrate/tests/unit/dump.tcl delete mode 100644 tools/pika_migrate/tests/unit/expire.tcl delete mode 100644 tools/pika_migrate/tests/unit/geo.tcl delete mode 100755 tools/pika_migrate/tests/unit/hyperloglog.tcl delete mode 100644 tools/pika_migrate/tests/unit/introspection.tcl delete mode 100644 tools/pika_migrate/tests/unit/keys.tcl delete mode 100644 tools/pika_migrate/tests/unit/latency-monitor.tcl delete mode 100644 tools/pika_migrate/tests/unit/limits.tcl delete mode 100644 tools/pika_migrate/tests/unit/maxmemory.tcl delete mode 100644 tools/pika_migrate/tests/unit/memefficiency.tcl delete mode 100644 tools/pika_migrate/tests/unit/multi.tcl delete mode 100644 tools/pika_migrate/tests/unit/obuf-limits.tcl delete mode 100644 tools/pika_migrate/tests/unit/other.tcl delete 
mode 100644 tools/pika_migrate/tests/unit/printver.tcl delete mode 100644 tools/pika_migrate/tests/unit/protocol.tcl delete mode 100644 tools/pika_migrate/tests/unit/pubsub.tcl delete mode 100644 tools/pika_migrate/tests/unit/quit.tcl delete mode 100644 tools/pika_migrate/tests/unit/scan.tcl delete mode 100644 tools/pika_migrate/tests/unit/scripting.tcl delete mode 100644 tools/pika_migrate/tests/unit/slowlog.tcl delete mode 100644 tools/pika_migrate/tests/unit/sort.tcl delete mode 100644 tools/pika_migrate/tests/unit/type/hash.tcl delete mode 100644 tools/pika_migrate/tests/unit/type/list-2.tcl delete mode 100644 tools/pika_migrate/tests/unit/type/list-3.tcl delete mode 100644 tools/pika_migrate/tests/unit/type/list-common.tcl delete mode 100644 tools/pika_migrate/tests/unit/type/list.tcl delete mode 100644 tools/pika_migrate/tests/unit/type/set.tcl delete mode 100644 tools/pika_migrate/tests/unit/type/zset.tcl diff --git a/tools/pika_migrate/.gitattributes b/tools/pika_migrate/.gitattributes deleted file mode 100644 index 3ff2dd9c7b..0000000000 --- a/tools/pika_migrate/.gitattributes +++ /dev/null @@ -1 +0,0 @@ -tests/* linguist-vendored diff --git a/tools/pika_migrate/.gitignore b/tools/pika_migrate/.gitignore deleted file mode 100644 index 5d21ed9c5a..0000000000 --- a/tools/pika_migrate/.gitignore +++ /dev/null @@ -1,49 +0,0 @@ -# Compiled Object files -*.slo -*.lo -*.o -*.obj -*pb.cc -*pb.h - -# Precompiled Headers -*.gch -*.pch - -# Compiled Dynamic libraries -*.so -*.dylib -*.dll - -# Fortran module files -*.mod - -# Compiled Static libraries -*.lai -*.la -*.a -*.lib - -# Executables -*.exe -*.out -*.app - -# Log path -make_config.mk -log/ -lib/ -tools/ -output/ - -# DB -db/ -dump/ - -# third party -gdb.txt -tags - -make_config.mk -src/*.d -src/build_version.cc diff --git a/tools/pika_migrate/.travis.yml b/tools/pika_migrate/.travis.yml deleted file mode 100644 index cdc94a458c..0000000000 --- a/tools/pika_migrate/.travis.yml +++ /dev/null @@ -1,26 +0,0 @@ -sudo: required -dist: trusty -language: cpp - -os: - - linux - -env: - global: - - PROTOBUF_VERSION=2.5.0 - -install: - - wget https://github.com/protocolbuffers/protobuf/releases/download/v$PROTOBUF_VERSION/protobuf-$PROTOBUF_VERSION.tar.bz2 - - tar xvf protobuf-$PROTOBUF_VERSION.tar.bz2 - - ( cd protobuf-$PROTOBUF_VERSION && ./configure --prefix=/usr && make && sudo make install ) - -addons: - apt: - packages: ['libsnappy-dev', 'libprotobuf-dev', 'libgoogle-glog-dev'] - -compiler: - - gcc - -language: cpp - -script: make diff --git a/tools/pika_migrate/CODE_OF_CONDUCT.md b/tools/pika_migrate/CODE_OF_CONDUCT.md deleted file mode 100644 index f50b192489..0000000000 --- a/tools/pika_migrate/CODE_OF_CONDUCT.md +++ /dev/null @@ -1,46 +0,0 @@ -# Contributor Covenant Code of Conduct - -## Our Pledge - -In the interest of fostering an open and welcoming environment, we as contributors and maintainers pledge to making participation in our project and our community a harassment-free experience for everyone, regardless of age, body size, disability, ethnicity, gender identity and expression, level of experience, nationality, personal appearance, race, religion, or sexual identity and orientation. 
- -## Our Standards - -Examples of behavior that contributes to creating a positive environment include: - -* Using welcoming and inclusive language -* Being respectful of differing viewpoints and experiences -* Gracefully accepting constructive criticism -* Focusing on what is best for the community -* Showing empathy towards other community members - -Examples of unacceptable behavior by participants include: - -* The use of sexualized language or imagery and unwelcome sexual attention or advances -* Trolling, insulting/derogatory comments, and personal or political attacks -* Public or private harassment -* Publishing others' private information, such as a physical or electronic address, without explicit permission -* Other conduct which could reasonably be considered inappropriate in a professional setting - -## Our Responsibilities - -Project maintainers are responsible for clarifying the standards of acceptable behavior and are expected to take appropriate and fair corrective action in response to any instances of unacceptable behavior. - -Project maintainers have the right and responsibility to remove, edit, or reject comments, commits, code, wiki edits, issues, and other contributions that are not aligned to this Code of Conduct, or to ban temporarily or permanently any contributor for other behaviors that they deem inappropriate, threatening, offensive, or harmful. - -## Scope - -This Code of Conduct applies both within project spaces and in public spaces when an individual is representing the project or its community. Examples of representing a project or community include using an official project e-mail address, posting via an official social media account, or acting as an appointed representative at an online or offline event. Representation of a project may be further defined and clarified by project maintainers. - -## Enforcement - -Instances of abusive, harassing, or otherwise unacceptable behavior may be reported by contacting the project team at g-infra-bada@360.cn. The project team will review and investigate all complaints, and will respond in a way that it deems appropriate to the circumstances. The project team is obligated to maintain confidentiality with regard to the reporter of an incident. Further details of specific enforcement policies may be posted separately. - -Project maintainers who do not follow or enforce the Code of Conduct in good faith may face temporary or permanent repercussions as determined by other members of the project's leadership. 
- -## Attribution - -This Code of Conduct is adapted from the [Contributor Covenant][homepage], version 1.4, available at [http://contributor-covenant.org/version/1/4][version] - -[homepage]: http://contributor-covenant.org -[version]: http://contributor-covenant.org/version/1/4/ diff --git a/tools/pika_migrate/CONTRIBUTING.md b/tools/pika_migrate/CONTRIBUTING.md deleted file mode 100644 index 4cf487071f..0000000000 --- a/tools/pika_migrate/CONTRIBUTING.md +++ /dev/null @@ -1 +0,0 @@ -### Contributing to pika diff --git a/tools/pika_migrate/Dockerfile b/tools/pika_migrate/Dockerfile deleted file mode 100644 index 3fc690c3e7..0000000000 --- a/tools/pika_migrate/Dockerfile +++ /dev/null @@ -1,21 +0,0 @@ -FROM centos:latest -MAINTAINER left2right - -RUN rpm -ivh https://mirrors.ustc.edu.cn/epel/epel-release-latest-7.noarch.rpm && \ - yum -y update && \ - yum -y install snappy-devel && \ - yum -y install protobuf-devel && \ - yum -y install gflags-devel && \ - yum -y install glog-devel && \ - yum -y install gcc-c++ && \ - yum -y install make && \ - yum -y install which && \ - yum -y install git - -ENV PIKA /pika -COPY . ${PIKA} -WORKDIR ${PIKA} -RUN make -ENV PATH ${PIKA}/output/bin:${PATH} - -WORKDIR ${PIKA}/output diff --git a/tools/pika_migrate/LICENSE b/tools/pika_migrate/LICENSE deleted file mode 100644 index 93ce6ffc0b..0000000000 --- a/tools/pika_migrate/LICENSE +++ /dev/null @@ -1,10 +0,0 @@ - The MIT License (MIT) - -Copyright © 2018 - -Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the “Software”), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED “AS IS”, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. - diff --git a/tools/pika_migrate/Makefile b/tools/pika_migrate/Makefile deleted file mode 100644 index be7e8191f5..0000000000 --- a/tools/pika_migrate/Makefile +++ /dev/null @@ -1,245 +0,0 @@ -CLEAN_FILES = # deliberately empty, so we can append below. -CXX=g++ -PLATFORM_LDFLAGS= -lpthread -lrt -PLATFORM_CXXFLAGS= -std=c++11 -fno-builtin-memcmp -msse -msse4.2 -PROFILING_FLAGS=-pg -OPT= -LDFLAGS += -Wl,-rpath=$(RPATH) - -# DEBUG_LEVEL can have two values: -# * DEBUG_LEVEL=2; this is the ultimate debug mode. It will compile pika -# without any optimizations. To compile with level 2, issue `make dbg` -# * DEBUG_LEVEL=0; this is the debug level we use for release. If you're -# running pika in production you most definitely want to compile pika -# with debug level 0. 
To compile with level 0, run `make`, - -# Set the default DEBUG_LEVEL to 0 -DEBUG_LEVEL?=0 - -ifeq ($(MAKECMDGOALS),dbg) - DEBUG_LEVEL=2 -endif - -ifneq ($(DISABLE_UPDATE_SB), 1) -$(info updating submodule) -dummy := $(shell (git submodule init && git submodule update)) -endif - -# compile with -O2 if debug level is not 2 -ifneq ($(DEBUG_LEVEL), 2) -OPT += -O2 -fno-omit-frame-pointer -# if we're compiling for release, compile without debug code (-DNDEBUG) and -# don't treat warnings as errors -OPT += -DNDEBUG -DISABLE_WARNING_AS_ERROR=1 -# Skip for archs that don't support -momit-leaf-frame-pointer -ifeq (,$(shell $(CXX) -fsyntax-only -momit-leaf-frame-pointer -xc /dev/null 2>&1)) -OPT += -momit-leaf-frame-pointer -endif -else -$(warning Warning: Compiling in debug mode. Don't use the resulting binary in production) -OPT += $(PROFILING_FLAGS) -DEBUG_SUFFIX = "_debug" -endif - -# Link tcmalloc if exist -dummy := $(shell ("$(CURDIR)/detect_environment" "$(CURDIR)/make_config.mk")) -include make_config.mk -CLEAN_FILES += $(CURDIR)/make_config.mk -PLATFORM_LDFLAGS += $(TCMALLOC_LDFLAGS) -PLATFORM_LDFLAGS += $(ROCKSDB_LDFLAGS) -PLATFORM_CXXFLAGS += $(TCMALLOC_EXTENSION_FLAGS) - -# ---------------------------------------------- -OUTPUT = $(CURDIR)/output -THIRD_PATH = $(CURDIR)/third -SRC_PATH = $(CURDIR)/src - -# ----------------Dependences------------------- - -ifndef SLASH_PATH -SLASH_PATH = $(THIRD_PATH)/slash -endif -SLASH = $(SLASH_PATH)/slash/lib/libslash$(DEBUG_SUFFIX).a - -ifndef PINK_PATH -PINK_PATH = $(THIRD_PATH)/pink -endif -PINK = $(PINK_PATH)/pink/lib/libpink$(DEBUG_SUFFIX).a - -ifndef ROCKSDB_PATH -ROCKSDB_PATH = $(THIRD_PATH)/rocksdb -endif -ROCKSDB = $(ROCKSDB_PATH)/librocksdb$(DEBUG_SUFFIX).a - -ifndef GLOG_PATH -GLOG_PATH = $(THIRD_PATH)/glog -endif - -ifndef BLACKWIDOW_PATH -BLACKWIDOW_PATH = $(THIRD_PATH)/blackwidow -endif -BLACKWIDOW = $(BLACKWIDOW_PATH)/lib/libblackwidow$(DEBUG_SUFFIX).a - - -ifeq ($(360), 1) -GLOG := $(GLOG_PATH)/.libs/libglog.a -endif - -INCLUDE_PATH = -I. 
\ - -I$(SLASH_PATH) \ - -I$(PINK_PATH) \ - -I$(BLACKWIDOW_PATH)/include \ - -I$(BLACKWIDOW_PATH)\ - -I$(ROCKSDB_PATH) \ - -I$(ROCKSDB_PATH)/include \ - -I$(GLOG_PATH)/src \ - -LIB_PATH = -L./ \ - -L$(SLASH_PATH)/slash/lib \ - -L$(PINK_PATH)/pink/lib \ - -L$(BLACKWIDOW_PATH)/lib \ - -L$(ROCKSDB_PATH) \ - -L$(GLOG_PATH)/.libs \ - -LDFLAGS += $(LIB_PATH) \ - -lpink$(DEBUG_SUFFIX) \ - -lslash$(DEBUG_SUFFIX) \ - -lblackwidow$(DEBUG_SUFFIX) \ - -lrocksdb$(DEBUG_SUFFIX) \ - -lglog \ - -lprotobuf \ - -static-libstdc++ \ - -# ---------------End Dependences---------------- - -VERSION_CC=$(SRC_PATH)/build_version.cc -LIB_SOURCES := $(VERSION_CC) \ - $(filter-out $(VERSION_CC), $(wildcard $(SRC_PATH)/*.cc)) - -PIKA_PROTO := $(wildcard $(SRC_PATH)/*.proto) -PIKA_PROTO_GENS:= $(PIKA_PROTO:%.proto=%.pb.h) $(PIKA_PROTO:%.proto=%.pb.cc) - - -#----------------------------------------------- - -AM_DEFAULT_VERBOSITY = 0 - -AM_V_GEN = $(am__v_GEN_$(V)) -am__v_GEN_ = $(am__v_GEN_$(AM_DEFAULT_VERBOSITY)) -am__v_GEN_0 = @echo " GEN " $(notdir $@); -am__v_GEN_1 = -AM_V_at = $(am__v_at_$(V)) -am__v_at_ = $(am__v_at_$(AM_DEFAULT_VERBOSITY)) -am__v_at_0 = @ -am__v_at_1 = - -AM_V_CC = $(am__v_CC_$(V)) -am__v_CC_ = $(am__v_CC_$(AM_DEFAULT_VERBOSITY)) -am__v_CC_0 = @echo " CC " $(notdir $@); -am__v_CC_1 = -CCLD = $(CC) -LINK = $(CCLD) $(AM_CFLAGS) $(CFLAGS) $(AM_LDFLAGS) $(LDFLAGS) -o $@ -AM_V_CCLD = $(am__v_CCLD_$(V)) -am__v_CCLD_ = $(am__v_CCLD_$(AM_DEFAULT_VERBOSITY)) -am__v_CCLD_0 = @echo " CCLD " $(notdir $@); -am__v_CCLD_1 = - -AM_LINK = $(AM_V_CCLD)$(CXX) $^ $(EXEC_LDFLAGS) -o $@ $(LDFLAGS) - -CXXFLAGS += -g - -# This (the first rule) must depend on "all". -default: all - -WARNING_FLAGS = -W -Wextra -Wall -Wsign-compare \ - -Wno-unused-parameter -Woverloaded-virtual \ - -Wnon-virtual-dtor -Wno-missing-field-initializers - -ifndef DISABLE_WARNING_AS_ERROR - WARNING_FLAGS += -Werror -endif - -CXXFLAGS += $(WARNING_FLAGS) $(INCLUDE_PATH) $(PLATFORM_CXXFLAGS) $(OPT) - -LDFLAGS += $(PLATFORM_LDFLAGS) - -date := $(shell date +%F) -git_sha := $(shell git rev-parse HEAD 2>/dev/null) -gen_build_version = sed -e s/@@GIT_SHA@@/$(git_sha)/ -e s/@@GIT_DATE_TIME@@/$(date)/ src/build_version.cc.in -# Record the version of the source that we are compiling. -# We keep a record of the git revision in this file. It is then built -# as a regular source file as part of the compilation process. -# One can run "strings executable_filename | grep _build_" to find -# the version of the source that we used to build the executable file. 
-CLEAN_FILES += $(SRC_PATH)/build_version.cc - -$(SRC_PATH)/build_version.cc: FORCE - $(AM_V_GEN)rm -f $@-t - $(AM_V_at)$(gen_build_version) > $@-t - $(AM_V_at)if test -f $@; then \ - cmp -s $@-t $@ && rm -f $@-t || mv -f $@-t $@; \ - else mv -f $@-t $@; fi -FORCE: - -LIBOBJECTS = $(LIB_SOURCES:.cc=.o) -PROTOOBJECTS = $(PIKA_PROTO:.proto=.pb.o) - -# if user didn't config LIBNAME, set the default -ifeq ($(BINNAME),) -# we should only run pika in production with DEBUG_LEVEL 0 -BINNAME=pika$(DEBUG_SUFFIX) -endif -BINARY = ${BINNAME} - -.PHONY: distclean clean dbg all - -%.pb.h %.pb.cc: %.proto - $(AM_V_GEN)protoc --proto_path=$(SRC_PATH) --cpp_out=$(SRC_PATH) $< - -%.o: %.cc - $(AM_V_CC)$(CXX) $(CXXFLAGS) -c $< -o $@ - -proto: $(PIKA_PROTO_GENS) - -all: $(BINARY) - -dbg: $(BINARY) - -$(BINARY): $(SLASH) $(PINK) $(ROCKSDB) $(BLACKWIDOW) $(GLOG) $(PROTOOBJECTS) $(LIBOBJECTS) - $(AM_V_at)rm -f $@ - $(AM_V_at)$(AM_LINK) - $(AM_V_at)rm -rf $(OUTPUT) - $(AM_V_at)mkdir -p $(OUTPUT)/bin - $(AM_V_at)mv $@ $(OUTPUT)/bin - $(AM_V_at)cp -r $(CURDIR)/conf $(OUTPUT) - - -$(SLASH): - $(AM_V_at)make -C $(SLASH_PATH)/slash/ DEBUG_LEVEL=$(DEBUG_LEVEL) - -$(PINK): - $(AM_V_at)make -C $(PINK_PATH)/pink/ DEBUG_LEVEL=$(DEBUG_LEVEL) NO_PB=0 SLASH_PATH=$(SLASH_PATH) - -$(ROCKSDB): - $(AM_V_at)make -j $(PROCESSOR_NUMS) -C $(ROCKSDB_PATH)/ static_lib DISABLE_JEMALLOC=1 DEBUG_LEVEL=$(DEBUG_LEVEL) - -$(BLACKWIDOW): - $(AM_V_at)make -C $(BLACKWIDOW_PATH) ROCKSDB_PATH=$(ROCKSDB_PATH) SLASH_PATH=$(SLASH_PATH) DEBUG_LEVEL=$(DEBUG_LEVEL) - -$(GLOG): - cd $(THIRD_PATH)/glog; if [ ! -f ./Makefile ]; then ./configure --disable-shared; fi; make; echo '*' > $(CURDIR)/third/glog/.gitignore; - -clean: - rm -rf $(OUTPUT) - rm -rf $(CLEAN_FILES) - rm -rf $(PIKA_PROTO_GENS) - find $(SRC_PATH) -name "*.[oda]*" -exec rm -f {} \; - find $(SRC_PATH) -type f -regex ".*\.\(\(gcda\)\|\(gcno\)\)" -exec rm {} \; - -distclean: clean - make -C $(PINK_PATH)/pink/ SLASH_PATH=$(SLASH_PATH) clean - make -C $(SLASH_PATH)/slash/ clean - make -C $(BLACKWIDOW_PATH)/ clean - make -C $(ROCKSDB_PATH)/ clean -# make -C $(GLOG_PATH)/ clean 
diff --git a/tools/pika_migrate/README.md b/tools/pika_migrate/README.md
deleted file mode 100644
index 2387bb1bf8..0000000000
--- a/tools/pika_migrate/README.md
+++ /dev/null
@@ -1,76 +0,0 @@
-
-
-## Supported Versions
-
-Works with PIKA 3.2.0 and above (3.5.x and 4.0.x are not supported), in single-node mode and with a single DB only. If your PIKA version is below 3.2.0, first upgrade the kernel to 3.2.0; for details, see "Upgrading the PIKA kernel version to 3.2.0".
-### Background
-The pika\_to\_redis tool previously provided by the Pika project could only migrate data from Pika's DB to Pika or Redis offline, with no incremental sync. This tool is in effect a special Pika: once it becomes a slave, it internally forwards the data it obtains from the master to Redis, and it also supports incremental sync, enabling hot migration.
-
-## How Migration Works
-
-The tool migrates data from PIKA to Redis online, with both full and incremental synchronization. pika-migrate poses as a PIKA slave, fetches data from the master, and forwards it to Redis; incremental sync then keeps following the master, so the migration can happen live.
-1. pika-migrate issues a dbsync request to obtain the master's full DB snapshot, together with the binlog position that snapshot corresponds to.
-2. After receiving the master's full DB data, it scans the DB and forwards its contents to Redis in batches.
-3. Starting from the binlog position obtained earlier, it performs incremental sync against the master; during this phase, every binlog entry received from the master is reassembled into a Redis command and forwarded to Redis.
-
-
-## Caveats
-
-PIKA allows different data structures to use the same key name, but Redis does not. When the same key holds data in several structures, the first structure migrated to Redis wins; the data of the other structures under that key is lost.
-The tool can only hot-migrate a PIKA running in single-node mode with a single DB. In cluster mode, or with multiple DBs, the tool reports an error and exits.
-To avoid writing dirty data to Redis through repeated full syncs (which can happen when the master's binlogs have been purged), the tool protects itself: if a full sync is triggered a second time, it reports an error and exits.
-
-## Build Steps
-```shell
-# If the sub-repositories under the third directory are empty, update the submodules from the tool's root directory
-git submodule update --init --recursive
-# Build
-make
-```
-
-### Build Notes
-
-1. If rocksdb fails to build, first prepare the environment following the steps [here](https://github.com/facebook/rocksdb/blob/004237e62790320d8e630456cbeb6f4a1f3579c2/INSTALL.md).
-2. If you hit an error like:
-```shell
-error: implicitly-declared 'constexpr rocksdb::FileDescriptor::FileDescriptor(const rocksdb::FileDescriptor&)' is deprecated [-Werror=deprecated-copy]
-```
-you can edit the makefile under tools/pika_migrate/third/rocksdb and set:
-WARNING_FLAGS = -Wno-missing-field-initializers -Wno-unused-parameter
-
-## Migration Steps
-
-1. Run the following command on the PIKA master so that it keeps 10000 binlog files.
-
-```shell
-config set expire-logs-nums 10000
-```
-
-```text
-Note:
-It may take a long time for pika-port to write the full data set into Redis, during which the master's original binlog position may be purged. Keeping 10000 binlog files on the PIKA master ensures the corresponding binlog files still exist when the tool later requests incremental sync.
-Binlog files take up disk space; choose how many to keep based on your actual situation.
-```
-
-2. Edit the following parameters in the migration tool's configuration file pika.conf.
-   ![img.png](img.png)
-   target-redis-host: the IP address of the target Redis.
-   target-redis-port: the port of the target Redis.
-   target-redis-pwd: the password of the default Redis account.
-   sync-batch-num: pika-migrate packs sync-batch-num items received from the master into one batch before sending them to Redis, improving forwarding efficiency.
-   redis-sender-num: the number of threads used to forward packets. Commands are distributed to the sending threads by the hash of their key, so multi-threaded sending will not corrupt per-key ordering.
-3. Run the following command in the tool's directory to start pika-migrate, and check its output.
-```shell
-pika -c pika.conf
-```
-
-4. Run the following command to have the migration tool pose as a slave and request sync from the master, and watch for error messages.
-```shell
-slaveof ip port force
-```
-
-5. Once the master-slave relationship is established, pika-migrate forwards data to the target Redis at the same time. Run the following command to check the replication lag. You can also write a marker key on the master and check whether it immediately shows up on the Redis side to tell when the data is fully synced.
-```shell
-info Replication
-```
diff --git a/tools/pika_migrate/conf/pika.conf b/tools/pika_migrate/conf/pika.conf
deleted file mode 100644
index d1dd3f8831..0000000000
--- a/tools/pika_migrate/conf/pika.conf
+++ /dev/null
@@ -1,144 +0,0 @@
-# Pika port
-port : 9222
-# Thread Number
-thread-num : 1
-# Thread Pool Size
-thread-pool-size : 12
-# Sync Thread Number
-sync-thread-num : 6
-# Pika log path
-log-path : ./log/
-# Pika db path
-db-path : ./db/
-# Pika write-buffer-size
-write-buffer-size : 268435456
-# Pika timeout
-timeout : 60
-# Requirepass
-requirepass :
-# Masterauth
-masterauth :
-# Userpass
-userpass :
-# User Blacklist
-userblacklist :
-# if this option is set to 'classic', that means pika support multiple DB, in
-# this mode, option databases enable
-# if this option is set to 'sharding', that means pika support multiple Table, you
-# can specify slot num for each table, in this mode, option default-slot-num enable
-# Pika instance mode [classic | sharding]
-instance-mode : classic
-# Set the number of databases. The default database is DB 0, you can select
-# a different one on a per-connection basis using SELECT <dbid> where
-# dbid is a number between 0 and 'databases' - 1, limited in [1, 8]
-databases : 1
-# default slot number each table in sharding mode
-default-slot-num : 1024
-# Dump Prefix
-dump-prefix :
-# daemonize [yes | no]
-#daemonize : yes
-# Dump Path
-dump-path : ./dump/
-# Expire-dump-days
-dump-expire : 0
-# pidfile Path
-pidfile : ./pika.pid
-# Max Connection
-maxclients : 20000
-# the per file size of sst to compact, default is 2M
-target-file-size-base : 20971520
-# Expire-logs-days
-expire-logs-days : 7
-# Expire-logs-nums
-expire-logs-nums : 10
-# Root-connection-num
-root-connection-num : 2
-# Slowlog-write-errorlog
-slowlog-write-errorlog : no
-# Slowlog-log-slower-than
-slowlog-log-slower-than : 10000
-# Slowlog-max-len
-slowlog-max-len : 128
-# Pika db sync path
-db-sync-path : ./dbsync/
-# db sync speed(MB) max is set to 1024MB, min is set to 0, and if below 0 or above 1024, the value will be adjusted to 1024
-db-sync-speed : -1
-# The slave priority
-slave-priority : 100
-# network interface
-#network-interface : eth1
-# replication
-#slaveof : master-ip:master-port
-
-# CronTask, format 1: start-end/ratio, like 02-04/60, pika will check to schedule compaction between 2 to 4 o'clock everyday
-# if the freesize/disksize > 60%.
-# format 2: week/start-end/ratio, like 3/02-04/60, pika will check to schedule compaction between 2 to 4 o'clock
-# every wednesday, if the freesize/disksize > 60%.
-# NOTICE: if compact-interval is set, compact-cron will be masked and disabled.
-#
-#compact-cron : 3/02-04/60
-
-# Compact-interval, format: interval/ratio, like 6/60, pika will check to schedule compaction every 6 hours,
-# if the freesize/disksize > 60%. NOTICE: compact-interval takes priority over compact-cron;
-#compact-interval :
-
-# server-id for hub
-server-id : 1
-# the size of flow control window while sync binlog between master and slave. Default is 9000 and the maximum is 90000.
-sync-window-size : 9000
-
-###################
-## Migrate Settings
-###################
-
-target-redis-host : 127.0.0.1
-target-redis-port : 6379
-target-redis-pwd :
-
-sync-batch-num : 100
-redis-sender-num : 10
-
-###################
-## Critical Settings
-###################
-# write_binlog [yes | no]
-write-binlog : yes
-# binlog file size: default is 100M, limited in [1K, 2G]
-binlog-file-size : 104857600
-# Automatically triggers a small compaction according to statistics
-# Use the cache to store up to 'max-cache-statistic-keys' keys
-# if 'max-cache-statistic-keys' set to '0', that means turn off the statistics function
-# it also doesn't automatically trigger a small compact feature
-max-cache-statistic-keys : 0
-# When 'delete' or 'overwrite' a specific multi-data structure key 'small-compaction-threshold' times,
-# a small compact is triggered automatically, default is 5000, limited in [1, 100000]
-small-compaction-threshold : 5000
-# If the total size of all live memtables of all the DBs exceeds
-# the limit, a flush will be triggered in the next DB to which the next write
-# is issued.
-max-write-buffer-size : 10737418240
-# Limit some command response size, like Scan, Keys*
-max-client-response-size : 1073741824
-# Compression
-compression : snappy
-# max-background-flushes: default is 1, limited in [1, 4]
-max-background-flushes : 1
-# max-background-compactions: default is 2, limited in [1, 8]
-max-background-compactions : 2
-# maximum value of Rocksdb cached open file descriptors
-max-cache-files : 5000
-# max_bytes_for_level_multiplier: default is 10, you can change it to 5
-max-bytes-for-level-multiplier : 10
-# BlockBasedTable block_size, default 4k
-# block-size: 4096
-# block LRU cache, default 8M, 0 to disable
-# block-cache: 8388608
-# whether the block cache is shared among the RocksDB instances, default is per CF
-# share-block-cache: no
-# whether or not index and filter blocks is stored in block cache
-# cache-index-and-filter-blocks: no
-# when set to yes, bloomfilter of the last level will not be built
-# optimize-filters-for-hits: no
-# https://github.com/facebook/rocksdb/wiki/Leveled-Compaction#levels-target-size
-# level-compaction-dynamic-level-bytes: no
diff --git a/tools/pika_migrate/detect_environment b/tools/pika_migrate/detect_environment
deleted file mode 100755
index a316ec02da..0000000000
--- a/tools/pika_migrate/detect_environment
+++ /dev/null
@@ -1,112 +0,0 @@
-#!/bin/sh
-
-OUTPUT=$1
-if test -z "$OUTPUT"; then
-  echo "usage: $0 <output-file>" >&2
-  exit 1
-fi
-
-# Delete existing output, if it exists
-rm -f "$OUTPUT"
-touch "$OUTPUT"
-
-if test -z "$CXX"; then
-    CXX=g++
-fi
-
-# Test whether tcmalloc is available
-if echo 'int main() {}' | $CXX $CFLAGS -x c++ - -o /dev/null \
-  -ltcmalloc 2>/dev/null; then
-    TCMALLOC_LDFLAGS=" -ltcmalloc"
-fi
-
-# Test whether malloc_extension is available
-$CXX $CFLAGS -x c++ - -o /dev/null -ltcmalloc 2>/dev/null <<EOF
-  #include <gperftools/malloc_extension.h>
-  int main() {
-    MallocExtension::instance()->Initialize();;
-    return 0;
-  }
-EOF
-if [ "$?" = 0 ]; then
-    TCMALLOC_EXTENSION_FLAGS=" -DTCMALLOC_EXTENSION"
-fi
-
-# Test whether Snappy library is installed
-# http://code.google.com/p/snappy/
-$CXX $CFLAGS -x c++ - -o /dev/null 2>/dev/null <<EOF
-  #include <snappy.h>
-  int main() {}
-EOF
-if [ "$?" = 0 ]; then
-    ROCKSDB_LDFLAGS="$ROCKSDB_LDFLAGS -lsnappy"
-fi
-
-# Test whether gflags library is installed
-# http://gflags.github.io/gflags/
-# check if the namespace is gflags
-$CXX $CFLAGS -x c++ - -o /dev/null 2>/dev/null << EOF
-  #include <gflags/gflags.h>
-  using namespace gflags;
-  int main() {}
-EOF
-if [ "$?" = 0 ]; then
-  ROCKSDB_LDFLAGS="$ROCKSDB_LDFLAGS -lgflags"
-else
-  # check if namespace is google
-  $CXX $CFLAGS -x c++ - -o /dev/null 2>/dev/null << EOF
-  #include <gflags/gflags.h>
-  using namespace google;
-  int main() {}
-EOF
-if [ "$?" = 0 ]; then
-  ROCKSDB_LDFLAGS="$ROCKSDB_LDFLAGS -lgflags"
-fi
-fi
-
-# Test whether zlib library is installed
-$CXX $CFLAGS $COMMON_FLAGS -x c++ - -o /dev/null 2>/dev/null <<EOF
-  #include <zlib.h>
-  int main() {}
-EOF
-if [ "$?" = 0 ]; then
-    ROCKSDB_LDFLAGS="$ROCKSDB_LDFLAGS -lz"
-fi
-
-# Test whether bzip library is installed
-$CXX $CFLAGS $COMMON_FLAGS -x c++ - -o /dev/null 2>/dev/null <<EOF
-  #include <bzlib.h>
-  int main() {}
-EOF
-if [ "$?" = 0 ]; then
-    ROCKSDB_LDFLAGS="$ROCKSDB_LDFLAGS -lbz2"
-fi
-
-# Test whether lz4 library is installed
-$CXX $CFLAGS $COMMON_FLAGS -x c++ - -o /dev/null 2>/dev/null <<EOF
-  #include <lz4.h>
-  #include <lz4hc.h>
-  int main() {}
-EOF
-if [ "$?" = 0 ]; then
-    ROCKSDB_LDFLAGS="$ROCKSDB_LDFLAGS -llz4"
-fi
-
-# Test whether zstd library is installed
-$CXX $CFLAGS $COMMON_FLAGS -x c++ - -o /dev/null 2>/dev/null <<EOF
-  #include <zstd.h>
-  int main() {}
-EOF
-if [ "$?" = 0 ]; then
-    ROCKSDB_LDFLAGS="$ROCKSDB_LDFLAGS -lzstd"
-fi
-
-
-
-# Test processor nums
-PROCESSOR_NUMS=$(cat /proc/cpuinfo | grep processor | wc -l)
-
-echo "ROCKSDB_LDFLAGS=$ROCKSDB_LDFLAGS" >> "$OUTPUT"
-echo "TCMALLOC_EXTENSION_FLAGS=$TCMALLOC_EXTENSION_FLAGS" >> "$OUTPUT"
-echo "TCMALLOC_LDFLAGS=$TCMALLOC_LDFLAGS" >> "$OUTPUT"
-echo "PROCESSOR_NUMS=$PROCESSOR_NUMS" >> "$OUTPUT"
diff --git a/tools/pika_migrate/img.png b/tools/pika_migrate/img.png
deleted file mode 100644
index 756bfa294820ba7f5aacc323e87041a14ad91338..0000000000000000000000000000000000000000
GIT binary patch
[54270 bytes of base85-encoded binary data omitted: img.png, the pika.conf screenshot referenced in README.md step 2]
zvNxp?0nOb0OdBHHJd7Y}v#$QM@8aYFI{T1G^RignpwN;PYF=?Sd44nwi%DLq@~(lQRg%j3Doo8I8FG_ zBhQA$>r|m^UAMIyg>QUhd}5)!C77Qfg$fEaOO~m%a8Nb;x!)v6-hx$guTw*Xz<;lx zGI)EA6zk5iDTZ&_B$z1fWT&)nEyRB(GqT|FDOX{uxwRX`n-}UQ|Jwm0JUa_fL*8ooQs72B| z7J}zBEwNSGhvBpKLpIo>ID@en1`N-7;izmcIhepx?mm7N&}p<`Wd-AaRT3V}L>@R@ zodB5P_G1o;UPH`BntB^J7gE5Vq^RL{mG$4&Y>ejmPJsu=Mq91D5!WoY#hU(BtWfrJvAidZ#C2wcsOG>f>Bl2`+OGj?kj8rclDSZ!g0U=4CBLNPU+x zoi2o+y?d?^vW#@h^j}yG5DY6>_&h=D%>-TkF$cQ;Felcc%NjjFnbJ-=)CK1K`F$cZ zePaoro5!ZB+&_jN2FBteu88e8L7{5}DA2A6LlETa5`SuW^{$)A?eykp&Cyq3U#@70H@tc3i};ah$xhAVHUNstV^dEn+7~#8;%dou?cc&ncQMT!$H0Tm zX<@RfHk4%z>L0XN`+PfCozlB=i7Tk}k(Q%o(uMkxtpa2_c0=~f{BE*h=0?{=a+9Jg zzm})k_b>c=b7t54vDJF)X*=!%|4#TjCUmpARa%3~UD_m?A;7~hzQdrbRR%Hg85x`mCU$PbDxWm~sP2DA=ZKY`eJEl&9ISK;GpWF1pephngdI z5rubn&_`B9$G(yI;4qY9(%|2zF6HTF5!bDPtrMykqb=|ue4!n=iu=OM1(|N_mX&eL zN*q4>GGG|yNqEt9~|;+bB|B9JNd_M%p>`Ka4TGTSk4ep6hG35KlvSfVHIfx0q$VLRZ=`qufDQ)d-Lu-glG5lVi z#JnImf=X8RdzwWp?csgWv_IU>*XP^q(tQZ!g~)m|XW zgm(l!*OsKRoNt@>W~E}NPfSnBSlUwJ_UwTs@woLSe@8K8pkyjyCDtJxgUb31dm>Oi)Xh{({k!wBkiJ%0&Eg9Vl4*dyCey%erD^WV=a^M_ ztZSyc!j52f$)ZWqT23Quv-Up)1_0dc!XKB#s4Z&IXMC3-nSzqEGd`e;i_0cPR}OBS zu^d}=BG4udNMea;$uIeC+E+G6u_AY>zVv_|8(=Q6*5f^6ssf{wD8h~4$p0~7jWp#t z=lrCJ(kw|m&C_6$H8_)qAD2x!MwkuNPIcUtHH#^>51O5XQLWSV3YVCwmv#4}Is(g^ptmBEAY7M++m)T|6d152ZMB#TTZ0&qZ@W5D(V zt0mV7Ji;m@Cp{KdEbL$bdLocbup;Sk?(vZ9Kj`3g)UKF3KA+REWGSB!LTvAiDGkl% z*_REmB8nkKVk<3=+|sf_Dv|8IfgEijU1NOL*hKvak@)P5jzxlYe#PY&#k}hL%n*wS z5nTzru!doom@b#$51I~FFqt;1);7UPL?(*|Do$Tki8vi)F>?d3RU{A8O-1>ScwfPD zIfv2D+cbP4(4>(ehMy|>rG2Q^Jx_K|F3|+ylJ&_X{lf-Q!)SJ>LKmsCM8J^3rvd8( zLNFe`^JTNu>u*^ijbqn3?ywhic_~#K|JD(mYL-7mgUuANA6ek-q#Y-HZ$MtmFDyBw znlr=4`r+HzmofS0c1S4u@1qtDNE9W_DSnwE z2>v*;g1($Spys^g9Rp|VC`25;$^8lXJm5kiudvf>>$B&N^c_gnpl5Laif1;bm+mWK zKH2{rr^ic84iAtHLeiVp=H{2Uq z3ryezTGV>ad0k}KdXa3#@P^@rR8 zWN!gnP0^XcBLYEeE}k^ECzQ6?#XMMeqz%;!(tEGFP@_nwZz3|k)Z_1dNvanVjnJiKnK;7~u_KULJ4rpD+54b{1@;sb(bXairQ)<{IZ(`qz zrJO#XTHzto5C0vcTqt zC``Ehz4M_L$<(woPOnUYD}?Bo%-(rB;zu+o6@!eydklQ}T`2H^fPD`IG{V$-pPDgd zc@s65{$AX&lwFEuc;3z>V!Fqaq7vSie=TAko0h|@nr?~&c|?-RiqDcJu>YHomFZw-Y1vc~d;AWO7YjAsZ3i^56ea4vkpX1`q9_DW0qO(VO-0=B*U3>(QB z{KWp1fOqDj+NESFK?a*cB7t%!<8CVJ!G2c>v2~)k2xQ7#$b}maZB|#An-KOxewSTl zkIVQE+7wI|3DmHOTfUdrAB>^pq?j{h)!$a+n?I%0gW@VNZDm%Q$;?wUSnf!33uPu@ z!sqa~K?y-pBNQ0?q$XFu95q2$nA)R$1=Fpz+R+?Ea$#VUpIwgOBrrA>7OO z3}RxEsN>5be~dMsWa1owvG$&y0Y4Ahe)q@eNym*$rFJD`khj@nnh;^aGvV?;qYOng z-)bnHYIBe@hZ8;)r z!`_()2?qi@EDz_@?lZEL|C2;N$?n?LEpvo&7|Jp9E*u?EX|I6Bp{ zN-5=?O~&%KOKBqT_#sr7SXk(-ngudTBT#N^S}7>*KuW5OGMe69v4t+Is z{8wg`Tgeo-6(c1N0{?17(%|Ci@VWk*Fgt6JP6Xy_aDms1yt&|NmVGklkBl7M8%qnE z3-&BMsPl4lb+;Lso>uRxfO8+=t$`g}emM8Q-p35B;Yjb{=@fYL$#Y*53%1hdKBueW zP_6Vj)LN;!P&CA2=>uN5h^%VbcnN*>a8|~6M)fbwSPm>6TB(Tt*-)&V5$TjTIKL z44k-f%k)o_$+Oby_njMMH?8l*bsj#DcpdLR%dwB7`E1GpcH0mu_d8Mf@}LJaz&l@6!gBq^<&``YqgY`~-SyTQbyCVGioBVCxl!-pIOw%BmJqVIQOIL~MpvvFgf zzQ`5OgbefDEAN9DY7LEyd)rFNop7!hUUg`X5AmaM@|P__;B$yIfyB>YsE-t$1@ z8|Q}tB?^=n=Hh%w#V#K2B;bMyOHq2NvioNGqnQOr=j(dopaP(p1u(i&=R3P{gC8P4 z(ykoTYDORQ4Fx;8Yt&3K`TVlPL(w;njzTV95(Hxy|L$~d7<_Ug^*}4fnTSB1Fpzr% zXOWsrsGEGW^a;Q)N^y@)LPl}=>{no*pF6ZUp1d%^enV5goBoMi?H`r#l322xuzb43 zj-SBTm_x7fNcmXjBuoEms&nN>yIz}c#xZc+Z2$+k573;AI9o2foh739dwaj4iwnu` zT&^-&YuRqwNNOqD!0&syIG0}!AS5xzcoRiCCeGZ@#V&Ay4_iia92m2m$;={R`AsCeWn%Xn0Uk4qUj<7^WYhMs2D1=r5A$oyK$ud$Ev@#SL?^c$@4V^ z*G2bC6lhB(T2}S~O>PRje%NH?{E^~fudyaU#r7Fg9=ZlHS6XNo|FbiyMR(GC@*6fe z*Hp!346P(yOo~YS(d`8aV_6B4Keb;mp=9kjN;7XuBn~&I(&+0%}xSt7j`q%_pP{bT{ zjf^LU(l45q?VRuG7@I|`Ojzl9Hy_EGj#ZCNe;ymxKJNKDOLe15Xi@`694R#G59`XPyjD1$Ghj~>%%=1J3oDl8#Dq;X5NMb0D8{>I(uD~_OBWFMNIYi`8BurW 
zQ*?kf^p#)<=+Jyjsi!Z`Sf5=z?|yq7Na8~8&J0dnc?S2A=Yf8%a&5b3Cquu@J2Z4G zH|#BL&_Y$AaZ93Zx`8rL|hu+6m$q6>XzwEBf?sUYZd2+SW z7i`;xsaPuUfpB7u-{XX#ZNgM?zX0nB>PM5HaU`p&gCjAT`}B^@_@7i?wG7<@bBQ9OSR3&}E{1*svcBCL37s z4xblEbBhV(^MVO>KOVCa6)}!j{1kAWT>3y1Z}bVI2%_$C!&ak>w){1|AC{|D zO^a+PZ}zl}dPgSGAB`B7$Wy1fSfIA|6`kq2h;7P_@ew@>-yBdmVcjNGB(4kkEFm$c zt@tz*DVCzF-@t6S=n02;kMQH-h(G3To((wk@d|$l%kv8BHmg8wN?FO@nQC{8(cHby z_2MHeb-_h>+v_5gwQR&9PXGwd@+i(vexoX(j7srgh;kFgtw=5q1SJAWDP=}S-x5xU zjIJM7|JtW4G;QtyQRdnk_U$UKeR2J)<@$MMFJHjvGjS#kXcTj=KC&4I$+5HYCC#%Ax{nFP$HDu{U;VCjEQbiQ`~=#@murD=xY zWwht~z$PG3C*m~ufuudwYH0hgDdxN-A$azY&l`pi=M>Dvs~LCk;IEZ7$L@E8g6DX) znL57jSs%Dy_R2~L z_u^{c`E$39hOd}q);Ww&sF(_wwC^AhaTDFEMvNo`rj>WbJxh-|@5(S0q^B5D$FGe# za&)a)7koKqs8n1f>A+EVZe;;XtwyTLDJ*z$U&6MmKuuE*|vr{WacsrPbmdwTb%^?a}TbkoS%vGMxi#WQQDRN|;W>6wB1{BkOCH=Oc( zH^BRR|0vRdz2CW0^?E1zB=nQD zp){VPesE}LVkw;9_;Q6CaLmxV#^vQZmsAu#aQn2+YGm565ZcIJ^3fg>@O(cU>#BO; zsYN4+C|M)}DGMK|qP~zwQ8co0Q+4&#Dr9XLx=QKv@BgXd{c(-40PxD4Rpvi!b=k;T z53m`|-=5GY37$+`mQ))&*eJP z54CmIh6C?5tBfvr3z#LyP8T;FPYaIQS6-jaV)67~=k}T6%NdrEA@1a&isbSZ zanQ$)!1me1^E}HM=TQ9j-H1+&dIQ^WEffdUy;?^i9@p>T-Jm!$Qchp**bHMbQY!_r z_>tYKZ@+zd>F}w}LmBO^J%7c8LcJ%zlLebR~VH z4ACj)Jwy)iN6e1+)EZhpVi5Sgdt{CzWTnWtKQ!RS@ceU#o=0C*L2)yI8biVtRv(rL zK>}&I?QdJOCe!E9a(IN3`oBk%l<>&zX~FxPPzUe=*qJSUG(f>S?J1L^MaGm-P!V0<^O_C?&fRCK<;rlY_Bj569yy!VZ^a($|W`w*MkO%9}%|N{4fDPT8@A`Vt?5Hr-km!xP(;e+UWKd(aaXl zl#xmI1G$8z0Li^gFd}1nNp6MaW}Vjk=h62(%;}@4H535w^;foSl3OEK#7mCEkd`E& z??Q(w8MgG8d31fs-D7o_Y6>za1lTYc=l3$e?{$J}ejTm8-S4zN?{w3A)aN;5RR~=P zDzb|FCZd(oivm$F_~(EhfJP=fR5qg!R0!!QzVPWD*rF*#I$h=_$c!5PFN&tY6 za_~8xJ$6qXuMQX2+&OGf67v|o8dy&cgdEmDASqpeh)%0D>4SkZCUt$HSA*g~EB=Zq z*OqOG`WHHvk0{k(R1H5uYe!1{00s8Q`@fu^!!R(OKm7kj8} zKV+UH@-SIb&P$(e80FgOlUIos-sA#CXlN2OvS#*|qK;jZoUga+Xv4d;t2+z@>8+!| zBx?;Ml$7X(X16z71zGjDES(BEL=#+)U zrOO)pMj)R#ne?f3r^6+?eZJ6VvzP7QkE_$k%{13U8BA^qn|^^2hApjq{i zFcbfca%(5U77uYzM$N*r3F{aEJh$TyLxEW8)-knWFW7Ql#WONnDmd+Nuw5vqa#OS# z6;FyMea;K#BJ0kW&p=wjNv2&`t#Y(&04+hMys zw4R1Z+by0V`m8`7b-vFo0kj-G2fJsl5CqhsiElxdWmIiSXr5I*X#$z&vD3$eY9uZ) z?#f0ACx>(m}% z!E>)%DktFkg>buEEu0W4nirXVJU`L{{u+#bXX@-tWNaz^)kIh>)ucl-QD!hFdcN$0 zAey~*S6*KU?#Pbn{1-X6ka!EP@R>_XauLoBPtl?oDpTuyPm)?n^Xqw7l91_xOu5wr z%2jN?gz@iHQ$<1g;~gDGq+Pod{4>IB1 z`n9a`VPwnJG$c8DmN=u8DUhzL>3n1Frv$ghFsPQzMAtK%ZE6WP6RhOy;}0*DdY3TO zng*~fp#!_;DBWCQB=0GidyG>u_(UCo)XhZtykp)Y^XTWw)4TZ546LMZt_&q3h6`!g z6x(12E5cHB;p6f%t1E(*LL1o(9qBEzZyP>JNqVkhSsYZtdLm6H^cDwW0NtBu7Du0P zunWnOFkVueLEM^C+FSn7rW+myvmavg#(+0Wh*=i1H&)1Q;s9xV&A*M~X#)lbgnmw; z|6CeUKjlwMZvpISDLE4R0SHG|UX|<{$+^4pl{^r;L(KTdR^%BO9f+U%2)pUMJG=Fi zFEuOZk5FV#$*)NtJ3aCFY!6D%qjyue`U~lt5HWd>^0;pbJv!#a6FqLaW$2O#Gu5iaedZo?}A1uN+$VeLTu!Ey1yw!5!(bzHh zx^fpH&s+bD56kwu4^27%D=fS5ViD-P0?}er4)n^2KBbroxW&FLRSVej$brH&}=Gqh=mI_=fcJ z4U! 
zn{>c9y(3m;%mur=lQ1*dD^#7K$v-{4i8{ zaM#bx3WBPq;K$QU7UhQn`;wE>6bw!1-Ac1krgkIKVd-LBl26mOp$|E!y)eI7cGE(_uZGC(!O9B@s(izf%oOKtf@xmd>5d8Rr3^AMVPDzHa5VJBND^aR@GbkXGg76ZVXe9L>aarW zM?iGEp>SPAVsj-^#vJ=Oj=Ifl*@5hLZ)_!RMYpkkGUF4v;xTkCrZ$)iJHRzkXd9=oQm_-*L0!*l`;|nh^^n*)$AqptnEjPn@aa zNldr_#7rN_D(Jb^O(&$_iHpgip|*ctn$Rqpp)UxtkSwZf2oG1nwV!&67KqIkklMVG z6wL!wVQ2?K;BY(uuKE?nJx<+pN<7YMdfk(3qT_U#0MWmgNPNGB?|b85yj-kp3o49^ zOv*~7ov7o)UFu5!1E{uT*6>1EGZU78fvXhriTzFJL!(EU$lpo6y@)odr&A*=nK;ki zv2r`GE1{&-*h?%ee>%!C^oj+C|5l%{1iF+ssxVB*Hc#?6(dWvy|A*~sJRkfoWQjHS ziuHewT}u4Flbip06Y*e---xLoRdlX4~CMLq&GCaT*U}nAJHohMhJXLDLR;N$q z#7A~zUk*;_kKC7P+{>^>UwG+?ZBMe~|3jU|zkbTL`f0=9Z0Pj#6LB`Cg9Noz&a+RB zOvG!X-zBuBW+Cvg`!GM5`&Eo;UP{!N@0X#&yg)M;3Q-_fSNI=d^}hFx3(k&}_QHa$ zp|zdQjW)eDWVVvjMxA>h$Oxrpb|IxY%uZLpTla4%McQzVKi@Ix#z7Kxlh;xIppw*o zq1XGZJ1V%Na+z(_VswF(QLGG&8GA>8Bro(c=jF(NJs^Xgfbi;yj->pZjy~wt8Ba=AuvKe?HdBw>PatpcK2=b2;O9^o?C%*#Jh`78NqZV0fSlqU+OuZ+e57NpT8< zHvYy6bPV(kGT8nFTnazlc>fP=J4il6OZbJfUqW9b28yQjq_{%MUiJF{LUV9fHaHfu z1uNK^KXr#dMWPZT8g9ql>oH6~(mumz&N0=O42o_M0Epk2aK)tq2;)41w8!h>yP=|18b3U^S{G z;tl3JXWeCcOE_G&W)y;^+gsre0upi|9+B4n`Bf)d)>e~7)~KeD z4?_&4)^__(!E?0#V>qDKk+aKi&yId~C)(KV_73xgE5m)mEQ#NVWXW-Ck@?21X0`Wb zh2^x-zBThr1rW7Uw5xZAj9z|GQq{P+xC#0XCr$M_cDI61?BG~f$dS!E#^_gngm zTXm?ek;_)Dw#jGz<9ozdOblO?*S{W1D!#tH^Cnw;JWGB@*8(Fs30h%;_8VHl6qIk4 zum(FpN<3Yo$wU_}9AQe|H|%`{$Ff6Q4z%C3+TE@n`9y>TsV=sVU6;&&T%vvjmM<0LlJHvQX z2K(#Y*lLk{d_b{GXJk<*qx)l(n)2GD(69el6Q=OOyQRZRuV=wbk=_JEx-O;RM+viDw3RhpnsC(4w*USn^<68@p#40!NYE6 z_YN8NDFtsg0hB~NxvQqDqU3W6m)MF@zhh=@o)DKNX~CLt;iKu! zUaxTcbSQ2@d_7~ny_VEvV-!P@DCx3LA@e%RP7*UQwVVt>Cv{OJV`F7|JIV994PN6PVeR6QHJV z)Zx~~r!0GJt2puxe!ZPs&4j$%TgVeM{H{)EC;Xc}lu3s+Uj=b4Va6vb%6kYbB@b{D z@NR*dUw=LWb9N0yA@VVihP~7Z3Cca5a(;7@W~@&N&TsRQ2)kv0jiip;(LZE*W`>qF z=E$r_xv5~yDLqt=`0`LnMVY0>kje3$?n;>*KQc`mW=Y?Y83GINvxV>_v%{6&zh$Pb9$^Lx+*<5 zZoL8F%`6E^yXdC0QDfGp?{DtOAxo^Ec+t^K>LMiJ0++5(D@VFnjF2)wE znfN}TZ&cX%Bc7uPjxHb$c47$m`xi49fqgDeXcVhqI^CVC^RLW_MJpBy=s@=LQzco0 z+xFM~I?K&Q{urK+n_UZz`d@VCq6rBd?UPTXX2MBWe=XXr@b%8 zg|U{~srw@wY;)VZJ}~(Z^qGFwmG=`o3a_f<4&~q1t9!8^u>^r9T$HegOJX7-P93e3 zxEADv0G{+k*jl!@9uVFMU2LJ=hAhFgh}~-lB?N`+-*}op6AcaGYt>t*q0W28vx!xS z#^OodI=Hm#oUXTIZEO9P?UH}s!;R^o`yU@CbMj>b3o7b7lGqra>t>r z3;wBDZOBrSRhD4%`e;2yaun!`g^S#$CXCU9FiQyuiHq$)waxh2;zOQ$4UYy@jb!P` zB`XR_%5cwzsJWUFoW$EnOi4r*d8IEm_B`AQrd2Iz@yj^5St67dl3dpOJ6AkP%8g}o zgK}GL(=IAm-xa*-mek_bkT+F7TOaUT+uD{=2wXF>_BB&d7-1#+ku-k}ZIl71%;1E% zbZlF1V^?M_4=nprrsXz9Z}DbzjER?cIBTAd;Xj`K71vPQTq1}E4|9*E^!064a9q#4t+zh zDAjodF>Q(Xr-%3IbxTwKJ~~Gp23#kn=IZ&G3p)ahF4ZSqb#R;pn~CfLo|^(j1>lz5 z*Xeh1^OAOo&JKcAE9tN%BdzYJZ#5fLbrKbpZB@u~)V7gx+@#o1nLG7!lOeLTYNlQ< zM`?wzoe?sH)omMrpTwAHdd;bXwCV!7uQ@xICQipb_Hus&dF5N9E4*S4K=PBurym%j zF9uyF^^IhmeM#uk%-ao-J40_mP-0r%D#|F>vx{Wgirb_&BWc}xzko;`YZ>o(;(o9O z)kBi)ZrkKd4N;-Y6u+5dwh|AWrBDcCx?4ESG~0+#rd{3ZrYI(@8k2AJY;8HcCM>nB zxsbQ&rN7gw_$5viP0855V7u$c1CRQ=JV^rN9hzmgC*-Xm$RFHUO-J^0=I_>uo^BEL zj=6(E0lB=2hK^09~HH#605a=q~ z$%BhMV|Kw#I|CraL=T{du~^ zTb-t__T;lgxUJ`O>;Tc7ehaZx9nUp$$C0d^1FjXw?BsPSmU`Sy=&V~UKyGl+{$%G` ztnc%)v6N!^0K&kQL|@YQg^9HFAl0-9bqJorvQLuKQ7`yN-=e4bSKyt=AC^l^pC62+ zVdw+#niKbiD?;5d+;~wWJlcuo?~DR&vDaSM!cHGix0l}Mv9Dstx|_SqVQ#+Z8yg2I z=*j7$X5&gC=L)ZwQFD3`d8fEe>~pqD_29Il-LLW>z7W=qgSEVl(}n=wnb>*<%YV=K z6j&Og_OnbpX3fLYSXgV$J52(~VA@HgX_GT;@dmF<%AB(;L`Qqng0{8*+3BVD8p@C} z=&cBcge6*;JG}F4lJipO5-y$mUoP5Ymu?m&OnZLi>BllhL=LNbwC6{3%lSQS{fDl% zBO#_p^BhrC!C6Lrj9d?fR*Om_Ka~2;6$ek~8XcTXPq$hvDW_-;&Q=|+usw4h9er9XvCwuMz_1eh7S;}wNJ+##6jIRmu5-N@d5#Wx4Opa@=dqABLH^z z>_2NEAMTYD1kvmX?(cN+hcRZbDNnbdPG6?WZF9K!BN<#mhS zQ}XdlQ+w?0SXF6EbY5`e4`d^vr#AJ0ZT>s#@( 
z3CLy%pevnhBoWz;yms(ua5NaMd)uV>Jn`gs7EOS=eGYok?hEryXCurPB72t=Gc|Wh z4kjHtx;&>pF!m*MiHS8?cz{h!P4Zgzm|9QWuI1E$K=N~HRRB=jpI*ZM5Rh4IPUIIx+a|+X@&4or*lPdd!uVMnlX*T0!PGGB%(Re1M>$@ zM1<5aJ9E8GkXMEIL6Zyt^YTc{O3cx!{qSOcVH6oRXeLJMJFC`YgIXNnpuZ!!o!#+# zn>*502l>!h4Ndtf&4KCeyoRIC=(2ymPrx&2ajj#`vNIkqdbZa78Ovg<#&}Sd*!_61 z9}Ap@LRdQB@M<~TcBp#n*4E8UI?&ToIb|l+WVfCC2xD@l60F$_)Ul$S z@lv^Ss5kvR$^~Oy=uspt5DL=F(Bd=?LZg(bc=`8E&j#7kw;NZlAd6?IhAUOEvx@l) zfcJ=uFfSviNu@Zv%i$2gZ`&|$%e`n%J5(phj30&{JfIlR}keRV%-d`vwsyooc~=+HqGbWri3M-!Ombb zlb??-;qPx^(!JT!^mtr|@*u5PNw;HJxW)8Q?OnQ4C8^~?5@Moiw;!|ReO!bGc!}9m zEXgdha67Z97$K>Ie#ni{$x$R{%;FXfoB9&n1Bd}wHXPYx7W+qWObsG(FvP=m>NAAP zd?(@e(;KG0MZ)2iDpltX1E%To~xpTpXKbaR600DI>X_9W&kxwv|voASfdd2BS zKS;DjOk27wMuUn}*4!AYN*Gcyr0v+JLH#thi0baol@2UBC+vEK(HY&k1K54$;oi3C z;tJyCC@hyzguMD*FBiyT@AQzHHXD;vBHok57R=qdSi*&)Kc#Xw;0+&DsmzWS#MOSN z=E!ae;<_02e|=ZrmRwW20%u>&jWU#g zqDoOkYLV28Ic1S*QeHI>)*Xq|^yf(+i?adK2a3&!X|niHSV{oVF^`Hl{!1gO{1hlA zZpW7f6%YFm9#N+9;h6slVrl-hO_!m`#h9LwCZp!oba9N8+ZN>qym-gBdD=A+Rs_*1v0Z3syuf>80$!cipL+1= zqLnPS{FP+`?7$q-STzwc{iz;5wlvOg?$ukmvN?-!L!+*KKbpN!-M)o?L$Q;L+rsAq zHaDIP4$$jg*~^xo`=|_ay5-n(0cuzd|D*X>gcg{)=aq1|;i?UNx~`KaHoLr8N}ma$A& zSYp6)vnOqH)o3-uN)S@^A$;&zi$>aJ;*@fM-xhCcpyj-t>y2o|)eWUA`7O|8KkO(mUWeoF*of6oit?O-F5T(&+a2&2e z-vNs0hbeDn7_)=}Dpn}6ogQKEzJdnxZ9sJ49Q1!xewL}(oSn=c)c5QvY3^tIVn}cL z2N&Rd5b@uN&ei{{C<6PV{2a9(5D`+pqWs_w>`Z+Fucl*9YRUw3=@flN>xn(SkJwIc zFR9)n!?h|909HR|t5p@BTx>yx)WvR@EH~#%`1~;5^CUvR0ni^Iv1vV%|A)78{H~-4 zw|MM{ZDV3PnRsH`p4hg1GO=yjoY=N)+qvhx_kXz0m;O>`b+6U6`cyspw|B*Ov6-8%QuMs}EPWioC?v8AUSSI8U9GI~_DuIrZJ})U8dY zRG)vpy9cP{&D%U#IiHn_q{#V7FvRIRI6&ml1*R2Mf<3}wp&W&Iokeu)4{y|o8@X+WeBHJX)r6d_UOh? zhZf1Hb*Aruu2iO)i)HM!UAT&Fi_%|7NmM z8}#B2Wj5sJm%_l6raB=~lfwL@l=06EBo+FbnKzntb-ZNzw=(c;vtZwMsr(Q@x5I5w zdsp4Ah++SBlpfos4kQ`HQwjAK{L)qX$ZLY$MD60>h%KlsfrC*Y+5DaMFyI|C_ z3fgjNTBJ34V)uMM(??QjBIMR6HEv$@_lS9ob^21o z%um$U<99iD%@0*)a2!*E*4qw8T4OD0{KxNWy2p#n@B906ndr~z6xEK{dK0iY>94iF z;jtwL!mE%mMDbh{{Zf!pYtGn2uYD}?_U-q{1ECuw%O`1d5k9Y{Yh2wX;8)Wos?{NK z@u+(X(n0Qz`Xk?Wz~O!lMQu7K95D1d)Kbe((1W=H0FjWnEQfveGbD>6kK6heFYhN{ zYGn{YqGj{o_SVT?7$m539nL_P2Wt~EicS7-r)jFVvs@C9chE$H#D6`A#)Mkg=?x$hsDGcz7*!eIvBo~_zqt&>2o0VMgjXE*U36@kr0qwy6rB0rOtS&UbMT^=CloV6Fql(&(xa4>yw8fscXK=QQ zKwRb6C3lZQQuas7dYAh?zh$T*8aMgVXy4r$Z0BtQjkN6QzVs|;{qtxpkv?0B)5B@k zYZtMD0gv8%u{wulHw3nu-O@Kd^j+!dOvA5+`Fg8i4MfB+IRVxN8ydX$R9yMY)=WL$ zvpN3?Y&2R=nSk_?;-=+z=|SFwE+0PqK67gh45kGv$;(of8cFcF+Z9UIUs=9`}$CFRPDdA&dh|rBgwG6)B zyM(dX-9@#hQ}xk*1TnP`jqg~9_SQW77b$A&@dPn1^K4>;8uP}%UO-R7h52^nuk3GM z7|a07rZ4uqu30|fidHrX>};#2deFT5aIqvUlXBg*aJjY0{>!+#plc29ebPKKwUT(H zfv(0zY@tc-go?9<4`qdw)Iq5uB@l1fhW*0Op@_@|V~(@`j@@pkv?1(6k$c^~y}+n- z#WeKQC48{nQW%SP!89^4CT}mJm~d^z-m3}N@lLzau%>%+;-!ORq+2N z5-YRLZEj9}i^a!LwhPoWn%vp)nXT0rtHWa)v0|6d(Z`ON){~5}1{jbCOSPQM*nUf8nSNzMffcKIE5$_>K{76}n9TeEdf>wHU87Vc+O`gR+ucoXKRMGuAH9 z0@-!FLl|a9)HN&eiVyH-Qc|$ARdV}hTH_PM?JH3gb54;q7`4*toF6Bk zK!U0N6+fcQMp;)gb!2pMUrZN!`rl6CkyD2fm4^EI7Y_r)>vfQZne7BnHVGcUY~%-{ zpq6S6x^D!<=BlDPcszC$^d-rk9`W{qW?Y_h&+3$LRZrZ9S6J*ZeqIp=<#Xn28qK~$ z7S>6;!zW7N#twJwT$_w_gUnirc3p^jcwu9ekuc(2&KP01AX8AoLPc?o#(+=Rb$!94 z2_d+zDrcm+*g$n=mteICBC;5)I@hF+K$2&gcZYF>?{uFXg(P&rNZSzT$}o*ub~sVu zdd({JwUB~2H_Kqr@B>XDEN(0VU!*9@+-3&VC4=Vk*;d*#R1R}4xnc#h))>EP$=gbp zxmv_@vd{!Ku~LBQ$Qlf|*`pzh@s$4rW@6BmG#387XoMgP(J_rp;XQA0cRIb&mRiTR zMI&oo`#C1|QD3Z}lPF>|0X{{z+OK8q_LSu(bG0lB{HH5uCr?AGu*Mcx@DBmz(j+9x z`U-)*zCxm>B)8~*2MJiIGCnl9HO%b)psd&qUxuuwh?{H|gKaFJ5OA!7g&wl@VJr`MCvo^564~qWd_EUso zy2UtnN>m345-lN7M9(+xw=^NmZB47o{m<^_EyZ~@^<{s zQX2_YIyYE^&fZXMeGHI$ubq`DV6nD+QeNm$n>7iy7;7`CpdT|u$>iJp9@!M&5-W9E 
z_fWIfjtj4&GLc7nUrqUlxGaWwNVBU%-fK`Sdws3GS1hsFRB>@xHB;ukz6^EKa;%#p zFlgsl!9|I#H;(1^dUbA91fQWA4X)2FM#_$OHdMk6nva~XkCAVYI$%gshA+Lo(T$nY zgC(0d13qvR>=>r#hn&TnRIHiwOMOb{NaJmiJEp#p*b_~MYPL)t@CZBFL0*Z@wgSE$_P6fiE0Zk zNW<*;n8OQVj)DQuT-_k5+7SMeSTZa#Ccg9j8^$;%%Fjqn^-2FF!4dve81|XjC0fjR zrT8Z*kT;COXrl(J=XuFs?Wm&AnZrO~Nm*4fzu8zw2!#fvn=Y+znz(;3v*WRNc4)4q z-g_VKwS&&TDWqVK*6m#;2^kM%LeCAwZQ0kg zv|mqWb7S@3Z1Bs;%_HR=R{o4v1lOXgts~Zf5()vwg8F9$yBTIPc7{g%#v`)BWCMIt zXZLBp3N1Z~JdqC?-UQvSkp(*xtrhRZkVbBq>Gafim0JL%qj$ga+h;5qboHPMa^4eq ziJ?Fq3hdU~5WLe8j5bN!|_RWRl=#Gbx* zhtT|e=H6e4A#EcK5fN-QF6hsXa;X^;M|1N?&rDLv-6Ynvn9bl?zYcyO-Hh@T(_v%Tx;#v)r&JK}oH=li^rfKKZ*oAH;RvD>e$&QCGNyX>r z8(%tG$%qls6?#j#CY^;ke^;G4&ssnS=O!%%&Kw9brfD z*zPxmgM-4xjI%A`=5z|w8w{8l@!$V*)8>~aep zeG{q6YS4D~yL=y;8U_NjE1^KCW+}8 z85J%vOxrt>=EaBhMq`ePCkM4DC)BLmKfjaFQhSCndczn8wqxs@kKh2?QWhw^Yiu7+ zJjUj=$*yIV%M>V{Uj;0}}1L(SU zH7ZWd`=THk&>0lOols@Zk0UOG=cSHJXZ_ z=a?CL?(3J2ddDjxvY2Rw=6lYTA-Y-H(4=;}xM0R2%}Gf9a@$edP@JW#ojrNKZf>u~ zf(8sDbbh2**JK&KJW}5dEGW*4EIdxpXfQyu+N_DMd{NUiKzT-NV^(Z5+$^qVXR1?5 zX$go+L^77SIIdl0?7sc@WBOA)mMh8~6FZG<3D&27=!y6dQYmYeldxY{-H$~|OG-pD zTdr4ewtJYtq{og8L$?y~(KKy?yCDnKbzigok<^Yw@M|3L2m9cF1jKHgAt(o?l7Mw8 zmpR(-C+Q&Zfw52330fo?SC19O{zXaMB9%rLZI9C#^%Q{YCYsZ!gv-E${4DI!SP9tx3W^^0&joqsu!a&FP#nZ?Xqq*S?;pi8ijn63@GX_0BnbRvgbkeb@(ir^W| zCZWEb)&`qcVhv(s{9Q5l5^-AT`j#&Rps0{lvvbL35zJ&g|M%VXemiqQ=G{B zA--cQ^sW7Tj%GVEvRQahaZKVr2WQhFOSMj$A^0 zcXORB+aM!#;+gpJn1*L_C-=APW&XpuOn?;ixz)lGJzDd(xPF8uT%%4p*rmqMq|nN* zH*Ufv*o7y$Q|uDizsaI&vH|7mnSq^PqhDFevr~$S+PXqu5ZW$s}k=7+=QL(qk$Rj6WdJUL3A)D@Ml*6oS1f^_us^WJRybLf9w zOKh|iZuB$3l3wLX#|Ex-yFz~Up%^kl(Vu(|l_P_D3$0YDOND=X4 zdoq1jy2t*@ava=t29N1Kxa?LZ|0#3AcqND(w!F=<;o#t;`BB${jEw;6BXI~%pT+I^E8vH6O-Q);bi+K=V+BBkwkHKdFl$2qF~_FJwr?N=P(P) zHv1;(ucZ3WjfX$Nfz?iD6Jm0iZ4-z7UIU$sg>tX3k_wCYBa#QFK?V%E?GvN(d#erV zulu^-B}DB^Fx)=*~w}pQ#J#R6FdW zD4Tf=ZK%nc5g7>kS{Jjum8wm?) zUDiY@^FNOT8OGM!-2Ie5LOzxgZv+@3&8Lk=e|~5}@oyGT-(p&2c^WX=&`!tuPE57w zJt*dNziV(AgNZ}EArSCH6L!7LI^~i$HADPWpJjQ~&3p?6&lobyt&sip4SKCO2Ju9~ z4hhXBP{#7S=hB;HQPs;DzhuQb+4ciYgp(S;Ix1A;Ozii^T$G>}uLB2L){veU0*^A; z057J2mF>u1A(0c3y{A*b_}S2Ax-IYYlu^^k)3{fjc4~i@!mhTc*bZ8}O?hPGxA>Pr zbX2r`5AW%eONQ!J_be)wqUa?+Zarn#O$XM>S$#~5ZCD%rN4R>OWGC8mx3nA?Sx4K@hj3mOsa7XlwHIKIHRQVV8bkDAn_t<(GLPFmxrKF_?I^e_MI|dH6a5Z9__AMM0E3VZQ6jj%YJ3FGJ zG$q<<<;7H$zeLx{f8t)MDyhx3RW1B}tQuNBHrg`X=!QkQ-OtrkfuJ_9ob@v_B`9Kge zRrWTekKA|`73HIn8L%SgBO`6~vLQ;KiA$f_+wb?LSon`>*dq8#$1oY3|A+uI>O z$eWGkIh}JkPL;<$7ySeJDqi`EzUtUy)x zw=;`Psw#j%)X-R~Tf4-*eWjl+6CGsy9Yb-`2fDP#%nD^`O$iA9##-yG>SfsVA_4JixyhV|>wAFUl^deb9JSzCxdPJn+_KrUU0pJUwA{p z#&NE+*@7Am1B~ak@wiUEPJS#Ky^OTIG^6&G2SPAhxuP~Fj;#o-%%iN=O=bR& z`GcG17APetW{8Vqhi^QXzq+y-J-B3u8MsOA4=f#Xk0cgL;T5Nd@I-%Ce}b{Xk{MzSqNza;*~8io0hoT=#$HI z3c7`dx#w}lyeh0=oTJz8GRZd-?O<@132d(VZmLM&j-Zg%PFkRHa3JRUrm{FI2)-dK zskOjcmHCFZ|1sqRnCiKS>V8A%<<+MrrhK(lY7Hg(1QaHD2VJWaknbO=V#>4i+YTWH z_HQfeyq;Fcxhx){V__<+Qo_1)`q_i9iV#q?Z;-bRg z0Cy5!6D*v=4o{$ZMJdgZHHE~th*J$0wf1X5e{>uwrcJsN8GvniMFe%ZD^!9{v3%vV z=^%E6%==V+hAVf1E)lu;#Nu*}sF_}4>lM#nUPYWNXPIrmqH7TiZa-Aqwb1CLr6RxU zfQJ+I%-w>RSxhoEn_U#Z(wuoWHg@NzjYoAgTe7jR$%RZ66Gxq>y8gf-JF3GYc9H3{ zR0{~pm^OQSx+tJ3BQ*P;cPAo|*?xRlDsE~X)mB~^4UdH^Uh)+TY0F&@2HuD#L0>ZD z{_GO7rCbzxi`#Vf$DOu5l%>YabFoz+7N_g_7!Tw$xOb|VacFgR8jmcG(hR%kvOA?7 zRz7v5nq&dNTmb`%#fJu)3&JdZm23_n_{pLjdO>56HzyRmA=>fhmq~JqNkNAS1vHa! 
zCykkttK(iTVzGSVaPN#vRxt`0+YCp>V4S~b`n%=sps6Tyo$Zz=$}qMrI9eRmZcm;|VXpkQfCvtcrGUCNtbwU7T}iW6S&y=` zdJaht)a^MVpq0o^=$KaL=TA6P?(1e`Ry3!F9~AGAY^NDO1bDD83ECZBeeNsI_V#Yg zaY7u);A+i=E}i3tlQHWpA(|poI|4y**r&A{A%#jKF7aXlp;HF^eThNgJIQp6>)aem z--a(_N#LF+)z+P;+=OZ}4}w8opAG)T`M=6rwG+IE`WZA+W^cB3%zG{`#t>Xp@YOmu z4Rh0&YHIK@--qkv*z8XU79;0<-op0kD!ZYJOW{hm9=*!5@rK)N&LEP{ccuwv4s&7F zb%|G-$b&7h=z_yA}spH~KbQ;Z0=r$|R;^V{w)bXD)}7Ft=qi6ajqf9|P}Fgtj> zcL`MtyK4_)>a#ngX@xOfa_ZP$(-^6U8Iut^Zdr2cGGPBvKNyncigV<&8zu{ zc^G5|MoG!oBWCcz(n>kOY{pl0V(2VK{bW(8IH%0quSpPaOv5J!7V&R?kkL@Yq|V$M ziI&DRpXvk}8QOBA|?B__8hx~=fI;gf}>)t{b#Zyl>~$TCO8 znO?{y^PU*9n2|`-NW>>n&gEpwtW1(kG;<11n-K)WwVlc3b{9u)Wa7tNv*lmqm>Jb5 z=FdAHUY#S{W)FEw=am-b_di&;m3k5lTtpOq03&4!9xGG3jfh&ZDdMwnrze&1W(jgf z2VRGNGgvjM$nIJp+GPTokFb}n+omdlABMaoLTjB~ArKxz)IFalCWG8fxfuhI_Do7+ z77Lp)ks`^tRi(Um?DRKG(wZ>R`;QxMK`4+K8mYCjrlX(Z@ZGY8Zv-iK9PTKxY{4+H z)bFP<>y*;T*Ng9tUUeDXHV;kbmSdI*YHIp)I!cgnY!rQ^xene_MdM-% zm-F1KvU_U{IV$fwMJ<#yRj^=rxu<`5rDBXX>ym%Smh;Hcz$)sjb)Xi^@~p_XP?1&* z`b>LV4v$6jCE#(v$?{trRr3EDxpylpqT2Pd$9YTll+hylQEDmHQJT-hn^9SbZ$zkT zP-jdeLs`B1wvS$)elCIO5jkeZ3aOJF)H5y91cJ!Zmr1|Rd(-+f~)_lVs%eW zkv$Q`^B*K0IRDby21QN&DJb)cjHf3At&MVKau!8i?q#NkjVdB2^s)g*A3*mw|Bodm zi@s0_1Tvflp<{ro703RWQyR1)Y+<2!eEeN@=}iYP`D$`ZHDk4TIi)pF9F*?2@06mv zIm$PfdNs7Ydkz&I$E88jy(@?9_PE2g!X66-Td8lfQYCe`LDvoOgY)tY_2;}XqXj`( zadC42m&&oD&9X@-9#`%ATSszJ1|=x|$M`RV3(lSA2YwU=WW7NlI_Q$*Sj^*ydn>g)945lc8D?h`sb%?9qw z_9Pjbin~9^dMzpQC7l0eh&>2tv0rIG(X%usE;mmA9Hv9!`YdY~bIMBU+Ek*sK4K?? zmY@q~gG>b!I=+8pJnm`FBx4>9?LKE$rZ+_PD>cAx-H@0TO~}#I2`MNGswC(~qd7#{LbHL|neaJeU9+w;_U zmSFRBPFgA3Ei4o|dMrCWKp_--=b}6iamlzl(sDc9vk$W8Tu-B4U)MV{V}!a+ZFnRt zTe`}Gkdj5q0?eZ4Y7MKF*$Me$Nr2fWHzi2iP6o7yN9_-1ru|d%D2dLd#i~aM&Wa#n z{i9yvG16fU$*%rzBs%SDA*NO>wPyEnIvcJHmXDaOFFF);7E^+p%z^zK9QA*S;Ln@g z>*wELw%%jwhL2%vudXV!T4FAttM3_lF4vZxBO>QEn3l>Q?b(r(%iI4RK}o?kgvUnh zzTW>P6@~G04(qZaz|KD&nWBxL$1}Iwf(6Z_fS5t4xvyDsZY*DO%(uHV9vypp2|295 zllBmYxQdFIY1WOt-WOL?QP!6_S$)*NKCAcs28T-*PgaF=z}Lwm!EGWFNa+{#%nuI_ zw?^3OvCDEWjXwQzWc^H45HqrvK!DeaK;rKzx7EpDy^Yo=viQqG2zQoIADQsB^OazZ z)YsV>RjjJSCyuW-Ll|Td$MCT4lM~X$TC3{!Kf7%iHM?6%O6@mMp%FuK1TUBxvv&$Y ziRMX>6J33^XwLotPcr#ATq@doG+~W%Hi+R@!p<>EZKE7Pr~pr!CA;Z6d3a)%gUIJf z%xgk{zQjRoIeXp*$lFow#AfXtYw@Cw(k!Db%7W5A?>Y~UNgBX3r|x%xdTmB{SgE=G zRRJsH;(e_9lP(njvEU2+SVehl*X)>!6AHG(`=2x5BH-G|o&trHqq=br3TqQaJd;T? 
zt<42dYh&bcH7;g{bN2(sEf@=6B&6q*&qI!HYT4wz&PfjyaZpK3P3#N4whhXh3ImXJ zM&-;skNAMlYW8Nt1p7=T$l=?B+B(!jZj~tJ1~-(i?nc>7l|?hB;|E(aZl1Dz0D*CA zKs5uDt27r1y1Zn^<01!JqW$*;C0$dKX2&&0+vU=X>LV~rJz zB$)31SE$Yom?8(|JJ_8#vk7bUyvEQB!8nVnZR}^_qzH6^cB}I&fIcQFR(!XCa{i8T zmIuKrhxfQAM~;1jXJTJCsLA$ueT?n?cq(~$e%5=KckJzN0X8$O{ahz&a2e)|GQgu#xNot*P@k24&pql3^E-dx=(@i_mQY5HF( zByxtFOGR>qW)h3_zfPziZg?zglHEIK{9Q0abzOwK4h+6NwXcudKJKiL9)=i5Z%o&> zwCVlPCTDSvr|zE#mPZTaYJ{*Qvo6=1na}io4*Y)G64!1X4SIR+`QrDVrNnd3Tw6gY z>AEw{4k2t%*C%e;)H-ZI`&Gzou_g9W@4Ce{5&eM8BZG(bUero(b9h?eEVXVISXQC3 zE@{5-7txi)UT5qwwK>8Dy|DZ${9M?Oa8W80?>G zQO9Kxg$K!z-M`D7nhwZA_v+jcaCw@o`3c07Tb6>Q7AA){Ru%N6H4UMjy`Pz;gy9Ls zF(4$4eVlPs;>bIDaLk11IPK3(eWAUiO3!FLh@5hQIyhMUrtpiW_4>*u-+^tFzA}Pw zW;~uC5zw5KM2dM+AO{i?-lyIKVX?(SD*(cYC6B0&SW;*aoSgHJN!G!!<9jABDt6hO zpO?OH18=>t7rI;o^HYG`=#Ak70K#$ebt-A5ZN=(b2#HqAXmlH|N~hZ#IT(exYfwG- zD=QX$hot98IWdcvudDVvTc&H8{>YPF}RbN+>k*dhd76H+uaSn4WI*#w@6gS*b17L z-grH`VW6<(a!mkcCx7#K#D>a$@YK*cgB$%mg4QFM@+)^@XxWE5-H)s8dMpEg=cbZ0 zAjma>ZBZ~--I3UQ4{~{QMaoj7Y~?%_OXp7k3$&ij_T@m$a4X3wd zfQ@|15K(y8wmiv8$ZJPkV(BX{rNN%VxA|<~6vVYxZsN(^ZR0U_f<-`VV610Mcv+K$TIa@y(*P_mRaxlU}*A ze9YVSDr1W2bE@Q5$4`F>3&U3W%*gr7%gm>)k!)Swm7R;o_wM7V$Cx2Gkp`ccQzXHtj@6=3k4ABd~k?)7{kGN-BxtXuZ^pxxA_UI&==Y^5J2>U?#z+ zJ^XDl`#rZL^voi&F`pW|e=9_JL&nMjU*TzknH`MBC2CdW*zwXou@mfA!{LzS$p#4h zpZ*~cEt-NwNJsNJ?uj!l@Gh}XhSTK2BDVL`cl=32PBrb4vRh8BbXoqR!8UxBewDx0 z;ykvYX>F{&ToK8yoq(&?^kJ%F<5IhcE2~?TMz?%1Mg?>~Z3Ik&S)7jSTr8WC4?%hM z+h+jt;08>|Vo7*HP1ZZ5cOD3bf^r@)J9A`j#ju0xR={){bYTixBf6z56tK-=`sgV? z1t}K)ZTP#eyol+ZT^-=nuw;s~OvbW~&-N*xsHph>RdYH7T%z=!ThdX?#RW{2%J1FO zmqouQBc&L}zSPE56tkt=teb1{tcDBqfjfU2fm##OeDKY?#H6*okxE7~;EUHVlI*>4 zOt8hI9y=zhOAa>D6;YSbO;J+r{lM{{xD-$N3AZh15Oa#$D0A%iCB(CsBd61Hk_{x# zd_6)S0ihaPHb+DA=wn*k_%5o0g`Hmg6$2kfSz(K3vP_84b2+4)c_%}YWJZOAL|4P@ zX_yGe+4saT&BMfN#GP{AT`?I3|IuX5W0IQcvbK(FHGA{a7G16-<@<}1UD2%A18m2v zowP4HBtH0(Ad?{=Q)`&N)}TpL7nM^{8SXeSs^W}SJzpqd`eiWE6tU8dnQ-aPDKdWm ztg0)@(~b1_x%zdyz$b>PNXVN-FExLAmw_Y~37WnO>xSY^?A$yqCocPX2ndGOi~mE@ z$D#|K(Ux^PH0(P*lUi0-JS^wrPCHwvy`7<-!yx+U1|Cwjd!fw_N#8H&t{a6$_%VF1 zs;}C&ehgZfX%;=2s=5(Ff9B3DWJA~js1mUY2DN&ExvmMa`a(&3q;`yVM|KZ>MWbhC z%AP#chV{1g&@A=>Dh7CCtnMgZf6oKlT2YV(4yCbfpPbDH+b`X-5DsKLEjj?L$rl3( zh@P$WmiL5!%#MKXN#qZ3Tl>8cy(O-Wy5|di7U}NGNyKk$wgvRNoduT0FhujlSL-Dk zSM!n%DqCB^%*T#pODAC(B*dD2>l0#UYcu$`ZHm3iK95O9d>eAmMT1n=W93}%Z`L+= zuu^i~Cyt6?yaEfZfRKuo-F6==TT=Qf!fbbUf`jO5-K}DbmbH-bLN->g8c1UtW)-4g z&GVIuc4-2?k_@2Fjd5_)kd`yM(AnM1=G2R!gO}OFAF3D-wMx!Ub?9}>P`&LZHqYG8 znD2lVjZ;9-LFzxJq>afLY~vzfX=K6~#-70^+U*Aael;}cLGN_TXf!_h8|V}4`(YaV z?q$sCN&cpmKhb<)^u81$Ltqu~c|b9dZXJ?0t5eOUA6iw&b^k-%)uU|OwIl<+VMufK zT3&ZZMst!QRvb^?4_jU`pJ z3d~0*y1q|bH0ku|?e6jt#ao{QT=HtP9oR)XH{sau#dM`)!tH>NYmG3x??90k1mZdgj!rgp=ltQFoKM4-x&3<71P!DJIh^{jW?vVojmPQts(9JaZ{IyL4m(B*ZXqwh6uQYJHObM0PE*+W?w%K5!aRBr zpF!}G7^rlK>lx1J$J>Ys_$6>aKkl~l6vhb4`k{SKtvVEVv{aT0wHJ~5Ptuwjma?W3 z`bj#x&A6qa?3>%|3^Xy-_uO$Lii2KIj5Rwn#3oh?z5`GXIZ|u{x;wo~PHG*>Wmafz z9vF&aTXOZUyGbv*PS{ueIyf~)vWfjwUV4gPd;}@7Sm{HbE-Gj0%`QwEpo}3yd9_w{yoTF0W4IjQw2c_AbWO_<{LY7Urp5n!Sk7 zeAOkKXtI7Jt|&P$xS1Nk@m;fB_{6xm&kaVO5J9JPJ-N8fjPA-Zr>JIzV*iI*D_^ei z0fF*bc`Juad%q_}2pl@x?V;$|z=sb1%j~kb0Aj;W`X48@!bszcDX5AX?QdBC+1a6$ zp7`(`r9edVM*b0daIA6=DPznEXI-cJqxh$_w^As!kl)k4>-h3Hd_4*NLsxqy&;X%6 zyzbG25e;;kWI>fgjN)6C3s6owD2MweA5uMvJnEXF{2hKnq#Tr-Evb*B!@|yp9W{S! zbdzKsGlveqL`%yG*Cyfn?KrpNAwz5q|Do+~n1FzPFZ#T(<3`Zp(M$%}?*w;iY}go! 
zMQxov7~&(B+7J*BPVZO11OzHYVuH8JTwfxE$d5u)3`{9|`@cNfprqT1cJn-%=O;Wr zFjo2eFvq$|Zu^u^JSEC1>B%lPX@#%1Nfhd@tGxJP!b#XW3is3Kg=94a>J($e)`HsZ ziQ}KS56|vPAj=rtKd9p$0?R2gXv-8#kO>zwnnC-r<7EYdk^G_}$Mfq|9g~D1?pe1` zUN2Bu$lo_Q#YTec_qL9eKQW$Oe*s%6h8KalnU6m(U4EwS@aBnj$qCI$hP?;8{e+aX zBg+`$3Fm$850|F-(?L=aaTYJ5#}9wxVB(!(%VH5_(jt?eBWP=j75{^BQF9tLyPspmyEY%`#MR05{E6(_J6RUUB3(No7TuC@ zFzWj(g@oZM<#TL-jYxK{E9JR7c2%_94jeLm8*8i_s;1wzT!K7A9RKKU5pCQQ1AvIb zGBNy4thg%XSD5-tMGYbv=G)DS3ufdVi`4iSwc-H^5zfdFw=p*Rn12&=j>5~XnP(bN zvOGZ$5FdGUGy(mXRZvYpro6mkH3<$y8Nx(d0pOG%x*87~npy-jH#5(tydfv6Y@DuD zPQ(t<_8g`Ht_HzkJN`pHyhVUN2Q90iq2PWK27gq}~X>Hrv~Z;L^}U9-~hhcF8B@>q6Pu;jU-My_{$6f}#Zp%p(_ z7Kq3!+&o=LQpRBSUh}hSG%)&Sjy*Yiy_Vv_akp5|y=iITw|-`i`N?7YjXMnmDK8(9 zir76`RJL~89Lqn%)2Q7@TV&9~fAs0iNF>Egcd1M+#3FIGUg_;DnxfNKVhBIRjI8|= zgXiU=ub}$gHU8iBjA;ocHMgQ6J&Z&fwKm_=h^X$CmzI8)=|_A z=w@{O>93%81L(bfx4-8bPgt4i9Pkr}2g%;Hjtyw$fChGoUc+X^KvnWay#KwDb!fD# zCV-dJIPe2yV^B_RpF5EACA~$H9VH#rZ(|$UMP1iTs z9~PjAD5dB+_6~m`6^@7`mmw6Q!TN2pjPIe`}dl-=jeY+_m#tJik*$Sq>Ia8x; zuz`D1fE|{fwrQ@45E-MK!x?w5Y*!xE(I; zxUDW6+^akXr+v^tevZL8bvgL`MXPA8b=A8D z_xQ;F36cl&%W__Ip6lHC@o;NCTGD!sOuU?4J5H47VwWY_;q~2#>Tc6@rWMdsL0d{+ zOctNC zrO>SS)-%}LnS$RJlQn)1McWGC)|55Jsrm>-Rb5*^;k+gXP`2nM(h z-ZGFk)Ta7W+}QZNo_3depjdwQgO#)qLP(e>k@<9$r$5UB!co3qzK*eO`Tf`q-=_s^ z?mn;eJ(-pt@ykpnCC@liM*}9(|4g+umAGT?#jdyaFwfvm5$);nL;Yd3y>hkf87py# z`8n;;ubCseqK(6vt1syLUclJ}ZE4JHi1W@Mrm*6h5?X3!N2qT+zh^l>=`?fgTUjw&SO*p*FUi#VTw+yl)kE zR5a!t^V*+gDN&qo7LZgj>|nG69_fXqKRt@AB@eVLxq#45xEz;9AUUFYuRT zdOZ-y#p-=xvkB*reN%v1m;T)O0>V3gqI>^*cm@^-#xvuj*#dzeYiiG!>(R~14bRZ< zflk+ReW@`xW3Fans;l?U-#h zVKX3znVH}LSuq1OJmPFe0=$Xo1VwZ>4$7I0S8L<7TchX z_DFw}ZEEyfzu6cRAhV}hZoh56BhAQ=I9MnrL|YE^jxP!B@vS^k$?%y!>T+hO&S-SW zSf|@J0q<*>tJN5GNQ-A>O5nF>y$WG|^j$)m+wiY$BM=bfHc3$-m1jgwFgyFR9W^?y z9}>&XBm&3a3-eoNA1~0TI2po&C6Vu}DqTIJ)+GMm&Net!Rxw5Uq#KAjeb;LguGLw6 zf4ko%oDhlCt0v+(%u(D&aKLAAc&9eUBBVh#U|p)(D1gyD+K8=>6}h_|i)4Mn+Ja)?o~ElL7~@S_Yn>B3O9)hal0gABG|L zn|Hh=<8EJ)M6Dp(9O)& znU7KuF}y-Lo<<`%T|DCOt-(%9+bvopCY{Q!cFrWjjw7LFJPK8*l&X{;y{T2XPnS)q z^=-k!M%X#;!p9TtHTR;*Dq==)5FW(W)FzA!CpAE)S=IDY|IkJJOey|#F2&k*AdubN zf zb*YEbR=$^((ws*{AP9^nPuk)mMHfAEY=6uNs~VEyAxzFJYmX;H%gcsFcrPWzC8f#Q zDz0zlW3<)b>)4FLh)6s^7ByPqmP_s(46P!3RYCi(Q|`I$30Oo90m_=G1Hgf&o-H)& zuJrRYajK*dge=3sJ6B=a-}+kKFJzWi2Zxz+7+P45wHHkP*~-+`?{oUT#6Hhx@P3wM ziPE0aS5FSv&TdOR(cJZ6Prq{|(D4;U{y(8uZ%=B?_o+#x2!h!v5cd1a2PAYqv+a&4 zuAY)&%=>bBTRf;1y9Y2*DNbu9p>G&bz=7!GdQ0A2Sd40ANVRSxa@%_#~5DE#*}XN5Xumg-Hm0LWun z>M`wbcXx0!G}jO79ns|eW?Ee@PW}Yiu^SXcxil1GEP5LoVO`kBT=aZF2Y)!I_4d1L zS=3lJJ(4x50m*ny1k01_u*iDdiSc(EDejXE1HSRlvygqwkFOg%b%dL7$^ZFvq!elO zTLe<(@rH719=El@RThDMrYAy5jWlM5TVFjh-z*7wvikra0>cKKlYl*ja!pXVQj4~{ z8FsVfhi{id*6QWC-)TV`m11Z1E}*|x#9sLnTcM~Jk1}=J)v+P+--AL8CA zFtYE7A51c_Cw69H+ni+Lq+{F0#I`1$*tXrVZQHi(?eA~*aUb^Szwdp!>)w01&Z+vK z^u&W;3OAu>YywUu8RW@90HCs(T7>*>69t~n%{iQ+D-bFR$L7W4w-0WcF_5E2r+!5! zEGsI_m|sSHxKlciqQUL(?2@DjCasiRXOqQ25x?lj2z($Yh+EdWjI5nD={&Nz9mUg= zSIQX2#U{P4D6)UC0AFBdS5{F8lLr87_$5(Lwv7u1grCxl8#TtPSN|8wE`z+u?@XSq zsI4uuGAm170#^_atARd_Q_}VK?rE!$+ekI`4LX}Vzb3skl9GDX5tEPqEc^hBRy&$6 zH%GI*xa@|XokS1{cWQ-W0UCiqQ{g`2vh0?;jZygC-i6m-Z%h^wqMHYe zDnCR|jW_@V;lXowL7ic}vIpZpDD{{%k-w{V6^Ao3Eyq!KJ;GOS7|PVtV;+M*YNM3< zCRBhyS=zgKyV^<)zbQF*tWAeXV%=YauLqqR8cP~@iSQUw>JC_UIQc^@(2+KwJwl2F zjkMY85mgI#`5%pAvBhoM{TXp7psLO>AD;*YLZb3`omX*@_Alz@mMSwEzkU53fN+a( z1LKzp;lK(Uq#jRyS)niMq7T6$kVJhgH=j8Hd=bCa<(g6TjuJjsb!wk7_ ztNZ(LdKAc>vsMKsw51mO8#JQnB=D@jTOCbt4gg+JO>+6EU@GU>J3E;BLa4fElv+O; zNC}5&lUEM)CyzUyaKrtx^MZlIL^Qf58bF7ffY9$O&@7mvXoG-qPZ3e6$~znJ%Yb|1)*Ob>*T? 
zs{^hmXAZz5g+VRH-Rep?d5P_osQUiR-4|^G{g>#hjtgCCKvt4K8c&G%Ute}H{G8m< zp&amU$BdBEeNB!a)EoP2WFy2<`9NfIy^pJJ2XT(6ce)=QUC9bD3NSs(Y%A zW~dg1MrX|x@$E)OTYWvR&`<`H$S+9Daqc5OZisN$zs9KF$^RPu5|vj_K>V9W_KhTO zpMdOW8ENB64T(y~X~v$=jO*%do>kUW#W_={c+%5{65~{JT+i39W=Y3vyd6E~5JEi5 zRN3X_@|!U?B+tdKi>=?de1`1_#X57-k6%g}&7+CS0UVhJ?d{>Q{v1{ShJOn-bs&l- zcq@a>H6~<9ZB-OrX>QJFam!q>(*z~^ zLO(HWZ+xrEO$Yk6YBE!JYhgac7q=|F%HHUjdq!339!r*vv+-vTyPKYL9@7CHezKP1 z^IxQ~q;yoASTW6WOrVTbz(Xl^mZ0Huvsy0wv6FvG37qet;rDI%^)Lhojy%8&ZHK#CkfP~| z&0j<`9c<~nhyiXoamj=j;EXRt4rs*OKRSp@T(JMFsIb08kS;j`e(u#nN|2+-s^@Ll z;T?)!{Ugz_lRaWHK9X|JU}PVsF}Kd9*ZPM#9<9-*(Y&Bhw%n%cY2w56W+wOBNO{Qw zRp+=7Y%4cMNXl11h{Aq`vt|>B@$kL%oZln8cQQ6zQAkP0J?2GOUR}z&srCjLT;Tba zU$CU>vVcz1$(YSwM%y>r1h~5ragv~z1+kSo3gJJIX>zL&CAX3k^$#`h6 zCt`v;+g0Q>be8tgSWS%Ja*KhvtOgv&*jY*|(>lfuWn<+fcIN^RrTeDmCfuL+IsR~-4G4D|B3G}88A9gN$|P4RtFCUv(wRE%0jY8Ascq}_ z#C$ViC3O|ik|)_k8{p7}GqEEUuKk6EiD$A0$tD&8Zhjk9wifl(mYb4VR$mmTHehmL z3KTziHyHboFyTxlnqhi3TBykLQPBQ=y@>#+CtM9x@^3+cevLqJj#Ux>_<5-T>F=Xk ziLOtV;SJ{&{XtF`j6rP+@wz@UuHa0;C#2 zOxuf;SmeA>R2Z+?rO0>8yU3Jd9AP`%!)FET^>2F?)-gE=QKCT^>!F9R^hwq(RJLWiSF#%oQ=L3|%xwYt%lt$EMh&4c%)Xq_BK|f$wMx7nZQHzG93a z9x*NGBSYWO7noOj%cv?yD*Js?3MpqbdJua5O%jIK)pzrIErr=eAikiELsc53x((O& zFPI8zB4Zn!$*`Xf;+#&joVo3j4rBmh-V%g0jYsqr93}7^s2>`J>G`;(Z{V7cPg#xn z$rAdSC5%!^uaST@$t+I$$n$*1m%l5A`_Y(ZPQv+#*QPolZgX$$^JW9vb&o&W`pc`w zRBLyWF-Yy-$2m*UnHiJe^7W$kD-uCp%FUd5g9G{+2fe7-I>9*uxhtAJ4dqj%e)pGF z{|&7qu|x@P)B*MLDu|Tng!BOuWzdg<1dT-0Q$1_JMXJ{iCy_P+^WeI z5l!~*Vk~t}xFrQt56qKoSS+@YmO@Xc(8zPbzK5FBc?bs?b2LS=x)TF`fd}Aaw`u5} zi7I97FDi+XJ@045pYMC2KJU+SKYWsAKcv5Mt?k<-b>9wXOcwe#T+Bw+Eh2F%7gZSw z2B&g%(vD@ahFr$x`inj~A4snj!9SnJ{sFE2U8bLQ#t_R=kfwuUDHI=++Qg z?tSNE{NhIgE>V;xEep`HNI#2cadbd9B@JP+9dxPUyAY74M)kt&z{+W72^4>-+HqXX zo;&*1;&g!=Yfqi9z~<}47s#Q`Gz=+{D z?e03Jj!#~VD31=iqYuKVNds-xMu7ZD1pLnNoEqf&ot+WsphihS$=*kq-=T2tw|1sY zw@6R9wIm)d0;tA;YHp*BR10*|I-gK-7uo1@){UXViM5qx`){np$4rVg&@fX1#dY4_ z-D&9o7)AU;U;73!QLeT2ce0NXu`NR_>HURCKi-D(TtprDPi~%W9QgYu33-`QVqMmfaw%I#YF1{^1ZKZ=PX6ynNpQCt*D<)41quGYm* zXcgLwwN1I~I?^ZA$tKgr(;PPsadNpVC+YAOdIK*_s}u3ToAl(>XN-ij6<%x<(E}6T?}T#*pCi3?aX#!X z#2MvHw(#F}owfpp??l%uE07xZmppy7=5b5SIIT+bSJaj4f9W{3v|zCs5_RYlhN62m zau2>9#3;h$43e}502<*$cLHTa~@OzPYq+#SD1Tc+f+i5wouj;b7nac?1$?t(z zX5W3G9X(n&WRe;nheCTY?nHGy^DGP+?!mg#94=fBiTH5zzjRec*NlLb+ksyreq6wN-4@vb56b*Jj0WrxuJ9 zR9z9YrIpGXNWBu@}m!q@B%#h`??u8*IV!^Byivf=@`3NG(ujL7*Z!3 zocm%HZM=;(0zJuJKc2(emxK|6HRYQwF%DWr&+N(=Vu)6mN7mRr z_x*>R+t>uF__CdIP|dz(S}+(CD9r1qlSDAB(Oo#MgP__D!5a*s_uj$LA6qUgQ8|GVeKdnv#0 zSGEAg=K8hyDB7LsQtQ){RuU^J%7cY((VA=eXG9W;E$uqy6Bs6-v8wY5@YZO30ZHc^GiJkB-yml)pFxWIGpf;m&q%pVGfk$yI613W&_G>in%>2q8;d20>yp`g5C(~yU@L& zpJIVpyxe#oZ8O(u{oJxP+KLb8SkQg`kJff`lVI5u_HA!TqT2EdLPbF>R$Rv1k4yr_ z8aaEA@y9z|OSy`{ecJ{fW*Gm|8xEu;vUNW(4K` zR^nF16q^1NTO2OF-$PH|^QPO250<(6BCFR1a^2V~nSni&)8$14wPgR+Tp0h`#SPL# zj)KfL%>Pyf=bEaEW@S8?R!?0p3XsBqO}>S0OPp@$sY!^@?=Q>9l9+Kx&{ zF^t50^XphoXK-x8{crmJE!u0FlMb*%cumc;SWfBmimcH%f-Ld%{<{mbQ70`kamQTk{r!WjW?2ByP4nvOJk@e_kaJZtsq?l zdDSEliD_;EosK{Dcl|vBND+Y?DeBD*6wlOl<*l6v4WgbLwN8yCG$}7U z+zU)$;hlxM-@OwR%MlFp&LYo!0M8qp`frw37LyISDoe0$(f@l=#Qzlr*0u#@b&k4FKPPNAInlI4z9&5n084|VWHx@voHIbJ9}7dZbpYw^7B0l#zq%t&+x|k`AZ0e zEf(9=A1dEzhV}kOes19#vfg*{M)H$>KIEUT&fpJqms?-RmaJTR%}JklW86LuNE83} z#c5OMIf`N{YsPs*8g_A~gNyu8S(M1ybZ6N|dg zFXBX)?mDmgrkjpT7F;j(OVz9K2~-N`~f z@MFC}$K?msEH~g}+8doL+5HfFAfEj5FhZwp1eo5F!Q0odYHZ!bsB%kTb!`>+9g4V9 zoDDPMTLX<=#4vPw444haZ#`#xb^#?c`h7~l-m{u0GDb#*ttbkHpJ8g%x@dR~S%FxK z1Uz`Zyle@>A3Z??{=UF7fs{*7{yxFy^Z86utB!ZWx++`}5!;wW+-+@cX9&~QLmb!w(}SCgK_QJDZ}EYVV1e~m@#B_x+{o6^k>@7DA9mfP 
z_yWa>0*y@pMSx+$DjQJa^$-8PfUE6R_n1A*s--3lLnA3qQ>q6<>|GBUf=z0oKIX z&fVdN3uUb8G1xgh5kyJje6V59b2h{l;cnqs4GsuqS5E`gjy#paDG_HC^xh<@RvVH zJWSHc>LrmZc-z`^bR;JkBl*DotB$gbv)XK=4hcu=?UpO|uj_-AnR23$i2|N0qo+Zu z5vg<%omt_G?)qd<+$%7Pv3g6z$nUcvg((qa8aqNyyF0Q`pK`G|yjy6s;@lkpV{d11 zuIf*V=`iZB`R6VU`walBRUR!Sr{$;_dWCGtuP^b|v!!wOT8)~xOJ?!Cfk&i>HoD%< zEdiuVtOwH;zUz$iijJw*zJOu(AqbdDP?6sx8V@TR>oIZke@YaIno@%K_zD1 zwyXG^;hHq#c1;`KDv?|}tPYJH&=z>H^c~$Qg_jAmi)a7-Z9)NFrK(N@Pf^^SP(UfP ze7-dqCtAKz)0gGvDXBc~%EuUfDAhZuQEzr68<5>mIG0bBDN{!GO?$avEAVecPS{H-+YfbS&t--UlDC42kJ@e7d64{iOHI#UpUW7 z<8S#csHYA+k(abN!IxFX1G(8u3@aGnvKg}lhL$l)>0sx3i~sdq zQ;1kF-sp2%F%~;ZaX52VXgUVS>U#YsAGEC159A!dw8?bfX5Oq|AsF3_GEKQ$_4kj) z>JyJN*xrssB*+_x98l$xsygvtDE%4x!uR(BADh`Hqv1 zy?&OWd*O91#ctoPyblZU{sD))P{$bnip8j=KTT^2o<}D>pzNKB09YZUrxmJTG_Q3h z*@#5QjcgMLXqH}5p7Z!cy@YaMYWS6VpIr6Q1pfQwWl89rM<4QA!3T^~HWCh3h5|bF z9$vx>xrZ`MnY;TCO#T3?f*LX9LHsGF7_)IRbo^n(-uXvHw0%k%5cbb_13N(t(BP>I zw?vZurssFz9SYuvdgcmOMnrD=do! z(1e?wCy4MS*DxG@U7r#GJmlgNp0zmF9x0=^Sgtk=>fv}N4c8Q;@$yL`pJQ9rH=SC< zRD|O7!ub{=XIyw=zn%${aOaQKdqWa(1Wo7>pg(b{>q2&*4sW&hBFqm#G>-KDenbl2 zdUYF2WnNNM)Y_Wc_`T|1p%et0@wqspdOW)Sg_uo)Svo43+QJDw6uT&#+ht}S6G*AD z?B;*(P-;+E@`Gg>I+J;BWzQk|R2HR1Sd}`_$n_B9BIkSU_1;_mQV!JXGy@&NMHld^ zGPuWWBjWjFhV!MtZWHi=-K#tIr#Panwmkq&ceF^9g93|C)hJJZH4sNK@_ zi=*YRCKGiPp70W7C;-`RY5KD?;v3aX=|qhfXCY=aCx|t&vwf$aVe;95VUxTFm z1VDgUfm2mSXKm)7dSXH$f3M4(Q%H15KaY~!r67QeoFx5hQvxuOOS}K6h^EoeM&YL(jWzU=cPN#^G3nj{E2&`jD(Z!R7e!(;n`m zpuCEb8SBgFZ6==P64%NmY(yF9wozV5Q^ZA+r&Lium!}niWS`3JA-IIv*dHN*hEjl%4v7a=^GEwohXA}X9SZO!O7!K!rM^s`pUg2;R&Mt~e z!WbGL9h4zp&lsR6mY}n2E5XuW#ztl^&Wq;Ib!BOVlSc#2@eImrN&YgnKNADU6W+7nMfMJXqT@z4YcgW1J5k>xH zLrq?2vTs6u`)vC|7N{7pz8-dm;@D{iK6>WnWlH*r&c=Kfo>x&dmkf^^h4es(X-g~R zkms$=trNPUH5sQo&0L;e#Dv_&-YS#Ym3%!PeJw-Y{ZQ`%vUzpn+DD&UOLL{HpuE{M zxOb+gA3x_gxO&x0mv-Mai`uM_wB|vTu(U5`FjYn|A$^D{voDY(_QA4&(5}j*5Vf1D z6~F{Y78lp^y)aPFQs&psXZ_uee%5_xV{d#Qc3R@VXzz9p{fSo(M90~{T5-zx@%k=S zbW(2MbBoKKeH^RIVQSyw!g?BLW_W-pE7XMRG~uAq${W>D26=4QFV_{VUqrT5lRR*R zPk4K}#8sIZ3Vao#cAz<5EqO!Kglg7E{`s3-z4Z+-EZF(#N>ucgFjDO#w`1LZxL|d0 zN_L6(Ec`E=z~E{yYYLMEIY$=d3hR(x zSgU7qvbOZ^V2l~9sYE;tF>UW6OX^YT(Z4+#4y;NP5>xl8X4*DQOrmmxMpMVtyx%mZ zX+0=^oy1nrxs4n+4lVn3Ln|S8f}qP4-|6oN_-4qf+;wZuKY*8N5(>eeWu(T1?UBvl z@tK50hrsiCCu{z}^;J09w!RcC^|~WJ*3X!`E-UadvgGe4W8oU;3fax@o>%F7?x|9` z2Ha7^rR*juaLqP)y+JqWaN#3%+1|?%rsLFD(b?l1>7F`+Rt{v~Rq2~C7WnIKSqm>a zNeb6e;r$c_?sWi*65s!F@-ZRb(Ai-#>OqUlY zoEXZ7g}6k;ahkk|4ZQA~T0XM@rCR=Nn^*BES~UY>LS7w)uJn=52Sph6ch1^Xj>^Q zj@GV>zZ}B|W6;;|;PuG5Q}<>xcVxY6`EOsX+aJe8d}nFi^QGje%E-Z2Syj-sbLV^& zz{@D!hVe^i7>mn_ZUklF-xLMtyD89w`BLWOA|BotV*kaCtDvAC0!T6k-T7i#m~Edn&pnN%0O_QQRS`=T0%l ztIb$m=pNL+N?~czwJ5cNKyx;OVi;|Sin$KGlOmQt(o>$GK2_vqx8Q$DUz4U%rN0tm zvA+I-Kzn=TRXykNFh{lK7p4#^AMqM4r<84M%xrbLJMSNMRHQV+nGa*wxz(IuH>)9Z z8`dLX5eDs2j77C=-H8A`IIeCPZ^BZ=n13XkD))*q!?jT|8Fe9kDRm6!${=X-vnsxw z`dm9$3PJ|fc%tB>g6RZRUsQ|>W+}>e^!6K{%(+dy0$R&GSQTOZ@;z$eR1)&ubNiqx}!Wur*Zd zi1h<_iUJ*D`L#Cg9ENa1Rh>nSF`vBPr?`_xgXL9E!n$Y?7g@o2LVlVus}9i#B-|4Z zscx595j)C8myKeHabVS}2G7Y@+f6%j5Y|5*YUfcGr}YPSbxsv?R(c2 zbz_nSm7}Y&qBkLpgq18yt0j)^d$=RBS(0rA-_+S%NnRa+4)=y^P%0CZtXf;^wFZaA z&N3ZY@-u7V8rKUTIDhS5!JOydCe2o+356kNKDZ5yOZdjb**|}(IrEbNE1Lp z9yot<`uIIbW=80@6*NLpr5*2AE1*KVGa)Q3-?s@2fehC=jWeUAP4?e2(6c-rMxYk? 
zS_z_J*wa7e;qbBjBttF~Hcs;vDRY%76UvRh(+d5w9!foV!kyaiqXZ0r7hzfiE$1{t z*iSD)4nf}1uLIV2EmG)x-j^=zmA010BO>8__31(FwVE z{97oC@jPUbwLt&FUpC9lh1hTpqGxN9x+1L5>LU5I2on0|R<9y3K@r__>5?wqHa+s1o-9Y_4|mV&b}&zX7V7O_FPQ)>-W(OOkP-q%ht8N$?8s%~0o z@GaI&aNjnNGJI2Hv`cvJ5D)h}==0C!T|s-yZ5^kZ7M*Furr2b#=~Eqo@&b;9(B{1@ zq4rT_%bG`{oVcHJ%T9`2O3);URxkQmD+zvioc$IL@k4Ap6R%5XWbf&d75Nzg`t>^J zS-egT$X@dNE-bgs$n*tf-h|)y%i}kjna1sp+&AleqGw_7GR;H6Gbmm=2hCQb3ZG6e zm993_4;OG3x?BY*q;yzh+l(IbpWU`pCDr45?#g7fcDKAqXkI3t@gPB`GWrzTi^VY= zT|1G&Y704;j*(b1Evr+bh5?p{P@_{#Ufka?m&glFh`@nGKB>x!+X@b~jvnVV61Mp$ zSPxYy-T7;n>j4PmVN194yR^5aNkaqM!162pI+`>=1zb)gwbZGzWf-?M?z*dgFil2x z|9oEz8|?z}3jN<3FdQ7l+QQ0#&gUjlg8UpHAEsq*ZQT_a%^(=*B01A2(0GXnRc6O9 zs2R|vwLfzNFV7A~F+T-=(W6PX>5wrbDVj-P52E7I{5Y~-cs`Tvnr97My!a#%W1>@oANj>M%T^BcIM4ST}@B%x=6h^wQ$M|ksxff zTXm5vQZN~yQ!GjM+zg(Gq><6M(LD_QYAsOUaU-l!+-_Y|MwgwD_ZmGSs7KX3wc`lt zuvQ&D)~d+NFB)f?L0%=%Cmr$aG_P2SJ-!9cu)3g8K|T{L#Q7IsqKkEbWj`KK=z3a{CA0dd9Ma11d~)dHZn5@gp!sEZjlquLs6J-_T4@NHE%;O&Dne=}44T$CFLz zGM?%mU$U{BSZMpEgH;!A_FTJt)LZq>Qt3}Oeb(9>-zMP1++d1t3~p2u)d)iO4qHk} z&vIDp(b`Z1i^@Eo#oNG&KVI#@Pvy>Nex3N!O2Q3In$z@!L>aw0pl3v9_CDQnd$q?H z*>#{iNi$MazBquFErK&B;16BTAgml6`^Q{yd&#}*8a96WW@B@IsNr}dY7lySTF+(H zirAQ&pW3k)kiTih?R<)Xj5Syp`2IUI>X)}A3Pi$fDb)!D{*kQlbq%%k>QTVbh~sQ6 z2qB?PIkM0pPsT;>j&|R}Ws^x3O|Jk+pXdv@?_h z-v;{!89GD<%ACpcQBT#cu8i;f;)wN^B?02B<_|cEa!@TH@eOy12%RdiQArW^_Qu|H z#SIYh&Jo96ROR-Xu~YGdWEdy4hOZ(O9)D$6tDRlDS5&`pE@7p?f8rR~fq%s8^$e++ zuJg09Vvm7s3ESZ)lDZWyO-55ca?ZZA{WYs+p(j!UeJJ0gCQLfUq17cAp%X*_&{x_~ zlJ#7!vmF!QxL`9&P9=O4pF;h^8ppYEz~Am%5Lujy&9qU6olj_L%?|7yf27hqs=nDS zJ-xv3jrP;~S^e`diBd~#Qh$7S=lw3b>-x#}@u=y0>o~|uBiWY>iuik!qOSD(z(*u8 ze`N0I7@XUxcSTK92Bknn<|BfmOV+WFA9%%L`l!R+SSBP@uK5q%9RcymCRWcntPkIl zsjrVcY#*YW$%NpxYu}W?QpOc}sLgq;njl2a@?^3zjh zAYJC<-=Bd6M!q|~UsrnXe3p2?Td#H((v*#>arr;;6>&Z`Zc>h48l5&KQA!{w0Rr8J#>8xe|=s@ zC=dD`g!+CB#t0&9hZ6f9*!g~8e-Q~l-OVxbnwHU8NxFMv{$5kd&liaE{iNc@7DN%( zv7;Vb^yxQU^peLe`)pWlL{FkiuN><3`6xJ2E)y((UTCdLI;Cbro1+4aB+Ws%_AD-}4Ugdb# zkSF_-y{s$@1%-m@n3?;a%yE=T%2R4O7@yBw>3-nkxzi%Il`FAEt0!onz?X^R=|JN9 z_MAB6^Ybn#KX2Jl@fby74=Z+|XT18X4ZwyK(@x0^)gh6!xbUd=ip7w97bGD64=?N?4n^HoD-0iY99pS0=SIL9Hwq}? 
zm{TL#mDM(2ObY$qw`h?-KC4iZE>Gfge3l8_ke}B+k)AsLjN*^nfcp<}j1h&sF2B93 zCNTb&s3jr#;Cs~({6E;;`~M(276plTI}@Mb6scck02-Z&C@OM_7?UKJ(|BlaZcc)F09r?&WGFa5g$^eRDUV zInr@ul61vOx2cNNq&w@3$bm^qXSv>NkJfl?gNvpY-t>k6Vb#e;zRC(>zX8-M8n|S| zrg2*8VcIV@k2S)&a*_rNrKJ{R#Rc(cv819zjoHs*M-yzy$^YV0S6Y1?ETJ{r2jojK~U+ZrK z;cn!boge5pIDqHOu06azLTFY(F)QIUY~ZT1HV=YAX^Y+}mJJMp!wM0G-fhv7H6@H( zi>dLcCZZ_!Z#9rL)*3Fmtge1m=y<{T+=usCDqMJ>C4v#R=JOlCM*ylHseYL#F~@Ss z)osnAU6t2jEID<-1!S4q%Jev&cr}Pukur(F)>O}ul#>hYkv|1a8QIi<+W^?bqUMg2 z)v=I>1#C+oLc#+?0=%r%nYBQMz@f8tHF&LaONQR(&~^@%0@SiSPXk})8+L(-ef9(d z zGAc@=!wtGmT*k1^AB;sRy_J~aS@wmvi`GD-%dIb8A4TSIO&TH+wC}=8DH88FFNbuv zbgoCs7MA7WszB*zWet8m;NoV(^JgEUc0ya;#tsHnWs@|X|EDb zL>v+tK?rj$`#t_8c7$*XZWIgGy_y)+>*FUbd07BGrQiXakUut;Bg4PxVUf{Ok0duE z6M-p`D~FydzT&B8$GuCXI<2Xj?+H<}EqbVEPGR+CQ}&M;#6&LXIQdmWaUGeEB_)(c z05<>PDSN3^FS$3#HEv-e#e%olhlC9akRjL09`Cc>^^GLG;uK3A|L%qtVy+|A91Kn?1VOuZ=kH`l9 z&|i}jPf59ZGA6a(qQtn1|2C;gxYW{4z|-$6o^ssWig#8j%3=2SmJC8h+jAp1<$lAp zI|;QGA!x%zn*;h_Ts42@bKT*p*{vS}s%Y-Cs3v?yF3LYEmz>Xfq&HPv1mBJn(uhVL zXq&Y15nAR^0bZYeei287x2_sAzLE(Gt{r7buflo6o4dV@64bhLrNY)eP~38xvLwJ8}oG7eE#wzYC{ipZj1 zn(S__Ee^#=>Zghe1<(b}Sd-}3dIw4Wn!&}?q;o?x{ycTtJQMN@*UZx99ly878`s_& z?r8SXB$f=mU#Af0w!E{RADkMxbD^nR$lq=|@;xmuQ=+auIv}i$L6FxF8@CZ02iR~q zQf7rSJji_0V61kS9*3t*Mra?md#fY4qo&lMUpXR>uoeRxeKRkSI2?sDi!9u_>&O(& ze@%UUAD9j80I`6RxSX5CM_cHO%|Cr;<6iqeLe4Nz3=ILd$HLD z(FH38>b!*rA*>npflbs#1>T=qTb~nOpL^J!a;r&Kf^wE4L$``z3LY4`i)|^)3W3?a zuc==jYmE2!MPu)clR*NE&%YqDBkBsn#+S9YQf%~ln`hpyV|=AgE{D3Ap%urTF=k4M zqveu{1|(h@LQ5-O?&}kxw{VK8Mk$G@vuWDKC&$ADdeTbqtZVs33L_|2TAgSDeb0ur ze2=GU>=Brpo26RGCKFxJGRz7OYqGtX-7-Q%s!)h1RvS)*UQ94`B0HW z_xN8=Q{a)a8bK2yZ~H1f?XG zyO<@6m?^759s7o$`25lTj zs`ZYhm-EVC=KiGG3@`V9T2H0>hW9}a87;{G?4c1UI2Wy`)s$Mg4d1|a8fKzr zvm9rvlCrp%w;-^@#u05n-voY`0x9ZRKi4hb@TjDhOAPv@?D=|qb_b9WHmdqxrv(}N zBL~ov{-zxbF!NSY5FFlQSFm_vQ9)O+Y-t@hmO7-&;nOui9$W#kOJ8D<^u%yfvo_4~ z7jRs<92(hx!NzAbJt@oQ_hjsulHMYoQ@;(_OR}1t&KMl61U3^s^~~+zz_?VKG^; z&}oH)&OWcvIv3yH8(kOsv5$doiX?2*i@`R8%jCPTx%l>H{@{mnu?35w5rZwK(g{`P z1O|DiL8AvA-X5Yx*BEk>Q5oOe;DC=H>!1azX{16jY)vUevei&z!4~n>Ndf$`_ct*#{dEA$d?Te{jX5Fh5ui_hhrwD$?$7*6oTySGF24^t zb{|WZTam>(yAfg|G?T7fYr^G8#ilLoMi)94aU7odo_L-?P3qXx*wz&zZRDWW7@ru< z<({p!LNTjQX2KBzz8RBIG-j-Z4XFQoW4QB&kU}q$0VF@;=dP0%^)09lCp=`$*_qg_)MF_~$bS3<15W-s%$8 z)W&H?R7e~hn9G-FMQNkKB^jmXLpTz&GUTqYTq;WPYztzLrc3YMJlXg5AF#l!lr1L5 zc~Bd=J8^=E^Lo;q^VcM}zjQ!-_B6`;E8 z3~gDV?2ihe1HkJ!-=t!Ni!DJ_y^k)#f0XKMNzAK=G2nQ@EO3VAUw;$flCs(@o?x5v zab$MjT4UrBW|fqmSUvRZlwN~+?N~gBNe?ua(;X!hRHDwl*RXO-AAJmJm_I^y^5R`p z{B2xq#iT(__|Z4v9L7bLHuM~O#v29B5sT%Rb?~Z5x4b)YH(vJiHfD9(LN2=DffrwU zAGPakS1#q(M1afgz$J)8Fw{OF?vC>Xyhzg;Eij6l!7c0YiFr%#HegnRoBmbb<>d42 zkxTdIiH(+|RjJfLKp%XF)nw1@_}J6;kt2lZ^cBG;qmo5|C>EAo7>lyvA1#XS;9`d1;+%IugK@`Puf#ZYkUcMmKuye~4m*;v?hj@Tn zrizq_F5eQvtGsQGsWKdgREG7jt^xZ1-S6!zoc$Rix)uBYvIrm6;#-tHVBVX6y;J6S%?VpIeMvr7O z$5CsfS9A`C69oz+2W|iWFiY{9p_-KbmF_MS->~5d%cPQB6)D!Nf(fT2CRH2~GqYZI z=9lJkt=uGpN-WW3@9ao6mOfyIR=Wux_S+RM)Js@$qB>7<^L+%@N~$ouh;^RjS<+p?RzH}c~|enqWl z$^QGdMA4;+p~Ba;>#JcH1EB?#F+Gz)SF;uHih@ zvVBY1>^+b=Wj=4>a}o;2#ep4E!t~vqOPA(dzS|%yh_1w)XU8WfY-(<&OB*t235IS# z%NZZ`Hdbgfg|S<40&T69@N12C=t4fMAfzK9EG{;zMvltnI4B}Dr%HKob+n>7)S+|p zI3biJ6x|6gBW56evpJd8=)aiCR7Rt^U2(SzG_pN0p_g`&1a^}SR%n^^pFXlxXjt`^ zEtqDs6G)3Tb=Dk)7!v6@82WcsDN~P|tfXC}tYz=GD62LHecOhzHYwIqK30*<63G5mB?V#q#=!V9#fFt#z4469 zqAcr0)=3XKw-pm`V3ARqHtuI&DU;gv`0>!brb?emE9ybYyzY_K|5$s;0YZLF*OfWc zw`{B+o%1?LLZ8ic+DE7?3C%1E3uEs~B2vy*DGmx1hF;XFhsGyEu`3Nvb5#5^zpd=P z`t36Gov^nf+8ZSeT0auTyb?m(8xO{$;as4Q>A}aPI~2nHc|depa=R5voJ``eZTX6Y zc<1oa^aCl8WNX}-vBrP|he1sUSoBAo8>*G1xgx_8Lu~U(sD1-Wov~7=K;3 
znwEQV@ZyZ+)+H=fXYs%-{F#=*1(5mLnHZ?_p;?JZmkS9KqELeVHifL}*|Dq5Oq z$V`ahyq64EQhY<1B~4sXGOHh{ofUPU0eIlPmN&lVBhTXxG zbQ0itNq`G>pCYNSy8r`O*G+&qFRx-`FoAWZ{$zIz0HwT0{Xl>XrAheg8_zHO2h)Gh zhu1V{=op5M&|t~_OQaUI7X(RwFH{25oQjc+B*5OtL0(VXbvvf$YDx_yRXW?wKVZ;s zrmisM{_Cz6s}i80B*3jMVHABJz$4833<1`cy^-!|PXg4E1bBtKP6E_3@*%N6CIr}u zlB}l}*f4n*Lq_SMsWpuy>usfaFr8XiTcMWKyuf8f!SIhpaHI9r-a(sbe6&1d9SHFb2i zwKR|>wSW&Cbl_;<3rQ;9?-TX80&J;?$7S0hv~{;|T8fa9Q&UJxOzPN@l1WO+AUD6N zZOo=QpFrmws1I1i@vDhlCm6`9h`nlq=6LZ>Wt%O)ZA5nk3CnJ{(?(>EOF zd1lwyx-IesigVMtUca=xNld4p_~XkORFy?TO&NLFsl-MGoGB?43`sIMv_O-l2dF8P{B zdUifF-TL@GUw|=f0yHq=?#trN2T+;+1e=wHO#71wP|+--wz`_ydf9(XET0x2l&0Kf z|H@(f*ncAH_MPKJW>u$m?6QM};$r0rdx&Gp3JZ4=BJ2`YW|kZah>~R1F*8ruTtimebq*b{#o6Z;xwUQIRprIucie_OhkOZ-@0dBN z>Nc#cNac$6VfI0$0zPgO&N-yx<9 z8qY45+Z4WgZdB{s;;*q|zMd3~XtK2PGKg+uMYytTNl!~; zb6H;)iRJ3)j2yY0t5NyCqjFKwENh^lQTD;OV*aaJ?3gzggXQKtN^K7trF;sqHDcoE zC3xI;Lzh(~?WpppbJ^&<0Ne7ZDmw?sL6IEKol`SjQfg+VpLQ}1VI#qZWcjgaf_iBdV8jB(=>u);v^yGnVgAHbm{gN|Ws?T>iS+(^-(G?qsvlDw;cn)|J^ zwsr9M`%e2pZRJ_>b-qmB%*xnp zSRWnF#xD+1T-iX2sPkT3B`5PTSi4SX>m2vL|COBi74bg{AQtMbP5^b!zGwD9Ve$Z`^QZEZj!Xw;Jl|>Zxydh}Ll@Z{$JU6?Vj@bL313 zRkv%q%c}2i^<*0UQ@xqspTe~P;`Ck`vjNT3ulXWtFQ+e*Q(dFfHNscxxLbXm=&2JK@s=waaxPL; z(;((i6CF?Ov87f6)kV7qnKT-o50LidNHrdIFb>~BoB9dW_?Dn`soWBP08S7 z*s7RMSrxO9WvgO{T)K|ME8I};v4F>B zm%oGnDrGt-%1tD2hA*lyV|jOk3YT|PjBp#rn6bY2&s!@N_Q&+RbKKI^%&E-v{L^y` zPR?$)jPyX|HH}$w>Le#Q3=0j$5K1j7H78Bt1MOl zD7W#9pZpAa!PCP_jKgj#H&5J!t$g2K!GQ~P z=z2OO1n~Ot4LG~`Feg6$R}sJ)hnFx(^*0u5D&#>w0W3e6%+yiuu`;#yTcFDIt}EHk zl0aXaot1OT(Kg>3KXo>d(OXy^F_~%qigg6SSDxI?ls~A%CgA|e%WMpdg)9u5!q_<( z+-mL?z!RTOXVRo`?7q?>~RpXF!LLH^blra1{43Ey*i zcNDMxcxLbObjFbcxEQqj`3yD}tLh1po+-!Lz=dfMcR<2(-%$CF$BEprv zgc7!3HCwVyP@#S(7LgY(xJmznnlfRhl)iS_n7WTk<&BOn7$~$==d&d?jD(DQ8hc_R zx@+NK-9=K?#IQN5?-{n$tmodXZ%F@aHKDQub`6lr#dPNQi)r)m2Yl6?DJL+ zi>Z^Q>SA)!6Ie8VuFY1V^P*Uvlubc#El)a4y^lc6T53!3h>Tjx*4*!Uh4B@lDb6H% zX&7k-%X!=$Fb|!tv-+JET(5vUp>aBYZns`5KQ>O1aeN4(a?O4!mkpD z+wdi~T9pz;>Pt@$`_U4%W?!aV{K?pUgYBDRSh@8yjY_PJ*!-3r{e5iJPHUBb15K{*|UQPUa*!uXEhid zE@c&SyMvp>$4OehjNqVP0>YPw`I<+`t#7GE9lP`mtn-;p%@qtrkosBZjW- zJW70&x&NV0Ka`j!qmtjAjHZEO!v-f-%tiw`olY#^{bS#L0%0p-mnf^%qE(VkFUJ4l zv3C};5xr}?n@1Vq`qne5m7d0X|5(X-_g^P#)XKQYdqiSh*yy-thah52Z;wXL_?xIb2lnJ&HJ zPfGuL#;L@1>e1_a!%}Q@*nO`1#KSD?s7L3pvoZ(tHs2iBR=+;VSd=~~Hh6W2af$Uv z*V*lNFU}*o{|@j1RGV!L-9FW4zDj;q$REU&NF)-;kVFcgL?V$$h9pt|B@&55G9-}# zD3M4ck|Bu{K#4>mkqk+s07@hhiDXD31yCZ9NF+lNDS#4*L?RiINCA{cBofJx - -#include "pink/include/redis_cli.h" - -#include "include/pika_sender.h" - -class MigratorThread : public pink::Thread { - public: - MigratorThread(void *db, std::vector *senders, int type, int thread_num) : - db_(db), - should_exit_(false), - senders_(senders), - type_(type), - thread_num_(thread_num), - thread_index_(0), - num_(0) { - } - - virtual ~ MigratorThread(); - - int64_t num() { - slash::MutexLock l(&num_mutex_); - return num_; - } - - void Stop() { - should_exit_ = true; - } - - private: - void PlusNum() { - slash::MutexLock l(&num_mutex_); - ++num_; - } - - void DispatchKey(const std::string &command, const std::string& key = ""); - - void MigrateDB(); - void MigrateStringsDB(); - void MigrateListsDB(); - void MigrateHashesDB(); - void MigrateSetsDB(); - void MigrateZsetsDB(); - - virtual void *ThreadMain(); - - private: - void* db_; - bool should_exit_; - - std::vector *senders_; - int type_; - int thread_num_; - int thread_index_; - - int64_t num_; - slash::Mutex num_mutex_; -}; - -#endif - diff --git a/tools/pika_migrate/include/pika_admin.h b/tools/pika_migrate/include/pika_admin.h deleted file 
mode 100644 index c8484f7e6e..0000000000 --- a/tools/pika_migrate/include/pika_admin.h +++ /dev/null @@ -1,459 +0,0 @@ -// Copyright (c) 2015-present, Qihoo, Inc. All rights reserved. -// This source code is licensed under the BSD-style license found in the -// LICENSE file in the root directory of this source tree. An additional grant -// of patent rights can be found in the PATENTS file in the same directory. - -#ifndef PIKA_ADMIN_H_ -#define PIKA_ADMIN_H_ - -#include -#include -#include -#include - -#include "blackwidow/blackwidow.h" - -#include "include/pika_command.h" - -/* - * Admin - */ -class SlaveofCmd : public Cmd { - public: - SlaveofCmd(const std::string& name, int arity, uint16_t flag) - : Cmd(name, arity, flag), is_noone_(false) {} - virtual void Do(std::shared_ptr partition = nullptr); - virtual Cmd* Clone() override { - return new SlaveofCmd(*this); - } - - private: - std::string master_ip_; - int64_t master_port_; - bool is_noone_; - virtual void DoInitial() override; - virtual void Clear() { - is_noone_ = false; - master_ip_.clear(); - master_port_ = 0; - } -}; - -class DbSlaveofCmd : public Cmd { - public: - DbSlaveofCmd(const std::string& name, int arity, uint16_t flag) - : Cmd(name, arity, flag) {} - virtual void Do(std::shared_ptr partition = nullptr); - virtual Cmd* Clone() override { - return new DbSlaveofCmd(*this); - } - - private: - std::string db_name_; - bool force_sync_; - bool is_noone_; - bool have_offset_; - int64_t filenum_; - int64_t offset_; - virtual void DoInitial() override; - virtual void Clear() { - db_name_.clear(); - force_sync_ = false; - is_noone_ = false; - have_offset_ = false; - } -}; - -class AuthCmd : public Cmd { - public: - AuthCmd(const std::string& name, int arity, uint16_t flag) - : Cmd(name, arity, flag) {} - virtual void Do(std::shared_ptr partition = nullptr); - virtual Cmd* Clone() override { - return new AuthCmd(*this); - } - - private: - std::string pwd_; - virtual void DoInitial() override; -}; - -class BgsaveCmd : public Cmd { - public: - BgsaveCmd(const std::string& name, int arity, uint16_t flag) - : Cmd(name, arity, flag) {} - virtual void Do(std::shared_ptr partition = nullptr); - virtual Cmd* Clone() override { - return new BgsaveCmd(*this); - } - - private: - virtual void DoInitial() override; - virtual void Clear() { - bgsave_tables_.clear(); - } - std::set bgsave_tables_; -}; - -class CompactCmd : public Cmd { - public: - CompactCmd(const std::string& name, int arity, uint16_t flag) - : Cmd(name, arity, flag) {} - virtual void Do(std::shared_ptr partition = nullptr); - virtual Cmd* Clone() override { - return new CompactCmd(*this); - } - - private: - virtual void DoInitial() override; - virtual void Clear() { - struct_type_.clear(); - compact_tables_.clear(); - } - std::string struct_type_; - std::set compact_tables_; -}; - -class PurgelogstoCmd : public Cmd { - public: - PurgelogstoCmd(const std::string& name, int arity, uint16_t flag) - : Cmd(name, arity, flag), num_(0) {} - virtual void Do(std::shared_ptr partition = nullptr); - virtual Cmd* Clone() override { - return new PurgelogstoCmd(*this); - } - - private: - uint32_t num_; - std::string table_; - virtual void DoInitial() override; -}; - -class PingCmd : public Cmd { - public: - PingCmd(const std::string& name, int arity, uint16_t flag) - : Cmd(name, arity, flag) {} - virtual void Do(std::shared_ptr partition = nullptr); - virtual Cmd* Clone() override { - return new PingCmd(*this); - } - - private: - virtual void DoInitial() override; -}; - -class SelectCmd : 
public Cmd { - public: - SelectCmd(const std::string& name, int arity, uint16_t flag) - : Cmd(name, arity, flag) {} - virtual void Do(std::shared_ptr partition = nullptr); - virtual Cmd* Clone() override { - return new SelectCmd(*this); - } - - private: - virtual void DoInitial() override; - virtual void Clear() { - table_name_.clear(); - } - std::string table_name_; -}; - -class FlushallCmd : public Cmd { - public: - FlushallCmd(const std::string& name, int arity, uint16_t flag) - : Cmd(name, arity, flag) {} - virtual void Do(std::shared_ptr partition = nullptr); - virtual Cmd* Clone() override { - return new FlushallCmd(*this); - } - - private: - virtual void DoInitial() override; - virtual std::string ToBinlog( - uint32_t exec_time, - const std::string& server_id, - uint64_t logic_id, - uint32_t filenum, - uint64_t offset) override; -}; - -class FlushdbCmd : public Cmd { - public: - FlushdbCmd(const std::string& name, int arity, uint16_t flag) - : Cmd(name, arity, flag) {} - virtual void Do(std::shared_ptr partition = nullptr); - virtual Cmd* Clone() override { - return new FlushdbCmd(*this); - } - - private: - std::string db_name_; - virtual void DoInitial() override; - virtual void Clear() { - db_name_.clear(); - } -}; - -class ClientCmd : public Cmd { - public: - ClientCmd(const std::string& name, int arity, uint16_t flag) - : Cmd(name, arity, flag) {} - virtual void Do(std::shared_ptr partition = nullptr); - const static std::string CLIENT_LIST_S; - const static std::string CLIENT_KILL_S; - virtual Cmd* Clone() override { - return new ClientCmd(*this); - } - - private: - std::string operation_, info_; - virtual void DoInitial() override; -}; - -class InfoCmd : public Cmd { - public: - enum InfoSection { - kInfoErr = 0x0, - kInfoServer, - kInfoClients, - kInfoStats, - kInfoExecCount, - kInfoCPU, - kInfoReplication, - kInfoKeyspace, - kInfoLog, - kInfoData, - kInfo, - kInfoAll, - kInfoDebug - }; - - InfoCmd(const std::string& name, int arity, uint16_t flag) - : Cmd(name, arity, flag), rescan_(false), off_(false) {} - virtual void Do(std::shared_ptr partition = nullptr); - virtual Cmd* Clone() override { - return new InfoCmd(*this); - } - - private: - InfoSection info_section_; - bool rescan_; //whether to rescan the keyspace - bool off_; - std::set keyspace_scan_tables_; - - const static std::string kInfoSection; - const static std::string kAllSection; - const static std::string kServerSection; - const static std::string kClientsSection; - const static std::string kStatsSection; - const static std::string kExecCountSection; - const static std::string kCPUSection; - const static std::string kReplicationSection; - const static std::string kKeyspaceSection; - const static std::string kDataSection; - const static std::string kDebugSection; - - virtual void DoInitial() override; - virtual void Clear() { - rescan_ = false; - off_ = false; - keyspace_scan_tables_.clear(); - } - - void InfoServer(std::string& info); - void InfoClients(std::string& info); - void InfoStats(std::string& info); - void InfoExecCount(std::string& info); - void InfoCPU(std::string& info); - void InfoShardingReplication(std::string& info); - void InfoReplication(std::string& info); - void InfoKeyspace(std::string& info); - void InfoData(std::string& info); - void InfoDebug(std::string& info); -}; - -class ShutdownCmd : public Cmd { - public: - ShutdownCmd(const std::string& name, int arity, uint16_t flag) - : Cmd(name, arity, flag) {} - virtual void Do(std::shared_ptr partition = nullptr); - virtual Cmd* Clone() 
override { - return new ShutdownCmd(*this); - } - - private: - virtual void DoInitial() override; -}; - -class ConfigCmd : public Cmd { - public: - ConfigCmd(const std::string& name, int arity, uint16_t flag) - : Cmd(name, arity, flag) {} - virtual void Do(std::shared_ptr partition = nullptr); - virtual Cmd* Clone() override { - return new ConfigCmd(*this); - } - - private: - std::vector config_args_v_; - virtual void DoInitial() override; - void ConfigGet(std::string &ret); - void ConfigSet(std::string &ret); - void ConfigRewrite(std::string &ret); - void ConfigResetstat(std::string &ret); -}; - -class MonitorCmd : public Cmd { - public: - MonitorCmd(const std::string& name, int arity, uint16_t flag) - : Cmd(name, arity, flag) {} - virtual void Do(std::shared_ptr partition = nullptr); - virtual Cmd* Clone() override { - return new MonitorCmd(*this); - } - - private: - virtual void DoInitial() override; -}; - -class DbsizeCmd : public Cmd { - public: - DbsizeCmd(const std::string& name, int arity, uint16_t flag) - : Cmd(name, arity, flag) {} - virtual void Do(std::shared_ptr partition = nullptr); - virtual Cmd* Clone() override { - return new DbsizeCmd(*this); - } - - private: - virtual void DoInitial() override; -}; - -class TimeCmd : public Cmd { - public: - TimeCmd(const std::string& name, int arity, uint16_t flag) - : Cmd(name, arity, flag) {} - virtual void Do(std::shared_ptr partition = nullptr); - virtual Cmd* Clone() override { - return new TimeCmd(*this); - } - - private: - virtual void DoInitial() override; -}; - -class DelbackupCmd : public Cmd { - public: - DelbackupCmd(const std::string& name, int arity, uint16_t flag) - : Cmd(name, arity, flag) {} - virtual void Do(std::shared_ptr partition = nullptr); - virtual Cmd* Clone() override { - return new DelbackupCmd(*this); - } - - private: - virtual void DoInitial() override; -}; - -class EchoCmd : public Cmd { - public: - EchoCmd(const std::string& name, int arity, uint16_t flag) - : Cmd(name, arity, flag) {} - virtual void Do(std::shared_ptr partition = nullptr); - virtual Cmd* Clone() override { - return new EchoCmd(*this); - } - - private: - std::string body_; - virtual void DoInitial() override; -}; - -class ScandbCmd : public Cmd { - public: - ScandbCmd(const std::string& name, int arity, uint16_t flag) - : Cmd(name, arity, flag), type_(blackwidow::kAll) {} - virtual void Do(std::shared_ptr partition = nullptr); - virtual Cmd* Clone() override { - return new ScandbCmd(*this); - } - - private: - blackwidow::DataType type_; - virtual void DoInitial() override; - virtual void Clear() { - type_ = blackwidow::kAll; - } -}; - -class SlowlogCmd : public Cmd { - public: - enum SlowlogCondition{kGET, kLEN, kRESET}; - SlowlogCmd(const std::string& name, int arity, uint16_t flag) - : Cmd(name, arity, flag), condition_(kGET) {} - virtual void Do(std::shared_ptr partition = nullptr); - virtual Cmd* Clone() override { - return new SlowlogCmd(*this); - } - private: - int64_t number_; - SlowlogCmd::SlowlogCondition condition_; - virtual void DoInitial() override; - virtual void Clear() { - number_ = 10; - condition_ = kGET; - } -}; - -class PaddingCmd : public Cmd { - public: - PaddingCmd(const std::string& name, int arity, uint16_t flag) - : Cmd(name, arity, flag) {} - virtual void Do(std::shared_ptr partition = nullptr); - virtual Cmd* Clone() override { - return new PaddingCmd(*this); - } - - private: - virtual void DoInitial(); - virtual std::string ToBinlog( - uint32_t exec_time, - const std::string& server_id, - uint64_t logic_id, - 
uint32_t filenum, - uint64_t offset) override; -}; - -#ifdef TCMALLOC_EXTENSION -class TcmallocCmd : public Cmd { - public: - TcmallocCmd(const std::string& name, int arity, uint16_t flag) - : Cmd(name, arity, flag) {} - virtual void Do(std::shared_ptr partition = nullptr); - virtual Cmd* Clone() override { - return new TcmallocCmd(*this); - } - - private: - int64_t type_; - int64_t rate_; - virtual void DoInitial() override; -}; -#endif - -class PKPatternMatchDelCmd : public Cmd { - public: - PKPatternMatchDelCmd(const std::string& name, int arity, uint16_t flag) - : Cmd(name, arity, flag) {} - virtual void Do(std::shared_ptr partition = nullptr); - virtual Cmd* Clone() override { - return new PKPatternMatchDelCmd(*this); - } - - private: - blackwidow::DataType type_; - std::string pattern_; - virtual void DoInitial() override; -}; -#endif // PIKA_ADMIN_H_ diff --git a/tools/pika_migrate/include/pika_auxiliary_thread.h b/tools/pika_migrate/include/pika_auxiliary_thread.h deleted file mode 100644 index 3e192bf280..0000000000 --- a/tools/pika_migrate/include/pika_auxiliary_thread.h +++ /dev/null @@ -1,25 +0,0 @@ -// Copyright (c) 2019-present, Qihoo, Inc. All rights reserved. -// This source code is licensed under the BSD-style license found in the -// LICENSE file in the root directory of this source tree. An additional grant -// of patent rights can be found in the PATENTS file in the same directory. - -#ifndef PIKA_AUXILIARY_THREAD_H_ -#define PIKA_AUXILIARY_THREAD_H_ - -#include "pink/include/pink_thread.h" - -#include "slash/include/slash_mutex.h" - -class PikaAuxiliaryThread : public pink::Thread { - public: - PikaAuxiliaryThread() : - mu_(), - cv_(&mu_) {} - virtual ~PikaAuxiliaryThread(); - slash::Mutex mu_; - slash::CondVar cv_; - private: - virtual void* ThreadMain(); -}; - -#endif diff --git a/tools/pika_migrate/include/pika_binlog.h b/tools/pika_migrate/include/pika_binlog.h deleted file mode 100644 index 48d6b61f40..0000000000 --- a/tools/pika_migrate/include/pika_binlog.h +++ /dev/null @@ -1,115 +0,0 @@ -// Copyright (c) 2015-present, Qihoo, Inc. All rights reserved. -// This source code is licensed under the BSD-style license found in the -// LICENSE file in the root directory of this source tree. An additional grant -// of patent rights can be found in the PATENTS file in the same directory. - -#ifndef PIKA_BINLOG_H_ -#define PIKA_BINLOG_H_ - -#include "slash/include/env.h" -#include "slash/include/slash_mutex.h" -#include "slash/include/slash_status.h" - -#include "include/pika_define.h" - -using slash::Status; -using slash::Slice; - -std::string NewFileName(const std::string name, const uint32_t current); - -class Version { - public: - Version(slash::RWFile *save); - ~Version(); - - Status Init(); - - // RWLock should be held when access members. 
- Status StableSave(); - - uint32_t pro_num_; - uint64_t pro_offset_; - uint64_t logic_id_; - - pthread_rwlock_t rwlock_; - - void debug() { - slash::RWLock(&rwlock_, false); - printf ("Current pro_num %u pro_offset %lu\n", pro_num_, pro_offset_); - } - - private: - - slash::RWFile *save_; - - // No copying allowed; - Version(const Version&); - void operator=(const Version&); -}; - -class Binlog { - public: - Binlog(const std::string& Binlog_path, const int file_size = 100 * 1024 * 1024); - ~Binlog(); - - void Lock() { mutex_.Lock(); } - void Unlock() { mutex_.Unlock(); } - - Status Put(const std::string &item); - Status Put(const char* item, int len); - - Status GetProducerStatus(uint32_t* filenum, uint64_t* pro_offset, uint64_t* logic_id = NULL); - /* - * Set Producer pro_num and pro_offset with lock - */ - Status SetProducerStatus(uint32_t filenum, uint64_t pro_offset); - - static Status AppendPadding(slash::WritableFile* file, uint64_t* len); - - slash::WritableFile *queue() { return queue_; } - - uint64_t file_size() { - return file_size_; - } - - std::string filename; - - private: - - void InitLogFile(); - Status EmitPhysicalRecord(RecordType t, const char *ptr, size_t n, int *temp_pro_offset); - - - /* - * Produce - */ - Status Produce(const Slice &item, int *pro_offset); - - uint32_t consumer_num_; - uint64_t item_num_; - - Version* version_; - slash::WritableFile *queue_; - slash::RWFile *versionfile_; - - slash::Mutex mutex_; - - uint32_t pro_num_; - - int block_offset_; - - char* pool_; - bool exit_all_consume_; - const std::string binlog_path_; - - uint64_t file_size_; - - // Not use - //int32_t retry_; - - // No copying allowed - Binlog(const Binlog&); - void operator=(const Binlog&); -}; - -#endif diff --git a/tools/pika_migrate/include/pika_binlog_reader.h b/tools/pika_migrate/include/pika_binlog_reader.h deleted file mode 100644 index 6b50fa0b4b..0000000000 --- a/tools/pika_migrate/include/pika_binlog_reader.h +++ /dev/null @@ -1,48 +0,0 @@ -// Copyright (c) 2015-present, Qihoo, Inc. All rights reserved. -// This source code is licensed under the BSD-style license found in the -// LICENSE file in the root directory of this source tree. An additional grant -// of patent rights can be found in the PATENTS file in the same directory. 
-
-#ifndef PIKA_BINLOG_READER_H_
-#define PIKA_BINLOG_READER_H_
-
-#include <memory>
-#include <string>
-
-#include "slash/include/slash_status.h"
-#include "slash/include/env.h"
-#include "slash/include/slash_slice.h"
-
-#include "include/pika_binlog.h"
-
-using slash::Status;
-using slash::Slice;
-
-class PikaBinlogReader {
- public:
-  PikaBinlogReader(uint32_t cur_filenum, uint64_t cur_offset);
-  PikaBinlogReader();
-  ~PikaBinlogReader();
-  Status Get(std::string* scratch, uint32_t* filenum, uint64_t* offset);
-  int Seek(std::shared_ptr<Binlog> logger, uint32_t filenum, uint64_t offset);
-  bool ReadToTheEnd();
-  void GetReaderStatus(uint32_t* cur_filenum, uint64_t* cur_offset);
- private:
-  bool GetNext(uint64_t* size);
-  unsigned int ReadPhysicalRecord(slash::Slice* result, uint32_t* filenum, uint64_t* offset);
-  // Returns scratch binlog and corresponding offset
-  Status Consume(std::string* scratch, uint32_t* filenum, uint64_t* offset);
-
-  pthread_rwlock_t rwlock_;
-  uint32_t cur_filenum_;
-  uint64_t cur_offset_;
-  uint64_t last_record_offset_;
-
-  std::shared_ptr<Binlog> logger_;
-  slash::SequentialFile *queue_;
-
-  char* const backing_store_;
-  Slice buffer_;
-};
-
-#endif  // PIKA_BINLOG_READER_H_
diff --git a/tools/pika_migrate/include/pika_binlog_transverter.h b/tools/pika_migrate/include/pika_binlog_transverter.h
deleted file mode 100644
index 14244f55e0..0000000000
--- a/tools/pika_migrate/include/pika_binlog_transverter.h
+++ /dev/null
@@ -1,91 +0,0 @@
-// Copyright (c) 2018-present, Qihoo, Inc. All rights reserved.
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree. An additional grant
-// of patent rights can be found in the PATENTS file in the same directory.
-
-#ifndef PIKA_BINLOG_TRANSVERTER_H_
-#define PIKA_BINLOG_TRANSVERTER_H_
-
-#include <iostream>
-#include <vector>
-#include <string>
-#include <stdint.h>
-
-
-/*
- * *******************************************Type First Binlog Item Format*******************************************
- * | <Type> | <Create Time> | <Server Id> | <Binlog Logic Id> | <File Num> | <Offset> | <Content Length> |      <Content>       |
- *   2 Bytes     4 Bytes         4 Bytes          8 Bytes        4 Bytes     8 Bytes        4 Bytes       content length Bytes
- *
- */
-#define BINLOG_ENCODE_LEN 34
-
-enum BinlogType {
-  TypeFirst = 1,
-};
-
-
-const int BINLOG_ITEM_HEADER_SIZE = 34;
-const int PADDING_BINLOG_PROTOCOL_SIZE = 22;
-const int SPACE_STROE_PARAMETER_LENGTH = 5;
-
-class BinlogItem {
- public:
-  BinlogItem() :
-      exec_time_(0),
-      server_id_(0),
-      logic_id_(0),
-      filenum_(0),
-      offset_(0),
-      content_("") {}
-
-  friend class PikaBinlogTransverter;
-
-  uint32_t exec_time() const;
-  uint32_t server_id() const;
-  uint64_t logic_id() const;
-  uint32_t filenum() const;
-  uint64_t offset() const;
-  std::string content() const;
-  std::string ToString() const;
-
-  void set_exec_time(uint32_t exec_time);
-  void set_server_id(uint32_t server_id);
-  void set_logic_id(uint64_t logic_id);
-  void set_filenum(uint32_t filenum);
-  void set_offset(uint64_t offset);
-
- private:
-  uint32_t exec_time_;
-  uint32_t server_id_;
-  uint64_t logic_id_;
-  uint32_t filenum_;
-  uint64_t offset_;
-  std::string content_;
-  std::vector<std::string> extends_;
-};
-
-class PikaBinlogTransverter {
- public:
-  PikaBinlogTransverter() {}
-  static std::string BinlogEncode(BinlogType type,
-                                  uint32_t exec_time,
-                                  uint32_t server_id,
-                                  uint64_t logic_id,
-                                  uint32_t filenum,
-                                  uint64_t offset,
-                                  const std::string& content,
-                                  const std::vector<std::string>& extends);
-
-  static bool BinlogDecode(BinlogType type,
-                           const std::string& binlog,
-                           BinlogItem* binlog_item);
-
-  static std::string ConstructPaddingBinlog(BinlogType type,
uint32_t size); - - static bool BinlogItemWithoutContentDecode(BinlogType type, - const std::string& binlog, - BinlogItem* binlog_item); -}; - -#endif diff --git a/tools/pika_migrate/include/pika_bit.h b/tools/pika_migrate/include/pika_bit.h deleted file mode 100644 index 257e9cb866..0000000000 --- a/tools/pika_migrate/include/pika_bit.h +++ /dev/null @@ -1,142 +0,0 @@ -// Copyright (c) 2015-present, Qihoo, Inc. All rights reserved. -// This source code is licensed under the BSD-style license found in the -// LICENSE file in the root directory of this source tree. An additional grant -// of patent rights can be found in the PATENTS file in the same directory. - -#ifndef PIKA_BIT_H_ -#define PIKA_BIT_H_ - -#include "blackwidow/blackwidow.h" - -#include "include/pika_command.h" -#include "include/pika_partition.h" - -/* - * bitoperation - */ -class BitGetCmd : public Cmd { - public: - BitGetCmd(const std::string& name, int arity, uint16_t flag) - : Cmd(name, arity, flag) {}; - virtual std::vector current_key() const { - std::vector res; - res.push_back(key_); - return res; - } - virtual void Do(std::shared_ptr partition = nullptr) override; - virtual Cmd* Clone() override { - return new BitGetCmd(*this); - } - private: - std::string key_; - int64_t bit_offset_; - virtual void Clear() { - key_ = ""; - bit_offset_ = -1; - } - virtual void DoInitial() override; -}; - -class BitSetCmd : public Cmd { - public: - BitSetCmd(const std::string& name, int arity, uint16_t flag) - : Cmd(name, arity, flag) {}; - virtual std::vector current_key() const { - std::vector res; - res.push_back(key_); - return res; - } - virtual void Do(std::shared_ptr partition = nullptr) override; - virtual Cmd* Clone() override { - return new BitSetCmd(*this); - } - private: - std::string key_; - int64_t bit_offset_; - int64_t on_; - virtual void Clear() { - key_ = ""; - bit_offset_ = -1; - on_ = -1; - } - virtual void DoInitial() override; -}; - -class BitCountCmd : public Cmd { - public: - BitCountCmd(const std::string& name, int arity, uint16_t flag) - : Cmd(name, arity, flag) {}; - virtual std::vector current_key() const { - std::vector res; - res.push_back(key_); - return res; - } - virtual void Do(std::shared_ptr partition = nullptr) override; - virtual Cmd* Clone() override { - return new BitCountCmd(*this); - } - private: - std::string key_; - bool count_all_; - int64_t start_offset_; - int64_t end_offset_; - virtual void Clear() { - key_ = ""; - count_all_ = false; - start_offset_ = -1; - end_offset_ = -1; - } - virtual void DoInitial() override; -}; - -class BitPosCmd : public Cmd { - public: - BitPosCmd(const std::string& name, int arity, uint16_t flag) - : Cmd(name, arity, flag) {}; - virtual std::vector current_key() const { - std::vector res; - res.push_back(key_); - return res; - } - virtual void Do(std::shared_ptr partition = nullptr) override; - virtual Cmd* Clone() override { - return new BitPosCmd(*this); - } - private: - std::string key_; - bool pos_all_; - bool endoffset_set_; - int64_t bit_val_; - int64_t start_offset_; - int64_t end_offset_; - virtual void Clear() { - key_ = ""; - pos_all_ = false; - endoffset_set_ = false; - bit_val_ = -1; - start_offset_ = -1; - end_offset_ = -1; - } - virtual void DoInitial() override; -}; - -class BitOpCmd : public Cmd { - public: - BitOpCmd(const std::string& name, int arity, uint16_t flag) - : Cmd(name, arity, flag) {}; - virtual void Do(std::shared_ptr partition = nullptr) override; - virtual Cmd* Clone() override { - return new BitOpCmd(*this); - } - private: - 
std::string dest_key_; - std::vector src_keys_; - blackwidow::BitOpType op_; - virtual void Clear() { - dest_key_ = ""; - src_keys_.clear(); - op_ = blackwidow::kBitOpDefault; - } - virtual void DoInitial() override; -}; -#endif diff --git a/tools/pika_migrate/include/pika_client_conn.h b/tools/pika_migrate/include/pika_client_conn.h deleted file mode 100644 index 1bbe82ab9e..0000000000 --- a/tools/pika_migrate/include/pika_client_conn.h +++ /dev/null @@ -1,81 +0,0 @@ -// Copyright (c) 2015-present, Qihoo, Inc. All rights reserved. -// This source code is licensed under the BSD-style license found in the -// LICENSE file in the root directory of this source tree. An additional grant -// of patent rights can be found in the PATENTS file in the same directory. - -#ifndef PIKA_CLIENT_CONN_H_ -#define PIKA_CLIENT_CONN_H_ - -#include "include/pika_command.h" - -class PikaClientConn: public pink::RedisConn { - public: - struct BgTaskArg { - std::shared_ptr pcc; - std::vector redis_cmds; - std::string* response; - }; - - // Auth related - class AuthStat { - public: - void Init(); - bool IsAuthed(const std::shared_ptr cmd_ptr); - bool ChecknUpdate(const std::string& arg); - private: - enum StatType { - kNoAuthed = 0, - kAdminAuthed, - kLimitAuthed, - }; - StatType stat_; - }; - - PikaClientConn(int fd, std::string ip_port, - pink::Thread *server_thread, - pink::PinkEpoll* pink_epoll, - const pink::HandleType& handle_type); - virtual ~PikaClientConn() {} - - void AsynProcessRedisCmds(const std::vector& argvs, std::string* response) override; - - void BatchExecRedisCmd(const std::vector& argvs, std::string* response); - int DealMessage(const pink::RedisCmdArgsType& argv, std::string* response); - static void DoBackgroundTask(void* arg); - - bool IsPubSub() { return is_pubsub_; } - void SetIsPubSub(bool is_pubsub) { is_pubsub_ = is_pubsub; } - void SetCurrentTable(const std::string& table_name) {current_table_ = table_name;} - - pink::ServerThread* server_thread() { - return server_thread_; - } - - AuthStat& auth_stat() { - return auth_stat_; - } - - private: - pink::ServerThread* const server_thread_; - std::string current_table_; - bool is_pubsub_; - - std::string DoCmd(const PikaCmdArgsType& argv, const std::string& opt); - - void ProcessSlowlog(const PikaCmdArgsType& argv, uint64_t start_us); - void ProcessMonitor(const PikaCmdArgsType& argv); - - AuthStat auth_stat_; -}; - -struct ClientInfo { - int fd; - std::string ip_port; - int64_t last_interaction; - std::shared_ptr conn; -}; - -extern bool AddrCompare(const ClientInfo& lhs, const ClientInfo& rhs); -extern bool IdleCompare(const ClientInfo& lhs, const ClientInfo& rhs); - -#endif diff --git a/tools/pika_migrate/include/pika_cluster.h b/tools/pika_migrate/include/pika_cluster.h deleted file mode 100644 index bb34c37c31..0000000000 --- a/tools/pika_migrate/include/pika_cluster.h +++ /dev/null @@ -1,114 +0,0 @@ -// Copyright (c) 2015-present, Qihoo, Inc. All rights reserved. -// This source code is licensed under the BSD-style license found in the -// LICENSE file in the root directory of this source tree. An additional grant -// of patent rights can be found in the PATENTS file in the same directory. 
- -#ifndef PIKA_CLUSTER_H_ -#define PIKA_CLUSTER_H_ - -#include "include/pika_command.h" - -class PkClusterInfoCmd : public Cmd { - public: - enum InfoSection { - kInfoErr = 0x0, - kInfoSlot - }; - enum InfoRange { - kSingle = 0x0, - kAll - }; - PkClusterInfoCmd(const std::string& name, int arity, uint16_t flag) - : Cmd(name, arity, flag), - info_section_(kInfoErr), info_range_(kAll), partition_id_(0) {} - virtual void Do(std::shared_ptr partition = nullptr); - virtual Cmd* Clone() override { - return new PkClusterInfoCmd(*this); - } - - private: - InfoSection info_section_; - InfoRange info_range_; - - std::string table_name_; - uint32_t partition_id_; - - virtual void DoInitial() override; - virtual void Clear() { - info_section_ = kInfoErr; - info_range_ = kAll; - table_name_.clear(); - partition_id_ = 0; - } - const static std::string kSlotSection; - void ClusterInfoSlotAll(std::string* info); - Status GetSlotInfo(const std::string table_name, uint32_t partition_id, std::string* info); - bool ParseInfoSlotSubCmd(); -}; - -class SlotParentCmd : public Cmd { - public: - SlotParentCmd(const std::string& name, int arity, uint16_t flag) - : Cmd(name, arity, flag) {} - - protected: - std::set slots_; - std::set p_infos_; - virtual void DoInitial(); - virtual void Clear() { - slots_.clear(); - p_infos_.clear(); - } -}; - -class PkClusterAddSlotsCmd : public SlotParentCmd { - public: - PkClusterAddSlotsCmd(const std::string& name, int arity, uint16_t flag) - : SlotParentCmd(name, arity, flag) {} - virtual Cmd* Clone() override { - return new PkClusterAddSlotsCmd(*this); - } - virtual void Do(std::shared_ptr partition = nullptr); - private: - virtual void DoInitial() override; - Status AddSlotsSanityCheck(const std::string& table_name); -}; - -class PkClusterDelSlotsCmd : public SlotParentCmd { - public: - PkClusterDelSlotsCmd(const std::string& name, int32_t arity, uint16_t flag) - : SlotParentCmd(name, arity, flag) {} - virtual void Do(std::shared_ptr partition = nullptr); - virtual Cmd* Clone() override { - return new PkClusterDelSlotsCmd(*this); - } - private: - virtual void DoInitial() override; - Status RemoveSlotsSanityCheck(const std::string& table_name); -}; - -class PkClusterSlotsSlaveofCmd : public Cmd { - public: - PkClusterSlotsSlaveofCmd(const std::string& name , int arity, uint16_t flag) - : Cmd(name, arity, flag) {} - virtual void Do(std::shared_ptr partition = nullptr); - virtual Cmd* Clone() override { - return new PkClusterSlotsSlaveofCmd(*this); - } - private: - std::string ip_; - int64_t port_; - std::set slots_; - bool force_sync_; - bool is_noone_; - virtual void DoInitial() override; - virtual void Clear() { - ip_.clear(); - port_ = 0; - slots_.clear(); - force_sync_ = false; - is_noone_ = false; - } -}; - -#endif // PIKA_CLUSTER_H_ diff --git a/tools/pika_migrate/include/pika_cmd_table_manager.h b/tools/pika_migrate/include/pika_cmd_table_manager.h deleted file mode 100644 index bd87296698..0000000000 --- a/tools/pika_migrate/include/pika_cmd_table_manager.h +++ /dev/null @@ -1,32 +0,0 @@ -// Copyright (c) 2018-present, Qihoo, Inc. All rights reserved. -// This source code is licensed under the BSD-style license found in the -// LICENSE file in the root directory of this source tree. An additional grant -// of patent rights can be found in the PATENTS file in the same directory. 
- -#ifndef PIKA_CMD_TABLE_MANAGER_H_ -#define PIKA_CMD_TABLE_MANAGER_H_ - -#include "include/pika_command.h" -#include "include/pika_data_distribution.h" - - -class PikaCmdTableManager { - public: - PikaCmdTableManager(); - virtual ~PikaCmdTableManager(); - std::shared_ptr GetCmd(const std::string& opt); - uint32_t DistributeKey(const std::string& key, uint32_t partition_num); - private: - std::shared_ptr NewCommand(const std::string& opt); - - void InsertCurrentThreadDistributionMap(); - bool CheckCurrentThreadDistributionMapExist(const pid_t& tid); - - void TryChangeToAlias(std::string *internal_opt); - - CmdTable* cmds_; - - pthread_rwlock_t map_protector_; - std::unordered_map thread_distribution_map_; -}; -#endif diff --git a/tools/pika_migrate/include/pika_command.h b/tools/pika_migrate/include/pika_command.h deleted file mode 100644 index dec0b50924..0000000000 --- a/tools/pika_migrate/include/pika_command.h +++ /dev/null @@ -1,486 +0,0 @@ -// Copyright (c) 2015-present, Qihoo, Inc. All rights reserved. -// This source code is licensed under the BSD-style license found in the -// LICENSE file in the root directory of this source tree. An additional grant -// of patent rights can be found in the PATENTS file in the same directory. - -#ifndef PIKA_COMMAND_H_ -#define PIKA_COMMAND_H_ - -#include - -#include "pink/include/redis_conn.h" -#include "pink/include/pink_conn.h" -#include "slash/include/slash_string.h" - -#include "include/pika_partition.h" - -//Constant for command name -//Admin -const std::string kCmdNameSlaveof = "slaveof"; -const std::string kCmdNameDbSlaveof = "dbslaveof"; -const std::string kCmdNameAuth = "auth"; -const std::string kCmdNameBgsave = "bgsave"; -const std::string kCmdNameCompact = "compact"; -const std::string kCmdNamePurgelogsto = "purgelogsto"; -const std::string kCmdNamePing = "ping"; -const std::string kCmdNameSelect = "select"; -const std::string kCmdNameFlushall = "flushall"; -const std::string kCmdNameFlushdb = "flushdb"; -const std::string kCmdNameClient = "client"; -const std::string kCmdNameShutdown = "shutdown"; -const std::string kCmdNameInfo = "info"; -const std::string kCmdNameConfig = "config"; -const std::string kCmdNameMonitor = "monitor"; -const std::string kCmdNameDbsize = "dbsize"; -const std::string kCmdNameTime = "time"; -const std::string kCmdNameDelbackup = "delbackup"; -const std::string kCmdNameEcho = "echo"; -const std::string kCmdNameScandb = "scandb"; -const std::string kCmdNameSlowlog = "slowlog"; -const std::string kCmdNamePadding = "padding"; -#ifdef TCMALLOC_EXTENSION -const std::string kCmdNameTcmalloc = "tcmalloc"; -#endif -const std::string kCmdNamePKPatternMatchDel = "pkpatternmatchdel"; - -//Kv -const std::string kCmdNameSet = "set"; -const std::string kCmdNameGet = "get"; -const std::string kCmdNameDel = "del"; -const std::string kCmdNameIncr = "incr"; -const std::string kCmdNameIncrby = "incrby"; -const std::string kCmdNameIncrbyfloat = "incrbyfloat"; -const std::string kCmdNameDecr = "decr"; -const std::string kCmdNameDecrby = "decrby"; -const std::string kCmdNameGetset = "getset"; -const std::string kCmdNameAppend = "append"; -const std::string kCmdNameMget = "mget"; -const std::string kCmdNameKeys = "keys"; -const std::string kCmdNameSetnx = "setnx"; -const std::string kCmdNameSetex = "setex"; -const std::string kCmdNamePsetex = "psetex"; -const std::string kCmdNameDelvx = "delvx"; -const std::string kCmdNameMset = "mset"; -const std::string kCmdNameMsetnx = "msetnx"; -const std::string kCmdNameGetrange = 
"getrange"; -const std::string kCmdNameSetrange = "setrange"; -const std::string kCmdNameStrlen = "strlen"; -const std::string kCmdNameExists = "exists"; -const std::string kCmdNameExpire = "expire"; -const std::string kCmdNamePexpire = "pexpire"; -const std::string kCmdNameExpireat = "expireat"; -const std::string kCmdNamePexpireat = "pexpireat"; -const std::string kCmdNameTtl = "ttl"; -const std::string kCmdNamePttl = "pttl"; -const std::string kCmdNamePersist = "persist"; -const std::string kCmdNameType = "type"; -const std::string kCmdNameScan = "scan"; -const std::string kCmdNameScanx = "scanx"; -const std::string kCmdNamePKSetexAt = "pksetexat"; -const std::string kCmdNamePKScanRange = "pkscanrange"; -const std::string kCmdNamePKRScanRange = "pkrscanrange"; - -//Hash -const std::string kCmdNameHDel = "hdel"; -const std::string kCmdNameHSet = "hset"; -const std::string kCmdNameHGet = "hget"; -const std::string kCmdNameHGetall = "hgetall"; -const std::string kCmdNameHExists = "hexists"; -const std::string kCmdNameHIncrby = "hincrby"; -const std::string kCmdNameHIncrbyfloat = "hincrbyfloat"; -const std::string kCmdNameHKeys = "hkeys"; -const std::string kCmdNameHLen = "hlen"; -const std::string kCmdNameHMget = "hmget"; -const std::string kCmdNameHMset = "hmset"; -const std::string kCmdNameHSetnx = "hsetnx"; -const std::string kCmdNameHStrlen = "hstrlen"; -const std::string kCmdNameHVals = "hvals"; -const std::string kCmdNameHScan = "hscan"; -const std::string kCmdNameHScanx = "hscanx"; -const std::string kCmdNamePKHScanRange = "pkhscanrange"; -const std::string kCmdNamePKHRScanRange = "pkhrscanrange"; - -//List -const std::string kCmdNameLIndex = "lindex"; -const std::string kCmdNameLInsert = "linsert"; -const std::string kCmdNameLLen = "llen"; -const std::string kCmdNameLPop = "lpop"; -const std::string kCmdNameLPush = "lpush"; -const std::string kCmdNameLPushx = "lpushx"; -const std::string kCmdNameLRange = "lrange"; -const std::string kCmdNameLRem = "lrem"; -const std::string kCmdNameLSet = "lset"; -const std::string kCmdNameLTrim = "ltrim"; -const std::string kCmdNameRPop = "rpop"; -const std::string kCmdNameRPopLPush = "rpoplpush"; -const std::string kCmdNameRPush = "rpush"; -const std::string kCmdNameRPushx = "rpushx"; - -//BitMap -const std::string kCmdNameBitSet = "setbit"; -const std::string kCmdNameBitGet = "getbit"; -const std::string kCmdNameBitPos = "bitpos"; -const std::string kCmdNameBitOp = "bitop"; -const std::string kCmdNameBitCount = "bitcount"; - -//Zset -const std::string kCmdNameZAdd = "zadd"; -const std::string kCmdNameZCard = "zcard"; -const std::string kCmdNameZScan = "zscan"; -const std::string kCmdNameZIncrby = "zincrby"; -const std::string kCmdNameZRange = "zrange"; -const std::string kCmdNameZRangebyscore = "zrangebyscore"; -const std::string kCmdNameZCount = "zcount"; -const std::string kCmdNameZRem = "zrem"; -const std::string kCmdNameZUnionstore = "zunionstore"; -const std::string kCmdNameZInterstore = "zinterstore"; -const std::string kCmdNameZRank = "zrank"; -const std::string kCmdNameZRevrank = "zrevrank"; -const std::string kCmdNameZScore = "zscore"; -const std::string kCmdNameZRevrange = "zrevrange"; -const std::string kCmdNameZRevrangebyscore = "zrevrangebyscore"; -const std::string kCmdNameZRangebylex = "zrangebylex"; -const std::string kCmdNameZRevrangebylex = "zrevrangebylex"; -const std::string kCmdNameZLexcount = "zlexcount"; -const std::string kCmdNameZRemrangebyrank = "zremrangebyrank"; -const std::string kCmdNameZRemrangebylex = 
"zremrangebylex"; -const std::string kCmdNameZRemrangebyscore = "zremrangebyscore"; -const std::string kCmdNameZPopmax = "zpopmax"; -const std::string kCmdNameZPopmin = "zpopmin"; - -//Set -const std::string kCmdNameSAdd = "sadd"; -const std::string kCmdNameSPop = "spop"; -const std::string kCmdNameSCard = "scard"; -const std::string kCmdNameSMembers = "smembers"; -const std::string kCmdNameSScan = "sscan"; -const std::string kCmdNameSRem = "srem"; -const std::string kCmdNameSUnion = "sunion"; -const std::string kCmdNameSUnionstore = "sunionstore"; -const std::string kCmdNameSInter = "sinter"; -const std::string kCmdNameSInterstore = "sinterstore"; -const std::string kCmdNameSIsmember = "sismember"; -const std::string kCmdNameSDiff = "sdiff"; -const std::string kCmdNameSDiffstore = "sdiffstore"; -const std::string kCmdNameSMove = "smove"; -const std::string kCmdNameSRandmember = "srandmember"; - -//HyperLogLog -const std::string kCmdNamePfAdd = "pfadd"; -const std::string kCmdNamePfCount = "pfcount"; -const std::string kCmdNamePfMerge = "pfmerge"; - -//GEO -const std::string kCmdNameGeoAdd = "geoadd"; -const std::string kCmdNameGeoPos = "geopos"; -const std::string kCmdNameGeoDist = "geodist"; -const std::string kCmdNameGeoHash = "geohash"; -const std::string kCmdNameGeoRadius = "georadius"; -const std::string kCmdNameGeoRadiusByMember = "georadiusbymember"; - -//Pub/Sub -const std::string kCmdNamePublish = "publish"; -const std::string kCmdNameSubscribe = "subscribe"; -const std::string kCmdNameUnSubscribe = "unsubscribe"; -const std::string kCmdNamePubSub = "pubsub"; -const std::string kCmdNamePSubscribe = "psubscribe"; -const std::string kCmdNamePUnSubscribe = "punsubscribe"; - -//Codis Slots -const std::string kCmdNameSlotsInfo = "slotsinfo"; -const std::string kCmdNameSlotsHashKey = "slotshashkey"; -const std::string kCmdNameSlotsMgrtTagSlotAsync = "slotsmgrttagslot-async"; -const std::string kCmdNameSlotsMgrtSlotAsync = "slotsmgrtslot-async"; -const std::string kCmdNameSlotsDel = "slotsdel"; -const std::string kCmdNameSlotsScan = "slotsscan"; -const std::string kCmdNameSlotsMgrtExecWrapper = "slotsmgrt-exec-wrapper"; -const std::string kCmdNameSlotsMgrtAsyncStatus = "slotsmgrt-async-status"; -const std::string kCmdNameSlotsMgrtAsyncCancel = "slotsmgrt-async-cancel"; -const std::string kCmdNameSlotsMgrtSlot = "slotsmgrtslot"; -const std::string kCmdNameSlotsMgrtTagSlot = "slotsmgrttagslot"; -const std::string kCmdNameSlotsMgrtOne = "slotsmgrtone"; -const std::string kCmdNameSlotsMgrtTagOne = "slotsmgrttagone"; - - -//Cluster -const std::string kCmdNamePkClusterInfo = "pkclusterinfo"; -const std::string kCmdNamePkClusterAddSlots = "pkclusteraddslots"; -const std::string kCmdNamePkClusterDelSlots = "pkclusterdelslots"; -const std::string kCmdNamePkClusterSlotsSlaveof = "pkclusterslotsslaveof"; - -const std::string kClusterPrefix = "pkcluster"; -typedef pink::RedisCmdArgsType PikaCmdArgsType; -static const int RAW_ARGS_LEN = 1024 * 1024; - -enum CmdFlagsMask { - kCmdFlagsMaskRW = 1, - kCmdFlagsMaskType = 30, - kCmdFlagsMaskLocal = 32, - kCmdFlagsMaskSuspend = 64, - kCmdFlagsMaskPrior = 128, - kCmdFlagsMaskAdminRequire = 256, - kCmdFlagsMaskPartition = 1536 -}; - -enum CmdFlags { - kCmdFlagsRead = 0, //default rw - kCmdFlagsWrite = 1, - kCmdFlagsAdmin = 0, //default type - kCmdFlagsKv = 2, - kCmdFlagsHash = 4, - kCmdFlagsList = 6, - kCmdFlagsSet = 8, - kCmdFlagsZset = 10, - kCmdFlagsBit = 12, - kCmdFlagsHyperLogLog = 14, - kCmdFlagsGeo = 16, - kCmdFlagsPubSub = 18, - kCmdFlagsNoLocal = 0, 
//default nolocal - kCmdFlagsLocal = 32, - kCmdFlagsNoSuspend = 0, //default nosuspend - kCmdFlagsSuspend = 64, - kCmdFlagsNoPrior = 0, //default noprior - kCmdFlagsPrior = 128, - kCmdFlagsNoAdminRequire = 0, //default no need admin - kCmdFlagsAdminRequire = 256, - kCmdFlagsDoNotSpecifyPartition = 0, //default do not specify partition - kCmdFlagsSinglePartition = 512, - kCmdFlagsMultiPartition = 1024 -}; - - -void inline RedisAppendContent(std::string& str, const std::string& value); -void inline RedisAppendLen(std::string& str, int64_t ori, const std::string &prefix); - -const std::string kNewLine = "\r\n"; - -class CmdRes { -public: - enum CmdRet { - kNone = 0, - kOk, - kPong, - kSyntaxErr, - kInvalidInt, - kInvalidBitInt, - kInvalidBitOffsetInt, - kInvalidBitPosArgument, - kWrongBitOpNotNum, - kInvalidFloat, - kOverFlow, - kNotFound, - kOutOfRange, - kInvalidPwd, - kNoneBgsave, - kPurgeExist, - kInvalidParameter, - kWrongNum, - kInvalidIndex, - kInvalidDbType, - kInvalidTable, - kErrOther, - }; - - CmdRes():ret_(kNone) {} - - bool none() const { - return ret_ == kNone && message_.empty(); - } - bool ok() const { - return ret_ == kOk || ret_ == kNone; - } - void clear() { - message_.clear(); - ret_ = kNone; - } - std::string raw_message() const { - return message_; - } - std::string message() const { - std::string result; - switch (ret_) { - case kNone: - return message_; - case kOk: - return "+OK\r\n"; - case kPong: - return "+PONG\r\n"; - case kSyntaxErr: - return "-ERR syntax error\r\n"; - case kInvalidInt: - return "-ERR value is not an integer or out of range\r\n"; - case kInvalidBitInt: - return "-ERR bit is not an integer or out of range\r\n"; - case kInvalidBitOffsetInt: - return "-ERR bit offset is not an integer or out of range\r\n"; - case kWrongBitOpNotNum: - return "-ERR BITOP NOT must be called with a single source key.\r\n"; - - case kInvalidBitPosArgument: - return "-ERR The bit argument must be 1 or 0.\r\n"; - case kInvalidFloat: - return "-ERR value is not a valid float\r\n"; - case kOverFlow: - return "-ERR increment or decrement would overflow\r\n"; - case kNotFound: - return "-ERR no such key\r\n"; - case kOutOfRange: - return "-ERR index out of range\r\n"; - case kInvalidPwd: - return "-ERR invalid password\r\n"; - case kNoneBgsave: - return "-ERR No BGSave Works now\r\n"; - case kPurgeExist: - return "-ERR binlog already in purging...\r\n"; - case kInvalidParameter: - return "-ERR Invalid Argument\r\n"; - case kWrongNum: - result = "-ERR wrong number of arguments for '"; - result.append(message_); - result.append("' command\r\n"); - break; - case kInvalidIndex: - result = "-ERR invalid DB index for '"; - result.append(message_); - result.append("'\r\n"); - break; - case kInvalidDbType: - result = "-ERR invalid DB for '"; - result.append(message_); - result.append("'\r\n"); - break; - case kInvalidTable: - result = "-ERR invalid Table for '"; - result.append(message_); - result.append("'\r\n"); - break; - case kErrOther: - result = "-ERR "; - result.append(message_); - result.append(kNewLine); - break; - default: - break; - } - return result; - } - - // Inline functions for Create Redis protocol - void AppendStringLen(int64_t ori) { - RedisAppendLen(message_, ori, "$"); - } - void AppendArrayLen(int64_t ori) { - RedisAppendLen(message_, ori, "*"); - } - void AppendInteger(int64_t ori) { - RedisAppendLen(message_, ori, ":"); - } - void AppendContent(const std::string& value) { - RedisAppendContent(message_, value); - } - void AppendString(const std::string& value) { 
- AppendStringLen(value.size()); - AppendContent(value); - } - void AppendStringRaw(const std::string& value) { - message_.append(value); - } - void SetRes(CmdRet _ret, const std::string content = "") { - ret_ = _ret; - if (!content.empty()) { - message_ = content; - } - } - -private: - std::string message_; - CmdRet ret_; -}; - -class Cmd { - public: - Cmd(const std::string& name, int arity, uint16_t flag) - : name_(name), arity_(arity), flag_(flag) {} - virtual ~Cmd() {} - - virtual std::vector current_key() const; - virtual void Execute(); - virtual void ProcessFlushDBCmd(); - virtual void ProcessFlushAllCmd(); - virtual void ProcessSinglePartitionCmd(); - virtual void ProcessMultiPartitionCmd(); - virtual void ProcessDoNotSpecifyPartitionCmd(); - virtual void Do(std::shared_ptr partition = nullptr) = 0; - virtual Cmd* Clone() = 0; - - void Initial(const PikaCmdArgsType& argv, - const std::string& table_name); - - bool is_write() const; - bool is_local() const; - bool is_suspend() const; - bool is_admin_require() const; - bool is_single_partition() const; - bool is_multi_partition() const; - - std::string name() const; - CmdRes& res(); - - virtual std::string ToBinlog(uint32_t exec_time, - const std::string& server_id, - uint64_t logic_id, - uint32_t filenum, - uint64_t offset); - - void SetConn(const std::shared_ptr conn); - std::shared_ptr GetConn(); - - protected: - // enable copy, used default copy - //Cmd(const Cmd&); - void ProcessCommand(std::shared_ptr partition); - void DoCommand(std::shared_ptr partition); - void DoBinlog(std::shared_ptr partition); - bool CheckArg(int num) const; - void LogCommand() const; - - std::string name_; - int arity_; - uint16_t flag_; - - CmdRes res_; - PikaCmdArgsType argv_; - std::string table_name_; - - std::weak_ptr conn_; - - private: - virtual void DoInitial() = 0; - virtual void Clear() {}; - - Cmd& operator=(const Cmd&); -}; - -typedef std::unordered_map CmdTable; - -// Method for Cmd Table -void InitCmdTable(CmdTable* cmd_table); -Cmd* GetCmdFromTable(const std::string& opt, const CmdTable& cmd_table); -void DestoryCmdTable(CmdTable* cmd_table); - -void RedisAppendContent(std::string& str, const std::string& value) { - str.append(value.data(), value.size()); - str.append(kNewLine); -} - -void RedisAppendLen(std::string& str, int64_t ori, const std::string &prefix) { - char buf[32]; - slash::ll2string(buf, 32, static_cast(ori)); - str.append(prefix); - str.append(buf); - str.append(kNewLine); -} - -void TryAliasChange(std::vector* argv); - -#endif diff --git a/tools/pika_migrate/include/pika_conf.h b/tools/pika_migrate/include/pika_conf.h deleted file mode 100644 index 83149be514..0000000000 --- a/tools/pika_migrate/include/pika_conf.h +++ /dev/null @@ -1,340 +0,0 @@ -// Copyright (c) 2015-present, Qihoo, Inc. All rights reserved. -// This source code is licensed under the BSD-style license found in the -// LICENSE file in the root directory of this source tree. An additional grant -// of patent rights can be found in the PATENTS file in the same directory. 
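pika_command.h, removed above, frames every reply with the RedisAppendLen/RedisAppendContent helpers, i.e. plain RESP encoding. A self-contained sketch of the same framing using local helper names (not the originals), showing how AppendArrayLen plus AppendString compose a reply:

```cpp
#include <cstdint>
#include <string>

const std::string kCRLF = "\r\n";

// <prefix><len>\r\n, e.g. "*2\r\n" for an array header or "$3\r\n" for a bulk string.
void AppendLen(std::string* out, int64_t len, const std::string& prefix) {
  out->append(prefix);
  out->append(std::to_string(len));
  out->append(kCRLF);
}

void AppendBulkString(std::string* out, const std::string& value) {
  AppendLen(out, static_cast<int64_t>(value.size()), "$");
  out->append(value);
  out->append(kCRLF);
}

std::string TwoElementReply(const std::string& a, const std::string& b) {
  std::string out;
  AppendLen(&out, 2, "*");  // array of two bulk strings
  AppendBulkString(&out, a);
  AppendBulkString(&out, b);
  return out;  // "*2\r\n$3\r\nfoo\r\n$3\r\nbar\r\n" for ("foo", "bar")
}
```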
- -#ifndef PIKA_CONF_H_ -#define PIKA_CONF_H_ - -#include -#include -#include -#include - -#include "slash/include/base_conf.h" -#include "slash/include/slash_mutex.h" -#include "slash/include/slash_string.h" - -#include "include/pika_define.h" -#include "include/pika_meta.h" - -#define kBinlogReadWinDefaultSize 9000 -#define kBinlogReadWinMaxSize 90000 - -typedef slash::RWLock RWLock; - -// global class, class members well initialized -class PikaConf : public slash::BaseConf { - public: - PikaConf(const std::string& path); - ~PikaConf(); - - // Getter - int port() { RWLock l(&rwlock_, false); return port_; } - std::string slaveof() { RWLock l(&rwlock_, false); return slaveof_;} - int slave_priority() { RWLock l(&rwlock_, false); return slave_priority_;} - bool write_binlog() { RWLock l(&rwlock_, false); return write_binlog_;} - int thread_num() { RWLock l(&rwlock_, false); return thread_num_; } - int thread_pool_size() { RWLock l(&rwlock_, false); return thread_pool_size_; } - int sync_thread_num() { RWLock l(&rwlock_, false); return sync_thread_num_; } - std::string log_path() { RWLock l(&rwlock_, false); return log_path_; } - std::string db_path() { RWLock l(&rwlock_, false); return db_path_; } - std::string db_sync_path() { RWLock l(&rwlock_, false); return db_sync_path_; } - int db_sync_speed() { RWLock l(&rwlock_, false); return db_sync_speed_; } - std::string compact_cron() { RWLock l(&rwlock_, false); return compact_cron_; } - std::string compact_interval() { RWLock l(&rwlock_, false); return compact_interval_; } - int64_t write_buffer_size() { RWLock l(&rwlock_, false); return write_buffer_size_; } - int64_t max_write_buffer_size() { RWLock l(&rwlock_, false); return max_write_buffer_size_; } - int64_t max_client_response_size() { RWLock L(&rwlock_, false); return max_client_response_size_;} - int timeout() { RWLock l(&rwlock_, false); return timeout_; } - std::string server_id() { RWLock l(&rwlock_, false); return server_id_; } - std::string requirepass() { RWLock l(&rwlock_, false); return requirepass_; } - std::string masterauth() { RWLock l(&rwlock_, false); return masterauth_; } - std::string bgsave_path() { RWLock l(&rwlock_, false); return bgsave_path_; } - int expire_dump_days() { RWLock l(&rwlock_, false); return expire_dump_days_; } - std::string bgsave_prefix() { RWLock l(&rwlock_, false); return bgsave_prefix_; } - std::string userpass() { RWLock l(&rwlock_, false); return userpass_; } - const std::string suser_blacklist() { RWLock l(&rwlock_, false); return slash::StringConcat(user_blacklist_, COMMA); } - const std::vector& vuser_blacklist() { RWLock l(&rwlock_, false); return user_blacklist_;} - bool classic_mode() { return classic_mode_.load();} - int databases() { RWLock l(&rwlock_, false); return databases_;} - int default_slot_num() { RWLock l(&rwlock_, false); return default_slot_num_;} - const std::vector& table_structs() { RWLock l(&rwlock_, false); return table_structs_; } - std::string default_table() { RWLock l(&rwlock_, false); return default_table_;} - std::string compression() { RWLock l(&rwlock_, false); return compression_; } - int target_file_size_base() { RWLock l(&rwlock_, false); return target_file_size_base_; } - int max_cache_statistic_keys() { RWLock l(&rwlock_, false); return max_cache_statistic_keys_;} - int small_compaction_threshold() { RWLock l(&rwlock_, false); return small_compaction_threshold_;} - int max_background_flushes() { RWLock l(&rwlock_, false); return max_background_flushes_; } - int max_background_compactions() { RWLock 
l(&rwlock_, false); return max_background_compactions_; } - int max_cache_files() { RWLock l(&rwlock_, false); return max_cache_files_; } - int max_bytes_for_level_multiplier() { RWLock l(&rwlock_, false); return max_bytes_for_level_multiplier_; } - int64_t block_size() { RWLock l(&rwlock_, false); return block_size_; } - int64_t block_cache() { RWLock l(&rwlock_, false); return block_cache_; } - bool share_block_cache() { RWLock l(&rwlock_, false); return share_block_cache_; } - bool cache_index_and_filter_blocks() { RWLock l(&rwlock_, false); return cache_index_and_filter_blocks_; } - bool optimize_filters_for_hits() { RWLock l(&rwlock_, false); return optimize_filters_for_hits_; } - bool level_compaction_dynamic_level_bytes() { RWLock l(&rwlock_, false); return level_compaction_dynamic_level_bytes_; } - int expire_logs_nums() { RWLock l(&rwlock_, false); return expire_logs_nums_; } - int expire_logs_days() { RWLock l(&rwlock_, false); return expire_logs_days_; } - std::string conf_path() { RWLock l(&rwlock_, false); return conf_path_; } - bool slave_read_only() { RWLock l(&rwlock_, false); return slave_read_only_; } - int maxclients() { RWLock l(&rwlock_, false); return maxclients_; } - int root_connection_num() { RWLock l(&rwlock_, false); return root_connection_num_; } - bool slowlog_write_errorlog() { return slowlog_write_errorlog_.load();} - int slowlog_slower_than() { return slowlog_log_slower_than_.load(); } - int slowlog_max_len() { RWLock L(&rwlock_, false); return slowlog_max_len_; } - std::string network_interface() { RWLock l(&rwlock_, false); return network_interface_; } - int sync_window_size() { return sync_window_size_.load(); } - - std::string target_redis_host() { return target_redis_host_; } - int target_redis_port() { return target_redis_port_; } - std::string target_redis_pwd() { return target_redis_pwd_; } - int sync_batch_num() { return sync_batch_num_; } - int redis_sender_num() { return redis_sender_num_; } - - // Immutable config items, we don't use lock. - bool daemonize() { return daemonize_; } - std::string pidfile() { return pidfile_; } - int binlog_file_size() { return binlog_file_size_; } - - // Setter - void SetPort(const int value) { - RWLock l(&rwlock_, true); - port_ = value; - } - void SetThreadNum(const int value) { - RWLock l(&rwlock_, true); - thread_num_ = value; - } - void SetTimeout(const int value) { - RWLock l(&rwlock_, true); - TryPushDiffCommands("timeout", std::to_string(value)); - timeout_ = value; - } - void SetThreadPoolSize(const int value) { - RWLock l(&rwlock_, true); - thread_pool_size_ = value; - } - void SetSlaveof(const std::string value) { - RWLock l(&rwlock_, true); - TryPushDiffCommands("slaveof", value); - slaveof_ = value; - } - void SetSlavePriority(const int value) { - RWLock l(&rwlock_, true); - TryPushDiffCommands("slave-priority", std::to_string(value)); - slave_priority_ = value; - } - void SetWriteBinlog(const std::string& value) { - RWLock l(&rwlock_, true); - TryPushDiffCommands("write-binlog", value); - write_binlog_ = (value == "yes") ? 
true : false; - } - void SetMaxCacheStatisticKeys(const int value) { - RWLock l(&rwlock_, true); - TryPushDiffCommands("max-cache-statistic-keys", std::to_string(value)); - max_cache_statistic_keys_ = value; - } - void SetSmallCompactionThreshold(const int value) { - RWLock l(&rwlock_, true); - TryPushDiffCommands("small-compaction-threshold", std::to_string(value)); - small_compaction_threshold_ = value; - } - void SetMaxClientResponseSize(const int value) { - RWLock l(&rwlock_, true); - TryPushDiffCommands("max-client-response-size", std::to_string(value)); - max_client_response_size_ = value; - } - void SetBgsavePath(const std::string &value) { - RWLock l(&rwlock_, true); - bgsave_path_ = value; - if (value[value.length() - 1] != '/') { - bgsave_path_ += "/"; - } - } - void SetExpireDumpDays(const int value) { - RWLock l(&rwlock_, true); - TryPushDiffCommands("dump-expire", std::to_string(value)); - expire_dump_days_ = value; - } - void SetBgsavePrefix(const std::string &value) { - RWLock l(&rwlock_, true); - TryPushDiffCommands("dump-prefix", value); - bgsave_prefix_ = value; - } - void SetRequirePass(const std::string &value) { - RWLock l(&rwlock_, true); - TryPushDiffCommands("requirepass", value); - requirepass_ = value; - } - void SetMasterAuth(const std::string &value) { - RWLock l(&rwlock_, true); - TryPushDiffCommands("masterauth", value); - masterauth_ = value; - } - void SetUserPass(const std::string &value) { - RWLock l(&rwlock_, true); - TryPushDiffCommands("userpass", value); - userpass_ = value; - } - void SetUserBlackList(const std::string &value) { - RWLock l(&rwlock_, true); - TryPushDiffCommands("userblacklist", value); - slash::StringSplit(value, COMMA, user_blacklist_); - for (auto& item : user_blacklist_) { - slash::StringToLower(item); - } - } - void SetExpireLogsNums(const int value) { - RWLock l(&rwlock_, true); - TryPushDiffCommands("expire-logs-nums", std::to_string(value)); - expire_logs_nums_ = value; - } - void SetExpireLogsDays(const int value) { - RWLock l(&rwlock_, true); - TryPushDiffCommands("expire-logs-days", std::to_string(value)); - expire_logs_days_ = value; - } - void SetMaxConnection(const int value) { - RWLock l(&rwlock_, true); - TryPushDiffCommands("maxclients", std::to_string(value)); - maxclients_ = value; - } - void SetRootConnectionNum(const int value) { - RWLock l(&rwlock_, true); - TryPushDiffCommands("root-connection-num", std::to_string(value)); - root_connection_num_ = value; - } - void SetSlowlogWriteErrorlog(const bool value) { - RWLock l(&rwlock_, true); - TryPushDiffCommands("slowlog-write-errorlog", value == true ? 
"yes" : "no"); - slowlog_write_errorlog_.store(value); - } - void SetSlowlogSlowerThan(const int value) { - RWLock l(&rwlock_, true); - TryPushDiffCommands("slowlog-log-slower-than", std::to_string(value)); - slowlog_log_slower_than_.store(value); - } - void SetSlowlogMaxLen(const int value) { - RWLock l(&rwlock_, true); - TryPushDiffCommands("slowlog-max-len", std::to_string(value)); - slowlog_max_len_ = value; - } - void SetDbSyncSpeed(const int value) { - RWLock l(&rwlock_, true); - TryPushDiffCommands("db-sync-speed", std::to_string(value)); - db_sync_speed_ = value; - } - void SetCompactCron(const std::string &value) { - RWLock l(&rwlock_, true); - TryPushDiffCommands("compact-cron", value); - compact_cron_ = value; - } - void SetCompactInterval(const std::string &value) { - RWLock l(&rwlock_, true); - TryPushDiffCommands("compact-interval", value); - compact_interval_ = value; - } - void SetSyncWindowSize(const int &value) { - TryPushDiffCommands("sync-window-size", std::to_string(value)); - sync_window_size_.store(value); - } - - Status TablePartitionsSanityCheck(const std::string& table_name, - const std::set& partition_ids, - bool is_add); - Status AddTablePartitions(const std::string& table_name, - const std::set& partition_ids); - Status RemoveTablePartitions(const std::string& table_name, - const std::set& partition_ids); - - int Load(); - int ConfigRewrite(); - - private: - Status InternalGetTargetTable(const std::string& table_name, - uint32_t* const target); - - int port_; - std::string slaveof_; - int slave_priority_; - int thread_num_; - int thread_pool_size_; - int sync_thread_num_; - std::string log_path_; - std::string db_path_; - std::string db_sync_path_; - int expire_dump_days_; - int db_sync_speed_; - std::string compact_cron_; - std::string compact_interval_; - int64_t write_buffer_size_; - int64_t max_write_buffer_size_; - int64_t max_client_response_size_; - bool daemonize_; - int timeout_; - std::string server_id_; - std::string requirepass_; - std::string masterauth_; - std::string userpass_; - std::vector user_blacklist_; - std::atomic classic_mode_; - int databases_; - int default_slot_num_; - std::vector table_structs_; - std::string default_table_; - std::string bgsave_path_; - std::string bgsave_prefix_; - std::string pidfile_; - - std::string compression_; - int maxclients_; - int root_connection_num_; - std::atomic slowlog_write_errorlog_; - std::atomic slowlog_log_slower_than_; - int slowlog_max_len_; - int expire_logs_days_; - int expire_logs_nums_; - bool slave_read_only_; - std::string conf_path_; - int max_cache_statistic_keys_; - int small_compaction_threshold_; - int max_background_flushes_; - int max_background_compactions_; - int max_cache_files_; - int max_bytes_for_level_multiplier_; - int64_t block_size_; - int64_t block_cache_; - bool share_block_cache_; - bool cache_index_and_filter_blocks_; - bool optimize_filters_for_hits_; - bool level_compaction_dynamic_level_bytes_; - std::atomic sync_window_size_; - - std::string network_interface_; - - // diff commands between cached commands and config file commands - std::map diff_commands_; - void TryPushDiffCommands(const std::string& command, const std::string& value); - - // migrate configure items - std::string target_redis_host_; - int target_redis_port_; - std::string target_redis_pwd_; - int sync_batch_num_; - int redis_sender_num_; - - // - // Critical configure items - // - bool write_binlog_; - int target_file_size_base_; - int binlog_file_size_; - - PikaMeta* local_meta_; - - 
pthread_rwlock_t rwlock_;
-};
-
-#endif
diff --git a/tools/pika_migrate/include/pika_data_distribution.h b/tools/pika_migrate/include/pika_data_distribution.h
deleted file mode 100644
index 19128e3704..0000000000
--- a/tools/pika_migrate/include/pika_data_distribution.h
+++ /dev/null
@@ -1,40 +0,0 @@
-// Copyright (c) 2015-present, Qihoo, Inc. All rights reserved.
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree. An additional grant
-// of patent rights can be found in the PATENTS file in the same directory.
-
-#ifndef PIKA_DATA_DISTRIBUTION_H_
-#define PIKA_DATA_DISTRIBUTION_H_
-
-#include "slash/include/slash_status.h"
-
-// polynomial reserved Crc32 magic num
-const uint32_t IEEE_POLY = 0xedb88320;
-
-class PikaDataDistribution {
- public:
-  virtual ~PikaDataDistribution() = default;
-  // Initialization
-  virtual void Init() = 0;
-  // key map to partition id
-  virtual uint32_t Distribute(const std::string& str, uint32_t partition_num) = 0;
-};
-
-class HashModulo : public PikaDataDistribution {
- public:
-  virtual ~HashModulo() = default;
-  virtual void Init();
-  virtual uint32_t Distribute(const std::string& str, uint32_t partition_num);
-};
-
-class Crc32 : public PikaDataDistribution {
- public:
-  virtual void Init();
-  virtual uint32_t Distribute(const std::string& str, uint32_t partition_num);
- private:
-  void Crc32TableInit(uint32_t poly);
-  uint32_t Crc32Update(uint32_t crc, const char* buf, int len);
-  uint32_t crc32tab[256];
-};
-
-#endif
diff --git a/tools/pika_migrate/include/pika_define.h b/tools/pika_migrate/include/pika_define.h
deleted file mode 100644
index 4610a84df0..0000000000
--- a/tools/pika_migrate/include/pika_define.h
+++ /dev/null
@@ -1,424 +0,0 @@
-// Copyright (c) 2015-present, Qihoo, Inc. All rights reserved.
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree. An additional grant
-// of patent rights can be found in the PATENTS file in the same directory.
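For the Crc32 distribution declared above, a table-driven IEEE CRC-32 followed by a modulo is enough to reproduce `Distribute`. A sketch under that assumption; the seed and final XOR follow the standard reflected CRC-32, which is an assumption about the original implementation:

```cpp
#include <cstdint>
#include <string>

static uint32_t crc32tab[256];

// Mirrors Crc32TableInit: precompute the reflected table for poly 0xedb88320.
void Crc32TableInit(uint32_t poly) {
  for (uint32_t i = 0; i < 256; ++i) {
    uint32_t crc = i;
    for (int j = 0; j < 8; ++j) {
      crc = (crc & 1) ? (crc >> 1) ^ poly : crc >> 1;
    }
    crc32tab[i] = crc;
  }
}

// Mirrors Crc32Update: fold each byte through the table.
uint32_t Crc32Update(uint32_t crc, const char* buf, int len) {
  for (int i = 0; i < len; ++i) {
    crc = crc32tab[(crc ^ static_cast<uint8_t>(buf[i])) & 0xff] ^ (crc >> 8);
  }
  return crc;
}

// key -> crc32(key) % partition_num
uint32_t Distribute(const std::string& key, uint32_t partition_num) {
  uint32_t crc = Crc32Update(0xffffffff, key.data(), static_cast<int>(key.size()));
  return (crc ^ 0xffffffff) % partition_num;
}
```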
- -#ifndef PIKA_DEFINE_H_ -#define PIKA_DEFINE_H_ - -#include -#include - -#include "pink/include/redis_cli.h" - -#define PIKA_SYNC_BUFFER_SIZE 10 -#define PIKA_MAX_WORKER_THREAD_NUM 24 -#define PIKA_REPL_SERVER_TP_SIZE 3 -#define PIKA_META_SYNC_MAX_WAIT_TIME 10 -#define PIKA_SCAN_STEP_LENGTH 1000 - -class PikaServer; - -/* Port shift */ -const int kPortShiftRSync = 1000; -const int kPortShiftReplServer = 2000; - -const std::string kPikaPidFile = "pika.pid"; -const std::string kPikaSecretFile = "rsync.secret"; -const std::string kDefaultRsyncAuth = "default"; - -struct TableStruct { - TableStruct(const std::string& tn, - const uint32_t pn, - const std::set& pi) - : table_name(tn), partition_num(pn), partition_ids(pi) {} - - bool operator == (const TableStruct& table_struct) const { - return table_name == table_struct.table_name - && partition_num == table_struct.partition_num - && partition_ids == table_struct.partition_ids; - } - std::string table_name; - uint32_t partition_num; - std::set partition_ids; -}; - -struct WorkerCronTask { - int task; - std::string ip_port; -}; -typedef WorkerCronTask MonitorCronTask; -//task define -#define TASK_KILL 0 -#define TASK_KILLALL 1 - -//slave item -struct SlaveItem { - std::string ip_port; - std::string ip; - int port; - int conn_fd; - int stage; - std::vector table_structs; - struct timeval create_time; -}; - -enum ReplState { - kNoConnect = 0, - kTryConnect = 1, - kTryDBSync = 2, - kWaitDBSync = 3, - kWaitReply = 4, - kConnected = 5, - kError = 6 -}; - -// debug only -const std::string ReplStateMsg[] = { - "kNoConnect", - "kTryConnect", - "kTryDBSync", - "kWaitDBSync", - "kWaitReply", - "kConnected", - "kError" -}; - -enum SlotState { - INFREE = 0, - INBUSY = 1, -}; - -struct BinlogOffset { - uint32_t filenum; - uint64_t offset; - BinlogOffset() - : filenum(0), offset(0) {} - BinlogOffset(uint32_t num, uint64_t off) - : filenum(num), offset(off) {} - BinlogOffset(const BinlogOffset& other) { - filenum = other.filenum; - offset = other.offset; - } - std::string ToString() const { - return "filenum: " + std::to_string(filenum) + " offset: " + std::to_string(offset); - } - bool operator==(const BinlogOffset& other) const { - if (filenum == other.filenum && offset == other.offset) { - return true; - } - return false; - } -}; - -//dbsync arg -struct DBSyncArg { - PikaServer* p; - std::string ip; - int port; - std::string table_name; - uint32_t partition_id; - DBSyncArg(PikaServer* const _p, - const std::string& _ip, - int _port, - const std::string& _table_name, - uint32_t _partition_id) - : p(_p), ip(_ip), port(_port), - table_name(_table_name), partition_id(_partition_id) {} -}; - -// rm define -enum SlaveState { - kSlaveNotSync = 0, - kSlaveDbSync = 1, - kSlaveBinlogSync = 2, -}; - -// debug only -const std::string SlaveStateMsg[] = { - "SlaveNotSync", - "SlaveDbSync", - "SlaveBinlogSync" -}; - -enum BinlogSyncState { - kNotSync = 0, - kReadFromCache = 1, - kReadFromFile = 2, -}; - -// debug only -const std::string BinlogSyncStateMsg[] = { - "NotSync", - "ReadFromCache", - "ReadFromFile" -}; - -struct BinlogChip { - BinlogOffset offset_; - std::string binlog_; - BinlogChip(BinlogOffset offset, std::string binlog) : offset_(offset), binlog_(binlog) { - } - BinlogChip(const BinlogChip& binlog_chip) { - offset_ = binlog_chip.offset_; - binlog_ = binlog_chip.binlog_; - } -}; - -struct PartitionInfo { - PartitionInfo(const std::string& table_name, uint32_t partition_id) - : table_name_(table_name), partition_id_(partition_id) { - } - PartitionInfo() : 
partition_id_(0) { - } - bool operator==(const PartitionInfo& other) const { - if (table_name_ == other.table_name_ - && partition_id_ == other.partition_id_) { - return true; - } - return false; - } - int operator<(const PartitionInfo& other) const { - int ret = strcmp(table_name_.data(), other.table_name_.data()); - if (!ret) { - if (partition_id_ < other.partition_id_) { - ret = -1; - } else if (partition_id_ > other.partition_id_) { - ret = 1; - } else { - ret = 0; - } - } - return ret; - } - std::string ToString() const { - return "(" + table_name_ + ":" + std::to_string(partition_id_) + ")"; - } - std::string table_name_; - uint32_t partition_id_; -}; - -struct hash_partition_info { - size_t operator()(const PartitionInfo& n) const { - return std::hash()(n.table_name_) ^ std::hash()(n.partition_id_); - } -}; - -class Node { - public: - Node(const std::string& ip, int port) : ip_(ip), port_(port) { - } - virtual ~Node() = default; - Node() : port_(0) { - } - const std::string& Ip() const { - return ip_; - } - int Port() const { - return port_; - } - std::string ToString() const { - return ip_ + ":" + std::to_string(port_); - } - private: - std::string ip_; - int port_; -}; - -class RmNode : public Node { - public: - RmNode(const std::string& ip, int port, - const PartitionInfo& partition_info) - : Node(ip, port), - partition_info_(partition_info), - session_id_(0), - last_send_time_(0), - last_recv_time_(0) {} - - RmNode(const std::string& ip, - int port, - const std::string& table_name, - uint32_t partition_id) - : Node(ip, port), - partition_info_(table_name, partition_id), - session_id_(0), - last_send_time_(0), - last_recv_time_(0) {} - - RmNode(const std::string& ip, - int port, - const std::string& table_name, - uint32_t partition_id, - int32_t session_id) - : Node(ip, port), - partition_info_(table_name, partition_id), - session_id_(session_id), - last_send_time_(0), - last_recv_time_(0) {} - - RmNode(const std::string& table_name, - uint32_t partition_id) - : Node(), - partition_info_(table_name, partition_id), - session_id_(0), - last_send_time_(0), - last_recv_time_(0) {} - RmNode() - : Node(), - partition_info_(), - session_id_(0), - last_send_time_(0), - last_recv_time_(0) {} - - virtual ~RmNode() = default; - bool operator==(const RmNode& other) const { - if (partition_info_.table_name_ == other.TableName() - && partition_info_.partition_id_ == other.PartitionId() - && Ip() == other.Ip() && Port() == other.Port()) { - return true; - } - return false; - } - - const std::string& TableName() const { - return partition_info_.table_name_; - } - uint32_t PartitionId() const { - return partition_info_.partition_id_; - } - const PartitionInfo& NodePartitionInfo() const { - return partition_info_; - } - void SetSessionId(uint32_t session_id) { - session_id_ = session_id; - } - int32_t SessionId() const { - return session_id_; - } - std::string ToString() const { - return "partition=" + TableName() + "_" + std::to_string(PartitionId()) + ",ip_port=" - + Ip() + ":" + std::to_string(Port()) + ",session id=" + std::to_string(SessionId()); - } - void SetLastSendTime(uint64_t last_send_time) { - last_send_time_ = last_send_time; - } - uint64_t LastSendTime() const { - return last_send_time_; - } - void SetLastRecvTime(uint64_t last_recv_time) { - last_recv_time_ = last_recv_time; - } - uint64_t LastRecvTime() const { - return last_recv_time_; - } - private: - PartitionInfo partition_info_; - int32_t session_id_; - uint64_t last_send_time_; - uint64_t last_recv_time_; -}; - -struct 
hash_rm_node { - size_t operator()(const RmNode& n) const { - return std::hash()(n.TableName()) ^ std::hash()(n.PartitionId()) ^ std::hash()(n.Ip()) ^ std::hash()(n.Port()); - } -}; - -struct WriteTask { - struct RmNode rm_node_; - struct BinlogChip binlog_chip_; - WriteTask(RmNode rm_node, BinlogChip binlog_chip) : rm_node_(rm_node), binlog_chip_(binlog_chip) { - } -}; - -//slowlog define -#define SLOWLOG_ENTRY_MAX_ARGC 32 -#define SLOWLOG_ENTRY_MAX_STRING 128 - -//slowlog entry -struct SlowlogEntry { - int64_t id; - int64_t start_time; - int64_t duration; - pink::RedisCmdArgsType argv; -}; - -#define PIKA_MIN_RESERVED_FDS 5000 - -const int SLAVE_ITEM_STAGE_ONE = 1; -const int SLAVE_ITEM_STAGE_TWO = 2; - -//repl_state_ -const int PIKA_REPL_NO_CONNECT = 0; -const int PIKA_REPL_SHOULD_META_SYNC = 1; -const int PIKA_REPL_META_SYNC_DONE = 2; -const int PIKA_REPL_ERROR = 3; - -//role -const int PIKA_ROLE_SINGLE = 0; -const int PIKA_ROLE_SLAVE = 1; -const int PIKA_ROLE_MASTER = 2; - -/* - * The size of Binlogfile - */ -//static uint64_t kBinlogSize = 128; -//static const uint64_t kBinlogSize = 1024 * 1024 * 100; - -enum RecordType { - kZeroType = 0, - kFullType = 1, - kFirstType = 2, - kMiddleType = 3, - kLastType = 4, - kEof = 5, - kBadRecord = 6, - kOldRecord = 7 -}; - -/* - * the block size that we read and write from write2file - * the default size is 64KB - */ -static const size_t kBlockSize = 64 * 1024; - -/* - * Header is Type(1 byte), length (3 bytes), time (4 bytes) - */ -static const size_t kHeaderSize = 1 + 3 + 4; - -/* - * the size of memory when we use memory mode - * the default memory size is 2GB - */ -const int64_t kPoolSize = 1073741824; - -const std::string kBinlogPrefix = "write2file"; -const size_t kBinlogPrefixLen = 10; - -const std::string kPikaMeta = "meta"; -const std::string kManifest = "manifest"; - -/* - * define common character - * - */ -#define COMMA ',' - -/* - * define reply between master and slave - * - */ -const std::string kInnerReplOk = "ok"; -const std::string kInnerReplWait = "wait"; - -const unsigned int kMaxBitOpInputKey = 12800; -const int kMaxBitOpInputBit = 21; -/* - * db sync - */ -const uint32_t kDBSyncMaxGap = 50; -const std::string kDBSyncModule = "document"; - -const std::string kBgsaveInfoFile = "info"; -#endif diff --git a/tools/pika_migrate/include/pika_dispatch_thread.h b/tools/pika_migrate/include/pika_dispatch_thread.h deleted file mode 100644 index 8c52053013..0000000000 --- a/tools/pika_migrate/include/pika_dispatch_thread.h +++ /dev/null @@ -1,57 +0,0 @@ -// Copyright (c) 2015-present, Qihoo, Inc. All rights reserved. -// This source code is licensed under the BSD-style license found in the -// LICENSE file in the root directory of this source tree. An additional grant -// of patent rights can be found in the PATENTS file in the same directory. 
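pika_define.h pairs PartitionInfo with hash_partition_info (and RmNode with hash_rm_node) so these types can key unordered containers. A compilable miniature of the same pattern; the `Lite` names and the `int` payload are illustrative stand-ins:

```cpp
#include <cstdint>
#include <functional>
#include <string>
#include <unordered_map>

struct PartitionInfoLite {
  std::string table_name_;
  uint32_t partition_id_ = 0;
  bool operator==(const PartitionInfoLite& o) const {
    return table_name_ == o.table_name_ && partition_id_ == o.partition_id_;
  }
};

struct HashPartitionInfoLite {
  size_t operator()(const PartitionInfoLite& n) const {
    // Same combining scheme as hash_partition_info above: XOR of the two
    // member hashes. Cheap, though it can collide for swapped values.
    return std::hash<std::string>()(n.table_name_) ^
           std::hash<uint32_t>()(n.partition_id_);
  }
};

int main() {
  // The custom hasher is passed as the third template argument.
  std::unordered_map<PartitionInfoLite, int, HashPartitionInfoLite> sessions;
  sessions[{"db0", 3}] = 42;  // one payload per (table, partition)
  return sessions.count({"db0", 3}) == 1 ? 0 : 1;
}
```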
-
-#ifndef PIKA_DISPATCH_THREAD_H_
-#define PIKA_DISPATCH_THREAD_H_
-
-#include "include/pika_client_conn.h"
-
-class PikaDispatchThread {
- public:
-  PikaDispatchThread(std::set<std::string> &ips, int port, int work_num,
-                     int cron_interval, int queue_limit);
-  ~PikaDispatchThread();
-  int StartThread();
-
-  int64_t ThreadClientList(std::vector<ClientInfo> *clients);
-
-  bool ClientKill(const std::string& ip_port);
-  void ClientKillAll();
-
-  void SetQueueLimit(int queue_limit) {
-    thread_rep_->SetQueueLimit(queue_limit);
-  }
-
- private:
-  class ClientConnFactory : public pink::ConnFactory {
-   public:
-    virtual std::shared_ptr<pink::PinkConn> NewPinkConn(
-        int connfd,
-        const std::string &ip_port,
-        pink::Thread* server_thread,
-        void* worker_specific_data,
-        pink::PinkEpoll* pink_epoll) const {
-      return std::make_shared<PikaClientConn>(connfd, ip_port, server_thread, pink_epoll, pink::HandleType::kAsynchronous);
-    }
-  };
-
-  class Handles : public pink::ServerHandle {
-   public:
-    explicit Handles(PikaDispatchThread* pika_disptcher)
-        : pika_disptcher_(pika_disptcher) {
-    }
-    using pink::ServerHandle::AccessHandle;
-    bool AccessHandle(std::string& ip) const override;
-    void CronHandle() const override;
-
-   private:
-    PikaDispatchThread* pika_disptcher_;
-  };
-
-  ClientConnFactory conn_factory_;
-  Handles handles_;
-  pink::ServerThread* thread_rep_;
-};
-#endif
diff --git a/tools/pika_migrate/include/pika_geo.h b/tools/pika_migrate/include/pika_geo.h
deleted file mode 100644
index 6d8ac4495c..0000000000
--- a/tools/pika_migrate/include/pika_geo.h
+++ /dev/null
@@ -1,176 +0,0 @@
-// Copyright (c) 2015-present, Qihoo, Inc. All rights reserved.
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree. An additional grant
-// of patent rights can be found in the PATENTS file in the same directory.
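Based only on the constructor signature in the PikaDispatchThread header above, wiring the dispatcher up would look roughly like this; the port, thread count, and the millisecond interpretation of `cron_interval` are assumptions for illustration:

```cpp
#include <set>
#include <string>

// Hypothetical wiring: listen on all interfaces at 9221 with 4 worker
// threads, a 1000 ms cron tick, and a queue capped at 20000 pending items.
void StartDispatcher() {
  std::set<std::string> ips = {"0.0.0.0"};
  PikaDispatchThread dispatcher(ips, 9221, 4 /* work_num */,
                                1000 /* cron_interval, assumed ms */,
                                20000 /* queue_limit */);
  dispatcher.StartThread();
}
```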
- -#ifndef PIKA_GEO_H_ -#define PIKA_GEO_H_ - -#include "include/pika_command.h" -#include "include/pika_partition.h" - -/* - * zset - */ -enum Sort { - Unsort, //default - Asc, - Desc -}; - -struct GeoPoint { - std::string member; - double longitude; - double latitude; -}; - -struct NeighborPoint { - std::string member; - double score; - double distance; -}; - -struct GeoRange { - std::string member; - double longitude; - double latitude; - double distance; - std::string unit; - bool withdist; - bool withhash; - bool withcoord; - int option_num; - bool count; - int count_limit; - bool store; - bool storedist; - std::string storekey; - Sort sort; -}; - -class GeoAddCmd : public Cmd { - public: - GeoAddCmd(const std::string& name, int arity, uint16_t flag) - : Cmd(name, arity, flag) {} - virtual std::vector current_key() const { - std::vector res; - res.push_back(key_); - return res; - } - virtual void Do(std::shared_ptr partition = nullptr); - virtual Cmd* Clone() override { - return new GeoAddCmd(*this); - } - private: - std::string key_; - std::vector pos_; - virtual void DoInitial(); -}; - -class GeoPosCmd : public Cmd { - public: - GeoPosCmd(const std::string& name, int arity, uint16_t flag) - : Cmd(name, arity, flag) {} - virtual std::vector current_key() const { - std::vector res; - res.push_back(key_); - return res; - } - virtual void Do(std::shared_ptr partition = nullptr); - virtual Cmd* Clone() override { - return new GeoPosCmd(*this); - } - private: - std::string key_; - std::vector members_; - virtual void DoInitial(); -}; - -class GeoDistCmd : public Cmd { - public: - GeoDistCmd(const std::string& name, int arity, uint16_t flag) - : Cmd(name, arity, flag) {} - virtual std::vector current_key() const { - std::vector res; - res.push_back(key_); - return res; - } - virtual void Do(std::shared_ptr partition = nullptr); - virtual Cmd* Clone() override { - return new GeoDistCmd(*this); - } - private: - std::string key_, first_pos_, second_pos_, unit_; - virtual void DoInitial(); -}; - -class GeoHashCmd : public Cmd { - public: - GeoHashCmd(const std::string& name, int arity, uint16_t flag) - : Cmd(name, arity, flag) {} - virtual std::vector current_key() const { - std::vector res; - res.push_back(key_); - return res; - } - virtual void Do(std::shared_ptr partition = nullptr); - virtual Cmd* Clone() override { - return new GeoHashCmd(*this); - } - private: - std::string key_; - std::vector members_; - virtual void DoInitial(); -}; - -class GeoRadiusCmd : public Cmd { - public: - GeoRadiusCmd(const std::string& name, int arity, uint16_t flag) - : Cmd(name, arity, flag) {} - virtual void Do(std::shared_ptr partition = nullptr); - virtual Cmd* Clone() override { - return new GeoRadiusCmd(*this); - } - private: - std::string key_; - GeoRange range_; - virtual void DoInitial(); - virtual void Clear() { - range_.withdist = false; - range_.withcoord = false; - range_.withhash = false; - range_.count = false; - range_.store = false; - range_.storedist = false; - range_.option_num = 0; - range_.count_limit = 0; - range_.sort = Unsort; - } -}; - -class GeoRadiusByMemberCmd : public Cmd { - public: - GeoRadiusByMemberCmd(const std::string& name, int arity, uint16_t flag) - : Cmd(name, arity, flag) {} - virtual void Do(std::shared_ptr partition = nullptr); - virtual Cmd* Clone() override { - return new GeoRadiusByMemberCmd(*this); - } - private: - std::string key_; - GeoRange range_; - virtual void DoInitial(); - virtual void Clear() { - range_.withdist = false; - range_.withcoord = false; - 
range_.withhash = false; - range_.count = false; - range_.store = false; - range_.storedist = false; - range_.option_num = 0; - range_.count_limit = 0; - range_.sort = Unsort; - } -}; - -#endif diff --git a/tools/pika_migrate/include/pika_geohash.h b/tools/pika_migrate/include/pika_geohash.h deleted file mode 100644 index e963839a4a..0000000000 --- a/tools/pika_migrate/include/pika_geohash.h +++ /dev/null @@ -1,118 +0,0 @@ -/* - * Copyright (c) 2013-2014, yinqiwen - * Copyright (c) 2014, Matt Stancliff . - * Copyright (c) 2015, Salvatore Sanfilippo . - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are met: - * - * * Redistributions of source code must retain the above copyright notice, - * this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * * Neither the name of Redis nor the names of its contributors may be used - * to endorse or promote products derived from this software without - * specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" - * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE - * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS - * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR - * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF - * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS - * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN - * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) - * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF - * THE POSSIBILITY OF SUCH DAMAGE. - */ - -#ifndef PIKA_GEOHASH_H_ -#define PIKA_GEOHASH_H_ - -#include -#include -#include - -#if defined(__cplusplus) -extern "C" { -#endif - -#define HASHISZERO(r) (!(r).bits && !(r).step) -#define RANGEISZERO(r) (!(r).max && !(r).min) -#define RANGEPISZERO(r) (r == NULL || RANGEISZERO(*r)) - -#define GEO_STEP_MAX 26 /* 26*2 = 52 bits. 
*/ - -/* Limits from EPSG:900913 / EPSG:3785 / OSGEO:41001 */ -#define GEO_LAT_MIN -85.05112878 -#define GEO_LAT_MAX 85.05112878 -#define GEO_LONG_MIN -180 -#define GEO_LONG_MAX 180 - -typedef enum { - GEOHASH_NORTH = 0, - GEOHASH_EAST, - GEOHASH_WEST, - GEOHASH_SOUTH, - GEOHASH_SOUTH_WEST, - GEOHASH_SOUTH_EAST, - GEOHASH_NORT_WEST, - GEOHASH_NORT_EAST -} GeoDirection; - -typedef struct { - uint64_t bits; - uint8_t step; -} GeoHashBits; - -typedef struct { - double min; - double max; -} GeoHashRange; - -typedef struct { - GeoHashBits hash; - GeoHashRange longitude; - GeoHashRange latitude; -} GeoHashArea; - -typedef struct { - GeoHashBits north; - GeoHashBits east; - GeoHashBits west; - GeoHashBits south; - GeoHashBits north_east; - GeoHashBits south_east; - GeoHashBits north_west; - GeoHashBits south_west; -} GeoHashNeighbors; - -/* - * 0:success - * -1:failed - */ -void geohashGetCoordRange(GeoHashRange *long_range, GeoHashRange *lat_range); -int geohashEncode(const GeoHashRange *long_range, const GeoHashRange *lat_range, - double longitude, double latitude, uint8_t step, - GeoHashBits *hash); -int geohashEncodeType(double longitude, double latitude, - uint8_t step, GeoHashBits *hash); -int geohashEncodeWGS84(double longitude, double latitude, uint8_t step, - GeoHashBits *hash); -int geohashDecode(const GeoHashRange long_range, const GeoHashRange lat_range, - const GeoHashBits hash, GeoHashArea *area); -int geohashDecodeType(const GeoHashBits hash, GeoHashArea *area); -int geohashDecodeWGS84(const GeoHashBits hash, GeoHashArea *area); -int geohashDecodeAreaToLongLat(const GeoHashArea *area, double *xy); -int geohashDecodeToLongLatType(const GeoHashBits hash, double *xy); -int geohashDecodeToLongLatWGS84(const GeoHashBits hash, double *xy); -int geohashDecodeToLongLatMercator(const GeoHashBits hash, double *xy); -void geohashNeighbors(const GeoHashBits *hash, GeoHashNeighbors *neighbors); - -#if defined(__cplusplus) -} -#endif -#endif /* PIKA_GEOHASH_H_ */ diff --git a/tools/pika_migrate/include/pika_geohash_helper.h b/tools/pika_migrate/include/pika_geohash_helper.h deleted file mode 100644 index 0642455fa4..0000000000 --- a/tools/pika_migrate/include/pika_geohash_helper.h +++ /dev/null @@ -1,70 +0,0 @@ -/* - * Copyright (c) 2013-2014, yinqiwen - * Copyright (c) 2014, Matt Stancliff . - * Copyright (c) 2015, Salvatore Sanfilippo . - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are met: - * - * * Redistributions of source code must retain the above copyright notice, - * this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * * Neither the name of Redis nor the names of its contributors may be used - * to endorse or promote products derived from this software without - * specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" - * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE - * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS - * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR - * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF - * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS - * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN - * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) - * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF - * THE POSSIBILITY OF SUCH DAMAGE. - */ - -#ifndef PIKA_GEOHASH_HELPER_HPP_ -#define PIKA_GEOHASH_HELPER_HPP_ - -#include "include/pika_geohash.h" - -#define GZERO(s) s.bits = s.step = 0; -#define GISZERO(s) (!s.bits && !s.step) -#define GISNOTZERO(s) (s.bits || s.step) - -typedef uint64_t GeoHashFix52Bits; -typedef uint64_t GeoHashVarBits; - -typedef struct { - GeoHashBits hash; - GeoHashArea area; - GeoHashNeighbors neighbors; -} GeoHashRadius; - -int GeoHashBitsComparator(const GeoHashBits *a, const GeoHashBits *b); -uint8_t geohashEstimateStepsByRadius(double range_meters, double lat); -int geohashBoundingBox(double longitude, double latitude, double radius_meters, - double *bounds); -GeoHashRadius geohashGetAreasByRadius(double longitude, - double latitude, double radius_meters); -GeoHashRadius geohashGetAreasByRadiusWGS84(double longitude, double latitude, - double radius_meters); -GeoHashRadius geohashGetAreasByRadiusMercator(double longitude, double latitude, - double radius_meters); -GeoHashFix52Bits geohashAlign52Bits(const GeoHashBits hash); -double geohashGetDistance(double lon1d, double lat1d, - double lon2d, double lat2d); -int geohashGetDistanceIfInRadius(double x1, double y1, - double x2, double y2, double radius, - double *distance); -int geohashGetDistanceIfInRadiusWGS84(double x1, double y1, double x2, - double y2, double radius, - double *distance); - -#endif /* PIKA_GEOHASH_HELPER_HPP_ */ diff --git a/tools/pika_migrate/include/pika_hash.h b/tools/pika_migrate/include/pika_hash.h deleted file mode 100644 index 0658f0e73e..0000000000 --- a/tools/pika_migrate/include/pika_hash.h +++ /dev/null @@ -1,370 +0,0 @@ -// Copyright (c) 2015-present, Qihoo, Inc. All rights reserved. -// This source code is licensed under the BSD-style license found in the -// LICENSE file in the root directory of this source tree. An additional grant -// of patent rights can be found in the PATENTS file in the same directory. 
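geohashGetDistance in the helper header above computes great-circle distance; the haversine form below reproduces that computation under the assumption that the same Earth radius constant as Redis (6372797.560856 m) is used:

```cpp
#include <cmath>

const double kEarthRadiusMeters = 6372797.560856;  // assumed, matches Redis
const double kDegToRad = M_PI / 180.0;

// Haversine distance in meters between two (lon, lat) points in degrees.
double HaversineDistance(double lon1d, double lat1d, double lon2d, double lat2d) {
  double lat1 = lat1d * kDegToRad;
  double lat2 = lat2d * kDegToRad;
  double u = sin((lat2d - lat1d) * kDegToRad / 2.0);
  double v = sin((lon2d - lon1d) * kDegToRad / 2.0);
  double a = u * u + cos(lat1) * cos(lat2) * v * v;
  return 2.0 * kEarthRadiusMeters * asin(sqrt(a));
}
```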
- -#ifndef PIKA_HASH_H_ -#define PIKA_HASH_H_ - -#include "blackwidow/blackwidow.h" - -#include "include/pika_command.h" -#include "include/pika_partition.h" - -/* - * hash - */ -class HDelCmd : public Cmd { - public: - HDelCmd(const std::string& name, int arity, uint16_t flag) - : Cmd(name, arity, flag) {} - virtual std::vector current_key() const { - std::vector res; - res.push_back(key_); - return res; - } - virtual void Do(std::shared_ptr partition = nullptr); - virtual Cmd* Clone() override { - return new HDelCmd(*this); - } - private: - std::string key_; - std::vector fields_; - virtual void DoInitial() override; -}; - -class HGetCmd : public Cmd { - public: - HGetCmd(const std::string& name, int arity, uint16_t flag) - : Cmd(name, arity, flag) {} - virtual std::vector current_key() const { - std::vector res; - res.push_back(key_); - return res; - } - virtual void Do(std::shared_ptr partition = nullptr); - virtual Cmd* Clone() override { - return new HGetCmd(*this); - } - private: - std::string key_, field_; - virtual void DoInitial() override; -}; - -class HGetallCmd : public Cmd { - public: - HGetallCmd(const std::string& name, int arity, uint16_t flag) - : Cmd(name, arity, flag) {} - virtual std::vector current_key() const { - std::vector res; - res.push_back(key_); - return res; - } - virtual void Do(std::shared_ptr partition = nullptr); - virtual Cmd* Clone() override { - return new HGetallCmd(*this); - } - private: - std::string key_; - virtual void DoInitial() override; -}; - -class HSetCmd : public Cmd { - public: - HSetCmd(const std::string& name, int arity, uint16_t flag) - : Cmd(name, arity, flag) {} - virtual std::vector current_key() const { - std::vector res; - res.push_back(key_); - return res; - } - virtual void Do(std::shared_ptr partition = nullptr); - virtual Cmd* Clone() override { - return new HSetCmd(*this); - } - private: - std::string key_, field_, value_; - virtual void DoInitial() override; -}; - -class HExistsCmd : public Cmd { - public: - HExistsCmd(const std::string& name, int arity, uint16_t flag) - : Cmd(name, arity, flag) {} - virtual std::vector current_key() const { - std::vector res; - res.push_back(key_); - return res; - } - virtual void Do(std::shared_ptr partition = nullptr); - virtual Cmd* Clone() override { - return new HExistsCmd(*this); - } - private: - std::string key_, field_; - virtual void DoInitial() override; -}; - -class HIncrbyCmd : public Cmd { - public: - HIncrbyCmd(const std::string& name, int arity, uint16_t flag) - : Cmd(name, arity, flag) {} - virtual std::vector current_key() const { - std::vector res; - res.push_back(key_); - return res; - } - virtual void Do(std::shared_ptr partition = nullptr); - virtual Cmd* Clone() override { - return new HIncrbyCmd(*this); - } - private: - std::string key_, field_; - int64_t by_; - virtual void DoInitial() override; -}; - -class HIncrbyfloatCmd : public Cmd { - public: - HIncrbyfloatCmd(const std::string& name, int arity, uint16_t flag) - : Cmd(name, arity, flag) {} - virtual std::vector current_key() const { - std::vector res; - res.push_back(key_); - return res; - } - virtual void Do(std::shared_ptr partition = nullptr); - virtual Cmd* Clone() override { - return new HIncrbyfloatCmd(*this); - } - private: - std::string key_, field_, by_; - virtual void DoInitial() override; -}; - -class HKeysCmd : public Cmd { - public: - HKeysCmd(const std::string& name, int arity, uint16_t flag) - : Cmd(name, arity, flag) {} - virtual std::vector current_key() const { - std::vector res; - 
res.push_back(key_); - return res; - } - virtual void Do(std::shared_ptr partition = nullptr); - virtual Cmd* Clone() override { - return new HKeysCmd(*this); - } - private: - std::string key_; - virtual void DoInitial() override; -}; - -class HLenCmd : public Cmd { - public: - HLenCmd(const std::string& name, int arity, uint16_t flag) - : Cmd(name, arity, flag) {} - virtual std::vector current_key() const { - std::vector res; - res.push_back(key_); - return res; - } - virtual void Do(std::shared_ptr partition = nullptr); - virtual Cmd* Clone() override { - return new HLenCmd(*this); - } - private: - std::string key_; - virtual void DoInitial() override; -}; - -class HMgetCmd : public Cmd { - public: - HMgetCmd(const std::string& name, int arity, uint16_t flag) - : Cmd(name, arity, flag) {} - virtual std::vector current_key() const { - std::vector res; - res.push_back(key_); - return res; - } - virtual void Do(std::shared_ptr partition = nullptr); - virtual Cmd* Clone() override { - return new HMgetCmd(*this); - } - private: - std::string key_; - std::vector fields_; - virtual void DoInitial() override; -}; - -class HMsetCmd : public Cmd { - public: - HMsetCmd(const std::string& name, int arity, uint16_t flag) - : Cmd(name, arity, flag) {} - virtual std::vector current_key() const { - std::vector res; - res.push_back(key_); - return res; - } - virtual void Do(std::shared_ptr partition = nullptr); - virtual Cmd* Clone() override { - return new HMsetCmd(*this); - } - private: - std::string key_; - std::vector fvs_; - virtual void DoInitial() override; -}; - -class HSetnxCmd : public Cmd { - public: - HSetnxCmd(const std::string& name, int arity, uint16_t flag) - : Cmd(name, arity, flag) {} - virtual std::vector current_key() const { - std::vector res; - res.push_back(key_); - return res; - } - virtual void Do(std::shared_ptr partition = nullptr); - virtual Cmd* Clone() override { - return new HSetnxCmd(*this); - } - private: - std::string key_, field_, value_; - virtual void DoInitial() override; -}; - -class HStrlenCmd : public Cmd { - public: - HStrlenCmd(const std::string& name, int arity, uint16_t flag) - : Cmd(name, arity, flag) {} - virtual std::vector current_key() const { - std::vector res; - res.push_back(key_); - return res; - } - virtual void Do(std::shared_ptr partition = nullptr); - virtual Cmd* Clone() override { - return new HStrlenCmd(*this); - } - private: - std::string key_, field_; - virtual void DoInitial() override; -}; - -class HValsCmd : public Cmd { - public: - HValsCmd(const std::string& name, int arity, uint16_t flag) - : Cmd(name, arity, flag) {} - virtual std::vector current_key() const { - std::vector res; - res.push_back(key_); - return res; - } - virtual void Do(std::shared_ptr partition = nullptr); - virtual Cmd* Clone() override { - return new HValsCmd(*this); - } - private: - std::string key_, field_; - virtual void DoInitial() override; -}; - -class HScanCmd : public Cmd { - public: - HScanCmd(const std::string& name, int arity, uint16_t flag) - : Cmd(name, arity, flag), pattern_("*"), count_(10) {} - virtual std::vector current_key() const { - std::vector res; - res.push_back(key_); - return res; - } - virtual void Do(std::shared_ptr partition = nullptr); - virtual Cmd* Clone() override { - return new HScanCmd(*this); - } - private: - std::string key_, pattern_; - int64_t cursor_, count_; - virtual void DoInitial() override; - virtual void Clear() { - pattern_ = "*"; - count_ = 10; - } -}; - -class HScanxCmd : public Cmd { - public: - HScanxCmd(const 
std::string& name, int arity, uint16_t flag) - : Cmd(name, arity, flag), pattern_("*"), count_(10) {} - virtual std::vector current_key() const { - std::vector res; - res.push_back(key_); - return res; - } - virtual void Do(std::shared_ptr partition = nullptr); - virtual Cmd* Clone() override { - return new HScanxCmd(*this); - } - private: - std::string key_, start_field_, pattern_; - int64_t count_; - virtual void DoInitial() override; - virtual void Clear() { - pattern_ = "*"; - count_ = 10; - } -}; - -class PKHScanRangeCmd : public Cmd { - public: - PKHScanRangeCmd(const std::string& name, int arity, uint16_t flag) - : Cmd(name, arity, flag), pattern_("*"), limit_(10) {} - virtual std::vector current_key() const { - std::vector res; - res.push_back(key_); - return res; - } - virtual void Do(std::shared_ptr partition = nullptr); - virtual Cmd* Clone() override { - return new PKHScanRangeCmd(*this); - } - private: - std::string key_; - std::string field_start_; - std::string field_end_; - std::string pattern_; - int64_t limit_; - virtual void DoInitial() override; - virtual void Clear() { - pattern_ = "*"; - limit_ = 10; - } -}; - -class PKHRScanRangeCmd : public Cmd { - public: - PKHRScanRangeCmd(const std::string& name, int arity, uint16_t flag) - : Cmd(name, arity, flag), pattern_("*"), limit_(10) {} - virtual std::vector current_key() const { - std::vector res; - res.push_back(key_); - return res; - } - virtual void Do(std::shared_ptr partition = nullptr); - virtual Cmd* Clone() override { - return new PKHRScanRangeCmd(*this); - } - private: - std::string key_; - std::string field_start_; - std::string field_end_; - std::string pattern_; - int64_t limit_; - virtual void DoInitial() override; - virtual void Clear() { - pattern_ = "*"; - limit_ = 10; - } -}; -#endif diff --git a/tools/pika_migrate/include/pika_hyperloglog.h b/tools/pika_migrate/include/pika_hyperloglog.h deleted file mode 100644 index ecf3b9036f..0000000000 --- a/tools/pika_migrate/include/pika_hyperloglog.h +++ /dev/null @@ -1,69 +0,0 @@ -// Copyright (c) 2015-present, Qihoo, Inc. All rights reserved. -// This source code is licensed under the BSD-style license found in the -// LICENSE file in the root directory of this source tree. An additional grant -// of patent rights can be found in the PATENTS file in the same directory. 
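
Every hash command above is one concrete Cmd with the same contract: DoInitial() parses and validates argv into the private members, current_key() exposes the touched key so the dispatcher can route to a partition and take the record lock, Do() executes against that partition's blackwidow instance, and Clone() gives each execution its own object. The run-together text above has lost the angle-bracketed template arguments; with them restored, a typical declaration reads (a reconstruction, shown for HGetCmd):

    // HGetCmd with its template arguments restored; the other hash commands
    // differ only in the members that DoInitial() fills in.
    class HGetCmd : public Cmd {
     public:
      HGetCmd(const std::string& name, int arity, uint16_t flag)
          : Cmd(name, arity, flag) {}
      virtual std::vector<std::string> current_key() const {
        std::vector<std::string> res;
        res.push_back(key_);           // key used for routing and record locking
        return res;
      }
      virtual void Do(std::shared_ptr<Partition> partition = nullptr);
      virtual Cmd* Clone() override { return new HGetCmd(*this); }
     private:
      std::string key_, field_;        // filled in by DoInitial()
      virtual void DoInitial() override;
    };
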
- -#ifndef PIKA_HYPERLOGLOG_H_ -#define PIKA_HYPERLOGLOG_H_ - -#include "include/pika_command.h" -#include "include/pika_partition.h" - -/* - * hyperloglog - */ -class PfAddCmd : public Cmd { - public: - PfAddCmd(const std::string& name, int arity, uint16_t flag) - : Cmd(name, arity, flag) {} - virtual std::vector current_key() const { - std::vector res; - res.push_back(key_); - return res; - } - virtual void Do(std::shared_ptr partition = nullptr); - virtual Cmd* Clone() override { - return new PfAddCmd(*this); - } - private: - std::string key_; - std::vector values_; - virtual void DoInitial() override; - virtual void Clear() { - values_.clear(); - } -}; - -class PfCountCmd : public Cmd { - public: - PfCountCmd(const std::string& name, int arity, uint16_t flag) - : Cmd(name, arity, flag) {} - virtual void Do(std::shared_ptr partition = nullptr); - virtual Cmd* Clone() override { - return new PfCountCmd(*this); - } - private: - std::vector keys_; - virtual void DoInitial() override; - virtual void Clear() { - keys_.clear(); - } -}; - -class PfMergeCmd : public Cmd { - public: - PfMergeCmd(const std::string& name, int arity, uint16_t flag) - : Cmd(name, arity, flag) {} - virtual void Do(std::shared_ptr partition = nullptr); - virtual Cmd* Clone() override { - return new PfMergeCmd(*this); - } - private: - std::vector keys_; - virtual void DoInitial() override; - virtual void Clear() { - keys_.clear(); - } -}; - -#endif diff --git a/tools/pika_migrate/include/pika_kv.h b/tools/pika_migrate/include/pika_kv.h deleted file mode 100644 index f23c8c07ca..0000000000 --- a/tools/pika_migrate/include/pika_kv.h +++ /dev/null @@ -1,736 +0,0 @@ -// Copyright (c) 2015-present, Qihoo, Inc. All rights reserved. -// This source code is licensed under the BSD-style license found in the -// LICENSE file in the root directory of this source tree. An additional grant -// of patent rights can be found in the PATENTS file in the same directory. 
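
PfCountCmd and PfMergeCmd operate on a key list rather than a single key, which is why they carry a keys_ vector and override Clear() to reset state between reuses of the cloned object. A sketch of the corresponding parse step, assuming the argv_ and res_ members that pika_command.h gives every Cmd:

    // Representative (hypothetical) parse step for PFCOUNT key [key ...].
    void PfCountCmd::DoInitial() {
      if (!CheckArg(argv_.size())) {                  // arity check from the Cmd base class
        res_.SetRes(CmdRes::kWrongNum, "pfcount");
        return;
      }
      keys_.assign(argv_.begin() + 1, argv_.end());   // everything after the command name is a key
    }
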
- -#ifndef PIKA_KV_H_ -#define PIKA_KV_H_ - -#include "blackwidow/blackwidow.h" - -#include "include/pika_command.h" -#include "include/pika_partition.h" - - -/* - * kv - */ -class SetCmd : public Cmd { - public: - enum SetCondition {kNONE, kNX, kXX, kVX, kEXORPX}; - SetCmd(const std::string& name , int arity, uint16_t flag) - : Cmd(name, arity, flag), sec_(0), condition_(kNONE) {}; - virtual std::vector current_key() const { - std::vector res; - res.push_back(key_); - return res; - } - virtual void Do(std::shared_ptr partition = nullptr); - virtual Cmd* Clone() override { - return new SetCmd(*this); - } - - private: - std::string key_; - std::string value_; - std::string target_; - int32_t success_; - int64_t sec_; - SetCmd::SetCondition condition_; - virtual void DoInitial() override; - virtual void Clear() override { - sec_ = 0; - success_ = 0; - condition_ = kNONE; - } - virtual std::string ToBinlog( - uint32_t exec_time, - const std::string& server_id, - uint64_t logic_id, - uint32_t filenum, - uint64_t offset) override; -}; - -class GetCmd : public Cmd { - public: - GetCmd(const std::string& name , int arity, uint16_t flag) - : Cmd(name, arity, flag) {}; - virtual std::vector current_key() const { - std::vector res; - res.push_back(key_); - return res; - } - virtual void Do(std::shared_ptr partition = nullptr); - virtual Cmd* Clone() override { - return new GetCmd(*this); - } - private: - std::string key_; - virtual void DoInitial() override; -}; - -class DelCmd : public Cmd { - public: - DelCmd(const std::string& name , int arity, uint16_t flag) - : Cmd(name, arity, flag) {}; - virtual void Do(std::shared_ptr partition = nullptr); - virtual std::vector current_key() const { - return keys_; - } - virtual Cmd* Clone() override { - return new DelCmd(*this); - } - - private: - std::vector keys_; - virtual void DoInitial() override; -}; - -class IncrCmd : public Cmd { - public: - IncrCmd(const std::string& name , int arity, uint16_t flag) - : Cmd(name, arity, flag) {}; - virtual std::vector current_key() const { - std::vector res; - res.push_back(key_); - return res; - } - virtual void Do(std::shared_ptr partition = nullptr); - virtual Cmd* Clone() override { - return new IncrCmd(*this); - } - private: - std::string key_; - int64_t new_value_; - virtual void DoInitial() override; -}; - -class IncrbyCmd : public Cmd { - public: - IncrbyCmd(const std::string& name , int arity, uint16_t flag) - : Cmd(name, arity, flag) {}; - virtual std::vector current_key() const { - std::vector res; - res.push_back(key_); - return res; - } - virtual void Do(std::shared_ptr partition = nullptr); - virtual Cmd* Clone() override { - return new IncrbyCmd(*this); - } - private: - std::string key_; - int64_t by_, new_value_; - virtual void DoInitial() override; -}; - -class IncrbyfloatCmd : public Cmd { - public: - IncrbyfloatCmd(const std::string& name , int arity, uint16_t flag) - : Cmd(name, arity, flag) {}; - virtual std::vector current_key() const { - std::vector res; - res.push_back(key_); - return res; - } - virtual void Do(std::shared_ptr partition = nullptr); - virtual Cmd* Clone() override { - return new IncrbyfloatCmd(*this); - } - private: - std::string key_, value_, new_value_; - double by_; - virtual void DoInitial() override; -}; - -class DecrCmd : public Cmd { - public: - DecrCmd(const std::string& name , int arity, uint16_t flag) - : Cmd(name, arity, flag) {}; - virtual std::vector current_key() const { - std::vector res; - res.push_back(key_); - return res; - } - virtual void 
Do(std::shared_ptr partition = nullptr); - virtual Cmd* Clone() override { - return new DecrCmd(*this); - } - private: - std::string key_; - int64_t new_value_; - virtual void DoInitial() override; -}; - -class DecrbyCmd : public Cmd { - public: - DecrbyCmd(const std::string& name , int arity, uint16_t flag) - : Cmd(name, arity, flag) {}; - virtual std::vector current_key() const { - std::vector res; - res.push_back(key_); - return res; - } - virtual void Do(std::shared_ptr partition = nullptr); - virtual Cmd* Clone() override { - return new DecrbyCmd(*this); - } - private: - std::string key_; - int64_t by_, new_value_; - virtual void DoInitial() override; -}; - -class GetsetCmd : public Cmd { - public: - GetsetCmd(const std::string& name , int arity, uint16_t flag) - : Cmd(name, arity, flag) {}; - virtual std::vector current_key() const { - std::vector res; - res.push_back(key_); - return res; - } - virtual void Do(std::shared_ptr partition = nullptr); - virtual Cmd* Clone() override { - return new GetsetCmd(*this); - } - private: - std::string key_; - std::string new_value_; - virtual void DoInitial() override; -}; - -class AppendCmd : public Cmd { - public: - AppendCmd(const std::string& name , int arity, uint16_t flag) - : Cmd(name, arity, flag) {}; - virtual std::vector current_key() const { - std::vector res; - res.push_back(key_); - return res; - } - virtual void Do(std::shared_ptr partition = nullptr); - virtual Cmd* Clone() override { - return new AppendCmd(*this); - } - private: - std::string key_; - std::string value_; - virtual void DoInitial() override; -}; - -class MgetCmd : public Cmd { - public: - MgetCmd(const std::string& name , int arity, uint16_t flag) - : Cmd(name, arity, flag) {}; - virtual void Do(std::shared_ptr partition = nullptr); - virtual std::vector current_key() const { - return keys_; - } - virtual Cmd* Clone() override { - return new MgetCmd(*this); - } - - private: - std::vector keys_; - virtual void DoInitial() override; -}; - -class KeysCmd : public Cmd { - public: - KeysCmd(const std::string& name , int arity, uint16_t flag) - : Cmd(name, arity, flag), type_(blackwidow::DataType::kAll) {} - virtual void Do(std::shared_ptr partition = nullptr); - virtual Cmd* Clone() override { - return new KeysCmd(*this); - } - private: - std::string pattern_; - blackwidow::DataType type_; - virtual void DoInitial() override; - virtual void Clear() { - type_ = blackwidow::DataType::kAll; - } -}; - -class SetnxCmd : public Cmd { - public: - SetnxCmd(const std::string& name, int arity, uint16_t flag) - : Cmd(name, arity, flag) {} - virtual std::vector current_key() const { - std::vector res; - res.push_back(key_); - return res; - } - virtual void Do(std::shared_ptr partition = nullptr); - virtual Cmd* Clone() override { - return new SetnxCmd(*this); - } - private: - std::string key_; - std::string value_; - int32_t success_; - virtual void DoInitial() override; - virtual std::string ToBinlog( - uint32_t exec_time, - const std::string& server_id, - uint64_t logic_id, - uint32_t filenum, - uint64_t offset) override; -}; - -class SetexCmd : public Cmd { - public: - SetexCmd(const std::string& name, int arity, uint16_t flag) - : Cmd(name, arity, flag) {} - virtual std::vector current_key() const { - std::vector res; - res.push_back(key_); - return res; - } - virtual void Do(std::shared_ptr partition = nullptr); - virtual Cmd* Clone() override { - return new SetexCmd(*this); - } - private: - std::string key_; - int64_t sec_; - std::string value_; - virtual void DoInitial() 
override; - virtual std::string ToBinlog( - uint32_t exec_time, - const std::string& server_id, - uint64_t logic_id, - uint32_t filenum, - uint64_t offset) override; -}; - -class PsetexCmd : public Cmd { - public: - PsetexCmd(const std::string& name, int arity, uint16_t flag) - : Cmd(name, arity, flag) {} - virtual std::vector current_key() const { - std::vector res; - res.push_back(key_); - return res; - } - virtual void Do(std::shared_ptr partition = nullptr); - virtual Cmd* Clone() override { - return new PsetexCmd(*this); - } - private: - std::string key_; - int64_t usec_; - std::string value_; - virtual void DoInitial() override; - virtual std::string ToBinlog( - uint32_t exec_time, - const std::string& server_id, - uint64_t logic_id, - uint32_t filenum, - uint64_t offset) override; -}; - -class DelvxCmd : public Cmd { - public: - DelvxCmd(const std::string& name, int arity, uint16_t flag) - : Cmd(name, arity, flag) {} - virtual std::vector current_key() const { - std::vector res; - res.push_back(key_); - return res; - } - virtual void Do(std::shared_ptr partition = nullptr); - virtual Cmd* Clone() override { - return new DelvxCmd(*this); - } - private: - std::string key_; - std::string value_; - int32_t success_; - virtual void DoInitial() override; -}; - -class MsetCmd : public Cmd { - public: - MsetCmd(const std::string& name, int arity, uint16_t flag) - : Cmd(name, arity, flag) {} - virtual void Do(std::shared_ptr partition = nullptr); - virtual std::vector current_key() const { - std::vector res; - for (auto& kv : kvs_) { - res.push_back(kv.key); - } - return res; - } - virtual Cmd* Clone() override { - return new MsetCmd(*this); - } - private: - std::vector kvs_; - virtual void DoInitial() override; -}; - -class MsetnxCmd : public Cmd { - public: - MsetnxCmd(const std::string& name, int arity, uint16_t flag) - : Cmd(name, arity, flag) {} - virtual void Do(std::shared_ptr partition = nullptr); - virtual Cmd* Clone() override { - return new MsetnxCmd(*this); - } - private: - std::vector kvs_; - int32_t success_; - virtual void DoInitial() override; -}; - -class GetrangeCmd : public Cmd { - public: - GetrangeCmd(const std::string& name, int arity, uint16_t flag) - : Cmd(name, arity, flag) {} - virtual std::vector current_key() const { - std::vector res; - res.push_back(key_); - return res; - } - virtual void Do(std::shared_ptr partition = nullptr); - virtual Cmd* Clone() override { - return new GetrangeCmd(*this); - } - private: - std::string key_; - int64_t start_; - int64_t end_; - virtual void DoInitial() override; -}; - -class SetrangeCmd : public Cmd { - public: - SetrangeCmd(const std::string& name, int arity, uint16_t flag) - : Cmd(name, arity, flag) {} - virtual std::vector current_key() const { - std::vector res; - res.push_back(key_); - return res; - } - virtual void Do(std::shared_ptr partition = nullptr); - virtual Cmd* Clone() override { - return new SetrangeCmd(*this); - } - private: - std::string key_; - int64_t offset_; - std::string value_; - virtual void DoInitial() override; -}; - -class StrlenCmd : public Cmd { - public: - StrlenCmd(const std::string& name, int arity, uint16_t flag) - : Cmd(name, arity, flag) {} - virtual std::vector current_key() const { - std::vector res; - res.push_back(key_); - return res; - } - virtual void Do(std::shared_ptr partition = nullptr); - virtual Cmd* Clone() override { - return new StrlenCmd(*this); - } - private: - std::string key_; - virtual void DoInitial() override; -}; - -class ExistsCmd : public Cmd { - public: - 
ExistsCmd(const std::string& name, int arity, uint16_t flag) - : Cmd(name, arity, flag) {} - virtual void Do(std::shared_ptr partition = nullptr); - virtual std::vector current_key() const { - return keys_; - } - virtual Cmd* Clone() override { - return new ExistsCmd(*this); - } - - private: - std::vector keys_; - virtual void DoInitial() override; -}; - -class ExpireCmd : public Cmd { - public: - ExpireCmd(const std::string& name, int arity, uint16_t flag) - : Cmd(name, arity, flag) {} - virtual std::vector current_key() const { - std::vector res; - res.push_back(key_); - return res; - } - virtual void Do(std::shared_ptr partition = nullptr); - virtual Cmd* Clone() override { - return new ExpireCmd(*this); - } - private: - std::string key_; - int64_t sec_; - virtual void DoInitial() override; - virtual std::string ToBinlog( - uint32_t exec_time, - const std::string& server_id, - uint64_t logic_id, - uint32_t filenum, - uint64_t offset) override; -}; - -class PexpireCmd : public Cmd { - public: - PexpireCmd(const std::string& name, int arity, uint16_t flag) - : Cmd(name, arity, flag) {} - virtual std::vector current_key() const { - std::vector res; - res.push_back(key_); - return res; - } - virtual void Do(std::shared_ptr partition = nullptr); - virtual Cmd* Clone() override { - return new PexpireCmd(*this); - } - private: - std::string key_; - int64_t msec_; - virtual void DoInitial() override; - virtual std::string ToBinlog( - uint32_t exec_time, - const std::string& server_id, - uint64_t logic_id, - uint32_t filenum, - uint64_t offset) override; -}; - -class ExpireatCmd : public Cmd { - public: - ExpireatCmd(const std::string& name, int arity, uint16_t flag) - : Cmd(name, arity, flag) {} - virtual std::vector current_key() const { - std::vector res; - res.push_back(key_); - return res; - } - virtual void Do(std::shared_ptr partition = nullptr); - virtual Cmd* Clone() override { - return new ExpireatCmd(*this); - } - private: - std::string key_; - int64_t time_stamp_; - virtual void DoInitial() override; -}; - -class PexpireatCmd : public Cmd { - public: - PexpireatCmd(const std::string& name, int arity, uint16_t flag) - : Cmd(name, arity, flag) {} - virtual std::vector current_key() const { - std::vector res; - res.push_back(key_); - return res; - } - virtual void Do(std::shared_ptr partition = nullptr); - virtual Cmd* Clone() override { - return new PexpireatCmd(*this); - } - private: - std::string key_; - int64_t time_stamp_ms_; - virtual void DoInitial() override; - virtual std::string ToBinlog( - uint32_t exec_time, - const std::string& server_id, - uint64_t logic_id, - uint32_t filenum, - uint64_t offset) override; -}; - -class TtlCmd : public Cmd { - public: - TtlCmd(const std::string& name, int arity, uint16_t flag) - : Cmd(name, arity, flag) {} - virtual std::vector current_key() const { - std::vector res; - res.push_back(key_); - return res; - } - virtual void Do(std::shared_ptr partition = nullptr); - virtual Cmd* Clone() override { - return new TtlCmd(*this); - } - private: - std::string key_; - virtual void DoInitial() override; -}; - -class PttlCmd : public Cmd { - public: - PttlCmd(const std::string& name, int arity, uint16_t flag) - : Cmd(name, arity, flag) {} - virtual std::vector current_key() const { - std::vector res; - res.push_back(key_); - return res; - } - virtual void Do(std::shared_ptr partition = nullptr); - virtual Cmd* Clone() override { - return new PttlCmd(*this); - } - private: - std::string key_; - virtual void DoInitial() override; -}; - -class 
PersistCmd : public Cmd { - public: - PersistCmd(const std::string& name, int arity, uint16_t flag) - : Cmd(name, arity, flag) {} - virtual std::vector current_key() const { - std::vector res; - res.push_back(key_); - return res; - } - virtual void Do(std::shared_ptr partition = nullptr); - virtual Cmd* Clone() override { - return new PersistCmd(*this); - } - private: - std::string key_; - virtual void DoInitial() override; -}; - -class TypeCmd : public Cmd { - public: - TypeCmd(const std::string& name, int arity, uint16_t flag) - : Cmd(name, arity, flag) {} - virtual std::vector current_key() const { - std::vector res; - res.push_back(key_); - return res; - } - virtual void Do(std::shared_ptr partition = nullptr); - virtual Cmd* Clone() override { - return new TypeCmd(*this); - } - private: - std::string key_; - virtual void DoInitial() override; -}; - -class ScanCmd : public Cmd { - public: - ScanCmd(const std::string& name, int arity, uint16_t flag) - : Cmd(name, arity, flag), pattern_("*"), count_(10) {} - virtual void Do(std::shared_ptr partition = nullptr); - virtual Cmd* Clone() override { - return new ScanCmd(*this); - } - private: - int64_t cursor_; - std::string pattern_; - int64_t count_; - virtual void DoInitial() override; - virtual void Clear() { - pattern_ = "*"; - count_ = 10; - } -}; - -class ScanxCmd : public Cmd { - public: - ScanxCmd(const std::string& name, int arity, uint16_t flag) - : Cmd(name, arity, flag), pattern_("*"), count_(10) {} - virtual void Do(std::shared_ptr partition = nullptr); - virtual Cmd* Clone() override { - return new ScanxCmd(*this); - } - private: - blackwidow::DataType type_; - std::string start_key_; - std::string pattern_; - int64_t count_; - virtual void DoInitial() override; - virtual void Clear() { - pattern_ = "*"; - count_ = 10; - } -}; - -class PKSetexAtCmd : public Cmd { -public: - PKSetexAtCmd(const std::string& name, int arity, uint16_t flag) - : Cmd(name, arity, flag), time_stamp_(0) {} - virtual std::vector current_key() const { - std::vector res; - res.push_back(key_); - return res; - } - virtual void Do(std::shared_ptr partition = nullptr); - virtual Cmd* Clone() override { - return new PKSetexAtCmd(*this); - } -private: - std::string key_; - std::string value_; - int64_t time_stamp_; - virtual void DoInitial() override; - virtual void Clear() { - time_stamp_ = 0; - } -}; - -class PKScanRangeCmd : public Cmd { - public: - PKScanRangeCmd(const std::string& name, int arity, uint16_t flag) - : Cmd(name, arity, flag), pattern_("*"), limit_(10), string_with_value(false) {} - virtual void Do(std::shared_ptr partition = nullptr); - virtual Cmd* Clone() override { - return new PKScanRangeCmd(*this); - } - private: - blackwidow::DataType type_; - std::string key_start_; - std::string key_end_; - std::string pattern_; - int64_t limit_; - bool string_with_value; - virtual void DoInitial() override; - virtual void Clear() { - pattern_ = "*"; - limit_ = 10; - string_with_value = false; - } -}; - -class PKRScanRangeCmd : public Cmd { - public: - PKRScanRangeCmd(const std::string& name, int arity, uint16_t flag) - : Cmd(name, arity, flag), pattern_("*"), limit_(10), string_with_value(false) {} - virtual void Do(std::shared_ptr partition = nullptr); - virtual Cmd* Clone() override { - return new PKRScanRangeCmd(*this); - } - private: - blackwidow::DataType type_; - std::string key_start_; - std::string key_end_; - std::string pattern_; - int64_t limit_; - bool string_with_value; - virtual void DoInitial() override; - virtual void Clear() { - 
pattern_ = "*"; - limit_ = 10; - string_with_value = false; - } -}; -#endif diff --git a/tools/pika_migrate/include/pika_list.h b/tools/pika_migrate/include/pika_list.h deleted file mode 100644 index 3f18129554..0000000000 --- a/tools/pika_migrate/include/pika_list.h +++ /dev/null @@ -1,289 +0,0 @@ -// Copyright (c) 2015-present, Qihoo, Inc. All rights reserved. -// This source code is licensed under the BSD-style license found in the -// LICENSE file in the root directory of this source tree. An additional grant -// of patent rights can be found in the PATENTS file in the same directory. - -#ifndef PIKA_LIST_H_ -#define PIKA_LIST_H_ - -#include "blackwidow/blackwidow.h" - -#include "include/pika_command.h" -#include "include/pika_partition.h" - -/* - * list - */ -class LIndexCmd : public Cmd { - public: - LIndexCmd(const std::string& name, int arity, uint16_t flag) - : Cmd(name, arity, flag), index_(0) {}; - virtual std::vector current_key() const { - std::vector res; - res.push_back(key_); - return res; - } - virtual void Do(std::shared_ptr partition = nullptr); - virtual Cmd* Clone() override { - return new LIndexCmd(*this); - } - private: - std::string key_; - int64_t index_; - virtual void DoInitial() override; - virtual void Clear() { - index_ = 0; - } -}; - -class LInsertCmd : public Cmd { - public: - LInsertCmd(const std::string& name, int arity, uint16_t flag) - : Cmd(name, arity, flag), dir_(blackwidow::After) {}; - virtual std::vector current_key() const { - std::vector res; - res.push_back(key_); - return res; - } - virtual void Do(std::shared_ptr partition = nullptr); - virtual Cmd* Clone() override { - return new LInsertCmd(*this); - } - private: - std::string key_; - blackwidow::BeforeOrAfter dir_; - std::string pivot_; - std::string value_; - virtual void DoInitial() override; -}; - -class LLenCmd : public Cmd { - public: - LLenCmd(const std::string& name, int arity, uint16_t flag) - : Cmd(name, arity, flag) {}; - virtual std::vector current_key() const { - std::vector res; - res.push_back(key_); - return res; - } - virtual void Do(std::shared_ptr partition = nullptr); - virtual Cmd* Clone() override { - return new LLenCmd(*this); - } - private: - std::string key_; - virtual void DoInitial() override; -}; - -class LPopCmd : public Cmd { - public: - LPopCmd(const std::string& name, int arity, uint16_t flag) - : Cmd(name, arity, flag) {}; - virtual std::vector current_key() const { - std::vector res; - res.push_back(key_); - return res; - } - virtual void Do(std::shared_ptr partition = nullptr); - virtual Cmd* Clone() override { - return new LPopCmd(*this); - } - private: - std::string key_; - virtual void DoInitial() override; -}; - -class LPushCmd : public Cmd { - public: - LPushCmd(const std::string& name, int arity, uint16_t flag) - : Cmd(name, arity, flag) {}; - virtual std::vector current_key() const { - std::vector res; - res.push_back(key_); - return res; - } - virtual void Do(std::shared_ptr partition = nullptr); - virtual Cmd* Clone() override { - return new LPushCmd(*this); - } - private: - std::string key_; - std::vector values_; - virtual void DoInitial() override; - virtual void Clear() { - values_.clear(); - } -}; - -class LPushxCmd : public Cmd { - public: - LPushxCmd(const std::string& name, int arity, uint16_t flag) - : Cmd(name, arity, flag) {}; - virtual std::vector current_key() const { - std::vector res; - res.push_back(key_); - return res; - } - virtual void Do(std::shared_ptr partition = nullptr); - virtual Cmd* Clone() override { - return new 
LPushxCmd(*this); - } - private: - std::string key_; - std::string value_; - virtual void DoInitial() override; -}; - -class LRangeCmd : public Cmd { - public: - LRangeCmd(const std::string& name, int arity, uint16_t flag) - : Cmd(name, arity, flag), left_(0), right_(0) {}; - virtual std::vector current_key() const { - std::vector res; - res.push_back(key_); - return res; - } - virtual void Do(std::shared_ptr partition = nullptr); - virtual Cmd* Clone() override { - return new LRangeCmd(*this); - } - private: - std::string key_; - int64_t left_; - int64_t right_; - virtual void DoInitial() override; -}; - -class LRemCmd : public Cmd { - public: - LRemCmd(const std::string& name, int arity, uint16_t flag) - : Cmd(name, arity, flag), count_(0) {}; - virtual std::vector current_key() const { - std::vector res; - res.push_back(key_); - return res; - } - virtual void Do(std::shared_ptr partition = nullptr); - virtual Cmd* Clone() override { - return new LRemCmd(*this); - } - private: - std::string key_; - int64_t count_; - std::string value_; - virtual void DoInitial() override; -}; - -class LSetCmd : public Cmd { - public: - LSetCmd(const std::string& name, int arity, uint16_t flag) - : Cmd(name, arity, flag), index_(0) {}; - virtual std::vector current_key() const { - std::vector res; - res.push_back(key_); - return res; - } - virtual void Do(std::shared_ptr partition = nullptr); - virtual Cmd* Clone() override { - return new LSetCmd(*this); - } - private: - std::string key_; - int64_t index_; - std::string value_; - virtual void DoInitial() override; -}; - -class LTrimCmd : public Cmd { - public: - LTrimCmd(const std::string& name, int arity, uint16_t flag) - : Cmd(name, arity, flag), start_(0), stop_(0) {}; - virtual std::vector current_key() const { - std::vector res; - res.push_back(key_); - return res; - } - virtual void Do(std::shared_ptr partition = nullptr); - virtual Cmd* Clone() override { - return new LTrimCmd(*this); - } - private: - std::string key_; - int64_t start_; - int64_t stop_; - virtual void DoInitial() override; -}; - -class RPopCmd : public Cmd { - public: - RPopCmd(const std::string& name, int arity, uint16_t flag) - : Cmd(name, arity, flag) {}; - virtual std::vector current_key() const { - std::vector res; - res.push_back(key_); - return res; - } - virtual void Do(std::shared_ptr partition = nullptr); - virtual Cmd* Clone() override { - return new RPopCmd(*this); - } - private: - std::string key_; - virtual void DoInitial() override; -}; - -class RPopLPushCmd : public Cmd { - public: - RPopLPushCmd(const std::string& name, int arity, uint16_t flag) - : Cmd(name, arity, flag) {}; - virtual void Do(std::shared_ptr partition = nullptr); - virtual Cmd* Clone() override { - return new RPopLPushCmd(*this); - } - private: - std::string source_; - std::string receiver_; - virtual void DoInitial() override; -}; - -class RPushCmd : public Cmd { - public: - RPushCmd(const std::string& name, int arity, uint16_t flag) - : Cmd(name, arity, flag) {}; - virtual std::vector current_key() const { - std::vector res; - res.push_back(key_); - return res; - } - virtual void Do(std::shared_ptr partition = nullptr); - virtual Cmd* Clone() override { - return new RPushCmd(*this); - } - private: - std::string key_; - std::vector values_; - virtual void DoInitial() override; - virtual void Clear() { - values_.clear(); - } -}; - -class RPushxCmd : public Cmd { - public: - RPushxCmd(const std::string& name, int arity, uint16_t flag) - : Cmd(name, arity, flag) {}; - virtual std::vector 
current_key() const { - std::vector res; - res.push_back(key_); - return res; - } - virtual void Do(std::shared_ptr partition = nullptr); - virtual Cmd* Clone() override { - return new RPushxCmd(*this); - } - private: - std::string key_; - std::string value_; - virtual void DoInitial() override; -}; -#endif diff --git a/tools/pika_migrate/include/pika_meta.h b/tools/pika_migrate/include/pika_meta.h deleted file mode 100644 index de576bfa63..0000000000 --- a/tools/pika_migrate/include/pika_meta.h +++ /dev/null @@ -1,35 +0,0 @@ -// Copyright (c) 2015-present, Qihoo, Inc. All rights reserved. -// This source code is licensed under the BSD-style license found in the -// LICENSE file in the root directory of this source tree. An additional grant -// of patent rights can be found in the PATENTS file in the same directory. - -#ifndef PIKA_META -#define PIKA_META - -#include "slash/include/env.h" -#include "slash/include/slash_mutex.h" - -#include "include/pika_define.h" - -using slash::Status; - -class PikaMeta { - public: - PikaMeta(); - ~PikaMeta(); - - void SetPath(const std::string& path); - - Status StableSave(const std::vector& table_structs); - Status ParseMeta(std::vector* const table_structs); - - private: - pthread_rwlock_t rwlock_; - std::string local_meta_path_; - - // No copying allowed; - PikaMeta(const PikaMeta&); - void operator=(const PikaMeta&); -}; - -#endif diff --git a/tools/pika_migrate/include/pika_monitor_thread.h b/tools/pika_migrate/include/pika_monitor_thread.h deleted file mode 100644 index f7900e2af7..0000000000 --- a/tools/pika_migrate/include/pika_monitor_thread.h +++ /dev/null @@ -1,48 +0,0 @@ -// Copyright (c) 2015-present, Qihoo, Inc. All rights reserved. -// This source code is licensed under the BSD-style license found in the -// LICENSE file in the root directory of this source tree. An additional grant -// of patent rights can be found in the PATENTS file in the same directory. - -#ifndef PIKA_MONITOR_THREAD_H_ -#define PIKA_MONITOR_THREAD_H_ - -#include -#include -#include -#include - -#include "pink/include/pink_thread.h" -#include "slash/include/slash_mutex.h" - -#include "include/pika_define.h" -#include "include/pika_client_conn.h" - -class PikaMonitorThread : public pink::Thread { - public: - PikaMonitorThread(); - virtual ~PikaMonitorThread(); - - void AddMonitorClient(std::shared_ptr client_ptr); - void AddMonitorMessage(const std::string &monitor_message); - int32_t ThreadClientList(std::vector* client = NULL); - bool ThreadClientKill(const std::string& ip_port = "all"); - bool HasMonitorClients(); - - private: - void AddCronTask(MonitorCronTask task); - bool FindClient(const std::string& ip_port); - pink::WriteStatus SendMessage(int32_t fd, std::string& message); - void RemoveMonitorClient(const std::string& ip_port); - - std::atomic has_monitor_clients_; - slash::Mutex monitor_mutex_protector_; - slash::CondVar monitor_cond_; - - std::list monitor_clients_; - std::deque monitor_messages_; - std::queue cron_tasks_; - - virtual void* ThreadMain(); - void RemoveMonitorClient(int32_t client_fd); -}; -#endif diff --git a/tools/pika_migrate/include/pika_partition.h b/tools/pika_migrate/include/pika_partition.h deleted file mode 100644 index 461955b85a..0000000000 --- a/tools/pika_migrate/include/pika_partition.h +++ /dev/null @@ -1,169 +0,0 @@ -// Copyright (c) 2018-present, Qihoo, Inc. All rights reserved. -// This source code is licensed under the BSD-style license found in the -// LICENSE file in the root directory of this source tree. 
An additional grant -// of patent rights can be found in the PATENTS file in the same directory. - -#ifndef PIKA_PARTITION_H_ -#define PIKA_PARTITION_H_ - -#include "blackwidow/blackwidow.h" -#include "blackwidow/backupable.h" -#include "slash/include/scope_record_lock.h" - -#include "include/pika_binlog.h" - -class Cmd; - -/* - *Keyscan used - */ -struct KeyScanInfo { - time_t start_time; - std::string s_start_time; - int32_t duration; - std::vector key_infos; //the order is strings, hashes, lists, zsets, sets - bool key_scaning_; - KeyScanInfo() : - start_time(0), - s_start_time("1970-01-01 08:00:00"), - duration(-3), - key_infos({{0, 0, 0, 0}, {0, 0, 0, 0}, {0, 0, 0, 0}, {0, 0, 0, 0}, {0, 0, 0, 0}}), - key_scaning_(false) { - } -}; - - -struct BgSaveInfo { - bool bgsaving; - time_t start_time; - std::string s_start_time; - std::string path; - uint32_t filenum; - uint64_t offset; - BgSaveInfo() : bgsaving(false), filenum(0), offset(0) {} - void Clear() { - bgsaving = false; - path.clear(); - filenum = 0; - offset = 0; - } -}; - -class Partition : public std::enable_shared_from_this { - public: - Partition(const std::string& table_name, - uint32_t partition_id, - const std::string& table_db_path, - const std::string& table_log_path); - virtual ~Partition(); - - std::string GetTableName() const; - uint32_t GetPartitionId() const; - std::string GetPartitionName() const; - std::shared_ptr logger() const; - std::shared_ptr db() const; - - void Compact(const blackwidow::DataType& type); - // needd to hold logger_->Lock() - Status WriteBinlog(const std::string& binlog); - - void DbRWLockWriter(); - void DbRWLockReader(); - void DbRWUnLock(); - - slash::lock::LockMgr* LockMgr(); - - void SetBinlogIoError(bool error); - bool IsBinlogIoError(); - bool GetBinlogOffset(BinlogOffset* const boffset); - bool SetBinlogOffset(const BinlogOffset& boffset); - - void PrepareRsync(); - bool TryUpdateMasterOffset(); - bool ChangeDb(const std::string& new_path); - - void Leave(); - void Close(); - void MoveToTrash(); - - // BgSave use; - bool IsBgSaving(); - void BgSavePartition(); - BgSaveInfo bgsave_info(); - - // FlushDB & FlushSubDB use - bool FlushDB(); - bool FlushSubDB(const std::string& db_name); - - // Purgelogs use - bool PurgeLogs(uint32_t to = 0, bool manual = false); - void ClearPurge(); - - // key scan info use - Status GetKeyNum(std::vector* key_info); - KeyScanInfo GetKeyScanInfo(); - - private: - std::string table_name_; - uint32_t partition_id_; - - std::string db_path_; - std::string log_path_; - std::string bgsave_sub_path_; - std::string dbsync_path_; - std::string partition_name_; - - bool opened_; - std::shared_ptr logger_; - std::atomic binlog_io_error_; - - pthread_rwlock_t db_rwlock_; - slash::lock::LockMgr* lock_mgr_; - std::shared_ptr db_; - - bool full_sync_; - - slash::Mutex key_info_protector_; - KeyScanInfo key_scan_info_; - - /* - * BgSave use - */ - static void DoBgSave(void* arg); - bool RunBgsaveEngine(); - bool InitBgsaveEnv(); - bool InitBgsaveEngine(); - void ClearBgsave(); - void FinishBgsave(); - BgSaveInfo bgsave_info_; - slash::Mutex bgsave_protector_; - blackwidow::BackupEngine* bgsave_engine_; - - /* - * Purgelogs use - */ - static void DoPurgeLogs(void* arg); - bool PurgeFiles(uint32_t to, bool manual); - bool GetBinlogFiles(std::map& binlogs); - std::atomic purging_; - - // key scan info use - void InitKeyScan(); - - /* - * No allowed copy and copy assign - */ - Partition(const Partition&); - void operator=(const Partition&); - -}; - -struct PurgeArg { - 
std::shared_ptr partition; - uint32_t to; - bool manual; - bool force; // Ignore the delete window -}; - - -#endif diff --git a/tools/pika_migrate/include/pika_pubsub.h b/tools/pika_migrate/include/pika_pubsub.h deleted file mode 100644 index 737dc752c0..0000000000 --- a/tools/pika_migrate/include/pika_pubsub.h +++ /dev/null @@ -1,93 +0,0 @@ -// Copyright (c) 2015-present, Qihoo, Inc. All rights reserved. -// This source code is licensed under the BSD-style license found in the -// LICENSE file in the root directory of this source tree. An additional grant -// of patent rights can be found in the PATENTS file in the same directory. - -#ifndef PIKA_PUBSUB_H_ -#define PIKA_PUBSUB_H_ - -#include "pika_command.h" - -/* - * pubsub - */ -class PublishCmd : public Cmd { - public: - PublishCmd(const std::string& name, int arity, uint16_t flag) - : Cmd(name, arity, flag) {} - virtual void Do(std::shared_ptr partition = nullptr) override; - virtual Cmd* Clone() override { - return new PublishCmd(*this); - } - private: - std::string channel_; - std::string msg_; - virtual void DoInitial() override; -}; - -class SubscribeCmd : public Cmd { - public: - SubscribeCmd(const std::string& name, int arity, uint16_t flag) - : Cmd(name, arity, flag) {} - virtual void Do(std::shared_ptr partition = nullptr) override; - virtual Cmd* Clone() override { - return new SubscribeCmd(*this); - } - private: - virtual void DoInitial() override; -}; - -class UnSubscribeCmd : public Cmd { - public: - UnSubscribeCmd(const std::string& name, int arity, uint16_t flag) - : Cmd(name, arity, flag) {} - virtual void Do(std::shared_ptr partition = nullptr) override; - virtual Cmd* Clone() override { - return new UnSubscribeCmd(*this); - } - private: - virtual void DoInitial() override; -}; - -class PUnSubscribeCmd : public Cmd { - public: - PUnSubscribeCmd(const std::string& name, int arity, uint16_t flag) - : Cmd(name, arity, flag) {} - virtual void Do(std::shared_ptr partition = nullptr) override; - virtual Cmd* Clone() override { - return new PUnSubscribeCmd(*this); - } - private: - virtual void DoInitial() override; -}; - -class PSubscribeCmd : public Cmd { - public: - PSubscribeCmd(const std::string& name, int arity, uint16_t flag) - : Cmd(name, arity, flag) {} - virtual void Do(std::shared_ptr partition = nullptr) override; - virtual Cmd* Clone() override { - return new PSubscribeCmd(*this); - } - private: - virtual void DoInitial() override; -}; - -class PubSubCmd : public Cmd { - public: - PubSubCmd(const std::string& name, int arity, uint16_t flag) - : Cmd(name, arity, flag) {} - virtual void Do(std::shared_ptr partition = nullptr) override; - virtual Cmd* Clone() override { - return new PubSubCmd(*this); - } - private: - std::string subcommand_; - std::vector arguments_; - virtual void DoInitial() override; - virtual void Clear() { - arguments_.clear(); - } -}; - -#endif // INCLUDE_PIKA_PUBSUB_H_ diff --git a/tools/pika_migrate/include/pika_repl_bgworker.h b/tools/pika_migrate/include/pika_repl_bgworker.h deleted file mode 100644 index e74f41e3a9..0000000000 --- a/tools/pika_migrate/include/pika_repl_bgworker.h +++ /dev/null @@ -1,43 +0,0 @@ -// Copyright (c) 2019-present, Qihoo, Inc. All rights reserved. -// This source code is licensed under the BSD-style license found in the -// LICENSE file in the root directory of this source tree. An additional grant -// of patent rights can be found in the PATENTS file in the same directory. 
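
Partition, declared above, couples one blackwidow instance with its binlog and exposes a two-level locking scheme: DbRWLockReader()/DbRWLockWriter() guard the whole DB handle (for example against ChangeDb() during a full sync), while LockMgr() hands out per-key record locks. A hedged sketch of the canonical access pattern (TouchKey is a hypothetical caller; ScopeRecordLock comes from slash's scope_record_lock.h, included above):

    // Sketch: how command code typically reads through a partition.
    void TouchKey(std::shared_ptr<Partition> partition, const std::string& key) {
      partition->DbRWLockReader();                 // shared lock on the DB handle
      {
        slash::lock::ScopeRecordLock l(partition->LockMgr(), key);  // per-key lock
        std::string value;
        blackwidow::Status s = partition->db()->Get(key, &value);   // blackwidow point read
        (void)s;
      }
      partition->DbRWUnLock();
    }
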
- -#ifndef PIKA_REPL_BGWROKER_H_ -#define PIKA_REPL_BGWROKER_H_ - -#include -#include - -#include "pink/include/pb_conn.h" -#include "pink/include/bg_thread.h" -#include "pink/include/thread_pool.h" - -#include "src/pika_inner_message.pb.h" - -#include "include/pika_command.h" -#include "include/pika_binlog_transverter.h" - -class PikaReplBgWorker { - public: - explicit PikaReplBgWorker(int queue_size); - ~PikaReplBgWorker(); - int StartThread(); - int StopThread(); - void Schedule(pink::TaskFunc func, void* arg); - void QueueClear(); - static void HandleBGWorkerWriteBinlog(void* arg); - static void HandleBGWorkerWriteDB(void* arg); - - BinlogItem binlog_item_; - pink::RedisParser redis_parser_; - std::string ip_port_; - std::string table_name_; - uint32_t partition_id_; - - private: - pink::BGThread bg_thread_; - static int HandleWriteBinlog(pink::RedisParser* parser, const pink::RedisCmdArgsType& argv); -}; - -#endif // PIKA_REPL_BGWROKER_H_ diff --git a/tools/pika_migrate/include/pika_repl_client.h b/tools/pika_migrate/include/pika_repl_client.h deleted file mode 100644 index d786af489f..0000000000 --- a/tools/pika_migrate/include/pika_repl_client.h +++ /dev/null @@ -1,126 +0,0 @@ -// Copyright (c) 2015-present, Qihoo, Inc. All rights reserved. -// This source code is licensed under the BSD-style license found in the -// LICENSE file in the root directory of this source tree. An additional grant -// of patent rights can be found in the PATENTS file in the same directory. - -#ifndef PIKA_REPL_CLIENT_H_ -#define PIKA_REPL_CLIENT_H_ - -#include -#include - -#include "pink/include/pink_conn.h" -#include "pink/include/client_thread.h" -#include "pink/include/thread_pool.h" -#include "slash/include/slash_status.h" - -#include "include/pika_define.h" -#include "include/pika_partition.h" -#include "include/pika_binlog_reader.h" -#include "include/pika_repl_bgworker.h" -#include "include/pika_repl_client_thread.h" - -#include "pink/include/thread_pool.h" -#include "src/pika_inner_message.pb.h" - -using slash::Status; - -struct ReplClientTaskArg { - std::shared_ptr res; - std::shared_ptr conn; - ReplClientTaskArg(std::shared_ptr _res, - std::shared_ptr _conn) - : res(_res), conn(_conn) {} -}; - -struct ReplClientWriteBinlogTaskArg { - std::shared_ptr res; - std::shared_ptr conn; - void* res_private_data; - PikaReplBgWorker* worker; - ReplClientWriteBinlogTaskArg( - const std::shared_ptr _res, - std::shared_ptr _conn, - void* _res_private_data, - PikaReplBgWorker* _worker) : - res(_res), conn(_conn), - res_private_data(_res_private_data), worker(_worker) {} -}; - -struct ReplClientWriteDBTaskArg { - PikaCmdArgsType* argv; - BinlogItem* binlog_item; - std::string table_name; - uint32_t partition_id; - ReplClientWriteDBTaskArg(PikaCmdArgsType* _argv, - BinlogItem* _binlog_item, - const std::string _table_name, - uint32_t _partition_id) - : argv(_argv), binlog_item(_binlog_item), - table_name(_table_name), partition_id(_partition_id) {} - ~ReplClientWriteDBTaskArg() { - delete argv; - delete binlog_item; - } -}; - - -class PikaReplClient { - public: - PikaReplClient(int cron_interval, int keepalive_timeout); - ~PikaReplClient(); - - int Start(); - int Stop(); - - slash::Status Write(const std::string& ip, const int port, const std::string& msg); - slash::Status Close(const std::string& ip, const int port); - - void Schedule(pink::TaskFunc func, void* arg); - void ScheduleWriteBinlogTask(std::string table_partition, - const std::shared_ptr res, - std::shared_ptr conn, - void* req_private_data); - void 
ScheduleWriteDBTask(const std::string& dispatch_key, - PikaCmdArgsType* argv, BinlogItem* binlog_item, - const std::string& table_name, uint32_t partition_id); - - Status SendMetaSync(); - Status SendPartitionDBSync(const std::string& ip, - uint32_t port, - const std::string& table_name, - uint32_t partition_id, - const BinlogOffset& boffset, - const std::string& local_ip); - Status SendPartitionTrySync(const std::string& ip, - uint32_t port, - const std::string& table_name, - uint32_t partition_id, - const BinlogOffset& boffset, - const std::string& local_ip); - Status SendPartitionBinlogSync(const std::string& ip, - uint32_t port, - const std::string& table_name, - uint32_t partition_id, - const BinlogOffset& ack_start, - const BinlogOffset& ack_end, - const std::string& local_ip, - bool is_frist_send); - Status SendRemoveSlaveNode(const std::string& ip, - uint32_t port, - const std::string& table_name, - uint32_t partition_id, - const std::string& local_ip); - private: - size_t GetHashIndex(std::string key, bool upper_half); - void UpdateNextAvail() { - next_avail_ = (next_avail_ + 1) % bg_workers_.size(); - } - - PikaReplClientThread* client_thread_; - int next_avail_; - std::hash str_hash; - std::vector bg_workers_; -}; - -#endif diff --git a/tools/pika_migrate/include/pika_repl_client_conn.h b/tools/pika_migrate/include/pika_repl_client_conn.h deleted file mode 100644 index 516507f2d5..0000000000 --- a/tools/pika_migrate/include/pika_repl_client_conn.h +++ /dev/null @@ -1,40 +0,0 @@ -// Copyright (c) 2015-present, Qihoo, Inc. All rights reserved. -// This source code is licensed under the BSD-style license found in the -// LICENSE file in the root directory of this source tree. An additional grant -// of patent rights can be found in the PATENTS file in the same directory. - -#ifndef PIKA_REPL_CLIENT_CONN_H_ -#define PIKA_REPL_CLIENT_CONN_H_ - -#include "pink/include/pb_conn.h" - -#include - -#include "include/pika_conf.h" -#include "src/pika_inner_message.pb.h" - -class PikaReplClientConn: public pink::PbConn { - public: - PikaReplClientConn(int fd, const std::string& ip_port, pink::Thread *thread, void* worker_specific_data, pink::PinkEpoll* epoll); - virtual ~PikaReplClientConn() = default; - - static void HandleMetaSyncResponse(void* arg); - static void HandleDBSyncResponse(void* arg); - static void HandleTrySyncResponse(void* arg); - static void HandleRemoveSlaveNodeResponse(void* arg); - static bool IsTableStructConsistent(const std::vector& current_tables, - const std::vector& expect_tables); - int DealMessage() override; - private: - // dispatch binlog by its table_name + partition - void DispatchBinlogRes(const std::shared_ptr response); - - struct ReplRespArg { - std::shared_ptr resp; - std::shared_ptr conn; - ReplRespArg(std::shared_ptr _resp, std::shared_ptr _conn) : resp(_resp), conn(_conn) { - } - }; -}; - -#endif diff --git a/tools/pika_migrate/include/pika_repl_client_thread.h b/tools/pika_migrate/include/pika_repl_client_thread.h deleted file mode 100644 index c0ed6ab48b..0000000000 --- a/tools/pika_migrate/include/pika_repl_client_thread.h +++ /dev/null @@ -1,63 +0,0 @@ -// Copyright (c) 2015-present, Qihoo, Inc. All rights reserved. -// This source code is licensed under the BSD-style license found in the -// LICENSE file in the root directory of this source tree. An additional grant -// of patent rights can be found in the PATENTS file in the same directory. 
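
PikaReplClient, above, fans replication work out over a fixed vector of PikaReplBgWorkers. Binlog-write tasks are keyed by table plus partition so each partition's binlog entries always serialize onto the same worker, while DB-apply tasks hash into the other half of the pool; GetHashIndex's upper_half flag selects between the two halves, and str_hash is the std::hash<std::string> member declared above. A sketch consistent with those declarations (the half-and-half split is the stated assumption):

    // Sketch: binlog tasks land in workers [0, n/2), DB tasks in [n/2, n).
    size_t PikaReplClient::GetHashIndex(std::string key, bool upper_half) {
      size_t half = bg_workers_.size() / 2;
      size_t idx = str_hash(key) % half;
      return upper_half ? idx : idx + half;
    }
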
-
-#ifndef PIKA_REPL_CLIENT_THREAD_H_
-#define PIKA_REPL_CLIENT_THREAD_H_
-
-#include <string>
-#include <memory>
-
-#include "include/pika_repl_client_conn.h"
-
-#include "pink/include/pink_conn.h"
-#include "pink/include/client_thread.h"
-
-class PikaReplClientThread : public pink::ClientThread {
- public:
-  PikaReplClientThread(int cron_interval, int keepalive_timeout);
-  virtual ~PikaReplClientThread() = default;
-  int Start();
-
- private:
-  class ReplClientConnFactory : public pink::ConnFactory {
-   public:
-    virtual std::shared_ptr<pink::PinkConn> NewPinkConn(
-        int connfd,
-        const std::string &ip_port,
-        pink::Thread *thread,
-        void* worker_specific_data,
-        pink::PinkEpoll* pink_epoll) const override {
-      return std::make_shared<PikaReplClientConn>(connfd, ip_port, thread, worker_specific_data, pink_epoll);
-    }
-  };
-  class ReplClientHandle : public pink::ClientHandle {
-   public:
-    void CronHandle() const override {
-    }
-    void FdTimeoutHandle(int fd, const std::string& ip_port) const override;
-    void FdClosedHandle(int fd, const std::string& ip_port) const override;
-    bool AccessHandle(std::string& ip) const override {
-      // ban 127.0.0.1 if you want to test this routine
-      // if (ip.find("127.0.0.2") != std::string::npos) {
-      //   std::cout << "AccessHandle " << ip << std::endl;
-      //   return false;
-      // }
-      return true;
-    }
-    int CreateWorkerSpecificData(void** data) const override {
-      return 0;
-    }
-    int DeleteWorkerSpecificData(void* data) const override {
-      return 0;
-    }
-    void DestConnectFailedHandle(std::string ip_port, std::string reason) const override {
-    }
-  };
-
-  ReplClientConnFactory conn_factory_;
-  ReplClientHandle handle_;
-};
-
-#endif  // PIKA_REPL_CLIENT_THREAD_H_
diff --git a/tools/pika_migrate/include/pika_repl_server.h b/tools/pika_migrate/include/pika_repl_server.h
deleted file mode 100644
index 592052d92a..0000000000
--- a/tools/pika_migrate/include/pika_repl_server.h
+++ /dev/null
@@ -1,48 +0,0 @@
-// Copyright (c) 2019-present, Qihoo, Inc. All rights reserved.
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree. An additional grant
-// of patent rights can be found in the PATENTS file in the same directory.
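
PikaReplClientThread itself adds no protocol logic: the nested ReplClientConnFactory stamps out one PikaReplClientConn per outbound connection, and ReplClientHandle's FdTimeoutHandle()/FdClosedHandle() are the hooks through which the replication layer notices a dead master link. Bringing the transport up is then just construction plus Start(); a sketch (the interval and timeout values are illustrative, not the configured defaults):

    // Sketch: start the replication client transport.
    PikaReplClientThread* repl_client_thread =
        new PikaReplClientThread(/*cron_interval=*/1000, /*keepalive_timeout=*/60);
    repl_client_thread->set_thread_name("PikaReplClient");
    if (repl_client_thread->Start() != 0) {
      // startup failed; treated as fatal by the caller
    }
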
-
-#ifndef PIKA_REPL_SERVER_H_
-#define PIKA_REPL_SERVER_H_
-
-#include "pink/include/thread_pool.h"
-
-#include <vector>
-
-#include "include/pika_command.h"
-#include "include/pika_repl_bgworker.h"
-#include "include/pika_repl_server_thread.h"
-
-struct ReplServerTaskArg {
-  std::shared_ptr<InnerMessage::InnerRequest> req;
-  std::shared_ptr<pink::PbConn> conn;
-  ReplServerTaskArg(std::shared_ptr<InnerMessage::InnerRequest> _req, std::shared_ptr<pink::PbConn> _conn)
-      : req(_req), conn(_conn) {}
-};
-
-class PikaReplServer {
- public:
-  PikaReplServer(const std::set<std::string>& ips, int port, int cron_interval);
-  ~PikaReplServer();
-
-  int Start();
-  int Stop();
-
-  slash::Status SendSlaveBinlogChips(const std::string& ip, int port, const std::vector<WriteTask>& tasks);
-  slash::Status Write(const std::string& ip, const int port, const std::string& msg);
-
-  void Schedule(pink::TaskFunc func, void* arg);
-  void UpdateClientConnMap(const std::string& ip_port, int fd);
-  void RemoveClientConn(int fd);
-  void KillAllConns();
-
- private:
-  pink::ThreadPool* server_tp_;
-  PikaReplServerThread* pika_repl_server_thread_;
-
-  pthread_rwlock_t client_conn_rwlock_;
-  std::map<std::string, int> client_conn_map_;
-};
-
-#endif
diff --git a/tools/pika_migrate/include/pika_repl_server_conn.h b/tools/pika_migrate/include/pika_repl_server_conn.h
deleted file mode 100644
index d5757f0373..0000000000
--- a/tools/pika_migrate/include/pika_repl_server_conn.h
+++ /dev/null
@@ -1,30 +0,0 @@
-// Copyright (c) 2019-present, Qihoo, Inc. All rights reserved.
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree. An additional grant
-// of patent rights can be found in the PATENTS file in the same directory.
-
-#ifndef PIKA_REPL_SERVER_CONN_H_
-#define PIKA_REPL_SERVER_CONN_H_
-
-#include <string>
-
-#include "pink/include/pb_conn.h"
-#include "pink/include/pink_thread.h"
-
-#include "src/pika_inner_message.pb.h"
-
-class PikaReplServerConn: public pink::PbConn {
- public:
-  PikaReplServerConn(int fd, std::string ip_port, pink::Thread* thread, void* worker_specific_data, pink::PinkEpoll* epoll);
-  virtual ~PikaReplServerConn();
-
-  static void HandleMetaSyncRequest(void* arg);
-  static void HandleTrySyncRequest(void* arg);
-  static void HandleDBSyncRequest(void* arg);
-  static void HandleBinlogSyncRequest(void* arg);
-  static void HandleRemoveSlaveNodeRequest(void* arg);
-
-  int DealMessage();
-};
-
-#endif  // INCLUDE_PIKA_REPL_SERVER_CONN_H_
diff --git a/tools/pika_migrate/include/pika_repl_server_thread.h b/tools/pika_migrate/include/pika_repl_server_thread.h
deleted file mode 100644
index f322a1df7c..0000000000
--- a/tools/pika_migrate/include/pika_repl_server_thread.h
+++ /dev/null
@@ -1,55 +0,0 @@
-// Copyright (c) 2019-present, Qihoo, Inc. All rights reserved.
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree. An additional grant
-// of patent rights can be found in the PATENTS file in the same directory.
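
PikaReplServer tracks every accepted slave connection in client_conn_map_ ("ip:port" to fd), maintained by UpdateClientConnMap()/RemoveClientConn() under client_conn_rwlock_, so binlog chips can be pushed to one specific slave. A plausible sketch of Write() under those declarations (the lookup-then-send shape is the assumption; slash::RWLock and slash::IpPortString are standard slash utilities):

    // Sketch: route an outbound message to the fd registered for ip:port.
    slash::Status PikaReplServer::Write(const std::string& ip, const int port,
                                        const std::string& msg) {
      slash::RWLock l(&client_conn_rwlock_, false);        // false: read lock
      const std::string ip_port = slash::IpPortString(ip, port);
      auto iter = client_conn_map_.find(ip_port);
      if (iter == client_conn_map_.end()) {
        return slash::Status::NotFound("no connection for " + ip_port);
      }
      int fd = iter->second;
      // ... locate the pink conn bound to fd and append msg to its write buffer ...
      (void)fd;
      return slash::Status::OK();
    }
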
- -#ifndef PIKA_REPL_SERVER_THREAD_H_ -#define PIKA_REPL_SERVER_THREAD_H_ - -#include "pink/src/holy_thread.h" - -#include "include/pika_repl_server_conn.h" - -class PikaReplServerThread : public pink::HolyThread { - public: - PikaReplServerThread(const std::set& ips, int port, int cron_interval); - virtual ~PikaReplServerThread() = default; - - int ListenPort(); - - // for ProcessBinlogData use - uint64_t GetnPlusSerial() { - return serial_++; - } - - private: - class ReplServerConnFactory : public pink::ConnFactory { - public: - explicit ReplServerConnFactory(PikaReplServerThread* binlog_receiver) - : binlog_receiver_(binlog_receiver) { - } - - virtual std::shared_ptr NewPinkConn( - int connfd, - const std::string& ip_port, - pink::Thread* thread, - void* worker_specific_data, - pink::PinkEpoll* pink_epoll) const override { - return std::make_shared(connfd, ip_port, thread, binlog_receiver_, pink_epoll); - } - private: - PikaReplServerThread* binlog_receiver_; - }; - - class ReplServerHandle : public pink::ServerHandle { - public: - virtual void FdClosedHandle(int fd, const std::string& ip_port) const override; - }; - - ReplServerConnFactory conn_factory_; - ReplServerHandle handle_; - int port_; - uint64_t serial_; -}; - -#endif diff --git a/tools/pika_migrate/include/pika_rm.h b/tools/pika_migrate/include/pika_rm.h deleted file mode 100644 index cb20a8b250..0000000000 --- a/tools/pika_migrate/include/pika_rm.h +++ /dev/null @@ -1,359 +0,0 @@ -// Copyright (c) 2015-present, Qihoo, Inc. All rights reserved. -// This source code is licensed under the BSD-style license found in the -// LICENSE file in the root directory of this source tree. An additional grant -// of patent rights can be found in the PATENTS file in the same directory. - -#ifndef PIKA_RM_H_ -#define PIKA_RM_H_ - -#include -#include -#include -#include -#include - -#include "slash/include/slash_status.h" - -#include "include/pika_binlog_reader.h" -#include "include/pika_repl_client.h" -#include "include/pika_repl_server.h" - -#define kBinlogSendPacketNum 40 -#define kBinlogSendBatchNum 100 - -// unit seconds -#define kSendKeepAliveTimeout (10 * 1000000) -#define kRecvKeepAliveTimeout (20 * 1000000) - -using slash::Status; - -struct SyncWinItem { - BinlogOffset offset_; - bool acked_; - bool operator==(const SyncWinItem& other) const { - if (offset_.filenum == other.offset_.filenum && offset_.offset == other.offset_.offset) { - return true; - } - return false; - } - explicit SyncWinItem(const BinlogOffset& offset) : offset_(offset), acked_(false) { - } - SyncWinItem(uint32_t filenum, uint64_t offset) : offset_(filenum, offset), acked_(false) { - } - std::string ToString() const { - return offset_.ToString() + " acked: " + std::to_string(acked_); - } -}; - -class SyncWindow { - public: - SyncWindow() { - } - void Push(const SyncWinItem& item); - bool Update(const SyncWinItem& start_item, const SyncWinItem& end_item, BinlogOffset* acked_offset); - int Remainings(); - std::string ToStringStatus() const { - if (win_.empty()) { - return " Size: " + std::to_string(win_.size()) + "\r\n"; - } else { - std::string res; - res += " Size: " + std::to_string(win_.size()) + "\r\n"; - res += (" Begin_item: " + win_.begin()->ToString() + "\r\n"); - res += (" End_item: " + win_.rbegin()->ToString() + "\r\n"); - return res; - } - } - private: - // TODO(whoiami) ring buffer maybe - std::deque win_; -}; - -// role master use -class SlaveNode : public RmNode { - public: - SlaveNode(const std::string& ip, int port, const std::string& table_name, 
uint32_t partition_id, int session_id); - ~SlaveNode(); - void Lock() { - slave_mu.Lock(); - } - void Unlock() { - slave_mu.Unlock(); - } - SlaveState slave_state; - - BinlogSyncState b_state; - SyncWindow sync_win; - BinlogOffset sent_offset; - BinlogOffset acked_offset; - - std::string ToStringStatus(); - - std::shared_ptr binlog_reader; - Status InitBinlogFileReader(const std::shared_ptr& binlog, const BinlogOffset& offset); - void ReleaseBinlogFileReader(); - - slash::Mutex slave_mu; -}; - -class SyncPartition { - public: - SyncPartition(const std::string& table_name, uint32_t partition_id); - virtual ~SyncPartition() = default; - - PartitionInfo& SyncPartitionInfo() { - return partition_info_; - } - protected: - // std::shared_ptr binlog_; - PartitionInfo partition_info_; -}; - -class SyncMasterPartition : public SyncPartition { - public: - SyncMasterPartition(const std::string& table_name, uint32_t partition_id); - Status AddSlaveNode(const std::string& ip, int port, int session_id); - Status RemoveSlaveNode(const std::string& ip, int port); - - Status ActivateSlaveBinlogSync(const std::string& ip, int port, const std::shared_ptr binlog, const BinlogOffset& offset); - Status ActivateSlaveDbSync(const std::string& ip, int port); - - Status SyncBinlogToWq(const std::string& ip, int port); - Status UpdateSlaveBinlogAckInfo(const std::string& ip, int port, const BinlogOffset& start, const BinlogOffset& end); - Status GetSlaveSyncBinlogInfo(const std::string& ip, int port, BinlogOffset* sent_offset, BinlogOffset* acked_offset); - Status GetSlaveState(const std::string& ip, int port, SlaveState* const slave_state); - - Status SetLastSendTime(const std::string& ip, int port, uint64_t time); - Status GetLastSendTime(const std::string& ip, int port, uint64_t* time); - - Status SetLastRecvTime(const std::string& ip, int port, uint64_t time); - Status GetLastRecvTime(const std::string& ip, int port, uint64_t* time); - - Status GetSafetyPurgeBinlog(std::string* safety_purge); - bool BinlogCloudPurge(uint32_t index); - - Status WakeUpSlaveBinlogSync(); - Status CheckSyncTimeout(uint64_t now); - - int GetNumberOfSlaveNode(); - bool CheckSlaveNodeExist(const std::string& ip, int port); - Status GetSlaveNodeSession(const std::string& ip, int port, int32_t* session); - - void GetValidSlaveNames(std::vector* slavenames); - // display use - Status GetInfo(std::string* info); - // debug use - std::string ToStringStatus(); - - int32_t GenSessionId(); - bool CheckSessionId(const std::string& ip, int port, - const std::string& table_name, - uint64_t partition_id, int session_id); - - private: - bool CheckReadBinlogFromCache(); - // inovker need to hold partition_mu_ - void CleanMasterNode(); - void CleanSlaveNode(); - // invoker need to hold slave_mu_ - Status ReadCachedBinlogToWq(const std::shared_ptr& slave_ptr); - Status ReadBinlogFileToWq(const std::shared_ptr& slave_ptr); - // inovker need to hold partition_mu_ - Status GetSlaveNode(const std::string& ip, int port, std::shared_ptr* slave_node); - - slash::Mutex partition_mu_; - std::vector> slaves_; - - slash::Mutex session_mu_; - int32_t session_id_; - - // BinlogCacheWindow win_; -}; - -class SyncSlavePartition : public SyncPartition { - public: - SyncSlavePartition(const std::string& table_name, uint32_t partition_id); - - void Activate(const RmNode& master, const ReplState& repl_state); - void Deactivate(); - - void SetLastRecvTime(uint64_t time); - uint64_t LastRecvTime(); - - void SetReplState(const ReplState& repl_state); - ReplState State(); - 
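// [Editor's sketch] A minimal illustration of the keepalive bookkeeping implied
// by SetLastRecvTime()/LastRecvTime() above and CheckSyncTimeout() below. The
// threshold mirrors the kRecvKeepAliveTimeout constant (microseconds) defined
// near the top of this header; the helper itself is hypothetical and not the
// shipped implementation, which lives in pika_rm.cc (also removed by this patch).
inline bool RecvTimedOut(uint64_t now_us, uint64_t last_recv_us) {
  // A peer is considered dead if nothing has been received for the
  // keepalive window (20 * 1000000 us, i.e. kRecvKeepAliveTimeout).
  return now_us - last_recv_us > (20 * 1000000);
}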
- Status CheckSyncTimeout(uint64_t now); - - // For display - Status GetInfo(std::string* info); - // For debug - std::string ToStringStatus(); - - const std::string& MasterIp() { - return m_info_.Ip(); - } - int MasterPort() { - return m_info_.Port(); - } - void SetMasterSessionId(int32_t session_id) { - m_info_.SetSessionId(session_id); - } - int32_t MasterSessionId() { - return m_info_.SessionId(); - } - void SetLocalIp(const std::string& local_ip) { - local_ip_ = local_ip; - } - std::string LocalIp() { - return local_ip_; - } - - private: - slash::Mutex partition_mu_; - RmNode m_info_; - ReplState repl_state_; - std::string local_ip_; -}; - -class BinlogReaderManager { - public: - ~BinlogReaderManager(); - Status FetchBinlogReader(const RmNode& rm_node, std::shared_ptr* reader); - Status ReleaseBinlogReader(const RmNode& rm_node); - private: - slash::Mutex reader_mu_; - std::unordered_map, hash_rm_node> occupied_; - std::vector> vacant_; -}; - -class PikaReplicaManager { - public: - PikaReplicaManager(); - ~PikaReplicaManager(); - - void Start(); - void Stop(); - - Status AddSyncPartitionSanityCheck(const std::set& p_infos); - Status AddSyncPartition(const std::set& p_infos); - Status RemoveSyncPartitionSanityCheck(const std::set& p_infos); - Status RemoveSyncPartition(const std::set& p_infos); - Status SelectLocalIp(const std::string& remote_ip, - const int remote_port, - std::string* const local_ip); - Status ActivateSyncSlavePartition(const RmNode& node, const ReplState& repl_state); - Status UpdateSyncSlavePartitionSessionId(const PartitionInfo& p_info, int32_t session_id); - Status DeactivateSyncSlavePartition(const PartitionInfo& p_info); - Status SetSlaveReplState(const PartitionInfo& p_info, const ReplState& repl_state); - Status GetSlaveReplState(const PartitionInfo& p_info, ReplState* repl_state); - - // For Pika Repl Client Thread - Status SendMetaSyncRequest(); - Status SendRemoveSlaveNodeRequest(const std::string& table, uint32_t partition_id); - Status SendPartitionTrySyncRequest(const std::string& table_name, size_t partition_id); - Status SendPartitionDBSyncRequest(const std::string& table_name, size_t partition_id); - Status SendPartitionBinlogSyncAckRequest(const std::string& table, uint32_t partition_id, - const BinlogOffset& ack_start, const BinlogOffset& ack_end, - bool is_first_send = false); - Status CloseReplClientConn(const std::string& ip, int32_t port); - - // For Pika Repl Server Thread - Status SendSlaveBinlogChipsRequest(const std::string& ip, int port, const std::vector& tasks); - - // For SyncMasterPartition - std::shared_ptr GetSyncMasterPartitionByName(const PartitionInfo& p_info); - Status GetSafetyPurgeBinlogFromSMP(const std::string& table_name, - uint32_t partition_id, std::string* safety_purge); - bool BinlogCloudPurgeFromSMP(const std::string& table_name, - uint32_t partition_id, uint32_t index); - - // For SyncSlavePartition - std::shared_ptr GetSyncSlavePartitionByName(const PartitionInfo& p_info); - - - - Status RunSyncSlavePartitionStateMachine(); - - Status SetMasterLastRecvTime(const RmNode& slave, uint64_t time); - Status SetSlaveLastRecvTime(const RmNode& slave, uint64_t time); - - Status CheckSyncTimeout(uint64_t now); - - // To check partition info - // For pkcluster info command - Status GetPartitionInfo( - const std::string& table, uint32_t partition_id, std::string* info); - - void FindCompleteReplica(std::vector* replica); - void FindCommonMaster(std::string* master); - - Status CheckPartitionRole( - const std::string& table, 
uint32_t partition_id, int* role); - - void RmStatus(std::string* debug_info); - - // following funcs invoked by master partition only - - Status AddPartitionSlave(const RmNode& slave); - Status RemovePartitionSlave(const RmNode& slave); - bool CheckPartitionSlaveExist(const RmNode& slave); - Status GetPartitionSlaveSession(const RmNode& slave, int32_t* session); - - Status LostConnection(const std::string& ip, int port); - - Status ActivateBinlogSync(const RmNode& slave, const BinlogOffset& offset); - Status ActivateDbSync(const RmNode& slave); - - // Update binlog win and try to send next binlog - Status UpdateSyncBinlogStatus(const RmNode& slave, const BinlogOffset& offset_start, const BinlogOffset& offset_end); - Status GetSyncBinlogStatus(const RmNode& slave, BinlogOffset* sent_boffset, BinlogOffset* acked_boffset); - Status GetSyncMasterPartitionSlaveState(const RmNode& slave, SlaveState* const slave_state); - - Status WakeUpBinlogSync(); - - // Session Id - int32_t GenPartitionSessionId(const std::string& table_name, uint32_t partition_id); - int32_t GetSlavePartitionSessionId(const std::string& table_name, uint32_t partition_id); - bool CheckSlavePartitionSessionId(const std::string& table_name, uint32_t partition_id, - int session_id); - bool CheckMasterPartitionSessionId(const std::string& ip, int port, - const std::string& table_name, - uint32_t partition_id, int session_id); - - // write_queue related - void ProduceWriteQueue(const std::string& ip, int port, const std::vector& tasks); - int ConsumeWriteQueue(); - void DropItemInWriteQueue(const std::string& ip, int port); - - // Schedule Task - void ScheduleReplServerBGTask(pink::TaskFunc func, void* arg); - void ScheduleReplClientBGTask(pink::TaskFunc func, void* arg); - void ScheduleWriteBinlogTask(const std::string& table_partition, - const std::shared_ptr res, - std::shared_ptr conn, void* res_private_data); - void ScheduleWriteDBTask(const std::string& dispatch_key, - PikaCmdArgsType* argv, BinlogItem* binlog_item, - const std::string& table_name, uint32_t partition_id); - - void ReplServerRemoveClientConn(int fd); - void ReplServerUpdateClientConnMap(const std::string& ip_port, int fd); - - BinlogReaderManager binlog_reader_mgr; - - private: - void InitPartition(); - - pthread_rwlock_t partitions_rw_; - std::unordered_map, hash_partition_info> sync_master_partitions_; - std::unordered_map, hash_partition_info> sync_slave_partitions_; - - slash::Mutex write_queue_mu_; - // every host owns a queue - std::unordered_map> write_queues_; // ip+port, queue - - PikaReplClient* pika_repl_client_; - PikaReplServer* pika_repl_server_; - int last_meta_sync_timestamp_; -}; - -#endif // PIKA_RM_H diff --git a/tools/pika_migrate/include/pika_rsync_service.h b/tools/pika_migrate/include/pika_rsync_service.h deleted file mode 100644 index f728f52b57..0000000000 --- a/tools/pika_migrate/include/pika_rsync_service.h +++ /dev/null @@ -1,28 +0,0 @@ -// Copyright (c) 2019-present, Qihoo, Inc. All rights reserved. -// This source code is licensed under the BSD-style license found in the -// LICENSE file in the root directory of this source tree. An additional grant -// of patent rights can be found in the PATENTS file in the same directory. 
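The PikaReplicaManager deleted above fans binlog tasks out through per-peer write queues: ProduceWriteQueue() appends tasks under write_queue_mu_ into write_queues_, keyed by "ip+port", and ConsumeWriteQueue() drains a bounded batch per peer per round. A minimal sketch of that shape, assuming a simplified stand-in WriteTask type and the kBinlogSendBatchNum batch size from pika_rm.h; this is illustrative, not the shipped code:

#include <mutex>
#include <queue>
#include <string>
#include <unordered_map>
#include <vector>

struct WriteTask { std::string binlog_chip; };   // stand-in for the real task type

class WriteQueues {
 public:
  void Produce(const std::string& ip, int port,
               const std::vector<WriteTask>& tasks) {
    std::lock_guard<std::mutex> guard(mu_);
    auto& q = queues_[ip + ":" + std::to_string(port)];  // one queue per peer
    for (const auto& t : tasks) q.push(t);
  }
  // Drain at most `batch` tasks per peer per round, mirroring kBinlogSendBatchNum.
  int Consume(size_t batch = 100) {
    std::lock_guard<std::mutex> guard(mu_);
    int sent = 0;
    for (auto& [peer, q] : queues_) {
      for (size_t i = 0; i < batch && !q.empty(); ++i, ++sent) q.pop();
    }
    return sent;
  }
 private:
  std::mutex mu_;
  std::unordered_map<std::string, std::queue<WriteTask>> queues_;
};

Keying by "ip+port" keeps each slave's binlog stream ordered on its own queue while letting the consumer round-robin across peers.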
-
-#ifndef PIKA_RSYNC_SERVICE_H_
-#define PIKA_RSYNC_SERVICE_H_
-
-#include "iostream"
-
-class PikaRsyncService {
- public:
-  PikaRsyncService(const std::string& raw_path,
-                   const int port);
-  ~PikaRsyncService();
-  int StartRsync();
-  bool CheckRsyncAlive();
-  int ListenPort();
-
- private:
-  int CreateSecretFile();
-  std::string raw_path_;
-  std::string rsync_path_;
-  std::string pid_path_;
-  int port_;
-};
-
-#endif
diff --git a/tools/pika_migrate/include/pika_sender.h b/tools/pika_migrate/include/pika_sender.h
deleted file mode 100644
index 1cdb38f34b..0000000000
--- a/tools/pika_migrate/include/pika_sender.h
+++ /dev/null
@@ -1,41 +0,0 @@
-#ifndef PIKA_SENDER_H_
-#define PIKA_SENDER_H_
-
-#include
-#include
-#include
-#include
-#include
-
-#include "pink/include/bg_thread.h"
-#include "pink/include/pink_cli.h"
-#include "pink/include/redis_cli.h"
-
-class PikaSender : public pink::Thread {
-public:
-  PikaSender(std::string ip, int64_t port, std::string password);
-  virtual ~PikaSender();
-  void LoadKey(const std::string &cmd);
-  void Stop();
-
-  int64_t elements() { return elements_; }
-
-  void SendCommand(std::string &command, const std::string &key);
-  int QueueSize();
-  void ConnectRedis();
-
-private:
-  pink::PinkCli *cli_;
-  slash::CondVar signal_;
-  slash::Mutex keys_mutex_;
-  std::queue<std::string> keys_queue_;
-  std::string ip_;
-  int port_;
-  std::string password_;
-  std::atomic<bool> should_exit_;
-  int64_t elements_;
-
-  virtual void *ThreadMain();
-};
-
-#endif
diff --git a/tools/pika_migrate/include/pika_server.h b/tools/pika_migrate/include/pika_server.h
deleted file mode 100644
index 49085088b3..0000000000
--- a/tools/pika_migrate/include/pika_server.h
+++ /dev/null
@@ -1,425 +0,0 @@
-// Copyright (c) 2015-present, Qihoo, Inc. All rights reserved.
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree. An additional grant
-// of patent rights can be found in the PATENTS file in the same directory.
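PikaSender, deleted just above, is a queue-backed worker: LoadKey() enqueues serialized commands under keys_mutex_ and signals the thread, which connects to the target Redis and drains the queue. A compact sketch of that producer/consumer pattern using standard-library primitives in place of the slash/pink ones (names here are local to the sketch):

#include <condition_variable>
#include <mutex>
#include <queue>
#include <string>

class SenderQueue {
 public:
  void LoadKey(const std::string& cmd) {         // producer side
    std::unique_lock<std::mutex> lk(mu_);
    queue_.push(cmd);
    cv_.notify_one();                            // wake the sender thread
  }
  bool Pop(std::string* cmd) {                   // consumer (ThreadMain) side
    std::unique_lock<std::mutex> lk(mu_);
    cv_.wait(lk, [this] { return !queue_.empty() || stopped_; });
    if (queue_.empty()) return false;            // woken only to shut down
    *cmd = queue_.front();
    queue_.pop();
    return true;
  }
  void Stop() {
    std::unique_lock<std::mutex> lk(mu_);
    stopped_ = true;
    cv_.notify_all();
  }
 private:
  std::mutex mu_;
  std::condition_variable cv_;
  std::queue<std::string> queue_;
  bool stopped_ = false;
};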
- -#ifndef PIKA_SERVER_H_ -#define PIKA_SERVER_H_ - -#include -#include - -#include "slash/include/slash_mutex.h" -#include "slash/include/slash_status.h" -#include "slash/include/slash_string.h" -#include "pink/include/bg_thread.h" -#include "pink/include/thread_pool.h" -#include "pink/include/pink_pubsub.h" -#include "blackwidow/blackwidow.h" -#include "blackwidow/backupable.h" - -#include "include/redis_sender.h" -#include "include/pika_conf.h" -#include "include/pika_table.h" -#include "include/pika_binlog.h" -#include "include/pika_define.h" -#include "include/pika_monitor_thread.h" -#include "include/pika_rsync_service.h" -#include "include/pika_dispatch_thread.h" -#include "include/pika_repl_client.h" -#include "include/pika_repl_server.h" -#include "include/pika_auxiliary_thread.h" - -using slash::Status; -using slash::Slice; - -struct StatisticData { - StatisticData() - : accumulative_connections(0), - thread_querynum(0), - last_thread_querynum(0), - last_sec_thread_querynum(0), - last_time_us(0) { - CmdTable* cmds = new CmdTable(); - cmds->reserve(300); - InitCmdTable(cmds); - CmdTable::const_iterator it = cmds->begin(); - for (; it != cmds->end(); ++it) { - std::string tmp = it->first; - exec_count_table[slash::StringToUpper(tmp)].store(0); - } - DestoryCmdTable(cmds); - delete cmds; - } - - std::atomic accumulative_connections; - std::unordered_map> exec_count_table; - std::atomic thread_querynum; - std::atomic last_thread_querynum; - std::atomic last_sec_thread_querynum; - std::atomic last_time_us; -}; -/* -static std::set MultiKvCommands {kCmdNameDel, - kCmdNameMget, kCmdNameKeys, kCmdNameMset, - kCmdNameMsetnx, kCmdNameExists, kCmdNameScan, - kCmdNameScanx, kCmdNamePKScanRange, kCmdNamePKRScanRange, - kCmdNameRPopLPush, kCmdNameZUnionstore, kCmdNameZInterstore, - kCmdNameSUnion, kCmdNameSUnionstore, kCmdNameSInter, - kCmdNameSInterstore, kCmdNameSDiff, kCmdNameSDiffstore, - kCmdNameSMove, kCmdNameBitOp, kCmdNamePfAdd, - kCmdNamePfCount, kCmdNamePfMerge, kCmdNameGeoAdd, - kCmdNameGeoPos, kCmdNameGeoDist, kCmdNameGeoHash, - kCmdNameGeoRadius, kCmdNameGeoRadiusByMember}; -*/ - -static std::set ShardingModeNotSupportCommands { - kCmdNameMsetnx, kCmdNameScan, kCmdNameKeys, - kCmdNameScanx, kCmdNamePKScanRange, kCmdNamePKRScanRange, - kCmdNameRPopLPush, kCmdNameZUnionstore, kCmdNameZInterstore, - kCmdNameSUnion, kCmdNameSUnionstore, kCmdNameSInter, - kCmdNameSInterstore, kCmdNameSDiff, kCmdNameSDiffstore, - kCmdNameSMove, kCmdNameBitOp, kCmdNamePfAdd, - kCmdNamePfCount, kCmdNamePfMerge, kCmdNameGeoAdd, - kCmdNameGeoPos, kCmdNameGeoDist, kCmdNameGeoHash, - kCmdNameGeoRadius, kCmdNameGeoRadiusByMember, kCmdNamePKPatternMatchDel}; - - -extern PikaConf *g_pika_conf; - -enum TaskType { - kCompactAll, - kCompactStrings, - kCompactHashes, - kCompactSets, - kCompactZSets, - kCompactList, - kResetReplState, - kPurgeLog, - kStartKeyScan, - kStopKeyScan, - kBgSave, -}; - -class PikaServer { - public: - PikaServer(); - ~PikaServer(); - - /* - * Server init info - */ - bool ServerInit(); - - void Start(); - void Exit(); - - std::string host(); - int port(); - time_t start_time_s(); - std::string master_ip(); - int master_port(); - int role(); - bool readonly(const std::string& table, const std::string& key); - int repl_state(); - std::string repl_state_str(); - bool force_full_sync(); - void SetForceFullSync(bool v); - void SetDispatchQueueLimit(int queue_limit); - blackwidow::BlackwidowOptions bw_options(); - - /* - * Table use - */ - void InitTableStruct(); - std::shared_ptr GetTable(const 
std::string& table_name); - std::set GetTablePartitionIds(const std::string& table_name); - bool IsBgSaving(); - bool IsKeyScaning(); - bool IsCompacting(); - bool IsTableExist(const std::string& table_name); - bool IsTablePartitionExist(const std::string& table_name, uint32_t partition_id); - bool IsCommandSupport(const std::string& command); - bool IsTableBinlogIoError(const std::string& table_name); - Status DoSameThingSpecificTable(const TaskType& type, const std::set& tables = {}); - - /* - * Partition use - */ - void PreparePartitionTrySync(); - void PartitionSetMaxCacheStatisticKeys(uint32_t max_cache_statistic_keys); - void PartitionSetSmallCompactionThreshold(uint32_t small_compaction_threshold); - bool GetTablePartitionBinlogOffset(const std::string& table_name, - uint32_t partition_id, - BinlogOffset* const boffset); - std::shared_ptr GetPartitionByDbName(const std::string& db_name); - std::shared_ptr GetTablePartitionById( - const std::string& table_name, - uint32_t partition_id); - std::shared_ptr GetTablePartitionByKey( - const std::string& table_name, - const std::string& key); - Status DoSameThingEveryPartition(const TaskType& type); - - /* - * Master use - */ - void BecomeMaster(); - void DeleteSlave(int fd); //conn fd - int32_t CountSyncSlaves(); - int32_t GetSlaveListString(std::string& slave_list_str); - int32_t GetShardingSlaveListString(std::string& slave_list_str); - bool TryAddSlave(const std::string& ip, int64_t port, int fd, - const std::vector& table_structs); - slash::Mutex slave_mutex_; // protect slaves_; - std::vector slaves_; - - - /* - * Slave use - */ - void SyncError(); - void RemoveMaster(); - bool SetMaster(std::string& master_ip, int master_port); - - /* - * Slave State Machine - */ - bool ShouldMetaSync(); - void FinishMetaSync(); - bool MetaSyncDone(); - void ResetMetaSyncStatus(); - bool AllPartitionConnectSuccess(); - bool LoopPartitionStateMachine(); - void SetLoopPartitionStateMachine(bool need_loop); - - /* - * ThreadPool Process Task - */ - void Schedule(pink::TaskFunc func, void* arg); - - /* - * BGSave used - */ - void BGSaveTaskSchedule(pink::TaskFunc func, void* arg); - - /* - * PurgeLog used - */ - void PurgelogsTaskSchedule(pink::TaskFunc func, void* arg); - - /* - * Flushall & Flushdb used - */ - void PurgeDir(const std::string& path); - void PurgeDirTaskSchedule(void (*function)(void*), void* arg); - - /* - * DBSync used - */ - void DBSync(const std::string& ip, int port, - const std::string& table_name, - uint32_t partition_id); - void TryDBSync(const std::string& ip, int port, - const std::string& table_name, - uint32_t partition_id, int32_t top); - void DbSyncSendFile(const std::string& ip, int port, - const std::string& table_name, - uint32_t partition_id); - std::string DbSyncTaskIndex(const std::string& ip, int port, - const std::string& table_name, - uint32_t partition_id); - - /* - * Keyscan used - */ - void KeyScanTaskSchedule(pink::TaskFunc func, void* arg); - - /* - * Client used - */ - void ClientKillAll(); - int ClientKill(const std::string &ip_port); - int64_t ClientList(std::vector *clients = nullptr); - - /* - * Monitor used - */ - bool HasMonitorClients(); - void AddMonitorMessage(const std::string &monitor_message); - void AddMonitorClient(std::shared_ptr client_ptr); - - /* - * Slowlog used - */ - void SlowlogTrim(); - void SlowlogReset(); - uint32_t SlowlogLen(); - void SlowlogObtain(int64_t number, std::vector* slowlogs); - void SlowlogPushEntry(const PikaCmdArgsType& argv, int32_t time, int64_t duration); - - /* - 
* Statistic used - */ - void ResetStat(); - uint64_t ServerQueryNum(); - uint64_t ServerCurrentQps(); - uint64_t accumulative_connections(); - void incr_accumulative_connections(); - void ResetLastSecQuerynum(); - void UpdateQueryNumAndExecCountTable(const std::string& command); - std::unordered_map ServerExecCountTable(); - - /* - * Slave to Master communication used - */ - int SendToPeer(); - void SignalAuxiliary(); - Status TriggerSendBinlogSync(); - - /* - * PubSub used - */ - int PubSubNumPat(); - int Publish(const std::string& channel, const std::string& msg); - int UnSubscribe(std::shared_ptr conn, - const std::vector& channels, - const bool pattern, - std::vector>* result); - void Subscribe(std::shared_ptr conn, - const std::vector& channels, - const bool pattern, - std::vector>* result); - void PubSubChannels(const std::string& pattern, - std::vector* result); - void PubSubNumSub(const std::vector& channels, - std::vector>* result); - - /* - * migrate used - */ - int SendRedisCommand(const std::string& command, const std::string& key); - void RetransmitData(const std::string& path); - - - friend class Cmd; - friend class InfoCmd; - friend class PkClusterAddSlotsCmd; - friend class PkClusterDelSlotsCmd; - friend class PikaReplClientConn; - friend class PkClusterInfoCmd; - - private: - /* - * TimingTask use - */ - void DoTimingTask(); - void AutoCompactRange(); - void AutoPurge(); - void AutoDeleteExpiredDump(); - void AutoKeepAliveRSync(); - - std::string host_; - int port_; - time_t start_time_s_; - - blackwidow::BlackwidowOptions bw_options_; - void InitBlackwidowOptions(); - - std::atomic exit_; - - /* - * Table used - */ - std::atomic slot_state_; - pthread_rwlock_t tables_rw_; - std::map> tables_; - - /* - * CronTask used - */ - bool have_scheduled_crontask_; - struct timeval last_check_compact_time_; - - /* - * Communicate with the client used - */ - int worker_num_; - pink::ThreadPool* pika_thread_pool_; - PikaDispatchThread* pika_dispatch_thread_; - - - /* - * Slave used - */ - std::string master_ip_; - int master_port_; - int repl_state_; - int role_; - bool loop_partition_state_machine_; - bool force_full_sync_; - pthread_rwlock_t state_protector_; //protect below, use for master-slave mode - - /* - * Bgsave used - */ - pink::BGThread bgsave_thread_; - - /* - * Purgelogs use - */ - pink::BGThread purge_thread_; - - /* - * DBSync used - */ - slash::Mutex db_sync_protector_; - std::unordered_set db_sync_slaves_; - - /* - * Keyscan used - */ - pink::BGThread key_scan_thread_; - - /* - * Monitor used - */ - PikaMonitorThread* pika_monitor_thread_; - - /* - * Rsync used - */ - PikaRsyncService* pika_rsync_service_; - - /* - * Pubsub used - */ - pink::PubSubThread* pika_pubsub_thread_; - - /* - * Communication used - */ - PikaAuxiliaryThread* pika_auxiliary_thread_; - - /* - * - */ - std::vector redis_senders_; - - /* - * Slowlog used - */ - uint64_t slowlog_entry_id_; - pthread_rwlock_t slowlog_protector_; - std::list slowlog_list_; - - /* - * Statistic used - */ - StatisticData statistic_data_; - - PikaServer(PikaServer &ps); - void operator =(const PikaServer &ps); -}; - -#endif diff --git a/tools/pika_migrate/include/pika_set.h b/tools/pika_migrate/include/pika_set.h deleted file mode 100644 index fe00850751..0000000000 --- a/tools/pika_migrate/include/pika_set.h +++ /dev/null @@ -1,266 +0,0 @@ -// Copyright (c) 2015-present, Qihoo, Inc. All rights reserved. 
-// This source code is licensed under the BSD-style license found in the -// LICENSE file in the root directory of this source tree. An additional grant -// of patent rights can be found in the PATENTS file in the same directory. - -#ifndef PIKA_SET_H_ -#define PIKA_SET_H_ - -#include "include/pika_command.h" -#include "include/pika_partition.h" - -/* - * set - */ -class SAddCmd : public Cmd { - public: - SAddCmd(const std::string& name, int arity, uint16_t flag) - : Cmd(name, arity, flag) {} - virtual std::vector current_key() const { - std::vector res; - res.push_back(key_); - return res; - } - virtual void Do(std::shared_ptr partition = nullptr); - virtual Cmd* Clone() override { - return new SAddCmd(*this); - } - private: - std::string key_; - std::vector members_; - virtual void DoInitial() override; -}; - -class SPopCmd : public Cmd { - public: - SPopCmd(const std::string& name, int arity, uint16_t flag) - : Cmd(name, arity, flag) {} - virtual std::vector current_key() const { - std::vector res; - res.push_back(key_); - return res; - } - virtual void Do(std::shared_ptr partition = nullptr); - virtual Cmd* Clone() override { - return new SPopCmd(*this); - } - private: - std::string key_; - virtual void DoInitial() override; -}; - -class SCardCmd : public Cmd { - public: - SCardCmd(const std::string& name, int arity, uint16_t flag) - : Cmd(name, arity, flag) {} - virtual std::vector current_key() const { - std::vector res; - res.push_back(key_); - return res; - } - virtual void Do(std::shared_ptr partition = nullptr); - virtual Cmd* Clone() override { - return new SCardCmd(*this); - } - private: - std::string key_; - virtual void DoInitial() override; -}; - -class SMembersCmd : public Cmd { - public: - SMembersCmd(const std::string& name, int arity, uint16_t flag) - : Cmd(name, arity, flag) {} - virtual std::vector current_key() const { - std::vector res; - res.push_back(key_); - return res; - } - virtual void Do(std::shared_ptr partition = nullptr); - virtual Cmd* Clone() override { - return new SMembersCmd(*this); - } - private: - std::string key_; - virtual void DoInitial() override; -}; - -class SScanCmd : public Cmd { - public: - SScanCmd(const std::string& name, int arity, uint16_t flag) - : Cmd(name, arity, flag), pattern_("*"), count_(10) {} - virtual std::vector current_key() const { - std::vector res; - res.push_back(key_); - return res; - } - virtual void Do(std::shared_ptr partition = nullptr); - virtual Cmd* Clone() override { - return new SScanCmd(*this); - } - private: - std::string key_, pattern_; - int64_t cursor_, count_; - virtual void DoInitial() override; - virtual void Clear() { - pattern_ = "*"; - count_ = 10; - } -}; - -class SRemCmd : public Cmd { - public: - SRemCmd(const std::string& name, int arity, uint16_t flag) - : Cmd(name, arity, flag) {} - virtual std::vector current_key() const { - std::vector res; - res.push_back(key_); - return res; - } - virtual void Do(std::shared_ptr partition = nullptr); - virtual Cmd* Clone() override { - return new SRemCmd(*this); - } - private: - std::string key_; - std::vector members_; - virtual void DoInitial() override; -}; - -class SUnionCmd : public Cmd { - public: - SUnionCmd(const std::string& name, int arity, uint16_t flag) - : Cmd(name, arity, flag) {} - virtual void Do(std::shared_ptr partition = nullptr); - virtual Cmd* Clone() override { - return new SUnionCmd(*this); - } - private: - std::vector keys_; - virtual void DoInitial() override; -}; - -class SUnionstoreCmd : public Cmd { - public: - 
SUnionstoreCmd(const std::string& name, int arity, uint16_t flag) - : Cmd(name, arity, flag) {} - virtual void Do(std::shared_ptr partition = nullptr); - virtual Cmd* Clone() override { - return new SUnionstoreCmd(*this); - } - private: - std::string dest_key_; - std::vector keys_; - virtual void DoInitial() override; -}; - -class SInterCmd : public Cmd { - public: - SInterCmd(const std::string& name, int arity, uint16_t flag) - : Cmd(name, arity, flag) {} - virtual void Do(std::shared_ptr partition = nullptr); - virtual Cmd* Clone() override { - return new SInterCmd(*this); - } - private: - std::vector keys_; - virtual void DoInitial() override; -}; - -class SInterstoreCmd : public Cmd { - public: - SInterstoreCmd(const std::string& name, int arity, uint16_t flag) - : Cmd(name, arity, flag) {} - virtual void Do(std::shared_ptr partition = nullptr); - virtual Cmd* Clone() override { - return new SInterstoreCmd(*this); - } - private: - std::string dest_key_; - std::vector keys_; - virtual void DoInitial() override; -}; - -class SIsmemberCmd : public Cmd { - public: - SIsmemberCmd(const std::string& name, int arity, uint16_t flag) - : Cmd(name, arity, flag) {} - virtual std::vector current_key() const { - std::vector res; - res.push_back(key_); - return res; - } - virtual void Do(std::shared_ptr partition = nullptr); - virtual Cmd* Clone() override { - return new SIsmemberCmd(*this); - } - private: - std::string key_, member_; - virtual void DoInitial() override; -}; - -class SDiffCmd : public Cmd { - public: - SDiffCmd(const std::string& name, int arity, uint16_t flag) - : Cmd(name, arity, flag) {} - virtual void Do(std::shared_ptr partition = nullptr); - virtual Cmd* Clone() override { - return new SDiffCmd(*this); - } - private: - std::vector keys_; - virtual void DoInitial() override; -}; - -class SDiffstoreCmd : public Cmd { - public: - SDiffstoreCmd(const std::string& name, int arity, uint16_t flag) - : Cmd(name, arity, flag) {} - virtual void Do(std::shared_ptr partition = nullptr); - virtual Cmd* Clone() override { - return new SDiffstoreCmd(*this); - } - private: - std::string dest_key_; - std::vector keys_; - virtual void DoInitial() override; -}; - -class SMoveCmd : public Cmd { - public: - SMoveCmd(const std::string& name, int arity, uint16_t flag) - : Cmd(name, arity, flag) {} - virtual void Do(std::shared_ptr partition = nullptr); - virtual Cmd* Clone() override { - return new SMoveCmd(*this); - } - private: - std::string src_key_, dest_key_, member_; - virtual void DoInitial() override; -}; - -class SRandmemberCmd : public Cmd { - public: - SRandmemberCmd(const std::string& name, int arity, uint16_t flag) - : Cmd(name, arity, flag), count_(1) {} - virtual std::vector current_key() const { - std::vector res; - res.push_back(key_); - return res; - } - virtual void Do(std::shared_ptr partition = nullptr); - virtual Cmd* Clone() override { - return new SRandmemberCmd(*this); - } - private: - std::string key_; - int64_t count_; - bool reply_arr; - virtual void DoInitial() override; - virtual void Clear() { - count_ = 1; - reply_arr = false; - } -}; - -#endif diff --git a/tools/pika_migrate/include/pika_slaveping_thread.h b/tools/pika_migrate/include/pika_slaveping_thread.h deleted file mode 100644 index bc8e6a7ef9..0000000000 --- a/tools/pika_migrate/include/pika_slaveping_thread.h +++ /dev/null @@ -1,44 +0,0 @@ -// Copyright (c) 2019-present, Qihoo, Inc. All rights reserved. 
-// This source code is licensed under the BSD-style license found in the -// LICENSE file in the root directory of this source tree. An additional grant -// of patent rights can be found in the PATENTS file in the same directory. - -#ifndef PIKA_SLAVEPING_THREAD_H_ -#define PIKA_SLAVEPING_THREAD_H_ - -#include - -#include "slash/include/slash_status.h" -#include "pink/include/pink_cli.h" -#include "pink/include/pink_thread.h" - -using slash::Status; - -class PikaSlavepingThread : public pink::Thread { - public: - PikaSlavepingThread(int64_t sid) - : sid_(sid), is_first_send_(true) { - cli_ = pink::NewPbCli(); - cli_->set_connect_timeout(1500); - set_thread_name("SlavePingThread"); - }; - virtual ~PikaSlavepingThread() { - StopThread(); - delete cli_; - LOG(INFO) << "SlavepingThread " << thread_id() << " exit!!!"; - }; - - Status Send(); - Status RecvProc(); - - private: - int64_t sid_; - bool is_first_send_; - - int sockfd_; - pink::PinkCli *cli_; - - virtual void* ThreadMain(); -}; - -#endif diff --git a/tools/pika_migrate/include/pika_slot.h b/tools/pika_migrate/include/pika_slot.h deleted file mode 100644 index 052f87269b..0000000000 --- a/tools/pika_migrate/include/pika_slot.h +++ /dev/null @@ -1,191 +0,0 @@ -// Copyright (c) 2015-present, Qihoo, Inc. All rights reserved. -// This source code is licensed under the BSD-style license found in the -// LICENSE file in the root directory of this source tree. An additional grant -// of patent rights can be found in the PATENTS file in the same directory. - -#ifndef PIKA_SLOT_H_ -#define PIKA_SLOT_H_ - -#include "include/pika_command.h" - -class SlotsInfoCmd : public Cmd { - public: - SlotsInfoCmd(const std::string& name, int arity, uint16_t flag) - : Cmd(name, arity, flag) {} - virtual void Do(std::shared_ptr partition = nullptr); - virtual Cmd* Clone() override { - return new SlotsInfoCmd(*this); - } - private: - virtual void DoInitial() override; -}; - -class SlotsHashKeyCmd : public Cmd { - public: - SlotsHashKeyCmd(const std::string& name, int arity, uint16_t flag) - : Cmd(name, arity, flag) {} - virtual void Do(std::shared_ptr partition = nullptr); - virtual Cmd* Clone() override { - return new SlotsHashKeyCmd(*this); - } - private: - virtual void DoInitial() override; -}; - -class SlotsMgrtSlotAsyncCmd : public Cmd { - public: - SlotsMgrtSlotAsyncCmd(const std::string& name, int arity, uint16_t flag) - : Cmd(name, arity, flag) {} - virtual void Do(std::shared_ptr partition = nullptr); - virtual Cmd* Clone() override { - return new SlotsMgrtSlotAsyncCmd(*this); - } - private: - virtual void DoInitial() override; -}; - -class SlotsMgrtTagSlotAsyncCmd : public Cmd { - public: - SlotsMgrtTagSlotAsyncCmd(const std::string& name, int arity, uint16_t flag) - : Cmd(name, arity, flag), dest_port_(0), slot_num_(-1) {} - virtual void Do(std::shared_ptr partition = nullptr); - virtual Cmd* Clone() override { - return new SlotsMgrtTagSlotAsyncCmd(*this); - } - private: - virtual void DoInitial() override; - std::string dest_ip_; - int64_t dest_port_; - int64_t slot_num_; - virtual void Clear() { - dest_ip_.clear(); - dest_port_ = 0; - slot_num_ = -1; - } -}; - -class SlotsScanCmd : public Cmd { - public: - SlotsScanCmd(const std::string& name, int arity, uint16_t flag) - : Cmd(name, arity, flag), pattern_("*"), count_(10) {} - virtual void Do(std::shared_ptr partition = nullptr); - virtual Cmd* Clone() override { - return new SlotsScanCmd(*this); - } - private: - int64_t cursor_; - uint32_t slotnum_; - std::string pattern_; - int64_t count_; - 
virtual void DoInitial() override; - virtual void Clear() { - pattern_ = "*"; - count_ = 10; - } -}; - -class SlotsDelCmd : public Cmd { - public: - SlotsDelCmd(const std::string& name, int arity, uint16_t flag) - : Cmd(name, arity, flag) {} - virtual void Do(std::shared_ptr partition = nullptr); - virtual Cmd* Clone() override { - return new SlotsDelCmd(*this); - } - private: - std::vector slots_; - virtual void DoInitial() override; - virtual void Clear() { - slots_.clear(); - } -}; - -class SlotsMgrtExecWrapperCmd : public Cmd { - public: - SlotsMgrtExecWrapperCmd(const std::string& name, int arity, uint16_t flag) - : Cmd(name, arity, flag) {} - virtual void Do(std::shared_ptr partition = nullptr); - virtual Cmd* Clone() override { - return new SlotsMgrtExecWrapperCmd(*this); - } - private: - std::string key_; - virtual void DoInitial() override; - virtual void Clear() { - key_.clear(); - } -}; - -class SlotsMgrtAsyncStatusCmd : public Cmd { - public: - SlotsMgrtAsyncStatusCmd(const std::string& name, int arity, uint16_t flag) - : Cmd(name, arity, flag) {} - virtual void Do(std::shared_ptr partition = nullptr); - virtual Cmd* Clone() override { - return new SlotsMgrtAsyncStatusCmd(*this); - } - private: - virtual void DoInitial() override; -}; - -class SlotsMgrtAsyncCancelCmd : public Cmd { - public: - SlotsMgrtAsyncCancelCmd(const std::string& name, int arity, uint16_t flag) - : Cmd(name, arity, flag) {} - virtual void Do(std::shared_ptr partition = nullptr); - virtual Cmd* Clone() override { - return new SlotsMgrtAsyncCancelCmd(*this); - } - private: - virtual void DoInitial() override; -}; - -class SlotsMgrtSlotCmd : public Cmd { - public: - SlotsMgrtSlotCmd(const std::string& name, int arity, uint16_t flag) - : Cmd(name, arity, flag) {} - virtual void Do(std::shared_ptr partition = nullptr); - virtual Cmd* Clone() override { - return new SlotsMgrtSlotCmd(*this); - } - private: - virtual void DoInitial() override; -}; - -class SlotsMgrtTagSlotCmd : public Cmd { - public: - SlotsMgrtTagSlotCmd(const std::string& name, int arity, uint16_t flag) - : Cmd(name, arity, flag) {} - virtual void Do(std::shared_ptr partition = nullptr); - virtual Cmd* Clone() override { - return new SlotsMgrtTagSlotCmd(*this); - } - private: - virtual void DoInitial() override; -}; - -class SlotsMgrtOneCmd : public Cmd { - public: - SlotsMgrtOneCmd(const std::string& name, int arity, uint16_t flag) - : Cmd(name, arity, flag) {} - virtual void Do(std::shared_ptr partition = nullptr); - virtual Cmd* Clone() override { - return new SlotsMgrtOneCmd(*this); - } - private: - virtual void DoInitial() override; -}; - -class SlotsMgrtTagOneCmd : public Cmd { - public: - SlotsMgrtTagOneCmd(const std::string& name, int arity, uint16_t flag) - : Cmd(name, arity, flag) {} - virtual void Do(std::shared_ptr partition = nullptr); - virtual Cmd* Clone() override { - return new SlotsMgrtTagOneCmd(*this); - } - private: - virtual void DoInitial() override; -}; - -#endif // PIKA_SLOT_H_ diff --git a/tools/pika_migrate/include/pika_table.h b/tools/pika_migrate/include/pika_table.h deleted file mode 100644 index adf6b62b6c..0000000000 --- a/tools/pika_migrate/include/pika_table.h +++ /dev/null @@ -1,89 +0,0 @@ -// Copyright (c) 2018-present, Qihoo, Inc. All rights reserved. -// This source code is licensed under the BSD-style license found in the -// LICENSE file in the root directory of this source tree. An additional grant -// of patent rights can be found in the PATENTS file in the same directory. 
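Every command header in this patch follows the same three-step contract: DoInitial() parses and validates argv into typed private fields, Do() executes against a partition, and Clone() hands the dispatcher a fresh copy so per-request state never leaks between executions. A hypothetical minimal subclass showing that shape, with a simplified stand-in base (the real Cmd base in pika_command.h carries far more machinery):

#include <string>
#include <vector>

// Stand-in base, mirroring only the shape of pika's Cmd.
class CmdSketch {
 public:
  CmdSketch(std::string name, int arity) : name_(std::move(name)), arity_(arity) {}
  virtual ~CmdSketch() = default;
  virtual void Do() = 0;                 // execute against a partition
  virtual CmdSketch* Clone() = 0;        // fresh copy per dispatch
 protected:
  virtual void DoInitial() = 0;          // parse argv_ into typed fields
  std::string name_;
  int arity_;
  std::vector<std::string> argv_;
};

class SCardLikeCmd : public CmdSketch {  // follows SCardCmd's shape above
 public:
  using CmdSketch::CmdSketch;
  void Do() override { /* look up key_ in the store, reply with cardinality */ }
  CmdSketch* Clone() override { return new SCardLikeCmd(*this); }
 private:
  void DoInitial() override {
    if (argv_.size() == 2) key_ = argv_[1];  // "SCARD key" has arity 2
  }
  std::string key_;
};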
-
-#ifndef PIKA_TABLE_H_
-#define PIKA_TABLE_H_
-
-#include "blackwidow/blackwidow.h"
-
-#include "include/pika_command.h"
-#include "include/pika_partition.h"
-
-class Table : public std::enable_shared_from_this<Table> {
- public:
-  Table(const std::string& table_name,
-        uint32_t partition_num,
-        const std::string& db_path,
-        const std::string& log_path);
-  virtual ~Table();
-
-  friend class Cmd;
-  friend class InfoCmd;
-  friend class PkClusterInfoCmd;
-  friend class PikaServer;
-
-  std::string GetTableName();
-  void BgSaveTable();
-  void CompactTable(const blackwidow::DataType& type);
-  bool FlushPartitionDB();
-  bool FlushPartitionSubDB(const std::string& db_name);
-  bool IsBinlogIoError();
-  uint32_t PartitionNum();
-
-  // Dynamic change partition
-  Status AddPartitions(const std::set<uint32_t>& partition_ids);
-  Status RemovePartitions(const std::set<uint32_t>& partition_ids);
-
-  // KeyScan use;
-  void KeyScan();
-  bool IsKeyScaning();
-  void RunKeyScan();
-  void StopKeyScan();
-  void ScanDatabase(const blackwidow::DataType& type);
-  KeyScanInfo GetKeyScanInfo();
-  Status GetPartitionsKeyScanInfo(std::map<uint32_t, KeyScanInfo>* infos);
-
-  // Compact use;
-  void Compact(const blackwidow::DataType& type);
-
-  void LeaveAllPartition();
-  std::set<uint32_t> GetPartitionIds();
-  std::shared_ptr<Partition> GetPartitionById(uint32_t partition_id);
-  std::shared_ptr<Partition> GetPartitionByKey(const std::string& key);
-
- private:
-  std::string table_name_;
-  uint32_t partition_num_;
-  std::string db_path_;
-  std::string log_path_;
-
-  // lock order
-  // partitions_rw_ > key_scan_protector_
-
-  pthread_rwlock_t partitions_rw_;
-  std::map<uint32_t, std::shared_ptr<Partition>> partitions_;
-
-  /*
-   * KeyScan use
-   */
-  static void DoKeyScan(void *arg);
-  void InitKeyScan();
-  slash::Mutex key_scan_protector_;
-  KeyScanInfo key_scan_info_;
-
-  /*
-   * No copy and copy-assign allowed
-   */
-  Table(const Table&);
-  void operator=(const Table&);
-};
-
-struct BgTaskArg {
-  std::shared_ptr<Table> 
table; - std::shared_ptr partition; -}; - - -#endif diff --git a/tools/pika_migrate/include/pika_version.h b/tools/pika_migrate/include/pika_version.h deleted file mode 100644 index c0c6a2b617..0000000000 --- a/tools/pika_migrate/include/pika_version.h +++ /dev/null @@ -1,13 +0,0 @@ -// Copyright (c) 2015-present, Qihoo, Inc. All rights reserved. -// This source code is licensed under the BSD-style license found in the -// LICENSE file in the root directory of this source tree. An additional grant -// of patent rights can be found in the PATENTS file in the same directory. - -#ifndef INCLUDE_PIKA_VERSION_H_ -#define INCLUDE_PIKA_VERSION_H_ - -#define PIKA_MAJOR 3 -#define PIKA_MINOR 2 -#define PIKA_PATCH 7 - -#endif // INCLUDE_PIKA_VERSION_H_ diff --git a/tools/pika_migrate/include/pika_zset.h b/tools/pika_migrate/include/pika_zset.h deleted file mode 100644 index d4ab4ca6ea..0000000000 --- a/tools/pika_migrate/include/pika_zset.h +++ /dev/null @@ -1,516 +0,0 @@ -// Copyright (c) 2015-present, Qihoo, Inc. All rights reserved. -// This source code is licensed under the BSD-style license found in the -// LICENSE file in the root directory of this source tree. An additional grant -// of patent rights can be found in the PATENTS file in the same directory. - -#ifndef PIKA_ZSET_H_ -#define PIKA_ZSET_H_ - -#include "blackwidow/blackwidow.h" - -#include "include/pika_command.h" -#include "include/pika_partition.h" - -/* - * zset - */ -class ZAddCmd : public Cmd { - public: - ZAddCmd(const std::string& name, int arity, uint16_t flag) - : Cmd(name, arity, flag) {} - virtual std::vector current_key() const { - std::vector res; - res.push_back(key_); - return res; - } - virtual void Do(std::shared_ptr partition = nullptr); - virtual Cmd* Clone() override { - return new ZAddCmd(*this); - } - private: - std::string key_; - std::vector score_members; - virtual void DoInitial() override; -}; - -class ZCardCmd : public Cmd { - public: - ZCardCmd(const std::string& name, int arity, uint16_t flag) - : Cmd(name, arity, flag) {} - virtual std::vector current_key() const { - std::vector res; - res.push_back(key_); - return res; - } - virtual void Do(std::shared_ptr partition = nullptr); - virtual Cmd* Clone() override { - return new ZCardCmd(*this); - } - private: - std::string key_; - virtual void DoInitial() override; -}; - -class ZScanCmd : public Cmd { - public: - ZScanCmd(const std::string& name, int arity, uint16_t flag) - : Cmd(name, arity, flag), pattern_("*"), count_(10) {} - virtual std::vector current_key() const { - std::vector res; - res.push_back(key_); - return res; - } - virtual void Do(std::shared_ptr partition = nullptr); - virtual Cmd* Clone() override { - return new ZScanCmd(*this); - } - private: - std::string key_, pattern_; - int64_t cursor_, count_; - virtual void DoInitial() override; - virtual void Clear() { - pattern_ = "*"; - count_ = 10; - } -}; - -class ZIncrbyCmd : public Cmd { - public: - ZIncrbyCmd(const std::string& name, int arity, uint16_t flag) - : Cmd(name, arity, flag) {} - virtual std::vector current_key() const { - std::vector res; - res.push_back(key_); - return res; - } - virtual void Do(std::shared_ptr partition = nullptr); - virtual Cmd* Clone() override { - return new ZIncrbyCmd(*this); - } - private: - std::string key_, member_; - double by_; - virtual void DoInitial() override; -}; - -class ZsetRangeParentCmd : public Cmd { - public: - ZsetRangeParentCmd(const std::string& name, int arity, uint16_t flag) - : Cmd(name, arity, flag), is_ws_(false) {} - protected: - 
std::string key_; - int64_t start_, stop_; - bool is_ws_; - virtual void DoInitial() override; - virtual void Clear() { - is_ws_ = false; - } -}; - -class ZRangeCmd : public ZsetRangeParentCmd { - public: - ZRangeCmd(const std::string& name, int arity, uint16_t flag) - : ZsetRangeParentCmd(name, arity, flag) {} - virtual std::vector current_key() const { - std::vector res; - res.push_back(key_); - return res; - } - virtual void Do(std::shared_ptr partition = nullptr); - virtual Cmd* Clone() override { - return new ZRangeCmd(*this); - } - private: - virtual void DoInitial() override; -}; - -class ZRevrangeCmd : public ZsetRangeParentCmd { - public: - ZRevrangeCmd(const std::string& name, int arity, uint16_t flag) - : ZsetRangeParentCmd(name, arity, flag) {} - virtual std::vector current_key() const { - std::vector res; - res.push_back(key_); - return res; - } - virtual void Do(std::shared_ptr partition = nullptr); - virtual Cmd* Clone() override { - return new ZRevrangeCmd(*this); - } - private: - virtual void DoInitial() override; -}; - -class ZsetRangebyscoreParentCmd : public Cmd { - public: - ZsetRangebyscoreParentCmd(const std::string& name, int arity, uint16_t flag) - : Cmd(name, arity, flag), left_close_(true), right_close_(true), with_scores_(false), offset_(0), count_(-1) {} - protected: - std::string key_; - double min_score_, max_score_; - bool left_close_, right_close_, with_scores_; - int64_t offset_, count_; - virtual void DoInitial() override; - virtual void Clear() { - left_close_ = right_close_ = true; - with_scores_ = false; - offset_ = 0; - count_ = -1; - } -}; - -class ZRangebyscoreCmd : public ZsetRangebyscoreParentCmd { - public: - ZRangebyscoreCmd(const std::string& name, int arity, uint16_t flag) - : ZsetRangebyscoreParentCmd(name, arity, flag) {} - virtual std::vector current_key() const { - std::vector res; - res.push_back(key_); - return res; - } - virtual void Do(std::shared_ptr partition = nullptr); - virtual Cmd* Clone() override { - return new ZRangebyscoreCmd(*this); - } - private: - virtual void DoInitial() override; -}; - -class ZRevrangebyscoreCmd : public ZsetRangebyscoreParentCmd { - public: - ZRevrangebyscoreCmd(const std::string& name, int arity, uint16_t flag) - : ZsetRangebyscoreParentCmd(name, arity, flag) {} - virtual std::vector current_key() const { - std::vector res; - res.push_back(key_); - return res; - } - virtual void Do(std::shared_ptr partition = nullptr); - virtual Cmd* Clone() override { - return new ZRevrangebyscoreCmd(*this); - } - private: - virtual void DoInitial() override; -}; - -class ZCountCmd : public Cmd { - public: - ZCountCmd(const std::string& name, int arity, uint16_t flag) - : Cmd(name, arity, flag), left_close_(true), right_close_(true) {} - virtual std::vector current_key() const { - std::vector res; - res.push_back(key_); - return res; - } - virtual void Do(std::shared_ptr partition = nullptr); - virtual Cmd* Clone() override { - return new ZCountCmd(*this); - } - private: - std::string key_; - double min_score_, max_score_; - bool left_close_, right_close_; - virtual void DoInitial() override; - virtual void Clear() { - left_close_ = true; - right_close_ = true; - } -}; - -class ZRemCmd : public Cmd { - public: - ZRemCmd(const std::string& name, int arity, uint16_t flag) - : Cmd(name, arity, flag) {} - virtual std::vector current_key() const { - std::vector res; - res.push_back(key_); - return res; - } - virtual void Do(std::shared_ptr partition = nullptr); - virtual Cmd* Clone() override { - return new ZRemCmd(*this); 
- } - private: - std::string key_; - std::vector members_; - virtual void DoInitial() override; -}; - -class ZsetUIstoreParentCmd : public Cmd { - public: - ZsetUIstoreParentCmd(const std::string& name, int arity, uint16_t flag) - : Cmd(name, arity, flag), aggregate_(blackwidow::SUM) {} - protected: - std::string dest_key_; - int64_t num_keys_; - blackwidow::AGGREGATE aggregate_; - std::vector keys_; - std::vector weights_; - virtual void DoInitial() override; - virtual void Clear() { - aggregate_ = blackwidow::SUM; - } -}; - -class ZUnionstoreCmd : public ZsetUIstoreParentCmd { - public: - ZUnionstoreCmd(const std::string& name, int arity, uint16_t flag) - : ZsetUIstoreParentCmd(name, arity, flag) {} - virtual void Do(std::shared_ptr partition = nullptr); - virtual Cmd* Clone() override { - return new ZUnionstoreCmd(*this); - } - private: - virtual void DoInitial() override; -}; - -class ZInterstoreCmd : public ZsetUIstoreParentCmd { - public: - ZInterstoreCmd(const std::string& name, int arity, uint16_t flag) - : ZsetUIstoreParentCmd(name, arity, flag) {} - virtual void Do(std::shared_ptr partition = nullptr); - virtual Cmd* Clone() override { - return new ZInterstoreCmd(*this); - } - private: - virtual void DoInitial() override; -}; - -class ZsetRankParentCmd : public Cmd { - public: - ZsetRankParentCmd(const std::string& name, int arity, uint16_t flag) - : Cmd(name, arity, flag) {} - protected: - std::string key_, member_; - virtual void DoInitial() override; -}; - -class ZRankCmd : public ZsetRankParentCmd { - public: - ZRankCmd(const std::string& name, int arity, uint16_t flag) - : ZsetRankParentCmd(name, arity, flag) {} - virtual std::vector current_key() const { - std::vector res; - res.push_back(key_); - return res; - } - virtual void Do(std::shared_ptr partition = nullptr); - virtual Cmd* Clone() override { - return new ZRankCmd(*this); - } - private: - virtual void DoInitial() override; -}; - -class ZRevrankCmd : public ZsetRankParentCmd { - public: - ZRevrankCmd(const std::string& name, int arity, uint16_t flag) - : ZsetRankParentCmd(name, arity, flag) {} - virtual std::vector current_key() const { - std::vector res; - res.push_back(key_); - return res; - } - virtual void Do(std::shared_ptr partition = nullptr); - virtual Cmd* Clone() override { - return new ZRevrankCmd(*this); - } - private: - virtual void DoInitial() override; -}; - -class ZScoreCmd : public ZsetRankParentCmd { - public: - ZScoreCmd(const std::string& name, int arity, uint16_t flag) - : ZsetRankParentCmd(name, arity, flag) {} - virtual std::vector current_key() const { - std::vector res; - res.push_back(key_); - return res; - } - virtual void Do(std::shared_ptr partition = nullptr); - virtual Cmd* Clone() override { - return new ZScoreCmd(*this); - } - private: - std::string key_, member_; - virtual void DoInitial() override; -}; - - -class ZsetRangebylexParentCmd : public Cmd { - public: - ZsetRangebylexParentCmd(const std::string& name, int arity, uint16_t flag) - : Cmd(name, arity, flag), left_close_(true), right_close_(true), offset_(0), count_(-1) {} - protected: - std::string key_, min_member_, max_member_; - bool left_close_, right_close_; - int64_t offset_, count_; - virtual void DoInitial() override; - virtual void Clear() { - left_close_ = right_close_ = true; - offset_ = 0; - count_ = -1; - } -}; - -class ZRangebylexCmd : public ZsetRangebylexParentCmd { - public: - ZRangebylexCmd(const std::string& name, int arity, uint16_t flag) - : ZsetRangebylexParentCmd(name, arity, flag) {} - virtual 
std::vector current_key() const { - std::vector res; - res.push_back(key_); - return res; - } - virtual void Do(std::shared_ptr partition = nullptr); - virtual Cmd* Clone() override { - return new ZRangebylexCmd(*this); - } - private: - virtual void DoInitial() override; -}; - -class ZRevrangebylexCmd : public ZsetRangebylexParentCmd { - public: - ZRevrangebylexCmd(const std::string& name, int arity, uint16_t flag) - : ZsetRangebylexParentCmd(name, arity, flag) {} - virtual std::vector current_key() const { - std::vector res; - res.push_back(key_); - return res; - } - virtual void Do(std::shared_ptr partition = nullptr); - virtual Cmd* Clone() override { - return new ZRevrangebylexCmd(*this); - } - private: - virtual void DoInitial() override; -}; - -class ZLexcountCmd : public Cmd { - public: - ZLexcountCmd(const std::string& name, int arity, uint16_t flag) - : Cmd(name, arity, flag), left_close_(true), right_close_(true) {} - virtual std::vector current_key() const { - std::vector res; - res.push_back(key_); - return res; - } - virtual void Do(std::shared_ptr partition = nullptr); - virtual Cmd* Clone() override { - return new ZLexcountCmd(*this); - } - private: - std::string key_, min_member_, max_member_; - bool left_close_, right_close_; - virtual void DoInitial() override; - virtual void Clear() { - left_close_ = right_close_ = true; - } -}; - -class ZRemrangebyrankCmd : public Cmd { - public: - ZRemrangebyrankCmd(const std::string& name, int arity, uint16_t flag) - : Cmd(name, arity, flag) {} - virtual std::vector current_key() const { - std::vector res; - res.push_back(key_); - return res; - } - virtual void Do(std::shared_ptr partition = nullptr); - virtual Cmd* Clone() override { - return new ZRemrangebyrankCmd(*this); - } - private: - std::string key_; - int64_t start_rank_, stop_rank_; - virtual void DoInitial() override; -}; - -class ZRemrangebyscoreCmd : public Cmd { - public: - ZRemrangebyscoreCmd(const std::string& name, int arity, uint16_t flag) - : Cmd(name, arity, flag), left_close_(true), right_close_(true) {} - virtual std::vector current_key() const { - std::vector res; - res.push_back(key_); - return res; - } - virtual void Do(std::shared_ptr partition = nullptr); - virtual Cmd* Clone() override { - return new ZRemrangebyscoreCmd(*this); - } - private: - std::string key_; - double min_score_, max_score_; - bool left_close_, right_close_; - virtual void DoInitial() override; - virtual void Clear() { - left_close_ = right_close_ = true; - } -}; - -class ZRemrangebylexCmd : public Cmd { - public: - ZRemrangebylexCmd(const std::string& name, int arity, uint16_t flag) - : Cmd(name, arity, flag), left_close_(true), right_close_(true) {} - virtual std::vector current_key() const { - std::vector res; - res.push_back(key_); - return res; - } - virtual void Do(std::shared_ptr partition = nullptr); - virtual Cmd* Clone() override { - return new ZRemrangebylexCmd(*this); - } - private: - std::string key_; - std::string min_member_, max_member_; - bool left_close_, right_close_; - virtual void DoInitial() override; - virtual void Clear() { - left_close_ = right_close_ = true; - } -}; - -class ZPopmaxCmd : public Cmd { - public: - ZPopmaxCmd(const std::string& name, int arity, uint16_t flag) - : Cmd(name, arity, flag) {} - virtual std::vector current_key() const { - std::vector res; - res.emplace_back(key_); - return res; - } - virtual void Do(std::shared_ptr partition = nullptr); - virtual Cmd* Clone() override { - return new ZPopmaxCmd(*this); - } - private: - virtual void 
DoInitial() override; - std::string key_; - int64_t count_; -}; - -class ZPopminCmd : public Cmd { - public: - ZPopminCmd(const std::string& name, int arity, uint16_t flag) - : Cmd(name, arity, flag) {} - virtual std::vector current_key() const { - std::vector res; - res.push_back(key_); - return res; - } - virtual void Do(std::shared_ptr partition = nullptr); - virtual Cmd* Clone() override { - return new ZPopminCmd(*this); - } - private: - virtual void DoInitial() override; - std::string key_; - int64_t count_; -}; - -#endif diff --git a/tools/pika_migrate/include/redis_sender.h b/tools/pika_migrate/include/redis_sender.h deleted file mode 100644 index aa905e3d68..0000000000 --- a/tools/pika_migrate/include/redis_sender.h +++ /dev/null @@ -1,47 +0,0 @@ -#ifndef REDIS_SENDER_H_ -#define REDIS_SENDER_H_ - -#include -#include -#include -#include -#include - -#include "pink/include/bg_thread.h" -#include "pink/include/pink_cli.h" -#include "pink/include/redis_cli.h" - -class RedisSender : public pink::Thread { - public: - RedisSender(int id, std::string ip, int64_t port, std::string password); - virtual ~RedisSender(); - void Stop(void); - int64_t elements() { - return elements_; - } - - void SendRedisCommand(const std::string &command); - - private: - int SendCommand(std::string &command); - void ConnectRedis(); - - private: - int id_; - pink::PinkCli *cli_; - slash::CondVar rsignal_; - slash::CondVar wsignal_; - slash::Mutex commands_mutex_; - std::queue commands_queue_; - std::string ip_; - int port_; - std::string password_; - bool should_exit_; - int32_t cnt_; - int64_t elements_; - std::atomic last_write_time_; - - virtual void *ThreadMain(); -}; - -#endif diff --git a/tools/pika_migrate/pikatests.sh b/tools/pika_migrate/pikatests.sh deleted file mode 100755 index bf17cf73e5..0000000000 --- a/tools/pika_migrate/pikatests.sh +++ /dev/null @@ -1,10 +0,0 @@ -#!/bin/bash -rm -rf ./log -rm -rf .db -cp output/bin/pika src/redis-server -cp output/conf/pika.conf tests/assets/default.conf - -tclsh tests/test_helper.tcl --clients 1 --single unit/$1 -rm src/redis-server -rm -rf ./log -rm -rf ./db diff --git a/tools/pika_migrate/src/build_version.cc.in b/tools/pika_migrate/src/build_version.cc.in deleted file mode 100644 index de52eeaeba..0000000000 --- a/tools/pika_migrate/src/build_version.cc.in +++ /dev/null @@ -1,10 +0,0 @@ -// Copyright (c) 2015-present, Qihoo, Inc. All rights reserved. -// This source code is licensed under the BSD-style license found in the -// LICENSE file in the root directory of this source tree. An additional grant -// of patent rights can be found in the PATENTS file in the same directory. - -#include "include/build_version.h" -const char* pika_build_git_sha = - "pika_git_sha:@@GIT_SHA@@"; -const char* pika_build_git_date = "pika_build_git_date:@@GIT_DATE_TIME@@"; -const char* pika_build_compile_date = __DATE__; diff --git a/tools/pika_migrate/src/migrator_thread.cc b/tools/pika_migrate/src/migrator_thread.cc deleted file mode 100644 index a7b7122c51..0000000000 --- a/tools/pika_migrate/src/migrator_thread.cc +++ /dev/null @@ -1,475 +0,0 @@ -// Copyright (c) 2015-present, Qihoo, Inc. All rights reserved. -// This source code is licensed under the BSD-style license found in the -// LICENSE file in the root directory of this source tree. An additional grant -// of patent rights can be found in the PATENTS file in the same directory. 
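All five Migrate*DB methods below share one control flow: Scan a batch of keys for their data type, re-serialize each key's contents into write commands (SET, RPUSH, HMSET, SADD, or ZADD, batched by sync_batch_num), and append an EXPIRE whenever the key carries a TTL. A schematic of that loop with hypothetical stand-ins for the blackwidow and pink calls; only the control flow is faithful to the code that follows:

#include <cstdint>
#include <string>
#include <vector>

// Hypothetical stand-ins so the sketch is self-contained.
static int64_t FakeScan(int64_t cursor, std::vector<std::string>* keys) {
  keys->push_back("k" + std::to_string(cursor));
  return cursor < 2 ? cursor + 1 : 0;            // 0 means the scan is finished
}
static int64_t FakeTTL(const std::string&) { return 30; }
static void Dispatch(const std::string&, const std::string&) {}

void MigrateOneType() {
  int64_t cursor = 0;
  std::vector<std::string> keys;
  while (true) {
    keys.clear();
    cursor = FakeScan(cursor, &keys);            // one batch of keys per round
    for (const auto& key : keys) {
      // 1. rebuild the key's contents into write commands for the target,
      //    chunked so no single command exceeds the sync batch size;
      // 2. if the key carries a TTL, append an EXPIRE so it survives the move.
      int64_t ttl = FakeTTL(key);
      if (ttl > 0) {
        Dispatch("EXPIRE " + key + " " + std::to_string(ttl), key);
      }
    }
    if (!cursor) break;                          // mirrors `if (!cursor) { break; }`
  }
}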
- -#include "include/migrator_thread.h" - -#include - -#include -#include -#include - -#include "blackwidow/blackwidow.h" -#include "src/redis_strings.h" -#include "src/redis_lists.h" -#include "src/redis_hashes.h" -#include "src/redis_sets.h" -#include "src/redis_zsets.h" -#include "src/scope_snapshot.h" -#include "src/strings_value_format.h" - -#include "include/pika_conf.h" - -const int64_t MAX_BATCH_NUM = 30000; - -extern PikaConf* g_pika_conf; - -MigratorThread::~MigratorThread() { -} - -void MigratorThread::MigrateStringsDB() { - blackwidow::BlackWidow *bw = (blackwidow::BlackWidow*)(db_); - - int64_t scan_batch_num = g_pika_conf->sync_batch_num() * 10; - if (MAX_BATCH_NUM < scan_batch_num) { - if (g_pika_conf->sync_batch_num() < MAX_BATCH_NUM) { - scan_batch_num = MAX_BATCH_NUM; - } else { - scan_batch_num = g_pika_conf->sync_batch_num() * 2; - } - } - - int64_t ttl = -1; - int64_t cursor = 0; - blackwidow::Status s; - std::string value; - std::vector keys; - std::map type_timestamp; - std::map type_status; - while (true) { - cursor = bw->Scan(blackwidow::DataType::kStrings, cursor, "*", scan_batch_num, &keys); - - for (const auto& key : keys) { - s = bw->Get(key, &value); - if (!s.ok()) { - LOG(WARNING) << "get " << key << " error: " << s.ToString(); - continue; - } - - pink::RedisCmdArgsType argv; - std::string cmd; - - argv.push_back("SET"); - argv.push_back(key); - argv.push_back(value); - - ttl = -1; - type_status.clear(); - type_timestamp = bw->TTL(key, &type_status); - if (type_timestamp[blackwidow::kStrings] != -2) { - ttl = type_timestamp[blackwidow::kStrings]; - } - - if (ttl > 0) { - argv.push_back("EX"); - argv.push_back(std::to_string(ttl)); - } - - pink::SerializeRedisCommand(argv, &cmd); - PlusNum(); - DispatchKey(cmd, key); - } - - if (!cursor) { - break; - } - } -} - -void MigratorThread::MigrateListsDB() { - blackwidow::BlackWidow *bw = (blackwidow::BlackWidow*)(db_); - - int64_t scan_batch_num = g_pika_conf->sync_batch_num() * 10; - if (MAX_BATCH_NUM < scan_batch_num) { - if (g_pika_conf->sync_batch_num() < MAX_BATCH_NUM) { - scan_batch_num = MAX_BATCH_NUM; - } else { - scan_batch_num = g_pika_conf->sync_batch_num() * 2; - } - } - - int64_t ttl = -1; - int64_t cursor = 0; - blackwidow::Status s; - std::vector keys; - std::map type_timestamp; - std::map type_status; - - while (true) { - cursor = bw->Scan(blackwidow::DataType::kLists, cursor, "*", scan_batch_num, &keys); - - for (const auto& key : keys) { - int64_t pos = 0; - std::vector nodes; - blackwidow::Status s = bw->LRange(key, pos, pos + g_pika_conf->sync_batch_num() - 1, &nodes); - if (!s.ok()) { - LOG(WARNING) << "db->LRange(key:" << key << ", pos:" << pos - << ", batch size: " << g_pika_conf->sync_batch_num() << ") = " << s.ToString(); - continue; - } - - while (s.ok() && !should_exit_ && !nodes.empty()) { - pink::RedisCmdArgsType argv; - std::string cmd; - - argv.push_back("RPUSH"); - argv.push_back(key); - for (const auto& node : nodes) { - argv.push_back(node); - } - - pink::SerializeRedisCommand(argv, &cmd); - PlusNum(); - DispatchKey(cmd, key); - - pos += g_pika_conf->sync_batch_num(); - nodes.clear(); - s = bw->LRange(key, pos, pos + g_pika_conf->sync_batch_num() - 1, &nodes); - if (!s.ok()) { - LOG(WARNING) << "db->LRange(key:" << key << ", pos:" << pos - << ", batch size:" << g_pika_conf->sync_batch_num() << ") = " << s.ToString(); - } - } - - ttl = -1; - type_status.clear(); - type_timestamp = bw->TTL(key, &type_status); - if (type_timestamp[blackwidow::kLists] != -2) { - ttl = 
type_timestamp[blackwidow::kLists]; - } - - if (s.ok() && ttl > 0) { - pink::RedisCmdArgsType argv; - std::string cmd; - - argv.push_back("EXPIRE"); - argv.push_back(key); - argv.push_back(std::to_string(ttl)); - - pink::SerializeRedisCommand(argv, &cmd); - PlusNum(); - DispatchKey(cmd, key); - } - } - - if (!cursor) { - break; - } - } -} - -void MigratorThread::MigrateHashesDB() { - blackwidow::BlackWidow *bw = (blackwidow::BlackWidow*)(db_); - - int64_t scan_batch_num = g_pika_conf->sync_batch_num() * 10; - if (MAX_BATCH_NUM < scan_batch_num) { - if (g_pika_conf->sync_batch_num() < MAX_BATCH_NUM) { - scan_batch_num = MAX_BATCH_NUM; - } else { - scan_batch_num = g_pika_conf->sync_batch_num() * 2; - } - } - - int64_t ttl = -1; - int64_t cursor = 0; - blackwidow::Status s; - std::vector keys; - std::map type_timestamp; - std::map type_status; - - while (true) { - cursor = bw->Scan(blackwidow::DataType::kHashes, cursor, "*", scan_batch_num, &keys); - - for (const auto& key : keys) { - std::vector fvs; - blackwidow::Status s = bw->HGetall(key, &fvs); - if (!s.ok()) { - LOG(WARNING) << "db->HGetall(key:" << key << ") = " << s.ToString(); - continue; - } - - auto it = fvs.begin(); - while (!should_exit_ && it != fvs.end()) { - pink::RedisCmdArgsType argv; - std::string cmd; - - argv.push_back("HMSET"); - argv.push_back(key); - for (int idx = 0; - idx < g_pika_conf->sync_batch_num() && !should_exit_ && it != fvs.end(); - idx++, it++) { - argv.push_back(it->field); - argv.push_back(it->value); - } - - pink::SerializeRedisCommand(argv, &cmd); - PlusNum(); - DispatchKey(cmd, key); - } - - ttl = -1; - type_status.clear(); - type_timestamp = bw->TTL(key, &type_status); - if (type_timestamp[blackwidow::kHashes] != -2) { - ttl = type_timestamp[blackwidow::kHashes]; - } - - if (s.ok() && ttl > 0) { - pink::RedisCmdArgsType argv; - std::string cmd; - - argv.push_back("EXPIRE"); - argv.push_back(key); - argv.push_back(std::to_string(ttl)); - - pink::SerializeRedisCommand(argv, &cmd); - PlusNum(); - DispatchKey(cmd, key); - } - } - - if (!cursor) { - break; - } - } -} - -void MigratorThread::MigrateSetsDB() { - blackwidow::BlackWidow *bw = (blackwidow::BlackWidow*)(db_); - - int64_t scan_batch_num = g_pika_conf->sync_batch_num() * 10; - if (MAX_BATCH_NUM < scan_batch_num) { - if (g_pika_conf->sync_batch_num() < MAX_BATCH_NUM) { - scan_batch_num = MAX_BATCH_NUM; - } else { - scan_batch_num = g_pika_conf->sync_batch_num() * 2; - } - } - - int64_t ttl = -1; - int64_t cursor = 0; - blackwidow::Status s; - std::vector keys; - std::map type_timestamp; - std::map type_status; - - while (true) { - cursor = bw->Scan(blackwidow::DataType::kSets, cursor, "*", scan_batch_num, &keys); - - for (const auto& key : keys) { - std::vector members; - blackwidow::Status s = bw->SMembers(key, &members); - if (!s.ok()) { - LOG(WARNING) << "db->SMembers(key:" << key << ") = " << s.ToString(); - continue; - } - auto it = members.begin(); - while (!should_exit_ && it != members.end()) { - std::string cmd; - pink::RedisCmdArgsType argv; - - argv.push_back("SADD"); - argv.push_back(key); - for (int idx = 0; - idx < g_pika_conf->sync_batch_num() && !should_exit_ && it != members.end(); - idx++, it++) { - argv.push_back(*it); - } - - pink::SerializeRedisCommand(argv, &cmd); - PlusNum(); - DispatchKey(cmd, key); - } - - ttl = -1; - type_status.clear(); - type_timestamp = bw->TTL(key, &type_status); - if (type_timestamp[blackwidow::kSets] != -2) { - ttl = type_timestamp[blackwidow::kSets]; - } - - if (s.ok() && ttl > 0) { - 
-        pink::RedisCmdArgsType argv;
-        std::string cmd;
-
-        argv.push_back("EXPIRE");
-        argv.push_back(key);
-        argv.push_back(std::to_string(ttl));
-
-        pink::SerializeRedisCommand(argv, &cmd);
-        PlusNum();
-        DispatchKey(cmd, key);
-      }
-    }
-
-    if (!cursor) {
-      break;
-    }
-  }
-}
-
-void MigratorThread::MigrateZsetsDB() {
-  blackwidow::BlackWidow *bw = (blackwidow::BlackWidow*)(db_);
-
-  int64_t scan_batch_num = g_pika_conf->sync_batch_num() * 10;
-  if (MAX_BATCH_NUM < scan_batch_num) {
-    if (g_pika_conf->sync_batch_num() < MAX_BATCH_NUM) {
-      scan_batch_num = MAX_BATCH_NUM;
-    } else {
-      scan_batch_num = g_pika_conf->sync_batch_num() * 2;
-    }
-  }
-
-  int64_t ttl = -1;
-  int64_t cursor = 0;
-  blackwidow::Status s;
-  std::vector<std::string> keys;
-  std::map<blackwidow::DataType, int64_t> type_timestamp;
-  std::map<blackwidow::DataType, blackwidow::Status> type_status;
-
-  while (true) {
-    cursor = bw->Scan(blackwidow::DataType::kZSets, cursor, "*", scan_batch_num, &keys);
-
-    for (const auto& key : keys) {
-      std::vector<blackwidow::ScoreMember> score_members;
-      blackwidow::Status s = bw->ZRange(key, 0, -1, &score_members);
-      if (!s.ok()) {
-        LOG(WARNING) << "db->ZRange(key:" << key << ") = " << s.ToString();
-        continue;
-      }
-      auto it = score_members.begin();
-      while (!should_exit_ && it != score_members.end()) {
-        pink::RedisCmdArgsType argv;
-        std::string cmd;
-
-        argv.push_back("ZADD");
-        argv.push_back(key);
-        for (int idx = 0;
-             idx < g_pika_conf->sync_batch_num() && !should_exit_ && it != score_members.end();
-             idx++, it++) {
-          argv.push_back(std::to_string(it->score));
-          argv.push_back(it->member);
-        }
-
-        pink::SerializeRedisCommand(argv, &cmd);
-        PlusNum();
-        DispatchKey(cmd, key);
-      }
-
-      ttl = -1;
-      type_status.clear();
-      type_timestamp = bw->TTL(key, &type_status);
-      // -2 means no zset exists under this key; otherwise keep the reported TTL
-      if (type_timestamp[blackwidow::kZSets] != -2) {
-        ttl = type_timestamp[blackwidow::kZSets];
-      }
-
-      if (s.ok() && ttl > 0) {
-        pink::RedisCmdArgsType argv;
-        std::string cmd;
-
-        argv.push_back("EXPIRE");
-        argv.push_back(key);
-        argv.push_back(std::to_string(ttl));
-
-        pink::SerializeRedisCommand(argv, &cmd);
-        PlusNum();
-        DispatchKey(cmd, key);
-      }
-    }
-
-    if (!cursor) {
-      break;
-    }
-  }
-}
-
-void MigratorThread::MigrateDB() {
-  switch (int(type_)) {
-    case int(blackwidow::kStrings) : {
-      MigrateStringsDB();
-      break;
-    }
-
-    case int(blackwidow::kLists) : {
-      MigrateListsDB();
-      break;
-    }
-
-    case int(blackwidow::kHashes) : {
-      MigrateHashesDB();
-      break;
-    }
-
-    case int(blackwidow::kSets) : {
-      MigrateSetsDB();
-      break;
-    }
-
-    case int(blackwidow::kZSets) : {
-      MigrateZsetsDB();
-      break;
-    }
-
-    default: {
-      LOG(WARNING) << "illegal db type " << type_;
-      break;
-    }
-  }
-}
-
-void MigratorThread::DispatchKey(const std::string &command, const std::string& key) {
-  thread_index_ = (thread_index_ + 1) % thread_num_;
-  size_t idx = thread_index_;
-  if (key.size()) {  // non-empty key: route by key hash so a key always hits the same sender
-    idx = std::hash<std::string>()(key) % thread_num_;
-  }
-  (*senders_)[idx]->LoadKey(command);
-}
-
-const char* GetDBTypeString(int type) {
-  switch (type) {
-    case int(blackwidow::kStrings) : {
-      return "blackwidow::kStrings";
-    }
-
-    case int(blackwidow::kLists) : {
-      return "blackwidow::kLists";
-    }
-
-    case int(blackwidow::kHashes) : {
-      return "blackwidow::kHashes";
-    }
-
-    case int(blackwidow::kSets) : {
-      return "blackwidow::kSets";
-    }
-
-    case int(blackwidow::kZSets) : {
-      return "blackwidow::kZSets";
-    }
-
-    default: {
-      return "blackwidow::Unknown";
-    }
-  }
-}
-
-void *MigratorThread::ThreadMain() {
-  MigrateDB();
-  should_exit_ = true;
-  LOG(INFO) << GetDBTypeString(type_) << " keys have been dispatched completely";
-  return NULL;
-}
-
diff --git 
a/tools/pika_migrate/src/pika.cc b/tools/pika_migrate/src/pika.cc deleted file mode 100644 index 0408752dd9..0000000000 --- a/tools/pika_migrate/src/pika.cc +++ /dev/null @@ -1,214 +0,0 @@ -// Copyright (c) 2015-present, Qihoo, Inc. All rights reserved. -// This source code is licensed under the BSD-style license found in the -// LICENSE file in the root directory of this source tree. An additional grant -// of patent rights can be found in the PATENTS file in the same directory. - -#include -#include -#include - -#include "slash/include/env.h" -#include "include/pika_rm.h" -#include "include/pika_server.h" -#include "include/pika_command.h" -#include "include/pika_conf.h" -#include "include/pika_define.h" -#include "include/pika_version.h" -#include "include/pika_cmd_table_manager.h" - -#ifdef TCMALLOC_EXTENSION -#include -#endif - -PikaConf* g_pika_conf; -PikaServer* g_pika_server; -PikaReplicaManager* g_pika_rm; - -PikaCmdTableManager* g_pika_cmd_table_manager; - -static void version() { - char version[32]; - snprintf(version, sizeof(version), "%d.%d.%d", PIKA_MAJOR, - PIKA_MINOR, PIKA_PATCH); - printf("-----------Pika server %s ----------\n", version); -} - -static void PikaConfInit(const std::string& path) { - printf("path : %s\n", path.c_str()); - g_pika_conf = new PikaConf(path); - if (g_pika_conf->Load() != 0) { - LOG(FATAL) << "pika load conf error"; - } - version(); - printf("-----------Pika config list----------\n"); - g_pika_conf->DumpConf(); - printf("-----------Pika config end----------\n"); -} - -static void PikaGlogInit() { - if (!slash::FileExists(g_pika_conf->log_path())) { - slash::CreatePath(g_pika_conf->log_path()); - } - - if (!g_pika_conf->daemonize()) { - FLAGS_alsologtostderr = true; - } - FLAGS_log_dir = g_pika_conf->log_path(); - FLAGS_minloglevel = 0; - FLAGS_max_log_size = 1800; - FLAGS_logbufsecs = 0; - ::google::InitGoogleLogging("pika"); -} - -static void daemonize() { - if (fork() != 0) exit(0); /* parent exits */ - setsid(); /* create a new session */ -} - -static void close_std() { - int fd; - if ((fd = open("/dev/null", O_RDWR, 0)) != -1) { - dup2(fd, STDIN_FILENO); - dup2(fd, STDOUT_FILENO); - dup2(fd, STDERR_FILENO); - close(fd); - } -} - -static void create_pid_file(void) { - /* Try to write the pid file in a best-effort way. 
*/ - std::string path(g_pika_conf->pidfile()); - - size_t pos = path.find_last_of('/'); - if (pos != std::string::npos) { - // mkpath(path.substr(0, pos).c_str(), 0755); - slash::CreateDir(path.substr(0, pos)); - } else { - path = kPikaPidFile; - } - - FILE *fp = fopen(path.c_str(), "w"); - if (fp) { - fprintf(fp,"%d\n",(int)getpid()); - fclose(fp); - } -} - -static void IntSigHandle(const int sig) { - LOG(INFO) << "Catch Signal " << sig << ", cleanup..."; - g_pika_server->Exit(); -} - -static void PikaSignalSetup() { - signal(SIGHUP, SIG_IGN); - signal(SIGPIPE, SIG_IGN); - signal(SIGINT, &IntSigHandle); - signal(SIGQUIT, &IntSigHandle); - signal(SIGTERM, &IntSigHandle); -} - -static void usage() -{ - char version[32]; - snprintf(version, sizeof(version), "%d.%d.%d", PIKA_MAJOR, - PIKA_MINOR, PIKA_PATCH); - fprintf(stderr, - "Pika module %s\n" - "usage: pika [-hv] [-c conf/file]\n" - "\t-h -- show this help\n" - "\t-c conf/file -- config file \n" - " example: ./output/bin/pika -c ./conf/pika.conf\n", - version - ); -} - -int main(int argc, char *argv[]) { - if (argc != 2 && argc != 3) { - usage(); - exit(-1); - } - - bool path_opt = false; - char c; - char path[1024]; - while (-1 != (c = getopt(argc, argv, "c:hv"))) { - switch (c) { - case 'c': - snprintf(path, 1024, "%s", optarg); - path_opt = true; - break; - case 'h': - usage(); - return 0; - case 'v': - version(); - return 0; - default: - usage(); - return 0; - } - } - - if (path_opt == false) { - fprintf (stderr, "Please specify the conf file path\n" ); - usage(); - exit(-1); - } -#ifdef TCMALLOC_EXTENSION - MallocExtension::instance()->Initialize(); -#endif - PikaConfInit(path); - - rlimit limit; - rlim_t maxfiles = g_pika_conf->maxclients() + PIKA_MIN_RESERVED_FDS; - if (getrlimit(RLIMIT_NOFILE, &limit) == -1) { - LOG(WARNING) << "getrlimit error: " << strerror(errno); - } else if (limit.rlim_cur < maxfiles) { - rlim_t old_limit = limit.rlim_cur; - limit.rlim_cur = maxfiles; - limit.rlim_max = maxfiles; - if (setrlimit(RLIMIT_NOFILE, &limit) != -1) { - LOG(WARNING) << "your 'limit -n ' of " << old_limit << " is not enough for Redis to start. pika have successfully reconfig it to " << limit.rlim_cur; - } else { - LOG(FATAL) << "your 'limit -n ' of " << old_limit << " is not enough for Redis to start. pika can not reconfig it(" << strerror(errno) << "), do it by yourself"; - } - } - - // daemonize if needed - if (g_pika_conf->daemonize()) { - daemonize(); - create_pid_file(); - } - - - PikaGlogInit(); - PikaSignalSetup(); - - LOG(INFO) << "Server at: " << path; - g_pika_server = new PikaServer(); - g_pika_rm = new PikaReplicaManager(); - g_pika_cmd_table_manager = new PikaCmdTableManager(); - - if (g_pika_conf->daemonize()) { - close_std(); - } - - g_pika_rm->Start(); - g_pika_server->Start(); - - if (g_pika_conf->daemonize()) { - unlink(g_pika_conf->pidfile().c_str()); - } - - // stop PikaReplicaManager first,avoid internal threads - // may references to dead PikaServer - g_pika_rm->Stop(); - - delete g_pika_server; - delete g_pika_rm; - delete g_pika_cmd_table_manager; - ::google::ShutdownGoogleLogging(); - delete g_pika_conf; - - return 0; -} diff --git a/tools/pika_migrate/src/pika_admin.cc b/tools/pika_migrate/src/pika_admin.cc deleted file mode 100644 index 8424a6e8c0..0000000000 --- a/tools/pika_migrate/src/pika_admin.cc +++ /dev/null @@ -1,2106 +0,0 @@ -// Copyright (c) 2015-present, Qihoo, Inc. All rights reserved. 
-// This source code is licensed under the BSD-style license found in the -// LICENSE file in the root directory of this source tree. An additional grant -// of patent rights can be found in the PATENTS file in the same directory. - -#include "include/pika_admin.h" - -#include -#include -#include - -#include "slash/include/rsync.h" - -#include "include/pika_conf.h" -#include "include/pika_server.h" -#include "include/pika_rm.h" -#include "include/pika_version.h" -#include "include/build_version.h" - -#ifdef TCMALLOC_EXTENSION -#include -#endif - -extern PikaServer *g_pika_server; -extern PikaConf *g_pika_conf; -extern PikaReplicaManager *g_pika_rm; - -static std::string ConstructPinginPubSubResp(const PikaCmdArgsType &argv) { - if (argv.size() > 2) { - return "-ERR wrong number of arguments for " + kCmdNamePing + - " command\r\n"; - } - std::stringstream resp; - - resp << "*2\r\n" - << "$4\r\n" - << "pong\r\n"; - if (argv.size() == 2) { - resp << "$" << argv[1].size() << "\r\n" << argv[1] << "\r\n"; - } else { - resp << "$0\r\n\r\n"; - } - return resp.str(); -} - -/* - * slaveof no one - * slaveof ip port - * slaveof ip port force - */ -void SlaveofCmd::DoInitial() { - if (!CheckArg(argv_.size())) { - res_.SetRes(CmdRes::kWrongNum, kCmdNameSlaveof); - return; - } - - if (argv_.size() > 4) { - res_.SetRes(CmdRes::kWrongNum, kCmdNameSlaveof); - return; - } - - if (argv_.size() == 3 - && !strcasecmp(argv_[1].data(), "no") - && !strcasecmp(argv_[2].data(), "one")) { - is_noone_ = true; - return; - } - - // self is master of A , want to slavof B - if (g_pika_server->role() & PIKA_ROLE_MASTER) { - res_.SetRes(CmdRes::kErrOther, "already master of others, invalid usage"); - return; - } - - master_ip_ = argv_[1]; - std::string str_master_port = argv_[2]; - if (!slash::string2l(str_master_port.data(), str_master_port.size(), &master_port_) || master_port_ <= 0) { - res_.SetRes(CmdRes::kInvalidInt); - return; - } - - if ((master_ip_ == "127.0.0.1" || master_ip_ == g_pika_server->host()) - && master_port_ == g_pika_server->port()) { - res_.SetRes(CmdRes::kErrOther, "you fucked up"); - return; - } - - if (argv_.size() == 4) { - if (!strcasecmp(argv_[3].data(), "force")) { - g_pika_server->SetForceFullSync(true); - } else { - res_.SetRes(CmdRes::kWrongNum, kCmdNameSlaveof); - } - } -} - -void SlaveofCmd::Do(std::shared_ptr partition) { - // Check if we are already connected to the specified master - if ((master_ip_ == "127.0.0.1" || g_pika_server->master_ip() == master_ip_) - && g_pika_server->master_port() == master_port_) { - res_.SetRes(CmdRes::kOk); - return; - } - - g_pika_server->RemoveMaster(); - - if (is_noone_) { - res_.SetRes(CmdRes::kOk); - g_pika_conf->SetSlaveof(std::string()); - return; - } - - bool sm_ret = g_pika_server->SetMaster(master_ip_, master_port_); - - if (sm_ret) { - res_.SetRes(CmdRes::kOk); - g_pika_conf->SetSlaveof(master_ip_ + ":" + std::to_string(master_port_)); - } else { - res_.SetRes(CmdRes::kErrOther, "Server is not in correct state for slaveof"); - } -} - -/* - * dbslaveof db[0 ~ 7] - * dbslaveof db[0 ~ 7] force - * dbslaveof db[0 ~ 7] no one - * dbslaveof db[0 ~ 7] filenum offset - */ -void DbSlaveofCmd::DoInitial() { - if (!CheckArg(argv_.size())) { - res_.SetRes(CmdRes::kWrongNum, kCmdNameDbSlaveof); - return; - } - if (!g_pika_conf->classic_mode()) { - res_.SetRes(CmdRes::kErrOther, "DbSlaveof only support on classic mode"); - return; - } - if (g_pika_server->role() ^ PIKA_ROLE_SLAVE - || !g_pika_server->MetaSyncDone()) { - res_.SetRes(CmdRes::kErrOther, "Not 
currently a slave"); - return; - } - - if (argv_.size() > 4) { - res_.SetRes(CmdRes::kWrongNum, kCmdNameDbSlaveof); - return; - } - - db_name_ = argv_[1]; - if (!g_pika_server->IsTableExist(db_name_)) { - res_.SetRes(CmdRes::kErrOther, "Invaild db name"); - return; - } - - if (argv_.size() == 3 - && !strcasecmp(argv_[2].data(), "force")) { - force_sync_ = true; - return; - } - - if (argv_.size() == 4) { - if (!strcasecmp(argv_[2].data(), "no") - && !strcasecmp(argv_[3].data(), "one")) { - is_noone_ = true; - return; - } - - if (!slash::string2l(argv_[2].data(), argv_[2].size(), &filenum_) || filenum_ < 0) { - res_.SetRes(CmdRes::kInvalidInt); - return; - } - if (!slash::string2l(argv_[3].data(), argv_[3].size(), &offset_) || offset_ < 0) { - res_.SetRes(CmdRes::kInvalidInt); - return; - } - have_offset_ = true; - } -} - -void DbSlaveofCmd::Do(std::shared_ptr partition) { - std::shared_ptr slave_partition = - g_pika_rm->GetSyncSlavePartitionByName(PartitionInfo(db_name_,0)); - if (!slave_partition) { - res_.SetRes(CmdRes::kErrOther, "Db not found"); - return; - } - - Status s; - if (is_noone_) { - // In classic mode a table has only one partition - s = g_pika_rm->SendRemoveSlaveNodeRequest(db_name_, 0); - } else { - if (slave_partition->State() == ReplState::kNoConnect - || slave_partition->State() == ReplState::kError) { - if (have_offset_) { - std::shared_ptr db_partition = - g_pika_server->GetPartitionByDbName(db_name_); - db_partition->logger()->SetProducerStatus(filenum_, offset_); - } - ReplState state = force_sync_ - ? ReplState::kTryDBSync : ReplState::kTryConnect; - s = g_pika_rm->ActivateSyncSlavePartition( - RmNode(g_pika_server->master_ip(), g_pika_server->master_port(), - db_name_, 0), state); - } - } - - if (s.ok()) { - res_.SetRes(CmdRes::kOk); - } else { - res_.SetRes(CmdRes::kErrOther, s.ToString()); - } -} - -void AuthCmd::DoInitial() { - if (!CheckArg(argv_.size())) { - res_.SetRes(CmdRes::kWrongNum, kCmdNameAuth); - return; - } - pwd_ = argv_[1]; -} - -void AuthCmd::Do(std::shared_ptr partition) { - std::string root_password(g_pika_conf->requirepass()); - std::string user_password(g_pika_conf->userpass()); - if (user_password.empty() && root_password.empty()) { - res_.SetRes(CmdRes::kErrOther, "Client sent AUTH, but no password is set"); - return; - } - - if (pwd_ == user_password) { - res_.SetRes(CmdRes::kOk, "USER"); - } - if (pwd_ == root_password) { - res_.SetRes(CmdRes::kOk, "ROOT"); - } - if (res_.none()) { - res_.SetRes(CmdRes::kInvalidPwd); - return; - } - - std::shared_ptr conn = GetConn(); - if (!conn) { - res_.SetRes(CmdRes::kErrOther, kCmdNamePing); - LOG(WARNING) << name_ << " weak ptr is empty"; - return; - } - std::shared_ptr cli_conn = std::dynamic_pointer_cast(conn); - cli_conn->auth_stat().ChecknUpdate(res().raw_message()); -} - -void BgsaveCmd::DoInitial() { - if (!CheckArg(argv_.size())) { - res_.SetRes(CmdRes::kWrongNum, kCmdNameBgsave); - return; - } - if (argv_.size() == 2) { - std::vector tables; - slash::StringSplit(argv_[1], COMMA, tables); - for (const auto& table : tables) { - if (!g_pika_server->IsTableExist(table)) { - res_.SetRes(CmdRes::kInvalidTable, table); - return; - } else { - bgsave_tables_.insert(table); - } - } - } -} - -void BgsaveCmd::Do(std::shared_ptr partition) { - g_pika_server->DoSameThingSpecificTable(TaskType::kBgSave, bgsave_tables_); - LogCommand(); - res_.AppendContent("+Background saving started"); -} - -void CompactCmd::DoInitial() { - if (!CheckArg(argv_.size()) - || argv_.size() > 3) { - 
res_.SetRes(CmdRes::kWrongNum, kCmdNameCompact); - return; - } - - if (g_pika_server->IsKeyScaning()) { - res_.SetRes(CmdRes::kErrOther, "The info keyspace operation is executing, Try again later"); - return; - } - - if (argv_.size() == 1) { - struct_type_ = "all"; - } else if (argv_.size() == 2) { - struct_type_ = argv_[1]; - } else if (argv_.size() == 3) { - std::vector tables; - slash::StringSplit(argv_[1], COMMA, tables); - for (const auto& table : tables) { - if (!g_pika_server->IsTableExist(table)) { - res_.SetRes(CmdRes::kInvalidTable, table); - return; - } else { - compact_tables_.insert(table); - } - } - struct_type_ = argv_[2]; - } -} - -void CompactCmd::Do(std::shared_ptr partition) { - if (!strcasecmp(struct_type_.data(), "all")) { - g_pika_server->DoSameThingSpecificTable(TaskType::kCompactAll, compact_tables_); - } else if (!strcasecmp(struct_type_.data(), "string")) { - g_pika_server->DoSameThingSpecificTable(TaskType::kCompactStrings, compact_tables_); - } else if (!strcasecmp(struct_type_.data(), "hash")) { - g_pika_server->DoSameThingSpecificTable(TaskType::kCompactHashes, compact_tables_); - } else if (!strcasecmp(struct_type_.data(), "set")) { - g_pika_server->DoSameThingSpecificTable(TaskType::kCompactSets, compact_tables_); - } else if (!strcasecmp(struct_type_.data(), "zset")) { - g_pika_server->DoSameThingSpecificTable(TaskType::kCompactZSets, compact_tables_); - } else if (!strcasecmp(struct_type_.data(), "list")) { - g_pika_server->DoSameThingSpecificTable(TaskType::kCompactList, compact_tables_); - } else { - res_.SetRes(CmdRes::kInvalidDbType, struct_type_); - return; - } - LogCommand(); - res_.SetRes(CmdRes::kOk); -} - -void PurgelogstoCmd::DoInitial() { - if (!CheckArg(argv_.size()) - || argv_.size() > 3) { - res_.SetRes(CmdRes::kWrongNum, kCmdNamePurgelogsto); - return; - } - std::string filename = argv_[1]; - if (filename.size() <= kBinlogPrefixLen || - kBinlogPrefix != filename.substr(0, kBinlogPrefixLen)) { - res_.SetRes(CmdRes::kInvalidParameter); - return; - } - std::string str_num = filename.substr(kBinlogPrefixLen); - int64_t num = 0; - if (!slash::string2l(str_num.data(), str_num.size(), &num) || num < 0) { - res_.SetRes(CmdRes::kInvalidParameter); - return; - } - num_ = num; - - table_ = (argv_.size() == 3) ? 
argv_[2] :g_pika_conf->default_table(); - if (!g_pika_server->IsTableExist(table_)) { - res_.SetRes(CmdRes::kInvalidTable, table_); - return; - } -} - -void PurgelogstoCmd::Do(std::shared_ptr partition) { - std::shared_ptr table_partition = g_pika_server->GetTablePartitionById(table_, 0); - if (!table_partition) { - res_.SetRes(CmdRes::kErrOther, "Partition not found"); - } else { - table_partition->PurgeLogs(num_, true); - res_.SetRes(CmdRes::kOk); - } -} - -void PingCmd::DoInitial() { - if (!CheckArg(argv_.size())) { - res_.SetRes(CmdRes::kWrongNum, kCmdNamePing); - return; - } -} - -void PingCmd::Do(std::shared_ptr partition) { - std::shared_ptr conn = GetConn(); - if (!conn) { - res_.SetRes(CmdRes::kErrOther, kCmdNamePing); - LOG(WARNING) << name_ << " weak ptr is empty"; - return; - } - std::shared_ptr cli_conn = std::dynamic_pointer_cast(conn); - - if (cli_conn->IsPubSub()) { - return res_.SetRes(CmdRes::kNone, ConstructPinginPubSubResp(argv_)); - } - res_.SetRes(CmdRes::kPong); -} - -void SelectCmd::DoInitial() { - if (!CheckArg(argv_.size())) { - res_.SetRes(CmdRes::kWrongNum, kCmdNameSelect); - return; - } - if (g_pika_conf->classic_mode()) { - int index = atoi(argv_[1].data()); - if (std::to_string(index) != argv_[1]) { - res_.SetRes(CmdRes::kInvalidIndex, kCmdNameSelect); - return; - } else if (index < 0 || index >= g_pika_conf->databases()) { - res_.SetRes(CmdRes::kInvalidIndex, kCmdNameSelect + " DB index is out of range"); - return; - } else { - table_name_ = "db" + argv_[1]; - } - } else { - // only pika codis use sharding mode currently, but pika - // codis only support single db, so in sharding mode we - // do no thing in select command - table_name_ = g_pika_conf->default_table(); - } - if (!g_pika_server->IsTableExist(table_name_)) { - res_.SetRes(CmdRes::kInvalidTable, kCmdNameSelect); - return; - } -} - -void SelectCmd::Do(std::shared_ptr partition) { - std::shared_ptr conn = - std::dynamic_pointer_cast(GetConn()); - if (!conn) { - res_.SetRes(CmdRes::kErrOther, kCmdNameSelect); - LOG(WARNING) << name_ << " weak ptr is empty"; - return; - } - conn->SetCurrentTable(table_name_); - res_.SetRes(CmdRes::kOk); -} - -void FlushallCmd::DoInitial() { - if (!CheckArg(argv_.size())) { - res_.SetRes(CmdRes::kWrongNum, kCmdNameFlushall); - return; - } -} -void FlushallCmd::Do(std::shared_ptr partition) { - if (!partition) { - LOG(INFO) << "Flushall, but partition not found"; - } else { - partition->FlushDB(); - } -} - -// flushall convert flushdb writes to every partition binlog -std::string FlushallCmd::ToBinlog( - uint32_t exec_time, - const std::string& server_id, - uint64_t logic_id, - uint32_t filenum, - uint64_t offset) { - std::string content; - content.reserve(RAW_ARGS_LEN); - RedisAppendLen(content, 1, "*"); - - // to flushdb cmd - std::string flushdb_cmd("flushdb"); - RedisAppendLen(content, flushdb_cmd.size(), "$"); - RedisAppendContent(content, flushdb_cmd); - return PikaBinlogTransverter::BinlogEncode(BinlogType::TypeFirst, - exec_time, - std::stoi(server_id), - logic_id, - filenum, - offset, - content, - {}); -} - -void FlushdbCmd::DoInitial() { - if (!CheckArg(argv_.size())) { - res_.SetRes(CmdRes::kWrongNum, kCmdNameFlushdb); - return; - } - if (argv_.size() == 1) { - db_name_ = "all"; - } else { - std::string struct_type = argv_[1]; - if (!strcasecmp(struct_type.data(), "string")) { - db_name_ = "strings"; - } else if (!strcasecmp(struct_type.data(), "hash")) { - db_name_ = "hashes"; - } else if (!strcasecmp(struct_type.data(), "set")) { - db_name_ = "sets"; - } 
else if (!strcasecmp(struct_type.data(), "zset")) { - db_name_ = "zsets"; - } else if (!strcasecmp(struct_type.data(), "list")) { - db_name_ = "lists"; - } else { - res_.SetRes(CmdRes::kInvalidDbType); - } - } -} - -void FlushdbCmd::Do(std::shared_ptr partition) { - if (!partition) { - LOG(INFO) << "Flushdb, but partition not found"; - } else { - if (db_name_ == "all") { - partition->FlushDB(); - } else { - partition->FlushSubDB(db_name_); - } - } -} - -void ClientCmd::DoInitial() { - if (!CheckArg(argv_.size())) { - res_.SetRes(CmdRes::kWrongNum, kCmdNameClient); - return; - } - if (!strcasecmp(argv_[1].data(), "list") && argv_.size() == 2) { - // nothing - } else if (!strcasecmp(argv_[1].data(), "list") && argv_.size() == 5) { - if (!strcasecmp(argv_[2].data(), "order") && - !strcasecmp(argv_[3].data(), "by")) { - info_ = argv_[4]; - } else { - res_.SetRes(CmdRes::kErrOther, - "Syntax error, try CLIENT (LIST [order by [addr|idle])"); - return; - } - } else if (!strcasecmp(argv_[1].data(), "kill") && argv_.size() == 3) { - info_ = argv_[2]; - } else { - res_.SetRes(CmdRes::kErrOther, - "Syntax error, try CLIENT (LIST [order by [addr|idle]| KILL ip:port)"); - return; - } - operation_ = argv_[1]; - return; -} - -void ClientCmd::Do(std::shared_ptr partition) { - if (!strcasecmp(operation_.data(), "list")) { - struct timeval now; - gettimeofday(&now, NULL); - std::vector clients; - g_pika_server->ClientList(&clients); - std::vector::iterator iter = clients.begin(); - std::string reply = ""; - char buf[128]; - if (!strcasecmp(info_.data(), "addr")) { - std::sort(clients.begin(), clients.end(), AddrCompare); - } else if (!strcasecmp(info_.data(), "idle")) { - std::sort(clients.begin(), clients.end(), IdleCompare); - } - while (iter != clients.end()) { - snprintf(buf, sizeof(buf), "addr=%s fd=%d idle=%ld\n", - iter->ip_port.c_str(), iter->fd, - iter->last_interaction == 0 ? 
0 : now.tv_sec - iter->last_interaction);
-      reply.append(buf);
-      iter++;
-    }
-    res_.AppendString(reply);
-  } else if (!strcasecmp(operation_.data(), "kill") &&
-             !strcasecmp(info_.data(), "all")) {
-    g_pika_server->ClientKillAll();
-    res_.SetRes(CmdRes::kOk);
-  } else if (g_pika_server->ClientKill(info_) == 1) {
-    res_.SetRes(CmdRes::kOk);
-  } else {
-    res_.SetRes(CmdRes::kErrOther, "No such client");
-  }
-  return;
-}
-
-void ShutdownCmd::DoInitial() {
-  if (!CheckArg(argv_.size())) {
-    res_.SetRes(CmdRes::kWrongNum, kCmdNameShutdown);
-    return;
-  }
-
-  // For now, only shutdown needs to check that the client is local
-  if (is_local()) {
-    std::shared_ptr<pink::PinkConn> conn = GetConn();
-    if (conn) {
-      if (conn->ip_port().find("127.0.0.1") == std::string::npos
-          && conn->ip_port().find(g_pika_server->host()) == std::string::npos) {
-        LOG(WARNING) << "\'shutdown\' should be localhost" << " command from " << conn->ip_port();
-        res_.SetRes(CmdRes::kErrOther, kCmdNameShutdown + " should be localhost");
-      }
-    } else {
-      LOG(WARNING) << name_ << " weak ptr is empty";
-      res_.SetRes(CmdRes::kErrOther, kCmdNameShutdown);
-      return;
-    }
-  }
-}
-// no return
-void ShutdownCmd::Do(std::shared_ptr<Partition> partition) {
-  DLOG(WARNING) << "handle \'shutdown\'";
-  g_pika_server->Exit();
-  res_.SetRes(CmdRes::kNone);
-}
-
-const std::string InfoCmd::kInfoSection = "info";
-const std::string InfoCmd::kAllSection = "all";
-const std::string InfoCmd::kServerSection = "server";
-const std::string InfoCmd::kClientsSection = "clients";
-const std::string InfoCmd::kStatsSection = "stats";
-const std::string InfoCmd::kExecCountSection = "command_exec_count";
-const std::string InfoCmd::kCPUSection = "cpu";
-const std::string InfoCmd::kReplicationSection = "replication";
-const std::string InfoCmd::kKeyspaceSection = "keyspace";
-const std::string InfoCmd::kDataSection = "data";
-const std::string InfoCmd::kDebugSection = "debug";
-
-void InfoCmd::DoInitial() {
-  size_t argc = argv_.size();
-  if (argc > 4) {
-    res_.SetRes(CmdRes::kSyntaxErr);
-    return;
-  }
-  if (argc == 1) {
-    info_section_ = kInfo;
-    return;
-  }  // then argc is 2, 3 or 4
-
-  if (!strcasecmp(argv_[1].data(), kAllSection.data())) {
-    info_section_ = kInfoAll;
-  } else if (!strcasecmp(argv_[1].data(), kServerSection.data())) {
-    info_section_ = kInfoServer;
-  } else if (!strcasecmp(argv_[1].data(), kClientsSection.data())) {
-    info_section_ = kInfoClients;
-  } else if (!strcasecmp(argv_[1].data(), kStatsSection.data())) {
-    info_section_ = kInfoStats;
-  } else if (!strcasecmp(argv_[1].data(), kExecCountSection.data())) {
-    info_section_ = kInfoExecCount;
-  } else if (!strcasecmp(argv_[1].data(), kCPUSection.data())) {
-    info_section_ = kInfoCPU;
-  } else if (!strcasecmp(argv_[1].data(), kReplicationSection.data())) {
-    info_section_ = kInfoReplication;
-  } else if (!strcasecmp(argv_[1].data(), kKeyspaceSection.data())) {
-    info_section_ = kInfoKeyspace;
-    if (argc == 2) {
-      LogCommand();
-      return;
-    }
-    // info keyspace [ 0 | 1 | off ]
-    // info keyspace 1 db0,db1
-    // info keyspace 0 db0,db1
-    // info keyspace off db0,db1
-    if (argv_[2] == "1") {
-      if (g_pika_server->IsCompacting()) {
-        res_.SetRes(CmdRes::kErrOther, "The compact operation is executing, Try again later");
-      } else {
-        rescan_ = true;
-      }
-    } else if (argv_[2] == "off") {
-      off_ = true;
-    } else if (argv_[2] != "0") {
-      res_.SetRes(CmdRes::kSyntaxErr);
-    }
-
-    if (argc == 4) {
-      std::vector<std::string> tables;
-      slash::StringSplit(argv_[3], COMMA, tables);
-      for (const auto& table : tables) {
-        if (!g_pika_server->IsTableExist(table)) {
res_.SetRes(CmdRes::kInvalidTable, table); - return; - } else { - keyspace_scan_tables_.insert(table); - } - } - } - LogCommand(); - return; - } else if (!strcasecmp(argv_[1].data(), kDataSection.data())) { - info_section_ = kInfoData; - } else if (!strcasecmp(argv_[1].data(), kDebugSection.data())) { - info_section_ = kInfoDebug; - } else { - info_section_ = kInfoErr; - } - if (argc != 2) { - res_.SetRes(CmdRes::kSyntaxErr); - } -} - -void InfoCmd::Do(std::shared_ptr partition) { - std::string info; - switch (info_section_) { - case kInfo: - InfoServer(info); - info.append("\r\n"); - InfoData(info); - info.append("\r\n"); - InfoClients(info); - info.append("\r\n"); - InfoStats(info); - info.append("\r\n"); - InfoCPU(info); - info.append("\r\n"); - InfoReplication(info); - info.append("\r\n"); - InfoKeyspace(info); - break; - case kInfoAll: - InfoServer(info); - info.append("\r\n"); - InfoData(info); - info.append("\r\n"); - InfoClients(info); - info.append("\r\n"); - InfoStats(info); - info.append("\r\n"); - InfoExecCount(info); - info.append("\r\n"); - InfoCPU(info); - info.append("\r\n"); - InfoReplication(info); - info.append("\r\n"); - InfoKeyspace(info); - break; - case kInfoServer: - InfoServer(info); - break; - case kInfoClients: - InfoClients(info); - break; - case kInfoStats: - InfoStats(info); - break; - case kInfoExecCount: - InfoExecCount(info); - break; - case kInfoCPU: - InfoCPU(info); - break; - case kInfoReplication: - InfoReplication(info); - break; - case kInfoKeyspace: - InfoKeyspace(info); - break; - case kInfoData: - InfoData(info); - break; - case kInfoDebug: - InfoDebug(info); - break; - default: - //kInfoErr is nothing - break; - } - - - res_.AppendStringLen(info.size()); - res_.AppendContent(info); - return; -} - -void InfoCmd::InfoServer(std::string& info) { - static struct utsname host_info; - static bool host_info_valid = false; - if (!host_info_valid) { - uname(&host_info); - host_info_valid = true; - } - - time_t current_time_s = time(NULL); - std::stringstream tmp_stream; - char version[32]; - snprintf(version, sizeof(version), "%d.%d.%d", PIKA_MAJOR, - PIKA_MINOR, PIKA_PATCH); - tmp_stream << "# Server\r\n"; - tmp_stream << "pika_version:" << version << "\r\n"; - tmp_stream << pika_build_git_sha << "\r\n"; - tmp_stream << "pika_build_compile_date: " << - pika_build_compile_date << "\r\n"; - tmp_stream << "os:" << host_info.sysname << " " << host_info.release << " " << host_info.machine << "\r\n"; - tmp_stream << "arch_bits:" << (reinterpret_cast(&host_info.machine) + strlen(host_info.machine) - 2) << "\r\n"; - tmp_stream << "process_id:" << getpid() << "\r\n"; - tmp_stream << "tcp_port:" << g_pika_conf->port() << "\r\n"; - tmp_stream << "thread_num:" << g_pika_conf->thread_num() << "\r\n"; - tmp_stream << "sync_thread_num:" << g_pika_conf->sync_thread_num() << "\r\n"; - tmp_stream << "uptime_in_seconds:" << (current_time_s - g_pika_server->start_time_s()) << "\r\n"; - tmp_stream << "uptime_in_days:" << (current_time_s / (24*3600) - g_pika_server->start_time_s() / (24*3600) + 1) << "\r\n"; - tmp_stream << "config_file:" << g_pika_conf->conf_path() << "\r\n"; - tmp_stream << "server_id:" << g_pika_conf->server_id() << "\r\n"; - - info.append(tmp_stream.str()); -} - -void InfoCmd::InfoClients(std::string& info) { - std::stringstream tmp_stream; - tmp_stream << "# Clients\r\n"; - tmp_stream << "connected_clients:" << g_pika_server->ClientList() << "\r\n"; - - info.append(tmp_stream.str()); -} - -void InfoCmd::InfoStats(std::string& info) { - std::stringstream 
tmp_stream; - tmp_stream << "# Stats\r\n"; - tmp_stream << "total_connections_received:" << g_pika_server->accumulative_connections() << "\r\n"; - tmp_stream << "instantaneous_ops_per_sec:" << g_pika_server->ServerCurrentQps() << "\r\n"; - tmp_stream << "total_commands_processed:" << g_pika_server->ServerQueryNum() << "\r\n"; - tmp_stream << "is_bgsaving:" << (g_pika_server->IsBgSaving() ? "Yes" : "No") << "\r\n"; - tmp_stream << "is_scaning_keyspace:" << (g_pika_server->IsKeyScaning() ? "Yes" : "No") << "\r\n"; - tmp_stream << "is_compact:" << (g_pika_server->IsCompacting() ? "Yes" : "No") << "\r\n"; - tmp_stream << "compact_cron:" << g_pika_conf->compact_cron() << "\r\n"; - tmp_stream << "compact_interval:" << g_pika_conf->compact_interval() << "\r\n"; - - info.append(tmp_stream.str()); -} - -void InfoCmd::InfoExecCount(std::string& info) { - std::stringstream tmp_stream; - tmp_stream << "# Command_Exec_Count\r\n"; - - std::unordered_map command_exec_count_table = g_pika_server->ServerExecCountTable(); - for (const auto& item : command_exec_count_table) { - if (item.second == 0) { - continue; - } - tmp_stream << item.first << ":" << item.second << "\r\n"; - } - info.append(tmp_stream.str()); -} - -void InfoCmd::InfoCPU(std::string& info) { - struct rusage self_ru, c_ru; - getrusage(RUSAGE_SELF, &self_ru); - getrusage(RUSAGE_CHILDREN, &c_ru); - std::stringstream tmp_stream; - tmp_stream << "# CPU\r\n"; - tmp_stream << "used_cpu_sys:" << - setiosflags(std::ios::fixed) << std::setprecision(2) << - (float)self_ru.ru_stime.tv_sec+(float)self_ru.ru_stime.tv_usec/1000000 << - "\r\n"; - tmp_stream << "used_cpu_user:" << - setiosflags(std::ios::fixed) << std::setprecision(2) << - (float)self_ru.ru_utime.tv_sec+(float)self_ru.ru_utime.tv_usec/1000000 << - "\r\n"; - tmp_stream << "used_cpu_sys_children:" << - setiosflags(std::ios::fixed) << std::setprecision(2) << - (float)c_ru.ru_stime.tv_sec+(float)c_ru.ru_stime.tv_usec/1000000 << - "\r\n"; - tmp_stream << "used_cpu_user_children:" << - setiosflags(std::ios::fixed) << std::setprecision(2) << - (float)c_ru.ru_utime.tv_sec+(float)c_ru.ru_utime.tv_usec/1000000 << - "\r\n"; - info.append(tmp_stream.str()); -} - -void InfoCmd::InfoShardingReplication(std::string& info) { - int role = 0; - std::string slave_list_string; - uint32_t slave_num = g_pika_server->GetShardingSlaveListString(slave_list_string); - if (slave_num) { - role |= PIKA_ROLE_MASTER; - } - std::string common_master; - std::string master_ip; - int master_port = 0; - g_pika_rm->FindCommonMaster(&common_master); - if (!common_master.empty()) { - role |= PIKA_ROLE_SLAVE; - if(!slash::ParseIpPortString(common_master, master_ip, master_port)) { - return; - } - } - - std::stringstream tmp_stream; - tmp_stream << "# Replication("; - switch (role) { - case PIKA_ROLE_SINGLE : - case PIKA_ROLE_MASTER : tmp_stream << "MASTER)\r\nrole:master\r\n"; break; - case PIKA_ROLE_SLAVE : tmp_stream << "SLAVE)\r\nrole:slave\r\n"; break; - case PIKA_ROLE_MASTER | PIKA_ROLE_SLAVE : tmp_stream << "Master && SLAVE)\r\nrole:master&&slave\r\n"; break; - default: info.append("ERR: server role is error\r\n"); return; - } - switch (role) { - case PIKA_ROLE_SLAVE : - tmp_stream << "master_host:" << master_ip << "\r\n"; - tmp_stream << "master_port:" << master_port << "\r\n"; - tmp_stream << "master_link_status:up"<< "\r\n"; - tmp_stream << "slave_priority:" << g_pika_conf->slave_priority() << "\r\n"; - break; - case PIKA_ROLE_MASTER | PIKA_ROLE_SLAVE : - tmp_stream << "master_host:" << master_ip << "\r\n"; - tmp_stream 
<< "master_port:" << master_port << "\r\n"; - tmp_stream << "master_link_status:up"<< "\r\n"; - case PIKA_ROLE_SINGLE : - case PIKA_ROLE_MASTER : - tmp_stream << "connected_slaves:" << slave_num << "\r\n" << slave_list_string; - } - info.append(tmp_stream.str()); -} - -void InfoCmd::InfoReplication(std::string& info) { - if (!g_pika_conf->classic_mode()) { - // In Sharding mode, show different replication info - InfoShardingReplication(info); - return; - } - - int host_role = g_pika_server->role(); - std::stringstream tmp_stream; - std::stringstream out_of_sync; - - bool all_partition_sync = true; - slash::RWLock table_rwl(&g_pika_server->tables_rw_, false); - for (const auto& table_item : g_pika_server->tables_) { - slash::RWLock partition_rwl(&table_item.second->partitions_rw_, false); - for (const auto& partition_item : table_item.second->partitions_) { - std::shared_ptr slave_partition = - g_pika_rm->GetSyncSlavePartitionByName( - PartitionInfo(table_item.second->GetTableName(), - partition_item.second->GetPartitionId())); - if (!slave_partition) { - out_of_sync << "(" << partition_item.second->GetPartitionName() << ": InternalError)"; - continue; - } - if (slave_partition->State() != ReplState::kConnected) { - all_partition_sync = false; - out_of_sync << "(" << partition_item.second->GetPartitionName() << ":"; - if (slave_partition->State() == ReplState::kNoConnect) { - out_of_sync << "NoConnect)"; - } else if (slave_partition->State() == ReplState::kWaitDBSync) { - out_of_sync << "WaitDBSync)"; - } else if (slave_partition->State() == ReplState::kError) { - out_of_sync << "Error)"; - } else { - out_of_sync << "Other)"; - } - } - } - } - - tmp_stream << "# Replication("; - switch (host_role) { - case PIKA_ROLE_SINGLE : - case PIKA_ROLE_MASTER : tmp_stream << "MASTER)\r\nrole:master\r\n"; break; - case PIKA_ROLE_SLAVE : tmp_stream << "SLAVE)\r\nrole:slave\r\n"; break; - case PIKA_ROLE_MASTER | PIKA_ROLE_SLAVE : tmp_stream << "Master && SLAVE)\r\nrole:master&&slave\r\n"; break; - default: info.append("ERR: server role is error\r\n"); return; - } - - std::string slaves_list_str; - switch (host_role) { - case PIKA_ROLE_SLAVE : - tmp_stream << "master_host:" << g_pika_server->master_ip() << "\r\n"; - tmp_stream << "master_port:" << g_pika_server->master_port() << "\r\n"; - tmp_stream << "master_link_status:" << (((g_pika_server->repl_state() == PIKA_REPL_META_SYNC_DONE) - && all_partition_sync) ? "up" : "down") << "\r\n"; - tmp_stream << "slave_priority:" << g_pika_conf->slave_priority() << "\r\n"; - tmp_stream << "slave_read_only:" << g_pika_conf->slave_read_only() << "\r\n"; - if (!all_partition_sync) { - tmp_stream <<"db_repl_error_state:" << out_of_sync.str() << "\r\n"; - } - break; - case PIKA_ROLE_MASTER | PIKA_ROLE_SLAVE : - tmp_stream << "master_host:" << g_pika_server->master_ip() << "\r\n"; - tmp_stream << "master_port:" << g_pika_server->master_port() << "\r\n"; - tmp_stream << "master_link_status:" << (((g_pika_server->repl_state() == PIKA_REPL_META_SYNC_DONE) - && all_partition_sync) ? 
"up" : "down") << "\r\n"; - tmp_stream << "slave_read_only:" << g_pika_conf->slave_read_only() << "\r\n"; - if (!all_partition_sync) { - tmp_stream <<"db_repl_error_state:" << out_of_sync.str() << "\r\n"; - } - case PIKA_ROLE_SINGLE : - case PIKA_ROLE_MASTER : - tmp_stream << "connected_slaves:" << g_pika_server->GetSlaveListString(slaves_list_str) << "\r\n" << slaves_list_str; - } - - - Status s; - uint32_t filenum = 0; - uint64_t offset = 0; - std::string safety_purge; - for (const auto& t_item : g_pika_server->tables_) { - slash::RWLock partition_rwl(&t_item.second->partitions_rw_, false); - for (const auto& p_item : t_item.second->partitions_) { - p_item.second->logger()->GetProducerStatus(&filenum, &offset); - tmp_stream << p_item.second->GetPartitionName() << " binlog_offset=" << filenum << " " << offset; - s = g_pika_rm->GetSafetyPurgeBinlogFromSMP(p_item.second->GetTableName(), p_item.second->GetPartitionId(), &safety_purge); - tmp_stream << ",safety_purge=" << (s.ok() ? safety_purge : "error") << "\r\n"; - } - } - - info.append(tmp_stream.str()); -} - -void InfoCmd::InfoKeyspace(std::string& info) { - if (off_) { - g_pika_server->DoSameThingSpecificTable(TaskType::kStopKeyScan, keyspace_scan_tables_); - info.append("OK\r\n"); - return; - } - - std::string table_name; - KeyScanInfo key_scan_info; - int32_t duration; - std::vector key_infos; - std::stringstream tmp_stream; - tmp_stream << "# Keyspace\r\n"; - slash::RWLock rwl(&g_pika_server->tables_rw_, false); - for (const auto& table_item : g_pika_server->tables_) { - if (keyspace_scan_tables_.empty() - || keyspace_scan_tables_.find(table_item.first) != keyspace_scan_tables_.end()) { - table_name = table_item.second->GetTableName(); - key_scan_info = table_item.second->GetKeyScanInfo(); - key_infos = key_scan_info.key_infos; - duration = key_scan_info.duration; - if (key_infos.size() != 5) { - info.append("info keyspace error\r\n"); - return; - } - tmp_stream << "# Time:" << key_scan_info.s_start_time << "\r\n"; - if (duration == -2) { - tmp_stream << "# Duration: " << "In Waiting\r\n"; - } else if (duration == -1) { - tmp_stream << "# Duration: " << "In Processing\r\n"; - } else if (duration >= 0) { - tmp_stream << "# Duration: " << std::to_string(duration) + "s" << "\r\n"; - } - - tmp_stream << table_name << " Strings_keys=" << key_infos[0].keys << ", expires=" << key_infos[0].expires << ", invaild_keys=" << key_infos[0].invaild_keys << "\r\n"; - tmp_stream << table_name << " Hashes_keys=" << key_infos[1].keys << ", expires=" << key_infos[1].expires << ", invaild_keys=" << key_infos[1].invaild_keys << "\r\n"; - tmp_stream << table_name << " Lists_keys=" << key_infos[2].keys << ", expires=" << key_infos[2].expires << ", invaild_keys=" << key_infos[2].invaild_keys << "\r\n"; - tmp_stream << table_name << " Zsets_keys=" << key_infos[3].keys << ", expires=" << key_infos[3].expires << ", invaild_keys=" << key_infos[3].invaild_keys << "\r\n"; - tmp_stream << table_name << " Sets_keys=" << key_infos[4].keys << ", expires=" << key_infos[4].expires << ", invaild_keys=" << key_infos[4].invaild_keys << "\r\n\r\n"; - } - } - info.append(tmp_stream.str()); - - if (rescan_) { - g_pika_server->DoSameThingSpecificTable(TaskType::kStartKeyScan, keyspace_scan_tables_); - } - return; -} - -void InfoCmd::InfoData(std::string& info) { - std::stringstream tmp_stream; - std::stringstream db_fatal_msg_stream; - - int64_t db_size = slash::Du(g_pika_conf->db_path()); - tmp_stream << "# Data" << "\r\n"; - tmp_stream << "db_size:" << db_size << "\r\n"; - 
tmp_stream << "db_size_human:" << (db_size >> 20) << "M\r\n"; - int64_t log_size = slash::Du(g_pika_conf->log_path()); - tmp_stream << "log_size:" << log_size << "\r\n"; - tmp_stream << "log_size_human:" << (log_size >> 20) << "M\r\n"; - tmp_stream << "compression:" << g_pika_conf->compression() << "\r\n"; - - // rocksdb related memory usage - std::map type_result; - uint64_t total_background_errors = 0; - uint64_t total_memtable_usage = 0, memtable_usage = 0; - uint64_t total_table_reader_usage = 0, table_reader_usage = 0; - slash::RWLock table_rwl(&g_pika_server->tables_rw_, false); - for (const auto& table_item : g_pika_server->tables_) { - slash::RWLock partition_rwl(&table_item.second->partitions_rw_, false); - for (const auto& patition_item : table_item.second->partitions_) { - type_result.clear(); - memtable_usage = table_reader_usage = 0; - patition_item.second->DbRWLockReader(); - patition_item.second->db()->GetUsage(blackwidow::PROPERTY_TYPE_ROCKSDB_MEMTABLE, &memtable_usage); - patition_item.second->db()->GetUsage(blackwidow::PROPERTY_TYPE_ROCKSDB_TABLE_READER, &table_reader_usage); - patition_item.second->db()->GetUsage(blackwidow::PROPERTY_TYPE_ROCKSDB_BACKGROUND_ERRORS, &type_result); - patition_item.second->DbRWUnLock(); - total_memtable_usage += memtable_usage; - total_table_reader_usage += table_reader_usage; - for (const auto& item : type_result) { - if (item.second != 0) { - db_fatal_msg_stream << (total_background_errors != 0 ? "," : ""); - db_fatal_msg_stream << patition_item.second->GetPartitionName() << "/" << item.first; - total_background_errors += item.second; - } - } - } - } - - tmp_stream << "used_memory:" << (total_memtable_usage + total_table_reader_usage) << "\r\n"; - tmp_stream << "used_memory_human:" << ((total_memtable_usage + total_table_reader_usage) >> 20) << "M\r\n"; - tmp_stream << "db_memtable_usage:" << total_memtable_usage << "\r\n"; - tmp_stream << "db_tablereader_usage:" << total_table_reader_usage << "\r\n"; - tmp_stream << "db_fatal:" << (total_background_errors != 0 ? "1" : "0") << "\r\n"; - tmp_stream << "db_fatal_msg:" << (total_background_errors != 0 ? 
db_fatal_msg_stream.str() : "NULL") << "\r\n"; - - info.append(tmp_stream.str()); - return; -} - -void InfoCmd::InfoDebug(std::string& info) { - std::stringstream tmp_stream; - tmp_stream << "# Synchronization Status" << "\r\n"; - info.append(tmp_stream.str()); - g_pika_rm->RmStatus(&info); - return; -} - -void ConfigCmd::DoInitial() { - if (!CheckArg(argv_.size())) { - res_.SetRes(CmdRes::kWrongNum, kCmdNameConfig); - return; - } - size_t argc = argv_.size(); - if (!strcasecmp(argv_[1].data(), "get")) { - if (argc != 3) { - res_.SetRes(CmdRes::kErrOther, "Wrong number of arguments for CONFIG get"); - return; - } - } else if (!strcasecmp(argv_[1].data(), "set")) { - if (argc == 3 && argv_[2] != "*") { - res_.SetRes(CmdRes::kErrOther, "Wrong number of arguments for CONFIG set"); - return; - } else if (argc != 4 && argc != 3) { - res_.SetRes(CmdRes::kErrOther, "Wrong number of arguments for CONFIG set"); - return; - } - } else if (!strcasecmp(argv_[1].data(), "rewrite")) { - if (argc != 2) { - res_.SetRes(CmdRes::kErrOther, "Wrong number of arguments for CONFIG rewrite"); - return; - } - } else if (!strcasecmp(argv_[1].data(), "resetstat")) { - if (argc != 2) { - res_.SetRes(CmdRes::kErrOther, "Wrong number of arguments for CONFIG resetstat"); - return; - } - } else { - res_.SetRes(CmdRes::kErrOther, "CONFIG subcommand must be one of GET, SET, RESETSTAT, REWRITE"); - return; - } - config_args_v_.assign(argv_.begin() + 1, argv_.end()); - return; -} - -void ConfigCmd::Do(std::shared_ptr partition) { - std::string config_ret; - if (!strcasecmp(config_args_v_[0].data(), "get")) { - ConfigGet(config_ret); - } else if (!strcasecmp(config_args_v_[0].data(), "set")) { - ConfigSet(config_ret); - } else if (!strcasecmp(config_args_v_[0].data(), "rewrite")) { - ConfigRewrite(config_ret); - } else if (!strcasecmp(config_args_v_[0].data(), "resetstat")) { - ConfigResetstat(config_ret); - } - res_.AppendStringRaw(config_ret); - return; -} - -static void EncodeString(std::string *dst, const std::string &value) { - dst->append("$"); - dst->append(std::to_string(value.size())); - dst->append("\r\n"); - dst->append(value.data(), value.size()); - dst->append("\r\n"); -} - -static void EncodeInt32(std::string *dst, const int32_t v) { - std::string vstr = std::to_string(v); - dst->append("$"); - dst->append(std::to_string(vstr.length())); - dst->append("\r\n"); - dst->append(vstr); - dst->append("\r\n"); -} - -static void EncodeInt64(std::string *dst, const int64_t v) { - std::string vstr = std::to_string(v); - dst->append("$"); - dst->append(std::to_string(vstr.length())); - dst->append("\r\n"); - dst->append(vstr); - dst->append("\r\n"); -} - -void ConfigCmd::ConfigGet(std::string &ret) { - size_t elements = 0; - std::string config_body; - std::string pattern = config_args_v_[1]; - - if (slash::stringmatch(pattern.data(), "port", 1)) { - elements += 2; - EncodeString(&config_body, "port"); - EncodeInt32(&config_body, g_pika_conf->port()); - } - - if (slash::stringmatch(pattern.data(), "thread-num", 1)) { - elements += 2; - EncodeString(&config_body, "thread-num"); - EncodeInt32(&config_body, g_pika_conf->thread_num()); - } - - if (slash::stringmatch(pattern.data(), "thread-pool-size", 1)) { - elements += 2; - EncodeString(&config_body, "thread-pool-size"); - EncodeInt32(&config_body, g_pika_conf->thread_pool_size()); - } - - if (slash::stringmatch(pattern.data(), "sync-thread-num", 1)) { - elements += 2; - EncodeString(&config_body, "sync-thread-num"); - EncodeInt32(&config_body, 
g_pika_conf->sync_thread_num()); - } - - if (slash::stringmatch(pattern.data(), "log-path", 1)) { - elements += 2; - EncodeString(&config_body, "log-path"); - EncodeString(&config_body, g_pika_conf->log_path()); - } - - if (slash::stringmatch(pattern.data(), "db-path", 1)) { - elements += 2; - EncodeString(&config_body, "db-path"); - EncodeString(&config_body, g_pika_conf->db_path()); - } - - if (slash::stringmatch(pattern.data(), "maxmemory", 1)) { - elements += 2; - EncodeString(&config_body, "maxmemory"); - EncodeInt64(&config_body, g_pika_conf->write_buffer_size()); - } - - if (slash::stringmatch(pattern.data(), "write-buffer-size", 1)) { - elements += 2; - EncodeString(&config_body, "write-buffer-size"); - EncodeInt64(&config_body, g_pika_conf->write_buffer_size()); - } - - if (slash::stringmatch(pattern.data(), "timeout", 1)) { - elements += 2; - EncodeString(&config_body, "timeout"); - EncodeInt32(&config_body, g_pika_conf->timeout()); - } - - if (slash::stringmatch(pattern.data(), "requirepass", 1)) { - elements += 2; - EncodeString(&config_body, "requirepass"); - EncodeString(&config_body, g_pika_conf->requirepass()); - } - - if (slash::stringmatch(pattern.data(), "masterauth", 1)) { - elements += 2; - EncodeString(&config_body, "masterauth"); - EncodeString(&config_body, g_pika_conf->masterauth()); - } - - if (slash::stringmatch(pattern.data(), "userpass", 1)) { - elements += 2; - EncodeString(&config_body, "userpass"); - EncodeString(&config_body, g_pika_conf->userpass()); - } - - if (slash::stringmatch(pattern.data(), "userblacklist", 1)) { - elements += 2; - EncodeString(&config_body, "userblacklist"); - EncodeString(&config_body, (g_pika_conf->suser_blacklist()).c_str()); - } - - if (slash::stringmatch(pattern.data(), "instance-mode", 1)) { - elements += 2; - EncodeString(&config_body, "instance-mode"); - EncodeString(&config_body, (g_pika_conf->classic_mode() ? "classic" : "sharding")); - } - - if (g_pika_conf->classic_mode() - && slash::stringmatch(pattern.data(), "databases", 1)) { - elements += 2; - EncodeString(&config_body, "databases"); - EncodeInt32(&config_body, g_pika_conf->databases()); - } - - if (!g_pika_conf->classic_mode() - && slash::stringmatch(pattern.data(), "default-slot-num", 1)) { - elements += 2; - EncodeString(&config_body, "default-slot-num"); - EncodeInt32(&config_body, g_pika_conf->default_slot_num()); - } - - if (slash::stringmatch(pattern.data(), "daemonize", 1)) { - elements += 2; - EncodeString(&config_body, "daemonize"); - EncodeString(&config_body, g_pika_conf->daemonize() ? 
"yes" : "no"); - } - - if (slash::stringmatch(pattern.data(), "dump-path", 1)) { - elements += 2; - EncodeString(&config_body, "dump-path"); - EncodeString(&config_body, g_pika_conf->bgsave_path()); - } - - if (slash::stringmatch(pattern.data(), "dump-expire", 1)) { - elements += 2; - EncodeString(&config_body, "dump-expire"); - EncodeInt32(&config_body, g_pika_conf->expire_dump_days()); - } - - if (slash::stringmatch(pattern.data(), "dump-prefix", 1)) { - elements += 2; - EncodeString(&config_body, "dump-prefix"); - EncodeString(&config_body, g_pika_conf->bgsave_prefix()); - } - - if (slash::stringmatch(pattern.data(), "pidfile", 1)) { - elements += 2; - EncodeString(&config_body, "pidfile"); - EncodeString(&config_body, g_pika_conf->pidfile()); - } - - if (slash::stringmatch(pattern.data(), "maxclients", 1)) { - elements += 2; - EncodeString(&config_body, "maxclients"); - EncodeInt32(&config_body, g_pika_conf->maxclients()); - } - - if (slash::stringmatch(pattern.data(), "target-file-size-base", 1)) { - elements += 2; - EncodeString(&config_body, "target-file-size-base"); - EncodeInt32(&config_body, g_pika_conf->target_file_size_base()); - } - - if (slash::stringmatch(pattern.data(), "max-cache-statistic-keys", 1)) { - elements += 2; - EncodeString(&config_body, "max-cache-statistic-keys"); - EncodeInt32(&config_body, g_pika_conf->max_cache_statistic_keys()); - } - - if (slash::stringmatch(pattern.data(), "small-compaction-threshold", 1)) { - elements += 2; - EncodeString(&config_body, "small-compaction-threshold"); - EncodeInt32(&config_body, g_pika_conf->small_compaction_threshold()); - } - - if (slash::stringmatch(pattern.data(), "max-background-flushes", 1)) { - elements += 2; - EncodeString(&config_body, "max-background-flushes"); - EncodeInt32(&config_body, g_pika_conf->max_background_flushes()); - } - - if (slash::stringmatch(pattern.data(), "max-background-compactions", 1)) { - elements += 2; - EncodeString(&config_body, "max-background-compactions"); - EncodeInt32(&config_body, g_pika_conf->max_background_compactions()); - } - - if (slash::stringmatch(pattern.data(), "max-cache-files", 1)) { - elements += 2; - EncodeString(&config_body, "max-cache-files"); - EncodeInt32(&config_body, g_pika_conf->max_cache_files()); - } - - if (slash::stringmatch(pattern.data(), "max-bytes-for-level-multiplier", 1)) { - elements += 2; - EncodeString(&config_body, "max-bytes-for-level-multiplier"); - EncodeInt32(&config_body, g_pika_conf->max_bytes_for_level_multiplier()); - } - - if (slash::stringmatch(pattern.data(), "block-size", 1)) { - elements += 2; - EncodeString(&config_body, "block-size"); - EncodeInt64(&config_body, g_pika_conf->block_size()); - } - - if (slash::stringmatch(pattern.data(), "block-cache", 1)) { - elements += 2; - EncodeString(&config_body, "block-cache"); - EncodeInt64(&config_body, g_pika_conf->block_cache()); - } - - if (slash::stringmatch(pattern.data(), "share-block-cache", 1)) { - elements += 2; - EncodeString(&config_body, "share-block-cache"); - EncodeString(&config_body, g_pika_conf->share_block_cache() ? "yes" : "no"); - } - - if (slash::stringmatch(pattern.data(), "cache-index-and-filter-blocks", 1)) { - elements += 2; - EncodeString(&config_body, "cache-index-and-filter-blocks"); - EncodeString(&config_body, g_pika_conf->cache_index_and_filter_blocks() ? 
"yes" : "no"); - } - - if (slash::stringmatch(pattern.data(), "optimize-filters-for-hits", 1)) { - elements += 2; - EncodeString(&config_body, "optimize-filters-for-hits"); - EncodeString(&config_body, g_pika_conf->optimize_filters_for_hits() ? "yes" : "no"); - } - - if (slash::stringmatch(pattern.data(), "level-compaction-dynamic-level-bytes", 1)) { - elements += 2; - EncodeString(&config_body, "level-compaction-dynamic-level-bytes"); - EncodeString(&config_body, g_pika_conf->level_compaction_dynamic_level_bytes() ? "yes" : "no"); - } - - if (slash::stringmatch(pattern.data(), "expire-logs-days", 1)) { - elements += 2; - EncodeString(&config_body, "expire-logs-days"); - EncodeInt32(&config_body, g_pika_conf->expire_logs_days()); - } - - if (slash::stringmatch(pattern.data(), "expire-logs-nums", 1)) { - elements += 2; - EncodeString(&config_body, "expire-logs-nums"); - EncodeInt32(&config_body, g_pika_conf->expire_logs_nums()); - } - - if (slash::stringmatch(pattern.data(), "root-connection-num", 1)) { - elements += 2; - EncodeString(&config_body, "root-connection-num"); - EncodeInt32(&config_body, g_pika_conf->root_connection_num()); - } - - if (slash::stringmatch(pattern.data(), "slowlog-write-errorlog", 1)) { - elements += 2; - EncodeString(&config_body, "slowlog-write-errorlog"); - EncodeString(&config_body, g_pika_conf->slowlog_write_errorlog() ? "yes" : "no"); - } - - if (slash::stringmatch(pattern.data(), "slowlog-log-slower-than", 1)) { - elements += 2; - EncodeString(&config_body, "slowlog-log-slower-than"); - EncodeInt32(&config_body, g_pika_conf->slowlog_slower_than()); - } - - if (slash::stringmatch(pattern.data(), "slowlog-max-len", 1)) { - elements += 2; - EncodeString(&config_body, "slowlog-max-len"); - EncodeInt32(&config_body, g_pika_conf->slowlog_max_len()); - } - - if (slash::stringmatch(pattern.data(), "write-binlog", 1)) { - elements += 2; - EncodeString(&config_body, "write-binlog"); - EncodeString(&config_body, g_pika_conf->write_binlog() ? 
"yes" : "no"); - } - - if (slash::stringmatch(pattern.data(), "binlog-file-size", 1)) { - elements += 2; - EncodeString(&config_body, "binlog-file-size"); - EncodeInt32(&config_body, g_pika_conf->binlog_file_size()); - } - - if (slash::stringmatch(pattern.data(), "max-cache-statistic-keys", 1)) { - elements += 2; - EncodeString(&config_body, "max-cache-statistic-keys"); - EncodeInt32(&config_body, g_pika_conf->max_cache_statistic_keys()); - } - - if (slash::stringmatch(pattern.data(), "small-compaction-threshold", 1)) { - elements += 2; - EncodeString(&config_body, "small-compaction-threshold"); - EncodeInt32(&config_body, g_pika_conf->small_compaction_threshold()); - } - - if (slash::stringmatch(pattern.data(), "max-write-buffer-size", 1)) { - elements += 2; - EncodeString(&config_body, "max-write-buffer-size"); - EncodeInt64(&config_body, g_pika_conf->max_write_buffer_size()); - } - - if (slash::stringmatch(pattern.data(), "max-client-response-size", 1)) { - elements += 2; - EncodeString(&config_body, "max-client-response-size"); - EncodeInt64(&config_body, g_pika_conf->max_client_response_size()); - } - - if (slash::stringmatch(pattern.data(), "compression", 1)) { - elements += 2; - EncodeString(&config_body, "compression"); - EncodeString(&config_body, g_pika_conf->compression()); - } - - if (slash::stringmatch(pattern.data(), "db-sync-path", 1)) { - elements += 2; - EncodeString(&config_body, "db-sync-path"); - EncodeString(&config_body, g_pika_conf->db_sync_path()); - } - - if (slash::stringmatch(pattern.data(), "db-sync-speed", 1)) { - elements += 2; - EncodeString(&config_body, "db-sync-speed"); - EncodeInt32(&config_body, g_pika_conf->db_sync_speed()); - } - - if (slash::stringmatch(pattern.data(), "compact-cron", 1)) { - elements += 2; - EncodeString(&config_body, "compact-cron"); - EncodeString(&config_body, g_pika_conf->compact_cron()); - } - - if (slash::stringmatch(pattern.data(), "compact-interval", 1)) { - elements += 2; - EncodeString(&config_body, "compact-interval"); - EncodeString(&config_body, g_pika_conf->compact_interval()); - } - - if (slash::stringmatch(pattern.data(), "network-interface", 1)) { - elements += 2; - EncodeString(&config_body, "network-interface"); - EncodeString(&config_body, g_pika_conf->network_interface()); - } - - if (slash::stringmatch(pattern.data(), "slaveof", 1)) { - elements += 2; - EncodeString(&config_body, "slaveof"); - EncodeString(&config_body, g_pika_conf->slaveof()); - } - - if (slash::stringmatch(pattern.data(), "slave-priority", 1)) { - elements += 2; - EncodeString(&config_body, "slave-priority"); - EncodeInt32(&config_body, g_pika_conf->slave_priority()); - } - - if (slash::stringmatch(pattern.data(), "sync-window-size", 1)) { - elements += 2; - EncodeString(&config_body, "sync-window-size"); - EncodeInt32(&config_body, g_pika_conf->sync_window_size()); - } - - std::stringstream resp; - resp << "*" << std::to_string(elements) << "\r\n" << config_body; - ret = resp.str(); -} - -// Remember to sync change PikaConf::ConfigRewrite(); -void ConfigCmd::ConfigSet(std::string& ret) { - std::string set_item = config_args_v_[1]; - if (set_item == "*") { - ret = "*23\r\n"; - EncodeString(&ret, "timeout"); - EncodeString(&ret, "requirepass"); - EncodeString(&ret, "masterauth"); - EncodeString(&ret, "userpass"); - EncodeString(&ret, "userblacklist"); - EncodeString(&ret, "dump-prefix"); - EncodeString(&ret, "maxclients"); - EncodeString(&ret, "dump-expire"); - EncodeString(&ret, "expire-logs-days"); - EncodeString(&ret, "expire-logs-nums"); - 
EncodeString(&ret, "root-connection-num"); - EncodeString(&ret, "slowlog-write-errorlog"); - EncodeString(&ret, "slowlog-log-slower-than"); - EncodeString(&ret, "slowlog-max-len"); - EncodeString(&ret, "write-binlog"); - EncodeString(&ret, "max-cache-statistic-keys"); - EncodeString(&ret, "small-compaction-threshold"); - EncodeString(&ret, "max-client-response-size"); - EncodeString(&ret, "db-sync-speed"); - EncodeString(&ret, "compact-cron"); - EncodeString(&ret, "compact-interval"); - EncodeString(&ret, "slave-priority"); - EncodeString(&ret, "sync-window-size"); - return; - } - long int ival; - std::string value = config_args_v_[2]; - if (set_item == "timeout") { - if (!slash::string2l(value.data(), value.size(), &ival)) { - ret = "-ERR Invalid argument " + value + " for CONFIG SET 'timeout'\r\n"; - return; - } - g_pika_conf->SetTimeout(ival); - ret = "+OK\r\n"; - } else if (set_item == "requirepass") { - g_pika_conf->SetRequirePass(value); - ret = "+OK\r\n"; - } else if (set_item == "masterauth") { - g_pika_conf->SetMasterAuth(value); - ret = "+OK\r\n"; - } else if (set_item == "userpass") { - g_pika_conf->SetUserPass(value); - ret = "+OK\r\n"; - } else if (set_item == "userblacklist") { - g_pika_conf->SetUserBlackList(value); - ret = "+OK\r\n"; - } else if (set_item == "dump-prefix") { - g_pika_conf->SetBgsavePrefix(value); - ret = "+OK\r\n"; - } else if (set_item == "maxclients") { - if (!slash::string2l(value.data(), value.size(), &ival) || ival <= 0) { - ret = "-ERR Invalid argument \'" + value + "\' for CONFIG SET 'maxclients'\r\n"; - return; - } - g_pika_conf->SetMaxConnection(ival); - g_pika_server->SetDispatchQueueLimit(ival); - ret = "+OK\r\n"; - } else if (set_item == "dump-expire") { - if (!slash::string2l(value.data(), value.size(), &ival)) { - ret = "-ERR Invalid argument \'" + value + "\' for CONFIG SET 'dump-expire'\r\n"; - return; - } - g_pika_conf->SetExpireDumpDays(ival); - ret = "+OK\r\n"; - } else if (set_item == "slave-priority") { - if (!slash::string2l(value.data(), value.size(), &ival)) { - ret = "-ERR Invalid argument \'" + value + "\' for CONFIG SET 'slave-priority'\r\n"; - return; - } - g_pika_conf->SetSlavePriority(ival); - ret = "+OK\r\n"; - } else if (set_item == "expire-logs-days") { - if (!slash::string2l(value.data(), value.size(), &ival) || ival <= 0) { - ret = "-ERR Invalid argument \'" + value + "\' for CONFIG SET 'expire-logs-days'\r\n"; - return; - } - g_pika_conf->SetExpireLogsDays(ival); - ret = "+OK\r\n"; - } else if (set_item == "expire-logs-nums") { - if (!slash::string2l(value.data(), value.size(), &ival) || ival <= 0) { - ret = "-ERR Invalid argument \'" + value + "\' for CONFIG SET 'expire-logs-nums'\r\n"; - return; - } - g_pika_conf->SetExpireLogsNums(ival); - ret = "+OK\r\n"; - } else if (set_item == "root-connection-num") { - if (!slash::string2l(value.data(), value.size(), &ival) || ival <= 0) { - ret = "-ERR Invalid argument \'" + value + "\' for CONFIG SET 'root-connection-num'\r\n"; - return; - } - g_pika_conf->SetRootConnectionNum(ival); - ret = "+OK\r\n"; - } else if (set_item == "slowlog-write-errorlog") { - bool is_write_errorlog; - if (value == "yes") { - is_write_errorlog = true; - } else if (value == "no") { - is_write_errorlog = false; - } else { - ret = "-ERR Invalid argument \'" + value + "\' for CONFIG SET 'slowlog-write-errorlog'\r\n"; - return; - } - g_pika_conf->SetSlowlogWriteErrorlog(is_write_errorlog); - ret = "+OK\r\n"; - } else if (set_item == "slowlog-log-slower-than") { - if (!slash::string2l(value.data(), 
value.size(), &ival) || ival < 0) { - ret = "-ERR Invalid argument \'" + value + "\' for CONFIG SET 'slowlog-log-slower-than'\r\n"; - return; - } - g_pika_conf->SetSlowlogSlowerThan(ival); - ret = "+OK\r\n"; - } else if (set_item == "slowlog-max-len") { - if (!slash::string2l(value.data(), value.size(), &ival) || ival < 0) { - ret = "-ERR Invalid argument \'" + value + "\' for CONFIG SET 'slowlog-max-len'\r\n"; - return; - } - g_pika_conf->SetSlowlogMaxLen(ival); - g_pika_server->SlowlogTrim(); - ret = "+OK\r\n"; - } else if (set_item == "max-cache-statistic-keys") { - if (!slash::string2l(value.data(), value.size(), &ival) || ival < 0) { - ret = "-ERR Invalid argument \'" + value + "\' for CONFIG SET 'max-cache-statistic-keys'\r\n"; - return; - } - g_pika_conf->SetMaxCacheStatisticKeys(ival); - g_pika_server->PartitionSetMaxCacheStatisticKeys(ival); - ret = "+OK\r\n"; - } else if (set_item == "small-compaction-threshold") { - if (!slash::string2l(value.data(), value.size(), &ival) || ival < 0) { - ret = "-ERR Invalid argument \'" + value + "\' for CONFIG SET 'small-compaction-threshold'\r\n"; - return; - } - g_pika_conf->SetSmallCompactionThreshold(ival); - g_pika_server->PartitionSetSmallCompactionThreshold(ival); - ret = "+OK\r\n"; - } else if (set_item == "max-client-response-size") { - if (!slash::string2l(value.data(), value.size(), &ival) || ival < 0) { - ret = "-ERR Invalid argument \'" + value + "\' for CONFIG SET 'max-client-response-size'\r\n"; - return; - } - g_pika_conf->SetMaxClientResponseSize(ival); - ret = "+OK\r\n"; - } else if (set_item == "write-binlog") { - int role = g_pika_server->role(); - if (role == PIKA_ROLE_SLAVE) { - ret = "-ERR need to close master-slave mode first\r\n"; - return; - } else if (value != "yes" && value != "no") { - ret = "-ERR invalid write-binlog (yes or no)\r\n"; - return; - } else { - g_pika_conf->SetWriteBinlog(value); - ret = "+OK\r\n"; - } - } else if (set_item == "db-sync-speed") { - if (!slash::string2l(value.data(), value.size(), &ival)) { - ret = "-ERR Invalid argument \'" + value + "\' for CONFIG SET 'db-sync-speed(MB)'\r\n"; - return; - } - if (ival < 0 || ival > 1024) { - ival = 1024; - } - g_pika_conf->SetDbSyncSpeed(ival); - ret = "+OK\r\n"; - } else if (set_item == "compact-cron") { - bool invalid = false; - if (value != "") { - bool have_week = false; - std::string compact_cron, week_str; - int slash_num = count(value.begin(), value.end(), '/'); - if (slash_num == 2) { - have_week = true; - std::string::size_type first_slash = value.find("/"); - week_str = value.substr(0, first_slash); - compact_cron = value.substr(first_slash + 1); - } else { - compact_cron = value; - } - - std::string::size_type len = compact_cron.length(); - std::string::size_type colon = compact_cron.find("-"); - std::string::size_type underline = compact_cron.find("/"); - if (colon == std::string::npos || underline == std::string::npos || - colon >= underline || colon + 1 >= len || - colon + 1 == underline || underline + 1 >= len) { - invalid = true; - } else { - int week = std::atoi(week_str.c_str()); - int start = std::atoi(compact_cron.substr(0, colon).c_str()); - int end = std::atoi(compact_cron.substr(colon + 1, underline).c_str()); - int usage = std::atoi(compact_cron.substr(underline + 1).c_str()); - if ((have_week && (week < 1 || week > 7)) || start < 0 || start > 23 || end < 0 || end > 23 || usage < 0 || usage > 100) { - invalid = true; - } - } - } - if (invalid) { - ret = "-ERR invalid compact-cron\r\n"; - return; - } else { - 
g_pika_conf->SetCompactCron(value); - ret = "+OK\r\n"; - } - } else if (set_item == "compact-interval") { - bool invalid = false; - if (value != "") { - std::string::size_type len = value.length(); - std::string::size_type slash = value.find("/"); - if (slash == std::string::npos || slash + 1 >= len) { - invalid = true; - } else { - int interval = std::atoi(value.substr(0, slash).c_str()); - int usage = std::atoi(value.substr(slash+1).c_str()); - if (interval <= 0 || usage < 0 || usage > 100) { - invalid = true; - } - } - } - if (invalid) { - ret = "-ERR invalid compact-interval\r\n"; - return; - } else { - g_pika_conf->SetCompactInterval(value); - ret = "+OK\r\n"; - } - } else if (set_item == "sync-window-size") { - if (!slash::string2l(value.data(), value.size(), &ival)) { - ret = "-ERR Invalid argument \'" + value + "\' for CONFIG SET 'sync-window-size'\r\n"; - return; - } - if (ival <= 0 || ival > kBinlogReadWinMaxSize) { - ret = "-ERR Argument exceed range \'" + value + "\' for CONFIG SET 'sync-window-size'\r\n"; - return; - } - g_pika_conf->SetSyncWindowSize(ival); - ret = "+OK\r\n"; - } else { - ret = "-ERR Unsupported CONFIG parameter: " + set_item + "\r\n"; - } -} - -void ConfigCmd::ConfigRewrite(std::string &ret) { - g_pika_conf->ConfigRewrite(); - ret = "+OK\r\n"; -} - -void ConfigCmd::ConfigResetstat(std::string &ret) { - g_pika_server->ResetStat(); - ret = "+OK\r\n"; -} - -void MonitorCmd::DoInitial() { - if (argv_.size() != 1) { - res_.SetRes(CmdRes::kWrongNum, kCmdNameMonitor); - return; - } -} - -void MonitorCmd::Do(std::shared_ptr<Partition> partition) { - std::shared_ptr<pink::PinkConn> conn_repl = GetConn(); - if (!conn_repl) { - res_.SetRes(CmdRes::kErrOther, kCmdNameMonitor); - LOG(WARNING) << name_ << " weak ptr is empty"; - return; - } - std::shared_ptr<pink::PinkConn> conn = - std::dynamic_pointer_cast<PikaClientConn>(conn_repl)->server_thread()->MoveConnOut(conn_repl->fd()); - assert(conn.get() == conn_repl.get()); - g_pika_server->AddMonitorClient(std::dynamic_pointer_cast<PikaClientConn>(conn)); - g_pika_server->AddMonitorMessage("OK"); - return; // Monitor thread will return "OK" -} - -void DbsizeCmd::DoInitial() { - if (argv_.size() != 1) { - res_.SetRes(CmdRes::kWrongNum, kCmdNameDbsize); - return; - } -} - -void DbsizeCmd::Do(std::shared_ptr<Partition> partition) { - std::shared_ptr<Table> 
table = g_pika_server->GetTable(table_name_); - if (!table) { - res_.SetRes(CmdRes::kInvalidTable); - } else { - KeyScanInfo key_scan_info = table->GetKeyScanInfo(); - std::vector<blackwidow::KeyInfo> key_infos = key_scan_info.key_infos; - if (key_infos.size() != 5) { - res_.SetRes(CmdRes::kErrOther, "keyspace error"); - return; - } - int64_t dbsize = key_infos[0].keys - + key_infos[1].keys - + key_infos[2].keys - + key_infos[3].keys - + key_infos[4].keys; - res_.AppendInteger(dbsize); - } -} - -void TimeCmd::DoInitial() { - if (argv_.size() != 1) { - res_.SetRes(CmdRes::kWrongNum, kCmdNameTime); - return; - } -} - -void TimeCmd::Do(std::shared_ptr<Partition> partition) { - struct timeval tv; - if (gettimeofday(&tv, NULL) == 0) { - res_.AppendArrayLen(2); - char buf[32]; - int32_t len = slash::ll2string(buf, sizeof(buf), tv.tv_sec); - res_.AppendStringLen(len); - res_.AppendContent(buf); - - len = slash::ll2string(buf, sizeof(buf), tv.tv_usec); - res_.AppendStringLen(len); - res_.AppendContent(buf); - } else { - res_.SetRes(CmdRes::kErrOther, strerror(errno)); - } -} - -void DelbackupCmd::DoInitial() { - if (argv_.size() != 1) { - res_.SetRes(CmdRes::kWrongNum, kCmdNameDelbackup); - return; - } -} - -void DelbackupCmd::Do(std::shared_ptr<Partition> partition) { - std::string db_sync_prefix = g_pika_conf->bgsave_prefix(); - std::string db_sync_path = g_pika_conf->bgsave_path(); - std::vector<std::string> dump_dir; - - // Dump path does not exist - if (!slash::FileExists(db_sync_path)) { - res_.SetRes(CmdRes::kOk); - return; - } - // Directory traversal - if (slash::GetChildren(db_sync_path, dump_dir) != 0) { - res_.SetRes(CmdRes::kOk); - return; - } - - int len = dump_dir.size(); - for (size_t i = 0; i < dump_dir.size(); i++) { - if (dump_dir[i].substr(0, db_sync_prefix.size()) != db_sync_prefix || dump_dir[i].size() != (db_sync_prefix.size() + 8)) { - continue; - } - - std::string str_date = dump_dir[i].substr(db_sync_prefix.size(), (dump_dir[i].size() - db_sync_prefix.size())); - char *end = NULL; - std::strtol(str_date.c_str(), &end, 10); - if (*end != 0) { - continue; - } - - std::string dump_dir_name = db_sync_path + dump_dir[i] + "/" + table_name_; - if (g_pika_server->CountSyncSlaves() == 0) { - LOG(INFO) << "Not syncing, delete dump file: " << dump_dir_name; - slash::DeleteDirIfExist(dump_dir_name); - len--; - } else { - LOG(INFO) << "Syncing, can not delete " << dump_dir_name << " dump file" << std::endl; - } - } - res_.SetRes(CmdRes::kOk); - return; -} - -void EchoCmd::DoInitial() { - if (!CheckArg(argv_.size())) { - res_.SetRes(CmdRes::kWrongNum, kCmdNameEcho); - return; - } - body_ = argv_[1]; - return; -} - -void EchoCmd::Do(std::shared_ptr<Partition> partition) { - res_.AppendString(body_); - return; -} - -void ScandbCmd::DoInitial() { - if (!CheckArg(argv_.size())) { - res_.SetRes(CmdRes::kWrongNum, kCmdNameScandb); - return; - } - if (argv_.size() == 1) { - type_ = blackwidow::kAll; - } else { - if (!strcasecmp(argv_[1].data(),"string")) { - type_ = blackwidow::kStrings; - } else if (!strcasecmp(argv_[1].data(), "hash")) { - type_ = blackwidow::kHashes; - } else if (!strcasecmp(argv_[1].data(), "set")) { - type_ = blackwidow::kSets; - } else if (!strcasecmp(argv_[1].data(), "zset")) { - type_ = blackwidow::kZSets; - } else if (!strcasecmp(argv_[1].data(), "list")) { - type_ = blackwidow::kLists; - } else { - res_.SetRes(CmdRes::kInvalidDbType); - } - } - return; -} - -void ScandbCmd::Do(std::shared_ptr<Partition> partition) { - std::shared_ptr<Table> 
table = g_pika_server->GetTable(table_name_); - if (!table) { - res_.SetRes(CmdRes::kInvalidTable); - } else { - table->ScanDatabase(type_); - res_.SetRes(CmdRes::kOk); - } - return; -} - -void SlowlogCmd::DoInitial() { - if (!CheckArg(argv_.size())) { - res_.SetRes(CmdRes::kWrongNum, kCmdNameSlowlog); - return; - } - if (argv_.size() == 2 && !strcasecmp(argv_[1].data(), "reset")) { - condition_ = SlowlogCmd::kRESET; - } else if (argv_.size() == 2 && !strcasecmp(argv_[1].data(), "len")) { - condition_ = SlowlogCmd::kLEN; - } else if ((argv_.size() == 2 || argv_.size() == 3) && !strcasecmp(argv_[1].data(), "get")) { - condition_ = SlowlogCmd::kGET; - if (argv_.size() == 3 && !slash::string2l(argv_[2].data(), argv_[2].size(), &number_)) { - res_.SetRes(CmdRes::kInvalidInt); - return; - } - } else { - res_.SetRes(CmdRes::kErrOther, "Unknown SLOWLOG subcommand or wrong # of args. Try GET, RESET, LEN."); - return; - } -} - -void SlowlogCmd::Do(std::shared_ptr partition) { - if (condition_ == SlowlogCmd::kRESET) { - g_pika_server->SlowlogReset(); - res_.SetRes(CmdRes::kOk); - } else if (condition_ == SlowlogCmd::kLEN) { - res_.AppendInteger(g_pika_server->SlowlogLen()); - } else { - std::vector slowlogs; - g_pika_server->SlowlogObtain(number_, &slowlogs); - res_.AppendArrayLen(slowlogs.size()); - for (const auto& slowlog : slowlogs) { - res_.AppendArrayLen(4); - res_.AppendInteger(slowlog.id); - res_.AppendInteger(slowlog.start_time); - res_.AppendInteger(slowlog.duration); - res_.AppendArrayLen(slowlog.argv.size()); - for (const auto& arg : slowlog.argv) { - res_.AppendString(arg); - } - } - } - return; -} - -void PaddingCmd::DoInitial() { - if (!CheckArg(argv_.size())) { - res_.SetRes(CmdRes::kWrongNum, kCmdNamePadding); - return; - } -} - -void PaddingCmd::Do(std::shared_ptr partition) { - res_.SetRes(CmdRes::kOk); -} - -std::string PaddingCmd::ToBinlog( - uint32_t exec_time, - const std::string& server_id, - uint64_t logic_id, - uint32_t filenum, - uint64_t offset) { - return PikaBinlogTransverter::ConstructPaddingBinlog( - BinlogType::TypeFirst, argv_[1].size() + BINLOG_ITEM_HEADER_SIZE - + PADDING_BINLOG_PROTOCOL_SIZE + SPACE_STROE_PARAMETER_LENGTH); -} - -#ifdef TCMALLOC_EXTENSION -void TcmallocCmd::DoInitial() { - if (argv_.size() != 2 && argv_.size() != 3) { - res_.SetRes(CmdRes::kWrongNum, kCmdNameTcmalloc); - return; - } - rate_ = 0; - std::string type = argv_[1]; - if (!strcasecmp(type.data(), "stats")) { - type_ = 0; - } else if (!strcasecmp(type.data(), "rate")) { - type_ = 1; - if (argv_.size() == 3) { - if (!slash::string2l(argv_[2].data(), argv_[2].size(), &rate_)) { - res_.SetRes(CmdRes::kSyntaxErr, kCmdNameTcmalloc); - } - } - } else if (!strcasecmp(type.data(), "list")) { - type_ = 2; - } else if (!strcasecmp(type.data(), "free")) { - type_ = 3; - } else { - res_.SetRes(CmdRes::kInvalidParameter, kCmdNameTcmalloc); - return; - } -} - -void TcmallocCmd::Do(std::shared_ptr partition) { - std::vector fli; - std::vector elems; - switch(type_) { - case 0: - char stats[1024]; - MallocExtension::instance()->GetStats(stats, 1024); - slash::StringSplit(stats, '\n', elems); - res_.AppendArrayLen(elems.size()); - for (auto& i : elems) { - res_.AppendString(i); - } - break; - case 1: - if (rate_) { - MallocExtension::instance()->SetMemoryReleaseRate(rate_); - } - res_.AppendInteger(MallocExtension::instance()->GetMemoryReleaseRate()); - break; - case 2: - MallocExtension::instance()->GetFreeListSizes(&fli); - res_.AppendArrayLen(fli.size()); - for (auto& i : fli) { - 
res_.AppendString("type: " + std::string(i.type) + ", min: " + std::to_string(i.min_object_size) + - ", max: " + std::to_string(i.max_object_size) + ", total: " + std::to_string(i.total_bytes_free)); - } - break; - case 3: - MallocExtension::instance()->ReleaseFreeMemory(); - res_.SetRes(CmdRes::kOk); - } -} -#endif - -void PKPatternMatchDelCmd::DoInitial() { - if (!CheckArg(argv_.size())) { - res_.SetRes(CmdRes::kWrongNum, kCmdNamePKPatternMatchDel); - return; - } - pattern_ = argv_[1]; - if (!strcasecmp(argv_[2].data(), "set")) { - type_ = blackwidow::kSets; - } else if (!strcasecmp(argv_[2].data(), "list")) { - type_ = blackwidow::kLists; - } else if (!strcasecmp(argv_[2].data(), "string")) { - type_ = blackwidow::kStrings; - } else if (!strcasecmp(argv_[2].data(), "zset")) { - type_ = blackwidow::kZSets; - } else if (!strcasecmp(argv_[2].data(), "hash")) { - type_ = blackwidow::kHashes; - } else { - res_.SetRes(CmdRes::kInvalidDbType, kCmdNamePKPatternMatchDel); - return; - } -} - -void PKPatternMatchDelCmd::Do(std::shared_ptr partition) { - int ret = 0; - rocksdb::Status s = partition->db()->PKPatternMatchDel(type_, pattern_, &ret); - if (s.ok()) { - res_.AppendInteger(ret); - } else { - res_.SetRes(CmdRes::kErrOther, s.ToString()); - } -} diff --git a/tools/pika_migrate/src/pika_auxiliary_thread.cc b/tools/pika_migrate/src/pika_auxiliary_thread.cc deleted file mode 100644 index 62a2b22941..0000000000 --- a/tools/pika_migrate/src/pika_auxiliary_thread.cc +++ /dev/null @@ -1,55 +0,0 @@ -// Copyright (c) 2019-present, Qihoo, Inc. All rights reserved. -// This source code is licensed under the BSD-style license found in the -// LICENSE file in the root directory of this source tree. An additional grant -// of patent rights can be found in the PATENTS file in the same directory. - -#include "include/pika_auxiliary_thread.h" - -#include "include/pika_server.h" -#include "include/pika_define.h" -#include "include/pika_rm.h" - -extern PikaServer* g_pika_server; -extern PikaReplicaManager* g_pika_rm; - -PikaAuxiliaryThread::~PikaAuxiliaryThread() { - StopThread(); - LOG(INFO) << "PikaAuxiliary thread " << thread_id() << " exit!!!"; -} - -void* PikaAuxiliaryThread::ThreadMain() { - while (!should_stop()) { - if (g_pika_conf->classic_mode()) { - if (g_pika_server->ShouldMetaSync()) { - g_pika_rm->SendMetaSyncRequest(); - } else if (g_pika_server->MetaSyncDone()) { - g_pika_rm->RunSyncSlavePartitionStateMachine(); - } - } else { - g_pika_rm->RunSyncSlavePartitionStateMachine(); - } - - Status s = g_pika_rm->CheckSyncTimeout(slash::NowMicros()); - if (!s.ok()) { - LOG(WARNING) << s.ToString(); - } - - // TODO(whoiami) timeout - s = g_pika_server->TriggerSendBinlogSync(); - if (!s.ok()) { - LOG(WARNING) << s.ToString(); - } - // send to peer - int res = g_pika_server->SendToPeer(); - if (!res) { - // sleep 100 ms - mu_.Lock(); - cv_.TimedWait(100); - mu_.Unlock(); - } else { - //LOG_EVERY_N(INFO, 1000) << "Consume binlog number " << res; - } - } - return NULL; -} - diff --git a/tools/pika_migrate/src/pika_binlog.cc b/tools/pika_migrate/src/pika_binlog.cc deleted file mode 100644 index 7c71b32bba..0000000000 --- a/tools/pika_migrate/src/pika_binlog.cc +++ /dev/null @@ -1,357 +0,0 @@ -// Copyright (c) 2015-present, Qihoo, Inc. All rights reserved. -// This source code is licensed under the BSD-style license found in the -// LICENSE file in the root directory of this source tree. An additional grant -// of patent rights can be found in the PATENTS file in the same directory. 
- -#include "include/pika_binlog.h" - -#include -#include - -#include "include/pika_binlog_transverter.h" - -using slash::RWLock; - -std::string NewFileName(const std::string name, const uint32_t current) { - char buf[256]; - snprintf(buf, sizeof(buf), "%s%u", name.c_str(), current); - return std::string(buf); -} - -/* - * Version - */ -Version::Version(slash::RWFile *save) - : pro_num_(0), - pro_offset_(0), - logic_id_(0), - save_(save) { - assert(save_ != NULL); - - pthread_rwlock_init(&rwlock_, NULL); -} - -Version::~Version() { - StableSave(); - pthread_rwlock_destroy(&rwlock_); -} - -Status Version::StableSave() { - char *p = save_->GetData(); - memcpy(p, &pro_num_, sizeof(uint32_t)); - p += 4; - memcpy(p, &pro_offset_, sizeof(uint64_t)); - p += 8; - memcpy(p, &logic_id_, sizeof(uint64_t)); - p += 8; - return Status::OK(); -} - -Status Version::Init() { - Status s; - if (save_->GetData() != NULL) { - memcpy((char*)(&pro_num_), save_->GetData(), sizeof(uint32_t)); - memcpy((char*)(&pro_offset_), save_->GetData() + 4, sizeof(uint64_t)); - memcpy((char*)(&logic_id_), save_->GetData() + 12, sizeof(uint64_t)); - return Status::OK(); - } else { - return Status::Corruption("version init error"); - } -} - -/* - * Binlog - */ -Binlog::Binlog(const std::string& binlog_path, const int file_size) : - consumer_num_(0), - version_(NULL), - queue_(NULL), - versionfile_(NULL), - pro_num_(0), - pool_(NULL), - exit_all_consume_(false), - binlog_path_(binlog_path), - file_size_(file_size) { - - // To intergrate with old version, we don't set mmap file size to 100M; - //slash::SetMmapBoundSize(file_size); - //slash::kMmapBoundSize = 1024 * 1024 * 100; - - Status s; - - slash::CreateDir(binlog_path_); - - filename = binlog_path_ + kBinlogPrefix; - const std::string manifest = binlog_path_ + kManifest; - std::string profile; - - if (!slash::FileExists(manifest)) { - LOG(INFO) << "Binlog: Manifest file not exist, we create a new one."; - - profile = NewFileName(filename, pro_num_); - s = slash::NewWritableFile(profile, &queue_); - if (!s.ok()) { - LOG(FATAL) << "Binlog: new " << filename << " " << s.ToString(); - } - - - s = slash::NewRWFile(manifest, &versionfile_); - if (!s.ok()) { - LOG(FATAL) << "Binlog: new versionfile error " << s.ToString(); - } - - version_ = new Version(versionfile_); - version_->StableSave(); - } else { - LOG(INFO) << "Binlog: Find the exist file."; - - s = slash::NewRWFile(manifest, &versionfile_); - if (s.ok()) { - version_ = new Version(versionfile_); - version_->Init(); - pro_num_ = version_->pro_num_; - - // Debug - //version_->debug(); - } else { - LOG(FATAL) << "Binlog: open versionfile error"; - } - - profile = NewFileName(filename, pro_num_); - DLOG(INFO) << "Binlog: open profile " << profile; - s = slash::AppendWritableFile(profile, &queue_, version_->pro_offset_); - if (!s.ok()) { - LOG(FATAL) << "Binlog: Open file " << profile << " error " << s.ToString(); - } - - uint64_t filesize = queue_->Filesize(); - DLOG(INFO) << "Binlog: filesize is " << filesize; - } - - InitLogFile(); -} - -Binlog::~Binlog() { - delete version_; - delete versionfile_; - - delete queue_; -} - -void Binlog::InitLogFile() { - assert(queue_ != NULL); - - uint64_t filesize = queue_->Filesize(); - block_offset_ = filesize % kBlockSize; -} - -Status Binlog::GetProducerStatus(uint32_t* filenum, uint64_t* pro_offset, uint64_t* logic_id) { - slash::RWLock(&(version_->rwlock_), false); - - *filenum = version_->pro_num_; - *pro_offset = version_->pro_offset_; - if (logic_id != NULL) { - *logic_id = 
version_->logic_id_; - } - - return Status::OK(); -} - -// Note: mutex lock should be held -Status Binlog::Put(const std::string &item) { - return Put(item.c_str(), item.size()); -} - -// Note: mutex lock should be held -Status Binlog::Put(const char* item, int len) { - Status s; - - /* Check to roll log file */ - uint64_t filesize = queue_->Filesize(); - if (filesize > file_size_) { - delete queue_; - queue_ = NULL; - - pro_num_++; - std::string profile = NewFileName(filename, pro_num_); - slash::NewWritableFile(profile, &queue_); - - { - slash::RWLock(&(version_->rwlock_), true); - version_->pro_offset_ = 0; - version_->pro_num_ = pro_num_; - version_->StableSave(); - } - InitLogFile(); - } - - int pro_offset; - s = Produce(Slice(item, len), &pro_offset); - if (s.ok()) { - slash::RWLock(&(version_->rwlock_), true); - version_->pro_offset_ = pro_offset; - version_->logic_id_++; - version_->StableSave(); - } - - return s; -} - -Status Binlog::EmitPhysicalRecord(RecordType t, const char *ptr, size_t n, int *temp_pro_offset) { - Status s; - assert(n <= 0xffffff); - assert(block_offset_ + kHeaderSize + n <= kBlockSize); - - char buf[kHeaderSize]; - - uint64_t now; - struct timeval tv; - gettimeofday(&tv, NULL); - now = tv.tv_sec; - buf[0] = static_cast(n & 0xff); - buf[1] = static_cast((n & 0xff00) >> 8); - buf[2] = static_cast(n >> 16); - buf[3] = static_cast(now & 0xff); - buf[4] = static_cast((now & 0xff00) >> 8); - buf[5] = static_cast((now & 0xff0000) >> 16); - buf[6] = static_cast((now & 0xff000000) >> 24); - buf[7] = static_cast(t); - - s = queue_->Append(Slice(buf, kHeaderSize)); - if (s.ok()) { - s = queue_->Append(Slice(ptr, n)); - if (s.ok()) { - s = queue_->Flush(); - } - } - block_offset_ += static_cast(kHeaderSize + n); - - *temp_pro_offset += kHeaderSize + n; - return s; -} - -Status Binlog::Produce(const Slice &item, int *temp_pro_offset) { - Status s; - const char *ptr = item.data(); - size_t left = item.size(); - bool begin = true; - - *temp_pro_offset = version_->pro_offset_; - do { - const int leftover = static_cast(kBlockSize) - block_offset_; - assert(leftover >= 0); - if (static_cast(leftover) < kHeaderSize) { - if (leftover > 0) { - s = queue_->Append(Slice("\x00\x00\x00\x00\x00\x00\x00", leftover)); - if (!s.ok()) { - return s; - } - *temp_pro_offset += leftover; - } - block_offset_ = 0; - } - - const size_t avail = kBlockSize - block_offset_ - kHeaderSize; - const size_t fragment_length = (left < avail) ? left : avail; - RecordType type; - const bool end = (left == fragment_length); - if (begin && end) { - type = kFullType; - } else if (begin) { - type = kFirstType; - } else if (end) { - type = kLastType; - } else { - type = kMiddleType; - } - - s = EmitPhysicalRecord(type, ptr, fragment_length, temp_pro_offset); - ptr += fragment_length; - left -= fragment_length; - begin = false; - } while (s.ok() && left > 0); - - return s; -} - -Status Binlog::AppendPadding(slash::WritableFile* file, uint64_t* len) { - if (*len < kHeaderSize) { - return Status::OK(); - } - - Status s; - char buf[kBlockSize]; - uint64_t now; - struct timeval tv; - gettimeofday(&tv, NULL); - now = tv.tv_sec; - - uint64_t left = *len; - while (left > 0 && s.ok()) { - uint32_t size = (left >= kBlockSize) ? 
kBlockSize : left; - if (size < kHeaderSize) { - break; - } else { - uint32_t bsize = size - kHeaderSize; - std::string binlog = PikaBinlogTransverter::ConstructPaddingBinlog( - BinlogType::TypeFirst, bsize); - if (binlog.empty()) { - break; - } - buf[0] = static_cast(bsize & 0xff); - buf[1] = static_cast((bsize & 0xff00) >> 8); - buf[2] = static_cast(bsize >> 16); - buf[3] = static_cast(now & 0xff); - buf[4] = static_cast((now & 0xff00) >> 8); - buf[5] = static_cast((now & 0xff0000) >> 16); - buf[6] = static_cast((now & 0xff000000) >> 24); - buf[7] = static_cast(kFullType); - s = file->Append(Slice(buf, kHeaderSize)); - if (s.ok()) { - s = file->Append(Slice(binlog.data(), binlog.size())); - if (s.ok()) { - s = file->Flush(); - left -= size; - } - } - } - } - *len -= left; - return s; -} - -Status Binlog::SetProducerStatus(uint32_t pro_num, uint64_t pro_offset) { - slash::MutexLock l(&mutex_); - - // offset smaller than the first header - if (pro_offset < 4) { - pro_offset = 0; - } - - delete queue_; - - std::string init_profile = NewFileName(filename, 0); - if (slash::FileExists(init_profile)) { - slash::DeleteFile(init_profile); - } - - std::string profile = NewFileName(filename, pro_num); - if (slash::FileExists(profile)) { - slash::DeleteFile(profile); - } - - slash::NewWritableFile(profile, &queue_); - Binlog::AppendPadding(queue_, &pro_offset); - - pro_num_ = pro_num; - - { - slash::RWLock(&(version_->rwlock_), true); - version_->pro_num_ = pro_num; - version_->pro_offset_ = pro_offset; - version_->StableSave(); - } - - InitLogFile(); - return Status::OK(); -} diff --git a/tools/pika_migrate/src/pika_binlog_reader.cc b/tools/pika_migrate/src/pika_binlog_reader.cc deleted file mode 100644 index 46c7e8c604..0000000000 --- a/tools/pika_migrate/src/pika_binlog_reader.cc +++ /dev/null @@ -1,267 +0,0 @@ -// Copyright (c) 2015-present, Qihoo, Inc. All rights reserved. -// This source code is licensed under the BSD-style license found in the -// LICENSE file in the root directory of this source tree. An additional grant -// of patent rights can be found in the PATENTS file in the same directory. 
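pika_binlog_reader.cc, removed next, is the read side of that framing: ReadPhysicalRecord returns one physical record per call, and Consume() stitches kFirstType/kMiddleType/kLastType fragments back into a single logical entry. A compact sketch of the reassembly loop, with a hypothetical `read` callback standing in for ReadPhysicalRecord:

#include <functional>
#include <string>

enum RecordType { kFullType = 1, kFirstType = 2, kMiddleType = 3, kLastType = 4, kEof = 5, kBadRecord = 6 };

// Reassemble one logical entry from physical fragments, mirroring
// PikaBinlogReader::Consume. `read` fills one fragment per call and
// returns its record type.
bool ConsumeOne(const std::function<RecordType(std::string*)>& read, std::string* out) {
  std::string fragment;
  out->clear();
  while (true) {
    switch (read(&fragment)) {
      case kFullType:   *out = fragment;       return true;   // whole entry in one record
      case kFirstType:  out->assign(fragment); break;         // start accumulating
      case kMiddleType: out->append(fragment); break;         // keep accumulating
      case kLastType:   out->append(fragment); return true;   // entry complete
      default:          return false;                         // EOF or corruption
    }
  }
}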
- -#include "include/pika_binlog_reader.h" - -#include - -PikaBinlogReader::PikaBinlogReader(uint32_t cur_filenum, - uint64_t cur_offset) - : cur_filenum_(cur_filenum), - cur_offset_(cur_offset), - logger_(nullptr), - queue_(nullptr), - backing_store_(new char[kBlockSize]), - buffer_() { - last_record_offset_ = cur_offset % kBlockSize; - pthread_rwlock_init(&rwlock_, NULL); -} - -PikaBinlogReader::PikaBinlogReader() - : cur_filenum_(0), - cur_offset_(0), - logger_(nullptr), - queue_(nullptr), - backing_store_(new char[kBlockSize]), - buffer_() { - last_record_offset_ = 0 % kBlockSize; - pthread_rwlock_init(&rwlock_, NULL); -} - - -PikaBinlogReader::~PikaBinlogReader() { - delete[] backing_store_; - delete queue_; - pthread_rwlock_destroy(&rwlock_); -} - -void PikaBinlogReader::GetReaderStatus(uint32_t* cur_filenum, uint64_t* cur_offset) { - slash::RWLock(&(rwlock_), false); - *cur_filenum = cur_filenum_; - *cur_offset = cur_offset_; -} - -bool PikaBinlogReader::ReadToTheEnd() { - uint32_t pro_num; - uint64_t pro_offset; - logger_->GetProducerStatus(&pro_num, &pro_offset); - slash::RWLock(&(rwlock_), false); - return (pro_num == cur_filenum_ && pro_offset == cur_offset_); -} - -int PikaBinlogReader::Seek(std::shared_ptr logger, uint32_t filenum, uint64_t offset) { - std::string confile = NewFileName(logger->filename, filenum); - if (!slash::FileExists(confile)) { - return -1; - } - slash::SequentialFile* readfile; - if (!slash::NewSequentialFile(confile, &readfile).ok()) { - return -1; - } - if (queue_) { - delete queue_; - } - queue_ = readfile; - logger_ = logger; - - slash::RWLock(&(rwlock_), true); - cur_filenum_ = filenum; - cur_offset_ = offset; - last_record_offset_ = cur_filenum_ % kBlockSize; - - slash::Status s; - uint64_t start_block = (cur_offset_ / kBlockSize) * kBlockSize; - s = queue_->Skip((cur_offset_ / kBlockSize) * kBlockSize); - uint64_t block_offset = cur_offset_ % kBlockSize; - uint64_t ret = 0; - uint64_t res = 0; - bool is_error = false; - - while (true) { - if (res >= block_offset) { - cur_offset_ = start_block + res; - break; - } - ret = 0; - is_error = GetNext(&ret); - if (is_error == true) { - return -1; - } - res += ret; - } - last_record_offset_ = cur_offset_ % kBlockSize; - return 0; -} - -bool PikaBinlogReader::GetNext(uint64_t* size) { - uint64_t offset = 0; - slash::Status s; - bool is_error = false; - - while (true) { - buffer_.clear(); - s = queue_->Read(kHeaderSize, &buffer_, backing_store_); - if (!s.ok()) { - is_error = true; - return is_error; - } - - const char* header = buffer_.data(); - const uint32_t a = static_cast(header[0]) & 0xff; - const uint32_t b = static_cast(header[1]) & 0xff; - const uint32_t c = static_cast(header[2]) & 0xff; - const unsigned int type = header[7]; - const uint32_t length = a | (b << 8) | (c << 16); - - if (type == kFullType) { - s = queue_->Read(length, &buffer_, backing_store_); - offset += kHeaderSize + length; - break; - } else if (type == kFirstType) { - s = queue_->Read(length, &buffer_, backing_store_); - offset += kHeaderSize + length; - } else if (type == kMiddleType) { - s = queue_->Read(length, &buffer_, backing_store_); - offset += kHeaderSize + length; - } else if (type == kLastType) { - s = queue_->Read(length, &buffer_, backing_store_); - offset += kHeaderSize + length; - break; - } else { - is_error = true; - break; - } - } - *size = offset; - return is_error; -} - -unsigned int PikaBinlogReader::ReadPhysicalRecord(slash::Slice *result, uint32_t* filenum, uint64_t* offset) { - slash::Status s; - if 
(kBlockSize - last_record_offset_ <= kHeaderSize) { - queue_->Skip(kBlockSize - last_record_offset_); - slash::RWLock(&(rwlock_), true); - cur_offset_ += (kBlockSize - last_record_offset_); - last_record_offset_ = 0; - } - buffer_.clear(); - s = queue_->Read(kHeaderSize, &buffer_, backing_store_); - if (s.IsEndFile()) { - return kEof; - } else if (!s.ok()) { - return kBadRecord; - } - - const char* header = buffer_.data(); - const uint32_t a = static_cast(header[0]) & 0xff; - const uint32_t b = static_cast(header[1]) & 0xff; - const uint32_t c = static_cast(header[2]) & 0xff; - const unsigned int type = header[7]; - const uint32_t length = a | (b << 8) | (c << 16); - if (type == kZeroType || length == 0) { - buffer_.clear(); - return kOldRecord; - } - - buffer_.clear(); - s = queue_->Read(length, &buffer_, backing_store_); - *result = slash::Slice(buffer_.data(), buffer_.size()); - last_record_offset_ += kHeaderSize + length; - if (s.ok()) { - slash::RWLock(&(rwlock_), true); - *filenum = cur_filenum_; - cur_offset_ += (kHeaderSize + length); - *offset = cur_offset_; - } - return type; -} - -Status PikaBinlogReader::Consume(std::string* scratch, uint32_t* filenum, uint64_t* offset) { - Status s; - - slash::Slice fragment; - while (true) { - const unsigned int record_type = ReadPhysicalRecord(&fragment, filenum, offset); - - switch (record_type) { - case kFullType: - *scratch = std::string(fragment.data(), fragment.size()); - s = Status::OK(); - break; - case kFirstType: - scratch->assign(fragment.data(), fragment.size()); - s = Status::NotFound("Middle Status"); - break; - case kMiddleType: - scratch->append(fragment.data(), fragment.size()); - s = Status::NotFound("Middle Status"); - break; - case kLastType: - scratch->append(fragment.data(), fragment.size()); - s = Status::OK(); - break; - case kEof: - return Status::EndFile("Eof"); - case kBadRecord: - return Status::IOError("Data Corruption"); - case kOldRecord: - return Status::EndFile("Eof"); - default: - return Status::IOError("Unknow reason"); - } - if (s.ok()) { - break; - } - } - // DLOG(INFO) << "Binlog Sender consumer a msg: " << scratch; - return Status::OK(); -} - -// Get a whole message; -// Append to scratch; -// the status will be OK, IOError or Corruption, EndFile; -Status PikaBinlogReader::Get(std::string* scratch, uint32_t* filenum, uint64_t* offset) { - if (logger_ == nullptr || queue_ == NULL) { - return Status::Corruption("Not seek"); - } - scratch->clear(); - Status s = Status::OK(); - - do { - if (ReadToTheEnd()) { - return Status::EndFile("End of cur log file"); - } - s = Consume(scratch, filenum, offset); - if (s.IsEndFile()) { - std::string confile = NewFileName(logger_->filename, cur_filenum_ + 1); - - // sleep 10ms wait produce thread generate the new binlog - usleep(10000); - - // Roll to next file need retry; - if (slash::FileExists(confile)) { - DLOG(INFO) << "BinlogSender roll to new binlog" << confile; - delete queue_; - queue_ = NULL; - - slash::NewSequentialFile(confile, &(queue_)); - { - slash::RWLock(&(rwlock_), true); - cur_filenum_++; - cur_offset_ = 0; - } - last_record_offset_ = 0; - } else { - return Status::IOError("File Does Not Exists"); - } - } else { - break; - } - } while (s.IsEndFile()); - - return Status::OK(); -} - - diff --git a/tools/pika_migrate/src/pika_binlog_transverter.cc b/tools/pika_migrate/src/pika_binlog_transverter.cc deleted file mode 100644 index 702fd6ca5d..0000000000 --- a/tools/pika_migrate/src/pika_binlog_transverter.cc +++ /dev/null @@ -1,200 +0,0 @@ -// Copyright (c) 
2018-present, Qihoo, Inc. All rights reserved. -// This source code is licensed under the BSD-style license found in the -// LICENSE file in the root directory of this source tree. An additional grant -// of patent rights can be found in the PATENTS file in the same directory. - -#include "include/pika_binlog_transverter.h" - -#include -#include -#include - -#include "slash/include/slash_coding.h" - -#include "include/pika_command.h" - -uint32_t BinlogItem::exec_time() const { - return exec_time_; -} - -uint32_t BinlogItem::server_id() const { - return server_id_; -} - -uint64_t BinlogItem::logic_id() const { - return logic_id_; -} - -uint32_t BinlogItem::filenum() const { - return filenum_; -} - -uint64_t BinlogItem::offset() const { - return offset_; -} - -std::string BinlogItem::content() const { - return content_; -} - -void BinlogItem::set_exec_time(uint32_t exec_time) { - exec_time_ = exec_time; -} - -void BinlogItem::set_server_id(uint32_t server_id) { - server_id_ = server_id; -} - -void BinlogItem::set_logic_id(uint64_t logic_id) { - logic_id_ = logic_id; -} - -void BinlogItem::set_filenum(uint32_t filenum) { - filenum_ = filenum; -} - -void BinlogItem::set_offset(uint64_t offset) { - offset_ = offset; -} - -std::string BinlogItem::ToString() const { - std::string str; - str.append("exec_time: " + std::to_string(exec_time_)); - str.append(",server_id: " + std::to_string(server_id_)); - str.append(",logic_id: " + std::to_string(logic_id_)); - str.append(",filenum: " + std::to_string(filenum_)); - str.append(",offset: " + std::to_string(offset_)); - str.append("\ncontent: "); - for (size_t idx = 0; idx < content_.size(); ++idx) { - if (content_[idx] == '\n') { - str.append("\\n"); - } else if (content_[idx] == '\r') { - str.append("\\r"); - } else { - str.append(1, content_[idx]); - } - } - str.append("\n"); - return str; -} - -std::string PikaBinlogTransverter::BinlogEncode(BinlogType type, - uint32_t exec_time, - uint32_t server_id, - uint64_t logic_id, - uint32_t filenum, - uint64_t offset, - const std::string& content, - const std::vector& extends) { - std::string binlog; - slash::PutFixed16(&binlog, type); - slash::PutFixed32(&binlog, exec_time); - slash::PutFixed32(&binlog, server_id); - slash::PutFixed64(&binlog, logic_id); - slash::PutFixed32(&binlog, filenum); - slash::PutFixed64(&binlog, offset); - uint32_t content_length = content.size(); - slash::PutFixed32(&binlog, content_length); - binlog.append(content); - return binlog; -} - -bool PikaBinlogTransverter::BinlogDecode(BinlogType type, - const std::string& binlog, - BinlogItem* binlog_item) { - uint16_t binlog_type = 0; - uint32_t content_length = 0; - std::string binlog_str = binlog; - slash::GetFixed16(&binlog_str, &binlog_type); - if (binlog_type != type) { - LOG(ERROR) << "Binlog Item type error, expect type:" << type << " actualy type: " << binlog_type; - return false; - } - slash::GetFixed32(&binlog_str, &binlog_item->exec_time_); - slash::GetFixed32(&binlog_str, &binlog_item->server_id_); - slash::GetFixed64(&binlog_str, &binlog_item->logic_id_); - slash::GetFixed32(&binlog_str, &binlog_item->filenum_); - slash::GetFixed64(&binlog_str, &binlog_item->offset_); - slash::GetFixed32(&binlog_str, &content_length); - if (binlog_str.size() == content_length) { - binlog_item->content_.assign(binlog_str.data(), content_length); - } else { - LOG(ERROR) << "Binlog Item get content error, expect length:" << content_length << " left length:" << binlog_str.size(); - return false; - } - return true; -} - -/* - * 
*************************************************Type First Binlog Item Format************************************************** - * | <Type> | <Create Time> | <Server Id> | <Binlog Logic Id> | <File Num> | <Offset> | <Content Length> | <Content> | - * | 2 Bytes | 4 Bytes | 4 Bytes | 8 Bytes | 4 Bytes | 8 Bytes | 4 Bytes | content length Bytes | - * |---------------------------------------------- 34 Bytes -----------------------------------------------| - * - * content: *2\r\n$7\r\npadding\r\n$00001\r\n***\r\n - * length of *** -> total_len - PADDING_BINLOG_PROTOCOL_SIZE - SPACE_STROE_PARAMETER_LENGTH; - * - * We allocate five bytes to store the length of the parameter - */ -std::string PikaBinlogTransverter::ConstructPaddingBinlog(BinlogType type, - uint32_t size) { - assert(size <= kBlockSize - kHeaderSize); - assert(BINLOG_ITEM_HEADER_SIZE + PADDING_BINLOG_PROTOCOL_SIZE - + SPACE_STROE_PARAMETER_LENGTH <= size); - - std::string binlog; - slash::PutFixed16(&binlog, type); - slash::PutFixed32(&binlog, 0); - slash::PutFixed32(&binlog, 0); - slash::PutFixed64(&binlog, 0); - slash::PutFixed32(&binlog, 0); - slash::PutFixed64(&binlog, 0); - int32_t content_len = size - BINLOG_ITEM_HEADER_SIZE; - int32_t parameter_len = content_len - PADDING_BINLOG_PROTOCOL_SIZE - - SPACE_STROE_PARAMETER_LENGTH; - if (parameter_len < 0) { - return std::string(); - } - - std::string content; - RedisAppendLen(content, 2, "*"); - RedisAppendLen(content, 7, "$"); - RedisAppendContent(content, "padding"); - - std::string parameter_len_str; - std::ostringstream os; - os << parameter_len; - std::istringstream is(os.str()); - is >> parameter_len_str; - if (parameter_len_str.size() > SPACE_STROE_PARAMETER_LENGTH) { - return std::string(); - } - - content.append("$"); - content.append(SPACE_STROE_PARAMETER_LENGTH - parameter_len_str.size(), '0'); - content.append(parameter_len_str); - content.append(kNewLine); - RedisAppendContent(content, std::string(parameter_len, '*')); - - slash::PutFixed32(&binlog, content_len); - binlog.append(content); - return binlog; -} - -bool PikaBinlogTransverter::BinlogItemWithoutContentDecode(BinlogType type, - const std::string& binlog, - BinlogItem* binlog_item) { - uint16_t binlog_type = 0; - std::string binlog_str = binlog; - slash::GetFixed16(&binlog_str, &binlog_type); - if (binlog_type != type) { - LOG(ERROR) << "Binlog Item type error, expect type:" << type << " actual type: " << binlog_type; - return false; - } - slash::GetFixed32(&binlog_str, &binlog_item->exec_time_); - slash::GetFixed32(&binlog_str, &binlog_item->server_id_); - slash::GetFixed64(&binlog_str, &binlog_item->logic_id_); - slash::GetFixed32(&binlog_str, &binlog_item->filenum_); - slash::GetFixed64(&binlog_str, &binlog_item->offset_); - return true; -} diff --git a/tools/pika_migrate/src/pika_bit.cc b/tools/pika_migrate/src/pika_bit.cc deleted file mode 100644 index 0815acb040..0000000000 --- a/tools/pika_migrate/src/pika_bit.cc +++ /dev/null @@ -1,221 +0,0 @@ -// Copyright (c) 2015-present, Qihoo, Inc. All rights reserved. -// This source code is licensed under the BSD-style license found in the -// LICENSE file in the root directory of this source tree. An additional grant -// of patent rights can be found in the PATENTS file in the same directory. 
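pika_bit.cc below implements the bit commands. BitSetCmd::DoInitial caps the bit offset by right-shifting it kMaxBitOpInputBit positions; taking that constant as 21 (an assumption, it lives in pika_define.h), offsets stop at 2^21 bits, i.e. a value of at most 2^18 bytes, which is what the "no bigger than 2^18" comment in the code refers to. A standalone sketch of the two argument checks:

#include <cstdint>

// Assumed value of the shift constant from pika_define.h; with 21 the
// largest settable bit offset is 2^21 - 1, i.e. a 2^18-byte (256 KB) value.
const int kMaxBitOpInputBit = 21;

// Offset check mirroring BitSetCmd::DoInitial: negative offsets and
// anything with bits left after the shift are rejected.
bool ValidBitOffset(int64_t bit_offset) {
  return bit_offset >= 0 && (bit_offset >> kMaxBitOpInputBit) == 0;
}

// A bit value must be exactly 0 or 1; `on & ~1` catches everything else.
bool ValidBitValue(int64_t on) {
  return (on & ~1) == 0;
}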
- -#include "include/pika_bit.h" - -#include "slash/include/slash_string.h" - -#include "include/pika_define.h" - -void BitSetCmd::DoInitial() { - if (!CheckArg(argv_.size())) { - res_.SetRes(CmdRes::kWrongNum, kCmdNameBitSet); - return; - } - key_ = argv_[1]; - if (!slash::string2l(argv_[2].data(), argv_[2].size(), &bit_offset_)) { - res_.SetRes(CmdRes::kInvalidBitOffsetInt); - return; - } - if (!slash::string2l(argv_[3].data(), argv_[3].size(), &on_)) { - res_.SetRes(CmdRes::kInvalidBitInt); - return; - } - if (bit_offset_ < 0) { - res_.SetRes(CmdRes::kInvalidBitOffsetInt); - return; - } - // value no bigger than 2^18 - if ( (bit_offset_ >> kMaxBitOpInputBit) > 0) { - res_.SetRes(CmdRes::kInvalidBitOffsetInt); - return; - } - if (on_ & ~1) { - res_.SetRes(CmdRes::kInvalidBitInt); - return; - } - return; -} - -void BitSetCmd::Do(std::shared_ptr partition) { - std::string value; - int32_t bit_val = 0; - rocksdb::Status s = partition->db()->SetBit(key_, bit_offset_, on_, &bit_val); - if (s.ok()){ - res_.AppendInteger((int)bit_val); - } else { - res_.SetRes(CmdRes::kErrOther, s.ToString()); - } -} - -void BitGetCmd::DoInitial() { - if (!CheckArg(argv_.size())) { - res_.SetRes(CmdRes::kWrongNum, kCmdNameBitGet); - return; - } - key_ = argv_[1]; - if (!slash::string2l(argv_[2].data(), argv_[2].size(), &bit_offset_)) { - res_.SetRes(CmdRes::kInvalidBitOffsetInt); - return; - } - if (bit_offset_ < 0) { - res_.SetRes(CmdRes::kInvalidBitOffsetInt); - return; - } - return; -} - -void BitGetCmd::Do(std::shared_ptr partition) { - int32_t bit_val = 0; - rocksdb::Status s = partition->db()->GetBit(key_, bit_offset_, &bit_val); - if (s.ok()) { - res_.AppendInteger((int)bit_val); - } else { - res_.SetRes(CmdRes::kErrOther, s.ToString()); - } -} - -void BitCountCmd::DoInitial() { - if (!CheckArg(argv_.size())) { - res_.SetRes(CmdRes::kWrongNum, kCmdNameBitCount); - return; - } - key_ = argv_[1]; - if (argv_.size() == 4) { - count_all_ = false; - if (!slash::string2l(argv_[2].data(), argv_[2].size(), &start_offset_)) { - res_.SetRes(CmdRes::kInvalidInt); - return; - } - if (!slash::string2l(argv_[3].data(), argv_[3].size(), &end_offset_)) { - res_.SetRes(CmdRes::kInvalidInt); - return; - } - } else if (argv_.size() == 2) { - count_all_ = true; - } else { - res_.SetRes(CmdRes::kSyntaxErr, kCmdNameBitCount); - } - return; -} - -void BitCountCmd::Do(std::shared_ptr partition) { - int32_t count = 0; - rocksdb::Status s; - if (count_all_) { - s = partition->db()->BitCount(key_, start_offset_, end_offset_, &count, false); - } else { - s = partition->db()->BitCount(key_, start_offset_, end_offset_, &count, true); - } - - if (s.ok() || s.IsNotFound()) { - res_.AppendInteger(count); - } else { - res_.SetRes(CmdRes::kErrOther, s.ToString()); - } -} - -void BitPosCmd::DoInitial() { - if (!CheckArg(argv_.size())) { - res_.SetRes(CmdRes::kWrongNum, kCmdNameBitPos); - return; - } - key_ = argv_[1]; - if (!slash::string2l(argv_[2].data(), argv_[2].size(), &bit_val_)) { - res_.SetRes(CmdRes::kInvalidInt); - return; - } - if (bit_val_ & ~1) { - res_.SetRes(CmdRes::kInvalidBitPosArgument); - return; - } - if (argv_.size() == 3) { - pos_all_ = true; - endoffset_set_ = false; - } else if (argv_.size() == 4) { - pos_all_ = false; - endoffset_set_ = false; - if (!slash::string2l(argv_[3].data(), argv_[3].size(), &start_offset_)) { - res_.SetRes(CmdRes::kInvalidInt); - return; - } - } else if (argv_.size() == 5) { - pos_all_ = false; - endoffset_set_ = true; - if (!slash::string2l(argv_[3].data(), argv_[3].size(), 
&start_offset_)) { - res_.SetRes(CmdRes::kInvalidInt); - return; - } - if (!slash::string2l(argv_[4].data(), argv_[4].size(), &end_offset_)) { - res_.SetRes(CmdRes::kInvalidInt); - return; - } - } else - res_.SetRes(CmdRes::kSyntaxErr, kCmdNameBitPos); - return; -} - -void BitPosCmd::Do(std::shared_ptr partition) { - int64_t pos = 0; - rocksdb::Status s; - if (pos_all_) { - s = partition->db()->BitPos(key_, bit_val_, &pos); - } else if (!pos_all_ && !endoffset_set_) { - s = partition->db()->BitPos(key_, bit_val_, start_offset_, &pos); - } else if (!pos_all_ && endoffset_set_) { - s = partition->db()->BitPos(key_, bit_val_, start_offset_, end_offset_, &pos); - } - if (s.ok()) { - res_.AppendInteger((int)pos); - } else { - res_.SetRes(CmdRes::kErrOther, s.ToString()); - } -} - -void BitOpCmd::DoInitial() { - if (!CheckArg(argv_.size())) { - res_.SetRes(CmdRes::kWrongNum, kCmdNameBitOp); - return; - } - std::string op_str = argv_[1]; - if (!strcasecmp(op_str.data(), "not")) { - op_ = blackwidow::kBitOpNot; - } else if (!strcasecmp(op_str.data(), "and")) { - op_ = blackwidow::kBitOpAnd; - } else if (!strcasecmp(op_str.data(), "or")) { - op_ = blackwidow::kBitOpOr; - } else if (!strcasecmp(op_str.data(), "xor")) { - op_ = blackwidow::kBitOpXor; - } else { - res_.SetRes(CmdRes::kSyntaxErr, kCmdNameBitOp); - return; - } - if (op_ == blackwidow::kBitOpNot && argv_.size() != 4) { - res_.SetRes(CmdRes::kWrongBitOpNotNum, kCmdNameBitOp); - return; - } else if (op_ != blackwidow::kBitOpNot && argv_.size() < 4) { - res_.SetRes(CmdRes::kWrongNum, kCmdNameBitOp); - return; - } else if (argv_.size() >= kMaxBitOpInputKey) { - res_.SetRes(CmdRes::kWrongNum, kCmdNameBitOp); - return; - } - - dest_key_ = argv_[2].data(); - for(unsigned int i = 3; i <= argv_.size() - 1; i++) { - src_keys_.push_back(argv_[i].data()); - } - return; -} - -void BitOpCmd::Do(std::shared_ptr partition) { - int64_t result_length; - rocksdb::Status s = partition->db()->BitOp(op_, dest_key_, src_keys_, &result_length); - if (s.ok()) { - res_.AppendInteger(result_length); - } else { - res_.SetRes(CmdRes::kErrOther, s.ToString()); - } -} diff --git a/tools/pika_migrate/src/pika_client_conn.cc b/tools/pika_migrate/src/pika_client_conn.cc deleted file mode 100644 index fd51331438..0000000000 --- a/tools/pika_migrate/src/pika_client_conn.cc +++ /dev/null @@ -1,256 +0,0 @@ -// Copyright (c) 2015-present, Qihoo, Inc. All rights reserved. -// This source code is licensed under the BSD-style license found in the -// LICENSE file in the root directory of this source tree. An additional grant -// of patent rights can be found in the PATENTS file in the same directory. 
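pika_client_conn.cc below is the per-client command pipeline: look the command up, check auth, time it, and feed MONITOR subscribers. One detail worth noting is ProcessSlowlog, which logs any command slower than slowlog-log-slower-than and truncates the logged command text at 1000 bytes. A standalone sketch of that truncation rule (function name illustrative; the escaping pika applies via slash::ToRead is omitted):

#include <string>
#include <vector>

// Render argv for the error log the way ProcessSlowlog does: join the
// arguments, but stop once the rendered string reaches 1000 bytes and
// mark the cut with "...\"".
std::string RenderSlowlogCommand(const std::vector<std::string>& argv) {
  std::string slow_log;
  bool trim = false;
  for (const auto& arg : argv) {
    if (trim) break;  // the original keeps looping only to count bytes
    slow_log.append(" ");
    slow_log.append(arg);
    if (slow_log.size() >= 1000) {
      trim = true;
      slow_log.resize(1000);
      slow_log.append("...\"");
    }
  }
  return slow_log;
}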
- -#include "include/pika_client_conn.h" - -#include -#include - -#include - -#include "include/pika_conf.h" -#include "include/pika_server.h" -#include "include/pika_cmd_table_manager.h" - -extern PikaConf* g_pika_conf; -extern PikaServer* g_pika_server; -extern PikaCmdTableManager* g_pika_cmd_table_manager; - -PikaClientConn::PikaClientConn(int fd, std::string ip_port, - pink::Thread* thread, - pink::PinkEpoll* pink_epoll, - const pink::HandleType& handle_type) - : RedisConn(fd, ip_port, thread, pink_epoll, handle_type), - server_thread_(reinterpret_cast(thread)), - current_table_(g_pika_conf->default_table()), - is_pubsub_(false) { - auth_stat_.Init(); -} - -std::string PikaClientConn::DoCmd(const PikaCmdArgsType& argv, - const std::string& opt) { - // Get command info - std::shared_ptr c_ptr = g_pika_cmd_table_manager->GetCmd(opt); - if (!c_ptr) { - return "-Err unknown or unsupported command \'" + opt + "\'\r\n"; - } - c_ptr->SetConn(std::dynamic_pointer_cast(shared_from_this())); - - // Check authed - // AuthCmd will set stat_ - if (!auth_stat_.IsAuthed(c_ptr)) { - return "-ERR NOAUTH Authentication required.\r\n"; - } - - uint64_t start_us = 0; - if (g_pika_conf->slowlog_slower_than() >= 0) { - start_us = slash::NowMicros(); - } - - bool is_monitoring = g_pika_server->HasMonitorClients(); - if (is_monitoring) { - ProcessMonitor(argv); - } - - // Initial - c_ptr->Initial(argv, current_table_); - if (!c_ptr->res().ok()) { - return c_ptr->res().message(); - } - - g_pika_server->UpdateQueryNumAndExecCountTable(opt); - - // PubSub connection - // (P)SubscribeCmd will set is_pubsub_ - if (this->IsPubSub()) { - if (opt != kCmdNameSubscribe && - opt != kCmdNameUnSubscribe && - opt != kCmdNamePing && - opt != kCmdNamePSubscribe && - opt != kCmdNamePUnSubscribe) { - return "-ERR only (P)SUBSCRIBE / (P)UNSUBSCRIBE / PING / QUIT allowed in this context\r\n"; - } - } - - if (!g_pika_server->IsCommandSupport(opt)) { - return "-ERR This command only support in classic mode\r\n"; - } - - if (!g_pika_server->IsTableExist(current_table_)) { - return "-ERR Table not found\r\n"; - } - - // TODO: Consider special commands, like flushall, flushdb? 
- if (c_ptr->is_write()) { - if (g_pika_server->IsTableBinlogIoError(current_table_)) { - return "-ERR Writing binlog failed, maybe no space left on device\r\n"; - } - std::vector cur_key = c_ptr->current_key(); - if (cur_key.empty()) { - return "-ERR Internal ERROR\r\n"; - } - if (g_pika_server->readonly(current_table_, cur_key.front())) { - return "-ERR Server in read-only\r\n"; - } - } - - // Process Command - c_ptr->Execute(); - - if (g_pika_conf->slowlog_slower_than() >= 0) { - ProcessSlowlog(argv, start_us); - } - - return c_ptr->res().message(); -} - -void PikaClientConn::ProcessSlowlog(const PikaCmdArgsType& argv, uint64_t start_us) { - int32_t start_time = start_us / 1000000; - int64_t duration = slash::NowMicros() - start_us; - if (duration > g_pika_conf->slowlog_slower_than()) { - g_pika_server->SlowlogPushEntry(argv, start_time, duration); - if (g_pika_conf->slowlog_write_errorlog()) { - bool trim = false; - std::string slow_log; - uint32_t cmd_size = 0; - for (unsigned int i = 0; i < argv.size(); i++) { - cmd_size += 1 + argv[i].size(); // blank space and argument length - if (!trim) { - slow_log.append(" "); - slow_log.append(slash::ToRead(argv[i])); - if (slow_log.size() >= 1000) { - trim = true; - slow_log.resize(1000); - slow_log.append("...\""); - } - } - } - LOG(ERROR) << "ip_port: " << ip_port() << ", table: " << current_table_ - << ", command:" << slow_log << ", command_size: " << cmd_size - 1 - << ", arguments: " << argv.size() << ", start_time(s): " << start_time - << ", duration(us): " << duration; - } - } -} - -void PikaClientConn::ProcessMonitor(const PikaCmdArgsType& argv) { - std::string monitor_message; - std::string table_name = g_pika_conf->classic_mode() - ? current_table_.substr(2) : current_table_; - monitor_message = std::to_string(1.0*slash::NowMicros()/1000000) + - " [" + table_name + " " + this->ip_port() + "]"; - for (PikaCmdArgsType::const_iterator iter = argv.begin(); iter != argv.end(); iter++) { - monitor_message += " " + slash::ToRead(*iter); - } - g_pika_server->AddMonitorMessage(monitor_message); -} - -void PikaClientConn::AsynProcessRedisCmds(const std::vector& argvs, std::string* response) { - BgTaskArg* arg = new BgTaskArg(); - arg->redis_cmds = argvs; - arg->response = response; - arg->pcc = std::dynamic_pointer_cast(shared_from_this()); - g_pika_server->Schedule(&DoBackgroundTask, arg); -} - -void PikaClientConn::BatchExecRedisCmd(const std::vector& argvs, std::string* response) { - bool success = true; - for (const auto& argv : argvs) { - if (DealMessage(argv, response) != 0) { - success = false; - break; - } - } - if (!response->empty()) { - set_is_reply(true); - NotifyEpoll(success); - } -} - -int PikaClientConn::DealMessage(const PikaCmdArgsType& argv, std::string* response) { - - if (argv.empty()) return -2; - std::string opt = argv[0]; - if (opt == kClusterPrefix) { - if (argv.size() >=2 ) { - opt += argv[1]; - } - } - slash::StringToLower(opt); - - if (response->empty()) { - // Avoid memory copy - *response = std::move(DoCmd(argv, opt)); - } else { - // Maybe pipeline - response->append(DoCmd(argv, opt)); - } - return 0; -} - -void PikaClientConn::DoBackgroundTask(void* arg) { - BgTaskArg* bg_arg = reinterpret_cast(arg); - bg_arg->pcc->BatchExecRedisCmd(bg_arg->redis_cmds, bg_arg->response); - delete bg_arg; -} - -// Initial permission status -void PikaClientConn::AuthStat::Init() { - // Check auth required - stat_ = g_pika_conf->userpass() == "" ? 
-// Initial permission status
-void PikaClientConn::AuthStat::Init() {
-  // Check auth required
-  stat_ = g_pika_conf->userpass() == "" ?
-      kLimitAuthed : kNoAuthed;
-  if (stat_ == kLimitAuthed
-      && g_pika_conf->requirepass() == "") {
-    stat_ = kAdminAuthed;
-  }
-}
-
-// Check permission for current command
-bool PikaClientConn::AuthStat::IsAuthed(const std::shared_ptr<Cmd> cmd_ptr) {
-  std::string opt = cmd_ptr->name();
-  if (opt == kCmdNameAuth) {
-    return true;
-  }
-  const std::vector<std::string>& blacklist = g_pika_conf->vuser_blacklist();
-  switch (stat_) {
-    case kNoAuthed:
-      return false;
-    case kAdminAuthed:
-      break;
-    case kLimitAuthed:
-      if (cmd_ptr->is_admin_require()
-          || find(blacklist.begin(), blacklist.end(), opt) != blacklist.end()) {
-        return false;
-      }
-      break;
-    default:
-      LOG(WARNING) << "Invalid auth stat : " << static_cast<unsigned>(stat_);
-      return false;
-  }
-  return true;
-}
-
-// Update permission status
-bool PikaClientConn::AuthStat::ChecknUpdate(const std::string& message) {
-  // Situations to change auth status
-  if (message == "USER") {
-    stat_ = kLimitAuthed;
-  } else if (message == "ROOT") {
-    stat_ = kAdminAuthed;
-  } else {
-    return false;
-  }
-  return true;
-}
-
-// compare addr in ClientInfo
-bool AddrCompare(const ClientInfo& lhs, const ClientInfo& rhs) {
-  return rhs.ip_port < lhs.ip_port;
-}
-
-bool IdleCompare(const ClientInfo& lhs, const ClientInfo& rhs) {
-  return lhs.last_interaction < rhs.last_interaction;
-}
-
diff --git a/tools/pika_migrate/src/pika_cluster.cc b/tools/pika_migrate/src/pika_cluster.cc
deleted file mode 100644
index 34d5b1630d..0000000000
--- a/tools/pika_migrate/src/pika_cluster.cc
+++ /dev/null
@@ -1,495 +0,0 @@
-// Copyright (c) 2015-present, Qihoo, Inc. All rights reserved.
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree. An additional grant
-// of patent rights can be found in the PATENTS file in the same directory.
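// ---------------------------------------------------------------------------
// A self-contained sketch (not from the original file): several pkcluster
// subcommands in this file accept slot groups like "0-3,8,9,10,11", which
// ParseSlotGroup() below expands into a set of slot ids. This compact
// equivalent uses std::stoul instead of the slash string helpers; ParseSlotId
// and ExpandSlotSpec are hypothetical names.
#include <cstdint>
#include <set>
#include <sstream>
#include <string>

// Parse a non-negative integer token; reject empty or non-digit input.
bool ParseSlotId(const std::string& tok, uint32_t* out) {
  if (tok.empty() || tok.find_first_not_of("0123456789") != std::string::npos) {
    return false;
  }
  *out = static_cast<uint32_t>(std::stoul(tok));
  return true;
}

// Expand "0-3,8" into {0,1,2,3,8}; false on malformed specs like "3-1" or "-2".
bool ExpandSlotSpec(const std::string& spec, std::set<uint32_t>* slots) {
  std::istringstream in(spec);
  std::string item;
  while (std::getline(in, item, ',')) {
    size_t dash = item.find('-');
    uint32_t lo = 0, hi = 0;
    if (dash == std::string::npos) {
      if (!ParseSlotId(item, &lo)) return false;
      hi = lo;
    } else {
      if (!ParseSlotId(item.substr(0, dash), &lo) ||
          !ParseSlotId(item.substr(dash + 1), &hi) || lo > hi) {
        return false;
      }
    }
    for (uint32_t s = lo; s <= hi; ++s) slots->insert(s);
  }
  return true;
}
// ---------------------------------------------------------------------------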
- -#include "include/pika_rm.h" -#include "include/pika_table.h" -#include "include/pika_server.h" -#include "include/pika_cluster.h" -#include "include/pika_cmd_table_manager.h" - -extern PikaReplicaManager* g_pika_rm; -extern PikaServer* g_pika_server; -extern PikaConf* g_pika_conf; - -const std::string PkClusterInfoCmd::kSlotSection = "slot"; - -// pkcluster info slot table:slot -// pkcluster info table -// pkcluster info node -// pkcluster info cluster -void PkClusterInfoCmd::DoInitial() { - if (!CheckArg(argv_.size())) { - res_.SetRes(CmdRes::kWrongNum, kCmdNamePkClusterInfo); - return; - } - if (g_pika_conf->classic_mode()) { - res_.SetRes(CmdRes::kErrOther, "PkClusterInfo only support on sharding mode"); - return; - } - if (!strcasecmp(argv_[2].data(), kSlotSection.data())) { - info_section_ = kInfoSlot; - if (!ParseInfoSlotSubCmd()) { - return; - } - } else { - info_section_ = kInfoErr; - } - return; -} - -void PkClusterInfoCmd::Do(std::shared_ptr partition) { - std::string info; - switch (info_section_) { - case kInfoSlot: - if (info_range_ == kAll) { - ClusterInfoSlotAll(&info); - } else if (info_range_ == kSingle) { - // doesn't process error, if error return nothing - GetSlotInfo(table_name_, partition_id_, &info); - } - break; - default: - break; - } - res_.AppendStringLen(info.size()); - res_.AppendContent(info); - return; -} - -bool PkClusterInfoCmd::ParseInfoSlotSubCmd() { - if (argv_.size() > 3) { - if (argv_.size() == 4) { - info_range_ = kSingle; - std::string tmp(argv_[3]); - size_t pos = tmp.find(':'); - std::string slot_num_str; - if (pos == std::string::npos) { - table_name_ = g_pika_conf->default_table(); - slot_num_str = tmp; - } else { - table_name_ = tmp.substr(0, pos); - slot_num_str = tmp.substr(pos + 1); - } - unsigned long partition_id; - if (!slash::string2ul(slot_num_str.c_str(), slot_num_str.size(), &partition_id)) { - res_.SetRes(CmdRes::kInvalidParameter, kCmdNamePkClusterInfo); - return false; - } - partition_id_ = partition_id; - } else { - res_.SetRes(CmdRes::kWrongNum, kCmdNamePkClusterInfo); - return false; - } - } - return true; -} - -void PkClusterInfoCmd::ClusterInfoSlotAll(std::string* info) { - std::stringstream tmp_stream; - for (const auto& table_item : g_pika_server->tables_) { - slash::RWLock partition_rwl(&table_item.second->partitions_rw_, false); - for (const auto& partition_item : table_item.second->partitions_) { - std::string table_name = table_item.second->GetTableName(); - uint32_t partition_id = partition_item.second->GetPartitionId(); - std::string p_info; - Status s = GetSlotInfo(table_name, partition_id, &p_info); - if (!s.ok()) { - continue; - } - tmp_stream << p_info; - } - } - info->append(tmp_stream.str()); -} - -Status PkClusterInfoCmd::GetSlotInfo(const std::string table_name, - uint32_t partition_id, - std::string* info) { - std::shared_ptr partition = - g_pika_server->GetTablePartitionById(table_name, partition_id); - if (!partition) { - return Status::NotFound("not found"); - } - Status s; - std::stringstream tmp_stream; - - // binlog offset section - uint32_t filenum = 0; - uint64_t offset = 0; - partition->logger()->GetProducerStatus(&filenum, &offset); - tmp_stream << partition->GetPartitionName() << " binlog_offset=" - << filenum << " " << offset; - - // safety purge section - std::string safety_purge; - s = g_pika_rm->GetSafetyPurgeBinlogFromSMP(table_name, partition_id, &safety_purge); - tmp_stream << ",safety_purge=" << (s.ok() ? 
safety_purge : "error") << "\r\n"; - - // partition info section - std::string p_info; - s = g_pika_rm->GetPartitionInfo(table_name, partition_id, &p_info); - if (!s.ok()) { - return s; - } - tmp_stream << p_info; - info->append(tmp_stream.str()); - return Status::OK(); -} - -Status ParseSlotGroup(const std::string& slot_group, - std::set* slots) { - std::set tmp_slots; - int64_t slot_idx, start_idx, end_idx; - std::string::size_type pos; - std::vector elems; - slash::StringSplit(slot_group, COMMA, elems); - for (const auto& elem : elems) { - if ((pos = elem.find("-")) == std::string::npos) { - if (!slash::string2l(elem.data(), elem.size(), &slot_idx) - || slot_idx < 0) { - return Status::Corruption("syntax error"); - } else { - tmp_slots.insert(static_cast(slot_idx)); - } - } else { - if (pos == 0 || pos == (elem.size() - 1)) { - return Status::Corruption("syntax error"); - } else { - std::string start_pos = elem.substr(0, pos); - std::string end_pos = elem.substr(pos + 1, elem.size() - pos); - if (!slash::string2l(start_pos.data(), start_pos.size(), &start_idx) - || !slash::string2l(end_pos.data(), end_pos.size(), &end_idx) - || start_idx < 0 || end_idx < 0 || start_idx > end_idx) { - return Status::Corruption("syntax error"); - } - for (int64_t idx = start_idx; idx <= end_idx; ++idx) { - tmp_slots.insert(static_cast(idx)); - } - } - } - } - slots->swap(tmp_slots); - return Status::OK(); -} - -void SlotParentCmd::DoInitial() { - if (!CheckArg(argv_.size())) { - res_.SetRes(CmdRes::kSyntaxErr); - return; - } - if (g_pika_conf->classic_mode()) { - res_.SetRes(CmdRes::kErrOther, "PkClusterAddSlots/PkClusterDelSlots only support on sharding mode"); - return; - } - - Status s = ParseSlotGroup(argv_[2], &slots_); - if (!s.ok()) { - res_.SetRes(CmdRes::kErrOther, s.ToString()); - } - - std::string table_name = g_pika_conf->default_table(); - for (const auto& slot_id : slots_) { - p_infos_.insert(PartitionInfo(table_name, slot_id)); - } -} - -/* - * pkcluster addslots 0-3,8-11 - * pkcluster addslots 0-3,8,9,10,11 - * pkcluster addslots 0,2,4,6,8,10,12,14 - */ -void PkClusterAddSlotsCmd::DoInitial() { - SlotParentCmd::DoInitial(); - if (!res_.ok()) { - return; - } -} - -void PkClusterAddSlotsCmd::Do(std::shared_ptr partition) { - std::string table_name = g_pika_conf->default_table(); - std::shared_ptr
table_ptr = g_pika_server->GetTable(table_name); - if (!table_ptr) { - res_.SetRes(CmdRes::kErrOther, "Internal error: table not found!"); - return; - } - - SlotState expected = INFREE; - if (!std::atomic_compare_exchange_strong(&g_pika_server->slot_state_, - &expected, INBUSY)) { - res_.SetRes(CmdRes::kErrOther, - "Slot in syncing or a change operation is under way, retry later"); - return; - } - - bool pre_success = true; - Status s = AddSlotsSanityCheck(table_name); - if (!s.ok()) { - LOG(WARNING) << "Addslots sanity check failed: " << s.ToString(); - pre_success = false; - } - if (pre_success) { - s = g_pika_conf->AddTablePartitions(table_name, slots_); - if (!s.ok()) { - LOG(WARNING) << "Addslots add to pika conf failed: " << s.ToString(); - pre_success = false; - } - } - if (pre_success) { - s = table_ptr->AddPartitions(slots_); - if (!s.ok()) { - LOG(WARNING) << "Addslots add to table partition failed: " << s.ToString(); - pre_success = false; - } - } - if (pre_success) { - s = g_pika_rm->AddSyncPartition(p_infos_); - if (!s.ok()) { - LOG(WARNING) << "Addslots add to sync partition failed: " << s.ToString(); - pre_success = false; - } - } - - g_pika_server->slot_state_.store(INFREE); - - if (!pre_success) { - res_.SetRes(CmdRes::kErrOther, s.ToString()); - return; - } - - res_.SetRes(CmdRes::kOk); - LOG(INFO) << "Pika meta file overwrite success"; -} - -Status PkClusterAddSlotsCmd::AddSlotsSanityCheck(const std::string& table_name) { - Status s = g_pika_conf->TablePartitionsSanityCheck(table_name, slots_, true); - if (!s.ok()) { - return s; - } - - std::shared_ptr
<Table> table_ptr = g_pika_server->GetTable(table_name);
-  if (!table_ptr) {
-    return Status::NotFound("table not found!");
-  }
-
-  for (uint32_t id : slots_) {
-    std::shared_ptr<Partition> partition_ptr = table_ptr->GetPartitionById(id);
-    if (partition_ptr != nullptr) {
-      return Status::Corruption("partition " + std::to_string(id) + " already exist");
-    }
-  }
-  s = g_pika_rm->AddSyncPartitionSanityCheck(p_infos_);
-  if (!s.ok()) {
-    return s;
-  }
-  return Status::OK();
-}
-
-/* pkcluster delslots 0-3,8-11
- * pkcluster delslots 0-3,8,9,10,11
- * pkcluster delslots 0,2,4,6,8,10,12,14
- */
-void PkClusterDelSlotsCmd::DoInitial() {
-  SlotParentCmd::DoInitial();
-  if (!res_.ok()) {
-    return;
-  }
-}
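// ---------------------------------------------------------------------------
// A standalone sketch (not from the original file): both the addslots and
// delslots paths guard the whole reconfiguration with one atomic flag --
// compare-exchange INFREE -> INBUSY, do the work, store INFREE back. The
// scoped guard below renders that idiom; SlotBusyGuard is a hypothetical name
// and the enum shadows the real SlotState only for this illustration.
#include <atomic>

enum SlotState { INFREE = 0, INBUSY = 1 };

class SlotBusyGuard {
 public:
  explicit SlotBusyGuard(std::atomic<SlotState>* state) : state_(state) {
    SlotState expected = INFREE;
    acquired_ = state_->compare_exchange_strong(expected, INBUSY);
  }
  ~SlotBusyGuard() {
    if (acquired_) state_->store(INFREE);  // always release on scope exit
  }
  bool acquired() const { return acquired_; }

 private:
  std::atomic<SlotState>* state_;
  bool acquired_ = false;
};
// Usage: SlotBusyGuard guard(&slot_state);
//        if (!guard.acquired()) { /* another change in flight; retry later */ }
// ---------------------------------------------------------------------------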
-
-void PkClusterDelSlotsCmd::Do(std::shared_ptr<Partition> partition) {
-  std::string table_name = g_pika_conf->default_table();
-  std::shared_ptr<Table> table_ptr = g_pika_server->GetTable(table_name);
-  if (!table_ptr) {
-    res_.SetRes(CmdRes::kErrOther, "Internal error: default table not found!");
-    return;
-  }
-
-  SlotState expected = INFREE;
-  if (!std::atomic_compare_exchange_strong(&g_pika_server->slot_state_,
-              &expected, INBUSY)) {
-    res_.SetRes(CmdRes::kErrOther,
-            "Slot in syncing or a change operation is under way, retry later");
-    return;
-  }
-
-  bool pre_success = true;
-  Status s = RemoveSlotsSanityCheck(table_name);
-  if (!s.ok()) {
-    LOG(WARNING) << "Removeslots sanity check failed: " << s.ToString();
-    pre_success = false;
-  }
-  if (pre_success) {
-    s = g_pika_conf->RemoveTablePartitions(table_name, slots_);
-    if (!s.ok()) {
-      LOG(WARNING) << "Removeslots remove from pika conf failed: " << s.ToString();
-      pre_success = false;
-    }
-  }
-  if (pre_success) {
-    s = table_ptr->RemovePartitions(slots_);
-    if (!s.ok()) {
-      LOG(WARNING) << "Removeslots remove from table partition failed: " << s.ToString();
-      pre_success = false;
-    }
-  }
-  if (pre_success) {
-    s = g_pika_rm->RemoveSyncPartition(p_infos_);
-    if (!s.ok()) {
-      LOG(WARNING) << "Removeslots remove from sync partition failed: " << s.ToString();
-      pre_success = false;
-    }
-  }
-
-  g_pika_server->slot_state_.store(INFREE);
-
-  if (!pre_success) {
-    res_.SetRes(CmdRes::kErrOther, s.ToString());
-    return;
-  }
-  res_.SetRes(CmdRes::kOk);
-  LOG(INFO) << "Pika meta file overwrite success";
-}
-
-Status PkClusterDelSlotsCmd::RemoveSlotsSanityCheck(const std::string& table_name) {
-  Status s = g_pika_conf->TablePartitionsSanityCheck(table_name, slots_, false);
-  if (!s.ok()) {
-    return s;
-  }
-
-  std::shared_ptr<Table>
table_ptr = g_pika_server->GetTable(table_name); - if (!table_ptr) { - return Status::NotFound("table not found"); - } - - for (uint32_t id : slots_) { - std::shared_ptr partition_ptr = table_ptr->GetPartitionById(id); - if (partition_ptr == nullptr) { - return Status::Corruption("partition " + std::to_string(id) + " not found"); - } - } - s = g_pika_rm->RemoveSyncPartitionSanityCheck(p_infos_); - if (!s.ok()) { - return s; - } - return Status::OK(); -} - -/* pkcluster slotsslaveof no one [0-3,8-11 | all] - * pkcluster slotsslaveof ip port [0-3,8,9,10,11 | all] - * pkcluster slotsslaveof ip port [0,2,4,6,7,8,9 | all] force - */ -void PkClusterSlotsSlaveofCmd::DoInitial() { - if (!CheckArg(argv_.size())) { - res_.SetRes(CmdRes::kWrongNum, kCmdNamePkClusterSlotsSlaveof); - return; - } - if (g_pika_conf->classic_mode()) { - res_.SetRes(CmdRes::kErrOther, "PkClusterSlotSync only support on sharding mode"); - return; - } - - if (!strcasecmp(argv_[2].data(), "no") - && !strcasecmp(argv_[3].data(), "one")) { - is_noone_ = true; - } else { - ip_ = argv_[2]; - if (!slash::string2l(argv_[3].data(), argv_[3].size(), &port_) - || port_ <= 0) { - res_.SetRes(CmdRes::kInvalidInt); - return; - } - - if ((ip_ == "127.0.0.1" || ip_ == g_pika_server->host()) - && port_ == g_pika_server->port()) { - res_.SetRes(CmdRes::kErrOther, "You fucked up"); - return; - } - } - - if (!strcasecmp(argv_[4].data(), "all")) { - std::string table_name = g_pika_conf->default_table(); - slots_ = g_pika_server->GetTablePartitionIds(table_name); - } else { - Status s = ParseSlotGroup(argv_[4], &slots_); - if (!s.ok()) { - res_.SetRes(CmdRes::kErrOther, s.ToString()); - return; - } - } - - if (slots_.empty()) { - res_.SetRes(CmdRes::kErrOther, "Slots set empty"); - } - - if (argv_.size() == 5) { - // do nothing - } else if (argv_.size() == 6 - && !strcasecmp(argv_[5].data(), "force")) { - force_sync_ = true; - } else { - res_.SetRes(CmdRes::kSyntaxErr); - } -} - -void PkClusterSlotsSlaveofCmd::Do(std::shared_ptr partition) { - std::string table_name = g_pika_conf->default_table(); - std::vector to_del_slots; - for (const auto& slot : slots_) { - std::shared_ptr slave_partition = - g_pika_rm->GetSyncSlavePartitionByName( - PartitionInfo(table_name, slot)); - if (!slave_partition) { - res_.SetRes(CmdRes::kErrOther, "Slot " + std::to_string(slot) + " not found!"); - return; - } - if (is_noone_) { - // check okay - } else if (slave_partition->State() == ReplState::kConnected - && slave_partition->MasterIp() == ip_ && slave_partition->MasterPort() == port_) { - to_del_slots.push_back(slot); - } - } - - for (auto to_del : to_del_slots) { - slots_.erase(to_del); - } - - Status s = Status::OK(); - ReplState state = force_sync_ - ? 
ReplState::kTryDBSync : ReplState::kTryConnect;
-  for (const auto& slot : slots_) {
-    std::shared_ptr<SyncSlavePartition> slave_partition =
-        g_pika_rm->GetSyncSlavePartitionByName(
-            PartitionInfo(table_name, slot));
-    if (slave_partition->State() == ReplState::kConnected) {
-      s = g_pika_rm->SendRemoveSlaveNodeRequest(table_name, slot);
-    }
-    if (!s.ok()) {
-      break;
-    }
-    if (slave_partition->State() != ReplState::kNoConnect) {
-      // reset state
-      s = g_pika_rm->SetSlaveReplState(
-          PartitionInfo(table_name, slot), ReplState::kNoConnect);
-      if (!s.ok()) {
-        break;
-      }
-    }
-    if (is_noone_) {
-    } else {
-      s = g_pika_rm->ActivateSyncSlavePartition(
-          RmNode(ip_, port_, table_name, slot), state);
-      if (!s.ok()) {
-        break;
-      }
-    }
-  }
-
-  if (s.ok()) {
-    res_.SetRes(CmdRes::kOk);
-  } else {
-    res_.SetRes(CmdRes::kErrOther, s.ToString());
-  }
-}
-
diff --git a/tools/pika_migrate/src/pika_cmd_table_manager.cc b/tools/pika_migrate/src/pika_cmd_table_manager.cc
deleted file mode 100644
index b046de878f..0000000000
--- a/tools/pika_migrate/src/pika_cmd_table_manager.cc
+++ /dev/null
@@ -1,88 +0,0 @@
-// Copyright (c) 2018-present, Qihoo, Inc. All rights reserved.
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree. An additional grant
-// of patent rights can be found in the PATENTS file in the same directory.
-
-#include "include/pika_cmd_table_manager.h"
-
-#include <unistd.h>
-#include <sys/syscall.h>
-
-#include "include/pika_conf.h"
-#include "slash/include/slash_mutex.h"
-
-
-#define gettid() syscall(__NR_gettid)
-
-extern PikaConf* g_pika_conf;
-
-PikaCmdTableManager::PikaCmdTableManager() {
-  pthread_rwlock_init(&map_protector_, NULL);
-  cmds_ = new CmdTable();
-  cmds_->reserve(300);
-  InitCmdTable(cmds_);
-}
-
-PikaCmdTableManager::~PikaCmdTableManager() {
-  pthread_rwlock_destroy(&map_protector_);
-  for (const auto& item : thread_distribution_map_) {
-    delete item.second;
-  }
-  DestoryCmdTable(cmds_);
-  delete cmds_;
-}
-
-std::shared_ptr<Cmd> PikaCmdTableManager::GetCmd(const std::string& opt) {
-  std::string internal_opt = opt;
-  if (!g_pika_conf->classic_mode()) {
-    TryChangeToAlias(&internal_opt);
-  }
-  return NewCommand(internal_opt);
-}
-
-std::shared_ptr<Cmd> PikaCmdTableManager::NewCommand(const std::string& opt) {
-  Cmd* cmd = GetCmdFromTable(opt, *cmds_);
-  if (cmd) {
-    return std::shared_ptr<Cmd>(cmd->Clone());
-  }
-  return nullptr;
-}
-
-void PikaCmdTableManager::TryChangeToAlias(std::string *internal_opt) {
-  if (!strcasecmp(internal_opt->c_str(), kCmdNameSlaveof.c_str())) {
-    *internal_opt = kCmdNamePkClusterSlotsSlaveof;
-  }
-}
-
-bool PikaCmdTableManager::CheckCurrentThreadDistributionMapExist(const pid_t& tid) {
-  slash::RWLock l(&map_protector_, false);
-  if (thread_distribution_map_.find(tid) == thread_distribution_map_.end()) {
-    return false;
-  }
-  return true;
-}
-
-void PikaCmdTableManager::InsertCurrentThreadDistributionMap() {
-  pid_t tid = gettid();
-  PikaDataDistribution* distribution = nullptr;
-  if (g_pika_conf->classic_mode()) {
-    distribution = new HashModulo();
-  } else {
-    distribution = new Crc32();
-  }
-  distribution->Init();
-  slash::RWLock l(&map_protector_, true);
-  thread_distribution_map_.insert(std::make_pair(tid, distribution));
-}
-
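// ---------------------------------------------------------------------------
// A minimal sketch (not from the original file): DistributeKey() below
// installs one PikaDataDistribution object per calling thread and then maps
// key -> partition id; classic mode uses a hash-modulo scheme, sharding mode a
// CRC32-based one. std::hash here is a placeholder for the production hash, so
// the ids it produces will differ from Pika's; RouteKeyByHashModulo is a
// hypothetical name.
#include <cstdint>
#include <functional>
#include <string>

uint32_t RouteKeyByHashModulo(const std::string& key, uint32_t partition_num) {
  // Same key and same partition_num always yield the same partition id in
  // the range 0 .. partition_num-1, e.g. "user:1001" over 8 partitions.
  return static_cast<uint32_t>(std::hash<std::string>{}(key) % partition_num);
}
// ---------------------------------------------------------------------------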
-uint32_t PikaCmdTableManager::DistributeKey(const std::string& key, uint32_t partition_num) {
-  pid_t tid = gettid();
-  PikaDataDistribution* data_dist = nullptr;
-  if (!CheckCurrentThreadDistributionMapExist(tid)) {
-    InsertCurrentThreadDistributionMap();
-  }
-
-  slash::RWLock l(&map_protector_, false);
-  data_dist = thread_distribution_map_[tid];
-  return data_dist->Distribute(key, partition_num);
-}
diff --git a/tools/pika_migrate/src/pika_command.cc b/tools/pika_migrate/src/pika_command.cc
deleted file mode 100644
index 5e40cf6416..0000000000
--- a/tools/pika_migrate/src/pika_command.cc
+++ /dev/null
@@ -1,763 +0,0 @@
-// Copyright (c) 2015-present, Qihoo, Inc. All rights reserved.
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree. An additional grant
-// of patent rights can be found in the PATENTS file in the same directory.
-
-#include "include/pika_command.h"
-
-#include "include/pika_kv.h"
-#include "include/pika_bit.h"
-#include "include/pika_set.h"
-#include "include/pika_geo.h"
-#include "include/pika_list.h"
-#include "include/pika_zset.h"
-#include "include/pika_hash.h"
-#include "include/pika_admin.h"
-#include "include/pika_pubsub.h"
-#include "include/pika_server.h"
-#include "include/pika_hyperloglog.h"
-#include "include/pika_slot.h"
-#include "include/pika_cluster.h"
-
-extern PikaServer* g_pika_server;
-
-void InitCmdTable(std::unordered_map<std::string, Cmd*> *cmd_table) {
-  //Admin
-  ////Slaveof
-  Cmd* slaveofptr = new SlaveofCmd(kCmdNameSlaveof, -3, kCmdFlagsRead | kCmdFlagsAdmin);
-  cmd_table->insert(std::pair<std::string, Cmd*>(kCmdNameSlaveof, slaveofptr));
-  Cmd* dbslaveofptr = new DbSlaveofCmd(kCmdNameDbSlaveof, -2, kCmdFlagsRead | kCmdFlagsAdmin);
-  cmd_table->insert(std::pair<std::string, Cmd*>(kCmdNameDbSlaveof, dbslaveofptr));
-  Cmd* authptr = new AuthCmd(kCmdNameAuth, 2, kCmdFlagsRead | kCmdFlagsAdmin);
-  cmd_table->insert(std::pair<std::string, Cmd*>(kCmdNameAuth, authptr));
-  Cmd* bgsaveptr = new BgsaveCmd(kCmdNameBgsave, -1, kCmdFlagsRead | kCmdFlagsAdmin | kCmdFlagsSuspend);
-  cmd_table->insert(std::pair<std::string, Cmd*>(kCmdNameBgsave, bgsaveptr));
-  Cmd* compactptr = new CompactCmd(kCmdNameCompact, -1, kCmdFlagsRead | kCmdFlagsAdmin);
-  cmd_table->insert(std::pair<std::string, Cmd*>(kCmdNameCompact, compactptr));
-  Cmd* purgelogsto = new PurgelogstoCmd(kCmdNamePurgelogsto, -2, kCmdFlagsRead | kCmdFlagsAdmin);
-  cmd_table->insert(std::pair<std::string, Cmd*>(kCmdNamePurgelogsto, purgelogsto));
-  Cmd* pingptr = new PingCmd(kCmdNamePing, 1, kCmdFlagsRead | kCmdFlagsAdmin);
-  cmd_table->insert(std::pair<std::string, Cmd*>(kCmdNamePing, pingptr));
-  Cmd* selectptr = new SelectCmd(kCmdNameSelect, 2, kCmdFlagsRead | kCmdFlagsAdmin);
-  cmd_table->insert(std::pair<std::string, Cmd*>(kCmdNameSelect, selectptr));
-  Cmd* flushallptr = new FlushallCmd(kCmdNameFlushall, 1, kCmdFlagsWrite | kCmdFlagsSuspend | kCmdFlagsAdmin);
-  cmd_table->insert(std::pair<std::string, Cmd*>(kCmdNameFlushall, flushallptr));
-  Cmd* flushdbptr = new FlushdbCmd(kCmdNameFlushdb, -1, kCmdFlagsWrite | kCmdFlagsSuspend | kCmdFlagsAdmin);
-  cmd_table->insert(std::pair<std::string, Cmd*>(kCmdNameFlushdb, flushdbptr));
-  Cmd* clientptr = new ClientCmd(kCmdNameClient, -2, kCmdFlagsRead | kCmdFlagsAdmin);
-  cmd_table->insert(std::pair<std::string, Cmd*>(kCmdNameClient, clientptr));
-  Cmd* shutdownptr = new ShutdownCmd(kCmdNameShutdown, 1, kCmdFlagsRead | kCmdFlagsLocal | kCmdFlagsAdmin);
-  cmd_table->insert(std::pair<std::string, Cmd*>(kCmdNameShutdown, shutdownptr));
-  Cmd* infoptr = new InfoCmd(kCmdNameInfo, -1, kCmdFlagsRead | kCmdFlagsAdmin);
-  cmd_table->insert(std::pair<std::string, Cmd*>(kCmdNameInfo, infoptr));
-  Cmd* configptr = new ConfigCmd(kCmdNameConfig, -2, kCmdFlagsRead | kCmdFlagsAdmin);
-  cmd_table->insert(std::pair<std::string, Cmd*>(kCmdNameConfig, configptr));
-  Cmd* monitorptr = new MonitorCmd(kCmdNameMonitor, -1, kCmdFlagsRead | kCmdFlagsAdmin);
-  cmd_table->insert(std::pair<std::string, Cmd*>(kCmdNameMonitor, monitorptr));
-  Cmd* dbsizeptr = new DbsizeCmd(kCmdNameDbsize, 1,
kCmdFlagsRead | kCmdFlagsAdmin); - cmd_table->insert(std::pair(kCmdNameDbsize, dbsizeptr)); - Cmd* timeptr = new TimeCmd(kCmdNameTime, 1, kCmdFlagsRead | kCmdFlagsAdmin); - cmd_table->insert(std::pair(kCmdNameTime, timeptr)); - Cmd* delbackupptr = new DelbackupCmd(kCmdNameDelbackup, 1, kCmdFlagsRead | kCmdFlagsAdmin); - cmd_table->insert(std::pair(kCmdNameDelbackup, delbackupptr)); - Cmd* echoptr = new EchoCmd(kCmdNameEcho, 2, kCmdFlagsRead | kCmdFlagsAdmin); - cmd_table->insert(std::pair(kCmdNameEcho, echoptr)); - Cmd* scandbptr = new ScandbCmd(kCmdNameScandb, -1, kCmdFlagsRead | kCmdFlagsAdmin); - cmd_table->insert(std::pair(kCmdNameScandb, scandbptr)); - Cmd* slowlogptr = new SlowlogCmd(kCmdNameSlowlog, -2, kCmdFlagsRead | kCmdFlagsAdmin); - cmd_table->insert(std::pair(kCmdNameSlowlog, slowlogptr)); - Cmd* paddingptr = new PaddingCmd(kCmdNamePadding, 2, kCmdFlagsWrite | kCmdFlagsAdmin); - cmd_table->insert(std::pair(kCmdNamePadding, paddingptr)); - Cmd* pkpatternmatchdelptr = new PKPatternMatchDelCmd(kCmdNamePKPatternMatchDel, 3, kCmdFlagsWrite | kCmdFlagsAdmin); - cmd_table->insert(std::pair(kCmdNamePKPatternMatchDel, pkpatternmatchdelptr)); - - // Slots related - Cmd* slotsinfoptr = new SlotsInfoCmd(kCmdNameSlotsInfo, -1, kCmdFlagsRead | kCmdFlagsAdmin); - cmd_table->insert(std::pair(kCmdNameSlotsInfo, slotsinfoptr)); - Cmd* slotshashkeyptr = new SlotsHashKeyCmd(kCmdNameSlotsHashKey, -2, kCmdFlagsRead | kCmdFlagsAdmin); - cmd_table->insert(std::pair(kCmdNameSlotsHashKey, slotshashkeyptr)); - Cmd* slotmgrtslotasyncptr = new SlotsMgrtSlotAsyncCmd(kCmdNameSlotsMgrtSlotAsync, 8, kCmdFlagsRead | kCmdFlagsAdmin); - cmd_table->insert(std::pair(kCmdNameSlotsMgrtSlotAsync, slotmgrtslotasyncptr)); - Cmd* slotmgrttagslotasyncptr = new SlotsMgrtTagSlotAsyncCmd(kCmdNameSlotsMgrtTagSlotAsync, 8, kCmdFlagsRead | kCmdFlagsAdmin); - cmd_table->insert(std::pair(kCmdNameSlotsMgrtTagSlotAsync, slotmgrttagslotasyncptr)); - Cmd* slotsdelptr = new SlotsDelCmd(kCmdNameSlotsDel, -2, kCmdFlagsRead | kCmdFlagsAdmin); - cmd_table->insert(std::pair(kCmdNameSlotsDel, slotsdelptr)); - Cmd* slotsscanptr = new SlotsScanCmd(kCmdNameSlotsScan, -3, kCmdFlagsRead | kCmdFlagsAdmin); - cmd_table->insert(std::pair(kCmdNameSlotsScan, slotsscanptr)); - Cmd* slotmgrtexecwrapper = new SlotsMgrtExecWrapperCmd(kCmdNameSlotsMgrtExecWrapper, -3, kCmdFlagsRead | kCmdFlagsAdmin); - cmd_table->insert(std::pair(kCmdNameSlotsMgrtExecWrapper, slotmgrtexecwrapper)); - Cmd* slotmgrtasyncstatus = new SlotsMgrtAsyncStatusCmd(kCmdNameSlotsMgrtAsyncStatus, 1, kCmdFlagsRead | kCmdFlagsAdmin); - cmd_table->insert(std::pair(kCmdNameSlotsMgrtAsyncStatus, slotmgrtasyncstatus)); - Cmd* slotmgrtasynccancel = new SlotsMgrtAsyncCancelCmd(kCmdNameSlotsMgrtAsyncCancel, 1, kCmdFlagsRead | kCmdFlagsAdmin); - cmd_table->insert(std::pair(kCmdNameSlotsMgrtAsyncCancel, slotmgrtasynccancel)); - Cmd* slotmgrtslotptr = new SlotsMgrtSlotCmd(kCmdNameSlotsMgrtSlot, 5, kCmdFlagsRead | kCmdFlagsAdmin); - cmd_table->insert(std::pair(kCmdNameSlotsMgrtSlot, slotmgrtslotptr)); - Cmd* slotmgrttagslotptr = new SlotsMgrtTagSlotCmd(kCmdNameSlotsMgrtTagSlot, 5, kCmdFlagsRead | kCmdFlagsAdmin); - cmd_table->insert(std::pair(kCmdNameSlotsMgrtTagSlot, slotmgrttagslotptr)); - Cmd* slotmgrtoneptr = new SlotsMgrtOneCmd(kCmdNameSlotsMgrtOne, 5, kCmdFlagsRead | kCmdFlagsAdmin); - cmd_table->insert(std::pair(kCmdNameSlotsMgrtOne, slotmgrtoneptr)); - Cmd* slotmgrttagoneptr = new SlotsMgrtTagOneCmd(kCmdNameSlotsMgrtTagOne, 5, kCmdFlagsRead | kCmdFlagsAdmin); - 
cmd_table->insert(std::pair(kCmdNameSlotsMgrtTagOne, slotmgrttagoneptr)); - - // Cluster related - Cmd* pkclusterinfoptr = new PkClusterInfoCmd(kCmdNamePkClusterInfo, -3, kCmdFlagsRead | kCmdFlagsAdmin); - cmd_table->insert(std::pair(kCmdNamePkClusterInfo, pkclusterinfoptr)); - Cmd* pkclusteraddslotsptr = new PkClusterAddSlotsCmd(kCmdNamePkClusterAddSlots, 3, kCmdFlagsRead | kCmdFlagsAdmin); - cmd_table->insert(std::pair(kCmdNamePkClusterAddSlots, pkclusteraddslotsptr)); - Cmd* pkclusterdelslotsptr = new PkClusterDelSlotsCmd(kCmdNamePkClusterDelSlots, 3, kCmdFlagsRead | kCmdFlagsAdmin); - cmd_table->insert(std::pair(kCmdNamePkClusterDelSlots, pkclusterdelslotsptr)); - Cmd* pkclusterslotsslaveofptr = new PkClusterSlotsSlaveofCmd(kCmdNamePkClusterSlotsSlaveof, -5, kCmdFlagsRead | kCmdFlagsAdmin); - cmd_table->insert(std::pair(kCmdNamePkClusterSlotsSlaveof, pkclusterslotsslaveofptr)); - -#ifdef TCMALLOC_EXTENSION - Cmd* tcmallocptr = new TcmallocCmd(kCmdNameTcmalloc, -2, kCmdFlagsRead | kCmdFlagsAdmin); - cmd_table->insert(std::pair(kCmdNameTcmalloc, tcmallocptr)); -#endif - - //Kv - ////SetCmd - Cmd* setptr = new SetCmd(kCmdNameSet, -3, kCmdFlagsWrite | kCmdFlagsSinglePartition | kCmdFlagsKv); - cmd_table->insert(std::pair(kCmdNameSet, setptr)); - ////GetCmd - Cmd* getptr = new GetCmd(kCmdNameGet, 2, kCmdFlagsRead | kCmdFlagsSinglePartition | kCmdFlagsKv); - cmd_table->insert(std::pair(kCmdNameGet, getptr)); - ////DelCmd - Cmd* delptr = new DelCmd(kCmdNameDel, -2, kCmdFlagsWrite | kCmdFlagsMultiPartition | kCmdFlagsKv); - cmd_table->insert(std::pair(kCmdNameDel, delptr)); - ////IncrCmd - Cmd* incrptr = new IncrCmd(kCmdNameIncr, 2, kCmdFlagsWrite | kCmdFlagsSinglePartition | kCmdFlagsKv); - cmd_table->insert(std::pair(kCmdNameIncr, incrptr)); - ////IncrbyCmd - Cmd* incrbyptr = new IncrbyCmd(kCmdNameIncrby, 3, kCmdFlagsWrite | kCmdFlagsSinglePartition | kCmdFlagsKv); - cmd_table->insert(std::pair(kCmdNameIncrby, incrbyptr)); - ////IncrbyfloatCmd - Cmd* incrbyfloatptr = new IncrbyfloatCmd(kCmdNameIncrbyfloat, 3, kCmdFlagsWrite | kCmdFlagsSinglePartition | kCmdFlagsKv); - cmd_table->insert(std::pair(kCmdNameIncrbyfloat, incrbyfloatptr)); - ////DecrCmd - Cmd* decrptr = new DecrCmd(kCmdNameDecr, 2, kCmdFlagsWrite | kCmdFlagsSinglePartition | kCmdFlagsKv); - cmd_table->insert(std::pair(kCmdNameDecr, decrptr)); - ////DecrbyCmd - Cmd* decrbyptr = new DecrbyCmd(kCmdNameDecrby, 3, kCmdFlagsWrite | kCmdFlagsSinglePartition | kCmdFlagsKv); - cmd_table->insert(std::pair(kCmdNameDecrby, decrbyptr)); - ////GetsetCmd - Cmd* getsetptr = new GetsetCmd(kCmdNameGetset, 3, kCmdFlagsWrite | kCmdFlagsSinglePartition | kCmdFlagsKv); - cmd_table->insert(std::pair(kCmdNameGetset, getsetptr)); - ////AppendCmd - Cmd* appendptr = new AppendCmd(kCmdNameAppend, 3, kCmdFlagsWrite | kCmdFlagsSinglePartition | kCmdFlagsKv); - cmd_table->insert(std::pair(kCmdNameAppend, appendptr)); - ////MgetCmd - Cmd* mgetptr = new MgetCmd(kCmdNameMget, -2, kCmdFlagsRead | kCmdFlagsMultiPartition | kCmdFlagsKv); - cmd_table->insert(std::pair(kCmdNameMget, mgetptr)); - ////KeysCmd - Cmd* keysptr = new KeysCmd(kCmdNameKeys, -2, kCmdFlagsRead | kCmdFlagsMultiPartition | kCmdFlagsKv); - cmd_table->insert(std::pair(kCmdNameKeys, keysptr)); - ////SetnxCmd - Cmd* setnxptr = new SetnxCmd(kCmdNameSetnx, 3, kCmdFlagsWrite | kCmdFlagsSinglePartition | kCmdFlagsKv); - cmd_table->insert(std::pair(kCmdNameSetnx, setnxptr)); - ////SetexCmd - Cmd* setexptr = new SetexCmd(kCmdNameSetex, 4, kCmdFlagsWrite | kCmdFlagsSinglePartition | kCmdFlagsKv); - 
cmd_table->insert(std::pair(kCmdNameSetex, setexptr)); - ////PsetexCmd - Cmd* psetexptr = new PsetexCmd(kCmdNamePsetex, 4, kCmdFlagsWrite | kCmdFlagsSinglePartition | kCmdFlagsKv); - cmd_table->insert(std::pair(kCmdNamePsetex, psetexptr)); - ////DelvxCmd - Cmd* delvxptr = new DelvxCmd(kCmdNameDelvx, 3, kCmdFlagsWrite | kCmdFlagsSinglePartition | kCmdFlagsKv); - cmd_table->insert(std::pair(kCmdNameDelvx, delvxptr)); - ////MSetCmd - Cmd* msetptr = new MsetCmd(kCmdNameMset, -3, kCmdFlagsWrite | kCmdFlagsMultiPartition | kCmdFlagsKv); - cmd_table->insert(std::pair(kCmdNameMset, msetptr)); - ////MSetnxCmd - Cmd* msetnxptr = new MsetnxCmd(kCmdNameMsetnx, -3, kCmdFlagsWrite | kCmdFlagsMultiPartition | kCmdFlagsKv); - cmd_table->insert(std::pair(kCmdNameMsetnx, msetnxptr)); - ////GetrangeCmd - Cmd* getrangeptr = new GetrangeCmd(kCmdNameGetrange, 4, kCmdFlagsRead | kCmdFlagsSinglePartition | kCmdFlagsKv); - cmd_table->insert(std::pair(kCmdNameGetrange, getrangeptr)); - ////SetrangeCmd - Cmd* setrangeptr = new SetrangeCmd(kCmdNameSetrange, 4, kCmdFlagsWrite | kCmdFlagsSinglePartition | kCmdFlagsKv); - cmd_table->insert(std::pair(kCmdNameSetrange, setrangeptr)); - ////StrlenCmd - Cmd* strlenptr = new StrlenCmd(kCmdNameStrlen, 2, kCmdFlagsRead | kCmdFlagsSinglePartition | kCmdFlagsKv); - cmd_table->insert(std::pair(kCmdNameStrlen, strlenptr)); - ////ExistsCmd - Cmd* existsptr = new ExistsCmd(kCmdNameExists, -2, kCmdFlagsRead | kCmdFlagsMultiPartition | kCmdFlagsKv); - cmd_table->insert(std::pair(kCmdNameExists, existsptr)); - ////ExpireCmd - Cmd* expireptr = new ExpireCmd(kCmdNameExpire, 3, kCmdFlagsWrite | kCmdFlagsSinglePartition | kCmdFlagsKv); - cmd_table->insert(std::pair(kCmdNameExpire, expireptr)); - ////PexpireCmd - Cmd* pexpireptr = new PexpireCmd(kCmdNamePexpire, 3, kCmdFlagsWrite | kCmdFlagsSinglePartition | kCmdFlagsKv); - cmd_table->insert(std::pair(kCmdNamePexpire, pexpireptr)); - ////ExpireatCmd - Cmd* expireatptr = new ExpireatCmd(kCmdNameExpireat, 3, kCmdFlagsWrite | kCmdFlagsSinglePartition | kCmdFlagsKv); - cmd_table->insert(std::pair(kCmdNameExpireat, expireatptr)); - ////PexpireatCmd - Cmd* pexpireatptr = new PexpireatCmd(kCmdNamePexpireat, 3, kCmdFlagsWrite | kCmdFlagsSinglePartition | kCmdFlagsKv); - cmd_table->insert(std::pair(kCmdNamePexpireat, pexpireatptr)); - ////TtlCmd - Cmd* ttlptr = new TtlCmd(kCmdNameTtl, 2, kCmdFlagsRead | kCmdFlagsSinglePartition | kCmdFlagsKv); - cmd_table->insert(std::pair(kCmdNameTtl, ttlptr)); - ////PttlCmd - Cmd* pttlptr = new PttlCmd(kCmdNamePttl, 2, kCmdFlagsRead | kCmdFlagsSinglePartition | kCmdFlagsKv); - cmd_table->insert(std::pair(kCmdNamePttl, pttlptr)); - ////PersistCmd - Cmd* persistptr = new PersistCmd(kCmdNamePersist, 2, kCmdFlagsWrite | kCmdFlagsSinglePartition | kCmdFlagsKv); - cmd_table->insert(std::pair(kCmdNamePersist, persistptr)); - ////TypeCmd - Cmd* typeptr = new TypeCmd(kCmdNameType, 2, kCmdFlagsRead | kCmdFlagsSinglePartition | kCmdFlagsKv); - cmd_table->insert(std::pair(kCmdNameType, typeptr)); - ////ScanCmd - Cmd* scanptr = new ScanCmd(kCmdNameScan, -2, kCmdFlagsRead | kCmdFlagsMultiPartition | kCmdFlagsKv); - cmd_table->insert(std::pair(kCmdNameScan, scanptr)); - ////ScanxCmd - Cmd* scanxptr = new ScanxCmd(kCmdNameScanx, -3, kCmdFlagsRead | kCmdFlagsMultiPartition | kCmdFlagsKv); - cmd_table->insert(std::pair(kCmdNameScanx, scanxptr)); - ////PKSetexAtCmd - Cmd* pksetexatptr = new PKSetexAtCmd(kCmdNamePKSetexAt, 4, kCmdFlagsWrite | kCmdFlagsSinglePartition | kCmdFlagsKv); - cmd_table->insert(std::pair(kCmdNamePKSetexAt, 
pksetexatptr)); - ////PKScanRange - Cmd* pkscanrangeptr = new PKScanRangeCmd(kCmdNamePKScanRange, -4, kCmdFlagsRead | kCmdFlagsMultiPartition | kCmdFlagsKv); - cmd_table->insert(std::pair(kCmdNamePKScanRange, pkscanrangeptr)); - ////PKRScanRange - Cmd* pkrscanrangeptr = new PKRScanRangeCmd(kCmdNamePKRScanRange, -4, kCmdFlagsRead | kCmdFlagsMultiPartition | kCmdFlagsKv); - cmd_table->insert(std::pair(kCmdNamePKRScanRange, pkrscanrangeptr)); - - //Hash - ////HDelCmd - Cmd* hdelptr = new HDelCmd(kCmdNameHDel, -3, kCmdFlagsWrite | kCmdFlagsSinglePartition | kCmdFlagsHash); - cmd_table->insert(std::pair(kCmdNameHDel, hdelptr)); - ////HSetCmd - Cmd* hsetptr = new HSetCmd(kCmdNameHSet, 4, kCmdFlagsWrite | kCmdFlagsSinglePartition | kCmdFlagsHash); - cmd_table->insert(std::pair(kCmdNameHSet, hsetptr)); - ////HGetCmd - Cmd* hgetptr = new HGetCmd(kCmdNameHGet, 3, kCmdFlagsRead | kCmdFlagsSinglePartition | kCmdFlagsHash); - cmd_table->insert(std::pair(kCmdNameHGet, hgetptr)); - ////HGetallCmd - Cmd* hgetallptr = new HGetallCmd(kCmdNameHGetall, 2, kCmdFlagsRead | kCmdFlagsSinglePartition | kCmdFlagsHash); - cmd_table->insert(std::pair(kCmdNameHGetall, hgetallptr)); - ////HExistsCmd - Cmd* hexistsptr = new HExistsCmd(kCmdNameHExists, 3, kCmdFlagsRead | kCmdFlagsSinglePartition | kCmdFlagsHash); - cmd_table->insert(std::pair(kCmdNameHExists, hexistsptr)); - ////HIncrbyCmd - Cmd* hincrbyptr = new HIncrbyCmd(kCmdNameHIncrby, 4, kCmdFlagsWrite |kCmdFlagsSinglePartition | kCmdFlagsHash); - cmd_table->insert(std::pair(kCmdNameHIncrby, hincrbyptr)); - ////HIncrbyfloatCmd - Cmd* hincrbyfloatptr = new HIncrbyfloatCmd(kCmdNameHIncrbyfloat, 4, kCmdFlagsWrite | kCmdFlagsSinglePartition | kCmdFlagsHash); - cmd_table->insert(std::pair(kCmdNameHIncrbyfloat, hincrbyfloatptr)); - ////HKeysCmd - Cmd* hkeysptr = new HKeysCmd(kCmdNameHKeys, 2, kCmdFlagsRead | kCmdFlagsSinglePartition | kCmdFlagsHash); - cmd_table->insert(std::pair(kCmdNameHKeys, hkeysptr)); - ////HLenCmd - Cmd* hlenptr = new HLenCmd(kCmdNameHLen, 2, kCmdFlagsRead | kCmdFlagsSinglePartition | kCmdFlagsHash); - cmd_table->insert(std::pair(kCmdNameHLen, hlenptr)); - ////HMgetCmd - Cmd* hmgetptr = new HMgetCmd(kCmdNameHMget, -3, kCmdFlagsRead | kCmdFlagsSinglePartition | kCmdFlagsHash); - cmd_table->insert(std::pair(kCmdNameHMget, hmgetptr)); - ////HMsetCmd - Cmd* hmsetptr = new HMsetCmd(kCmdNameHMset, -4, kCmdFlagsWrite | kCmdFlagsSinglePartition | kCmdFlagsHash); - cmd_table->insert(std::pair(kCmdNameHMset, hmsetptr)); - ////HSetnxCmd - Cmd* hsetnxptr = new HSetnxCmd(kCmdNameHSetnx, 4, kCmdFlagsWrite | kCmdFlagsSinglePartition | kCmdFlagsHash); - cmd_table->insert(std::pair(kCmdNameHSetnx, hsetnxptr)); - ////HStrlenCmd - Cmd* hstrlenptr = new HStrlenCmd(kCmdNameHStrlen, 3, kCmdFlagsRead | kCmdFlagsSinglePartition | kCmdFlagsHash); - cmd_table->insert(std::pair(kCmdNameHStrlen, hstrlenptr)); - ////HValsCmd - Cmd* hvalsptr = new HValsCmd(kCmdNameHVals, 2, kCmdFlagsRead | kCmdFlagsSinglePartition | kCmdFlagsHash); - cmd_table->insert(std::pair(kCmdNameHVals, hvalsptr)); - ////HScanCmd - Cmd* hscanptr = new HScanCmd(kCmdNameHScan, -3, kCmdFlagsRead | kCmdFlagsSinglePartition | kCmdFlagsHash); - cmd_table->insert(std::pair(kCmdNameHScan, hscanptr)); - ////HScanxCmd - Cmd* hscanxptr = new HScanxCmd(kCmdNameHScanx, -3, kCmdFlagsRead | kCmdFlagsSinglePartition | kCmdFlagsHash); - cmd_table->insert(std::pair(kCmdNameHScanx, hscanxptr)); - ////PKHScanRange - Cmd* pkhscanrangeptr = new PKHScanRangeCmd(kCmdNamePKHScanRange, -4, kCmdFlagsRead | kCmdFlagsSinglePartition 
| kCmdFlagsHash); - cmd_table->insert(std::pair(kCmdNamePKHScanRange, pkhscanrangeptr)); - ////PKHRScanRange - Cmd* pkhrscanrangeptr = new PKHRScanRangeCmd(kCmdNamePKHRScanRange, -4, kCmdFlagsRead | kCmdFlagsSinglePartition | kCmdFlagsHash); - cmd_table->insert(std::pair(kCmdNamePKHRScanRange, pkhrscanrangeptr)); - - //List - Cmd* lindexptr = new LIndexCmd(kCmdNameLIndex, 3, kCmdFlagsRead | kCmdFlagsSinglePartition | kCmdFlagsList); - cmd_table->insert(std::pair(kCmdNameLIndex, lindexptr)); - Cmd* linsertptr = new LInsertCmd(kCmdNameLInsert, 5, kCmdFlagsWrite | kCmdFlagsSinglePartition | kCmdFlagsList); - cmd_table->insert(std::pair(kCmdNameLInsert, linsertptr)); - Cmd* llenptr = new LLenCmd(kCmdNameLLen, 2, kCmdFlagsRead | kCmdFlagsSinglePartition | kCmdFlagsList); - cmd_table->insert(std::pair(kCmdNameLLen, llenptr)); - Cmd* lpopptr = new LPopCmd(kCmdNameLPop, 2, kCmdFlagsWrite | kCmdFlagsSinglePartition | kCmdFlagsList); - cmd_table->insert(std::pair(kCmdNameLPop, lpopptr)); - Cmd* lpushptr = new LPushCmd(kCmdNameLPush, -3, kCmdFlagsWrite | kCmdFlagsSinglePartition | kCmdFlagsList); - cmd_table->insert(std::pair(kCmdNameLPush, lpushptr)); - Cmd* lpushxptr = new LPushxCmd(kCmdNameLPushx, 3, kCmdFlagsWrite | kCmdFlagsSinglePartition | kCmdFlagsList); - cmd_table->insert(std::pair(kCmdNameLPushx, lpushxptr)); - Cmd* lrangeptr = new LRangeCmd(kCmdNameLRange, 4, kCmdFlagsRead | kCmdFlagsSinglePartition | kCmdFlagsList); - cmd_table->insert(std::pair(kCmdNameLRange, lrangeptr)); - Cmd* lremptr = new LRemCmd(kCmdNameLRem, 4, kCmdFlagsWrite | kCmdFlagsSinglePartition | kCmdFlagsList); - cmd_table->insert(std::pair(kCmdNameLRem, lremptr)); - Cmd* lsetptr = new LSetCmd(kCmdNameLSet, 4, kCmdFlagsWrite | kCmdFlagsSinglePartition | kCmdFlagsList); - cmd_table->insert(std::pair(kCmdNameLSet, lsetptr)); - Cmd* ltrimptr = new LTrimCmd(kCmdNameLTrim, 4, kCmdFlagsWrite | kCmdFlagsSinglePartition | kCmdFlagsList); - cmd_table->insert(std::pair(kCmdNameLTrim, ltrimptr)); - Cmd* rpopptr = new RPopCmd(kCmdNameRPop, 2, kCmdFlagsWrite | kCmdFlagsSinglePartition | kCmdFlagsList); - cmd_table->insert(std::pair(kCmdNameRPop, rpopptr)); - Cmd* rpoplpushptr = new RPopLPushCmd(kCmdNameRPopLPush, 3, kCmdFlagsWrite | kCmdFlagsMultiPartition | kCmdFlagsList); - cmd_table->insert(std::pair(kCmdNameRPopLPush, rpoplpushptr)); - Cmd* rpushptr = new RPushCmd(kCmdNameRPush, -3, kCmdFlagsWrite | kCmdFlagsSinglePartition | kCmdFlagsList); - cmd_table->insert(std::pair(kCmdNameRPush, rpushptr)); - Cmd* rpushxptr = new RPushxCmd(kCmdNameRPushx, 3, kCmdFlagsWrite | kCmdFlagsSinglePartition | kCmdFlagsList); - cmd_table->insert(std::pair(kCmdNameRPushx, rpushxptr)); - - //Zset - ////ZAddCmd - Cmd* zaddptr = new ZAddCmd(kCmdNameZAdd, -4, kCmdFlagsWrite | kCmdFlagsSinglePartition | kCmdFlagsZset); - cmd_table->insert(std::pair(kCmdNameZAdd, zaddptr)); - ////ZCardCmd - Cmd* zcardptr = new ZCardCmd(kCmdNameZCard, 2, kCmdFlagsRead | kCmdFlagsSinglePartition | kCmdFlagsZset); - cmd_table->insert(std::pair(kCmdNameZCard, zcardptr)); - ////ZScanCmd - Cmd* zscanptr = new ZScanCmd(kCmdNameZScan, -3, kCmdFlagsRead | kCmdFlagsSinglePartition | kCmdFlagsZset); - cmd_table->insert(std::pair(kCmdNameZScan, zscanptr)); - ////ZIncrbyCmd - Cmd* zincrbyptr = new ZIncrbyCmd(kCmdNameZIncrby, 4, kCmdFlagsWrite | kCmdFlagsSinglePartition | kCmdFlagsZset); - cmd_table->insert(std::pair(kCmdNameZIncrby, zincrbyptr)); - ////ZRangeCmd - Cmd* zrangeptr = new ZRangeCmd(kCmdNameZRange, -4, kCmdFlagsRead | kCmdFlagsSinglePartition | kCmdFlagsZset); - 
cmd_table->insert(std::pair(kCmdNameZRange, zrangeptr)); - ////ZRevrangeCmd - Cmd* zrevrangeptr = new ZRevrangeCmd(kCmdNameZRevrange, -4, kCmdFlagsRead | kCmdFlagsSinglePartition | kCmdFlagsZset); - cmd_table->insert(std::pair(kCmdNameZRevrange, zrevrangeptr)); - ////ZRangebyscoreCmd - Cmd* zrangebyscoreptr = new ZRangebyscoreCmd(kCmdNameZRangebyscore, -4, kCmdFlagsRead | kCmdFlagsSinglePartition | kCmdFlagsZset); - cmd_table->insert(std::pair(kCmdNameZRangebyscore, zrangebyscoreptr)); - ////ZRevrangebyscoreCmd - Cmd* zrevrangebyscoreptr = new ZRevrangebyscoreCmd(kCmdNameZRevrangebyscore, -4, kCmdFlagsRead | kCmdFlagsSinglePartition | kCmdFlagsZset); - cmd_table->insert(std::pair(kCmdNameZRevrangebyscore, zrevrangebyscoreptr)); - ////ZCountCmd - Cmd* zcountptr = new ZCountCmd(kCmdNameZCount, 4, kCmdFlagsRead | kCmdFlagsSinglePartition | kCmdFlagsZset); - cmd_table->insert(std::pair(kCmdNameZCount, zcountptr)); - ////ZRemCmd - Cmd* zremptr = new ZRemCmd(kCmdNameZRem, -3, kCmdFlagsWrite | kCmdFlagsSinglePartition | kCmdFlagsZset); - cmd_table->insert(std::pair(kCmdNameZRem, zremptr)); - ////ZUnionstoreCmd - Cmd* zunionstoreptr = new ZUnionstoreCmd(kCmdNameZUnionstore, -4, kCmdFlagsWrite | kCmdFlagsMultiPartition | kCmdFlagsZset); - cmd_table->insert(std::pair(kCmdNameZUnionstore, zunionstoreptr)); - ////ZInterstoreCmd - Cmd* zinterstoreptr = new ZInterstoreCmd(kCmdNameZInterstore, -4, kCmdFlagsWrite | kCmdFlagsMultiPartition | kCmdFlagsZset); - cmd_table->insert(std::pair(kCmdNameZInterstore, zinterstoreptr)); - ////ZRankCmd - Cmd* zrankptr = new ZRankCmd(kCmdNameZRank, 3, kCmdFlagsRead | kCmdFlagsSinglePartition | kCmdFlagsZset); - cmd_table->insert(std::pair(kCmdNameZRank, zrankptr)); - ////ZRevrankCmd - Cmd* zrevrankptr = new ZRevrankCmd(kCmdNameZRevrank, 3, kCmdFlagsRead | kCmdFlagsSinglePartition | kCmdFlagsZset); - cmd_table->insert(std::pair(kCmdNameZRevrank, zrevrankptr)); - ////ZScoreCmd - Cmd* zscoreptr = new ZScoreCmd(kCmdNameZScore, 3, kCmdFlagsRead | kCmdFlagsSinglePartition | kCmdFlagsZset); - cmd_table->insert(std::pair(kCmdNameZScore, zscoreptr)); - ////ZRangebylexCmd - Cmd* zrangebylexptr = new ZRangebylexCmd(kCmdNameZRangebylex, -4, kCmdFlagsRead | kCmdFlagsSinglePartition | kCmdFlagsZset); - cmd_table->insert(std::pair(kCmdNameZRangebylex, zrangebylexptr)); - ////ZRevrangebylexCmd - Cmd* zrevrangebylexptr = new ZRevrangebylexCmd(kCmdNameZRevrangebylex, -4, kCmdFlagsRead | kCmdFlagsSinglePartition | kCmdFlagsZset); - cmd_table->insert(std::pair(kCmdNameZRevrangebylex, zrevrangebylexptr)); - ////ZLexcountCmd - Cmd* zlexcountptr = new ZLexcountCmd(kCmdNameZLexcount, 4, kCmdFlagsRead | kCmdFlagsSinglePartition | kCmdFlagsZset); - cmd_table->insert(std::pair(kCmdNameZLexcount, zlexcountptr)); - ////ZRemrangebyrankCmd - Cmd* zremrangebyrankptr = new ZRemrangebyrankCmd(kCmdNameZRemrangebyrank, 4, kCmdFlagsWrite | kCmdFlagsSinglePartition | kCmdFlagsZset); - cmd_table->insert(std::pair(kCmdNameZRemrangebyrank, zremrangebyrankptr)); - ////ZRemrangebyscoreCmd - Cmd* zremrangebyscoreptr = new ZRemrangebyscoreCmd(kCmdNameZRemrangebyscore, 4, kCmdFlagsWrite | kCmdFlagsSinglePartition | kCmdFlagsZset); - cmd_table->insert(std::pair(kCmdNameZRemrangebyscore, zremrangebyscoreptr)); - ////ZRemrangebylexCmd - Cmd* zremrangebylexptr = new ZRemrangebylexCmd(kCmdNameZRemrangebylex, 4, kCmdFlagsWrite | kCmdFlagsSinglePartition | kCmdFlagsZset); - cmd_table->insert(std::pair(kCmdNameZRemrangebylex, zremrangebylexptr)); - ////ZPopmax - Cmd* zpopmaxptr = new ZPopmaxCmd(kCmdNameZPopmax, -2, 
kCmdFlagsWrite | kCmdFlagsSinglePartition | kCmdFlagsZset); - cmd_table->insert(std::pair(kCmdNameZPopmax, zpopmaxptr)); - ////ZPopmin - Cmd* zpopminptr = new ZPopminCmd(kCmdNameZPopmin, -2, kCmdFlagsWrite | kCmdFlagsSinglePartition | kCmdFlagsZset); - cmd_table->insert(std::pair(kCmdNameZPopmin, zpopminptr)); - - //Set - ////SAddCmd - Cmd* saddptr = new SAddCmd(kCmdNameSAdd, -3, kCmdFlagsWrite | kCmdFlagsSinglePartition | kCmdFlagsSet); - cmd_table->insert(std::pair(kCmdNameSAdd, saddptr)); - ////SPopCmd - Cmd* spopptr = new SPopCmd(kCmdNameSPop, 2, kCmdFlagsWrite | kCmdFlagsSinglePartition | kCmdFlagsSet); - cmd_table->insert(std::pair(kCmdNameSPop, spopptr)); - ////SCardCmd - Cmd* scardptr = new SCardCmd(kCmdNameSCard, 2, kCmdFlagsRead | kCmdFlagsSinglePartition | kCmdFlagsSet); - cmd_table->insert(std::pair(kCmdNameSCard, scardptr)); - ////SMembersCmd - Cmd* smembersptr = new SMembersCmd(kCmdNameSMembers, 2, kCmdFlagsRead | kCmdFlagsSinglePartition | kCmdFlagsSet); - cmd_table->insert(std::pair(kCmdNameSMembers, smembersptr)); - ////SScanCmd - Cmd* sscanptr = new SScanCmd(kCmdNameSScan, -3, kCmdFlagsRead | kCmdFlagsSinglePartition | kCmdFlagsSet); - cmd_table->insert(std::pair(kCmdNameSScan, sscanptr)); - ////SRemCmd - Cmd* sremptr = new SRemCmd(kCmdNameSRem, -3, kCmdFlagsWrite | kCmdFlagsSinglePartition | kCmdFlagsSet); - cmd_table->insert(std::pair(kCmdNameSRem, sremptr)); - ////SUnionCmd - Cmd* sunionptr = new SUnionCmd(kCmdNameSUnion, -2, kCmdFlagsRead | kCmdFlagsMultiPartition | kCmdFlagsSet); - cmd_table->insert(std::pair(kCmdNameSUnion, sunionptr)); - ////SUnionstoreCmd - Cmd* sunionstoreptr = new SUnionstoreCmd(kCmdNameSUnionstore, -3, kCmdFlagsWrite | kCmdFlagsMultiPartition | kCmdFlagsSet); - cmd_table->insert(std::pair(kCmdNameSUnionstore, sunionstoreptr)); - ////SInterCmd - Cmd* sinterptr = new SInterCmd(kCmdNameSInter, -2, kCmdFlagsRead | kCmdFlagsMultiPartition | kCmdFlagsSet); - cmd_table->insert(std::pair(kCmdNameSInter, sinterptr)); - ////SInterstoreCmd - Cmd* sinterstoreptr = new SInterstoreCmd(kCmdNameSInterstore, -3, kCmdFlagsWrite | kCmdFlagsMultiPartition | kCmdFlagsSet); - cmd_table->insert(std::pair(kCmdNameSInterstore, sinterstoreptr)); - ////SIsmemberCmd - Cmd* sismemberptr = new SIsmemberCmd(kCmdNameSIsmember, 3, kCmdFlagsRead | kCmdFlagsSinglePartition | kCmdFlagsSet); - cmd_table->insert(std::pair(kCmdNameSIsmember, sismemberptr)); - ////SDiffCmd - Cmd* sdiffptr = new SDiffCmd(kCmdNameSDiff, -2, kCmdFlagsRead | kCmdFlagsMultiPartition | kCmdFlagsSet); - cmd_table->insert(std::pair(kCmdNameSDiff, sdiffptr)); - ////SDiffstoreCmd - Cmd* sdiffstoreptr = new SDiffstoreCmd(kCmdNameSDiffstore, -3, kCmdFlagsWrite | kCmdFlagsMultiPartition | kCmdFlagsSet); - cmd_table->insert(std::pair(kCmdNameSDiffstore, sdiffstoreptr)); - ////SMoveCmd - Cmd* smoveptr = new SMoveCmd(kCmdNameSMove, 4, kCmdFlagsWrite | kCmdFlagsMultiPartition | kCmdFlagsSet); - cmd_table->insert(std::pair(kCmdNameSMove, smoveptr)); - ////SRandmemberCmd - Cmd* srandmemberptr = new SRandmemberCmd(kCmdNameSRandmember, -2, kCmdFlagsRead | kCmdFlagsSinglePartition | kCmdFlagsSet); - cmd_table->insert(std::pair(kCmdNameSRandmember, srandmemberptr)); - - //BitMap - ////bitsetCmd - Cmd* bitsetptr = new BitSetCmd(kCmdNameBitSet, 4, kCmdFlagsWrite | kCmdFlagsSinglePartition | kCmdFlagsBit); - cmd_table->insert(std::pair(kCmdNameBitSet, bitsetptr)); - ////bitgetCmd - Cmd* bitgetptr = new BitGetCmd(kCmdNameBitGet, 3, kCmdFlagsRead | kCmdFlagsSinglePartition | kCmdFlagsBit); - 
cmd_table->insert(std::pair(kCmdNameBitGet, bitgetptr)); - ////bitcountCmd - Cmd* bitcountptr = new BitCountCmd(kCmdNameBitCount, -2, kCmdFlagsRead | kCmdFlagsSinglePartition | kCmdFlagsBit); - cmd_table->insert(std::pair(kCmdNameBitCount, bitcountptr)); - ////bitposCmd - Cmd* bitposptr = new BitPosCmd(kCmdNameBitPos, -3, kCmdFlagsRead | kCmdFlagsSinglePartition | kCmdFlagsBit); - cmd_table->insert(std::pair(kCmdNameBitPos, bitposptr)); - ////bitopCmd - Cmd* bitopptr = new BitOpCmd(kCmdNameBitOp, -3, kCmdFlagsWrite | kCmdFlagsMultiPartition | kCmdFlagsBit); - cmd_table->insert(std::pair(kCmdNameBitOp, bitopptr)); - - //HyperLogLog - ////pfaddCmd - Cmd * pfaddptr = new PfAddCmd(kCmdNamePfAdd, -2, kCmdFlagsWrite | kCmdFlagsSinglePartition | kCmdFlagsHyperLogLog); - cmd_table->insert(std::pair(kCmdNamePfAdd, pfaddptr)); - ////pfcountCmd - Cmd * pfcountptr = new PfCountCmd(kCmdNamePfCount, -2, kCmdFlagsRead | kCmdFlagsMultiPartition | kCmdFlagsHyperLogLog); - cmd_table->insert(std::pair(kCmdNamePfCount, pfcountptr)); - ////pfmergeCmd - Cmd * pfmergeptr = new PfMergeCmd(kCmdNamePfMerge, -3, kCmdFlagsWrite | kCmdFlagsMultiPartition | kCmdFlagsHyperLogLog); - cmd_table->insert(std::pair(kCmdNamePfMerge, pfmergeptr)); - - //GEO - ////GepAdd - Cmd * geoaddptr = new GeoAddCmd(kCmdNameGeoAdd, -5, kCmdFlagsWrite | kCmdFlagsSinglePartition | kCmdFlagsGeo); - cmd_table->insert(std::pair(kCmdNameGeoAdd, geoaddptr)); - ////GeoPos - Cmd * geoposptr = new GeoPosCmd(kCmdNameGeoPos, -2, kCmdFlagsRead | kCmdFlagsSinglePartition | kCmdFlagsGeo); - cmd_table->insert(std::pair(kCmdNameGeoPos, geoposptr)); - ////GeoDist - Cmd * geodistptr = new GeoDistCmd(kCmdNameGeoDist, -4, kCmdFlagsRead | kCmdFlagsSinglePartition | kCmdFlagsGeo); - cmd_table->insert(std::pair(kCmdNameGeoDist, geodistptr)); - ////GeoHash - Cmd * geohashptr = new GeoHashCmd(kCmdNameGeoHash, -2, kCmdFlagsRead | kCmdFlagsSinglePartition | kCmdFlagsGeo); - cmd_table->insert(std::pair(kCmdNameGeoHash, geohashptr)); - ////GeoRadius - Cmd * georadiusptr = new GeoRadiusCmd(kCmdNameGeoRadius, -6, kCmdFlagsRead | kCmdFlagsMultiPartition | kCmdFlagsGeo); - cmd_table->insert(std::pair(kCmdNameGeoRadius, georadiusptr)); - ////GeoRadiusByMember - Cmd * georadiusbymemberptr = new GeoRadiusByMemberCmd(kCmdNameGeoRadiusByMember, -5, kCmdFlagsRead | kCmdFlagsMultiPartition | kCmdFlagsGeo); - cmd_table->insert(std::pair(kCmdNameGeoRadiusByMember, georadiusbymemberptr)); - - //PubSub - ////Publish - Cmd * publishptr = new PublishCmd(kCmdNamePublish, 3, kCmdFlagsRead | kCmdFlagsPubSub); - cmd_table->insert(std::pair(kCmdNamePublish, publishptr)); - ////Subscribe - Cmd * subscribeptr = new SubscribeCmd(kCmdNameSubscribe, -2, kCmdFlagsRead | kCmdFlagsPubSub); - cmd_table->insert(std::pair(kCmdNameSubscribe, subscribeptr)); - ////UnSubscribe - Cmd * unsubscribeptr = new UnSubscribeCmd(kCmdNameUnSubscribe, -1, kCmdFlagsRead | kCmdFlagsPubSub); - cmd_table->insert(std::pair(kCmdNameUnSubscribe, unsubscribeptr)); - ////PSubscribe - Cmd * psubscribeptr = new PSubscribeCmd(kCmdNamePSubscribe, -2, kCmdFlagsRead | kCmdFlagsPubSub); - cmd_table->insert(std::pair(kCmdNamePSubscribe, psubscribeptr)); - ////PUnSubscribe - Cmd * punsubscribeptr = new PUnSubscribeCmd(kCmdNamePUnSubscribe, -1, kCmdFlagsRead | kCmdFlagsPubSub); - cmd_table->insert(std::pair(kCmdNamePUnSubscribe, punsubscribeptr)); - ////PubSub - Cmd * pubsubptr = new PubSubCmd(kCmdNamePubSub, -2, kCmdFlagsRead | kCmdFlagsPubSub); - cmd_table->insert(std::pair(kCmdNamePubSub, pubsubptr)); -} - -Cmd* 
GetCmdFromTable(const std::string& opt, const CmdTable& cmd_table) { - CmdTable::const_iterator it = cmd_table.find(opt); - if (it != cmd_table.end()) { - return it->second; - } - return NULL; -} - -void DestoryCmdTable(CmdTable* cmd_table) { - CmdTable::const_iterator it = cmd_table->begin(); - for (; it != cmd_table->end(); ++it) { - delete it->second; - } -} - -void TryAliasChange(std::vector* argv) { - if (argv->empty()) { - return; - } - if (!strcasecmp(argv->front().c_str(), kCmdNameSlaveof.c_str())) { - argv->front() = "slotsslaveof"; - argv->insert(argv->begin(), kClusterPrefix); - if (!strcasecmp(argv->back().c_str(), "force")) { - argv->back() = "all"; - argv->push_back("force"); - } else { - argv->push_back("all"); - } - } -} - -void Cmd::Initial(const PikaCmdArgsType& argv, - const std::string& table_name) { - argv_ = argv; - if (!g_pika_conf->classic_mode()) { - TryAliasChange(&argv_); - } - table_name_ = table_name; - res_.clear(); // Clear res content - Clear(); // Clear cmd, Derived class can has own implement - DoInitial(); -}; - -std::vector Cmd::current_key() const { - std::vector res; - res.push_back(""); - return res; -} - -void Cmd::Execute() { - if (name_ == kCmdNameFlushdb) { - ProcessFlushDBCmd(); - } else if (name_ == kCmdNameFlushall) { - ProcessFlushAllCmd(); - } else if (name_ == kCmdNameInfo || name_ == kCmdNameConfig) { - ProcessDoNotSpecifyPartitionCmd(); - } else if (is_single_partition() || g_pika_conf->classic_mode()) { - ProcessSinglePartitionCmd(); - } else if (is_multi_partition()) { - ProcessMultiPartitionCmd(); - } else { - ProcessDoNotSpecifyPartitionCmd(); - } -} - -void Cmd::ProcessFlushDBCmd() { - std::shared_ptr
table = g_pika_server->GetTable(table_name_); - if (!table) { - res_.SetRes(CmdRes::kInvalidTable); - } else { - if (table->IsKeyScaning()) { - res_.SetRes(CmdRes::kErrOther, "The keyscan operation is executing, Try again later"); - } else { - slash::RWLock l_prw(&table->partitions_rw_, true); - for (const auto& partition_item : table->partitions_) { - ProcessCommand(partition_item.second); - } - res_.SetRes(CmdRes::kOk); - } - } -} - -void Cmd::ProcessFlushAllCmd() { - slash::RWLock l_trw(&g_pika_server->tables_rw_, true); - for (const auto& table_item : g_pika_server->tables_) { - if (table_item.second->IsKeyScaning()) { - res_.SetRes(CmdRes::kErrOther, "The keyscan operation is executing, Try again later"); - return; - } - } - - for (const auto& table_item : g_pika_server->tables_) { - slash::RWLock l_prw(&table_item.second->partitions_rw_, true); - for (const auto& partition_item : table_item.second->partitions_) { - ProcessCommand(partition_item.second); - } - } - res_.SetRes(CmdRes::kOk); -} - -void Cmd::ProcessSinglePartitionCmd() { - std::shared_ptr partition; - if (g_pika_conf->classic_mode()) { - // in classic mode a table has only one partition - partition = g_pika_server->GetPartitionByDbName(table_name_); - } else { - std::vector cur_key = current_key(); - if (cur_key.empty()) { - res_.SetRes(CmdRes::kErrOther, "Internal Error"); - return; - } - // in sharding mode we select partition by key - partition = g_pika_server->GetTablePartitionByKey(table_name_, cur_key.front()); - } - - if (!partition) { - res_.SetRes(CmdRes::kErrOther, "Partition not found"); - return; - } - ProcessCommand(partition); -} - -void Cmd::ProcessCommand(std::shared_ptr partition) { - slash::lock::MultiRecordLock record_lock(partition->LockMgr()); - if (is_write()) { - record_lock.Lock(current_key()); - } - - DoCommand(partition); - - DoBinlog(partition); - - if (is_write()) { - record_lock.Unlock(current_key()); - } - -} - -void Cmd::DoCommand(std::shared_ptr partition) { - if (!is_suspend()) { - partition->DbRWLockReader(); - } - - Do(partition); - - if (!is_suspend()) { - partition->DbRWUnLock(); - } - -} - -void Cmd::DoBinlog(std::shared_ptr partition) { - if (res().ok() - && is_write() - && g_pika_conf->write_binlog()) { - - uint32_t filenum = 0; - uint64_t offset = 0; - uint64_t logic_id = 0; - - partition->logger()->Lock(); - partition->logger()->GetProducerStatus(&filenum, &offset, &logic_id); - uint32_t exec_time = time(nullptr); - std::string binlog = ToBinlog(exec_time, - g_pika_conf->server_id(), - logic_id, - filenum, - offset); - - Status s = partition->WriteBinlog(binlog); - partition->logger()->Unlock(); - - if (!s.ok()) { - res().SetRes(CmdRes::kErrOther, s.ToString()); - } - } -} - -void Cmd::ProcessMultiPartitionCmd() { - if (argv_.size() == static_cast(arity_ < 0 ? 
-arity_ : arity_)) { - ProcessSinglePartitionCmd(); - } else { - res_.SetRes(CmdRes::kErrOther, "This command usage only support in classic mode\r\n"); - return; - } -} - -void Cmd::ProcessDoNotSpecifyPartitionCmd() { - Do(); -} - -bool Cmd::is_write() const { - return ((flag_ & kCmdFlagsMaskRW) == kCmdFlagsWrite); -} -bool Cmd::is_local() const { - return ((flag_ & kCmdFlagsMaskLocal) == kCmdFlagsLocal); -} -// Others need to be suspended when a suspend command run -bool Cmd::is_suspend() const { - return ((flag_ & kCmdFlagsMaskSuspend) == kCmdFlagsSuspend); -} -// Must with admin auth -bool Cmd::is_admin_require() const { - return ((flag_ & kCmdFlagsMaskAdminRequire) == kCmdFlagsAdminRequire); -} -bool Cmd::is_single_partition() const { - return ((flag_ & kCmdFlagsMaskPartition) == kCmdFlagsSinglePartition); -} -bool Cmd::is_multi_partition() const { - return ((flag_ & kCmdFlagsMaskPartition) == kCmdFlagsMultiPartition); -} - -std::string Cmd::name() const { - return name_; -} -CmdRes& Cmd::res() { - return res_; -} - -std::string Cmd::ToBinlog(uint32_t exec_time, - const std::string& server_id, - uint64_t logic_id, - uint32_t filenum, - uint64_t offset) { - std::string content; - content.reserve(RAW_ARGS_LEN); - RedisAppendLen(content, argv_.size(), "*"); - - for (const auto& v : argv_) { - RedisAppendLen(content, v.size(), "$"); - RedisAppendContent(content, v); - } - - return PikaBinlogTransverter::BinlogEncode(BinlogType::TypeFirst, - exec_time, - std::stoi(server_id), - logic_id, - filenum, - offset, - content, - {}); -} - -bool Cmd::CheckArg(int num) const { - if ((arity_ > 0 && num != arity_) - || (arity_ < 0 && num < -arity_)) { - return false; - } - return true; -} - -void Cmd::LogCommand() const { - std::string command; - for (const auto& item : argv_) { - command.append(" "); - command.append(item); - } - LOG(INFO) << "command:" << command; -} - -void Cmd::SetConn(const std::shared_ptr conn) { - conn_ = conn; -} - -std::shared_ptr Cmd::GetConn() { - return conn_.lock(); -} diff --git a/tools/pika_migrate/src/pika_conf.cc b/tools/pika_migrate/src/pika_conf.cc deleted file mode 100644 index ca19667eb7..0000000000 --- a/tools/pika_migrate/src/pika_conf.cc +++ /dev/null @@ -1,502 +0,0 @@ -// Copyright (c) 2015-present, Qihoo, Inc. All rights reserved. -// This source code is licensed under the BSD-style license found in the -// LICENSE file in the root directory of this source tree. An additional grant -// of patent rights can be found in the PATENTS file in the same directory. 
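// ---------------------------------------------------------------------------
// A standalone sketch (not from the original file): Load() below follows a
// read -> default -> clamp pattern for every numeric option; for example,
// thread-num and thread-pool-size are forced into [1, 24] and a non-positive
// timeout falls back to 60s. NormalizeConfInt is a hypothetical helper that
// condenses that idiom.
#include <algorithm>

// Substitute def when the loaded value is unset (<= 0), then clamp to [lo, hi].
int NormalizeConfInt(int loaded, int def, int lo, int hi) {
  int v = loaded <= 0 ? def : loaded;
  return std::clamp(v, lo, hi);
}
// e.g. NormalizeConfInt(-1, 12, 1, 24) == 12 and
//      NormalizeConfInt(100, 12, 1, 24) == 24,
// matching the thread-num handling in Load().
// ---------------------------------------------------------------------------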
- -#include "include/pika_conf.h" - -#include - -#include -#include - -#include "slash/include/env.h" - -#include "include/pika_define.h" - -PikaConf::PikaConf(const std::string& path) - : slash::BaseConf(path), conf_path_(path) { - pthread_rwlock_init(&rwlock_, NULL); - local_meta_ = new PikaMeta(); -} - -PikaConf::~PikaConf() { - pthread_rwlock_destroy(&rwlock_); - delete local_meta_; -} - -Status PikaConf::InternalGetTargetTable(const std::string& table_name, uint32_t* const target) { - int32_t table_index = -1; - for (size_t idx = 0; table_structs_.size(); ++idx) { - if (table_structs_[idx].table_name == table_name) { - table_index = idx; - break; - } - } - if (table_index == -1) { - return Status::NotFound("table : " + table_name + " not found"); - } - *target = table_index; - return Status::OK(); -} - -Status PikaConf::TablePartitionsSanityCheck(const std::string& table_name, - const std::set& partition_ids, - bool is_add) { - RWLock l(&rwlock_, false); - uint32_t table_index = 0; - Status s = InternalGetTargetTable(table_name, &table_index); - if (!s.ok()) { - return s; - } - // Sanity Check - for (const auto& id : partition_ids) { - if (id >= table_structs_[table_index].partition_num) { - return Status::Corruption("partition index out of range"); - } else if (is_add && table_structs_[table_index].partition_ids.count(id) != 0) { - return Status::Corruption("partition : " + std::to_string(id) + " exist"); - } else if (!is_add && table_structs_[table_index].partition_ids.count(id) == 0) { - return Status::Corruption("partition : " + std::to_string(id) + " not exist"); - } - } - return Status::OK(); -} - -Status PikaConf::AddTablePartitions(const std::string& table_name, - const std::set& partition_ids) { - Status s = TablePartitionsSanityCheck(table_name, partition_ids, true); - if (!s.ok()) { - return s; - } - - RWLock l(&rwlock_, true); - uint32_t index = 0; - s = InternalGetTargetTable(table_name, &index); - if (s.ok()) { - for (const auto& id : partition_ids) { - table_structs_[index].partition_ids.insert(id); - } - s = local_meta_->StableSave(table_structs_); - } - return s; -} - -Status PikaConf::RemoveTablePartitions(const std::string& table_name, - const std::set& partition_ids) { - Status s = TablePartitionsSanityCheck(table_name, partition_ids, false); - if (!s.ok()) { - return s; - } - - RWLock l(&rwlock_, true); - uint32_t index = 0; - s = InternalGetTargetTable(table_name, &index); - if (s.ok()) { - for (const auto& id : partition_ids) { - table_structs_[index].partition_ids.erase(id); - } - s = local_meta_->StableSave(table_structs_); - } - return s; -} - -int PikaConf::Load() -{ - int ret = LoadConf(); - if (ret != 0) { - return ret; - } - - GetConfInt("timeout", &timeout_); - if (timeout_ < 0) { - timeout_ = 60; // 60s - } - GetConfStr("server-id", &server_id_); - if (server_id_.empty()) { - server_id_ = "1"; - } - GetConfStr("requirepass", &requirepass_); - GetConfStr("masterauth", &masterauth_); - GetConfStr("userpass", &userpass_); - GetConfInt("maxclients", &maxclients_); - if (maxclients_ <= 0) { - maxclients_ = 20000; - } - GetConfInt("root-connection-num", &root_connection_num_); - if (root_connection_num_ < 0) { - root_connection_num_ = 2; - } - - std::string swe; - GetConfStr("slowlog-write-errorlog", &swe); - slowlog_write_errorlog_.store(swe == "yes" ? 
true : false); - - int tmp_slowlog_log_slower_than; - GetConfInt("slowlog-log-slower-than", &tmp_slowlog_log_slower_than); - slowlog_log_slower_than_.store(tmp_slowlog_log_slower_than); - GetConfInt("slowlog-max-len", &slowlog_max_len_); - if (slowlog_max_len_ == 0) { - slowlog_max_len_ = 128; - } - std::string user_blacklist; - GetConfStr("userblacklist", &user_blacklist); - slash::StringSplit(user_blacklist, COMMA, user_blacklist_); - for (auto& item : user_blacklist_) { - slash::StringToLower(item); - } - - GetConfStr("dump-path", &bgsave_path_); - bgsave_path_ = bgsave_path_.empty() ? "./dump/" : bgsave_path_; - if (bgsave_path_[bgsave_path_.length() - 1] != '/') { - bgsave_path_ += "/"; - } - GetConfInt("dump-expire", &expire_dump_days_); - if (expire_dump_days_ < 0 ) { - expire_dump_days_ = 0; - } - GetConfStr("dump-prefix", &bgsave_prefix_); - - GetConfInt("expire-logs-nums", &expire_logs_nums_); - if (expire_logs_nums_ <= 10 ) { - expire_logs_nums_ = 10; - } - GetConfInt("expire-logs-days", &expire_logs_days_); - if (expire_logs_days_ <= 0 ) { - expire_logs_days_ = 1; - } - GetConfStr("compression", &compression_); - // set slave read only true as default - slave_read_only_ = true; - GetConfInt("slave-priority", &slave_priority_); - - // - // Immutable Sections - // - GetConfInt("port", &port_); - GetConfStr("log-path", &log_path_); - log_path_ = log_path_.empty() ? "./log/" : log_path_; - if (log_path_[log_path_.length() - 1] != '/') { - log_path_ += "/"; - } - GetConfStr("db-path", &db_path_); - db_path_ = db_path_.empty() ? "./db/" : db_path_; - if (db_path_[db_path_.length() - 1] != '/') { - db_path_ += "/"; - } - local_meta_->SetPath(db_path_); - - GetConfInt("thread-num", &thread_num_); - if (thread_num_ <= 0) { - thread_num_ = 12; - } - if (thread_num_ > 24) { - thread_num_ = 24; - } - GetConfInt("thread-pool-size", &thread_pool_size_); - if (thread_pool_size_ <= 0) { - thread_pool_size_ = 12; - } - if (thread_pool_size_ > 24) { - thread_pool_size_ = 24; - } - GetConfInt("sync-thread-num", &sync_thread_num_); - if (sync_thread_num_ <= 0) { - sync_thread_num_ = 3; - } - if (sync_thread_num_ > 24) { - sync_thread_num_ = 24; - } - - std::string instance_mode; - GetConfStr("instance-mode", &instance_mode); - classic_mode_.store(instance_mode.empty() - || !strcasecmp(instance_mode.data(), "classic")); - - if (classic_mode_.load()) { - GetConfInt("databases", &databases_); - if (databases_ < 1 || databases_ > 8) { - LOG(FATAL) << "config databases error, limit [1 ~ 8], the actual is: " - << databases_; - } - for (int idx = 0; idx < databases_; ++idx) { - table_structs_.push_back({"db" + std::to_string(idx), 1, {0}}); - } - } else { - GetConfInt("default-slot-num", &default_slot_num_); - if (default_slot_num_ <= 0) { - LOG(FATAL) << "config default-slot-num error," - << " it should greater than zero, the actual is: " - << default_slot_num_; - } - std::string pika_meta_path = db_path_ + kPikaMeta; - if (!slash::FileExists(pika_meta_path)) { - local_meta_->StableSave({{"db0", static_cast(default_slot_num_), {}}}); - } - Status s = local_meta_->ParseMeta(&table_structs_); - if (!s.ok()) { - LOG(FATAL) << "parse meta file error"; - } - } - default_table_ = table_structs_[0].table_name; - - compact_cron_ = ""; - GetConfStr("compact-cron", &compact_cron_); - if (compact_cron_ != "") { - bool have_week = false; - std::string compact_cron, week_str; - int slash_num = count(compact_cron_.begin(), compact_cron_.end(), '/'); - if (slash_num == 2) { - have_week = true; - std::string::size_type 
first_slash = compact_cron_.find("/"); - week_str = compact_cron_.substr(0, first_slash); - compact_cron = compact_cron_.substr(first_slash + 1); - } else { - compact_cron = compact_cron_; - } - - std::string::size_type len = compact_cron.length(); - std::string::size_type colon = compact_cron.find("-"); - std::string::size_type underline = compact_cron.find("/"); - if (colon == std::string::npos || underline == std::string::npos || - colon >= underline || colon + 1 >= len || - colon + 1 == underline || underline + 1 >= len) { - compact_cron_ = ""; - } else { - int week = std::atoi(week_str.c_str()); - int start = std::atoi(compact_cron.substr(0, colon).c_str()); - int end = std::atoi(compact_cron.substr(colon + 1, underline).c_str()); - int usage = std::atoi(compact_cron.substr(underline + 1).c_str()); - if ((have_week && (week < 1 || week > 7)) || start < 0 || start > 23 || end < 0 || end > 23 || usage < 0 || usage > 100) { - compact_cron_ = ""; - } - } - } - - compact_interval_ = ""; - GetConfStr("compact-interval", &compact_interval_); - if (compact_interval_ != "") { - std::string::size_type len = compact_interval_.length(); - std::string::size_type slash = compact_interval_.find("/"); - if (slash == std::string::npos || slash + 1 >= len) { - compact_interval_ = ""; - } else { - int interval = std::atoi(compact_interval_.substr(0, slash).c_str()); - int usage = std::atoi(compact_interval_.substr(slash+1).c_str()); - if (interval <= 0 || usage < 0 || usage > 100) { - compact_interval_ = ""; - } - } - } - - // write_buffer_size - GetConfInt64("write-buffer-size", &write_buffer_size_); - if (write_buffer_size_ <= 0 ) { - write_buffer_size_ = 268435456; // 256Mb - } - - // max_write_buffer_size - GetConfInt64("max-write-buffer-size", &max_write_buffer_size_); - if (max_write_buffer_size_ <= 0) { - max_write_buffer_size_ = 10737418240; // 10Gb - } - - // max_client_response_size - GetConfInt64("max-client-response-size", &max_client_response_size_); - if (max_client_response_size_ <= 0) { - max_client_response_size_ = 1073741824; // 1Gb - } - - // target_file_size_base - GetConfInt("target-file-size-base", &target_file_size_base_); - if (target_file_size_base_ <= 0) { - target_file_size_base_ = 1048576; // 10Mb - } - - max_cache_statistic_keys_ = 0; - GetConfInt("max-cache-statistic-keys", &max_cache_statistic_keys_); - if (max_cache_statistic_keys_ <= 0) { - max_cache_statistic_keys_ = 0; - } - - small_compaction_threshold_ = 5000; - GetConfInt("small-compaction-threshold", &small_compaction_threshold_); - if (small_compaction_threshold_ <= 0 - || small_compaction_threshold_ >= 100000) { - small_compaction_threshold_ = 5000; - } - - max_background_flushes_ = 1; - GetConfInt("max-background-flushes", &max_background_flushes_); - if (max_background_flushes_ <= 0) { - max_background_flushes_ = 1; - } - if (max_background_flushes_ >= 4) { - max_background_flushes_ = 4; - } - - max_background_compactions_ = 2; - GetConfInt("max-background-compactions", &max_background_compactions_); - if (max_background_compactions_ <= 0) { - max_background_compactions_ = 2; - } - if (max_background_compactions_ >= 8) { - max_background_compactions_ = 8; - } - - max_cache_files_ = 5000; - GetConfInt("max-cache-files", &max_cache_files_); - if (max_cache_files_ < -1) { - max_cache_files_ = 5000; - } - max_bytes_for_level_multiplier_ = 10; - GetConfInt("max-bytes-for-level-multiplier", &max_bytes_for_level_multiplier_); - if (max_bytes_for_level_multiplier_ < 10) { - max_bytes_for_level_multiplier_ = 5; - } - - 
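
The compact-cron value parsed above accepts either "start-end/usage" or "week/start-end/usage", with week in [1, 7], the start/end hours in [0, 23] and the disk-usage percentage in [0, 100]; anything else silently resets the option to empty. A hypothetical standalone restatement of that validation (ValidCompactCron is an illustrative helper, not code from this patch):

#include <cstdio>
#include <string>

// Accepts "start-end/usage" or "week/start-end/usage", mirroring the
// range checks applied to compact-cron in PikaConf::Load() above.
static bool ValidCompactCron(const std::string& s) {
  int week = 0, start = 0, end = 0, usage = 0;
  if (sscanf(s.c_str(), "%d/%d-%d/%d", &week, &start, &end, &usage) == 4) {
    return week >= 1 && week <= 7 && start >= 0 && start <= 23 &&
           end >= 0 && end <= 23 && usage >= 0 && usage <= 100;
  }
  if (sscanf(s.c_str(), "%d-%d/%d", &start, &end, &usage) == 3) {
    return start >= 0 && start <= 23 && end >= 0 && end <= 23 &&
           usage >= 0 && usage <= 100;
  }
  return false;
}

int main() {
  printf("%d\n", ValidCompactCron("03-04/30"));    // 1: daily 03:00-04:00 window
  printf("%d\n", ValidCompactCron("4/02-04/30"));  // 1: restricted to weekday 4
  printf("%d\n", ValidCompactCron("25-26/30"));    // 0: hours out of range
}
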
block_size_ = 4 * 1024; - GetConfInt64("block-size", &block_size_); - if (block_size_ <= 0) { - block_size_ = 4 * 1024; - } - - block_cache_ = 8 * 1024 * 1024; - GetConfInt64("block-cache", &block_cache_); - if (block_cache_ < 0) { - block_cache_ = 8 * 1024 * 1024; - } - - std::string sbc; - GetConfStr("share-block-cache", &sbc); - share_block_cache_ = (sbc == "yes") ? true : false; - - std::string ciafb; - GetConfStr("cache-index-and-filter-blocks", &ciafb); - cache_index_and_filter_blocks_ = (ciafb == "yes") ? true : false; - - std::string offh; - GetConfStr("optimize-filters-for-hits", &offh); - optimize_filters_for_hits_ = (offh == "yes") ? true : false; - - std::string lcdlb; - GetConfStr("level-compaction-dynamic-level-bytes", &lcdlb); - level_compaction_dynamic_level_bytes_ = (lcdlb == "yes") ? true : false; - - // daemonize - std::string dmz; - GetConfStr("daemonize", &dmz); - daemonize_ = (dmz == "yes") ? true : false; - - // binlog - std::string wb; - GetConfStr("write-binlog", &wb); - write_binlog_ = (wb == "no") ? false : true; - GetConfInt("binlog-file-size", &binlog_file_size_); - if (binlog_file_size_ < 1024 - || static_cast(binlog_file_size_) > (1024LL * 1024 * 1024)) { - binlog_file_size_ = 100 * 1024 * 1024; // 100M - } - GetConfStr("pidfile", &pidfile_); - - // db sync - GetConfStr("db-sync-path", &db_sync_path_); - db_sync_path_ = db_sync_path_.empty() ? "./dbsync/" : db_sync_path_; - if (db_sync_path_[db_sync_path_.length() - 1] != '/') { - db_sync_path_ += "/"; - } - GetConfInt("db-sync-speed", &db_sync_speed_); - if (db_sync_speed_ < 0 || db_sync_speed_ > 1024) { - db_sync_speed_ = 1024; - } - // network interface - network_interface_ = ""; - GetConfStr("network-interface", &network_interface_); - - // slaveof - slaveof_ = ""; - GetConfStr("slaveof", &slaveof_); - - // sync window size - int tmp_sync_window_size = kBinlogReadWinDefaultSize; - GetConfInt("sync-window-size", &tmp_sync_window_size); - if (tmp_sync_window_size <= 0) { - sync_window_size_.store(kBinlogReadWinDefaultSize); - } else if (tmp_sync_window_size > kBinlogReadWinMaxSize) { - sync_window_size_.store(kBinlogReadWinMaxSize); - } else { - sync_window_size_.store(tmp_sync_window_size); - } - - target_redis_host_ = "127.0.0.1"; - GetConfStr("target-redis-host", &target_redis_host_); - - target_redis_port_ = 6379; - GetConfInt("target-redis-port", &target_redis_port_); - - target_redis_pwd_ = ""; - GetConfStr("target-redis-pwd" , &target_redis_pwd_); - - sync_batch_num_ = 100; - GetConfInt("sync-batch-num", &sync_batch_num_); - - redis_sender_num_ = 8; - GetConfInt("redis-sender-num", &redis_sender_num_); - return ret; -} - -void PikaConf::TryPushDiffCommands(const std::string& command, const std::string& value) { - if (!CheckConfExist(command)) { - diff_commands_[command] = value; - } -} - -int PikaConf::ConfigRewrite() { - std::string userblacklist = suser_blacklist(); - - RWLock l(&rwlock_, true); - // Only set value for config item that can be config set. 
- SetConfInt("timeout", timeout_); - SetConfStr("requirepass", requirepass_); - SetConfStr("masterauth", masterauth_); - SetConfStr("userpass", userpass_); - SetConfStr("userblacklist", userblacklist); - SetConfStr("dump-prefix", bgsave_prefix_); - SetConfInt("maxclients", maxclients_); - SetConfInt("dump-expire", expire_dump_days_); - SetConfInt("expire-logs-days", expire_logs_days_); - SetConfInt("expire-logs-nums", expire_logs_nums_); - SetConfInt("root-connection-num", root_connection_num_); - SetConfStr("slowlog-write-errorlog", slowlog_write_errorlog_.load() ? "yes" : "no"); - SetConfInt("slowlog-log-slower-than", slowlog_log_slower_than_.load()); - SetConfInt("slowlog-max-len", slowlog_max_len_); - SetConfStr("write-binlog", write_binlog_ ? "yes" : "no"); - SetConfInt("max-cache-statistic-keys", max_cache_statistic_keys_); - SetConfInt("small-compaction-threshold", small_compaction_threshold_); - SetConfInt("max-client-response-size", max_client_response_size_); - SetConfInt("db-sync-speed", db_sync_speed_); - SetConfStr("compact-cron", compact_cron_); - SetConfStr("compact-interval", compact_interval_); - SetConfInt("slave-priority", slave_priority_); - SetConfInt("sync-window-size", sync_window_size_.load()); - // slaveof config item is special - SetConfStr("slaveof", slaveof_); - - if (!diff_commands_.empty()) { - std::vector filtered_items; - for (const auto& diff_command : diff_commands_) { - if (!diff_command.second.empty()) { - slash::BaseConf::Rep::ConfItem item(slash::BaseConf::Rep::kConf, diff_command.first, diff_command.second); - filtered_items.push_back(item); - } - } - if (!filtered_items.empty()) { - slash::BaseConf::Rep::ConfItem comment_item(slash::BaseConf::Rep::kComment, "# Generated by CONFIG REWRITE\n"); - PushConfItem(comment_item); - for (const auto& item : filtered_items) { - PushConfItem(item); - } - } - diff_commands_.clear(); - } - return WriteBack(); -} diff --git a/tools/pika_migrate/src/pika_data_distribution.cc b/tools/pika_migrate/src/pika_data_distribution.cc deleted file mode 100644 index e5e55dc51a..0000000000 --- a/tools/pika_migrate/src/pika_data_distribution.cc +++ /dev/null @@ -1,48 +0,0 @@ -// Copyright (c) 2015-present, Qihoo, Inc. All rights reserved. -// This source code is licensed under the BSD-style license found in the -// LICENSE file in the root directory of this source tree. An additional grant -// of patent rights can be found in the PATENTS file in the same directory. - -#include "include/pika_data_distribution.h" - -void HashModulo::Init() { -} - -uint32_t HashModulo::Distribute(const std::string& str, uint32_t partition_num) { - return std::hash()(str) % partition_num; -} - -void Crc32::Init() { - Crc32TableInit(IEEE_POLY); -} - -void Crc32::Crc32TableInit(uint32_t poly) { - int i, j; - for (i = 0; i < 256; i ++) { - uint32_t crc = i; - for (j = 0; j < 8; j ++) { - if (crc & 1) { - crc = (crc >> 1) ^ poly; - } else { - crc = (crc >> 1); - } - } - crc32tab[i] = crc; - } -} - -uint32_t Crc32::Distribute(const std::string &str, uint32_t partition_num) { - uint32_t crc = Crc32Update(0, str.data(), (int)str.size()); - // partition_num need to minus 1 - assert(partition_num > 1); - return (int)(crc & (partition_num == 0 ? 
0 : (partition_num - 1))); -} - -uint32_t Crc32::Crc32Update(uint32_t crc, const char* buf, int len) { - int i; - crc = ~crc; - for (i = 0; i < len; i ++) { - crc = crc32tab[(uint8_t)((char)crc ^ buf[i])] ^ (crc >> 8); - } - return ~crc; -} diff --git a/tools/pika_migrate/src/pika_dispatch_thread.cc b/tools/pika_migrate/src/pika_dispatch_thread.cc deleted file mode 100644 index 676f34843e..0000000000 --- a/tools/pika_migrate/src/pika_dispatch_thread.cc +++ /dev/null @@ -1,78 +0,0 @@ -// Copyright (c) 2015-present, Qihoo, Inc. All rights reserved. -// This source code is licensed under the BSD-style license found in the -// LICENSE file in the root directory of this source tree. An additional grant -// of patent rights can be found in the PATENTS file in the same directory. - -#include "include/pika_dispatch_thread.h" - -#include - -#include "include/pika_conf.h" -#include "include/pika_server.h" - -extern PikaConf* g_pika_conf; -extern PikaServer* g_pika_server; - -PikaDispatchThread::PikaDispatchThread(std::set &ips, int port, int work_num, - int cron_interval, int queue_limit) - : handles_(this) { - thread_rep_ = pink::NewDispatchThread(ips, port, work_num, &conn_factory_, - cron_interval, queue_limit, &handles_); - thread_rep_->set_thread_name("Dispatcher"); -} - -PikaDispatchThread::~PikaDispatchThread() { - thread_rep_->StopThread(); - LOG(INFO) << "dispatch thread " << thread_rep_->thread_id() << " exit!!!"; - delete thread_rep_; -} - -int PikaDispatchThread::StartThread() { - return thread_rep_->StartThread(); -} - -int64_t PikaDispatchThread::ThreadClientList(std::vector *clients) { - std::vector conns_info = - thread_rep_->conns_info(); - if (clients != nullptr) { - for (auto& info : conns_info) { - clients->push_back({ - info.fd, - info.ip_port, - info.last_interaction.tv_sec, - nullptr /* PinkConn pointer, doesn't need here */ - }); - } - } - return conns_info.size(); -} - -bool PikaDispatchThread::ClientKill(const std::string& ip_port) { - return thread_rep_->KillConn(ip_port); -} - -void PikaDispatchThread::ClientKillAll() { - thread_rep_->KillAllConns(); -} - -bool PikaDispatchThread::Handles::AccessHandle(std::string& ip) const { - if (ip == "127.0.0.1") { - ip = g_pika_server->host(); - } - - int client_num = pika_disptcher_->thread_rep_->conn_num(); - if ((client_num >= g_pika_conf->maxclients() + g_pika_conf->root_connection_num()) - || (client_num >= g_pika_conf->maxclients() && ip != g_pika_server->host())) { - LOG(WARNING) << "Max connections reach, Deny new comming: " << ip; - return false; - } - - DLOG(INFO) << "new clinet comming, ip: " << ip; - g_pika_server->incr_accumulative_connections(); - return true; -} - -void PikaDispatchThread::Handles::CronHandle() const { - pika_disptcher_->thread_rep_->set_keepalive_timeout(g_pika_conf->timeout()); - g_pika_server->ResetLastSecQuerynum(); -} diff --git a/tools/pika_migrate/src/pika_geo.cc b/tools/pika_migrate/src/pika_geo.cc deleted file mode 100644 index 64005920c3..0000000000 --- a/tools/pika_migrate/src/pika_geo.cc +++ /dev/null @@ -1,551 +0,0 @@ -// Copyright (c) 2015-present, Qihoo, Inc. All rights reserved. -// This source code is licensed under the BSD-style license found in the -// LICENSE file in the root directory of this source tree. An additional grant -// of patent rights can be found in the PATENTS file in the same directory. 
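
One detail worth flagging in Crc32::Distribute above: "crc & (partition_num - 1)" is a bitmask, so it only spreads keys evenly when partition_num is a power of two, and the "partition_num == 0" guard is dead code behind "assert(partition_num > 1)". A self-contained sketch of the modulo alternative, mirroring the deleted table-driven CRC (0xEDB88320 is the standard reflected IEEE polynomial, assumed here to match IEEE_POLY; DistributeModulo is an illustrative name):

#include <cassert>
#include <cstdint>
#include <cstdio>
#include <string>

static uint32_t crc32tab[256];

// Table init and update, mirroring Crc32TableInit/Crc32Update above.
static void Crc32TableInit(uint32_t poly) {
  for (uint32_t i = 0; i < 256; i++) {
    uint32_t crc = i;
    for (int j = 0; j < 8; j++) {
      crc = (crc & 1) ? (crc >> 1) ^ poly : (crc >> 1);
    }
    crc32tab[i] = crc;
  }
}

static uint32_t Crc32Update(uint32_t crc, const char* buf, int len) {
  crc = ~crc;
  for (int i = 0; i < len; i++) {
    crc = crc32tab[(uint8_t)((char)crc ^ buf[i])] ^ (crc >> 8);
  }
  return ~crc;
}

// Modulo variant: uniform for any partition_num > 0, at the cost of a
// division instead of the power-of-two bitmask used in the deleted code.
static uint32_t DistributeModulo(const std::string& key, uint32_t partition_num) {
  assert(partition_num > 0);
  return Crc32Update(0, key.data(), (int)key.size()) % partition_num;
}

int main() {
  Crc32TableInit(0xEDB88320);
  printf("%u\n", DistributeModulo("mykey", 16));
}
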
- -#include "include/pika_geo.h" - -#include - -#include "slash/include/slash_string.h" - -#include "include/pika_geohash_helper.h" - -void GeoAddCmd::DoInitial() { - if (!CheckArg(argv_.size())) { - res_.SetRes(CmdRes::kWrongNum, kCmdNameGeoAdd); - return; - } - size_t argc = argv_.size(); - if ((argc - 2) % 3 != 0) { - res_.SetRes(CmdRes::kWrongNum, kCmdNameGeoAdd); - return; - } - key_ = argv_[1]; - pos_.clear(); - struct GeoPoint point; - double longitude, latitude; - for (size_t index = 2; index < argc; index += 3) { - if (!slash::string2d(argv_[index].data(), argv_[index].size(), &longitude)) { - res_.SetRes(CmdRes::kInvalidFloat); - return; - } - if (!slash::string2d(argv_[index + 1].data(), argv_[index + 1].size(), &latitude)) { - res_.SetRes(CmdRes::kInvalidFloat); - return; - } - point.member = argv_[index + 2]; - point.longitude = longitude; - point.latitude = latitude; - pos_.push_back(point); - } - return; -} - -void GeoAddCmd::Do(std::shared_ptr partition) { - std::vector score_members; - for (const auto& geo_point : pos_) { - // Convert coordinates to geohash - GeoHashBits hash; - geohashEncodeWGS84(geo_point.longitude, geo_point.latitude, GEO_STEP_MAX, &hash); - GeoHashFix52Bits bits = geohashAlign52Bits(hash); - // Convert uint64 to double - double score; - std::string str_bits = std::to_string(bits); - slash::string2d(str_bits.data(), str_bits.size(), &score); - score_members.push_back({score, geo_point.member}); - } - int32_t count = 0; - rocksdb::Status s = partition->db()->ZAdd(key_, score_members, &count); - if (s.ok()) { - res_.AppendInteger(count); - } else { - res_.SetRes(CmdRes::kErrOther, s.ToString()); - } - return; -} - -void GeoPosCmd::DoInitial() { - if (!CheckArg(argv_.size())) { - res_.SetRes(CmdRes::kWrongNum, kCmdNameGeoPos); - return; - } - key_ = argv_[1]; - members_.clear(); - size_t pos = 2; - while (pos < argv_.size()) { - members_.push_back(argv_[pos++]); - } -} - -void GeoPosCmd::Do(std::shared_ptr partition) { - double score; - res_.AppendArrayLen(members_.size()); - for (const auto& member : members_) { - rocksdb::Status s = partition->db()->ZScore(key_, member, &score); - if (s.ok()) { - double xy[2]; - GeoHashBits hash = { .bits = (uint64_t)score, .step = GEO_STEP_MAX }; - geohashDecodeToLongLatWGS84(hash, xy); - - res_.AppendArrayLen(2); - char longitude[32]; - int64_t len = slash::d2string(longitude, sizeof(longitude), xy[0]); - res_.AppendStringLen(len); - res_.AppendContent(longitude); - - char latitude[32]; - len = slash::d2string(latitude, sizeof(latitude), xy[1]); - res_.AppendStringLen(len); - res_.AppendContent(latitude); - - } else if (s.IsNotFound()) { - res_.AppendStringLen(-1); - continue; - } else { - res_.SetRes(CmdRes::kErrOther, s.ToString()); - continue; - } - } -} - -static double length_converter(double meters, const std::string & unit) { - if (unit == "m") { - return meters; - } else if (unit == "km") { - return meters / 1000; - } else if (unit == "ft") { - return meters / 0.3048; - } else if (unit == "mi") { - return meters / 1609.34; - } else { - return -1; - } -} - -static bool check_unit(const std::string & unit) { - if (unit == "m" || unit == "km" || unit == "ft" || unit == "mi") { - return true; - } else { - return false; - } -} - -void GeoDistCmd::DoInitial() { - if (!CheckArg(argv_.size())) { - res_.SetRes(CmdRes::kWrongNum, kCmdNameGeoDist); - return; - } - if (argv_.size() < 4) { - res_.SetRes(CmdRes::kWrongNum, kCmdNameGeoDist); - return; - } else if (argv_.size() > 5) { - res_.SetRes(CmdRes::kSyntaxErr); - 
return; - } - key_ = argv_[1]; - first_pos_ = argv_[2]; - second_pos_ = argv_[3]; - if (argv_.size() == 5) { - unit_ = argv_[4]; - } else { - unit_ = "m"; - } - if (!check_unit(unit_)) { - res_.SetRes(CmdRes::kErrOther, "unsupported unit provided. please use m, km, ft, mi"); - return; - } -} - -void GeoDistCmd::Do(std::shared_ptr partition) { - double first_score, second_score, first_xy[2], second_xy[2]; - rocksdb::Status s = partition->db()->ZScore(key_, first_pos_, &first_score); - if (s.ok()) { - GeoHashBits hash = { .bits = (uint64_t)first_score, .step = GEO_STEP_MAX }; - geohashDecodeToLongLatWGS84(hash, first_xy); - } else if (s.IsNotFound()) { - res_.AppendStringLen(-1); - return; - } else { - res_.SetRes(CmdRes::kErrOther, s.ToString()); - return; - } - - s = partition->db()->ZScore(key_, second_pos_, &second_score); - if (s.ok()) { - GeoHashBits hash = { .bits = (uint64_t)second_score, .step = GEO_STEP_MAX }; - geohashDecodeToLongLatWGS84(hash, second_xy); - } else if (s.IsNotFound()) { - res_.AppendStringLen(-1); - return; - } else { - res_.SetRes(CmdRes::kErrOther, s.ToString()); - return; - } - - double distance = geohashGetDistance(first_xy[0], first_xy[1], second_xy[0], second_xy[1]); - distance = length_converter(distance, unit_); - char buf[32]; - sprintf(buf, "%.4f", distance); - res_.AppendStringLen(strlen(buf)); - res_.AppendContent(buf); -} - -void GeoHashCmd::DoInitial() { - if (!CheckArg(argv_.size())) { - res_.SetRes(CmdRes::kWrongNum, kCmdNameGeoHash); - return; - } - key_ = argv_[1]; - members_.clear(); - size_t pos = 2; - while (pos < argv_.size()) { - members_.push_back(argv_[pos++]); - } -} - -void GeoHashCmd::Do(std::shared_ptr partition) { - const char * geoalphabet= "0123456789bcdefghjkmnpqrstuvwxyz"; - res_.AppendArrayLen(members_.size()); - for (const auto& member : members_) { - double score; - rocksdb::Status s = partition->db()->ZScore(key_, member, &score); - if (s.ok()) { - double xy[2]; - GeoHashBits hash = { .bits = (uint64_t)score, .step = GEO_STEP_MAX }; - geohashDecodeToLongLatWGS84(hash, xy); - GeoHashRange r[2]; - GeoHashBits encode_hash; - r[0].min = -180; - r[0].max = 180; - r[1].min = -90; - r[1].max = 90; - geohashEncode(&r[0], &r[1], xy[0], xy[1], 26, &encode_hash); - - char buf[12]; - int i; - for (i = 0; i < 11; i++) { - int idx = (encode_hash.bits >> (52-((i+1)*5))) & 0x1f; - buf[i] = geoalphabet[idx]; - } - buf[11] = '\0'; - res_.AppendStringLen(11); - res_.AppendContent(buf); - continue; - } else if (s.IsNotFound()) { - res_.AppendStringLen(-1); - continue; - } else { - res_.SetRes(CmdRes::kErrOther, s.ToString()); - continue; - } - } -} - -static bool sort_distance_asc(const NeighborPoint & pos1, const NeighborPoint & pos2) { - return pos1.distance < pos2.distance; -} - -static bool sort_distance_desc(const NeighborPoint & pos1, const NeighborPoint & pos2) { - return pos1.distance > pos2.distance; -} - -static void GetAllNeighbors(std::shared_ptr partition, std::string & key, GeoRange & range, CmdRes & res) { - rocksdb::Status s; - double longitude = range.longitude, latitude = range.latitude, distance = range.distance; - int count_limit = 0; - // Convert other units to meters - if (range.unit == "m") { - distance = distance; - } else if (range.unit == "km") { - distance = distance * 1000; - } else if (range.unit == "ft") { - distance = distance * 0.3048; - } else if (range.unit == "mi") { - distance = distance * 1609.34; - } else { - distance = -1; - } - // Search the zset for all matching points - GeoHashRadius georadius = 
geohashGetAreasByRadiusWGS84(longitude, latitude, distance); - GeoHashBits neighbors[9]; - neighbors[0] = georadius.hash; - neighbors[1] = georadius.neighbors.north; - neighbors[2] = georadius.neighbors.south; - neighbors[3] = georadius.neighbors.east; - neighbors[4] = georadius.neighbors.west; - neighbors[5] = georadius.neighbors.north_east; - neighbors[6] = georadius.neighbors.north_west; - neighbors[7] = georadius.neighbors.south_east; - neighbors[8] = georadius.neighbors.south_west; - - // For each neighbor, get all the matching - // members and add them to the potential result list. - std::vector result; - int last_processed = 0; - for (size_t i = 0; i < sizeof(neighbors) / sizeof(*neighbors); i++) { - GeoHashFix52Bits min, max; - if (HASHISZERO(neighbors[i])) - continue; - min = geohashAlign52Bits(neighbors[i]); - neighbors[i].bits++; - max = geohashAlign52Bits(neighbors[i]); - // When a huge Radius (in the 5000 km range or more) is used, - // adjacent neighbors can be the same, so need to remove duplicated elements - if(last_processed && neighbors[i].bits == neighbors[last_processed].bits && neighbors[i].step == neighbors[last_processed].step) { - continue; - } - std::vector score_members; - s = partition->db()->ZRangebyscore(key, (double)min, (double)max, true, true, &score_members); - if (!s.ok() && !s.IsNotFound()) { - res.SetRes(CmdRes::kErrOther, s.ToString()); - return; - } - // Insert into result only if the point is within the search area. - for (size_t i = 0; i < score_members.size(); ++i) { - double xy[2], real_distance; - GeoHashBits hash = { .bits = (uint64_t)score_members[i].score, .step = GEO_STEP_MAX }; - geohashDecodeToLongLatWGS84(hash, xy); - if(geohashGetDistanceIfInRadiusWGS84(longitude, latitude, xy[0], xy[1], distance, &real_distance)) { - NeighborPoint item; - item.member = score_members[i].member; - item.score = score_members[i].score; - item.distance = real_distance; - result.push_back(item); - } - } - last_processed = i; - } - - // If using the count opiton - if (range.count) { - count_limit = static_cast(result.size()) < range.count_limit ? result.size() : range.count_limit; - } else { - count_limit = result.size(); - } - // If using sort option - if (range.sort == Asc) { - std::sort(result.begin(), result.end(), sort_distance_asc); - } else if(range.sort == Desc) { - std::sort(result.begin(), result.end(), sort_distance_desc); - } - - if (range.store || range.storedist) { - // Target key, create a sorted set with the results. - std::vector score_members; - for (int i = 0; i < count_limit; ++i) { - double distance = length_converter(result[i].distance, range.unit); - double score = range.store ? result[i].score : distance; - score_members.push_back({score, result[i].member}); - } - int32_t count = 0; - s = partition->db()->ZAdd(range.storekey, score_members, &count); - if (!s.ok()) { - res.SetRes(CmdRes::kErrOther, s.ToString()); - return; - } - res.AppendInteger(count_limit); - return; - } else { - // No target key, return results to user. 
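
The per-cell score window used in the scan above comes from 52-bit alignment: a neighbor cell's interleaved hash, shifted left into the fixed 52-bit space, is the smallest zset score inside that cell, and repeating the computation after "neighbors[i].bits++" yields the upper bound handed to ZRangebyscore (the deleted code scans both bounds inclusively). A minimal illustration, assuming the GeoHashBits layout from this patch (the cell id in main is arbitrary):

#include <cstdint>
#include <cstdio>

struct GeoHashBits { uint64_t bits; uint8_t step; };

// Mirrors geohashAlign52Bits: promote a step-sized cell id into the
// fixed 52-bit score space used for the geo zset members.
static uint64_t Align52(GeoHashBits h) { return h.bits << (52 - h.step * 2); }

int main() {
  GeoHashBits cell = {0x2AF5u, 8};  // an arbitrary step-8 cell id
  uint64_t min = Align52(cell);     // smallest 52-bit score inside the cell
  cell.bits++;                      // next sibling cell
  uint64_t max = Align52(cell);     // upper bound of the scan window
  printf("ZRANGEBYSCORE window: [%llu, %llu]\n",
         (unsigned long long)min, (unsigned long long)max);
}
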
- - // For each the result - res.AppendArrayLen(count_limit); - for (int i = 0; i < count_limit; ++i) { - if (range.option_num != 0) { - res.AppendArrayLen(range.option_num+1); - } - // Member - res.AppendStringLen(result[i].member.size()); - res.AppendContent(result[i].member); - - // If using withdist option - if (range.withdist) { - double xy[2]; - GeoHashBits hash = { .bits = (uint64_t)result[i].score, .step = GEO_STEP_MAX }; - geohashDecodeToLongLatWGS84(hash, xy); - double distance = geohashGetDistance(longitude, latitude, xy[0], xy[1]); - distance = length_converter(distance, range.unit); - char buf[32]; - sprintf(buf, "%.4f", distance); - res.AppendStringLen(strlen(buf)); - res.AppendContent(buf); - } - // If using withhash option - if (range.withhash) { - res.AppendInteger(result[i].score); - } - // If using withcoord option - if (range.withcoord) { - res.AppendArrayLen(2); - double xy[2]; - GeoHashBits hash = { .bits = (uint64_t)result[i].score, .step = GEO_STEP_MAX }; - geohashDecodeToLongLatWGS84(hash, xy); - - char longitude[32]; - int64_t len = slash::d2string(longitude, sizeof(longitude), xy[0]); - res.AppendStringLen(len); - res.AppendContent(longitude); - - char latitude[32]; - len = slash::d2string(latitude, sizeof(latitude), xy[1]); - res.AppendStringLen(len); - res.AppendContent(latitude); - } - } - } -} - -void GeoRadiusCmd::DoInitial() { - if (!CheckArg(argv_.size())) { - res_.SetRes(CmdRes::kWrongNum, kCmdNameGeoRadius); - return; - } - key_ = argv_[1]; - slash::string2d(argv_[2].data(), argv_[2].size(), &range_.longitude); - slash::string2d(argv_[3].data(), argv_[3].size(), &range_.latitude); - slash::string2d(argv_[4].data(), argv_[4].size(), &range_.distance); - range_.unit = argv_[5]; - if (!check_unit(range_.unit)) { - res_.SetRes(CmdRes::kErrOther, "unsupported unit provided. 
please use m, km, ft, mi"); - return; - } - size_t pos = 6; - while (pos < argv_.size()) { - if (!strcasecmp(argv_[pos].c_str(), "withdist")) { - range_.withdist = true; - range_.option_num++; - } else if (!strcasecmp(argv_[pos].c_str(), "withhash")) { - range_.withhash = true; - range_.option_num++; - } else if (!strcasecmp(argv_[pos].c_str(), "withcoord")) { - range_.withcoord = true; - range_.option_num++; - } else if (!strcasecmp(argv_[pos].c_str(), "count")) { - range_.count = true; - if (argv_.size() < (pos+2)) { - res_.SetRes(CmdRes::kSyntaxErr); - return; - } - std::string str_count = argv_[++pos]; - for (auto s : str_count) { - if (!isdigit(s)) { - res_.SetRes(CmdRes::kErrOther, "value is not an integer or out of range"); - return; - } - } - range_.count_limit = std::stoi(str_count); - } else if (!strcasecmp(argv_[pos].c_str(), "store")) { - range_.store = true; - if (argv_.size() < (pos+2)) { - res_.SetRes(CmdRes::kSyntaxErr); - return; - } - range_.storekey = argv_[++pos]; - } else if (!strcasecmp(argv_[pos].c_str(), "storedist")) { - range_.storedist = true; - if (argv_.size() < (pos+2)) { - res_.SetRes(CmdRes::kSyntaxErr); - return; - } - range_.storekey = argv_[++pos]; - } else if (!strcasecmp(argv_[pos].c_str(), "asc")) { - range_.sort = Asc; - } else if (!strcasecmp(argv_[pos].c_str(), "desc")) { - range_.sort = Desc; - } else { - res_.SetRes(CmdRes::kSyntaxErr); - return; - } - pos++; - } - if (range_.store && (range_.withdist || range_.withcoord || range_.withhash)) { - res_.SetRes(CmdRes::kErrOther, "STORE option in GEORADIUS is not compatible with WITHDIST, WITHHASH and WITHCOORDS options"); - return; - } -} - -void GeoRadiusCmd::Do(std::shared_ptr partition) { - GetAllNeighbors(partition, key_, range_, this->res_); -} - -void GeoRadiusByMemberCmd::DoInitial() { - if (!CheckArg(argv_.size())) { - res_.SetRes(CmdRes::kWrongNum, kCmdNameGeoRadius); - return; - } - key_ = argv_[1]; - range_.member = argv_[2]; - slash::string2d(argv_[3].data(), argv_[3].size(), &range_.distance); - range_.unit = argv_[4]; - if (!check_unit(range_.unit)) { - res_.SetRes(CmdRes::kErrOther, "unsupported unit provided. 
please use m, km, ft, mi"); - return; - } - size_t pos = 5; - while (pos < argv_.size()) { - if (!strcasecmp(argv_[pos].c_str(), "withdist")) { - range_.withdist = true; - range_.option_num++; - } else if (!strcasecmp(argv_[pos].c_str(), "withhash")) { - range_.withhash = true; - range_.option_num++; - } else if (!strcasecmp(argv_[pos].c_str(), "withcoord")) { - range_.withcoord = true; - range_.option_num++; - } else if (!strcasecmp(argv_[pos].c_str(), "count")) { - range_.count = true; - if (argv_.size() < (pos+2)) { - res_.SetRes(CmdRes::kSyntaxErr); - return; - } - std::string str_count = argv_[++pos]; - for (auto s : str_count) { - if (!isdigit(s)) { - res_.SetRes(CmdRes::kErrOther, "value is not an integer or out of range"); - return; - } - } - range_.count_limit = std::stoi(str_count); - } else if (!strcasecmp(argv_[pos].c_str(), "store")) { - range_.store = true; - if (argv_.size() < (pos+2)) { - res_.SetRes(CmdRes::kSyntaxErr); - return; - } - range_.storekey = argv_[++pos]; - } else if (!strcasecmp(argv_[pos].c_str(), "storedist")) { - range_.storedist = true; - if (argv_.size() < (pos+2)) { - res_.SetRes(CmdRes::kSyntaxErr); - return; - } - range_.storekey = argv_[++pos]; - } else if (!strcasecmp(argv_[pos].c_str(), "asc")) { - range_.sort = Asc; - } else if (!strcasecmp(argv_[pos].c_str(), "desc")) { - range_.sort = Desc; - } else { - res_.SetRes(CmdRes::kSyntaxErr); - return; - } - pos++; - } - if (range_.store && (range_.withdist || range_.withcoord || range_.withhash)) { - res_.SetRes(CmdRes::kErrOther, "STORE option in GEORADIUS is not compatible with WITHDIST, WITHHASH and WITHCOORDS options"); - return; - } -} - -void GeoRadiusByMemberCmd::Do(std::shared_ptr partition) { - double score; - rocksdb::Status s = partition->db()->ZScore(key_, range_.member, &score); - if (s.ok()) { - double xy[2]; - GeoHashBits hash = { .bits = (uint64_t)score, .step = GEO_STEP_MAX }; - geohashDecodeToLongLatWGS84(hash, xy); - range_.longitude = xy[0]; - range_.latitude = xy[1]; - } - GetAllNeighbors(partition, key_, range_, this->res_); -} diff --git a/tools/pika_migrate/src/pika_geohash.cc b/tools/pika_migrate/src/pika_geohash.cc deleted file mode 100644 index 2ad66314b8..0000000000 --- a/tools/pika_migrate/src/pika_geohash.cc +++ /dev/null @@ -1,295 +0,0 @@ -/* - * Copyright (c) 2013-2014, yinqiwen - * Copyright (c) 2014, Matt Stancliff . - * Copyright (c) 2015-2016, Salvatore Sanfilippo . - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are met: - * - * * Redistributions of source code must retain the above copyright notice, - * this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * * Neither the name of Redis nor the names of its contributors may be used - * to endorse or promote products derived from this software without - * specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" - * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE - * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS - * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR - * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF - * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS - * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN - * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) - * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF - * THE POSSIBILITY OF SUCH DAMAGE. - */ -#include "include/pika_geohash.h" - -/** - * Hashing works like this: - * Divide the world into 4 buckets. Label each one as such: - * ----------------- - * | | | - * | | | - * | 0,1 | 1,1 | - * ----------------- - * | | | - * | | | - * | 0,0 | 1,0 | - * ----------------- - */ - -/* Interleave lower bits of x and y, so the bits of x - * are in the even positions and bits from y in the odd; - * x and y must initially be less than 2**32 (65536). - * From: https://graphics.stanford.edu/~seander/bithacks.html#InterleaveBMN - */ -static inline uint64_t interleave64(uint32_t xlo, uint32_t ylo) { - static const uint64_t B[] = {0x5555555555555555ULL, 0x3333333333333333ULL, - 0x0F0F0F0F0F0F0F0FULL, 0x00FF00FF00FF00FFULL, - 0x0000FFFF0000FFFFULL}; - static const unsigned int S[] = {1, 2, 4, 8, 16}; - - uint64_t x = xlo; - uint64_t y = ylo; - - x = (x | (x << S[4])) & B[4]; - y = (y | (y << S[4])) & B[4]; - - x = (x | (x << S[3])) & B[3]; - y = (y | (y << S[3])) & B[3]; - - x = (x | (x << S[2])) & B[2]; - y = (y | (y << S[2])) & B[2]; - - x = (x | (x << S[1])) & B[1]; - y = (y | (y << S[1])) & B[1]; - - x = (x | (x << S[0])) & B[0]; - y = (y | (y << S[0])) & B[0]; - - return x | (y << 1); -} - -/* reverse the interleave process - * derived from http://stackoverflow.com/questions/4909263 - */ -static inline uint64_t deinterleave64(uint64_t interleaved) { - static const uint64_t B[] = {0x5555555555555555ULL, 0x3333333333333333ULL, - 0x0F0F0F0F0F0F0F0FULL, 0x00FF00FF00FF00FFULL, - 0x0000FFFF0000FFFFULL, 0x00000000FFFFFFFFULL}; - static const unsigned int S[] = {0, 1, 2, 4, 8, 16}; - - uint64_t x = interleaved; - uint64_t y = interleaved >> 1; - - x = (x | (x >> S[0])) & B[0]; - y = (y | (y >> S[0])) & B[0]; - - x = (x | (x >> S[1])) & B[1]; - y = (y | (y >> S[1])) & B[1]; - - x = (x | (x >> S[2])) & B[2]; - y = (y | (y >> S[2])) & B[2]; - - x = (x | (x >> S[3])) & B[3]; - y = (y | (y >> S[3])) & B[3]; - - x = (x | (x >> S[4])) & B[4]; - y = (y | (y >> S[4])) & B[4]; - - x = (x | (x >> S[5])) & B[5]; - y = (y | (y >> S[5])) & B[5]; - - return x | (y << 32); -} - -void geohashGetCoordRange(GeoHashRange *long_range, GeoHashRange *lat_range) { - /* These are constraints from EPSG:900913 / EPSG:3785 / OSGEO:41001 */ - /* We can't geocode at the north/south pole. */ - long_range->max = GEO_LONG_MAX; - long_range->min = GEO_LONG_MIN; - lat_range->max = GEO_LAT_MAX; - lat_range->min = GEO_LAT_MIN; -} - -int geohashEncode(const GeoHashRange *long_range, const GeoHashRange *lat_range, - double longitude, double latitude, uint8_t step, - GeoHashBits *hash) { - /* Check basic arguments sanity. */ - if (hash == NULL || step > 32 || step == 0 || - RANGEPISZERO(lat_range) || RANGEPISZERO(long_range)) return 0; - - /* Return an error when trying to index outside the supported - * constraints. 
*/ - if (longitude > 180 || longitude < -180 || - latitude > 85.05112878 || latitude < -85.05112878) return 0; - - hash->bits = 0; - hash->step = step; - - if (latitude < lat_range->min || latitude > lat_range->max || - longitude < long_range->min || longitude > long_range->max) { - return 0; - } - - double lat_offset = - (latitude - lat_range->min) / (lat_range->max - lat_range->min); - double long_offset = - (longitude - long_range->min) / (long_range->max - long_range->min); - - /* convert to fixed point based on the step size */ - lat_offset *= (1ULL << step); - long_offset *= (1ULL << step); - hash->bits = interleave64(lat_offset, long_offset); - return 1; -} - -int geohashEncodeType(double longitude, double latitude, uint8_t step, GeoHashBits *hash) { - GeoHashRange r[2] = {{0}}; - geohashGetCoordRange(&r[0], &r[1]); - return geohashEncode(&r[0], &r[1], longitude, latitude, step, hash); -} - -int geohashEncodeWGS84(double longitude, double latitude, uint8_t step, - GeoHashBits *hash) { - return geohashEncodeType(longitude, latitude, step, hash); -} - -int geohashDecode(const GeoHashRange long_range, const GeoHashRange lat_range, - const GeoHashBits hash, GeoHashArea *area) { - if (HASHISZERO(hash) || NULL == area || RANGEISZERO(lat_range) || - RANGEISZERO(long_range)) { - return 0; - } - - area->hash = hash; - uint8_t step = hash.step; - uint64_t hash_sep = deinterleave64(hash.bits); /* hash = [LAT][LONG] */ - - double lat_scale = lat_range.max - lat_range.min; - double long_scale = long_range.max - long_range.min; - - uint32_t ilato = hash_sep; /* get lat part of deinterleaved hash */ - uint32_t ilono = hash_sep >> 32; /* shift over to get long part of hash */ - - /* divide by 2**step. - * Then, for 0-1 coordinate, multiply times scale and add - to the min to get the absolute coordinate. 
*/ - area->latitude.min = - lat_range.min + (ilato * 1.0 / (1ull << step)) * lat_scale; - area->latitude.max = - lat_range.min + ((ilato + 1) * 1.0 / (1ull << step)) * lat_scale; - area->longitude.min = - long_range.min + (ilono * 1.0 / (1ull << step)) * long_scale; - area->longitude.max = - long_range.min + ((ilono + 1) * 1.0 / (1ull << step)) * long_scale; - - return 1; -} - -int geohashDecodeType(const GeoHashBits hash, GeoHashArea *area) { - GeoHashRange r[2] = {{0}}; - geohashGetCoordRange(&r[0], &r[1]); - return geohashDecode(r[0], r[1], hash, area); -} - -int geohashDecodeWGS84(const GeoHashBits hash, GeoHashArea *area) { - return geohashDecodeType(hash, area); -} - -int geohashDecodeAreaToLongLat(const GeoHashArea *area, double *xy) { - if (!xy) return 0; - xy[0] = (area->longitude.min + area->longitude.max) / 2; - xy[1] = (area->latitude.min + area->latitude.max) / 2; - return 1; -} - -int geohashDecodeToLongLatType(const GeoHashBits hash, double *xy) { - GeoHashArea area = {{0}}; - if (!xy || !geohashDecodeType(hash, &area)) - return 0; - return geohashDecodeAreaToLongLat(&area, xy); -} - -int geohashDecodeToLongLatWGS84(const GeoHashBits hash, double *xy) { - return geohashDecodeToLongLatType(hash, xy); -} - -static void geohash_move_x(GeoHashBits *hash, int8_t d) { - if (d == 0) - return; - - uint64_t x = hash->bits & 0xaaaaaaaaaaaaaaaaULL; - uint64_t y = hash->bits & 0x5555555555555555ULL; - - uint64_t zz = 0x5555555555555555ULL >> (64 - hash->step * 2); - - if (d > 0) { - x = x + (zz + 1); - } else { - x = x | zz; - x = x - (zz + 1); - } - - x &= (0xaaaaaaaaaaaaaaaaULL >> (64 - hash->step * 2)); - hash->bits = (x | y); -} - -static void geohash_move_y(GeoHashBits *hash, int8_t d) { - if (d == 0) - return; - - uint64_t x = hash->bits & 0xaaaaaaaaaaaaaaaaULL; - uint64_t y = hash->bits & 0x5555555555555555ULL; - - uint64_t zz = 0xaaaaaaaaaaaaaaaaULL >> (64 - hash->step * 2); - if (d > 0) { - y = y + (zz + 1); - } else { - y = y | zz; - y = y - (zz + 1); - } - y &= (0x5555555555555555ULL >> (64 - hash->step * 2)); - hash->bits = (x | y); -} - -void geohashNeighbors(const GeoHashBits *hash, GeoHashNeighbors *neighbors) { - neighbors->east = *hash; - neighbors->west = *hash; - neighbors->north = *hash; - neighbors->south = *hash; - neighbors->south_east = *hash; - neighbors->south_west = *hash; - neighbors->north_east = *hash; - neighbors->north_west = *hash; - - geohash_move_x(&neighbors->east, 1); - geohash_move_y(&neighbors->east, 0); - - geohash_move_x(&neighbors->west, -1); - geohash_move_y(&neighbors->west, 0); - - geohash_move_x(&neighbors->south, 0); - geohash_move_y(&neighbors->south, -1); - - geohash_move_x(&neighbors->north, 0); - geohash_move_y(&neighbors->north, 1); - - geohash_move_x(&neighbors->north_west, -1); - geohash_move_y(&neighbors->north_west, 1); - - geohash_move_x(&neighbors->north_east, 1); - geohash_move_y(&neighbors->north_east, 1); - - geohash_move_x(&neighbors->south_east, 1); - geohash_move_y(&neighbors->south_east, -1); - - geohash_move_x(&neighbors->south_west, -1); - geohash_move_y(&neighbors->south_west, -1); -} diff --git a/tools/pika_migrate/src/pika_geohash_helper.cc b/tools/pika_migrate/src/pika_geohash_helper.cc deleted file mode 100644 index a2f18d7090..0000000000 --- a/tools/pika_migrate/src/pika_geohash_helper.cc +++ /dev/null @@ -1,235 +0,0 @@ -/* - * Copyright (c) 2013-2014, yinqiwen - * Copyright (c) 2014, Matt Stancliff . - * Copyright (c) 2015-2016, Salvatore Sanfilippo . - * All rights reserved. 
- * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are met: - * - * * Redistributions of source code must retain the above copyright notice, - * this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * * Neither the name of Redis nor the names of its contributors may be used - * to endorse or promote products derived from this software without - * specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" - * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE - * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS - * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR - * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF - * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS - * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN - * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) - * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF - * THE POSSIBILITY OF SUCH DAMAGE. - */ - -/* This is a C++ to C conversion from the ardb project. - * This file started out as: - * https://github.com/yinqiwen/ardb/blob/d42503/src/geo/geohash_helper.cpp - */ - -//#include "fmacros.h" -#include "include/pika_geohash_helper.h" -//#include "debugmacro.h" -#include - -#define D_R (M_PI / 180.0) -#define R_MAJOR 6378137.0 -#define R_MINOR 6356752.3142 -#define RATIO (R_MINOR / R_MAJOR) -#define ECCENT (sqrt(1.0 - (RATIO *RATIO))) -#define COM (0.5 * ECCENT) - -/// @brief The usual PI/180 constant -const double DEG_TO_RAD = 0.017453292519943295769236907684886; -/// @brief Earth's quatratic mean radius for WGS-84 -const double EARTH_RADIUS_IN_METERS = 6372797.560856; - -const double MERCATOR_MAX = 20037726.37; -const double MERCATOR_MIN = -20037726.37; - -static inline double deg_rad(double ang) { return ang * D_R; } -static inline double rad_deg(double ang) { return ang / D_R; } - -/* This function is used in order to estimate the step (bits precision) - * of the 9 search area boxes during radius queries. */ -uint8_t geohashEstimateStepsByRadius(double range_meters, double lat) { - if (range_meters == 0) return 26; - int step = 1; - while (range_meters < MERCATOR_MAX) { - range_meters *= 2; - step++; - } - step -= 2; /* Make sure range is included in most of the base cases. */ - - /* Wider range torwards the poles... Note: it is possible to do better - * than this approximation by computing the distance between meridians - * at this latitude, but this does the trick for now. */ - if (lat > 66 || lat < -66) { - step--; - if (lat > 80 || lat < -80) step--; - } - - /* Frame to valid range. */ - if (step < 1) step = 1; - if (step > 26) step = 26; - return step; -} - -/* Return the bounding box of the search area centered at latitude,longitude - * having a radius of radius_meter. bounds[0] - bounds[2] is the minimum - * and maxium longitude, while bounds[1] - bounds[3] is the minimum and - * maximum latitude. 
- * - * This function does not behave correctly with very large radius values, for - * instance for the coordinates 81.634948934258375 30.561509253718668 and a - * radius of 7083 kilometers, it reports as bounding boxes: - * - * min_lon 7.680495, min_lat -33.119473, max_lon 155.589402, max_lat 94.242491 - * - * However, for instance, a min_lon of 7.680495 is not correct, because the - * point -1.27579540014266968 61.33421815228281559 is at less than 7000 - * kilometers away. - * - * Since this function is currently only used as an optimization, the - * optimization is not used for very big radiuses, however the function - * should be fixed. */ -int geohashBoundingBox(double longitude, double latitude, double radius_meters, - double *bounds) { - if (!bounds) return 0; - - bounds[0] = longitude - rad_deg(radius_meters/EARTH_RADIUS_IN_METERS/cos(deg_rad(latitude))); - bounds[2] = longitude + rad_deg(radius_meters/EARTH_RADIUS_IN_METERS/cos(deg_rad(latitude))); - bounds[1] = latitude - rad_deg(radius_meters/EARTH_RADIUS_IN_METERS); - bounds[3] = latitude + rad_deg(radius_meters/EARTH_RADIUS_IN_METERS); - return 1; -} - -/* Return a set of areas (center + 8) that are able to cover a range query - * for the specified position and radius. */ -GeoHashRadius geohashGetAreasByRadius(double longitude, double latitude, double radius_meters) { - GeoHashRange long_range, lat_range; - GeoHashRadius radius; - GeoHashBits hash; - GeoHashNeighbors neighbors; - GeoHashArea area; - double min_lon, max_lon, min_lat, max_lat; - double bounds[4]; - int steps; - - geohashBoundingBox(longitude, latitude, radius_meters, bounds); - min_lon = bounds[0]; - min_lat = bounds[1]; - max_lon = bounds[2]; - max_lat = bounds[3]; - - steps = geohashEstimateStepsByRadius(radius_meters,latitude); - - geohashGetCoordRange(&long_range,&lat_range); - geohashEncode(&long_range,&lat_range,longitude,latitude,steps,&hash); - geohashNeighbors(&hash,&neighbors); - geohashDecode(long_range,lat_range,hash,&area); - - /* Check if the step is enough at the limits of the covered area. - * Sometimes when the search area is near an edge of the - * area, the estimated step is not small enough, since one of the - * north / south / west / east square is too near to the search area - * to cover everything. */ - int decrease_step = 0; - { - GeoHashArea north, south, east, west; - - geohashDecode(long_range, lat_range, neighbors.north, &north); - geohashDecode(long_range, lat_range, neighbors.south, &south); - geohashDecode(long_range, lat_range, neighbors.east, &east); - geohashDecode(long_range, lat_range, neighbors.west, &west); - - if (geohashGetDistance(longitude,latitude,longitude,north.latitude.max) - < radius_meters) decrease_step = 1; - if (geohashGetDistance(longitude,latitude,longitude,south.latitude.min) - < radius_meters) decrease_step = 1; - if (geohashGetDistance(longitude,latitude,east.longitude.max,latitude) - < radius_meters) decrease_step = 1; - if (geohashGetDistance(longitude,latitude,west.longitude.min,latitude) - < radius_meters) decrease_step = 1; - } - - if (steps > 1 && decrease_step) { - steps--; - geohashEncode(&long_range,&lat_range,longitude,latitude,steps,&hash); - geohashNeighbors(&hash,&neighbors); - geohashDecode(long_range,lat_range,hash,&area); - } - - /* Exclude the search areas that are useless. 
*/ - if (steps >= 2) { - if (area.latitude.min < min_lat) { - GZERO(neighbors.south); - GZERO(neighbors.south_west); - GZERO(neighbors.south_east); - } - if (area.latitude.max > max_lat) { - GZERO(neighbors.north); - GZERO(neighbors.north_east); - GZERO(neighbors.north_west); - } - if (area.longitude.min < min_lon) { - GZERO(neighbors.west); - GZERO(neighbors.south_west); - GZERO(neighbors.north_west); - } - if (area.longitude.max > max_lon) { - GZERO(neighbors.east); - GZERO(neighbors.south_east); - GZERO(neighbors.north_east); - } - } - radius.hash = hash; - radius.neighbors = neighbors; - radius.area = area; - return radius; -} - -GeoHashRadius geohashGetAreasByRadiusWGS84(double longitude, double latitude, - double radius_meters) { - return geohashGetAreasByRadius(longitude, latitude, radius_meters); -} - -GeoHashFix52Bits geohashAlign52Bits(const GeoHashBits hash) { - uint64_t bits = hash.bits; - bits <<= (52 - hash.step * 2); - return bits; -} - -/* Calculate distance using haversin great circle distance formula. */ -double geohashGetDistance(double lon1d, double lat1d, double lon2d, double lat2d) { - double lat1r, lon1r, lat2r, lon2r, u, v; - lat1r = deg_rad(lat1d); - lon1r = deg_rad(lon1d); - lat2r = deg_rad(lat2d); - lon2r = deg_rad(lon2d); - u = sin((lat2r - lat1r) / 2); - v = sin((lon2r - lon1r) / 2); - return 2.0 * EARTH_RADIUS_IN_METERS * - asin(sqrt(u * u + cos(lat1r) * cos(lat2r) * v * v)); -} - -int geohashGetDistanceIfInRadius(double x1, double y1, - double x2, double y2, double radius, - double *distance) { - *distance = geohashGetDistance(x1, y1, x2, y2); - if (*distance > radius) return 0; - return 1; -} - -int geohashGetDistanceIfInRadiusWGS84(double x1, double y1, double x2, - double y2, double radius, - double *distance) { - return geohashGetDistanceIfInRadius(x1, y1, x2, y2, radius, distance); -} diff --git a/tools/pika_migrate/src/pika_hash.cc b/tools/pika_migrate/src/pika_hash.cc deleted file mode 100644 index 2f0e4fdf7d..0000000000 --- a/tools/pika_migrate/src/pika_hash.cc +++ /dev/null @@ -1,609 +0,0 @@ -// Copyright (c) 2015-present, Qihoo, Inc. All rights reserved. -// This source code is licensed under the BSD-style license found in the -// LICENSE file in the root directory of this source tree. An additional grant -// of patent rights can be found in the PATENTS file in the same directory. 
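
geohashGetDistance above is the haversine great-circle formula, d = 2R * asin(sqrt(sin^2(dlat/2) + cos(lat1)*cos(lat2)*sin^2(dlon/2))), evaluated with the quadratic-mean Earth radius 6372797.560856 m. A self-contained sanity check (Haversine is an illustrative re-implementation; the city coordinates and the ~344 km figure are approximate):

#include <cmath>
#include <cstdio>

static const double kEarthRadiusM = 6372797.560856;  // radius used above
static const double kDegToRad = M_PI / 180.0;

// Haversine great-circle distance, as in geohashGetDistance above.
static double Haversine(double lon1, double lat1, double lon2, double lat2) {
  double u = sin((lat2 - lat1) * kDegToRad / 2);
  double v = sin((lon2 - lon1) * kDegToRad / 2);
  return 2.0 * kEarthRadiusM *
         asin(sqrt(u * u + cos(lat1 * kDegToRad) * cos(lat2 * kDegToRad) * v * v));
}

int main() {
  // Paris (2.3522, 48.8566) to London (-0.1276, 51.5072): roughly 344 km.
  printf("%.1f km\n", Haversine(2.3522, 48.8566, -0.1276, 51.5072) / 1000.0);
}
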
- -#include "include/pika_hash.h" - -#include "slash/include/slash_string.h" - -#include "include/pika_conf.h" - -extern PikaConf *g_pika_conf; - -void HDelCmd::DoInitial() { - if (!CheckArg(argv_.size())) { - res_.SetRes(CmdRes::kWrongNum, kCmdNameHDel); - return; - } - key_ = argv_[1]; - PikaCmdArgsType::iterator iter = argv_.begin(); - iter++; - iter++; - fields_.assign(iter, argv_.end()); - return; -} - -void HDelCmd::Do(std::shared_ptr partition) { - int32_t num = 0; - rocksdb::Status s = partition->db()->HDel(key_, fields_, &num); - if (s.ok() || s.IsNotFound()) { - res_.AppendInteger(num); - } else { - res_.SetRes(CmdRes::kErrOther, s.ToString()); - } - return; -} - -void HSetCmd::DoInitial() { - if (!CheckArg(argv_.size())) { - res_.SetRes(CmdRes::kWrongNum, kCmdNameHSet); - return; - } - key_ = argv_[1]; - field_ = argv_[2]; - value_ = argv_[3]; - return; -} - -void HSetCmd::Do(std::shared_ptr partition) { - int32_t ret = 0; - rocksdb::Status s = partition->db()->HSet(key_, field_, value_, &ret); - if (s.ok()) { - res_.AppendContent(":" + std::to_string(ret)); - } else { - res_.SetRes(CmdRes::kErrOther, s.ToString()); - } - return; -} - -void HGetCmd::DoInitial() { - if (!CheckArg(argv_.size())) { - res_.SetRes(CmdRes::kWrongNum, kCmdNameHGet); - return; - } - key_ = argv_[1]; - field_ = argv_[2]; - return; -} - -void HGetCmd::Do(std::shared_ptr partition) { - std::string value; - rocksdb::Status s = partition->db()->HGet(key_, field_, &value); - if (s.ok()) { - res_.AppendStringLen(value.size()); - res_.AppendContent(value); - } else if (s.IsNotFound()) { - res_.AppendContent("$-1"); - } else { - res_.SetRes(CmdRes::kErrOther, s.ToString()); - } -} - -void HGetallCmd::DoInitial() { - if (!CheckArg(argv_.size())) { - res_.SetRes(CmdRes::kWrongNum, kCmdNameHGetall); - return; - } - key_ = argv_[1]; - return; -} - -void HGetallCmd::Do(std::shared_ptr partition) { - int64_t total_fv = 0; - int64_t cursor = 0, next_cursor = 0; - size_t raw_limit = g_pika_conf->max_client_response_size(); - std::string raw; - rocksdb::Status s; - std::vector fvs; - - do { - fvs.clear(); - s = partition->db()->HScan(key_, cursor, "*", PIKA_SCAN_STEP_LENGTH, &fvs, &next_cursor); - if (!s.ok()) { - raw.clear(); - total_fv = 0; - break; - } else { - for (const auto& fv : fvs) { - RedisAppendLen(raw, fv.field.size(), "$"); - RedisAppendContent(raw, fv.field); - RedisAppendLen(raw, fv.value.size(), "$"); - RedisAppendContent(raw, fv.value); - } - if (raw.size() >= raw_limit) { - res_.SetRes(CmdRes::kErrOther, "Response exceeds the max-client-response-size limit"); - return; - } - total_fv += fvs.size(); - cursor = next_cursor; - } - } while (cursor != 0); - - if (s.ok() || s.IsNotFound()) { - res_.AppendArrayLen(total_fv * 2); - res_.AppendStringRaw(raw); - } else { - res_.SetRes(CmdRes::kErrOther, s.ToString()); - } - return; -} - - -void HExistsCmd::DoInitial() { - if (!CheckArg(argv_.size())) { - res_.SetRes(CmdRes::kWrongNum, kCmdNameHExists); - return; - } - key_ = argv_[1]; - field_ = argv_[2]; - return; -} - -void HExistsCmd::Do(std::shared_ptr partition) { - rocksdb::Status s = partition->db()->HExists(key_, field_); - if (s.ok()) { - res_.AppendContent(":1"); - } else if (s.IsNotFound()) { - res_.AppendContent(":0"); - } else { - res_.SetRes(CmdRes::kErrOther, s.ToString()); - } -} - -void HIncrbyCmd::DoInitial() { - if (!CheckArg(argv_.size())) { - res_.SetRes(CmdRes::kWrongNum, kCmdNameHIncrby); - return; - } - key_ = argv_[1]; - field_ = argv_[2]; - if (argv_[3].find(" ") != std::string::npos || 
!slash::string2l(argv_[3].data(), argv_[3].size(), &by_)) { - res_.SetRes(CmdRes::kInvalidInt); - return; - } - return; -} - -void HIncrbyCmd::Do(std::shared_ptr partition) { - int64_t new_value; - rocksdb::Status s = partition->db()->HIncrby(key_, field_, by_, &new_value); - if (s.ok() || s.IsNotFound()) { - res_.AppendContent(":" + std::to_string(new_value)); - } else if (s.IsCorruption() && s.ToString() == "Corruption: hash value is not an integer") { - res_.SetRes(CmdRes::kInvalidInt); - } else if (s.IsInvalidArgument()) { - res_.SetRes(CmdRes::kOverFlow); - } else { - res_.SetRes(CmdRes::kErrOther, s.ToString()); - } - return; -} - -void HIncrbyfloatCmd::DoInitial() { - if (!CheckArg(argv_.size())) { - res_.SetRes(CmdRes::kWrongNum, kCmdNameHIncrbyfloat); - return; - } - key_ = argv_[1]; - field_ = argv_[2]; - by_ = argv_[3]; - return; -} - -void HIncrbyfloatCmd::Do(std::shared_ptr partition) { - std::string new_value; - rocksdb::Status s = partition->db()->HIncrbyfloat(key_, field_, by_, &new_value); - if (s.ok()) { - res_.AppendStringLen(new_value.size()); - res_.AppendContent(new_value); - } else if (s.IsCorruption() && s.ToString() == "Corruption: value is not a vaild float") { - res_.SetRes(CmdRes::kInvalidFloat); - } else if (s.IsInvalidArgument()) { - res_.SetRes(CmdRes::kOverFlow); - } else { - res_.SetRes(CmdRes::kErrOther, s.ToString()); - } - return; -} - -void HKeysCmd::DoInitial() { - if (!CheckArg(argv_.size())) { - res_.SetRes(CmdRes::kWrongNum, kCmdNameHKeys); - return; - } - key_ = argv_[1]; - return; -} - -void HKeysCmd::Do(std::shared_ptr partition) { - std::vector fields; - rocksdb::Status s = partition->db()->HKeys(key_, &fields); - if (s.ok() || s.IsNotFound()) { - res_.AppendArrayLen(fields.size()); - for (const auto& field : fields) { - res_.AppendString(field); - } - } else { - res_.SetRes(CmdRes::kErrOther, s.ToString()); - } - return; -} - -void HLenCmd::DoInitial() { - if (!CheckArg(argv_.size())) { - res_.SetRes(CmdRes::kWrongNum, kCmdNameHLen); - return; - } - key_ = argv_[1]; - return; -} - -void HLenCmd::Do(std::shared_ptr partition) { - int32_t len = 0; - rocksdb::Status s = partition->db()->HLen(key_, &len); - if (s.ok() || s.IsNotFound()) { - res_.AppendInteger(len); - } else { - res_.SetRes(CmdRes::kErrOther, "something wrong in hlen"); - } - return; -} - -void HMgetCmd::DoInitial() { - if (!CheckArg(argv_.size())) { - res_.SetRes(CmdRes::kWrongNum, kCmdNameHMget); - return; - } - key_ = argv_[1]; - PikaCmdArgsType::iterator iter = argv_.begin(); - iter++; - iter++; - fields_.assign(iter, argv_.end()); - return; -} - -void HMgetCmd::Do(std::shared_ptr partition) { - std::vector vss; - rocksdb::Status s = partition->db()->HMGet(key_, fields_, &vss); - if (s.ok() || s.IsNotFound()) { - res_.AppendArrayLen(vss.size()); - for (const auto& vs : vss) { - if (vs.status.ok()) { - res_.AppendStringLen(vs.value.size()); - res_.AppendContent(vs.value); - } else { - res_.AppendContent("$-1"); - } - } - } else { - res_.SetRes(CmdRes::kErrOther, s.ToString()); - } - return; -} - -void HMsetCmd::DoInitial() { - if (!CheckArg(argv_.size())) { - res_.SetRes(CmdRes::kWrongNum, kCmdNameHMset); - return; - } - key_ = argv_[1]; - size_t argc = argv_.size(); - if (argc % 2 != 0) { - res_.SetRes(CmdRes::kWrongNum, kCmdNameHMset); - return; - } - size_t index = 2; - fvs_.clear(); - for (; index < argc; index += 2) { - fvs_.push_back({argv_[index], argv_[index + 1]}); - } - return; -} - -void HMsetCmd::Do(std::shared_ptr partition) { - rocksdb::Status s = 
partition->db()->HMSet(key_, fvs_); - if (s.ok()) { - res_.SetRes(CmdRes::kOk); - } else { - res_.SetRes(CmdRes::kErrOther, s.ToString()); - } - return; -} - -void HSetnxCmd::DoInitial() { - if (!CheckArg(argv_.size())) { - res_.SetRes(CmdRes::kWrongNum, kCmdNameHSetnx); - return; - } - key_ = argv_[1]; - field_ = argv_[2]; - value_ = argv_[3]; - return; -} - -void HSetnxCmd::Do(std::shared_ptr partition) { - int32_t ret = 0; - rocksdb::Status s = partition->db()->HSetnx(key_, field_, value_, &ret); - if (s.ok()) { - res_.AppendContent(":" + std::to_string(ret)); - } else { - res_.SetRes(CmdRes::kErrOther, s.ToString()); - } -} - -void HStrlenCmd::DoInitial() { - if (!CheckArg(argv_.size())) { - res_.SetRes(CmdRes::kWrongNum, kCmdNameHStrlen); - return; - } - key_ = argv_[1]; - field_ = argv_[2]; - return; -} - -void HStrlenCmd::Do(std::shared_ptr partition) { - int32_t len = 0; - rocksdb::Status s = partition->db()->HStrlen(key_, field_, &len); - if (s.ok() || s.IsNotFound()) { - res_.AppendInteger(len); - } else { - res_.SetRes(CmdRes::kErrOther, "something wrong in hstrlen"); - } - return; -} - -void HValsCmd::DoInitial() { - if (!CheckArg(argv_.size())) { - res_.SetRes(CmdRes::kWrongNum, kCmdNameHVals); - return; - } - key_ = argv_[1]; - return; -} - -void HValsCmd::Do(std::shared_ptr partition) { - std::vector values; - rocksdb::Status s = partition->db()->HVals(key_, &values); - if (s.ok() || s.IsNotFound()) { - res_.AppendArrayLen(values.size()); - for (const auto& value : values) { - res_.AppendStringLen(value.size()); - res_.AppendContent(value); - } - } else { - res_.SetRes(CmdRes::kErrOther, s.ToString()); - } - return; -} - -void HScanCmd::DoInitial() { - if (!CheckArg(argv_.size())) { - res_.SetRes(CmdRes::kWrongNum, kCmdNameHScan); - return; - } - key_ = argv_[1]; - if (!slash::string2l(argv_[2].data(), argv_[2].size(), &cursor_)) { - res_.SetRes(CmdRes::kInvalidInt); - return; - } - size_t index = 3, argc = argv_.size(); - - while (index < argc) { - std::string opt = argv_[index]; - if (!strcasecmp(opt.data(), "match") - || !strcasecmp(opt.data(), "count")) { - index++; - if (index >= argc) { - res_.SetRes(CmdRes::kSyntaxErr); - return; - } - if (!strcasecmp(opt.data(), "match")) { - pattern_ = argv_[index]; - } else if (!slash::string2l(argv_[index].data(), argv_[index].size(), &count_)) { - res_.SetRes(CmdRes::kInvalidInt); - return; - } - } else { - res_.SetRes(CmdRes::kSyntaxErr); - return; - } - index++; - } - if (count_ < 0) { - res_.SetRes(CmdRes::kSyntaxErr); - return; - } - return; -} - -void HScanCmd::Do(std::shared_ptr partition) { - int64_t next_cursor = 0; - std::vector field_values; - rocksdb::Status s = partition->db()->HScan(key_, cursor_, pattern_, count_, &field_values, &next_cursor); - - if (s.ok() || s.IsNotFound()) { - res_.AppendContent("*2"); - char buf[32]; - int32_t len = slash::ll2string(buf, sizeof(buf), next_cursor); - res_.AppendStringLen(len); - res_.AppendContent(buf); - - res_.AppendArrayLen(field_values.size()*2); - for (const auto& field_value : field_values) { - res_.AppendString(field_value.field); - res_.AppendString(field_value.value); - } - } else { - res_.SetRes(CmdRes::kErrOther, s.ToString()); - } - return; -} - -void HScanxCmd::DoInitial() { - if (!CheckArg(argv_.size())) { - res_.SetRes(CmdRes::kWrongNum, kCmdNameHScan); - return; - } - key_ = argv_[1]; - start_field_ = argv_[2]; - - size_t index = 3, argc = argv_.size(); - while (index < argc) { - std::string opt = argv_[index]; - if (!strcasecmp(opt.data(), "match") - || 
!strcasecmp(opt.data(), "count")) { - index++; - if (index >= argc) { - res_.SetRes(CmdRes::kSyntaxErr); - return; - } - if (!strcasecmp(opt.data(), "match")) { - pattern_ = argv_[index]; - } else if (!slash::string2l(argv_[index].data(), argv_[index].size(), &count_)) { - res_.SetRes(CmdRes::kInvalidInt); - return; - } - } else { - res_.SetRes(CmdRes::kSyntaxErr); - return; - } - index++; - } - if (count_ < 0) { - res_.SetRes(CmdRes::kSyntaxErr); - return; - } - return; -} - -void HScanxCmd::Do(std::shared_ptr partition) { - std::string next_field; - std::vector field_values; - rocksdb::Status s = partition->db()->HScanx(key_, start_field_, pattern_, count_, &field_values, &next_field); - - if (s.ok() || s.IsNotFound()) { - res_.AppendArrayLen(2); - res_.AppendStringLen(next_field.size()); - res_.AppendContent(next_field); - - res_.AppendArrayLen(2 * field_values.size()); - for (const auto& field_value : field_values) { - res_.AppendString(field_value.field); - res_.AppendString(field_value.value); - } - } else { - res_.SetRes(CmdRes::kErrOther, s.ToString()); - } - return; -} - -void PKHScanRangeCmd::DoInitial() { - if (!CheckArg(argv_.size())) { - res_.SetRes(CmdRes::kWrongNum, kCmdNamePKHScanRange); - return; - } - key_ = argv_[1]; - field_start_ = argv_[2]; - field_end_ = argv_[3]; - - size_t index = 4, argc = argv_.size(); - while (index < argc) { - std::string opt = argv_[index]; - if (!strcasecmp(opt.data(), "match") - || !strcasecmp(opt.data(), "limit")) { - index++; - if (index >= argc) { - res_.SetRes(CmdRes::kSyntaxErr); - return; - } - if (!strcasecmp(opt.data(), "match")) { - pattern_ = argv_[index]; - } else if (!slash::string2l(argv_[index].data(), argv_[index].size(), &limit_) || limit_ <= 0) { - res_.SetRes(CmdRes::kInvalidInt); - return; - } - } else { - res_.SetRes(CmdRes::kSyntaxErr); - return; - } - index++; - } - return; -} - -void PKHScanRangeCmd::Do(std::shared_ptr partition) { - std::string next_field; - std::vector field_values; - rocksdb::Status s = partition->db()->PKHScanRange(key_, field_start_, field_end_, - pattern_, limit_, &field_values, &next_field); - - if (s.ok() || s.IsNotFound()) { - res_.AppendArrayLen(2); - res_.AppendString(next_field); - - res_.AppendArrayLen(2 * field_values.size()); - for (const auto& field_value : field_values) { - res_.AppendString(field_value.field); - res_.AppendString(field_value.value); - } - } else { - res_.SetRes(CmdRes::kErrOther, s.ToString()); - } - return; -} - -void PKHRScanRangeCmd::DoInitial() { - if (!CheckArg(argv_.size())) { - res_.SetRes(CmdRes::kWrongNum, kCmdNamePKHRScanRange); - return; - } - key_ = argv_[1]; - field_start_ = argv_[2]; - field_end_ = argv_[3]; - - size_t index = 4, argc = argv_.size(); - while (index < argc) { - std::string opt = argv_[index]; - if (!strcasecmp(opt.data(), "match") - || !strcasecmp(opt.data(), "limit")) { - index++; - if (index >= argc) { - res_.SetRes(CmdRes::kSyntaxErr); - return; - } - if (!strcasecmp(opt.data(), "match")) { - pattern_ = argv_[index]; - } else if (!slash::string2l(argv_[index].data(), argv_[index].size(), &limit_) || limit_ <= 0) { - res_.SetRes(CmdRes::kInvalidInt); - return; - } - } else { - res_.SetRes(CmdRes::kSyntaxErr); - return; - } - index++; - } - return; -} - -void PKHRScanRangeCmd::Do(std::shared_ptr partition) { - std::string next_field; - std::vector field_values; - rocksdb::Status s = partition->db()->PKHRScanRange(key_, field_start_, field_end_, - pattern_, limit_, &field_values, &next_field); - - if (s.ok() || s.IsNotFound()) { - 
res_.AppendArrayLen(2); - res_.AppendString(next_field); - - res_.AppendArrayLen(2 * field_values.size()); - for (const auto& field_value : field_values) { - res_.AppendString(field_value.field); - res_.AppendString(field_value.value); - } - } else { - res_.SetRes(CmdRes::kErrOther, s.ToString()); - } - return; -} diff --git a/tools/pika_migrate/src/pika_hyperloglog.cc b/tools/pika_migrate/src/pika_hyperloglog.cc deleted file mode 100644 index e36cff7d81..0000000000 --- a/tools/pika_migrate/src/pika_hyperloglog.cc +++ /dev/null @@ -1,73 +0,0 @@ -// Copyright (c) 2015-present, Qihoo, Inc. All rights reserved. -// This source code is licensed under the BSD-style license found in the -// LICENSE file in the root directory of this source tree. An additional grant -// of patent rights can be found in the PATENTS file in the same directory. - -#include "include/pika_hyperloglog.h" - -void PfAddCmd::DoInitial() { - if (!CheckArg(argv_.size())) { - res_.SetRes(CmdRes::kWrongNum, kCmdNamePfAdd); - return; - } - if (argv_.size() > 1) { - key_ = argv_[1]; - size_t pos = 2; - while (pos < argv_.size()) { - values_.push_back(argv_[pos++]); - } - } -} - -void PfAddCmd::Do(std::shared_ptr partition) { - bool update = false; - rocksdb::Status s = partition->db()->PfAdd(key_, values_, &update); - if (s.ok() && update) { - res_.AppendInteger(1); - } else if (s.ok() && !update) { - res_.AppendInteger(0); - } else { - res_.SetRes(CmdRes::kErrOther, s.ToString()); - } -} - -void PfCountCmd::DoInitial() { - if (!CheckArg(argv_.size())) { - res_.SetRes(CmdRes::kWrongNum, kCmdNamePfCount); - return; - } - size_t pos = 1; - while (pos < argv_.size()) { - keys_.push_back(argv_[pos++]); - } -} - -void PfCountCmd::Do(std::shared_ptr partition) { - int64_t value_ = 0; - rocksdb::Status s = partition->db()->PfCount(keys_, &value_); - if (s.ok()) { - res_.AppendInteger(value_); - } else { - res_.SetRes(CmdRes::kErrOther, s.ToString()); - } -} - -void PfMergeCmd::DoInitial() { - if (!CheckArg(argv_.size())) { - res_.SetRes(CmdRes::kWrongNum, kCmdNamePfMerge); - return; - } - size_t pos = 1; - while (pos < argv_.size()) { - keys_.push_back(argv_[pos++]); - } -} - -void PfMergeCmd::Do(std::shared_ptr partition) { - rocksdb::Status s = partition->db()->PfMerge(keys_); - if (s.ok()) { - res_.SetRes(CmdRes::kOk); - } else { - res_.SetRes(CmdRes::kErrOther, s.ToString()); - } -} diff --git a/tools/pika_migrate/src/pika_inner_message.proto b/tools/pika_migrate/src/pika_inner_message.proto deleted file mode 100644 index 713c05077a..0000000000 --- a/tools/pika_migrate/src/pika_inner_message.proto +++ /dev/null @@ -1,145 +0,0 @@ -package InnerMessage; - -enum Type { - kMetaSync = 1; - kTrySync = 2; - kDBSync = 3; - kBinlogSync = 4; - kHeatBeat = 5; - kRemoveSlaveNode = 6; -} - -enum StatusCode { - kOk = 1; - kError = 2; -} - -message BinlogOffset { - required uint32 filenum = 1; - required uint64 offset = 2; -} - -message Node { - required string ip = 1; - required int32 port = 2; -} - -message Partition { - required string table_name = 1; - required uint32 partition_id = 2; -} - -message TableInfo { - required string table_name = 1; - required uint32 partition_num = 2; - repeated uint32 partition_ids = 3; -} - -message PikaMeta { - repeated TableInfo table_infos = 1; -} - -// Request message -message InnerRequest { - // slave to master - message MetaSync { - required Node node = 1; - optional string auth = 2; - } - - // slave to master - message TrySync { - required Node node = 1; - required Partition partition = 2; - required 
BinlogOffset binlog_offset = 3; - } - - // slave to master - message DBSync { - required Node node = 1; - required Partition partition = 2; - required BinlogOffset binlog_offset = 3; - } - - message BinlogSync { - required Node node = 1; - required string table_name = 2; - required uint32 partition_id = 3; - required BinlogOffset ack_range_start = 4; - required BinlogOffset ack_range_end = 5; - required int32 session_id = 6; - required bool first_send = 7; - } - - message RemoveSlaveNode { - required Node node = 1; - required Partition partition = 2; - } - - required Type type = 1; - optional MetaSync meta_sync = 2; - optional TrySync try_sync = 3; - optional DBSync db_sync = 4; - optional BinlogSync binlog_sync = 5; - repeated RemoveSlaveNode remove_slave_node = 6; -} - -message PartitionInfo { - required uint32 partition_id = 1; - required Node master = 2; - repeated Node slaves = 3; -} - -// Response message -message InnerResponse { - // master to slave - message MetaSync { - message TableInfo { - required string table_name = 1; - required int32 partition_num = 2; - } - required bool classic_mode = 1; - repeated TableInfo tables_info = 2; - } - - // master to slave - message TrySync { - enum ReplyCode { - kOk = 1; - kSyncPointBePurged = 2; - kSyncPointLarger = 3; - kError = 4; - } - required ReplyCode reply_code = 1; - required Partition partition = 2; - optional BinlogOffset binlog_offset = 3; - optional int32 session_id = 4; - } - - message DBSync { - required Partition partition = 1; - required int32 session_id = 2; - } - - // master to slave - message BinlogSync { - required Partition partition = 1; - required BinlogOffset binlog_offset = 2; - required bytes binlog = 3; - required int32 session_id = 4; - } - - message RemoveSlaveNode { - required Node node = 1; - required Partition partition = 2; - } - - required Type type = 1; - required StatusCode code = 2; - optional string reply = 3; - optional MetaSync meta_sync = 4; - optional DBSync db_sync = 5; - optional TrySync try_sync = 6; - repeated BinlogSync binlog_sync = 7; - repeated RemoveSlaveNode remove_slave_node = 8; -} diff --git a/tools/pika_migrate/src/pika_kv.cc b/tools/pika_migrate/src/pika_kv.cc deleted file mode 100644 index 732878b05b..0000000000 --- a/tools/pika_migrate/src/pika_kv.cc +++ /dev/null @@ -1,1447 +0,0 @@ -// Copyright (c) 2015-present, Qihoo, Inc. All rights reserved. -// This source code is licensed under the BSD-style license found in the -// LICENSE file in the root directory of this source tree. An additional grant -// of patent rights can be found in the PATENTS file in the same directory. 
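For orientation before the pika_kv.cc hunk: the deleted pika_inner_message.proto above is the proto2 schema for Pika's master/slave replication traffic. A minimal sketch (not part of the patch) of how the protoc-generated C++ API would assemble a kBinlogSync ack, assuming the generated header pika_inner_message.pb.h; every field value here is an illustrative placeholder:

    // Sketch only: relies on the classes protoc generates from the schema
    // above (package InnerMessage => namespace InnerMessage). Addresses,
    // table names, and offsets are placeholders.
    #include <string>
    #include "pika_inner_message.pb.h"

    std::string BuildBinlogSyncAck() {
      InnerMessage::InnerRequest request;
      request.set_type(InnerMessage::kBinlogSync);

      InnerMessage::InnerRequest::BinlogSync* sync = request.mutable_binlog_sync();
      sync->mutable_node()->set_ip("127.0.0.1");   // placeholder slave address
      sync->mutable_node()->set_port(9221);
      sync->set_table_name("db0");                 // placeholder table
      sync->set_partition_id(0);
      sync->set_session_id(1);
      sync->set_first_send(false);

      // Acknowledge the contiguous binlog window [start, end] processed locally.
      InnerMessage::BinlogOffset* start = sync->mutable_ack_range_start();
      start->set_filenum(0);
      start->set_offset(0);
      InnerMessage::BinlogOffset* end = sync->mutable_ack_range_end();
      end->set_filenum(0);
      end->set_offset(128);

      std::string buf;
      request.SerializeToString(&buf);  // proto2: fails if a required field is unset
      return buf;
    }

Carrying ack_range_start and ack_range_end as a pair lets a slave acknowledge a whole window of binlog items in one round trip instead of one ack per item.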
-
-#include "include/pika_kv.h"
-
-#include "slash/include/slash_string.h"
-
-#include "include/pika_conf.h"
-#include "include/pika_binlog_transverter.h"
-
-extern PikaConf *g_pika_conf;
-
-/* SET key value [NX] [XX] [EX <sec>] [PX <msec>] */
-void SetCmd::DoInitial() {
-  if (!CheckArg(argv_.size())) {
-    res_.SetRes(CmdRes::kWrongNum, kCmdNameSet);
-    return;
-  }
-  key_ = argv_[1];
-  value_ = argv_[2];
-  condition_ = SetCmd::kNONE;
-  sec_ = 0;
-  size_t index = 3;
-  while (index != argv_.size()) {
-    std::string opt = argv_[index];
-    if (!strcasecmp(opt.data(), "xx")) {
-      condition_ = SetCmd::kXX;
-    } else if (!strcasecmp(opt.data(), "nx")) {
-      condition_ = SetCmd::kNX;
-    } else if (!strcasecmp(opt.data(), "vx")) {
-      condition_ = SetCmd::kVX;
-      index++;
-      if (index == argv_.size()) {
-        res_.SetRes(CmdRes::kSyntaxErr);
-        return;
-      } else {
-        target_ = argv_[index];
-      }
-    } else if (!strcasecmp(opt.data(), "ex") || !strcasecmp(opt.data(), "px")) {
-      condition_ = (condition_ == SetCmd::kNONE) ? SetCmd::kEXORPX : condition_;
-      index++;
-      if (index == argv_.size()) {
-        res_.SetRes(CmdRes::kSyntaxErr);
-        return;
-      }
-      if (!slash::string2l(argv_[index].data(), argv_[index].size(), &sec_)) {
-        res_.SetRes(CmdRes::kInvalidInt);
-        return;
-      } else if (sec_ <= 0) {
-        res_.SetRes(CmdRes::kErrOther, "invalid expire time in set");
-        return;
-      }
-
-      if (!strcasecmp(opt.data(), "px")) {
-        sec_ /= 1000;
-      }
-    } else {
-      res_.SetRes(CmdRes::kSyntaxErr);
-      return;
-    }
-    index++;
-  }
-  return;
-}
-
-void SetCmd::Do(std::shared_ptr<Partition> partition) {
-  rocksdb::Status s;
-  int32_t res = 1;
-  switch (condition_) {
-    case SetCmd::kXX:
-      s = partition->db()->Setxx(key_, value_, &res, sec_);
-      break;
-    case SetCmd::kNX:
-      s = partition->db()->Setnx(key_, value_, &res, sec_);
-      break;
-    case SetCmd::kVX:
-      s = partition->db()->Setvx(key_, target_, value_, &success_, sec_);
-      break;
-    case SetCmd::kEXORPX:
-      s = partition->db()->Setex(key_, value_, sec_);
-      break;
-    default:
-      s = partition->db()->Set(key_, value_);
-      break;
-  }
-
-  if (s.ok() || s.IsNotFound()) {
-    if (condition_ == SetCmd::kVX) {
-      res_.AppendInteger(success_);
-    } else {
-      if (res == 1) {
-        res_.SetRes(CmdRes::kOk);
-      } else {
-        res_.AppendArrayLen(-1);
-      }
-    }
-  } else {
-    res_.SetRes(CmdRes::kErrOther, s.ToString());
-  }
-}
-
-std::string SetCmd::ToBinlog(
-    uint32_t exec_time,
-    const std::string& server_id,
-    uint64_t logic_id,
-    uint32_t filenum,
-    uint64_t offset) {
-  if (condition_ == SetCmd::kEXORPX) {
-    std::string content;
-    content.reserve(RAW_ARGS_LEN);
-    RedisAppendLen(content, 4, "*");
-
-    // to pksetexat cmd
-    std::string pksetexat_cmd("pksetexat");
-    RedisAppendLen(content, pksetexat_cmd.size(), "$");
-    RedisAppendContent(content, pksetexat_cmd);
-    // key
-    RedisAppendLen(content, key_.size(), "$");
-    RedisAppendContent(content, key_);
-    // time_stamp
-    char buf[100];
-    int32_t time_stamp = time(nullptr) + sec_;
-    slash::ll2string(buf, 100, time_stamp);
-    std::string at(buf);
-    RedisAppendLen(content, at.size(), "$");
-    RedisAppendContent(content, at);
-    // value
-    RedisAppendLen(content, value_.size(), "$");
-    RedisAppendContent(content, value_);
-    return PikaBinlogTransverter::BinlogEncode(BinlogType::TypeFirst,
-                                               exec_time,
-                                               std::stoi(server_id),
-                                               logic_id,
-                                               filenum,
-                                               offset,
-                                               content,
-                                               {});
-  } else {
-    return Cmd::ToBinlog(exec_time, server_id, logic_id, filenum, offset);
-  }
-}
-
-void GetCmd::DoInitial() {
-  if (!CheckArg(argv_.size())) {
-    res_.SetRes(CmdRes::kWrongNum, kCmdNameGet);
-    return;
-  }
-  key_ = argv_[1];
-  return;
-}
-
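SetCmd::ToBinlog above rewrites SET ... EX/PX into an equivalent PKSETEXAT before the command reaches the binlog, so the expiry replicates as an absolute unix timestamp rather than a relative TTL that replication lag could silently stretch on a slave. A standalone sketch of the same rewrite; EncodeBulk and RewriteSetExForBinlog are illustrative helpers, not Pika APIs:

    // Pins a relative TTL to an absolute deadline and emits the rewritten
    // command as a RESP multibulk array, mirroring what ToBinlog builds with
    // RedisAppendLen/RedisAppendContent.
    #include <ctime>
    #include <string>

    static void EncodeBulk(std::string* out, const std::string& s) {
      out->append("$").append(std::to_string(s.size())).append("\r\n");
      out->append(s).append("\r\n");
    }

    std::string RewriteSetExForBinlog(const std::string& key,
                                      const std::string& value,
                                      int64_t relative_sec) {
      std::string raw("*4\r\n");          // 4-element array: cmd, key, ts, value
      EncodeBulk(&raw, "pksetexat");      // replacement command
      EncodeBulk(&raw, key);
      EncodeBulk(&raw, std::to_string(std::time(nullptr) + relative_sec));
      EncodeBulk(&raw, value);
      return raw;                         // Pika then wraps this in a binlog frame
    }

For example, RewriteSetExForBinlog("k", "v", 10) yields "*4\r\n$9\r\npksetexat\r\n$1\r\nk\r\n$10\r\n<now+10>\r\n$1\r\nv\r\n" for a ten-digit timestamp.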
-void GetCmd::Do(std::shared_ptr partition) { - std::string value; - rocksdb::Status s = partition->db()->Get(key_, &value); - if (s.ok()) { - res_.AppendStringLen(value.size()); - res_.AppendContent(value); - } else if (s.IsNotFound()) { - res_.AppendStringLen(-1); - } else { - res_.SetRes(CmdRes::kErrOther, s.ToString()); - } -} - -void DelCmd::DoInitial() { - if (!CheckArg(argv_.size())) { - res_.SetRes(CmdRes::kWrongNum, kCmdNameDel); - return; - } - std::vector::iterator iter = argv_.begin(); - keys_.assign(++iter, argv_.end()); - return; -} - -void DelCmd::Do(std::shared_ptr partition) { - std::map type_status; - int64_t count = partition->db()->Del(keys_, &type_status); - if (count >= 0) { - res_.AppendInteger(count); - } else { - res_.SetRes(CmdRes::kErrOther, "delete error"); - } - return; -} - -void IncrCmd::DoInitial() { - if (!CheckArg(argv_.size())) { - res_.SetRes(CmdRes::kWrongNum, kCmdNameIncr); - return; - } - key_ = argv_[1]; - return; -} - -void IncrCmd::Do(std::shared_ptr partition) { - rocksdb::Status s = partition->db()->Incrby(key_, 1, &new_value_); - if (s.ok()) { - res_.AppendContent(":" + std::to_string(new_value_)); - } else if (s.IsCorruption() && s.ToString() == "Corruption: Value is not a integer") { - res_.SetRes(CmdRes::kInvalidInt); - } else if (s.IsInvalidArgument()) { - res_.SetRes(CmdRes::kOverFlow); - } else { - res_.SetRes(CmdRes::kErrOther, s.ToString()); - } - return; -} - -void IncrbyCmd::DoInitial() { - if (!CheckArg(argv_.size())) { - res_.SetRes(CmdRes::kWrongNum, kCmdNameIncrby); - return; - } - key_ = argv_[1]; - if (!slash::string2l(argv_[2].data(), argv_[2].size(), &by_)) { - res_.SetRes(CmdRes::kInvalidInt, kCmdNameIncrby); - return; - } - return; -} - -void IncrbyCmd::Do(std::shared_ptr partition) { - rocksdb::Status s = partition->db()->Incrby(key_, by_, &new_value_); - if (s.ok()) { - res_.AppendContent(":" + std::to_string(new_value_)); - } else if (s.IsCorruption() && s.ToString() == "Corruption: Value is not a integer") { - res_.SetRes(CmdRes::kInvalidInt); - } else if (s.IsInvalidArgument()) { - res_.SetRes(CmdRes::kOverFlow); - } else { - res_.SetRes(CmdRes::kErrOther, s.ToString()); - } - return; -} - -void IncrbyfloatCmd::DoInitial() { - if (!CheckArg(argv_.size())) { - res_.SetRes(CmdRes::kWrongNum, kCmdNameIncrbyfloat); - return; - } - key_ = argv_[1]; - value_ = argv_[2]; - if (!slash::string2d(argv_[2].data(), argv_[2].size(), &by_)) { - res_.SetRes(CmdRes::kInvalidFloat); - return; - } - return; -} - -void IncrbyfloatCmd::Do(std::shared_ptr partition) { - rocksdb::Status s = partition->db()->Incrbyfloat(key_, value_, &new_value_); - if (s.ok()) { - res_.AppendStringLen(new_value_.size()); - res_.AppendContent(new_value_); - } else if (s.IsCorruption() && s.ToString() == "Corruption: Value is not a vaild float"){ - res_.SetRes(CmdRes::kInvalidFloat); - } else if (s.IsInvalidArgument()) { - res_.SetRes(CmdRes::kOverFlow); - } else { - res_.SetRes(CmdRes::kErrOther, s.ToString()); - } - return; -} - -void DecrCmd::DoInitial() { - if (!CheckArg(argv_.size())) { - res_.SetRes(CmdRes::kWrongNum, kCmdNameDecr); - return; - } - key_ = argv_[1]; - return; -} - -void DecrCmd::Do(std::shared_ptr partition) { - rocksdb::Status s = partition->db()->Decrby(key_, 1, &new_value_); - if (s.ok()) { - res_.AppendContent(":" + std::to_string(new_value_)); - } else if (s.IsCorruption() && s.ToString() == "Corruption: Value is not a integer") { - res_.SetRes(CmdRes::kInvalidInt); - } else if (s.IsInvalidArgument()) { - 
res_.SetRes(CmdRes::kOverFlow); - } else { - res_.SetRes(CmdRes::kErrOther, s.ToString()); - } - return; -} - -void DecrbyCmd::DoInitial() { - if (!CheckArg(argv_.size())) { - res_.SetRes(CmdRes::kWrongNum, kCmdNameDecrby); - return; - } - key_ = argv_[1]; - if (!slash::string2l(argv_[2].data(), argv_[2].size(), &by_)) { - res_.SetRes(CmdRes::kInvalidInt); - return; - } - return; -} - -void DecrbyCmd::Do(std::shared_ptr partition) { - rocksdb::Status s = partition->db()->Decrby(key_, by_, &new_value_); - if (s.ok()) { - res_.AppendContent(":" + std::to_string(new_value_)); - } else if (s.IsCorruption() && s.ToString() == "Corruption: Value is not a integer") { - res_.SetRes(CmdRes::kInvalidInt); - } else if (s.IsInvalidArgument()) { - res_.SetRes(CmdRes::kOverFlow); - } else { - res_.SetRes(CmdRes::kErrOther, s.ToString()); - } - return; -} - -void GetsetCmd::DoInitial() { - if (!CheckArg(argv_.size())) { - res_.SetRes(CmdRes::kWrongNum, kCmdNameGetset); - return; - } - key_ = argv_[1]; - new_value_ = argv_[2]; - return; -} - -void GetsetCmd::Do(std::shared_ptr partition) { - std::string old_value; - rocksdb::Status s = partition->db()->GetSet(key_, new_value_, &old_value); - if (s.ok()) { - if (old_value.empty()) { - res_.AppendContent("$-1"); - } else { - res_.AppendStringLen(old_value.size()); - res_.AppendContent(old_value); - } - } else { - res_.SetRes(CmdRes::kErrOther, s.ToString()); - } - return; -} - -void AppendCmd::DoInitial() { - if (!CheckArg(argv_.size())) { - res_.SetRes(CmdRes::kWrongNum, kCmdNameAppend); - return; - } - key_ = argv_[1]; - value_ = argv_[2]; - return; -} - -void AppendCmd::Do(std::shared_ptr partition) { - int32_t new_len = 0; - rocksdb::Status s = partition->db()->Append(key_, value_, &new_len); - if (s.ok() || s.IsNotFound()) { - res_.AppendInteger(new_len); - } else { - res_.SetRes(CmdRes::kErrOther, s.ToString()); - } - return; -} - -void MgetCmd::DoInitial() { - if (!CheckArg(argv_.size())) { - res_.SetRes(CmdRes::kWrongNum, kCmdNameMget); - return; - } - keys_ = argv_; - keys_.erase(keys_.begin()); - return; -} - -void MgetCmd::Do(std::shared_ptr partition) { - std::vector vss; - rocksdb::Status s = partition->db()->MGet(keys_, &vss); - if (s.ok()) { - res_.AppendArrayLen(vss.size()); - for (const auto& vs : vss) { - if (vs.status.ok()) { - res_.AppendStringLen(vs.value.size()); - res_.AppendContent(vs.value); - } else { - res_.AppendContent("$-1"); - } - } - } else { - res_.SetRes(CmdRes::kErrOther, s.ToString()); - } - return; -} - -void KeysCmd::DoInitial() { - if (!CheckArg(argv_.size())) { - res_.SetRes(CmdRes::kWrongNum, kCmdNameKeys); - return; - } - pattern_ = argv_[1]; - if (argv_.size() == 3) { - std::string opt = argv_[2]; - if (!strcasecmp(opt.data(), "string")) { - type_ = blackwidow::DataType::kStrings; - } else if (!strcasecmp(opt.data(), "zset")) { - type_ = blackwidow::DataType::kZSets; - } else if (!strcasecmp(opt.data(), "set")) { - type_ = blackwidow::DataType::kSets; - } else if (!strcasecmp(opt.data(), "list")) { - type_ = blackwidow::DataType::kLists; - } else if (!strcasecmp(opt.data(), "hash")) { - type_ = blackwidow::DataType::kHashes; - } else { - res_.SetRes(CmdRes::kSyntaxErr); - } - } else if (argv_.size() > 3) { - res_.SetRes(CmdRes::kSyntaxErr); - } - return; -} - -void KeysCmd::Do(std::shared_ptr partition) { - int64_t total_key = 0; - int64_t cursor = 0; - size_t raw_limit = g_pika_conf->max_client_response_size(); - std::string raw; - std::vector keys; - do { - keys.clear(); - cursor = partition->db()->Scan(type_, 
cursor, pattern_, PIKA_SCAN_STEP_LENGTH, &keys); - for (const auto& key : keys) { - RedisAppendLen(raw, key.size(), "$"); - RedisAppendContent(raw, key); - } - if (raw.size() >= raw_limit) { - res_.SetRes(CmdRes::kErrOther, "Response exceeds the max-client-response-size limit"); - return; - } - total_key += keys.size(); - } while (cursor != 0); - - res_.AppendArrayLen(total_key); - res_.AppendStringRaw(raw); - return; -} - -void SetnxCmd::DoInitial() { - if (!CheckArg(argv_.size())) { - res_.SetRes(CmdRes::kWrongNum, kCmdNameSetnx); - return; - } - key_ = argv_[1]; - value_ = argv_[2]; - return; -} - -void SetnxCmd::Do(std::shared_ptr partition) { - success_ = 0; - rocksdb::Status s = partition->db()->Setnx(key_, value_, &success_); - if (s.ok()) { - res_.AppendInteger(success_); - } else { - res_.SetRes(CmdRes::kErrOther, s.ToString()); - } - return; -} - -std::string SetnxCmd::ToBinlog( - uint32_t exec_time, - const std::string& server_id, - uint64_t logic_id, - uint32_t filenum, - uint64_t offset) { - std::string content; - if (success_) { - content.reserve(RAW_ARGS_LEN); - RedisAppendLen(content, 3, "*"); - - // to set cmd - std::string set_cmd("set"); - RedisAppendLen(content, set_cmd.size(), "$"); - RedisAppendContent(content, set_cmd); - // key - RedisAppendLen(content, key_.size(), "$"); - RedisAppendContent(content, key_); - // value - RedisAppendLen(content, value_.size(), "$"); - RedisAppendContent(content, value_); - - return PikaBinlogTransverter::BinlogEncode(BinlogType::TypeFirst, - exec_time, - std::stoi(server_id), - logic_id, - filenum, - offset, - content, - {}); - } - return content; -} - -void SetexCmd::DoInitial() { - if (!CheckArg(argv_.size())) { - res_.SetRes(CmdRes::kWrongNum, kCmdNameSetex); - return; - } - key_ = argv_[1]; - if (!slash::string2l(argv_[2].data(), argv_[2].size(), &sec_)) { - res_.SetRes(CmdRes::kInvalidInt); - return; - } - value_ = argv_[3]; - return; -} - -void SetexCmd::Do(std::shared_ptr partition) { - rocksdb::Status s = partition->db()->Setex(key_, value_, sec_); - if (s.ok()) { - res_.SetRes(CmdRes::kOk); - } else { - res_.SetRes(CmdRes::kErrOther, s.ToString()); - } -} - -std::string SetexCmd::ToBinlog( - uint32_t exec_time, - const std::string& server_id, - uint64_t logic_id, - uint32_t filenum, - uint64_t offset) { - - std::string content; - content.reserve(RAW_ARGS_LEN); - RedisAppendLen(content, 4, "*"); - - // to pksetexat cmd - std::string pksetexat_cmd("pksetexat"); - RedisAppendLen(content, pksetexat_cmd.size(), "$"); - RedisAppendContent(content, pksetexat_cmd); - // key - RedisAppendLen(content, key_.size(), "$"); - RedisAppendContent(content, key_); - // time_stamp - char buf[100]; - int32_t time_stamp = time(nullptr) + sec_; - slash::ll2string(buf, 100, time_stamp); - std::string at(buf); - RedisAppendLen(content, at.size(), "$"); - RedisAppendContent(content, at); - // value - RedisAppendLen(content, value_.size(), "$"); - RedisAppendContent(content, value_); - return PikaBinlogTransverter::BinlogEncode(BinlogType::TypeFirst, - exec_time, - std::stoi(server_id), - logic_id, - filenum, - offset, - content, - {}); -} - -void PsetexCmd::DoInitial() { - if (!CheckArg(argv_.size())) { - res_.SetRes(CmdRes::kWrongNum, kCmdNamePsetex); - return; - } - key_ = argv_[1]; - if (!slash::string2l(argv_[2].data(), argv_[2].size(), &usec_)) { - res_.SetRes(CmdRes::kInvalidInt); - return; - } - value_ = argv_[3]; - return; -} - -void PsetexCmd::Do(std::shared_ptr partition) { - rocksdb::Status s = partition->db()->Setex(key_, value_, usec_ / 
1000); - if (s.ok()) { - res_.SetRes(CmdRes::kOk); - } else { - res_.SetRes(CmdRes::kErrOther, s.ToString()); - } -} - -std::string PsetexCmd::ToBinlog( - uint32_t exec_time, - const std::string& server_id, - uint64_t logic_id, - uint32_t filenum, - uint64_t offset) { - - std::string content; - content.reserve(RAW_ARGS_LEN); - RedisAppendLen(content, 4, "*"); - - // to pksetexat cmd - std::string pksetexat_cmd("pksetexat"); - RedisAppendLen(content, pksetexat_cmd.size(), "$"); - RedisAppendContent(content, pksetexat_cmd); - // key - RedisAppendLen(content, key_.size(), "$"); - RedisAppendContent(content, key_); - // time_stamp - char buf[100]; - int32_t time_stamp = time(nullptr) + usec_ / 1000; - slash::ll2string(buf, 100, time_stamp); - std::string at(buf); - RedisAppendLen(content, at.size(), "$"); - RedisAppendContent(content, at); - // value - RedisAppendLen(content, value_.size(), "$"); - RedisAppendContent(content, value_); - return PikaBinlogTransverter::BinlogEncode(BinlogType::TypeFirst, - exec_time, - std::stoi(server_id), - logic_id, - filenum, - offset, - content, - {}); -} - -void DelvxCmd::DoInitial() { - if (!CheckArg(argv_.size())) { - res_.SetRes(CmdRes::kWrongNum, kCmdNameDelvx); - return; - } - key_ = argv_[1]; - value_ = argv_[2]; - return; -} - -void DelvxCmd::Do(std::shared_ptr partition) { - rocksdb::Status s = partition->db()->Delvx(key_, value_, &success_); - if (s.ok() || s.IsNotFound()) { - res_.AppendInteger(success_); - } else { - res_.SetRes(CmdRes::kErrOther, s.ToString()); - } -} - -void MsetCmd::DoInitial() { - if (!CheckArg(argv_.size())) { - res_.SetRes(CmdRes::kWrongNum, kCmdNameMset); - return; - } - size_t argc = argv_.size(); - if (argc % 2 == 0) { - res_.SetRes(CmdRes::kWrongNum, kCmdNameMset); - return; - } - kvs_.clear(); - for (size_t index = 1; index != argc; index += 2) { - kvs_.push_back({argv_[index], argv_[index + 1]}); - } - return; -} - -void MsetCmd::Do(std::shared_ptr partition) { - blackwidow::Status s = partition->db()->MSet(kvs_); - if (s.ok()) { - res_.SetRes(CmdRes::kOk); - } else { - res_.SetRes(CmdRes::kErrOther, s.ToString()); - } -} - -void MsetnxCmd::DoInitial() { - if (!CheckArg(argv_.size())) { - res_.SetRes(CmdRes::kWrongNum, kCmdNameMsetnx); - return; - } - size_t argc = argv_.size(); - if (argc % 2 == 0) { - res_.SetRes(CmdRes::kWrongNum, kCmdNameMsetnx); - return; - } - kvs_.clear(); - for (size_t index = 1; index != argc; index += 2) { - kvs_.push_back({argv_[index], argv_[index + 1]}); - } - return; -} - -void MsetnxCmd::Do(std::shared_ptr partition) { - success_ = 0; - rocksdb::Status s = partition->db()->MSetnx(kvs_, &success_); - if (s.ok()) { - res_.AppendInteger(success_); - } else { - res_.SetRes(CmdRes::kErrOther, s.ToString()); - } -} - -void GetrangeCmd::DoInitial() { - if (!CheckArg(argv_.size())) { - res_.SetRes(CmdRes::kWrongNum, kCmdNameGetrange); - return; - } - key_ = argv_[1]; - if (!slash::string2l(argv_[2].data(), argv_[2].size(), &start_)) { - res_.SetRes(CmdRes::kInvalidInt); - return; - } - if (!slash::string2l(argv_[3].data(), argv_[3].size(), &end_)) { - res_.SetRes(CmdRes::kInvalidInt); - return; - } - return; -} - -void GetrangeCmd::Do(std::shared_ptr partition) { - std::string substr; - rocksdb::Status s = partition->db()->Getrange(key_, start_, end_, &substr); - if (s.ok() || s.IsNotFound()) { - res_.AppendStringLen(substr.size()); - res_.AppendContent(substr); - } else { - res_.SetRes(CmdRes::kErrOther, s.ToString()); - } -} - -void SetrangeCmd::DoInitial() { - if (!CheckArg(argv_.size())) { 
- res_.SetRes(CmdRes::kWrongNum, kCmdNameSetrange); - return; - } - key_ = argv_[1]; - if (!slash::string2l(argv_[2].data(), argv_[2].size(), &offset_)) { - res_.SetRes(CmdRes::kInvalidInt); - return; - } - value_ = argv_[3]; - return; -} - -void SetrangeCmd::Do(std::shared_ptr partition) { - int32_t new_len; - rocksdb::Status s = partition->db()->Setrange(key_, offset_, value_, &new_len); - if (s.ok()) { - res_.AppendInteger(new_len); - } else { - res_.SetRes(CmdRes::kErrOther, s.ToString()); - } - return; -} - -void StrlenCmd::DoInitial() { - if (!CheckArg(argv_.size())) { - res_.SetRes(CmdRes::kWrongNum, kCmdNameStrlen); - return; - } - key_ = argv_[1]; - return; -} - -void StrlenCmd::Do(std::shared_ptr partition) { - int32_t len = 0; - rocksdb::Status s = partition->db()->Strlen(key_, &len); - if (s.ok() || s.IsNotFound()) { - res_.AppendInteger(len); - } else { - res_.SetRes(CmdRes::kErrOther, s.ToString()); - } - return; -} - -void ExistsCmd::DoInitial() { - if (!CheckArg(argv_.size())) { - res_.SetRes(CmdRes::kWrongNum, kCmdNameExists); - return; - } - keys_ = argv_; - keys_.erase(keys_.begin()); - return; -} - -void ExistsCmd::Do(std::shared_ptr partition) { - std::map type_status; - int64_t res = partition->db()->Exists(keys_, &type_status); - if (res != -1) { - res_.AppendInteger(res); - } else { - res_.SetRes(CmdRes::kErrOther, "exists internal error"); - } - return; -} - -void ExpireCmd::DoInitial() { - if (!CheckArg(argv_.size())) { - res_.SetRes(CmdRes::kWrongNum, kCmdNameExpire); - return; - } - key_ = argv_[1]; - if (!slash::string2l(argv_[2].data(), argv_[2].size(), &sec_)) { - res_.SetRes(CmdRes::kInvalidInt); - return; - } - return; -} - -void ExpireCmd::Do(std::shared_ptr partition) { - std::map type_status; - int64_t res = partition->db()->Expire(key_, sec_, &type_status); - if (res != -1) { - res_.AppendInteger(res); - } else { - res_.SetRes(CmdRes::kErrOther, "expire internal error"); - } - return; -} - -std::string ExpireCmd::ToBinlog( - uint32_t exec_time, - const std::string& server_id, - uint64_t logic_id, - uint32_t filenum, - uint64_t offset) { - std::string content; - content.reserve(RAW_ARGS_LEN); - RedisAppendLen(content, 3, "*"); - - // to expireat cmd - std::string expireat_cmd("expireat"); - RedisAppendLen(content, expireat_cmd.size(), "$"); - RedisAppendContent(content, expireat_cmd); - // key - RedisAppendLen(content, key_.size(), "$"); - RedisAppendContent(content, key_); - // sec - char buf[100]; - int64_t expireat = time(nullptr) + sec_; - slash::ll2string(buf, 100, expireat); - std::string at(buf); - RedisAppendLen(content, at.size(), "$"); - RedisAppendContent(content, at); - - return PikaBinlogTransverter::BinlogEncode(BinlogType::TypeFirst, - exec_time, - std::stoi(server_id), - logic_id, - filenum, - offset, - content, - {}); -} - -void PexpireCmd::DoInitial() { - if (!CheckArg(argv_.size())) { - res_.SetRes(CmdRes::kWrongNum, kCmdNamePexpire); - return; - } - key_ = argv_[1]; - if (!slash::string2l(argv_[2].data(), argv_[2].size(), &msec_)) { - res_.SetRes(CmdRes::kInvalidInt); - return; - } - return; -} - -void PexpireCmd::Do(std::shared_ptr partition) { - std::map type_status; - int64_t res = partition->db()->Expire(key_, msec_/1000, &type_status); - if (res != -1) { - res_.AppendInteger(res); - } else { - res_.SetRes(CmdRes::kErrOther, "expire internal error"); - } - return; -} - -std::string PexpireCmd::ToBinlog( - uint32_t exec_time, - const std::string& server_id, - uint64_t logic_id, - uint32_t filenum, - uint64_t offset) { - std::string 
content; - content.reserve(RAW_ARGS_LEN); - RedisAppendLen(content, argv_.size(), "*"); - - // to expireat cmd - std::string expireat_cmd("expireat"); - RedisAppendLen(content, expireat_cmd.size(), "$"); - RedisAppendContent(content, expireat_cmd); - // key - RedisAppendLen(content, key_.size(), "$"); - RedisAppendContent(content, key_); - // sec - char buf[100]; - int64_t expireat = time(nullptr) + msec_ / 1000; - slash::ll2string(buf, 100, expireat); - std::string at(buf); - RedisAppendLen(content, at.size(), "$"); - RedisAppendContent(content, at); - - return PikaBinlogTransverter::BinlogEncode(BinlogType::TypeFirst, - exec_time, - std::stoi(server_id), - logic_id, - filenum, - offset, - content, - {}); -} - -void ExpireatCmd::DoInitial() { - if (!CheckArg(argv_.size())) { - res_.SetRes(CmdRes::kWrongNum, kCmdNameExpireat); - return; - } - key_ = argv_[1]; - if (!slash::string2l(argv_[2].data(), argv_[2].size(), &time_stamp_)) { - res_.SetRes(CmdRes::kInvalidInt); - return; - } - return; -} - -void ExpireatCmd::Do(std::shared_ptr partition) { - std::map type_status; - int32_t res = partition->db()->Expireat(key_, time_stamp_, &type_status); - if (res != -1) { - res_.AppendInteger(res); - } else { - res_.SetRes(CmdRes::kErrOther, "expireat internal error"); - } -} - -void PexpireatCmd::DoInitial() { - if (!CheckArg(argv_.size())) { - res_.SetRes(CmdRes::kWrongNum, kCmdNamePexpireat); - return; - } - key_ = argv_[1]; - if (!slash::string2l(argv_[2].data(), argv_[2].size(), &time_stamp_ms_)) { - res_.SetRes(CmdRes::kInvalidInt); - return; - } - return; -} - -std::string PexpireatCmd::ToBinlog( - uint32_t exec_time, - const std::string& server_id, - uint64_t logic_id, - uint32_t filenum, - uint64_t offset) { - std::string content; - content.reserve(RAW_ARGS_LEN); - RedisAppendLen(content, argv_.size(), "*"); - - // to expireat cmd - std::string expireat_cmd("expireat"); - RedisAppendLen(content, expireat_cmd.size(), "$"); - RedisAppendContent(content, expireat_cmd); - // key - RedisAppendLen(content, key_.size(), "$"); - RedisAppendContent(content, key_); - // sec - char buf[100]; - int64_t expireat = time_stamp_ms_ / 1000; - slash::ll2string(buf, 100, expireat); - std::string at(buf); - RedisAppendLen(content, at.size(), "$"); - RedisAppendContent(content, at); - - return PikaBinlogTransverter::BinlogEncode(BinlogType::TypeFirst, - exec_time, - std::stoi(server_id), - logic_id, - filenum, - offset, - content, - {}); -} - -void PexpireatCmd::Do(std::shared_ptr partition) { - std::map type_status; - int32_t res = partition->db()->Expireat(key_, time_stamp_ms_/1000, &type_status); - if (res != -1) { - res_.AppendInteger(res); - } else { - res_.SetRes(CmdRes::kErrOther, "pexpireat internal error"); - } - return; -} - -void TtlCmd::DoInitial() { - if (!CheckArg(argv_.size())) { - res_.SetRes(CmdRes::kWrongNum, kCmdNameTtl); - return; - } - key_ = argv_[1]; - return; -} - -void TtlCmd::Do(std::shared_ptr partition) { - std::map type_timestamp; - std::map type_status; - type_timestamp = partition->db()->TTL(key_, &type_status); - for (const auto& item : type_timestamp) { - // mean operation exception errors happen in database - if (item.second == -3) { - res_.SetRes(CmdRes::kErrOther, "ttl internal error"); - return; - } - } - if (type_timestamp[blackwidow::kStrings] != -2) { - res_.AppendInteger(type_timestamp[blackwidow::kStrings]); - } else if (type_timestamp[blackwidow::kHashes] != -2) { - res_.AppendInteger(type_timestamp[blackwidow::kHashes]); - } else if (type_timestamp[blackwidow::kLists] 
!= -2) {
-    res_.AppendInteger(type_timestamp[blackwidow::kLists]);
-  } else if (type_timestamp[blackwidow::kZSets] != -2) {
-    res_.AppendInteger(type_timestamp[blackwidow::kZSets]);
-  } else if (type_timestamp[blackwidow::kSets] != -2) {
-    res_.AppendInteger(type_timestamp[blackwidow::kSets]);
-  } else {
-    // key does not exist
-    res_.AppendInteger(-2);
-  }
-  return;
-}
-
-void PttlCmd::DoInitial() {
-  if (!CheckArg(argv_.size())) {
-    res_.SetRes(CmdRes::kWrongNum, kCmdNamePttl);
-    return;
-  }
-  key_ = argv_[1];
-  return;
-}
-
-void PttlCmd::Do(std::shared_ptr<Partition> partition) {
-  std::map<blackwidow::DataType, int64_t> type_timestamp;
-  std::map<blackwidow::DataType, rocksdb::Status> type_status;
-  type_timestamp = partition->db()->TTL(key_, &type_status);
-  for (const auto& item : type_timestamp) {
-    // a timestamp of -3 means an operation exception occurred in the database
-    if (item.second == -3) {
-      res_.SetRes(CmdRes::kErrOther, "ttl internal error");
-      return;
-    }
-  }
-  if (type_timestamp[blackwidow::kStrings] != -2) {
-    if (type_timestamp[blackwidow::kStrings] == -1) {
-      res_.AppendInteger(-1);
-    } else {
-      res_.AppendInteger(type_timestamp[blackwidow::kStrings] * 1000);
-    }
-  } else if (type_timestamp[blackwidow::kHashes] != -2) {
-    if (type_timestamp[blackwidow::kHashes] == -1) {
-      res_.AppendInteger(-1);
-    } else {
-      res_.AppendInteger(type_timestamp[blackwidow::kHashes] * 1000);
-    }
-  } else if (type_timestamp[blackwidow::kLists] != -2) {
-    if (type_timestamp[blackwidow::kLists] == -1) {
-      res_.AppendInteger(-1);
-    } else {
-      res_.AppendInteger(type_timestamp[blackwidow::kLists] * 1000);
-    }
-  } else if (type_timestamp[blackwidow::kSets] != -2) {
-    if (type_timestamp[blackwidow::kSets] == -1) {
-      res_.AppendInteger(-1);
-    } else {
-      res_.AppendInteger(type_timestamp[blackwidow::kSets] * 1000);
-    }
-  } else if (type_timestamp[blackwidow::kZSets] != -2) {
-    if (type_timestamp[blackwidow::kZSets] == -1) {
-      res_.AppendInteger(-1);
-    } else {
-      res_.AppendInteger(type_timestamp[blackwidow::kZSets] * 1000);
-    }
-  } else {
-    // key does not exist
-    res_.AppendInteger(-2);
-  }
-  return;
-}
-
-void PersistCmd::DoInitial() {
-  if (!CheckArg(argv_.size())) {
-    res_.SetRes(CmdRes::kWrongNum, kCmdNamePersist);
-    return;
-  }
-  key_ = argv_[1];
-  return;
-}
-
-void PersistCmd::Do(std::shared_ptr<Partition> partition) {
-  std::map<blackwidow::DataType, rocksdb::Status> type_status;
-  int32_t res = partition->db()->Persist(key_, &type_status);
-  if (res != -1) {
-    res_.AppendInteger(res);
-  } else {
-    res_.SetRes(CmdRes::kErrOther, "persist internal error");
-  }
-  return;
-}
-
-void TypeCmd::DoInitial() {
-  if (!CheckArg(argv_.size())) {
-    res_.SetRes(CmdRes::kWrongNum, kCmdNameType);
-    return;
-  }
-  key_ = argv_[1];
-  return;
-}
-
-void TypeCmd::Do(std::shared_ptr<Partition> partition) {
-  std::string res;
-  rocksdb::Status s = partition->db()->Type(key_, &res);
-  if (s.ok()) {
-    res_.AppendContent("+" + res);
-  } else {
-    res_.SetRes(CmdRes::kErrOther, s.ToString());
-  }
-  return;
-}
-
-void ScanCmd::DoInitial() {
-  if (!CheckArg(argv_.size())) {
-    res_.SetRes(CmdRes::kWrongNum, kCmdNameScan);
-    return;
-  }
-  if (!slash::string2l(argv_[1].data(), argv_[1].size(), &cursor_)) {
-    res_.SetRes(CmdRes::kInvalidInt);
-    return;
-  }
-  size_t index = 2, argc = argv_.size();
-
-  while (index < argc) {
-    std::string opt = argv_[index];
-    if (!strcasecmp(opt.data(), "match")
-        || !strcasecmp(opt.data(), "count")) {
-      index++;
-      if (index >= argc) {
-        res_.SetRes(CmdRes::kSyntaxErr);
-        return;
-      }
-      if (!strcasecmp(opt.data(), "match")) {
-        pattern_ = argv_[index];
-      } else if (!slash::string2l(argv_[index].data(), argv_[index].size(), &count_) ||
count_ <= 0) { - res_.SetRes(CmdRes::kInvalidInt); - return; - } - } else { - res_.SetRes(CmdRes::kSyntaxErr); - return; - } - index++; - } - return; -} - -void ScanCmd::Do(std::shared_ptr partition) { - int64_t total_key = 0; - int64_t batch_count = 0; - int64_t left = count_; - int64_t cursor_ret = cursor_; - size_t raw_limit = g_pika_conf->max_client_response_size(); - std::string raw; - std::vector keys; - // To avoid memory overflow, we call the Scan method in batches - do { - keys.clear(); - batch_count = left < PIKA_SCAN_STEP_LENGTH ? left : PIKA_SCAN_STEP_LENGTH; - left = left > PIKA_SCAN_STEP_LENGTH ? left - PIKA_SCAN_STEP_LENGTH : 0; - cursor_ret = partition->db()->Scan(blackwidow::DataType::kAll, cursor_ret, - pattern_, batch_count, &keys); - for (const auto& key : keys) { - RedisAppendLen(raw, key.size(), "$"); - RedisAppendContent(raw, key); - } - if (raw.size() >= raw_limit) { - res_.SetRes(CmdRes::kErrOther, "Response exceeds the max-client-response-size limit"); - return; - } - total_key += keys.size(); - } while (cursor_ret != 0 && left); - - res_.AppendArrayLen(2); - - char buf[32]; - int len = slash::ll2string(buf, sizeof(buf), cursor_ret); - res_.AppendStringLen(len); - res_.AppendContent(buf); - - res_.AppendArrayLen(total_key); - res_.AppendStringRaw(raw); - return; -} - -void ScanxCmd::DoInitial() { - if (!CheckArg(argv_.size())) { - res_.SetRes(CmdRes::kWrongNum, kCmdNameScanx); - return; - } - if (!strcasecmp(argv_[1].data(), "string")) { - type_ = blackwidow::kStrings; - } else if (!strcasecmp(argv_[1].data(), "hash")) { - type_ = blackwidow::kHashes; - } else if (!strcasecmp(argv_[1].data(), "set")) { - type_ = blackwidow::kSets; - } else if (!strcasecmp(argv_[1].data(), "zset")) { - type_ = blackwidow::kZSets; - } else if (!strcasecmp(argv_[1].data(), "list")) { - type_ = blackwidow::kLists; - } else { - res_.SetRes(CmdRes::kInvalidDbType); - return; - } - - start_key_ = argv_[2]; - size_t index = 3, argc = argv_.size(); - while (index < argc) { - std::string opt = argv_[index]; - if (!strcasecmp(opt.data(), "match") - || !strcasecmp(opt.data(), "count")) { - index++; - if (index >= argc) { - res_.SetRes(CmdRes::kSyntaxErr); - return; - } - if (!strcasecmp(opt.data(), "match")) { - pattern_ = argv_[index]; - } else if (!slash::string2l(argv_[index].data(), argv_[index].size(), &count_) || count_ <= 0) { - res_.SetRes(CmdRes::kInvalidInt); - return; - } - } else { - res_.SetRes(CmdRes::kSyntaxErr); - return; - } - index++; - } - return; -} - -void ScanxCmd::Do(std::shared_ptr partition) { - std::string next_key; - std::vector keys; - rocksdb::Status s = partition->db()->Scanx(type_, start_key_, pattern_, count_, &keys, &next_key); - - if (s.ok()) { - res_.AppendArrayLen(2); - res_.AppendStringLen(next_key.size()); - res_.AppendContent(next_key); - - res_.AppendArrayLen(keys.size()); - std::vector::iterator iter; - for (const auto& key : keys){ - res_.AppendString(key); - } - } else { - res_.SetRes(CmdRes::kErrOther, s.ToString()); - } - return; -} - -void PKSetexAtCmd::DoInitial() { - if (!CheckArg(argv_.size())) { - res_.SetRes(CmdRes::kWrongNum, kCmdNamePKSetexAt); - return; - } - key_ = argv_[1]; - value_ = argv_[3]; - if (!slash::string2l(argv_[2].data(), argv_[2].size(), &time_stamp_) - || time_stamp_ >= INT32_MAX) { - res_.SetRes(CmdRes::kInvalidInt); - return; - } - return; -} - -void PKSetexAtCmd::Do(std::shared_ptr partition) { - rocksdb::Status s = partition->db()->PKSetexAt(key_, value_, time_stamp_); - if (s.ok()) { - res_.SetRes(CmdRes::kOk); - } 
else { - res_.SetRes(CmdRes::kErrOther, s.ToString()); - } - return; -} - -void PKScanRangeCmd::DoInitial() { - if (!CheckArg(argv_.size())) { - res_.SetRes(CmdRes::kWrongNum, kCmdNamePKScanRange); - return; - } - if (!strcasecmp(argv_[1].data(), "string_with_value")) { - type_ = blackwidow::kStrings; - string_with_value = true; - } else if (!strcasecmp(argv_[1].data(), "string")) { - type_ = blackwidow::kStrings; - } else if (!strcasecmp(argv_[1].data(), "hash")) { - type_ = blackwidow::kHashes; - } else if (!strcasecmp(argv_[1].data(), "set")) { - type_ = blackwidow::kSets; - } else if (!strcasecmp(argv_[1].data(), "zset")) { - type_ = blackwidow::kZSets; - } else if (!strcasecmp(argv_[1].data(), "list")) { - type_ = blackwidow::kLists; - } else { - res_.SetRes(CmdRes::kInvalidDbType); - return; - } - - key_start_ = argv_[2]; - key_end_ = argv_[3]; - size_t index = 4, argc = argv_.size(); - while (index < argc) { - std::string opt = argv_[index]; - if (!strcasecmp(opt.data(), "match") - || !strcasecmp(opt.data(), "limit")) { - index++; - if (index >= argc) { - res_.SetRes(CmdRes::kSyntaxErr); - return; - } - if (!strcasecmp(opt.data(), "match")) { - pattern_ = argv_[index]; - } else if (!slash::string2l(argv_[index].data(), argv_[index].size(), &limit_) || limit_ <= 0) { - res_.SetRes(CmdRes::kInvalidInt); - return; - } - } else { - res_.SetRes(CmdRes::kSyntaxErr); - return; - } - index++; - } - return; -} - -void PKScanRangeCmd::Do(std::shared_ptr partition) { - std::string next_key; - std::vector keys; - std::vector kvs; - rocksdb::Status s = partition->db()->PKScanRange(type_, key_start_, key_end_, pattern_, limit_, &keys, &kvs, &next_key); - - if (s.ok()) { - res_.AppendArrayLen(2); - res_.AppendStringLen(next_key.size()); - res_.AppendContent(next_key); - - if (type_ == blackwidow::kStrings) { - res_.AppendArrayLen(string_with_value ? 
2 * kvs.size() : kvs.size()); - for (const auto& kv : kvs) { - res_.AppendString(kv.key); - if (string_with_value) { - res_.AppendString(kv.value); - } - } - } else { - res_.AppendArrayLen(keys.size()); - for (const auto& key : keys){ - res_.AppendString(key); - } - } - } else { - res_.SetRes(CmdRes::kErrOther, s.ToString()); - } - return; -} - -void PKRScanRangeCmd::DoInitial() { - if (!CheckArg(argv_.size())) { - res_.SetRes(CmdRes::kWrongNum, kCmdNamePKRScanRange); - return; - } - if (!strcasecmp(argv_[1].data(), "string_with_value")) { - type_ = blackwidow::kStrings; - string_with_value = true; - } else if (!strcasecmp(argv_[1].data(), "string")) { - type_ = blackwidow::kStrings; - } else if (!strcasecmp(argv_[1].data(), "hash")) { - type_ = blackwidow::kHashes; - } else if (!strcasecmp(argv_[1].data(), "set")) { - type_ = blackwidow::kSets; - } else if (!strcasecmp(argv_[1].data(), "zset")) { - type_ = blackwidow::kZSets; - } else if (!strcasecmp(argv_[1].data(), "list")) { - type_ = blackwidow::kLists; - } else { - res_.SetRes(CmdRes::kInvalidDbType); - return; - } - - key_start_ = argv_[2]; - key_end_ = argv_[3]; - size_t index = 4, argc = argv_.size(); - while (index < argc) { - std::string opt = argv_[index]; - if (!strcasecmp(opt.data(), "match") - || !strcasecmp(opt.data(), "limit")) { - index++; - if (index >= argc) { - res_.SetRes(CmdRes::kSyntaxErr); - return; - } - if (!strcasecmp(opt.data(), "match")) { - pattern_ = argv_[index]; - } else if (!slash::string2l(argv_[index].data(), argv_[index].size(), &limit_) || limit_ <= 0) { - res_.SetRes(CmdRes::kInvalidInt); - return; - } - } else { - res_.SetRes(CmdRes::kSyntaxErr); - return; - } - index++; - } - return; -} - -void PKRScanRangeCmd::Do(std::shared_ptr partition) { - std::string next_key; - std::vector keys; - std::vector kvs; - rocksdb::Status s = partition->db()->PKRScanRange(type_, key_start_, key_end_, pattern_, limit_, &keys, &kvs, &next_key); - - if (s.ok()) { - res_.AppendArrayLen(2); - res_.AppendStringLen(next_key.size()); - res_.AppendContent(next_key); - - if (type_ == blackwidow::kStrings) { - res_.AppendArrayLen(string_with_value ? 2 * kvs.size() : kvs.size()); - for (const auto& kv : kvs) { - res_.AppendString(kv.key); - if (string_with_value) { - res_.AppendString(kv.value); - } - } - } else { - res_.AppendArrayLen(keys.size()); - for (const auto& key : keys){ - res_.AppendString(key); - } - } - } else { - res_.SetRes(CmdRes::kErrOther, s.ToString()); - } - return; -} diff --git a/tools/pika_migrate/src/pika_list.cc b/tools/pika_migrate/src/pika_list.cc deleted file mode 100644 index cf4442dab4..0000000000 --- a/tools/pika_migrate/src/pika_list.cc +++ /dev/null @@ -1,321 +0,0 @@ -// Copyright (c) 2015-present, Qihoo, Inc. All rights reserved. -// This source code is licensed under the BSD-style license found in the -// LICENSE file in the root directory of this source tree. An additional grant -// of patent rights can be found in the PATENTS file in the same directory. 
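The PKHScanRange/PKHRScanRange and PKScanRange/PKRScanRange handlers above share one reply contract: the first element is a continuation token (next_field or next_key), which these handlers suggest is empty once the range is exhausted. A hedged client-side sketch of paging a string range with PKSCANRANGE under that assumption; the execute callback stands in for a real client round trip and is not a Pika API:

    // Pages through [start, end] in LIMIT-sized batches, resuming from the
    // continuation key the server returns with each reply.
    #include <functional>
    #include <string>
    #include <vector>

    struct ScanRangeReply {
      std::string next_key;             // assumed empty once the range is exhausted
      std::vector<std::string> keys;
    };

    void WalkRange(
        const std::function<ScanRangeReply(std::vector<std::string>)>& execute,
        const std::string& start, const std::string& end) {
      std::string cursor = start;
      while (true) {
        ScanRangeReply reply =
            execute({"pkscanrange", "string", cursor, end, "limit", "100"});
        for (const std::string& key : reply.keys) {
          // consume key...
        }
        if (reply.next_key.empty()) {
          break;                        // no continuation token: range done
        }
        cursor = reply.next_key;        // resume from the reported continuation key
      }
    }
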
-
-#include "include/pika_list.h"
-
-#include "slash/include/slash_string.h"
-
-void LIndexCmd::DoInitial() {
-  if (!CheckArg(argv_.size())) {
-    res_.SetRes(CmdRes::kWrongNum, kCmdNameLIndex);
-    return;
-  }
-  key_ = argv_[1];
-  std::string index = argv_[2];
-  if (!slash::string2l(index.data(), index.size(), &index_)) {
-    res_.SetRes(CmdRes::kInvalidInt);
-  }
-  return;
-}
-void LIndexCmd::Do(std::shared_ptr<Partition> partition) {
-  std::string value;
-  rocksdb::Status s = partition->db()->LIndex(key_, index_, &value);
-  if (s.ok()) {
-    res_.AppendString(value);
-  } else if (s.IsNotFound()) {
-    res_.AppendStringLen(-1);
-  } else {
-    res_.SetRes(CmdRes::kErrOther, s.ToString());
-  }
-}
-
-void LInsertCmd::DoInitial() {
-  if (!CheckArg(argv_.size())) {
-    res_.SetRes(CmdRes::kWrongNum, kCmdNameLInsert);
-    return;
-  }
-  key_ = argv_[1];
-  std::string dir = argv_[2];
-  if (!strcasecmp(dir.data(), "before")) {
-    dir_ = blackwidow::Before;
-  } else if (!strcasecmp(dir.data(), "after")) {
-    dir_ = blackwidow::After;
-  } else {
-    res_.SetRes(CmdRes::kSyntaxErr);
-    return;
-  }
-  pivot_ = argv_[3];
-  value_ = argv_[4];
-}
-void LInsertCmd::Do(std::shared_ptr<Partition> partition) {
-  int64_t llen = 0;
-  rocksdb::Status s = partition->db()->LInsert(key_, dir_, pivot_, value_, &llen);
-  if (s.ok() || s.IsNotFound()) {
-    res_.AppendInteger(llen);
-  } else {
-    res_.SetRes(CmdRes::kErrOther, s.ToString());
-  }
-}
-
-void LLenCmd::DoInitial() {
-  if (!CheckArg(argv_.size())) {
-    res_.SetRes(CmdRes::kWrongNum, kCmdNameLLen);
-    return;
-  }
-  key_ = argv_[1];
-}
-void LLenCmd::Do(std::shared_ptr<Partition> partition) {
-  uint64_t llen = 0;
-  rocksdb::Status s = partition->db()->LLen(key_, &llen);
-  if (s.ok() || s.IsNotFound()) {
-    res_.AppendInteger(llen);
-  } else {
-    res_.SetRes(CmdRes::kErrOther, s.ToString());
-  }
-}
-
-void LPushCmd::DoInitial() {
-  if (!CheckArg(argv_.size())) {
-    res_.SetRes(CmdRes::kWrongNum, kCmdNameLPush);
-    return;
-  }
-  key_ = argv_[1];
-  size_t pos = 2;
-  while (pos < argv_.size()) {
-    values_.push_back(argv_[pos++]);
-  }
-}
-void LPushCmd::Do(std::shared_ptr<Partition> partition) {
-  uint64_t llen = 0;
-  rocksdb::Status s = partition->db()->LPush(key_, values_, &llen);
-  if (s.ok()) {
-    res_.AppendInteger(llen);
-  } else {
-    res_.SetRes(CmdRes::kErrOther, s.ToString());
-  }
-}
-
-void LPopCmd::DoInitial() {
-  if (!CheckArg(argv_.size())) {
-    res_.SetRes(CmdRes::kWrongNum, kCmdNameLPop);
-    return;
-  }
-  key_ = argv_[1];
-}
-void LPopCmd::Do(std::shared_ptr<Partition> partition) {
-  std::string value;
-  rocksdb::Status s = partition->db()->LPop(key_, &value);
-  if (s.ok()) {
-    res_.AppendString(value);
-  } else if (s.IsNotFound()) {
-    res_.AppendStringLen(-1);
-  } else {
-    res_.SetRes(CmdRes::kErrOther, s.ToString());
-  }
-}
-
-void LPushxCmd::DoInitial() {
-  if (!CheckArg(argv_.size())) {
-    res_.SetRes(CmdRes::kWrongNum, kCmdNameLPushx);
-    return;
-  }
-  key_ = argv_[1];
-  value_ = argv_[2];
-}
-void LPushxCmd::Do(std::shared_ptr<Partition> partition) {
-  uint64_t llen = 0;
-  rocksdb::Status s = partition->db()->LPushx(key_, value_, &llen);
-  if (s.ok() || s.IsNotFound()) {
-    res_.AppendInteger(llen);
-  } else {
-    res_.SetRes(CmdRes::kErrOther, s.ToString());
-  }
-}
-
-void LRangeCmd::DoInitial() {
-  if (!CheckArg(argv_.size())) {
-    res_.SetRes(CmdRes::kWrongNum, kCmdNameLRange);
-    return;
-  }
-  key_ = argv_[1];
-  std::string left = argv_[2];
-  if (!slash::string2l(left.data(), left.size(), &left_)) {
-    res_.SetRes(CmdRes::kInvalidInt);
-    return;
-  }
-  std::string right = argv_[3];
-  if (!slash::string2l(right.data(), right.size(), &right_)) {
-
-    res_.SetRes(CmdRes::kInvalidInt);
-  }
-  return;
-}
-void LRangeCmd::Do(std::shared_ptr<Partition> partition) {
-  std::vector<std::string> values;
-  rocksdb::Status s = partition->db()->LRange(key_, left_, right_, &values);
-  if (s.ok()) {
-    res_.AppendArrayLen(values.size());
-    for (const auto& value : values) {
-      res_.AppendString(value);
-    }
-  } else if (s.IsNotFound()) {
-    res_.AppendArrayLen(0);
-  } else {
-    res_.SetRes(CmdRes::kErrOther, s.ToString());
-  }
-}
-
-void LRemCmd::DoInitial() {
-  if (!CheckArg(argv_.size())) {
-    res_.SetRes(CmdRes::kWrongNum, kCmdNameLRem);
-    return;
-  }
-  key_ = argv_[1];
-  std::string count = argv_[2];
-  if (!slash::string2l(count.data(), count.size(), &count_)) {
-    res_.SetRes(CmdRes::kInvalidInt);
-    return;
-  }
-  value_ = argv_[3];
-}
-void LRemCmd::Do(std::shared_ptr<Partition> partition) {
-  uint64_t res = 0;
-  rocksdb::Status s = partition->db()->LRem(key_, count_, value_, &res);
-  if (s.ok() || s.IsNotFound()) {
-    res_.AppendInteger(res);
-  } else {
-    res_.SetRes(CmdRes::kErrOther, s.ToString());
-  }
-}
-
-void LSetCmd::DoInitial() {
-  if (!CheckArg(argv_.size())) {
-    res_.SetRes(CmdRes::kWrongNum, kCmdNameLSet);
-    return;
-  }
-  key_ = argv_[1];
-  std::string index = argv_[2];
-  if (!slash::string2l(index.data(), index.size(), &index_)) {
-    res_.SetRes(CmdRes::kInvalidInt);
-    return;
-  }
-  value_ = argv_[3];
-}
-void LSetCmd::Do(std::shared_ptr<Partition> partition) {
-  rocksdb::Status s = partition->db()->LSet(key_, index_, value_);
-  if (s.ok()) {
-    res_.SetRes(CmdRes::kOk);
-  } else if (s.IsNotFound()) {
-    res_.SetRes(CmdRes::kNotFound);
-  } else if (s.IsCorruption() && s.ToString() == "Corruption: index out of range") {
-    // TODO: refine return value
-    res_.SetRes(CmdRes::kOutOfRange);
-  } else {
-    res_.SetRes(CmdRes::kErrOther, s.ToString());
-  }
-}
-
-void LTrimCmd::DoInitial() {
-  if (!CheckArg(argv_.size())) {
-    res_.SetRes(CmdRes::kWrongNum, kCmdNameLTrim);
-    return;
-  }
-  key_ = argv_[1];
-  std::string start = argv_[2];
-  if (!slash::string2l(start.data(), start.size(), &start_)) {
-    res_.SetRes(CmdRes::kInvalidInt);
-    return;
-  }
-  std::string stop = argv_[3];
-  if (!slash::string2l(stop.data(), stop.size(), &stop_)) {
-    res_.SetRes(CmdRes::kInvalidInt);
-  }
-  return;
-}
-void LTrimCmd::Do(std::shared_ptr<Partition> partition) {
-  rocksdb::Status s = partition->db()->LTrim(key_, start_, stop_);
-  if (s.ok() || s.IsNotFound()) {
-    res_.SetRes(CmdRes::kOk);
-  } else {
-    res_.SetRes(CmdRes::kErrOther, s.ToString());
-  }
-}
-
-void RPopCmd::DoInitial() {
-  if (!CheckArg(argv_.size())) {
-    res_.SetRes(CmdRes::kWrongNum, kCmdNameRPop);
-    return;
-  }
-  key_ = argv_[1];
-}
-void RPopCmd::Do(std::shared_ptr<Partition> partition) {
-  std::string value;
-  rocksdb::Status s = partition->db()->RPop(key_, &value);
-  if (s.ok()) {
-    res_.AppendString(value);
-  } else if (s.IsNotFound()) {
-    res_.AppendStringLen(-1);
-  } else {
-    res_.SetRes(CmdRes::kErrOther, s.ToString());
-  }
-}
-
-void RPopLPushCmd::DoInitial() {
-  if (!CheckArg(argv_.size())) {
-    res_.SetRes(CmdRes::kWrongNum, kCmdNameRPopLPush);
-    return;
-  }
-  source_ = argv_[1];
-  receiver_ = argv_[2];
-}
-void RPopLPushCmd::Do(std::shared_ptr<Partition> partition) {
-  std::string value;
-  rocksdb::Status s = partition->db()->RPoplpush(source_, receiver_, &value);
-  if (s.ok()) {
-    res_.AppendString(value);
-  } else if (s.IsNotFound()) {
-    res_.AppendStringLen(-1);
-  } else {
-    res_.SetRes(CmdRes::kErrOther, s.ToString());
-  }
-}
-
-void RPushCmd::DoInitial() {
-  if (!CheckArg(argv_.size())) {
-    res_.SetRes(CmdRes::kWrongNum, kCmdNameRPush);
-    return;
-  }
-  key_ = argv_[1];
-  size_t pos = 2;
-  while (pos < argv_.size()) {
-    values_.push_back(argv_[pos++]);
-  }
-}
-void RPushCmd::Do(std::shared_ptr<Partition> partition) {
-  uint64_t llen = 0;
-  rocksdb::Status s = partition->db()->RPush(key_, values_, &llen);
-  if (s.ok()) {
-    res_.AppendInteger(llen);
-  } else {
-    res_.SetRes(CmdRes::kErrOther, s.ToString());
-  }
-}
-
-void RPushxCmd::DoInitial() {
-  if (!CheckArg(argv_.size())) {
-    res_.SetRes(CmdRes::kWrongNum, kCmdNameRPushx);
-    return;
-  }
-  key_ = argv_[1];
-  value_ = argv_[2];
-}
-void RPushxCmd::Do(std::shared_ptr<Partition> partition) {
-  uint64_t llen = 0;
-  rocksdb::Status s = partition->db()->RPushx(key_, value_, &llen);
-  if (s.ok() || s.IsNotFound()) {
-    res_.AppendInteger(llen);
-  } else {
-    res_.SetRes(CmdRes::kErrOther, s.ToString());
-  }
-}
diff --git a/tools/pika_migrate/src/pika_meta.cc b/tools/pika_migrate/src/pika_meta.cc
deleted file mode 100644
index 48e11de7e2..0000000000
--- a/tools/pika_migrate/src/pika_meta.cc
+++ /dev/null
@@ -1,131 +0,0 @@
-// Copyright (c) 2015-present, Qihoo, Inc. All rights reserved.
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree. An additional grant
-// of patent rights can be found in the PATENTS file in the same directory.
-
-#include "include/pika_meta.h"
-#include "src/pika_inner_message.pb.h"
-
-const uint32_t VERSION = 1;
-
-PikaMeta::PikaMeta()
-    : local_meta_path_("") {
-  pthread_rwlock_init(&rwlock_, NULL);
-}
-
-PikaMeta::~PikaMeta() {
-  pthread_rwlock_destroy(&rwlock_);
-}
-
-void PikaMeta::SetPath(const std::string& path) {
-  local_meta_path_ = path;
-}
-
-/*
- * ******************* Meta File Format ******************
- * |   <Version>   |   <Meta Size>   |      <Meta>      |
- *      4 Bytes          4 Bytes       meta size Bytes
- */
-Status PikaMeta::StableSave(const std::vector<TableStruct>& table_structs) {
-  slash::RWLock l(&rwlock_, true);
-  if (local_meta_path_.empty()) {
-    LOG(WARNING) << "Local meta file path empty";
-    return Status::Corruption("local meta file path empty");
-  }
-  std::string local_meta_file = local_meta_path_ + kPikaMeta;
-  std::string tmp_file = local_meta_file;
-  tmp_file.append("_tmp");
-
-  slash::RWFile* saver = NULL;
-  slash::CreatePath(local_meta_path_);
-  Status s = slash::NewRWFile(tmp_file, &saver);
-  if (!s.ok()) {
-    delete saver;
-    LOG(WARNING) << "Open local meta file failed";
-    return Status::Corruption("open local meta file failed");
-  }
-
-  InnerMessage::PikaMeta meta;
-  for (const auto& ts : table_structs) {
-    InnerMessage::TableInfo* table_info = meta.add_table_infos();
-    table_info->set_table_name(ts.table_name);
-    table_info->set_partition_num(ts.partition_num);
-    for (const auto& id : ts.partition_ids) {
-      table_info->add_partition_ids(id);
-    }
-  }
-
-  std::string meta_str;
-  if (!meta.SerializeToString(&meta_str)) {
-    delete saver;
-    LOG(WARNING) << "Serialize meta string failed";
-    return Status::Corruption("serialize meta string failed");
-  }
-  uint32_t meta_str_size = meta_str.size();
-
-  char* p = saver->GetData();
-  memcpy(p, &VERSION, sizeof(uint32_t));
-  p += sizeof(uint32_t);
-  memcpy(p, &meta_str_size, sizeof(uint32_t));
-  p += sizeof(uint32_t);
-  memcpy(p, meta_str.data(), meta_str.size());
-  delete saver;
-
-  slash::DeleteFile(local_meta_file);
-  if (slash::RenameFile(tmp_file, local_meta_file)) {
-    LOG(WARNING) << "Failed to rename file, error: " << strerror(errno);
-    return Status::Corruption("failed to rename file");
-  }
-  return Status::OK();
-}
-
-Status PikaMeta::ParseMeta(std::vector<TableStruct>* const table_structs) {
-  slash::RWLock l(&rwlock_, false);
-  std::string local_meta_file = local_meta_path_ + kPikaMeta;
-  if (!slash::FileExists(local_meta_file)) {
-    LOG(WARNING) << "Local meta file not found, path: " << local_meta_file;
-    return Status::Corruption("meta file not found");
-  }
-
-  slash::RWFile* reader = NULL;
-  Status s = slash::NewRWFile(local_meta_file, &reader);
-  if (!s.ok()) {
-    delete reader;
-    LOG(WARNING) << "Open local meta file failed";
-    return Status::Corruption("open local meta file failed");
-  }
-
-  if (reader->GetData() == NULL) {
-    delete reader;
-    LOG(WARNING) << "Meta file init error";
-    return Status::Corruption("meta file init error");
-  }
-
-  uint32_t version = 0;
-  uint32_t meta_size = 0;
-  memcpy((char*)(&version), reader->GetData(), sizeof(uint32_t));
-  memcpy((char*)(&meta_size), reader->GetData() + sizeof(uint32_t), sizeof(uint32_t));
-  char* const buf = new char[meta_size];
-  memcpy(buf, reader->GetData() + 2 * sizeof(uint32_t), meta_size);
-
-  InnerMessage::PikaMeta meta;
-  if (!meta.ParseFromArray(buf, meta_size)) {
-    delete[] buf;
-    delete reader;
-    LOG(WARNING) << "Parse meta string failed";
-    return Status::Corruption("parse meta string failed");
-  }
-  delete[] buf;
-  delete reader;
-
-  table_structs->clear();
-  for (int idx = 0; idx < meta.table_infos_size(); ++idx) {
-    InnerMessage::TableInfo ti = meta.table_infos(idx);
-    std::set<uint32_t> partition_ids;
-    for (int sidx = 0; sidx < ti.partition_ids_size(); ++sidx) {
-      partition_ids.insert(ti.partition_ids(sidx));
-    }
-    table_structs->emplace_back(ti.table_name(), ti.partition_num(), partition_ids);
-  }
-  return Status::OK();
-}
diff --git a/tools/pika_migrate/src/pika_monitor_thread.cc b/tools/pika_migrate/src/pika_monitor_thread.cc
deleted file mode 100644
index 746aa09080..0000000000
--- a/tools/pika_migrate/src/pika_monitor_thread.cc
+++ /dev/null
@@ -1,203 +0,0 @@
-// Copyright (c) 2015-present, Qihoo, Inc. All rights reserved.
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree. An additional grant
-// of patent rights can be found in the PATENTS file in the same directory.
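Note: the pika_meta.cc file removed above stores its metadata as a fixed 8-byte header (a 4-byte version and a 4-byte payload size, both copied with memcpy in host byte order) followed by the serialized protobuf. The standalone sketch below illustrates reading that layout back; ReadMetaHeader is a hypothetical helper, not part of Pika, and it assumes the same host-endian layout the original memcpy-based code relies on.

    #include <cstddef>
    #include <cstdint>
    #include <cstring>
    #include <string>

    // Parse the | <Version> | <Meta Size> | <Meta> | layout written by
    // PikaMeta::StableSave. Returns false if the buffer is too short.
    bool ReadMetaHeader(const char* data, size_t len,
                        uint32_t* version, std::string* meta_payload) {
      uint32_t meta_size = 0;
      if (len < 2 * sizeof(uint32_t)) return false;
      memcpy(version, data, sizeof(uint32_t));                        // bytes 0..3
      memcpy(&meta_size, data + sizeof(uint32_t), sizeof(uint32_t));  // bytes 4..7
      if (len < 2 * sizeof(uint32_t) + meta_size) return false;
      meta_payload->assign(data + 2 * sizeof(uint32_t), meta_size);
      return true;
    }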
-
-#include "include/pika_monitor_thread.h"
-
-#include <glog/logging.h>
-
-PikaMonitorThread::PikaMonitorThread()
-    : pink::Thread(),
-      monitor_cond_(&monitor_mutex_protector_) {
-  set_thread_name("MonitorThread");
-  has_monitor_clients_.store(false);
-}
-
-PikaMonitorThread::~PikaMonitorThread() {
-  set_should_stop();
-  if (is_running()) {
-    monitor_cond_.SignalAll();
-    StopThread();
-  }
-  for (std::list<ClientInfo>::iterator iter = monitor_clients_.begin();
-       iter != monitor_clients_.end();
-       ++iter) {
-    close(iter->fd);
-  }
-  LOG(INFO) << "PikaMonitorThread " << pthread_self() << " exit!!!";
-}
-
-void PikaMonitorThread::AddMonitorClient(std::shared_ptr<PikaClientConn> client_ptr) {
-  StartThread();
-  slash::MutexLock lm(&monitor_mutex_protector_);
-  monitor_clients_.push_back(ClientInfo{client_ptr->fd(), client_ptr->ip_port(), 0, client_ptr});
-  has_monitor_clients_.store(true);
-}
-
-void PikaMonitorThread::RemoveMonitorClient(const std::string& ip_port) {
-  std::list<ClientInfo>::iterator iter = monitor_clients_.begin();
-  for (; iter != monitor_clients_.end(); ++iter) {
-    if (ip_port == "all") {
-      close(iter->fd);
-      continue;
-    }
-    if (iter->ip_port == ip_port) {
-      close(iter->fd);
-      break;
-    }
-  }
-  if (ip_port == "all") {
-    monitor_clients_.clear();
-  } else if (iter != monitor_clients_.end()) {
-    monitor_clients_.erase(iter);
-  }
-  has_monitor_clients_.store(!monitor_clients_.empty());
-}
-
-void PikaMonitorThread::AddMonitorMessage(const std::string& monitor_message) {
-  slash::MutexLock lm(&monitor_mutex_protector_);
-  if (monitor_messages_.empty() && cron_tasks_.empty()) {
-    monitor_messages_.push_back(monitor_message);
-    monitor_cond_.Signal();
-  } else {
-    monitor_messages_.push_back(monitor_message);
-  }
-}
-
-int32_t PikaMonitorThread::ThreadClientList(std::vector<ClientInfo>* clients_ptr) {
-  if (clients_ptr != NULL) {
-    for (std::list<ClientInfo>::iterator iter = monitor_clients_.begin();
-         iter != monitor_clients_.end();
-         iter++) {
-      clients_ptr->push_back(*iter);
-    }
-  }
-  return monitor_clients_.size();
-}
-
-void PikaMonitorThread::AddCronTask(MonitorCronTask task) {
-  slash::MutexLock lm(&monitor_mutex_protector_);
-  if (monitor_messages_.empty() && cron_tasks_.empty()) {
-    cron_tasks_.push(task);
-    monitor_cond_.Signal();
-  } else {
-    cron_tasks_.push(task);
-  }
-}
-
-bool PikaMonitorThread::FindClient(const std::string& ip_port) {
-  slash::MutexLock lm(&monitor_mutex_protector_);
-  for (std::list<ClientInfo>::iterator iter = monitor_clients_.begin();
-       iter != monitor_clients_.end();
-       ++iter) {
-    if (iter->ip_port == ip_port) {
-      return true;
-    }
-  }
-  return false;
-}
-
-bool PikaMonitorThread::ThreadClientKill(const std::string& ip_port) {
-  if (is_running()) {
-    if (ip_port == "all") {
-      AddCronTask({TASK_KILLALL, "all"});
-    } else if (FindClient(ip_port)) {
-      AddCronTask({TASK_KILL, ip_port});
-    } else {
-      return false;
-    }
-  }
-  return true;
-}
-
-bool PikaMonitorThread::HasMonitorClients() {
-  return has_monitor_clients_.load();
-}
-
-pink::WriteStatus PikaMonitorThread::SendMessage(int32_t fd, std::string& message) {
-  size_t retry = 0;
-  ssize_t nwritten = 0, message_len_sended = 0, message_len_left = message.size();
-  while (message_len_left > 0) {
-    nwritten = write(fd, message.data() + message_len_sended, message_len_left);
-    if (nwritten == -1 && errno == EAGAIN) {
-      // If the write buffer is full but the client no longer consumes, we would
-      // get stuck in this loop and block all of Pika because of monitor_mutex_protector_.
-      // So we put a limit on the number of retries
-      if (++retry >= 10) {
-        return pink::kWriteError;
-      } else {
-        // Sleep one second and wait for the client to consume the message
-        sleep(1);
-        continue;
-      }
-    } else if (nwritten == -1) {
-      return pink::kWriteError;
-    }
-    if (retry > 0) retry = 0;
-    message_len_sended += nwritten;
-    message_len_left -= nwritten;
-  }
-  return pink::kWriteAll;
-}
-
-void* PikaMonitorThread::ThreadMain() {
-  std::deque<std::string> messages_deque;
-  std::string messages_transfer;
-  MonitorCronTask task;
-  pink::WriteStatus write_status;
-  while (!should_stop()) {
-    {
-      slash::MutexLock lm(&monitor_mutex_protector_);
-      while (monitor_messages_.empty() && cron_tasks_.empty() && !should_stop()) {
-        monitor_cond_.Wait();
-      }
-    }
-    if (should_stop()) {
-      break;
-    }
-    {
-      slash::MutexLock lm(&monitor_mutex_protector_);
-      while (!cron_tasks_.empty()) {
-        task = cron_tasks_.front();
-        cron_tasks_.pop();
-        RemoveMonitorClient(task.ip_port);
-        if (task.task == TASK_KILLALL) {
-          std::queue<MonitorCronTask> empty_queue;
-          cron_tasks_.swap(empty_queue);
-        }
-      }
-    }
-
-    messages_deque.clear();
-    {
-      slash::MutexLock lm(&monitor_mutex_protector_);
-      messages_deque.swap(monitor_messages_);
-      if (monitor_clients_.empty() || messages_deque.empty()) {
-        continue;
-      }
-    }
-    messages_transfer = "+";
-    for (std::deque<std::string>::iterator iter = messages_deque.begin();
-         iter != messages_deque.end();
-         ++iter) {
-      messages_transfer.append(iter->data(), iter->size());
-      messages_transfer.append("\n");
-    }
-    if (messages_transfer == "+") {
-      continue;
-    }
-    messages_transfer.replace(messages_transfer.size() - 1, 1, "\r\n", 0, 2);
-    monitor_mutex_protector_.Lock();
-    for (std::list<ClientInfo>::iterator iter = monitor_clients_.begin();
-         iter != monitor_clients_.end();
-         ++iter) {
-      write_status = SendMessage(iter->fd, messages_transfer);
-      if (write_status == pink::kWriteError) {
-        cron_tasks_.push({TASK_KILL, iter->ip_port});
-      }
-    }
-    monitor_mutex_protector_.Unlock();
-  }
-  return NULL;
-}
diff --git a/tools/pika_migrate/src/pika_partition.cc b/tools/pika_migrate/src/pika_partition.cc
deleted file mode 100644
index 5d4c014135..0000000000
--- a/tools/pika_migrate/src/pika_partition.cc
+++ /dev/null
@@ -1,679 +0,0 @@
-// Copyright (c) 2018-present, Qihoo, Inc. All rights reserved.
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree. An additional grant
-// of patent rights can be found in the PATENTS file in the same directory.
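Note: PikaMonitorThread::ThreadMain above flushes each batch of queued monitor messages to every MONITOR client as a single RESP simple string: messages are joined with '\n' and the final '\n' is rewritten to "\r\n". A condensed sketch of that framing; BuildMonitorPayload is a hypothetical name used for illustration only.

    #include <deque>
    #include <string>

    // Same framing as ThreadMain: "+<msg1>\n<msg2>\n...<msgN>\r\n".
    // Returns an empty string when there is nothing to send.
    std::string BuildMonitorPayload(const std::deque<std::string>& msgs) {
      std::string payload = "+";
      for (const std::string& m : msgs) {
        payload.append(m);
        payload.append("\n");
      }
      if (payload == "+") return "";                   // no messages at all
      payload.replace(payload.size() - 1, 1, "\r\n");  // last '\n' -> CRLF
      return payload;
    }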
- -#include "include/pika_partition.h" - -#include - -#include "include/pika_conf.h" -#include "include/pika_server.h" -#include "include/pika_rm.h" - -#include "slash/include/mutex_impl.h" - -extern PikaConf* g_pika_conf; -extern PikaServer* g_pika_server; -extern PikaReplicaManager* g_pika_rm; - -std::string PartitionPath(const std::string& table_path, - uint32_t partition_id) { - char buf[100]; - snprintf(buf, sizeof(buf), "%u/", partition_id); - return table_path + buf; -} - -std::string PartitionName(const std::string& table_name, - uint32_t partition_id) { - char buf[256]; - snprintf(buf, sizeof(buf), "(%s:%u)", table_name.data(), partition_id); - return std::string(buf); -} - -std::string BgsaveSubPath(const std::string& table_name, - uint32_t partition_id) { - char buf[256]; - std::string partition_id_str = std::to_string(partition_id); - snprintf(buf, sizeof(buf), "%s/%s", table_name.data(), partition_id_str.data()); - return std::string(buf); -} - -std::string DbSyncPath(const std::string& sync_path, - const std::string& table_name, - const uint32_t partition_id, - bool classic_mode) { - char buf[256]; - std::string partition_id_str = std::to_string(partition_id); - if (classic_mode) { - snprintf(buf, sizeof(buf), "%s/", table_name.data()); - } else { - snprintf(buf, sizeof(buf), "%s/%s/", table_name.data(), partition_id_str.data()); - } - return sync_path + buf; -} - -Partition::Partition(const std::string& table_name, - uint32_t partition_id, - const std::string& table_db_path, - const std::string& table_log_path) : - table_name_(table_name), - partition_id_(partition_id), - binlog_io_error_(false), - bgsave_engine_(NULL), - purging_(false) { - - db_path_ = g_pika_conf->classic_mode() ? - table_db_path : PartitionPath(table_db_path, partition_id_); - log_path_ = g_pika_conf->classic_mode() ? - table_log_path : PartitionPath(table_log_path, partition_id_); - bgsave_sub_path_ = g_pika_conf->classic_mode() ? - table_name : BgsaveSubPath(table_name_, partition_id_); - dbsync_path_ = DbSyncPath(g_pika_conf->db_sync_path(), table_name_, - partition_id_, g_pika_conf->classic_mode()); - partition_name_ = g_pika_conf->classic_mode() ? - table_name : PartitionName(table_name_, partition_id_); - - pthread_rwlockattr_t attr; - pthread_rwlockattr_init(&attr); - pthread_rwlockattr_setkind_np(&attr, - PTHREAD_RWLOCK_PREFER_WRITER_NONRECURSIVE_NP); - - pthread_rwlock_init(&db_rwlock_, &attr); - - db_ = std::shared_ptr(new blackwidow::BlackWidow()); - rocksdb::Status s = db_->Open(g_pika_server->bw_options(), db_path_); - - lock_mgr_ = new slash::lock::LockMgr(1000, 0, std::make_shared()); - - opened_ = s.ok() ? 
true : false; - assert(db_); - assert(s.ok()); - LOG(INFO) << partition_name_ << " DB Success"; - - logger_ = std::shared_ptr( - new Binlog(log_path_, g_pika_conf->binlog_file_size())); -} - -Partition::~Partition() { - Close(); - delete bgsave_engine_; - pthread_rwlock_destroy(&db_rwlock_); - delete lock_mgr_; -} - -void Partition::Leave() { - Close(); - MoveToTrash(); -} - -void Partition::Close() { - if (!opened_) { - return; - } - slash::RWLock rwl(&db_rwlock_, true); - db_.reset(); - logger_.reset(); - opened_ = false; -} - -// Before call this function, should -// close db and log first -void Partition::MoveToTrash() { - if (opened_) { - return; - } - - std::string dbpath = db_path_; - if (dbpath[dbpath.length() - 1] == '/') { - dbpath.erase(dbpath.length() - 1); - } - dbpath.append("_deleting/"); - if (slash::RenameFile(db_path_, dbpath.c_str())) { - LOG(WARNING) << "Failed to move db to trash, error: " << strerror(errno); - return; - } - g_pika_server->PurgeDir(dbpath); - - std::string logpath = log_path_; - if (logpath[logpath.length() - 1] == '/') { - logpath.erase(logpath.length() - 1); - } - logpath.append("_deleting/"); - if (slash::RenameFile(log_path_, logpath.c_str())) { - LOG(WARNING) << "Failed to move log to trash, error: " << strerror(errno); - return; - } - g_pika_server->PurgeDir(logpath); - - LOG(WARNING) << "Partition: " << partition_name_ << " move to trash success"; -} - -std::string Partition::GetTableName() const { - return table_name_; -} - -uint32_t Partition::GetPartitionId() const { - return partition_id_; -} - -std::string Partition::GetPartitionName() const { - return partition_name_; -} - -std::shared_ptr Partition::logger() const { - return logger_; -} - -std::shared_ptr Partition::db() const { - return db_; -} - -Status Partition::WriteBinlog(const std::string& binlog) { - if (!opened_) { - LOG(WARNING) << partition_name_ << " not opened, failed to exec command"; - return Status::Corruption("Partition Not Opened"); - } - slash::Status s; - if (!binlog.empty()) { - s = logger_->Put(binlog); - } - - if (!s.ok()) { - LOG(WARNING) << partition_name_ << " Writing binlog failed, maybe no space left on device"; - SetBinlogIoError(true); - return Status::Corruption("Writing binlog failed, maybe no space left on device"); - } - return Status::OK(); -} - -void Partition::Compact(const blackwidow::DataType& type) { - if (!opened_) return; - db_->Compact(type); -} - -void Partition::DbRWLockWriter() { - pthread_rwlock_wrlock(&db_rwlock_); -} - -void Partition::DbRWLockReader() { - pthread_rwlock_rdlock(&db_rwlock_); -} - -void Partition::DbRWUnLock() { - pthread_rwlock_unlock(&db_rwlock_); -} - -slash::lock::LockMgr* Partition::LockMgr() { - return lock_mgr_; -} - -void Partition::SetBinlogIoError(bool error) { - binlog_io_error_ = error; -} - -bool Partition::IsBinlogIoError() { - return binlog_io_error_; -} - -bool Partition::GetBinlogOffset(BinlogOffset* const boffset) { - if (opened_) { - logger_->GetProducerStatus(&boffset->filenum, &boffset->offset); - return true; - } - return false; -} - -bool Partition::SetBinlogOffset(const BinlogOffset& boffset) { - if (opened_) { - logger_->SetProducerStatus(boffset.filenum, boffset.offset); - return true; - } - return false; -} - -void Partition::PrepareRsync() { - slash::DeleteDirIfExist(dbsync_path_); - slash::CreatePath(dbsync_path_ + "strings"); - slash::CreatePath(dbsync_path_ + "hashes"); - slash::CreatePath(dbsync_path_ + "lists"); - slash::CreatePath(dbsync_path_ + "sets"); - slash::CreatePath(dbsync_path_ + 
"zsets"); -} - -// Try to update master offset -// This may happend when dbsync from master finished -// Here we do: -// 1, Check dbsync finished, got the new binlog offset -// 2, Replace the old db -// 3, Update master offset, and the PikaAuxiliaryThread cron will connect and do slaveof task with master -bool Partition::TryUpdateMasterOffset() { - std::string info_path = dbsync_path_ + kBgsaveInfoFile; - if (!slash::FileExists(info_path)) { - return false; - } - - std::shared_ptr slave_partition = - g_pika_rm->GetSyncSlavePartitionByName( - PartitionInfo(table_name_, partition_id_)); - if (!slave_partition) { - LOG(WARNING) << "Slave Partition: " << partition_name_ << " not exist"; - return false; - } - - // Got new binlog offset - std::ifstream is(info_path); - if (!is) { - LOG(WARNING) << "Partition: " << partition_name_ - << ", Failed to open info file after db sync"; - slave_partition->SetReplState(ReplState::kError); - return false; - } - std::string line, master_ip; - int lineno = 0; - int64_t filenum = 0, offset = 0, tmp = 0, master_port = 0; - while (std::getline(is, line)) { - lineno++; - if (lineno == 2) { - master_ip = line; - } else if (lineno > 2 && lineno < 6) { - if (!slash::string2l(line.data(), line.size(), &tmp) || tmp < 0) { - LOG(WARNING) << "Partition: " << partition_name_ - << ", Format of info file after db sync error, line : " << line; - is.close(); - slave_partition->SetReplState(ReplState::kError); - return false; - } - if (lineno == 3) { master_port = tmp; } - else if (lineno == 4) { filenum = tmp; } - else { offset = tmp; } - - } else if (lineno > 5) { - LOG(WARNING) << "Partition: " << partition_name_ - << ", Format of info file after db sync error, line : " << line; - is.close(); - slave_partition->SetReplState(ReplState::kError); - return false; - } - } - is.close(); - - LOG(INFO) << "Partition: " << partition_name_ << " Information from dbsync info" - << ", master_ip: " << master_ip - << ", master_port: " << master_port - << ", filenum: " << filenum - << ", offset: " << offset; - - // Sanity check - if (master_ip != slave_partition->MasterIp() - || master_port != slave_partition->MasterPort()) { - LOG(WARNING) << "Partition: " << partition_name_ - << " Error master node ip port: " << master_ip << ":" << master_port; - slave_partition->SetReplState(ReplState::kError); - return false; - } - - // Retransmit Data to target redis - g_pika_server->RetransmitData(dbsync_path_); - - slash::DeleteFile(info_path); - if (!ChangeDb(dbsync_path_)) { - LOG(WARNING) << "Partition: " << partition_name_ - << ", Failed to change db"; - slave_partition->SetReplState(ReplState::kError); - return false; - } - - // Update master offset - logger_->SetProducerStatus(filenum, offset); - slave_partition->SetReplState(ReplState::kTryConnect); - return true; -} - -/* - * Change a new db locate in new_path - * return true when change success - * db remain the old one if return false - */ -bool Partition::ChangeDb(const std::string& new_path) { - - std::string tmp_path(db_path_); - if (tmp_path.back() == '/') { - tmp_path.resize(tmp_path.size() - 1); - } - tmp_path += "_bak"; - slash::DeleteDirIfExist(tmp_path); - - RWLock l(&db_rwlock_, true); - LOG(INFO) << "Partition: "<< partition_name_ - << ", Prepare change db from: " << tmp_path; - db_.reset(); - - if (0 != slash::RenameFile(db_path_.c_str(), tmp_path)) { - LOG(WARNING) << "Partition: " << partition_name_ - << ", Failed to rename db path when change db, error: " << strerror(errno); - return false; - } - - if (0 != 
slash::RenameFile(new_path.c_str(), db_path_.c_str())) { - LOG(WARNING) << "Partition: " << partition_name_ - << ", Failed to rename new db path when change db, error: " << strerror(errno); - return false; - } - - db_.reset(new blackwidow::BlackWidow()); - rocksdb::Status s = db_->Open(g_pika_server->bw_options(), db_path_); - assert(db_); - assert(s.ok()); - slash::DeleteDirIfExist(tmp_path); - LOG(INFO) << "Partition: " << partition_name_ << ", Change db success"; - return true; -} - -bool Partition::IsBgSaving() { - slash::MutexLock ml(&bgsave_protector_); - return bgsave_info_.bgsaving; -} - -void Partition::BgSavePartition() { - slash::MutexLock l(&bgsave_protector_); - if (bgsave_info_.bgsaving) { - return; - } - bgsave_info_.bgsaving = true; - BgTaskArg* bg_task_arg = new BgTaskArg(); - bg_task_arg->partition = shared_from_this(); - g_pika_server->BGSaveTaskSchedule(&DoBgSave, static_cast(bg_task_arg)); -} - -BgSaveInfo Partition::bgsave_info() { - slash::MutexLock l(&bgsave_protector_); - return bgsave_info_; -} - -void Partition::DoBgSave(void* arg) { - BgTaskArg* bg_task_arg = static_cast(arg); - - // Do BgSave - bool success = bg_task_arg->partition->RunBgsaveEngine(); - - // Some output - BgSaveInfo info = bg_task_arg->partition->bgsave_info(); - std::ofstream out; - out.open(info.path + "/" + kBgsaveInfoFile, std::ios::in | std::ios::trunc); - if (out.is_open()) { - out << (time(NULL) - info.start_time) << "s\n" - << g_pika_server->host() << "\n" - << g_pika_server->port() << "\n" - << info.filenum << "\n" - << info.offset << "\n"; - out.close(); - } - if (!success) { - std::string fail_path = info.path + "_FAILED"; - slash::RenameFile(info.path.c_str(), fail_path.c_str()); - } - bg_task_arg->partition->FinishBgsave(); - - delete bg_task_arg; -} - -bool Partition::RunBgsaveEngine() { - // Prepare for Bgsaving - if (!InitBgsaveEnv() || !InitBgsaveEngine()) { - ClearBgsave(); - return false; - } - LOG(INFO) << partition_name_ << " after prepare bgsave"; - - BgSaveInfo info = bgsave_info(); - LOG(INFO) << partition_name_ << " bgsave_info: path=" << info.path - << ", filenum=" << info.filenum - << ", offset=" << info.offset; - - // Backup to tmp dir - rocksdb::Status s = bgsave_engine_->CreateNewBackup(info.path); - LOG(INFO) << partition_name_ << " create new backup finished."; - - if (!s.ok()) { - LOG(WARNING) << partition_name_ << " create new backup failed :" << s.ToString(); - return false; - } - return true; -} - -// Prepare engine, need bgsave_protector protect -bool Partition::InitBgsaveEnv() { - slash::MutexLock l(&bgsave_protector_); - // Prepare for bgsave dir - bgsave_info_.start_time = time(NULL); - char s_time[32]; - int len = strftime(s_time, sizeof(s_time), "%Y%m%d%H%M%S", localtime(&bgsave_info_.start_time)); - bgsave_info_.s_start_time.assign(s_time, len); - std::string time_sub_path = g_pika_conf->bgsave_prefix() + std::string(s_time, 8); - bgsave_info_.path = g_pika_conf->bgsave_path() + time_sub_path + "/" + bgsave_sub_path_; - if (!slash::DeleteDirIfExist(bgsave_info_.path)) { - LOG(WARNING) << partition_name_ << " remove exist bgsave dir failed"; - return false; - } - slash::CreatePath(bgsave_info_.path, 0755); - // Prepare for failed dir - if (!slash::DeleteDirIfExist(bgsave_info_.path + "_FAILED")) { - LOG(WARNING) << partition_name_ << " remove exist fail bgsave dir failed :"; - return false; - } - return true; -} - -// Prepare bgsave env, need bgsave_protector protect -bool Partition::InitBgsaveEngine() { - delete bgsave_engine_; - rocksdb::Status s = 
blackwidow::BackupEngine::Open(db().get(), &bgsave_engine_); - if (!s.ok()) { - LOG(WARNING) << partition_name_ << " open backup engine failed " << s.ToString(); - return false; - } - - { - RWLock l(&db_rwlock_, true); - { - slash::MutexLock l(&bgsave_protector_); - logger_->GetProducerStatus(&bgsave_info_.filenum, &bgsave_info_.offset); - } - s = bgsave_engine_->SetBackupContent(); - if (!s.ok()) { - LOG(WARNING) << partition_name_ << " set backup content failed " << s.ToString(); - return false; - } - } - return true; -} - -void Partition::ClearBgsave() { - slash::MutexLock l(&bgsave_protector_); - bgsave_info_.Clear(); -} - -void Partition::FinishBgsave() { - slash::MutexLock l(&bgsave_protector_); - bgsave_info_.bgsaving = false; -} - -bool Partition::FlushDB() { - slash::RWLock rwl(&db_rwlock_, true); - slash::MutexLock ml(&bgsave_protector_); - if (bgsave_info_.bgsaving) { - return false; - } - - LOG(INFO) << partition_name_ << " Delete old db..."; - db_.reset(); - - std::string dbpath = db_path_; - if (dbpath[dbpath.length() - 1] == '/') { - dbpath.erase(dbpath.length() - 1); - } - dbpath.append("_deleting/"); - slash::RenameFile(db_path_, dbpath.c_str()); - - db_ = std::shared_ptr(new blackwidow::BlackWidow()); - rocksdb::Status s = db_->Open(g_pika_server->bw_options(), db_path_); - assert(db_); - assert(s.ok()); - LOG(INFO) << partition_name_ << " Open new db success"; - g_pika_server->PurgeDir(dbpath); - return true; -} - -bool Partition::FlushSubDB(const std::string& db_name) { - slash::RWLock rwl(&db_rwlock_, true); - slash::MutexLock ml(&bgsave_protector_); - if (bgsave_info_.bgsaving) { - return false; - } - - LOG(INFO) << partition_name_ << " Delete old " + db_name + " db..."; - db_.reset(); - - std::string dbpath = db_path_; - if (dbpath[dbpath.length() - 1] != '/') { - dbpath.append("/"); - } - - std::string sub_dbpath = dbpath + db_name; - std::string del_dbpath = dbpath + db_name + "_deleting"; - slash::RenameFile(sub_dbpath, del_dbpath); - - db_ = std::shared_ptr(new blackwidow::BlackWidow()); - rocksdb::Status s = db_->Open(g_pika_server->bw_options(), db_path_); - assert(db_); - assert(s.ok()); - LOG(INFO) << partition_name_ << " open new " + db_name + " db success"; - g_pika_server->PurgeDir(del_dbpath); - return true; -} - -bool Partition::PurgeLogs(uint32_t to, bool manual) { - // Only one thread can go through - bool expect = false; - if (!purging_.compare_exchange_strong(expect, true)) { - LOG(WARNING) << "purge process already exist"; - return false; - } - PurgeArg *arg = new PurgeArg(); - arg->to = to; - arg->manual = manual; - arg->partition = shared_from_this(); - g_pika_server->PurgelogsTaskSchedule(&DoPurgeLogs, static_cast(arg)); - return true; -} - -void Partition::ClearPurge() { - purging_ = false; -} - -void Partition::DoPurgeLogs(void* arg) { - PurgeArg* purge = static_cast(arg); - purge->partition->PurgeFiles(purge->to, purge->manual); - purge->partition->ClearPurge(); - delete (PurgeArg*)arg; -} - -bool Partition::PurgeFiles(uint32_t to, bool manual) { - std::map binlogs; - if (!GetBinlogFiles(binlogs)) { - LOG(WARNING) << partition_name_ << " Could not get binlog files!"; - return false; - } - - int delete_num = 0; - struct stat file_stat; - int remain_expire_num = binlogs.size() - g_pika_conf->expire_logs_nums(); - std::map::iterator it; - for (it = binlogs.begin(); it != binlogs.end(); ++it) { - if ((manual && it->first <= to) // Manual purgelogsto - || (remain_expire_num > 0) // Expire num trigger - || (binlogs.size() - delete_num > 10 // At 
lease remain 10 files - && stat(((log_path_ + it->second)).c_str(), &file_stat) == 0 - && file_stat.st_mtime < time(NULL) - g_pika_conf->expire_logs_days() * 24 * 3600)) { // Expire time trigger - // We check this every time to avoid lock when we do file deletion - if (!g_pika_rm->BinlogCloudPurgeFromSMP(table_name_, partition_id_, it->first)) { - LOG(WARNING) << partition_name_ << " Could not purge "<< (it->first) << ", since it is already be used"; - return false; - } - - // Do delete - slash::Status s = slash::DeleteFile(log_path_ + it->second); - if (s.ok()) { - ++delete_num; - --remain_expire_num; - } else { - LOG(WARNING) << partition_name_ << " Purge log file : " << (it->second) << " failed! error:" << s.ToString(); - } - } else { - // Break when face the first one not satisfied - // Since the binlogs is order by the file index - break; - } - } - if (delete_num) { - LOG(INFO) << partition_name_ << " Success purge "<< delete_num; - } - return true; -} - -bool Partition::GetBinlogFiles(std::map& binlogs) { - std::vector children; - int ret = slash::GetChildren(log_path_, children); - if (ret != 0) { - LOG(WARNING) << partition_name_ << " Get all files in log path failed! error:" << ret; - return false; - } - - int64_t index = 0; - std::string sindex; - std::vector::iterator it; - for (it = children.begin(); it != children.end(); ++it) { - if ((*it).compare(0, kBinlogPrefixLen, kBinlogPrefix) != 0) { - continue; - } - sindex = (*it).substr(kBinlogPrefixLen); - if (slash::string2l(sindex.c_str(), sindex.size(), &index) == 1) { - binlogs.insert(std::pair(static_cast(index), *it)); - } - } - return true; -} - -void Partition::InitKeyScan() { - key_scan_info_.start_time = time(NULL); - char s_time[32]; - int len = strftime(s_time, sizeof(s_time), "%Y-%m-%d %H:%M:%S", localtime(&key_scan_info_.start_time)); - key_scan_info_.s_start_time.assign(s_time, len); - key_scan_info_.duration = -1; // duration -1 mean the task in processing -} - -KeyScanInfo Partition::GetKeyScanInfo() { - slash::MutexLock l(&key_info_protector_); - return key_scan_info_; -} - -Status Partition::GetKeyNum(std::vector* key_info) { - slash::MutexLock l(&key_info_protector_); - if (key_scan_info_.key_scaning_) { - *key_info = key_scan_info_.key_infos; - return Status::OK(); - } - InitKeyScan(); - key_scan_info_.key_scaning_ = true; - key_scan_info_.duration = -2; // duration -2 mean the task in waiting status, - // has not been scheduled for exec - rocksdb::Status s = db_->GetKeyNum(key_info); - if (!s.ok()) { - return Status::Corruption(s.ToString()); - } - key_scan_info_.key_infos = *key_info; - key_scan_info_.duration = time(NULL) - key_scan_info_.start_time; - key_scan_info_.key_scaning_ = false; - return Status::OK(); -} diff --git a/tools/pika_migrate/src/pika_pubsub.cc b/tools/pika_migrate/src/pika_pubsub.cc deleted file mode 100644 index c3a0127d3f..0000000000 --- a/tools/pika_migrate/src/pika_pubsub.cc +++ /dev/null @@ -1,222 +0,0 @@ -// Copyright (c) 2015-present, Qihoo, Inc. All rights reserved. -// This source code is licensed under the BSD-style license found in the -// LICENSE file in the root directory of this source tree. An additional grant -// of patent rights can be found in the PATENTS file in the same directory. 
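Note: Partition::PurgeFiles above deletes a binlog file when any one of three conditions holds: a manual PURGELOGSTO covers its file number, more files exist than expire-logs-nums allows, or, provided more than 10 files would still remain, its mtime is older than expire-logs-days. A condensed restatement of that decision under hypothetical parameter names; this is not the actual Pika API.

    #include <cstdint>
    #include <ctime>

    // One binlog file may be purged when any branch below is true; the caller
    // still stops at the first file that fails, since binlogs are ordered.
    bool ShouldPurge(bool manual, uint32_t filenum, uint32_t purge_to,
                     int remain_expire_num, int files_left,
                     time_t mtime, int expire_days) {
      if (manual && filenum <= purge_to) return true;      // manual purgelogsto
      if (remain_expire_num > 0) return true;              // expire-num trigger
      return files_left > 10 &&                            // keep at least 10 files
             mtime < time(NULL) - expire_days * 24 * 3600; // expire-time trigger
    }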
-
-#include "include/pika_pubsub.h"
-
-#include "include/pika_server.h"
-
-extern PikaServer* g_pika_server;
-
-
-static std::string ConstructPubSubResp(
-    const std::string& cmd,
-    const std::vector<std::pair<std::string, int>>& result) {
-  std::stringstream resp;
-  if (result.size() == 0) {
-    resp << "*3\r\n" << "$" << cmd.length() << "\r\n" << cmd << "\r\n" <<
-                "$" << -1 << "\r\n" << ":" << 0 << "\r\n";
-  }
-  for (auto it = result.begin(); it != result.end(); it++) {
-    resp << "*3\r\n" << "$" << cmd.length() << "\r\n" << cmd << "\r\n" <<
-                "$" << it->first.length() << "\r\n" << it->first << "\r\n" <<
-                ":" << it->second << "\r\n";
-  }
-  return resp.str();
-}
-
-
-void PublishCmd::DoInitial() {
-  if (!CheckArg(argv_.size())) {
-    res_.SetRes(CmdRes::kWrongNum, kCmdNamePublish);
-    return;
-  }
-  channel_ = argv_[1];
-  msg_ = argv_[2];
-}
-
-void PublishCmd::Do(std::shared_ptr<Partition> partition) {
-  int receivers = g_pika_server->Publish(channel_, msg_);
-  res_.AppendInteger(receivers);
-  return;
-}
-
-void SubscribeCmd::DoInitial() {
-  if (!CheckArg(argv_.size())) {
-    res_.SetRes(CmdRes::kWrongNum, kCmdNameSubscribe);
-    return;
-  }
-}
-
-void SubscribeCmd::Do(std::shared_ptr<Partition> partition) {
-  std::shared_ptr<pink::PinkConn> conn = GetConn();
-  if (!conn) {
-    res_.SetRes(CmdRes::kErrOther, kCmdNameSubscribe);
-    LOG(WARNING) << name_ << " weak ptr is empty";
-    return;
-  }
-  std::shared_ptr<PikaClientConn> cli_conn = std::dynamic_pointer_cast<PikaClientConn>(conn);
-
-  if (!cli_conn->IsPubSub()) {
-    cli_conn->server_thread()->MoveConnOut(conn->fd());
-  }
-  std::vector<std::string> channels;
-  for (size_t i = 1; i < argv_.size(); i++) {
-    channels.push_back(argv_[i]);
-  }
-  std::vector<std::pair<std::string, int>> result;
-  cli_conn->SetIsPubSub(true);
-  cli_conn->SetHandleType(pink::HandleType::kSynchronous);
-  g_pika_server->Subscribe(conn, channels, name_ == kCmdNamePSubscribe, &result);
-  return res_.SetRes(CmdRes::kNone, ConstructPubSubResp(name_, result));
-}
-
-void UnSubscribeCmd::DoInitial() {
-  if (!CheckArg(argv_.size())) {
-    res_.SetRes(CmdRes::kWrongNum, kCmdNameUnSubscribe);
-    return;
-  }
-}
-
-void UnSubscribeCmd::Do(std::shared_ptr<Partition> partition) {
-  std::vector<std::string> channels;
-  for (size_t i = 1; i < argv_.size(); i++) {
-    channels.push_back(argv_[i]);
-  }
-
-  std::shared_ptr<pink::PinkConn> conn = GetConn();
-  if (!conn) {
-    res_.SetRes(CmdRes::kErrOther, kCmdNameUnSubscribe);
-    LOG(WARNING) << name_ << " weak ptr is empty";
-    return;
-  }
-  std::shared_ptr<PikaClientConn> cli_conn = std::dynamic_pointer_cast<PikaClientConn>(conn);
-
-  std::vector<std::pair<std::string, int>> result;
-  int subscribed = g_pika_server->UnSubscribe(conn, channels, name_ == kCmdNamePUnSubscribe, &result);
-  if (subscribed == 0 && cli_conn->IsPubSub()) {
-    /*
-     * if the number of client subscribed is zero,
-     * the client will exit the Pub/Sub state
-     */
-    cli_conn->server_thread()->HandleNewConn(conn->fd(), conn->ip_port());
-    cli_conn->SetIsPubSub(false);
-  }
-  return res_.SetRes(CmdRes::kNone, ConstructPubSubResp(name_, result));
-}
-
-void PSubscribeCmd::DoInitial() {
-  if (!CheckArg(argv_.size())) {
-    res_.SetRes(CmdRes::kWrongNum, kCmdNamePSubscribe);
-    return;
-  }
-}
-
-void PSubscribeCmd::Do(std::shared_ptr<Partition> partition) {
-  std::shared_ptr<pink::PinkConn> conn = GetConn();
-  if (!conn) {
-    res_.SetRes(CmdRes::kErrOther, kCmdNamePSubscribe);
-    LOG(WARNING) << name_ << " weak ptr is empty";
-    return;
-  }
-  std::shared_ptr<PikaClientConn> cli_conn = std::dynamic_pointer_cast<PikaClientConn>(conn);
-
-  if (!cli_conn->IsPubSub()) {
-    cli_conn->server_thread()->MoveConnOut(conn->fd());
-  }
-  std::vector<std::string> channels;
-  for (size_t i = 1; i < argv_.size(); i++) {
-    channels.push_back(argv_[i]);
-  }
-  std::vector<std::pair<std::string, int>> result;
-  cli_conn->SetIsPubSub(true);
-  cli_conn->SetHandleType(pink::HandleType::kSynchronous);
-  g_pika_server->Subscribe(conn, channels, name_ == kCmdNamePSubscribe, &result);
-  return res_.SetRes(CmdRes::kNone, ConstructPubSubResp(name_, result));
-}
-
-void PUnSubscribeCmd::DoInitial() {
-  if (!CheckArg(argv_.size())) {
-    res_.SetRes(CmdRes::kWrongNum, kCmdNamePUnSubscribe);
-    return;
-  }
-}
-
-void PUnSubscribeCmd::Do(std::shared_ptr<Partition> partition) {
-  std::vector<std::string> channels;
-  for (size_t i = 1; i < argv_.size(); i++) {
-    channels.push_back(argv_[i]);
-  }
-
-  std::shared_ptr<pink::PinkConn> conn = GetConn();
-  if (!conn) {
-    res_.SetRes(CmdRes::kErrOther, kCmdNamePUnSubscribe);
-    LOG(WARNING) << name_ << " weak ptr is empty";
-    return;
-  }
-  std::shared_ptr<PikaClientConn> cli_conn = std::dynamic_pointer_cast<PikaClientConn>(conn);
-
-  std::vector<std::pair<std::string, int>> result;
-  int subscribed = g_pika_server->UnSubscribe(conn, channels, name_ == kCmdNamePUnSubscribe, &result);
-  if (subscribed == 0 && cli_conn->IsPubSub()) {
-    /*
-     * if the number of client subscribed is zero,
-     * the client will exit the Pub/Sub state
-     */
-    cli_conn->server_thread()->HandleNewConn(conn->fd(), conn->ip_port());
-    cli_conn->SetIsPubSub(false);
-  }
-  return res_.SetRes(CmdRes::kNone, ConstructPubSubResp(name_, result));
-}
-
-void PubSubCmd::DoInitial() {
-  if (!CheckArg(argv_.size())) {
-    res_.SetRes(CmdRes::kWrongNum, kCmdNamePubSub);
-    return;
-  }
-  subcommand_ = argv_[1];
-  if (strcasecmp(subcommand_.data(), "channels")
-      && strcasecmp(subcommand_.data(), "numsub")
-      && strcasecmp(subcommand_.data(), "numpat")) {
-    res_.SetRes(CmdRes::kErrOther, "Unknown PUBSUB subcommand or wrong number of arguments for '" + subcommand_ + "'");
-  }
-  for (size_t i = 2; i < argv_.size(); i++) {
-    arguments_.push_back(argv_[i]);
-  }
-}
-
-void PubSubCmd::Do(std::shared_ptr<Partition> partition) {
-  if (!strcasecmp(subcommand_.data(), "channels")) {
-    std::string pattern = "";
-    std::vector<std::string> result;
-    if (arguments_.size() == 1) {
-      pattern = arguments_[0];
-    } else if (arguments_.size() > 1) {
-      res_.SetRes(CmdRes::kErrOther, "Unknown PUBSUB subcommand or wrong number of arguments for '" + subcommand_ + "'");
-      return;
-    }
-    g_pika_server->PubSubChannels(pattern, &result);
-
-    res_.AppendArrayLen(result.size());
-    for (auto it = result.begin(); it != result.end(); ++it) {
-      res_.AppendStringLen((*it).length());
-      res_.AppendContent(*it);
-    }
-  } else if (!strcasecmp(subcommand_.data(), "numsub")) {
-    std::vector<std::pair<std::string, int>> result;
-    g_pika_server->PubSubNumSub(arguments_, &result);
-    res_.AppendArrayLen(result.size() * 2);
-    for (auto it = result.begin(); it != result.end(); ++it) {
-      res_.AppendStringLen(it->first.length());
-      res_.AppendContent(it->first);
-      res_.AppendInteger(it->second);
-    }
-    return;
-  } else if (!strcasecmp(subcommand_.data(), "numpat")) {
-    int subscribed = g_pika_server->PubSubNumPat();
-    res_.AppendInteger(subscribed);
-  }
-  return;
-}
-
diff --git a/tools/pika_migrate/src/pika_repl_bgworker.cc b/tools/pika_migrate/src/pika_repl_bgworker.cc
deleted file mode 100644
index f68db2d288..0000000000
--- a/tools/pika_migrate/src/pika_repl_bgworker.cc
+++ /dev/null
@@ -1,301 +0,0 @@
-// Copyright (c) 2019-present, Qihoo, Inc. All rights reserved.
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree. An additional grant
-// of patent rights can be found in the PATENTS file in the same directory.
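Note: as a worked example of the pika_pubsub.cc reply construction above, a client that issues SUBSCRIBE news and ends up with one subscription reaches ConstructPubSubResp with cmd = "subscribe" and result = {("news", 1)}, which yields the standard RESP reply (escapes written out):

    *3\r\n$9\r\nsubscribe\r\n$4\r\nnews\r\n:1\r\n

i.e. a three-element array holding the command name, the channel, and the client's current subscription count.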
- -#include "include/pika_repl_bgworker.h" - -#include - -#include "pink/include/redis_cli.h" - -#include "include/pika_rm.h" -#include "include/pika_conf.h" -#include "include/pika_server.h" -#include "include/pika_cmd_table_manager.h" - -extern PikaConf* g_pika_conf; -extern PikaServer* g_pika_server; -extern PikaReplicaManager* g_pika_rm; -extern PikaCmdTableManager* g_pika_cmd_table_manager; - -PikaReplBgWorker::PikaReplBgWorker(int queue_size) - : bg_thread_(queue_size) { - bg_thread_.set_thread_name("ReplBgWorker"); - pink::RedisParserSettings settings; - settings.DealMessage = &(PikaReplBgWorker::HandleWriteBinlog); - redis_parser_.RedisParserInit(REDIS_PARSER_REQUEST, settings); - redis_parser_.data = this; - table_name_ = g_pika_conf->default_table(); - partition_id_ = 0; - -} - -PikaReplBgWorker::~PikaReplBgWorker() { -} - -int PikaReplBgWorker::StartThread() { - return bg_thread_.StartThread(); -} - -int PikaReplBgWorker::StopThread() { - return bg_thread_.StopThread(); -} - -void PikaReplBgWorker::Schedule(pink::TaskFunc func, void* arg) { - bg_thread_.Schedule(func, arg); -} - -void PikaReplBgWorker::QueueClear() { - bg_thread_.QueueClear(); -} - -void PikaReplBgWorker::HandleBGWorkerWriteBinlog(void* arg) { - ReplClientWriteBinlogTaskArg* task_arg = static_cast(arg); - const std::shared_ptr res = task_arg->res; - std::shared_ptr conn = task_arg->conn; - std::vector* index = static_cast* >(task_arg->res_private_data); - PikaReplBgWorker* worker = task_arg->worker; - worker->ip_port_ = conn->ip_port(); - - std::string table_name; - uint32_t partition_id = 0; - BinlogOffset ack_start, ack_end; - // find the first not keepalive binlogsync - for (size_t i = 0; i < index->size(); ++i) { - const InnerMessage::InnerResponse::BinlogSync& binlog_res = res->binlog_sync((*index)[i]); - if (i == 0) { - table_name = binlog_res.partition().table_name(); - partition_id = binlog_res.partition().partition_id(); - } - if (!binlog_res.binlog().empty()) { - ack_start.filenum = binlog_res.binlog_offset().filenum(); - ack_start.offset = binlog_res.binlog_offset().offset(); - break; - } - } - worker->table_name_ = table_name; - worker->partition_id_ = partition_id; - - std::shared_ptr partition = g_pika_server->GetTablePartitionById(table_name, partition_id); - if (!partition) { - LOG(WARNING) << "Partition " << table_name << "_" << partition_id << " Not Found"; - delete index; - delete task_arg; - return; - } - - std::shared_ptr slave_partition = - g_pika_rm->GetSyncSlavePartitionByName( - PartitionInfo(table_name, partition_id)); - if (!slave_partition) { - LOG(WARNING) << "Slave Partition " << table_name << "_" << partition_id << " Not Found"; - delete index; - delete task_arg; - return; - } - - for (size_t i = 0; i < index->size(); ++i) { - const InnerMessage::InnerResponse::BinlogSync& binlog_res = res->binlog_sync((*index)[i]); - // if pika are not current a slave or partition not in - // BinlogSync state, we drop remain write binlog task - if ((g_pika_conf->classic_mode() && !(g_pika_server->role() & PIKA_ROLE_SLAVE)) - || ((slave_partition->State() != ReplState::kConnected) - && (slave_partition->State() != ReplState::kWaitDBSync))) { - delete index; - delete task_arg; - return; - } - - if (!g_pika_rm->CheckSlavePartitionSessionId( - binlog_res.partition().table_name(), - binlog_res.partition().partition_id(), - binlog_res.session_id())) { - LOG(WARNING) << "Check Session failed " - << binlog_res.partition().table_name() - << "_" << binlog_res.partition().partition_id(); - 
slave_partition->SetReplState(ReplState::kTryConnect); - delete index; - delete task_arg; - return; - } - - // empty binlog treated as keepalive packet - if (binlog_res.binlog().empty()) { - continue; - } - if (!PikaBinlogTransverter::BinlogItemWithoutContentDecode(TypeFirst, binlog_res.binlog(), &worker->binlog_item_)) { - LOG(WARNING) << "Binlog item decode failed"; - slave_partition->SetReplState(ReplState::kTryConnect); - delete index; - delete task_arg; - return; - } - const char* redis_parser_start = binlog_res.binlog().data() + BINLOG_ENCODE_LEN; - int redis_parser_len = static_cast(binlog_res.binlog().size()) - BINLOG_ENCODE_LEN; - int processed_len = 0; - pink::RedisParserStatus ret = worker->redis_parser_.ProcessInputBuffer( - redis_parser_start, redis_parser_len, &processed_len); - if (ret != pink::kRedisParserDone) { - LOG(WARNING) << "Redis parser failed"; - slave_partition->SetReplState(ReplState::kTryConnect); - delete index; - delete task_arg; - return; - } - } - delete index; - delete task_arg; - - // Reply Ack to master immediately - std::shared_ptr logger = partition->logger(); - logger->GetProducerStatus(&ack_end.filenum, &ack_end.offset); - // keepalive case - if (ack_start == BinlogOffset()) { - // set ack_end as 0 - ack_end = ack_start; - } - g_pika_rm->SendPartitionBinlogSyncAckRequest(table_name, partition_id, ack_start, ack_end); -} - -int PikaReplBgWorker::HandleWriteBinlog(pink::RedisParser* parser, const pink::RedisCmdArgsType& argv) { - PikaReplBgWorker* worker = static_cast(parser->data); - const BinlogItem& binlog_item = worker->binlog_item_; - g_pika_server->UpdateQueryNumAndExecCountTable(argv[0]); - - // Monitor related - std::string monitor_message; - if (g_pika_server->HasMonitorClients()) { - std::string table_name = g_pika_conf->classic_mode() - ? worker->table_name_.substr(2) : worker->table_name_; - std::string monitor_message = std::to_string(1.0 * slash::NowMicros() / 1000000) - + " [" + table_name + " " + worker->ip_port_ + "]"; - for (const auto& item : argv) { - monitor_message += " " + slash::ToRead(item); - } - g_pika_server->AddMonitorMessage(monitor_message); - } - - std::string opt = argv[0]; - std::shared_ptr c_ptr = g_pika_cmd_table_manager->GetCmd(slash::StringToLower(opt)); - if (!c_ptr) { - LOG(WARNING) << "Command " << opt << " not in the command table"; - return -1; - } - // Initial - c_ptr->Initial(argv, worker->table_name_); - if (!c_ptr->res().ok()) { - LOG(WARNING) << "Fail to initial command from binlog: " << opt; - return -1; - } - - std::shared_ptr partition = g_pika_server->GetTablePartitionById(worker->table_name_, worker->partition_id_); - std::shared_ptr logger = partition->logger(); - - logger->Lock(); - logger->Put(c_ptr->ToBinlog(binlog_item.exec_time(), - std::to_string(binlog_item.server_id()), - binlog_item.logic_id(), - binlog_item.filenum(), - binlog_item.offset())); - uint32_t filenum; - uint64_t offset; - logger->GetProducerStatus(&filenum, &offset); - logger->Unlock(); - - PikaCmdArgsType *v = new PikaCmdArgsType(argv); - BinlogItem *b = new BinlogItem(binlog_item); - std::string dispatch_key = argv.size() >= 2 ? 
argv[1] : argv[0]; - g_pika_rm->ScheduleWriteDBTask(dispatch_key, v, b, worker->table_name_, worker->partition_id_); - return 0; -} - -void PikaReplBgWorker::HandleBGWorkerWriteDB(void* arg) { - ReplClientWriteDBTaskArg* task_arg = static_cast(arg); - PikaCmdArgsType* argv = task_arg->argv; - BinlogItem binlog_item = *(task_arg->binlog_item); - std::string table_name = task_arg->table_name; - uint32_t partition_id = task_arg->partition_id; - std::string opt = (*argv)[0]; - slash::StringToLower(opt); - - // Get command - std::shared_ptr c_ptr = g_pika_cmd_table_manager->GetCmd(slash::StringToLower(opt)); - if (!c_ptr) { - LOG(WARNING) << "Error operation from binlog: " << opt; - delete task_arg; - return; - } - - // Initial - c_ptr->Initial(*argv, table_name); - if (!c_ptr->res().ok()) { - LOG(WARNING) << "Fail to initial command from binlog: " << opt; - delete task_arg; - return; - } - - uint64_t start_us = 0; - if (g_pika_conf->slowlog_slower_than() >= 0) { - start_us = slash::NowMicros(); - } - std::shared_ptr partition = g_pika_server->GetTablePartitionById(table_name, partition_id); - - if (strcmp(table_name.data(), "db0") || partition_id != 0) { - LOG(FATAL) << "table_name: " << table_name << ", partition_id: " - << std::to_string(partition_id) << ", but only single DB data is support transfer"; - return; - } - - /* convert Pika custom command to Redis standard command */ - if (!strcasecmp((*argv)[0].data(), "pksetexat")) { - if (argv->size() != 4) { - LOG(WARNING) << "find invaild command, command size: " << argv->size(); - return; - } else { - std::string key = (*argv)[1]; - int timestamp = std::atoi((*argv)[2].data()); - std::string value = (*argv)[3]; - - int seconds = timestamp - time(NULL); - PikaCmdArgsType tmp_argv; - tmp_argv.push_back("setex"); - tmp_argv.push_back(key); - tmp_argv.push_back(std::to_string(seconds)); - tmp_argv.push_back(value); - - std::string command; - pink::SerializeRedisCommand(tmp_argv, &command); - g_pika_server->SendRedisCommand(command, key); - } - } else { - std::string key = argv->size() > 1 ? (*argv)[1] : ""; - std::string command; - pink::SerializeRedisCommand(*argv, &command); - g_pika_server->SendRedisCommand(command, key); - } - - // Add read lock for no suspend command - if (!c_ptr->is_suspend()) { - partition->DbRWLockReader(); - } - - c_ptr->Do(partition); - - if (!c_ptr->is_suspend()) { - partition->DbRWUnLock(); - } - - if (g_pika_conf->slowlog_slower_than() >= 0) { - int32_t start_time = start_us / 1000000; - int64_t duration = slash::NowMicros() - start_us; - if (duration > g_pika_conf->slowlog_slower_than()) { - g_pika_server->SlowlogPushEntry(*argv, start_time, duration); - if (g_pika_conf->slowlog_write_errorlog()) { - LOG(ERROR) << "command: " << opt << ", start_time(s): " << start_time << ", duration(us): " << duration; - } - } - } - delete task_arg; -} - diff --git a/tools/pika_migrate/src/pika_repl_client.cc b/tools/pika_migrate/src/pika_repl_client.cc deleted file mode 100644 index 5c78dfab7d..0000000000 --- a/tools/pika_migrate/src/pika_repl_client.cc +++ /dev/null @@ -1,271 +0,0 @@ -// Copyright (c) 2015-present, Qihoo, Inc. All rights reserved. -// This source code is licensed under the BSD-style license found in the -// LICENSE file in the root directory of this source tree. An additional grant -// of patent rights can be found in the PATENTS file in the same directory. 
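Note: PikaReplBgWorker::HandleBGWorkerWriteDB above rewrites Pika's custom PKSETEXAT command (absolute unix-time expiry) into the standard Redis SETEX (relative TTL) before forwarding, by subtracting the current time from the stored timestamp. A minimal sketch of just that conversion; PkSetexAtToSetex is a hypothetical helper used for illustration.

    #include <cstdlib>
    #include <ctime>
    #include <string>
    #include <vector>

    // {"pksetexat", key, abs_unix_ts, value} -> {"setex", key, ttl, value}
    std::vector<std::string> PkSetexAtToSetex(const std::vector<std::string>& argv) {
      long timestamp = std::atol(argv[2].c_str());
      long ttl = timestamp - time(NULL);  // remaining lifetime in seconds
      return {"setex", argv[1], std::to_string(ttl), argv[3]};
    }

As in the deleted code, a timestamp already in the past produces a non-positive TTL, which a receiving Redis will reject; neither version guards against that case.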
- -#include "include/pika_repl_client.h" - -#include -#include -#include - -#include "pink/include/pink_cli.h" -#include "pink/include/redis_cli.h" -#include "slash/include/slash_coding.h" -#include "slash/include/env.h" -#include "slash/include/slash_string.h" - -#include "include/pika_rm.h" -#include "include/pika_server.h" - -extern PikaServer* g_pika_server; -extern PikaReplicaManager* g_pika_rm; - -PikaReplClient::PikaReplClient(int cron_interval, int keepalive_timeout) : next_avail_(0) { - client_thread_ = new PikaReplClientThread(cron_interval, keepalive_timeout); - client_thread_->set_thread_name("PikaReplClient"); - for (int i = 0; i < 2 * g_pika_conf->sync_thread_num(); ++i) { - bg_workers_.push_back(new PikaReplBgWorker(PIKA_SYNC_BUFFER_SIZE)); - } -} - -PikaReplClient::~PikaReplClient() { - client_thread_->StopThread(); - delete client_thread_; - for (size_t i = 0; i < bg_workers_.size(); ++i) { - delete bg_workers_[i]; - } - LOG(INFO) << "PikaReplClient exit!!!"; -} - -int PikaReplClient::Start() { - int res = client_thread_->StartThread(); - if (res != pink::kSuccess) { - LOG(FATAL) << "Start ReplClient ClientThread Error: " << res << (res == pink::kCreateThreadError ? ": create thread error " : ": other error"); - } - for (size_t i = 0; i < bg_workers_.size(); ++i) { - res = bg_workers_[i]->StartThread(); - if (res != pink::kSuccess) { - LOG(FATAL) << "Start Pika Repl Worker Thread Error: " << res - << (res == pink::kCreateThreadError ? ": create thread error " : ": other error"); - } - } - return res; -} - -int PikaReplClient::Stop() { - client_thread_->StopThread(); - for (size_t i = 0; i < bg_workers_.size(); ++i) { - bg_workers_[i]->StopThread(); - } - return 0; -} - -void PikaReplClient::Schedule(pink::TaskFunc func, void* arg) { - bg_workers_[next_avail_]->Schedule(func, arg); - UpdateNextAvail(); -} - -void PikaReplClient::ScheduleWriteBinlogTask(std::string table_partition, - const std::shared_ptr res, - std::shared_ptr conn, void* res_private_data) { - size_t index = GetHashIndex(table_partition, true); - ReplClientWriteBinlogTaskArg* task_arg = - new ReplClientWriteBinlogTaskArg(res, conn, res_private_data, bg_workers_[index]); - bg_workers_[index]->Schedule(&PikaReplBgWorker::HandleBGWorkerWriteBinlog, static_cast(task_arg)); -} - -void PikaReplClient::ScheduleWriteDBTask(const std::string& dispatch_key, - PikaCmdArgsType* argv, BinlogItem* binlog_item, - const std::string& table_name, uint32_t partition_id) { - size_t index = GetHashIndex(dispatch_key, false); - ReplClientWriteDBTaskArg* task_arg = - new ReplClientWriteDBTaskArg(argv, binlog_item, table_name, partition_id); - bg_workers_[index]->Schedule(&PikaReplBgWorker::HandleBGWorkerWriteDB, static_cast(task_arg)); -} - -size_t PikaReplClient::GetHashIndex(std::string key, bool upper_half) { - size_t hash_base = bg_workers_.size() / 2; - return (str_hash(key) % hash_base) + (upper_half ? 
0 : hash_base); -} - -Status PikaReplClient::Write(const std::string& ip, const int port, const std::string& msg) { - return client_thread_->Write(ip, port, msg); -} - -Status PikaReplClient::Close(const std::string& ip, const int port) { - return client_thread_->Close(ip, port); -} - - -Status PikaReplClient::SendMetaSync() { - std::string local_ip; - pink::PinkCli* cli = pink::NewRedisCli(); - cli->set_connect_timeout(1500); - if ((cli->Connect(g_pika_server->master_ip(), g_pika_server->master_port(), "")).ok()) { - struct sockaddr_in laddr; - socklen_t llen = sizeof(laddr); - getsockname(cli->fd(), (struct sockaddr*) &laddr, &llen); - std::string tmp_local_ip(inet_ntoa(laddr.sin_addr)); - local_ip = tmp_local_ip; - cli->Close(); - delete cli; - } else { - LOG(WARNING) << "Failed to connect master, Master (" - << g_pika_server->master_ip() << ":" << g_pika_server->master_port() << "), try reconnect"; - // Sleep three seconds to avoid frequent try Meta Sync - // when the connection fails - sleep(3); - g_pika_server->ResetMetaSyncStatus(); - delete cli; - return Status::Corruption("Connect master error"); - } - - InnerMessage::InnerRequest request; - request.set_type(InnerMessage::kMetaSync); - InnerMessage::InnerRequest::MetaSync* meta_sync = request.mutable_meta_sync(); - InnerMessage::Node* node = meta_sync->mutable_node(); - node->set_ip(local_ip); - node->set_port(g_pika_server->port()); - - std::string masterauth = g_pika_conf->masterauth(); - if (!masterauth.empty()) { - meta_sync->set_auth(masterauth); - } - - std::string to_send; - std::string master_ip = g_pika_server->master_ip(); - int master_port = g_pika_server->master_port(); - if (!request.SerializeToString(&to_send)) { - LOG(WARNING) << "Serialize Meta Sync Request Failed, to Master (" - << master_ip << ":" << master_port << ")"; - return Status::Corruption("Serialize Failed"); - } - - LOG(INFO) << "Try Send Meta Sync Request to Master (" - << master_ip << ":" << master_port << ")"; - return client_thread_->Write(master_ip, master_port + kPortShiftReplServer, to_send); -} - -Status PikaReplClient::SendPartitionDBSync(const std::string& ip, - uint32_t port, - const std::string& table_name, - uint32_t partition_id, - const BinlogOffset& boffset, - const std::string& local_ip) { - InnerMessage::InnerRequest request; - request.set_type(InnerMessage::kDBSync); - InnerMessage::InnerRequest::DBSync* db_sync = request.mutable_db_sync(); - InnerMessage::Node* node = db_sync->mutable_node(); - node->set_ip(local_ip); - node->set_port(g_pika_server->port()); - InnerMessage::Partition* partition = db_sync->mutable_partition(); - partition->set_table_name(table_name); - partition->set_partition_id(partition_id); - - InnerMessage::BinlogOffset* binlog_offset = db_sync->mutable_binlog_offset(); - binlog_offset->set_filenum(boffset.filenum); - binlog_offset->set_offset(boffset.offset); - - std::string to_send; - if (!request.SerializeToString(&to_send)) { - LOG(WARNING) << "Serialize Partition DBSync Request Failed, to Master (" - << ip << ":" << port << ")"; - return Status::Corruption("Serialize Failed"); - } - return client_thread_->Write(ip, port + kPortShiftReplServer, to_send); -} - - -Status PikaReplClient::SendPartitionTrySync(const std::string& ip, - uint32_t port, - const std::string& table_name, - uint32_t partition_id, - const BinlogOffset& boffset, - const std::string& local_ip) { - InnerMessage::InnerRequest request; - request.set_type(InnerMessage::kTrySync); - InnerMessage::InnerRequest::TrySync* try_sync = 
request.mutable_try_sync(); - InnerMessage::Node* node = try_sync->mutable_node(); - node->set_ip(local_ip); - node->set_port(g_pika_server->port()); - InnerMessage::Partition* partition = try_sync->mutable_partition(); - partition->set_table_name(table_name); - partition->set_partition_id(partition_id); - - InnerMessage::BinlogOffset* binlog_offset = try_sync->mutable_binlog_offset(); - binlog_offset->set_filenum(boffset.filenum); - binlog_offset->set_offset(boffset.offset); - - std::string to_send; - if (!request.SerializeToString(&to_send)) { - LOG(WARNING) << "Serialize Partition TrySync Request Failed, to Master (" - << ip << ":" << port << ")"; - return Status::Corruption("Serialize Failed"); - } - return client_thread_->Write(ip, port + kPortShiftReplServer, to_send); -} - -Status PikaReplClient::SendPartitionBinlogSync(const std::string& ip, - uint32_t port, - const std::string& table_name, - uint32_t partition_id, - const BinlogOffset& ack_start, - const BinlogOffset& ack_end, - const std::string& local_ip, - bool is_first_send) { - InnerMessage::InnerRequest request; - request.set_type(InnerMessage::kBinlogSync); - InnerMessage::InnerRequest::BinlogSync* binlog_sync = request.mutable_binlog_sync(); - InnerMessage::Node* node = binlog_sync->mutable_node(); - node->set_ip(local_ip); - node->set_port(g_pika_server->port()); - binlog_sync->set_table_name(table_name); - binlog_sync->set_partition_id(partition_id); - binlog_sync->set_first_send(is_first_send); - - InnerMessage::BinlogOffset* ack_range_start = binlog_sync->mutable_ack_range_start(); - ack_range_start->set_filenum(ack_start.filenum); - ack_range_start->set_offset(ack_start.offset); - - InnerMessage::BinlogOffset* ack_range_end = binlog_sync->mutable_ack_range_end(); - ack_range_end->set_filenum(ack_end.filenum); - ack_range_end->set_offset(ack_end.offset); - - int32_t session_id = g_pika_rm->GetSlavePartitionSessionId(table_name, partition_id); - binlog_sync->set_session_id(session_id); - - std::string to_send; - if (!request.SerializeToString(&to_send)) { - LOG(WARNING) << "Serialize Partition BinlogSync Request Failed, to Master (" - << ip << ":" << port << ")"; - return Status::Corruption("Serialize Failed"); - } - return client_thread_->Write(ip, port + kPortShiftReplServer, to_send); -} - -Status PikaReplClient::SendRemoveSlaveNode(const std::string& ip, - uint32_t port, - const std::string& table_name, - uint32_t partition_id, - const std::string& local_ip) { - InnerMessage::InnerRequest request; - request.set_type(InnerMessage::kRemoveSlaveNode); - InnerMessage::InnerRequest::RemoveSlaveNode* remove_slave_node = - request.add_remove_slave_node(); - InnerMessage::Node* node = remove_slave_node->mutable_node(); - node->set_ip(local_ip); - node->set_port(g_pika_server->port()); - - InnerMessage::Partition* partition = remove_slave_node->mutable_partition(); - partition->set_table_name(table_name); - partition->set_partition_id(partition_id); - - std::string to_send; - if (!request.SerializeToString(&to_send)) { - LOG(WARNING) << "Serialize Remove Slave Node Failed, to Master (" - << ip << ":" << port << "), " << table_name << "_" << partition_id; - return Status::Corruption("Serialize Failed"); - } - return client_thread_->Write(ip, port + kPortShiftReplServer, to_send); -} diff --git a/tools/pika_migrate/src/pika_repl_client_conn.cc b/tools/pika_migrate/src/pika_repl_client_conn.cc deleted file mode 100644 index dce825afbf..0000000000 --- a/tools/pika_migrate/src/pika_repl_client_conn.cc +++ /dev/null @@ -1,261 +0,0 
@@
-// Copyright (c) 2015-present, Qihoo, Inc.  All rights reserved.
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree. An additional grant
-// of patent rights can be found in the PATENTS file in the same directory.
-
-#include "include/pika_repl_client_conn.h"
-
-#include <vector>
-
-#include "include/pika_server.h"
-#include "include/pika_rm.h"
-#include "slash/include/slash_string.h"
-
-#include "include/pika_rm.h"
-#include "include/pika_server.h"
-
-extern PikaConf* g_pika_conf;
-extern PikaServer* g_pika_server;
-extern PikaReplicaManager* g_pika_rm;
-
-PikaReplClientConn::PikaReplClientConn(int fd,
-                                       const std::string& ip_port,
-                                       pink::Thread* thread,
-                                       void* worker_specific_data,
-                                       pink::PinkEpoll* epoll)
-    : pink::PbConn(fd, ip_port, thread, epoll) {
-}
-
-bool PikaReplClientConn::IsTableStructConsistent(
-    const std::vector<TableStruct>& current_tables,
-    const std::vector<TableStruct>& expect_tables) {
-  if (current_tables.size() != expect_tables.size()) {
-    return false;
-  }
-  for (const auto& table_struct : current_tables) {
-    if (find(expect_tables.begin(), expect_tables.end(),
-             table_struct) == expect_tables.end()) {
-      return false;
-    }
-  }
-  return true;
-}
-
-int PikaReplClientConn::DealMessage() {
-  std::shared_ptr<InnerMessage::InnerResponse> response = std::make_shared<InnerMessage::InnerResponse>();
-  response->ParseFromArray(rbuf_ + cur_pos_ - header_len_, header_len_);
-  switch (response->type()) {
-    case InnerMessage::kMetaSync:
-    {
-      ReplClientTaskArg* task_arg = new ReplClientTaskArg(response, std::dynamic_pointer_cast<PikaReplClientConn>(shared_from_this()));
-      g_pika_rm->ScheduleReplClientBGTask(&PikaReplClientConn::HandleMetaSyncResponse, static_cast<void*>(task_arg));
-      break;
-    }
-    case InnerMessage::kDBSync:
-    {
-      ReplClientTaskArg* task_arg = new ReplClientTaskArg(response, std::dynamic_pointer_cast<PikaReplClientConn>(shared_from_this()));
-      g_pika_rm->ScheduleReplClientBGTask(&PikaReplClientConn::HandleDBSyncResponse, static_cast<void*>(task_arg));
-      break;
-    }
-    case InnerMessage::kTrySync:
-    {
-      ReplClientTaskArg* task_arg = new ReplClientTaskArg(response, std::dynamic_pointer_cast<PikaReplClientConn>(shared_from_this()));
-      g_pika_rm->ScheduleReplClientBGTask(&PikaReplClientConn::HandleTrySyncResponse, static_cast<void*>(task_arg));
-      break;
-    }
-    case InnerMessage::kBinlogSync:
-    {
-      DispatchBinlogRes(response);
-      break;
-    }
-    case InnerMessage::kRemoveSlaveNode:
-    {
-      ReplClientTaskArg* task_arg = new ReplClientTaskArg(response, std::dynamic_pointer_cast<PikaReplClientConn>(shared_from_this()));
-      g_pika_rm->ScheduleReplClientBGTask(&PikaReplClientConn::HandleRemoveSlaveNodeResponse, static_cast<void*>(task_arg));
-      break;
-    }
-    default:
-      break;
-  }
-  return 0;
-}
-
-void PikaReplClientConn::HandleMetaSyncResponse(void* arg) {
-  ReplClientTaskArg* task_arg = static_cast<ReplClientTaskArg*>(arg);
-  std::shared_ptr<pink::PbConn> conn = task_arg->conn;
-  std::shared_ptr<InnerMessage::InnerResponse> response = task_arg->res;
-
-  if (response->code() != InnerMessage::kOk) {
-    std::string reply = response->has_reply() ? response->reply() : "";
-    LOG(WARNING) << "Meta Sync Failed: " << reply;
-    g_pika_server->SyncError();
-    conn->NotifyClose();
-    delete task_arg;
-    return;
-  }
-
-  const InnerMessage::InnerResponse_MetaSync meta_sync = response->meta_sync();
-  if (g_pika_conf->classic_mode() != meta_sync.classic_mode()) {
-    LOG(WARNING) << "Self in " << (g_pika_conf->classic_mode() ? "classic" : "sharding")
-                 << " mode, but master in " << (meta_sync.classic_mode() ? "classic" : "sharding")
-                 << " mode, failed to establish master-slave relationship";
-    g_pika_server->SyncError();
-    conn->NotifyClose();
-    delete task_arg;
-    return;
-  }
-
-  std::vector<TableStruct> master_table_structs;
-  for (int idx = 0; idx < meta_sync.tables_info_size(); ++idx) {
-    InnerMessage::InnerResponse_MetaSync_TableInfo table_info = meta_sync.tables_info(idx);
-    master_table_structs.push_back({table_info.table_name(),
-        static_cast<uint32_t>(table_info.partition_num()), {0}});
-  }
-
-  std::vector<TableStruct> self_table_structs = g_pika_conf->table_structs();
-  if (!PikaReplClientConn::IsTableStructConsistent(self_table_structs, master_table_structs)) {
-    LOG(WARNING) << "Self table structs(number of databases: " << self_table_structs.size()
-                 << ") inconsistent with master(number of databases: " << master_table_structs.size()
-                 << "), failed to establish master-slave relationship";
-    g_pika_server->SyncError();
-    conn->NotifyClose();
-    delete task_arg;
-    return;
-  }
-
-  g_pika_conf->SetWriteBinlog("yes");
-  g_pika_server->PreparePartitionTrySync();
-  g_pika_server->FinishMetaSync();
-  LOG(INFO) << "Finish to handle meta sync response";
-  delete task_arg;
-}
-
-void PikaReplClientConn::HandleDBSyncResponse(void* arg) {
-  ReplClientTaskArg* task_arg = static_cast<ReplClientTaskArg*>(arg);
-  std::shared_ptr<pink::PbConn> conn = task_arg->conn;
-  std::shared_ptr<InnerMessage::InnerResponse> response = task_arg->res;
-
-  const InnerMessage::InnerResponse_DBSync db_sync_response = response->db_sync();
-  int32_t session_id = db_sync_response.session_id();
-  const InnerMessage::Partition partition_response = db_sync_response.partition();
-  std::string table_name = partition_response.table_name();
-  uint32_t partition_id = partition_response.partition_id();
-
-  std::shared_ptr<SyncSlavePartition> slave_partition =
-      g_pika_rm->GetSyncSlavePartitionByName(
-          PartitionInfo(table_name, partition_id));
-  if (!slave_partition) {
-    LOG(WARNING) << "Slave Partition: " << table_name << ":" << partition_id << " Not Found";
-    delete task_arg;
-    return;
-  }
-
-  if (response->code() != InnerMessage::kOk) {
-    slave_partition->SetReplState(ReplState::kError);
-    std::string reply = response->has_reply() ? response->reply() : "";
-    LOG(WARNING) << "DBSync Failed: " << reply;
-    delete task_arg;
-    return;
-  }
-
-  g_pika_rm->UpdateSyncSlavePartitionSessionId(
-      PartitionInfo(table_name, partition_id), session_id);
-
-  std::string partition_name = slave_partition->SyncPartitionInfo().ToString();
-  slave_partition->SetReplState(ReplState::kWaitDBSync);
-  LOG(INFO) << "Partition: " << partition_name << " Need Wait To Sync";
-  delete task_arg;
-}
-
-void PikaReplClientConn::HandleTrySyncResponse(void* arg) {
-  ReplClientTaskArg* task_arg = static_cast<ReplClientTaskArg*>(arg);
-  std::shared_ptr<pink::PbConn> conn = task_arg->conn;
-  std::shared_ptr<InnerMessage::InnerResponse> response = task_arg->res;
-
-  if (response->code() != InnerMessage::kOk) {
-    std::string reply = response->has_reply() ? response->reply() : "";
-    LOG(WARNING) << "TrySync Failed: " << reply;
-    delete task_arg;
-    return;
-  }
-
-  const InnerMessage::InnerResponse_TrySync& try_sync_response = response->try_sync();
-  const InnerMessage::Partition& partition_response = try_sync_response.partition();
-  std::string table_name = partition_response.table_name();
-  uint32_t partition_id = partition_response.partition_id();
-  std::shared_ptr<Partition> partition = g_pika_server->GetTablePartitionById(table_name, partition_id);
-  if (!partition) {
-    LOG(WARNING) << "Partition: " << table_name << ":" << partition_id << " Not Found";
-    delete task_arg;
-    return;
-  }
-
-  std::shared_ptr<SyncSlavePartition> slave_partition =
-      g_pika_rm->GetSyncSlavePartitionByName(
-          PartitionInfo(table_name, partition_id));
-  if (!slave_partition) {
-    LOG(WARNING) << "Slave Partition: " << table_name << ":" << partition_id << " Not Found";
-    delete task_arg;
-    return;
-  }
-
-  std::string partition_name = partition->GetPartitionName();
-  if (try_sync_response.reply_code() == InnerMessage::InnerResponse::TrySync::kOk) {
-    BinlogOffset boffset;
-    int32_t session_id = try_sync_response.session_id();
-    partition->logger()->GetProducerStatus(&boffset.filenum, &boffset.offset);
-    g_pika_rm->UpdateSyncSlavePartitionSessionId(PartitionInfo(table_name, partition_id), session_id);
-    g_pika_rm->SendPartitionBinlogSyncAckRequest(table_name, partition_id, boffset, boffset, true);
-    slave_partition->SetReplState(ReplState::kConnected);
-    LOG(INFO) << "Partition: " << partition_name << " TrySync Ok";
-  } else if (try_sync_response.reply_code() == InnerMessage::InnerResponse::TrySync::kSyncPointBePurged) {
-    slave_partition->SetReplState(ReplState::kTryDBSync);
-    LOG(INFO) << "Partition: " << partition_name << " Need To Try DBSync";
-  } else if (try_sync_response.reply_code() == InnerMessage::InnerResponse::TrySync::kSyncPointLarger) {
-    slave_partition->SetReplState(ReplState::kError);
-    LOG(WARNING) << "Partition: " << partition_name << " TrySync Error, Because the invalid filenum and offset";
-  } else if (try_sync_response.reply_code() == InnerMessage::InnerResponse::TrySync::kError) {
-    slave_partition->SetReplState(ReplState::kError);
-    LOG(WARNING) << "Partition: " << partition_name << " TrySync Error";
-  }
-  delete task_arg;
-}
-
-void PikaReplClientConn::DispatchBinlogRes(const std::shared_ptr<InnerMessage::InnerResponse> res) {
-  // partition to a bunch of binlog chips
-  std::unordered_map<PartitionInfo, std::vector<int>*, hash_partition_info> par_binlog;
-  for (int i = 0; i < res->binlog_sync_size(); ++i) {
-    const InnerMessage::InnerResponse::BinlogSync& binlog_res = res->binlog_sync(i);
-    // hash key: table + partition_id
-    PartitionInfo p_info(binlog_res.partition().table_name(),
-                         binlog_res.partition().partition_id());
-    if (par_binlog.find(p_info) == par_binlog.end()) {
-      par_binlog[p_info] = new std::vector<int>();
-    }
-    par_binlog[p_info]->push_back(i);
-  }
-
-  for (auto& binlog_nums : par_binlog) {
-    RmNode node(binlog_nums.first.table_name_, binlog_nums.first.partition_id_);
-    g_pika_rm->SetSlaveLastRecvTime(node, slash::NowMicros());
-    g_pika_rm->ScheduleWriteBinlogTask(
-        binlog_nums.first.table_name_ + std::to_string(binlog_nums.first.partition_id_),
-        res,
-        std::dynamic_pointer_cast<PikaReplClientConn>(shared_from_this()),
-        reinterpret_cast<void*>(binlog_nums.second));
-  }
-}
-
-void PikaReplClientConn::HandleRemoveSlaveNodeResponse(void* arg) {
-  ReplClientTaskArg* task_arg = static_cast<ReplClientTaskArg*>(arg);
-  std::shared_ptr<pink::PbConn> conn = task_arg->conn;
-  std::shared_ptr<InnerMessage::InnerResponse> response = task_arg->res;
-  if (response->code() != InnerMessage::kOk) {
-    std::string reply =
response->has_reply() ? response->reply() : ""; - LOG(WARNING) << "Remove slave node Failed: " << reply; - delete task_arg; - return; - } - delete task_arg; -} - diff --git a/tools/pika_migrate/src/pika_repl_client_thread.cc b/tools/pika_migrate/src/pika_repl_client_thread.cc deleted file mode 100644 index cfb8de7500..0000000000 --- a/tools/pika_migrate/src/pika_repl_client_thread.cc +++ /dev/null @@ -1,48 +0,0 @@ -// Copyright (c) 2019-present, Qihoo, Inc. All rights reserved. -// This source code is licensed under the BSD-style license found in the -// LICENSE file in the root directory of this source tree. An additional grant -// of patent rights can be found in the PATENTS file in the same directory. - -#include "include/pika_repl_client_thread.h" - -#include "include/pika_server.h" - -#include "slash/include/slash_string.h" - -extern PikaServer* g_pika_server; - -PikaReplClientThread::PikaReplClientThread(int cron_interval, int keepalive_timeout) : - ClientThread(&conn_factory_, cron_interval, keepalive_timeout, &handle_, NULL) { -} - -void PikaReplClientThread::ReplClientHandle::FdClosedHandle(int fd, const std::string& ip_port) const { - LOG(INFO) << "ReplClient Close conn, fd=" << fd << ", ip_port=" << ip_port; - std::string ip; - int port = 0; - if (!slash::ParseIpPortString(ip_port, ip, port)) { - LOG(WARNING) << "Parse ip_port error " << ip_port; - return; - } - if (ip == g_pika_server->master_ip() - && port == g_pika_server->master_port() + kPortShiftReplServer - && PIKA_REPL_ERROR != g_pika_server->repl_state()) { // if state machine in error state, no retry - LOG(WARNING) << "Master conn disconnect : " << ip_port << " try reconnect"; - g_pika_server->ResetMetaSyncStatus(); - } -}; - -void PikaReplClientThread::ReplClientHandle::FdTimeoutHandle(int fd, const std::string& ip_port) const { - LOG(INFO) << "ReplClient Timeout conn, fd=" << fd << ", ip_port=" << ip_port; - std::string ip; - int port = 0; - if (!slash::ParseIpPortString(ip_port, ip, port)) { - LOG(WARNING) << "Parse ip_port error " << ip_port; - return; - } - if (ip == g_pika_server->master_ip() - && port == g_pika_server->master_port() + kPortShiftReplServer - && PIKA_REPL_ERROR != g_pika_server->repl_state()) { // if state machine in error state, no retry - LOG(WARNING) << "Master conn timeout : " << ip_port << " try reconnect"; - g_pika_server->ResetMetaSyncStatus(); - } -}; diff --git a/tools/pika_migrate/src/pika_repl_server.cc b/tools/pika_migrate/src/pika_repl_server.cc deleted file mode 100644 index 6587780561..0000000000 --- a/tools/pika_migrate/src/pika_repl_server.cc +++ /dev/null @@ -1,126 +0,0 @@ -// Copyright (c) 2019-present, Qihoo, Inc. All rights reserved. -// This source code is licensed under the BSD-style license found in the -// LICENSE file in the root directory of this source tree. An additional grant -// of patent rights can be found in the PATENTS file in the same directory. 
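The file removed in the next hunk, pika_repl_server.cc, pushes binlog chips to slaves by looking up the slave connection's fd in an ip:port -> fd map (client_conn_map_) guarded by a pthread rwlock: registration and removal take the write lock, the send path only the read lock. A minimal standalone sketch of that registry pattern, using std::shared_mutex in place of slash::RWLock (the ConnRegistry class and its method names are illustrative stand-ins, not Pika's API):

#include <iostream>
#include <map>
#include <mutex>
#include <shared_mutex>
#include <string>

// Illustrative sketch: an ip:port -> fd registry with reader/writer locking,
// mirroring PikaReplServer::UpdateClientConnMap/RemoveClientConn and the
// lookup done at the top of PikaReplServer::Write.
class ConnRegistry {
 public:
  void Update(const std::string& ip_port, int fd) {
    std::unique_lock<std::shared_mutex> l(mu_);  // exclusive (write) lock
    conns_[ip_port] = fd;
  }
  void Remove(int fd) {
    std::unique_lock<std::shared_mutex> l(mu_);
    for (auto it = conns_.begin(); it != conns_.end(); ++it) {
      if (it->second == fd) { conns_.erase(it); break; }
    }
  }
  bool Lookup(const std::string& ip_port, int* fd) const {
    std::shared_lock<std::shared_mutex> l(mu_);  // shared (read) lock
    auto it = conns_.find(ip_port);
    if (it == conns_.end()) return false;
    *fd = it->second;
    return true;
  }
 private:
  mutable std::shared_mutex mu_;
  std::map<std::string, int> conns_;
};

int main() {
  ConnRegistry reg;
  reg.Update("10.0.0.2:11221", 42);
  int fd = -1;
  std::cout << (reg.Lookup("10.0.0.2:11221", &fd) ? fd : -1) << std::endl;  // 42
  reg.Remove(42);
  std::cout << reg.Lookup("10.0.0.2:11221", &fd) << std::endl;              // 0
  return 0;
}

In the real code, Write() then fetches the pink connection object for that fd and appends the serialized InnerResponse to it.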
-
-#include "include/pika_repl_server.h"
-
-#include <glog/logging.h>
-
-#include "include/pika_rm.h"
-#include "include/pika_conf.h"
-#include "include/pika_server.h"
-
-extern PikaConf* g_pika_conf;
-extern PikaServer* g_pika_server;
-extern PikaReplicaManager* g_pika_rm;
-
-PikaReplServer::PikaReplServer(const std::set<std::string>& ips,
-                               int port,
-                               int cron_interval) {
-  server_tp_ = new pink::ThreadPool(PIKA_REPL_SERVER_TP_SIZE, 100000);
-  pika_repl_server_thread_ = new PikaReplServerThread(ips, port, cron_interval);
-  pika_repl_server_thread_->set_thread_name("PikaReplServer");
-  pthread_rwlock_init(&client_conn_rwlock_, NULL);
-}
-
-PikaReplServer::~PikaReplServer() {
-  delete pika_repl_server_thread_;
-  delete server_tp_;
-  pthread_rwlock_destroy(&client_conn_rwlock_);
-  LOG(INFO) << "PikaReplServer exit!!!";
-}
-
-int PikaReplServer::Start() {
-  int res = pika_repl_server_thread_->StartThread();
-  if (res != pink::kSuccess) {
-    LOG(FATAL) << "Start Pika Repl Server Thread Error: " << res
-               << (res == pink::kBindError ? ": bind port " + std::to_string(pika_repl_server_thread_->ListenPort()) + " conflict" : ": create thread error ")
-               << ", Listen on this port to handle the request sent by the Slave";
-  }
-  res = server_tp_->start_thread_pool();
-  if (res != pink::kSuccess) {
-    LOG(FATAL) << "Start ThreadPool Error: " << res << (res == pink::kCreateThreadError ? ": create thread error " : ": other error");
-  }
-  return res;
-}
-
-int PikaReplServer::Stop() {
-  server_tp_->stop_thread_pool();
-  pika_repl_server_thread_->StopThread();
-  return 0;
-}
-
-slash::Status PikaReplServer::SendSlaveBinlogChips(const std::string& ip,
-                                                   int port,
-                                                   const std::vector<WriteTask>& tasks) {
-  InnerMessage::InnerResponse response;
-  response.set_code(InnerMessage::kOk);
-  response.set_type(InnerMessage::Type::kBinlogSync);
-  for (const auto task : tasks) {
-    InnerMessage::InnerResponse::BinlogSync* binlog_sync = response.add_binlog_sync();
-    binlog_sync->set_session_id(task.rm_node_.SessionId());
-    InnerMessage::Partition* partition = binlog_sync->mutable_partition();
-    partition->set_table_name(task.rm_node_.TableName());
-    partition->set_partition_id(task.rm_node_.PartitionId());
-    InnerMessage::BinlogOffset* boffset = binlog_sync->mutable_binlog_offset();
-    boffset->set_filenum(task.binlog_chip_.offset_.filenum);
-    boffset->set_offset(task.binlog_chip_.offset_.offset);
-    binlog_sync->set_binlog(task.binlog_chip_.binlog_);
-  }
-
-  std::string binlog_chip_pb;
-  if (!response.SerializeToString(&binlog_chip_pb)) {
-    return Status::Corruption("Serialized Failed");
-  }
-  return Write(ip, port, binlog_chip_pb);
-}
-
-slash::Status PikaReplServer::Write(const std::string& ip,
-                                    const int port,
-                                    const std::string& msg) {
-  slash::RWLock l(&client_conn_rwlock_, false);
-  const std::string ip_port = slash::IpPortString(ip, port);
-  if (client_conn_map_.find(ip_port) == client_conn_map_.end()) {
-    return Status::NotFound("The " + ip_port + " fd cannot be found");
-  }
-  int fd = client_conn_map_[ip_port];
-  std::shared_ptr<pink::PbConn> conn =
-      std::dynamic_pointer_cast<pink::PbConn>(pika_repl_server_thread_->get_conn(fd));
-  if (conn == nullptr) {
-    return Status::NotFound("The " + ip_port + " conn cannot be found");
-  }
-
-  if (conn->WriteResp(msg)) {
-    conn->NotifyClose();
-    return Status::Corruption("The " + ip_port + " conn, Write Resp Failed");
-  }
-  conn->NotifyWrite();
-  return Status::OK();
-}
-
-void PikaReplServer::Schedule(pink::TaskFunc func, void* arg) {
-  server_tp_->Schedule(func, arg);
-}
-
-void PikaReplServer::UpdateClientConnMap(const std::string& ip_port, int fd) {
-  slash::RWLock l(&client_conn_rwlock_, true);
-  client_conn_map_[ip_port] = fd;
-}
-
-void PikaReplServer::RemoveClientConn(int fd) {
-  slash::RWLock l(&client_conn_rwlock_, true);
-  std::map<std::string, int>::const_iterator iter = client_conn_map_.begin();
-  while (iter != client_conn_map_.end()) {
-    if (iter->second == fd) {
-      iter = client_conn_map_.erase(iter);
-      break;
-    }
-    iter++;
-  }
-}
-
-void PikaReplServer::KillAllConns() {
-  return pika_repl_server_thread_->KillAllConns();
-}
-
diff --git a/tools/pika_migrate/src/pika_repl_server_conn.cc b/tools/pika_migrate/src/pika_repl_server_conn.cc
deleted file mode 100644
index 85b3273741..0000000000
--- a/tools/pika_migrate/src/pika_repl_server_conn.cc
+++ /dev/null
@@ -1,448 +0,0 @@
-// Copyright (c) 2019-present, Qihoo, Inc.  All rights reserved.
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree. An additional grant
-// of patent rights can be found in the PATENTS file in the same directory.
-
-#include "include/pika_repl_server_conn.h"
-
-#include <glog/logging.h>
-
-#include "include/pika_rm.h"
-#include "include/pika_server.h"
-
-extern PikaServer* g_pika_server;
-extern PikaReplicaManager* g_pika_rm;
-
-PikaReplServerConn::PikaReplServerConn(int fd,
-                                       std::string ip_port,
-                                       pink::Thread* thread,
-                                       void* worker_specific_data, pink::PinkEpoll* epoll)
-    : PbConn(fd, ip_port, thread, epoll) {
-}
-
-PikaReplServerConn::~PikaReplServerConn() {
-}
-
-void PikaReplServerConn::HandleMetaSyncRequest(void* arg) {
-  ReplServerTaskArg* task_arg = static_cast<ReplServerTaskArg*>(arg);
-  const std::shared_ptr<InnerMessage::InnerRequest> req = task_arg->req;
-  std::shared_ptr<pink::PbConn> conn = task_arg->conn;
-
-  InnerMessage::InnerRequest::MetaSync meta_sync_request = req->meta_sync();
-  InnerMessage::Node node = meta_sync_request.node();
-  std::string masterauth = meta_sync_request.has_auth() ? meta_sync_request.auth() : "";
-
-  InnerMessage::InnerResponse response;
-  response.set_type(InnerMessage::kMetaSync);
-  if (!g_pika_conf->requirepass().empty()
-      && g_pika_conf->requirepass() != masterauth) {
-    response.set_code(InnerMessage::kError);
-    response.set_reply("Auth with master error, Invalid masterauth");
-  } else {
-    std::vector<TableStruct> table_structs = g_pika_conf->table_structs();
-    bool success = g_pika_server->TryAddSlave(node.ip(), node.port(), conn->fd(), table_structs);
-    const std::string ip_port = slash::IpPortString(node.ip(), node.port());
-    g_pika_rm->ReplServerUpdateClientConnMap(ip_port, conn->fd());
-    if (!success) {
-      response.set_code(InnerMessage::kError);
-      response.set_reply("Slave AlreadyExist");
-    } else {
-      g_pika_server->BecomeMaster();
-      response.set_code(InnerMessage::kOk);
-      InnerMessage::InnerResponse_MetaSync* meta_sync = response.mutable_meta_sync();
-      meta_sync->set_classic_mode(g_pika_conf->classic_mode());
-      for (const auto& table_struct : table_structs) {
-        InnerMessage::InnerResponse_MetaSync_TableInfo* table_info = meta_sync->add_tables_info();
-        table_info->set_table_name(table_struct.table_name);
-        table_info->set_partition_num(table_struct.partition_num);
-      }
-    }
-  }
-
-  std::string reply_str;
-  if (!response.SerializeToString(&reply_str)
-      || conn->WriteResp(reply_str)) {
-    LOG(WARNING) << "Process MetaSync request serialization failed";
-    conn->NotifyClose();
-    delete task_arg;
-    return;
-  }
-  conn->NotifyWrite();
-  delete task_arg;
-}
-
-void PikaReplServerConn::HandleTrySyncRequest(void* arg) {
-  ReplServerTaskArg* task_arg = static_cast<ReplServerTaskArg*>(arg);
-  const std::shared_ptr<InnerMessage::InnerRequest> req = task_arg->req;
-  std::shared_ptr<pink::PbConn> conn = task_arg->conn;
-
-  InnerMessage::InnerRequest::TrySync try_sync_request = req->try_sync();
-  InnerMessage::Partition partition_request = try_sync_request.partition();
-  InnerMessage::BinlogOffset slave_boffset = try_sync_request.binlog_offset();
-  InnerMessage::Node node = try_sync_request.node();
-
-  InnerMessage::InnerResponse response;
-  InnerMessage::InnerResponse::TrySync* try_sync_response = response.mutable_try_sync();
-  InnerMessage::Partition* partition_response = try_sync_response->mutable_partition();
-  InnerMessage::BinlogOffset* master_partition_boffset = try_sync_response->mutable_binlog_offset();
-
-  std::string table_name = partition_request.table_name();
-  uint32_t partition_id = partition_request.partition_id();
-
-  bool pre_success = true;
-  response.set_type(InnerMessage::Type::kTrySync);
-  std::shared_ptr<Partition> partition = g_pika_server->GetTablePartitionById(table_name, partition_id);
-  if (!partition) {
-    response.set_code(InnerMessage::kError);
-    response.set_reply("Partition not found");
-    LOG(WARNING) << "Table Name: " << table_name << " Partition ID: "
-                 << partition_id << " Not Found, TrySync Error";
-    pre_success = false;
-  }
-
-  BinlogOffset boffset;
-  std::string partition_name;
-  if (pre_success) {
-    partition_name = partition->GetPartitionName();
-    LOG(INFO) << "Receive Trysync, Slave ip: " << node.ip() << ", Slave port:"
-              << node.port() << ", Partition: " << partition_name << ", filenum: "
-              << slave_boffset.filenum() << ", pro_offset: " << slave_boffset.offset();
-
-    response.set_code(InnerMessage::kOk);
-    partition_response->set_table_name(table_name);
-    partition_response->set_partition_id(partition_id);
-    if (!partition->GetBinlogOffset(&boffset)) {
-      try_sync_response->set_reply_code(InnerMessage::InnerResponse::TrySync::kError);
-      LOG(WARNING) << "Handle TrySync, Partition: "
-                   << partition_name <<
" Get binlog offset error, TrySync failed"; - pre_success = false; - } - } - - if (pre_success) { - master_partition_boffset->set_filenum(boffset.filenum); - master_partition_boffset->set_offset(boffset.offset); - if (boffset.filenum < slave_boffset.filenum() - || (boffset.filenum == slave_boffset.filenum() && boffset.offset < slave_boffset.offset())) { - try_sync_response->set_reply_code(InnerMessage::InnerResponse::TrySync::kSyncPointLarger); - LOG(WARNING) << "Slave offset is larger than mine, Slave ip: " - << node.ip() << ", Slave port: " << node.port() << ", Partition: " - << partition_name << ", filenum: " << slave_boffset.filenum() - << ", pro_offset_: " << slave_boffset.offset(); - pre_success = false; - } - if (pre_success) { - std::string confile = NewFileName(partition->logger()->filename, slave_boffset.filenum()); - if (!slash::FileExists(confile)) { - LOG(INFO) << "Partition: " << partition_name << " binlog has been purged, may need full sync"; - try_sync_response->set_reply_code(InnerMessage::InnerResponse::TrySync::kSyncPointBePurged); - pre_success = false; - } - } - if (pre_success) { - PikaBinlogReader reader; - reader.Seek(partition->logger(), slave_boffset.filenum(), slave_boffset.offset()); - BinlogOffset seeked_offset; - reader.GetReaderStatus(&(seeked_offset.filenum), &(seeked_offset.offset)); - if (seeked_offset.filenum != slave_boffset.filenum() || seeked_offset.offset != slave_boffset.offset()) { - try_sync_response->set_reply_code(InnerMessage::InnerResponse::TrySync::kError); - LOG(WARNING) << "Slave offset is not a start point of cur log, Slave ip: " - << node.ip() << ", Slave port: " << node.port() << ", Partition: " - << partition_name << ", cloest start point, filenum: " << seeked_offset.filenum - << ", offset: " << seeked_offset.offset; - pre_success = false; - } - } - } - - if (pre_success) { - if (!g_pika_rm->CheckPartitionSlaveExist(RmNode(node.ip(), node.port(), table_name, partition_id))) { - int32_t session_id = g_pika_rm->GenPartitionSessionId(table_name, partition_id); - if (session_id != -1) { - try_sync_response->set_session_id(session_id); - // incremental sync - Status s = g_pika_rm->AddPartitionSlave(RmNode(node.ip(), node.port(), table_name, partition_id, session_id)); - if (!s.ok()) { - try_sync_response->set_reply_code(InnerMessage::InnerResponse::TrySync::kError); - LOG(WARNING) << "Partition: " << partition_name << " TrySync Failed, " << s.ToString(); - pre_success = false; - } else { - const std::string ip_port = slash::IpPortString(node.ip(), node.port()); - g_pika_rm->ReplServerUpdateClientConnMap(ip_port, conn->fd()); - try_sync_response->set_reply_code(InnerMessage::InnerResponse::TrySync::kOk); - LOG(INFO) << "Partition: " << partition_name << " TrySync Success, Session: " << session_id; - } - } else { - try_sync_response->set_reply_code(InnerMessage::InnerResponse::TrySync::kError); - LOG(WARNING) << "Partition: " << partition_name << ", Gen Session id Failed"; - pre_success = false; - } - } else { - int32_t session_id; - Status s = g_pika_rm->GetPartitionSlaveSession( - RmNode(node.ip(), node.port(), table_name, partition_id), &session_id); - if (!s.ok()) { - try_sync_response->set_reply_code(InnerMessage::InnerResponse::TrySync::kError); - LOG(WARNING) << "Partition: " << partition_name << ", Get Session id Failed" << s.ToString(); - pre_success = false; - } else { - try_sync_response->set_reply_code(InnerMessage::InnerResponse::TrySync::kOk); - try_sync_response->set_session_id(session_id); - LOG(INFO) << "Partition: " << 
partition_name << " TrySync Success, Session: " << session_id; - } - } - } - - std::string reply_str; - if (!response.SerializeToString(&reply_str) - || conn->WriteResp(reply_str)) { - LOG(WARNING) << "Handle Try Sync Failed"; - conn->NotifyClose(); - delete task_arg; - return; - } - conn->NotifyWrite(); - delete task_arg; -} - - -void PikaReplServerConn::HandleDBSyncRequest(void* arg) { - ReplServerTaskArg* task_arg = static_cast(arg); - const std::shared_ptr req = task_arg->req; - std::shared_ptr conn = task_arg->conn; - - InnerMessage::InnerRequest::DBSync db_sync_request = req->db_sync(); - InnerMessage::Partition partition_request = db_sync_request.partition(); - InnerMessage::Node node = db_sync_request.node(); - InnerMessage::BinlogOffset slave_boffset = db_sync_request.binlog_offset(); - std::string table_name = partition_request.table_name(); - uint32_t partition_id = partition_request.partition_id(); - std::string partition_name = table_name + "_" + std::to_string(partition_id); - - InnerMessage::InnerResponse response; - response.set_code(InnerMessage::kOk); - response.set_type(InnerMessage::Type::kDBSync); - InnerMessage::InnerResponse::DBSync* db_sync_response = response.mutable_db_sync(); - InnerMessage::Partition* partition_response = db_sync_response->mutable_partition(); - partition_response->set_table_name(table_name); - partition_response->set_partition_id(partition_id); - - LOG(INFO) << "Handle partition DBSync Request"; - bool prior_success = true; - if (!g_pika_rm->CheckPartitionSlaveExist(RmNode(node.ip(), node.port(), table_name, partition_id))) { - int32_t session_id = g_pika_rm->GenPartitionSessionId(table_name, partition_id); - if (session_id == -1) { - response.set_code(InnerMessage::kError); - LOG(WARNING) << "Partition: " << partition_name << ", Gen Session id Failed"; - prior_success = false; - } - if (prior_success) { - db_sync_response->set_session_id(session_id); - Status s = g_pika_rm->AddPartitionSlave(RmNode(node.ip(), node.port(), table_name, partition_id, session_id)); - if (s.ok()) { - const std::string ip_port = slash::IpPortString(node.ip(), node.port()); - g_pika_rm->ReplServerUpdateClientConnMap(ip_port, conn->fd()); - LOG(INFO) << "Partition: " << partition_name << " Handle DBSync Request Success, Session: " << session_id; - } else { - response.set_code(InnerMessage::kError); - LOG(WARNING) << "Partition: " << partition_name << " Handle DBSync Request Failed, " << s.ToString(); - prior_success = false; - } - } else { - db_sync_response->set_session_id(-1); - } - } else { - int32_t session_id; - Status s = g_pika_rm->GetPartitionSlaveSession( - RmNode(node.ip(), node.port(), table_name, partition_id), &session_id); - if (!s.ok()) { - response.set_code(InnerMessage::kError); - LOG(WARNING) << "Partition: " << partition_name << ", Get Session id Failed" << s.ToString(); - prior_success = false; - db_sync_response->set_session_id(-1); - } else { - db_sync_response->set_session_id(session_id); - LOG(INFO) << "Partition: " << partition_name << " Handle DBSync Request Success, Session: " << session_id; - } - } - - g_pika_server->TryDBSync(node.ip(), node.port() + kPortShiftRSync, - table_name, partition_id, slave_boffset.filenum()); - - std::string reply_str; - if (!response.SerializeToString(&reply_str) - || conn->WriteResp(reply_str)) { - LOG(WARNING) << "Handle DBSync Failed"; - conn->NotifyClose(); - delete task_arg; - return; - } - conn->NotifyWrite(); - delete task_arg; -} - -void PikaReplServerConn::HandleBinlogSyncRequest(void* arg) { - 
ReplServerTaskArg* task_arg = static_cast(arg); - const std::shared_ptr req = task_arg->req; - std::shared_ptr conn = task_arg->conn; - if (!req->has_binlog_sync()) { - LOG(WARNING) << "Pb parse error"; - //conn->NotifyClose(); - delete task_arg; - return; - } - const InnerMessage::InnerRequest::BinlogSync& binlog_req = req->binlog_sync(); - const InnerMessage::Node& node = binlog_req.node(); - const std::string& table_name = binlog_req.table_name(); - uint32_t partition_id = binlog_req.partition_id(); - - bool is_first_send = binlog_req.first_send(); - int32_t session_id = binlog_req.session_id(); - const InnerMessage::BinlogOffset& ack_range_start = binlog_req.ack_range_start(); - const InnerMessage::BinlogOffset& ack_range_end = binlog_req.ack_range_end(); - BinlogOffset range_start(ack_range_start.filenum(), ack_range_start.offset()); - BinlogOffset range_end(ack_range_end.filenum(), ack_range_end.offset()); - - if (!g_pika_rm->CheckMasterPartitionSessionId(node.ip(), - node.port(), table_name, partition_id, session_id)) { - LOG(WARNING) << "Check Session failed " << node.ip() << ":" << node.port() - << ", " << table_name << "_" << partition_id; - //conn->NotifyClose(); - delete task_arg; - return; - } - - // Set ack info from slave - RmNode slave_node = RmNode(node.ip(), node.port(), table_name, partition_id); - - Status s = g_pika_rm->SetMasterLastRecvTime(slave_node, slash::NowMicros()); - if (!s.ok()) { - LOG(WARNING) << "SetMasterLastRecvTime failed " << node.ip() << ":" << node.port() - << ", " << table_name << "_" << partition_id << " " << s.ToString(); - conn->NotifyClose(); - delete task_arg; - return; - } - - if (is_first_send) { - if (!(range_start == range_end)) { - LOG(WARNING) << "first binlogsync request pb argument invalid"; - conn->NotifyClose(); - delete task_arg; - return; - } - Status s = g_pika_rm->ActivateBinlogSync(slave_node, range_start); - if (!s.ok()) { - LOG(WARNING) << "Activate Binlog Sync failed " << slave_node.ToString() << " " << s.ToString(); - conn->NotifyClose(); - delete task_arg; - return; - } - delete task_arg; - return; - } - - // not the first_send the range_ack cant be 0 - // set this case as ping - if (range_start == BinlogOffset() && range_end == BinlogOffset()) { - delete task_arg; - return; - } - s = g_pika_rm->UpdateSyncBinlogStatus(slave_node, range_start, range_end); - if (!s.ok()) { - LOG(WARNING) << "Update binlog ack failed " << table_name << " " << partition_id << " " << s.ToString(); - conn->NotifyClose(); - delete task_arg; - return; - } - delete task_arg; - g_pika_server->SignalAuxiliary(); - return; -} - -void PikaReplServerConn::HandleRemoveSlaveNodeRequest(void* arg) { - ReplServerTaskArg* task_arg = static_cast(arg); - const std::shared_ptr req = task_arg->req; - std::shared_ptr conn = task_arg->conn; - if (!req->remove_slave_node_size()) { - LOG(WARNING) << "Pb parse error"; - conn->NotifyClose(); - delete task_arg; - return; - } - const InnerMessage::InnerRequest::RemoveSlaveNode& remove_slave_node_req = req->remove_slave_node(0); - const InnerMessage::Node& node = remove_slave_node_req.node(); - const InnerMessage::Partition& partition = remove_slave_node_req.partition(); - - std::string table_name = partition.table_name(); - uint32_t partition_id = partition.partition_id(); - Status s = g_pika_rm->RemovePartitionSlave(RmNode(node.ip(), - node.port(), table_name, partition_id)); - - InnerMessage::InnerResponse response; - response.set_code(InnerMessage::kOk); - response.set_type(InnerMessage::Type::kRemoveSlaveNode); - 
InnerMessage::InnerResponse::RemoveSlaveNode* remove_slave_node_response = response.add_remove_slave_node();
-  InnerMessage::Partition* partition_response = remove_slave_node_response->mutable_partition();
-  partition_response->set_table_name(table_name);
-  partition_response->set_partition_id(partition_id);
-  InnerMessage::Node* node_response = remove_slave_node_response->mutable_node();
-  node_response->set_ip(g_pika_server->host());
-  node_response->set_port(g_pika_server->port());
-
-  std::string reply_str;
-  if (!response.SerializeToString(&reply_str)
-      || conn->WriteResp(reply_str)) {
-    LOG(WARNING) << "Remove Slave Node Failed";
-    conn->NotifyClose();
-    delete task_arg;
-    return;
-  }
-  conn->NotifyWrite();
-  delete task_arg;
-}
-
-int PikaReplServerConn::DealMessage() {
-  std::shared_ptr<InnerMessage::InnerRequest> req = std::make_shared<InnerMessage::InnerRequest>();
-  bool parse_res = req->ParseFromArray(rbuf_ + cur_pos_ - header_len_, header_len_);
-  if (!parse_res) {
-    LOG(WARNING) << "Pika repl server connection pb parse error.";
-    return -1;
-  }
-  int res = 0;
-  switch (req->type()) {
-    case InnerMessage::kMetaSync:
-    {
-      ReplServerTaskArg* task_arg = new ReplServerTaskArg(req, std::dynamic_pointer_cast<PikaReplServerConn>(shared_from_this()));
-      g_pika_rm->ScheduleReplServerBGTask(&PikaReplServerConn::HandleMetaSyncRequest, task_arg);
-      break;
-    }
-    case InnerMessage::kTrySync:
-    {
-      ReplServerTaskArg* task_arg = new ReplServerTaskArg(req, std::dynamic_pointer_cast<PikaReplServerConn>(shared_from_this()));
-      g_pika_rm->ScheduleReplServerBGTask(&PikaReplServerConn::HandleTrySyncRequest, task_arg);
-      break;
-    }
-    case InnerMessage::kDBSync:
-    {
-      ReplServerTaskArg* task_arg = new ReplServerTaskArg(req, std::dynamic_pointer_cast<PikaReplServerConn>(shared_from_this()));
-      g_pika_rm->ScheduleReplServerBGTask(&PikaReplServerConn::HandleDBSyncRequest, task_arg);
-      break;
-    }
-    case InnerMessage::kBinlogSync:
-    {
-      ReplServerTaskArg* task_arg = new ReplServerTaskArg(req, std::dynamic_pointer_cast<PikaReplServerConn>(shared_from_this()));
-      g_pika_rm->ScheduleReplServerBGTask(&PikaReplServerConn::HandleBinlogSyncRequest, task_arg);
-      break;
-    }
-    case InnerMessage::kRemoveSlaveNode:
-    {
-      ReplServerTaskArg* task_arg = new ReplServerTaskArg(req, std::dynamic_pointer_cast<PikaReplServerConn>(shared_from_this()));
-      g_pika_rm->ScheduleReplServerBGTask(&PikaReplServerConn::HandleRemoveSlaveNodeRequest, task_arg);
-      break;
-    }
-    default:
-      break;
-  }
-  return res;
-}
diff --git a/tools/pika_migrate/src/pika_repl_server_thread.cc b/tools/pika_migrate/src/pika_repl_server_thread.cc
deleted file mode 100644
index edc33f8fd0..0000000000
--- a/tools/pika_migrate/src/pika_repl_server_thread.cc
+++ /dev/null
@@ -1,32 +0,0 @@
-// Copyright (c) 2019-present, Qihoo, Inc.  All rights reserved.
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree. An additional grant
-// of patent rights can be found in the PATENTS file in the same directory.
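PikaReplServerConn::DealMessage() above is effectively a dispatch table keyed on the InnerRequest type tag: each request is wrapped in a ReplServerTaskArg and scheduled onto the replica manager's background thread pool. A compact standalone sketch of the same type-tagged dispatch pattern (ReqType, TaskArg and the handler bodies are illustrative stand-ins, not Pika's types; assumes C++14 or later so enum keys are hashable):

#include <functional>
#include <iostream>
#include <unordered_map>

// Illustrative stand-ins for the protobuf type tag and the task argument.
enum class ReqType { kMetaSync, kTrySync, kBinlogSync };
struct TaskArg { ReqType type; };

using Handler = std::function<void(TaskArg*)>;

// Dispatch table: one handler per request type, mirroring the switch in
// DealMessage (which schedules the handler onto a thread pool instead of
// running it inline).
static const std::unordered_map<ReqType, Handler> kHandlers = {
    {ReqType::kMetaSync,   [](TaskArg* t) { std::cout << "meta sync\n";   delete t; }},
    {ReqType::kTrySync,    [](TaskArg* t) { std::cout << "try sync\n";    delete t; }},
    {ReqType::kBinlogSync, [](TaskArg* t) { std::cout << "binlog sync\n"; delete t; }},
};

int DealMessage(ReqType type) {
  auto it = kHandlers.find(type);
  if (it == kHandlers.end()) return 0;  // unknown types are silently ignored
  it->second(new TaskArg{type});        // the handler owns and frees the arg
  return 0;
}

int main() {
  DealMessage(ReqType::kMetaSync);
  DealMessage(ReqType::kBinlogSync);
  return 0;
}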
- -#include "include/pika_repl_server_thread.h" - -#include "include/pika_rm.h" -#include "include/pika_server.h" - -extern PikaServer* g_pika_server; -extern PikaReplicaManager* g_pika_rm; - -PikaReplServerThread::PikaReplServerThread(const std::set& ips, - int port, - int cron_interval) : - HolyThread(ips, port, &conn_factory_, cron_interval, &handle_, true), - conn_factory_(this), - port_(port), - serial_(0) { - set_keepalive_timeout(180); -} - -int PikaReplServerThread::ListenPort() { - return port_; -} - -void PikaReplServerThread::ReplServerHandle::FdClosedHandle(int fd, const std::string& ip_port) const { - LOG(INFO) << "ServerThread Close Slave Conn, fd: " << fd << ", ip_port: " << ip_port; - g_pika_server->DeleteSlave(fd); - g_pika_rm->ReplServerRemoveClientConn(fd); -} diff --git a/tools/pika_migrate/src/pika_rm.cc b/tools/pika_migrate/src/pika_rm.cc deleted file mode 100644 index 2c240ba9a4..0000000000 --- a/tools/pika_migrate/src/pika_rm.cc +++ /dev/null @@ -1,1634 +0,0 @@ -// Copyright (c) 2015-present, Qihoo, Inc. All rights reserved. -// This source code is licensed under the BSD-style license found in the -// LICENSE file in the root directory of this source tree. An additional grant -// of patent rights can be found in the PATENTS file in the same directory. - -#include "set" - -#include -#include -#include -#include - -#include "pink/include/pink_cli.h" - -#include "include/pika_rm.h" -#include "include/pika_conf.h" -#include "include/pika_server.h" -#include "include/pika_repl_client.h" -#include "include/pika_repl_server.h" - - -extern PikaConf *g_pika_conf; -extern PikaReplicaManager* g_pika_rm; -extern PikaServer *g_pika_server; - -/* BinlogReaderManager */ - -BinlogReaderManager::~BinlogReaderManager() { -} - -Status BinlogReaderManager::FetchBinlogReader(const RmNode& rm_node, std::shared_ptr* reader) { - slash::MutexLock l(&reader_mu_); - if (occupied_.find(rm_node) != occupied_.end()) { - return Status::Corruption(rm_node.ToString() + " exist"); - } - if (vacant_.empty()) { - *reader = std::make_shared(); - } else { - *reader = *(vacant_.begin()); - vacant_.erase(vacant_.begin()); - } - occupied_[rm_node] = *reader; - return Status::OK(); -} - -Status BinlogReaderManager::ReleaseBinlogReader(const RmNode& rm_node) { - slash::MutexLock l(&reader_mu_); - if (occupied_.find(rm_node) == occupied_.end()) { - return Status::NotFound(rm_node.ToString()); - } - std::shared_ptr reader = occupied_[rm_node]; - occupied_.erase(rm_node); - vacant_.push_back(reader); - return Status::OK(); -} - -/* SlaveNode */ - -SlaveNode::SlaveNode(const std::string& ip, int port, - const std::string& table_name, - uint32_t partition_id, int session_id) - : RmNode(ip, port, table_name, partition_id, session_id), - slave_state(kSlaveNotSync), - b_state(kNotSync), sent_offset(), acked_offset() { -} - -SlaveNode::~SlaveNode() { - if (b_state == kReadFromFile && binlog_reader != nullptr) { - RmNode rm_node(Ip(), Port(), TableName(), PartitionId()); - ReleaseBinlogFileReader(); - } -} - -Status SlaveNode::InitBinlogFileReader(const std::shared_ptr& binlog, - const BinlogOffset& offset) { - Status s = g_pika_rm->binlog_reader_mgr.FetchBinlogReader( - RmNode(Ip(), Port(), NodePartitionInfo()), &binlog_reader); - if (!s.ok()) { - return s; - } - int res = binlog_reader->Seek(binlog, offset.filenum, offset.offset); - if (res) { - g_pika_rm->binlog_reader_mgr.ReleaseBinlogReader( - RmNode(Ip(), Port(), NodePartitionInfo())); - return Status::Corruption(ToString() + " binlog reader init failed"); - } - 
return Status::OK(); -} - -void SlaveNode::ReleaseBinlogFileReader() { - g_pika_rm->binlog_reader_mgr.ReleaseBinlogReader( - RmNode(Ip(), Port(), NodePartitionInfo())); - binlog_reader = nullptr; -} - -std::string SlaveNode::ToStringStatus() { - std::stringstream tmp_stream; - tmp_stream << " Slave_state: " << SlaveStateMsg[slave_state] << "\r\n"; - tmp_stream << " Binlog_sync_state: " << BinlogSyncStateMsg[b_state] << "\r\n"; - tmp_stream << " Sync_window: " << "\r\n" << sync_win.ToStringStatus(); - tmp_stream << " Sent_offset: " << sent_offset.ToString() << "\r\n"; - tmp_stream << " Acked_offset: " << acked_offset.ToString() << "\r\n"; - tmp_stream << " Binlog_reader activated: " << (binlog_reader != nullptr) << "\r\n"; - return tmp_stream.str(); -} - -/* SyncPartition */ - -SyncPartition::SyncPartition(const std::string& table_name, uint32_t partition_id) - : partition_info_(table_name, partition_id) { -} - -/* SyncMasterPartition*/ - -SyncMasterPartition::SyncMasterPartition(const std::string& table_name, uint32_t partition_id) - : SyncPartition(table_name, partition_id), - session_id_(0) {} - -bool SyncMasterPartition::CheckReadBinlogFromCache() { - return false; -} - -int SyncMasterPartition::GetNumberOfSlaveNode() { - slash::MutexLock l(&partition_mu_); - return slaves_.size(); -} - -bool SyncMasterPartition::CheckSlaveNodeExist(const std::string& ip, int port) { - slash::MutexLock l(&partition_mu_); - for (auto& slave : slaves_) { - if (ip == slave->Ip() && port == slave->Port()) { - return true; - } - } - return false; -} - -Status SyncMasterPartition::GetSlaveNodeSession( - const std::string& ip, int port, int32_t* session) { - slash::MutexLock l(&partition_mu_); - for (auto& slave : slaves_) { - if (ip == slave->Ip() && port == slave->Port()) { - *session = slave->SessionId(); - return Status::OK(); - } - } - return Status::NotFound("slave " + ip + ":" + std::to_string(port) + " not found"); -} - -Status SyncMasterPartition::AddSlaveNode(const std::string& ip, int port, int session_id) { - slash::MutexLock l(&partition_mu_); - for (auto& slave : slaves_) { - if (ip == slave->Ip() && port == slave->Port()) { - slave->SetSessionId(session_id); - return Status::OK(); - } - } - std::shared_ptr slave_ptr = - std::make_shared(ip, port, SyncPartitionInfo().table_name_, SyncPartitionInfo().partition_id_, session_id); - slave_ptr->SetLastSendTime(slash::NowMicros()); - slave_ptr->SetLastRecvTime(slash::NowMicros()); - slaves_.push_back(slave_ptr); - LOG(INFO) << "Add Slave Node, partition: " << SyncPartitionInfo().ToString() << ", ip_port: "<< ip << ":" << port; - return Status::OK(); -} - -Status SyncMasterPartition::RemoveSlaveNode(const std::string& ip, int port) { - slash::MutexLock l(&partition_mu_); - for (size_t i = 0; i < slaves_.size(); ++i) { - std::shared_ptr slave = slaves_[i]; - if (ip == slave->Ip() && port == slave->Port()) { - slaves_.erase(slaves_.begin() + i); - LOG(INFO) << "Remove Slave Node, Partition: " << SyncPartitionInfo().ToString() - << ", ip_port: "<< ip << ":" << port; - return Status::OK(); - } - } - return Status::NotFound("RemoveSlaveNode" + ip + std::to_string(port)); -} - -Status SyncMasterPartition::ActivateSlaveBinlogSync(const std::string& ip, - int port, - const std::shared_ptr binlog, - const BinlogOffset& offset) { - { - slash::MutexLock l(&partition_mu_); - std::shared_ptr slave_ptr = nullptr; - Status s = GetSlaveNode(ip, port, &slave_ptr); - if (!s.ok()) { - return s; - } - bool read_cache = CheckReadBinlogFromCache(); - - slave_ptr->Lock(); - 
slave_ptr->slave_state = kSlaveBinlogSync; - slave_ptr->sent_offset = offset; - slave_ptr->acked_offset = offset; - if (read_cache) { - slave_ptr->Unlock(); - // RegistToBinlogCacheWindow(ip, port, offset); - slave_ptr->Lock(); - slave_ptr->b_state = kReadFromCache; - } else { - // read binlog file from file - s = slave_ptr->InitBinlogFileReader(binlog, offset); - if (!s.ok()) { - slave_ptr->Unlock(); - return Status::Corruption("Init binlog file reader failed" + s.ToString()); - } - slave_ptr->b_state = kReadFromFile; - } - slave_ptr->Unlock(); - } - - Status s = SyncBinlogToWq(ip, port); - if (!s.ok()) { - return s; - } - return Status::OK(); -} - -Status SyncMasterPartition::SyncBinlogToWq(const std::string& ip, int port) { - slash::MutexLock l(&partition_mu_); - std::shared_ptr slave_ptr = nullptr; - Status s = GetSlaveNode(ip, port, &slave_ptr); - if (!s.ok()) { - return s; - } - - { - slash::MutexLock l(&slave_ptr->slave_mu); - if (slave_ptr->b_state == kReadFromFile) { - ReadBinlogFileToWq(slave_ptr); - } else if (slave_ptr->b_state == kReadFromCache) { - ReadCachedBinlogToWq(slave_ptr); - } - } - return Status::OK(); -} - -Status SyncMasterPartition::ActivateSlaveDbSync(const std::string& ip, int port) { - slash::MutexLock l(&partition_mu_); - std::shared_ptr slave_ptr = nullptr; - Status s = GetSlaveNode(ip, port, &slave_ptr); - if (!s.ok()) { - return s; - } - - { - slash::MutexLock l(&slave_ptr->slave_mu); - slave_ptr->slave_state = kSlaveDbSync; - // invoke db sync - } - return Status::OK(); -} - -Status SyncMasterPartition::ReadCachedBinlogToWq(const std::shared_ptr& slave_ptr) { - return Status::OK(); -} - -Status SyncMasterPartition::ReadBinlogFileToWq(const std::shared_ptr& slave_ptr) { - int cnt = slave_ptr->sync_win.Remainings(); - std::shared_ptr reader = slave_ptr->binlog_reader; - std::vector tasks; - for (int i = 0; i < cnt; ++i) { - std::string msg; - uint32_t filenum; - uint64_t offset; - Status s = reader->Get(&msg, &filenum, &offset); - if (s.IsEndFile()) { - break; - } else if (s.IsCorruption() || s.IsIOError()) { - LOG(WARNING) << SyncPartitionInfo().ToString() - << " Read Binlog error : " << s.ToString(); - return s; - } - slave_ptr->sync_win.Push(SyncWinItem(filenum, offset)); - - BinlogOffset sent_offset = BinlogOffset(filenum, offset); - slave_ptr->sent_offset = sent_offset; - slave_ptr->SetLastSendTime(slash::NowMicros()); - RmNode rm_node(slave_ptr->Ip(), slave_ptr->Port(), slave_ptr->TableName(), slave_ptr->PartitionId(), slave_ptr->SessionId()); - WriteTask task(rm_node, BinlogChip(sent_offset, msg)); - tasks.push_back(task); - } - - if (!tasks.empty()) { - g_pika_rm->ProduceWriteQueue(slave_ptr->Ip(), slave_ptr->Port(), tasks); - } - return Status::OK(); -} - -Status SyncMasterPartition::GetSlaveNode(const std::string& ip, int port, std::shared_ptr* slave_node) { - for (size_t i = 0; i < slaves_.size(); ++i) { - std::shared_ptr tmp_slave = slaves_[i]; - if (ip == tmp_slave->Ip() && port == tmp_slave->Port()) { - *slave_node = tmp_slave; - return Status::OK(); - } - } - return Status::NotFound("ip " + ip + " port " + std::to_string(port)); -} - -Status SyncMasterPartition::UpdateSlaveBinlogAckInfo(const std::string& ip, int port, const BinlogOffset& start, const BinlogOffset& end) { - slash::MutexLock l(&partition_mu_); - std::shared_ptr slave_ptr = nullptr; - Status s = GetSlaveNode(ip, port, &slave_ptr); - if (!s.ok()) { - return s; - } - - { - slash::MutexLock l(&slave_ptr->slave_mu); - if (slave_ptr->slave_state != kSlaveBinlogSync) { - return 
Status::Corruption(ip + std::to_string(port) + "state not BinlogSync"); - } - bool res = slave_ptr->sync_win.Update(SyncWinItem(start), SyncWinItem(end), &(slave_ptr->acked_offset)); - if (!res) { - return Status::Corruption("UpdateAckedInfo failed"); - } - } - return Status::OK(); -} - -Status SyncMasterPartition::GetSlaveSyncBinlogInfo(const std::string& ip, - int port, - BinlogOffset* sent_offset, - BinlogOffset* acked_offset) { - slash::MutexLock l(&partition_mu_); - std::shared_ptr slave_ptr = nullptr; - Status s = GetSlaveNode(ip, port, &slave_ptr); - if (!s.ok()) { - return s; - } - - { - slash::MutexLock l(&slave_ptr->slave_mu); - *sent_offset = slave_ptr->sent_offset; - *acked_offset = slave_ptr->acked_offset; - } - return Status::OK(); -} - -Status SyncMasterPartition::GetSlaveState(const std::string& ip, - int port, - SlaveState* const slave_state) { - slash::MutexLock l(&partition_mu_); - std::shared_ptr slave_ptr = nullptr; - Status s = GetSlaveNode(ip, port, &slave_ptr); - if (!s.ok()) { - return s; - } - - { - slash::MutexLock l(&slave_ptr->slave_mu); - *slave_state = slave_ptr->slave_state; - } - return Status::OK(); -} - -Status SyncMasterPartition::WakeUpSlaveBinlogSync() { - slash::MutexLock l(&partition_mu_); - for (auto& slave_ptr : slaves_) { - { - slash::MutexLock l(&slave_ptr->slave_mu); - if (slave_ptr->sent_offset == slave_ptr->acked_offset) { - if (slave_ptr->b_state == kReadFromFile) { - ReadBinlogFileToWq(slave_ptr); - } else if (slave_ptr->b_state == kReadFromCache) { - ReadCachedBinlogToWq(slave_ptr); - } - } - } - } - return Status::OK(); -} - -Status SyncMasterPartition::SetLastSendTime(const std::string& ip, int port, uint64_t time) { - slash::MutexLock l(&partition_mu_); - - std::shared_ptr slave_ptr = nullptr; - Status s = GetSlaveNode(ip, port, &slave_ptr); - if (!s.ok()) { - return s; - } - - { - slash::MutexLock l(&slave_ptr->slave_mu); - slave_ptr->SetLastSendTime(time); - } - - return Status::OK(); -} - -Status SyncMasterPartition::GetLastSendTime(const std::string& ip, int port, uint64_t* time) { - slash::MutexLock l(&partition_mu_); - - std::shared_ptr slave_ptr = nullptr; - Status s = GetSlaveNode(ip, port, &slave_ptr); - if (!s.ok()) { - return s; - } - - { - slash::MutexLock l(&slave_ptr->slave_mu); - *time = slave_ptr->LastSendTime(); - } - - return Status::OK(); -} - -Status SyncMasterPartition::SetLastRecvTime(const std::string& ip, int port, uint64_t time) { - slash::MutexLock l(&partition_mu_); - - std::shared_ptr slave_ptr = nullptr; - Status s = GetSlaveNode(ip, port, &slave_ptr); - if (!s.ok()) { - return s; - } - - { - slash::MutexLock l(&slave_ptr->slave_mu); - slave_ptr->SetLastRecvTime(time); - } - - return Status::OK(); -} - -Status SyncMasterPartition::GetLastRecvTime(const std::string& ip, int port, uint64_t* time) { - slash::MutexLock l(&partition_mu_); - - std::shared_ptr slave_ptr = nullptr; - Status s = GetSlaveNode(ip, port, &slave_ptr); - if (!s.ok()) { - return s; - } - - { - slash::MutexLock l(&slave_ptr->slave_mu); - *time = slave_ptr->LastRecvTime(); - } - - return Status::OK(); -} - -Status SyncMasterPartition::GetSafetyPurgeBinlog(std::string* safety_purge) { - BinlogOffset boffset; - std::string table_name = partition_info_.table_name_; - uint32_t partition_id = partition_info_.partition_id_; - std::shared_ptr partition = - g_pika_server->GetTablePartitionById(table_name, partition_id); - if (!partition || !partition->GetBinlogOffset(&boffset)) { - return Status::NotFound("Partition NotFound"); - } else { - bool 
success = false; - uint32_t purge_max = boffset.filenum; - if (purge_max >= 10) { - success = true; - purge_max -= 10; - slash::MutexLock l(&partition_mu_); - for (const auto& slave : slaves_) { - if (slave->slave_state == SlaveState::kSlaveBinlogSync - && slave->acked_offset.filenum > 0) { - purge_max = std::min(slave->acked_offset.filenum - 1, purge_max); - } else { - success = false; - break; - } - } - } - *safety_purge = (success ? kBinlogPrefix + std::to_string(static_cast(purge_max)) : "none"); - } - return Status::OK(); -} - -bool SyncMasterPartition::BinlogCloudPurge(uint32_t index) { - BinlogOffset boffset; - std::string table_name = partition_info_.table_name_; - uint32_t partition_id = partition_info_.partition_id_; - std::shared_ptr partition = - g_pika_server->GetTablePartitionById(table_name, partition_id); - if (!partition || !partition->GetBinlogOffset(&boffset)) { - return false; - } else { - if (index > boffset.filenum - 10) { // remain some more - return false; - } else { - slash::MutexLock l(&partition_mu_); - for (const auto& slave : slaves_) { - if (slave->slave_state == SlaveState::kSlaveDbSync) { - return false; - } else if (slave->slave_state == SlaveState::kSlaveBinlogSync) { - if (index >= slave->acked_offset.filenum) { - return false; - } - } - } - } - } - return true; -} - -Status SyncMasterPartition::CheckSyncTimeout(uint64_t now) { - slash::MutexLock l(&partition_mu_); - - std::shared_ptr slave_ptr = nullptr; - std::vector to_del; - - for (auto& slave_ptr : slaves_) { - slash::MutexLock l(&slave_ptr->slave_mu); - if (slave_ptr->LastRecvTime() + kRecvKeepAliveTimeout < now) { - to_del.push_back(Node(slave_ptr->Ip(), slave_ptr->Port())); - } else if (slave_ptr->LastSendTime() + kSendKeepAliveTimeout < now) { - std::vector task; - RmNode rm_node(slave_ptr->Ip(), slave_ptr->Port(), slave_ptr->TableName(), slave_ptr->PartitionId(), slave_ptr->SessionId()); - WriteTask empty_task(rm_node, BinlogChip(BinlogOffset(0, 0), "")); - task.push_back(empty_task); - Status s = g_pika_rm->SendSlaveBinlogChipsRequest(slave_ptr->Ip(), slave_ptr->Port(), task); - slave_ptr->SetLastSendTime(now); - if (!s.ok()) { - LOG(INFO)<< "Send ping failed: " << s.ToString(); - return Status::Corruption("Send ping failed: " + slave_ptr->Ip() + ":" + std::to_string(slave_ptr->Port())); - } - } - } - for (auto& node : to_del) { - for (size_t i = 0; i < slaves_.size(); ++i) { - if (node.Ip() == slaves_[i]->Ip() && node.Port() == slaves_[i]->Port()) { - slaves_.erase(slaves_.begin() + i); - LOG(WARNING) << SyncPartitionInfo().ToString() << " Master del Recv Timeout slave success " << node.ToString(); - break; - } - } - } - return Status::OK(); -} - -std::string SyncMasterPartition::ToStringStatus() { - std::stringstream tmp_stream; - tmp_stream << " Current Master Session: " << session_id_ << "\r\n"; - slash::MutexLock l(&partition_mu_); - for (size_t i = 0; i < slaves_.size(); ++i) { - std::shared_ptr slave_ptr = slaves_[i]; - slash::MutexLock l(&slave_ptr->slave_mu); - tmp_stream << " slave[" << i << "]: " << slave_ptr->ToString() << - "\r\n" << slave_ptr->ToStringStatus(); - } - return tmp_stream.str(); -} - -void SyncMasterPartition::GetValidSlaveNames(std::vector* slavenames) { - slash::MutexLock l(&partition_mu_); - for (auto ptr : slaves_) { - if (ptr->slave_state != kSlaveBinlogSync) { - continue; - } - std::string name = ptr->Ip() + ":" + std::to_string(ptr->Port()); - slavenames->push_back(name); - } -} - -Status SyncMasterPartition::GetInfo(std::string* info) { - std::stringstream 
tmp_stream; - slash::MutexLock l(&partition_mu_); - tmp_stream << " Role: Master" << "\r\n"; - tmp_stream << " connected_slaves: " << slaves_.size() << "\r\n"; - for (size_t i = 0; i < slaves_.size(); ++i) { - std::shared_ptr slave_ptr = slaves_[i]; - slash::MutexLock l(&slave_ptr->slave_mu); - tmp_stream << " slave[" << i << "]: " - << slave_ptr->Ip() << ":" << std::to_string(slave_ptr->Port()) << "\r\n"; - tmp_stream << " replication_status: " << SlaveStateMsg[slave_ptr->slave_state] << "\r\n"; - if (slave_ptr->slave_state == kSlaveBinlogSync) { - std::shared_ptr partition = g_pika_server->GetTablePartitionById(slave_ptr->TableName(), slave_ptr->PartitionId()); - BinlogOffset binlog_offset; - if (!partition || !partition->GetBinlogOffset(&binlog_offset)) { - return Status::Corruption("Get Info failed."); - } - uint64_t lag = (binlog_offset.filenum - slave_ptr->acked_offset.filenum) * - g_pika_conf->binlog_file_size() - + (binlog_offset.offset - slave_ptr->acked_offset.offset); - tmp_stream << " lag: " << lag << "\r\n"; - } - } - info->append(tmp_stream.str()); - return Status::OK(); -} - -int32_t SyncMasterPartition::GenSessionId() { - slash::MutexLock ml(&session_mu_); - return session_id_++; -} - -bool SyncMasterPartition::CheckSessionId(const std::string& ip, int port, - const std::string& table_name, - uint64_t partition_id, int session_id) { - slash::MutexLock l(&partition_mu_); - std::shared_ptr slave_ptr = nullptr; - Status s = GetSlaveNode(ip, port, &slave_ptr); - if (!s.ok()) { - LOG(WARNING)<< "Check SessionId Get Slave Node Error: " - << ip << ":" << port << "," << table_name << "_" << partition_id; - return false; - } - if (session_id != slave_ptr->SessionId()) { - LOG(WARNING)<< "Check SessionId Mismatch: " << ip << ":" << port << ", " - << table_name << "_" << partition_id << " expected_session: " << session_id - << ", actual_session:" << slave_ptr->SessionId(); - return false; - } - return true; -} - - -/* SyncSlavePartition */ -SyncSlavePartition::SyncSlavePartition(const std::string& table_name, - uint32_t partition_id) - : SyncPartition(table_name, partition_id), - m_info_(), - repl_state_(kNoConnect), - local_ip_("") { - m_info_.SetLastRecvTime(slash::NowMicros()); -} - -void SyncSlavePartition::SetReplState(const ReplState& repl_state) { - if (repl_state == ReplState::kNoConnect) { - // deactivate - Deactivate(); - return; - } - slash::MutexLock l(&partition_mu_); - repl_state_ = repl_state; -} - -ReplState SyncSlavePartition::State() { - slash::MutexLock l(&partition_mu_); - return repl_state_; -} - -void SyncSlavePartition::SetLastRecvTime(uint64_t time) { - slash::MutexLock l(&partition_mu_); - m_info_.SetLastRecvTime(time); -} - -uint64_t SyncSlavePartition::LastRecvTime() { - slash::MutexLock l(&partition_mu_); - return m_info_.LastRecvTime(); -} - -Status SyncSlavePartition::CheckSyncTimeout(uint64_t now) { - slash::MutexLock l(&partition_mu_); - // no need to do session keepalive return ok - if (repl_state_ != ReplState::kWaitDBSync && repl_state_ != ReplState::kConnected) { - return Status::OK(); - } - if (m_info_.LastRecvTime() + kRecvKeepAliveTimeout < now) { - m_info_ = RmNode(); - repl_state_ = ReplState::kTryConnect; - g_pika_server->SetLoopPartitionStateMachine(true); - } - return Status::OK(); -} - -Status SyncSlavePartition::GetInfo(std::string* info) { - std::string tmp_str = " Role: Slave\r\n"; - tmp_str += " master: " + MasterIp() + ":" + std::to_string(MasterPort()) + "\r\n"; - info->append(tmp_str); - return Status::OK(); -} - -void 
void SyncSlavePartition::Activate(const RmNode& master, const ReplState& repl_state) { - slash::MutexLock l(&partition_mu_); - m_info_ = master; - repl_state_ = repl_state; - m_info_.SetLastRecvTime(slash::NowMicros()); -} - -void SyncSlavePartition::Deactivate() { - slash::MutexLock l(&partition_mu_); - m_info_ = RmNode(); - repl_state_ = ReplState::kNoConnect; -} - -std::string SyncSlavePartition::ToStringStatus() { - return " Master: " + MasterIp() + ":" + std::to_string(MasterPort()) + "\r\n" + - " SessionId: " + std::to_string(MasterSessionId()) + "\r\n" + - " SyncStatus " + ReplStateMsg[repl_state_] + "\r\n"; -} - -/* SyncWindow */ - -void SyncWindow::Push(const SyncWinItem& item) { - win_.push_back(item); -} - -bool SyncWindow::Update(const SyncWinItem& start_item, - const SyncWinItem& end_item, BinlogOffset* acked_offset) { - size_t start_pos = win_.size(), end_pos = win_.size(); - for (size_t i = 0; i < win_.size(); ++i) { - if (win_[i] == start_item) { - start_pos = i; - } - if (win_[i] == end_item) { - end_pos = i; - break; - } - } - if (start_pos == win_.size() || end_pos == win_.size()) { - LOG(WARNING) << "Ack offset Start: " << - start_item.ToString() << " End: " << end_item.ToString() << - " not found in binlog controller window." << - std::endl << "window status " << std::endl << ToStringStatus(); - return false; - } - for (size_t i = start_pos; i <= end_pos; ++i) { - win_[i].acked_ = true; - } - while (!win_.empty()) { - if (win_[0].acked_) { - *acked_offset = win_[0].offset_; - win_.pop_front(); - } else { - break; - } - } - return true; -} - -int SyncWindow::Remainings() { - std::size_t remaining_size = g_pika_conf->sync_window_size() - win_.size(); - return remaining_size > 0 ? remaining_size : 0; -} -
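SyncWindow::Update above marks a contiguous range of in-flight items as acked and then advances the cumulative ack point only across the acked prefix, so a gap in the middle holds the ack point back. A self-contained sketch of that idea with plain sequence numbers instead of SyncWinItem/BinlogOffset (the names here are illustrative, not the real types):

#include <cstdint>
#include <cstdio>
#include <deque>

struct Item { uint64_t seq; bool acked; };

class Window {
 public:
  void Push(uint64_t seq) { win_.push_back({seq, false}); }
  // Mark [start, end] acked, then pop the acked prefix; the last popped
  // item becomes the new cumulative ack point.
  bool Ack(uint64_t start, uint64_t end, uint64_t* acked_upto) {
    bool hit = false;
    for (auto& it : win_) {
      if (it.seq >= start && it.seq <= end) { it.acked = true; hit = true; }
    }
    if (!hit) return false;  // range not found in the window
    while (!win_.empty() && win_.front().acked) {
      *acked_upto = win_.front().seq;
      win_.pop_front();
    }
    return true;
  }
 private:
  std::deque<Item> win_;
};

int main() {
  Window w;
  for (uint64_t i = 1; i <= 5; ++i) w.Push(i);
  uint64_t upto = 0;
  w.Ack(1, 3, &upto);
  printf("acked up to %llu\n", (unsigned long long)upto);  // 3
  w.Ack(5, 5, &upto);  // 4 is still unacked, so the prefix cannot advance
  printf("acked up to %llu\n", (unsigned long long)upto);  // still 3
  return 0;
}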
/* PikaReplicaManager */ - -PikaReplicaManager::PikaReplicaManager() - : last_meta_sync_timestamp_(0) { - std::set<std::string> ips; - ips.insert("0.0.0.0"); - int port = g_pika_conf->port() + kPortShiftReplServer; - pika_repl_client_ = new PikaReplClient(3000, 60); - pika_repl_server_ = new PikaReplServer(ips, port, 3000); - InitPartition(); - pthread_rwlock_init(&partitions_rw_, NULL); -} - -PikaReplicaManager::~PikaReplicaManager() { - delete pika_repl_client_; - delete pika_repl_server_; - pthread_rwlock_destroy(&partitions_rw_); -} - -void PikaReplicaManager::Start() { - int ret = 0; - ret = pika_repl_client_->Start(); - if (ret != pink::kSuccess) { - LOG(FATAL) << "Start Repl Client Error: " << ret << (ret == pink::kCreateThreadError ? ": create thread error " : ": other error"); - } - - ret = pika_repl_server_->Start(); - if (ret != pink::kSuccess) { - LOG(FATAL) << "Start Repl Server Error: " << ret << (ret == pink::kCreateThreadError ? ": create thread error " : ": other error"); - } -} - -void PikaReplicaManager::Stop() { - pika_repl_client_->Stop(); - pika_repl_server_->Stop(); -} - -void PikaReplicaManager::InitPartition() { - std::vector<TableStruct> table_structs = g_pika_conf->table_structs(); - for (const auto& table : table_structs) { - const std::string& table_name = table.table_name; - for (const auto& partition_id : table.partition_ids) { - sync_master_partitions_[PartitionInfo(table_name, partition_id)] - = std::make_shared<SyncMasterPartition>(table_name, partition_id); - sync_slave_partitions_[PartitionInfo(table_name, partition_id)] - = std::make_shared<SyncSlavePartition>(table_name, partition_id); - } - } -} - -void PikaReplicaManager::ProduceWriteQueue(const std::string& ip, int port, const std::vector<WriteTask>& tasks) { - slash::MutexLock l(&write_queue_mu_); - std::string index = ip + ":" + std::to_string(port); - for (auto& task : tasks) { - write_queues_[index].push(task); - } -} - -int PikaReplicaManager::ConsumeWriteQueue() { - std::vector<std::string> to_delete; - std::unordered_map<std::string, std::vector<std::vector<WriteTask>>> to_send_map; - int counter = 0; - { - slash::MutexLock l(&write_queue_mu_); - std::vector<std::string> to_delete; - for (auto& iter : write_queues_) { - std::queue<WriteTask>& queue = iter.second; - for (int i = 0; i < kBinlogSendPacketNum; ++i) { - if (queue.empty()) { - break; - } - size_t batch_index = queue.size() > kBinlogSendBatchNum ? kBinlogSendBatchNum : queue.size(); - std::vector<WriteTask> to_send; - for (size_t i = 0; i < batch_index; ++i) { - to_send.push_back(queue.front()); - queue.pop(); - counter++; - } - to_send_map[iter.first].push_back(std::move(to_send)); - } - } - } - - for (auto& iter : to_send_map) { - std::string ip; - int port = 0; - if (!slash::ParseIpPortString(iter.first, ip, port)) { - LOG(WARNING) << "Parse ip_port error " << iter.first; - continue; - } - for (auto& to_send : iter.second) { - Status s = pika_repl_server_->SendSlaveBinlogChips(ip, port, to_send); - if (!s.ok()) { - LOG(WARNING) << "send binlog to " << ip << ":" << port << " failed, " << s.ToString(); - to_delete.push_back(iter.first); - continue; - } - } - } - - if (!to_delete.empty()) { - { - slash::MutexLock l(&write_queue_mu_); - for (auto& del_queue : to_delete) { - write_queues_.erase(del_queue); - } - } - } - return counter; -} - -void PikaReplicaManager::DropItemInWriteQueue(const std::string& ip, int port) { - slash::MutexLock l(&write_queue_mu_); - std::string index = ip + ":" + std::to_string(port); - write_queues_.erase(index); -} - -void PikaReplicaManager::ScheduleReplServerBGTask(pink::TaskFunc func, void* arg) { - pika_repl_server_->Schedule(func, arg); -} - -void PikaReplicaManager::ScheduleReplClientBGTask(pink::TaskFunc func, void* arg) { - pika_repl_client_->Schedule(func, arg); -} - -void PikaReplicaManager::ScheduleWriteBinlogTask(const std::string& table_partition, - const std::shared_ptr<InnerMessage::InnerResponse> res, - std::shared_ptr<pink::PbConn> conn, - void* res_private_data) { - pika_repl_client_->ScheduleWriteBinlogTask(table_partition, res, conn, res_private_data); -} - -void PikaReplicaManager::ScheduleWriteDBTask(const std::string& dispatch_key, - PikaCmdArgsType* argv, BinlogItem* binlog_item, - const std::string& table_name, uint32_t partition_id) { - pika_repl_client_->ScheduleWriteDBTask(dispatch_key, argv, binlog_item, table_name, partition_id); -} - -void PikaReplicaManager::ReplServerRemoveClientConn(int fd) { - pika_repl_server_->RemoveClientConn(fd); -} - -void PikaReplicaManager::ReplServerUpdateClientConnMap(const std::string& ip_port, - int fd) { - pika_repl_server_->UpdateClientConnMap(ip_port, fd); -} -
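ConsumeWriteQueue above drains each connection's queue in at most kBinlogSendPacketNum packets of up to kBinlogSendBatchNum tasks per round, so one deep queue cannot monopolize a round. A compact sketch of that batching shape; the constant values below are assumptions for illustration, the real ones are defined in pika_define.h:

#include <cstddef>
#include <queue>
#include <string>
#include <vector>

struct WriteTaskLike { std::string payload; };

// Illustrative stand-ins for kBinlogSendPacketNum / kBinlogSendBatchNum.
static const int kPacketNum = 40;
static const size_t kBatchNum = 100;

// One round for one connection: pop up to kPacketNum packets, each packet
// holding up to kBatchNum tasks, leaving the rest for the next round.
std::vector<std::vector<WriteTaskLike>> DrainOneRound(std::queue<WriteTaskLike>* q) {
  std::vector<std::vector<WriteTaskLike>> packets;
  for (int p = 0; p < kPacketNum && !q->empty(); ++p) {
    size_t batch = q->size() < kBatchNum ? q->size() : kBatchNum;
    std::vector<WriteTaskLike> one;
    one.reserve(batch);
    for (size_t i = 0; i < batch; ++i) { one.push_back(q->front()); q->pop(); }
    packets.push_back(std::move(one));
  }
  return packets;
}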
Status PikaReplicaManager::UpdateSyncBinlogStatus(const RmNode& slave, const BinlogOffset& range_start, const BinlogOffset& range_end) { - slash::RWLock l(&partitions_rw_, false); - if (sync_master_partitions_.find(slave.NodePartitionInfo()) == sync_master_partitions_.end()) { - return Status::NotFound(slave.ToString() + " not found"); - } - std::shared_ptr<SyncMasterPartition> partition = sync_master_partitions_[slave.NodePartitionInfo()]; - Status s = partition->UpdateSlaveBinlogAckInfo(slave.Ip(), slave.Port(), range_start, range_end); - if (!s.ok()) { - return s; - } - s = partition->SyncBinlogToWq(slave.Ip(), slave.Port()); - if (!s.ok()) { - return s; - } - return Status::OK(); -} - -Status PikaReplicaManager::GetSyncBinlogStatus(const RmNode& slave, BinlogOffset* sent_offset, BinlogOffset* acked_offset) { - slash::RWLock l(&partitions_rw_, false); - if (sync_master_partitions_.find(slave.NodePartitionInfo()) == sync_master_partitions_.end()) { - return Status::NotFound(slave.ToString() + " not found"); - } - std::shared_ptr<SyncMasterPartition> partition = sync_master_partitions_[slave.NodePartitionInfo()]; - Status s = partition->GetSlaveSyncBinlogInfo(slave.Ip(), slave.Port(), sent_offset, acked_offset); - if (!s.ok()) { - return s; - } - return Status::OK(); -} - -Status PikaReplicaManager::GetSyncMasterPartitionSlaveState(const RmNode& slave, - SlaveState* const slave_state) { - slash::RWLock l(&partitions_rw_, false); - if (sync_master_partitions_.find(slave.NodePartitionInfo()) == sync_master_partitions_.end()) { - return Status::NotFound(slave.ToString() + " not found"); - } - std::shared_ptr<SyncMasterPartition> partition = sync_master_partitions_[slave.NodePartitionInfo()]; - Status s = partition->GetSlaveState(slave.Ip(), slave.Port(), slave_state); - if (!s.ok()) { - return s; - } - return Status::OK(); -} - -bool PikaReplicaManager::CheckPartitionSlaveExist(const RmNode& slave) { - slash::RWLock l(&partitions_rw_, false); - if (sync_master_partitions_.find(slave.NodePartitionInfo()) == sync_master_partitions_.end()) { - return false; - } - std::shared_ptr<SyncMasterPartition> partition = sync_master_partitions_[slave.NodePartitionInfo()]; - return partition->CheckSlaveNodeExist(slave.Ip(), slave.Port()); -} - -Status PikaReplicaManager::GetPartitionSlaveSession(const RmNode& slave, int32_t* session) { - slash::RWLock l(&partitions_rw_, false); - if (sync_master_partitions_.find(slave.NodePartitionInfo()) == sync_master_partitions_.end()) { - return Status::NotFound(slave.ToString() + " not found"); - } - std::shared_ptr<SyncMasterPartition> partition = sync_master_partitions_[slave.NodePartitionInfo()]; - return partition->GetSlaveNodeSession(slave.Ip(), slave.Port(), session); -} - -Status PikaReplicaManager::AddPartitionSlave(const RmNode& slave) { - slash::RWLock l(&partitions_rw_, false); - if (sync_master_partitions_.find(slave.NodePartitionInfo()) == sync_master_partitions_.end()) { - return Status::NotFound(slave.ToString() + " not found"); - } - std::shared_ptr<SyncMasterPartition> partition = sync_master_partitions_[slave.NodePartitionInfo()]; - Status s = partition->RemoveSlaveNode(slave.Ip(), slave.Port()); - if (!s.ok() && !s.IsNotFound()) { - return s; - } - s = partition->AddSlaveNode(slave.Ip(), slave.Port(), slave.SessionId()); - if (!s.ok()) { - return s; - } - return Status::OK(); -} - -Status PikaReplicaManager::RemovePartitionSlave(const RmNode& slave) { - slash::RWLock l(&partitions_rw_, false); - if (sync_master_partitions_.find(slave.NodePartitionInfo()) == sync_master_partitions_.end()) { - return Status::NotFound(slave.ToString() + " not found"); - } - std::shared_ptr<SyncMasterPartition> partition 
= sync_master_partitions_[slave.NodePartitionInfo()]; - Status s = partition->RemoveSlaveNode(slave.Ip(), slave.Port()); - if (!s.ok()) { - return s; - } - return Status::OK(); -} - -Status PikaReplicaManager::LostConnection(const std::string& ip, int port) { - slash::RWLock l(&partitions_rw_, false); - for (auto& iter : sync_master_partitions_) { - std::shared_ptr<SyncMasterPartition> partition = iter.second; - Status s = partition->RemoveSlaveNode(ip, port); - if (!s.ok() && !s.IsNotFound()) { - LOG(WARNING) << "Lost Connection failed " << s.ToString(); - } - } - - for (auto& iter : sync_slave_partitions_) { - std::shared_ptr<SyncSlavePartition> partition = iter.second; - if (partition->MasterIp() == ip && partition->MasterPort() == port) { - partition->Deactivate(); - } - } - return Status::OK(); -} - -Status PikaReplicaManager::ActivateBinlogSync(const RmNode& slave, const BinlogOffset& offset) { - slash::RWLock l(&partitions_rw_, false); - if (sync_master_partitions_.find(slave.NodePartitionInfo()) == sync_master_partitions_.end()) { - return Status::NotFound(slave.ToString() + " not found"); - } - std::shared_ptr<SyncMasterPartition> sync_partition = sync_master_partitions_[slave.NodePartitionInfo()]; - - std::shared_ptr<Partition> partition = g_pika_server->GetTablePartitionById(slave.TableName(), slave.PartitionId()); - if (!partition) { - return Status::Corruption("Find binlog failed"); - } - - Status s = sync_partition->ActivateSlaveBinlogSync(slave.Ip(), slave.Port(), partition->logger(), offset); - if (!s.ok()) { - return s; - } - return Status::OK(); -} - -Status PikaReplicaManager::ActivateDbSync(const RmNode& slave) { - slash::RWLock l(&partitions_rw_, false); - if (sync_master_partitions_.find(slave.NodePartitionInfo()) == sync_master_partitions_.end()) { - return Status::NotFound(slave.ToString() + " not found"); - } - std::shared_ptr<SyncMasterPartition> partition = sync_master_partitions_[slave.NodePartitionInfo()]; - Status s = partition->ActivateSlaveDbSync(slave.Ip(), slave.Port()); - if (!s.ok()) { - return s; - } - return Status::OK(); -} - -Status PikaReplicaManager::SetMasterLastRecvTime(const RmNode& node, uint64_t time) { - slash::RWLock l(&partitions_rw_, false); - if (sync_master_partitions_.find(node.NodePartitionInfo()) == sync_master_partitions_.end()) { - return Status::NotFound(node.ToString() + " not found"); - } - std::shared_ptr<SyncMasterPartition> partition = sync_master_partitions_[node.NodePartitionInfo()]; - partition->SetLastRecvTime(node.Ip(), node.Port(), time); - return Status::OK(); -} - -Status PikaReplicaManager::SetSlaveLastRecvTime(const RmNode& node, uint64_t time) { - slash::RWLock l(&partitions_rw_, false); - if (sync_slave_partitions_.find(node.NodePartitionInfo()) == sync_slave_partitions_.end()) { - return Status::NotFound(node.ToString() + " not found"); - } - std::shared_ptr<SyncSlavePartition> partition = sync_slave_partitions_[node.NodePartitionInfo()]; - partition->SetLastRecvTime(time); - return Status::OK(); -} - -Status PikaReplicaManager::WakeUpBinlogSync() { - slash::RWLock l(&partitions_rw_, false); - for (auto& iter : sync_master_partitions_) { - std::shared_ptr<SyncMasterPartition> partition = iter.second; - Status s = partition->WakeUpSlaveBinlogSync(); - if (!s.ok()) { - return s; - } - } - return Status::OK(); -} - -int32_t PikaReplicaManager::GenPartitionSessionId(const std::string& table_name, - uint32_t partition_id) { - slash::RWLock l(&partitions_rw_, false); - PartitionInfo p_info(table_name, partition_id); - if (sync_master_partitions_.find(p_info) == sync_master_partitions_.end()) { - return -1; - } else { - std::shared_ptr<SyncMasterPartition> sync_master_partition = 
sync_master_partitions_[p_info]; - return sync_master_partition->GenSessionId(); - } -} - -int32_t PikaReplicaManager::GetSlavePartitionSessionId(const std::string& table_name, - uint32_t partition_id) { - slash::RWLock l(&partitions_rw_, false); - PartitionInfo p_info(table_name, partition_id); - if (sync_slave_partitions_.find(p_info) == sync_slave_partitions_.end()) { - return -1; - } else { - std::shared_ptr<SyncSlavePartition> sync_slave_partition = sync_slave_partitions_[p_info]; - return sync_slave_partition->MasterSessionId(); - } -} - -bool PikaReplicaManager::CheckSlavePartitionSessionId(const std::string& table_name, - uint32_t partition_id, - int session_id) { - slash::RWLock l(&partitions_rw_, false); - PartitionInfo p_info(table_name, partition_id); - if (sync_slave_partitions_.find(p_info) == sync_slave_partitions_.end()) { - LOG(WARNING) << "Slave Partition Not Found: " << p_info.ToString().data(); - return false; - } else { - std::shared_ptr<SyncSlavePartition> sync_slave_partition = sync_slave_partitions_[p_info]; - if (sync_slave_partition->MasterSessionId() != session_id) { - LOG(WARNING) << "Check SessionId Mismatch: " << sync_slave_partition->MasterIp() - << ":" << sync_slave_partition->MasterPort() << ", " - << sync_slave_partition->SyncPartitionInfo().ToString() - << " expected_session: " << session_id << ", actual_session:" - << sync_slave_partition->MasterSessionId(); - return false; - } - } - return true; -} - -bool PikaReplicaManager::CheckMasterPartitionSessionId(const std::string& ip, int port, - const std::string& table_name, - uint32_t partition_id, int session_id) { - slash::RWLock l(&partitions_rw_, false); - PartitionInfo p_info(table_name, partition_id); - if (sync_master_partitions_.find(p_info) == sync_master_partitions_.end()) { - return false; - } else { - std::shared_ptr<SyncMasterPartition> sync_master_partition = sync_master_partitions_[p_info]; - return sync_master_partition->CheckSessionId(ip, port, table_name, partition_id, session_id); - } -} - -Status PikaReplicaManager::CheckSyncTimeout(uint64_t now) { - slash::RWLock l(&partitions_rw_, false); - - for (auto& iter : sync_master_partitions_) { - std::shared_ptr<SyncMasterPartition> partition = iter.second; - Status s = partition->CheckSyncTimeout(now); - if (!s.ok()) { - LOG(WARNING) << "CheckSyncTimeout Failed " << s.ToString(); - } - } - for (auto& iter : sync_slave_partitions_) { - std::shared_ptr<SyncSlavePartition> partition = iter.second; - Status s = partition->CheckSyncTimeout(now); - if (!s.ok()) { - LOG(WARNING) << "CheckSyncTimeout Failed " << s.ToString(); - } - } - return Status::OK(); -} - -Status PikaReplicaManager::CheckPartitionRole( - const std::string& table, uint32_t partition_id, int* role) { - slash::RWLock l(&partitions_rw_, false); - *role = 0; - PartitionInfo p_info(table, partition_id); - if (sync_master_partitions_.find(p_info) == sync_master_partitions_.end()) { - return Status::NotFound(table + std::to_string(partition_id) + " not found"); - } - if (sync_slave_partitions_.find(p_info) == sync_slave_partitions_.end()) { - return Status::NotFound(table + std::to_string(partition_id) + " not found"); - } - if (sync_master_partitions_[p_info]->GetNumberOfSlaveNode() != 0) { - *role |= PIKA_ROLE_MASTER; - } - if (sync_slave_partitions_[p_info]->State() == ReplState::kConnected) { - *role |= PIKA_ROLE_SLAVE; - } - // if the role is neither master nor slave, the remaining case is single - return Status::OK(); -} - -Status PikaReplicaManager::GetPartitionInfo( - const std::string& table, uint32_t partition_id, std::string* info) { - int role = 0; - std::string tmp_res; - 
Status s = CheckPartitionRole(table, partition_id, &role); - if (!s.ok()) { - return s; - } - - bool add_divider_line = ((role & PIKA_ROLE_MASTER) && (role & PIKA_ROLE_SLAVE)); - slash::RWLock l(&partitions_rw_, false); - PartitionInfo p_info(table, partition_id); - if (role & PIKA_ROLE_MASTER) { - if (sync_master_partitions_.find(p_info) == sync_master_partitions_.end()) { - return Status::NotFound(table + std::to_string(partition_id) + " not found"); - } - Status s = sync_master_partitions_[p_info]->GetInfo(info); - if (!s.ok()) { - return s; - } - } - if (add_divider_line) { - info->append(" -----------\r\n"); - } - if (role & PIKA_ROLE_SLAVE) { - if (sync_slave_partitions_.find(p_info) == sync_slave_partitions_.end()) { - return Status::NotFound(table + std::to_string(partition_id) + " not found"); - } - Status s = sync_slave_partitions_[p_info]->GetInfo(info); - if (!s.ok()) { - return s; - } - } - info->append("\r\n"); - return Status::OK(); -} - -Status PikaReplicaManager::SelectLocalIp(const std::string& remote_ip, - const int remote_port, - std::string* const local_ip) { - pink::PinkCli* cli = pink::NewRedisCli(); - cli->set_connect_timeout(1500); - if ((cli->Connect(remote_ip, remote_port, "")).ok()) { - struct sockaddr_in laddr; - socklen_t llen = sizeof(laddr); - getsockname(cli->fd(), (struct sockaddr*) &laddr, &llen); - std::string tmp_ip(inet_ntoa(laddr.sin_addr)); - *local_ip = tmp_ip; - cli->Close(); - delete cli; - } else { - LOG(WARNING) << "Failed to connect remote node(" - << remote_ip << ":" << remote_port << ")"; - delete cli; - return Status::Corruption("connect remote node error"); - } - return Status::OK(); -} - -Status PikaReplicaManager::ActivateSyncSlavePartition(const RmNode& node, - const ReplState& repl_state) { - slash::RWLock l(&partitions_rw_, false); - const PartitionInfo& p_info = node.NodePartitionInfo(); - if (sync_slave_partitions_.find(p_info) == sync_slave_partitions_.end()) { - return Status::NotFound("Sync Slave partition " + node.ToString() + " not found"); - } - ReplState ssp_state = sync_slave_partitions_[p_info]->State(); - if (ssp_state != ReplState::kNoConnect) { - return Status::Corruption("Sync Slave partition in " + ReplStateMsg[ssp_state]); - } - std::string local_ip; - Status s = SelectLocalIp(node.Ip(), node.Port(), &local_ip); - if (s.ok()) { - sync_slave_partitions_[p_info]->SetLocalIp(local_ip); - sync_slave_partitions_[p_info]->Activate(node, repl_state); - } - return s; -} - -Status PikaReplicaManager::UpdateSyncSlavePartitionSessionId(const PartitionInfo& p_info, - int32_t session_id) { - slash::RWLock l(&partitions_rw_, false); - if (sync_slave_partitions_.find(p_info) == sync_slave_partitions_.end()) { - return Status::NotFound("Sync Slave partition " + p_info.ToString()); - } - sync_slave_partitions_[p_info]->SetMasterSessionId(session_id); - return Status::OK(); -} - -Status PikaReplicaManager::DeactivateSyncSlavePartition(const PartitionInfo& p_info) { - slash::RWLock l(&partitions_rw_, false); - if (sync_slave_partitions_.find(p_info) == sync_slave_partitions_.end()) { - return Status::NotFound("Sync Slave partition " + p_info.ToString()); - } - sync_slave_partitions_[p_info]->Deactivate(); - return Status::OK(); -} - -Status PikaReplicaManager::SetSlaveReplState(const PartitionInfo& p_info, - const ReplState& repl_state) { - slash::RWLock l(&partitions_rw_, false); - if (sync_slave_partitions_.find(p_info) == sync_slave_partitions_.end()) { - return Status::NotFound("Sync Slave partition " + p_info.ToString()); - } - 
sync_slave_partitions_[p_info]->SetReplState(repl_state); - return Status::OK(); -} - -Status PikaReplicaManager::GetSlaveReplState(const PartitionInfo& p_info, - ReplState* repl_state) { - slash::RWLock l(&partitions_rw_, false); - if (sync_slave_partitions_.find(p_info) == sync_slave_partitions_.end()) { - return Status::NotFound("Sync Slave partition " + p_info.ToString()); - } - *repl_state = sync_slave_partitions_[p_info]->State(); - return Status::OK(); -} - -Status PikaReplicaManager::SendMetaSyncRequest() { - Status s; - int now = time(NULL); - if (now - last_meta_sync_timestamp_ >= PIKA_META_SYNC_MAX_WAIT_TIME) { - s = pika_repl_client_->SendMetaSync(); - if (s.ok()) { - last_meta_sync_timestamp_ = now; - } - } - return s; -} - -Status PikaReplicaManager::SendRemoveSlaveNodeRequest(const std::string& table, - uint32_t partition_id) { - slash::Status s; - slash::RWLock l(&partitions_rw_, false); - PartitionInfo p_info(table, partition_id); - if (sync_slave_partitions_.find(p_info) == sync_slave_partitions_.end()) { - return Status::NotFound("Sync Slave partition " + p_info.ToString()); - } else { - std::shared_ptr<SyncSlavePartition> s_partition = sync_slave_partitions_[p_info]; - s = pika_repl_client_->SendRemoveSlaveNode(s_partition->MasterIp(), - s_partition->MasterPort(), table, partition_id, s_partition->LocalIp()); - if (s.ok()) { - s_partition->Deactivate(); - } - } - - if (s.ok()) { - LOG(INFO) << "SlaveNode (" << table << ":" << partition_id - << "), stop sync success"; - } else { - LOG(WARNING) << "SlaveNode (" << table << ":" << partition_id - << "), stop sync failed, " << s.ToString(); - } - return s; -} - -Status PikaReplicaManager::SendPartitionTrySyncRequest( - const std::string& table_name, size_t partition_id) { - BinlogOffset boffset; - if (!g_pika_server->GetTablePartitionBinlogOffset( - table_name, partition_id, &boffset)) { - LOG(WARNING) << "Partition: " << table_name << ":" << partition_id - << ", Get partition binlog offset failed"; - return Status::Corruption("Partition get binlog offset error"); - } - - std::shared_ptr<SyncSlavePartition> slave_partition = - GetSyncSlavePartitionByName(PartitionInfo(table_name, partition_id)); - if (!slave_partition) { - LOG(WARNING) << "Slave Partition: " << table_name << ":" << partition_id - << ", NotFound"; - return Status::Corruption("Slave Partition not found"); - } - - Status status = pika_repl_client_->SendPartitionTrySync(slave_partition->MasterIp(), - slave_partition->MasterPort(), - table_name, partition_id, boffset, - slave_partition->LocalIp()); - - Status s; - if (status.ok()) { - s = g_pika_rm->SetSlaveReplState(PartitionInfo(table_name, partition_id), ReplState::kWaitReply); - } else { - s = g_pika_rm->SetSlaveReplState(PartitionInfo(table_name, partition_id), ReplState::kError); - LOG(WARNING) << "SendPartitionTrySyncRequest failed " << status.ToString(); - } - if (!s.ok()) { - LOG(WARNING) << s.ToString(); - } - return status; -} - -static bool already_dbsync = false; -Status PikaReplicaManager::SendPartitionDBSyncRequest( - const std::string& table_name, size_t partition_id) { - if (!already_dbsync) { - already_dbsync = true; - } else { - LOG(FATAL) << "we only allow one DBSync action to avoid passing duplicate commands to target Redis multiple times"; - } - - BinlogOffset boffset; - if (!g_pika_server->GetTablePartitionBinlogOffset( - table_name, partition_id, &boffset)) { - LOG(WARNING) << "Partition: " << table_name << ":" << partition_id - << ", Get partition binlog offset failed"; - return Status::Corruption("Partition get binlog offset 
error"); - } - - std::shared_ptr partition = - g_pika_server->GetTablePartitionById(table_name, partition_id); - if (!partition) { - LOG(WARNING) << "Partition: " << table_name << ":" << partition_id - << ", NotFound"; - return Status::Corruption("Partition not found"); - } - partition->PrepareRsync(); - - std::shared_ptr slave_partition = - GetSyncSlavePartitionByName(PartitionInfo(table_name, partition_id)); - if (!slave_partition) { - LOG(WARNING) << "Slave Partition: " << table_name << ":" << partition_id - << ", NotFound"; - return Status::Corruption("Slave Partition not found"); - } - - Status status = pika_repl_client_->SendPartitionDBSync(slave_partition->MasterIp(), - slave_partition->MasterPort(), - table_name, partition_id, boffset, - slave_partition->LocalIp()); - - Status s; - if (status.ok()) { - s = g_pika_rm->SetSlaveReplState(PartitionInfo(table_name, partition_id), ReplState::kWaitReply); - } else { - LOG(WARNING) << "SendPartitionDbSync failed " << status.ToString(); - s = g_pika_rm->SetSlaveReplState(PartitionInfo(table_name, partition_id), ReplState::kError); - } - if (!s.ok()) { - LOG(WARNING) << s.ToString(); - } - return status; -} - -Status PikaReplicaManager::SendPartitionBinlogSyncAckRequest( - const std::string& table, uint32_t partition_id, - const BinlogOffset& ack_start, const BinlogOffset& ack_end, - bool is_first_send) { - std::shared_ptr slave_partition = - GetSyncSlavePartitionByName(PartitionInfo(table, partition_id)); - if (!slave_partition) { - LOG(WARNING) << "Slave Partition: " << table << ":" << partition_id - << ", NotFound"; - return Status::Corruption("Slave Partition not found"); - } - return pika_repl_client_->SendPartitionBinlogSync( - slave_partition->MasterIp(), slave_partition->MasterPort(), - table, partition_id, ack_start, ack_end, slave_partition->LocalIp(), - is_first_send); -} - -Status PikaReplicaManager::CloseReplClientConn(const std::string& ip, int32_t port) { - return pika_repl_client_->Close(ip, port); -} - -Status PikaReplicaManager::SendSlaveBinlogChipsRequest(const std::string& ip, - int port, - const std::vector& tasks) { - return pika_repl_server_->SendSlaveBinlogChips(ip, port, tasks); -} - -std::shared_ptr -PikaReplicaManager::GetSyncMasterPartitionByName(const PartitionInfo& p_info) { - slash::RWLock l(&partitions_rw_, false); - if (sync_master_partitions_.find(p_info) == sync_master_partitions_.end()) { - return nullptr; - } - return sync_master_partitions_[p_info]; -} - -Status PikaReplicaManager::GetSafetyPurgeBinlogFromSMP(const std::string& table_name, - uint32_t partition_id, - std::string* safety_purge) { - std::shared_ptr master_partition = - GetSyncMasterPartitionByName(PartitionInfo(table_name, partition_id)); - if (!master_partition) { - LOG(WARNING) << "Sync Master Partition: " << table_name << ":" << partition_id - << ", NotFound"; - return Status::NotFound("SyncMasterPartition NotFound"); - } else { - return master_partition->GetSafetyPurgeBinlog(safety_purge); - } -} - -bool PikaReplicaManager::BinlogCloudPurgeFromSMP(const std::string& table_name, - uint32_t partition_id, uint32_t index) { - std::shared_ptr master_partition = - GetSyncMasterPartitionByName(PartitionInfo(table_name, partition_id)); - if (!master_partition) { - LOG(WARNING) << "Sync Master Partition: " << table_name << ":" << partition_id - << ", NotFound"; - return false; - } else { - return master_partition->BinlogCloudPurge(index); - } -} - -std::shared_ptr -PikaReplicaManager::GetSyncSlavePartitionByName(const PartitionInfo& p_info) { - 
slash::RWLock l(&partitions_rw_, false); - if (sync_slave_partitions_.find(p_info) == sync_slave_partitions_.end()) { - return nullptr; - } - return sync_slave_partitions_[p_info]; -} - -Status PikaReplicaManager::RunSyncSlavePartitionStateMachine() { - slash::RWLock l(&partitions_rw_, false); - for (const auto& item : sync_slave_partitions_) { - PartitionInfo p_info = item.first; - std::shared_ptr<SyncSlavePartition> s_partition = item.second; - if (s_partition->State() == ReplState::kTryConnect) { - SendPartitionTrySyncRequest(p_info.table_name_, p_info.partition_id_); - } else if (s_partition->State() == ReplState::kTryDBSync) { - SendPartitionDBSyncRequest(p_info.table_name_, p_info.partition_id_); - } else if (s_partition->State() == ReplState::kWaitReply) { - continue; - } else if (s_partition->State() == ReplState::kWaitDBSync) { - std::shared_ptr<Partition> partition = - g_pika_server->GetTablePartitionById( - p_info.table_name_, p_info.partition_id_); - if (partition) { - partition->TryUpdateMasterOffset(); - } else { - LOG(WARNING) << "Partition not found, Table Name: " - << p_info.table_name_ << " Partition Id: " << p_info.partition_id_; - } - } else if (s_partition->State() == ReplState::kConnected - || s_partition->State() == ReplState::kNoConnect) { - continue; - } - } - return Status::OK(); -} - -Status PikaReplicaManager::AddSyncPartitionSanityCheck(const std::set<PartitionInfo>& p_infos) { - slash::RWLock l(&partitions_rw_, false); - for (const auto& p_info : p_infos) { - if (sync_master_partitions_.find(p_info) != sync_master_partitions_.end() - || sync_slave_partitions_.find(p_info) != sync_slave_partitions_.end()) { - LOG(WARNING) << "sync partition: " << p_info.ToString() << " exist"; - return Status::Corruption("sync partition " + p_info.ToString() - + " exist"); - } - } - return Status::OK(); -} - -Status PikaReplicaManager::AddSyncPartition( - const std::set<PartitionInfo>& p_infos) { - Status s = AddSyncPartitionSanityCheck(p_infos); - if (!s.ok()) { - return s; - } - - slash::RWLock l(&partitions_rw_, true); - for (const auto& p_info : p_infos) { - sync_master_partitions_[p_info] = - std::make_shared<SyncMasterPartition>(p_info.table_name_, - p_info.partition_id_); - sync_slave_partitions_[p_info] = - std::make_shared<SyncSlavePartition>(p_info.table_name_, - p_info.partition_id_); - } - return Status::OK(); -} - -Status PikaReplicaManager::RemoveSyncPartitionSanityCheck( - const std::set<PartitionInfo>& p_infos) { - slash::RWLock l(&partitions_rw_, false); - for (const auto& p_info : p_infos) { - if (sync_master_partitions_.find(p_info) == sync_master_partitions_.end() - || sync_slave_partitions_.find(p_info) == sync_slave_partitions_.end()) { - LOG(WARNING) << "sync partition: " << p_info.ToString() << " not found"; - return Status::Corruption("sync partition " + p_info.ToString() - + " not found"); - } - - if (sync_master_partitions_[p_info]->GetNumberOfSlaveNode() != 0) { - LOG(WARNING) << "sync master partition: " << p_info.ToString() - << " in syncing"; - return Status::Corruption("sync master partition " + p_info.ToString() - + " in syncing"); - } - - ReplState state = sync_slave_partitions_[p_info]->State(); - if (state != kNoConnect && state != kError) { - LOG(WARNING) << "sync slave partition: " << p_info.ToString() - << " in " << ReplStateMsg[state] + " state"; - return Status::Corruption("sync slave partition " + p_info.ToString() - + " in " + ReplStateMsg[state] + " state"); - } - } - return Status::OK(); -} -
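RunSyncSlavePartitionStateMachine above is a polling loop over per-partition replication states: some states trigger a request, one polls for a finished full sync, and the rest are left alone. A compact restatement of those transitions as a sketch (the state names mirror ReplState; the returned strings are only descriptive):

#include <cstdio>

enum ReplStateLike { kNoConnect, kTryConnect, kTryDBSync, kWaitReply, kWaitDBSync, kConnected, kError };

// One tick of the loop for one partition.
const char* Step(ReplStateLike s) {
  switch (s) {
    case kTryConnect: return "send TrySync (incremental sync) request";
    case kTryDBSync:  return "send DBSync (full sync) request";
    case kWaitDBSync: return "poll TryUpdateMasterOffset for rsync completion";
    case kWaitReply:
    case kConnected:
    case kNoConnect:  return "no-op this tick";
    default:          return "error state: needs operator attention";
  }
}

int main() {
  printf("%s\n", Step(kTryConnect));
  return 0;
}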
Status PikaReplicaManager::RemoveSyncPartition( - const std::set<PartitionInfo>& p_infos) { - Status s = RemoveSyncPartitionSanityCheck(p_infos); - if (!s.ok()) { - return s; - } - - slash::RWLock l(&partitions_rw_, true); - for (const auto& p_info : p_infos) { - sync_master_partitions_.erase(p_info); - sync_slave_partitions_.erase(p_info); - } - return Status::OK(); -} - -void PikaReplicaManager::FindCompleteReplica(std::vector<std::string>* replica) { - std::unordered_map<std::string, size_t> replica_slotnum; - slash::RWLock l(&partitions_rw_, false); - for (auto& iter : sync_master_partitions_) { - std::vector<std::string> names; - iter.second->GetValidSlaveNames(&names); - for (auto& name : names) { - if (replica_slotnum.find(name) == replica_slotnum.end()) { - replica_slotnum[name] = 0; - } - replica_slotnum[name]++; - } - } - for (auto item : replica_slotnum) { - if (item.second == sync_master_partitions_.size()) { - replica->push_back(item.first); - } - } -} - -void PikaReplicaManager::FindCommonMaster(std::string* master) { - slash::RWLock l(&partitions_rw_, false); - std::string common_master_ip; - int common_master_port = 0; - for (auto& iter : sync_slave_partitions_) { - if (iter.second->State() != kConnected) { - return; - } - std::string tmp_ip = iter.second->MasterIp(); - int tmp_port = iter.second->MasterPort(); - if (common_master_ip.empty() && common_master_port == 0) { - common_master_ip = tmp_ip; - common_master_port = tmp_port; - } - if (tmp_ip != common_master_ip || tmp_port != common_master_port) { - return; - } - } - if (!common_master_ip.empty() && common_master_port != 0) { - *master = common_master_ip + ":" + std::to_string(common_master_port); - } -} - -void PikaReplicaManager::RmStatus(std::string* info) { - slash::RWLock l(&partitions_rw_, false); - std::stringstream tmp_stream; - tmp_stream << "Master partition(" << sync_master_partitions_.size() << "):" << "\r\n"; - for (auto& iter : sync_master_partitions_) { - tmp_stream << " Partition " << iter.second->SyncPartitionInfo().ToString() - << "\r\n" << iter.second->ToStringStatus() << "\r\n"; - } - tmp_stream << "Slave partition(" << sync_slave_partitions_.size() << "):" << "\r\n"; - for (auto& iter : sync_slave_partitions_) { - tmp_stream << " Partition " << iter.second->SyncPartitionInfo().ToString() - << "\r\n" << iter.second->ToStringStatus() << "\r\n"; - } - info->append(tmp_stream.str()); -} diff --git a/tools/pika_migrate/src/pika_rsync_service.cc b/tools/pika_migrate/src/pika_rsync_service.cc deleted file mode 100644 index 00f3e70ee4..0000000000 --- a/tools/pika_migrate/src/pika_rsync_service.cc +++ /dev/null @@ -1,105 +0,0 @@ -// Copyright (c) 2019-present, Qihoo, Inc. All rights reserved. -// This source code is licensed under the BSD-style license found in the -// LICENSE file in the root directory of this source tree. An additional grant -// of patent rights can be found in the PATENTS file in the same directory. 
- -#include "include/pika_rsync_service.h" - -#include <glog/logging.h> -#include <fstream> - -#include "slash/include/env.h" -#include "slash/include/rsync.h" - -#include "include/pika_define.h" -#include "include/pika_conf.h" - -extern PikaConf *g_pika_conf; - -PikaRsyncService::PikaRsyncService(const std::string& raw_path, - const int port) - : raw_path_(raw_path), port_(port) { - if (raw_path_.back() != '/') { - raw_path_ += "/"; - } - rsync_path_ = raw_path_ + slash::kRsyncSubDir + "/"; - pid_path_ = rsync_path_ + slash::kRsyncPidFile; -} - -PikaRsyncService::~PikaRsyncService() { - if (!CheckRsyncAlive()) { - slash::DeleteDirIfExist(rsync_path_); - } else { - slash::StopRsync(raw_path_); - } - LOG(INFO) << "PikaRsyncService exit!!!"; -} - -int PikaRsyncService::StartRsync() { - int ret = 0; - std::string auth; - if (g_pika_conf->masterauth().empty()) { - auth = kDefaultRsyncAuth; - } else { - auth = g_pika_conf->masterauth(); - } - ret = slash::StartRsync(raw_path_, kDBSyncModule, "0.0.0.0", port_, auth); - if (ret != 0) { - LOG(WARNING) << "Failed to start rsync, path:" << raw_path_ << " error: " << ret; - return -1; - } - ret = CreateSecretFile(); - if (ret != 0) { - LOG(WARNING) << "Failed to create secret file"; - return -1; - } - // Make sure the listening addr of rsyncd is accessible, avoid the corner case - // that rsync --daemon process is started but not finished listening on the socket - sleep(1); - - if (!CheckRsyncAlive()) { - LOG(WARNING) << "Rsync service is not alive, path:" << raw_path_; - return -1; - } - return 0; -} - -int PikaRsyncService::CreateSecretFile() { - std::string secret_file_path = g_pika_conf->db_sync_path(); - if (g_pika_conf->db_sync_path().back() != '/') { - secret_file_path += "/"; - } - secret_file_path += slash::kRsyncSubDir + "/"; - slash::CreatePath(secret_file_path); - secret_file_path += kPikaSecretFile; - - std::string auth; - if (g_pika_conf->requirepass().empty()) { - auth = kDefaultRsyncAuth; - } else { - auth = g_pika_conf->requirepass(); - } - - std::ofstream secret_stream(secret_file_path.c_str()); - if (!secret_stream) { - return -1; - } - secret_stream << auth; - secret_stream.close(); - - // secret file can't be other-accessible - std::string cmd = "chmod 600 " + secret_file_path; - int ret = system(cmd.c_str()); - if (ret == 0 || (WIFEXITED(ret) && !WEXITSTATUS(ret))) { - return 0; - } - return ret; -} - -bool PikaRsyncService::CheckRsyncAlive() { - return slash::FileExists(pid_path_); -} - -int PikaRsyncService::ListenPort() { - return port_; -}
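CreateSecretFile above checks the value returned by system() two ways because system() reports a process wait status, not the command's exit code; WIFEXITED/WEXITSTATUS unpack it. A small sketch of the portable check (RunChecked is a hypothetical helper, not part of the file above):

#include <cstdio>
#include <cstdlib>
#include <sys/wait.h>

// Returns 0 iff the child was spawned, exited normally, and exited with 0.
int RunChecked(const char* cmd) {
  int st = system(cmd);
  if (st == -1) return -1;                       // fork/exec itself failed
  if (WIFEXITED(st) && WEXITSTATUS(st) == 0) return 0;
  return WIFEXITED(st) ? WEXITSTATUS(st) : -1;   // nonzero exit or killed by signal
}

int main() {
  printf("chmod-style call: %d\n", RunChecked("true"));
  return 0;
}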
 diff --git a/tools/pika_migrate/src/pika_sender.cc b/tools/pika_migrate/src/pika_sender.cc deleted file mode 100644 index a2109b22e8..0000000000 --- a/tools/pika_migrate/src/pika_sender.cc +++ /dev/null @@ -1,189 +0,0 @@ -// Copyright (c) 2015-present, Qihoo, Inc. All rights reserved. -// This source code is licensed under the BSD-style license found in the -// LICENSE file in the root directory of this source tree. An additional grant -// of patent rights can be found in the PATENTS file in the same directory. - -#include "include/pika_sender.h" - -#include <glog/logging.h> - -#include "slash/include/xdebug.h" - -PikaSender::PikaSender(std::string ip, int64_t port, std::string password): - cli_(NULL), - signal_(&keys_mutex_), - ip_(ip), - port_(port), - password_(password), - should_exit_(false), - elements_(0) - { - } - -PikaSender::~PikaSender() { -} - -int PikaSender::QueueSize() { - slash::MutexLock l(&keys_mutex_); - return keys_queue_.size(); -} - -void PikaSender::Stop() { - should_exit_ = true; - keys_mutex_.Lock(); - signal_.Signal(); - keys_mutex_.Unlock(); -} - -void PikaSender::ConnectRedis() { - while (cli_ == NULL) { - // Connect to redis - cli_ = pink::NewRedisCli(); - cli_->set_connect_timeout(1000); - slash::Status s = cli_->Connect(ip_, port_); - if (!s.ok()) { - delete cli_; - cli_ = NULL; - LOG(WARNING) << "Can not connect to " << ip_ << ":" << port_ << ", status: " << s.ToString(); - continue; - } else { - // Connect success - - // Authentication - if (!password_.empty()) { - pink::RedisCmdArgsType argv, resp; - std::string cmd; - - argv.push_back("AUTH"); - argv.push_back(password_); - pink::SerializeRedisCommand(argv, &cmd); - slash::Status s = cli_->Send(&cmd); - - if (s.ok()) { - s = cli_->Recv(&resp); - if (resp[0] == "OK") { - } else { - LOG(FATAL) << "Connect to redis(" << ip_ << ":" << port_ << ") Invalid password"; - cli_->Close(); - delete cli_; - cli_ = NULL; - should_exit_ = true; - return; - } - } else { - LOG(WARNING) << "send auth failed: " << s.ToString(); - cli_->Close(); - delete cli_; - cli_ = NULL; - continue; - } - } else { - // If forget to input password - pink::RedisCmdArgsType argv, resp; - std::string cmd; - - argv.push_back("PING"); - pink::SerializeRedisCommand(argv, &cmd); - slash::Status s = cli_->Send(&cmd); - - if (s.ok()) { - s = cli_->Recv(&resp); - if (s.ok()) { - if (resp[0] == "NOAUTH Authentication required.") { - LOG(FATAL) << "Ping redis(" << ip_ << ":" << port_ << ") NOAUTH Authentication required"; - cli_->Close(); - delete cli_; - cli_ = NULL; - should_exit_ = true; - return; - } - } else { - LOG(WARNING) << "Recv failed: " << s.ToString(); - cli_->Close(); - delete cli_; - cli_ = NULL; - } - } - } - } - } -} - -void PikaSender::LoadKey(const std::string &key) { - keys_mutex_.Lock(); - if (keys_queue_.size() < 100000) { - keys_queue_.push(key); - signal_.Signal(); - keys_mutex_.Unlock(); - } else { - while (keys_queue_.size() > 100000 && !should_exit_) { - signal_.TimedWait(100); - } - keys_queue_.push(key); - signal_.Signal(); - keys_mutex_.Unlock(); - } -} - -void PikaSender::SendCommand(std::string &command, const std::string &key) { - // Send command - slash::Status s = cli_->Send(&command); - if (!s.ok()) { - elements_--; - LoadKey(key); - cli_->Close(); - log_info("%s", s.ToString().data()); - delete cli_; - cli_ = NULL; - ConnectRedis(); - } -} -
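SendCommand above requeues the key and reconnects on failure; the thread loop that follows pipelines up to 200 commands before draining one reply per send. A minimal sketch of that send-then-drain pattern (FakeConn is a stand-in for illustration, not the real pink::PinkCli API):

#include <cstdio>
#include <string>
#include <vector>

struct FakeConn {        // stands in for the redis client connection
  int inflight = 0;
  void Send(const std::string&) { ++inflight; }
  void Recv() { if (inflight > 0) --inflight; }
};

int main() {
  const int kPipelineDepth = 200;  // mirrors the cnt >= 200 threshold in the loop below
  FakeConn conn;
  std::vector<std::string> cmds(450, "SET k v");
  int cnt = 0;
  for (const auto& c : cmds) {
    conn.Send(c);
    if (++cnt >= kPipelineDepth) {       // batch full: drain all replies
      for (; cnt > 0; cnt--) conn.Recv();
    }
  }
  for (; cnt > 0; cnt--) conn.Recv();    // drain the tail batch
  printf("inflight at exit: %d\n", conn.inflight);  // 0
  return 0;
}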
void *PikaSender::ThreadMain() { - log_info("Start sender thread..."); - int cnt = 0; - - if (cli_ == NULL) { - ConnectRedis(); - } - - while (!should_exit_ || QueueSize() != 0) { - std::string command; - - keys_mutex_.Lock(); - while (keys_queue_.size() == 0 && !should_exit_) { - signal_.TimedWait(200); - } - keys_mutex_.Unlock(); - if (QueueSize() == 0 && should_exit_) { - // if (should_exit_) { - return NULL; - } - - keys_mutex_.Lock(); - std::string key = keys_queue_.front(); - elements_++; - keys_queue_.pop(); - keys_mutex_.Unlock(); - - SendCommand(key, key); - cnt++; - if (cnt >= 200) { - for (; cnt > 0; cnt--) { - cli_->Recv(NULL); - } - } - } - for (; cnt > 0; cnt--) { - cli_->Recv(NULL); - } - - if (cli_) { - cli_->Close(); - delete cli_; - cli_ = NULL; - } - log_info("PikaSender thread complete"); - return NULL; -} - diff --git a/tools/pika_migrate/src/pika_server.cc b/tools/pika_migrate/src/pika_server.cc deleted file mode 100644 index c03b06a5ab..0000000000 --- a/tools/pika_migrate/src/pika_server.cc +++ /dev/null @@ -1,1598 +0,0 @@ -// Copyright (c) 2015-present, Qihoo, Inc. All rights reserved. -// This source code is licensed under the BSD-style license found in the -// LICENSE file in the root directory of this source tree. An additional grant -// of patent rights can be found in the PATENTS file in the same directory. - -#include "include/pika_server.h" - -#include <arpa/inet.h> -#include <ifaddrs.h> -#include <netinet/in.h> -#include <signal.h> -#include <sys/resource.h> -#include <sys/types.h> -#include <fstream> -#include <utility> - -#include "slash/include/env.h" -#include "slash/include/rsync.h" -#include "pink/include/pink_cli.h" -#include "pink/include/redis_cli.h" -#include "pink/include/bg_thread.h" - -#include "include/pika_rm.h" -#include "include/pika_server.h" -#include "include/pika_sender.h" -#include "include/migrator_thread.h" -#include "include/pika_dispatch_thread.h" -#include "include/pika_cmd_table_manager.h" - -extern PikaServer* g_pika_server; -extern PikaReplicaManager* g_pika_rm; -extern PikaCmdTableManager* g_pika_cmd_table_manager; - -void DoPurgeDir(void* arg) { - std::string path = *(static_cast<std::string*>(arg)); - LOG(INFO) << "Delete dir: " << path << " start"; - slash::DeleteDir(path); - LOG(INFO) << "Delete dir: " << path << " done"; - delete static_cast<std::string*>(arg); -} - -void DoDBSync(void* arg) { - DBSyncArg* dbsa = reinterpret_cast<DBSyncArg*>(arg); - PikaServer* const ps = dbsa->p; - ps->DbSyncSendFile(dbsa->ip, dbsa->port, - dbsa->table_name, dbsa->partition_id); - delete dbsa; -} - -PikaServer::PikaServer() : - exit_(false), - slot_state_(INFREE), - have_scheduled_crontask_(false), - last_check_compact_time_({0, 0}), - master_ip_(""), - master_port_(0), - repl_state_(PIKA_REPL_NO_CONNECT), - role_(PIKA_ROLE_SINGLE), - loop_partition_state_machine_(false), - force_full_sync_(false), - slowlog_entry_id_(0) { - - //Init server ip host - if (!ServerInit()) { - LOG(FATAL) << "ServerInit ioctl error"; - } - - InitBlackwidowOptions(); - - pthread_rwlockattr_t tables_rw_attr; - pthread_rwlockattr_init(&tables_rw_attr); - pthread_rwlockattr_setkind_np(&tables_rw_attr, - PTHREAD_RWLOCK_PREFER_WRITER_NONRECURSIVE_NP); - pthread_rwlock_init(&tables_rw_, &tables_rw_attr); - - // Create thread - worker_num_ = std::min(g_pika_conf->thread_num(), - PIKA_MAX_WORKER_THREAD_NUM); - - std::set<std::string> ips; - if (g_pika_conf->network_interface().empty()) { - ips.insert("0.0.0.0"); - } else { - ips.insert("127.0.0.1"); - ips.insert(host_); - } - // We estimate the queue size - int worker_queue_limit = g_pika_conf->maxclients() / worker_num_ + 100; - LOG(INFO) << "Worker queue limit is " << worker_queue_limit; - pika_dispatch_thread_ = new PikaDispatchThread(ips, port_, worker_num_, 3000, - worker_queue_limit); - pika_monitor_thread_ = new PikaMonitorThread(); - pika_rsync_service_ = new PikaRsyncService(g_pika_conf->db_sync_path(), - g_pika_conf->port() + kPortShiftRSync); - pika_pubsub_thread_ = new pink::PubSubThread(); - pika_auxiliary_thread_ = new PikaAuxiliaryThread(); - pika_thread_pool_ = new pink::ThreadPool(g_pika_conf->thread_pool_size(), 100000); - - // Create redis sender - for (int i = 0; i < g_pika_conf->redis_sender_num(); i++) { - redis_senders_.emplace_back( - new RedisSender(int(i), - g_pika_conf->target_redis_host(), - g_pika_conf->target_redis_port(), - 
g_pika_conf->target_redis_pwd())); - } - - pthread_rwlock_init(&state_protector_, NULL); - pthread_rwlock_init(&slowlog_protector_, NULL); -} - -PikaServer::~PikaServer() { - - // DispatchThread will use queue of worker thread, - // so we need to delete dispatch before worker. - pika_thread_pool_->stop_thread_pool(); - delete pika_dispatch_thread_; - - { - slash::MutexLock l(&slave_mutex_); - std::vector<SlaveItem>::iterator iter = slaves_.begin(); - while (iter != slaves_.end()) { - iter = slaves_.erase(iter); - LOG(INFO) << "Delete slave success"; - } - } - - delete pika_pubsub_thread_; - delete pika_auxiliary_thread_; - delete pika_rsync_service_; - delete pika_thread_pool_; - delete pika_monitor_thread_; - - for (size_t i = 0; i < redis_senders_.size(); i++) { - redis_senders_[i]->Stop(); - } - // wait thread exit - sleep(1); - for (size_t i = 0; i < redis_senders_.size(); i++) { - delete redis_senders_[i]; - } - redis_senders_.clear(); - - bgsave_thread_.StopThread(); - key_scan_thread_.StopThread(); - - tables_.clear(); - - pthread_rwlock_destroy(&tables_rw_); - pthread_rwlock_destroy(&state_protector_); - pthread_rwlock_destroy(&slowlog_protector_); - - LOG(INFO) << "PikaServer " << pthread_self() << " exit!!!"; -} - -bool PikaServer::ServerInit() { - std::string network_interface = g_pika_conf->network_interface(); - - if (network_interface == "") { - - std::ifstream routeFile("/proc/net/route", std::ios_base::in); - if (!routeFile.good()) - { - return false; - } - - std::string line; - std::vector<std::string> tokens; - while (std::getline(routeFile, line)) - { - std::istringstream stream(line); - std::copy(std::istream_iterator<std::string>(stream), - std::istream_iterator<std::string>(), - std::back_inserter<std::vector<std::string> >(tokens)); - - // the default interface is the one having the second - // field, Destination, set to "00000000" - if ((tokens.size() >= 2) && (tokens[1] == std::string("00000000"))) - { - network_interface = tokens[0]; - break; - } - - tokens.clear(); - } - routeFile.close(); - } - LOG(INFO) << "Using Network Interface: " << network_interface; - - struct ifaddrs * ifAddrStruct = NULL; - struct ifaddrs * ifa = NULL; - void * tmpAddrPtr = NULL; - - if (getifaddrs(&ifAddrStruct) == -1) { - LOG(FATAL) << "getifaddrs failed: " << strerror(errno); - } - - for (ifa = ifAddrStruct; ifa != NULL; ifa = ifa->ifa_next) { - if (ifa->ifa_addr == NULL) { - continue; - } - if (ifa->ifa_addr->sa_family == AF_INET) { // Check it is - // a valid IPv4 address - tmpAddrPtr = &((struct sockaddr_in *)ifa->ifa_addr)->sin_addr; - char addressBuffer[INET_ADDRSTRLEN]; - inet_ntop(AF_INET, tmpAddrPtr, addressBuffer, INET_ADDRSTRLEN); - if (std::string(ifa->ifa_name) == network_interface) { - host_ = addressBuffer; - break; - } - } else if (ifa->ifa_addr->sa_family == AF_INET6) { // Check it is - // a valid IPv6 address - tmpAddrPtr = &((struct sockaddr_in6 *)ifa->ifa_addr)->sin6_addr; - char addressBuffer[INET6_ADDRSTRLEN]; - inet_ntop(AF_INET6, tmpAddrPtr, addressBuffer, INET6_ADDRSTRLEN); - if (std::string(ifa->ifa_name) == network_interface) { - host_ = addressBuffer; - break; - } - } - } - - if (ifAddrStruct != NULL) { - freeifaddrs(ifAddrStruct); - } - if (ifa == NULL) { - LOG(FATAL) << "error network interface: " << network_interface << ", please check!"; - } - - port_ = g_pika_conf->port(); - LOG(INFO) << "host: " << host_ << " port: " << port_; - return true; -} -
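ServerInit above falls back to scanning /proc/net/route when no network_interface is configured: the default interface is the row whose second column (Destination) is 00000000. The same scan in isolation, as a small runnable sketch:

#include <fstream>
#include <iostream>
#include <sstream>
#include <string>

// Returns the interface that carries the default route, or "" if none found.
std::string DefaultInterface() {
  std::ifstream route("/proc/net/route");
  std::string line;
  while (std::getline(route, line)) {
    std::istringstream ss(line);
    std::string iface, dest;
    // Column 1 is the interface name, column 2 the hex destination.
    if (ss >> iface >> dest && dest == "00000000") return iface;
  }
  return "";
}

int main() {
  std::cout << "default interface: " << DefaultInterface() << "\n";
  return 0;
}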
void PikaServer::Start() { - int ret = 0; - // start rsync first, rocksdb opened fd will not appear in this fork - ret = pika_rsync_service_->StartRsync(); - if (0 != ret) { - tables_.clear(); - LOG(FATAL) << "Start Rsync Error: bind port " + std::to_string(pika_rsync_service_->ListenPort()) + " failed" - << ", Listen on this port to receive Master FullSync Data"; - } - - // We Init Table Struct Before Start The following thread - InitTableStruct(); - - ret = pika_thread_pool_->start_thread_pool(); - if (ret != pink::kSuccess) { - tables_.clear(); - LOG(FATAL) << "Start ThreadPool Error: " << ret << (ret == pink::kCreateThreadError ? ": create thread error " : ": other error"); - } - ret = pika_dispatch_thread_->StartThread(); - if (ret != pink::kSuccess) { - tables_.clear(); - LOG(FATAL) << "Start Dispatch Error: " << ret << (ret == pink::kBindError ? ": bind port " + std::to_string(port_) + " conflict" - : ": other error") << ", Listen on this port to handle the connected redis client"; - } - ret = pika_pubsub_thread_->StartThread(); - if (ret != pink::kSuccess) { - tables_.clear(); - LOG(FATAL) << "Start Pubsub Error: " << ret << (ret == pink::kBindError ? ": bind port conflict" : ": other error"); - } - - ret = pika_auxiliary_thread_->StartThread(); - if (ret != pink::kSuccess) { - tables_.clear(); - LOG(FATAL) << "Start Auxiliary Thread Error: " << ret << (ret == pink::kCreateThreadError ? ": create thread error " : ": other error"); - } - for (size_t i = 0; i < redis_senders_.size(); i++) { - ret = redis_senders_[i]->StartThread(); - if (ret != pink::kSuccess) { - LOG(FATAL) << "Start Redis Sender Thread Error: " << ret << (ret == pink::kCreateThreadError ? ": create thread error " : ": other error"); - } - } - - time(&start_time_s_); - - std::string slaveof = g_pika_conf->slaveof(); - if (!slaveof.empty()) { - int32_t sep = slaveof.find(":"); - std::string master_ip = slaveof.substr(0, sep); - int32_t master_port = std::stoi(slaveof.substr(sep+1)); - if ((master_ip == "127.0.0.1" || master_ip == host_) && master_port == port_) { - LOG(FATAL) << "you are configured to slaveof yourself, please check the config file"; - } else { - SetMaster(master_ip, master_port); - } - } - - LOG(INFO) << "Pika Server going to start"; - while (!exit_) { - DoTimingTask(); - // wake up every 10 second - int try_num = 0; - while (!exit_ && try_num++ < 10) { - sleep(1); - } - } - LOG(INFO) << "Goodbye..."; -} - -void PikaServer::Exit() { - exit_ = true; -} - -std::string PikaServer::host() { - return host_; -} - -int PikaServer::port() { - return port_; -} - -time_t PikaServer::start_time_s() { - return start_time_s_; -} - -std::string PikaServer::master_ip() { - slash::RWLock l(&state_protector_, false); - return master_ip_; -} - -int PikaServer::master_port() { - slash::RWLock l(&state_protector_, false); - return master_port_; -} - -int PikaServer::role() { - slash::RWLock l(&state_protector_, false); - return role_; -} - -bool PikaServer::readonly(const std::string& table_name, const std::string& key) { - slash::RWLock l(&state_protector_, false); - if ((role_ & PIKA_ROLE_SLAVE) - && g_pika_conf->slave_read_only()) { - return true; - } - if (!g_pika_conf->classic_mode()) { - std::shared_ptr<Table> 
table = GetTable(table_name); - if (table == nullptr) { - // swallow this error, will process later - return false; - } - uint32_t index = g_pika_cmd_table_manager->DistributeKey( - key, table->PartitionNum()); - int role = 0; - Status s = g_pika_rm->CheckPartitionRole(table_name, index, &role); - if (!s.ok()) { - // swallow this error, will process later - return false; - } - if (role & PIKA_ROLE_SLAVE) { - return true; - } - } - return false; -} - -int PikaServer::repl_state() { - slash::RWLock l(&state_protector_, false); - return repl_state_; -} - -std::string PikaServer::repl_state_str() { - slash::RWLock l(&state_protector_, false); - switch (repl_state_) { - case PIKA_REPL_NO_CONNECT: - return "no connect"; - case PIKA_REPL_SHOULD_META_SYNC: - return "should meta sync"; - case PIKA_REPL_META_SYNC_DONE: - return "meta sync done"; - case PIKA_REPL_ERROR: - return "error"; - default: - return ""; - } -} - -bool PikaServer::force_full_sync() { - return force_full_sync_; -} - -void PikaServer::SetForceFullSync(bool v) { - force_full_sync_ = v; -} - -void PikaServer::SetDispatchQueueLimit(int queue_limit) { - rlimit limit; - rlim_t maxfiles = g_pika_conf->maxclients() + PIKA_MIN_RESERVED_FDS; - if (getrlimit(RLIMIT_NOFILE, &limit) == -1) { - LOG(WARNING) << "getrlimit error: " << strerror(errno); - } else if (limit.rlim_cur < maxfiles) { - rlim_t old_limit = limit.rlim_cur; - limit.rlim_cur = maxfiles; - limit.rlim_max = maxfiles; - if (setrlimit(RLIMIT_NOFILE, &limit) != -1) { - LOG(WARNING) << "your 'limit -n ' of " << old_limit << " is not enough for Redis to start. pika have successfully reconfig it to " << limit.rlim_cur; - } else { - LOG(FATAL) << "your 'limit -n ' of " << old_limit << " is not enough for Redis to start. pika can not reconfig it(" << strerror(errno) << "), do it by yourself"; - } - } - pika_dispatch_thread_->SetQueueLimit(queue_limit); -} - -blackwidow::BlackwidowOptions PikaServer::bw_options() { - return bw_options_; -} -
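SetDispatchQueueLimit above raises RLIMIT_NOFILE to maxclients plus a reserved-fd cushion before trusting the configured queue limit. The getrlimit/setrlimit dance in isolation (EnsureFdLimit is a hypothetical helper; raising the hard limit generally requires privileges, which is why the original logs a FATAL when it fails):

#include <cstdio>
#include <sys/resource.h>

// Try to raise the fd soft limit to `needed`; returns true on success
// or if the current limit is already high enough.
bool EnsureFdLimit(rlim_t needed) {
  rlimit lim;
  if (getrlimit(RLIMIT_NOFILE, &lim) == -1) return false;
  if (lim.rlim_cur >= needed) return true;
  lim.rlim_cur = needed;
  if (needed > lim.rlim_max) lim.rlim_max = needed;  // may need privileges
  return setrlimit(RLIMIT_NOFILE, &lim) != -1;
}

int main() {
  printf("fd limit raised: %s\n", EnsureFdLimit(10240) ? "yes" : "no");
  return 0;
}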
void PikaServer::InitTableStruct() { - std::string db_path = g_pika_conf->db_path(); - std::string log_path = g_pika_conf->log_path(); - std::vector<TableStruct> table_structs = g_pika_conf->table_structs(); - slash::RWLock rwl(&tables_rw_, true); - for (const auto& table : table_structs) { - std::string name = table.table_name; - uint32_t num = table.partition_num; - std::shared_ptr<Table> table_ptr = std::make_shared<Table>( - name, num, db_path, log_path); - table_ptr->AddPartitions(table.partition_ids); - tables_.emplace(name, table_ptr); - } -} - -std::shared_ptr<Table> PikaServer::GetTable(const std::string &table_name) { - slash::RWLock l(&tables_rw_, false); - auto iter = tables_.find(table_name); - return (iter == tables_.end()) ? NULL : iter->second; -} - -std::set<uint32_t> PikaServer::GetTablePartitionIds(const std::string& table_name) { - std::set<uint32_t> empty; - slash::RWLock l(&tables_rw_, false); - auto iter = tables_.find(table_name); - return (iter == tables_.end()) ? empty : iter->second->GetPartitionIds(); -} - -bool PikaServer::IsBgSaving() { - slash::RWLock table_rwl(&tables_rw_, false); - for (const auto& table_item : tables_) { - slash::RWLock partition_rwl(&table_item.second->partitions_rw_, false); - for (const auto& partition_item : table_item.second->partitions_) { - if (partition_item.second->IsBgSaving()) { - return true; - } - } - } - return false; -} - -bool PikaServer::IsKeyScaning() { - slash::RWLock table_rwl(&tables_rw_, false); - for (const auto& table_item : tables_) { - if (table_item.second->IsKeyScaning()) { - return true; - } - } - return false; -} - -bool PikaServer::IsCompacting() { - slash::RWLock table_rwl(&tables_rw_, false); - for (const auto& table_item : tables_) { - slash::RWLock partition_rwl(&table_item.second->partitions_rw_, false); - for (const auto& partition_item : table_item.second->partitions_) { - partition_item.second->DbRWLockReader(); - std::string task_type = partition_item.second->db()->GetCurrentTaskType(); - partition_item.second->DbRWUnLock(); - if (strcasecmp(task_type.data(), "no")) { - return true; - } - } - } - return false; -} - -bool PikaServer::IsTableExist(const std::string& table_name) { - return GetTable(table_name) ? true : false; -} - -bool PikaServer::IsTablePartitionExist(const std::string& table_name, - uint32_t partition_id) { - std::shared_ptr<Table> 
table_ptr = GetTable(table_name); - if (!table_ptr) { - return false; - } else { - return table_ptr->GetPartitionById(partition_id) ? true : false; - } -} - -bool PikaServer::IsCommandSupport(const std::string& command) { - if (g_pika_conf->classic_mode()) { - return true; - } else { - std::string cmd = command; - slash::StringToLower(cmd); - return !ShardingModeNotSupportCommands.count(cmd); - } -} - -bool PikaServer::IsTableBinlogIoError(const std::string& table_name) { - std::shared_ptr<Table> table = GetTable(table_name); - return table ? table->IsBinlogIoError() : true; -} - -// If no collection of specified tables is given, we execute the task on all tables -Status PikaServer::DoSameThingSpecificTable(const TaskType& type, const std::set<std::string>& tables) { - slash::RWLock rwl(&tables_rw_, false); - for (const auto& table_item : tables_) { - if (!tables.empty() - && tables.find(table_item.first) == tables.end()) { - continue; - } else { - switch (type) { - case TaskType::kCompactAll: - table_item.second->Compact(blackwidow::DataType::kAll); - break; - case TaskType::kCompactStrings: - table_item.second->Compact(blackwidow::DataType::kStrings); - break; - case TaskType::kCompactHashes: - table_item.second->Compact(blackwidow::DataType::kHashes); - break; - case TaskType::kCompactSets: - table_item.second->Compact(blackwidow::DataType::kSets); - break; - case TaskType::kCompactZSets: - table_item.second->Compact(blackwidow::DataType::kZSets); - break; - case TaskType::kCompactList: - table_item.second->Compact(blackwidow::DataType::kLists); - break; - case TaskType::kStartKeyScan: - table_item.second->KeyScan(); - break; - case TaskType::kStopKeyScan: - table_item.second->StopKeyScan(); - break; - case TaskType::kBgSave: - table_item.second->BgSaveTable(); - break; - default: - break; - } - } - } - return Status::OK(); -} - -void PikaServer::PreparePartitionTrySync() { - slash::RWLock rwl(&tables_rw_, false); - ReplState state = force_full_sync_ ? - ReplState::kTryDBSync : ReplState::kTryConnect; - for (const auto& table_item : tables_) { - for (const auto& partition_item : table_item.second->partitions_) { - Status s = g_pika_rm->ActivateSyncSlavePartition( - RmNode(g_pika_server->master_ip(), - g_pika_server->master_port(), - table_item.second->GetTableName(), - partition_item.second->GetPartitionId()), state); - if (!s.ok()) { - LOG(WARNING) << s.ToString(); - } - } - } - force_full_sync_ = false; - loop_partition_state_machine_ = true; - LOG(INFO) << "Mark try connect finish"; -} - -void PikaServer::PartitionSetMaxCacheStatisticKeys(uint32_t max_cache_statistic_keys) { - slash::RWLock rwl(&tables_rw_, false); - for (const auto& table_item : tables_) { - for (const auto& partition_item : table_item.second->partitions_) { - partition_item.second->DbRWLockReader(); - partition_item.second->db()->SetMaxCacheStatisticKeys(max_cache_statistic_keys); - partition_item.second->DbRWUnLock(); - } - } -} - -void PikaServer::PartitionSetSmallCompactionThreshold(uint32_t small_compaction_threshold) { - slash::RWLock rwl(&tables_rw_, false); - for (const auto& table_item : tables_) { - for (const auto& partition_item : table_item.second->partitions_) { - partition_item.second->DbRWLockReader(); - partition_item.second->db()->SetSmallCompactionThreshold(small_compaction_threshold); - partition_item.second->DbRWUnLock(); - } - } -} - -bool PikaServer::GetTablePartitionBinlogOffset(const std::string& table_name, - uint32_t partition_id, - BinlogOffset* const boffset) { - std::shared_ptr<Partition> partition = GetTablePartitionById(table_name, partition_id); - if (!partition) { - return false; - } else { - return partition->GetBinlogOffset(boffset); - } -} - -// Only use in classic mode -std::shared_ptr<Partition> PikaServer::GetPartitionByDbName(const std::string& db_name) { - std::shared_ptr<Table> 
table = GetTable(db_name); - return table ? table->GetPartitionById(0) : NULL; -} - -std::shared_ptr PikaServer::GetTablePartitionById( - const std::string& table_name, - uint32_t partition_id) { - std::shared_ptr
table = GetTable(table_name); - return table ? table->GetPartitionById(partition_id) : NULL; -} - -std::shared_ptr PikaServer::GetTablePartitionByKey( - const std::string& table_name, - const std::string& key) { - std::shared_ptr
table = GetTable(table_name); - return table ? table->GetPartitionByKey(key) : NULL; -} - -Status PikaServer::DoSameThingEveryPartition(const TaskType& type) { - slash::RWLock rwl(&tables_rw_, false); - for (const auto& table_item : tables_) { - for (const auto& partition_item : table_item.second->partitions_) { - switch (type) { - case TaskType::kResetReplState: - { - Status s = g_pika_rm->SetSlaveReplState( - PartitionInfo(table_item.second->GetTableName(), - partition_item.second->GetPartitionId()), - ReplState::kNoConnect); - if (!s.ok()) { - LOG(WARNING) << s.ToString(); - } - break; - } - case TaskType::kPurgeLog: - partition_item.second->PurgeLogs(); - break; - default: - break; - } - } - } - return Status::OK(); -} - -void PikaServer::BecomeMaster() { - slash::RWLock l(&state_protector_, true); - role_ |= PIKA_ROLE_MASTER; -} - -void PikaServer::DeleteSlave(int fd) { - std::string ip; - int port = -1; - bool is_find = false; - int slave_num = -1; - { - slash::MutexLock l(&slave_mutex_); - std::vector::iterator iter = slaves_.begin(); - while (iter != slaves_.end()) { - if (iter->conn_fd == fd) { - ip = iter->ip; - port = iter->port; - is_find = true; - g_pika_rm->LostConnection(iter->ip, iter->port); - g_pika_rm->DropItemInWriteQueue(iter->ip, iter->port); - LOG(INFO) << "Delete Slave Success, ip_port: " << iter->ip << ":" << iter->port; - slaves_.erase(iter); - break; - } - iter++; - } - slave_num = slaves_.size(); - } - - if (is_find) { - g_pika_rm->LostConnection(ip, port); - g_pika_rm->DropItemInWriteQueue(ip, port); - } - - if (slave_num == 0) { - slash::RWLock l(&state_protector_, true); - role_ &= ~PIKA_ROLE_MASTER; - } -} - -int32_t PikaServer::CountSyncSlaves() { - slash::MutexLock ldb(&db_sync_protector_); - return db_sync_slaves_.size(); -} - -int32_t PikaServer::GetShardingSlaveListString(std::string& slave_list_str) { - std::vector complete_replica; - g_pika_rm->FindCompleteReplica(&complete_replica); - std::stringstream tmp_stream; - size_t index = 0; - for (auto replica : complete_replica) { - std::string ip; - int port; - if(!slash::ParseIpPortString(replica, ip, port)) { - continue; - } - tmp_stream << "slave" << index++ << ":ip=" << ip << ",port=" << port << "\r\n"; - } - slave_list_str.assign(tmp_stream.str()); - return index; -} - -int32_t PikaServer::GetSlaveListString(std::string& slave_list_str) { - size_t index = 0; - SlaveState slave_state; - BinlogOffset master_boffset; - BinlogOffset sent_slave_boffset; - BinlogOffset acked_slave_boffset; - std::stringstream tmp_stream; - slash::MutexLock l(&slave_mutex_); - for (const auto& slave : slaves_) { - tmp_stream << "slave" << index++ << ":ip=" << slave.ip << ",port=" << slave.port << ",conn_fd=" << slave.conn_fd << ",lag="; - for (const auto& ts : slave.table_structs) { - for (size_t idx = 0; idx < ts.partition_num; ++idx) { - std::shared_ptr partition = GetTablePartitionById(ts.table_name, idx); - RmNode rm_node(slave.ip, slave.port, ts.table_name, idx); - Status s = g_pika_rm->GetSyncMasterPartitionSlaveState(rm_node, &slave_state); - if (s.ok() - && slave_state == SlaveState::kSlaveBinlogSync - && g_pika_rm->GetSyncBinlogStatus(rm_node, &sent_slave_boffset, &acked_slave_boffset).ok()) { - if (!partition || !partition->GetBinlogOffset(&master_boffset)) { - continue; - } else { - uint64_t lag = - (master_boffset.filenum - sent_slave_boffset.filenum) * g_pika_conf->binlog_file_size() - + (master_boffset.offset - sent_slave_boffset.offset); - tmp_stream << "(" << partition->GetPartitionName() << ":" << lag << 
")"; - } - } else { - tmp_stream << "(" << partition->GetPartitionName() << ":not syncing)"; - } - } - } - tmp_stream << "\r\n"; - } - slave_list_str.assign(tmp_stream.str()); - return index; -} - -// Try add Slave, return true if success, -// return false when slave already exist -bool PikaServer::TryAddSlave(const std::string& ip, int64_t port, int fd, - const std::vector& table_structs) { - std::string ip_port = slash::IpPortString(ip, port); - - slash::MutexLock l(&slave_mutex_); - std::vector::iterator iter = slaves_.begin(); - while (iter != slaves_.end()) { - if (iter->ip_port == ip_port) { - LOG(WARNING) << "Slave Already Exist, ip_port: " << ip << ":" << port; - return false; - } - iter++; - } - - // Not exist, so add new - LOG(INFO) << "Add New Slave, " << ip << ":" << port; - SlaveItem s; - s.ip_port = ip_port; - s.ip = ip; - s.port = port; - s.conn_fd = fd; - s.stage = SLAVE_ITEM_STAGE_ONE; - s.table_structs = table_structs; - gettimeofday(&s.create_time, NULL); - slaves_.push_back(s); - return true; -} - -void PikaServer::SyncError() { - slash::RWLock l(&state_protector_, true); - repl_state_ = PIKA_REPL_ERROR; - LOG(WARNING) << "Sync error, set repl_state to PIKA_REPL_ERROR"; -} - -void PikaServer::RemoveMaster() { - { - slash::RWLock l(&state_protector_, true); - repl_state_ = PIKA_REPL_NO_CONNECT; - role_ &= ~PIKA_ROLE_SLAVE; - - if (master_ip_ != "" && master_port_ != -1) { - g_pika_rm->CloseReplClientConn(master_ip_, master_port_ + kPortShiftReplServer); - g_pika_rm->LostConnection(master_ip_, master_port_); - loop_partition_state_machine_ = false; - LOG(INFO) << "Remove Master Success, ip_port: " << master_ip_ << ":" << master_port_; - } - - master_ip_ = ""; - master_port_ = -1; - DoSameThingEveryPartition(TaskType::kResetReplState); - } -} - -bool PikaServer::SetMaster(std::string& master_ip, int master_port) { - if (master_ip == "127.0.0.1") { - master_ip = host_; - } - slash::RWLock l(&state_protector_, true); - if ((role_ ^ PIKA_ROLE_SLAVE) && repl_state_ == PIKA_REPL_NO_CONNECT) { - master_ip_ = master_ip; - master_port_ = master_port; - role_ |= PIKA_ROLE_SLAVE; - repl_state_ = PIKA_REPL_SHOULD_META_SYNC; - return true; - } - return false; -} - -bool PikaServer::ShouldMetaSync() { - slash::RWLock l(&state_protector_, false); - return repl_state_ == PIKA_REPL_SHOULD_META_SYNC; -} - -void PikaServer::FinishMetaSync() { - slash::RWLock l(&state_protector_, true); - assert(repl_state_ == PIKA_REPL_SHOULD_META_SYNC); - repl_state_ = PIKA_REPL_META_SYNC_DONE; -} - -bool PikaServer::MetaSyncDone() { - slash::RWLock l(&state_protector_, false); - return repl_state_ == PIKA_REPL_META_SYNC_DONE; -} - -void PikaServer::ResetMetaSyncStatus() { - slash::RWLock sp_l(&state_protector_, true); - if (role_ & PIKA_ROLE_SLAVE) { - // not change by slaveof no one, so set repl_state = PIKA_REPL_SHOULD_META_SYNC, - // continue to connect master - repl_state_ = PIKA_REPL_SHOULD_META_SYNC; - loop_partition_state_machine_ = false; - DoSameThingEveryPartition(TaskType::kResetReplState); - } -} - -bool PikaServer::AllPartitionConnectSuccess() { - bool all_partition_connect_success = true; - slash::RWLock rwl(&tables_rw_, false); - for (const auto& table_item : tables_) { - for (const auto& partition_item : table_item.second->partitions_) { - ReplState repl_state; - Status s = g_pika_rm->GetSlaveReplState( - PartitionInfo(table_item.second->GetTableName(), - partition_item.second->GetPartitionId()), &repl_state); - if (!s.ok()) { - return false; - } - if (repl_state != ReplState::kConnected) 
{ - all_partition_connect_success = false; - break; - } - } - } - return all_partition_connect_success; -} - -bool PikaServer::LoopPartitionStateMachine() { - slash::RWLock sp_l(&state_protector_, false); - return loop_partition_state_machine_; -} - -void PikaServer::SetLoopPartitionStateMachine(bool need_loop) { - slash::RWLock sp_l(&state_protector_, true); - assert(repl_state_ == PIKA_REPL_META_SYNC_DONE); - loop_partition_state_machine_ = need_loop; -} - -void PikaServer::Schedule(pink::TaskFunc func, void* arg) { - pika_thread_pool_->Schedule(func, arg); -} - -void PikaServer::BGSaveTaskSchedule(pink::TaskFunc func, void* arg) { - bgsave_thread_.StartThread(); - bgsave_thread_.Schedule(func, arg); -} - -void PikaServer::PurgelogsTaskSchedule(pink::TaskFunc func, void* arg) { - purge_thread_.StartThread(); - purge_thread_.Schedule(func, arg); -} - -void PikaServer::PurgeDir(const std::string& path) { - std::string* dir_path = new std::string(path); - PurgeDirTaskSchedule(&DoPurgeDir, static_cast(dir_path)); -} - -void PikaServer::PurgeDirTaskSchedule(void (*function)(void*), void* arg) { - purge_thread_.StartThread(); - purge_thread_.Schedule(function, arg); -} - -void PikaServer::DBSync(const std::string& ip, int port, - const std::string& table_name, - uint32_t partition_id) { - { - std::string task_index = - DbSyncTaskIndex(ip, port, table_name, partition_id); - slash::MutexLock ml(&db_sync_protector_); - if (db_sync_slaves_.find(task_index) != db_sync_slaves_.end()) { - return; - } - db_sync_slaves_.insert(task_index); - } - // Reuse the bgsave_thread_ - // Since we expect BgSave and DBSync execute serially - bgsave_thread_.StartThread(); - DBSyncArg* arg = new DBSyncArg(this, ip, port, table_name, partition_id); - bgsave_thread_.Schedule(&DoDBSync, reinterpret_cast(arg)); -} - -void PikaServer::TryDBSync(const std::string& ip, int port, - const std::string& table_name, - uint32_t partition_id, int32_t top) { - std::shared_ptr partition = - GetTablePartitionById(table_name, partition_id); - if (!partition) { - LOG(WARNING) << "Partition: " << partition->GetPartitionName() - << " Not Found, TryDBSync Failed"; - } else { - BgSaveInfo bgsave_info = partition->bgsave_info(); - std::string logger_filename = partition->logger()->filename; - if (slash::IsDir(bgsave_info.path) != 0 - || !slash::FileExists(NewFileName(logger_filename, bgsave_info.filenum)) - || top - bgsave_info.filenum > kDBSyncMaxGap) { - // Need Bgsave first - partition->BgSavePartition(); - } - DBSync(ip, port, table_name, partition_id); - } -} - -void PikaServer::DbSyncSendFile(const std::string& ip, int port, - const std::string& table_name, - uint32_t partition_id) { - std::shared_ptr partition = GetTablePartitionById(table_name, partition_id); - if (!partition) { - LOG(WARNING) << "Partition: " << partition->GetPartitionName() - << " Not Found, DbSync send file Failed"; - return; - } - - BgSaveInfo bgsave_info = partition->bgsave_info(); - std::string bg_path = bgsave_info.path; - uint32_t binlog_filenum = bgsave_info.filenum; - uint64_t binlog_offset = bgsave_info.offset; - - // Get all files need to send - std::vector descendant; - int ret = 0; - LOG(INFO) << "Partition: " << partition->GetPartitionName() - << " Start Send files in " << bg_path << " to " << ip; - ret = slash::GetChildren(bg_path, descendant); - if (ret != 0) { - std::string ip_port = slash::IpPortString(ip, port); - slash::MutexLock ldb(&db_sync_protector_); - db_sync_slaves_.erase(ip_port); - LOG(WARNING) << "Partition: " << 
partition->GetPartitionName() - << " Get child directory when try to do sync failed, error: " << strerror(ret); - return; - } - - std::string local_path, target_path; - std::string remote_path = g_pika_conf->classic_mode() ? table_name : table_name + "/" + std::to_string(partition_id); - std::vector::const_iterator iter = descendant.begin(); - slash::RsyncRemote remote(ip, port, kDBSyncModule, g_pika_conf->db_sync_speed() * 1024); - std::string secret_file_path = g_pika_conf->db_sync_path(); - if (g_pika_conf->db_sync_path().back() != '/') { - secret_file_path += "/"; - } - secret_file_path += slash::kRsyncSubDir + "/" + kPikaSecretFile; - - for (; iter != descendant.end(); ++iter) { - local_path = bg_path + "/" + *iter; - target_path = remote_path + "/" + *iter; - - if (*iter == kBgsaveInfoFile) { - continue; - } - - if (slash::IsDir(local_path) == 0 && - local_path.back() != '/') { - local_path.push_back('/'); - target_path.push_back('/'); - } - - // We need specify the speed limit for every single file - ret = slash::RsyncSendFile(local_path, target_path, secret_file_path, remote); - if (0 != ret) { - LOG(WARNING) << "Partition: " << partition->GetPartitionName() - << " RSync send file failed! From: " << *iter - << ", To: " << target_path - << ", At: " << ip << ":" << port - << ", Error: " << ret; - break; - } - } - // Clear target path - slash::RsyncSendClearTarget(bg_path + "/strings", remote_path + "/strings", secret_file_path, remote); - slash::RsyncSendClearTarget(bg_path + "/hashes", remote_path + "/hashes", secret_file_path, remote); - slash::RsyncSendClearTarget(bg_path + "/lists", remote_path + "/lists", secret_file_path, remote); - slash::RsyncSendClearTarget(bg_path + "/sets", remote_path + "/sets", secret_file_path, remote); - slash::RsyncSendClearTarget(bg_path + "/zsets", remote_path + "/zsets", secret_file_path, remote); - - pink::PinkCli* cli = pink::NewRedisCli(); - std::string lip(host_); - if (cli->Connect(ip, port, "").ok()) { - struct sockaddr_in laddr; - socklen_t llen = sizeof(laddr); - getsockname(cli->fd(), (struct sockaddr*) &laddr, &llen); - lip = inet_ntoa(laddr.sin_addr); - cli->Close(); - delete cli; - } else { - LOG(WARNING) << "Rsync try connect slave rsync service error" - << ", slave rsync service(" << ip << ":" << port << ")"; - delete cli; - } - - // Send info file at last - if (0 == ret) { - // need to modify the IP addr in the info file - if (lip.compare(host_)) { - std::ofstream fix; - std::string fn = bg_path + "/" + kBgsaveInfoFile + "." 
+ std::to_string(time(NULL)); - fix.open(fn, std::ios::in | std::ios::trunc); - if (fix.is_open()) { - fix << "0s\n" << lip << "\n" << port_ << "\n" << binlog_filenum << "\n" << binlog_offset << "\n"; - fix.close(); - } - ret = slash::RsyncSendFile(fn, remote_path + "/" + kBgsaveInfoFile, secret_file_path, remote); - slash::DeleteFile(fn); - if (ret != 0) { - LOG(WARNING) << "Partition: " << partition->GetPartitionName() << " Send Modified Info File Failed"; - } - } else if (0 != (ret = slash::RsyncSendFile(bg_path + "/" + kBgsaveInfoFile, remote_path + "/" + kBgsaveInfoFile, secret_file_path, remote))) { - LOG(WARNING) << "Partition: " << partition->GetPartitionName() << " Send Info File Failed"; - } - } - // remove slave - { - std::string task_index = - DbSyncTaskIndex(ip, port, table_name, partition_id); - slash::MutexLock ml(&db_sync_protector_); - db_sync_slaves_.erase(task_index); - } - - if (0 == ret) { - LOG(INFO) << "Partition: " << partition->GetPartitionName() << " RSync Send Files Success"; - } -} - -std::string PikaServer::DbSyncTaskIndex(const std::string& ip, - int port, - const std::string& table_name, - uint32_t partition_id) { - char buf[256]; - snprintf(buf, sizeof(buf), "%s:%d_%s:%d", - ip.data(), port, table_name.data(), partition_id); - return buf; -} - -void PikaServer::KeyScanTaskSchedule(pink::TaskFunc func, void* arg) { - key_scan_thread_.StartThread(); - key_scan_thread_.Schedule(func, arg); -} - -void PikaServer::ClientKillAll() { - pika_dispatch_thread_->ClientKillAll(); - pika_monitor_thread_->ThreadClientKill(); -} - -int PikaServer::ClientKill(const std::string &ip_port) { - if (pika_dispatch_thread_->ClientKill(ip_port) - || pika_monitor_thread_->ThreadClientKill(ip_port)) { - return 1; - } - return 0; -} - -int64_t PikaServer::ClientList(std::vector *clients) { - int64_t clients_num = 0; - clients_num += pika_dispatch_thread_->ThreadClientList(clients); - clients_num += pika_monitor_thread_->ThreadClientList(clients); - return clients_num; -} - -bool PikaServer::HasMonitorClients() { - return pika_monitor_thread_->HasMonitorClients(); -} - -void PikaServer::AddMonitorMessage(const std::string& monitor_message) { - pika_monitor_thread_->AddMonitorMessage(monitor_message); -} - -void PikaServer::AddMonitorClient(std::shared_ptr client_ptr) { - pika_monitor_thread_->AddMonitorClient(client_ptr); -} - -void PikaServer::SlowlogTrim() { - pthread_rwlock_wrlock(&slowlog_protector_); - while (slowlog_list_.size() > static_cast(g_pika_conf->slowlog_max_len())) { - slowlog_list_.pop_back(); - } - pthread_rwlock_unlock(&slowlog_protector_); -} - -void PikaServer::SlowlogReset() { - pthread_rwlock_wrlock(&slowlog_protector_); - slowlog_list_.clear(); - pthread_rwlock_unlock(&slowlog_protector_); -} - -uint32_t PikaServer::SlowlogLen() { - RWLock l(&slowlog_protector_, false); - return slowlog_list_.size(); -} - -void PikaServer::SlowlogObtain(int64_t number, std::vector* slowlogs) { - pthread_rwlock_rdlock(&slowlog_protector_); - slowlogs->clear(); - std::list::const_iterator iter = slowlog_list_.begin(); - while (number-- && iter != slowlog_list_.end()) { - slowlogs->push_back(*iter); - iter++; - } - pthread_rwlock_unlock(&slowlog_protector_); -} - -void PikaServer::SlowlogPushEntry(const PikaCmdArgsType& argv, int32_t time, int64_t duration) { - SlowlogEntry entry; - uint32_t slargc = (argv.size() < SLOWLOG_ENTRY_MAX_ARGC) - ? 
argv.size() : SLOWLOG_ENTRY_MAX_ARGC; - - for (uint32_t idx = 0; idx < slargc; ++idx) { - if (slargc != argv.size() && idx == slargc - 1) { - char buffer[32]; - sprintf(buffer, "... (%lu more arguments)", argv.size() - slargc + 1); - entry.argv.push_back(std::string(buffer)); - } else { - if (argv[idx].size() > SLOWLOG_ENTRY_MAX_STRING) { - char buffer[32]; - sprintf(buffer, "... (%lu more bytes)", argv[idx].size() - SLOWLOG_ENTRY_MAX_STRING); - std::string suffix(buffer); - std::string brief = argv[idx].substr(0, SLOWLOG_ENTRY_MAX_STRING); - entry.argv.push_back(brief + suffix); - } else { - entry.argv.push_back(argv[idx]); - } - } - } - - pthread_rwlock_wrlock(&slowlog_protector_); - entry.id = slowlog_entry_id_++; - entry.start_time = time; - entry.duration = duration; - slowlog_list_.push_front(entry); - pthread_rwlock_unlock(&slowlog_protector_); - - SlowlogTrim(); -} - -void PikaServer::ResetStat() { - statistic_data_.accumulative_connections.store(0); - statistic_data_.thread_querynum.store(0); - statistic_data_.last_thread_querynum.store(0); -} - -uint64_t PikaServer::ServerQueryNum() { - return statistic_data_.thread_querynum.load(); -} - -uint64_t PikaServer::ServerCurrentQps() { - return statistic_data_.last_sec_thread_querynum.load(); -} - -uint64_t PikaServer::accumulative_connections() { - return statistic_data_.accumulative_connections.load(); -} - -void PikaServer::incr_accumulative_connections() { - ++statistic_data_.accumulative_connections; -} - -// only one thread invoke this right now -void PikaServer::ResetLastSecQuerynum() { - uint64_t last_query = statistic_data_.last_thread_querynum.load(); - uint64_t cur_query = statistic_data_.thread_querynum.load(); - uint64_t last_time_us = statistic_data_.last_time_us.load(); - if (cur_query < last_query) { - cur_query = last_query; - } - uint64_t delta_query = cur_query - last_query; - uint64_t cur_time_us = slash::NowMicros(); - if (cur_time_us <= last_time_us) { - cur_time_us = last_time_us + 1; - } - uint64_t delta_time_us = cur_time_us - last_time_us; - statistic_data_.last_sec_thread_querynum.store(delta_query - * 1000000 / (delta_time_us)); - statistic_data_.last_thread_querynum.store(cur_query); - statistic_data_.last_time_us.store(cur_time_us); -} - -void PikaServer::UpdateQueryNumAndExecCountTable(const std::string& command) { - std::string cmd(command); - statistic_data_.thread_querynum++; - statistic_data_.exec_count_table[slash::StringToUpper(cmd)]++; -} - -std::unordered_map PikaServer::ServerExecCountTable() { - std::unordered_map res; - for (auto& cmd : statistic_data_.exec_count_table) { - res[cmd.first] = cmd.second.load(); - } - return res; -} - -int PikaServer::SendToPeer() { - return g_pika_rm->ConsumeWriteQueue(); -} - -void PikaServer::SignalAuxiliary() { - pika_auxiliary_thread_->mu_.Lock(); - pika_auxiliary_thread_->cv_.Signal(); - pika_auxiliary_thread_->mu_.Unlock(); -} - -Status PikaServer::TriggerSendBinlogSync() { - return g_pika_rm->WakeUpBinlogSync(); -} - -int PikaServer::PubSubNumPat() { - return pika_pubsub_thread_->PubSubNumPat(); -} - -int PikaServer::Publish(const std::string& channel, const std::string& msg) { - int receivers = pika_pubsub_thread_->Publish(channel, msg); - return receivers; -} - -int PikaServer::UnSubscribe(std::shared_ptr conn, - const std::vector& channels, - bool pattern, - std::vector>* result) { - int subscribed = pika_pubsub_thread_->UnSubscribe(conn, channels, pattern, result); - return subscribed; -} - -void PikaServer::Subscribe(std::shared_ptr conn, - const 
std::vector<std::string>& channels,
-                           bool pattern,
-                           std::vector<std::pair<std::string, int>>* result) {
-  pika_pubsub_thread_->Subscribe(conn, channels, pattern, result);
-}
-
-void PikaServer::PubSubChannels(const std::string& pattern,
-                                std::vector<std::string>* result) {
-  pika_pubsub_thread_->PubSubChannels(pattern, result);
-}
-
-void PikaServer::PubSubNumSub(const std::vector<std::string>& channels,
-                              std::vector<std::pair<std::string, int>>* result) {
-  pika_pubsub_thread_->PubSubNumSub(channels, result);
-}
-
-int PikaServer::SendRedisCommand(const std::string& command, const std::string& key) {
-  // Send command
-  size_t idx = std::hash<std::string>()(key) % redis_senders_.size();
-  redis_senders_[idx]->SendRedisCommand(command);
-  return 0;
-}
-
-void PikaServer::RetransmitData(const std::string& path) {
-
-  blackwidow::BlackWidow *db = new blackwidow::BlackWidow();
-  rocksdb::Status s = db->Open(g_pika_server->bw_options(), path);
-
-  if (!s.ok()) {
-    LOG(FATAL) << "open received database error: " << s.ToString();
-    return;
-  }
-
-  // Init SenderThread
-  int thread_num = g_pika_conf->redis_sender_num();
-  std::string target_host = g_pika_conf->target_redis_host();
-  int target_port = g_pika_conf->target_redis_port();
-  std::string target_pwd = g_pika_conf->target_redis_pwd();
-
-  LOG(INFO) << "open received database success, start retransmit data to redis("
-            << target_host << ":" << target_port << ")";
-
-  std::vector<PikaSender*> pika_senders;
-  std::vector<MigratorThread*> migrators;
-
-  for (int i = 0; i < thread_num; i++) {
-    pika_senders.emplace_back(new PikaSender(target_host, target_port, target_pwd));
-  }
-  migrators.emplace_back(new MigratorThread(db, &pika_senders, blackwidow::kStrings, thread_num));
-  migrators.emplace_back(new MigratorThread(db, &pika_senders, blackwidow::kLists, thread_num));
-  migrators.emplace_back(new MigratorThread(db, &pika_senders, blackwidow::kHashes, thread_num));
-  migrators.emplace_back(new MigratorThread(db, &pika_senders, blackwidow::kSets, thread_num));
-  migrators.emplace_back(new MigratorThread(db, &pika_senders, blackwidow::kZSets, thread_num));
-
-  for (size_t i = 0; i < pika_senders.size(); i++) {
-    pika_senders[i]->StartThread();
-  }
-  for (size_t i = 0; i < migrators.size(); i++) {
-    migrators[i]->StartThread();
-  }
-
-  for (size_t i = 0; i < migrators.size(); i++) {
-    migrators[i]->JoinThread();
-  }
-  for (size_t i = 0; i < pika_senders.size(); i++) {
-    pika_senders[i]->Stop();
-  }
-  for (size_t i = 0; i < pika_senders.size(); i++) {
-    pika_senders[i]->JoinThread();
-  }
-
-  int64_t replies = 0, records = 0;
-  for (size_t i = 0; i < migrators.size(); i++) {
-    records += migrators[i]->num();
-    delete migrators[i];
-  }
-  migrators.clear();
-  for (size_t i = 0; i < pika_senders.size(); i++) {
-    replies += pika_senders[i]->elements();
-    delete pika_senders[i];
-  }
-  pika_senders.clear();
-
-  LOG(INFO) << "=============== Retransmit Finish =====================";
-  LOG(INFO) << "Total records : " << records << " have been scanned";
-  LOG(INFO) << "Total replies : " << replies << " received from redis server";
-  LOG(INFO) << "=======================================================";
-}
-
-/******************************* PRIVATE *******************************/
-
-void PikaServer::DoTimingTask() {
-  // Maybe schedule compactrange
-  AutoCompactRange();
-  // Purge log
-  AutoPurge();
-  // Delete expired dump
-  AutoDeleteExpiredDump();
-  // Check Rsync Status
-  AutoKeepAliveRSync();
-}
-
-void PikaServer::AutoCompactRange() {
-  struct statfs disk_info;
-  int ret = statfs(g_pika_conf->db_path().c_str(), &disk_info);
-  if (ret == -1) {
-    LOG(WARNING) << "statfs error: " << strerror(errno);
-    return;
-  }
-
-  uint64_t total_size = disk_info.f_bsize * disk_info.f_blocks;
-  uint64_t free_size = disk_info.f_bsize * disk_info.f_bfree;
-  std::string ci = g_pika_conf->compact_interval();
-  std::string cc = g_pika_conf->compact_cron();
-
-  if (ci != "") {
-    std::string::size_type slash = ci.find("/");
-    int interval = std::atoi(ci.substr(0, slash).c_str());
-    int usage = std::atoi(ci.substr(slash+1).c_str());
-    struct timeval now;
-    gettimeofday(&now, NULL);
-    if (last_check_compact_time_.tv_sec == 0 ||
-        now.tv_sec - last_check_compact_time_.tv_sec >= interval * 3600) {
-      gettimeofday(&last_check_compact_time_, NULL);
-      if (((double)free_size / total_size) * 100 >= usage) {
-        Status s = DoSameThingSpecificTable(TaskType::kCompactAll);
-        if (s.ok()) {
-          LOG(INFO) << "[Interval]schedule compactRange, freesize: " << free_size/1048576 << "MB, disksize: " << total_size/1048576 << "MB";
-        } else {
-          LOG(INFO) << "[Interval]schedule compactRange Failed, freesize: " << free_size/1048576 << "MB, disksize: " << total_size/1048576
-                    << "MB, error: " << s.ToString();
-        }
-      } else {
-        LOG(WARNING) << "compact-interval failed, because there is not enough disk space left, freesize"
-                     << free_size/1048576 << "MB, disksize: " << total_size/1048576 << "MB";
-      }
-    }
-    return;
-  }
-
-  if (cc != "") {
-    bool have_week = false;
-    std::string compact_cron, week_str;
-    int slash_num = count(cc.begin(), cc.end(), '/');
-    if (slash_num == 2) {
-      have_week = true;
-      std::string::size_type first_slash = cc.find("/");
-      week_str = cc.substr(0, first_slash);
-      compact_cron = cc.substr(first_slash + 1);
-    } else {
-      compact_cron = cc;
-    }
-
-    std::string::size_type colon = compact_cron.find("-");
-    std::string::size_type underline = compact_cron.find("/");
-    int week = have_week ? (std::atoi(week_str.c_str()) % 7) : 0;
-    int start = std::atoi(compact_cron.substr(0, colon).c_str());
-    int end = std::atoi(compact_cron.substr(colon+1, underline).c_str());
-    int usage = std::atoi(compact_cron.substr(underline+1).c_str());
-    std::time_t t = std::time(nullptr);
-    std::tm* t_m = std::localtime(&t);
-
-    bool in_window = false;
-    if (start < end && (t_m->tm_hour >= start && t_m->tm_hour < end)) {
-      in_window = have_week ? (week == t_m->tm_wday) : true;
-    } else if (start > end && ((t_m->tm_hour >= start && t_m->tm_hour < 24) ||
-        (t_m->tm_hour >= 0 && t_m->tm_hour < end))) {
-      in_window = have_week ?
false : true; - } else { - have_scheduled_crontask_ = false; - } - - if (!have_scheduled_crontask_ && in_window) { - if (((double)free_size / total_size) * 100 >= usage) { - Status s = DoSameThingEveryPartition(TaskType::kCompactAll); - if (s.ok()) { - LOG(INFO) << "[Cron]schedule compactRange, freesize: " << free_size/1048576 << "MB, disksize: " << total_size/1048576 << "MB"; - } else { - LOG(INFO) << "[Cron]schedule compactRange Failed, freesize: " << free_size/1048576 << "MB, disksize: " << total_size/1048576 - << "MB, error: " << s.ToString(); - } - have_scheduled_crontask_ = true; - } else { - LOG(WARNING) << "compact-cron failed, because there is not enough disk space left, freesize" - << free_size/1048576 << "MB, disksize: " << total_size/1048576 << "MB"; - } - } - } -} - -void PikaServer::AutoPurge() { - DoSameThingEveryPartition(TaskType::kPurgeLog); -} - -void PikaServer::AutoDeleteExpiredDump() { - std::string db_sync_prefix = g_pika_conf->bgsave_prefix(); - std::string db_sync_path = g_pika_conf->bgsave_path(); - int expiry_days = g_pika_conf->expire_dump_days(); - std::vector dump_dir; - - // Never expire - if (expiry_days <= 0) { - return; - } - - // Dump is not exist - if (!slash::FileExists(db_sync_path)) { - return; - } - - // Directory traversal - if (slash::GetChildren(db_sync_path, dump_dir) != 0) { - return; - } - // Handle dump directory - for (size_t i = 0; i < dump_dir.size(); i++) { - if (dump_dir[i].substr(0, db_sync_prefix.size()) != db_sync_prefix || dump_dir[i].size() != (db_sync_prefix.size() + 8)) { - continue; - } - - std::string str_date = dump_dir[i].substr(db_sync_prefix.size(), (dump_dir[i].size() - db_sync_prefix.size())); - char *end = NULL; - std::strtol(str_date.c_str(), &end, 10); - if (*end != 0) { - continue; - } - - // Parse filename - int dump_year = std::atoi(str_date.substr(0, 4).c_str()); - int dump_month = std::atoi(str_date.substr(4, 2).c_str()); - int dump_day = std::atoi(str_date.substr(6, 2).c_str()); - - time_t t = time(NULL); - struct tm *now = localtime(&t); - int now_year = now->tm_year + 1900; - int now_month = now->tm_mon + 1; - int now_day = now->tm_mday; - - struct tm dump_time, now_time; - - dump_time.tm_year = dump_year; - dump_time.tm_mon = dump_month; - dump_time.tm_mday = dump_day; - dump_time.tm_hour = 0; - dump_time.tm_min = 0; - dump_time.tm_sec = 0; - - now_time.tm_year = now_year; - now_time.tm_mon = now_month; - now_time.tm_mday = now_day; - now_time.tm_hour = 0; - now_time.tm_min = 0; - now_time.tm_sec = 0; - - long dump_timestamp = mktime(&dump_time); - long now_timestamp = mktime(&now_time); - // How many days, 1 day = 86400s - int interval_days = (now_timestamp - dump_timestamp) / 86400; - - if (interval_days >= expiry_days) { - std::string dump_file = db_sync_path + dump_dir[i]; - if (CountSyncSlaves() == 0) { - LOG(INFO) << "Not syncing, delete dump file: " << dump_file; - slash::DeleteDirIfExist(dump_file); - } else { - LOG(INFO) << "Syncing, can not delete " << dump_file << " dump file"; - } - } - } -} - -void PikaServer::AutoKeepAliveRSync() { - if (!pika_rsync_service_->CheckRsyncAlive()) { - LOG(WARNING) << "The Rsync service is down, Try to restart"; - pika_rsync_service_->StartRsync(); - } -} - -void PikaServer::InitBlackwidowOptions() { - - // For rocksdb::Options - bw_options_.options.create_if_missing = true; - bw_options_.options.keep_log_file_num = 10; - bw_options_.options.max_manifest_file_size = 64 * 1024 * 1024; - bw_options_.options.max_log_file_size = 512 * 1024 * 1024; - - 
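// A minimal standalone sketch of the two memtable-memory knobs configured
// just below. `write_buffer_size` bounds a single memtable, while a shared
// rocksdb::WriteBufferManager bounds memtable memory across every RocksDB
// instance holding a reference to it. The sizes here are illustrative, not
// Pika defaults.
#include <memory>
#include <rocksdb/options.h>
#include <rocksdb/write_buffer_manager.h>

rocksdb::Options MakeMemoryBoundedOptions() {
  rocksdb::Options opts;
  opts.write_buffer_size = 256 << 20;  // 256 MiB per memtable (illustrative)
  // One manager may be shared by several DB instances; 10 GiB here is the
  // combined cap, mirroring max-write-buffer-size in pika.conf.
  opts.write_buffer_manager =
      std::make_shared<rocksdb::WriteBufferManager>(10ULL << 30);
  return opts;
}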
bw_options_.options.write_buffer_size = - g_pika_conf->write_buffer_size(); - bw_options_.options.write_buffer_manager.reset( - new rocksdb::WriteBufferManager(g_pika_conf->max_write_buffer_size())); - bw_options_.options.target_file_size_base = - g_pika_conf->target_file_size_base(); - bw_options_.options.max_background_flushes = - g_pika_conf->max_background_flushes(); - bw_options_.options.max_background_compactions = - g_pika_conf->max_background_compactions(); - bw_options_.options.max_open_files = - g_pika_conf->max_cache_files(); - bw_options_.options.max_bytes_for_level_multiplier = - g_pika_conf->max_bytes_for_level_multiplier(); - bw_options_.options.optimize_filters_for_hits = - g_pika_conf->optimize_filters_for_hits(); - bw_options_.options.level_compaction_dynamic_level_bytes = - g_pika_conf->level_compaction_dynamic_level_bytes(); - - - if (g_pika_conf->compression() == "none") { - bw_options_.options.compression = - rocksdb::CompressionType::kNoCompression; - } else if (g_pika_conf->compression() == "snappy") { - bw_options_.options.compression = - rocksdb::CompressionType::kSnappyCompression; - } else if (g_pika_conf->compression() == "zlib") { - bw_options_.options.compression = - rocksdb::CompressionType::kZlibCompression; - } - - // For rocksdb::BlockBasedTableOptions - bw_options_.table_options.block_size = g_pika_conf->block_size(); - bw_options_.table_options.cache_index_and_filter_blocks = - g_pika_conf->cache_index_and_filter_blocks(); - bw_options_.block_cache_size = g_pika_conf->block_cache(); - bw_options_.share_block_cache = g_pika_conf->share_block_cache(); - - if (bw_options_.block_cache_size == 0) { - bw_options_.table_options.no_block_cache = true; - } else if (bw_options_.share_block_cache) { - bw_options_.table_options.block_cache = - rocksdb::NewLRUCache(bw_options_.block_cache_size); - } - - // For Blackwidow small compaction - bw_options_.statistics_max_size = g_pika_conf->max_cache_statistic_keys(); - bw_options_.small_compaction_threshold = - g_pika_conf->small_compaction_threshold(); -} diff --git a/tools/pika_migrate/src/pika_set.cc b/tools/pika_migrate/src/pika_set.cc deleted file mode 100644 index c78784487a..0000000000 --- a/tools/pika_migrate/src/pika_set.cc +++ /dev/null @@ -1,391 +0,0 @@ -// Copyright (c) 2015-present, Qihoo, Inc. All rights reserved. -// This source code is licensed under the BSD-style license found in the -// LICENSE file in the root directory of this source tree. An additional grant -// of patent rights can be found in the PATENTS file in the same directory. 
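// Every command in the file below follows the same two-phase contract:
// DoInitial() validates argv_ and caches the parsed fields, then Do() runs
// against the partition's blackwidow DB and serializes a RESP reply into
// res_. A simplified, self-contained sketch of that shape (EchoCmd and
// MiniRes are illustrative stand-ins, not Pika types):
#include <string>
#include <vector>

struct MiniRes {  // stands in for CmdRes
  std::string buf;
  void AppendError(const std::string& m) { buf = "-ERR " + m + "\r\n"; }
  void AppendString(const std::string& s) {
    buf = "$" + std::to_string(s.size()) + "\r\n" + s + "\r\n";
  }
};

struct EchoCmd {  // stands in for a Cmd subclass
  std::vector<std::string> argv_;
  MiniRes res_;
  std::string msg_;
  void DoInitial() {  // parse and validate only
    if (argv_.size() != 2) {
      res_.AppendError("wrong number of arguments for 'echo'");
      return;
    }
    msg_ = argv_[1];
  }
  void Do() {  // execute and build the RESP reply
    res_.AppendString(msg_);
  }
};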
- -#include "include/pika_set.h" - -#include "slash/include/slash_string.h" - -void SAddCmd::DoInitial() { - if (!CheckArg(argv_.size())) { - res_.SetRes(CmdRes::kWrongNum, kCmdNameSAdd); - return; - } - key_ = argv_[1]; - PikaCmdArgsType::iterator iter = argv_.begin(); - iter++; - iter++; - members_.assign(iter, argv_.end()); - return; -} - -void SAddCmd::Do(std::shared_ptr partition) { - int32_t count = 0; - rocksdb::Status s = partition->db()->SAdd(key_, members_, &count); - if (!s.ok()) { - res_.SetRes(CmdRes::kErrOther, s.ToString()); - return; - } - res_.AppendInteger(count); - return; -} - -void SPopCmd::DoInitial() { - if (!CheckArg(argv_.size())) { - res_.SetRes(CmdRes::kWrongNum, kCmdNameSPop); - return; - } - key_ = argv_[1]; - return; -} - -void SPopCmd::Do(std::shared_ptr partition) { - std::string member; - rocksdb::Status s = partition->db()->SPop(key_, &member); - if (s.ok()) { - res_.AppendStringLen(member.size()); - res_.AppendContent(member); - } else if (s.IsNotFound()) { - res_.AppendContent("$-1"); - } else { - res_.SetRes(CmdRes::kErrOther, s.ToString()); - } - return; -} - -void SCardCmd::DoInitial() { - if (!CheckArg(argv_.size())) { - res_.SetRes(CmdRes::kWrongNum, kCmdNameSCard); - return; - } - key_ = argv_[1]; - return; -} - -void SCardCmd::Do(std::shared_ptr partition) { - int32_t card = 0; - rocksdb::Status s = partition->db()->SCard(key_, &card); - if (s.ok() || s.IsNotFound()) { - res_.AppendInteger(card); - } else { - res_.SetRes(CmdRes::kErrOther, "scard error"); - } - return; -} - -void SMembersCmd::DoInitial() { - if (!CheckArg(argv_.size())) { - res_.SetRes(CmdRes::kWrongNum, kCmdNameSMembers); - return; - } - key_ = argv_[1]; - return; -} - -void SMembersCmd::Do(std::shared_ptr partition) { - std::vector members; - rocksdb::Status s = partition->db()->SMembers(key_, &members); - if (s.ok() || s.IsNotFound()) { - res_.AppendArrayLen(members.size()); - for (const auto& member : members) { - res_.AppendStringLen(member.size()); - res_.AppendContent(member); - } - } else { - res_.SetRes(CmdRes::kErrOther, s.ToString()); - } - return; -} - -void SScanCmd::DoInitial() { - if (!CheckArg(argv_.size())) { - res_.SetRes(CmdRes::kWrongNum, kCmdNameSScan); - return; - } - key_ = argv_[1]; - if (!slash::string2l(argv_[2].data(), argv_[2].size(), &cursor_)) { - res_.SetRes(CmdRes::kWrongNum, kCmdNameSScan); - return; - } - size_t argc = argv_.size(), index = 3; - while (index < argc) { - std::string opt = argv_[index]; - if (!strcasecmp(opt.data(), "match") - || !strcasecmp(opt.data(), "count")) { - index++; - if (index >= argc) { - res_.SetRes(CmdRes::kSyntaxErr); - return; - } - if (!strcasecmp(opt.data(), "match")) { - pattern_ = argv_[index]; - } else if (!slash::string2l(argv_[index].data(), argv_[index].size(), &count_)) { - res_.SetRes(CmdRes::kInvalidInt); - return; - } - } else { - res_.SetRes(CmdRes::kSyntaxErr); - return; - } - index++; - } - if (count_ < 0) { - res_.SetRes(CmdRes::kSyntaxErr); - return; - } - return; -} - -void SScanCmd::Do(std::shared_ptr partition) { - int64_t next_cursor = 0; - std::vector members; - rocksdb::Status s = partition->db()->SScan(key_, cursor_, pattern_, count_, &members, &next_cursor); - - if (s.ok() || s.IsNotFound()) { - res_.AppendContent("*2"); - char buf[32]; - int64_t len = slash::ll2string(buf, sizeof(buf), next_cursor); - res_.AppendStringLen(len); - res_.AppendContent(buf); - - res_.AppendArrayLen(members.size()); - for (const auto& member : members) { - res_.AppendString(member); - } - } else { - 
res_.SetRes(CmdRes::kErrOther, s.ToString()); - } - return; -} - -void SRemCmd::DoInitial() { - if (!CheckArg(argv_.size())) { - res_.SetRes(CmdRes::kWrongNum, kCmdNameSMembers); - return; - } - key_ = argv_[1]; - PikaCmdArgsType::iterator iter = argv_.begin(); - iter++; - members_.assign(++iter, argv_.end()); - return; -} - -void SRemCmd::Do(std::shared_ptr partition) { - int32_t count = 0; - rocksdb::Status s = partition->db()->SRem(key_, members_, &count); - res_.AppendInteger(count); - return; -} - -void SUnionCmd::DoInitial() { - if (!CheckArg(argv_.size())) { - res_.SetRes(CmdRes::kWrongNum, kCmdNameSUnion); - return; - } - PikaCmdArgsType::iterator iter = argv_.begin(); - keys_.assign(++iter, argv_.end()); - return; -} - -void SUnionCmd::Do(std::shared_ptr partition) { - std::vector members; - partition->db()->SUnion(keys_, &members); - res_.AppendArrayLen(members.size()); - for (const auto& member : members) { - res_.AppendStringLen(member.size()); - res_.AppendContent(member); - } - return; -} - -void SUnionstoreCmd::DoInitial() { - if (!CheckArg(argv_.size())) { - res_.SetRes(CmdRes::kWrongNum, kCmdNameSUnionstore); - return; - } - dest_key_ = argv_[1]; - PikaCmdArgsType::iterator iter = argv_.begin(); - iter++; - keys_.assign(++iter, argv_.end()); - return; -} - -void SUnionstoreCmd::Do(std::shared_ptr partition) { - int32_t count = 0; - rocksdb::Status s = partition->db()->SUnionstore(dest_key_, keys_, &count); - if (s.ok()) { - res_.AppendInteger(count); - } else { - res_.SetRes(CmdRes::kErrOther, s.ToString()); - } - return; -} - -void SInterCmd::DoInitial() { - if (!CheckArg(argv_.size())) { - res_.SetRes(CmdRes::kWrongNum, kCmdNameSInter); - return; - } - PikaCmdArgsType::iterator iter = argv_.begin(); - keys_.assign(++iter, argv_.end()); - return; -} - -void SInterCmd::Do(std::shared_ptr partition) { - std::vector members; - partition->db()->SInter(keys_, &members); - res_.AppendArrayLen(members.size()); - for (const auto& member : members) { - res_.AppendStringLen(member.size()); - res_.AppendContent(member); - } - return; -} - -void SInterstoreCmd::DoInitial() { - if (!CheckArg(argv_.size())) { - res_.SetRes(CmdRes::kWrongNum, kCmdNameSInterstore); - return; - } - dest_key_ = argv_[1]; - PikaCmdArgsType::iterator iter = argv_.begin(); - iter++; - keys_.assign(++iter, argv_.end()); - return; -} - -void SInterstoreCmd::Do(std::shared_ptr partition) { - int32_t count = 0; - rocksdb::Status s = partition->db()->SInterstore(dest_key_, keys_, &count); - if (s.ok()) { - res_.AppendInteger(count); - } else { - res_.SetRes(CmdRes::kErrOther, s.ToString()); - } - return; -} - -void SIsmemberCmd::DoInitial() { - if (!CheckArg(argv_.size())) { - res_.SetRes(CmdRes::kWrongNum, kCmdNameSIsmember); - return; - } - key_ = argv_[1]; - member_ = argv_[2]; - return; -} - -void SIsmemberCmd::Do(std::shared_ptr partition) { - int32_t is_member = 0; - partition->db()->SIsmember(key_, member_, &is_member); - if (is_member) { - res_.AppendContent(":1"); - } else { - res_.AppendContent(":0"); - } -} - -void SDiffCmd::DoInitial() { - if (!CheckArg(argv_.size())) { - res_.SetRes(CmdRes::kWrongNum, kCmdNameSDiff); - return; - } - PikaCmdArgsType::iterator iter = argv_.begin(); - keys_.assign(++iter, argv_.end()); - return; -} - -void SDiffCmd::Do(std::shared_ptr partition) { - std::vector members; - partition->db()->SDiff(keys_, &members); - res_.AppendArrayLen(members.size()); - for (const auto& member : members) { - res_.AppendStringLen(member.size()); - res_.AppendContent(member); - } - return; 
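// SDIFF replies with the members of the first set that appear in none of the
// following sets; SDIFFSTORE (next function) writes that result to dest_key_
// and replies with its cardinality instead of the member array. A hedged,
// STL-only sketch of the same set algebra (the inputs in the usage note are
// illustrative):
#include <algorithm>
#include <iterator>
#include <set>
#include <string>

std::set<std::string> Diff(const std::set<std::string>& first,
                           const std::set<std::string>& second) {
  std::set<std::string> out;
  std::set_difference(first.begin(), first.end(),
                      second.begin(), second.end(),
                      std::inserter(out, out.begin()));
  return out;
}
// Diff({"a","b","c"}, {"b","c"}) yields {"a"}, matching
//   SDIFF s1 s2        -> 1) "a"
//   SDIFFSTORE d s1 s2 -> (integer) 1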
-} - -void SDiffstoreCmd::DoInitial() { - if (!CheckArg(argv_.size())) { - res_.SetRes(CmdRes::kWrongNum, kCmdNameSDiffstore); - return; - } - dest_key_ = argv_[1]; - PikaCmdArgsType::iterator iter = argv_.begin(); - iter++; - keys_.assign(++iter, argv_.end()); - return; -} - -void SDiffstoreCmd::Do(std::shared_ptr partition) { - int32_t count = 0; - rocksdb::Status s = partition->db()->SDiffstore(dest_key_, keys_, &count); - if (s.ok()) { - res_.AppendInteger(count); - } else { - res_.SetRes(CmdRes::kErrOther, s.ToString()); - } -} - -void SMoveCmd::DoInitial() { - if (!CheckArg(argv_.size())) { - res_.SetRes(CmdRes::kWrongNum, kCmdNameSMove); - return; - } - src_key_ = argv_[1]; - dest_key_ = argv_[2]; - member_ = argv_[3]; - return; -} - -void SMoveCmd::Do(std::shared_ptr partition) { - int32_t res = 0; - rocksdb::Status s = partition->db()->SMove(src_key_, dest_key_, member_, &res); - if (s.ok() || s.IsNotFound()) { - res_.AppendInteger(res); - } else { - res_.SetRes(CmdRes::kErrOther, s.ToString()); - } - return; -} - -void SRandmemberCmd::DoInitial() { - if (!CheckArg(argv_.size())) { - res_.SetRes(CmdRes::kWrongNum, kCmdNameSRandmember); - return; - } - key_ = argv_[1]; - if (argv_.size() > 3) { - res_.SetRes(CmdRes::kWrongNum, kCmdNameSRandmember); - return; - } else if (argv_.size() == 3) { - if (!slash::string2l(argv_[2].data(), argv_[2].size(), &count_)) { - res_.SetRes(CmdRes::kInvalidInt); - } else { - reply_arr = true;; - } - } - return; -} - -void SRandmemberCmd::Do(std::shared_ptr partition) { - std::vector members; - rocksdb::Status s = partition->db()->SRandmember(key_, count_, &members); - if (s.ok() || s.IsNotFound()) { - if (!reply_arr && members.size()) { - res_.AppendStringLen(members[0].size()); - res_.AppendContent(members[0]); - } else { - res_.AppendArrayLen(members.size()); - for (const auto& member : members) { - res_.AppendStringLen(member.size()); - res_.AppendContent(member); - } - } - } else { - res_.SetRes(CmdRes::kErrOther, s.ToString()); - } - return; -} diff --git a/tools/pika_migrate/src/pika_slot.cc b/tools/pika_migrate/src/pika_slot.cc deleted file mode 100644 index adeecf8bb7..0000000000 --- a/tools/pika_migrate/src/pika_slot.cc +++ /dev/null @@ -1,436 +0,0 @@ -// Copyright (c) 2015-present, Qihoo, Inc. All rights reserved. -// This source code is licensed under the BSD-style license found in the -// LICENSE file in the root directory of this source tree. An additional grant -// of patent rights can be found in the PATENTS file in the same directory. - -#include "include/pika_rm.h" -#include "include/pika_slot.h" -#include "include/pika_table.h" -#include "include/pika_server.h" -#include "include/pika_cmd_table_manager.h" - -extern PikaCmdTableManager* g_pika_cmd_table_manager; -extern PikaReplicaManager* g_pika_rm; -extern PikaServer* g_pika_server; -extern PikaConf* g_pika_conf; - -// SLOTSINFO -void SlotsInfoCmd::DoInitial() { - if (!CheckArg(argv_.size())) { - res_.SetRes(CmdRes::kWrongNum, kCmdNameSlotsInfo); - return; - } - - if (g_pika_conf->classic_mode()) { - res_.SetRes(CmdRes::kErrOther, "SLOTSINFO only support on sharding mode"); - return; - } - - return; -} - -void SlotsInfoCmd::Do(std::shared_ptr partition) { - std::shared_ptr
<Table> table_ptr = g_pika_server->GetTable(g_pika_conf->default_table());
-  if (!table_ptr) {
-    res_.SetRes(CmdRes::kNotFound, kCmdNameSlotsInfo);
-    return;
-  }
-  table_ptr->KeyScan();
-  // this get will get last time scan info
-  KeyScanInfo key_scan_info = table_ptr->GetKeyScanInfo();
-
-  std::map<uint32_t, KeyScanInfo> infos;
-  Status s = table_ptr->GetPartitionsKeyScanInfo(&infos);
-  if (!s.ok()) {
-    res_.SetRes(CmdRes::kInvalidParameter, kCmdNameSlotsInfo);
-    return;
-  }
-  res_.AppendArrayLen(infos.size());
-  for (auto& key_info : infos) {
-    uint64_t total_key_size = 0;
-    for (size_t idx = 0; idx < key_info.second.key_infos.size(); ++idx) {
-      total_key_size += key_info.second.key_infos[idx].keys;
-    }
-    res_.AppendArrayLen(2);
-    res_.AppendInteger(key_info.first);
-    res_.AppendInteger(total_key_size);
-  }
-  return;
-}
-
-// SLOTSHASHKEY key1 [key2 …]
-void SlotsHashKeyCmd::DoInitial() {
-  if (!CheckArg(argv_.size())) {
-    res_.SetRes(CmdRes::kWrongNum, kCmdNameSlotsHashKey);
-    return;
-  }
-
-  if (g_pika_conf->classic_mode()) {
-    res_.SetRes(CmdRes::kErrOther, "SLOTSHASHKEY only support on sharding mode");
-    return;
-  }
-
-  return;
-}
-
-void SlotsHashKeyCmd::Do(std::shared_ptr<Partition> partition) {
-  std::shared_ptr<Table> table_ptr = g_pika_server->GetTable(g_pika_conf->default_table());
-  if (!table_ptr) {
-    res_.SetRes(CmdRes::kInvalidParameter, kCmdNameSlotsHashKey);
-    return;
-  }
-  uint32_t partition_num = table_ptr->PartitionNum();
-  res_.AppendArrayLen(argv_.size() - 1);
-  // iter starts from real key, first item in argv_ is command name
-  std::vector<std::string>::const_iterator iter = argv_.begin() + 1;
-  for (; iter != argv_.end(); iter++) {
-    res_.AppendInteger(g_pika_cmd_table_manager->DistributeKey(*iter, partition_num));
-  }
-  return;
-}
-
-// slotsmgrtslot-async host port timeout maxbulks maxbytes slot numkeys
-void SlotsMgrtSlotAsyncCmd::DoInitial() {
-  if (!CheckArg(argv_.size())) {
-    res_.SetRes(CmdRes::kWrongNum, kCmdNameSlotsMgrtSlotAsync);
-    return;
-  }
-
-  if (g_pika_conf->classic_mode()) {
-    res_.SetRes(CmdRes::kErrOther, "SLOTSMGRTTAGSLOT-ASYNC only support on sharding mode");
-    return;
-  }
-
-  return;
-}
-
-void SlotsMgrtSlotAsyncCmd::Do(std::shared_ptr<Partition> partition) {
-  int64_t moved = 0;
-  int64_t remained = 0;
-  res_.AppendArrayLen(2);
-  res_.AppendInteger(moved);
-  res_.AppendInteger(remained);
-}
-
-// SLOTSMGRTTAGSLOT-ASYNC host port timeout maxbulks maxbytes slot numkeys
-void SlotsMgrtTagSlotAsyncCmd::DoInitial() {
-  if (!CheckArg(argv_.size())) {
-    res_.SetRes(CmdRes::kWrongNum, kCmdNameSlotsMgrtTagSlotAsync);
-    return;
-  }
-
-  if (g_pika_conf->classic_mode()) {
-    res_.SetRes(CmdRes::kErrOther, "SLOTSMGRTTAGSLOT-ASYNC only support on sharding mode");
-    return;
-  }
-
-  PikaCmdArgsType::const_iterator it = argv_.begin() + 1;  // Remember the first args is the opt name
-  dest_ip_ = *it++;
-  slash::StringToLower(dest_ip_);
-
-  std::string str_dest_port = *it++;
-  if (!slash::string2l(str_dest_port.data(), str_dest_port.size(), &dest_port_) || dest_port_ <= 0) {
-    res_.SetRes(CmdRes::kInvalidInt, kCmdNameSlotsMgrtTagSlotAsync);
-    return;
-  }
-
-  if ((dest_ip_ == "127.0.0.1" || dest_ip_ == g_pika_server->host()) && dest_port_ == g_pika_server->port()) {
-    res_.SetRes(CmdRes::kErrOther, "destination address error");
-    return;
-  }
-
-  std::string str_timeout_ms = *it++;
-
-  std::string str_max_bulks = *it++;
-
-  std::string str_max_bytes_ = *it++;
-
-  std::string str_slot_num = *it++;
-  if (!slash::string2l(str_slot_num.data(), str_slot_num.size(), &slot_num_)
-    || slot_num_ < 0 || slot_num_ >= g_pika_conf->default_slot_num()) {
-    res_.SetRes(CmdRes::kInvalidInt, kCmdNameSlotsMgrtTagSlotAsync);
-    return;
-  }
-
-  std::string str_keys_num = *it++;
-  return;
-}
-
-void SlotsMgrtTagSlotAsyncCmd::Do(std::shared_ptr<Partition> partition) {
-  int64_t moved = 0;
-  int64_t remained = 0;
-  // check if this slave node exist.
-  // if exist, dont mark migrate done
-  // cache coming request in codis proxy and keep retrying
-  // Until sync done, new node slaveof no one.
- // mark this migrate done - // proxy retry cached request in new node - bool is_exist = g_pika_rm->CheckPartitionSlaveExist( - RmNode(dest_ip_, dest_port_, g_pika_conf->default_table(), slot_num_)); - if (is_exist) { - remained = 1; - } else { - remained = 0; - } - res_.AppendArrayLen(2); - res_.AppendInteger(moved); - res_.AppendInteger(remained); -} - -// SLOTSSCAN slotnum cursor [COUNT count] -void SlotsScanCmd::DoInitial() { - if (!CheckArg(argv_.size())) { - res_.SetRes(CmdRes::kWrongNum, kCmdNameSlotsScan); - return; - } - - if (g_pika_conf->classic_mode()) { - res_.SetRes(CmdRes::kErrOther, "SLOTSSCAN only support on sharding mode"); - return; - } - - int64_t slotnum; - if (!slash::string2l(argv_[1].data(), argv_[1].size(), &slotnum)) { - res_.SetRes(CmdRes::kInvalidInt, kCmdNameSlotsScan); - return; - } - slotnum_ = static_cast(slotnum); - if (!slash::string2l(argv_[2].data(), argv_[2].size(), &cursor_)) { - res_.SetRes(CmdRes::kInvalidInt, kCmdNameSlotsScan); - return; - } - size_t argc = argv_.size(), index = 3; - - while (index < argc) { - std::string opt = argv_[index]; - if (!strcasecmp(opt.data(), "match") - || !strcasecmp(opt.data(), "count")) { - index++; - if (index >= argc) { - res_.SetRes(CmdRes::kSyntaxErr); - return; - } - if (!strcasecmp(opt.data(), "match")) { - pattern_ = argv_[index]; - } else if (!slash::string2l(argv_[index].data(), argv_[index].size(), &count_) || count_ <= 0) { - res_.SetRes(CmdRes::kInvalidInt); - return; - } - } else { - res_.SetRes(CmdRes::kSyntaxErr); - return; - } - index++; - } - return; -} - -void SlotsScanCmd::Do(std::shared_ptr partition) { - std::shared_ptr
table_ptr = g_pika_server->GetTable(g_pika_conf->default_table()); - if (!table_ptr) { - res_.SetRes(CmdRes::kNotFound, kCmdNameSlotsScan); - return; - } - std::shared_ptr cur_partition = table_ptr->GetPartitionById(slotnum_); - if (!cur_partition) { - res_.SetRes(CmdRes::kNotFound, kCmdNameSlotsScan); - return; - } - std::vector keys; - int64_t cursor_ret = cur_partition->db()->Scan(blackwidow::DataType::kAll, - cursor_, pattern_, count_, &keys); - - res_.AppendArrayLen(2); - - char buf[32]; - int len = slash::ll2string(buf, sizeof(buf), cursor_ret); - res_.AppendStringLen(len); - res_.AppendContent(buf); - - res_.AppendArrayLen(keys.size()); - std::vector::iterator iter; - for (iter = keys.begin(); iter != keys.end(); iter++) { - res_.AppendStringLen(iter->size()); - res_.AppendContent(*iter); - } - return; -} - -// SLOTSDEL slot1 [slot2 …] -void SlotsDelCmd::DoInitial() { - if (!CheckArg(argv_.size())) { - res_.SetRes(CmdRes::kWrongNum, kCmdNameSlotsDel); - return; - } - - if (g_pika_conf->classic_mode()) { - res_.SetRes(CmdRes::kErrOther, "SLOTSDEL only support on sharding mode"); - return; - } - - // iter starts from real key, first item in argv_ is command name - std::vector::const_iterator iter = argv_.begin() + 1; - for (; iter != argv_.end(); iter++) { - int64_t slotnum; - if (!slash::string2l(iter->data(), iter->size(), &slotnum)) { - res_.SetRes(CmdRes::kInvalidInt, kCmdNameSlotsDel); - return; - } - slots_.push_back(static_cast(slotnum)); - } - return; -} - -void SlotsDelCmd::Do(std::shared_ptr partition) { - std::shared_ptr
table_ptr = g_pika_server->GetTable(g_pika_conf->default_table()); - if (!table_ptr) { - res_.SetRes(CmdRes::kNotFound, kCmdNameSlotsDel); - return; - } - if (table_ptr->IsKeyScaning()) { - res_.SetRes(CmdRes::kErrOther, "The keyscan operation is executing, Try again later"); - return; - } - std::vector successed_slots; - for (auto& slotnum : slots_) { - std::shared_ptr cur_partition = table_ptr->GetPartitionById(slotnum); - if (!cur_partition) { - continue; - } - cur_partition->FlushDB(); - successed_slots.push_back(slotnum); - } - res_.AppendArrayLen(successed_slots.size()); - for (auto& slotnum : successed_slots) { - res_.AppendArrayLen(2); - res_.AppendInteger(slotnum); - res_.AppendInteger(0); - } - return; -} - -// SLOTSMGRT-EXEC-WRAPPER $hashkey $command [$arg1 ...] -void SlotsMgrtExecWrapperCmd::DoInitial() { - if (!CheckArg(argv_.size())) { - res_.SetRes(CmdRes::kWrongNum, kCmdNameSlotsMgrtExecWrapper); - return; - } - - if (g_pika_conf->classic_mode()) { - res_.SetRes(CmdRes::kErrOther, "SLOTSMGRT-EXEC-WRAPPER only support on sharding mode"); - return; - } - - PikaCmdArgsType::const_iterator it = argv_.begin() + 1; - key_ = *it++; - //slash::StringToLower(key_); - return; -} - -void SlotsMgrtExecWrapperCmd::Do(std::shared_ptr partition) { - // return 0 means proxy will request to new slot server - // return 1 means proxy will keey trying - // return 2 means return this key directly - res_.AppendArrayLen(2); - res_.AppendInteger(1); - res_.AppendInteger(1); - return; -} - -// slotsmgrt-async-status -void SlotsMgrtAsyncStatusCmd::DoInitial() { - if (!CheckArg(argv_.size())) { - res_.SetRes(CmdRes::kWrongNum, kCmdNameSlotsMgrtAsyncStatus); - return; - } - - if (g_pika_conf->classic_mode()) { - res_.SetRes(CmdRes::kErrOther, "SLOTSMGRT-ASYNC-STATUS only support on sharding mode"); - return; - } - - return; -} - -void SlotsMgrtAsyncStatusCmd::Do(std::shared_ptr partition) { - std::string status; - std::string ip = "none"; - int64_t port = -1, slot = -1, moved = -1, remained = -1; - std::string mstatus = "no"; - res_.AppendArrayLen(5); - status = "dest server: " + ip + ":" + std::to_string(port); - res_.AppendStringLen(status.size()); - res_.AppendContent(status); - status = "slot number: " + std::to_string(slot); - res_.AppendStringLen(status.size()); - res_.AppendContent(status); - status = "migrating : " + mstatus; - res_.AppendStringLen(status.size()); - res_.AppendContent(status); - status = "moved keys : " + std::to_string(moved); - res_.AppendStringLen(status.size()); - res_.AppendContent(status); - status = "remain keys: " + std::to_string(remained); - res_.AppendStringLen(status.size()); - res_.AppendContent(status); - return; -} - -// slotsmgrt-async-cancel -void SlotsMgrtAsyncCancelCmd::DoInitial() { - if (!CheckArg(argv_.size())) { - res_.SetRes(CmdRes::kWrongNum, kCmdNameSlotsMgrtAsyncCancel); - return; - } - - if (g_pika_conf->classic_mode()) { - res_.SetRes(CmdRes::kErrOther, "SLOTSMGRT-ASYNC-CANCEL only support on sharding mode"); - return; - } - - return; -} - -void SlotsMgrtAsyncCancelCmd::Do(std::shared_ptr partition) { - res_.SetRes(CmdRes::kOk); - return; -} - -// slotsmgrtslot host port timeout slot -void SlotsMgrtSlotCmd::DoInitial() { - res_.SetRes(CmdRes::kErrOther, kCmdNameSlotsMgrtSlot + " NOT supported"); - return; -} - -void SlotsMgrtSlotCmd::Do(std::shared_ptr partition) { - return; -} - -// slotsmgrttagslot host port timeout slot -void SlotsMgrtTagSlotCmd::DoInitial() { - res_.SetRes(CmdRes::kErrOther, kCmdNameSlotsMgrtTagSlot + " NOT supported"); - 
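// The *-ASYNC replies above reduce to a single signal for the codis proxy:
// `remained` stays non-zero while the destination node is still attached as a
// sync slave, so the proxy caches and retries requests against the old node;
// once the new node has finished syncing and run `slaveof no one`, `remained`
// drops to 0 and the proxy replays the cached requests against it. A hedged
// sketch of that decision (the function name and bool parameter are
// illustrative stand-ins for g_pika_rm->CheckPartitionSlaveExist()):
#include <cstdint>

int64_t RemainedForProxy(bool dest_attached_as_sync_slave) {
  // 1 -> proxy keeps retrying against the old node; migration not done yet
  // 0 -> slot treated as migrated; proxy redirects to the new node
  return dest_attached_as_sync_slave ? 1 : 0;
}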
return; -} - -void SlotsMgrtTagSlotCmd::Do(std::shared_ptr partition) { - return; -} - -// slotsmgrtone host port timeout key -void SlotsMgrtOneCmd::DoInitial() { - res_.SetRes(CmdRes::kErrOther, kCmdNameSlotsMgrtOne + " NOT supported"); - return; -} - -void SlotsMgrtOneCmd::Do(std::shared_ptr partition) { - return; -} - -// slotsmgrttagone host port timeout key -void SlotsMgrtTagOneCmd::DoInitial() { - res_.SetRes(CmdRes::kErrOther, kCmdNameSlotsMgrtTagOne + " NOT supported"); - return; -} - -void SlotsMgrtTagOneCmd::Do(std::shared_ptr partition) { - return; -} diff --git a/tools/pika_migrate/src/pika_table.cc b/tools/pika_migrate/src/pika_table.cc deleted file mode 100644 index 20d2c9be4f..0000000000 --- a/tools/pika_migrate/src/pika_table.cc +++ /dev/null @@ -1,261 +0,0 @@ -// Copyright (c) 2018-present, Qihoo, Inc. All rights reserved. -// This source code is licensed under the BSD-style license found in the -// LICENSE file in the root directory of this source tree. An additional grant -// of patent rights can be found in the PATENTS file in the same directory. - -#include "include/pika_table.h" - -#include "include/pika_server.h" -#include "include/pika_cmd_table_manager.h" - -extern PikaServer* g_pika_server; -extern PikaCmdTableManager* g_pika_cmd_table_manager; - -std::string TablePath(const std::string& path, - const std::string& table_name) { - char buf[100]; - snprintf(buf, sizeof(buf), "%s/", table_name.data()); - return path + buf; -} - -Table::Table(const std::string& table_name, - uint32_t partition_num, - const std::string& db_path, - const std::string& log_path) : - table_name_(table_name), - partition_num_(partition_num) { - - db_path_ = TablePath(db_path, table_name_); - log_path_ = TablePath(log_path, "log_" + table_name_); - - slash::CreatePath(db_path_); - slash::CreatePath(log_path_); - - pthread_rwlock_init(&partitions_rw_, NULL); -} - -Table::~Table() { - StopKeyScan(); - pthread_rwlock_destroy(&partitions_rw_); - partitions_.clear(); -} - -std::string Table::GetTableName() { - return table_name_; -} - -void Table::BgSaveTable() { - slash::RWLock l(&partitions_rw_, false); - for (const auto& item : partitions_) { - item.second->BgSavePartition(); - } -} - -void Table::CompactTable(const blackwidow::DataType& type) { - slash::RWLock l(&partitions_rw_, false); - for (const auto& item : partitions_) { - item.second->Compact(type); - } -} - -bool Table::FlushPartitionDB() { - slash::RWLock rwl(&partitions_rw_, false); - slash::MutexLock ml(&key_scan_protector_); - if (key_scan_info_.key_scaning_) { - return false; - } - for (const auto& item : partitions_) { - item.second->FlushDB(); - } - return true; -} - -bool Table::FlushPartitionSubDB(const std::string& db_name) { - slash::RWLock rwl(&partitions_rw_, false); - slash::MutexLock ml(&key_scan_protector_); - if (key_scan_info_.key_scaning_) { - return false; - } - for (const auto& item : partitions_) { - item.second->FlushSubDB(db_name); - } - return true; -} - -bool Table::IsBinlogIoError() { - slash::RWLock l(&partitions_rw_, false); - for (const auto& item : partitions_) { - if (item.second->IsBinlogIoError()) { - return true; - } - } - return false; -} - -uint32_t Table::PartitionNum() { - return partition_num_; -} - -Status Table::AddPartitions(const std::set& partition_ids) { - slash::RWLock l(&partitions_rw_, true); - for (const uint32_t& id : partition_ids) { - if (id >= partition_num_) { - return Status::Corruption("partition index out of range[0, " - + std::to_string(partition_num_ - 1) + "]"); - } else if 
(partitions_.find(id) != partitions_.end()) { - return Status::Corruption("partition " - + std::to_string(id) + " already exist"); - } - } - - for (const uint32_t& id : partition_ids) { - partitions_.emplace(id, std::make_shared( - table_name_, id, db_path_, log_path_)); - } - return Status::OK(); -} - -Status Table::RemovePartitions(const std::set& partition_ids) { - slash::RWLock l(&partitions_rw_, true); - for (const uint32_t& id : partition_ids) { - if (partitions_.find(id) == partitions_.end()) { - return Status::Corruption("partition " + std::to_string(id) + " not found"); - } - } - - for (const uint32_t& id : partition_ids) { - partitions_[id]->Leave(); - partitions_.erase(id); - } - return Status::OK(); -} - -void Table::KeyScan() { - slash::MutexLock ml(&key_scan_protector_); - if (key_scan_info_.key_scaning_) { - return; - } - - key_scan_info_.key_scaning_ = true; - key_scan_info_.duration = -2; // duration -2 mean the task in waiting status, - // has not been scheduled for exec - BgTaskArg* bg_task_arg = new BgTaskArg(); - bg_task_arg->table = shared_from_this(); - g_pika_server->KeyScanTaskSchedule(&DoKeyScan, reinterpret_cast(bg_task_arg)); -} - -bool Table::IsKeyScaning() { - slash::MutexLock ml(&key_scan_protector_); - return key_scan_info_.key_scaning_; -} - -void Table::RunKeyScan() { - Status s; - std::vector new_key_infos(5); - - InitKeyScan(); - slash::RWLock rwl(&partitions_rw_, false); - for (const auto& item : partitions_) { - std::vector tmp_key_infos; - s = item.second->GetKeyNum(&tmp_key_infos); - if (s.ok()) { - for (size_t idx = 0; idx < tmp_key_infos.size(); ++idx) { - new_key_infos[idx].keys += tmp_key_infos[idx].keys; - new_key_infos[idx].expires += tmp_key_infos[idx].expires; - new_key_infos[idx].avg_ttl += tmp_key_infos[idx].avg_ttl; - new_key_infos[idx].invaild_keys += tmp_key_infos[idx].invaild_keys; - } - } else { - break; - } - } - key_scan_info_.duration = time(NULL) - key_scan_info_.start_time; - - slash::MutexLock lm(&key_scan_protector_); - if (s.ok()) { - key_scan_info_.key_infos = new_key_infos; - } - key_scan_info_.key_scaning_ = false; -} - -void Table::StopKeyScan() { - slash::RWLock rwl(&partitions_rw_, false); - slash::MutexLock ml(&key_scan_protector_); - for (const auto& item : partitions_) { - item.second->db()->StopScanKeyNum(); - } - key_scan_info_.key_scaning_ = false; -} - -void Table::ScanDatabase(const blackwidow::DataType& type) { - slash::RWLock rwl(&partitions_rw_, false); - for (const auto& item : partitions_) { - printf("\n\npartition name : %s\n", item.second->GetPartitionName().c_str()); - item.second->db()->ScanDatabase(type); - } -} - -Status Table::GetPartitionsKeyScanInfo(std::map* infos) { - slash::RWLock rwl(&partitions_rw_, false); - for (const auto& item : partitions_) { - (*infos)[item.first] = item.second->GetKeyScanInfo(); - } - return Status::OK(); -} - -KeyScanInfo Table::GetKeyScanInfo() { - slash::MutexLock lm(&key_scan_protector_); - return key_scan_info_; -} - -void Table::Compact(const blackwidow::DataType& type) { - slash::RWLock rwl(&partitions_rw_, true); - for (const auto& item : partitions_) { - item.second->Compact(type); - } -} - -void Table::DoKeyScan(void *arg) { - BgTaskArg* bg_task_arg = reinterpret_cast(arg); - bg_task_arg->table->RunKeyScan(); - delete bg_task_arg; -} - -void Table::InitKeyScan() { - key_scan_info_.start_time = time(NULL); - char s_time[32]; - int len = strftime(s_time, sizeof(s_time), "%Y-%m-%d %H:%M:%S", localtime(&key_scan_info_.start_time)); - 
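// The key-scan bookkeeping above encodes task state in key_scan_info_.duration:
// -2 means queued but not yet scheduled, -1 means the scan is running
// (InitKeyScan, continuing right below, sets it), and any value >= 0 is the
// number of seconds the finished scan took (time(NULL) - start_time in
// RunKeyScan). The same state machine with an explicit enum, as a sketch
// (hypothetical names; Pika itself keeps the sentinel integers shown above):
#include <ctime>

enum class ScanState { kQueued = -2, kRunning = -1, kDone = 0 };

struct ScanInfoSketch {
  std::time_t start_time = 0;
  ScanState state = ScanState::kQueued;
  long duration_s = 0;  // only meaningful once state == kDone
  void Start() {
    start_time = std::time(nullptr);
    state = ScanState::kRunning;
  }
  void Finish() {
    duration_s = static_cast<long>(std::time(nullptr) - start_time);
    state = ScanState::kDone;
  }
};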
key_scan_info_.s_start_time.assign(s_time, len); - key_scan_info_.duration = -1; // duration -1 mean the task in processing -} - -void Table::LeaveAllPartition() { - slash::RWLock rwl(&partitions_rw_, true); - for (const auto& item : partitions_) { - item.second->Leave(); - } - partitions_.clear(); -} - -std::set Table::GetPartitionIds() { - std::set ids; - slash::RWLock l(&partitions_rw_, false); - for (const auto& item : partitions_) { - ids.insert(item.first); - } - return ids; -} - -std::shared_ptr Table::GetPartitionById(uint32_t partition_id) { - slash::RWLock rwl(&partitions_rw_, false); - auto iter = partitions_.find(partition_id); - return (iter == partitions_.end()) ? NULL : iter->second; -} - -std::shared_ptr Table::GetPartitionByKey(const std::string& key) { - assert(partition_num_ != 0); - uint32_t index = g_pika_cmd_table_manager->DistributeKey(key, partition_num_); - slash::RWLock rwl(&partitions_rw_, false); - auto iter = partitions_.find(index); - return (iter == partitions_.end()) ? NULL : iter->second; -} diff --git a/tools/pika_migrate/src/pika_zset.cc b/tools/pika_migrate/src/pika_zset.cc deleted file mode 100644 index 23e144567e..0000000000 --- a/tools/pika_migrate/src/pika_zset.cc +++ /dev/null @@ -1,946 +0,0 @@ -// Copyright (c) 2015-present, Qihoo, Inc. All rights reserved. -// This source code is licensed under the BSD-style license found in the -// LICENSE file in the root directory of this source tree. An additional grant -// of patent rights can be found in the PATENTS file in the same directory. - -#include "include/pika_zset.h" - -#include "slash/include/slash_string.h" - -void ZAddCmd::DoInitial() { - if (!CheckArg(argv_.size())) { - res_.SetRes(CmdRes::kWrongNum, kCmdNameZAdd); - return; - } - size_t argc = argv_.size(); - if (argc % 2 == 1) { - res_.SetRes(CmdRes::kSyntaxErr); - return; - } - key_ = argv_[1]; - score_members.clear(); - double score; - size_t index = 2; - for (; index < argc; index += 2) { - if (!slash::string2d(argv_[index].data(), argv_[index].size(), &score)) { - res_.SetRes(CmdRes::kInvalidFloat); - return; - } - score_members.push_back({score, argv_[index + 1]}); - } - return; -} - -void ZAddCmd::Do(std::shared_ptr partition) { - int32_t count = 0; - rocksdb::Status s = partition->db()->ZAdd(key_, score_members, &count); - if (s.ok()) { - res_.AppendInteger(count); - } else { - res_.SetRes(CmdRes::kErrOther, s.ToString()); - } - return; -} - -void ZCardCmd::DoInitial() { - if (!CheckArg(argv_.size())) { - res_.SetRes(CmdRes::kWrongNum, kCmdNameZCard); - return; - } - key_ = argv_[1]; - return; -} - -void ZCardCmd::Do(std::shared_ptr partition) { - int32_t card = 0; - rocksdb::Status s = partition->db()->ZCard(key_, &card); - if (s.ok() || s.IsNotFound()) { - res_.AppendInteger(card); - } else { - res_.SetRes(CmdRes::kErrOther, "zcard error"); - } - return; -} - -void ZScanCmd::DoInitial() { - if (!CheckArg(argv_.size())) { - res_.SetRes(CmdRes::kWrongNum, kCmdNameZScan); - return; - } - key_ = argv_[1]; - if (!slash::string2l(argv_[2].data(), argv_[2].size(), &cursor_)) { - res_.SetRes(CmdRes::kWrongNum, kCmdNameZScan); - return; - } - size_t argc = argv_.size(), index = 3; - while (index < argc) { - std::string opt = argv_[index]; - if (!strcasecmp(opt.data(), "match") - || !strcasecmp(opt.data(), "count")) { - index++; - if (index >= argc) { - res_.SetRes(CmdRes::kSyntaxErr); - return; - } - if (!strcasecmp(opt.data(), "match")) { - pattern_ = argv_[index]; - } else if (!slash::string2l(argv_[index].data(), argv_[index].size(), 
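// ZAddCmd::DoInitial above consumes argv two entries at a time: (score,
// member) pairs start at argv[2], so an odd argument count is a syntax error
// and every even-indexed token must parse as a double. A self-contained
// sketch of that loop (hypothetical helper, using strtod in place of
// slash::string2d):
#include <cstdlib>
#include <string>
#include <vector>

struct ScoreMemberSketch {
  double score;
  std::string member;
};

static bool ParseScoreMembers(const std::vector<std::string>& argv,
                              std::vector<ScoreMemberSketch>* out) {
  // Shape: ZADD key score member [score member ...]
  if (argv.size() < 4 || argv.size() % 2 != 0) return false;
  for (size_t i = 2; i + 1 < argv.size(); i += 2) {
    char* end = nullptr;
    double score = std::strtod(argv[i].c_str(), &end);
    if (end == argv[i].c_str() || *end != '\0') return false;  // not a float
    out->push_back({score, argv[i + 1]});
  }
  return true;
}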
&count_)) { - res_.SetRes(CmdRes::kInvalidInt); - return; - } - } else { - res_.SetRes(CmdRes::kSyntaxErr); - return; - } - index++; - } - if (count_ < 0) { - res_.SetRes(CmdRes::kSyntaxErr); - return; - } - return; -} - -void ZScanCmd::Do(std::shared_ptr partition) { - int64_t next_cursor = 0; - std::vector score_members; - rocksdb::Status s = partition->db()->ZScan(key_, cursor_, pattern_, count_, &score_members, &next_cursor); - if (s.ok() || s.IsNotFound()) { - res_.AppendContent("*2"); - char buf[32]; - int64_t len = slash::ll2string(buf, sizeof(buf), next_cursor); - res_.AppendStringLen(len); - res_.AppendContent(buf); - - res_.AppendArrayLen(score_members.size() * 2); - for (const auto& score_member : score_members) { - res_.AppendString(score_member.member); - - len = slash::d2string(buf, sizeof(buf), score_member.score); - res_.AppendStringLen(len); - res_.AppendContent(buf); - } - } else { - res_.SetRes(CmdRes::kErrOther, s.ToString()); - } - return; -} - -void ZIncrbyCmd::DoInitial() { - if (!CheckArg(argv_.size())) { - res_.SetRes(CmdRes::kWrongNum, kCmdNameZIncrby); - return; - } - key_ = argv_[1]; - if (!slash::string2d(argv_[2].data(), argv_[2].size(), &by_)) { - res_.SetRes(CmdRes::kInvalidFloat); - return; - } - member_ = argv_[3]; - return; -} - -void ZIncrbyCmd::Do(std::shared_ptr partition) { - double score = 0; - rocksdb::Status s = partition->db()->ZIncrby(key_, member_, by_, &score); - if (s.ok()) { - char buf[32]; - int64_t len = slash::d2string(buf, sizeof(buf), score); - res_.AppendStringLen(len); - res_.AppendContent(buf); - } else { - res_.SetRes(CmdRes::kErrOther, s.ToString()); - } - return; -} - -void ZsetRangeParentCmd::DoInitial() { - if (argv_.size() == 5 && !strcasecmp(argv_[4].data(), "withscores")) { - is_ws_ = true; - } else if (argv_.size() != 4) { - res_.SetRes(CmdRes::kSyntaxErr); - return; - } - key_ = argv_[1]; - if (!slash::string2l(argv_[2].data(), argv_[2].size(), &start_)) { - res_.SetRes(CmdRes::kInvalidInt); - return; - } - if (!slash::string2l(argv_[3].data(), argv_[3].size(), &stop_)) { - res_.SetRes(CmdRes::kInvalidInt); - return; - } - return; -} - -void ZRangeCmd::DoInitial() { - if (!CheckArg(argv_.size())) { - res_.SetRes(CmdRes::kWrongNum, kCmdNameZRange); - return; - } - ZsetRangeParentCmd::DoInitial(); -} - -void ZRangeCmd::Do(std::shared_ptr partition) { - std::vector score_members; - rocksdb::Status s = partition->db()->ZRange(key_, start_, stop_, &score_members); - if (s.ok() || s.IsNotFound()) { - if (is_ws_) { - char buf[32]; - int64_t len; - res_.AppendArrayLen(score_members.size() * 2); - for (const auto& sm : score_members) { - res_.AppendStringLen(sm.member.size()); - res_.AppendContent(sm.member); - len = slash::d2string(buf, sizeof(buf), sm.score); - res_.AppendStringLen(len); - res_.AppendContent(buf); - } - } else { - res_.AppendArrayLen(score_members.size()); - for (const auto& sm : score_members) { - res_.AppendStringLen(sm.member.size()); - res_.AppendContent(sm.member); - } - } - } else { - res_.SetRes(CmdRes::kErrOther, s.ToString()); - } - return; -} - -void ZRevrangeCmd::DoInitial() { - if (!CheckArg(argv_.size())) { - res_.SetRes(CmdRes::kWrongNum, kCmdNameZRevrange); - return; - } - ZsetRangeParentCmd::DoInitial(); -} - -void ZRevrangeCmd::Do(std::shared_ptr partition) { - std::vector score_members; - rocksdb::Status s = partition->db()->ZRevrange(key_, start_, stop_, &score_members); - if (s.ok() || s.IsNotFound()) { - if (is_ws_) { - char buf[32]; - int64_t len; - res_.AppendArrayLen(score_members.size() * 
2); - for (const auto& sm : score_members) { - res_.AppendStringLen(sm.member.size()); - res_.AppendContent(sm.member); - len = slash::d2string(buf, sizeof(buf), sm.score); - res_.AppendStringLen(len); - res_.AppendContent(buf); - } - } else { - res_.AppendArrayLen(score_members.size()); - for (const auto& sm : score_members) { - res_.AppendStringLen(sm.member.size()); - res_.AppendContent(sm.member); - } - } - } else { - res_.SetRes(CmdRes::kErrOther, s.ToString()); - } - return; -} - -int32_t DoScoreStrRange(std::string begin_score, std::string end_score, bool *left_close, bool *right_close, double *min_score, double *max_score) { - if (begin_score.size() > 0 && begin_score.at(0) == '(') { - *left_close = false; - begin_score.erase(begin_score.begin()); - } - if (begin_score == "-inf") { - *min_score = blackwidow::ZSET_SCORE_MIN; - } else if (begin_score == "inf" || begin_score == "+inf") { - *min_score = blackwidow::ZSET_SCORE_MAX; - } else if (!slash::string2d(begin_score.data(), begin_score.size(), min_score)) { - return -1; - } - - if (end_score.size() > 0 && end_score.at(0) == '(') { - *right_close = false; - end_score.erase(end_score.begin()); - } - if (end_score == "+inf" || end_score == "inf") { - *max_score = blackwidow::ZSET_SCORE_MAX; - } else if (end_score == "-inf") { - *max_score = blackwidow::ZSET_SCORE_MIN; - } else if (!slash::string2d(end_score.data(), end_score.size(), max_score)) { - return -1; - } - return 0; -} - -static void FitLimit(int64_t &count, int64_t &offset, const int64_t size) { - count = count >= 0 ? count : size; - offset = (offset >= 0 && offset < size) ? offset : size; - count = (offset + count < size) ? count : size - offset; -} - -void ZsetRangebyscoreParentCmd::DoInitial() { - key_ = argv_[1]; - int32_t ret = DoScoreStrRange(argv_[2], argv_[3], &left_close_, &right_close_, &min_score_, &max_score_); - if (ret == -1) { - res_.SetRes(CmdRes::kErrOther, "min or max is not a float"); - return; - } - size_t argc = argv_.size(); - if (argc < 5) { - return; - } - size_t index = 4; - while (index < argc) { - if (!strcasecmp(argv_[index].data(), "withscores")) { - with_scores_ = true; - } else if (!strcasecmp(argv_[index].data(), "limit")) { - if (index + 3 > argc) { - res_.SetRes(CmdRes::kSyntaxErr); - return; - } - index++; - if (!slash::string2l(argv_[index].data(), argv_[index].size(), &offset_)) { - res_.SetRes(CmdRes::kInvalidInt); - return; - } - index++; - if (!slash::string2l(argv_[index].data(), argv_[index].size(), &count_)) { - res_.SetRes(CmdRes::kInvalidInt); - return; - } - } else { - res_.SetRes(CmdRes::kSyntaxErr); - return; - } - index++; - } -} - -void ZRangebyscoreCmd::DoInitial() { - if (!CheckArg(argv_.size())) { - res_.SetRes(CmdRes::kWrongNum, kCmdNameZRangebyscore); - return; - } - ZsetRangebyscoreParentCmd::DoInitial(); -} - -void ZRangebyscoreCmd::Do(std::shared_ptr partition) { - if (min_score_ == blackwidow::ZSET_SCORE_MAX || max_score_ == blackwidow::ZSET_SCORE_MIN) { - res_.AppendContent("*0"); - return; - } - std::vector score_members; - rocksdb::Status s = partition->db()->ZRangebyscore(key_, min_score_, max_score_, left_close_, right_close_, &score_members); - if (!s.ok() && !s.IsNotFound()) { - res_.SetRes(CmdRes::kErrOther, s.ToString()); - return; - } - FitLimit(count_, offset_, score_members.size()); - size_t index = offset_, end = offset_ + count_; - if (with_scores_) { - char buf[32]; - int64_t len; - res_.AppendArrayLen(count_ * 2); - for (; index < end; index++) { - 
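// DoScoreStrRange and FitLimit above define the ZRANGEBYSCORE semantics: the
// range parser handles a leading '(' (exclusive bound) plus -inf/+inf, and
// FitLimit clamps a LIMIT offset/count pair against the fetched result size
// so the reply loop can never run past the end. A runnable copy with two
// worked examples (same arithmetic as the helper above, standalone names):
#include <cstdint>
#include <cstdio>

static void FitLimitSketch(int64_t& count, int64_t& offset, int64_t size) {
  count = count >= 0 ? count : size;                        // negative count => "to the end"
  offset = (offset >= 0 && offset < size) ? offset : size;  // bad offset => empty window
  count = (offset + count < size) ? count : size - offset;  // clamp to the tail
}

int main() {
  int64_t count = 5, offset = 8;
  FitLimitSketch(count, offset, 10);
  std::printf("%lld %lld\n", (long long)count, (long long)offset);  // 2 8: only 2 elements past offset 8
  count = -1; offset = 0;
  FitLimitSketch(count, offset, 10);
  std::printf("%lld %lld\n", (long long)count, (long long)offset);  // 10 0: LIMIT 0 -1 keeps everything
  return 0;
}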
res_.AppendStringLen(score_members[index].member.size()); - res_.AppendContent(score_members[index].member); - len = slash::d2string(buf, sizeof(buf), score_members[index].score); - res_.AppendStringLen(len); - res_.AppendContent(buf); - } - } else { - res_.AppendArrayLen(count_); - for (; index < end; index++) { - res_.AppendStringLen(score_members[index].member.size()); - res_.AppendContent(score_members[index].member); - } - } - return; -} - -void ZRevrangebyscoreCmd::DoInitial() { - if (!CheckArg(argv_.size())) { - res_.SetRes(CmdRes::kWrongNum, kCmdNameZRevrangebyscore); - return; - } - ZsetRangebyscoreParentCmd::DoInitial(); - double tmp_score; - tmp_score = min_score_; - min_score_ = max_score_; - max_score_ = tmp_score; - - bool tmp_close; - tmp_close = left_close_; - left_close_ = right_close_; - right_close_ = tmp_close; -} - -void ZRevrangebyscoreCmd::Do(std::shared_ptr partition) { - if (min_score_ == blackwidow::ZSET_SCORE_MAX || max_score_ == blackwidow::ZSET_SCORE_MIN) { - res_.AppendContent("*0"); - return; - } - std::vector score_members; - rocksdb::Status s = partition->db()->ZRevrangebyscore(key_, min_score_, max_score_, left_close_, right_close_, &score_members); - if (!s.ok() && !s.IsNotFound()) { - res_.SetRes(CmdRes::kErrOther, s.ToString()); - return; - } - FitLimit(count_, offset_, score_members.size()); - int64_t index = offset_, end = offset_ + count_; - if (with_scores_) { - char buf[32]; - int64_t len; - res_.AppendArrayLen(count_ * 2); - for (; index < end; index++) { - res_.AppendStringLen(score_members[index].member.size()); - res_.AppendContent(score_members[index].member); - len = slash::d2string(buf, sizeof(buf), score_members[index].score); - res_.AppendStringLen(len); - res_.AppendContent(buf); - } - } else { - res_.AppendArrayLen(count_); - for (; index < end; index++) { - res_.AppendStringLen(score_members[index].member.size()); - res_.AppendContent(score_members[index].member); - } - } - return; -} - -void ZCountCmd::DoInitial() { - if (!CheckArg(argv_.size())) { - res_.SetRes(CmdRes::kWrongNum, kCmdNameZCount); - return; - } - key_ = argv_[1]; - int32_t ret = DoScoreStrRange(argv_[2], argv_[3], &left_close_, &right_close_, &min_score_, &max_score_); - if (ret == -1) { - res_.SetRes(CmdRes::kErrOther, "min or max is not a float"); - return; - } - return; -} - -void ZCountCmd::Do(std::shared_ptr partition) { - if (min_score_ == blackwidow::ZSET_SCORE_MAX || max_score_ == blackwidow::ZSET_SCORE_MIN) { - res_.AppendContent("*0"); - return; - } - - int32_t count = 0; - rocksdb::Status s = partition->db()->ZCount(key_, min_score_, max_score_, left_close_, right_close_, &count); - if (s.ok() || s.IsNotFound()) { - res_.AppendInteger(count); - } else { - res_.SetRes(CmdRes::kErrOther, s.ToString()); - } - return; -} - -void ZRemCmd::DoInitial() { - if (!CheckArg(argv_.size())) { - res_.SetRes(CmdRes::kWrongNum, kCmdNameZRem); - return; - } - key_ = argv_[1]; - PikaCmdArgsType::iterator iter = argv_.begin() + 2; - members_.assign(iter, argv_.end()); - return; -} - -void ZRemCmd::Do(std::shared_ptr partition) { - int32_t count = 0; - rocksdb::Status s = partition->db()->ZRem(key_, members_, &count); - if (s.ok() || s.IsNotFound()) { - res_.AppendInteger(count); - } else { - res_.SetRes(CmdRes::kErrOther, s.ToString()); - } - return; -} - -void ZsetUIstoreParentCmd::DoInitial() { - dest_key_ = argv_[1]; - if (!slash::string2l(argv_[2].data(), argv_[2].size(), &num_keys_)) { - res_.SetRes(CmdRes::kInvalidInt); - return; - } - if (num_keys_ < 1) { - 
res_.SetRes(CmdRes::kErrOther, "at least 1 input key is needed for ZUNIONSTORE/ZINTERSTORE"); - return; - } - int argc = argv_.size(); - if (argc < num_keys_ + 3) { - res_.SetRes(CmdRes::kSyntaxErr); - return; - } - keys_.assign(argv_.begin() + 3, argv_.begin() + 3 + num_keys_); - weights_.assign(num_keys_, 1); - int index = num_keys_ + 3; - while (index < argc) { - if (!strcasecmp(argv_[index].data(), "weights")) { - index++; - if (argc < index + num_keys_) { - res_.SetRes(CmdRes::kSyntaxErr); - return; - } - double weight; - int base = index; - for (; index < base + num_keys_; index++) { - if (!slash::string2d(argv_[index].data(), argv_[index].size(), &weight)) { - res_.SetRes(CmdRes::kErrOther, "weight value is not a float"); - return; - } - weights_[index-base] = weight; - } - } else if (!strcasecmp(argv_[index].data(), "aggregate")) { - index++; - if (argc < index + 1) { - res_.SetRes(CmdRes::kSyntaxErr); - return; - } - if (!strcasecmp(argv_[index].data(), "sum")) { - aggregate_ = blackwidow::SUM; - } else if (!strcasecmp(argv_[index].data(), "min")) { - aggregate_ = blackwidow::MIN; - } else if (!strcasecmp(argv_[index].data(), "max")) { - aggregate_ = blackwidow::MAX; - } else { - res_.SetRes(CmdRes::kSyntaxErr); - return; - } - index++; - } else { - res_.SetRes(CmdRes::kSyntaxErr); - return; - } - } - return; -} - -void ZUnionstoreCmd::DoInitial() { - if (!CheckArg(argv_.size())) { - res_.SetRes(CmdRes::kWrongNum, kCmdNameZUnionstore); - return; - } - ZsetUIstoreParentCmd::DoInitial(); -} - -void ZUnionstoreCmd::Do(std::shared_ptr partition) { - int32_t count = 0; - rocksdb::Status s = partition->db()->ZUnionstore(dest_key_, keys_, weights_, aggregate_, &count); - if (s.ok()) { - res_.AppendInteger(count); - } else { - res_.SetRes(CmdRes::kErrOther, s.ToString()); - } - return; -} - -void ZInterstoreCmd::DoInitial() { - if (!CheckArg(argv_.size())) { - res_.SetRes(CmdRes::kWrongNum, kCmdNameZInterstore); - return; - } - ZsetUIstoreParentCmd::DoInitial(); - return; -} - -void ZInterstoreCmd::Do(std::shared_ptr partition) { - int32_t count = 0; - rocksdb::Status s = partition->db()->ZInterstore(dest_key_, keys_, weights_, aggregate_, &count); - if (s.ok()) { - res_.AppendInteger(count); - } else { - res_.SetRes(CmdRes::kErrOther, s.ToString()); - } - return; -} - -void ZsetRankParentCmd::DoInitial() { - key_ = argv_[1]; - member_ = argv_[2]; - return; -} - -void ZRankCmd::DoInitial() { - if (!CheckArg(argv_.size())) { - res_.SetRes(CmdRes::kWrongNum, kCmdNameZRank); - return; - } - ZsetRankParentCmd::DoInitial(); -} - -void ZRankCmd::Do(std::shared_ptr partition) { - int32_t rank = 0; - rocksdb::Status s = partition->db()->ZRank(key_, member_, &rank); - if (s.ok()) { - res_.AppendInteger(rank); - } else if (s.IsNotFound()){ - res_.AppendContent("$-1"); - } else { - res_.SetRes(CmdRes::kErrOther, s.ToString()); - } -} - -void ZRevrankCmd::DoInitial() { - if (!CheckArg(argv_.size())) { - res_.SetRes(CmdRes::kWrongNum, kCmdNameZRevrank); - return; - } - ZsetRankParentCmd::DoInitial(); -} - -void ZRevrankCmd::Do(std::shared_ptr partition) { - int32_t revrank = 0; - rocksdb::Status s = partition->db()->ZRevrank(key_, member_, &revrank); - if (s.ok()) { - res_.AppendInteger(revrank); - } else if (s.IsNotFound()){ - res_.AppendContent("$-1"); - } else { - res_.SetRes(CmdRes::kErrOther, s.ToString()); - } -} - -void ZScoreCmd::DoInitial() { - if (!CheckArg(argv_.size())) { - res_.SetRes(CmdRes::kWrongNum, kCmdNameZScore); - return; - } - key_ = argv_[1]; - member_ = argv_[2]; -} - -void 
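// ZsetUIstoreParentCmd::DoInitial above parses the grammar shared by
// ZUNIONSTORE and ZINTERSTORE:
//   dest numkeys key [key ...] [WEIGHTS w [w ...]] [AGGREGATE SUM|MIN|MAX]
// with every weight defaulting to 1 and SUM as the default aggregate. A
// condensed sketch of the same grammar (illustrative names; strcasecmp is
// POSIX, and numeric error handling is reduced to a bool):
#include <strings.h>
#include <cstdlib>
#include <string>
#include <vector>

enum class AggSketch { kSum, kMin, kMax };

struct UIStoreArgsSketch {
  std::string dest;
  std::vector<std::string> keys;
  std::vector<double> weights;
  AggSketch aggregate = AggSketch::kSum;
};

static bool ParseUIStore(const std::vector<std::string>& argv,
                         UIStoreArgsSketch* out) {
  if (argv.size() < 4) return false;
  out->dest = argv[1];
  long num_keys = std::strtol(argv[2].c_str(), nullptr, 10);
  if (num_keys < 1 || argv.size() < static_cast<size_t>(3 + num_keys)) return false;
  out->keys.assign(argv.begin() + 3, argv.begin() + 3 + num_keys);
  out->weights.assign(num_keys, 1.0);  // default weight is 1
  size_t i = 3 + num_keys;
  while (i < argv.size()) {
    if (!strcasecmp(argv[i].c_str(), "weights")) {
      if (argv.size() < i + 1 + num_keys) return false;
      for (long k = 0; k < num_keys; ++k)
        out->weights[k] = std::strtod(argv[i + 1 + k].c_str(), nullptr);
      i += 1 + num_keys;
    } else if (!strcasecmp(argv[i].c_str(), "aggregate") && i + 1 < argv.size()) {
      const std::string& a = argv[i + 1];
      if (!strcasecmp(a.c_str(), "sum")) out->aggregate = AggSketch::kSum;
      else if (!strcasecmp(a.c_str(), "min")) out->aggregate = AggSketch::kMin;
      else if (!strcasecmp(a.c_str(), "max")) out->aggregate = AggSketch::kMax;
      else return false;
      i += 2;
    } else {
      return false;
    }
  }
  return true;
}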
ZScoreCmd::Do(std::shared_ptr partition) { - double score = 0; - rocksdb::Status s = partition->db()->ZScore(key_, member_, &score); - if (s.ok()) { - char buf[32]; - int64_t len = slash::d2string(buf, sizeof(buf), score); - res_.AppendStringLen(len); - res_.AppendContent(buf); - } else if (s.IsNotFound()) { - res_.AppendContent("$-1"); - } else { - res_.SetRes(CmdRes::kErrOther, s.ToString()); - } - return; -} - -static int32_t DoMemberRange(const std::string &raw_min_member, - const std::string &raw_max_member, - bool *left_close, - bool *right_close, - std::string* min_member, - std::string* max_member) { - if (raw_min_member == "-") { - *min_member = "-"; - } else if (raw_min_member == "+") { - *min_member = "+"; - } else { - if (raw_min_member.size() > 0 && raw_min_member.at(0) == '(') { - *left_close = false; - } else if (raw_min_member.size() > 0 && raw_min_member.at(0) == '[') { - *left_close = true; - } else { - return -1; - } - min_member->assign(raw_min_member.begin() + 1, raw_min_member.end()); - } - - if (raw_max_member == "+") { - *max_member = "+"; - } else if (raw_max_member == "-") { - *max_member = "-"; - } else { - if (raw_max_member.size() > 0 && raw_max_member.at(0) == '(') { - *right_close = false; - } else if (raw_max_member.size() > 0 && raw_max_member.at(0) == '[') { - *right_close = true; - } else { - return -1; - } - max_member->assign(raw_max_member.begin() + 1, raw_max_member.end()); - } - return 0; -} - -void ZsetRangebylexParentCmd::DoInitial() { - key_ = argv_[1]; - int32_t ret = DoMemberRange(argv_[2], argv_[3], &left_close_, &right_close_, &min_member_, &max_member_); - if (ret == -1) { - res_.SetRes(CmdRes::kErrOther, "min or max not valid string range item"); - return; - } - size_t argc = argv_.size(); - if (argc == 4) { - return; - } else if (argc != 7 || strcasecmp(argv_[4].data(), "limit")) { - res_.SetRes(CmdRes::kSyntaxErr); - return; - } - if (!slash::string2l(argv_[5].data(), argv_[5].size(), &offset_)) { - res_.SetRes(CmdRes::kInvalidInt); - return; - } - if (!slash::string2l(argv_[6].data(), argv_[6].size(), &count_)) { - res_.SetRes(CmdRes::kInvalidInt); - return; - } -} - -void ZRangebylexCmd::DoInitial() { - if (!CheckArg(argv_.size())) { - res_.SetRes(CmdRes::kWrongNum, kCmdNameZRangebylex); - return; - } - ZsetRangebylexParentCmd::DoInitial(); -} - -void ZRangebylexCmd::Do(std::shared_ptr partition) { - if (min_member_ == "+" || max_member_ == "-") { - res_.AppendContent("*0"); - return; - } - std::vector members; - rocksdb::Status s = partition->db()->ZRangebylex(key_, min_member_, max_member_, left_close_, right_close_, &members); - if (!s.ok() && !s.IsNotFound()) { - res_.SetRes(CmdRes::kErrOther, s.ToString()); - return; - } - FitLimit(count_, offset_, members.size()); - - res_.AppendArrayLen(count_); - size_t index = offset_, end = offset_ + count_; - for (; index < end; index++) { - res_.AppendStringLen(members[index].size()); - res_.AppendContent(members[index]); - } - return; -} - -void ZRevrangebylexCmd::DoInitial() { - if (!CheckArg(argv_.size())) { - res_.SetRes(CmdRes::kWrongNum, kCmdNameZRevrangebylex); - return; - } - ZsetRangebylexParentCmd::DoInitial(); - - std::string tmp_s; - tmp_s = min_member_; - min_member_ = max_member_; - max_member_ = tmp_s; - - bool tmp_b; - tmp_b = left_close_; - left_close_ = right_close_; - right_close_ = tmp_b; -} - -void ZRevrangebylexCmd::Do(std::shared_ptr partition) { - if (min_member_ == "+" || max_member_ == "-") { - res_.AppendContent("*0"); - return; - } - std::vector members; - 
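// DoMemberRange above implements the ZRANGEBYLEX min/max grammar: "-" and "+"
// are the unbounded sentinels, "[member" is an inclusive bound, "(member" is
// exclusive, and anything else earns the "not valid string range item" reply.
// The same rule as a small sketch (hypothetical name):
#include <string>

static bool ParseLexBound(const std::string& raw, bool* inclusive,
                          std::string* member) {
  if (raw == "-" || raw == "+") {  // unbounded sentinel, inclusiveness is moot
    *member = raw;
    *inclusive = true;
    return true;
  }
  if (raw.empty()) return false;
  if (raw[0] == '[') *inclusive = true;
  else if (raw[0] == '(') *inclusive = false;
  else return false;  // e.g. a bare "abc" is rejected
  member->assign(raw.begin() + 1, raw.end());
  return true;
}
// "[abc" -> inclusive "abc"; "(abc" -> exclusive "abc"; "abc" -> error.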
rocksdb::Status s = partition->db()->ZRangebylex(key_, min_member_, max_member_, left_close_, right_close_, &members); - if (!s.ok() && !s.IsNotFound()) { - res_.SetRes(CmdRes::kErrOther, s.ToString()); - return; - } - FitLimit(count_, offset_, members.size()); - - res_.AppendArrayLen(count_); - int64_t index = members.size() - 1 - offset_, end = index - count_; - for (; index > end; index--) { - res_.AppendStringLen(members[index].size()); - res_.AppendContent(members[index]); - } - return; -} - -void ZLexcountCmd::DoInitial() { - if (!CheckArg(argv_.size())) { - res_.SetRes(CmdRes::kWrongNum, kCmdNameZLexcount); - return; - } - key_ = argv_[1]; - int32_t ret = DoMemberRange(argv_[2], argv_[3], &left_close_, &right_close_, &min_member_, &max_member_); - if (ret == -1) { - res_.SetRes(CmdRes::kErrOther, "min or max not valid string range item"); - return; - } -} - -void ZLexcountCmd::Do(std::shared_ptr partition) { - if (min_member_ == "+" || max_member_ == "-") { - res_.AppendContent(":0"); - return; - } - int32_t count = 0; - rocksdb::Status s = partition->db()->ZLexcount(key_, min_member_, max_member_, left_close_, right_close_, &count); - if (!s.ok() && !s.IsNotFound()) { - res_.SetRes(CmdRes::kErrOther, s.ToString()); - return; - } - res_.AppendInteger(count); - return; -} - -void ZRemrangebyrankCmd::DoInitial() { - if (!CheckArg(argv_.size())) { - res_.SetRes(CmdRes::kWrongNum, kCmdNameZRemrangebyrank); - return; - } - key_ = argv_[1]; - if (!slash::string2l(argv_[2].data(), argv_[2].size(), &start_rank_)) { - res_.SetRes(CmdRes::kInvalidInt); - return; - } - if (!slash::string2l(argv_[3].data(), argv_[3].size(), &stop_rank_)) { - res_.SetRes(CmdRes::kInvalidInt); - return; - } -} - -void ZRemrangebyrankCmd::Do(std::shared_ptr partition) { - int32_t count = 0; - rocksdb::Status s = partition->db()->ZRemrangebyrank(key_, start_rank_, stop_rank_, &count); - if (s.ok() || s.IsNotFound()) { - res_.AppendInteger(count); - } else { - res_.SetRes(CmdRes::kErrOther, s.ToString()); - } - return; -} - -void ZRemrangebyscoreCmd::DoInitial() { - if (!CheckArg(argv_.size())) { - res_.SetRes(CmdRes::kWrongNum, kCmdNameZRemrangebyscore); - return; - } - key_ = argv_[1]; - int32_t ret = DoScoreStrRange(argv_[2], argv_[3], &left_close_, &right_close_, &min_score_, &max_score_); - if (ret == -1) { - res_.SetRes(CmdRes::kErrOther, "min or max is not a float"); - return; - } - return; -} - -void ZRemrangebyscoreCmd::Do(std::shared_ptr partition) { - if (min_score_ == blackwidow::ZSET_SCORE_MAX || max_score_ == blackwidow::ZSET_SCORE_MIN) { - res_.AppendContent(":0"); - return; - } - int32_t count = 0; - rocksdb::Status s = partition->db()->ZRemrangebyscore(key_, min_score_, max_score_, left_close_, right_close_, &count); - if (!s.ok() && !s.IsNotFound()) { - res_.SetRes(CmdRes::kErrOther, s.ToString()); - return; - } - res_.AppendInteger(count); - return; -} - -void ZRemrangebylexCmd::DoInitial() { - if (!CheckArg(argv_.size())) { - res_.SetRes(CmdRes::kWrongNum, kCmdNameZRemrangebylex); - return; - } - key_ = argv_[1]; - int32_t ret = DoMemberRange(argv_[2], argv_[3], &left_close_, &right_close_, &min_member_, &max_member_); - if (ret == -1) { - res_.SetRes(CmdRes::kErrOther, "min or max not valid string range item"); - return; - } - return; -} - -void ZRemrangebylexCmd::Do(std::shared_ptr partition) { - if (min_member_ == "+" || max_member_ == "-") { - res_.AppendContent("*0"); - return; - } - int32_t count = 0; - rocksdb::Status s = partition->db()->ZRemrangebylex(key_, min_member_, max_member_, 
left_close_, right_close_, &count); - if (!s.ok() && !s.IsNotFound()) { - res_.SetRes(CmdRes::kErrOther, s.ToString()); - return; - } - res_.AppendInteger(count); - return; -} - - -void ZPopmaxCmd::DoInitial() { - if (!CheckArg(argv_.size())) { - res_.SetRes(CmdRes::kWrongNum, kCmdNameZPopmax); - return; - } - key_ = argv_[1]; - if (argv_.size() == 2) { - count_ = 1; - return; - } - if (!slash::string2ll(argv_[2].data(), argv_[2].size(), (long long*)(&count_))) { - res_.SetRes(CmdRes::kInvalidInt); - return; - } -} - -void ZPopmaxCmd::Do(std::shared_ptr partition) { - std::vector score_members; - rocksdb::Status s = partition->db()->ZPopMax(key_, count_, &score_members); - if (s.ok() || s.IsNotFound()) { - char buf[32]; - int64_t len; - res_.AppendArrayLen(score_members.size() * 2); - for (const auto& sm : score_members) { - res_.AppendString(sm.member); - len = slash::d2string(buf, sizeof(buf), sm.score); - res_.AppendStringLen(len); - res_.AppendContent(buf); - } - } else { - res_.SetRes(CmdRes::kErrOther, s.ToString()); - } -} - - -void ZPopminCmd::DoInitial() { - if (!CheckArg(argv_.size())) { - res_.SetRes(CmdRes::kWrongNum, kCmdNameZPopmin); - return; - } - key_ = argv_[1]; - if (argv_.size() == 2) { - count_ = 1; - return; - } - if (!slash::string2ll(argv_[2].data(), argv_[2].size(), (long long*)(&count_))) { - res_.SetRes(CmdRes::kInvalidInt); - return; - } -} - -void ZPopminCmd::Do(std::shared_ptr partition) { - std::vector score_members; - rocksdb::Status s = partition->db()->ZPopMin(key_, count_, &score_members); - if (s.ok() || s.IsNotFound()) { - char buf[32]; - int64_t len; - res_.AppendArrayLen(score_members.size() * 2); - for (const auto& sm : score_members) { - res_.AppendString(sm.member); - len = slash::d2string(buf, sizeof(buf), sm.score); - res_.AppendStringLen(len); - res_.AppendContent(buf); - } - } else { - res_.SetRes(CmdRes::kErrOther, s.ToString()); - } -} diff --git a/tools/pika_migrate/src/redis_sender.cc b/tools/pika_migrate/src/redis_sender.cc deleted file mode 100644 index 74c41eabbd..0000000000 --- a/tools/pika_migrate/src/redis_sender.cc +++ /dev/null @@ -1,222 +0,0 @@ -// Copyright (c) 2015-present, Qihoo, Inc. All rights reserved. -// This source code is licensed under the BSD-style license found in the -// LICENSE file in the root directory of this source tree. An additional grant -// of patent rights can be found in the PATENTS file in the same directory. 
- - -#include "include/redis_sender.h" - -#include -#include - -#include - -#include "slash/include/xdebug.h" - -static time_t kCheckDiff = 1; - -RedisSender::RedisSender(int id, std::string ip, int64_t port, std::string password): - id_(id), - cli_(NULL), - rsignal_(&commands_mutex_), - wsignal_(&commands_mutex_), - ip_(ip), - port_(port), - password_(password), - should_exit_(false), - cnt_(0), - elements_(0) { - - last_write_time_ = ::time(NULL); -} - -RedisSender::~RedisSender() { - LOG(INFO) << "RedisSender thread " << id_ << " exit!!!"; -} - -void RedisSender::ConnectRedis() { - while (cli_ == NULL) { - // Connect to redis - cli_ = pink::NewRedisCli(); - cli_->set_connect_timeout(1000); - cli_->set_recv_timeout(10000); - cli_->set_send_timeout(10000); - slash::Status s = cli_->Connect(ip_, port_); - if (!s.ok()) { - LOG(WARNING) << "Can not connect to " << ip_ << ":" << port_ << ", status: " << s.ToString(); - delete cli_; - cli_ = NULL; - sleep(3); - continue; - } else { - // Connect success - - // Authentication - if (!password_.empty()) { - pink::RedisCmdArgsType argv, resp; - std::string cmd; - - argv.push_back("AUTH"); - argv.push_back(password_); - pink::SerializeRedisCommand(argv, &cmd); - slash::Status s = cli_->Send(&cmd); - - if (s.ok()) { - s = cli_->Recv(&resp); - if (resp[0] == "OK") { - } else { - LOG(FATAL) << "Connect to redis(" << ip_ << ":" << port_ << ") Invalid password"; - cli_->Close(); - delete cli_; - cli_ = NULL; - should_exit_ = true; - return; - } - } else { - LOG(WARNING) << "send auth failed: " << s.ToString(); - cli_->Close(); - delete cli_; - cli_ = NULL; - continue; - } - } else { - // If forget to input password - pink::RedisCmdArgsType argv, resp; - std::string cmd; - - argv.push_back("PING"); - pink::SerializeRedisCommand(argv, &cmd); - slash::Status s = cli_->Send(&cmd); - - if (s.ok()) { - s = cli_->Recv(&resp); - if (s.ok()) { - if (resp[0] == "NOAUTH Authentication required.") { - LOG(FATAL) << "Ping redis(" << ip_ << ":" << port_ << ") NOAUTH Authentication required"; - cli_->Close(); - delete cli_; - cli_ = NULL; - should_exit_ = true; - return; - } - } else { - LOG(WARNING) << s.ToString(); - cli_->Close(); - delete cli_; - cli_ = NULL; - } - } - } - } - } -} - -void RedisSender::Stop() { - set_should_stop(); - should_exit_ = true; - commands_mutex_.Lock(); - rsignal_.Signal(); - commands_mutex_.Unlock(); -} - -void RedisSender::SendRedisCommand(const std::string &command) { - commands_mutex_.Lock(); - if (commands_queue_.size() < 100000) { - commands_queue_.push(command); - rsignal_.Signal(); - commands_mutex_.Unlock(); - return; - } - - while (commands_queue_.size() > 100000) { - wsignal_.Wait(); - } - commands_queue_.push(command); - rsignal_.Signal(); - commands_mutex_.Unlock(); -} - -int RedisSender::SendCommand(std::string &command) { - time_t now = ::time(NULL); - if (kCheckDiff < now - last_write_time_) { - int ret = cli_->CheckAliveness(); - if (ret < 0) { - ConnectRedis(); - } - last_write_time_ = now; - } - - // Send command - int idx = 0; - do { - slash::Status s = cli_->Send(&command); - if (s.ok()) { - return 0; - } - - LOG(WARNING) << "RedisSender " << id_ << "fails to send redis command " << command << ", times: " << idx + 1 << ", error: " << s.ToString(); - - cli_->Close(); - delete cli_; - cli_ = NULL; - ConnectRedis(); - } while(++idx < 3); - - return -1; -} - -void *RedisSender::ThreadMain() { - LOG(INFO) << "Start redis sender " << id_ << " thread..."; - // sleep(15); - int ret = 0; - - ConnectRedis(); - - while 
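// RedisSender::SendRedisCommand above is the producer half of a bounded
// queue: past 100000 pending commands the caller blocks on wsignal_ until the
// sender drains, and the sender thread in ThreadMain (continuing below) polls
// rsignal_ with a 100 ms timed wait so it can also notice shutdown. The same
// shape with C++11 primitives, as a sketch (kMaxQueue and all names here are
// illustrative, not Pika's API):
#include <chrono>
#include <condition_variable>
#include <cstddef>
#include <mutex>
#include <queue>
#include <string>

class BoundedCommandQueue {
 public:
  // Producer: blocks while the queue is at capacity (backpressure).
  void Push(std::string cmd) {
    std::unique_lock<std::mutex> l(mu_);
    not_full_.wait(l, [this] { return q_.size() < kMaxQueue; });
    q_.push(std::move(cmd));
    not_empty_.notify_one();  // wake the sender thread
  }
  // Consumer: bounded wait so the caller can poll a shutdown flag between
  // attempts, mirroring rsignal_.TimedWait(100) in ThreadMain.
  bool Pop(std::string* cmd) {
    std::unique_lock<std::mutex> l(mu_);
    not_empty_.wait_for(l, std::chrono::milliseconds(100),
                        [this] { return !q_.empty(); });
    if (q_.empty()) return false;  // timed out; caller re-checks its flag
    *cmd = std::move(q_.front());
    q_.pop();
    not_full_.notify_one();  // release one blocked producer
    return true;
  }
 private:
  static constexpr std::size_t kMaxQueue = 100000;
  std::mutex mu_;
  std::condition_variable not_full_, not_empty_;
  std::queue<std::string> q_;
};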
(!should_exit_) { - commands_mutex_.Lock(); - while (commands_queue_.size() == 0 && !should_exit_) { - rsignal_.TimedWait(100); - // rsignal_.Wait(); - } - // if (commands_queue_.size() == 0 && should_exit_) { - if (should_exit_) { - commands_mutex_.Unlock(); - break; - } - - if (commands_queue_.size() == 0) { - commands_mutex_.Unlock(); - continue; - } - commands_mutex_.Unlock(); - - // get redis command - std::string command; - commands_mutex_.Lock(); - command = commands_queue_.front(); - // printf("%d, command %s\n", id_, command.c_str()); - elements_++; - commands_queue_.pop(); - wsignal_.Signal(); - commands_mutex_.Unlock(); - ret = SendCommand(command); - if (ret == 0) { - cnt_++; - } - - if (cnt_ >= 200) { - for(; cnt_ > 0; cnt_--) { - cli_->Recv(NULL); - } - } - } - for(; cnt_ > 0; cnt_--) { - cli_->Recv(NULL); - } - - LOG(INFO) << "RedisSender thread " << id_ << " complete"; - delete cli_; - cli_ = NULL; - return NULL; -} - diff --git a/tools/pika_migrate/tests/README.md b/tools/pika_migrate/tests/README.md deleted file mode 100644 index 47b371236f..0000000000 --- a/tools/pika_migrate/tests/README.md +++ /dev/null @@ -1,4 +0,0 @@ -### Pika test - - * Run `./pikatests.sh geo` in the Pika root directory to test Pika's GEO commands - * For `unit/type` interfaces such as SET, run `./pikatests.sh type/set` to test Pika's SET commands diff --git a/tools/pika_migrate/tests/assets/default.conf b/tools/pika_migrate/tests/assets/default.conf deleted file mode 100644 index c9cb8183f7..0000000000 --- a/tools/pika_migrate/tests/assets/default.conf +++ /dev/null @@ -1,79 +0,0 @@ -# Pika port -port : 9221 -# Thread Number -thread-num : 1 -# Sync Thread Number -sync-thread-num : 6 -# Item count of sync thread queue -sync-buffer-size : 10 -# Pika log path -log-path : ./log/ -# Pika glog level: only INFO and ERROR -loglevel : info -# Pika db path -db-path : ./db/ -# Pika write-buffer-size -write-buffer-size : 268435456 -# Pika timeout -timeout : 60 -# Requirepass -requirepass : -# Masterauth -masterauth : -# Userpass -userpass : -# User Blacklist -userblacklist : -# Dump Prefix -dump-prefix : -# daemonize [yes | no] -#daemonize : yes -# slotmigrate [yes | no] -#slotmigrate : no -# Dump Path -dump-path : ./dump/ -# Expire-dump-days -dump-expire : 0 -# pidfile Path -pidfile : ./pika.pid -# Max Connection -maxclients : 20000 -# the per file size of sst to compact, default is 2M -target-file-size-base : 20971520 -# Expire-logs-days -expire-logs-days : 7 -# Expire-logs-nums -expire-logs-nums : 10 -# Root-connection-num -root-connection-num : 2 -# Slowlog-log-slower-than -slowlog-log-slower-than : 10000 -# slave-read-only(yes/no, 1/0) -slave-read-only : 0 -# Pika db sync path -db-sync-path : ./dbsync/ -# db sync speed(MB) max is set to 125MB, min is set to 0, and if below 0 or above 125, the value will be adjusted to 125 -db-sync-speed : -1 -# network interface -# network-interface : eth1 -# replication -# slaveof : master-ip:master-port -# CronTask, format: start:end-ratio, like 02-04/60, pika will check to schedule compaction between 2 and 4 o'clock every day -# if the freesize/disksize > 60% -# compact-cron : - -################### -## Critical Settings -################### -# binlog file size: default is 100M, limited in [1K, 2G] -binlog-file-size : 104857600 -# Compression -compression : snappy -# max-background-flushes: default is 1, limited in [1, 4] -max-background-flushes : 1 -# max-background-compactions: default is 1, limited in [1, 4] -max-background-compactions : 2 -# max-cache-files default is 5000 -max-cache-files : 5000 -# max_bytes_for_level_multiplier: 
default is 10, you can change it to 5 -max-bytes-for-level-multiplier : 10 diff --git a/tools/pika_migrate/tests/assets/encodings.rdb b/tools/pika_migrate/tests/assets/encodings.rdb deleted file mode 100644 index 9fd9b705d16220065ee117a1c1c094f40fb122f2..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 667 zcmbVKu}UOC5UuKJ7oAzb-~v%NHW5S&dS+bpFfmX#Qw_|N?w&>$M|aur5EcU?;j)7> zGV&XY4SF>ZBR^rkz`zf1tzKQwOdP0r-Cfn)uU@~+^|g&HrPRU;knEK1xGIbhsS?(T zOp!5$Ql%uLiRxVU_K~%gGG1r2V@aAV)EAeQf1$=iXe|~B7q#x40{=g8Nhbr35aH0(af04| z31?FkfXdOIL*v>$`m`ZiW?H~$fQQSK0B})18Q{+2^#ErNo(A|lG8gy_c?x0;Mx(`{ zoa#1QiP|F?FVK2I8DyCB=mk$S8nlC&4|~3w8;|#Ox&QtGwHmXU=HND1)gUq}8+2xM zS?Yc@4yO2OwUpuPICQ~2@KI=m_~m`huJS+FRQ_l1RQDc;oztC1%JaPY56LTRm2LPS*34j0q diff --git a/tools/pika_migrate/tests/helpers/bg_complex_data.tcl b/tools/pika_migrate/tests/helpers/bg_complex_data.tcl deleted file mode 100644 index dffd7c6688..0000000000 --- a/tools/pika_migrate/tests/helpers/bg_complex_data.tcl +++ /dev/null @@ -1,10 +0,0 @@ -source tests/support/redis.tcl -source tests/support/util.tcl - -proc bg_complex_data {host port db ops} { - set r [redis $host $port] - $r select $db - createComplexDataset $r $ops -} - -bg_complex_data [lindex $argv 0] [lindex $argv 1] [lindex $argv 2] [lindex $argv 3] diff --git a/tools/pika_migrate/tests/helpers/gen_write_load.tcl b/tools/pika_migrate/tests/helpers/gen_write_load.tcl deleted file mode 100644 index 6d1a345166..0000000000 --- a/tools/pika_migrate/tests/helpers/gen_write_load.tcl +++ /dev/null @@ -1,15 +0,0 @@ -source tests/support/redis.tcl - -proc gen_write_load {host port seconds} { - set start_time [clock seconds] - set r [redis $host $port 1] - $r select 9 - while 1 { - $r set [expr rand()] [expr rand()] - if {[clock seconds]-$start_time > $seconds} { - exit 0 - } - } -} - -gen_write_load [lindex $argv 0] [lindex $argv 1] [lindex $argv 2] diff --git a/tools/pika_migrate/tests/instances.tcl b/tools/pika_migrate/tests/instances.tcl deleted file mode 100644 index 426508f33a..0000000000 --- a/tools/pika_migrate/tests/instances.tcl +++ /dev/null @@ -1,407 +0,0 @@ -# Multi-instance test framework. -# This is used in order to test Sentinel and Redis Cluster, and provides -# basic capabilities for spawning and handling N parallel Redis / Sentinel -# instances. -# -# Copyright (C) 2014 Salvatore Sanfilippo antirez@gmail.com -# This software is released under the BSD License. See the COPYING file for -# more information. - -package require Tcl 8.5 - -set tcl_precision 17 -source ../support/redis.tcl -source ../support/util.tcl -source ../support/server.tcl -source ../support/test.tcl - -set ::verbose 0 -set ::pause_on_error 0 -set ::simulate_error 0 -set ::sentinel_instances {} -set ::redis_instances {} -set ::sentinel_base_port 20000 -set ::redis_base_port 30000 -set ::pids {} ; # We kill everything at exit -set ::dirs {} ; # We remove all the temp dirs at exit -set ::run_matching {} ; # If non empty, only tests matching pattern are run. - -if {[catch {cd tmp}]} { - puts "tmp directory not found." - puts "Please run this test from the Redis source root." - exit 1 -} - -# Spawn a redis or sentinel instance, depending on 'type'. -proc spawn_instance {type base_port count {conf {}}} { - for {set j 0} {$j < $count} {incr j} { - set port [find_available_port $base_port] - incr base_port - puts "Starting $type #$j at port $port" - - # Create a directory for this instance. 
- set dirname "${type}_${j}" - lappend ::dirs $dirname - catch {exec rm -rf $dirname} - file mkdir $dirname - - # Write the instance config file. - set cfgfile [file join $dirname $type.conf] - set cfg [open $cfgfile w] - puts $cfg "port $port" - puts $cfg "dir ./$dirname" - puts $cfg "logfile log.txt" - # Add additional config files - foreach directive $conf { - puts $cfg $directive - } - close $cfg - - # Finally exec it and remember the pid for later cleanup. - if {$type eq "redis"} { - set prgname redis-server - } elseif {$type eq "sentinel"} { - set prgname redis-sentinel - } else { - error "Unknown instance type." - } - set pid [exec ../../../src/${prgname} $cfgfile &] - lappend ::pids $pid - - # Check availability - if {[server_is_up 127.0.0.1 $port 100] == 0} { - abort_sentinel_test "Problems starting $type #$j: ping timeout" - } - - # Push the instance into the right list - set link [redis 127.0.0.1 $port] - $link reconnect 1 - lappend ::${type}_instances [list \ - pid $pid \ - host 127.0.0.1 \ - port $port \ - link $link \ - ] - } -} - -proc cleanup {} { - puts "Cleaning up..." - foreach pid $::pids { - catch {exec kill -9 $pid} - } - foreach dir $::dirs { - catch {exec rm -rf $dir} - } -} - -proc abort_sentinel_test msg { - puts "WARNING: Aborting the test." - puts ">>>>>>>> $msg" - cleanup - exit 1 -} - -proc parse_options {} { - for {set j 0} {$j < [llength $::argv]} {incr j} { - set opt [lindex $::argv $j] - set val [lindex $::argv [expr $j+1]] - if {$opt eq "--single"} { - incr j - set ::run_matching "*${val}*" - } elseif {$opt eq "--pause-on-error"} { - set ::pause_on_error 1 - } elseif {$opt eq "--fail"} { - set ::simulate_error 1 - } elseif {$opt eq "--help"} { - puts "Hello, I'm sentinel.tcl and I run Sentinel unit tests." - puts "\nOptions:" - puts "--single Only runs tests specified by pattern." - puts "--pause-on-error Pause for manual inspection on error." - puts "--fail Simulate a test failure." - puts "--help Shows this help." - exit 0 - } else { - puts "Unknown option $opt" - exit 1 - } - } -} - -# If --pause-on-error option was passed at startup this function is called -# on error in order to give the developer a chance to understand more about -# the error condition while the instances are still running. 
-proc pause_on_error {} { - puts "" - puts [colorstr yellow "*** Please inspect the error now ***"] - puts "\nType \"continue\" to resume the test, \"help\" for help screen.\n" - while 1 { - puts -nonewline "> " - flush stdout - set line [gets stdin] - set argv [split $line " "] - set cmd [lindex $argv 0] - if {$cmd eq {continue}} { - break - } elseif {$cmd eq {show-redis-logs}} { - set count 10 - if {[lindex $argv 1] ne {}} {set count [lindex $argv 1]} - foreach_redis_id id { - puts "=== REDIS $id ====" - puts [exec tail -$count redis_$id/log.txt] - puts "---------------------\n" - } - } elseif {$cmd eq {show-sentinel-logs}} { - set count 10 - if {[lindex $argv 1] ne {}} {set count [lindex $argv 1]} - foreach_sentinel_id id { - puts "=== SENTINEL $id ====" - puts [exec tail -$count sentinel_$id/log.txt] - puts "---------------------\n" - } - } elseif {$cmd eq {ls}} { - foreach_redis_id id { - puts -nonewline "Redis $id" - set errcode [catch { - set str {} - append str "@[RI $id tcp_port]: " - append str "[RI $id role] " - if {[RI $id role] eq {slave}} { - append str "[RI $id master_host]:[RI $id master_port]" - } - set str - } retval] - if {$errcode} { - puts " -- $retval" - } else { - puts $retval - } - } - foreach_sentinel_id id { - puts -nonewline "Sentinel $id" - set errcode [catch { - set str {} - append str "@[SI $id tcp_port]: " - append str "[join [S $id sentinel get-master-addr-by-name mymaster]]" - set str - } retval] - if {$errcode} { - puts " -- $retval" - } else { - puts $retval - } - } - } elseif {$cmd eq {help}} { - puts "ls List Sentinel and Redis instances." - puts "show-sentinel-logs \[N\] Show latest N lines of logs." - puts "show-redis-logs \[N\] Show latest N lines of logs." - puts "S cmd ... arg Call command in Sentinel ." - puts "R cmd ... arg Call command in Redis ." - puts "SI Show Sentinel INFO ." - puts "RI Show Sentinel INFO ." - puts "continue Resume test." - } else { - set errcode [catch {eval $line} retval] - if {$retval ne {}} {puts "$retval"} - } - } -} - -# We redefine 'test' as for Sentinel we don't use the server-client -# architecture for the test, everything is sequential. -proc test {descr code} { - set ts [clock format [clock seconds] -format %H:%M:%S] - puts -nonewline "$ts> $descr: " - flush stdout - - if {[catch {set retval [uplevel 1 $code]} error]} { - if {[string match "assertion:*" $error]} { - set msg [string range $error 10 end] - puts [colorstr red $msg] - if {$::pause_on_error} pause_on_error - puts "(Jumping to next unit after error)" - return -code continue - } else { - # Re-raise, let handler up the stack take care of this. - error $error $::errorInfo - } - } else { - puts [colorstr green OK] - } -} - -proc run_tests {} { - set tests [lsort [glob ../tests/*]] - foreach test $tests { - if {$::run_matching ne {} && [string match $::run_matching $test] == 0} { - continue - } - if {[file isdirectory $test]} continue - puts [colorstr yellow "Testing unit: [lindex [file split $test] end]"] - source $test - } -} - -# The "S" command is used to interact with the N-th Sentinel. -# The general form is: -# -# S command arg arg arg ... -# -# Example to ping the Sentinel 0 (first instance): S 0 PING -proc S {n args} { - set s [lindex $::sentinel_instances $n] - [dict get $s link] {*}$args -} - -# Like R but to chat with Redis instances. 
-proc R {n args} { - set r [lindex $::redis_instances $n] - [dict get $r link] {*}$args -} - -proc get_info_field {info field} { - set fl [string length $field] - append field : - foreach line [split $info "\n"] { - set line [string trim $line "\r\n "] - if {[string range $line 0 $fl] eq $field} { - return [string range $line [expr {$fl+1}] end] - } - } - return {} -} - -proc SI {n field} { - get_info_field [S $n info] $field -} - -proc RI {n field} { - get_info_field [R $n info] $field -} - -# Iterate over IDs of sentinel or redis instances. -proc foreach_instance_id {instances idvar code} { - upvar 1 $idvar id - for {set id 0} {$id < [llength $instances]} {incr id} { - set errcode [catch {uplevel 1 $code} result] - if {$errcode == 1} { - error $result $::errorInfo $::errorCode - } elseif {$errcode == 4} { - continue - } elseif {$errcode == 3} { - break - } elseif {$errcode != 0} { - return -code $errcode $result - } - } -} - -proc foreach_sentinel_id {idvar code} { - set errcode [catch {uplevel 1 [list foreach_instance_id $::sentinel_instances $idvar $code]} result] - return -code $errcode $result -} - -proc foreach_redis_id {idvar code} { - set errcode [catch {uplevel 1 [list foreach_instance_id $::redis_instances $idvar $code]} result] - return -code $errcode $result -} - -# Get the specific attribute of the specified instance type, id. -proc get_instance_attrib {type id attrib} { - dict get [lindex [set ::${type}_instances] $id] $attrib -} - -# Set the specific attribute of the specified instance type, id. -proc set_instance_attrib {type id attrib newval} { - set d [lindex [set ::${type}_instances] $id] - dict set d $attrib $newval - lset ::${type}_instances $id $d -} - -# Create a master-slave cluster of the given number of total instances. -# The first instance "0" is the master, all others are configured as -# slaves. -proc create_redis_master_slave_cluster n { - foreach_redis_id id { - if {$id == 0} { - # Our master. - R $id slaveof no one - R $id flushall - } elseif {$id < $n} { - R $id slaveof [get_instance_attrib redis 0 host] \ - [get_instance_attrib redis 0 port] - } else { - # Instances not part of the cluster. - R $id slaveof no one - } - } - # Wait for all the slaves to sync. - wait_for_condition 1000 50 { - [RI 0 connected_slaves] == ($n-1) - } else { - fail "Unable to create a master-slaves cluster." - } -} - -proc get_instance_id_by_port {type port} { - foreach_${type}_id id { - if {[get_instance_attrib $type $id port] == $port} { - return $id - } - } - fail "Instance $type port $port not found." -} - -# Kill an instance of the specified type/id with SIGKILL. -# This function will mark the instance PID as -1 to remember that this instance -# is no longer running and will remove its PID from the list of pids that -# we kill at cleanup. -# -# The instance can be restarted with restart-instance. -proc kill_instance {type id} { - set pid [get_instance_attrib $type $id pid] - if {$pid == -1} { - error "You tried to kill $type $id twice." - } - exec kill -9 $pid - set_instance_attrib $type $id pid -1 - set_instance_attrib $type $id link you_tried_to_talk_with_killed_instance - - # Remove the PID from the list of pids to kill at exit. - set ::pids [lsearch -all -inline -not -exact $::pids $pid] -} - -# Return true of the instance of the specified type/id is killed. 
-proc instance_is_killed {type id} { - set pid [get_instance_attrib $type $id pid] - expr {$pid == -1} -} - -# Restart an instance previously killed by kill_instance -proc restart_instance {type id} { - set dirname "${type}_${id}" - set cfgfile [file join $dirname $type.conf] - set port [get_instance_attrib $type $id port] - - # Execute the instance with its old setup and append the new pid - # file for cleanup. - if {$type eq "redis"} { - set prgname redis-server - } else { - set prgname redis-sentinel - } - set pid [exec ../../../src/${prgname} $cfgfile &] - set_instance_attrib $type $id pid $pid - lappend ::pids $pid - - # Check that the instance is running - if {[server_is_up 127.0.0.1 $port 100] == 0} { - abort_sentinel_test "Problems starting $type #$id: ping timeout" - } - - # Connect with it with a fresh link - set link [redis 127.0.0.1 $port] - $link reconnect 1 - set_instance_attrib $type $id link $link -} - diff --git a/tools/pika_migrate/tests/integration/aof-race.tcl b/tools/pika_migrate/tests/integration/aof-race.tcl deleted file mode 100644 index 207f207393..0000000000 --- a/tools/pika_migrate/tests/integration/aof-race.tcl +++ /dev/null @@ -1,35 +0,0 @@ -set defaults { appendonly {yes} appendfilename {appendonly.aof} } -set server_path [tmpdir server.aof] -set aof_path "$server_path/appendonly.aof" - -proc start_server_aof {overrides code} { - upvar defaults defaults srv srv server_path server_path - set config [concat $defaults $overrides] - start_server [list overrides $config] $code -} - -tags {"aof"} { - # Specific test for a regression where internal buffers were not properly - # cleaned after a child responsible for an AOF rewrite exited. This buffer - # was subsequently appended to the new AOF, resulting in duplicate commands. - start_server_aof [list dir $server_path] { - set client [redis [srv host] [srv port]] - set bench [open "|src/redis-benchmark -q -p [srv port] -c 20 -n 20000 incr foo" "r+"] - after 100 - - # Benchmark should be running by now: start background rewrite - $client bgrewriteaof - - # Read until benchmark pipe reaches EOF - while {[string length [read $bench]] > 0} {} - - # Check contents of foo - assert_equal 20000 [$client get foo] - } - - # Restart server to replay AOF - start_server_aof [list dir $server_path] { - set client [redis [srv host] [srv port]] - assert_equal 20000 [$client get foo] - } -} diff --git a/tools/pika_migrate/tests/integration/aof.tcl b/tools/pika_migrate/tests/integration/aof.tcl deleted file mode 100644 index 7ea70943c6..0000000000 --- a/tools/pika_migrate/tests/integration/aof.tcl +++ /dev/null @@ -1,236 +0,0 @@ -set defaults { appendonly {yes} appendfilename {appendonly.aof} } -set server_path [tmpdir server.aof] -set aof_path "$server_path/appendonly.aof" - -proc append_to_aof {str} { - upvar fp fp - puts -nonewline $fp $str -} - -proc create_aof {code} { - upvar fp fp aof_path aof_path - set fp [open $aof_path w+] - uplevel 1 $code - close $fp -} - -proc start_server_aof {overrides code} { - upvar defaults defaults srv srv server_path server_path - set config [concat $defaults $overrides] - set srv [start_server [list overrides $config]] - uplevel 1 $code - kill_server $srv -} - -tags {"aof"} { - ## Server can start when aof-load-truncated is set to yes and AOF - ## is truncated, with an incomplete MULTI block. 
- create_aof { - append_to_aof [formatCommand set foo hello] - append_to_aof [formatCommand multi] - append_to_aof [formatCommand set bar world] - } - - start_server_aof [list dir $server_path aof-load-truncated yes] { - test "Unfinished MULTI: Server should start if load-truncated is yes" { - assert_equal 1 [is_alive $srv] - } - } - - ## Should also start with truncated AOF without incomplete MULTI block. - create_aof { - append_to_aof [formatCommand incr foo] - append_to_aof [formatCommand incr foo] - append_to_aof [formatCommand incr foo] - append_to_aof [formatCommand incr foo] - append_to_aof [formatCommand incr foo] - append_to_aof [string range [formatCommand incr foo] 0 end-1] - } - - start_server_aof [list dir $server_path aof-load-truncated yes] { - test "Short read: Server should start if load-truncated is yes" { - assert_equal 1 [is_alive $srv] - } - - set client [redis [dict get $srv host] [dict get $srv port]] - - test "Truncated AOF loaded: we expect foo to be equal to 5" { - assert {[$client get foo] eq "5"} - } - - test "Append a new command after loading an incomplete AOF" { - $client incr foo - } - } - - # Now the AOF file is expected to be correct - start_server_aof [list dir $server_path aof-load-truncated yes] { - test "Short read + command: Server should start" { - assert_equal 1 [is_alive $srv] - } - - set client [redis [dict get $srv host] [dict get $srv port]] - - test "Truncated AOF loaded: we expect foo to be equal to 6 now" { - assert {[$client get foo] eq "6"} - } - } - - ## Test that the server exits when the AOF contains a format error - create_aof { - append_to_aof [formatCommand set foo hello] - append_to_aof "!!!" - append_to_aof [formatCommand set foo hello] - } - - start_server_aof [list dir $server_path aof-load-truncated yes] { - test "Bad format: Server should have logged an error" { - set pattern "*Bad file format reading the append only file*" - set retry 10 - while {$retry} { - set result [exec tail -n1 < [dict get $srv stdout]] - if {[string match $pattern $result]} { - break - } - incr retry -1 - after 1000 - } - if {$retry == 0} { - error "assertion:expected error not found on config file" - } - } - } - - ## Test the server doesn't start when the AOF contains an unfinished MULTI - create_aof { - append_to_aof [formatCommand set foo hello] - append_to_aof [formatCommand multi] - append_to_aof [formatCommand set bar world] - } - - start_server_aof [list dir $server_path aof-load-truncated no] { - test "Unfinished MULTI: Server should have logged an error" { - set pattern "*Unexpected end of file reading the append only file*" - set retry 10 - while {$retry} { - set result [exec tail -n1 < [dict get $srv stdout]] - if {[string match $pattern $result]} { - break - } - incr retry -1 - after 1000 - } - if {$retry == 0} { - error "assertion:expected error not found on config file" - } - } - } - - ## Test that the server exits when the AOF contains a short read - create_aof { - append_to_aof [formatCommand set foo hello] - append_to_aof [string range [formatCommand set bar world] 0 end-1] - } - - start_server_aof [list dir $server_path aof-load-truncated no] { - test "Short read: Server should have logged an error" { - set pattern "*Unexpected end of file reading the append only file*" - set retry 10 - while {$retry} { - set result [exec tail -n1 < [dict get $srv stdout]] - if {[string match $pattern $result]} { - break - } - incr retry -1 - after 1000 - } - if {$retry == 0} { - error "assertion:expected error not found on config file" - } - } - } - - 
## Test that redis-check-aof indeed sees this AOF is not valid - test "Short read: Utility should confirm the AOF is not valid" { - catch { - exec src/redis-check-aof $aof_path - } result - assert_match "*not valid*" $result - } - - test "Short read: Utility should be able to fix the AOF" { - set result [exec src/redis-check-aof --fix $aof_path << "y\n"] - assert_match "*Successfully truncated AOF*" $result - } - - ## Test that the server can be started using the truncated AOF - start_server_aof [list dir $server_path aof-load-truncated no] { - test "Fixed AOF: Server should have been started" { - assert_equal 1 [is_alive $srv] - } - - test "Fixed AOF: Keyspace should contain values that were parseable" { - set client [redis [dict get $srv host] [dict get $srv port]] - wait_for_condition 50 100 { - [catch {$client ping} e] == 0 - } else { - fail "Loading DB is taking too much time." - } - assert_equal "hello" [$client get foo] - assert_equal "" [$client get bar] - } - } - - ## Test that SPOP (that modifies the client's argc/argv) is correctly free'd - create_aof { - append_to_aof [formatCommand sadd set foo] - append_to_aof [formatCommand sadd set bar] - append_to_aof [formatCommand spop set] - } - - start_server_aof [list dir $server_path aof-load-truncated no] { - test "AOF+SPOP: Server should have been started" { - assert_equal 1 [is_alive $srv] - } - - test "AOF+SPOP: Set should have 1 member" { - set client [redis [dict get $srv host] [dict get $srv port]] - wait_for_condition 50 100 { - [catch {$client ping} e] == 0 - } else { - fail "Loading DB is taking too much time." - } - assert_equal 1 [$client scard set] - } - } - - ## Test that EXPIREAT is loaded correctly - create_aof { - append_to_aof [formatCommand rpush list foo] - append_to_aof [formatCommand expireat list 1000] - append_to_aof [formatCommand rpush list bar] - } - - start_server_aof [list dir $server_path aof-load-truncated no] { - test "AOF+EXPIRE: Server should have been started" { - assert_equal 1 [is_alive $srv] - } - - test "AOF+EXPIRE: List should be empty" { - set client [redis [dict get $srv host] [dict get $srv port]] - wait_for_condition 50 100 { - [catch {$client ping} e] == 0 - } else { - fail "Loading DB is taking too much time." 
- } - assert_equal 0 [$client llen list] - } - } - - start_server {overrides {appendonly {yes} appendfilename {appendonly.aof}}} { - test {Redis should not try to convert DEL into EXPIREAT for EXPIRE -1} { - r set x 10 - r expire x -1 - } - } -} diff --git a/tools/pika_migrate/tests/integration/convert-zipmap-hash-on-load.tcl b/tools/pika_migrate/tests/integration/convert-zipmap-hash-on-load.tcl deleted file mode 100644 index cf3577f284..0000000000 --- a/tools/pika_migrate/tests/integration/convert-zipmap-hash-on-load.tcl +++ /dev/null @@ -1,35 +0,0 @@ -# Copy RDB with zipmap encoded hash to server path -set server_path [tmpdir "server.convert-zipmap-hash-on-load"] - -exec cp -f tests/assets/hash-zipmap.rdb $server_path -start_server [list overrides [list "dir" $server_path "dbfilename" "hash-zipmap.rdb"]] { - test "RDB load zipmap hash: converts to ziplist" { - r select 0 - - assert_match "*ziplist*" [r debug object hash] - assert_equal 2 [r hlen hash] - assert_match {v1 v2} [r hmget hash f1 f2] - } -} - -exec cp -f tests/assets/hash-zipmap.rdb $server_path -start_server [list overrides [list "dir" $server_path "dbfilename" "hash-zipmap.rdb" "hash-max-ziplist-entries" 1]] { - test "RDB load zipmap hash: converts to hash table when hash-max-ziplist-entries is exceeded" { - r select 0 - - assert_match "*hashtable*" [r debug object hash] - assert_equal 2 [r hlen hash] - assert_match {v1 v2} [r hmget hash f1 f2] - } -} - -exec cp -f tests/assets/hash-zipmap.rdb $server_path -start_server [list overrides [list "dir" $server_path "dbfilename" "hash-zipmap.rdb" "hash-max-ziplist-value" 1]] { - test "RDB load zipmap hash: converts to hash table when hash-max-ziplist-value is exceeded" { - r select 0 - - assert_match "*hashtable*" [r debug object hash] - assert_equal 2 [r hlen hash] - assert_match {v1 v2} [r hmget hash f1 f2] - } -} diff --git a/tools/pika_migrate/tests/integration/rdb.tcl b/tools/pika_migrate/tests/integration/rdb.tcl deleted file mode 100644 index 71876a6edc..0000000000 --- a/tools/pika_migrate/tests/integration/rdb.tcl +++ /dev/null @@ -1,98 +0,0 @@ -set server_path [tmpdir "server.rdb-encoding-test"] - -# Copy RDB with different encodings in server path -exec cp tests/assets/encodings.rdb $server_path - -start_server [list overrides [list "dir" $server_path "dbfilename" "encodings.rdb"]] { - test "RDB encoding loading test" { - r select 0 - csvdump r - } {"compressible","string","aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa" -"hash","hash","a","1","aa","10","aaa","100","b","2","bb","20","bbb","200","c","3","cc","30","ccc","300","ddd","400","eee","5000000000", -"hash_zipped","hash","a","1","b","2","c","3", -"list","list","1","2","3","a","b","c","100000","6000000000","1","2","3","a","b","c","100000","6000000000","1","2","3","a","b","c","100000","6000000000", -"list_zipped","list","1","2","3","a","b","c","100000","6000000000", -"number","string","10" -"set","set","1","100000","2","3","6000000000","a","b","c", -"set_zipped_1","set","1","2","3","4", -"set_zipped_2","set","100000","200000","300000","400000", -"set_zipped_3","set","1000000000","2000000000","3000000000","4000000000","5000000000","6000000000", -"string","string","Hello World" -"zset","zset","a","1","b","2","c","3","aa","10","bb","20","cc","30","aaa","100","bbb","200","ccc","300","aaaa","1000","cccc","123456789","bbbb","5000000000", -"zset_zipped","zset","a","1","b","2","c","3", -} -} - -set server_path [tmpdir 
"server.rdb-startup-test"] - -start_server [list overrides [list "dir" $server_path]] { - test {Server started empty with non-existing RDB file} { - r debug digest - } {0000000000000000000000000000000000000000} - # Save an RDB file, needed for the next test. - r save -} - -start_server [list overrides [list "dir" $server_path]] { - test {Server started empty with empty RDB file} { - r debug digest - } {0000000000000000000000000000000000000000} -} - -# Helper function to start a server and kill it, just to check the error -# logged. -set defaults {} -proc start_server_and_kill_it {overrides code} { - upvar defaults defaults srv srv server_path server_path - set config [concat $defaults $overrides] - set srv [start_server [list overrides $config]] - uplevel 1 $code - kill_server $srv -} - -# Make the RDB file unreadable -file attributes [file join $server_path dump.rdb] -permissions 0222 - -# Detect root account (it is able to read the file even with 002 perm) -set isroot 0 -catch { - open [file join $server_path dump.rdb] - set isroot 1 -} - -# Now make sure the server aborted with an error -if {!$isroot} { - start_server_and_kill_it [list "dir" $server_path] { - test {Server should not start if RDB file can't be open} { - wait_for_condition 50 100 { - [string match {*Fatal error loading*} \ - [exec tail -n1 < [dict get $srv stdout]]] - } else { - fail "Server started even if RDB was unreadable!" - } - } - } -} - -# Fix permissions of the RDB file. -file attributes [file join $server_path dump.rdb] -permissions 0666 - -# Corrupt its CRC64 checksum. -set filesize [file size [file join $server_path dump.rdb]] -set fd [open [file join $server_path dump.rdb] r+] -fconfigure $fd -translation binary -seek $fd -8 end -puts -nonewline $fd "foobar00"; # Corrupt the checksum -close $fd - -# Now make sure the server aborted with an error -start_server_and_kill_it [list "dir" $server_path] { - test {Server should not start if RDB is corrupted} { - wait_for_condition 50 100 { - [string match {*RDB checksum*} \ - [exec tail -n1 < [dict get $srv stdout]]] - } else { - fail "Server started even if RDB was corrupted!" 
- } - } -} diff --git a/tools/pika_migrate/tests/integration/redis-cli.tcl b/tools/pika_migrate/tests/integration/redis-cli.tcl deleted file mode 100644 index 40e4222e3e..0000000000 --- a/tools/pika_migrate/tests/integration/redis-cli.tcl +++ /dev/null @@ -1,208 +0,0 @@ -start_server {tags {"cli"}} { - proc open_cli {} { - set ::env(TERM) dumb - set fd [open [format "|src/redis-cli -p %d -n 9" [srv port]] "r+"] - fconfigure $fd -buffering none - fconfigure $fd -blocking false - fconfigure $fd -translation binary - assert_equal "redis> " [read_cli $fd] - set _ $fd - } - - proc close_cli {fd} { - close $fd - } - - proc read_cli {fd} { - set buf [read $fd] - while {[string length $buf] == 0} { - # wait some time and try again - after 10 - set buf [read $fd] - } - set _ $buf - } - - proc write_cli {fd buf} { - puts $fd $buf - flush $fd - } - - # Helpers to run tests in interactive mode - proc run_command {fd cmd} { - write_cli $fd $cmd - set lines [split [read_cli $fd] "\n"] - assert_equal "redis> " [lindex $lines end] - join [lrange $lines 0 end-1] "\n" - } - - proc test_interactive_cli {name code} { - set ::env(FAKETTY) 1 - set fd [open_cli] - test "Interactive CLI: $name" $code - close_cli $fd - unset ::env(FAKETTY) - } - - # Helpers to run tests where stdout is not a tty - proc write_tmpfile {contents} { - set tmp [tmpfile "cli"] - set tmpfd [open $tmp "w"] - puts -nonewline $tmpfd $contents - close $tmpfd - set _ $tmp - } - - proc _run_cli {opts args} { - set cmd [format "src/redis-cli -p %d -n 9 $args" [srv port]] - foreach {key value} $opts { - if {$key eq "pipe"} { - set cmd "sh -c \"$value | $cmd\"" - } - if {$key eq "path"} { - set cmd "$cmd < $value" - } - } - - set fd [open "|$cmd" "r"] - fconfigure $fd -buffering none - fconfigure $fd -translation binary - set resp [read $fd 1048576] - close $fd - set _ $resp - } - - proc run_cli {args} { - _run_cli {} {*}$args - } - - proc run_cli_with_input_pipe {cmd args} { - _run_cli [list pipe $cmd] {*}$args - } - - proc run_cli_with_input_file {path args} { - _run_cli [list path $path] {*}$args - } - - proc test_nontty_cli {name code} { - test "Non-interactive non-TTY CLI: $name" $code - } - - # Helpers to run tests where stdout is a tty (fake it) - proc test_tty_cli {name code} { - set ::env(FAKETTY) 1 - test "Non-interactive TTY CLI: $name" $code - unset ::env(FAKETTY) - } - - test_interactive_cli "INFO response should be printed raw" { - set lines [split [run_command $fd info] "\n"] - foreach line $lines { - assert [regexp {^[a-z0-9_]+:[a-z0-9_]+} $line] - } - } - - test_interactive_cli "Status reply" { - assert_equal "OK" [run_command $fd "set key foo"] - } - - test_interactive_cli "Integer reply" { - assert_equal "(integer) 1" [run_command $fd "incr counter"] - } - - test_interactive_cli "Bulk reply" { - r set key foo - assert_equal "\"foo\"" [run_command $fd "get key"] - } - - test_interactive_cli "Multi-bulk reply" { - r rpush list foo - r rpush list bar - assert_equal "1. \"foo\"\n2. 
\"bar\"" [run_command $fd "lrange list 0 -1"] - } - - test_interactive_cli "Parsing quotes" { - assert_equal "OK" [run_command $fd "set key \"bar\""] - assert_equal "bar" [r get key] - assert_equal "OK" [run_command $fd "set key \" bar \""] - assert_equal " bar " [r get key] - assert_equal "OK" [run_command $fd "set key \"\\\"bar\\\"\""] - assert_equal "\"bar\"" [r get key] - assert_equal "OK" [run_command $fd "set key \"\tbar\t\""] - assert_equal "\tbar\t" [r get key] - - # invalid quotation - assert_equal "Invalid argument(s)" [run_command $fd "get \"\"key"] - assert_equal "Invalid argument(s)" [run_command $fd "get \"key\"x"] - - # quotes after the argument are weird, but should be allowed - assert_equal "OK" [run_command $fd "set key\"\" bar"] - assert_equal "bar" [r get key] - } - - test_tty_cli "Status reply" { - assert_equal "OK\n" [run_cli set key bar] - assert_equal "bar" [r get key] - } - - test_tty_cli "Integer reply" { - r del counter - assert_equal "(integer) 1\n" [run_cli incr counter] - } - - test_tty_cli "Bulk reply" { - r set key "tab\tnewline\n" - assert_equal "\"tab\\tnewline\\n\"\n" [run_cli get key] - } - - test_tty_cli "Multi-bulk reply" { - r del list - r rpush list foo - r rpush list bar - assert_equal "1. \"foo\"\n2. \"bar\"\n" [run_cli lrange list 0 -1] - } - - test_tty_cli "Read last argument from pipe" { - assert_equal "OK\n" [run_cli_with_input_pipe "echo foo" set key] - assert_equal "foo\n" [r get key] - } - - test_tty_cli "Read last argument from file" { - set tmpfile [write_tmpfile "from file"] - assert_equal "OK\n" [run_cli_with_input_file $tmpfile set key] - assert_equal "from file" [r get key] - } - - test_nontty_cli "Status reply" { - assert_equal "OK" [run_cli set key bar] - assert_equal "bar" [r get key] - } - - test_nontty_cli "Integer reply" { - r del counter - assert_equal "1" [run_cli incr counter] - } - - test_nontty_cli "Bulk reply" { - r set key "tab\tnewline\n" - assert_equal "tab\tnewline\n" [run_cli get key] - } - - test_nontty_cli "Multi-bulk reply" { - r del list - r rpush list foo - r rpush list bar - assert_equal "foo\nbar" [run_cli lrange list 0 -1] - } - - test_nontty_cli "Read last argument from pipe" { - assert_equal "OK" [run_cli_with_input_pipe "echo foo" set key] - assert_equal "foo\n" [r get key] - } - - test_nontty_cli "Read last argument from file" { - set tmpfile [write_tmpfile "from file"] - assert_equal "OK" [run_cli_with_input_file $tmpfile set key] - assert_equal "from file" [r get key] - } -} diff --git a/tools/pika_migrate/tests/integration/replication-2.tcl b/tools/pika_migrate/tests/integration/replication-2.tcl deleted file mode 100644 index 9446e5cd91..0000000000 --- a/tools/pika_migrate/tests/integration/replication-2.tcl +++ /dev/null @@ -1,87 +0,0 @@ -start_server {tags {"repl"}} { - start_server {} { - test {First server should have role slave after SLAVEOF} { - r -1 slaveof [srv 0 host] [srv 0 port] - after 1000 - s -1 role - } {slave} - - test {If min-slaves-to-write is honored, write is accepted} { - r config set min-slaves-to-write 1 - r config set min-slaves-max-lag 10 - r set foo 12345 - wait_for_condition 50 100 { - [r -1 get foo] eq {12345} - } else { - fail "Write did not reached slave" - } - } - - test {No write if min-slaves-to-write is < attached slaves} { - r config set min-slaves-to-write 2 - r config set min-slaves-max-lag 10 - catch {r set foo 12345} err - set err - } {NOREPLICAS*} - - test {If min-slaves-to-write is honored, write is accepted (again)} { - r config set min-slaves-to-write 1 - r 
config set min-slaves-max-lag 10 - r set foo 12345 - wait_for_condition 50 100 { - [r -1 get foo] eq {12345} - } else { - fail "Write did not reach the slave" - } - } - - test {No write if the slave lag is > min-slaves-max-lag} { - r -1 deferred 1 - r config set min-slaves-to-write 1 - r config set min-slaves-max-lag 2 - r -1 debug sleep 6 - assert {[r set foo 12345] eq {OK}} - after 4000 - catch {r set foo 12345} err - assert {[r -1 read] eq {OK}} - r -1 deferred 0 - set err - } {NOREPLICAS*} - - test {min-slaves-to-write is ignored by slaves} { - r config set min-slaves-to-write 1 - r config set min-slaves-max-lag 10 - r -1 config set min-slaves-to-write 1 - r -1 config set min-slaves-max-lag 10 - r set foo aaabbb - wait_for_condition 50 100 { - [r -1 get foo] eq {aaabbb} - } else { - fail "Write did not reach the slave" - } - } - - # Fix parameters for the next test to work - r config set min-slaves-to-write 0 - r -1 config set min-slaves-to-write 0 - r flushall - - test {MASTER and SLAVE dataset should be identical after complex ops} { - createComplexDataset r 10000 - after 500 - if {[r debug digest] ne [r -1 debug digest]} { - set csv1 [csvdump r] - set csv2 [csvdump {r -1}] - set fd [open /tmp/repldump1.txt w] - puts -nonewline $fd $csv1 - close $fd - set fd [open /tmp/repldump2.txt w] - puts -nonewline $fd $csv2 - close $fd - puts "Master - Slave inconsistency" - puts "Run diff -u against /tmp/repldump*.txt for more info" - } - assert_equal [r debug digest] [r -1 debug digest] - } - } -} diff --git a/tools/pika_migrate/tests/integration/replication-3.tcl b/tools/pika_migrate/tests/integration/replication-3.tcl deleted file mode 100644 index 0fcbad45b0..0000000000 --- a/tools/pika_migrate/tests/integration/replication-3.tcl +++ /dev/null @@ -1,101 +0,0 @@ -start_server {tags {"repl"}} { - start_server {} { - test {First server should have role slave after SLAVEOF} { - r -1 slaveof [srv 0 host] [srv 0 port] - wait_for_condition 50 100 { - [s -1 master_link_status] eq {up} - } else { - fail "Replication not started." - } - } - - if {$::accurate} {set numops 50000} else {set numops 5000} - - test {MASTER and SLAVE consistency with expire} { - createComplexDataset r $numops useexpire - after 4000 ;# Make sure everything expired before taking the digest - r keys * ;# Force DEL synthesizing to slave - after 1000 ;# Wait another second. Now everything should be fine. - if {[r debug digest] ne [r -1 debug digest]} { - set csv1 [csvdump r] - set csv2 [csvdump {r -1}] - set fd [open /tmp/repldump1.txt w] - puts -nonewline $fd $csv1 - close $fd - set fd [open /tmp/repldump2.txt w] - puts -nonewline $fd $csv2 - close $fd - puts "Master - Slave inconsistency" - puts "Run diff -u against /tmp/repldump*.txt for more info" - } - assert_equal [r debug digest] [r -1 debug digest] - } - } -} - -start_server {tags {"repl"}} { - start_server {} { - test {First server should have role slave after SLAVEOF} { - r -1 slaveof [srv 0 host] [srv 0 port] - wait_for_condition 50 100 { - [s -1 master_link_status] eq {up} - } else { - fail "Replication not started." - } - } - - set numops 20000 ;# Enough to trigger the Script Cache LRU eviction. - - # While we are at it, enable AOF to test that it stays consistent as well - # after the test. 
- r config set appendonly yes - - test {MASTER and SLAVE consistency with EVALSHA replication} { - array set oldsha {} - for {set j 0} {$j < $numops} {incr j} { - set key "key:$j" - # Make sure to create scripts that have different SHA1s - set script "return redis.call('incr','$key')" - set sha1 [r eval "return redis.sha1hex(\"$script\")" 0] - set oldsha($j) $sha1 - r eval $script 0 - set res [r evalsha $sha1 0] - assert {$res == 2} - # Additionally call one of the old scripts as well, at random. - set res [r evalsha $oldsha([randomInt $j]) 0] - assert {$res > 2} - - # Trigger an AOF rewrite while we are half-way, this also - # forces the flush of the script cache, and we will cover - # more code as a result. - if {$j == $numops / 2} { - catch {r bgrewriteaof} - } - } - - wait_for_condition 50 100 { - [r dbsize] == $numops && - [r -1 dbsize] == $numops && - [r debug digest] eq [r -1 debug digest] - } else { - set csv1 [csvdump r] - set csv2 [csvdump {r -1}] - set fd [open /tmp/repldump1.txt w] - puts -nonewline $fd $csv1 - close $fd - set fd [open /tmp/repldump2.txt w] - puts -nonewline $fd $csv2 - close $fd - puts "Master - Slave inconsistency" - puts "Run diff -u against /tmp/repldump*.txt for more info" - - } - - set old_digest [r debug digest] - r config set appendonly no - r debug loadaof - set new_digest [r debug digest] - assert {$old_digest eq $new_digest} - } - } -} diff --git a/tools/pika_migrate/tests/integration/replication-4.tcl b/tools/pika_migrate/tests/integration/replication-4.tcl deleted file mode 100644 index 6db9ffe2bc..0000000000 --- a/tools/pika_migrate/tests/integration/replication-4.tcl +++ /dev/null @@ -1,136 +0,0 @@ -proc start_bg_complex_data {host port db ops} { - set tclsh [info nameofexecutable] - exec $tclsh tests/helpers/bg_complex_data.tcl $host $port $db $ops & -} - -proc stop_bg_complex_data {handle} { - catch {exec /bin/kill -9 $handle} -} - -start_server {tags {"repl"}} { - start_server {} { - - set master [srv -1 client] - set master_host [srv -1 host] - set master_port [srv -1 port] - set slave [srv 0 client] - - set load_handle0 [start_bg_complex_data $master_host $master_port 9 100000] - set load_handle1 [start_bg_complex_data $master_host $master_port 11 100000] - set load_handle2 [start_bg_complex_data $master_host $master_port 12 100000] - - test {First server should have role slave after SLAVEOF} { - $slave slaveof $master_host $master_port - after 1000 - s 0 role - } {slave} - - test {Test replication with parallel clients writing in different DBs} { - after 5000 - stop_bg_complex_data $load_handle0 - stop_bg_complex_data $load_handle1 - stop_bg_complex_data $load_handle2 - set retry 10 - while {$retry && ([$master debug digest] ne [$slave debug digest])}\ - { - after 1000 - incr retry -1 - } - assert {[$master dbsize] > 0} - - if {[$master debug digest] ne [$slave debug digest]} { - set csv1 [csvdump r] - set csv2 [csvdump {r -1}] - set fd [open /tmp/repldump1.txt w] - puts -nonewline $fd $csv1 - close $fd - set fd [open /tmp/repldump2.txt w] - puts -nonewline $fd $csv2 - close $fd - puts "Master - Slave inconsistency" - puts "Run diff -u against /tmp/repldump*.txt for more info" - } - assert_equal [r debug digest] [r -1 debug digest] - } - } -} - -start_server {tags {"repl"}} { - start_server {} { - set master [srv -1 client] - set master_host [srv -1 host] - set master_port [srv -1 port] - set slave [srv 0 client] - - test {First server should have role slave after SLAVEOF} { - $slave slaveof $master_host $master_port - wait_for_condition 50 
100 { - [s 0 master_link_status] eq {up} - } else { - fail "Replication not started." - } - } - - test {With min-slaves-to-write (1,3): master should be writable} { - $master config set min-slaves-max-lag 3 - $master config set min-slaves-to-write 1 - $master set foo bar - } {OK} - - test {With min-slaves-to-write (2,3): master should not be writable} { - $master config set min-slaves-max-lag 3 - $master config set min-slaves-to-write 2 - catch {$master set foo bar} e - set e - } {NOREPLICAS*} - - test {With min-slaves-to-write: master not writable with lagged slave} { - $master config set min-slaves-max-lag 2 - $master config set min-slaves-to-write 1 - assert {[$master set foo bar] eq {OK}} - $slave deferred 1 - $slave debug sleep 6 - after 4000 - catch {$master set foo bar} e - set e - } {NOREPLICAS*} - } -} - -start_server {tags {"repl"}} { - start_server {} { - set master [srv -1 client] - set master_host [srv -1 host] - set master_port [srv -1 port] - set slave [srv 0 client] - - test {First server should have role slave after SLAVEOF} { - $slave slaveof $master_host $master_port - wait_for_condition 50 100 { - [s 0 role] eq {slave} - } else { - fail "Replication not started." - } - } - - test {Replication: commands with many arguments (issue #1221)} { - # We now issue large MSET commands, that may trigger a specific - # class of bugs, see issue #1221. - for {set j 0} {$j < 100} {incr j} { - set cmd [list mset] - for {set x 0} {$x < 1000} {incr x} { - lappend cmd [randomKey] [randomValue] - } - $master {*}$cmd - } - - set retry 10 - while {$retry && ([$master debug digest] ne [$slave debug digest])}\ - { - after 1000 - incr retry -1 - } - assert {[$master dbsize] > 0} - } - } -} diff --git a/tools/pika_migrate/tests/integration/replication-psync.tcl b/tools/pika_migrate/tests/integration/replication-psync.tcl deleted file mode 100644 index f131dafe31..0000000000 --- a/tools/pika_migrate/tests/integration/replication-psync.tcl +++ /dev/null @@ -1,115 +0,0 @@ -proc start_bg_complex_data {host port db ops} { - set tclsh [info nameofexecutable] - exec $tclsh tests/helpers/bg_complex_data.tcl $host $port $db $ops & -} - -proc stop_bg_complex_data {handle} { - catch {exec /bin/kill -9 $handle} -} - -# Creates a master-slave pair and breaks the link continuously to force -# partial resync attempts, all this while flooding the master with -# write queries. -# -# You can specify backlog size, ttl, delay before reconnection, test duration -# in seconds, and an additional condition to verify at the end. -proc test_psync {descr duration backlog_size backlog_ttl delay cond} { - start_server {tags {"repl"}} { - start_server {} { - - set master [srv -1 client] - set master_host [srv -1 host] - set master_port [srv -1 port] - set slave [srv 0 client] - - $master config set repl-backlog-size $backlog_size - $master config set repl-backlog-ttl $backlog_ttl - - set load_handle0 [start_bg_complex_data $master_host $master_port 9 100000] - set load_handle1 [start_bg_complex_data $master_host $master_port 11 100000] - set load_handle2 [start_bg_complex_data $master_host $master_port 12 100000] - - test {Slave should be able to synchronize with the master} { - $slave slaveof $master_host $master_port - wait_for_condition 50 100 { - [lindex [r role] 0] eq {slave} && - [lindex [r role] 3] eq {connected} - } else { - fail "Replication not started." - } - } - - # Check that the background clients are actually writing. 
- test {Detect write load to master} { - wait_for_condition 50 100 { - [$master dbsize] > 100 - } else { - fail "Can't detect write load from background clients." - } - } - - test "Test replication partial resync: $descr" { - # Now while the clients are writing data, break the master-slave - # link multiple times. - for {set j 0} {$j < $duration*10} {incr j} { - after 100 - # catch {puts "MASTER [$master dbsize] keys, SLAVE [$slave dbsize] keys"} - - if {($j % 20) == 0} { - catch { - if {$delay} { - $slave multi - $slave client kill $master_host:$master_port - $slave debug sleep $delay - $slave exec - } else { - $slave client kill $master_host:$master_port - } - } - } - } - stop_bg_complex_data $load_handle0 - stop_bg_complex_data $load_handle1 - stop_bg_complex_data $load_handle2 - set retry 10 - while {$retry && ([$master debug digest] ne [$slave debug digest])}\ - { - after 1000 - incr retry -1 - } - assert {[$master dbsize] > 0} - - if {[$master debug digest] ne [$slave debug digest]} { - set csv1 [csvdump r] - set csv2 [csvdump {r -1}] - set fd [open /tmp/repldump1.txt w] - puts -nonewline $fd $csv1 - close $fd - set fd [open /tmp/repldump2.txt w] - puts -nonewline $fd $csv2 - close $fd - puts "Master - Slave inconsistency" - puts "Run diff -u against /tmp/repldump*.txt for more info" - } - assert_equal [r debug digest] [r -1 debug digest] - eval $cond - } - } - } -} - -test_psync {ok psync} 6 1000000 3600 0 { - assert {[s -1 sync_partial_ok] > 0} -} - -test_psync {no backlog} 6 100 3600 0.5 { - assert {[s -1 sync_partial_err] > 0} -} - -test_psync {ok after delay} 3 100000000 3600 3 { - assert {[s -1 sync_partial_ok] > 0} -} - -test_psync {backlog expired} 3 100000000 1 3 { - assert {[s -1 sync_partial_err] > 0} -} diff --git a/tools/pika_migrate/tests/integration/replication.tcl b/tools/pika_migrate/tests/integration/replication.tcl deleted file mode 100644 index bb907eba8e..0000000000 --- a/tools/pika_migrate/tests/integration/replication.tcl +++ /dev/null @@ -1,215 +0,0 @@ -start_server {tags {"repl"}} { - set A [srv 0 client] - set A_host [srv 0 host] - set A_port [srv 0 port] - start_server {} { - set B [srv 0 client] - set B_host [srv 0 host] - set B_port [srv 0 port] - - test {Set instance A as slave of B} { - $A slaveof $B_host $B_port - wait_for_condition 50 100 { - [lindex [$A role] 0] eq {slave} && - [string match {*master_link_status:up*} [$A info replication]] - } else { - fail "Can't turn the instance into a slave" - } - } - - test {BRPOPLPUSH replication, when blocking against empty list} { - set rd [redis_deferring_client] - $rd brpoplpush a b 5 - r lpush a foo - wait_for_condition 50 100 { - [$A debug digest] eq [$B debug digest] - } else { - fail "Master and slave have different digest: [$A debug digest] VS [$B debug digest]" - } - } - - test {BRPOPLPUSH replication, list exists} { - set rd [redis_deferring_client] - r lpush c 1 - r lpush c 2 - r lpush c 3 - $rd brpoplpush c d 5 - after 1000 - assert_equal [$A debug digest] [$B debug digest] - } - - test {BLPOP followed by role change, issue #2473} { - set rd [redis_deferring_client] - $rd blpop foo 0 ; # Block while B is a master - - # Turn B into master of A - $A slaveof no one - $B slaveof $A_host $A_port - wait_for_condition 50 100 { - [lindex [$B role] 0] eq {slave} && - [string match {*master_link_status:up*} [$B info replication]] - } else { - fail "Can't turn the instance into a slave" - } - - # Push elements into the "foo" list of the new slave. 
- # If the client is still attached to the instance, we'll get - # a desync between the two instances. - $A rpush foo a b c - after 100 - - wait_for_condition 50 100 { - [$A debug digest] eq [$B debug digest] && - [$A lrange foo 0 -1] eq {a b c} && - [$B lrange foo 0 -1] eq {a b c} - } else { - fail "Master and slave have different digest: [$A debug digest] VS [$B debug digest]" - } - } - } -} - -start_server {tags {"repl"}} { - r set mykey foo - - start_server {} { - test {Second server should have role master at first} { - s role - } {master} - - test {SLAVEOF should start with link status "down"} { - r slaveof [srv -1 host] [srv -1 port] - s master_link_status - } {down} - - test {The role should immediately be changed to "slave"} { - s role - } {slave} - - wait_for_sync r - test {Sync should have transferred keys from master} { - r get mykey - } {foo} - - test {The link status should be up} { - s master_link_status - } {up} - - test {SET on the master should immediately propagate} { - r -1 set mykey bar - - wait_for_condition 500 100 { - [r 0 get mykey] eq {bar} - } else { - fail "SET on the master did not propagate to the slave" - } - } - - test {FLUSHALL should replicate} { - r -1 flushall - if {$::valgrind} {after 2000} - list [r -1 dbsize] [r 0 dbsize] - } {0 0} - - test {ROLE in master reports master with a slave} { - set res [r -1 role] - lassign $res role offset slaves - assert {$role eq {master}} - assert {$offset > 0} - assert {[llength $slaves] == 1} - lassign [lindex $slaves 0] master_host master_port slave_offset - assert {$slave_offset <= $offset} - } - - test {ROLE in slave reports slave in connected state} { - set res [r role] - lassign $res role master_host master_port slave_state slave_offset - assert {$role eq {slave}} - assert {$slave_state eq {connected}} - } - } -} - -foreach dl {no yes} { - start_server {tags {"repl"}} { - set master [srv 0 client] - $master config set repl-diskless-sync $dl - set master_host [srv 0 host] - set master_port [srv 0 port] - set slaves {} - set load_handle0 [start_write_load $master_host $master_port 3] - set load_handle1 [start_write_load $master_host $master_port 5] - set load_handle2 [start_write_load $master_host $master_port 20] - set load_handle3 [start_write_load $master_host $master_port 8] - set load_handle4 [start_write_load $master_host $master_port 4] - start_server {} { - lappend slaves [srv 0 client] - start_server {} { - lappend slaves [srv 0 client] - start_server {} { - lappend slaves [srv 0 client] - test "Connect multiple slaves at the same time (issue #141), diskless=$dl" { - # Send SLAVEOF commands to slaves - [lindex $slaves 0] slaveof $master_host $master_port - [lindex $slaves 1] slaveof $master_host $master_port - [lindex $slaves 2] slaveof $master_host $master_port - - # Wait for all the three slaves to reach the "online" - # state from the POV of the master. - set retry 500 - while {$retry} { - set info [r -3 info] - if {[string match {*slave0:*state=online*slave1:*state=online*slave2:*state=online*} $info]} { - break - } else { - incr retry -1 - after 100 - } - } - if {$retry == 0} { - error "assertion:Slaves not correctly synchronized" - } - - # Wait until slaves acknowledge they are online so - # we are sure that DBSIZE and DEBUG DIGEST will not - # fail because of timing issues. 
- wait_for_condition 500 100 { - [lindex [[lindex $slaves 0] role] 3] eq {connected} && - [lindex [[lindex $slaves 1] role] 3] eq {connected} && - [lindex [[lindex $slaves 2] role] 3] eq {connected} - } else { - fail "Slaves still not connected after some time" - } - - # Stop the write load - stop_write_load $load_handle0 - stop_write_load $load_handle1 - stop_write_load $load_handle2 - stop_write_load $load_handle3 - stop_write_load $load_handle4 - - # Make sure that slaves and master have same - # number of keys - wait_for_condition 500 100 { - [$master dbsize] == [[lindex $slaves 0] dbsize] && - [$master dbsize] == [[lindex $slaves 1] dbsize] && - [$master dbsize] == [[lindex $slaves 2] dbsize] - } else { - fail "Different number of keys between master and slave after too long a time." - } - - # Check digests - set digest [$master debug digest] - set digest0 [[lindex $slaves 0] debug digest] - set digest1 [[lindex $slaves 1] debug digest] - set digest2 [[lindex $slaves 2] debug digest] - assert {$digest ne 0000000000000000000000000000000000000000} - assert {$digest eq $digest0} - assert {$digest eq $digest1} - assert {$digest eq $digest2} - } - } - } - } - } -} diff --git a/tools/pika_migrate/tests/sentinel/run.tcl b/tools/pika_migrate/tests/sentinel/run.tcl deleted file mode 100644 index f330299599..0000000000 --- a/tools/pika_migrate/tests/sentinel/run.tcl +++ /dev/null @@ -1,22 +0,0 @@ -# Sentinel test suite. Copyright (C) 2014 Salvatore Sanfilippo antirez@gmail.com -# This software is released under the BSD License. See the COPYING file for -# more information. - -cd tests/sentinel -source ../instances.tcl - -set ::instances_count 5 ; # How many instances we use at max. - -proc main {} { - parse_options - spawn_instance sentinel $::sentinel_base_port $::instances_count - spawn_instance redis $::redis_base_port $::instances_count - run_tests - cleanup -} - -if {[catch main e]} { - puts $::errorInfo - cleanup - exit 1 -} diff --git a/tools/pika_migrate/tests/sentinel/tests/00-base.tcl b/tools/pika_migrate/tests/sentinel/tests/00-base.tcl deleted file mode 100644 index a79d0c371c..0000000000 --- a/tools/pika_migrate/tests/sentinel/tests/00-base.tcl +++ /dev/null @@ -1,126 +0,0 @@ -# Check the basic monitoring and failover capabilities. 
- -source "../tests/includes/init-tests.tcl" - -if {$::simulate_error} { - test "This test will fail" { - fail "Simulated error" - } -} - -test "Basic failover works if the master is down" { - set old_port [RI $master_id tcp_port] - set addr [S 0 SENTINEL GET-MASTER-ADDR-BY-NAME mymaster] - assert {[lindex $addr 1] == $old_port} - kill_instance redis $master_id - foreach_sentinel_id id { - wait_for_condition 1000 50 { - [lindex [S $id SENTINEL GET-MASTER-ADDR-BY-NAME mymaster] 1] != $old_port - } else { - fail "At least one Sentinel did not received failover info" - } - } - restart_instance redis $master_id - set addr [S 0 SENTINEL GET-MASTER-ADDR-BY-NAME mymaster] - set master_id [get_instance_id_by_port redis [lindex $addr 1]] -} - -test "New master [join $addr {:}] role matches" { - assert {[RI $master_id role] eq {master}} -} - -test "All the other slaves now point to the new master" { - foreach_redis_id id { - if {$id != $master_id && $id != 0} { - wait_for_condition 1000 50 { - [RI $id master_port] == [lindex $addr 1] - } else { - fail "Redis ID $id not configured to replicate with new master" - } - } - } -} - -test "The old master eventually gets reconfigured as a slave" { - wait_for_condition 1000 50 { - [RI 0 master_port] == [lindex $addr 1] - } else { - fail "Old master not reconfigured as slave of new master" - } -} - -test "ODOWN is not possible without N (quorum) Sentinels reports" { - foreach_sentinel_id id { - S $id SENTINEL SET mymaster quorum [expr $sentinels+1] - } - set old_port [RI $master_id tcp_port] - set addr [S 0 SENTINEL GET-MASTER-ADDR-BY-NAME mymaster] - assert {[lindex $addr 1] == $old_port} - kill_instance redis $master_id - - # Make sure failover did not happened. - set addr [S 0 SENTINEL GET-MASTER-ADDR-BY-NAME mymaster] - assert {[lindex $addr 1] == $old_port} - restart_instance redis $master_id -} - -test "Failover is not possible without majority agreement" { - foreach_sentinel_id id { - S $id SENTINEL SET mymaster quorum $quorum - } - - # Crash majority of sentinels - for {set id 0} {$id < $quorum} {incr id} { - kill_instance sentinel $id - } - - # Kill the current master - kill_instance redis $master_id - - # Make sure failover did not happened. - set addr [S $quorum SENTINEL GET-MASTER-ADDR-BY-NAME mymaster] - assert {[lindex $addr 1] == $old_port} - restart_instance redis $master_id - - # Cleanup: restart Sentinels to monitor the master. - for {set id 0} {$id < $quorum} {incr id} { - restart_instance sentinel $id - } -} - -test "Failover works if we configure for absolute agreement" { - foreach_sentinel_id id { - S $id SENTINEL SET mymaster quorum $sentinels - } - - # Wait for Sentinels to monitor the master again - foreach_sentinel_id id { - wait_for_condition 1000 50 { - [dict get [S $id SENTINEL MASTER mymaster] info-refresh] < 100000 - } else { - fail "At least one Sentinel is not monitoring the master" - } - } - - kill_instance redis $master_id - - foreach_sentinel_id id { - wait_for_condition 1000 50 { - [lindex [S $id SENTINEL GET-MASTER-ADDR-BY-NAME mymaster] 1] != $old_port - } else { - fail "At least one Sentinel did not received failover info" - } - } - restart_instance redis $master_id - set addr [S 0 SENTINEL GET-MASTER-ADDR-BY-NAME mymaster] - set master_id [get_instance_id_by_port redis [lindex $addr 1]] - - # Set the min ODOWN agreement back to strict majority. 
- foreach_sentinel_id id { - S $id SENTINEL SET mymaster quorum $quorum - } -} - -test "New master [join $addr {:}] role matches" { - assert {[RI $master_id role] eq {master}} -} diff --git a/tools/pika_migrate/tests/sentinel/tests/01-conf-update.tcl b/tools/pika_migrate/tests/sentinel/tests/01-conf-update.tcl deleted file mode 100644 index 4998104d2f..0000000000 --- a/tools/pika_migrate/tests/sentinel/tests/01-conf-update.tcl +++ /dev/null @@ -1,39 +0,0 @@ -# Test Sentinel configuration consistency after partitions heal. - -source "../tests/includes/init-tests.tcl" - -test "We can failover with Sentinel 1 crashed" { - set old_port [RI $master_id tcp_port] - set addr [S 0 SENTINEL GET-MASTER-ADDR-BY-NAME mymaster] - assert {[lindex $addr 1] == $old_port} - - # Crash Sentinel 1 - kill_instance sentinel 1 - - kill_instance redis $master_id - foreach_sentinel_id id { - if {$id != 1} { - wait_for_condition 1000 50 { - [lindex [S $id SENTINEL GET-MASTER-ADDR-BY-NAME mymaster] 1] != $old_port - } else { - fail "Sentinel $id did not receive failover info" - } - } - } - restart_instance redis $master_id - set addr [S 0 SENTINEL GET-MASTER-ADDR-BY-NAME mymaster] - set master_id [get_instance_id_by_port redis [lindex $addr 1]] -} - -test "After Sentinel 1 is restarted, its config gets updated" { - restart_instance sentinel 1 - wait_for_condition 1000 50 { - [lindex [S 1 SENTINEL GET-MASTER-ADDR-BY-NAME mymaster] 1] != $old_port - } else { - fail "Restarted Sentinel did not receive failover info" - } -} - -test "New master [join $addr {:}] role matches" { - assert {[RI $master_id role] eq {master}} -} diff --git a/tools/pika_migrate/tests/sentinel/tests/02-slaves-reconf.tcl b/tools/pika_migrate/tests/sentinel/tests/02-slaves-reconf.tcl deleted file mode 100644 index fa15d2efde..0000000000 --- a/tools/pika_migrate/tests/sentinel/tests/02-slaves-reconf.tcl +++ /dev/null @@ -1,84 +0,0 @@ -# Check that slaves are reconfigured at a later time if they are partitioned. -# -# Here we should test: -# 1) That slaves point to the new master after failover. -# 2) That partitioned slaves point to the new master when they are partitioned -# away during failover and return at a later time. 
- -source "../tests/includes/init-tests.tcl" - -proc 02_test_slaves_replication {} { - uplevel 1 { - test "Check that slaves replicate from current master" { - set master_port [RI $master_id tcp_port] - foreach_redis_id id { - if {$id == $master_id} continue - if {[instance_is_killed redis $id]} continue - wait_for_condition 1000 50 { - ([RI $id master_port] == $master_port) && - ([RI $id master_link_status] eq {up}) - } else { - fail "Redis slave $id is replicating from wrong master" - } - } - } - } -} - -proc 02_crash_and_failover {} { - uplevel 1 { - test "Crash the master and force a failover" { - set old_port [RI $master_id tcp_port] - set addr [S 0 SENTINEL GET-MASTER-ADDR-BY-NAME mymaster] - assert {[lindex $addr 1] == $old_port} - kill_instance redis $master_id - foreach_sentinel_id id { - wait_for_condition 1000 50 { - [lindex [S $id SENTINEL GET-MASTER-ADDR-BY-NAME mymaster] 1] != $old_port - } else { - fail "At least one Sentinel did not received failover info" - } - } - restart_instance redis $master_id - set addr [S 0 SENTINEL GET-MASTER-ADDR-BY-NAME mymaster] - set master_id [get_instance_id_by_port redis [lindex $addr 1]] - } - } -} - -02_test_slaves_replication -02_crash_and_failover -02_test_slaves_replication - -test "Kill a slave instance" { - foreach_redis_id id { - if {$id == $master_id} continue - set killed_slave_id $id - kill_instance redis $id - break - } -} - -02_crash_and_failover -02_test_slaves_replication - -test "Wait for failover to end" { - set inprogress 1 - while {$inprogress} { - set inprogress 0 - foreach_sentinel_id id { - if {[dict exists [S $id SENTINEL MASTER mymaster] failover-state]} { - incr inprogress - } - } - if {$inprogress} {after 100} - } -} - -test "Restart killed slave and test replication of slaves again..." { - restart_instance redis $killed_slave_id -} - -# Now we check if the slave rejoining the partition is reconfigured even -# if the failover finished. -02_test_slaves_replication diff --git a/tools/pika_migrate/tests/sentinel/tests/03-runtime-reconf.tcl b/tools/pika_migrate/tests/sentinel/tests/03-runtime-reconf.tcl deleted file mode 100644 index 426596c37e..0000000000 --- a/tools/pika_migrate/tests/sentinel/tests/03-runtime-reconf.tcl +++ /dev/null @@ -1 +0,0 @@ -# Test runtime reconfiguration command SENTINEL SET. diff --git a/tools/pika_migrate/tests/sentinel/tests/04-slave-selection.tcl b/tools/pika_migrate/tests/sentinel/tests/04-slave-selection.tcl deleted file mode 100644 index 3d2ca64845..0000000000 --- a/tools/pika_migrate/tests/sentinel/tests/04-slave-selection.tcl +++ /dev/null @@ -1,5 +0,0 @@ -# Test slave selection algorithm. -# -# This unit should test: -# 1) That when there are no suitable slaves no failover is performed. -# 2) That among the available slaves, the one with better offset is picked. 
diff --git a/tools/pika_migrate/tests/sentinel/tests/05-manual.tcl b/tools/pika_migrate/tests/sentinel/tests/05-manual.tcl deleted file mode 100644 index 1a60d814b3..0000000000 --- a/tools/pika_migrate/tests/sentinel/tests/05-manual.tcl +++ /dev/null @@ -1,44 +0,0 @@ -# Test manual failover - -source "../tests/includes/init-tests.tcl" - -test "Manual failover works" { - set old_port [RI $master_id tcp_port] - set addr [S 0 SENTINEL GET-MASTER-ADDR-BY-NAME mymaster] - assert {[lindex $addr 1] == $old_port} - S 0 SENTINEL FAILOVER mymaster - foreach_sentinel_id id { - wait_for_condition 1000 50 { - [lindex [S $id SENTINEL GET-MASTER-ADDR-BY-NAME mymaster] 1] != $old_port - } else { - fail "At least one Sentinel did not receive failover info" - } - } - set addr [S 0 SENTINEL GET-MASTER-ADDR-BY-NAME mymaster] - set master_id [get_instance_id_by_port redis [lindex $addr 1]] -} - -test "New master [join $addr {:}] role matches" { - assert {[RI $master_id role] eq {master}} -} - -test "All the other slaves now point to the new master" { - foreach_redis_id id { - if {$id != $master_id && $id != 0} { - wait_for_condition 1000 50 { - [RI $id master_port] == [lindex $addr 1] - } else { - fail "Redis ID $id not configured to replicate with new master" - } - } - } -} - -test "The old master eventually gets reconfigured as a slave" { - wait_for_condition 1000 50 { - [RI 0 master_port] == [lindex $addr 1] - } else { - fail "Old master not reconfigured as slave of new master" - } -} - diff --git a/tools/pika_migrate/tests/sentinel/tests/includes/init-tests.tcl b/tools/pika_migrate/tests/sentinel/tests/includes/init-tests.tcl deleted file mode 100644 index c8165dcfa9..0000000000 --- a/tools/pika_migrate/tests/sentinel/tests/includes/init-tests.tcl +++ /dev/null @@ -1,72 +0,0 @@ -# Initialization tests -- most units will start including this. - -test "(init) Restart killed instances" { - foreach type {redis sentinel} { - foreach_${type}_id id { - if {[get_instance_attrib $type $id pid] == -1} { - puts -nonewline "$type/$id " - flush stdout - restart_instance $type $id - } - } - } -} - -test "(init) Remove old master entry from sentinels" { - foreach_sentinel_id id { - catch {S $id SENTINEL REMOVE mymaster} - } -} - -set redis_slaves 4 -test "(init) Create a master-slaves cluster of [expr $redis_slaves+1] instances" { - create_redis_master_slave_cluster [expr {$redis_slaves+1}] -} -set master_id 0 - -test "(init) Sentinels can start monitoring a master" { - set sentinels [llength $::sentinel_instances] - set quorum [expr {$sentinels/2+1}] - foreach_sentinel_id id { - S $id SENTINEL MONITOR mymaster \ - [get_instance_attrib redis $master_id host] \ - [get_instance_attrib redis $master_id port] $quorum - } - foreach_sentinel_id id { - assert {[S $id sentinel master mymaster] ne {}} - S $id SENTINEL SET mymaster down-after-milliseconds 2000 - S $id SENTINEL SET mymaster failover-timeout 20000 - S $id SENTINEL SET mymaster parallel-syncs 10 - } -} - -test "(init) Sentinels can talk with the master" { - foreach_sentinel_id id { - wait_for_condition 1000 50 { - [catch {S $id SENTINEL GET-MASTER-ADDR-BY-NAME mymaster}] == 0 - } else { - fail "Sentinel $id can't talk with the master." 
- } - } -} - -test "(init) Sentinels are able to auto-discover other sentinels" { - set sentinels [llength $::sentinel_instances] - foreach_sentinel_id id { - wait_for_condition 1000 50 { - [dict get [S $id SENTINEL MASTER mymaster] num-other-sentinels] == ($sentinels-1) - } else { - fail "At least some sentinel can't detect some other sentinel" - } - } -} - -test "(init) Sentinels are able to auto-discover slaves" { - foreach_sentinel_id id { - wait_for_condition 1000 50 { - [dict get [S $id SENTINEL MASTER mymaster] num-slaves] == $redis_slaves - } else { - fail "At least some sentinel can't detect some slave" - } - } -} diff --git a/tools/pika_migrate/tests/sentinel/tmp/.gitignore b/tools/pika_migrate/tests/sentinel/tmp/.gitignore deleted file mode 100644 index f581f73e2d..0000000000 --- a/tools/pika_migrate/tests/sentinel/tmp/.gitignore +++ /dev/null @@ -1,2 +0,0 @@ -redis_* -sentinel_* diff --git a/tools/pika_migrate/tests/support/redis.tcl b/tools/pika_migrate/tests/support/redis.tcl deleted file mode 100644 index 7c78360812..0000000000 --- a/tools/pika_migrate/tests/support/redis.tcl +++ /dev/null @@ -1,294 +0,0 @@ -# Tcl client library - used by test-redis.tcl script for now -# Copyright (C) 2009 Salvatore Sanfilippo -# Released under the BSD license like Redis itself -# -# Example usage: -# -# set r [redis 127.0.0.1 6379] -# $r lpush mylist foo -# $r lpush mylist bar -# $r lrange mylist 0 -1 -# $r close -# -# Non blocking usage example: -# -# proc handlePong {r type reply} { -# puts "PONG $type '$reply'" -# if {$reply ne "PONG"} { -# $r ping [list handlePong] -# } -# } -# -# set r [redis] -# $r blocking 0 -# $r get fo [list handlePong] -# -# vwait forever - -package require Tcl 8.5 -package provide redis 0.1 - -namespace eval redis {} -set ::redis::id 0 -array set ::redis::fd {} -array set ::redis::addr {} -array set ::redis::blocking {} -array set ::redis::deferred {} -array set ::redis::reconnect {} -array set ::redis::callback {} -array set ::redis::state {} ;# State in non-blocking reply reading -array set ::redis::statestack {} ;# Stack of states, for nested mbulks - -proc redis {{server 127.0.0.1} {port 6379} {defer 0}} { - set fd [socket $server $port] - fconfigure $fd -translation binary - set id [incr ::redis::id] - set ::redis::fd($id) $fd - set ::redis::addr($id) [list $server $port] - set ::redis::blocking($id) 1 - set ::redis::deferred($id) $defer - set ::redis::reconnect($id) 0 - ::redis::redis_reset_state $id - interp alias {} ::redis::redisHandle$id {} ::redis::__dispatch__ $id -} - -# This is a wrapper to the actual dispatching procedure that handles -# reconnection if needed. -proc ::redis::__dispatch__ {id method args} { - set errorcode [catch {::redis::__dispatch__raw__ $id $method $args} retval] - if {$errorcode && $::redis::reconnect($id) && $::redis::fd($id) eq {}} { - # Try again if the connection was lost. - # FIXME: we don't re-select the previously selected DB, nor do we check - # if we are inside a transaction that needs to be re-issued from - # scratch. - set errorcode [catch {::redis::__dispatch__raw__ $id $method $args} retval] - } - return -code $errorcode $retval -} - -proc ::redis::__dispatch__raw__ {id method argv} { - set fd $::redis::fd($id) - - # Reconnect the link if needed. 
- if {$fd eq {}} { - lassign $::redis::addr($id) host port - set ::redis::fd($id) [socket $host $port] - fconfigure $::redis::fd($id) -translation binary - set fd $::redis::fd($id) - } - - set blocking $::redis::blocking($id) - set deferred $::redis::deferred($id) - if {$blocking == 0} { - if {[llength $argv] == 0} { - error "Please provide a callback in non-blocking mode" - } - set callback [lindex $argv end] - set argv [lrange $argv 0 end-1] - } - if {[info command ::redis::__method__$method] eq {}} { - set cmd "*[expr {[llength $argv]+1}]\r\n" - append cmd "$[string length $method]\r\n$method\r\n" - foreach a $argv { - append cmd "$[string length $a]\r\n$a\r\n" - } - ::redis::redis_write $fd $cmd - if {[catch {flush $fd}]} { - set ::redis::fd($id) {} - return -code error "I/O error reading reply" - } - - if {!$deferred} { - if {$blocking} { - ::redis::redis_read_reply $id $fd - } else { - # Every well formed reply read will pop an element from this - # list and use it as a callback. So pipelining is supported - # in non blocking mode. - lappend ::redis::callback($id) $callback - fileevent $fd readable [list ::redis::redis_readable $fd $id] - } - } - } else { - uplevel 1 [list ::redis::__method__$method $id $fd] $argv - } -} - -proc ::redis::__method__blocking {id fd val} { - set ::redis::blocking($id) $val - fconfigure $fd -blocking $val -} - -proc ::redis::__method__reconnect {id fd val} { - set ::redis::reconnect($id) $val -} - -proc ::redis::__method__read {id fd} { - ::redis::redis_read_reply $id $fd -} - -proc ::redis::__method__write {id fd buf} { - ::redis::redis_write $fd $buf -} - -proc ::redis::__method__flush {id fd} { - flush $fd -} - -proc ::redis::__method__close {id fd} { - catch {close $fd} - catch {unset ::redis::fd($id)} - catch {unset ::redis::addr($id)} - catch {unset ::redis::blocking($id)} - catch {unset ::redis::deferred($id)} - catch {unset ::redis::reconnect($id)} - catch {unset ::redis::state($id)} - catch {unset ::redis::statestack($id)} - catch {unset ::redis::callback($id)} - catch {interp alias {} ::redis::redisHandle$id {}} -} - -proc ::redis::__method__channel {id fd} { - return $fd -} - -proc ::redis::__method__deferred {id fd val} { - set ::redis::deferred($id) $val -} - -proc ::redis::redis_write {fd buf} { - puts -nonewline $fd $buf -} - -proc ::redis::redis_writenl {fd buf} { - redis_write $fd $buf - redis_write $fd "\r\n" - flush $fd -} - -proc ::redis::redis_readnl {fd len} { - set buf [read $fd $len] - read $fd 2 ; # discard CR LF - return $buf -} - -proc ::redis::redis_bulk_read {fd} { - set count [redis_read_line $fd] - if {$count == -1} return {} - set buf [redis_readnl $fd $count] - return $buf -} - -proc ::redis::redis_multi_bulk_read {id fd} { - set count [redis_read_line $fd] - if {$count == -1} return {} - set l {} - set err {} - for {set i 0} {$i < $count} {incr i} { - if {[catch { - lappend l [redis_read_reply $id $fd] - } e] && $err eq {}} { - set err $e - } - } - if {$err ne {}} {return -code error $err} - return $l -} - -proc ::redis::redis_read_line fd { - string trim [gets $fd] -} - -proc ::redis::redis_read_reply {id fd} { - set type [read $fd 1] - switch -exact -- $type { - : - - + {redis_read_line $fd} - - {return -code error [redis_read_line $fd]} - $ {redis_bulk_read $fd} - * {redis_multi_bulk_read $id $fd} - default { - if {$type eq {}} { - set ::redis::fd($id) {} - return -code error "I/O error reading reply" - } - return -code error "Bad protocol, '$type' as reply type byte" - } - } -} - -proc ::redis::redis_reset_state id { 
- set ::redis::state($id) [dict create buf {} mbulk -1 bulk -1 reply {}] - set ::redis::statestack($id) {} -} - -proc ::redis::redis_call_callback {id type reply} { - set cb [lindex $::redis::callback($id) 0] - set ::redis::callback($id) [lrange $::redis::callback($id) 1 end] - uplevel #0 $cb [list ::redis::redisHandle$id $type $reply] - ::redis::redis_reset_state $id -} - -# Read a reply in non-blocking mode. -proc ::redis::redis_readable {fd id} { - if {[eof $fd]} { - redis_call_callback $id eof {} - ::redis::__method__close $id $fd - return - } - if {[dict get $::redis::state($id) bulk] == -1} { - set line [gets $fd] - if {$line eq {}} return ;# No complete line available, return - switch -exact -- [string index $line 0] { - : - - + {redis_call_callback $id reply [string range $line 1 end-1]} - - {redis_call_callback $id err [string range $line 1 end-1]} - $ { - dict set ::redis::state($id) bulk \ - [expr [string range $line 1 end-1]+2] - if {[dict get $::redis::state($id) bulk] == 1} { - # We got a $-1, hack the state to play well with this. - dict set ::redis::state($id) bulk 2 - dict set ::redis::state($id) buf "\r\n" - ::redis::redis_readable $fd $id - } - } - * { - dict set ::redis::state($id) mbulk [string range $line 1 end-1] - # Handle *-1 - if {[dict get $::redis::state($id) mbulk] == -1} { - redis_call_callback $id reply {} - } - } - default { - redis_call_callback $id err \ - "Bad protocol, [string index $line 0] as reply type byte" - } - } - } else { - set totlen [dict get $::redis::state($id) bulk] - set buflen [string length [dict get $::redis::state($id) buf]] - set toread [expr {$totlen-$buflen}] - set data [read $fd $toread] - set nread [string length $data] - dict append ::redis::state($id) buf $data - # Check if we read a complete bulk reply - if {[string length [dict get $::redis::state($id) buf]] == - [dict get $::redis::state($id) bulk]} { - if {[dict get $::redis::state($id) mbulk] == -1} { - redis_call_callback $id reply \ - [string range [dict get $::redis::state($id) buf] 0 end-2] - } else { - dict with ::redis::state($id) { - lappend reply [string range $buf 0 end-2] - incr mbulk -1 - set bulk -1 - } - if {[dict get $::redis::state($id) mbulk] == 0} { - redis_call_callback $id reply \ - [dict get $::redis::state($id) reply] - } - } - } - } -} diff --git a/tools/pika_migrate/tests/support/server.tcl b/tools/pika_migrate/tests/support/server.tcl deleted file mode 100644 index c7777fe5d3..0000000000 --- a/tools/pika_migrate/tests/support/server.tcl +++ /dev/null @@ -1,337 +0,0 @@ -set ::global_overrides {} -set ::tags {} -set ::valgrind_errors {} - -proc start_server_error {config_file error} { - set err {} - append err "Can't start the Redis server\n" - append err "CONFIGURATION:" - append err [exec cat $config_file] - append err "\nERROR:" - append err [string trim $error] - send_data_packet $::test_server_fd err $err -} - -proc check_valgrind_errors stderr { - set fd [open $stderr] - set buf [read $fd] - close $fd - - if {[regexp -- { at 0x} $buf] || - (![regexp -- {definitely lost: 0 bytes} $buf] && - ![regexp -- {no leaks are possible} $buf])} { - send_data_packet $::test_server_fd err "Valgrind error: $buf\n" - } -} - -proc kill_server config { - # nothing to kill when running against external server - if {$::external} return - - # never mind if it's already dead - if {![is_alive $config]} { return } - set pid [dict get $config pid] - - # check for leaks - if {![dict exists $config "skipleaks"]} { - catch { - if {[string match {*Darwin*} [exec uname -a]]} { - tags {"leaks"} { 
- test "Check for memory leaks (pid $pid)" { - set output {0 leaks} - catch {exec leaks $pid} output - if {[string match {*process does not exist*} $output] || - [string match {*cannot examine*} $output]} { - # In a few tests we kill the server process. - set output "0 leaks" - } - set output - } {*0 leaks*} - } - } - } - } - - # kill server and wait for the process to be totally exited - catch {exec kill $pid} - while {[is_alive $config]} { - incr wait 10 - - if {$wait >= 5000} { - puts "Forcing process $pid to exit..." - catch {exec kill -KILL $pid} - } elseif {$wait % 1000 == 0} { - puts "Waiting for process $pid to exit..." - } - after 10 - } - - # Check valgrind errors if needed - if {$::valgrind} { - check_valgrind_errors [dict get $config stderr] - } - - # Remove this pid from the set of active pids in the test server. - send_data_packet $::test_server_fd server-killed $pid -} - -proc is_alive config { - set pid [dict get $config pid] - if {[catch {exec ps -p $pid} err]} { - return 0 - } else { - return 1 - } -} - -proc ping_server {host port} { - set retval 0 - if {[catch { - set fd [socket $host $port] - fconfigure $fd -translation binary - puts $fd "PING\r\n" - flush $fd - set reply [gets $fd] - if {[string range $reply 0 0] eq {+} || - [string range $reply 0 0] eq {-}} { - set retval 1 - } - close $fd - } e]} { - if {$::verbose} { - puts -nonewline "." - } - } else { - if {$::verbose} { - puts -nonewline "ok" - } - } - return $retval -} - -# Return 1 if the server at the specified addr is reachable by PING, otherwise -# returns 0. Performs a try every 50 milliseconds for the specified number -# of retries. -proc server_is_up {host port retrynum} { - after 10 ;# Use a small delay to make likely a first-try success. - set retval 0 - while {[incr retrynum -1]} { - if {[catch {ping_server $host $port} ping]} { - set ping 0 - } - if {$ping} {return 1} - after 50 - } - return 0 -} - -# doesn't really belong here, but highly coupled to code in start_server -proc tags {tags code} { - set ::tags [concat $::tags $tags] - uplevel 1 $code - set ::tags [lrange $::tags 0 end-[llength $tags]] -} - -proc start_server {options {code undefined}} { - # If we are running against an external server, we just push the - # host/port pair in the stack the first time - if {$::external} { - if {[llength $::servers] == 0} { - set srv {} - dict set srv "host" $::host - dict set srv "port" $::port - set client [redis $::host $::port] - dict set srv "client" $client - $client select 9 - - # append the server to the stack - lappend ::servers $srv - } - uplevel 1 $code - return - } - - # setup defaults - set baseconfig "default.conf" - set overrides {} - set tags {} - - # parse options - foreach {option value} $options { - switch $option { - "config" { - set baseconfig $value } - "overrides" { - set overrides $value } - "tags" { - set tags $value - set ::tags [concat $::tags $value] } - default { - error "Unknown option $option" } - } - } - - set data [split [exec cat "tests/assets/$baseconfig"] "\n"] - set config {} - foreach line $data { - if {[string length $line] > 0 && [string index $line 0] ne "#"} { - set elements [split $line " "] - set directive [lrange $elements 0 0] - set arguments [lrange $elements 1 end] - dict set config $directive $arguments - } - } - - # use a different directory every time a server is started - dict set config dir [tmpdir server] - - # start every server on a different port - set ::port [find_available_port [expr {$::port+1}]] - dict set config port $::port - - # apply 
overrides from global space and arguments
-    foreach {directive arguments} [concat $::global_overrides $overrides] {
-        dict set config $directive $arguments
-    }
-
-    # write new configuration to temporary file
-    set config_file [tmpfile redis.conf]
-    set fp [open $config_file w+]
-    foreach directive [dict keys $config] {
-        if {$directive == "port"} {
-            puts -nonewline $fp "$directive : "
-            puts $fp [dict get $config $directive]
-        } elseif {$directive == "requirepass"} {
-            puts $fp "$directive :"
-        } elseif {$directive == "dump_prefix"} {
-            puts $fp "$directive :"
-        } else {
-            puts -nonewline $fp "$directive "
-            puts $fp [dict get $config $directive]
-        }
-    }
-    close $fp
-
-    set stdout [format "%s/%s" [dict get $config "dir"] "stdout"]
-    set stderr [format "%s/%s" [dict get $config "dir"] "stderr"]
-
-    if {$::valgrind} {
-        set pid [exec valgrind --suppressions=src/valgrind.sup --show-reachable=no --show-possibly-lost=no --leak-check=full src/redis-server $config_file > $stdout 2> $stderr &]
-    } else {
-        set pid [exec src/redis-server -c $config_file > $stdout 2> $stderr &]
-        #set pid [exec src/redis-server $config_file > $stdout 2> $stderr &]
-    }
-
-    puts "Starting ---- "
-
-    # Tell the test server about this new instance.
-    send_data_packet $::test_server_fd server-spawned $pid
-
-    # check that the server actually started
-    # ugly but tries to be as fast as possible...
-    if {$::valgrind} {set retrynum 1000} else {set retrynum 100}
-
-    if {$::verbose} {
-        puts -nonewline "=== ($tags) Starting server ${::host}:${::port} "
-    }
-
-    if {$code ne "undefined"} {
-        set serverisup [server_is_up $::host $::port $retrynum]
-    } else {
-        set serverisup 1
-    }
-
-    if {$::verbose} {
-        puts ""
-    }
-
-    if {!$serverisup} {
-        set err {}
-        append err [exec cat $stdout] "\n" [exec cat $stderr]
-        start_server_error $config_file $err
-        return
-    }
-
-    puts "Before Wait"
-    # Wait for actual startup
-    #while {![info exists _pid]} {
-    #    regexp {PID:\s(\d+)} [exec cat $stdout] _ _pid
-    #    after 100
-    #}
-    puts "After Wait"
-
-    # setup properties to be able to initialize a client object
-    set host $::host
-    set port $::port
-    if {[dict exists $config bind]} { set host [dict get $config bind] }
-    if {[dict exists $config port]} { set port [dict get $config port] }
-
-    # setup config dict
-    dict set srv "config_file" $config_file
-    dict set srv "config" $config
-    dict set srv "pid" $pid
-    dict set srv "host" $host
-    dict set srv "port" $port
-    dict set srv "stdout" $stdout
-    dict set srv "stderr" $stderr
-
-    # if a block of code is supplied, we wait for the server to become
-    # available, create a client object and kill the server afterwards
-    if {$code ne "undefined"} {
-        set line [exec head -n1 $stdout]
-        if {[string match {*already in use*} $line]} {
-            error_and_quit $config_file $line
-        }
-
-        while 1 {
-            # check that the server actually started and is ready for connections
-            if {[exec grep "going to start" | wc -l < $stderr] > 0} {
-                break
-            }
-            puts "Waiting for the server to report readiness..."
-            after 10
-        }
-
-        # append the server to the stack
-        lappend ::servers $srv
-
-        # connect client (after server dict is put on the stack)
-        reconnect
-
-        # execute provided block
-        set num_tests $::num_tests
-        if {[catch { uplevel 1 $code } error]} {
-            set backtrace $::errorInfo
-
-            # Kill the server without checking for leaks
-            dict set srv "skipleaks" 1
-            kill_server $srv
-
-            # Print warnings from log
-            puts [format "\nLogged warnings (pid %d):" [dict get $srv "pid"]]
-            set warnings [warnings_from_file [dict get $srv "stdout"]]
-            if {[string length $warnings] >
0} { - puts "$warnings" - } else { - puts "(none)" - } - puts "" - - error $error $backtrace - } - - # Don't do the leak check when no tests were run - if {$num_tests == $::num_tests} { - dict set srv "skipleaks" 1 - } - - # pop the server object - set ::servers [lrange $::servers 0 end-1] - - set ::tags [lrange $::tags 0 end-[llength $tags]] - kill_server $srv - } else { - set ::tags [lrange $::tags 0 end-[llength $tags]] - set _ $srv - } -} diff --git a/tools/pika_migrate/tests/support/test.tcl b/tools/pika_migrate/tests/support/test.tcl deleted file mode 100644 index 7d390cc47a..0000000000 --- a/tools/pika_migrate/tests/support/test.tcl +++ /dev/null @@ -1,130 +0,0 @@ -set ::num_tests 0 -set ::num_passed 0 -set ::num_failed 0 -set ::tests_failed {} - -proc fail {msg} { - error "assertion:$msg" -} - -proc assert {condition} { - if {![uplevel 1 [list expr $condition]]} { - error "assertion:Expected condition '$condition' to be true ([uplevel 1 [list subst -nocommands $condition]])" - } -} - -proc assert_match {pattern value} { - if {![string match $pattern $value]} { - error "assertion:Expected '$value' to match '$pattern'" - } -} - -proc assert_equal {expected value} { - if {$expected ne $value} { - error "assertion:Expected '$value' to be equal to '$expected'" - } -} - -proc assert_error {pattern code} { - if {[catch {uplevel 1 $code} error]} { - assert_match $pattern $error - } else { - error "assertion:Expected an error but nothing was caught" - } -} - -proc assert_encoding {enc key} { - # Swapped out values don't have an encoding, so make sure that - # the value is swapped in before checking the encoding. - set dbg [r debug object $key] - while {[string match "* swapped at:*" $dbg]} { - r debug swapin $key - set dbg [r debug object $key] - } - assert_match "* encoding:$enc *" $dbg -} - -proc assert_type {type key} { - assert_equal $type [r type $key] -} - -# Wait for the specified condition to be true, with the specified number of -# max retries and delay between retries. Otherwise the 'elsescript' is -# executed. -proc wait_for_condition {maxtries delay e _else_ elsescript} { - while {[incr maxtries -1] >= 0} { - set errcode [catch {uplevel 1 [list expr $e]} result] - if {$errcode == 0} { - if {$result} break - } else { - return -code $errcode $result - } - after $delay - } - if {$maxtries == -1} { - set errcode [catch [uplevel 1 $elsescript] result] - return -code $errcode $result - } -} - -proc test {name code {okpattern undefined}} { - # abort if tagged with a tag to deny - foreach tag $::denytags { - if {[lsearch $::tags $tag] >= 0} { - return - } - } - - # check if tagged with at least 1 tag to allow when there *is* a list - # of tags to allow, because default policy is to run everything - if {[llength $::allowtags] > 0} { - set matched 0 - foreach tag $::allowtags { - if {[lsearch $::tags $tag] >= 0} { - incr matched - } - } - if {$matched < 1} { - return - } - } - - incr ::num_tests - set details {} - lappend details "$name in $::curfile" - - send_data_packet $::test_server_fd testing $name - - if {[catch {set retval [uplevel 1 $code]} error]} { - if {[string match "assertion:*" $error]} { - set msg [string range $error 10 end] - lappend details $msg - lappend ::tests_failed $details - - incr ::num_failed - send_data_packet $::test_server_fd err [join $details "\n"] - } else { - # Re-raise, let handler up the stack take care of this. 
- error $error $::errorInfo - } - } else { - if {$okpattern eq "undefined" || $okpattern eq $retval || [string match $okpattern $retval]} { - incr ::num_passed - send_data_packet $::test_server_fd ok $name - } else { - set msg "Expected '$okpattern' to equal or match '$retval'" - lappend details $msg - lappend ::tests_failed $details - - incr ::num_failed - send_data_packet $::test_server_fd err [join $details "\n"] - } - } - - if {$::traceleaks} { - set output [exec leaks redis-server] - if {![string match {*0 leaks*} $output]} { - send_data_packet $::test_server_fd err "Detected a memory leak in test '$name': $output" - } - } -} diff --git a/tools/pika_migrate/tests/support/tmpfile.tcl b/tools/pika_migrate/tests/support/tmpfile.tcl deleted file mode 100644 index 809f587306..0000000000 --- a/tools/pika_migrate/tests/support/tmpfile.tcl +++ /dev/null @@ -1,15 +0,0 @@ -set ::tmpcounter 0 -set ::tmproot "./tests/tmp" -file mkdir $::tmproot - -# returns a dirname unique to this process to write to -proc tmpdir {basename} { - set dir [file join $::tmproot $basename.[pid].[incr ::tmpcounter]] - file mkdir $dir - set _ $dir -} - -# return a filename unique to this process to write to -proc tmpfile {basename} { - file join $::tmproot $basename.[pid].[incr ::tmpcounter] -} diff --git a/tools/pika_migrate/tests/support/util.tcl b/tools/pika_migrate/tests/support/util.tcl deleted file mode 100644 index cd5b9b511f..0000000000 --- a/tools/pika_migrate/tests/support/util.tcl +++ /dev/null @@ -1,371 +0,0 @@ -proc randstring {min max {type binary}} { - set len [expr {$min+int(rand()*($max-$min+1))}] - set output {} - if {$type eq {binary}} { - set minval 0 - set maxval 255 - } elseif {$type eq {alpha}} { - set minval 48 - set maxval 122 - } elseif {$type eq {compr}} { - set minval 48 - set maxval 52 - } - while {$len} { - append output [format "%c" [expr {$minval+int(rand()*($maxval-$minval+1))}]] - incr len -1 - } - return $output -} - -# Useful for some test -proc zlistAlikeSort {a b} { - if {[lindex $a 0] > [lindex $b 0]} {return 1} - if {[lindex $a 0] < [lindex $b 0]} {return -1} - string compare [lindex $a 1] [lindex $b 1] -} - -# Return all log lines starting with the first line that contains a warning. -# Generally, this will be an assertion error with a stack trace. -proc warnings_from_file {filename} { - set lines [split [exec cat $filename] "\n"] - set matched 0 - set logall 0 - set result {} - foreach line $lines { - if {[string match {*REDIS BUG REPORT START*} $line]} { - set logall 1 - } - if {[regexp {^\[\d+\]\s+\d+\s+\w+\s+\d{2}:\d{2}:\d{2} \#} $line]} { - set matched 1 - } - if {$logall || $matched} { - lappend result $line - } - } - join $result "\n" -} - -# Return value for INFO property -proc status {r property} { - if {[regexp "\r\n$property:(.*?)\r\n" [{*}$r info] _ value]} { - set _ $value - } -} - -proc waitForBgsave r { - while 1 { - if {[status r rdb_bgsave_in_progress] eq 1} { - if {$::verbose} { - puts -nonewline "\nWaiting for background save to finish... " - flush stdout - } - after 1000 - } else { - break - } - } -} - -proc waitForBgrewriteaof r { - while 1 { - if {[status r aof_rewrite_in_progress] eq 1} { - if {$::verbose} { - puts -nonewline "\nWaiting for background AOF rewrite to finish... " - flush stdout - } - after 1000 - } else { - break - } - } -} - -proc wait_for_sync r { - while 1 { - if {[status $r master_link_status] eq "down"} { - after 10 - } else { - break - } - } -} - -# Random integer between 0 and max (excluded). 
-proc randomInt {max} { - expr {int(rand()*$max)} -} - -# Random signed integer between -max and max (both extremes excluded). -proc randomSignedInt {max} { - set i [randomInt $max] - if {rand() > 0.5} { - set i -$i - } - return $i -} - -proc randpath args { - set path [expr {int(rand()*[llength $args])}] - uplevel 1 [lindex $args $path] -} - -proc randomValue {} { - randpath { - # Small enough to likely collide - randomSignedInt 1000 - } { - # 32 bit compressible signed/unsigned - randpath {randomSignedInt 2000000000} {randomSignedInt 4000000000} - } { - # 64 bit - randpath {randomSignedInt 1000000000000} - } { - # Random string - randpath {randstring 0 256 alpha} \ - {randstring 0 256 compr} \ - {randstring 0 256 binary} - } -} - -proc randomKey {} { - randpath { - # Small enough to likely collide - randomInt 1000 - } { - # 32 bit compressible signed/unsigned - randpath {randomInt 2000000000} {randomInt 4000000000} - } { - # 64 bit - randpath {randomInt 1000000000000} - } { - # Random string - randpath {randstring 1 256 alpha} \ - {randstring 1 256 compr} - } -} - -proc findKeyWithType {r type} { - for {set j 0} {$j < 20} {incr j} { - set k [{*}$r randomkey] - if {$k eq {}} { - return {} - } - if {[{*}$r type $k] eq $type} { - return $k - } - } - return {} -} - -proc createComplexDataset {r ops {opt {}}} { - for {set j 0} {$j < $ops} {incr j} { - set k [randomKey] - set k2 [randomKey] - set f [randomValue] - set v [randomValue] - - if {[lsearch -exact $opt useexpire] != -1} { - if {rand() < 0.1} { - {*}$r expire [randomKey] [randomInt 2] - } - } - - randpath { - set d [expr {rand()}] - } { - set d [expr {rand()}] - } { - set d [expr {rand()}] - } { - set d [expr {rand()}] - } { - set d [expr {rand()}] - } { - randpath {set d +inf} {set d -inf} - } - set t [{*}$r type $k] - - if {$t eq {none}} { - randpath { - {*}$r set $k $v - } { - {*}$r lpush $k $v - } { - {*}$r sadd $k $v - } { - {*}$r zadd $k $d $v - } { - {*}$r hset $k $f $v - } { - {*}$r del $k - } - set t [{*}$r type $k] - } - - switch $t { - {string} { - # Nothing to do - } - {list} { - randpath {{*}$r lpush $k $v} \ - {{*}$r rpush $k $v} \ - {{*}$r lrem $k 0 $v} \ - {{*}$r rpop $k} \ - {{*}$r lpop $k} - } - {set} { - randpath {{*}$r sadd $k $v} \ - {{*}$r srem $k $v} \ - { - set otherset [findKeyWithType {*}$r set] - if {$otherset ne {}} { - randpath { - {*}$r sunionstore $k2 $k $otherset - } { - {*}$r sinterstore $k2 $k $otherset - } { - {*}$r sdiffstore $k2 $k $otherset - } - } - } - } - {zset} { - randpath {{*}$r zadd $k $d $v} \ - {{*}$r zrem $k $v} \ - { - set otherzset [findKeyWithType {*}$r zset] - if {$otherzset ne {}} { - randpath { - {*}$r zunionstore $k2 2 $k $otherzset - } { - {*}$r zinterstore $k2 2 $k $otherzset - } - } - } - } - {hash} { - randpath {{*}$r hset $k $f $v} \ - {{*}$r hdel $k $f} - } - } - } -} - -proc formatCommand {args} { - set cmd "*[llength $args]\r\n" - foreach a $args { - append cmd "$[string length $a]\r\n$a\r\n" - } - set _ $cmd -} - -proc csvdump r { - set o {} - foreach k [lsort [{*}$r keys *]] { - set type [{*}$r type $k] - append o [csvstring $k] , [csvstring $type] , - switch $type { - string { - append o [csvstring [{*}$r get $k]] "\n" - } - list { - foreach e [{*}$r lrange $k 0 -1] { - append o [csvstring $e] , - } - append o "\n" - } - set { - foreach e [lsort [{*}$r smembers $k]] { - append o [csvstring $e] , - } - append o "\n" - } - zset { - foreach e [{*}$r zrange $k 0 -1 withscores] { - append o [csvstring $e] , - } - append o "\n" - } - hash { - set fields [{*}$r hgetall $k] - 
set newfields {} - foreach {k v} $fields { - lappend newfields [list $k $v] - } - set fields [lsort -index 0 $newfields] - foreach kv $fields { - append o [csvstring [lindex $kv 0]] , - append o [csvstring [lindex $kv 1]] , - } - append o "\n" - } - } - } - return $o -} - -proc csvstring s { - return "\"$s\"" -} - -proc roundFloat f { - format "%.10g" $f -} - -proc find_available_port start { - for {set j $start} {$j < $start+1024} {incr j} { - if {[catch { - set fd [socket 127.0.0.1 $j] - }]} { - return $j - } else { - close $fd - } - } - if {$j == $start+1024} { - error "Can't find a non busy port in the $start-[expr {$start+1023}] range." - } -} - -# Test if TERM looks like to support colors -proc color_term {} { - expr {[info exists ::env(TERM)] && [string match *xterm* $::env(TERM)]} -} - -proc colorstr {color str} { - if {[color_term]} { - set b 0 - if {[string range $color 0 4] eq {bold-}} { - set b 1 - set color [string range $color 5 end] - } - switch $color { - red {set colorcode {31}} - green {set colorcode {32}} - yellow {set colorcode {33}} - blue {set colorcode {34}} - magenta {set colorcode {35}} - cyan {set colorcode {36}} - white {set colorcode {37}} - default {set colorcode {37}} - } - if {$colorcode ne {}} { - return "\033\[$b;${colorcode};49m$str\033\[0m" - } - } else { - return $str - } -} - -# Execute a background process writing random data for the specified number -# of seconds to the specified Redis instance. -proc start_write_load {host port seconds} { - set tclsh [info nameofexecutable] - exec $tclsh tests/helpers/gen_write_load.tcl $host $port $seconds & -} - -# Stop a process generating write load executed with start_write_load. -proc stop_write_load {handle} { - catch {exec /bin/kill -9 $handle} -} diff --git a/tools/pika_migrate/tests/test_helper.tcl b/tools/pika_migrate/tests/test_helper.tcl deleted file mode 100644 index d1ebde1c48..0000000000 --- a/tools/pika_migrate/tests/test_helper.tcl +++ /dev/null @@ -1,545 +0,0 @@ -# Redis test suite. Copyright (C) 2009 Salvatore Sanfilippo antirez@gmail.com -# This software is released under the BSD License. See the COPYING file for -# more information. - -package require Tcl 8.5 - -set tcl_precision 17 -source tests/support/redis.tcl -source tests/support/server.tcl -source tests/support/tmpfile.tcl -source tests/support/test.tcl -source tests/support/util.tcl - -set ::all_tests { - unit/printver - unit/auth - unit/protocol - unit/basic - unit/scan - unit/type/list - unit/type/list-2 - unit/type/list-3 - unit/type/set - unit/type/zset - unit/type/hash - unit/sort - unit/expire - unit/other - unit/multi - unit/quit - unit/aofrw - integration/replication - integration/replication-2 - integration/replication-3 - integration/replication-4 - integration/replication-psync - integration/aof - integration/rdb - integration/convert-zipmap-hash-on-load - unit/pubsub - unit/slowlog - unit/scripting - unit/maxmemory - unit/introspection - unit/limits - unit/obuf-limits - unit/dump - unit/bitops - unit/memefficiency - unit/hyperloglog -} -# Index to the next test to run in the ::all_tests list. 
-set ::next_test 0 - -set ::host 127.0.0.1 -set ::port 21111 -set ::traceleaks 0 -set ::valgrind 0 -set ::verbose 0 -set ::quiet 0 -set ::denytags {} -set ::allowtags {} -set ::external 0; # If "1" this means, we are running against external instance -set ::file ""; # If set, runs only the tests in this comma separated list -set ::curfile ""; # Hold the filename of the current suite -set ::accurate 0; # If true runs fuzz tests with more iterations -set ::force_failure 0 -set ::timeout 600; # 10 minutes without progresses will quit the test. -set ::last_progress [clock seconds] -set ::active_servers {} ; # Pids of active Redis instances. - -# Set to 1 when we are running in client mode. The Redis test uses a -# server-client model to run tests simultaneously. The server instance -# runs the specified number of client instances that will actually run tests. -# The server is responsible of showing the result to the user, and exit with -# the appropriate exit code depending on the test outcome. -set ::client 0 -set ::numclients 16 - -proc execute_tests name { - set path "tests/$name.tcl" - set ::curfile $path - source $path - send_data_packet $::test_server_fd done "$name" -} - -# Setup a list to hold a stack of server configs. When calls to start_server -# are nested, use "srv 0 pid" to get the pid of the inner server. To access -# outer servers, use "srv -1 pid" etcetera. -set ::servers {} -proc srv {args} { - set level 0 - if {[string is integer [lindex $args 0]]} { - set level [lindex $args 0] - set property [lindex $args 1] - } else { - set property [lindex $args 0] - } - set srv [lindex $::servers end+$level] - dict get $srv $property -} - -# Provide easy access to the client for the inner server. It's possible to -# prepend the argument list with a negative level to access clients for -# servers running in outer blocks. -proc r {args} { - set level 0 - if {[string is integer [lindex $args 0]]} { - set level [lindex $args 0] - set args [lrange $args 1 end] - } - [srv $level "client"] {*}$args -} - -proc reconnect {args} { - set level [lindex $args 0] - if {[string length $level] == 0 || ![string is integer $level]} { - set level 0 - } - - set srv [lindex $::servers end+$level] - set host [dict get $srv "host"] - set port [dict get $srv "port"] - set config [dict get $srv "config"] - set client [redis $host $port] - dict set srv "client" $client - - # select the right db when we don't have to authenticate - if {![dict exists $config "requirepass"]} { - $client select 9 - } - - # re-set $srv in the servers list - lset ::servers end+$level $srv -} - -proc redis_deferring_client {args} { - set level 0 - if {[llength $args] > 0 && [string is integer [lindex $args 0]]} { - set level [lindex $args 0] - set args [lrange $args 1 end] - } - - # create client that defers reading reply - set client [redis [srv $level "host"] [srv $level "port"] 1] - - # select the right db and read the response (OK) - $client select 9 - $client read - return $client -} - -# Provide easy access to INFO properties. Same semantic as "proc r". -proc s {args} { - set level 0 - if {[string is integer [lindex $args 0]]} { - set level [lindex $args 0] - set args [lrange $args 1 end] - } - status [srv $level "client"] [lindex $args 0] -} - -proc cleanup {} { - if {!$::quiet} {puts -nonewline "Cleanup: may take some time... 
"} - flush stdout - catch {exec rm -rf {*}[glob tests/tmp/redis.conf.*]} - catch {exec rm -rf {*}[glob tests/tmp/server.*]} - if {!$::quiet} {puts "OK"} -} - -proc test_server_main {} { - cleanup - set tclsh [info nameofexecutable] - # Open a listening socket, trying different ports in order to find a - # non busy one. - set port [find_available_port 11111] - if {!$::quiet} { - puts "Starting test server at port $port" - } - socket -server accept_test_clients -myaddr 127.0.0.1 $port - - # Start the client instances - set ::clients_pids {} - set start_port [expr {$::port+100}] - for {set j 0} {$j < $::numclients} {incr j} { - set start_port [find_available_port $start_port] - set p [exec $tclsh [info script] {*}$::argv \ - --client $port --port $start_port &] - lappend ::clients_pids $p - incr start_port 10 - } - - # Setup global state for the test server - set ::idle_clients {} - set ::active_clients {} - array set ::active_clients_task {} - array set ::clients_start_time {} - set ::clients_time_history {} - set ::failed_tests {} - - # Enter the event loop to handle clients I/O - after 100 test_server_cron - vwait forever -} - -# This function gets called 10 times per second. -proc test_server_cron {} { - set elapsed [expr {[clock seconds]-$::last_progress}] - - if {$elapsed > $::timeout} { - set err "\[[colorstr red TIMEOUT]\]: clients state report follows." - puts $err - show_clients_state - kill_clients - force_kill_all_servers - the_end - } - - after 100 test_server_cron -} - -proc accept_test_clients {fd addr port} { - fconfigure $fd -encoding binary - fileevent $fd readable [list read_from_test_client $fd] -} - -# This is the readable handler of our test server. Clients send us messages -# in the form of a status code such and additional data. Supported -# status types are: -# -# ready: the client is ready to execute the command. Only sent at client -# startup. The server will queue the client FD in the list of idle -# clients. -# testing: just used to signal that a given test started. -# ok: a test was executed with success. -# err: a test was executed with an error. -# exception: there was a runtime exception while executing the test. -# done: all the specified test file was processed, this test client is -# ready to accept a new task. 
-proc read_from_test_client fd {
-    set bytes [gets $fd]
-    set payload [read $fd $bytes]
-    foreach {status data} $payload break
-    set ::last_progress [clock seconds]
-
-    if {$status eq {ready}} {
-        if {!$::quiet} {
-            puts "\[$status\]: $data"
-        }
-        signal_idle_client $fd
-    } elseif {$status eq {done}} {
-        set elapsed [expr {[clock seconds]-$::clients_start_time($fd)}]
-        set all_tests_count [llength $::all_tests]
-        set running_tests_count [expr {[llength $::active_clients]-1}]
-        set completed_tests_count [expr {$::next_test-$running_tests_count}]
-        puts "\[$completed_tests_count/$all_tests_count [colorstr yellow $status]\]: $data ($elapsed seconds)"
-        lappend ::clients_time_history $elapsed $data
-        signal_idle_client $fd
-        set ::active_clients_task($fd) DONE
-    } elseif {$status eq {ok}} {
-        if {!$::quiet} {
-            puts "\[[colorstr green $status]\]: $data"
-        }
-        set ::active_clients_task($fd) "(OK) $data"
-    } elseif {$status eq {err}} {
-        set err "\[[colorstr red $status]\]: $data"
-        puts $err
-        lappend ::failed_tests $err
-        set ::active_clients_task($fd) "(ERR) $data"
-    } elseif {$status eq {exception}} {
-        puts "\[[colorstr red $status]\]: $data"
-        kill_clients
-        force_kill_all_servers
-        exit 1
-    } elseif {$status eq {testing}} {
-        set ::active_clients_task($fd) "(IN PROGRESS) $data"
-    } elseif {$status eq {server-spawned}} {
-        lappend ::active_servers $data
-    } elseif {$status eq {server-killed}} {
-        set ::active_servers [lsearch -all -inline -not -exact $::active_servers $data]
-    } else {
-        if {!$::quiet} {
-            puts "\[$status\]: $data"
-        }
-    }
-}
-
-proc show_clients_state {} {
-    # The following loop is only useful for debugging tests that may
-    # enter an infinite loop. Commented out normally.
-    foreach x $::active_clients {
-        if {[info exists ::active_clients_task($x)]} {
-            puts "$x => $::active_clients_task($x)"
-        } else {
-            puts "$x => ???"
-        }
-    }
-}
-
-proc kill_clients {} {
-    foreach p $::clients_pids {
-        catch {exec kill $p}
-    }
-}
-
-proc force_kill_all_servers {} {
-    foreach p $::active_servers {
-        puts "Killing still running Redis server $p"
-        catch {exec kill -9 $p}
-    }
-}
-
-# A new client is idle. Remove it from the list of active clients and
-# if there are still test units to run, launch them.
-proc signal_idle_client fd {
-    # Remove this fd from the list of active clients.
-    set ::active_clients \
-        [lsearch -all -inline -not -exact $::active_clients $fd]
-
-    if 0 {show_clients_state}
-
-    # New unit to process?
-    if {$::next_test != [llength $::all_tests]} {
-        if {!$::quiet} {
-            puts [colorstr bold-white "Testing [lindex $::all_tests $::next_test]"]
-            set ::active_clients_task($fd) "ASSIGNED: $fd ([lindex $::all_tests $::next_test])"
-        }
-        set ::clients_start_time($fd) [clock seconds]
-        send_data_packet $fd run [lindex $::all_tests $::next_test]
-        lappend ::active_clients $fd
-        incr ::next_test
-    } else {
-        lappend ::idle_clients $fd
-        if {[llength $::active_clients] == 0} {
-            the_end
-        }
-    }
-}
-
-# The the_end function gets called when all the test units have been
-# executed, so the whole test run is finished.
-proc the_end {} {
-    # TODO: print the status, exit with the right exit code.
-    puts "\n                   The End\n"
-    puts "Execution time of different units:"
-    foreach {time name} $::clients_time_history {
-        puts "    $time seconds - $name"
-    }
-    if {[llength $::failed_tests]} {
-        puts "\n[colorstr bold-red {!!!
WARNING}] The following tests failed:\n" - foreach failed $::failed_tests { - puts "*** $failed" - } - cleanup - exit 1 - } else { - puts "\n[colorstr bold-white {\o/}] [colorstr bold-green {All tests passed without errors!}]\n" - cleanup - exit 0 - } -} - -# The client is not even driven (the test server is instead) as we just need -# to read the command, execute, reply... all this in a loop. -proc test_client_main server_port { - set ::test_server_fd [socket localhost $server_port] - fconfigure $::test_server_fd -encoding binary - send_data_packet $::test_server_fd ready [pid] - while 1 { - set bytes [gets $::test_server_fd] - set payload [read $::test_server_fd $bytes] - foreach {cmd data} $payload break - if {$cmd eq {run}} { - execute_tests $data - } else { - error "Unknown test client command: $cmd" - } - } -} - -proc send_data_packet {fd status data} { - set payload [list $status $data] - puts $fd [string length $payload] - puts -nonewline $fd $payload - flush $fd -} - -proc print_help_screen {} { - puts [join { - "--valgrind Run the test over valgrind." - "--accurate Run slow randomized tests for more iterations." - "--quiet Don't show individual tests." - "--single Just execute the specified unit (see next option)." - "--list-tests List all the available test units." - "--clients Number of test clients (default 16)." - "--timeout Test timeout in seconds (default 10 min)." - "--force-failure Force the execution of a test that always fails." - "--help Print this help screen." - } "\n"] -} - -# parse arguments -for {set j 0} {$j < [llength $argv]} {incr j} { - set opt [lindex $argv $j] - set arg [lindex $argv [expr $j+1]] - if {$opt eq {--tags}} { - foreach tag $arg { - if {[string index $tag 0] eq "-"} { - lappend ::denytags [string range $tag 1 end] - } else { - lappend ::allowtags $tag - } - } - incr j - } elseif {$opt eq {--valgrind}} { - set ::valgrind 1 - } elseif {$opt eq {--quiet}} { - set ::quiet 1 - } elseif {$opt eq {--host}} { - set ::external 1 - set ::host $arg - incr j - } elseif {$opt eq {--port}} { - set ::port $arg - incr j - } elseif {$opt eq {--accurate}} { - set ::accurate 1 - } elseif {$opt eq {--force-failure}} { - set ::force_failure 1 - } elseif {$opt eq {--single}} { - set ::all_tests $arg - incr j - } elseif {$opt eq {--list-tests}} { - foreach t $::all_tests { - puts $t - } - exit 0 - } elseif {$opt eq {--client}} { - set ::client 1 - set ::test_server_port $arg - incr j - } elseif {$opt eq {--clients}} { - set ::numclients $arg - incr j - } elseif {$opt eq {--timeout}} { - set ::timeout $arg - incr j - } elseif {$opt eq {--help}} { - print_help_screen - exit 0 - } else { - puts "Wrong argument: $opt" - exit 1 - } -} - -proc attach_to_replication_stream {} { - set s [socket [srv 0 "host"] [srv 0 "port"]] - fconfigure $s -translation binary - puts -nonewline $s "SYNC\r\n" - flush $s - - # Get the count - set count [gets $s] - set prefix [string range $count 0 0] - if {$prefix ne {$}} { - error "attach_to_replication_stream error. Received '$count' as count." - } - set count [string range $count 1 end] - - # Consume the bulk payload - while {$count} { - set buf [read $s $count] - set count [expr {$count-[string length $buf]}] - } - return $s -} - -proc read_from_replication_stream {s} { - fconfigure $s -blocking 0 - set attempt 0 - while {[gets $s count] == -1} { - if {[incr attempt] == 10} return "" - after 100 - } - fconfigure $s -blocking 1 - set count [string range $count 1 end] - - # Return a list of arguments for the command. 
- set res {} - for {set j 0} {$j < $count} {incr j} { - read $s 1 - set arg [::redis::redis_bulk_read $s] - if {$j == 0} {set arg [string tolower $arg]} - lappend res $arg - } - return $res -} - -proc assert_replication_stream {s patterns} { - for {set j 0} {$j < [llength $patterns]} {incr j} { - assert_match [lindex $patterns $j] [read_from_replication_stream $s] - } -} - -proc close_replication_stream {s} { - close $s -} - -# With the parallel test running multiple Redis instances at the same time -# we need a fast enough computer, otherwise a lot of tests may generate -# false positives. -# If the computer is too slow we revert the sequential test without any -# parallelism, that is, clients == 1. -proc is_a_slow_computer {} { - set start [clock milliseconds] - for {set j 0} {$j < 1000000} {incr j} {} - set elapsed [expr [clock milliseconds]-$start] - expr {$elapsed > 200} -} - -if {$::client} { - if {[catch { test_client_main $::test_server_port } err]} { - set estr "Executing test client: $err.\n$::errorInfo" - if {[catch {send_data_packet $::test_server_fd exception $estr}]} { - puts $estr - } - exit 1 - } -} else { - if {[is_a_slow_computer]} { - puts "** SLOW COMPUTER ** Using a single client to avoid false positives." - set ::numclients 1 - } - - if {[catch { test_server_main } err]} { - if {[string length $err] > 0} { - # only display error when not generated by the test suite - if {$err ne "exception"} { - puts $::errorInfo - } - exit 1 - } - } -} diff --git a/tools/pika_migrate/tests/unit/aofrw.tcl b/tools/pika_migrate/tests/unit/aofrw.tcl deleted file mode 100644 index a2d74168f3..0000000000 --- a/tools/pika_migrate/tests/unit/aofrw.tcl +++ /dev/null @@ -1,210 +0,0 @@ -start_server {tags {"aofrw"}} { - # Enable the AOF - r config set appendonly yes - r config set auto-aof-rewrite-percentage 0 ; # Disable auto-rewrite. - waitForBgrewriteaof r - - test {AOF rewrite during write load} { - # Start a write load for 10 seconds - set master [srv 0 client] - set master_host [srv 0 host] - set master_port [srv 0 port] - set load_handle0 [start_write_load $master_host $master_port 10] - set load_handle1 [start_write_load $master_host $master_port 10] - set load_handle2 [start_write_load $master_host $master_port 10] - set load_handle3 [start_write_load $master_host $master_port 10] - set load_handle4 [start_write_load $master_host $master_port 10] - - # Make sure the instance is really receiving data - wait_for_condition 50 100 { - [r dbsize] > 0 - } else { - fail "No write load detected." - } - - # After 3 seconds, start a rewrite, while the write load is still - # active. - after 3000 - r bgrewriteaof - waitForBgrewriteaof r - - # Let it run a bit more so that we'll append some data to the new - # AOF. - after 1000 - - # Stop the processes generating the load if they are still active - stop_write_load $load_handle0 - stop_write_load $load_handle1 - stop_write_load $load_handle2 - stop_write_load $load_handle3 - stop_write_load $load_handle4 - - # Make sure that we remain the only connected client. - # This step is needed to make sure there are no pending writes - # that will be processed between the two "debug digest" calls. 
- wait_for_condition 50 100 { - [llength [split [string trim [r client list]] "\n"]] == 1 - } else { - puts [r client list] - fail "Clients generating loads are not disconnecting" - } - - # Get the data set digest - set d1 [r debug digest] - - # Load the AOF - r debug loadaof - set d2 [r debug digest] - - # Make sure they are the same - assert {$d1 eq $d2} - } -} - -start_server {tags {"aofrw"}} { - test {Turning off AOF kills the background writing child if any} { - r config set appendonly yes - waitForBgrewriteaof r - r multi - r bgrewriteaof - r config set appendonly no - r exec - wait_for_condition 50 100 { - [string match {*Killing*AOF*child*} [exec tail -n5 < [srv 0 stdout]]] - } else { - fail "Can't find 'Killing AOF child' into recent logs" - } - } - - foreach d {string int} { - foreach e {ziplist linkedlist} { - test "AOF rewrite of list with $e encoding, $d data" { - r flushall - if {$e eq {ziplist}} {set len 10} else {set len 1000} - for {set j 0} {$j < $len} {incr j} { - if {$d eq {string}} { - set data [randstring 0 16 alpha] - } else { - set data [randomInt 4000000000] - } - r lpush key $data - } - assert_equal [r object encoding key] $e - set d1 [r debug digest] - r bgrewriteaof - waitForBgrewriteaof r - r debug loadaof - set d2 [r debug digest] - if {$d1 ne $d2} { - error "assertion:$d1 is not equal to $d2" - } - } - } - } - - foreach d {string int} { - foreach e {intset hashtable} { - test "AOF rewrite of set with $e encoding, $d data" { - r flushall - if {$e eq {intset}} {set len 10} else {set len 1000} - for {set j 0} {$j < $len} {incr j} { - if {$d eq {string}} { - set data [randstring 0 16 alpha] - } else { - set data [randomInt 4000000000] - } - r sadd key $data - } - if {$d ne {string}} { - assert_equal [r object encoding key] $e - } - set d1 [r debug digest] - r bgrewriteaof - waitForBgrewriteaof r - r debug loadaof - set d2 [r debug digest] - if {$d1 ne $d2} { - error "assertion:$d1 is not equal to $d2" - } - } - } - } - - foreach d {string int} { - foreach e {ziplist hashtable} { - test "AOF rewrite of hash with $e encoding, $d data" { - r flushall - if {$e eq {ziplist}} {set len 10} else {set len 1000} - for {set j 0} {$j < $len} {incr j} { - if {$d eq {string}} { - set data [randstring 0 16 alpha] - } else { - set data [randomInt 4000000000] - } - r hset key $data $data - } - assert_equal [r object encoding key] $e - set d1 [r debug digest] - r bgrewriteaof - waitForBgrewriteaof r - r debug loadaof - set d2 [r debug digest] - if {$d1 ne $d2} { - error "assertion:$d1 is not equal to $d2" - } - } - } - } - - foreach d {string int} { - foreach e {ziplist skiplist} { - test "AOF rewrite of zset with $e encoding, $d data" { - r flushall - if {$e eq {ziplist}} {set len 10} else {set len 1000} - for {set j 0} {$j < $len} {incr j} { - if {$d eq {string}} { - set data [randstring 0 16 alpha] - } else { - set data [randomInt 4000000000] - } - r zadd key [expr rand()] $data - } - assert_equal [r object encoding key] $e - set d1 [r debug digest] - r bgrewriteaof - waitForBgrewriteaof r - r debug loadaof - set d2 [r debug digest] - if {$d1 ne $d2} { - error "assertion:$d1 is not equal to $d2" - } - } - } - } - - test {BGREWRITEAOF is delayed if BGSAVE is in progress} { - r multi - r bgsave - r bgrewriteaof - r info persistence - set res [r exec] - assert_match {*scheduled*} [lindex $res 1] - assert_match {*aof_rewrite_scheduled:1*} [lindex $res 2] - while {[string match {*aof_rewrite_scheduled:1*} [r info persistence]]} { - after 100 - } - } - - test {BGREWRITEAOF is refused 
if already in progress} { - catch { - r multi - r bgrewriteaof - r bgrewriteaof - r exec - } e - assert_match {*ERR*already*} $e - while {[string match {*aof_rewrite_scheduled:1*} [r info persistence]]} { - after 100 - } - } -} diff --git a/tools/pika_migrate/tests/unit/auth.tcl b/tools/pika_migrate/tests/unit/auth.tcl deleted file mode 100644 index 633cda95c9..0000000000 --- a/tools/pika_migrate/tests/unit/auth.tcl +++ /dev/null @@ -1,27 +0,0 @@ -start_server {tags {"auth"}} { - test {AUTH fails if there is no password configured server side} { - catch {r auth foo} err - set _ $err - } {ERR*no password*} -} - -start_server {tags {"auth"} overrides {requirepass foobar}} { - test {AUTH fails when a wrong password is given} { - catch {r auth wrong!} err - set _ $err - } {ERR*invalid password} - - test {Arbitrary command gives an error when AUTH is required} { - catch {r set foo bar} err - set _ $err - } {NOAUTH*} - - test {AUTH succeeds when the right password is given} { - r auth foobar - } {OK} - - test {Once AUTH succeeded we can actually send commands to the server} { - r set foo 100 - r incr foo - } {101} -} diff --git a/tools/pika_migrate/tests/unit/basic.tcl b/tools/pika_migrate/tests/unit/basic.tcl deleted file mode 100644 index 6f725d299b..0000000000 --- a/tools/pika_migrate/tests/unit/basic.tcl +++ /dev/null @@ -1,783 +0,0 @@ -start_server {tags {"basic"}} { - test {DEL all keys to start with a clean DB} { - foreach key [r keys *] {r del $key} - r dbsize - } {0} - - test {SET and GET an item} { - r set x foobar - r get x - } {foobar} - - test {SET and GET an empty item} { - r set x {} - r get x - } {} - - test {DEL against a single item} { - r del x - r get x - } {} - - test {Vararg DEL} { - r set foo1 a - r set foo2 b - r set foo3 c - list [r del foo1 foo2 foo3 foo4] [r mget foo1 foo2 foo3] - } {3 {{} {} {}}} - - test {KEYS with pattern} { - foreach key {key_x key_y key_z foo_a foo_b foo_c} { - r set $key hello - } - lsort [r keys foo*] - } {foo_a foo_b foo_c} - - test {KEYS to get all keys} { - lsort [r keys *] - } {foo_a foo_b foo_c key_x key_y key_z} - - test {DBSIZE} { - r dbsize - } {6} - - test {DEL all keys} { - foreach key [r keys *] {r del $key} - r dbsize - } {0} - - test {Very big payload in GET/SET} { - set buf [string repeat "abcd" 1000000] - r set foo $buf - r get foo - } [string repeat "abcd" 1000000] - - tags {"slow"} { - test {Very big payload random access} { - set err {} - array set payload {} - for {set j 0} {$j < 100} {incr j} { - set size [expr 1+[randomInt 100000]] - set buf [string repeat "pl-$j" $size] - set payload($j) $buf - r set bigpayload_$j $buf - } - for {set j 0} {$j < 1000} {incr j} { - set index [randomInt 100] - set buf [r get bigpayload_$index] - if {$buf != $payload($index)} { - set err "Values differ: I set '$payload($index)' but I read back '$buf'" - break - } - } - unset payload - set _ $err - } {} - - test {SET 10000 numeric keys and access all them in reverse order} { - set err {} - for {set x 0} {$x < 10000} {incr x} { - r set $x $x - } - set sum 0 - for {set x 9999} {$x >= 0} {incr x -1} { - set val [r get $x] - if {$val ne $x} { - set err "Element at position $x is $val instead of $x" - break - } - } - set _ $err - } {} - - test {DBSIZE should be 10101 now} { - r dbsize - } {10101} - } - - test {INCR against non existing key} { - set res {} - append res [r incr novar] - append res [r get novar] - } {11} - - test {INCR against key created by incr itself} { - r incr novar - } {2} - - test {INCR against key originally set with SET} { - r 
set novar 100 - r incr novar - } {101} - - test {INCR over 32bit value} { - r set novar 17179869184 - r incr novar - } {17179869185} - - test {INCRBY over 32bit value with over 32bit increment} { - r set novar 17179869184 - r incrby novar 17179869184 - } {34359738368} - - test {INCR fails against key with spaces (left)} { - r set novar " 11" - catch {r incr novar} err - format $err - } {ERR*} - - test {INCR fails against key with spaces (right)} { - r set novar "11 " - catch {r incr novar} err - format $err - } {ERR*} - - test {INCR fails against key with spaces (both)} { - r set novar " 11 " - catch {r incr novar} err - format $err - } {ERR*} - - test {INCR fails against a key holding a list} { - r rpush mylist 1 - catch {r incr mylist} err - r rpop mylist - format $err - } {WRONGTYPE*} - - test {DECRBY over 32bit value with over 32bit increment, negative res} { - r set novar 17179869184 - r decrby novar 17179869185 - } {-1} - - test {INCRBYFLOAT against non existing key} { - r del novar - list [roundFloat [r incrbyfloat novar 1]] \ - [roundFloat [r get novar]] \ - [roundFloat [r incrbyfloat novar 0.25]] \ - [roundFloat [r get novar]] - } {1 1 1.25 1.25} - - test {INCRBYFLOAT against key originally set with SET} { - r set novar 1.5 - roundFloat [r incrbyfloat novar 1.5] - } {3} - - test {INCRBYFLOAT over 32bit value} { - r set novar 17179869184 - r incrbyfloat novar 1.5 - } {17179869185.5} - - test {INCRBYFLOAT over 32bit value with over 32bit increment} { - r set novar 17179869184 - r incrbyfloat novar 17179869184 - } {34359738368} - - test {INCRBYFLOAT fails against key with spaces (left)} { - set err {} - r set novar " 11" - catch {r incrbyfloat novar 1.0} err - format $err - } {ERR*valid*} - - test {INCRBYFLOAT fails against key with spaces (right)} { - set err {} - r set novar "11 " - catch {r incrbyfloat novar 1.0} err - format $err - } {ERR*valid*} - - test {INCRBYFLOAT fails against key with spaces (both)} { - set err {} - r set novar " 11 " - catch {r incrbyfloat novar 1.0} err - format $err - } {ERR*valid*} - - test {INCRBYFLOAT fails against a key holding a list} { - r del mylist - set err {} - r rpush mylist 1 - catch {r incrbyfloat mylist 1.0} err - r del mylist - format $err - } {WRONGTYPE*} - - test {INCRBYFLOAT does not allow NaN or Infinity} { - r set foo 0 - set err {} - catch {r incrbyfloat foo +inf} err - set err - # p.s. no way I can force NaN to test it from the API because - # there is no way to increment / decrement by infinity nor to - # perform divisions. - } {ERR*would produce*} - - test {INCRBYFLOAT decrement} { - r set foo 1 - roundFloat [r incrbyfloat foo -1.1] - } {-0.1} - - test "SETNX target key missing" { - r del novar - assert_equal 1 [r setnx novar foobared] - assert_equal "foobared" [r get novar] - } - - test "SETNX target key exists" { - r set novar foobared - assert_equal 0 [r setnx novar blabla] - assert_equal "foobared" [r get novar] - } - - test "SETNX against not-expired volatile key" { - r set x 10 - r expire x 10000 - assert_equal 0 [r setnx x 20] - assert_equal 10 [r get x] - } - - test "SETNX against expired volatile key" { - # Make it very unlikely for the key this test uses to be expired by the - # active expiry cycle. This is tightly coupled to the implementation of - # active expiry and dbAdd() but currently the only way to test that - # SETNX expires a key when it should have been. - for {set x 0} {$x < 9999} {incr x} { - r setex key-$x 3600 value - } - - # This will be one of 10000 expiring keys. 
A cycle is executed every - # 100ms, sampling 10 keys for being expired or not. This key will be - # expired for at most 1s when we wait 2s, resulting in a total sample - # of 100 keys. The probability of the success of this test being a - # false positive is therefore approx. 1%. - r set x 10 - r expire x 1 - - # Wait for the key to expire - after 2000 - - assert_equal 1 [r setnx x 20] - assert_equal 20 [r get x] - } - - test "DEL against expired key" { - r debug set-active-expire 0 - r setex keyExpire 1 valExpire - after 1100 - assert_equal 0 [r del keyExpire] - r debug set-active-expire 1 - } - - test {EXISTS} { - set res {} - r set newkey test - append res [r exists newkey] - r del newkey - append res [r exists newkey] - } {10} - - test {Zero length value in key. SET/GET/EXISTS} { - r set emptykey {} - set res [r get emptykey] - append res [r exists emptykey] - r del emptykey - append res [r exists emptykey] - } {10} - - test {Commands pipelining} { - set fd [r channel] - puts -nonewline $fd "SET k1 xyzk\r\nGET k1\r\nPING\r\n" - flush $fd - set res {} - append res [string match OK* [r read]] - append res [r read] - append res [string match PONG* [r read]] - format $res - } {1xyzk1} - - test {Non existing command} { - catch {r foobaredcommand} err - string match ERR* $err - } {1} - - test {RENAME basic usage} { - r set mykey hello - r rename mykey mykey1 - r rename mykey1 mykey2 - r get mykey2 - } {hello} - - test {RENAME source key should no longer exist} { - r exists mykey - } {0} - - test {RENAME against already existing key} { - r set mykey a - r set mykey2 b - r rename mykey2 mykey - set res [r get mykey] - append res [r exists mykey2] - } {b0} - - test {RENAMENX basic usage} { - r del mykey - r del mykey2 - r set mykey foobar - r renamenx mykey mykey2 - set res [r get mykey2] - append res [r exists mykey] - } {foobar0} - - test {RENAMENX against already existing key} { - r set mykey foo - r set mykey2 bar - r renamenx mykey mykey2 - } {0} - - test {RENAMENX against already existing key (2)} { - set res [r get mykey] - append res [r get mykey2] - } {foobar} - - test {RENAME against non existing source key} { - catch {r rename nokey foobar} err - format $err - } {ERR*} - - test {RENAME where source and dest key is the same} { - catch {r rename mykey mykey} err - format $err - } {ERR*} - - test {RENAME with volatile key, should move the TTL as well} { - r del mykey mykey2 - r set mykey foo - r expire mykey 100 - assert {[r ttl mykey] > 95 && [r ttl mykey] <= 100} - r rename mykey mykey2 - assert {[r ttl mykey2] > 95 && [r ttl mykey2] <= 100} - } - - test {RENAME with volatile key, should not inherit TTL of target key} { - r del mykey mykey2 - r set mykey foo - r set mykey2 bar - r expire mykey2 100 - assert {[r ttl mykey] == -1 && [r ttl mykey2] > 0} - r rename mykey mykey2 - r ttl mykey2 - } {-1} - - test {DEL all keys again (DB 0)} { - foreach key [r keys *] { - r del $key - } - r dbsize - } {0} - - test {DEL all keys again (DB 1)} { - r select 10 - foreach key [r keys *] { - r del $key - } - set res [r dbsize] - r select 9 - format $res - } {0} - - test {MOVE basic usage} { - r set mykey foobar - r move mykey 10 - set res {} - lappend res [r exists mykey] - lappend res [r dbsize] - r select 10 - lappend res [r get mykey] - lappend res [r dbsize] - r select 9 - format $res - } [list 0 0 foobar 1] - - test {MOVE against key existing in the target DB} { - r set mykey hello - r move mykey 10 - } {0} - - test {MOVE against non-integer DB (#1428)} { - r set mykey hello - catch {r move 
mykey notanumber} e - set e - } {*ERR*index out of range} - - test {SET/GET keys in different DBs} { - r set a hello - r set b world - r select 10 - r set a foo - r set b bared - r select 9 - set res {} - lappend res [r get a] - lappend res [r get b] - r select 10 - lappend res [r get a] - lappend res [r get b] - r select 9 - format $res - } {hello world foo bared} - - test {MGET} { - r flushdb - r set foo BAR - r set bar FOO - r mget foo bar - } {BAR FOO} - - test {MGET against non existing key} { - r mget foo baazz bar - } {BAR {} FOO} - - test {MGET against non-string key} { - r sadd myset ciao - r sadd myset bau - r mget foo baazz bar myset - } {BAR {} FOO {}} - - test {RANDOMKEY} { - r flushdb - r set foo x - r set bar y - set foo_seen 0 - set bar_seen 0 - for {set i 0} {$i < 100} {incr i} { - set rkey [r randomkey] - if {$rkey eq {foo}} { - set foo_seen 1 - } - if {$rkey eq {bar}} { - set bar_seen 1 - } - } - list $foo_seen $bar_seen - } {1 1} - - test {RANDOMKEY against empty DB} { - r flushdb - r randomkey - } {} - - test {RANDOMKEY regression 1} { - r flushdb - r set x 10 - r del x - r randomkey - } {} - - test {GETSET (set new value)} { - list [r getset foo xyz] [r get foo] - } {{} xyz} - - test {GETSET (replace old value)} { - r set foo bar - list [r getset foo xyz] [r get foo] - } {bar xyz} - - test {MSET base case} { - r mset x 10 y "foo bar" z "x x x x x x x\n\n\r\n" - r mget x y z - } [list 10 {foo bar} "x x x x x x x\n\n\r\n"] - - test {MSET wrong number of args} { - catch {r mset x 10 y "foo bar" z} err - format $err - } {*wrong number*} - - test {MSETNX with already existent key} { - list [r msetnx x1 xxx y2 yyy x 20] [r exists x1] [r exists y2] - } {0 0 0} - - test {MSETNX with not existing keys} { - list [r msetnx x1 xxx y2 yyy] [r get x1] [r get y2] - } {1 xxx yyy} - - test "STRLEN against non-existing key" { - assert_equal 0 [r strlen notakey] - } - - test "STRLEN against integer-encoded value" { - r set myinteger -555 - assert_equal 4 [r strlen myinteger] - } - - test "STRLEN against plain string" { - r set mystring "foozzz0123456789 baz" - assert_equal 20 [r strlen mystring] - } - - test "SETBIT against non-existing key" { - r del mykey - assert_equal 0 [r setbit mykey 1 1] - assert_equal [binary format B* 01000000] [r get mykey] - } - - test "SETBIT against string-encoded key" { - # Ascii "@" is integer 64 = 01 00 00 00 - r set mykey "@" - - assert_equal 0 [r setbit mykey 2 1] - assert_equal [binary format B* 01100000] [r get mykey] - assert_equal 1 [r setbit mykey 1 0] - assert_equal [binary format B* 00100000] [r get mykey] - } - - test "SETBIT against integer-encoded key" { - # Ascii "1" is integer 49 = 00 11 00 01 - r set mykey 1 - assert_encoding int mykey - - assert_equal 0 [r setbit mykey 6 1] - assert_equal [binary format B* 00110011] [r get mykey] - assert_equal 1 [r setbit mykey 2 0] - assert_equal [binary format B* 00010011] [r get mykey] - } - - test "SETBIT against key with wrong type" { - r del mykey - r lpush mykey "foo" - assert_error "WRONGTYPE*" {r setbit mykey 0 1} - } - - test "SETBIT with out of range bit offset" { - r del mykey - assert_error "*out of range*" {r setbit mykey [expr 4*1024*1024*1024] 1} - assert_error "*out of range*" {r setbit mykey -1 1} - } - - test "SETBIT with non-bit argument" { - r del mykey - assert_error "*out of range*" {r setbit mykey 0 -1} - assert_error "*out of range*" {r setbit mykey 0 2} - assert_error "*out of range*" {r setbit mykey 0 10} - assert_error "*out of range*" {r setbit mykey 0 20} - } - - test 
"SETBIT fuzzing" { - set str "" - set len [expr 256*8] - r del mykey - - for {set i 0} {$i < 2000} {incr i} { - set bitnum [randomInt $len] - set bitval [randomInt 2] - set fmt [format "%%-%ds%%d%%-s" $bitnum] - set head [string range $str 0 $bitnum-1] - set tail [string range $str $bitnum+1 end] - set str [string map {" " 0} [format $fmt $head $bitval $tail]] - - r setbit mykey $bitnum $bitval - assert_equal [binary format B* $str] [r get mykey] - } - } - - test "GETBIT against non-existing key" { - r del mykey - assert_equal 0 [r getbit mykey 0] - } - - test "GETBIT against string-encoded key" { - # Single byte with 2nd and 3rd bit set - r set mykey "`" - - # In-range - assert_equal 0 [r getbit mykey 0] - assert_equal 1 [r getbit mykey 1] - assert_equal 1 [r getbit mykey 2] - assert_equal 0 [r getbit mykey 3] - - # Out-range - assert_equal 0 [r getbit mykey 8] - assert_equal 0 [r getbit mykey 100] - assert_equal 0 [r getbit mykey 10000] - } - - test "GETBIT against integer-encoded key" { - r set mykey 1 - assert_encoding int mykey - - # Ascii "1" is integer 49 = 00 11 00 01 - assert_equal 0 [r getbit mykey 0] - assert_equal 0 [r getbit mykey 1] - assert_equal 1 [r getbit mykey 2] - assert_equal 1 [r getbit mykey 3] - - # Out-range - assert_equal 0 [r getbit mykey 8] - assert_equal 0 [r getbit mykey 100] - assert_equal 0 [r getbit mykey 10000] - } - - test "SETRANGE against non-existing key" { - r del mykey - assert_equal 3 [r setrange mykey 0 foo] - assert_equal "foo" [r get mykey] - - r del mykey - assert_equal 0 [r setrange mykey 0 ""] - assert_equal 0 [r exists mykey] - - r del mykey - assert_equal 4 [r setrange mykey 1 foo] - assert_equal "\000foo" [r get mykey] - } - - test "SETRANGE against string-encoded key" { - r set mykey "foo" - assert_equal 3 [r setrange mykey 0 b] - assert_equal "boo" [r get mykey] - - r set mykey "foo" - assert_equal 3 [r setrange mykey 0 ""] - assert_equal "foo" [r get mykey] - - r set mykey "foo" - assert_equal 3 [r setrange mykey 1 b] - assert_equal "fbo" [r get mykey] - - r set mykey "foo" - assert_equal 7 [r setrange mykey 4 bar] - assert_equal "foo\000bar" [r get mykey] - } - - test "SETRANGE against integer-encoded key" { - r set mykey 1234 - assert_encoding int mykey - assert_equal 4 [r setrange mykey 0 2] - assert_encoding raw mykey - assert_equal 2234 [r get mykey] - - # Shouldn't change encoding when nothing is set - r set mykey 1234 - assert_encoding int mykey - assert_equal 4 [r setrange mykey 0 ""] - assert_encoding int mykey - assert_equal 1234 [r get mykey] - - r set mykey 1234 - assert_encoding int mykey - assert_equal 4 [r setrange mykey 1 3] - assert_encoding raw mykey - assert_equal 1334 [r get mykey] - - r set mykey 1234 - assert_encoding int mykey - assert_equal 6 [r setrange mykey 5 2] - assert_encoding raw mykey - assert_equal "1234\0002" [r get mykey] - } - - test "SETRANGE against key with wrong type" { - r del mykey - r lpush mykey "foo" - assert_error "WRONGTYPE*" {r setrange mykey 0 bar} - } - - test "SETRANGE with out of range offset" { - r del mykey - assert_error "*maximum allowed size*" {r setrange mykey [expr 512*1024*1024-4] world} - - r set mykey "hello" - assert_error "*out of range*" {r setrange mykey -1 world} - assert_error "*maximum allowed size*" {r setrange mykey [expr 512*1024*1024-4] world} - } - - test "GETRANGE against non-existing key" { - r del mykey - assert_equal "" [r getrange mykey 0 -1] - } - - test "GETRANGE against string value" { - r set mykey "Hello World" - assert_equal "Hell" [r getrange mykey 0 3] 
- assert_equal "Hello World" [r getrange mykey 0 -1] - assert_equal "orld" [r getrange mykey -4 -1] - assert_equal "" [r getrange mykey 5 3] - assert_equal " World" [r getrange mykey 5 5000] - assert_equal "Hello World" [r getrange mykey -5000 10000] - } - - test "GETRANGE against integer-encoded value" { - r set mykey 1234 - assert_equal "123" [r getrange mykey 0 2] - assert_equal "1234" [r getrange mykey 0 -1] - assert_equal "234" [r getrange mykey -3 -1] - assert_equal "" [r getrange mykey 5 3] - assert_equal "4" [r getrange mykey 3 5000] - assert_equal "1234" [r getrange mykey -5000 10000] - } - - test "GETRANGE fuzzing" { - for {set i 0} {$i < 1000} {incr i} { - r set bin [set bin [randstring 0 1024 binary]] - set _start [set start [randomInt 1500]] - set _end [set end [randomInt 1500]] - if {$_start < 0} {set _start "end-[abs($_start)-1]"} - if {$_end < 0} {set _end "end-[abs($_end)-1]"} - assert_equal [string range $bin $_start $_end] [r getrange bin $start $end] - } - } - - test {Extended SET can detect syntax errors} { - set e {} - catch {r set foo bar non-existing-option} e - set e - } {*syntax*} - - test {Extended SET NX option} { - r del foo - set v1 [r set foo 1 nx] - set v2 [r set foo 2 nx] - list $v1 $v2 [r get foo] - } {OK {} 1} - - test {Extended SET XX option} { - r del foo - set v1 [r set foo 1 xx] - r set foo bar - set v2 [r set foo 2 xx] - list $v1 $v2 [r get foo] - } {{} OK 2} - - test {Extended SET EX option} { - r del foo - r set foo bar ex 10 - set ttl [r ttl foo] - assert {$ttl <= 10 && $ttl > 5} - } - - test {Extended SET PX option} { - r del foo - r set foo bar px 10000 - set ttl [r ttl foo] - assert {$ttl <= 10 && $ttl > 5} - } - - test {Extended SET using multiple options at once} { - r set foo val - assert {[r set foo bar xx px 10000] eq {OK}} - set ttl [r ttl foo] - assert {$ttl <= 10 && $ttl > 5} - } - - test {KEYS * two times with long key, Github issue #1208} { - r flushdb - r set dlskeriewrioeuwqoirueioqwrueoqwrueqw test - r keys * - r keys * - } {dlskeriewrioeuwqoirueioqwrueoqwrueqw} - - test {GETRANGE with huge ranges, Github issue #1844} { - r set foo bar - r getrange foo 0 4294967297 - } {bar} -} diff --git a/tools/pika_migrate/tests/unit/bitops.tcl b/tools/pika_migrate/tests/unit/bitops.tcl deleted file mode 100644 index 9751850ad4..0000000000 --- a/tools/pika_migrate/tests/unit/bitops.tcl +++ /dev/null @@ -1,341 +0,0 @@ -# Compare Redis commadns against Tcl implementations of the same commands. 
-proc count_bits s { - binary scan $s b* bits - string length [regsub -all {0} $bits {}] -} - -proc simulate_bit_op {op args} { - set maxlen 0 - set j 0 - set count [llength $args] - foreach a $args { - binary scan $a b* bits - set b($j) $bits - if {[string length $bits] > $maxlen} { - set maxlen [string length $bits] - } - incr j - } - for {set j 0} {$j < $count} {incr j} { - if {[string length $b($j)] < $maxlen} { - append b($j) [string repeat 0 [expr $maxlen-[string length $b($j)]]] - } - } - set out {} - for {set x 0} {$x < $maxlen} {incr x} { - set bit [string range $b(0) $x $x] - if {$op eq {not}} {set bit [expr {!$bit}]} - for {set j 1} {$j < $count} {incr j} { - set bit2 [string range $b($j) $x $x] - switch $op { - and {set bit [expr {$bit & $bit2}]} - or {set bit [expr {$bit | $bit2}]} - xor {set bit [expr {$bit ^ $bit2}]} - } - } - append out $bit - } - binary format b* $out -} - -start_server {tags {"bitops"}} { - test {BITCOUNT returns 0 against non existing key} { - r bitcount no-key - } 0 - - catch {unset num} - foreach vec [list "" "\xaa" "\x00\x00\xff" "foobar" "123"] { - incr num - test "BITCOUNT against test vector #$num" { - r set str $vec - assert {[r bitcount str] == [count_bits $vec]} - } - } - - test {BITCOUNT fuzzing without start/end} { - for {set j 0} {$j < 100} {incr j} { - set str [randstring 0 3000] - r set str $str - assert {[r bitcount str] == [count_bits $str]} - } - } - - test {BITCOUNT fuzzing with start/end} { - for {set j 0} {$j < 100} {incr j} { - set str [randstring 0 3000] - r set str $str - set l [string length $str] - set start [randomInt $l] - set end [randomInt $l] - if {$start > $end} { - lassign [list $end $start] start end - } - assert {[r bitcount str $start $end] == [count_bits [string range $str $start $end]]} - } - } - - test {BITCOUNT with start, end} { - r set s "foobar" - assert_equal [r bitcount s 0 -1] [count_bits "foobar"] - assert_equal [r bitcount s 1 -2] [count_bits "ooba"] - assert_equal [r bitcount s -2 1] [count_bits ""] - assert_equal [r bitcount s 0 1000] [count_bits "foobar"] - } - - test {BITCOUNT syntax error #1} { - catch {r bitcount s 0} e - set e - } {ERR*syntax*} - - test {BITCOUNT regression test for github issue #582} { - r del str - r setbit foo 0 1 - if {[catch {r bitcount foo 0 4294967296} e]} { - assert_match {*ERR*out of range*} $e - set _ 1 - } else { - set e - } - } {1} - - test {BITCOUNT misaligned prefix} { - r del str - r set str ab - r bitcount str 1 -1 - } {3} - - test {BITCOUNT misaligned prefix + full words + remainder} { - r del str - r set str __PPxxxxxxxxxxxxxxxxRR__ - r bitcount str 2 -3 - } {74} - - test {BITOP NOT (empty string)} { - r set s "" - r bitop not dest s - r get dest - } {} - - test {BITOP NOT (known string)} { - r set s "\xaa\x00\xff\x55" - r bitop not dest s - r get dest - } "\x55\xff\x00\xaa" - - test {BITOP where dest and target are the same key} { - r set s "\xaa\x00\xff\x55" - r bitop not s s - r get s - } "\x55\xff\x00\xaa" - - test {BITOP AND|OR|XOR don't change the string with single input key} { - r set a "\x01\x02\xff" - r bitop and res1 a - r bitop or res2 a - r bitop xor res3 a - list [r get res1] [r get res2] [r get res3] - } [list "\x01\x02\xff" "\x01\x02\xff" "\x01\x02\xff"] - - test {BITOP missing key is considered a stream of zero} { - r set a "\x01\x02\xff" - r bitop and res1 no-suck-key a - r bitop or res2 no-suck-key a no-such-key - r bitop xor res3 no-such-key a - list [r get res1] [r get res2] [r get res3] - } [list "\x00\x00\x00" "\x01\x02\xff" "\x01\x02\xff"] - - 
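#
# A minimal self-check of the pure-Tcl oracle defined above (a sketch,
# guarded with "if 0" so it never runs; it needs no server): BITOP-style
# operations zero-pad the shorter operand, so XORing "\xff" with "\xff\x0f"
# must give "\x00\x0f" -- the same rule the next test verifies server-side.
if 0 {
    set out [simulate_bit_op xor "\xff" "\xff\x0f"]
    puts [expr {$out eq "\x00\x0f"}]   ;# prints 1
}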
-    test {BITOP shorter keys are zero-padded to the key with max length} {
-        r set a "\x01\x02\xff\xff"
-        r set b "\x01\x02\xff"
-        r bitop and res1 a b
-        r bitop or res2 a b
-        r bitop xor res3 a b
-        list [r get res1] [r get res2] [r get res3]
-    } [list "\x01\x02\xff\x00" "\x01\x02\xff\xff" "\x00\x00\x00\xff"]
-
-    foreach op {and or xor} {
-        test "BITOP $op fuzzing" {
-            for {set i 0} {$i < 10} {incr i} {
-                r flushall
-                set vec {}
-                set veckeys {}
-                set numvec [expr {[randomInt 10]+1}]
-                for {set j 0} {$j < $numvec} {incr j} {
-                    set str [randstring 0 1000]
-                    lappend vec $str
-                    lappend veckeys vector_$j
-                    r set vector_$j $str
-                }
-                r bitop $op target {*}$veckeys
-                assert_equal [r get target] [simulate_bit_op $op {*}$vec]
-            }
-        }
-    }
-
-    test {BITOP NOT fuzzing} {
-        for {set i 0} {$i < 10} {incr i} {
-            r flushall
-            set str [randstring 0 1000]
-            r set str $str
-            r bitop not target str
-            assert_equal [r get target] [simulate_bit_op not $str]
-        }
-    }
-
-    test {BITOP with integer encoded source objects} {
-        r set a 1
-        r set b 2
-        r bitop xor dest a b a
-        r get dest
-    } {2}
-
-    test {BITOP with non string source key} {
-        r del c
-        r set a 1
-        r set b 2
-        r lpush c foo
-        catch {r bitop xor dest a b c d} e
-        set e
-    } {WRONGTYPE*}
-
-    test {BITOP with empty string after non empty string (issue #529)} {
-        r flushdb
-        r set a "\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00"
-        r bitop or x a b
-    } {32}
-
-    test {BITPOS bit=0 with empty key returns 0} {
-        r del str
-        r bitpos str 0
-    } {0}
-
-    test {BITPOS bit=1 with empty key returns -1} {
-        r del str
-        r bitpos str 1
-    } {-1}
-
-    test {BITPOS bit=0 with string less than 1 word works} {
-        r set str "\xff\xf0\x00"
-        r bitpos str 0
-    } {12}
-
-    test {BITPOS bit=1 with string less than 1 word works} {
-        r set str "\x00\x0f\x00"
-        r bitpos str 1
-    } {12}
-
-    test {BITPOS bit=0 starting at unaligned address} {
-        r set str "\xff\xf0\x00"
-        r bitpos str 0 1
-    } {12}
-
-    test {BITPOS bit=1 starting at unaligned address} {
-        r set str "\x00\x0f\xff"
-        r bitpos str 1 1
-    } {12}
-
-    test {BITPOS bit=0 unaligned+full word+remainder} {
-        r del str
-        r set str "\xff\xff\xff" ; # Prefix
-        # Followed by two (or four in 32 bit systems) full words
-        r append str "\xff\xff\xff\xff\xff\xff\xff\xff"
-        r append str "\xff\xff\xff\xff\xff\xff\xff\xff"
-        r append str "\xff\xff\xff\xff\xff\xff\xff\xff"
-        # First zero bit.
-        r append str "\x0f"
-        assert {[r bitpos str 0] == 216}
-        assert {[r bitpos str 0 1] == 216}
-        assert {[r bitpos str 0 2] == 216}
-        assert {[r bitpos str 0 3] == 216}
-        assert {[r bitpos str 0 4] == 216}
-        assert {[r bitpos str 0 5] == 216}
-        assert {[r bitpos str 0 6] == 216}
-        assert {[r bitpos str 0 7] == 216}
-        assert {[r bitpos str 0 8] == 216}
-    }
-
-    test {BITPOS bit=1 unaligned+full word+remainder} {
-        r del str
-        r set str "\x00\x00\x00" ; # Prefix
-        # Followed by two (or four in 32 bit systems) full words
-        r append str "\x00\x00\x00\x00\x00\x00\x00\x00"
-        r append str "\x00\x00\x00\x00\x00\x00\x00\x00"
-        r append str "\x00\x00\x00\x00\x00\x00\x00\x00"
-        # First one bit.
- r append str "\xf0" - assert {[r bitpos str 1] == 216} - assert {[r bitpos str 1 1] == 216} - assert {[r bitpos str 1 2] == 216} - assert {[r bitpos str 1 3] == 216} - assert {[r bitpos str 1 4] == 216} - assert {[r bitpos str 1 5] == 216} - assert {[r bitpos str 1 6] == 216} - assert {[r bitpos str 1 7] == 216} - assert {[r bitpos str 1 8] == 216} - } - - test {BITPOS bit=1 returns -1 if string is all 0 bits} { - r set str "" - for {set j 0} {$j < 20} {incr j} { - assert {[r bitpos str 1] == -1} - r append str "\x00" - } - } - - test {BITPOS bit=0 works with intervals} { - r set str "\x00\xff\x00" - assert {[r bitpos str 0 0 -1] == 0} - assert {[r bitpos str 0 1 -1] == 16} - assert {[r bitpos str 0 2 -1] == 16} - assert {[r bitpos str 0 2 200] == 16} - assert {[r bitpos str 0 1 1] == -1} - } - - test {BITPOS bit=1 works with intervals} { - r set str "\x00\xff\x00" - assert {[r bitpos str 1 0 -1] == 8} - assert {[r bitpos str 1 1 -1] == 8} - assert {[r bitpos str 1 2 -1] == -1} - assert {[r bitpos str 1 2 200] == -1} - assert {[r bitpos str 1 1 1] == 8} - } - - test {BITPOS bit=0 changes behavior if end is given} { - r set str "\xff\xff\xff" - assert {[r bitpos str 0] == 24} - assert {[r bitpos str 0 0] == 24} - assert {[r bitpos str 0 0 -1] == -1} - } - - test {BITPOS bit=1 fuzzy testing using SETBIT} { - r del str - set max 524288; # 64k - set first_one_pos -1 - for {set j 0} {$j < 1000} {incr j} { - assert {[r bitpos str 1] == $first_one_pos} - set pos [randomInt $max] - r setbit str $pos 1 - if {$first_one_pos == -1 || $first_one_pos > $pos} { - # Update the position of the first 1 bit in the array - # if the bit we set is on the left of the previous one. - set first_one_pos $pos - } - } - } - - test {BITPOS bit=0 fuzzy testing using SETBIT} { - set max 524288; # 64k - set first_zero_pos $max - r set str [string repeat "\xff" [expr $max/8]] - for {set j 0} {$j < 1000} {incr j} { - assert {[r bitpos str 0] == $first_zero_pos} - set pos [randomInt $max] - r setbit str $pos 0 - if {$first_zero_pos > $pos} { - # Update the position of the first 0 bit in the array - # if the bit we clear is on the left of the previous one. 
-                set first_zero_pos $pos
-            }
-        }
-    }
-}
diff --git a/tools/pika_migrate/tests/unit/dump.tcl b/tools/pika_migrate/tests/unit/dump.tcl
deleted file mode 100644
index b79c3ba9d0..0000000000
--- a/tools/pika_migrate/tests/unit/dump.tcl
+++ /dev/null
@@ -1,142 +0,0 @@
-start_server {tags {"dump"}} {
-    test {DUMP / RESTORE are able to serialize / deserialize a simple key} {
-        r set foo bar
-        set encoded [r dump foo]
-        r del foo
-        list [r exists foo] [r restore foo 0 $encoded] [r ttl foo] [r get foo]
-    } {0 OK -1 bar}
-
-    test {RESTORE can set an arbitrary expire to the materialized key} {
-        r set foo bar
-        set encoded [r dump foo]
-        r del foo
-        r restore foo 5000 $encoded
-        set ttl [r pttl foo]
-        assert {$ttl >= 3000 && $ttl <= 5000}
-        r get foo
-    } {bar}
-
-    test {RESTORE can set an expire that overflows a 32 bit integer} {
-        r set foo bar
-        set encoded [r dump foo]
-        r del foo
-        r restore foo 2569591501 $encoded
-        set ttl [r pttl foo]
-        assert {$ttl >= (2569591501-3000) && $ttl <= 2569591501}
-        r get foo
-    } {bar}
-
-    test {RESTORE returns an error if the key already exists} {
-        r set foo bar
-        set e {}
-        catch {r restore foo 0 "..."} e
-        set e
-    } {*is busy*}
-
-    test {DUMP of non existing key returns nil} {
-        r dump nonexisting_key
-    } {}
-
-    test {MIGRATE is able to migrate a key between two instances} {
-        set first [srv 0 client]
-        r set key "Some Value"
-        start_server {tags {"repl"}} {
-            set second [srv 0 client]
-            set second_host [srv 0 host]
-            set second_port [srv 0 port]
-
-            assert {[$first exists key] == 1}
-            assert {[$second exists key] == 0}
-            set ret [r -1 migrate $second_host $second_port key 9 5000]
-            assert {$ret eq {OK}}
-            assert {[$first exists key] == 0}
-            assert {[$second exists key] == 1}
-            assert {[$second get key] eq {Some Value}}
-            assert {[$second ttl key] == -1}
-        }
-    }
-
-    test {MIGRATE propagates TTL correctly} {
-        set first [srv 0 client]
-        r set key "Some Value"
-        start_server {tags {"repl"}} {
-            set second [srv 0 client]
-            set second_host [srv 0 host]
-            set second_port [srv 0 port]
-
-            assert {[$first exists key] == 1}
-            assert {[$second exists key] == 0}
-            $first expire key 10
-            set ret [r -1 migrate $second_host $second_port key 9 5000]
-            assert {$ret eq {OK}}
-            assert {[$first exists key] == 0}
-            assert {[$second exists key] == 1}
-            assert {[$second get key] eq {Some Value}}
-            assert {[$second ttl key] >= 7 && [$second ttl key] <= 10}
-        }
-    }
-
-    test {MIGRATE can correctly transfer large values} {
-        set first [srv 0 client]
-        r del key
-        for {set j 0} {$j < 5000} {incr j} {
-            r rpush key 1 2 3 4 5 6 7 8 9 10
-            r rpush key "item 1" "item 2" "item 3" "item 4" "item 5" \
-                "item 6" "item 7" "item 8" "item 9" "item 10"
-        }
-        assert {[string length [r dump key]] > (1024*64)}
-        start_server {tags {"repl"}} {
-            set second [srv 0 client]
-            set second_host [srv 0 host]
-            set second_port [srv 0 port]
-
-            assert {[$first exists key] == 1}
-            assert {[$second exists key] == 0}
-            set ret [r -1 migrate $second_host $second_port key 9 10000]
-            assert {$ret eq {OK}}
-            assert {[$first exists key] == 0}
-            assert {[$second exists key] == 1}
-            assert {[$second ttl key] == -1}
-            assert {[$second llen key] == 5000*20}
-        }
-    }
-
-    test {MIGRATE can correctly transfer hashes} {
-        set first [srv 0 client]
-        r del key
-        r hmset key field1 "item 1" field2 "item 2" field3 "item 3" \
-            field4 "item 4" field5 "item 5" field6 "item 6"
-        start_server {tags {"repl"}} {
-            set second [srv 0 client]
-            set second_host [srv 0 host]
-            set second_port [srv 0 port]
-
-            assert {[$first exists key] == 1}
-            assert {[$second exists key] == 0}
-            set ret [r -1 migrate $second_host $second_port key 9 10000]
-            assert {$ret eq {OK}}
-            assert {[$first exists key] == 0}
-            assert {[$second exists key] == 1}
-            assert {[$second ttl key] == -1}
-        }
-    }
-
-    test {MIGRATE timeout actually works} {
-        set first [srv 0 client]
-        r set key "Some Value"
-        start_server {tags {"repl"}} {
-            set second [srv 0 client]
-            set second_host [srv 0 host]
-            set second_port [srv 0 port]
-
-            assert {[$first exists key] == 1}
-            assert {[$second exists key] == 0}
-
-            set rd [redis_deferring_client]
-            $rd debug sleep 5.0 ; # Make second server unable to reply.
-            set e {}
-            catch {r -1 migrate $second_host $second_port key 9 1000} e
-            assert_match {IOERR*} $e
-        }
-    }
-}
diff --git a/tools/pika_migrate/tests/unit/expire.tcl b/tools/pika_migrate/tests/unit/expire.tcl
deleted file mode 100644
index ff3dacb337..0000000000
--- a/tools/pika_migrate/tests/unit/expire.tcl
+++ /dev/null
@@ -1,201 +0,0 @@
-start_server {tags {"expire"}} {
-    test {EXPIRE - set timeouts multiple times} {
-        r set x foobar
-        set v1 [r expire x 5]
-        set v2 [r ttl x]
-        set v3 [r expire x 10]
-        set v4 [r ttl x]
-        r expire x 2
-        list $v1 $v2 $v3 $v4
-    } {1 [45] 1 10}
-
-    test {EXPIRE - It should still be possible to read 'x'} {
-        r get x
-    } {foobar}
-
-    tags {"slow"} {
-        test {EXPIRE - After 2.1 seconds the key should no longer be here} {
-            after 2100
-            list [r get x] [r exists x]
-        } {{} 0}
-    }
-
-    test {EXPIRE - write on expire should work} {
-        r del x
-        r lpush x foo
-        r expire x 1000
-        r lpush x bar
-        r lrange x 0 -1
-    } {bar foo}
-
-    test {EXPIREAT - Check for EXPIRE-like behavior} {
-        r del x
-        r set x foo
-        r expireat x [expr [clock seconds]+15]
-        r ttl x
-    } {1[345]}
-
-    test {SETEX - Set + Expire combo operation. Check for TTL} {
-        r setex x 12 test
-        r ttl x
-    } {1[012]}
-
-    test {SETEX - Check value} {
-        r get x
-    } {test}
-
-    test {SETEX - Overwrite old key} {
-        r setex y 1 foo
-        r get y
-    } {foo}
-
-    tags {"slow"} {
-        test {SETEX - Wait for the key to expire} {
-            after 1100
-            r get y
-        } {}
-    }
-
-    test {SETEX - Wrong time parameter} {
-        catch {r setex z -10 foo} e
-        set _ $e
-    } {*invalid expire*}
-
-    test {PERSIST can undo an EXPIRE} {
-        r set x foo
-        r expire x 50
-        list [r ttl x] [r persist x] [r ttl x] [r get x]
-    } {50 1 -1 foo}
-
-    test {PERSIST returns 0 against non existing or non volatile keys} {
-        r set x foo
-        list [r persist foo] [r persist nokeyatall]
-    } {0 0}
-
-    test {EXPIRE precision is now the millisecond} {
-        # This test is very likely to do a false positive if the
-        # server is under pressure, so if it does not work give it a few more
-        # chances.
-        for {set j 0} {$j < 3} {incr j} {
-            r del x
-            r setex x 1 somevalue
-            after 900
-            set a [r get x]
-            after 1100
-            set b [r get x]
-            if {$a eq {somevalue} && $b eq {}} break
-        }
-        list $a $b
-    } {somevalue {}}
-
-    test {PEXPIRE/PSETEX/PEXPIREAT can set sub-second expires} {
-        # This test is very likely to do a false positive if the
-        # server is under pressure, so if it does not work give it a few more
-        # chances.
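# The loop below is this suite's standard defense against timing flakiness:
# run the time-sensitive body up to three times and stop at the first clean
# pass. The same idiom, factored into a hypothetical helper (a sketch only,
# not part of this suite):
#
#     proc retry {attempts body} {
#         for {set i 0} {$i < $attempts} {incr i} {
#             if {![catch {uplevel 1 $body}]} return
#         }
#         error "still failing after $attempts attempts"
#     }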
-        for {set j 0} {$j < 3} {incr j} {
-            r del x y z
-            r psetex x 100 somevalue
-            after 80
-            set a [r get x]
-            after 120
-            set b [r get x]
-
-            r set x somevalue
-            r pexpire x 100
-            after 80
-            set c [r get x]
-            after 120
-            set d [r get x]
-
-            r set x somevalue
-            r pexpireat x [expr ([clock seconds]*1000)+100]
-            after 80
-            set e [r get x]
-            after 120
-            set f [r get x]
-
-            if {$a eq {somevalue} && $b eq {} &&
-                $c eq {somevalue} && $d eq {} &&
-                $e eq {somevalue} && $f eq {}} break
-        }
-        list $a $b
-    } {somevalue {}}
-
-    test {TTL returns time to live in seconds} {
-        r del x
-        r setex x 10 somevalue
-        set ttl [r ttl x]
-        assert {$ttl > 8 && $ttl <= 10}
-    }
-
-    test {PTTL returns time to live in milliseconds} {
-        r del x
-        r setex x 1 somevalue
-        set ttl [r pttl x]
-        assert {$ttl > 900 && $ttl <= 1000}
-    }
-
-    test {TTL / PTTL return -1 if key has no expire} {
-        r del x
-        r set x hello
-        list [r ttl x] [r pttl x]
-    } {-1 -1}
-
-    test {TTL / PTTL return -2 if key does not exist} {
-        r del x
-        list [r ttl x] [r pttl x]
-    } {-2 -2}
-
-    test {Redis should actively expire keys incrementally} {
-        r flushdb
-        r psetex key1 500 a
-        r psetex key2 500 a
-        r psetex key3 500 a
-        set size1 [r dbsize]
-        # Redis expires random keys ten times every second so we are
-        # fairly sure that all the three keys should be evicted after
-        # one second.
-        after 1000
-        set size2 [r dbsize]
-        list $size1 $size2
-    } {3 0}
-
-    test {Redis should lazy expire keys} {
-        r flushdb
-        r debug set-active-expire 0
-        r psetex key1 500 a
-        r psetex key2 500 a
-        r psetex key3 500 a
-        set size1 [r dbsize]
-        # Redis expires random keys ten times every second so we are
-        # fairly sure that all the three keys should be evicted after
-        # one second.
-        after 1000
-        set size2 [r dbsize]
-        r mget key1 key2 key3
-        set size3 [r dbsize]
-        r debug set-active-expire 1
-        list $size1 $size2 $size3
-    } {3 3 0}
-
-    test {EXPIRE should not resurrect keys (issue #1026)} {
-        r debug set-active-expire 0
-        r set foo bar
-        r pexpire foo 500
-        after 1000
-        r expire foo 10
-        r debug set-active-expire 1
-        r exists foo
-    } {0}
-
-    test {5 keys in, 5 keys out} {
-        r flushdb
-        r set a c
-        r expire a 5
-        r set t c
-        r set e c
-        r set s c
-        r set foo b
-        lsort [r keys *]
-    } {a e foo s t}
-}
diff --git a/tools/pika_migrate/tests/unit/geo.tcl b/tools/pika_migrate/tests/unit/geo.tcl
deleted file mode 100644
index 7ed8710980..0000000000
--- a/tools/pika_migrate/tests/unit/geo.tcl
+++ /dev/null
@@ -1,311 +0,0 @@
-# Helper functions to simulate search-in-radius on the Tcl side in order to
-# verify the Redis implementation with a fuzzy test.
-proc geo_degrad deg {expr {$deg*atan(1)*8/360}}
-
-proc geo_distance {lon1d lat1d lon2d lat2d} {
-    set lon1r [geo_degrad $lon1d]
-    set lat1r [geo_degrad $lat1d]
-    set lon2r [geo_degrad $lon2d]
-    set lat2r [geo_degrad $lat2d]
-    set v [expr {sin(($lon2r - $lon1r) / 2)}]
-    set u [expr {sin(($lat2r - $lat1r) / 2)}]
-    expr {2.0 * 6372797.560856 * \
-            asin(sqrt($u * $u + cos($lat1r) * cos($lat2r) * $v * $v))}
-}
-
-proc geo_random_point {lonvar latvar} {
-    upvar 1 $lonvar lon
-    upvar 1 $latvar lat
-    # Note that the actual latitude limit should be -85 to +85, we restrict
-    # the test to -70 to +70 since in this range the algorithm is more precise
-    # while outside this range occasionally some element may be missing.
-    set lon [expr {-180 + rand()*360}]
-    set lat [expr {-70 + rand()*140}]
-}
-
-# Return the elements not common to both lists.
-# This code is from http://wiki.tcl.tk/15489 -proc compare_lists {List1 List2} { - set DiffList {} - foreach Item $List1 { - if {[lsearch -exact $List2 $Item] == -1} { - lappend DiffList $Item - } - } - foreach Item $List2 { - if {[lsearch -exact $List1 $Item] == -1} { - if {[lsearch -exact $DiffList $Item] == -1} { - lappend DiffList $Item - } - } - } - return $DiffList -} - -# The following list represents sets of random seed, search position -# and radius that caused bugs in the past. It is used by the randomized -# test later as a starting point. When the regression vectors are scanned -# the code reverts to using random data. -# -# The format is: seed km lon lat -set regression_vectors { - {1482225976969 7083 81.634948934258375 30.561509253718668} - {1482340074151 5416 -70.863281847379767 -46.347003465679947} - {1499014685896 6064 -89.818768962202014 -40.463868561416803} - {1412 156 149.29737817929004 15.95807862745508} - {441574 143 59.235461856813856 66.269555127373678} - {160645 187 -101.88575239939883 49.061997951502917} - {750269 154 -90.187939661642517 66.615930412251487} - {342880 145 163.03472387745728 64.012747720821181} - {729955 143 137.86663517256579 63.986745399416776} - {939895 151 59.149620271823181 65.204186651485145} - {1412 156 149.29737817929004 15.95807862745508} - {564862 149 84.062063109158544 -65.685403922426232} -} -set rv_idx 0 - -start_server {tags {"geo"}} { - test {GEOADD create} { - r geoadd nyc -73.9454966 40.747533 "lic market" - } {1} - - test {GEOADD update} { - r geoadd nyc -73.9454966 40.747533 "lic market" - } {0} - - test {GEOADD invalid coordinates} { - catch { - r geoadd nyc -73.9454966 40.747533 "lic market" \ - foo bar "luck market" - } err - set err - } {*valid*} - - test {GEOADD multi add} { - r geoadd nyc -73.9733487 40.7648057 "central park n/q/r" -73.9903085 40.7362513 "union square" -74.0131604 40.7126674 "wtc one" -73.7858139 40.6428986 "jfk" -73.9375699 40.7498929 "q4" -73.9564142 40.7480973 4545 - } {6} - - test {Check geoset values} { - r zrange nyc 0 -1 withscores - } {{wtc one} 1791873972053020 {union square} 1791875485187452 {central park n/q/r} 1791875761332224 4545 1791875796750882 {lic market} 1791875804419201 q4 1791875830079666 jfk 1791895905559723} - - test {GEORADIUS simple (sorted)} { - r georadius nyc -73.9798091 40.7598464 3 km asc - } {{central park n/q/r} 4545 {union square}} - - test {GEORADIUS withdist (sorted)} { - r georadius nyc -73.9798091 40.7598464 3 km withdist asc - } {{{central park n/q/r} 0.7750} {4545 2.3651} {{union square} 2.7697}} - - test {GEORADIUS with COUNT} { - r georadius nyc -73.9798091 40.7598464 10 km COUNT 3 - } {{wtc one} {union square} {central park n/q/r}} - - test {GEORADIUS with COUNT but missing integer argument} { - catch {r georadius nyc -73.9798091 40.7598464 10 km COUNT} e - set e - } {ERR*syntax*} - - test {GEORADIUS with COUNT DESC} { - r georadius nyc -73.9798091 40.7598464 10 km COUNT 2 DESC - } {{wtc one} q4} - - test {GEORADIUS HUGE, issue #2767} { - r geoadd users -47.271613776683807 -54.534504198047678 user_000000 - llength [r GEORADIUS users 0 0 50000 km WITHCOORD] - } {1} - - test {GEORADIUSBYMEMBER simple (sorted)} { - r georadiusbymember nyc "wtc one" 7 km - } {{wtc one} {union square} {central park n/q/r} 4545 {lic market}} - - test {GEORADIUSBYMEMBER withdist (sorted)} { - r georadiusbymember nyc "wtc one" 7 km withdist - } {{{wtc one} 0.0000} {{union square} 3.2544} {{central park n/q/r} 6.7000} {4545 6.1975} {{lic market} 6.8969}} - - test {GEOHASH is able to return 
geohash strings} { - # Example from Wikipedia. - r del points - r geoadd points -5.6 42.6 test - lindex [r geohash points test] 0 - } {ezs42e44yx0} - - test {GEOPOS simple} { - r del points - r geoadd points 10 20 a 30 40 b - lassign [lindex [r geopos points a b] 0] x1 y1 - lassign [lindex [r geopos points a b] 1] x2 y2 - assert {abs($x1 - 10) < 0.001} - assert {abs($y1 - 20) < 0.001} - assert {abs($x2 - 30) < 0.001} - assert {abs($y2 - 40) < 0.001} - } - - test {GEOPOS missing element} { - r del points - r geoadd points 10 20 a 30 40 b - lindex [r geopos points a x b] 1 - } {} - - test {GEODIST simple & unit} { - r del points - r geoadd points 13.361389 38.115556 "Palermo" \ - 15.087269 37.502669 "Catania" - set m [r geodist points Palermo Catania] - assert {$m > 166274 && $m < 166275} - set km [r geodist points Palermo Catania km] - assert {$km > 166.2 && $km < 166.3} - } - - test {GEODIST missing elements} { - r del points - r geoadd points 13.361389 38.115556 "Palermo" \ - 15.087269 37.502669 "Catania" - set m [r geodist points Palermo Agrigento] - assert {$m eq {}} - set m [r geodist points Ragusa Agrigento] - assert {$m eq {}} - set m [r geodist empty_key Palermo Catania] - assert {$m eq {}} - } - - test {GEORADIUS STORE option: syntax error} { - r del points - r geoadd points 13.361389 38.115556 "Palermo" \ - 15.087269 37.502669 "Catania" - catch {r georadius points 13.361389 38.115556 50 km store} e - set e - } {*ERR*syntax*} - - test {GEORANGE STORE option: incompatible options} { - r del points - r geoadd points 13.361389 38.115556 "Palermo" \ - 15.087269 37.502669 "Catania" - catch {r georadius points 13.361389 38.115556 50 km store points2 withdist} e - assert_match {*ERR*} $e - catch {r georadius points 13.361389 38.115556 50 km store points2 withhash} e - assert_match {*ERR*} $e - catch {r georadius points 13.361389 38.115556 50 km store points2 withcoords} e - assert_match {*ERR*} $e - } - - test {GEORANGE STORE option: plain usage} { - r del points - r geoadd points 13.361389 38.115556 "Palermo" \ - 15.087269 37.502669 "Catania" - r georadius points 13.361389 38.115556 500 km store points2 - assert_equal [r zrange points 0 -1] [r zrange points2 0 -1] - } - - test {GEORANGE STOREDIST option: plain usage} { - r del points - r geoadd points 13.361389 38.115556 "Palermo" \ - 15.087269 37.502669 "Catania" - r georadius points 13.361389 38.115556 500 km storedist points2 - set res [r zrange points2 0 -1 withscores] - assert {[lindex $res 1] < 1} - assert {[lindex $res 3] > 166} - assert {[lindex $res 3] < 167} - } - - test {GEORANGE STOREDIST option: COUNT ASC and DESC} { - r del points - r geoadd points 13.361389 38.115556 "Palermo" \ - 15.087269 37.502669 "Catania" - r georadius points 13.361389 38.115556 500 km storedist points2 asc count 1 - assert {[r zcard points2] == 1} - set res [r zrange points2 0 -1 withscores] - assert {[lindex $res 0] eq "Palermo"} - - r georadius points 13.361389 38.115556 500 km storedist points2 desc count 1 - assert {[r zcard points2] == 1} - set res [r zrange points2 0 -1 withscores] - assert {[lindex $res 0] eq "Catania"} - } - - test {GEOADD + GEORANGE randomized test} { - set attempt 30 - while {[incr attempt -1]} { - set rv [lindex $regression_vectors $rv_idx] - incr rv_idx - - unset -nocomplain debuginfo - set srand_seed [clock milliseconds] - if {$rv ne {}} {set srand_seed [lindex $rv 0]} - lappend debuginfo "srand_seed is $srand_seed" - expr {srand($srand_seed)} ; # If you need a reproducible run - r del mypoints - - if {[randomInt 10] == 
0} {
-                # From time to time use very big radiuses
-                set radius_km [expr {[randomInt 50000]+10}]
-            } else {
-                # Normally use a few to ~200km radiuses to stress
-                # test the code the most in edge cases.
-                set radius_km [expr {[randomInt 200]+10}]
-            }
-            if {$rv ne {}} {set radius_km [lindex $rv 1]}
-            set radius_m [expr {$radius_km*1000}]
-            geo_random_point search_lon search_lat
-            if {$rv ne {}} {
-                set search_lon [lindex $rv 2]
-                set search_lat [lindex $rv 3]
-            }
-            lappend debuginfo "Search area: $search_lon,$search_lat $radius_km km"
-            set tcl_result {}
-            set argv {}
-            for {set j 0} {$j < 20000} {incr j} {
-                geo_random_point lon lat
-                lappend argv $lon $lat "place:$j"
-                set distance [geo_distance $lon $lat $search_lon $search_lat]
-                if {$distance < $radius_m} {
-                    lappend tcl_result "place:$j"
-                }
-                lappend debuginfo "place:$j $lon $lat [expr {$distance/1000}] km"
-            }
-            r geoadd mypoints {*}$argv
-            set res [lsort [r georadius mypoints $search_lon $search_lat $radius_km km]]
-            set res2 [lsort $tcl_result]
-            set test_result OK
-
-            if {$res != $res2} {
-                set rounding_errors 0
-                set diff [compare_lists $res $res2]
-                foreach place $diff {
-                    set mydist [geo_distance $lon $lat $search_lon $search_lat]
-                    set mydist [expr $mydist/1000]
-                    if {($mydist / $radius_km) > 0.999} {incr rounding_errors}
-                }
-                # Make sure this is a real error and not a rounding issue.
-                if {[llength $diff] == $rounding_errors} {
-                    set res $res2; # Error silenced
-                }
-            }
-
-            if {$res != $res2} {
-                set diff [compare_lists $res $res2]
-                puts "*** Possible problem in GEO radius query ***"
-                puts "Redis: $res"
-                puts "Tcl : $res2"
-                puts "Diff : $diff"
-                puts [join $debuginfo "\n"]
-                foreach place $diff {
-                    if {[lsearch -exact $res2 $place] != -1} {
-                        set where "(only in Tcl)"
-                    } else {
-                        set where "(only in Redis)"
-                    }
-                    lassign [lindex [r geopos mypoints $place] 0] lon lat
-                    set mydist [geo_distance $lon $lat $search_lon $search_lat]
-                    set mydist [expr $mydist/1000]
-                    puts "$place -> [r geopos mypoints $place] $mydist $where"
-                    if {($mydist / $radius_km) > 0.999} {incr rounding_errors}
-                }
-                set test_result FAIL
-            }
-            unset -nocomplain debuginfo
-            if {$test_result ne {OK}} break
-        }
-        set test_result
-    } {OK}
-}
diff --git a/tools/pika_migrate/tests/unit/hyperloglog.tcl b/tools/pika_migrate/tests/unit/hyperloglog.tcl
deleted file mode 100755
index 6d614bb156..0000000000
--- a/tools/pika_migrate/tests/unit/hyperloglog.tcl
+++ /dev/null
@@ -1,250 +0,0 @@
-start_server {tags {"hll"}} {
-#    test {HyperLogLog self test passes} {
-#        catch {r pfselftest} e
-#        set e
-#    } {OK}
-
-    test {PFADD without arguments creates an HLL value} {
-        r pfadd hll
-        r exists hll
-    } {1}
-
-    test {Approximated cardinality after creation is zero} {
-        r pfcount hll
-    } {0}
-
-    test {PFADD returns 1 when at least 1 reg was modified} {
-        r pfadd hll a b c
-    } {1}
-
-    test {PFADD returns 0 when no reg was modified} {
-        r pfadd hll a b c
-    } {0}
-
-    test {PFADD works with empty string (regression)} {
-        r pfadd hll ""
-    }
-
-    # Note that the self test stresses the cardinality estimation error
-    # much better. We are testing just the command implementation itself here.
-    test {PFCOUNT returns approximated cardinality of set} {
-        r del hll
-        set res {}
-        r pfadd hll 1 2 3 4 5
-        lappend res [r pfcount hll]
-        # Call it again to test cached value invalidation.
-        r pfadd hll 6 7 8 8 9 10
-        lappend res [r pfcount hll]
-        set res
-    } {5 10}
-
-#    test {HyperLogLogs are promoted from sparse to dense} {
-#        r del hll
-#        r config set hll-sparse-max-bytes 3000
-#        set n 0
-#        while {$n < 100000} {
-#            set elements {}
-#            for {set j 0} {$j < 100} {incr j} {lappend elements [expr rand()]}
-#            incr n 100
-#            r pfadd hll {*}$elements
-#            set card [r pfcount hll]
-#            set err [expr {abs($card-$n)}]
-#            assert {$err < (double($card)/100)*5}
-#            if {$n < 1000} {
-#                assert {[r pfdebug encoding hll] eq {sparse}}
-#            } elseif {$n > 10000} {
-#                assert {[r pfdebug encoding hll] eq {dense}}
-#            }
-#        }
-#    }
-
-#    test {HyperLogLog sparse encoding stress test} {
-#        for {set x 0} {$x < 1000} {incr x} {
-#            r del hll1 hll2
-#            set numele [randomInt 100]
-#            set elements {}
-#            for {set j 0} {$j < $numele} {incr j} {
-#                lappend elements [expr rand()]
-#            }
-            # Force dense representation of hll2
-#            r pfadd hll2
-#            r pfdebug todense hll2
-#            r pfadd hll1 {*}$elements
-#            r pfadd hll2 {*}$elements
-#            assert {[r pfdebug encoding hll1] eq {sparse}}
-#            assert {[r pfdebug encoding hll2] eq {dense}}
-            # Cardinality estimated should match exactly.
-#            assert {[r pfcount hll1] eq [r pfcount hll2]}
-#        }
-#    }
-
-#    test {Corrupted sparse HyperLogLogs are detected: Additional data at tail} {
-#        r del hll
-#        r pfadd hll a b c
-#        r append hll "hello"
-#        set e {}
-#        catch {r pfcount hll} e
-#        set e
-#    } {*INVALIDOBJ*}
-
-#    test {Corrupted sparse HyperLogLogs are detected: Broken magic} {
-#        r del hll
-#        r pfadd hll a b c
-#        r setrange hll 0 "0123"
-#        set e {}
-#        catch {r pfcount hll} e
-#        set e
-#    } {*WRONGTYPE*}
-
-#    test {Corrupted sparse HyperLogLogs are detected: Invalid encoding} {
-#        r del hll
-#        r pfadd hll a b c
-#        r setrange hll 4 "x"
-#        set e {}
-#        catch {r pfcount hll} e
-#        set e
-#    } {*WRONGTYPE*}
-
-#    test {Corrupted dense HyperLogLogs are detected: Wrong length} {
-#        r del hll
-#        r pfadd hll a b c
-#        r setrange hll 4 "\x00"
-#        set e {}
-#        catch {r pfcount hll} e
-#        set e
-#    } {*WRONGTYPE*}
-
-#    test {PFADD, PFCOUNT, PFMERGE type checking works} {
-#        r set foo bar
-#        catch {r pfadd foo 1} e
-#        assert_match {*WRONGTYPE*} $e
-#        catch {r pfcount foo} e
-#        assert_match {*WRONGTYPE*} $e
-#        catch {r pfmerge bar foo} e
-#        assert_match {*WRONGTYPE*} $e
-#        catch {r pfmerge foo bar} e
-#        assert_match {*WRONGTYPE*} $e
-#    }
-
-    test {PFMERGE results in the cardinality of the union of sets} {
-        r del hll hll1 hll2 hll3
-        r pfadd hll1 a b c
-        r pfadd hll2 b c d
-        r pfadd hll3 c d e
-        r pfmerge hll hll1 hll2 hll3
-        r pfcount hll
-    } {5}
-
-    test {PFCOUNT multiple-keys merge returns cardinality of union} {
-        r del hll1 hll2 hll3
-        for {set x 1} {$x < 100000} {incr x} {
-            # Add distinct elements to the three HLLs
-            r pfadd hll1 "foo-$x"
-            r pfadd hll2 "bar-$x"
-            r pfadd hll3 "zap-$x"
-
-            set card [r pfcount hll1 hll2 hll3]
-            set realcard [expr {$x*3}]
-            set err [expr {abs($card-$realcard)}]
-            assert {$err < (double($card)/100)*5}
-        }
-    }
-
-    test {HYPERLOGLOG stress test: 50k, 100k, 150k, 300k, 500k, 1m} {
-        r del hll1
-        for {set x 1} {$x <= 1000000} {incr x} {
-            r pfadd hll1 "foo-$x"
-            if {$x == 50000} {
-                set card [r pfcount hll1]
-                set realcard [expr {$x*1}]
-                set err [expr {abs($card-$realcard)}]
-
-                set d_err [expr {$err * 1.0}]
-                set d_realcard [expr {$realcard * 1.0}]
-                set err_percentage [expr {double($d_err / $d_realcard)}]
-                puts "$x error rate: $err_percentage"
-                assert {$err < $realcard * 0.01}
-            }
-            if {$x == 100000} {
-                set card [r pfcount hll1]
-                set realcard [expr {$x*1}]
-                set err [expr {abs($card-$realcard)}]
-
-                set d_err [expr {$err * 1.0}]
-                set d_realcard [expr {$realcard * 1.0}]
-                set err_percentage [expr {double($d_err / $d_realcard)}]
-                puts "$x error rate: $err_percentage"
-                assert {$err < $realcard * 0.01}
-            }
-            if {$x == 150000} {
-                set card [r pfcount hll1]
-                set realcard [expr {$x*1}]
-                set err [expr {abs($card-$realcard)}]
-
-                set d_err [expr {$err * 1.0}]
-                set d_realcard [expr {$realcard * 1.0}]
-                set err_percentage [expr {double($d_err / $d_realcard)}]
-                puts "$x error rate: $err_percentage"
-                assert {$err < $realcard * 0.01}
-            }
-            if {$x == 300000} {
-                set card [r pfcount hll1]
-                set realcard [expr {$x*1}]
-                set err [expr {abs($card-$realcard)}]
-
-                set d_err [expr {$err * 1.0}]
-                set d_realcard [expr {$realcard * 1.0}]
-                set err_percentage [expr {double($d_err / $d_realcard)}]
-                puts "$x error rate: $err_percentage"
-                assert {$err < $realcard * 0.01}
-            }
-            if {$x == 500000} {
-                set card [r pfcount hll1]
-                set realcard [expr {$x*1}]
-                set err [expr {abs($card-$realcard)}]
-
-                set d_err [expr {$err * 1.0}]
-                set d_realcard [expr {$realcard * 1.0}]
-                set err_percentage [expr {double($d_err / $d_realcard)}]
-                puts "$x error rate: $err_percentage"
-                assert {$err < $realcard * 0.01}
-            }
-            if {$x == 1000000} {
-                set card [r pfcount hll1]
-                set realcard [expr {$x*1}]
-                set err [expr {abs($card-$realcard)}]
-
-                set d_err [expr {$err * 1.0}]
-                set d_realcard [expr {$realcard * 1.0}]
-                set err_percentage [expr {double($d_err / $d_realcard)}]
-                puts "$x error rate: $err_percentage"
-                assert {$err < $realcard * 0.03}
-            }
-        }
-    }
-
-#    test {PFDEBUG GETREG returns the HyperLogLog raw registers} {
-#        r del hll
-#        r pfadd hll 1 2 3
-#        llength [r pfdebug getreg hll]
-#    } {16384}
-
-
-#    test {PFDEBUG GETREG returns the HyperLogLog raw registers} {
-#        r del hll
-#        r pfadd hll 1 2 3
-#        llength [r pfdebug getreg hll]
-#    } {16384}
-
-#    test {PFADD / PFCOUNT cache invalidation works} {
-#        r del hll
-#        r pfadd hll a b c
-#        r pfcount hll
-#        assert {[r getrange hll 15 15] eq "\x00"}
-#        r pfadd hll a b c
-#        assert {[r getrange hll 15 15] eq "\x00"}
-#        r pfadd hll 1 2 3
-#        assert {[r getrange hll 15 15] eq "\x80"}
-#    }
-}
diff --git a/tools/pika_migrate/tests/unit/introspection.tcl b/tools/pika_migrate/tests/unit/introspection.tcl
deleted file mode 100644
index 342bb939a8..0000000000
--- a/tools/pika_migrate/tests/unit/introspection.tcl
+++ /dev/null
@@ -1,59 +0,0 @@
-start_server {tags {"introspection"}} {
-    test {CLIENT LIST} {
-        r client list
-    } {*addr=*:* fd=* age=* idle=* flags=N db=9 sub=0 psub=0 multi=-1 qbuf=0 qbuf-free=* obl=0 oll=0 omem=0 events=r cmd=client*}
-
-    test {MONITOR can log executed commands} {
-        set rd [redis_deferring_client]
-        $rd monitor
-        r set foo bar
-        r get foo
-        list [$rd read] [$rd read] [$rd read]
-    } {*OK*"set" "foo"*"get" "foo"*}
-
-    test {MONITOR can log commands issued by the scripting engine} {
-        set rd [redis_deferring_client]
-        $rd monitor
-        r eval {redis.call('set',KEYS[1],ARGV[1])} 1 foo bar
-        $rd read ;# Discard the OK
-        assert_match {*eval*} [$rd read]
-        assert_match {*lua*"set"*"foo"*"bar"*} [$rd read]
-    }
-
-    test {CLIENT GETNAME should return NIL if name is not assigned} {
-        r client getname
-    } {}
-
-    test {CLIENT LIST shows empty fields for unassigned names} {
-        r client list
-    } {*name= *}
-
-    test {CLIENT SETNAME does not accept spaces} {
-        catch {r client setname "foo bar"} e
-        set e
-    } {ERR*}
-
-    test {CLIENT SETNAME can assign a name to this connection} {
-        assert_equal [r client setname myname] {OK}
-        r client list
-    } {*name=myname*}
-
-    test {CLIENT 
SETNAME can change the name of an existing connection} { - assert_equal [r client setname someothername] {OK} - r client list - } {*name=someothername*} - - test {After CLIENT SETNAME, connection can still be closed} { - set rd [redis_deferring_client] - $rd client setname foobar - assert_equal [$rd read] "OK" - assert_match {*foobar*} [r client list] - $rd close - # Now the client should no longer be listed - wait_for_condition 50 100 { - [string match {*foobar*} [r client list]] == 0 - } else { - fail "Client still listed in CLIENT LIST after SETNAME." - } - } -} diff --git a/tools/pika_migrate/tests/unit/keys.tcl b/tools/pika_migrate/tests/unit/keys.tcl deleted file mode 100644 index cb62444f3f..0000000000 --- a/tools/pika_migrate/tests/unit/keys.tcl +++ /dev/null @@ -1,54 +0,0 @@ -start_server {tags {"keys"}} { - test {KEYS with pattern} { - foreach key {key_x key_y key_z foo_a foo_b foo_c} { - r set $key hello - } - assert_equal {foo_a foo_b foo_c} [r keys foo*] - assert_equal {foo_a foo_b foo_c} [r keys f*] - assert_equal {foo_a foo_b foo_c} [r keys f*o*] - } - - test {KEYS to get all keys} { - lsort [r keys *] - } {foo_a foo_b foo_c key_x key_y key_z} - - test {KEYS select by type} { - foreach key {key_x key_y key_z foo_a foo_b foo_c} { - r del $key - } - r set kv_1 value - r set kv_2 value - r hset hash_1 hash_field 1 - r hset hash_2 hash_field 1 - r lpush list_1 value - r lpush list_2 value - r zadd zset_1 1 "a" - r zadd zset_2 1 "a" - r sadd set_1 "a" - r sadd set_2 "a" - assert_equal {kv_1 kv_2} [r keys * string] - assert_equal {hash_1 hash_2} [r keys * hash] - assert_equal {list_1 list_2} [r keys * list] - assert_equal {zset_1 zset_2} [r keys * zset] - assert_equal {set_1 set_2} [r keys * set] - assert_equal {kv_1 kv_2 hash_1 hash_2 zset_1 zset_2 set_1 set_2 list_1 list_2} [r keys *] - assert_equal {kv_1 kv_2} [r keys * STRING] - assert_equal {hash_1 hash_2} [r keys * HASH] - assert_equal {list_1 list_2} [r keys * LIST] - assert_equal {zset_1 zset_2} [r keys * ZSET] - assert_equal {set_1 set_2} [r keys * SET] - } - - test {KEYS syntax error} { - catch {r keys * a} e1 - catch {r keys * strings} e2 - catch {r keys * c d} e3 - catch {r keys} e4 - catch {r keys * set zset} e5 - assert_equal {ERR syntax error} [set e1] - assert_equal {ERR syntax error} [set e2] - assert_equal {ERR syntax error} [set e3] - assert_equal {ERR wrong number of arguments for 'keys' command} [set e4] - assert_equal {ERR syntax error} [set e5] - } -} diff --git a/tools/pika_migrate/tests/unit/latency-monitor.tcl b/tools/pika_migrate/tests/unit/latency-monitor.tcl deleted file mode 100644 index b736cad98b..0000000000 --- a/tools/pika_migrate/tests/unit/latency-monitor.tcl +++ /dev/null @@ -1,50 +0,0 @@ -start_server {tags {"latency-monitor"}} { - # Set a threshold high enough to avoid spurious latency events. 
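# With the threshold at 200 ms, ordinary commands never register and only
# the three DEBUG SLEEP calls below (300, 400 and 500 ms) should be logged,
# which is what gives the following tests a deterministic event history
# (three entries, with latencies roughly between 250 and 650 ms) to inspect.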
- r config set latency-monitor-threshold 200 - r latency reset - - test {Test latency events logging} { - r debug sleep 0.3 - after 1100 - r debug sleep 0.4 - after 1100 - r debug sleep 0.5 - assert {[r latency history command] >= 3} - } - - test {LATENCY HISTORY output is ok} { - set min 250 - set max 450 - foreach event [r latency history command] { - lassign $event time latency - assert {$latency >= $min && $latency <= $max} - incr min 100 - incr max 100 - set last_time $time ; # Used in the next test - } - } - - test {LATENCY LATEST output is ok} { - foreach event [r latency latest] { - lassign $event eventname time latency max - assert {$eventname eq "command"} - assert {$max >= 450 & $max <= 650} - assert {$time == $last_time} - break - } - } - - test {LATENCY HISTORY / RESET with wrong event name is fine} { - assert {[llength [r latency history blabla]] == 0} - assert {[r latency reset blabla] == 0} - } - - test {LATENCY DOCTOR produces some output} { - assert {[string length [r latency doctor]] > 0} - } - - test {LATENCY RESET is able to reset events} { - assert {[r latency reset] > 0} - assert {[r latency latest] eq {}} - } -} diff --git a/tools/pika_migrate/tests/unit/limits.tcl b/tools/pika_migrate/tests/unit/limits.tcl deleted file mode 100644 index b37ea9b0f5..0000000000 --- a/tools/pika_migrate/tests/unit/limits.tcl +++ /dev/null @@ -1,16 +0,0 @@ -start_server {tags {"limits"} overrides {maxclients 10}} { - test {Check if maxclients works refusing connections} { - set c 0 - catch { - while {$c < 50} { - incr c - set rd [redis_deferring_client] - $rd ping - $rd read - after 100 - } - } e - assert {$c > 8 && $c <= 10} - set e - } {*ERR max*reached*} -} diff --git a/tools/pika_migrate/tests/unit/maxmemory.tcl b/tools/pika_migrate/tests/unit/maxmemory.tcl deleted file mode 100644 index e6bf7860cb..0000000000 --- a/tools/pika_migrate/tests/unit/maxmemory.tcl +++ /dev/null @@ -1,144 +0,0 @@ -start_server {tags {"maxmemory"}} { - test "Without maxmemory small integers are shared" { - r config set maxmemory 0 - r set a 1 - assert {[r object refcount a] > 1} - } - - test "With maxmemory and non-LRU policy integers are still shared" { - r config set maxmemory 1073741824 - r config set maxmemory-policy allkeys-random - r set a 1 - assert {[r object refcount a] > 1} - } - - test "With maxmemory and LRU policy integers are not shared" { - r config set maxmemory 1073741824 - r config set maxmemory-policy allkeys-lru - r set a 1 - r config set maxmemory-policy volatile-lru - r set b 1 - assert {[r object refcount a] == 1} - assert {[r object refcount b] == 1} - r config set maxmemory 0 - } - - foreach policy { - allkeys-random allkeys-lru volatile-lru volatile-random volatile-ttl - } { - test "maxmemory - is the memory limit honoured? (policy $policy)" { - # make sure to start with a blank instance - r flushall - # Get the current memory limit and calculate a new limit. - # We just add 100k to the current memory size so that it is - # fast for us to reach that limit. - set used [s used_memory] - set limit [expr {$used+100*1024}] - r config set maxmemory $limit - r config set maxmemory-policy $policy - # Now add keys until the limit is almost reached. - set numkeys 0 - while 1 { - r setex [randomKey] 10000 x - incr numkeys - if {[s used_memory]+4096 > $limit} { - assert {$numkeys > 10} - break - } - } - # If we add the same number of keys already added again, we - # should still be under the limit. 
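# This is the eviction contract under test: once used_memory reaches
# maxmemory, every further write first evicts keys according to
# maxmemory-policy instead of failing, so while the key space remains
# evictable the dataset plateaus just under the limit. The 4096-byte slack
# absorbs allocator and bookkeeping jitter.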
- for {set j 0} {$j < $numkeys} {incr j} { - r setex [randomKey] 10000 x - } - assert {[s used_memory] < ($limit+4096)} - } - } - - foreach policy { - allkeys-random allkeys-lru volatile-lru volatile-random volatile-ttl - } { - test "maxmemory - only allkeys-* should remove non-volatile keys ($policy)" { - # make sure to start with a blank instance - r flushall - # Get the current memory limit and calculate a new limit. - # We just add 100k to the current memory size so that it is - # fast for us to reach that limit. - set used [s used_memory] - set limit [expr {$used+100*1024}] - r config set maxmemory $limit - r config set maxmemory-policy $policy - # Now add keys until the limit is almost reached. - set numkeys 0 - while 1 { - r set [randomKey] x - incr numkeys - if {[s used_memory]+4096 > $limit} { - assert {$numkeys > 10} - break - } - } - # If we add the same number of keys already added again and - # the policy is allkeys-* we should still be under the limit. - # Otherwise we should see an error reported by Redis. - set err 0 - for {set j 0} {$j < $numkeys} {incr j} { - if {[catch {r set [randomKey] x} e]} { - if {[string match {*used memory*} $e]} { - set err 1 - } - } - } - if {[string match allkeys-* $policy]} { - assert {[s used_memory] < ($limit+4096)} - } else { - assert {$err == 1} - } - } - } - - foreach policy { - volatile-lru volatile-random volatile-ttl - } { - test "maxmemory - policy $policy should only remove volatile keys." { - # make sure to start with a blank instance - r flushall - # Get the current memory limit and calculate a new limit. - # We just add 100k to the current memory size so that it is - # fast for us to reach that limit. - set used [s used_memory] - set limit [expr {$used+100*1024}] - r config set maxmemory $limit - r config set maxmemory-policy $policy - # Now add keys until the limit is almost reached. - set numkeys 0 - while 1 { - # Odd keys are volatile - # Even keys are non volatile - if {$numkeys % 2} { - r setex "key:$numkeys" 10000 x - } else { - r set "key:$numkeys" x - } - if {[s used_memory]+4096 > $limit} { - assert {$numkeys > 10} - break - } - incr numkeys - } - # Now we add the same number of volatile keys already added. - # We expect Redis to evict only volatile keys in order to make - # space. - set err 0 - for {set j 0} {$j < $numkeys} {incr j} { - catch {r setex "foo:$j" 10000 x} - } - # We should still be under the limit. - assert {[s used_memory] < ($limit+4096)} - # However all our non volatile keys should be here. - for {set j 0} {$j < $numkeys} {incr j 2} { - assert {[r exists "key:$j"]} - } - } - } -} diff --git a/tools/pika_migrate/tests/unit/memefficiency.tcl b/tools/pika_migrate/tests/unit/memefficiency.tcl deleted file mode 100644 index 7ca9a705bb..0000000000 --- a/tools/pika_migrate/tests/unit/memefficiency.tcl +++ /dev/null @@ -1,37 +0,0 @@ -proc test_memory_efficiency {range} { - r flushall - set rd [redis_deferring_client] - set base_mem [s used_memory] - set written 0 - for {set j 0} {$j < 10000} {incr j} { - set key key:$j - set val [string repeat A [expr {int(rand()*$range)}]] - $rd set $key $val - incr written [string length $key] - incr written [string length $val] - incr written 2 ;# A separator is the minimum to store key-value data. 
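# The value this proc returns is logical payload divided by actual memory
# growth: efficiency = written / (used_memory_after - used_memory_before).
# For example, writing 1.5 MB of keys and values while used_memory grows by
# 2 MB yields 1500000.0 / 2000000 = 0.75, which is the minimum the table
# below expects for values around 1024 bytes.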
-    }
-    for {set j 0} {$j < 10000} {incr j} {
-        $rd read ; # Discard replies
-    }
-
-    set current_mem [s used_memory]
-    set used [expr {$current_mem-$base_mem}]
-    set efficiency [expr {double($written)/$used}]
-    return $efficiency
-}
-
-start_server {tags {"memefficiency"}} {
-    foreach {size_range expected_min_efficiency} {
-        32    0.15
-        64    0.25
-        128   0.35
-        1024  0.75
-        16384 0.82
-    } {
-        test "Memory efficiency with values in range $size_range" {
-            set efficiency [test_memory_efficiency $size_range]
-            assert {$efficiency >= $expected_min_efficiency}
-        }
-    }
-}
diff --git a/tools/pika_migrate/tests/unit/multi.tcl b/tools/pika_migrate/tests/unit/multi.tcl
deleted file mode 100644
index 6655bf62c2..0000000000
--- a/tools/pika_migrate/tests/unit/multi.tcl
+++ /dev/null
@@ -1,309 +0,0 @@
-start_server {tags {"multi"}} {
-    test {MULTI / EXEC basics} {
-        r del mylist
-        r rpush mylist a
-        r rpush mylist b
-        r rpush mylist c
-        r multi
-        set v1 [r lrange mylist 0 -1]
-        set v2 [r ping]
-        set v3 [r exec]
-        list $v1 $v2 $v3
-    } {QUEUED QUEUED {{a b c} PONG}}
-
-    test {DISCARD} {
-        r del mylist
-        r rpush mylist a
-        r rpush mylist b
-        r rpush mylist c
-        r multi
-        set v1 [r del mylist]
-        set v2 [r discard]
-        set v3 [r lrange mylist 0 -1]
-        list $v1 $v2 $v3
-    } {QUEUED OK {a b c}}
-
-    test {Nested MULTI is not allowed} {
-        set err {}
-        r multi
-        catch {[r multi]} err
-        r exec
-        set _ $err
-    } {*ERR MULTI*}
-
-    test {MULTI where commands alter argc/argv} {
-        r sadd myset a
-        r multi
-        r spop myset
-        list [r exec] [r exists myset]
-    } {a 0}
-
-    test {WATCH inside MULTI is not allowed} {
-        set err {}
-        r multi
-        catch {[r watch x]} err
-        r exec
-        set _ $err
-    } {*ERR WATCH*}
-
-    test {EXEC fails if there are errors while queueing commands #1} {
-        r del foo1 foo2
-        r multi
-        r set foo1 bar1
-        catch {r non-existing-command}
-        r set foo2 bar2
-        catch {r exec} e
-        assert_match {EXECABORT*} $e
-        list [r exists foo1] [r exists foo2]
-    } {0 0}
-
-    test {EXEC fails if there are errors while queueing commands #2} {
-        set rd [redis_deferring_client]
-        r del foo1 foo2
-        r multi
-        r set foo1 bar1
-        $rd config set maxmemory 1
-        assert {[$rd read] eq {OK}}
-        catch {r lpush mylist myvalue}
-        $rd config set maxmemory 0
-        assert {[$rd read] eq {OK}}
-        r set foo2 bar2
-        catch {r exec} e
-        assert_match {EXECABORT*} $e
-        $rd close
-        list [r exists foo1] [r exists foo2]
-    } {0 0}
-
-    test {If EXEC aborts, the client MULTI state is cleared} {
-        r del foo1 foo2
-        r multi
-        r set foo1 bar1
-        catch {r non-existing-command}
-        r set foo2 bar2
-        catch {r exec} e
-        assert_match {EXECABORT*} $e
-        r ping
-    } {PONG}
-
-    test {EXEC works on WATCHed key not modified} {
-        r watch x y z
-        r watch k
-        r multi
-        r ping
-        r exec
-    } {PONG}
-
-    test {EXEC fails on WATCHed key modified (1 key of 1 watched)} {
-        r set x 30
-        r watch x
-        r set x 40
-        r multi
-        r ping
-        r exec
-    } {}
-
-    test {EXEC fails on WATCHed key modified (1 key of 5 watched)} {
-        r set x 30
-        r watch a b x k z
-        r set x 40
-        r multi
-        r ping
-        r exec
-    } {}
-
-    test {EXEC fails on WATCHed key modified by SORT with STORE even if the result is empty} {
-        r flushdb
-        r lpush foo bar
-        r watch foo
-        r sort emptylist store foo
-        r multi
-        r ping
-        r exec
-    } {}
-
-    test {After successful EXEC key is no longer watched} {
-        r set x 30
-        r watch x
-        r multi
-        r ping
-        r exec
-        r set x 40
-        r multi
-        r ping
-        r exec
-    } {PONG}
-
-    test {After failed EXEC key is no longer watched} {
-        r set x 30
-        r watch x
-        r set x 40
-        r multi
-        r ping
-        r exec
-        r set x 40
-        r multi
-        r ping
-        r exec
-    }
{PONG} - - test {It is possible to UNWATCH} { - r set x 30 - r watch x - r set x 40 - r unwatch - r multi - r ping - r exec - } {PONG} - - test {UNWATCH when there is nothing watched works as expected} { - r unwatch - } {OK} - - test {FLUSHALL is able to touch the watched keys} { - r set x 30 - r watch x - r flushall - r multi - r ping - r exec - } {} - - test {FLUSHALL does not touch non affected keys} { - r del x - r watch x - r flushall - r multi - r ping - r exec - } {PONG} - - test {FLUSHDB is able to touch the watched keys} { - r set x 30 - r watch x - r flushdb - r multi - r ping - r exec - } {} - - test {FLUSHDB does not touch non affected keys} { - r del x - r watch x - r flushdb - r multi - r ping - r exec - } {PONG} - - test {WATCH is able to remember the DB a key belongs to} { - r select 5 - r set x 30 - r watch x - r select 1 - r set x 10 - r select 5 - r multi - r ping - set res [r exec] - # Restore original DB - r select 9 - set res - } {PONG} - - test {WATCH will consider touched keys target of EXPIRE} { - r del x - r set x foo - r watch x - r expire x 10 - r multi - r ping - r exec - } {} - - test {WATCH will not consider touched expired keys} { - r del x - r set x foo - r expire x 1 - r watch x - after 1100 - r multi - r ping - r exec - } {PONG} - - test {DISCARD should clear the WATCH dirty flag on the client} { - r watch x - r set x 10 - r multi - r discard - r multi - r incr x - r exec - } {11} - - test {DISCARD should UNWATCH all the keys} { - r watch x - r set x 10 - r multi - r discard - r set x 10 - r multi - r incr x - r exec - } {11} - - test {MULTI / EXEC is propagated correctly (single write command)} { - set repl [attach_to_replication_stream] - r multi - r set foo bar - r exec - assert_replication_stream $repl { - {select *} - {multi} - {set foo bar} - {exec} - } - close_replication_stream $repl - } - - test {MULTI / EXEC is propagated correctly (empty transaction)} { - set repl [attach_to_replication_stream] - r multi - r exec - r set foo bar - assert_replication_stream $repl { - {select *} - {set foo bar} - } - close_replication_stream $repl - } - - test {MULTI / EXEC is propagated correctly (read-only commands)} { - r set foo value1 - set repl [attach_to_replication_stream] - r multi - r get foo - r exec - r set foo value2 - assert_replication_stream $repl { - {select *} - {set foo value2} - } - close_replication_stream $repl - } - - test {MULTI / EXEC is propagated correctly (write command, no effect)} { - r del bar foo bar - set repl [attach_to_replication_stream] - r multi - r del foo - r exec - assert_replication_stream $repl { - {select *} - {multi} - {exec} - } - close_replication_stream $repl - } -} diff --git a/tools/pika_migrate/tests/unit/obuf-limits.tcl b/tools/pika_migrate/tests/unit/obuf-limits.tcl deleted file mode 100644 index 5d625cf453..0000000000 --- a/tools/pika_migrate/tests/unit/obuf-limits.tcl +++ /dev/null @@ -1,73 +0,0 @@ -start_server {tags {"obuf-limits"}} { - test {Client output buffer hard limit is enforced} { - r config set client-output-buffer-limit {pubsub 100000 0 0} - set rd1 [redis_deferring_client] - - $rd1 subscribe foo - set reply [$rd1 read] - assert {$reply eq "subscribe foo 1"} - - set omem 0 - while 1 { - r publish foo bar - set clients [split [r client list] "\r\n"] - set c [split [lindex $clients 1] " "] - if {![regexp {omem=([0-9]+)} $c - omem]} break - if {$omem > 200000} break - } - assert {$omem >= 90000 && $omem < 200000} - $rd1 close - } - - test {Client output buffer soft limit is not enforced if time is 
not overreached} { - r config set client-output-buffer-limit {pubsub 0 100000 10} - set rd1 [redis_deferring_client] - - $rd1 subscribe foo - set reply [$rd1 read] - assert {$reply eq "subscribe foo 1"} - - set omem 0 - set start_time 0 - set time_elapsed 0 - while 1 { - r publish foo bar - set clients [split [r client list] "\r\n"] - set c [split [lindex $clients 1] " "] - if {![regexp {omem=([0-9]+)} $c - omem]} break - if {$omem > 100000} { - if {$start_time == 0} {set start_time [clock seconds]} - set time_elapsed [expr {[clock seconds]-$start_time}] - if {$time_elapsed >= 5} break - } - } - assert {$omem >= 100000 && $time_elapsed >= 5 && $time_elapsed <= 10} - $rd1 close - } - - test {Client output buffer soft limit is enforced if time is overreached} { - r config set client-output-buffer-limit {pubsub 0 100000 3} - set rd1 [redis_deferring_client] - - $rd1 subscribe foo - set reply [$rd1 read] - assert {$reply eq "subscribe foo 1"} - - set omem 0 - set start_time 0 - set time_elapsed 0 - while 1 { - r publish foo bar - set clients [split [r client list] "\r\n"] - set c [split [lindex $clients 1] " "] - if {![regexp {omem=([0-9]+)} $c - omem]} break - if {$omem > 100000} { - if {$start_time == 0} {set start_time [clock seconds]} - set time_elapsed [expr {[clock seconds]-$start_time}] - if {$time_elapsed >= 10} break - } - } - assert {$omem >= 100000 && $time_elapsed < 6} - $rd1 close - } -} diff --git a/tools/pika_migrate/tests/unit/other.tcl b/tools/pika_migrate/tests/unit/other.tcl deleted file mode 100644 index a53f3f5c81..0000000000 --- a/tools/pika_migrate/tests/unit/other.tcl +++ /dev/null @@ -1,245 +0,0 @@ -start_server {tags {"other"}} { - if {$::force_failure} { - # This is used just for test suite development purposes. - test {Failing test} { - format err - } {ok} - } - - test {SAVE - make sure there are all the types as values} { - # Wait for a background saving in progress to terminate - waitForBgsave r - r lpush mysavelist hello - r lpush mysavelist world - r set myemptykey {} - r set mynormalkey {blablablba} - r zadd mytestzset 10 a - r zadd mytestzset 20 b - r zadd mytestzset 30 c - r save - } {OK} - - tags {slow} { - if {$::accurate} {set iterations 10000} else {set iterations 1000} - foreach fuzztype {binary alpha compr} { - test "FUZZ stresser with data model $fuzztype" { - set err 0 - for {set i 0} {$i < $iterations} {incr i} { - set fuzz [randstring 0 512 $fuzztype] - r set foo $fuzz - set got [r get foo] - if {$got ne $fuzz} { - set err [list $fuzz $got] - break - } - } - set _ $err - } {0} - } - } - - test {BGSAVE} { - waitForBgsave r - r flushdb - r save - r set x 10 - r bgsave - waitForBgsave r - r debug reload - r get x - } {10} - - test {SELECT an out of range DB} { - catch {r select 1000000} err - set _ $err - } {*invalid*} - - tags {consistency} { - if {![catch {package require sha1}]} { - if {$::accurate} {set numops 10000} else {set numops 1000} - test {Check consistency of different data types after a reload} { - r flushdb - createComplexDataset r $numops - set dump [csvdump r] - set sha1 [r debug digest] - r debug reload - set sha1_after [r debug digest] - if {$sha1 eq $sha1_after} { - set _ 1 - } else { - set newdump [csvdump r] - puts "Consistency test failed!" 
- puts "You can inspect the two dumps in /tmp/repldump*.txt" - - set fd [open /tmp/repldump1.txt w] - puts $fd $dump - close $fd - set fd [open /tmp/repldump2.txt w] - puts $fd $newdump - close $fd - - set _ 0 - } - } {1} - - test {Same dataset digest if saving/reloading as AOF?} { - r bgrewriteaof - waitForBgrewriteaof r - r debug loadaof - set sha1_after [r debug digest] - if {$sha1 eq $sha1_after} { - set _ 1 - } else { - set newdump [csvdump r] - puts "Consistency test failed!" - puts "You can inspect the two dumps in /tmp/aofdump*.txt" - - set fd [open /tmp/aofdump1.txt w] - puts $fd $dump - close $fd - set fd [open /tmp/aofdump2.txt w] - puts $fd $newdump - close $fd - - set _ 0 - } - } {1} - } - } - - test {EXPIRES after a reload (snapshot + append only file rewrite)} { - r flushdb - r set x 10 - r expire x 1000 - r save - r debug reload - set ttl [r ttl x] - set e1 [expr {$ttl > 900 && $ttl <= 1000}] - r bgrewriteaof - waitForBgrewriteaof r - r debug loadaof - set ttl [r ttl x] - set e2 [expr {$ttl > 900 && $ttl <= 1000}] - list $e1 $e2 - } {1 1} - - test {EXPIRES after AOF reload (without rewrite)} { - r flushdb - r config set appendonly yes - r set x somevalue - r expire x 1000 - r setex y 2000 somevalue - r set z somevalue - r expireat z [expr {[clock seconds]+3000}] - - # Milliseconds variants - r set px somevalue - r pexpire px 1000000 - r psetex py 2000000 somevalue - r set pz somevalue - r pexpireat pz [expr {([clock seconds]+3000)*1000}] - - # Reload and check - waitForBgrewriteaof r - # We need to wait two seconds to avoid false positives here, otherwise - # the DEBUG LOADAOF command may read a partial file. - # Another solution would be to set the fsync policy to no, since this - # prevents write() to be delayed by the completion of fsync(). 
- after 2000 - r debug loadaof - set ttl [r ttl x] - assert {$ttl > 900 && $ttl <= 1000} - set ttl [r ttl y] - assert {$ttl > 1900 && $ttl <= 2000} - set ttl [r ttl z] - assert {$ttl > 2900 && $ttl <= 3000} - set ttl [r ttl px] - assert {$ttl > 900 && $ttl <= 1000} - set ttl [r ttl py] - assert {$ttl > 1900 && $ttl <= 2000} - set ttl [r ttl pz] - assert {$ttl > 2900 && $ttl <= 3000} - r config set appendonly no - } - - tags {protocol} { - test {PIPELINING stresser (also a regression for the old epoll bug)} { - set fd2 [socket $::host $::port] - fconfigure $fd2 -encoding binary -translation binary - puts -nonewline $fd2 "SELECT 9\r\n" - flush $fd2 - gets $fd2 - - for {set i 0} {$i < 100000} {incr i} { - set q {} - set val "0000${i}0000" - append q "SET key:$i $val\r\n" - puts -nonewline $fd2 $q - set q {} - append q "GET key:$i\r\n" - puts -nonewline $fd2 $q - } - flush $fd2 - - for {set i 0} {$i < 100000} {incr i} { - gets $fd2 line - gets $fd2 count - set count [string range $count 1 end] - set val [read $fd2 $count] - read $fd2 2 - } - close $fd2 - set _ 1 - } {1} - } - - test {APPEND basics} { - list [r append foo bar] [r get foo] \ - [r append foo 100] [r get foo] - } {3 bar 6 bar100} - - test {APPEND basics, integer encoded values} { - set res {} - r del foo - r append foo 1 - r append foo 2 - lappend res [r get foo] - r set foo 1 - r append foo 2 - lappend res [r get foo] - } {12 12} - - test {APPEND fuzzing} { - set err {} - foreach type {binary alpha compr} { - set buf {} - r del x - for {set i 0} {$i < 1000} {incr i} { - set bin [randstring 0 10 $type] - append buf $bin - r append x $bin - } - if {$buf != [r get x]} { - set err "Expected '$buf' found '[r get x]'" - break - } - } - set _ $err - } {} - - # Leave the user with a clean DB before to exit - test {FLUSHDB} { - set aux {} - r select 9 - r flushdb - lappend aux [r dbsize] - r select 10 - r flushdb - lappend aux [r dbsize] - } {0 0} - - test {Perform a final SAVE to leave a clean DB on disk} { - waitForBgsave r - r save - } {OK} -} diff --git a/tools/pika_migrate/tests/unit/printver.tcl b/tools/pika_migrate/tests/unit/printver.tcl deleted file mode 100644 index c80f45144d..0000000000 --- a/tools/pika_migrate/tests/unit/printver.tcl +++ /dev/null @@ -1,6 +0,0 @@ -start_server {} { - set i [r info] - regexp {redis_version:(.*?)\r\n} $i - version - regexp {redis_git_sha1:(.*?)\r\n} $i - sha1 - puts "Testing Redis version $version ($sha1)" -} diff --git a/tools/pika_migrate/tests/unit/protocol.tcl b/tools/pika_migrate/tests/unit/protocol.tcl deleted file mode 100644 index ac99c3abb4..0000000000 --- a/tools/pika_migrate/tests/unit/protocol.tcl +++ /dev/null @@ -1,117 +0,0 @@ -start_server {tags {"protocol"}} { - test "Handle an empty query" { - reconnect - r write "\r\n" - r flush - assert_equal "PONG" [r ping] - } - - test "Negative multibulk length" { - reconnect - r write "*-10\r\n" - r flush - assert_equal PONG [r ping] - } - - test "Out of range multibulk length" { - reconnect - r write "*20000000\r\n" - r flush - assert_error "*invalid multibulk length*" {r read} - } - - test "Wrong multibulk payload header" { - reconnect - r write "*3\r\n\$3\r\nSET\r\n\$1\r\nx\r\nfooz\r\n" - r flush - assert_error "*expected '$', got 'f'*" {r read} - } - - test "Negative multibulk payload length" { - reconnect - r write "*3\r\n\$3\r\nSET\r\n\$1\r\nx\r\n\$-10\r\n" - r flush - assert_error "*invalid bulk length*" {r read} - } - - test "Out of range multibulk payload length" { - reconnect - r write 
"*3\r\n\$3\r\nSET\r\n\$1\r\nx\r\n\$2000000000\r\n" - r flush - assert_error "*invalid bulk length*" {r read} - } - - test "Non-number multibulk payload length" { - reconnect - r write "*3\r\n\$3\r\nSET\r\n\$1\r\nx\r\n\$blabla\r\n" - r flush - assert_error "*invalid bulk length*" {r read} - } - - test "Multi bulk request not followed by bulk arguments" { - reconnect - r write "*1\r\nfoo\r\n" - r flush - assert_error "*expected '$', got 'f'*" {r read} - } - - test "Generic wrong number of args" { - reconnect - assert_error "*wrong*arguments*ping*" {r ping x y z} - } - - test "Unbalanced number of quotes" { - reconnect - r write "set \"\"\"test-key\"\"\" test-value\r\n" - r write "ping\r\n" - r flush - assert_error "*unbalanced*" {r read} - } - - set c 0 - foreach seq [list "\x00" "*\x00" "$\x00"] { - incr c - test "Protocol desync regression test #$c" { - set s [socket [srv 0 host] [srv 0 port]] - puts -nonewline $s $seq - set payload [string repeat A 1024]"\n" - set test_start [clock seconds] - set test_time_limit 30 - while 1 { - if {[catch { - puts -nonewline $s payload - flush $s - incr payload_size [string length $payload] - }]} { - set retval [gets $s] - close $s - break - } else { - set elapsed [expr {[clock seconds]-$test_start}] - if {$elapsed > $test_time_limit} { - close $s - error "assertion:Redis did not closed connection after protocol desync" - } - } - } - set retval - } {*Protocol error*} - } - unset c -} - -start_server {tags {"regression"}} { - test "Regression for a crash with blocking ops and pipelining" { - set rd [redis_deferring_client] - set fd [r channel] - set proto "*3\r\n\$5\r\nBLPOP\r\n\$6\r\nnolist\r\n\$1\r\n0\r\n" - puts -nonewline $fd $proto$proto - flush $fd - set res {} - - $rd rpush nolist a - $rd read - $rd rpush nolist a - $rd read - } -} diff --git a/tools/pika_migrate/tests/unit/pubsub.tcl b/tools/pika_migrate/tests/unit/pubsub.tcl deleted file mode 100644 index 16c8c6a5f7..0000000000 --- a/tools/pika_migrate/tests/unit/pubsub.tcl +++ /dev/null @@ -1,399 +0,0 @@ -start_server {tags {"pubsub"}} { - proc __consume_subscribe_messages {client type channels} { - set numsub -1 - set counts {} - - for {set i [llength $channels]} {$i > 0} {incr i -1} { - set msg [$client read] - assert_equal $type [lindex $msg 0] - - # when receiving subscribe messages the channels names - # are ordered. when receiving unsubscribe messages - # they are unordered - set idx [lsearch -exact $channels [lindex $msg 1]] - if {[string match "*unsubscribe" $type]} { - assert {$idx >= 0} - } else { - assert {$idx == 0} - } - set channels [lreplace $channels $idx $idx] - - # aggregate the subscription count to return to the caller - lappend counts [lindex $msg 2] - } - - # we should have received messages for channels - assert {[llength $channels] == 0} - return $counts - } - - proc subscribe {client channels} { - $client subscribe {*}$channels - __consume_subscribe_messages $client subscribe $channels - } - - proc unsubscribe {client {channels {}}} { - $client unsubscribe {*}$channels - __consume_subscribe_messages $client unsubscribe $channels - } - - proc psubscribe {client channels} { - $client psubscribe {*}$channels - __consume_subscribe_messages $client psubscribe $channels - } - - proc punsubscribe {client {channels {}}} { - $client punsubscribe {*}$channels - __consume_subscribe_messages $client punsubscribe $channels - } - - test "Pub/Sub PING" { - set rd1 [redis_deferring_client] - subscribe $rd1 somechannel - # While subscribed to non-zero channels PING works in Pub/Sub mode. 
- $rd1 ping - set reply1 [$rd1 read] - unsubscribe $rd1 somechannel - # Now we are unsubscribed, PING should just return PONG. - $rd1 ping - set reply2 [$rd1 read] - $rd1 close - list $reply1 $reply2 - } {PONG PONG} - - test "PUBLISH/SUBSCRIBE basics" { - set rd1 [redis_deferring_client] - - # subscribe to two channels - assert_equal {1 2} [subscribe $rd1 {chan1 chan2}] - assert_equal 1 [r publish chan1 hello] - assert_equal 1 [r publish chan2 world] - assert_equal {message chan1 hello} [$rd1 read] - assert_equal {message chan2 world} [$rd1 read] - - # unsubscribe from one of the channels - unsubscribe $rd1 {chan1} - assert_equal 0 [r publish chan1 hello] - assert_equal 1 [r publish chan2 world] - assert_equal {message chan2 world} [$rd1 read] - - # unsubscribe from the remaining channel - unsubscribe $rd1 {chan2} - assert_equal 0 [r publish chan1 hello] - assert_equal 0 [r publish chan2 world] - - # clean up clients - $rd1 close - } - - test "PUBLISH/SUBSCRIBE with two clients" { - set rd1 [redis_deferring_client] - set rd2 [redis_deferring_client] - - assert_equal {1} [subscribe $rd1 {chan1}] - assert_equal {1} [subscribe $rd2 {chan1}] - assert_equal 2 [r publish chan1 hello] - assert_equal {message chan1 hello} [$rd1 read] - assert_equal {message chan1 hello} [$rd2 read] - - # clean up clients - $rd1 close - $rd2 close - } - - test "PUBLISH/SUBSCRIBE after UNSUBSCRIBE without arguments" { - set rd1 [redis_deferring_client] - assert_equal {1 2 3} [subscribe $rd1 {chan1 chan2 chan3}] - unsubscribe $rd1 - assert_equal 0 [r publish chan1 hello] - assert_equal 0 [r publish chan2 hello] - assert_equal 0 [r publish chan3 hello] - - # clean up clients - $rd1 close - } - - test "SUBSCRIBE to one channel more than once" { - set rd1 [redis_deferring_client] - assert_equal {1 1 1} [subscribe $rd1 {chan1 chan1 chan1}] - assert_equal 1 [r publish chan1 hello] - assert_equal {message chan1 hello} [$rd1 read] - - # clean up clients - $rd1 close - } - - test "UNSUBSCRIBE from non-subscribed channels" { - set rd1 [redis_deferring_client] - assert_equal {0 0 0} [unsubscribe $rd1 {foo bar quux}] - - # clean up clients - $rd1 close - } - - test "PUBLISH/PSUBSCRIBE basics" { - set rd1 [redis_deferring_client] - - # subscribe to two patterns - assert_equal {1 2} [psubscribe $rd1 {foo.* bar.*}] - assert_equal 1 [r publish foo.1 hello] - assert_equal 1 [r publish bar.1 hello] - assert_equal 0 [r publish foo1 hello] - assert_equal 0 [r publish barfoo.1 hello] - assert_equal 0 [r publish qux.1 hello] - assert_equal {pmessage foo.* foo.1 hello} [$rd1 read] - assert_equal {pmessage bar.* bar.1 hello} [$rd1 read] - - # unsubscribe from one of the patterns - assert_equal {1} [punsubscribe $rd1 {foo.*}] - assert_equal 0 [r publish foo.1 hello] - assert_equal 1 [r publish bar.1 hello] - assert_equal {pmessage bar.* bar.1 hello} [$rd1 read] - - # unsubscribe from the remaining pattern - assert_equal {0} [punsubscribe $rd1 {bar.*}] - assert_equal 0 [r publish foo.1 hello] - assert_equal 0 [r publish bar.1 hello] - - # clean up clients - $rd1 close - } - - test "PUBLISH/PSUBSCRIBE with two clients" { - set rd1 [redis_deferring_client] - set rd2 [redis_deferring_client] - - assert_equal {1} [psubscribe $rd1 {chan.*}] - assert_equal {1} [psubscribe $rd2 {chan.*}] - assert_equal 2 [r publish chan.foo hello] - assert_equal {pmessage chan.* chan.foo hello} [$rd1 read] - assert_equal {pmessage chan.* chan.foo hello} [$rd2 read] - - # clean up clients - $rd1 close - $rd2 close - } - - test "PUBLISH/PSUBSCRIBE after PUNSUBSCRIBE 
without arguments" { - set rd1 [redis_deferring_client] - assert_equal {1 2 3} [psubscribe $rd1 {chan1.* chan2.* chan3.*}] - punsubscribe $rd1 - assert_equal 0 [r publish chan1.hi hello] - assert_equal 0 [r publish chan2.hi hello] - assert_equal 0 [r publish chan3.hi hello] - - # clean up clients - $rd1 close - } - - test "PUNSUBSCRIBE from non-subscribed channels" { - set rd1 [redis_deferring_client] - assert_equal {0 0 0} [punsubscribe $rd1 {foo.* bar.* quux.*}] - - # clean up clients - $rd1 close - } - - test "NUMSUB returns numbers, not strings (#1561)" { - r pubsub numsub abc def - } {abc 0 def 0} - - test "PubSub return value" { - set rd1 [redis_deferring_client] - assert_equal {1} [subscribe $rd1 {foo.bar}] - assert_equal {2} [psubscribe $rd1 {foo.*}] - assert_equal {foo.bar} [r pubsub channels] - assert_equal {1} [r pubsub numpat] - assert_equal {foo.bar 1} [r pubsub numsub foo.bar] - - $rd1 close - } - - test "Mix SUBSCRIBE and PSUBSCRIBE" { - set rd1 [redis_deferring_client] - assert_equal {1} [subscribe $rd1 {foo.bar}] - assert_equal {2} [psubscribe $rd1 {foo.*}] - - assert_equal 2 [r publish foo.bar hello] - assert_equal {message foo.bar hello} [$rd1 read] - assert_equal {pmessage foo.* foo.bar hello} [$rd1 read] - - # clean up clients - $rd1 close - } - - test "PUNSUBSCRIBE and UNSUBSCRIBE should always reply" { - # Make sure we are not subscribed to any channel at all. - r punsubscribe - r unsubscribe - # Now check if the commands still reply correctly. - set reply1 [r punsubscribe] - set reply2 [r unsubscribe] - concat $reply1 $reply2 - } {punsubscribe {} 0 unsubscribe {} 0} - - ### Keyspace events notification tests - -# test "Keyspace notifications: we receive keyspace notifications" { -# r config set notify-keyspace-events KA -# set rd1 [redis_deferring_client] -# assert_equal {1} [psubscribe $rd1 *] -# r set foo bar -# assert_equal {pmessage * __keyspace@9__:foo set} [$rd1 read] -# $rd1 close -# } -# -# test "Keyspace notifications: we receive keyevent notifications" { -# r config set notify-keyspace-events EA -# set rd1 [redis_deferring_client] -# assert_equal {1} [psubscribe $rd1 *] -# r set foo bar -# assert_equal {pmessage * __keyevent@9__:set foo} [$rd1 read] -# $rd1 close -# } -# -# test "Keyspace notifications: we can receive both kind of events" { -# r config set notify-keyspace-events KEA -# set rd1 [redis_deferring_client] -# assert_equal {1} [psubscribe $rd1 *] -# r set foo bar -# assert_equal {pmessage * __keyspace@9__:foo set} [$rd1 read] -# assert_equal {pmessage * __keyevent@9__:set foo} [$rd1 read] -# $rd1 close -# } -# -# test "Keyspace notifications: we are able to mask events" { -# r config set notify-keyspace-events KEl -# r del mylist -# set rd1 [redis_deferring_client] -# assert_equal {1} [psubscribe $rd1 *] -# r set foo bar -# r lpush mylist a -# # No notification for set, because only list commands are enabled. 
-# assert_equal {pmessage * __keyspace@9__:mylist lpush} [$rd1 read] -# assert_equal {pmessage * __keyevent@9__:lpush mylist} [$rd1 read] -# $rd1 close -# } -# -# test "Keyspace notifications: general events test" { -# r config set notify-keyspace-events KEg -# set rd1 [redis_deferring_client] -# assert_equal {1} [psubscribe $rd1 *] -# r set foo bar -# r expire foo 1 -# r del foo -# assert_equal {pmessage * __keyspace@9__:foo expire} [$rd1 read] -# assert_equal {pmessage * __keyevent@9__:expire foo} [$rd1 read] -# assert_equal {pmessage * __keyspace@9__:foo del} [$rd1 read] -# assert_equal {pmessage * __keyevent@9__:del foo} [$rd1 read] -# $rd1 close -# } -# -# test "Keyspace notifications: list events test" { -# r config set notify-keyspace-events KEl -# r del mylist -# set rd1 [redis_deferring_client] -# assert_equal {1} [psubscribe $rd1 *] -# r lpush mylist a -# r rpush mylist a -# r rpop mylist -# assert_equal {pmessage * __keyspace@9__:mylist lpush} [$rd1 read] -# assert_equal {pmessage * __keyevent@9__:lpush mylist} [$rd1 read] -# assert_equal {pmessage * __keyspace@9__:mylist rpush} [$rd1 read] -# assert_equal {pmessage * __keyevent@9__:rpush mylist} [$rd1 read] -# assert_equal {pmessage * __keyspace@9__:mylist rpop} [$rd1 read] -# assert_equal {pmessage * __keyevent@9__:rpop mylist} [$rd1 read] -# $rd1 close -# } -# -# test "Keyspace notifications: set events test" { -# r config set notify-keyspace-events Ks -# r del myset -# set rd1 [redis_deferring_client] -# assert_equal {1} [psubscribe $rd1 *] -# r sadd myset a b c d -# r srem myset x -# r sadd myset x y z -# r srem myset x -# assert_equal {pmessage * __keyspace@9__:myset sadd} [$rd1 read] -# assert_equal {pmessage * __keyspace@9__:myset sadd} [$rd1 read] -# assert_equal {pmessage * __keyspace@9__:myset srem} [$rd1 read] -# $rd1 close -# } -# -# test "Keyspace notifications: zset events test" { -# r config set notify-keyspace-events Kz -# r del myzset -# set rd1 [redis_deferring_client] -# assert_equal {1} [psubscribe $rd1 *] -# r zadd myzset 1 a 2 b -# r zrem myzset x -# r zadd myzset 3 x 4 y 5 z -# r zrem myzset x -# assert_equal {pmessage * __keyspace@9__:myzset zadd} [$rd1 read] -# assert_equal {pmessage * __keyspace@9__:myzset zadd} [$rd1 read] -# assert_equal {pmessage * __keyspace@9__:myzset zrem} [$rd1 read] -# $rd1 close -# } -# -# test "Keyspace notifications: hash events test" { -# r config set notify-keyspace-events Kh -# r del myhash -# set rd1 [redis_deferring_client] -# assert_equal {1} [psubscribe $rd1 *] -# r hmset myhash yes 1 no 0 -# r hincrby myhash yes 10 -# assert_equal {pmessage * __keyspace@9__:myhash hset} [$rd1 read] -# assert_equal {pmessage * __keyspace@9__:myhash hincrby} [$rd1 read] -# $rd1 close -# } -# -# test "Keyspace notifications: expired events (triggered expire)" { -# r config set notify-keyspace-events Ex -# r del foo -# set rd1 [redis_deferring_client] -# assert_equal {1} [psubscribe $rd1 *] -# r psetex foo 100 1 -# wait_for_condition 50 100 { -# [r exists foo] == 0 -# } else { -# fail "Key does not expire?!" 
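# (Editorial note: "triggered" expire means the EXISTS polling above
# touches the key and forces the lazy expiration path, which is what
# emits the event; the companion test below relies on the background
# active-expire cycle instead. The lazy path in isolation, as a sketch:
#
#   r psetex foo 100 1
#   after 200
#   r exists foo    ;# the access itself expires the key, returns 0
# )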
-# } -# assert_equal {pmessage * __keyevent@9__:expired foo} [$rd1 read] -# $rd1 close -# } -# -# test "Keyspace notifications: expired events (background expire)" { -# r config set notify-keyspace-events Ex -# r del foo -# set rd1 [redis_deferring_client] -# assert_equal {1} [psubscribe $rd1 *] -# r psetex foo 100 1 -# assert_equal {pmessage * __keyevent@9__:expired foo} [$rd1 read] -# $rd1 close -# } -# -# test "Keyspace notifications: evicted events" { -# r config set notify-keyspace-events Ee -# r config set maxmemory-policy allkeys-lru -# r flushdb -# set rd1 [redis_deferring_client] -# assert_equal {1} [psubscribe $rd1 *] -# r set foo bar -# r config set maxmemory 1 -# assert_equal {pmessage * __keyevent@9__:evicted foo} [$rd1 read] -# r config set maxmemory 0 -# $rd1 close -# } -# -# test "Keyspace notifications: test CONFIG GET/SET of event flags" { -# r config set notify-keyspace-events gKE -# assert_equal {gKE} [lindex [r config get notify-keyspace-events] 1] -# r config set notify-keyspace-events {$lshzxeKE} -# assert_equal {$lshzxeKE} [lindex [r config get notify-keyspace-events] 1] -# r config set notify-keyspace-events KA -# assert_equal {AK} [lindex [r config get notify-keyspace-events] 1] -# r config set notify-keyspace-events EA -# assert_equal {AE} [lindex [r config get notify-keyspace-events] 1] -# } -#} diff --git a/tools/pika_migrate/tests/unit/quit.tcl b/tools/pika_migrate/tests/unit/quit.tcl deleted file mode 100644 index 4cf440abf1..0000000000 --- a/tools/pika_migrate/tests/unit/quit.tcl +++ /dev/null @@ -1,40 +0,0 @@ -start_server {tags {"quit"}} { - proc format_command {args} { - set cmd "*[llength $args]\r\n" - foreach a $args { - append cmd "$[string length $a]\r\n$a\r\n" - } - set _ $cmd - } - - test "QUIT returns OK" { - reconnect - assert_equal OK [r quit] - assert_error * {r ping} - } - - test "Pipelined commands after QUIT must not be executed" { - reconnect - r write [format_command quit] - r write [format_command set foo bar] - r flush - assert_equal OK [r read] - assert_error * {r read} - - reconnect - assert_equal {} [r get foo] - } - - test "Pipelined commands after QUIT that exceed read buffer size" { - reconnect - r write [format_command quit] - r write [format_command set foo [string repeat "x" 1024]] - r flush - assert_equal OK [r read] - assert_error * {r read} - - reconnect - assert_equal {} [r get foo] - - } -} diff --git a/tools/pika_migrate/tests/unit/scan.tcl b/tools/pika_migrate/tests/unit/scan.tcl deleted file mode 100644 index 1d84f128da..0000000000 --- a/tools/pika_migrate/tests/unit/scan.tcl +++ /dev/null @@ -1,239 +0,0 @@ -start_server {tags {"scan"}} { - test "SCAN basic" { - r flushdb - r debug populate 1000 - - set cur 0 - set keys {} - while 1 { - set res [r scan $cur] - set cur [lindex $res 0] - set k [lindex $res 1] - lappend keys {*}$k - if {$cur == 0} break - } - - set keys [lsort -unique $keys] - assert_equal 1000 [llength $keys] - } - - test "SCAN COUNT" { - r flushdb - r debug populate 1000 - - set cur 0 - set keys {} - while 1 { - set res [r scan $cur count 5] - set cur [lindex $res 0] - set k [lindex $res 1] - lappend keys {*}$k - if {$cur == 0} break - } - - set keys [lsort -unique $keys] - assert_equal 1000 [llength $keys] - } - - test "SCAN MATCH" { - r flushdb - r debug populate 1000 - - set cur 0 - set keys {} - while 1 { - set res [r scan $cur match "key:1??"] - set cur [lindex $res 0] - set k [lindex $res 1] - lappend keys {*}$k - if {$cur == 0} break - } - - set keys [lsort -unique $keys] - assert_equal 100 [llength 
$keys] - } - - foreach enc {intset hashtable} { - test "SSCAN with encoding $enc" { - # Create the Set - r del set - if {$enc eq {intset}} { - set prefix "" - } else { - set prefix "ele:" - } - set elements {} - for {set j 0} {$j < 100} {incr j} { - lappend elements ${prefix}${j} - } - r sadd set {*}$elements - - # Verify that the encoding matches. - assert {[r object encoding set] eq $enc} - - # Test SSCAN - set cur 0 - set keys {} - while 1 { - set res [r sscan set $cur] - set cur [lindex $res 0] - set k [lindex $res 1] - lappend keys {*}$k - if {$cur == 0} break - } - - set keys [lsort -unique $keys] - assert_equal 100 [llength $keys] - } - } - - foreach enc {ziplist hashtable} { - test "HSCAN with encoding $enc" { - # Create the Hash - r del hash - if {$enc eq {ziplist}} { - set count 30 - } else { - set count 1000 - } - set elements {} - for {set j 0} {$j < $count} {incr j} { - lappend elements key:$j $j - } - r hmset hash {*}$elements - - # Verify that the encoding matches. - assert {[r object encoding hash] eq $enc} - - # Test HSCAN - set cur 0 - set keys {} - while 1 { - set res [r hscan hash $cur] - set cur [lindex $res 0] - set k [lindex $res 1] - lappend keys {*}$k - if {$cur == 0} break - } - - set keys2 {} - foreach {k v} $keys { - assert {$k eq "key:$v"} - lappend keys2 $k - } - - set keys2 [lsort -unique $keys2] - assert_equal $count [llength $keys2] - } - } - - foreach enc {ziplist skiplist} { - test "ZSCAN with encoding $enc" { - # Create the Sorted Set - r del zset - if {$enc eq {ziplist}} { - set count 30 - } else { - set count 1000 - } - set elements {} - for {set j 0} {$j < $count} {incr j} { - lappend elements $j key:$j - } - r zadd zset {*}$elements - - # Verify that the encoding matches. - assert {[r object encoding zset] eq $enc} - - # Test ZSCAN - set cur 0 - set keys {} - while 1 { - set res [r zscan zset $cur] - set cur [lindex $res 0] - set k [lindex $res 1] - lappend keys {*}$k - if {$cur == 0} break - } - - set keys2 {} - foreach {k v} $keys { - assert {$k eq "key:$v"} - lappend keys2 $k - } - - set keys2 [lsort -unique $keys2] - assert_equal $count [llength $keys2] - } - } - - test "SCAN guarantees check under write load" { - r flushdb - r debug populate 100 - - # We start scanning here, so keys from 0 to 99 should all be - # reported at the end of the iteration. - set keys {} - while 1 { - set res [r scan $cur] - set cur [lindex $res 0] - set k [lindex $res 1] - lappend keys {*}$k - if {$cur == 0} break - # Write 10 random keys at every SCAN iteration. 
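# (Editorial note, two assumptions made explicit: first, $cur starts at 0
# here only because the previous SCAN test's loop left it there; a
# standalone version of this test would need "set cur 0" before the loop.
# Second, the guarantee being exercised is that keys present for the
# whole iteration are reported at least once, while keys added mid-scan
# may or may not appear; the post-loop filter keeps only the original
# key:0..key:99 names, which are at most 6 characters long. The same
# full-iteration pattern as a reusable sketch:
#
#   proc scan_all {client} {
#       set cur 0
#       set out {}
#       while 1 {
#           lassign [$client scan $cur] cur k
#           lappend out {*}$k
#           if {$cur == 0} break
#       }
#       return $out
#   }
# )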
- for {set j 0} {$j < 10} {incr j} { - r set addedkey:[randomInt 1000] foo - } - } - - set keys2 {} - foreach k $keys { - if {[string length $k] > 6} continue - lappend keys2 $k - } - - set keys2 [lsort -unique $keys2] - assert_equal 100 [llength $keys2] - } - - test "SSCAN with integer encoded object (issue #1345)" { - set objects {1 a} - r del set - r sadd set {*}$objects - set res [r sscan set 0 MATCH *a* COUNT 100] - assert_equal [lsort -unique [lindex $res 1]] {a} - set res [r sscan set 0 MATCH *1* COUNT 100] - assert_equal [lsort -unique [lindex $res 1]] {1} - } - - test "SSCAN with PATTERN" { - r del mykey - r sadd mykey foo fab fiz foobar 1 2 3 4 - set res [r sscan mykey 0 MATCH foo* COUNT 10000] - lsort -unique [lindex $res 1] - } {foo foobar} - - test "HSCAN with PATTERN" { - r del mykey - r hmset mykey foo 1 fab 2 fiz 3 foobar 10 1 a 2 b 3 c 4 d - set res [r hscan mykey 0 MATCH foo* COUNT 10000] - lsort -unique [lindex $res 1] - } {1 10 foo foobar} - - test "ZSCAN with PATTERN" { - r del mykey - r zadd mykey 1 foo 2 fab 3 fiz 10 foobar - set res [r zscan mykey 0 MATCH foo* COUNT 10000] - lsort -unique [lindex $res 1] - } - - test "ZSCAN scores: regression test for issue #2175" { - r del mykey - for {set j 0} {$j < 500} {incr j} { - r zadd mykey 9.8813129168249309e-323 $j - } - set res [lindex [r zscan mykey 0] 1] - set first_score [lindex $res 1] - assert {$first_score != 0} - } -} diff --git a/tools/pika_migrate/tests/unit/scripting.tcl b/tools/pika_migrate/tests/unit/scripting.tcl deleted file mode 100644 index e1cd2174ba..0000000000 --- a/tools/pika_migrate/tests/unit/scripting.tcl +++ /dev/null @@ -1,606 +0,0 @@ -start_server {tags {"scripting"}} { - test {EVAL - Does Lua interpreter replies to our requests?} { - r eval {return 'hello'} 0 - } {hello} - - test {EVAL - Lua integer -> Redis protocol type conversion} { - r eval {return 100.5} 0 - } {100} - - test {EVAL - Lua string -> Redis protocol type conversion} { - r eval {return 'hello world'} 0 - } {hello world} - - test {EVAL - Lua true boolean -> Redis protocol type conversion} { - r eval {return true} 0 - } {1} - - test {EVAL - Lua false boolean -> Redis protocol type conversion} { - r eval {return false} 0 - } {} - - test {EVAL - Lua status code reply -> Redis protocol type conversion} { - r eval {return {ok='fine'}} 0 - } {fine} - - test {EVAL - Lua error reply -> Redis protocol type conversion} { - catch { - r eval {return {err='this is an error'}} 0 - } e - set _ $e - } {this is an error} - - test {EVAL - Lua table -> Redis protocol type conversion} { - r eval {return {1,2,3,'ciao',{1,2}}} 0 - } {1 2 3 ciao {1 2}} - - test {EVAL - Are the KEYS and ARGV arrays populated correctly?} { - r eval {return {KEYS[1],KEYS[2],ARGV[1],ARGV[2]}} 2 a b c d - } {a b c d} - - test {EVAL - is Lua able to call Redis API?} { - r set mykey myval - r eval {return redis.call('get',KEYS[1])} 1 mykey - } {myval} - - test {EVALSHA - Can we call a SHA1 if already defined?} { - r evalsha fd758d1589d044dd850a6f05d52f2eefd27f033f 1 mykey - } {myval} - - test {EVALSHA - Can we call a SHA1 in uppercase?} { - r evalsha FD758D1589D044DD850A6F05D52F2EEFD27F033F 1 mykey - } {myval} - - test {EVALSHA - Do we get an error on invalid SHA1?} { - catch {r evalsha NotValidShaSUM 0} e - set _ $e - } {NOSCRIPT*} - - test {EVALSHA - Do we get an error on non defined SHA1?} { - catch {r evalsha ffd632c7d33e571e9f24556ebed26c3479a87130 0} e - set _ $e - } {NOSCRIPT*} - - test {EVAL - Redis integer -> Lua type conversion} { - r eval { - local foo = 
redis.pcall('incr','x') - return {type(foo),foo} - } 0 - } {number 1} - - test {EVAL - Redis bulk -> Lua type conversion} { - r set mykey myval - r eval { - local foo = redis.pcall('get','mykey') - return {type(foo),foo} - } 0 - } {string myval} - - test {EVAL - Redis multi bulk -> Lua type conversion} { - r del mylist - r rpush mylist a - r rpush mylist b - r rpush mylist c - r eval { - local foo = redis.pcall('lrange','mylist',0,-1) - return {type(foo),foo[1],foo[2],foo[3],# foo} - } 0 - } {table a b c 3} - - test {EVAL - Redis status reply -> Lua type conversion} { - r eval { - local foo = redis.pcall('set','mykey','myval') - return {type(foo),foo['ok']} - } 0 - } {table OK} - - test {EVAL - Redis error reply -> Lua type conversion} { - r set mykey myval - r eval { - local foo = redis.pcall('incr','mykey') - return {type(foo),foo['err']} - } 0 - } {table {ERR value is not an integer or out of range}} - - test {EVAL - Redis nil bulk reply -> Lua type conversion} { - r del mykey - r eval { - local foo = redis.pcall('get','mykey') - return {type(foo),foo == false} - } 0 - } {boolean 1} - - test {EVAL - Is the Lua client using the currently selected DB?} { - r set mykey "this is DB 9" - r select 10 - r set mykey "this is DB 10" - r eval {return redis.pcall('get','mykey')} 0 - } {this is DB 10} - - test {EVAL - SELECT inside Lua should not affect the caller} { - # here we DB 10 is selected - r set mykey "original value" - r eval {return redis.pcall('select','9')} 0 - set res [r get mykey] - r select 9 - set res - } {original value} - - if 0 { - test {EVAL - Script can't run more than configured time limit} { - r config set lua-time-limit 1 - catch { - r eval { - local i = 0 - while true do i=i+1 end - } 0 - } e - set _ $e - } {*execution time*} - } - - test {EVAL - Scripts can't run certain commands} { - set e {} - catch {r eval {return redis.pcall('spop','x')} 0} e - set e - } {*not allowed*} - - test {EVAL - Scripts can't run certain commands} { - set e {} - catch { - r eval "redis.pcall('randomkey'); return redis.pcall('set','x','ciao')" 0 - } e - set e - } {*not allowed after*} - - test {EVAL - No arguments to redis.call/pcall is considered an error} { - set e {} - catch {r eval {return redis.call()} 0} e - set e - } {*one argument*} - - test {EVAL - redis.call variant raises a Lua error on Redis cmd error (1)} { - set e {} - catch { - r eval "redis.call('nosuchcommand')" 0 - } e - set e - } {*Unknown Redis*} - - test {EVAL - redis.call variant raises a Lua error on Redis cmd error (1)} { - set e {} - catch { - r eval "redis.call('get','a','b','c')" 0 - } e - set e - } {*number of args*} - - test {EVAL - redis.call variant raises a Lua error on Redis cmd error (1)} { - set e {} - r set foo bar - catch { - r eval {redis.call('lpush',KEYS[1],'val')} 1 foo - } e - set e - } {*against a key*} - - test {EVAL - JSON numeric decoding} { - # We must return the table as a string because otherwise - # Redis converts floats to ints and we get 0 and 1023 instead - # of 0.0003 and 1023.2 as the parsed output. - r eval {return - table.concat( - cjson.decode( - "[0.0, -5e3, -1, 0.3e-3, 1023.2, 0e10]"), " ") - } 0 - } {0 -5000 -1 0.0003 1023.2 0} - - test {EVAL - JSON string decoding} { - r eval {local decoded = cjson.decode('{"keya": "a", "keyb": "b"}') - return {decoded.keya, decoded.keyb} - } 0 - } {a b} - - test {EVAL - cmsgpack can pack double?} { - r eval {local encoded = cmsgpack.pack(0.1) - local h = "" - for i = 1, #encoded do - h = h .. 
string.format("%02x",string.byte(encoded,i)) - end - return h - } 0 - } {cb3fb999999999999a} - - test {EVAL - cmsgpack can pack negative int64?} { - r eval {local encoded = cmsgpack.pack(-1099511627776) - local h = "" - for i = 1, #encoded do - h = h .. string.format("%02x",string.byte(encoded,i)) - end - return h - } 0 - } {d3ffffff0000000000} - - test {EVAL - cmsgpack can pack and unpack circular references?} { - r eval {local a = {x=nil,y=5} - local b = {x=a} - a['x'] = b - local encoded = cmsgpack.pack(a) - local h = "" - -- cmsgpack encodes to a depth of 16, but can't encode - -- references, so the encoded object has a deep copy recusive - -- depth of 16. - for i = 1, #encoded do - h = h .. string.format("%02x",string.byte(encoded,i)) - end - -- when unpacked, re.x.x != re because the unpack creates - -- individual tables down to a depth of 16. - -- (that's why the encoded output is so large) - local re = cmsgpack.unpack(encoded) - assert(re) - assert(re.x) - assert(re.x.x.y == re.y) - assert(re.x.x.x.x.y == re.y) - assert(re.x.x.x.x.x.x.y == re.y) - assert(re.x.x.x.x.x.x.x.x.x.x.y == re.y) - -- maximum working depth: - assert(re.x.x.x.x.x.x.x.x.x.x.x.x.x.x.y == re.y) - -- now the last x would be b above and has no y - assert(re.x.x.x.x.x.x.x.x.x.x.x.x.x.x.x) - -- so, the final x.x is at the depth limit and was assigned nil - assert(re.x.x.x.x.x.x.x.x.x.x.x.x.x.x.x.x == nil) - return {h, re.x.x.x.x.x.x.x.x.y == re.y, re.y == 5} - } 0 - } {82a17905a17881a17882a17905a17881a17882a17905a17881a17882a17905a17881a17882a17905a17881a17882a17905a17881a17882a17905a17881a17882a17905a17881a178c0 1 1} - - test {EVAL - Numerical sanity check from bitop} { - r eval {assert(0x7fffffff == 2147483647, "broken hex literals"); - assert(0xffffffff == -1 or 0xffffffff == 2^32-1, - "broken hex literals"); - assert(tostring(-1) == "-1", "broken tostring()"); - assert(tostring(0xffffffff) == "-1" or - tostring(0xffffffff) == "4294967295", - "broken tostring()") - } 0 - } {} - - test {EVAL - Verify minimal bitop functionality} { - r eval {assert(bit.tobit(1) == 1); - assert(bit.band(1) == 1); - assert(bit.bxor(1,2) == 3); - assert(bit.bor(1,2,4,8,16,32,64,128) == 255) - } 0 - } {} - - test {SCRIPTING FLUSH - is able to clear the scripts cache?} { - r set mykey myval - set v [r evalsha fd758d1589d044dd850a6f05d52f2eefd27f033f 1 mykey] - assert_equal $v myval - set e "" - r script flush - catch {r evalsha fd758d1589d044dd850a6f05d52f2eefd27f033f 1 mykey} e - set e - } {NOSCRIPT*} - - test {SCRIPT EXISTS - can detect already defined scripts?} { - r eval "return 1+1" 0 - r script exists a27e7e8a43702b7046d4f6a7ccf5b60cef6b9bd9 a27e7e8a43702b7046d4f6a7ccf5b60cef6b9bda - } {1 0} - - test {SCRIPT LOAD - is able to register scripts in the scripting cache} { - list \ - [r script load "return 'loaded'"] \ - [r evalsha b534286061d4b9e4026607613b95c06c06015ae8 0] - } {b534286061d4b9e4026607613b95c06c06015ae8 loaded} - - test "In the context of Lua the output of random commands gets ordered" { - r del myset - r sadd myset a b c d e f g h i l m n o p q r s t u v z aa aaa azz - r eval {return redis.call('smembers',KEYS[1])} 1 myset - } {a aa aaa azz b c d e f g h i l m n o p q r s t u v z} - - test "SORT is normally not alpha re-ordered for the scripting engine" { - r del myset - r sadd myset 1 2 3 4 10 - r eval {return redis.call('sort',KEYS[1],'desc')} 1 myset - } {10 4 3 2 1} - - test "SORT BY output gets ordered for scripting" { - r del myset - r sadd myset a b c d e f g h i l m n o p q r s t u v z aa aaa azz - r eval 
{return redis.call('sort',KEYS[1],'by','_')} 1 myset - } {a aa aaa azz b c d e f g h i l m n o p q r s t u v z} - - test "SORT BY with GET gets ordered for scripting" { - r del myset - r sadd myset a b c - r eval {return redis.call('sort',KEYS[1],'by','_','get','#','get','_:*')} 1 myset - } {a {} b {} c {}} - - test "redis.sha1hex() implementation" { - list [r eval {return redis.sha1hex('')} 0] \ - [r eval {return redis.sha1hex('Pizza & Mandolino')} 0] - } {da39a3ee5e6b4b0d3255bfef95601890afd80709 74822d82031af7493c20eefa13bd07ec4fada82f} - - test {Globals protection reading an undeclared global variable} { - catch {r eval {return a} 0} e - set e - } {*ERR*attempted to access unexisting global*} - - test {Globals protection setting an undeclared global*} { - catch {r eval {a=10} 0} e - set e - } {*ERR*attempted to create global*} - - test {Test an example script DECR_IF_GT} { - set decr_if_gt { - local current - - current = redis.call('get',KEYS[1]) - if not current then return nil end - if current > ARGV[1] then - return redis.call('decr',KEYS[1]) - else - return redis.call('get',KEYS[1]) - end - } - r set foo 5 - set res {} - lappend res [r eval $decr_if_gt 1 foo 2] - lappend res [r eval $decr_if_gt 1 foo 2] - lappend res [r eval $decr_if_gt 1 foo 2] - lappend res [r eval $decr_if_gt 1 foo 2] - lappend res [r eval $decr_if_gt 1 foo 2] - set res - } {4 3 2 2 2} - - test {Scripting engine resets PRNG at every script execution} { - set rand1 [r eval {return tostring(math.random())} 0] - set rand2 [r eval {return tostring(math.random())} 0] - assert_equal $rand1 $rand2 - } - - test {Scripting engine PRNG can be seeded correctly} { - set rand1 [r eval { - math.randomseed(ARGV[1]); return tostring(math.random()) - } 0 10] - set rand2 [r eval { - math.randomseed(ARGV[1]); return tostring(math.random()) - } 0 10] - set rand3 [r eval { - math.randomseed(ARGV[1]); return tostring(math.random()) - } 0 20] - assert_equal $rand1 $rand2 - assert {$rand2 ne $rand3} - } - - test {EVAL does not leak in the Lua stack} { - r set x 0 - # Use a non blocking client to speedup the loop. - set rd [redis_deferring_client] - for {set j 0} {$j < 10000} {incr j} { - $rd eval {return redis.call("incr",KEYS[1])} 1 x - } - for {set j 0} {$j < 10000} {incr j} { - $rd read - } - assert {[s used_memory_lua] < 1024*100} - $rd close - r get x - } {10000} - - test {EVAL processes writes from AOF in read-only slaves} { - r flushall - r config set appendonly yes - r eval {redis.call("set",KEYS[1],"100")} 1 foo - r eval {redis.call("incr",KEYS[1])} 1 foo - r eval {redis.call("incr",KEYS[1])} 1 foo - wait_for_condition 50 100 { - [s aof_rewrite_in_progress] == 0 - } else { - fail "AOF rewrite can't complete after CONFIG SET appendonly yes." 
- } - r config set slave-read-only yes - r slaveof 127.0.0.1 0 - r debug loadaof - set res [r get foo] - r slaveof no one - set res - } {102} - - test {We can call scripts rewriting client->argv from Lua} { - r del myset - r sadd myset a b c - r mset a 1 b 2 c 3 d 4 - assert {[r spop myset] ne {}} - assert {[r spop myset] ne {}} - assert {[r spop myset] ne {}} - assert {[r mget a b c d] eq {1 2 3 4}} - assert {[r spop myset] eq {}} - } - - test {Call Redis command with many args from Lua (issue #1764)} { - r eval { - local i - local x={} - redis.call('del','mylist') - for i=1,100 do - table.insert(x,i) - end - redis.call('rpush','mylist',unpack(x)) - return redis.call('lrange','mylist',0,-1) - } 0 - } {1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100} - - test {Number conversion precision test (issue #1118)} { - r eval { - local value = 9007199254740991 - redis.call("set","foo",value) - return redis.call("get","foo") - } 0 - } {9007199254740991} - - test {String containing number precision test (regression of issue #1118)} { - r eval { - redis.call("set", "key", "12039611435714932082") - return redis.call("get", "key") - } 0 - } {12039611435714932082} - - test {Verify negative arg count is error instead of crash (issue #1842)} { - catch { r eval { return "hello" } -12 } e - set e - } {ERR Number of keys can't be negative} - - test {Correct handling of reused argv (issue #1939)} { - r eval { - for i = 0, 10 do - redis.call('SET', 'a', '1') - redis.call('MGET', 'a', 'b', 'c') - redis.call('EXPIRE', 'a', 0) - redis.call('GET', 'a') - redis.call('MGET', 'a', 'b', 'c') - end - } 0 - } -} - -# Start a new server since the last test in this stanza will kill the -# instance at all. -start_server {tags {"scripting"}} { - test {Timedout read-only scripts can be killed by SCRIPT KILL} { - set rd [redis_deferring_client] - r config set lua-time-limit 10 - $rd eval {while true do end} 0 - after 200 - catch {r ping} e - assert_match {BUSY*} $e - r script kill - after 200 ; # Give some time to Lua to call the hook again... - assert_equal [r ping] "PONG" - } - - test {Timedout script link is still usable after Lua returns} { - r config set lua-time-limit 10 - r eval {for i=1,100000 do redis.call('ping') end return 'ok'} 0 - r ping - } {PONG} - - test {Timedout scripts that modified data can't be killed by SCRIPT KILL} { - set rd [redis_deferring_client] - r config set lua-time-limit 10 - $rd eval {redis.call('set',KEYS[1],'y'); while true do end} 1 x - after 200 - catch {r ping} e - assert_match {BUSY*} $e - catch {r script kill} e - assert_match {UNKILLABLE*} $e - catch {r ping} e - assert_match {BUSY*} $e - } - - # Note: keep this test at the end of this server stanza because it - # kills the server. - test {SHUTDOWN NOSAVE can kill a timedout script anyway} { - # The server sould be still unresponding to normal commands. - catch {r ping} e - assert_match {BUSY*} $e - catch {r shutdown nosave} - # Make sure the server was killed - catch {set rd [redis_deferring_client]} e - assert_match {*connection refused*} $e - } -} - -start_server {tags {"scripting repl"}} { - start_server {} { - test {Before the slave connects we issue two EVAL commands} { - # One with an error, but still executing a command. 
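# (Editorial aside: the hardcoded digests in this stanza are simply the
# SHA1 of the script body and can be recomputed client side; assuming
# tcllib is available, this should print the first digest quoted below:
#
#   package require sha1
#   set body {redis.call('incr',KEYS[1]); redis.call('nonexisting')}
#   puts [sha1::sha1 -hex $body]
# )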
- # SHA is: 67164fc43fa971f76fd1aaeeaf60c1c178d25876 - catch { - r eval {redis.call('incr',KEYS[1]); redis.call('nonexisting')} 1 x - } - # One command is correct: - # SHA is: 6f5ade10a69975e903c6d07b10ea44c6382381a5 - r eval {return redis.call('incr',KEYS[1])} 1 x - } {2} - - test {Connect a slave to the main instance} { - r -1 slaveof [srv 0 host] [srv 0 port] - wait_for_condition 50 100 { - [s -1 role] eq {slave} && - [string match {*master_link_status:up*} [r -1 info replication]] - } else { - fail "Can't turn the instance into a slave" - } - } - - test {Now use EVALSHA against the master, with both SHAs} { - # The server should replicate successful and unsuccessful - # commands as EVAL instead of EVALSHA. - catch { - r evalsha 67164fc43fa971f76fd1aaeeaf60c1c178d25876 1 x - } - r evalsha 6f5ade10a69975e903c6d07b10ea44c6382381a5 1 x - } {4} - - test {If EVALSHA was replicated as EVAL, 'x' should be '4'} { - wait_for_condition 50 100 { - [r -1 get x] eq {4} - } else { - fail "Expected 4 in x, but value is '[r -1 get x]'" - } - } - - test {Replication of script multiple pushes to list with BLPOP} { - set rd [redis_deferring_client] - $rd brpop a 0 - r eval { - redis.call("lpush",KEYS[1],"1"); - redis.call("lpush",KEYS[1],"2"); - } 1 a - set res [$rd read] - $rd close - wait_for_condition 50 100 { - [r -1 lrange a 0 -1] eq [r lrange a 0 -1] - } else { - fail "Expected list 'a' in slave and master to be the same, but they are respectively '[r -1 lrange a 0 -1]' and '[r lrange a 0 -1]'" - } - set res - } {a 1} - - test {EVALSHA replication when first call is readonly} { - r del x - r eval {if tonumber(ARGV[1]) > 0 then redis.call('incr', KEYS[1]) end} 1 x 0 - r evalsha 6e0e2745aa546d0b50b801a20983b70710aef3ce 1 x 0 - r evalsha 6e0e2745aa546d0b50b801a20983b70710aef3ce 1 x 1 - wait_for_condition 50 100 { - [r -1 get x] eq {1} - } else { - fail "Expected 1 in x, but value is '[r -1 get x]'" - } - } - - test {Lua scripts using SELECT are replicated correctly} { - r eval { - redis.call("set","foo1","bar1") - redis.call("select","10") - redis.call("incr","x") - redis.call("select","11") - redis.call("incr","z") - } 0 - r eval { - redis.call("set","foo1","bar1") - redis.call("select","10") - redis.call("incr","x") - redis.call("select","11") - redis.call("incr","z") - } 0 - wait_for_condition 50 100 { - [r -1 debug digest] eq [r debug digest] - } else { - fail "Master-Slave desync after Lua script using SELECT." 
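# (Editorial note: DEBUG DIGEST condenses the whole dataset into one
# SHA1, which is why a single comparison can stand in for a key-by-key
# check of master and slave here; an empty dataset digests to all zeros:
#
#   r debug digest    ;# 0000000000000000000000000000000000000000 when empty
# )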
- } - } - } -} diff --git a/tools/pika_migrate/tests/unit/slowlog.tcl b/tools/pika_migrate/tests/unit/slowlog.tcl deleted file mode 100644 index b25b91e2ce..0000000000 --- a/tools/pika_migrate/tests/unit/slowlog.tcl +++ /dev/null @@ -1,70 +0,0 @@ -start_server {tags {"slowlog"} overrides {slowlog-log-slower-than 1000000}} { - test {SLOWLOG - check that it starts with an empty log} { - r slowlog len - } {0} - - test {SLOWLOG - only logs commands taking more time than specified} { - r config set slowlog-log-slower-than 100000 - r ping - assert_equal [r slowlog len] 0 - r debug sleep 0.2 - assert_equal [r slowlog len] 1 - } - - test {SLOWLOG - max entries is correctly handled} { - r config set slowlog-log-slower-than 0 - r config set slowlog-max-len 10 - for {set i 0} {$i < 100} {incr i} { - r ping - } - r slowlog len - } {10} - - test {SLOWLOG - GET optional argument to limit output len works} { - llength [r slowlog get 5] - } {5} - - test {SLOWLOG - RESET subcommand works} { - r config set slowlog-log-slower-than 100000 - r slowlog reset - r slowlog len - } {0} - - test {SLOWLOG - logged entry sanity check} { - r debug sleep 0.2 - set e [lindex [r slowlog get] 0] - assert_equal [llength $e] 4 - assert_equal [lindex $e 0] 105 - assert_equal [expr {[lindex $e 2] > 100000}] 1 - assert_equal [lindex $e 3] {debug sleep 0.2} - } - - test {SLOWLOG - commands with too many arguments are trimmed} { - r config set slowlog-log-slower-than 0 - r slowlog reset - r sadd set 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 - set e [lindex [r slowlog get] 0] - lindex $e 3 - } {sadd set 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 {... (2 more arguments)}} - - test {SLOWLOG - too long arguments are trimmed} { - r config set slowlog-log-slower-than 0 - r slowlog reset - set arg [string repeat A 129] - r sadd set foo $arg - set e [lindex [r slowlog get] 0] - lindex $e 3 - } {sadd set foo {AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA... (1 more bytes)}} - - test {SLOWLOG - EXEC is not logged, just executed commands} { - r config set slowlog-log-slower-than 100000 - r slowlog reset - assert_equal [r slowlog len] 0 - r multi - r debug sleep 0.2 - r exec - assert_equal [r slowlog len] 1 - set e [lindex [r slowlog get] 0] - assert_equal [lindex $e 3] {debug sleep 0.2} - } -} diff --git a/tools/pika_migrate/tests/unit/sort.tcl b/tools/pika_migrate/tests/unit/sort.tcl deleted file mode 100644 index a25ffeb5ce..0000000000 --- a/tools/pika_migrate/tests/unit/sort.tcl +++ /dev/null @@ -1,311 +0,0 @@ -start_server { - tags {"sort"} - overrides { - "list-max-ziplist-value" 16 - "list-max-ziplist-entries" 32 - "set-max-intset-entries" 32 - } -} { - proc create_random_dataset {num cmd} { - set tosort {} - set result {} - array set seenrand {} - r del tosort - for {set i 0} {$i < $num} {incr i} { - # Make sure all the weights are different because - # Redis does not use a stable sort but Tcl does. 
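# (Editorial illustration: with duplicate weights an unstable sort may
# legally return equal-weight elements in either order, so comparing
# against Tcl's stable lsort would be flaky; unique weights remove the
# ambiguity. The hazard in one line of Tcl:
#
#   lsort -index 1 -real {{a 1} {b 1}}    ;# stable, always {a 1} {b 1}
#   # Redis SORT BY makes no ordering promise for equal weights
# )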
- while 1 { - randpath { - set rint [expr int(rand()*1000000)] - } { - set rint [expr rand()] - } - if {![info exists seenrand($rint)]} break - } - set seenrand($rint) x - r $cmd tosort $i - r set weight_$i $rint - r hset wobj_$i weight $rint - lappend tosort [list $i $rint] - } - set sorted [lsort -index 1 -real $tosort] - for {set i 0} {$i < $num} {incr i} { - lappend result [lindex $sorted $i 0] - } - set _ $result - } - - foreach {num cmd enc title} { - 16 lpush ziplist "Ziplist" - 1000 lpush linkedlist "Linked list" - 10000 lpush linkedlist "Big Linked list" - 16 sadd intset "Intset" - 1000 sadd hashtable "Hash table" - 10000 sadd hashtable "Big Hash table" - } { - set result [create_random_dataset $num $cmd] - assert_encoding $enc tosort - - test "$title: SORT BY key" { - assert_equal $result [r sort tosort BY weight_*] - } - - test "$title: SORT BY key with limit" { - assert_equal [lrange $result 5 9] [r sort tosort BY weight_* LIMIT 5 5] - } - - test "$title: SORT BY hash field" { - assert_equal $result [r sort tosort BY wobj_*->weight] - } - } - - set result [create_random_dataset 16 lpush] - test "SORT GET #" { - assert_equal [lsort -integer $result] [r sort tosort GET #] - } - - test "SORT GET " { - r del foo - set res [r sort tosort GET foo] - assert_equal 16 [llength $res] - foreach item $res { assert_equal {} $item } - } - - test "SORT GET (key and hash) with sanity check" { - set l1 [r sort tosort GET # GET weight_*] - set l2 [r sort tosort GET # GET wobj_*->weight] - foreach {id1 w1} $l1 {id2 w2} $l2 { - assert_equal $id1 $id2 - assert_equal $w1 [r get weight_$id1] - assert_equal $w2 [r get weight_$id1] - } - } - - test "SORT BY key STORE" { - r sort tosort BY weight_* store sort-res - assert_equal $result [r lrange sort-res 0 -1] - assert_equal 16 [r llen sort-res] - assert_encoding ziplist sort-res - } - - test "SORT BY hash field STORE" { - r sort tosort BY wobj_*->weight store sort-res - assert_equal $result [r lrange sort-res 0 -1] - assert_equal 16 [r llen sort-res] - assert_encoding ziplist sort-res - } - - test "SORT DESC" { - assert_equal [lsort -decreasing -integer $result] [r sort tosort DESC] - } - - test "SORT ALPHA against integer encoded strings" { - r del mylist - r lpush mylist 2 - r lpush mylist 1 - r lpush mylist 3 - r lpush mylist 10 - r sort mylist alpha - } {1 10 2 3} - - test "SORT sorted set" { - r del zset - r zadd zset 1 a - r zadd zset 5 b - r zadd zset 2 c - r zadd zset 10 d - r zadd zset 3 e - r sort zset alpha desc - } {e d c b a} - - test "SORT sorted set BY nosort should retain ordering" { - r del zset - r zadd zset 1 a - r zadd zset 5 b - r zadd zset 2 c - r zadd zset 10 d - r zadd zset 3 e - r multi - r sort zset by nosort asc - r sort zset by nosort desc - r exec - } {{a c e b d} {d b e c a}} - - test "SORT sorted set BY nosort + LIMIT" { - r del zset - r zadd zset 1 a - r zadd zset 5 b - r zadd zset 2 c - r zadd zset 10 d - r zadd zset 3 e - assert_equal [r sort zset by nosort asc limit 0 1] {a} - assert_equal [r sort zset by nosort desc limit 0 1] {d} - assert_equal [r sort zset by nosort asc limit 0 2] {a c} - assert_equal [r sort zset by nosort desc limit 0 2] {d b} - assert_equal [r sort zset by nosort limit 5 10] {} - assert_equal [r sort zset by nosort limit -10 100] {a c e b d} - } - - test "SORT sorted set BY nosort works as expected from scripts" { - r del zset - r zadd zset 1 a - r zadd zset 5 b - r zadd zset 2 c - r zadd zset 10 d - r zadd zset 3 e - r eval { - return {redis.call('sort',KEYS[1],'by','nosort','asc'), - 
redis.call('sort',KEYS[1],'by','nosort','desc')} - } 1 zset - } {{a c e b d} {d b e c a}} - - test "SORT sorted set: +inf and -inf handling" { - r del zset - r zadd zset -100 a - r zadd zset 200 b - r zadd zset -300 c - r zadd zset 1000000 d - r zadd zset +inf max - r zadd zset -inf min - r zrange zset 0 -1 - } {min c a b d max} - - test "SORT regression for issue #19, sorting floats" { - r flushdb - set floats {1.1 5.10 3.10 7.44 2.1 5.75 6.12 0.25 1.15} - foreach x $floats { - r lpush mylist $x - } - assert_equal [lsort -real $floats] [r sort mylist] - } - - test "SORT with STORE returns zero if result is empty (github issue 224)" { - r flushdb - r sort foo store bar - } {0} - - test "SORT with STORE does not create empty lists (github issue 224)" { - r flushdb - r lpush foo bar - r sort foo alpha limit 10 10 store zap - r exists zap - } {0} - - test "SORT with STORE removes key if result is empty (github issue 227)" { - r flushdb - r lpush foo bar - r sort emptylist store foo - r exists foo - } {0} - - test "SORT with BY and STORE should still order output" { - r del myset mylist - r sadd myset a b c d e f g h i l m n o p q r s t u v z aa aaa azz - r sort myset alpha by _ store mylist - r lrange mylist 0 -1 - } {a aa aaa azz b c d e f g h i l m n o p q r s t u v z} - - test "SORT will complain with numerical sorting and bad doubles (1)" { - r del myset - r sadd myset 1 2 3 4 not-a-double - set e {} - catch {r sort myset} e - set e - } {*ERR*double*} - - test "SORT will complain with numerical sorting and bad doubles (2)" { - r del myset - r sadd myset 1 2 3 4 - r mset score:1 10 score:2 20 score:3 30 score:4 not-a-double - set e {} - catch {r sort myset by score:*} e - set e - } {*ERR*double*} - - test "SORT BY sub-sorts lexicographically if score is the same" { - r del myset - r sadd myset a b c d e f g h i l m n o p q r s t u v z aa aaa azz - foreach ele {a aa aaa azz b c d e f g h i l m n o p q r s t u v z} { - set score:$ele 100 - } - r sort myset by score:* - } {a aa aaa azz b c d e f g h i l m n o p q r s t u v z} - - test "SORT GET with pattern ending with just -> does not get hash field" { - r del mylist - r lpush mylist a - r set x:a-> 100 - r sort mylist by num get x:*-> - } {100} - - test "SORT by nosort retains native order for lists" { - r del testa - r lpush testa 2 1 4 3 5 - r sort testa by nosort - } {5 3 4 1 2} - - test "SORT by nosort plus store retains native order for lists" { - r del testa - r lpush testa 2 1 4 3 5 - r sort testa by nosort store testb - r lrange testb 0 -1 - } {5 3 4 1 2} - - test "SORT by nosort with limit returns based on original list order" { - r sort testa by nosort limit 0 3 store testb - r lrange testb 0 -1 - } {5 3 4} - - tags {"slow"} { - set num 100 - set res [create_random_dataset $num lpush] - - test "SORT speed, $num element list BY key, 100 times" { - set start [clock clicks -milliseconds] - for {set i 0} {$i < 100} {incr i} { - set sorted [r sort tosort BY weight_* LIMIT 0 10] - } - set elapsed [expr [clock clicks -milliseconds]-$start] - if {$::verbose} { - puts -nonewline "\n Average time to sort: [expr double($elapsed)/100] milliseconds " - flush stdout - } - } - - test "SORT speed, $num element list BY hash field, 100 times" { - set start [clock clicks -milliseconds] - for {set i 0} {$i < 100} {incr i} { - set sorted [r sort tosort BY wobj_*->weight LIMIT 0 10] - } - set elapsed [expr [clock clicks -milliseconds]-$start] - if {$::verbose} { - puts -nonewline "\n Average time to sort: [expr double($elapsed)/100] milliseconds " - 
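# (Editorial sketch of the timing pattern these speed tests repeat,
# factored into a hypothetical helper; note that clock clicks
# -milliseconds measures wall-clock time, so a background save can skew
# an individual run:
#
#   proc avg_ms {n script} {
#       set start [clock clicks -milliseconds]
#       for {set i 0} {$i < $n} {incr i} { uplevel 1 $script }
#       expr {double([clock clicks -milliseconds]-$start)/$n}
#   }
#   # avg_ms 100 {r sort tosort BY weight_* LIMIT 0 10}
# )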
flush stdout - } - } - - test "SORT speed, $num element list directly, 100 times" { - set start [clock clicks -milliseconds] - for {set i 0} {$i < 100} {incr i} { - set sorted [r sort tosort LIMIT 0 10] - } - set elapsed [expr [clock clicks -milliseconds]-$start] - if {$::verbose} { - puts -nonewline "\n Average time to sort: [expr double($elapsed)/100] milliseconds " - flush stdout - } - } - - test "SORT speed, $num element list BY , 100 times" { - set start [clock clicks -milliseconds] - for {set i 0} {$i < 100} {incr i} { - set sorted [r sort tosort BY nokey LIMIT 0 10] - } - set elapsed [expr [clock clicks -milliseconds]-$start] - if {$::verbose} { - puts -nonewline "\n Average time to sort: [expr double($elapsed)/100] milliseconds " - flush stdout - } - } - } -} diff --git a/tools/pika_migrate/tests/unit/type/hash.tcl b/tools/pika_migrate/tests/unit/type/hash.tcl deleted file mode 100644 index 55441bd33a..0000000000 --- a/tools/pika_migrate/tests/unit/type/hash.tcl +++ /dev/null @@ -1,470 +0,0 @@ -start_server {tags {"hash"}} { - test {HSET/HLEN - Small hash creation} { - array set smallhash {} - for {set i 0} {$i < 8} {incr i} { - set key [randstring 0 8 alpha] - set val [randstring 0 8 alpha] - if {[info exists smallhash($key)]} { - incr i -1 - continue - } - r hset smallhash $key $val - set smallhash($key) $val - } - list [r hlen smallhash] - } {8} - -# test {Is the small hash encoded with a ziplist?} { -# assert_encoding ziplist smallhash -# } - - test {HSET/HLEN - Big hash creation} { - array set bighash {} - for {set i 0} {$i < 1024} {incr i} { - set key [randstring 0 8 alpha] - set val [randstring 0 8 alpha] - if {[info exists bighash($key)]} { - incr i -1 - continue - } - r hset bighash $key $val - set bighash($key) $val - } - list [r hlen bighash] - } {1024} - -# test {Is the big hash encoded with a ziplist?} { -# assert_encoding hashtable bighash -# } - - test {HGET against the small hash} { - set err {} - foreach k [array names smallhash *] { - if {$smallhash($k) ne [r hget smallhash $k]} { - set err "$smallhash($k) != [r hget smallhash $k]" - break - } - } - set _ $err - } {} - - test {HGET against the big hash} { - set err {} - foreach k [array names bighash *] { - if {$bighash($k) ne [r hget bighash $k]} { - set err "$bighash($k) != [r hget bighash $k]" - break - } - } - set _ $err - } {} - - test {HGET against non existing key} { - set rv {} - lappend rv [r hget smallhash __123123123__] - lappend rv [r hget bighash __123123123__] - set _ $rv - } {{} {}} - - test {HSET in update and insert mode} { - set rv {} - set k [lindex [array names smallhash *] 0] - lappend rv [r hset smallhash $k newval1] - set smallhash($k) newval1 - lappend rv [r hget smallhash $k] - lappend rv [r hset smallhash __foobar123__ newval] - set k [lindex [array names bighash *] 0] - lappend rv [r hset bighash $k newval2] - set bighash($k) newval2 - lappend rv [r hget bighash $k] - lappend rv [r hset bighash __foobar123__ newval] - lappend rv [r hdel smallhash __foobar123__] - lappend rv [r hdel bighash __foobar123__] - set _ $rv - } {0 newval1 1 0 newval2 1 1 1} - - test {HSETNX target key missing - small hash} { - r hsetnx smallhash __123123123__ foo - r hget smallhash __123123123__ - } {foo} - - test {HSETNX target key exists - small hash} { - r hsetnx smallhash __123123123__ bar - set result [r hget smallhash __123123123__] - r hdel smallhash __123123123__ - set _ $result - } {foo} - - test {HSETNX target key missing - big hash} { - r hsetnx bighash __123123123__ foo - r hget bighash __123123123__ 
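# (Editorial sketch of the HSETNX contract exercised by these tests: it
# writes only when the field is absent, returning 1 on creation and 0
# otherwise:
#
#   r hsetnx h f v1    ;# -> 1, field created
#   r hsetnx h f v2    ;# -> 0, v1 untouched
#   r hget h f         ;# -> v1
# )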
- } {foo} - - test {HSETNX target key exists - big hash} { - r hsetnx bighash __123123123__ bar - set result [r hget bighash __123123123__] - r hdel bighash __123123123__ - set _ $result - } {foo} - - test {HMSET wrong number of args} { - catch {r hmset smallhash key1 val1 key2} err - format $err - } {*wrong number*} - - test {HMSET - small hash} { - set args {} - foreach {k v} [array get smallhash] { - set newval [randstring 0 8 alpha] - set smallhash($k) $newval - lappend args $k $newval - } - r hmset smallhash {*}$args - } {OK} - - test {HMSET - big hash} { - set args {} - foreach {k v} [array get bighash] { - set newval [randstring 0 8 alpha] - set bighash($k) $newval - lappend args $k $newval - } - r hmset bighash {*}$args - } {OK} - - test {HMGET against non existing key and fields} { - set rv {} - lappend rv [r hmget doesntexist __123123123__ __456456456__] - lappend rv [r hmget smallhash __123123123__ __456456456__] - lappend rv [r hmget bighash __123123123__ __456456456__] - set _ $rv - } {{{} {}} {{} {}} {{} {}}} - -# test {HMGET against wrong type} { -# r set wrongtype somevalue -# assert_error "*wrong*" {r hmget wrongtype field1 field2} -# } - - test {HMGET - small hash} { - set keys {} - set vals {} - foreach {k v} [array get smallhash] { - lappend keys $k - lappend vals $v - } - set err {} - set result [r hmget smallhash {*}$keys] - if {$vals ne $result} { - set err "$vals != $result" - break - } - set _ $err - } {} - - test {HMGET - big hash} { - set keys {} - set vals {} - foreach {k v} [array get bighash] { - lappend keys $k - lappend vals $v - } - set err {} - set result [r hmget bighash {*}$keys] - if {$vals ne $result} { - set err "$vals != $result" - break - } - set _ $err - } {} - - test {HKEYS - small hash} { - lsort [r hkeys smallhash] - } [lsort [array names smallhash *]] - - test {HKEYS - big hash} { - lsort [r hkeys bighash] - } [lsort [array names bighash *]] - - test {HVALS - small hash} { - set vals {} - foreach {k v} [array get smallhash] { - lappend vals $v - } - set _ [lsort $vals] - } [lsort [r hvals smallhash]] - - test {HVALS - big hash} { - set vals {} - foreach {k v} [array get bighash] { - lappend vals $v - } - set _ [lsort $vals] - } [lsort [r hvals bighash]] - - test {HGETALL - small hash} { - lsort [r hgetall smallhash] - } [lsort [array get smallhash]] - - test {HGETALL - big hash} { - lsort [r hgetall bighash] - } [lsort [array get bighash]] - - test {HDEL and return value} { - set rv {} - lappend rv [r hdel smallhash nokey] - lappend rv [r hdel bighash nokey] - set k [lindex [array names smallhash *] 0] - lappend rv [r hdel smallhash $k] - lappend rv [r hdel smallhash $k] - lappend rv [r hget smallhash $k] - unset smallhash($k) - set k [lindex [array names bighash *] 0] - lappend rv [r hdel bighash $k] - lappend rv [r hdel bighash $k] - lappend rv [r hget bighash $k] - unset bighash($k) - set _ $rv - } {0 0 1 0 {} 1 0 {}} - - test {HDEL - more than a single value} { - set rv {} - r del myhash - r hmset myhash a 1 b 2 c 3 - assert_equal 0 [r hdel myhash x y] - assert_equal 2 [r hdel myhash a c f] - r hgetall myhash - } {b 2} - - test {HDEL - hash becomes empty before deleting all specified fields} { - r del myhash - r hmset myhash a 1 b 2 c 3 - assert_equal 3 [r hdel myhash a b c d e] - assert_equal 0 [r exists myhash] - } - - test {HEXISTS} { - set rv {} - set k [lindex [array names smallhash *] 0] - lappend rv [r hexists smallhash $k] - lappend rv [r hexists smallhash nokey] - set k [lindex [array names bighash *] 0] - lappend rv [r hexists 
bighash $k] - lappend rv [r hexists bighash nokey] - } {1 0 1 0} - -# test {Is a ziplist encoded Hash promoted on big payload?} { -# r hset smallhash foo [string repeat a 1024] -# r debug object smallhash -# } {*hashtable*} - - test {HINCRBY against non existing database key} { - r del htest - list [r hincrby htest foo 2] - } {2} - - test {HINCRBY against non existing hash key} { - set rv {} - r hdel smallhash tmp - r hdel bighash tmp - lappend rv [r hincrby smallhash tmp 2] - lappend rv [r hget smallhash tmp] - lappend rv [r hincrby bighash tmp 2] - lappend rv [r hget bighash tmp] - } {2 2 2 2} - - test {HINCRBY against hash key created by hincrby itself} { - set rv {} - lappend rv [r hincrby smallhash tmp 3] - lappend rv [r hget smallhash tmp] - lappend rv [r hincrby bighash tmp 3] - lappend rv [r hget bighash tmp] - } {5 5 5 5} - - test {HINCRBY against hash key originally set with HSET} { - r hset smallhash tmp 100 - r hset bighash tmp 100 - list [r hincrby smallhash tmp 2] [r hincrby bighash tmp 2] - } {102 102} - - test {HINCRBY over 32bit value} { - r hset smallhash tmp 17179869184 - r hset bighash tmp 17179869184 - list [r hincrby smallhash tmp 1] [r hincrby bighash tmp 1] - } {17179869185 17179869185} - - test {HINCRBY over 32bit value with over 32bit increment} { - r hset smallhash tmp 17179869184 - r hset bighash tmp 17179869184 - list [r hincrby smallhash tmp 17179869184] [r hincrby bighash tmp 17179869184] - } {34359738368 34359738368} - - test {HINCRBY fails against hash value with spaces (left)} { - r hset smallhash str " 11" - r hset bighash str " 11" - catch {r hincrby smallhash str 1} smallerr - catch {r hincrby smallhash str 1} bigerr - set rv {} - lappend rv [string match "ERR*not an integer*" $smallerr] - lappend rv [string match "ERR*not an integer*" $bigerr] - } {1 1} - - test {HINCRBY fails against hash value with spaces (right)} { - r hset smallhash str "11 " - r hset bighash str "11 " - catch {r hincrby smallhash str 1} smallerr - catch {r hincrby smallhash str 1} bigerr - set rv {} - lappend rv [string match "ERR*not an integer*" $smallerr] - lappend rv [string match "ERR*not an integer*" $bigerr] - } {1 1} - - test {HINCRBY can detect overflows} { - set e {} - r hset hash n -9223372036854775484 - assert {[r hincrby hash n -1] == -9223372036854775485} - catch {r hincrby hash n -10000} e - set e - } {*overflow*} - - test {HINCRBYFLOAT against non existing database key} { - r del htest - list [r hincrbyfloat htest foo 2.5] - } {2.5} - - test {HINCRBYFLOAT against non existing hash key} { - set rv {} - r hdel smallhash tmp - r hdel bighash tmp - lappend rv [roundFloat [r hincrbyfloat smallhash tmp 2.5]] - lappend rv [roundFloat [r hget smallhash tmp]] - lappend rv [roundFloat [r hincrbyfloat bighash tmp 2.5]] - lappend rv [roundFloat [r hget bighash tmp]] - } {2.5 2.5 2.5 2.5} - - test {HINCRBYFLOAT against hash key created by hincrby itself} { - set rv {} - lappend rv [roundFloat [r hincrbyfloat smallhash tmp 3.5]] - lappend rv [roundFloat [r hget smallhash tmp]] - lappend rv [roundFloat [r hincrbyfloat bighash tmp 3.5]] - lappend rv [roundFloat [r hget bighash tmp]] - } {6 6 6 6} - - test {HINCRBYFLOAT against hash key originally set with HSET} { - r hset smallhash tmp 100 - r hset bighash tmp 100 - list [roundFloat [r hincrbyfloat smallhash tmp 2.5]] \ - [roundFloat [r hincrbyfloat bighash tmp 2.5]] - } {102.5 102.5} - - test {HINCRBYFLOAT over 32bit value} { - r hset smallhash tmp 17179869184 - r hset bighash tmp 17179869184 - list [r hincrbyfloat smallhash tmp 
1] \ - [r hincrbyfloat bighash tmp 1] - } {17179869185 17179869185} - - test {HINCRBYFLOAT over 32bit value with over 32bit increment} { - r hset smallhash tmp 17179869184 - r hset bighash tmp 17179869184 - list [r hincrbyfloat smallhash tmp 17179869184] \ - [r hincrbyfloat bighash tmp 17179869184] - } {34359738368 34359738368} - - test {HINCRBYFLOAT fails against hash value with spaces (left)} { - r hset smallhash str " 11" - r hset bighash str " 11" - catch {r hincrbyfloat smallhash str 1} smallerr - catch {r hincrbyfloat smallhash str 1} bigerr - set rv {} - lappend rv [string match "ERR*not*float*" $smallerr] - lappend rv [string match "ERR*not*float*" $bigerr] - } {1 1} - - test {HINCRBYFLOAT fails against hash value with spaces (right)} { - r hset smallhash str "11 " - r hset bighash str "11 " - catch {r hincrbyfloat smallhash str 1} smallerr - catch {r hincrbyfloat smallhash str 1} bigerr - set rv {} - lappend rv [string match "ERR*not*float*" $smallerr] - lappend rv [string match "ERR*not*float*" $bigerr] - } {1 1} - - test {Hash ziplist regression test for large keys} { - r hset hash kkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkk a - r hset hash kkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkk b - r hget hash kkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkk - } {b} - - foreach size {10 512} { - test "Hash fuzzing #1 - $size fields" { - for {set times 0} {$times < 10} {incr times} { - catch {unset hash} - array set hash {} - r del hash - - # Create - for {set j 0} {$j < $size} {incr j} { - set field [randomValue] - set value [randomValue] - r hset hash $field $value - set hash($field) $value - } - - # Verify - foreach {k v} [array get hash] { - assert_equal $v [r hget hash $k] - } - assert_equal [array size hash] [r hlen hash] - } - } - - test "Hash fuzzing #2 - $size fields" { - for {set times 0} {$times < 10} {incr times} { - catch {unset hash} - array set hash {} - r del hash - - # Create - for {set j 0} {$j < $size} {incr j} { - randpath { - set field [randomValue] - set value [randomValue] - r hset hash $field $value - set hash($field) $value - } { - set field [randomSignedInt 512] - set value [randomSignedInt 512] - r hset hash $field $value - set hash($field) $value - } { - randpath { - set field [randomValue] - } { - set field [randomSignedInt 512] - } - r hdel hash $field - unset -nocomplain hash($field) - } - } - - # Verify - foreach {k v} [array get hash] { - assert_equal $v [r hget hash $k] - } - assert_equal [array size hash] [r hlen hash] - } - } - } - -# test {Stress test the hash ziplist -> hashtable encoding conversion} { -# r config set hash-max-ziplist-entries 32 -# for {set j 0} {$j < 100} {incr j} { -# r del 
myhash -# for {set i 0} {$i < 64} {incr i} { -# r hset myhash [randomValue] [randomValue] -# } -# assert {[r object encoding myhash] eq {hashtable}} -# } -# } -} diff --git a/tools/pika_migrate/tests/unit/type/list-2.tcl b/tools/pika_migrate/tests/unit/type/list-2.tcl deleted file mode 100644 index bf6a055eba..0000000000 --- a/tools/pika_migrate/tests/unit/type/list-2.tcl +++ /dev/null @@ -1,44 +0,0 @@ -start_server { - tags {"list"} - overrides { - "list-max-ziplist-value" 16 - "list-max-ziplist-entries" 256 - } -} { - source "tests/unit/type/list-common.tcl" - - foreach {type large} [array get largevalue] { - tags {"slow"} { - test "LTRIM stress testing - $type" { - set mylist {} - set startlen 32 - r del mylist - - # Start with the large value to ensure the - # right encoding is used. - r rpush mylist $large - lappend mylist $large - - for {set i 0} {$i < $startlen} {incr i} { - set str [randomInt 9223372036854775807] - r rpush mylist $str - lappend mylist $str - } - - for {set i 0} {$i < 1000} {incr i} { - set min [expr {int(rand()*$startlen)}] - set max [expr {$min+int(rand()*$startlen)}] - set mylist [lrange $mylist $min $max] - r ltrim mylist $min $max - assert_equal $mylist [r lrange mylist 0 -1] - - for {set j [r llen mylist]} {$j < $startlen} {incr j} { - set str [randomInt 9223372036854775807] - r rpush mylist $str - lappend mylist $str - } - } - } - } - } -} diff --git a/tools/pika_migrate/tests/unit/type/list-3.tcl b/tools/pika_migrate/tests/unit/type/list-3.tcl deleted file mode 100644 index 94f9a0b797..0000000000 --- a/tools/pika_migrate/tests/unit/type/list-3.tcl +++ /dev/null @@ -1,79 +0,0 @@ -start_server { - tags {list ziplist} - overrides { - "list-max-ziplist-value" 200000 - "list-max-ziplist-entries" 256 - } -} { - test {Explicit regression for a list bug} { - set mylist {49376042582 {BkG2o\pIC]4YYJa9cJ4GWZalG[4tin;1D2whSkCOW`mX;SFXGyS8sedcff3fQI^tgPCC@^Nu1J6o]meM@Lko]t_jRyotK?tH[\EvWqS]b`o2OCtjg:?nUTwdjpcUm]y:pg5q24q7LlCOwQE^}} - r del l - r rpush l [lindex $mylist 0] - r rpush l [lindex $mylist 1] - assert_equal [r lindex l 0] [lindex $mylist 0] - assert_equal [r lindex l 1] [lindex $mylist 1] - } - - tags {slow} { - test {ziplist implementation: value encoding and backlink} { - if {$::accurate} {set iterations 100} else {set iterations 10} - for {set j 0} {$j < $iterations} {incr j} { - r del l - set l {} - for {set i 0} {$i < 200} {incr i} { - randpath { - set data [string repeat x [randomInt 100000]] - } { - set data [randomInt 65536] - } { - set data [randomInt 4294967296] - } { - set data [randomInt 18446744073709551616] - } { - set data -[randomInt 65536] - if {$data eq {-0}} {set data 0} - } { - set data -[randomInt 4294967296] - if {$data eq {-0}} {set data 0} - } { - set data -[randomInt 18446744073709551616] - if {$data eq {-0}} {set data 0} - } - lappend l $data - r rpush l $data - } - assert_equal [llength $l] [r llen l] - # Traverse backward - for {set i 199} {$i >= 0} {incr i -1} { - if {[lindex $l $i] ne [r lindex l $i]} { - assert_equal [lindex $l $i] [r lindex l $i] - } - } - } - } - - test {ziplist implementation: encoding stress testing} { - for {set j 0} {$j < 200} {incr j} { - r del l - set l {} - set len [randomInt 400] - for {set i 0} {$i < $len} {incr i} { - set rv [randomValue] - randpath { - lappend l $rv - r rpush l $rv - } { - set l [concat [list $rv] $l] - r lpush l $rv - } - } - assert_equal [llength $l] [r llen l] - for {set i 0} {$i < $len} {incr i} { - if {[lindex $l $i] ne [r lindex l $i]} { - assert_equal [lindex $l $i] [r lindex l 
$i] - } - } - } - } - } -} diff --git a/tools/pika_migrate/tests/unit/type/list-common.tcl b/tools/pika_migrate/tests/unit/type/list-common.tcl deleted file mode 100644 index ab45f0b31b..0000000000 --- a/tools/pika_migrate/tests/unit/type/list-common.tcl +++ /dev/null @@ -1,5 +0,0 @@ -# We need a value larger than list-max-ziplist-value to make sure -# the list has the right encoding when it is swapped in again. -array set largevalue {} -set largevalue(ziplist) "hello" -set largevalue(linkedlist) [string repeat "hello" 4] diff --git a/tools/pika_migrate/tests/unit/type/list.tcl b/tools/pika_migrate/tests/unit/type/list.tcl deleted file mode 100644 index 17358ae378..0000000000 --- a/tools/pika_migrate/tests/unit/type/list.tcl +++ /dev/null @@ -1,896 +0,0 @@ -start_server { - tags {"list"} - overrides { - "list-max-ziplist-value" 16 - "list-max-ziplist-entries" 256 - } -} { - source "tests/unit/type/list-common.tcl" - - test {LPUSH, RPUSH, LLENGTH, LINDEX, LPOP - ziplist} { - # first lpush then rpush - assert_equal 1 [r lpush myziplist1 a] - assert_equal 2 [r rpush myziplist1 b] - assert_equal 3 [r rpush myziplist1 c] - assert_equal 3 [r llen myziplist1] - assert_equal a [r lindex myziplist1 0] - assert_equal b [r lindex myziplist1 1] - assert_equal c [r lindex myziplist1 2] - assert_equal {} [r lindex myziplist2 3] - assert_equal c [r rpop myziplist1] - assert_equal a [r lpop myziplist1] -# assert_encoding ziplist myziplist1 - - # first rpush then lpush - assert_equal 1 [r rpush myziplist2 a] - assert_equal 2 [r lpush myziplist2 b] - assert_equal 3 [r lpush myziplist2 c] - assert_equal 3 [r llen myziplist2] - assert_equal c [r lindex myziplist2 0] - assert_equal b [r lindex myziplist2 1] - assert_equal a [r lindex myziplist2 2] - assert_equal {} [r lindex myziplist2 3] - assert_equal a [r rpop myziplist2] - assert_equal c [r lpop myziplist2] -# assert_encoding ziplist myziplist2 - } - - test {LPUSH, RPUSH, LLENGTH, LINDEX, LPOP - regular list} { - # first lpush then rpush - assert_equal 1 [r lpush mylist1 $largevalue(linkedlist)] -# assert_encoding linkedlist mylist1 - assert_equal 2 [r rpush mylist1 b] - assert_equal 3 [r rpush mylist1 c] - assert_equal 3 [r llen mylist1] - assert_equal $largevalue(linkedlist) [r lindex mylist1 0] - assert_equal b [r lindex mylist1 1] - assert_equal c [r lindex mylist1 2] - assert_equal {} [r lindex mylist1 3] - assert_equal c [r rpop mylist1] - assert_equal $largevalue(linkedlist) [r lpop mylist1] - - # first rpush then lpush - assert_equal 1 [r rpush mylist2 $largevalue(linkedlist)] -# assert_encoding linkedlist mylist2 - assert_equal 2 [r lpush mylist2 b] - assert_equal 3 [r lpush mylist2 c] - assert_equal 3 [r llen mylist2] - assert_equal c [r lindex mylist2 0] - assert_equal b [r lindex mylist2 1] - assert_equal $largevalue(linkedlist) [r lindex mylist2 2] - assert_equal {} [r lindex mylist2 3] - assert_equal $largevalue(linkedlist) [r rpop mylist2] - assert_equal c [r lpop mylist2] - } - - test {R/LPOP against empty list} { - r lpop non-existing-list - } {} - - test {Variadic RPUSH/LPUSH} { - r del mylist - assert_equal 4 [r lpush mylist a b c d] - assert_equal 8 [r rpush mylist 0 1 2 3] - assert_equal {d c b a 0 1 2 3} [r lrange mylist 0 -1] - } - - test {DEL a list - ziplist} { - assert_equal 1 [r del myziplist2] - assert_equal 0 [r exists myziplist2] - assert_equal 0 [r llen myziplist2] - } - - test {DEL a list - regular list} { - assert_equal 1 [r del mylist2] - assert_equal 0 [r exists mylist2] - assert_equal 0 [r llen mylist2] - } - - proc 
create_ziplist {key entries} { - r del $key - foreach entry $entries { r rpush $key $entry } -# assert_encoding ziplist $key - } - - proc create_linkedlist {key entries} { - r del $key - foreach entry $entries { r rpush $key $entry } -# assert_encoding linkedlist $key - } - -# foreach {type large} [array get largevalue] { -# test "BLPOP, BRPOP: single existing list - $type" { -# set rd [redis_deferring_client] -# create_$type blist "a b $large c d" -# -# $rd blpop blist 1 -# assert_equal {blist a} [$rd read] -# $rd brpop blist 1 -# assert_equal {blist d} [$rd read] -# -# $rd blpop blist 1 -# assert_equal {blist b} [$rd read] -# $rd brpop blist 1 -# assert_equal {blist c} [$rd read] -# } -# -# test "BLPOP, BRPOP: multiple existing lists - $type" { -# set rd [redis_deferring_client] -# create_$type blist1 "a $large c" -# create_$type blist2 "d $large f" -# -# $rd blpop blist1 blist2 1 -# assert_equal {blist1 a} [$rd read] -# $rd brpop blist1 blist2 1 -# assert_equal {blist1 c} [$rd read] -# assert_equal 1 [r llen blist1] -# assert_equal 3 [r llen blist2] -# -# $rd blpop blist2 blist1 1 -# assert_equal {blist2 d} [$rd read] -# $rd brpop blist2 blist1 1 -# assert_equal {blist2 f} [$rd read] -# assert_equal 1 [r llen blist1] -# assert_equal 1 [r llen blist2] -# } -# -# test "BLPOP, BRPOP: second list has an entry - $type" { -# set rd [redis_deferring_client] -# r del blist1 -# create_$type blist2 "d $large f" -# -# $rd blpop blist1 blist2 1 -# assert_equal {blist2 d} [$rd read] -# $rd brpop blist1 blist2 1 -# assert_equal {blist2 f} [$rd read] -# assert_equal 0 [r llen blist1] -# assert_equal 1 [r llen blist2] -# } -# -# test "BRPOPLPUSH - $type" { -# r del target -# -# set rd [redis_deferring_client] -# create_$type blist "a b $large c d" -# -# $rd brpoplpush blist target 1 -# assert_equal d [$rd read] -# -# assert_equal d [r rpop target] -# assert_equal "a b $large c" [r lrange blist 0 -1] -# } -# } -# -# test "BLPOP, LPUSH + DEL should not awake blocked client" { -# set rd [redis_deferring_client] -# r del list -# -# $rd blpop list 0 -# r multi -# r lpush list a -# r del list -# r exec -# r del list -# r lpush list b -# $rd read -# } {list b} -# -# test "BLPOP, LPUSH + DEL + SET should not awake blocked client" { -# set rd [redis_deferring_client] -# r del list -# -# $rd blpop list 0 -# r multi -# r lpush list a -# r del list -# r set list foo -# r exec -# r del list -# r lpush list b -# $rd read -# } {list b} -# -# test "BLPOP with same key multiple times should work (issue #801)" { -# set rd [redis_deferring_client] -# r del list1 list2 -# -# # Data arriving after the BLPOP. -# $rd blpop list1 list2 list2 list1 0 -# r lpush list1 a -# assert_equal [$rd read] {list1 a} -# $rd blpop list1 list2 list2 list1 0 -# r lpush list2 b -# assert_equal [$rd read] {list2 b} -# -# # Data already there. 
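# Illustrative sketch (not from the original test file): the commented-out
# block above exercises BLPOP with the same key listed several times (Redis
# issue #801). The core pattern, assuming this suite's harness procs
# (redis_deferring_client, assert_equal, r), is:
set rd [redis_deferring_client]
r del sklist1 sklist2
$rd blpop sklist1 sklist2 sklist2 sklist1 0  ;# duplicate keys must not confuse the server
r lpush sklist1 a                            ;# push from a second connection
assert_equal {sklist1 a} [$rd read]          ;# the blocked client wakes exactly once
$rd close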
-# r lpush list1 a -# r lpush list2 b -# $rd blpop list1 list2 list2 list1 0 -# assert_equal [$rd read] {list1 a} -# $rd blpop list1 list2 list2 list1 0 -# assert_equal [$rd read] {list2 b} -# } -# -# test "MULTI/EXEC is isolated from the point of view of BLPOP" { -# set rd [redis_deferring_client] -# r del list -# $rd blpop list 0 -# r multi -# r lpush list a -# r lpush list b -# r lpush list c -# r exec -# $rd read -# } {list c} -# -# test "BLPOP with variadic LPUSH" { -# set rd [redis_deferring_client] -# r del blist target -# if {$::valgrind} {after 100} -# $rd blpop blist 0 -# if {$::valgrind} {after 100} -# assert_equal 2 [r lpush blist foo bar] -# if {$::valgrind} {after 100} -# assert_equal {blist bar} [$rd read] -# assert_equal foo [lindex [r lrange blist 0 -1] 0] -# } -# -# test "BRPOPLPUSH with zero timeout should block indefinitely" { -# set rd [redis_deferring_client] -# r del blist target -# $rd brpoplpush blist target 0 -# after 1000 -# r rpush blist foo -# assert_equal foo [$rd read] -# assert_equal {foo} [r lrange target 0 -1] -# } -# -# test "BRPOPLPUSH with a client BLPOPing the target list" { -# set rd [redis_deferring_client] -# set rd2 [redis_deferring_client] -# r del blist target -# $rd2 blpop target 0 -# $rd brpoplpush blist target 0 -# after 1000 -# r rpush blist foo -# assert_equal foo [$rd read] -# assert_equal {target foo} [$rd2 read] -# assert_equal 0 [r exists target] -# } -# -# test "BRPOPLPUSH with wrong source type" { -# set rd [redis_deferring_client] -# r del blist target -# r set blist nolist -# $rd brpoplpush blist target 1 -# assert_error "WRONGTYPE*" {$rd read} -# } -# -# test "BRPOPLPUSH with wrong destination type" { -# set rd [redis_deferring_client] -# r del blist target -# r set target nolist -# r lpush blist foo -# $rd brpoplpush blist target 1 -# assert_error "WRONGTYPE*" {$rd read} -# -# set rd [redis_deferring_client] -# r del blist target -# r set target nolist -# $rd brpoplpush blist target 0 -# after 1000 -# r rpush blist foo -# assert_error "WRONGTYPE*" {$rd read} -# assert_equal {foo} [r lrange blist 0 -1] -# } -# -# test "BRPOPLPUSH maintains order of elements after failure" { -# set rd [redis_deferring_client] -# r del blist target -# r set target nolist -# $rd brpoplpush blist target 0 -# r rpush blist a b c -# assert_error "WRONGTYPE*" {$rd read} -# r lrange blist 0 -1 -# } {a b c} -# -# test "BRPOPLPUSH with multiple blocked clients" { -# set rd1 [redis_deferring_client] -# set rd2 [redis_deferring_client] -# r del blist target1 target2 -# r set target1 nolist -# $rd1 brpoplpush blist target1 0 -# $rd2 brpoplpush blist target2 0 -# r lpush blist foo -# -# assert_error "WRONGTYPE*" {$rd1 read} -# assert_equal {foo} [$rd2 read] -# assert_equal {foo} [r lrange target2 0 -1] -# } -# -# test "Linked BRPOPLPUSH" { -# set rd1 [redis_deferring_client] -# set rd2 [redis_deferring_client] -# -# r del list1 list2 list3 -# -# $rd1 brpoplpush list1 list2 0 -# $rd2 brpoplpush list2 list3 0 -# -# r rpush list1 foo -# -# assert_equal {} [r lrange list1 0 -1] -# assert_equal {} [r lrange list2 0 -1] -# assert_equal {foo} [r lrange list3 0 -1] -# } -# -# test "Circular BRPOPLPUSH" { -# set rd1 [redis_deferring_client] -# set rd2 [redis_deferring_client] -# -# r del list1 list2 -# -# $rd1 brpoplpush list1 list2 0 -# $rd2 brpoplpush list2 list1 0 -# -# r rpush list1 foo -# -# assert_equal {foo} [r lrange list1 0 -1] -# assert_equal {} [r lrange list2 0 -1] -# } -# -# test "Self-referential BRPOPLPUSH" { -# set rd [redis_deferring_client] -# -# r del 
blist -# -# $rd brpoplpush blist blist 0 -# -# r rpush blist foo -# -# assert_equal {foo} [r lrange blist 0 -1] -# } -# -# test "BRPOPLPUSH inside a transaction" { -# r del xlist target -# r lpush xlist foo -# r lpush xlist bar -# -# r multi -# r brpoplpush xlist target 0 -# r brpoplpush xlist target 0 -# r brpoplpush xlist target 0 -# r lrange xlist 0 -1 -# r lrange target 0 -1 -# r exec -# } {foo bar {} {} {bar foo}} -# -# test "PUSH resulting from BRPOPLPUSH affect WATCH" { -# set blocked_client [redis_deferring_client] -# set watching_client [redis_deferring_client] -# r del srclist dstlist somekey -# r set somekey somevalue -# $blocked_client brpoplpush srclist dstlist 0 -# $watching_client watch dstlist -# $watching_client read -# $watching_client multi -# $watching_client read -# $watching_client get somekey -# $watching_client read -# r lpush srclist element -# $watching_client exec -# $watching_client read -# } {} -# -# test "BRPOPLPUSH does not affect WATCH while still blocked" { -# set blocked_client [redis_deferring_client] -# set watching_client [redis_deferring_client] -# r del srclist dstlist somekey -# r set somekey somevalue -# $blocked_client brpoplpush srclist dstlist 0 -# $watching_client watch dstlist -# $watching_client read -# $watching_client multi -# $watching_client read -# $watching_client get somekey -# $watching_client read -# $watching_client exec -# # Blocked BLPOPLPUSH may create problems, unblock it. -# r lpush srclist element -# $watching_client read -# } {somevalue} -# -# test {BRPOPLPUSH timeout} { -# set rd [redis_deferring_client] -# -# $rd brpoplpush foo_list bar_list 1 -# after 2000 -# $rd read -# } {} -# -# test "BLPOP when new key is moved into place" { -# set rd [redis_deferring_client] -# -# $rd blpop foo 5 -# r lpush bob abc def hij -# r rename bob foo -# $rd read -# } {foo hij} -# -# test "BLPOP when result key is created by SORT..STORE" { -# set rd [redis_deferring_client] -# -# # zero out list from previous test without explicit delete -# r lpop foo -# r lpop foo -# r lpop foo -# -# $rd blpop foo 5 -# r lpush notfoo hello hola aguacate konichiwa zanzibar -# r sort notfoo ALPHA store foo -# $rd read -# } {foo aguacate} -# -# foreach {pop} {BLPOP BRPOP} { -# test "$pop: with single empty list argument" { -# set rd [redis_deferring_client] -# r del blist1 -# $rd $pop blist1 1 -# r rpush blist1 foo -# assert_equal {blist1 foo} [$rd read] -# assert_equal 0 [r exists blist1] -# } -# -# test "$pop: with negative timeout" { -# set rd [redis_deferring_client] -# $rd $pop blist1 -1 -# assert_error "ERR*is negative*" {$rd read} -# } -# -# test "$pop: with non-integer timeout" { -# set rd [redis_deferring_client] -# $rd $pop blist1 1.1 -# assert_error "ERR*not an integer*" {$rd read} -# } -# -# test "$pop: with zero timeout should block indefinitely" { -# # To test this, use a timeout of 0 and wait a second. -# # The blocking pop should still be waiting for a push. 
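# Illustrative sketch (not from the original test file): a timeout of 0 means
# "block forever", so the disabled test above sleeps and only then pushes,
# proving the client is still parked. Condensed, under the same harness
# assumptions as the sketch earlier in this file:
set rd [redis_deferring_client]
r del sklist
$rd blpop sklist 0    ;# zero timeout: must never expire on its own
after 1000            ;# the client is still blocked after the wait
r rpush sklist foo
assert_equal {sklist foo} [$rd read]
$rd close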
-# set rd [redis_deferring_client] -# $rd $pop blist1 0 -# after 1000 -# r rpush blist1 foo -# assert_equal {blist1 foo} [$rd read] -# } -# -# test "$pop: second argument is not a list" { -# set rd [redis_deferring_client] -# r del blist1 blist2 -# r set blist2 nolist -# $rd $pop blist1 blist2 1 -# assert_error "WRONGTYPE*" {$rd read} -# } -# -# test "$pop: timeout" { -# set rd [redis_deferring_client] -# r del blist1 blist2 -# $rd $pop blist1 blist2 1 -# assert_equal {} [$rd read] -# } -# -# test "$pop: arguments are empty" { -# set rd [redis_deferring_client] -# r del blist1 blist2 -# -# $rd $pop blist1 blist2 1 -# r rpush blist1 foo -# assert_equal {blist1 foo} [$rd read] -# assert_equal 0 [r exists blist1] -# assert_equal 0 [r exists blist2] -# -# $rd $pop blist1 blist2 1 -# r rpush blist2 foo -# assert_equal {blist2 foo} [$rd read] -# assert_equal 0 [r exists blist1] -# assert_equal 0 [r exists blist2] -# } -# } -# -# test {BLPOP inside a transaction} { -# r del xlist -# r lpush xlist foo -# r lpush xlist bar -# r multi -# r blpop xlist 0 -# r blpop xlist 0 -# r blpop xlist 0 -# r exec -# } {{xlist bar} {xlist foo} {}} - - test {LPUSHX, RPUSHX - generic} { - r del xlist - assert_equal 0 [r lpushx xlist a] - assert_equal 0 [r llen xlist] - assert_equal 0 [r rpushx xlist a] - assert_equal 0 [r llen xlist] - } - - foreach {type large} [array get largevalue] { - test "LPUSHX, RPUSHX - $type" { - create_$type xlist "$large c" - assert_equal 3 [r rpushx xlist d] - assert_equal 4 [r lpushx xlist a] - assert_equal "a $large c d" [r lrange xlist 0 -1] - } - - test "LINSERT - $type" { - create_$type xlist "a $large c d" - assert_equal 5 [r linsert xlist before c zz] - assert_equal "a $large zz c d" [r lrange xlist 0 10] - assert_equal 6 [r linsert xlist after c yy] - assert_equal "a $large zz c yy d" [r lrange xlist 0 10] - assert_equal 7 [r linsert xlist after d dd] - assert_equal -1 [r linsert xlist after bad ddd] - assert_equal "a $large zz c yy d dd" [r lrange xlist 0 10] - assert_equal 8 [r linsert xlist before a aa] - assert_equal -1 [r linsert xlist before bad aaa] - assert_equal "aa a $large zz c yy d dd" [r lrange xlist 0 10] - - # check inserting integer encoded value - assert_equal 9 [r linsert xlist before aa 42] - assert_equal 42 [r lrange xlist 0 0] - } - } - - test {LINSERT raise error on bad syntax} { - catch {[r linsert xlist aft3r aa 42]} e - set e - } {*ERR*syntax*error*} - -# test {LPUSHX, RPUSHX convert from ziplist to list} { -# set large $largevalue(linkedlist) -# -# # convert when a large value is pushed -# create_ziplist xlist a -# assert_equal 2 [r rpushx xlist $large] -# assert_encoding linkedlist xlist -# create_ziplist xlist a -# assert_equal 2 [r lpushx xlist $large] -# assert_encoding linkedlist xlist -# -# # convert when the length threshold is exceeded -# create_ziplist xlist [lrepeat 256 a] -# assert_equal 257 [r rpushx xlist b] -# assert_encoding linkedlist xlist -# create_ziplist xlist [lrepeat 256 a] -# assert_equal 257 [r lpushx xlist b] -# assert_encoding linkedlist xlist -# } - -# test {LINSERT convert from ziplist to list} { -# set large $largevalue(linkedlist) -# -# # convert when a large value is inserted -# create_ziplist xlist a -# assert_equal 2 [r linsert xlist before a $large] -# assert_encoding linkedlist xlist -# create_ziplist xlist a -# assert_equal 2 [r linsert xlist after a $large] -# assert_encoding linkedlist xlist -# -# # convert when the length threshold is exceeded -# create_ziplist xlist [lrepeat 256 a] -# assert_equal 257 [r linsert 
xlist before a a] -# assert_encoding linkedlist xlist -# create_ziplist xlist [lrepeat 256 a] -# assert_equal 257 [r linsert xlist after a a] -# assert_encoding linkedlist xlist -# -# # don't convert when the value could not be inserted -# create_ziplist xlist [lrepeat 256 a] -# assert_equal -1 [r linsert xlist before foo a] -# assert_encoding ziplist xlist -# create_ziplist xlist [lrepeat 256 a] -# assert_equal -1 [r linsert xlist after foo a] -# assert_encoding ziplist xlist -# } - - foreach {type num} {ziplist 250 linkedlist 500} { - proc check_numbered_list_consistency {key} { - set len [r llen $key] - for {set i 0} {$i < $len} {incr i} { - assert_equal $i [r lindex $key $i] - assert_equal [expr $len-1-$i] [r lindex $key [expr (-$i)-1]] - } - } - - proc check_random_access_consistency {key} { - set len [r llen $key] - for {set i 0} {$i < $len} {incr i} { - set rint [expr int(rand()*$len)] - assert_equal $rint [r lindex $key $rint] - assert_equal [expr $len-1-$rint] [r lindex $key [expr (-$rint)-1]] - } - } - - test "LINDEX consistency test - $type" { - r del mylist - for {set i 0} {$i < $num} {incr i} { - r rpush mylist $i - } -# assert_encoding $type mylist - check_numbered_list_consistency mylist - } - - test "LINDEX random access - $type" { -# assert_encoding $type mylist - check_random_access_consistency mylist - } - -# test "Check if list is still ok after a DEBUG RELOAD - $type" { -# r debug reload -# assert_encoding $type mylist -# check_numbered_list_consistency mylist -# check_random_access_consistency mylist -# } - } - -# test {LLEN against non-list value error} { -# r del mylist -# r set mylist foobar -# assert_error WRONGTYPE* {r llen mylist} -# } - - test {LLEN against non existing key} { - assert_equal 0 [r llen not-a-key] - } - -# test {LINDEX against non-list value error} { -# assert_error WRONGTYPE* {r lindex mylist 0} -# } - - test {LINDEX against non existing key} { - assert_equal "" [r lindex not-a-key 10] - } - -# test {LPUSH against non-list value error} { -# assert_error WRONGTYPE* {r lpush mylist 0} -# } - -# test {RPUSH against non-list value error} { -# assert_error WRONGTYPE* {r rpush mylist 0} -# } - - foreach {type large} [array get largevalue] { - test "RPOPLPUSH base case - $type" { - r del mylist1 mylist2 - create_$type mylist1 "a $large c d" - assert_equal d [r rpoplpush mylist1 mylist2] - assert_equal c [r rpoplpush mylist1 mylist2] - assert_equal "a $large" [r lrange mylist1 0 -1] - assert_equal "c d" [r lrange mylist2 0 -1] -# assert_encoding ziplist mylist2 - } - - test "RPOPLPUSH with the same list as src and dst - $type" { - create_$type mylist "a $large c" - assert_equal "a $large c" [r lrange mylist 0 -1] - assert_equal c [r rpoplpush mylist mylist] - assert_equal "c a $large" [r lrange mylist 0 -1] - } - - foreach {othertype otherlarge} [array get largevalue] { - test "RPOPLPUSH with $type source and existing target $othertype" { - create_$type srclist "a b c $large" - create_$othertype dstlist "$otherlarge" - assert_equal $large [r rpoplpush srclist dstlist] - assert_equal c [r rpoplpush srclist dstlist] - assert_equal "a b" [r lrange srclist 0 -1] - assert_equal "c $large $otherlarge" [r lrange dstlist 0 -1] - - # When we rpoplpush'ed a large value, dstlist should be - # converted to the same encoding as srclist. 
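# Illustrative sketch (not from the original test file): the encoding
# assertions in this port are commented out because pika keeps its data in
# RocksDB rather than in Redis' in-memory ziplist/linkedlist structures, so
# OBJECT ENCODING checks do not apply. Against stock Redis, with the
# overrides at the top of this file (list-max-ziplist-value 16,
# list-max-ziplist-entries 256), the conversion would be observable as:
r del sklist
r rpush sklist [string repeat x 17]   ;# one value over the 16-byte limit
# assert_encoding linkedlist sklist   ;# holds on Redis, skipped on pika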
-# if {$type eq "linkedlist"} { -# assert_encoding linkedlist dstlist -# } - } - } - } - - test {RPOPLPUSH against non existing key} { - r del srclist dstlist - assert_equal {} [r rpoplpush srclist dstlist] - assert_equal 0 [r exists srclist] - assert_equal 0 [r exists dstlist] - } - - test {RPOPLPUSH against non list src key} { - r del srclist dstlist - r set srclist x -# assert_error WRONGTYPE* {r rpoplpush srclist dstlist} -# assert_type string srclist - assert_equal 0 [r exists newlist] - } - - test {RPOPLPUSH against non list dst key} { - create_ziplist srclist {a b c d} - r set dstlist x -# assert_error WRONGTYPE* {r rpoplpush srclist dstlist} -# assert_type string dstlist - assert_equal {a b c d} [r lrange srclist 0 -1] - } - - test {RPOPLPUSH against non existing src key} { - r del srclist dstlist - assert_equal {} [r rpoplpush srclist dstlist] - } {} - - foreach {type large} [array get largevalue] { - test "Basic LPOP/RPOP - $type" { - create_$type mylist "$large 1 2" - assert_equal $large [r lpop mylist] - assert_equal 2 [r rpop mylist] - assert_equal 1 [r lpop mylist] - assert_equal 0 [r llen mylist] - - # pop on empty list - assert_equal {} [r lpop mylist] - assert_equal {} [r rpop mylist] - } - } - -# test {LPOP/RPOP against non list value} { -# r set notalist foo -# assert_error WRONGTYPE* {r lpop notalist} -# assert_error WRONGTYPE* {r rpop notalist} -# } - - foreach {type num} {ziplist 250 linkedlist 500} { - test "Mass RPOP/LPOP - $type" { - r del mylist - set sum1 0 - for {set i 0} {$i < $num} {incr i} { - r lpush mylist $i - incr sum1 $i - } -# assert_encoding $type mylist - set sum2 0 - for {set i 0} {$i < [expr $num/2]} {incr i} { - incr sum2 [r lpop mylist] - incr sum2 [r rpop mylist] - } - assert_equal $sum1 $sum2 - } - } - - foreach {type large} [array get largevalue] { - test "LRANGE basics - $type" { - create_$type mylist "$large 1 2 3 4 5 6 7 8 9" - assert_equal {1 2 3 4 5 6 7 8} [r lrange mylist 1 -2] - assert_equal {7 8 9} [r lrange mylist -3 -1] - assert_equal {4} [r lrange mylist 4 4] - } - - test "LRANGE inverted indexes - $type" { - create_$type mylist "$large 1 2 3 4 5 6 7 8 9" - assert_equal {} [r lrange mylist 6 2] - } - - test "LRANGE out of range indexes including the full list - $type" { - create_$type mylist "$large 1 2 3" - assert_equal "$large 1 2 3" [r lrange mylist -1000 1000] - } - - test "LRANGE out of range negative end index - $type" { - create_$type mylist "$large 1 2 3" - assert_equal $large [r lrange mylist 0 -4] - assert_equal {} [r lrange mylist 0 -5] - } - } - - test {LRANGE against non existing key} { - assert_equal {} [r lrange nosuchkey 0 1] - } - - foreach {type large} [array get largevalue] { - proc trim_list {type min max} { - upvar 1 large large - r del mylist - create_$type mylist "1 2 3 4 $large" - r ltrim mylist $min $max - r lrange mylist 0 -1 - } - - test "LTRIM basics - $type" { - assert_equal "1" [trim_list $type 0 0] - assert_equal "1 2" [trim_list $type 0 1] - assert_equal "1 2 3" [trim_list $type 0 2] - assert_equal "2 3" [trim_list $type 1 2] - assert_equal "2 3 4 $large" [trim_list $type 1 -1] - assert_equal "2 3 4" [trim_list $type 1 -2] - assert_equal "4 $large" [trim_list $type -2 -1] - assert_equal "$large" [trim_list $type -1 -1] - assert_equal "1 2 3 4 $large" [trim_list $type -5 -1] - assert_equal "1 2 3 4 $large" [trim_list $type -10 10] - assert_equal "1 2 3 4 $large" [trim_list $type 0 5] - assert_equal "1 2 3 4 $large" [trim_list $type 0 10] - } - - test "LTRIM out of range negative end index - $type" { - 
assert_equal {1} [trim_list $type 0 -5] - assert_equal {} [trim_list $type 0 -6] - } - - } - - foreach {type large} [array get largevalue] { - test "LSET - $type" { - create_$type mylist "99 98 $large 96 95" - r lset mylist 1 foo - r lset mylist -1 bar - assert_equal "99 foo $large 96 bar" [r lrange mylist 0 -1] - } - - test "LSET out of range index - $type" { - assert_error ERR*range* {r lset mylist 10 foo} - } - } - - test {LSET against non existing key} { - assert_error ERR*key* {r lset nosuchkey 10 foo} - } - -# test {LSET against non list value} { -# r set nolist foobar -# assert_error WRONGTYPE* {r lset nolist 0 foo} -# } - - foreach {type e} [array get largevalue] { - test "LREM remove all the occurrences - $type" { - create_$type mylist "$e foo bar foobar foobared zap bar test foo" - assert_equal 2 [r lrem mylist 0 bar] - assert_equal "$e foo foobar foobared zap test foo" [r lrange mylist 0 -1] - } - - test "LREM remove the first occurrence - $type" { - assert_equal 1 [r lrem mylist 1 foo] - assert_equal "$e foobar foobared zap test foo" [r lrange mylist 0 -1] - } - - test "LREM remove non existing element - $type" { - assert_equal 0 [r lrem mylist 1 nosuchelement] - assert_equal "$e foobar foobared zap test foo" [r lrange mylist 0 -1] - } - - test "LREM starting from tail with negative count - $type" { - create_$type mylist "$e foo bar foobar foobared zap bar test foo foo" - assert_equal 1 [r lrem mylist -1 bar] - assert_equal "$e foo bar foobar foobared zap test foo foo" [r lrange mylist 0 -1] - } - - test "LREM starting from tail with negative count (2) - $type" { - assert_equal 2 [r lrem mylist -2 foo] - assert_equal "$e foo bar foobar foobared zap test" [r lrange mylist 0 -1] - } - - test "LREM deleting objects that may be int encoded - $type" { - create_$type myotherlist "$e 1 2 3" - assert_equal 1 [r lrem myotherlist 1 2] - assert_equal 3 [r llen myotherlist] - } - } - - test "Regression for bug 593 - chaining BRPOPLPUSH with other blocking cmds" { - set rd1 [redis_deferring_client] - set rd2 [redis_deferring_client] - - $rd1 brpoplpush a b 0 - $rd1 brpoplpush a b 0 - $rd2 brpoplpush b c 0 - after 1000 - r lpush a data - $rd1 close - $rd2 close - r ping - } {PONG} -} diff --git a/tools/pika_migrate/tests/unit/type/set.tcl b/tools/pika_migrate/tests/unit/type/set.tcl deleted file mode 100644 index de3c493a9c..0000000000 --- a/tools/pika_migrate/tests/unit/type/set.tcl +++ /dev/null @@ -1,531 +0,0 @@ -start_server { - tags {"set"} - overrides { - "set-max-intset-entries" 512 - } -} { - proc create_set {key entries} { - r del $key - foreach entry $entries { r sadd $key $entry } - } - - test {SADD, SCARD, SISMEMBER, SMEMBERS basics - regular set} { - create_set myset {foo} -# assert_encoding hashtable myset - assert_equal 1 [r sadd myset bar] - assert_equal 0 [r sadd myset bar] - assert_equal 2 [r scard myset] - assert_equal 1 [r sismember myset foo] - assert_equal 1 [r sismember myset bar] - assert_equal 0 [r sismember myset bla] - assert_equal {bar foo} [lsort [r smembers myset]] - } - - test {SADD, SCARD, SISMEMBER, SMEMBERS basics - intset} { - create_set myset {17} -# assert_encoding intset myset - assert_equal 1 [r sadd myset 16] - assert_equal 0 [r sadd myset 16] - assert_equal 2 [r scard myset] - assert_equal 1 [r sismember myset 16] - assert_equal 1 [r sismember myset 17] - assert_equal 0 [r sismember myset 18] - assert_equal {16 17} [lsort [r smembers myset]] - } - -# test {SADD against non set} { -# r lpush mylist foo -# assert_error WRONGTYPE* {r sadd mylist bar} -# 
} - - test "SADD a non-integer against an intset" { - create_set myset {1 2 3} -# assert_encoding intset myset - assert_equal 1 [r sadd myset a] -# assert_encoding hashtable myset - } - - test "SADD an integer larger than 64 bits" { - create_set myset {213244124402402314402033402} -# assert_encoding hashtable myset - assert_equal 1 [r sismember myset 213244124402402314402033402] - } - - test "SADD overflows the maximum allowed integers in an intset" { - r del myset - for {set i 0} {$i < 512} {incr i} { r sadd myset $i } -# assert_encoding intset myset - assert_equal 1 [r sadd myset 512] -# assert_encoding hashtable myset - } - - test {Variadic SADD} { - r del myset - assert_equal 3 [r sadd myset a b c] - assert_equal 2 [r sadd myset A a b c B] - assert_equal [lsort {A a b c B}] [lsort [r smembers myset]] - } - -# test "Set encoding after DEBUG RELOAD" { -# r del myintset myhashset mylargeintset -# for {set i 0} {$i < 100} {incr i} { r sadd myintset $i } -# for {set i 0} {$i < 1280} {incr i} { r sadd mylargeintset $i } -# for {set i 0} {$i < 256} {incr i} { r sadd myhashset [format "i%03d" $i] } -# assert_encoding intset myintset -# assert_encoding hashtable mylargeintset -# assert_encoding hashtable myhashset -# -# r debug reload -# assert_encoding intset myintset -# assert_encoding hashtable mylargeintset -# assert_encoding hashtable myhashset -# } - - test {SREM basics - regular set} { - create_set myset {foo bar ciao} -# assert_encoding hashtable myset - assert_equal 0 [r srem myset qux] - assert_equal 1 [r srem myset foo] - assert_equal {bar ciao} [lsort [r smembers myset]] - } - - test {SREM basics - intset} { - create_set myset {3 4 5} -# assert_encoding intset myset - assert_equal 0 [r srem myset 6] - assert_equal 1 [r srem myset 4] - assert_equal {3 5} [lsort [r smembers myset]] - } - - test {SREM with multiple arguments} { - r del myset - r sadd myset a b c d - assert_equal 0 [r srem myset k k k] - assert_equal 2 [r srem myset b d x y] - lsort [r smembers myset] - } {a c} - - test {SREM variadic version with more args needed to destroy the key} { - r del myset - r sadd myset 1 2 3 - r srem myset 1 2 3 4 5 6 7 8 - } {3} - - foreach {type} {hashtable intset} { - for {set i 1} {$i <= 5} {incr i} { - r del [format "set%d" $i] - } - for {set i 0} {$i < 200} {incr i} { - r sadd set1 $i - r sadd set2 [expr $i+195] - } - foreach i {199 195 1000 2000} { - r sadd set3 $i - } - for {set i 5} {$i < 200} {incr i} { - r sadd set4 $i - } - r sadd set5 0 - - # To make sure the sets are encoded as the type we are testing -- also - # when the VM is enabled and the values may be swapped in and out - # while the tests are running -- an extra element is added to every - # set that determines its encoding. 
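# Illustrative sketch (not from the original test file): on stock Redis a set
# of small integers is stored as an intset and upgraded to a hashtable as
# soon as a non-integer member arrives, which is what the extra element
# chosen below pins down (200 keeps the set numeric, "foo" forces hashtable):
r del skset
r sadd skset 1 2 3    ;# all-integer members -> intset on Redis
r sadd skset foo      ;# one string member -> hashtable from here on
assert_equal 4 [r scard skset]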
- set large 200 - if {$type eq "hashtable"} { - set large foo - } - - for {set i 1} {$i <= 5} {incr i} { - r sadd [format "set%d" $i] $large - } - -# test "Generated sets must be encoded as $type" { -# for {set i 1} {$i <= 5} {incr i} { -# assert_encoding $type [format "set%d" $i] -# } -# } - - test "SINTER with two sets - $type" { - assert_equal [list 195 196 197 198 199 $large] [lsort [r sinter set1 set2]] - } - - test "SINTERSTORE with two sets - $type" { - r sinterstore setres set1 set2 -# assert_encoding $type setres - assert_equal [list 195 196 197 198 199 $large] [lsort [r smembers setres]] - } - -# test "SINTERSTORE with two sets, after a DEBUG RELOAD - $type" { -# r debug reload -# r sinterstore setres set1 set2 -# assert_encoding $type setres -# assert_equal [list 195 196 197 198 199 $large] [lsort [r smembers setres]] -# } - - test "SUNION with two sets - $type" { - set expected [lsort -uniq "[r smembers set1] [r smembers set2]"] - assert_equal $expected [lsort [r sunion set1 set2]] - } - - test "SUNIONSTORE with two sets - $type" { - r sunionstore setres set1 set2 -# assert_encoding $type setres - set expected [lsort -uniq "[r smembers set1] [r smembers set2]"] - assert_equal $expected [lsort [r smembers setres]] - } - - test "SINTER against three sets - $type" { - assert_equal [list 195 199 $large] [lsort [r sinter set1 set2 set3]] - } - - test "SINTERSTORE with three sets - $type" { - r sinterstore setres set1 set2 set3 - assert_equal [list 195 199 $large] [lsort [r smembers setres]] - } - - test "SUNION with non existing keys - $type" { - set expected [lsort -uniq "[r smembers set1] [r smembers set2]"] - assert_equal $expected [lsort [r sunion nokey1 set1 set2 nokey2]] - } - - test "SDIFF with two sets - $type" { - assert_equal {0 1 2 3 4} [lsort [r sdiff set1 set4]] - } - - test "SDIFF with three sets - $type" { - assert_equal {1 2 3 4} [lsort [r sdiff set1 set4 set5]] - } - - test "SDIFFSTORE with three sets - $type" { - r sdiffstore setres set1 set4 set5 - # When we start with intsets, we should always end with intsets. 
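# Illustrative sketch (not from the original test file): SDIFFSTORE returns
# the cardinality of the stored result, and the result is the members of the
# first set that appear in none of the following sets:
r del sk1 sk2 skres
r sadd sk1 a b c d
r sadd sk2 c d
assert_equal 2 [r sdiffstore skres sk1 sk2]    ;# returns |{a b}|
assert_equal {a b} [lsort [r smembers skres]]  ;# the difference was stored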
-# if {$type eq {intset}} { -# assert_encoding intset setres -# } - assert_equal {1 2 3 4} [lsort [r smembers setres]] - } - } - - test "SDIFF with first set empty" { - r del set1 set2 set3 - r sadd set2 1 2 3 4 - r sadd set3 a b c d - r sdiff set1 set2 set3 - } {} - - test "SDIFF with same set two times" { - r del set1 - r sadd set1 a b c 1 2 3 4 5 6 - r sdiff set1 set1 - } {} - - test "SDIFF fuzzing" { - for {set j 0} {$j < 100} {incr j} { - unset -nocomplain s - array set s {} - set args {} - set num_sets [expr {[randomInt 10]+1}] - for {set i 0} {$i < $num_sets} {incr i} { - set num_elements [randomInt 100] - r del set_$i - lappend args set_$i - while {$num_elements} { - set ele [randomValue] - r sadd set_$i $ele - if {$i == 0} { - set s($ele) x - } else { - unset -nocomplain s($ele) - } - incr num_elements -1 - } - } - set result [lsort [r sdiff {*}$args]] - assert_equal $result [lsort [array names s]] - } - } - -# test "SINTER against non-set should throw error" { -# r set key1 x -# assert_error "WRONGTYPE*" {r sinter key1 noset} -# } - -# test "SUNION against non-set should throw error" { -# r set key1 x -# assert_error "WRONGTYPE*" {r sunion key1 noset} -# } - - test "SINTER should handle non existing key as empty" { - r del set1 set2 set3 - r sadd set1 a b c - r sadd set2 b c d - r sinter set1 set2 set3 - } {} - - test "SINTER with same integer elements but different encoding" { - r del set1 set2 - r sadd set1 1 2 3 - r sadd set2 1 2 3 a - r srem set2 a -# assert_encoding intset set1 -# assert_encoding hashtable set2 - lsort [r sinter set1 set2] - } {1 2 3} - - test "SINTERSTORE against non existing keys should delete dstkey" { - r set setres xxx - assert_equal 0 [r sinterstore setres foo111 bar222] -# assert_equal 0 [r exists setres] - } - - test "SUNIONSTORE against non existing keys should delete dstkey" { - r set setres xxx - assert_equal 0 [r sunionstore setres foo111 bar222] -# assert_equal 0 [r exists setres] - } - - foreach {type contents} {hashtable {a b c} intset {1 2 3}} { - test "SPOP basics - $type" { - create_set myset $contents -# assert_encoding $type myset - assert_equal $contents [lsort [list [r spop myset] [r spop myset] [r spop myset]]] - assert_equal 0 [r scard myset] - } - - test "SRANDMEMBER - $type" { - create_set myset $contents - unset -nocomplain myset - array set myset {} - for {set i 0} {$i < 100} {incr i} { - set myset([r srandmember myset]) 1 - } - assert_equal $contents [lsort [array names myset]] - } - } - - test "SRANDMEMBER with <count> against non existing key" { - r srandmember nonexisting_key 100 - } {} - - foreach {type contents} { - hashtable { - 1 5 10 50 125 50000 33959417 4775547 65434162 - 12098459 427716 483706 2726473884 72615637475 - MARY PATRICIA LINDA BARBARA ELIZABETH JENNIFER MARIA - SUSAN MARGARET DOROTHY LISA NANCY KAREN BETTY HELEN - SANDRA DONNA CAROL RUTH SHARON MICHELLE LAURA SARAH - KIMBERLY DEBORAH JESSICA SHIRLEY CYNTHIA ANGELA MELISSA - BRENDA AMY ANNA REBECCA VIRGINIA KATHLEEN - } - intset { - 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 - 20 21 22 23 24 25 26 27 28 29 - 30 31 32 33 34 35 36 37 38 39 - 40 41 42 43 44 45 46 47 48 49 - } - } { - test "SRANDMEMBER with <count> - $type" { - create_set myset $contents - unset -nocomplain myset - array set myset {} - foreach ele [r smembers myset] { - set myset($ele) 1 - } - assert_equal [lsort $contents] [lsort [array names myset]] - - # Make sure that a count of 0 is handled correctly. 
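# Illustrative sketch (not from the original test file): the sign of
# SRANDMEMBER's count argument selects the behaviour checked below -- a
# positive count returns distinct members (capped at the set size), a
# negative count returns exactly |count| members with repeats allowed, and a
# count of 0 returns an empty list:
r del skset
r sadd skset a b c
assert_equal {} [r srandmember skset 0]
assert_equal 3 [llength [r srandmember skset 10]]    ;# capped at cardinality
assert_equal 10 [llength [r srandmember skset -10]]  ;# repetition permitted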
- assert_equal [r srandmember myset 0] {} - - # We'll stress different parts of the code, see the implementation - # of SRANDMEMBER for more information, but basically there are - # four different code paths. - # - # PATH 1: Use negative count. - # - # 1) Check that it returns repeated elements. - set res [r srandmember myset -100] - assert_equal [llength $res] 100 - - # 2) Check that all the elements actually belong to the - # original set. - foreach ele $res { - assert {[info exists myset($ele)]} - } - - # 3) Check that eventually all the elements are returned. - unset -nocomplain auxset - set iterations 1000 - while {$iterations != 0} { - incr iterations -1 - set res [r srandmember myset -10] - foreach ele $res { - set auxset($ele) 1 - } - if {[lsort [array names myset]] eq - [lsort [array names auxset]]} { - break; - } - } - assert {$iterations != 0} - - # PATH 2: positive count (unique behavior) with requested size - # equal or greater than set size. - foreach size {50 100} { - set res [r srandmember myset $size] - assert_equal [llength $res] 50 - assert_equal [lsort $res] [lsort [array names myset]] - } - - # PATH 3: Ask almost as elements as there are in the set. - # In this case the implementation will duplicate the original - # set and will remove random elements up to the requested size. - # - # PATH 4: Ask a number of elements definitely smaller than - # the set size. - # - # We can test both the code paths just changing the size but - # using the same code. - - foreach size {45 5} { - set res [r srandmember myset $size] - assert_equal [llength $res] $size - - # 1) Check that all the elements actually belong to the - # original set. - foreach ele $res { - assert {[info exists myset($ele)]} - } - - # 2) Check that eventually all the elements are returned. 
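# Illustrative sketch (not from the original test file): the "eventually all
# elements are returned" loop below is a coupon-collector style check --
# sample repeatedly, accumulate the union of samples, and fail if a bounded
# number of rounds never covers the whole set:
r del skset
r sadd skset a b c
unset -nocomplain seen
set rounds 1000
while {$rounds > 0} {
    incr rounds -1
    foreach ele [r srandmember skset -5] { set seen($ele) 1 }
    if {[lsort [array names seen]] eq "a b c"} break
}
assert {$rounds != 0}   ;# three members should appear well within the bound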
- unset -nocomplain auxset - set iterations 1000 - while {$iterations != 0} { - incr iterations -1 - set res [r srandmember myset -10] - foreach ele $res { - set auxset($ele) 1 - } - if {[lsort [array names myset]] eq - [lsort [array names auxset]]} { - break; - } - } - assert {$iterations != 0} - } - } - } - - proc setup_move {} { - r del myset3 myset4 - create_set myset1 {1 a b} - create_set myset2 {2 3 4} -# assert_encoding hashtable myset1 -# assert_encoding intset myset2 - } - - test "SMOVE basics - from regular set to intset" { - # move a non-integer element to an intset should convert encoding - setup_move - assert_equal 1 [r smove myset1 myset2 a] - assert_equal {1 b} [lsort [r smembers myset1]] - assert_equal {2 3 4 a} [lsort [r smembers myset2]] -# assert_encoding hashtable myset2 - - # move an integer element should not convert the encoding - setup_move - assert_equal 1 [r smove myset1 myset2 1] - assert_equal {a b} [lsort [r smembers myset1]] - assert_equal {1 2 3 4} [lsort [r smembers myset2]] -# assert_encoding intset myset2 - } - - test "SMOVE basics - from intset to regular set" { - setup_move - assert_equal 1 [r smove myset2 myset1 2] - assert_equal {1 2 a b} [lsort [r smembers myset1]] - assert_equal {3 4} [lsort [r smembers myset2]] - } - - test "SMOVE non existing key" { - setup_move - assert_equal 0 [r smove myset1 myset2 foo] - assert_equal {1 a b} [lsort [r smembers myset1]] - assert_equal {2 3 4} [lsort [r smembers myset2]] - } - - test "SMOVE non existing src set" { - setup_move - assert_equal 0 [r smove noset myset2 foo] - assert_equal {2 3 4} [lsort [r smembers myset2]] - } - - test "SMOVE from regular set to non existing destination set" { - setup_move - assert_equal 1 [r smove myset1 myset3 a] - assert_equal {1 b} [lsort [r smembers myset1]] - assert_equal {a} [lsort [r smembers myset3]] -# assert_encoding hashtable myset3 - } - - test "SMOVE from intset to non existing destination set" { - setup_move - assert_equal 1 [r smove myset2 myset3 2] - assert_equal {3 4} [lsort [r smembers myset2]] - assert_equal {2} [lsort [r smembers myset3]] -# assert_encoding intset myset3 - } - -# test "SMOVE wrong src key type" { -# r set x 10 -# assert_error "WRONGTYPE*" {r smove x myset2 foo} -# } - -# test "SMOVE wrong dst key type" { -# r set x 10 -# assert_error "WRONGTYPE*" {r smove myset2 x foo} -# } - - test "SMOVE with identical source and destination" { - r del set - r sadd set a b c - r smove set set b - lsort [r smembers set] - } {a b c} - - tags {slow} { - test {intsets implementation stress testing} { - for {set j 0} {$j < 20} {incr j} { - unset -nocomplain s - array set s {} - r del s - set len [randomInt 1024] - for {set i 0} {$i < $len} {incr i} { - randpath { - set data [randomInt 65536] - } { - set data [randomInt 4294967296] - } { - set data [randomInt 18446744073709551616] - } - set s($data) {} - r sadd s $data - } - assert_equal [lsort [r smembers s]] [lsort [array names s]] - set len [array size s] - for {set i 0} {$i < $len} {incr i} { - set e [r spop s] - if {![info exists s($e)]} { - puts "Can't find '$e' on local array" - puts "Local array: [lsort [r smembers s]]" - puts "Remote array: [lsort [array names s]]" - error "exception" - } - array unset s $e - } - assert_equal [r scard s] 0 - assert_equal [array size s] 0 - } - } - } -} diff --git a/tools/pika_migrate/tests/unit/type/zset.tcl b/tools/pika_migrate/tests/unit/type/zset.tcl deleted file mode 100644 index 626156c572..0000000000 --- a/tools/pika_migrate/tests/unit/type/zset.tcl +++ /dev/null @@ 
-1,944 +0,0 @@ -start_server {tags {"zset"}} { - proc create_zset {key items} { - r del $key - foreach {score entry} $items { - r zadd $key $score $entry - } - } - - proc basics {encoding} { - #if {$encoding == "ziplist"} { - # r config set zset-max-ziplist-entries 128 - # r config set zset-max-ziplist-value 64 - #} elseif {$encoding == "skiplist"} { - # r config set zset-max-ziplist-entries 0 - # r config set zset-max-ziplist-value 0 - #} else { - # puts "Unknown sorted set encoding" - # exit - #} - - test "Check encoding - $encoding" { - r del ztmp - r zadd ztmp 10 x - #assert_encoding $encoding ztmp - } - - test "ZSET basic ZADD and score update - $encoding" { - r del ztmp - r zadd ztmp 10 x - r zadd ztmp 20 y - r zadd ztmp 30 z - assert_equal {x y z} [r zrange ztmp 0 -1] - - r zadd ztmp 1 y - assert_equal {y x z} [r zrange ztmp 0 -1] - } - - test "ZSET element can't be set to NaN with ZADD - $encoding" { - assert_error "*not*float*" {r zadd myzset abcde abc} - } - - test "ZSET element can't be set to NaN with ZINCRBY" { - assert_error "*not*float*" {r zincrby myzset abcde abc} - } - - test "ZINCRBY calls leading to NaN result in error" { - r zincrby myzset 999999999 abc - assert_error "*not*float*" {r zincrby myzset abcde abc} - } - - test {ZADD - Variadic version base case} { - r del myzset - list [r zadd myzset 10 a 20 b 30 c] [r zrange myzset 0 -1 withscores] - } {3 {a 10 b 20 c 30}} - - test {ZADD - Return value is the number of actually added items} { - list [r zadd myzset 5 x 20 b 30 c] [r zrange myzset 0 -1 withscores] - } {1 {x 5 a 10 b 20 c 30}} - - test {ZADD - Variadic version does not add nothing on single parsing err} { - r del myzset - catch {r zadd myzset 10 a 20 b 30.badscore c} e - assert_match {*ERR*not*float*} $e - #r exists myzset - } - - test {ZADD - Variadic version will raise error on missing arg} { - r del myzset - catch {r zadd myzset 10 a 20 b 30 c 40} e - assert_match {*ERR*syntax*} $e - } - - test {ZINCRBY does not work variadic even if shares ZADD implementation} { - r del myzset - catch {r zincrby myzset 10 a 20 b 30 c} e - assert_match {*ERR*wrong*number*arg*} $e - } - - test "ZCARD basics - $encoding" { - assert_equal 3 [r zcard ztmp] - assert_equal 0 [r zcard zdoesntexist] - } - - test "ZREM removes key after last element is removed" { - r del ztmp - r zadd ztmp 10 x - r zadd ztmp 20 y - - #assert_equal 1 [r exists ztmp] - assert_equal 0 [r zrem ztmp z] - assert_equal 1 [r zrem ztmp y] - assert_equal 1 [r zrem ztmp x] - #assert_equal 0 [r exists ztmp] - } - - test "ZREM variadic version" { - r del ztmp - r zadd ztmp 10 a 20 b 30 c - assert_equal 2 [r zrem ztmp x y a b k] - assert_equal 0 [r zrem ztmp foo bar] - assert_equal 1 [r zrem ztmp c] - #assert_equal 0 [r exists ztmp] - } - - test "ZREM variadic version -- remove elements after key deletion" { - r del ztmp - r zadd ztmp 10 a 20 b 30 c - r zrem ztmp a b c d e f g - } {3} - - test "ZRANGE basics - $encoding" { - r del ztmp - r zadd ztmp 1 a - r zadd ztmp 2 b - r zadd ztmp 3 c - r zadd ztmp 4 d - - assert_equal {a b c d} [r zrange ztmp 0 -1] - assert_equal {a b c} [r zrange ztmp 0 -2] - assert_equal {b c d} [r zrange ztmp 1 -1] - assert_equal {b c} [r zrange ztmp 1 -2] - assert_equal {c d} [r zrange ztmp -2 -1] - assert_equal {c} [r zrange ztmp -2 -2] - - # out of range start index - assert_equal {a b c} [r zrange ztmp -5 2] - assert_equal {a b} [r zrange ztmp -5 1] - assert_equal {} [r zrange ztmp 5 -1] - assert_equal {} [r zrange ztmp 5 -2] - - # out of range end index - assert_equal {a b c d} [r
zrange ztmp 0 5] - assert_equal {b c d} [r zrange ztmp 1 5] - assert_equal {} [r zrange ztmp 0 -5] - assert_equal {} [r zrange ztmp 1 -5] - - # withscores - assert_equal {a 1 b 2 c 3 d 4} [r zrange ztmp 0 -1 withscores] - } - - test "ZREVRANGE basics - $encoding" { - r del ztmp - r zadd ztmp 1 a - r zadd ztmp 2 b - r zadd ztmp 3 c - r zadd ztmp 4 d - - assert_equal {d c b a} [r zrevrange ztmp 0 -1] - assert_equal {d c b} [r zrevrange ztmp 0 -2] - assert_equal {c b a} [r zrevrange ztmp 1 -1] - assert_equal {c b} [r zrevrange ztmp 1 -2] - assert_equal {b a} [r zrevrange ztmp -2 -1] - assert_equal {b} [r zrevrange ztmp -2 -2] - - # out of range start index - assert_equal {d c b} [r zrevrange ztmp -5 2] - assert_equal {d c} [r zrevrange ztmp -5 1] - assert_equal {} [r zrevrange ztmp 5 -1] - assert_equal {} [r zrevrange ztmp 5 -2] - - # out of range end index - assert_equal {d c b a} [r zrevrange ztmp 0 5] - assert_equal {c b a} [r zrevrange ztmp 1 5] - assert_equal {} [r zrevrange ztmp 0 -5] - assert_equal {} [r zrevrange ztmp 1 -5] - - ## withscores - assert_equal {d 4 c 3 b 2 a 1} [r zrevrange ztmp 0 -1 withscores] - } - - test "ZRANK/ZREVRANK basics - $encoding" { - r del zranktmp - r zadd zranktmp 10 x - r zadd zranktmp 20 y - r zadd zranktmp 30 z - assert_equal 0 [r zrank zranktmp x] - assert_equal 1 [r zrank zranktmp y] - assert_equal 2 [r zrank zranktmp z] - assert_equal "" [r zrank zranktmp foo] - assert_equal 2 [r zrevrank zranktmp x] - assert_equal 1 [r zrevrank zranktmp y] - assert_equal 0 [r zrevrank zranktmp z] - assert_equal "" [r zrevrank zranktmp foo] - } - - test "ZRANK - after deletion - $encoding" { - r zrem zranktmp y - assert_equal 0 [r zrank zranktmp x] - assert_equal 1 [r zrank zranktmp z] - } - - test "ZINCRBY - can create a new sorted set - $encoding" { - r del zset - r zincrby zset 1 foo - assert_equal {foo} [r zrange zset 0 -1] - assert_equal 1 [r zscore zset foo] - } - - test "ZINCRBY - increment and decrement - $encoding" { - r zincrby zset 2 foo - r zincrby zset 1 bar - assert_equal {bar foo} [r zrange zset 0 -1] - - r zincrby zset 10 bar - r zincrby zset -5 foo - r zincrby zset -5 bar - assert_equal {foo bar} [r zrange zset 0 -1] - - assert_equal -2 [r zscore zset foo] - assert_equal 6 [r zscore zset bar] - } - - proc create_default_zset {} { - create_zset zset {-999999999 a 1 b 2 c 3 d 4 e 5 f 999999999 g} - } - - test "ZRANGEBYSCORE/ZREVRANGEBYSCORE/ZCOUNT basics" { - create_default_zset - - # inclusive range - assert_equal {a b c} [r zrangebyscore zset -999999999 2] - assert_equal {b c d} [r zrangebyscore zset 0 3] - assert_equal {d e f} [r zrangebyscore zset 3 6] - assert_equal {e f g} [r zrangebyscore zset 4 999999999] - assert_equal {c b a} [r zrevrangebyscore zset 2 -999999999] - assert_equal {d c b} [r zrevrangebyscore zset 3 0] - assert_equal {f e d} [r zrevrangebyscore zset 6 3] - assert_equal {g f e} [r zrevrangebyscore zset 999999999 4] - assert_equal 3 [r zcount zset 0 3] - - # exclusive range - assert_equal {b} [r zrangebyscore zset (-999999999 (2] - assert_equal {b c} [r zrangebyscore zset (0 (3] - assert_equal {e f} [r zrangebyscore zset (3 (6] - assert_equal {f} [r zrangebyscore zset (4 (999999999] - assert_equal {b} [r zrevrangebyscore zset (2 (-999999999] - assert_equal {c b} [r zrevrangebyscore zset (3 (0] - assert_equal {f e} [r zrevrangebyscore zset (6 (3] - assert_equal {f} [r zrevrangebyscore zset (999999999 (4] - assert_equal 2 [r zcount zset (0 (3] - - # test empty ranges - r zrem zset a - r zrem zset g - - # inclusive - assert_equal {} 
[r zrangebyscore zset 4 2] - assert_equal {} [r zrangebyscore zset 6 999999999] - assert_equal {} [r zrangebyscore zset -999999999 -6] - assert_equal {} [r zrevrangebyscore zset 999999999 6] - assert_equal {} [r zrevrangebyscore zset -6 -999999999] - - # exclusive - assert_equal {} [r zrangebyscore zset (4 (2] - assert_equal {} [r zrangebyscore zset 2 (2] - assert_equal {} [r zrangebyscore zset (2 2] - assert_equal {} [r zrangebyscore zset (6 (999999999] - assert_equal {} [r zrangebyscore zset (-999999999 (-6] - assert_equal {} [r zrevrangebyscore zset (999999999 (6] - assert_equal {} [r zrevrangebyscore zset (-6 (-999999999] - - # empty inner range - assert_equal {} [r zrangebyscore zset 2.4 2.6] - assert_equal {} [r zrangebyscore zset (2.4 2.6] - assert_equal {} [r zrangebyscore zset 2.4 (2.6] - assert_equal {} [r zrangebyscore zset (2.4 (2.6] - } - - test "ZRANGEBYSCORE with WITHSCORES" { - create_default_zset - assert_equal {b 1 c 2 d 3} [r zrangebyscore zset 0 3 withscores] - assert_equal {d 3 c 2 b 1} [r zrevrangebyscore zset 3 0 withscores] - } - - test "ZRANGEBYSCORE with LIMIT" { - create_default_zset - assert_equal {b c} [r zrangebyscore zset 0 10 LIMIT 0 2] - assert_equal {d e f} [r zrangebyscore zset 0 10 LIMIT 2 3] - assert_equal {d e f} [r zrangebyscore zset 0 10 LIMIT 2 10] - assert_equal {} [r zrangebyscore zset 0 10 LIMIT 20 10] - assert_equal {f e} [r zrevrangebyscore zset 10 0 LIMIT 0 2] - assert_equal {d c b} [r zrevrangebyscore zset 10 0 LIMIT 2 3] - assert_equal {d c b} [r zrevrangebyscore zset 10 0 LIMIT 2 10] - assert_equal {} [r zrevrangebyscore zset 10 0 LIMIT 20 10] - } - - test "ZRANGEBYSCORE with LIMIT and WITHSCORES" { - create_default_zset - assert_equal {e 4 f 5} [r zrangebyscore zset 2 5 LIMIT 2 3 WITHSCORES] - assert_equal {d 3 c 2} [r zrevrangebyscore zset 5 2 LIMIT 2 3 WITHSCORES] - } - - test "ZRANGEBYSCORE with non-value min or max" { - assert_error "*not*float*" {r zrangebyscore fooz str 1} - assert_error "*not*float*" {r zrangebyscore fooz 1 str} - assert_error "*not*float*" {r zrangebyscore fooz 1 abcde} - } - - proc create_default_lex_zset {} { - create_zset zset {0 alpha 0 bar 0 cool 0 down - 0 elephant 0 foo 0 great 0 hill - 0 omega} - } - - test "ZRANGEBYLEX/ZREVRANGEBYLEX/ZLEXCOUNT basics" { - create_default_lex_zset - - # inclusive range - assert_equal {alpha bar cool} [r zrangebylex zset - \[cool] - assert_equal {bar cool down} [r zrangebylex zset \[bar \[down] - assert_equal {great hill omega} [r zrangebylex zset \[g +] - assert_equal {cool bar alpha} [r zrevrangebylex zset \[cool -] - assert_equal {down cool bar} [r zrevrangebylex zset \[down \[bar] - assert_equal {omega hill great foo elephant down} [r zrevrangebylex zset + \[d] - assert_equal 3 [r zlexcount zset \[ele \[h] - - # exclusive range - assert_equal {alpha bar} [r zrangebylex zset - (cool] - assert_equal {cool} [r zrangebylex zset (bar (down] - assert_equal {hill omega} [r zrangebylex zset (great +] - assert_equal {bar alpha} [r zrevrangebylex zset (cool -] - assert_equal {cool} [r zrevrangebylex zset (down (bar] - assert_equal {omega hill} [r zrevrangebylex zset + (great] - assert_equal 2 [r zlexcount zset (ele (great] - - # inclusive and exclusive - assert_equal {} [r zrangebylex zset (az (b] - assert_equal {} [r zrangebylex zset (z +] - assert_equal {} [r zrangebylex zset - \[aaaa] - assert_equal {} [r zrevrangebylex zset \[elez \[elex] - assert_equal {} [r zrevrangebylex zset (hill (omega] - } - - test "ZRANGEBYLEX with LIMIT" { - create_default_lex_zset - assert_equal {alpha 
bar} [r zrangebylex zset - \[cool LIMIT 0 2] - assert_equal {bar cool} [r zrangebylex zset - \[cool LIMIT 1 2] - assert_equal {} [r zrangebylex zset \[bar \[down LIMIT 0 0] - assert_equal {} [r zrangebylex zset \[bar \[down LIMIT 2 0] - assert_equal {bar} [r zrangebylex zset \[bar \[down LIMIT 0 1] - assert_equal {cool} [r zrangebylex zset \[bar \[down LIMIT 1 1] - assert_equal {bar cool down} [r zrangebylex zset \[bar \[down LIMIT 0 100] - assert_equal {omega hill great foo elephant} [r zrevrangebylex zset + \[d LIMIT 0 5] - assert_equal {omega hill great foo} [r zrevrangebylex zset + \[d LIMIT 0 4] - } - - test "ZRANGEBYLEX with invalid lex range specifiers" { - assert_error "*not*string*" {r zrangebylex fooz foo bar} - assert_error "*not*string*" {r zrangebylex fooz \[foo bar} - assert_error "*not*string*" {r zrangebylex fooz foo \[bar} - assert_error "*not*string*" {r zrangebylex fooz +x \[bar} - assert_error "*not*string*" {r zrangebylex fooz -x \[bar} - } - - test "ZREMRANGEBYSCORE basics" { - proc remrangebyscore {min max} { - create_zset zset {1 a 2 b 3 c 4 d 5 e} - #assert_equal 1 [r exists zset] - r zremrangebyscore zset $min $max - } - - # inner range - assert_equal 3 [remrangebyscore 2 4] - assert_equal {a e} [r zrange zset 0 -1] - - # start underflow - assert_equal 1 [remrangebyscore -10 1] - assert_equal {b c d e} [r zrange zset 0 -1] - - # end overflow - assert_equal 1 [remrangebyscore 5 10] - assert_equal {a b c d} [r zrange zset 0 -1] - - # switch min and max - assert_equal 0 [remrangebyscore 4 2] - assert_equal {a b c d e} [r zrange zset 0 -1] - - # -999999999 to mid - assert_equal 3 [remrangebyscore -999999999 3] - assert_equal {d e} [r zrange zset 0 -1] - - # mid to 999999999 - assert_equal 3 [remrangebyscore 3 999999999] - assert_equal {a b} [r zrange zset 0 -1] - - # -999999999 to 999999999 - assert_equal 5 [remrangebyscore -999999999 999999999] - assert_equal {} [r zrange zset 0 -1] - - # exclusive min - assert_equal 4 [remrangebyscore (1 5] - assert_equal {a} [r zrange zset 0 -1] - assert_equal 3 [remrangebyscore (2 5] - assert_equal {a b} [r zrange zset 0 -1] - - # exclusive max - assert_equal 4 [remrangebyscore 1 (5] - assert_equal {e} [r zrange zset 0 -1] - assert_equal 3 [remrangebyscore 1 (4] - assert_equal {d e} [r zrange zset 0 -1] - - # exclusive min and max - assert_equal 3 [remrangebyscore (1 (5] - assert_equal {a e} [r zrange zset 0 -1] - - # destroy when empty - assert_equal 5 [remrangebyscore 1 5] - # assert_equal 0 [r exists zset] - } - - test "ZREMRANGEBYSCORE with non-value min or max" { - assert_error "*not*float*" {r zremrangebyscore fooz str 1} - assert_error "*not*float*" {r zremrangebyscore fooz 1 str} - assert_error "*not*float*" {r zremrangebyscore fooz 1 abcde} - } - - test "ZREMRANGEBYRANK basics" { - proc remrangebyrank {min max} { - create_zset zset {1 a 2 b 3 c 4 d 5 e} - #assert_equal 1 [r exists zset] - r zremrangebyrank zset $min $max - } - - # inner range - assert_equal 3 [remrangebyrank 1 3] - assert_equal {a e} [r zrange zset 0 -1] - - # start underflow - assert_equal 1 [remrangebyrank -10 0] - assert_equal {b c d e} [r zrange zset 0 -1] - - # start overflow - assert_equal 0 [remrangebyrank 10 -1] - assert_equal {a b c d e} [r zrange zset 0 -1] - - # end underflow - assert_equal 0 [remrangebyrank 0 -10] - assert_equal {a b c d e} [r zrange zset 0 -1] - - # end overflow - assert_equal 5 [remrangebyrank 0 10] - assert_equal {} [r zrange zset 0 -1] - - # destroy when empty - assert_equal 5 [remrangebyrank 0 4] - #assert_equal 0 [r 
exists zset] - } - - test "ZUNIONSTORE against non-existing key doesn't set destination - $encoding" { - r del zseta - assert_equal 0 [r zunionstore dst_key 1 zseta] - #assert_equal 0 [r exists dst_key] - } - - test "ZUNIONSTORE with empty set - $encoding" { - r del zseta zsetb - r zadd zseta 1 a - r zadd zseta 2 b - r zunionstore zsetc 2 zseta zsetb - r zrange zsetc 0 -1 withscores - } {a 1 b 2} - - test "ZUNIONSTORE basics - $encoding" { - r del zseta zsetb zsetc - r zadd zseta 1 a - r zadd zseta 2 b - r zadd zseta 3 c - r zadd zsetb 1 b - r zadd zsetb 2 c - r zadd zsetb 3 d - - assert_equal 4 [r zunionstore zsetc 2 zseta zsetb] - assert_equal {a 1 b 3 d 3 c 5} [r zrange zsetc 0 -1 withscores] - } - - test "ZUNIONSTORE with weights - $encoding" { - assert_equal 4 [r zunionstore zsetc 2 zseta zsetb weights 2 3] - assert_equal {a 2 b 7 d 9 c 12} [r zrange zsetc 0 -1 withscores] - } - - test "ZUNIONSTORE with a regular set and weights - $encoding" { - r del seta - r sadd seta a - r sadd seta b - r sadd seta c - - # assert_equal 4 [r zunionstore zsetc 2 seta zsetb weights 2 3] - # assert_equal {a 2 b 5 c 8 d 9} [r zrange zsetc 0 -1 withscores] - } - - test "ZUNIONSTORE with AGGREGATE MIN - $encoding" { - assert_equal 4 [r zunionstore zsetc 2 zseta zsetb aggregate min] - assert_equal {a 1 b 1 c 2 d 3} [r zrange zsetc 0 -1 withscores] - } - - test "ZUNIONSTORE with AGGREGATE MAX - $encoding" { - assert_equal 4 [r zunionstore zsetc 2 zseta zsetb aggregate max] - assert_equal {a 1 b 2 c 3 d 3} [r zrange zsetc 0 -1 withscores] - } - - test "ZINTERSTORE basics - $encoding" { - assert_equal 2 [r zinterstore zsetc 2 zseta zsetb] - assert_equal {b 3 c 5} [r zrange zsetc 0 -1 withscores] - } - - test "ZINTERSTORE with weights - $encoding" { - assert_equal 2 [r zinterstore zsetc 2 zseta zsetb weights 2 3] - assert_equal {b 7 c 12} [r zrange zsetc 0 -1 withscores] - } - - test "ZINTERSTORE with a regular set and weights - $encoding" { - r del seta - r sadd seta a - r sadd seta b - r sadd seta c - # assert_equal 2 [r zinterstore zsetc 2 seta zsetb weights 2 3] - # assert_equal {b 5 c 8} [r zrange zsetc 0 -1 withscores] - } - - test "ZINTERSTORE with AGGREGATE MIN - $encoding" { - assert_equal 2 [r zinterstore zsetc 2 zseta zsetb aggregate min] - assert_equal {b 1 c 2} [r zrange zsetc 0 -1 withscores] - } - - test "ZINTERSTORE with AGGREGATE MAX - $encoding" { - assert_equal 2 [r zinterstore zsetc 2 zseta zsetb aggregate max] - assert_equal {b 2 c 3} [r zrange zsetc 0 -1 withscores] - } - - foreach cmd {ZUNIONSTORE ZINTERSTORE} { - # test "$cmd with 999999999/-999999999 scores - $encoding" { - # r del zsetinf1 zsetinf2 - - # r zadd zsetinf1 999999999 key - # r zadd zsetinf2 999999999 key - # r $cmd zsetinf3 2 zsetinf1 zsetinf2 - # assert_equal 999999999 [r zscore zsetinf3 key] - - # r zadd zsetinf1 -999999999 key - # r zadd zsetinf2 999999999 key - # r $cmd zsetinf3 2 zsetinf1 zsetinf2 - # assert_equal 0 [r zscore zsetinf3 key] - - # r zadd zsetinf1 999999999 key - # r zadd zsetinf2 -999999999 key - # r $cmd zsetinf3 2 zsetinf1 zsetinf2 - # assert_equal 0 [r zscore zsetinf3 key] - - # r zadd zsetinf1 -999999999 key - # r zadd zsetinf2 -999999999 key - # r $cmd zsetinf3 2 zsetinf1 zsetinf2 - # assert_equal -999999999 [r zscore zsetinf3 key] - # } - - test "$cmd with NaN weights $encoding" { - r del zsetinf1 zsetinf2 - - r zadd zsetinf1 1.0 key - r zadd zsetinf2 1.0 key - assert_error "*weight*not*float*" { - r $cmd zsetinf3 2 zsetinf1 zsetinf2 weights abcde abcde - } - } - } - } - - basics ziplist - basics 
skiplist - - test {ZINTERSTORE regression with two sets, intset+hashtable} { - r del seta setb setc - r sadd set1 a - r sadd set2 10 - r zinterstore set3 2 set1 set2 - } {0} - - test {ZUNIONSTORE regression, should not create NaN in scores} { - r zadd z -999999999 neginf - r zunionstore out 1 z weights 0 - r zrange out 0 -1 withscores - } {neginf 0} - - # test {ZINTERSTORE #516 regression, mixed sets and ziplist zsets} { - # r sadd one 100 101 102 103 - # r sadd two 100 200 201 202 - # r zadd three 1 500 1 501 1 502 1 503 1 100 - # r zinterstore to_here 3 one two three WEIGHTS 0 0 1 - # r zrange to_here 0 -1 - # } {100} - - test {ZUNIONSTORE result is sorted} { - # Create two sets with common and not common elements, perform - # the UNION, check that elements are still sorted. - r del one two dest - set cmd1 [list r zadd one] - set cmd2 [list r zadd two] - for {set j 0} {$j < 1000} {incr j} { - lappend cmd1 [expr rand()] [randomInt 1000] - lappend cmd2 [expr rand()] [randomInt 1000] - } - {*}$cmd1 - {*}$cmd2 - assert {[r zcard one] > 100} - assert {[r zcard two] > 100} - r zunionstore dest 2 one two - set oldscore 0 - foreach {ele score} [r zrange dest 0 -1 withscores] { - assert {$score >= $oldscore} - set oldscore $score - } - } - - proc stressers {encoding} { - if {$encoding == "ziplist"} { - # Little extra to allow proper fuzzing in the sorting stresser - #r config set zset-max-ziplist-entries 256 - #r config set zset-max-ziplist-value 64 - set elements 128 - } elseif {$encoding == "skiplist"} { - #r config set zset-max-ziplist-entries 0 - #r config set zset-max-ziplist-value 0 - if {$::accurate} {set elements 1000} else {set elements 100} - } else { - puts "Unknown sorted set encoding" - exit - } - - test "ZSCORE - $encoding" { - r del zscoretest - set aux {} - for {set i 0} {$i < $elements} {incr i} { - set score [expr rand()] - lappend aux $score - r zadd zscoretest $score $i - } - - #assert_encoding $encoding zscoretest - for {set i 0} {$i < $elements} {incr i} { - assert_equal [lindex $aux $i] [r zscore zscoretest $i] - } - } - - test "ZSCORE after a DEBUG RELOAD - $encoding" { - r del zscoretest - set aux {} - for {set i 0} {$i < $elements} {incr i} { - set score [expr rand()] - lappend aux $score - r zadd zscoretest $score $i - } - - #r debug reload - #assert_encoding $encoding zscoretest - for {set i 0} {$i < $elements} {incr i} { - assert_equal [lindex $aux $i] [r zscore zscoretest $i] - } - } - - test "ZSET sorting stresser - $encoding" { - set delta 0 - for {set test 0} {$test < 2} {incr test} { - unset -nocomplain auxarray - array set auxarray {} - set auxlist {} - r del myzset - for {set i 0} {$i < $elements} {incr i} { - if {$test == 0} { - set score [expr rand()] - } else { - set score [expr int(rand()*10)] - } - set auxarray($i) $score - r zadd myzset $score $i - # Random update - if {[expr rand()] < .2} { - set j [expr int(rand()*1000)] - if {$test == 0} { - set score [expr rand()] - } else { - set score [expr int(rand()*10)] - } - set auxarray($j) $score - r zadd myzset $score $j - } - } - foreach {item score} [array get auxarray] { - lappend auxlist [list $score $item] - } - set sorted [lsort -command zlistAlikeSort $auxlist] - set auxlist {} - foreach x $sorted { - lappend auxlist [lindex $x 1] - } - - #assert_encoding $encoding myzset - set fromredis [r zrange myzset 0 -1] - set delta 0 - for {set i 0} {$i < [llength $fromredis]} {incr i} { - if {[lindex $fromredis $i] != [lindex $auxlist $i]} { - incr delta - } - } - } - assert_equal 0 $delta - } - - test 
"ZRANGEBYSCORE fuzzy test, 100 ranges in $elements element sorted set - $encoding" { - set err {} - r del zset - for {set i 0} {$i < $elements} {incr i} { - r zadd zset [expr rand()] $i - } - - #assert_encoding $encoding zset - for {set i 0} {$i < 100} {incr i} { - set min [expr rand()] - set max [expr rand()] - if {$min > $max} { - set aux $min - set min $max - set max $aux - } - set low [r zrangebyscore zset -999999999 $min] - set ok [r zrangebyscore zset $min $max] - set high [r zrangebyscore zset $max 999999999] - set lowx [r zrangebyscore zset -999999999 ($min] - set okx [r zrangebyscore zset ($min ($max] - set highx [r zrangebyscore zset ($max 999999999] - - if {[r zcount zset -999999999 $min] != [llength $low]} { - append err "Error, len does not match zcount\n" - } - if {[r zcount zset $min $max] != [llength $ok]} { - append err "Error, len does not match zcount\n" - } - if {[r zcount zset $max 999999999] != [llength $high]} { - append err "Error, len does not match zcount\n" - } - if {[r zcount zset -999999999 ($min] != [llength $lowx]} { - append err "Error, len does not match zcount\n" - } - if {[r zcount zset ($min ($max] != [llength $okx]} { - append err "Error, len does not match zcount\n" - } - if {[r zcount zset ($max 999999999] != [llength $highx]} { - append err "Error, len does not match zcount\n" - } - - foreach x $low { - set score [r zscore zset $x] - if {$score > $min} { - append err "Error, score for $x is $score > $min\n" - } - } - foreach x $lowx { - set score [r zscore zset $x] - if {$score >= $min} { - append err "Error, score for $x is $score >= $min\n" - } - } - foreach x $ok { - set score [r zscore zset $x] - if {$score < $min || $score > $max} { - append err "Error, score for $x is $score outside $min-$max range\n" - } - } - foreach x $okx { - set score [r zscore zset $x] - if {$score <= $min || $score >= $max} { - append err "Error, score for $x is $score outside $min-$max open range\n" - } - } - foreach x $high { - set score [r zscore zset $x] - if {$score < $max} { - append err "Error, score for $x is $score < $max\n" - } - } - foreach x $highx { - set score [r zscore zset $x] - if {$score <= $max} { - append err "Error, score for $x is $score <= $max\n" - } - } - } - assert_equal {} $err - } - - test "ZRANGEBYLEX fuzzy test, 100 ranges in $elements element sorted set - $encoding" { - set lexset {} - r del zset - for {set j 0} {$j < $elements} {incr j} { - set e [randstring 1 30 alpha] - lappend lexset $e - r zadd zset 0 $e - } - set lexset [lsort -unique $lexset] - for {set j 0} {$j < 100} {incr j} { - set min [randstring 1 30 alpha] - set max [randstring 1 30 alpha] - set mininc [randomInt 2] - set maxinc [randomInt 2] - if {$mininc} {set cmin "\[$min"} else {set cmin "($min"} - if {$maxinc} {set cmax "\[$max"} else {set cmax "($max"} - set rev [randomInt 2] - if {$rev} { - break - set cmd zrevrangebylex - } else { - set cmd zrangebylex - } - - # Make sure data is the same in both sides - assert {[r zrange zset 0 -1] eq $lexset} - - # Get the Redis output - set output [r $cmd zset $cmin $cmax] - if {$rev} { - set outlen [r zlexcount zset $cmax $cmin] - } else { - set outlen [r zlexcount zset $cmin $cmax] - } - - # Compute the same output via Tcl - set o {} - set copy $lexset - if {(!$rev && [string compare $min $max] > 0) || - ($rev && [string compare $max $min] > 0)} { - # Empty output when ranges are inverted. - } else { - if {$rev} { - # Invert the Tcl array using Redis itself. 
- set copy [r zrevrange zset 0 -1] - # Invert min / max as well - lassign [list $min $max $mininc $maxinc] \ - max min maxinc mininc - } - foreach e $copy { - set mincmp [string compare $e $min] - set maxcmp [string compare $e $max] - if { - ($mininc && $mincmp >= 0 || !$mininc && $mincmp > 0) - && - ($maxinc && $maxcmp <= 0 || !$maxinc && $maxcmp < 0) - } { - lappend o $e - } - } - } - assert {$o eq $output} - assert {$outlen eq [llength $output]} - } - } - - test "ZREMRANGEBYLEX fuzzy test, 100 ranges in $elements element sorted set - $encoding" { - set lexset {} - r del zset zsetcopy - for {set j 0} {$j < $elements} {incr j} { - set e [randstring 1 30 alpha] - lappend lexset $e - r zadd zset 0 $e - } - set lexset [lsort -unique $lexset] - for {set j 0} {$j < 100} {incr j} { - # Copy... - r zunionstore zsetcopy 1 zset - set lexsetcopy $lexset - - set min [randstring 1 30 alpha] - set max [randstring 1 30 alpha] - set mininc [randomInt 2] - set maxinc [randomInt 2] - if {$mininc} {set cmin "\[$min"} else {set cmin "($min"} - if {$maxinc} {set cmax "\[$max"} else {set cmax "($max"} - - # Make sure data is the same in both sides - assert {[r zrange zset 0 -1] eq $lexset} - - # Get the range we are going to remove - set torem [r zrangebylex zset $cmin $cmax] - set toremlen [r zlexcount zset $cmin $cmax] - r zremrangebylex zsetcopy $cmin $cmax - set output [r zrange zsetcopy 0 -1] - # Remove the range with Tcl from the original list - if {$toremlen} { - set first [lsearch -exact $lexsetcopy [lindex $torem 0]] - set last [expr {$first+$toremlen-1}] - set lexsetcopy [lreplace $lexsetcopy $first $last] - } - assert {$lexsetcopy eq $output} - } - } - - test "ZSETs skiplist implementation backlink consistency test - $encoding" { - set diff 0 - for {set j 0} {$j < $elements} {incr j} { - r zadd myzset [expr rand()] "Element-$j" - r zrem myzset "Element-[expr int(rand()*$elements)]" - } - - #assert_encoding $encoding myzset - set l1 [r zrange myzset 0 -1] - set l2 [r zrevrange myzset 0 -1] - for {set j 0} {$j < [llength $l1]} {incr j} { - if {[lindex $l1 $j] ne [lindex $l2 end-$j]} { - incr diff - } - } - assert_equal 0 $diff - } - - test "ZSETs ZRANK augmented skip list stress testing - $encoding" { - set err {} - r del myzset - for {set k 0} {$k < 2000} {incr k} { - set i [expr {$k % $elements}] - if {[expr rand()] < .2} { - r zrem myzset $i - } else { - set score [expr rand()] - r zadd myzset $score $i - #assert_encoding $encoding myzset - } - - set card [r zcard myzset] - if {$card > 0} { - set index [randomInt $card] - set ele [lindex [r zrange myzset $index $index] 0] - set rank [r zrank myzset $ele] - if {$rank != $index} { - set err "$ele RANK is wrong! 
($rank != $index)" - break - } - } - } - assert_equal {} $err - } - } - - tags {"slow"} { - stressers ziplist - stressers skiplist - } -} From 801b8bc1c082fbdcbf045235cdae1f2bc950afc5 Mon Sep 17 00:00:00 2001 From: wuyun Date: Sun, 29 Dec 2024 08:45:55 +0000 Subject: [PATCH 2/4] add source code from pika/unstable-7c155a59008e584410da7f301c1a3d71e715d6d6 --- tools/pika_migrate/CMakeLists.txt | 203 + tools/pika_migrate/conf/pika.conf | 694 + tools/pika_migrate/include/acl.h | 435 + tools/pika_migrate/include/build_version.h | 15 + tools/pika_migrate/include/pika_acl.h | 48 + tools/pika_migrate/include/pika_admin.h | 750 ++ .../include/pika_auxiliary_thread.h | 24 + tools/pika_migrate/include/pika_binlog.h | 113 + .../pika_migrate/include/pika_binlog_reader.h | 48 + .../include/pika_binlog_transverter.h | 77 + tools/pika_migrate/include/pika_bit.h | 182 + tools/pika_migrate/include/pika_cache.h | 226 + .../include/pika_cache_load_thread.h | 55 + tools/pika_migrate/include/pika_client_conn.h | 150 + .../include/pika_client_processor.h | 28 + .../include/pika_cmd_table_manager.h | 64 + tools/pika_migrate/include/pika_command.h | 641 + tools/pika_migrate/include/pika_conf.h | 1122 ++ tools/pika_migrate/include/pika_consensus.h | 203 + .../include/pika_data_distribution.h | 28 + tools/pika_migrate/include/pika_db.h | 206 + tools/pika_migrate/include/pika_define.h | 412 + .../include/pika_dispatch_thread.h | 56 + tools/pika_migrate/include/pika_geo.h | 184 + tools/pika_migrate/include/pika_geohash.h | 101 + .../include/pika_geohash_helper.h | 56 + tools/pika_migrate/include/pika_hash.h | 445 + tools/pika_migrate/include/pika_hyperloglog.h | 75 + tools/pika_migrate/include/pika_instant.h | 39 + tools/pika_migrate/include/pika_kv.h | 879 ++ tools/pika_migrate/include/pika_list.h | 429 + .../include/pika_migrate_thread.h | 118 + .../include/pika_monitor_thread.h | 47 + .../include/pika_monotonic_time.h | 20 + tools/pika_migrate/include/pika_pubsub.h | 107 + .../pika_migrate/include/pika_repl_bgworker.h | 52 + tools/pika_migrate/include/pika_repl_client.h | 117 + .../include/pika_repl_client_conn.h | 39 + .../include/pika_repl_client_thread.h | 49 + tools/pika_migrate/include/pika_repl_server.h | 51 + .../include/pika_repl_server_conn.h | 42 + .../include/pika_repl_server_thread.h | 46 + tools/pika_migrate/include/pika_rm.h | 228 + .../pika_migrate/include/pika_rsync_service.h | 27 + tools/pika_migrate/include/pika_server.h | 662 + tools/pika_migrate/include/pika_set.h | 371 + tools/pika_migrate/include/pika_slave_node.h | 82 + .../include/pika_slaveping_thread.h | 41 + .../pika_migrate/include/pika_slot_command.h | 273 + tools/pika_migrate/include/pika_stable_log.h | 63 + tools/pika_migrate/include/pika_statistic.h | 67 + tools/pika_migrate/include/pika_stream.h | 163 + tools/pika_migrate/include/pika_transaction.h | 107 + tools/pika_migrate/include/pika_version.h | 13 + tools/pika_migrate/include/pika_zset.h | 634 + tools/pika_migrate/include/rsync_client.h | 247 + .../include/rsync_client_thread.h | 55 + tools/pika_migrate/include/rsync_server.h | 187 + tools/pika_migrate/include/throttle.h | 45 + tools/pika_migrate/pika-migrate.md | 43 + tools/pika_migrate/protogen.cmake | 41 + tools/pika_migrate/src/acl.cc | 1418 ++ tools/pika_migrate/src/build_version.cc.in | 8 + tools/pika_migrate/src/cache/CMakeLists.txt | 20 + tools/pika_migrate/src/cache/include/cache.h | 179 + tools/pika_migrate/src/cache/include/config.h | 72 + tools/pika_migrate/src/cache/src/bit.cc | 115 + 
tools/pika_migrate/src/cache/src/cache.cc | 272 + tools/pika_migrate/src/cache/src/hash.cc | 312 + tools/pika_migrate/src/cache/src/list.cc | 293 + tools/pika_migrate/src/cache/src/set.cc | 138 + tools/pika_migrate/src/cache/src/string.cc | 295 + tools/pika_migrate/src/cache/src/zset.cc | 409 + tools/pika_migrate/src/net/CMakeLists.txt | 35 + .../src/net/examples/CMakeLists.txt | 38 + tools/pika_migrate/src/net/examples/README.md | 11 + .../src/net/examples/bg_thread.cc | 102 + .../src/net/examples/binlog_parser_test.cc | 67 + .../src/net/examples/http_server.cc | 113 + .../src/net/examples/https_server.cc | 121 + .../src/net/examples/mydispatch_srv.cc | 92 + .../src/net/examples/myholy_srv.cc | 97 + .../src/net/examples/myholy_srv_chandle.cc | 122 + .../src/net/examples/myproto.proto | 11 + .../src/net/examples/myproto_cli.cc | 53 + .../src/net/examples/myredis_cli.cc | 117 + .../src/net/examples/myredis_srv.cc | 114 + .../net/examples/performance/CMakeLists.txt | 46 + .../src/net/examples/performance/README.md | 13 + .../src/net/examples/performance/client.cc | 48 + .../net/examples/performance/message.proto | 7 + .../src/net/examples/performance/server.cc | 103 + .../src/net/examples/redis_cli_test.cc | 123 + .../src/net/examples/redis_parser_test.cc | 108 + .../src/net/examples/simple_http_server.cc | 93 + .../src/net/examples/thread_pool_test.cc | 97 + .../src/net/include/backend_thread.h | 161 + .../pika_migrate/src/net/include/bg_thread.h | 81 + .../src/net/include/build_version.h | 20 + .../src/net/include/client_thread.h | 162 + .../pika_migrate/src/net/include/http_conn.h | 204 + tools/pika_migrate/src/net/include/net_cli.h | 62 + tools/pika_migrate/src/net/include/net_conn.h | 132 + .../pika_migrate/src/net/include/net_define.h | 120 + .../src/net/include/net_interfaces.h | 14 + .../pika_migrate/src/net/include/net_pubsub.h | 129 + .../pika_migrate/src/net/include/net_stats.h | 36 + .../pika_migrate/src/net/include/net_thread.h | 57 + tools/pika_migrate/src/net/include/pb_conn.h | 90 + .../src/net/include/period_thread.h | 26 + .../pika_migrate/src/net/include/redis_cli.h | 27 + .../pika_migrate/src/net/include/redis_conn.h | 67 + .../src/net/include/redis_parser.h | 97 + .../src/net/include/server_thread.h | 242 + .../src/net/include/simple_http_conn.h | 106 + .../src/net/include/thread_pool.h | 89 + .../src/net/src/backend_thread.cc | 470 + tools/pika_migrate/src/net/src/bg_thread.cc | 133 + .../src/net/src/build_version.cc.in | 9 + .../pika_migrate/src/net/src/client_thread.cc | 482 + .../src/net/src/dispatch_thread.cc | 349 + .../src/net/src/dispatch_thread.h | 168 + tools/pika_migrate/src/net/src/holy_thread.cc | 325 + tools/pika_migrate/src/net/src/holy_thread.h | 81 + tools/pika_migrate/src/net/src/http_conn.cc | 620 + tools/pika_migrate/src/net/src/net_cli.cc | 307 + tools/pika_migrate/src/net/src/net_conn.cc | 66 + tools/pika_migrate/src/net/src/net_epoll.cc | 104 + tools/pika_migrate/src/net/src/net_epoll.h | 32 + .../src/net/src/net_interfaces.cc | 154 + tools/pika_migrate/src/net/src/net_item.h | 37 + tools/pika_migrate/src/net/src/net_kqueue.cc | 117 + tools/pika_migrate/src/net/src/net_kqueue.h | 32 + .../src/net/src/net_multiplexer.cc | 75 + .../src/net/src/net_multiplexer.h | 68 + tools/pika_migrate/src/net/src/net_pubsub.cc | 617 + tools/pika_migrate/src/net/src/net_stats.cc | 46 + tools/pika_migrate/src/net/src/net_thread.cc | 54 + .../src/net/src/net_thread_name.h | 34 + tools/pika_migrate/src/net/src/net_util.cc | 141 + 
tools/pika_migrate/src/net/src/net_util.h | 100 + tools/pika_migrate/src/net/src/pb_cli.cc | 91 + tools/pika_migrate/src/net/src/pb_conn.cc | 208 + .../pika_migrate/src/net/src/period_thread.cc | 20 + tools/pika_migrate/src/net/src/redis_cli.cc | 641 + tools/pika_migrate/src/net/src/redis_conn.cc | 214 + .../pika_migrate/src/net/src/redis_parser.cc | 407 + .../pika_migrate/src/net/src/server_socket.cc | 79 + .../pika_migrate/src/net/src/server_socket.h | 78 + .../pika_migrate/src/net/src/server_thread.cc | 352 + .../src/net/src/simple_http_conn.cc | 454 + tools/pika_migrate/src/net/src/thread_pool.cc | 167 + .../pika_migrate/src/net/src/worker_thread.cc | 359 + .../pika_migrate/src/net/src/worker_thread.h | 87 + .../pika_migrate/src/net/test/CMakeLists.txt | 36 + .../src/net/test/net_thread_test.cc | 45 + tools/pika_migrate/src/pika.cc | 258 + tools/pika_migrate/src/pika_acl.cc | 328 + tools/pika_migrate/src/pika_admin.cc | 3766 ++++++ .../pika_migrate/src/pika_auxiliary_thread.cc | 52 + tools/pika_migrate/src/pika_binlog.cc | 437 + tools/pika_migrate/src/pika_binlog_reader.cc | 266 + .../src/pika_binlog_transverter.cc | 176 + tools/pika_migrate/src/pika_bit.cc | 355 + tools/pika_migrate/src/pika_cache.cc | 1628 +++ .../src/pika_cache_load_thread.cc | 214 + tools/pika_migrate/src/pika_client_conn.cc | 589 + .../pika_migrate/src/pika_client_processor.cc | 46 + .../src/pika_cmd_table_manager.cc | 110 + tools/pika_migrate/src/pika_command.cc | 1076 ++ tools/pika_migrate/src/pika_command_docs.cc | 10845 ++++++++++++++++ tools/pika_migrate/src/pika_conf.cc | 991 ++ tools/pika_migrate/src/pika_consensus.cc | 783 ++ .../src/pika_data_distribution.cc | 11 + tools/pika_migrate/src/pika_db.cc | 640 + .../pika_migrate/src/pika_dispatch_thread.cc | 85 + tools/pika_migrate/src/pika_geo.cc | 589 + tools/pika_migrate/src/pika_geohash.cc | 287 + tools/pika_migrate/src/pika_geohash_helper.cc | 265 + tools/pika_migrate/src/pika_hash.cc | 892 ++ tools/pika_migrate/src/pika_hyperloglog.cc | 91 + .../pika_migrate/src/pika_inner_message.proto | 166 + tools/pika_migrate/src/pika_instant.cc | 40 + tools/pika_migrate/src/pika_kv.cc | 1968 +++ tools/pika_migrate/src/pika_list.cc | 925 ++ tools/pika_migrate/src/pika_migrate_thread.cc | 979 ++ tools/pika_migrate/src/pika_monotonic_time.cc | 63 + tools/pika_migrate/src/pika_pubsub.cc | 242 + tools/pika_migrate/src/pika_repl_bgworker.cc | 274 + tools/pika_migrate/src/pika_repl_client.cc | 332 + .../pika_migrate/src/pika_repl_client_conn.cc | 282 + .../src/pika_repl_client_thread.cc | 51 + tools/pika_migrate/src/pika_repl_server.cc | 149 + .../pika_migrate/src/pika_repl_server_conn.cc | 464 + .../src/pika_repl_server_thread.cc | 27 + tools/pika_migrate/src/pika_rm.cc | 1056 ++ tools/pika_migrate/src/pika_rsync_service.cc | 105 + tools/pika_migrate/src/pika_server.cc | 1921 +++ tools/pika_migrate/src/pika_set.cc | 729 ++ tools/pika_migrate/src/pika_slave_node.cc | 107 + tools/pika_migrate/src/pika_slot_command.cc | 1530 +++ tools/pika_migrate/src/pika_stable_log.cc | 225 + tools/pika_migrate/src/pika_statistic.cc | 111 + tools/pika_migrate/src/pika_stream.cc | 540 + tools/pika_migrate/src/pika_transaction.cc | 313 + tools/pika_migrate/src/pika_zset.cc | 1544 +++ tools/pika_migrate/src/pstd/CMakeLists.txt | 58 + tools/pika_migrate/src/pstd/doc.h | 6 + .../src/pstd/examples/CMakeLists.txt | 30 + .../src/pstd/examples/conf_example.cc | 22 + .../src/pstd/examples/hash_example.cc | 20 + .../pika_migrate/src/pstd/include/base_conf.h | 82 + 
tools/pika_migrate/src/pstd/include/env.h | 162 + tools/pika_migrate/src/pstd/include/fmacros.h | 66 + .../pika_migrate/src/pstd/include/lock_mgr.h | 57 + tools/pika_migrate/src/pstd/include/mutex.h | 86 + .../src/pstd/include/mutex_impl.h | 23 + .../src/pstd/include/noncopyable.h | 23 + .../src/pstd/include/pika_codis_slot.h | 22 + tools/pika_migrate/src/pstd/include/posix.h | 158 + .../src/pstd/include/pstd_coding.h | 154 + .../src/pstd/include/pstd_defer.h | 99 + .../src/pstd/include/pstd_define.h | 9 + .../pika_migrate/src/pstd/include/pstd_hash.h | 141 + .../src/pstd/include/pstd_mutex.h | 75 + .../src/pstd/include/pstd_slice.h | 114 + .../src/pstd/include/pstd_status.h | 129 + .../src/pstd/include/pstd_string.h | 69 + tools/pika_migrate/src/pstd/include/random.h | 21 + tools/pika_migrate/src/pstd/include/rsync.h | 33 + .../src/pstd/include/scope_record_lock.h | 57 + .../pika_migrate/src/pstd/include/testutil.h | 21 + tools/pika_migrate/src/pstd/include/version.h | 19 + tools/pika_migrate/src/pstd/include/xdebug.h | 86 + tools/pika_migrate/src/pstd/src/base_conf.cc | 381 + .../src/pstd/src/build_version.cc | 9 + .../src/pstd/src/build_version.cc.in | 4 + .../pika_migrate/src/pstd/src/build_version.h | 17 + tools/pika_migrate/src/pstd/src/env.cc | 689 + tools/pika_migrate/src/pstd/src/lock_mgr.cc | 177 + tools/pika_migrate/src/pstd/src/mutex_impl.cc | 118 + .../src/pstd/src/pika_codis_slot.cc | 52 + tools/pika_migrate/src/pstd/src/posix.cc | 752 ++ .../pika_migrate/src/pstd/src/pstd_coding.cc | 204 + tools/pika_migrate/src/pstd/src/pstd_hash.cc | 583 + tools/pika_migrate/src/pstd/src/pstd_mutex.cc | 74 + .../pika_migrate/src/pstd/src/pstd_status.cc | 95 + .../pika_migrate/src/pstd/src/pstd_string.cc | 763 ++ tools/pika_migrate/src/pstd/src/rsync.cc | 174 + .../src/pstd/src/scope_record_lock.cc | 76 + tools/pika_migrate/src/pstd/src/testutil.cc | 42 + .../src/pstd/tests/CMakeLists.txt | 35 + .../src/pstd/tests/base_conf_test.cc | 84 + .../src/pstd/tests/slash_coding_test.cc | 199 + .../src/pstd/tests/slash_env_test.cc | 33 + .../src/pstd/tests/slash_string_test.cc | 130 + tools/pika_migrate/src/rsync_client.cc | 526 + tools/pika_migrate/src/rsync_client_thread.cc | 45 + tools/pika_migrate/src/rsync_server.cc | 249 + tools/pika_migrate/src/rsync_service.proto | 51 + tools/pika_migrate/src/storage/CMakeLists.txt | 44 + tools/pika_migrate/src/storage/LICENSE | 15 + tools/pika_migrate/src/storage/README.md | 8 + .../src/storage/benchmark/CMakeLists.txt | 30 + .../src/storage/benchmark/storage_bench.cc | 238 + .../src/storage/detect_environment | 92 + .../src/storage/examples/CMakeLists.txt | 30 + .../src/storage/examples/hashes_example.cc | 113 + .../src/storage/examples/sets_example.cc | 35 + .../src/storage/examples/strings_example.cc | 173 + .../src/storage/include/storage/backupable.h | 73 + .../storage/include/storage/build_version.h | 15 + .../storage/include/storage/db_checkpoint.h | 43 + .../storage/include/storage/slot_indexer.h | 28 + .../src/storage/include/storage/storage.h | 1156 ++ .../storage/include/storage/storage_define.h | 135 + .../src/storage/include/storage/util.h | 33 + .../src/storage/include/storage/version.h | 13 + .../src/storage/src/backupable.cc | 149 + .../src/storage/src/base_data_key_format.h | 188 + .../src/storage/src/base_data_value_format.h | 115 + .../src/storage/src/base_filter.h | 264 + .../src/storage/src/base_key_format.h | 99 + .../src/storage/src/base_meta_value_format.h | 225 + .../src/storage/src/base_value_format.h | 160 + 
.../src/storage/src/build_version.cc.in | 9 + tools/pika_migrate/src/storage/src/coding.h | 86 + .../src/storage/src/custom_comparator.h | 261 + .../src/storage/src/db_checkpoint.cc | 265 + tools/pika_migrate/src/storage/src/debug.h | 32 + .../src/storage/src/lists_data_key_format.h | 118 + .../src/storage/src/lists_filter.h | 153 + .../src/storage/src/lists_meta_value_format.h | 284 + tools/pika_migrate/src/storage/src/lock_mgr.h | 21 + .../pika_migrate/src/storage/src/lru_cache.h | 297 + .../src/storage/src/murmurhash.cc | 197 + .../pika_migrate/src/storage/src/murmurhash.h | 45 + tools/pika_migrate/src/storage/src/mutex.h | 24 + .../pika_migrate/src/storage/src/mutex_impl.h | 20 + .../src/storage/src/options_helper.cc | 98 + .../src/storage/src/options_helper.h | 79 + .../src/storage/src/pika_stream_meta_value.h | 517 + .../src/storage/src/pika_stream_types.h | 87 + tools/pika_migrate/src/storage/src/redis.cc | 766 ++ tools/pika_migrate/src/storage/src/redis.h | 544 + .../src/storage/src/redis_hashes.cc | 1398 ++ .../src/storage/src/redis_hyperloglog.cc | 173 + .../src/storage/src/redis_hyperloglog.h | 39 + .../src/storage/src/redis_lists.cc | 1343 ++ .../src/storage/src/redis_sets.cc | 1645 +++ .../src/storage/src/redis_streams.cc | 980 ++ .../src/storage/src/redis_streams.h | 143 + .../src/storage/src/redis_strings.cc | 1767 +++ .../src/storage/src/redis_zsets.cc | 2013 +++ .../src/storage/src/scope_record_lock.h | 24 + .../src/storage/src/scope_snapshot.h | 27 + tools/pika_migrate/src/storage/src/storage.cc | 2003 +++ .../src/storage/src/storage_murmur3.h | 151 + .../src/storage/src/strings_filter.h | 66 + .../src/storage/src/strings_value_format.h | 163 + .../src/storage/src/type_iterator.h | 521 + tools/pika_migrate/src/storage/src/util.cc | 292 + .../src/storage/src/zsets_data_key_format.h | 127 + .../src/storage/src/zsets_filter.h | 146 + .../src/storage/tests/CMakeLists.txt | 39 + .../storage/tests/custom_comparator_test.cc | 158 + .../src/storage/tests/hashes_filter_test.cc | 211 + .../src/storage/tests/hashes_test.cc | 2445 ++++ .../src/storage/tests/hyperloglog_test.cc | 188 + .../src/storage/tests/keys_test.cc | 5264 ++++++++ .../src/storage/tests/kv_format_test.cc | 120 + .../src/storage/tests/lists_filter_test.cc | 251 + .../src/storage/tests/lists_test.cc | 2719 ++++ .../src/storage/tests/lock_mgr_test.cc | 44 + .../src/storage/tests/lru_cache_test.cc | 493 + .../src/storage/tests/options_test.cc | 60 + .../src/storage/tests/sets_test.cc | 2254 ++++ .../src/storage/tests/strings_filter_test.cc | 35 + .../src/storage/tests/strings_test.cc | 1061 ++ .../src/storage/tests/zsets_test.cc | 5249 ++++++++ tools/pika_migrate/src/throttle.cc | 56 + 341 files changed, 114440 insertions(+) create mode 100644 tools/pika_migrate/CMakeLists.txt create mode 100644 tools/pika_migrate/conf/pika.conf create mode 100644 tools/pika_migrate/include/acl.h create mode 100644 tools/pika_migrate/include/build_version.h create mode 100644 tools/pika_migrate/include/pika_acl.h create mode 100644 tools/pika_migrate/include/pika_admin.h create mode 100644 tools/pika_migrate/include/pika_auxiliary_thread.h create mode 100644 tools/pika_migrate/include/pika_binlog.h create mode 100644 tools/pika_migrate/include/pika_binlog_reader.h create mode 100644 tools/pika_migrate/include/pika_binlog_transverter.h create mode 100644 tools/pika_migrate/include/pika_bit.h create mode 100644 tools/pika_migrate/include/pika_cache.h create mode 100644 tools/pika_migrate/include/pika_cache_load_thread.h create mode 
100644 tools/pika_migrate/include/pika_client_conn.h create mode 100644 tools/pika_migrate/include/pika_client_processor.h create mode 100644 tools/pika_migrate/include/pika_cmd_table_manager.h create mode 100644 tools/pika_migrate/include/pika_command.h create mode 100644 tools/pika_migrate/include/pika_conf.h create mode 100644 tools/pika_migrate/include/pika_consensus.h create mode 100644 tools/pika_migrate/include/pika_data_distribution.h create mode 100644 tools/pika_migrate/include/pika_db.h create mode 100644 tools/pika_migrate/include/pika_define.h create mode 100644 tools/pika_migrate/include/pika_dispatch_thread.h create mode 100644 tools/pika_migrate/include/pika_geo.h create mode 100644 tools/pika_migrate/include/pika_geohash.h create mode 100644 tools/pika_migrate/include/pika_geohash_helper.h create mode 100644 tools/pika_migrate/include/pika_hash.h create mode 100644 tools/pika_migrate/include/pika_hyperloglog.h create mode 100644 tools/pika_migrate/include/pika_instant.h create mode 100644 tools/pika_migrate/include/pika_kv.h create mode 100644 tools/pika_migrate/include/pika_list.h create mode 100644 tools/pika_migrate/include/pika_migrate_thread.h create mode 100644 tools/pika_migrate/include/pika_monitor_thread.h create mode 100644 tools/pika_migrate/include/pika_monotonic_time.h create mode 100644 tools/pika_migrate/include/pika_pubsub.h create mode 100644 tools/pika_migrate/include/pika_repl_bgworker.h create mode 100644 tools/pika_migrate/include/pika_repl_client.h create mode 100644 tools/pika_migrate/include/pika_repl_client_conn.h create mode 100644 tools/pika_migrate/include/pika_repl_client_thread.h create mode 100644 tools/pika_migrate/include/pika_repl_server.h create mode 100644 tools/pika_migrate/include/pika_repl_server_conn.h create mode 100644 tools/pika_migrate/include/pika_repl_server_thread.h create mode 100644 tools/pika_migrate/include/pika_rm.h create mode 100644 tools/pika_migrate/include/pika_rsync_service.h create mode 100644 tools/pika_migrate/include/pika_server.h create mode 100644 tools/pika_migrate/include/pika_set.h create mode 100644 tools/pika_migrate/include/pika_slave_node.h create mode 100644 tools/pika_migrate/include/pika_slaveping_thread.h create mode 100644 tools/pika_migrate/include/pika_slot_command.h create mode 100644 tools/pika_migrate/include/pika_stable_log.h create mode 100644 tools/pika_migrate/include/pika_statistic.h create mode 100644 tools/pika_migrate/include/pika_stream.h create mode 100644 tools/pika_migrate/include/pika_transaction.h create mode 100644 tools/pika_migrate/include/pika_version.h create mode 100644 tools/pika_migrate/include/pika_zset.h create mode 100644 tools/pika_migrate/include/rsync_client.h create mode 100644 tools/pika_migrate/include/rsync_client_thread.h create mode 100644 tools/pika_migrate/include/rsync_server.h create mode 100644 tools/pika_migrate/include/throttle.h create mode 100644 tools/pika_migrate/pika-migrate.md create mode 100644 tools/pika_migrate/protogen.cmake create mode 100644 tools/pika_migrate/src/acl.cc create mode 100644 tools/pika_migrate/src/build_version.cc.in create mode 100644 tools/pika_migrate/src/cache/CMakeLists.txt create mode 100644 tools/pika_migrate/src/cache/include/cache.h create mode 100644 tools/pika_migrate/src/cache/include/config.h create mode 100644 tools/pika_migrate/src/cache/src/bit.cc create mode 100644 tools/pika_migrate/src/cache/src/cache.cc create mode 100644 tools/pika_migrate/src/cache/src/hash.cc create mode 100644 
tools/pika_migrate/src/cache/src/list.cc create mode 100644 tools/pika_migrate/src/cache/src/set.cc create mode 100644 tools/pika_migrate/src/cache/src/string.cc create mode 100644 tools/pika_migrate/src/cache/src/zset.cc create mode 100644 tools/pika_migrate/src/net/CMakeLists.txt create mode 100644 tools/pika_migrate/src/net/examples/CMakeLists.txt create mode 100644 tools/pika_migrate/src/net/examples/README.md create mode 100644 tools/pika_migrate/src/net/examples/bg_thread.cc create mode 100644 tools/pika_migrate/src/net/examples/binlog_parser_test.cc create mode 100644 tools/pika_migrate/src/net/examples/http_server.cc create mode 100644 tools/pika_migrate/src/net/examples/https_server.cc create mode 100644 tools/pika_migrate/src/net/examples/mydispatch_srv.cc create mode 100644 tools/pika_migrate/src/net/examples/myholy_srv.cc create mode 100644 tools/pika_migrate/src/net/examples/myholy_srv_chandle.cc create mode 100644 tools/pika_migrate/src/net/examples/myproto.proto create mode 100644 tools/pika_migrate/src/net/examples/myproto_cli.cc create mode 100644 tools/pika_migrate/src/net/examples/myredis_cli.cc create mode 100644 tools/pika_migrate/src/net/examples/myredis_srv.cc create mode 100644 tools/pika_migrate/src/net/examples/performance/CMakeLists.txt create mode 100644 tools/pika_migrate/src/net/examples/performance/README.md create mode 100644 tools/pika_migrate/src/net/examples/performance/client.cc create mode 100644 tools/pika_migrate/src/net/examples/performance/message.proto create mode 100644 tools/pika_migrate/src/net/examples/performance/server.cc create mode 100644 tools/pika_migrate/src/net/examples/redis_cli_test.cc create mode 100644 tools/pika_migrate/src/net/examples/redis_parser_test.cc create mode 100644 tools/pika_migrate/src/net/examples/simple_http_server.cc create mode 100644 tools/pika_migrate/src/net/examples/thread_pool_test.cc create mode 100644 tools/pika_migrate/src/net/include/backend_thread.h create mode 100644 tools/pika_migrate/src/net/include/bg_thread.h create mode 100644 tools/pika_migrate/src/net/include/build_version.h create mode 100644 tools/pika_migrate/src/net/include/client_thread.h create mode 100644 tools/pika_migrate/src/net/include/http_conn.h create mode 100644 tools/pika_migrate/src/net/include/net_cli.h create mode 100644 tools/pika_migrate/src/net/include/net_conn.h create mode 100644 tools/pika_migrate/src/net/include/net_define.h create mode 100644 tools/pika_migrate/src/net/include/net_interfaces.h create mode 100644 tools/pika_migrate/src/net/include/net_pubsub.h create mode 100644 tools/pika_migrate/src/net/include/net_stats.h create mode 100644 tools/pika_migrate/src/net/include/net_thread.h create mode 100644 tools/pika_migrate/src/net/include/pb_conn.h create mode 100644 tools/pika_migrate/src/net/include/period_thread.h create mode 100644 tools/pika_migrate/src/net/include/redis_cli.h create mode 100644 tools/pika_migrate/src/net/include/redis_conn.h create mode 100644 tools/pika_migrate/src/net/include/redis_parser.h create mode 100644 tools/pika_migrate/src/net/include/server_thread.h create mode 100644 tools/pika_migrate/src/net/include/simple_http_conn.h create mode 100644 tools/pika_migrate/src/net/include/thread_pool.h create mode 100644 tools/pika_migrate/src/net/src/backend_thread.cc create mode 100644 tools/pika_migrate/src/net/src/bg_thread.cc create mode 100644 tools/pika_migrate/src/net/src/build_version.cc.in create mode 100644 tools/pika_migrate/src/net/src/client_thread.cc create mode 100644 
tools/pika_migrate/src/net/src/dispatch_thread.cc create mode 100644 tools/pika_migrate/src/net/src/dispatch_thread.h create mode 100644 tools/pika_migrate/src/net/src/holy_thread.cc create mode 100644 tools/pika_migrate/src/net/src/holy_thread.h create mode 100644 tools/pika_migrate/src/net/src/http_conn.cc create mode 100644 tools/pika_migrate/src/net/src/net_cli.cc create mode 100644 tools/pika_migrate/src/net/src/net_conn.cc create mode 100644 tools/pika_migrate/src/net/src/net_epoll.cc create mode 100644 tools/pika_migrate/src/net/src/net_epoll.h create mode 100644 tools/pika_migrate/src/net/src/net_interfaces.cc create mode 100644 tools/pika_migrate/src/net/src/net_item.h create mode 100644 tools/pika_migrate/src/net/src/net_kqueue.cc create mode 100644 tools/pika_migrate/src/net/src/net_kqueue.h create mode 100644 tools/pika_migrate/src/net/src/net_multiplexer.cc create mode 100644 tools/pika_migrate/src/net/src/net_multiplexer.h create mode 100644 tools/pika_migrate/src/net/src/net_pubsub.cc create mode 100644 tools/pika_migrate/src/net/src/net_stats.cc create mode 100644 tools/pika_migrate/src/net/src/net_thread.cc create mode 100644 tools/pika_migrate/src/net/src/net_thread_name.h create mode 100644 tools/pika_migrate/src/net/src/net_util.cc create mode 100644 tools/pika_migrate/src/net/src/net_util.h create mode 100644 tools/pika_migrate/src/net/src/pb_cli.cc create mode 100644 tools/pika_migrate/src/net/src/pb_conn.cc create mode 100644 tools/pika_migrate/src/net/src/period_thread.cc create mode 100644 tools/pika_migrate/src/net/src/redis_cli.cc create mode 100644 tools/pika_migrate/src/net/src/redis_conn.cc create mode 100644 tools/pika_migrate/src/net/src/redis_parser.cc create mode 100644 tools/pika_migrate/src/net/src/server_socket.cc create mode 100644 tools/pika_migrate/src/net/src/server_socket.h create mode 100644 tools/pika_migrate/src/net/src/server_thread.cc create mode 100644 tools/pika_migrate/src/net/src/simple_http_conn.cc create mode 100644 tools/pika_migrate/src/net/src/thread_pool.cc create mode 100644 tools/pika_migrate/src/net/src/worker_thread.cc create mode 100644 tools/pika_migrate/src/net/src/worker_thread.h create mode 100644 tools/pika_migrate/src/net/test/CMakeLists.txt create mode 100644 tools/pika_migrate/src/net/test/net_thread_test.cc create mode 100644 tools/pika_migrate/src/pika.cc create mode 100644 tools/pika_migrate/src/pika_acl.cc create mode 100644 tools/pika_migrate/src/pika_admin.cc create mode 100644 tools/pika_migrate/src/pika_auxiliary_thread.cc create mode 100644 tools/pika_migrate/src/pika_binlog.cc create mode 100644 tools/pika_migrate/src/pika_binlog_reader.cc create mode 100644 tools/pika_migrate/src/pika_binlog_transverter.cc create mode 100644 tools/pika_migrate/src/pika_bit.cc create mode 100644 tools/pika_migrate/src/pika_cache.cc create mode 100644 tools/pika_migrate/src/pika_cache_load_thread.cc create mode 100644 tools/pika_migrate/src/pika_client_conn.cc create mode 100644 tools/pika_migrate/src/pika_client_processor.cc create mode 100644 tools/pika_migrate/src/pika_cmd_table_manager.cc create mode 100644 tools/pika_migrate/src/pika_command.cc create mode 100644 tools/pika_migrate/src/pika_command_docs.cc create mode 100644 tools/pika_migrate/src/pika_conf.cc create mode 100644 tools/pika_migrate/src/pika_consensus.cc create mode 100644 tools/pika_migrate/src/pika_data_distribution.cc create mode 100644 tools/pika_migrate/src/pika_db.cc create mode 100644 tools/pika_migrate/src/pika_dispatch_thread.cc create mode 100644 
tools/pika_migrate/src/pika_geo.cc create mode 100644 tools/pika_migrate/src/pika_geohash.cc create mode 100644 tools/pika_migrate/src/pika_geohash_helper.cc create mode 100644 tools/pika_migrate/src/pika_hash.cc create mode 100644 tools/pika_migrate/src/pika_hyperloglog.cc create mode 100644 tools/pika_migrate/src/pika_inner_message.proto create mode 100644 tools/pika_migrate/src/pika_instant.cc create mode 100644 tools/pika_migrate/src/pika_kv.cc create mode 100644 tools/pika_migrate/src/pika_list.cc create mode 100644 tools/pika_migrate/src/pika_migrate_thread.cc create mode 100644 tools/pika_migrate/src/pika_monotonic_time.cc create mode 100644 tools/pika_migrate/src/pika_pubsub.cc create mode 100644 tools/pika_migrate/src/pika_repl_bgworker.cc create mode 100644 tools/pika_migrate/src/pika_repl_client.cc create mode 100644 tools/pika_migrate/src/pika_repl_client_conn.cc create mode 100644 tools/pika_migrate/src/pika_repl_client_thread.cc create mode 100644 tools/pika_migrate/src/pika_repl_server.cc create mode 100644 tools/pika_migrate/src/pika_repl_server_conn.cc create mode 100644 tools/pika_migrate/src/pika_repl_server_thread.cc create mode 100644 tools/pika_migrate/src/pika_rm.cc create mode 100644 tools/pika_migrate/src/pika_rsync_service.cc create mode 100644 tools/pika_migrate/src/pika_server.cc create mode 100644 tools/pika_migrate/src/pika_set.cc create mode 100644 tools/pika_migrate/src/pika_slave_node.cc create mode 100644 tools/pika_migrate/src/pika_slot_command.cc create mode 100644 tools/pika_migrate/src/pika_stable_log.cc create mode 100644 tools/pika_migrate/src/pika_statistic.cc create mode 100644 tools/pika_migrate/src/pika_stream.cc create mode 100644 tools/pika_migrate/src/pika_transaction.cc create mode 100644 tools/pika_migrate/src/pika_zset.cc create mode 100644 tools/pika_migrate/src/pstd/CMakeLists.txt create mode 100644 tools/pika_migrate/src/pstd/doc.h create mode 100644 tools/pika_migrate/src/pstd/examples/CMakeLists.txt create mode 100644 tools/pika_migrate/src/pstd/examples/conf_example.cc create mode 100644 tools/pika_migrate/src/pstd/examples/hash_example.cc create mode 100644 tools/pika_migrate/src/pstd/include/base_conf.h create mode 100644 tools/pika_migrate/src/pstd/include/env.h create mode 100644 tools/pika_migrate/src/pstd/include/fmacros.h create mode 100644 tools/pika_migrate/src/pstd/include/lock_mgr.h create mode 100644 tools/pika_migrate/src/pstd/include/mutex.h create mode 100644 tools/pika_migrate/src/pstd/include/mutex_impl.h create mode 100644 tools/pika_migrate/src/pstd/include/noncopyable.h create mode 100644 tools/pika_migrate/src/pstd/include/pika_codis_slot.h create mode 100644 tools/pika_migrate/src/pstd/include/posix.h create mode 100644 tools/pika_migrate/src/pstd/include/pstd_coding.h create mode 100644 tools/pika_migrate/src/pstd/include/pstd_defer.h create mode 100644 tools/pika_migrate/src/pstd/include/pstd_define.h create mode 100644 tools/pika_migrate/src/pstd/include/pstd_hash.h create mode 100644 tools/pika_migrate/src/pstd/include/pstd_mutex.h create mode 100644 tools/pika_migrate/src/pstd/include/pstd_slice.h create mode 100644 tools/pika_migrate/src/pstd/include/pstd_status.h create mode 100644 tools/pika_migrate/src/pstd/include/pstd_string.h create mode 100644 tools/pika_migrate/src/pstd/include/random.h create mode 100644 tools/pika_migrate/src/pstd/include/rsync.h create mode 100644 tools/pika_migrate/src/pstd/include/scope_record_lock.h create mode 100644 tools/pika_migrate/src/pstd/include/testutil.h create mode 
100644 tools/pika_migrate/src/pstd/include/version.h create mode 100644 tools/pika_migrate/src/pstd/include/xdebug.h create mode 100644 tools/pika_migrate/src/pstd/src/base_conf.cc create mode 100644 tools/pika_migrate/src/pstd/src/build_version.cc create mode 100644 tools/pika_migrate/src/pstd/src/build_version.cc.in create mode 100644 tools/pika_migrate/src/pstd/src/build_version.h create mode 100644 tools/pika_migrate/src/pstd/src/env.cc create mode 100644 tools/pika_migrate/src/pstd/src/lock_mgr.cc create mode 100644 tools/pika_migrate/src/pstd/src/mutex_impl.cc create mode 100644 tools/pika_migrate/src/pstd/src/pika_codis_slot.cc create mode 100644 tools/pika_migrate/src/pstd/src/posix.cc create mode 100644 tools/pika_migrate/src/pstd/src/pstd_coding.cc create mode 100644 tools/pika_migrate/src/pstd/src/pstd_hash.cc create mode 100644 tools/pika_migrate/src/pstd/src/pstd_mutex.cc create mode 100644 tools/pika_migrate/src/pstd/src/pstd_status.cc create mode 100644 tools/pika_migrate/src/pstd/src/pstd_string.cc create mode 100644 tools/pika_migrate/src/pstd/src/rsync.cc create mode 100644 tools/pika_migrate/src/pstd/src/scope_record_lock.cc create mode 100644 tools/pika_migrate/src/pstd/src/testutil.cc create mode 100644 tools/pika_migrate/src/pstd/tests/CMakeLists.txt create mode 100644 tools/pika_migrate/src/pstd/tests/base_conf_test.cc create mode 100644 tools/pika_migrate/src/pstd/tests/slash_coding_test.cc create mode 100644 tools/pika_migrate/src/pstd/tests/slash_env_test.cc create mode 100644 tools/pika_migrate/src/pstd/tests/slash_string_test.cc create mode 100644 tools/pika_migrate/src/rsync_client.cc create mode 100644 tools/pika_migrate/src/rsync_client_thread.cc create mode 100644 tools/pika_migrate/src/rsync_server.cc create mode 100644 tools/pika_migrate/src/rsync_service.proto create mode 100644 tools/pika_migrate/src/storage/CMakeLists.txt create mode 100644 tools/pika_migrate/src/storage/LICENSE create mode 100644 tools/pika_migrate/src/storage/README.md create mode 100644 tools/pika_migrate/src/storage/benchmark/CMakeLists.txt create mode 100644 tools/pika_migrate/src/storage/benchmark/storage_bench.cc create mode 100755 tools/pika_migrate/src/storage/detect_environment create mode 100644 tools/pika_migrate/src/storage/examples/CMakeLists.txt create mode 100644 tools/pika_migrate/src/storage/examples/hashes_example.cc create mode 100644 tools/pika_migrate/src/storage/examples/sets_example.cc create mode 100644 tools/pika_migrate/src/storage/examples/strings_example.cc create mode 100644 tools/pika_migrate/src/storage/include/storage/backupable.h create mode 100644 tools/pika_migrate/src/storage/include/storage/build_version.h create mode 100644 tools/pika_migrate/src/storage/include/storage/db_checkpoint.h create mode 100644 tools/pika_migrate/src/storage/include/storage/slot_indexer.h create mode 100644 tools/pika_migrate/src/storage/include/storage/storage.h create mode 100644 tools/pika_migrate/src/storage/include/storage/storage_define.h create mode 100644 tools/pika_migrate/src/storage/include/storage/util.h create mode 100644 tools/pika_migrate/src/storage/include/storage/version.h create mode 100644 tools/pika_migrate/src/storage/src/backupable.cc create mode 100644 tools/pika_migrate/src/storage/src/base_data_key_format.h create mode 100644 tools/pika_migrate/src/storage/src/base_data_value_format.h create mode 100644 tools/pika_migrate/src/storage/src/base_filter.h create mode 100644 tools/pika_migrate/src/storage/src/base_key_format.h create mode 100644 
tools/pika_migrate/src/storage/src/base_meta_value_format.h create mode 100644 tools/pika_migrate/src/storage/src/base_value_format.h create mode 100644 tools/pika_migrate/src/storage/src/build_version.cc.in create mode 100644 tools/pika_migrate/src/storage/src/coding.h create mode 100644 tools/pika_migrate/src/storage/src/custom_comparator.h create mode 100644 tools/pika_migrate/src/storage/src/db_checkpoint.cc create mode 100644 tools/pika_migrate/src/storage/src/debug.h create mode 100644 tools/pika_migrate/src/storage/src/lists_data_key_format.h create mode 100644 tools/pika_migrate/src/storage/src/lists_filter.h create mode 100644 tools/pika_migrate/src/storage/src/lists_meta_value_format.h create mode 100644 tools/pika_migrate/src/storage/src/lock_mgr.h create mode 100644 tools/pika_migrate/src/storage/src/lru_cache.h create mode 100644 tools/pika_migrate/src/storage/src/murmurhash.cc create mode 100644 tools/pika_migrate/src/storage/src/murmurhash.h create mode 100644 tools/pika_migrate/src/storage/src/mutex.h create mode 100644 tools/pika_migrate/src/storage/src/mutex_impl.h create mode 100644 tools/pika_migrate/src/storage/src/options_helper.cc create mode 100644 tools/pika_migrate/src/storage/src/options_helper.h create mode 100644 tools/pika_migrate/src/storage/src/pika_stream_meta_value.h create mode 100644 tools/pika_migrate/src/storage/src/pika_stream_types.h create mode 100644 tools/pika_migrate/src/storage/src/redis.cc create mode 100644 tools/pika_migrate/src/storage/src/redis.h create mode 100644 tools/pika_migrate/src/storage/src/redis_hashes.cc create mode 100644 tools/pika_migrate/src/storage/src/redis_hyperloglog.cc create mode 100644 tools/pika_migrate/src/storage/src/redis_hyperloglog.h create mode 100644 tools/pika_migrate/src/storage/src/redis_lists.cc create mode 100644 tools/pika_migrate/src/storage/src/redis_sets.cc create mode 100644 tools/pika_migrate/src/storage/src/redis_streams.cc create mode 100644 tools/pika_migrate/src/storage/src/redis_streams.h create mode 100644 tools/pika_migrate/src/storage/src/redis_strings.cc create mode 100644 tools/pika_migrate/src/storage/src/redis_zsets.cc create mode 100644 tools/pika_migrate/src/storage/src/scope_record_lock.h create mode 100644 tools/pika_migrate/src/storage/src/scope_snapshot.h create mode 100644 tools/pika_migrate/src/storage/src/storage.cc create mode 100644 tools/pika_migrate/src/storage/src/storage_murmur3.h create mode 100644 tools/pika_migrate/src/storage/src/strings_filter.h create mode 100644 tools/pika_migrate/src/storage/src/strings_value_format.h create mode 100644 tools/pika_migrate/src/storage/src/type_iterator.h create mode 100644 tools/pika_migrate/src/storage/src/util.cc create mode 100644 tools/pika_migrate/src/storage/src/zsets_data_key_format.h create mode 100644 tools/pika_migrate/src/storage/src/zsets_filter.h create mode 100644 tools/pika_migrate/src/storage/tests/CMakeLists.txt create mode 100644 tools/pika_migrate/src/storage/tests/custom_comparator_test.cc create mode 100644 tools/pika_migrate/src/storage/tests/hashes_filter_test.cc create mode 100644 tools/pika_migrate/src/storage/tests/hashes_test.cc create mode 100644 tools/pika_migrate/src/storage/tests/hyperloglog_test.cc create mode 100644 tools/pika_migrate/src/storage/tests/keys_test.cc create mode 100644 tools/pika_migrate/src/storage/tests/kv_format_test.cc create mode 100644 tools/pika_migrate/src/storage/tests/lists_filter_test.cc create mode 100644 tools/pika_migrate/src/storage/tests/lists_test.cc create mode 100644 
tools/pika_migrate/src/storage/tests/lock_mgr_test.cc create mode 100644 tools/pika_migrate/src/storage/tests/lru_cache_test.cc create mode 100644 tools/pika_migrate/src/storage/tests/options_test.cc create mode 100644 tools/pika_migrate/src/storage/tests/sets_test.cc create mode 100644 tools/pika_migrate/src/storage/tests/strings_filter_test.cc create mode 100644 tools/pika_migrate/src/storage/tests/strings_test.cc create mode 100644 tools/pika_migrate/src/storage/tests/zsets_test.cc create mode 100644 tools/pika_migrate/src/throttle.cc diff --git a/tools/pika_migrate/CMakeLists.txt b/tools/pika_migrate/CMakeLists.txt new file mode 100644 index 0000000000..c16dc011bd --- /dev/null +++ b/tools/pika_migrate/CMakeLists.txt @@ -0,0 +1,203 @@ +cmake_minimum_required(VERSION 3.18) + +# Avoid warning about DOWNLOAD_EXTRACT_TIMESTAMP in CMake 3.24: +if(CMAKE_VERSION VERSION_GREATER_EQUAL "3.24.0") + cmake_policy(SET CMP0135 NEW) +endif() + +set(CMAKE_CXX_STANDARD 17) +project(pika-migrate) +set(CMAKE_EXPORT_COMPILE_COMMANDS ON) +enable_testing() + +if (CMAKE_CXX_COMPILER_ID STREQUAL "Clang") + # using Clang + if(CMAKE_CXX_COMPILER_VERSION VERSION_LESS "5.0") + message(FATAL_ERROR "Clang version must be at least 5.0") + endif() +elseif (CMAKE_CXX_COMPILER_ID STREQUAL "GNU") + # using GCC + if(CMAKE_CXX_COMPILER_VERSION VERSION_LESS "7.0") + message(FATAL_ERROR "GCC/G++ version must be at least 7.0") + endif() +endif() + +link_directories("/opt/rh/gcc-toolset-13/root/lib/gcc/x86_64-redhat-linux/13") + +############# You should enable a sanitizer if you are developing pika ############# +# Uncomment the following two lines to enable AddressSanitizer to detect memory leaks and other memory-related bugs. +#set(CMAKE_BUILD_TYPE "Debug") +#set(CMAKE_CXX_FLAGS_DEBUG "${CMAKE_CXX_FLAGS_DEBUG} -fsanitize=address -O0 -fno-omit-frame-pointer -fno-optimize-sibling-calls") + +# [Notice] AddressSanitizer and ThreadSanitizer cannot be enabled at the same time. + +# Uncomment the following two lines to enable ThreadSanitizer to detect data races and other thread-related issues. 
+#set(CMAKE_BUILD_TYPE "Debug")
+#set(CMAKE_CXX_FLAGS_DEBUG "${CMAKE_CXX_FLAGS_DEBUG} -fsanitize=thread -O0 -fno-omit-frame-pointer -fno-optimize-sibling-calls")
+
+string(TOLOWER ${CMAKE_HOST_SYSTEM_PROCESSOR} HOST_ARCH)
+
+if(NOT CMAKE_BUILD_TYPE)
+  set(CMAKE_BUILD_TYPE RELEASE)
+endif()
+
+string(TOUPPER ${CMAKE_BUILD_TYPE} BUILD_TYPE)
+
+if(${BUILD_TYPE} STREQUAL DEBUG)
+  set(LIB_BUILD_TYPE DEBUG)
+elseif(${BUILD_TYPE} STREQUAL MINSIZEREL)
+  set(LIB_BUILD_TYPE MINSIZEREL)
+elseif(${BUILD_TYPE} STREQUAL RELWITHDEBINFO)
+  set(LIB_BUILD_TYPE RELWITHDEBINFO)
+else()
+  set(LIB_BUILD_TYPE RELEASE)
+  set(CMAKE_CXX_FLAGS_RELEASE "-O2 -g -DNDEBUG")
+endif()
+
+if(CMAKE_SYSTEM_NAME MATCHES "Darwin")
+  set(CMAKE_CXX_FLAGS "-pthread")
+  add_definitions(-DOS_MACOSX)
+elseif (CMAKE_SYSTEM_NAME MATCHES "FreeBSD")
+  set(CMAKE_CXX_FLAGS "-pthread")
+  add_definitions(-DOS_FREEBSD)
+elseif(CMAKE_SYSTEM_NAME MATCHES "Linux")
+  if(CMAKE_CXX_COMPILER_ID STREQUAL "Clang")
+    set(CMAKE_EXE_LINKER_FLAGS "-stdlib=libc++ -fuse-ld=lld -lc++ -lc++abi ${CMAKE_EXE_LINKER_FLAGS}")
+    set(CMAKE_CXX_FLAGS "-stdlib=libc++ -pthread ${CMAKE_CXX_FLAGS}")
+  elseif(CMAKE_CXX_COMPILER_ID STREQUAL "GNU")
+    set(CMAKE_EXE_LINKER_FLAGS "-static-libgcc -static-libstdc++")
+    set(CMAKE_CXX_FLAGS "-pthread -Wl,--no-as-needed -ldl")
+  endif()
+  add_definitions(-DOS_LINUX)
+else()
+  message(FATAL_ERROR "only supports Linux, macOS, or FreeBSD")
+endif()
+
+if(HOST_ARCH MATCHES "x86_64" OR HOST_ARCH MATCHES "i386")
+  set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -msse -msse4.2")
+elseif(HOST_ARCH MATCHES "arm")
+  set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -march=armv8-a+crc+crypto -moutline-atomics")
+endif()
+
+set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -fno-omit-frame-pointer")
+
+
+message(STATUS "${PROJECT_NAME} staged install: ${STAGED_INSTALL_PREFIX}")
+message(STATUS "Current platform: ${OS_VERSION} ")
+cmake_host_system_information(RESULT CPU_CORE QUERY NUMBER_OF_LOGICAL_CORES)
+message(STATUS "CPU cores: ${CPU_CORE}")
+
+find_program(AUTOCONF
+        autoconf
+        PATHS /usr/bin /usr/local/bin)
+
+if (${AUTOCONF} MATCHES AUTOCONF-NOTFOUND)
+  message(FATAL_ERROR "autoconf not found on this host")
+endif()
+
+#set(CLANG_SEARCH_PATH "/usr/local/bin" "/usr/bin" "/usr/local/opt/llvm/bin"
+#        "/usr/local/opt/llvm@12/bin")
+find_program(CLANG_TIDY_BIN
+        NAMES clang-tidy clang-tidy-12
+        HINTS ${CLANG_SEARCH_PATH})
+if ("${CLANG_TIDY_BIN}" STREQUAL "CLANG_TIDY_BIN-NOTFOUND")
+  message(WARNING "couldn't find clang-tidy.")
+else ()
+  message(STATUS "found clang-tidy at ${CLANG_TIDY_BIN}")
+endif ()
+
+find_program(CLANG_APPLY_REPLACEMENTS_BIN
+        NAMES clang-apply-replacements clang-apply-replacements-12
+        HINTS ${CLANG_SEARCH_PATH})
+
+if ("${CLANG_APPLY_REPLACEMENTS_BIN}" STREQUAL "CLANG_APPLY_REPLACEMENTS_BIN-NOTFOUND")
+  message(WARNING "couldn't find clang-apply-replacements.")
+else ()
+  message(STATUS "found clang-apply-replacements at ${CLANG_APPLY_REPLACEMENTS_BIN}")
+endif ()
+
+option(WITH_COMMAND_DOCS "build with command docs support" OFF)
+if (WITH_COMMAND_DOCS)
+  add_definitions(-DWITH_COMMAND_DOCS)
+endif()
+
+include(protogen.cmake)
+include(ExternalProject)
+
+aux_source_directory(src DIR_SRCS)
+
+# generate version
+string(TIMESTAMP TS "%Y-%m-%d %H:%M:%S" UTC)
+set(PIKA_BUILD_DATE "${TS}" CACHE STRING "the time we first built pika")
+
+
+set(PIKA_BUILD_VERSION_CC ${CMAKE_BINARY_DIR}/pika_build_version.cc
+        src/pika_cache_load_thread.cc
+        )
+message("PIKA_BUILD_VERSION_CC : " ${PIKA_BUILD_VERSION_CC})
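+# For illustration only: configure_file() below stamps the build metadata computed
+# above into pika_build_version.cc. Assuming src/build_version.cc.in contains
+# template lines such as (hypothetical contents shown here):
+#   const char* pika_build_git_sha = "@PIKA_BUILD_GIT_SHA@";
+#   const char* pika_build_compile_date = "@PIKA_BUILD_DATE@";
+# the @...@ placeholders are replaced with their CMake values at configure time,
+# which is how the extern declarations in include/build_version.h get defined.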
+configure_file(${CMAKE_CURRENT_SOURCE_DIR}/src/build_version.cc.in ${PIKA_BUILD_VERSION_CC} @ONLY)
+
+set(PROTO_FILES ${CMAKE_CURRENT_SOURCE_DIR}/src/pika_inner_message.proto ${CMAKE_CURRENT_SOURCE_DIR}/src/rsync_service.proto)
+custom_protobuf_generate_cpp(PROTO_SRCS PROTO_HDRS ${PROTO_FILES})
+message("pika PROTO_SRCS = ${PROTO_SRCS}")
+message("pika PROTO_HDRS = ${PROTO_HDRS}")
+
+add_executable(${PROJECT_NAME}
+        ${DIR_SRCS}
+        ${PROTO_SRCS}
+        ${PROTO_HDRS}
+        ${PIKA_BUILD_VERSION_CC})
+
+target_link_directories(${PROJECT_NAME}
+        PUBLIC ${INSTALL_LIBDIR_64}
+        PUBLIC ${INSTALL_LIBDIR})
+
+add_dependencies(${PROJECT_NAME}
+        gflags
+        gtest
+        ${LIBUNWIND_NAME}
+        glog
+        fmt
+        snappy
+        zstd
+        lz4
+        zlib
+        ${LIBGPERF_NAME}
+        ${LIBJEMALLOC_NAME}
+        rocksdb
+        protobuf
+        pstd
+        net
+        rediscache
+        storage
+        cache
+        hiredis
+)
+
+target_include_directories(${PROJECT_NAME}
+        PUBLIC ${CMAKE_CURRENT_BINARY_DIR}
+        PUBLIC ${PROJECT_SOURCE_DIR}
+        ${INSTALL_INCLUDEDIR}
+)
+
+target_link_libraries(${PROJECT_NAME}
+        cache
+        storage
+        net
+        pstd
+        ${GLOG_LIBRARY}
+        librocksdb.a
+        ${LIB_PROTOBUF}
+        ${LIB_GFLAGS}
+        ${LIB_FMT}
+        libsnappy.a
+        libzstd.a
+        liblz4.a
+        libz.a
+        librediscache.a
+        ${LIBUNWIND_LIBRARY}
+        ${JEMALLOC_LIBRARY}
+        libhiredis.a)
+
+option(USE_SSL "Enable SSL support" OFF)
+
diff --git a/tools/pika_migrate/conf/pika.conf b/tools/pika_migrate/conf/pika.conf
new file mode 100644
index 0000000000..d4f0efb011
--- /dev/null
+++ b/tools/pika_migrate/conf/pika.conf
@@ -0,0 +1,694 @@
+###########################
+# Pika configuration file #
+###########################
+
+# Pika port, the default value is 9221.
+# [NOTICE] Pika currently uses the magic offsets port+1000 and port+2000.
+# With the default listening port 9221, port 10221 is used for Rsync and port 11221 for Replication.
+port : 9221
+
+db-instance-num : 3
+rocksdb-ttl-second : 86400 * 7;
+rocksdb-periodic-second : 86400 * 3;
+
+# Random value identifying the Pika server; its string length must be 40.
+# If not set, Pika will generate a random string of 40 characters.
+# run-id :
+
+# Master's run-id
+# master-run-id :
+
+# The number of Net-worker threads in Pika.
+# It's not recommended to set this value higher than
+# the number of CPU cores on the deployment server.
+thread-num : 1
+
+# Use Net worker threads to read the Redis cache for the [Get, HGet] commands,
+# which can significantly improve QPS and reduce latency when the cache hit rate is high.
+# The default value is "yes"; set it to "no" if you want to disable it.
+rtc-cache-read : yes
+
+# Size of the thread pool. The threads within this pool
+# are dedicated to handling user requests.
+thread-pool-size : 12
+
+# This parameter is used to control whether to separate fast and slow commands.
+# When slow-cmd-pool is set to yes, fast and slow commands are separated.
+# When set to no, they are not separated.
+slow-cmd-pool : no
+
+# Size of the low-level thread pool. The threads within this pool
+# are dedicated to handling slow user requests.
+slow-cmd-thread-pool-size : 1
+
+# Size of the admin thread pool. The threads within this pool
+# are dedicated to handling administrative requests.
+admin-thread-pool-size : 2
+
+# Slow cmd list, e.g. hgetall, mset
+slow-cmd-list :
+
+# List of commands considered administrative. These commands will be handled by the admin thread pool. Modify this list as needed.
+# Default commands: info, ping, monitor
+# This parameter is only supported by the CONFIG GET command and not by CONFIG SET.
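+# Example (illustrative only): a deployment that also wants CONFIG and SHUTDOWN
+# served by the admin pool could extend the list like this:
+#   admin-cmd-list : info, ping, monitor, config, shutdown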
+admin-cmd-list : info, ping, monitor
+
+# The number of threads used to write to the DB on a slave node when replicating.
+# It's preferable to set the slave's sync-thread-num close to the master's thread-pool-size.
+sync-thread-num : 6
+
+# The number of threads used to write the binlog on a slave node when replicating;
+# each DB can be bound to at most one sync-binlog-thread to write its binlog.
+# [NOTICE] It's highly recommended to set sync-binlog-thread-num equal to the conf item 'databases'
+# (then each DB can have an exclusive thread to write its binlog),
+# e.g. if you use 8 DBs (databases is 8), sync-binlog-thread-num is preferably 8.
+# The valid range of sync-binlog-thread-num is [1, databases]; its final value is Min(sync-binlog-thread-num, databases).
+sync-binlog-thread-num : 1
+
+# Directory to store log files of Pika, which contains multiple types of logs,
+# including: INFO, WARNING, and ERROR logs, as well as the binlog (write2file) files which
+# are used for replication.
+log-path : ./log/
+
+# Log retention time of serverlog (pika.{hostname}.{username}.log.{loglevel}.YYYYMMDD-HHMMSS) files stored within log-path.
+# Any serverlog files that exceed this time will be cleaned up.
+# The unit of log-retention-time is [days] and the default value is 7 (days).
+log-retention-time : 7
+
+# Directory to store the data of Pika.
+db-path : ./db/
+
+# The size of a single RocksDB memtable at Pika's bottom layer (Pika uses RocksDB to store persistent data).
+# [Tip] A big write-buffer-size can improve writing performance,
+# but it will generate heavier IO load when flushing from buffer to disk;
+# you should configure it based on your usage scenario.
+# Supported Units [K|M|G]. write-buffer-size's default unit is [bytes].
+write-buffer-size : 256M
+
+# The size of one block in arena memory allocation.
+# If <= 0, a proper value is automatically calculated
+# (usually 1/8 of write-buffer-size, rounded up to a multiple of 4KB).
+# Supported Units [K|M|G]. arena-block-size's default unit is [bytes].
+arena-block-size :
+
+# Timeout of Pika's connections. The countdown starts when there are no requests
+# on a connection (it enters sleep state); when the countdown reaches 0, the connection
+# will be closed by Pika.
+# [Tip] The issue of running out of Pika's connections may be avoided if this value
+# is configured properly.
+# The unit of timeout is [seconds] and its default value is 60 (s).
+timeout : 60
+
+# The [password of administrator], which is empty by default.
+# [NOTICE] If this admin password is the same as the user password (including both being empty),
+# the value of userpass will be ignored and all users are considered administrators;
+# in this scenario, users are not subject to the restrictions imposed by the userblacklist.
+# PS: "user password" refers to the value of the parameter below: userpass.
+requirepass :
+
+# Password for replication verification, used for authentication when a slave
+# connects to a master to request replication.
+# [NOTICE] The value of this parameter must match the "requirepass" setting on the master.
+masterauth :
+
+# The [password of user], which is empty by default.
+# [NOTICE] If this user password is the same as the admin password (including both being empty),
+# the value of this parameter will be ignored and all users are considered administrators;
+# in this scenario, users are not subject to the restrictions imposed by the userblacklist.
+# PS: "admin password" refers to the value of the parameter above: requirepass.
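+# Example (illustrative, hypothetical passwords): with requirepass : admin_pwd_123
+# and userpass : user_pwd_123, clients that AUTH with user_pwd_123 are restricted
+# by the userblacklist below, while clients that AUTH with admin_pwd_123 are not.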
+# userpass :
+
+# The blacklist of commands for users that logged in with userpass;
+# the commands added to this list will not be available to users other than the administrator.
+# [Advice] It's recommended to add high-risk commands to this list.
+# [Format] Commands should be separated by ",". For example: FLUSHALL, SHUTDOWN, KEYS, CONFIG
+# By default, this list is empty.
+# userblacklist :
+
+# Running mode of Pika. The current version only supports running in "classic mode".
+# If set to 'classic', Pika will create multiple DBs whose number is the value of the configure item "databases".
+instance-mode : classic
+
+# The number of databases when Pika runs in classic mode.
+# The default database id is DB 0. You can select a different one on
+# a per-connection basis by using SELECT. The db id range is [0, 'databases' value - 1].
+# The value range of this parameter is [1, 8].
+# [NOTICE] It's RECOMMENDED to set sync-binlog-thread-num equal to the DB num (databases);
+# if you've changed the value of databases, remember to check whether the value of sync-binlog-thread-num is proper.
+databases : 1
+
+# The number of followers of a master. Only [0, 1, 2, 3, 4] is valid at present.
+# By default, this num is set to 0, which means this feature is [not enabled]
+# and Pika runs in standalone mode.
+replication-num : 0
+
+# The consensus level defines the number of confirms (ACKs) the leader node needs to receive from
+# follower nodes before returning the result to the client that sent the request.
+# The [value range] of this parameter is: [0, ...replication-num].
+# The default value of consensus-level is 0, which means this feature is not enabled.
+consensus-level : 0
+
+# The prefix of dump files' names.
+# All the files generated by the command "bgsave" will be named with this prefix.
+dump-prefix :
+
+# daemonize [yes | no].
+#daemonize : yes
+
+# The directory to store dump files generated by the command "bgsave".
+dump-path : ./dump/
+
+# TTL of dump files generated by the command "bgsave".
+# Any dump files which exceed this TTL will be deleted.
+# The unit of dump-expire is [days] and the default value is 0 (days),
+# which means dump files never expire.
+dump-expire : 0
+
+# Pid file path of Pika.
+pidfile : ./pika.pid
+
+# The maximum number of Pika's connections.
+maxclients : 20000
+
+# The size of an sst file in RocksDB (Pika is based on RocksDB).
+# sst files are hierarchical; the smaller the sst file size, the higher the performance and the lower the merge cost,
+# the price being that the number of sst files could be huge. On the contrary, the bigger the sst file size, the lower
+# the performance and the higher the merge cost, while the number of files is fewer.
+# Supported Units [K|M|G]. target-file-size-base's default unit is [bytes] and the default value is 20M.
+target-file-size-base : 20M
+
+# Expire time of binlog (write2file) files stored within log-path.
+# Any binlog (write2file) files that exceed this expire time will be cleaned up.
+# The unit of expire-logs-days is [days] and the default value is 7 (days).
+# The [minimum value] of this parameter is 1 (day).
+expire-logs-days : 7
+
+# The maximum number of binlog (write2file) files.
+# Once the total number of binlog files exceeds this value,
+# automatic cleaning will start to ensure the maximum number
+# of binlog files is equal to expire-logs-nums.
+# The [minimum value] of this parameter is 10.
+expire-logs-nums : 10
+
+# The number of guaranteed connections for the root user.
+# This parameter guarantees that there are 2 (by default) connections available
+# for the root user to log in to Pika from 127.0.0.1, even if the maximum connection limit is reached.
+# PS: The maximum connection refers to the parameter above: maxclients.
+# The default value of root-connection-num is 2.
+root-connection-num : 2
+
+# Slowlog-write-errorlog
+slowlog-write-errorlog : no
+
+# The time threshold for slow log recording.
+# Any command whose execution time exceeds this threshold will be recorded in pika-ERROR.log,
+# which is stored in log-path.
+# The unit of slowlog-log-slower-than is [microseconds (μs)] and the default value is 10000 μs / 10 ms.
+slowlog-log-slower-than : 10000
+
+# Slowlog-max-len
+slowlog-max-len : 128
+
+# Pika db sync path
+db-sync-path : ./dbsync/
+
+# The maximum transmission speed during full synchronization.
+# Network exhaustion can be prevented by setting this parameter properly.
+# The value range of this parameter is [1, 1024], with the unit in [MB/s].
+# [NOTICE] If this parameter is set to an invalid value (smaller than 0 or bigger than 1024),
+# it will be automatically reset to 1024.
+# The default value of db-sync-speed is -1 (1024MB/s).
+db-sync-speed : -1
+
+# The priority of a slave node when electing a new master node.
+# The slave node with a [lower] value of slave-priority will have a [higher priority] to be elected as the new master node.
+# This parameter is only used in conjunction with sentinel and serves no other purpose.
+# The default value of slave-priority is 100.
+slave-priority : 100
+
+# Specify the network interface that works with Pika.
+#network-interface : eth1
+
+# The IP and port of the master node are specified by this parameter for
+# replication between master and slaves.
+# The [format] is "ip:port"; for example: "192.168.1.2:6666" indicates that
+# the slave instances configured with this value will automatically send
+# the SLAVEOF command to port 6666 of 192.168.1.2 after startup.
+# This parameter should be configured on slave nodes.
+#slaveof : master-ip:master-port
+
+
+# The daily/weekly automatic full compaction task is configured by compact-cron.
+#
+# [Format-daily]: start time(hour)-end time(hour)/disk-free-space-ratio,
+# example: with a value of "02-04/60", Pika will perform the full compaction task between 2:00-4:00 AM every day if
+# disk-free-size / disk-size > 60%.
+#
+# [Format-weekly]: week/start time(hour)-end time(hour)/disk-free-space-ratio,
+# example: with a value of "3/02-04/60", Pika will perform the full compaction task between 2:00-4:00 AM every Wednesday if
+# disk-free-size / disk-size > 60%.
+#
+# [Tip] Automatic full compaction is suitable for scenarios with multiple data structures
+# where lots of items are expired or deleted, or key names are frequently reused.
+#
+# [NOTICE]: If compact-interval is set, compact-cron will be masked and disabled.
+#
+#compact-cron : 3/02-04/60
+
+
+# An automatic full compaction task on a fixed time interval is configured by compact-interval.
+# [Format]: time interval(hour)/disk-free-space-ratio, example: "6/60", Pika will perform full compaction every 6 hours
+# if disk-free-size / disk-size > 60%.
+# [NOTICE]: compact-interval takes precedence over compact-cron.
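+# Worked example (illustrative figures): on a 1TB disk with 700GB free, the free-space
+# ratio is 70%, so "compact-cron : 02-04/60" would run between 02:00 and 04:00 (70% > 60%);
+# if "compact-interval : 6/60" were also set, the cron form above would be ignored.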
+#compact-interval :
+
+# The disable_auto_compactions option is [true | false]
+disable_auto_compactions : false
+
+# RocksDB max_subcompactions. Increasing this value can accelerate the execution of a single compaction task;
+# it's recommended to increase its value if large compactions are found in your instance.
+max-subcompactions : 1
+# The minimum disk usage ratio for checking resume.
+# If the disk usage ratio is lower than min-check-resume-ratio, resume will not be checked; only when it is higher will resume be checked.
+# Its default value is 0.7.
+#min-check-resume-ratio : 0.7
+
+# The minimum free disk space to trigger db resume.
+# If the db has a background error, manually resuming the db can only be triggered when the free disk size is larger than this configuration.
+# Its default value is 256MB.
+# [NOTICE]: least-free-disk-resume-size should not be smaller than write-buffer-size!
+#least-free-disk-resume-size : 256M
+
+# The interval for manually trying to resume the db is configured by manually-resume-interval.
+# If the db has a background error, it will try to call resume() to resume the db once the least free disk space to resume is satisfied.
+# Its default value is 60 seconds.
+#manually-resume-interval : 60
+
+# This window-size determines the amount of data that can be transmitted in a single synchronization process.
+# [Tip] In scenarios of high network latency, increasing this size can improve synchronization efficiency.
+# Its default value is 9000 and the [maximum] value is 90000.
+sync-window-size : 9000
+
+# Maximum buffer size of a client connection.
+# [NOTICE] Master and slaves must have exactly the same value for max-conn-rbuf-size.
+# Supported Units [K|M|G]. Its default unit is [bytes] and its default value is 268435456 (256MB). The value range is [64MB, 1GB].
+max-conn-rbuf-size : 268435456
+
+
+#######################################################################E#######
+#! Critical Settings !#
+#######################################################################E#######
+
+# write_binlog [yes | no]
+write-binlog : yes
+
+# The size of a binlog file, which cannot be modified once the Pika instance has started.
+# [NOTICE] Master and slaves must have exactly the same value for binlog-file-size.
+# The [value range] of binlog-file-size is [1K, 2G].
+# Supported Units [K|M|G]. binlog-file-size's default unit is [bytes] and the default value is 100M.
+binlog-file-size : 104857600
+
+# Automatically triggers a small compaction according to statistics.
+# Use the cache to store up to 'max-cache-statistic-keys' keys.
+# If 'max-cache-statistic-keys' is set to '0', the statistics function is turned off
+# and this automatic small compaction feature is disabled.
+max-cache-statistic-keys : 0
+
+# When a specific multi-data structure key is 'deleted' or 'overwritten' 'small-compaction-threshold' times,
+# a small compaction is triggered automatically if the small compaction feature is enabled.
+# small-compaction-threshold's default value is 5000 and the value range is [1, 100000].
+small-compaction-threshold : 5000
+small-compaction-duration-threshold : 10000
+
+# The maximum total size of all live memtables of the RocksDB instance owned by Pika.
+# Flushing from memtable to disk will be triggered if the actual memory usage of RocksDB
+# exceeds max-write-buffer-size when the next write operation is issued.
+# [RocksDB-Basic-Tuning](https://github.com/facebook/rocksdb/wiki/Setup-Options-and-Basic-Tuning)
+# Supported Units [K|M|G]. max-write-buffer-size's default unit is [bytes].
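+# Illustrative sizing (assumed figures): with write-buffer-size : 256M,
+# max-write-buffer-num : 2 and db-instance-num : 3, a single DB can hold roughly
+# 256M x 2 x 3 = 1.5GB of memtables, comfortably below the 10GB cap set below.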
+max-write-buffer-size : 10737418240
+
+# The maximum number of write buffers (memtables) that are built up in memory for one ColumnFamily in a DB.
+# The default and minimum number is 2. It means that Pika (RocksDB) will write to one write buffer
+# while it flushes the data of another write buffer to storage.
+# If max-write-buffer-num > 3, writing will be slowed down.
+max-write-buffer-num : 2
+
+# `min_write_buffer_number_to_merge` is the minimum number of memtables
+# that need to be merged before being flushed to storage. For example, if the
+# option is set to 2, immutable memtables will only be flushed if there
+# are two of them - a single immutable memtable will never be flushed.
+# If multiple memtables are merged together, less data will be written
+# to storage because updates to the same key are merged into a single entry. However,
+# each Get() must linearly traverse all immutable memtables to check
+# whether the key exists. Setting this value too high may hurt performance.
+min-write-buffer-number-to-merge : 1
+
+# The total size of WAL files. When this limit is reached, RocksDB will force the flush of the column families
+# whose memtables are backed by the oldest live WAL file. Also used to control the RocksDB open time when
+# the process restarts.
+max-total-wal-size : 1073741824
+
+# rocksdb level0_stop_writes_trigger
+level0-stop-writes-trigger : 36
+
+# rocksdb level0_slowdown_writes_trigger
+level0-slowdown-writes-trigger : 20
+
+# rocksdb level0_file_num_compaction_trigger
+level0-file-num-compaction-trigger : 4
+
+# enable db statistics [yes | no], default no
+enable-db-statistics : no
+# See rocksdb/include/rocksdb/statistics.h enum StatsLevel for more details.
+# To use only ticker counters, set db-statistics-level to 2.
+db-statistics-level : 2
+
+# The maximum size of the response package sent to a client, to prevent memory
+# exhaustion caused by commands like 'keys *' and 'scan' which can generate huge responses.
+# Supported Units [K|M|G]. The default unit is [bytes].
+max-client-response-size : 1073741824
+
+# The compression algorithm. You cannot change it once Pika has started.
+# Supported types: [snappy, zlib, lz4, zstd]. If you do not want to compress the SST files, please set its value to none.
+# [NOTICE] The Pika official binary release only links the snappy library statically, which means that
+# to use another compression algorithm you should compile Pika from source and link it with that library statically yourself.
+compression : snappy
+
+# If the vector size is smaller than the level number, the undefined lower levels use the
+# last option in the configurable array. For example, for a 3-level
+# LSM tree the following settings are the same:
+# configurable array: [none:snappy]
+# LSM settings: [none:snappy:snappy]
+# When this configurable is enabled, `compression` is ignored;
+# by default l0 and l1 use no compression, while l2 and beyond use the `compression` option.
+# https://github.com/facebook/rocksdb/wiki/Compression
+#compression_per_level : [none:none:snappy:lz4:lz4]
+
+# The number of RocksDB background threads (the sum of max-background-compactions and max-background-flushes).
+# If max-background-jobs has a valid value AND both 'max-background-flushes' and 'max-background-compactions' are set to -1,
+# then 'max-background-flushes' and 'max-background-compactions' will be auto-configured by RocksDB, specifically:
+# 1/4 of max-background-jobs will be given to 'max-background-flushes' and the rest (3/4) will be given to 'max-background-compactions'.
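+# Worked example (illustrative): with max-background-jobs : 8 and both thread settings
+# left at -1, RocksDB would use 8 x 1/4 = 2 flush threads and 8 x 3/4 = 6 compaction threads.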
+# 'max-background-jobs' default value is 3 and the value range is [2, 12].
+max-background-jobs : 3
+
+# The number of background flushing threads.
+# max-background-flushes' default value is -1 and the value range is [1, 4] or -1.
+# If 'max-background-flushes' is set to -1, 'max-background-compactions' should also be set to -1,
+# which means letting RocksDB auto-configure them based on the value of 'max-background-jobs'.
+max-background-flushes : -1
+
+# [NOTICE] You MUST NOT set one of max-background-flushes or max-background-compactions to -1 while setting the other to some other value (not -1).
+# They SHOULD both be -1, or both not (if you want to configure them manually).
+
+# The number of background compacting threads.
+# max-background-compactions' default value is -1 and the value range is [1, 8] or -1.
+# If 'max-background-compactions' is set to -1, 'max-background-flushes' should also be set to -1,
+# which means letting RocksDB auto-configure them based on the value of 'max-background-jobs'.
+max-background-compactions : -1
+
+# RocksDB delayed-write-rate, default is 0 (inferred from rate-limiter by RocksDB).
+# Ref from RocksDB: Whenever stall conditions are triggered, RocksDB will reduce the write rate to delayed_write_rate,
+# and could possibly reduce the write rate to even lower than delayed_write_rate if estimated pending compaction bytes accumulate.
+# If the value is 0, RocksDB will infer a value from the `rate_limiter` value if it is not empty, or 16MB if `rate_limiter` is empty.
+# Note that if users change the rate in `rate_limiter` after the DB is opened, delayed_write_rate won't be adjusted.
+# [Dynamic Change Supported] sending 'config set delayed-write-rate' to a running Pika can change its value dynamically.
+delayed-write-rate : 0
+
+
+# RocksDB will try to limit the number of bytes in one compaction to be lower than max-compaction-bytes,
+# but this is NOT guaranteed.
+# The default value is -1, meaning 25 * target-file-size-base (which is RocksDB's default value).
+max-compaction-bytes : -1
+
+
+# The maximum number of open file descriptors RocksDB caches.
+max-cache-files : 5000
+
+# The ratio between the total size of RocksDB level-(L+1) files and the total size of RocksDB level-L files for all L.
+# Its default value is 10(x). You can also change it to 5(x).
+max-bytes-for-level-multiplier : 10
+
+# slotmigrate is mainly used to migrate slots; usually we will set it to no.
+# When you migrate slots, you need to set it to yes, and reload slotskeys beforehand.
+# slotmigrate [yes | no]
+slotmigrate : no
+
+# slotmigrate thread num
+slotmigrate-thread-num : 1
+
+# thread-migrate-keys-num, 1/8 of the write_buffer_size_
+thread-migrate-keys-num : 64
+
+# BlockBasedTable block_size, default 4k
+# block-size: 4096
+
+# block LRU cache, default 8M, 0 to disable
+# Supported Units [K|M|G], default unit [bytes]
+# block-cache: 8388608
+
+# num-shard-bits, default -1, the number of bits from cache keys to be used as shard id.
+# The cache will be sharded into 2^num_shard_bits shards.
+# https://github.com/EighteenZi/rocksdb_wiki/blob/master/Block-Cache.md#lru-cache
+# num-shard-bits: -1
+
+# whether the block cache is shared among the RocksDB instances, default is per CF
+# share-block-cache: no
+
+# The slot number of Pika when used with codis.
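+# Illustrative note (assumed hashing scheme, not confirmed here): with 1024 slots, a
+# key is typically mapped to a slot along the lines of slot = crc32(key) % default-slot-num,
+# so this value must match the slot count configured on the codis side.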
+default-slot-num : 1024
+
+# enable-partitioned-index-filters [yes | no]
+# When `cache-index-and-filter-blocks` is enabled, `pin_l0_filter_and_index_blocks_in_cache`
+# and `cache-index-and-filter-blocks` are suggested to be enabled.
+# https://github.com/facebook/rocksdb/wiki/Partitioned-Index-Filters
+# enable-partitioned-index-filters: default no
+
+# whether or not index and filter blocks are stored in the block cache
+# cache-index-and-filter-blocks: no
+
+# pin_l0_filter_and_index_blocks_in_cache [yes | no]
+# When `cache-index-and-filter-blocks` is enabled, `pin_l0_filter_and_index_blocks_in_cache` is suggested to be enabled.
+# pin_l0_filter_and_index_blocks_in_cache : no
+
+# When set to yes, the bloom filter of the last level will not be built.
+# optimize-filters-for-hits: no
+# https://github.com/facebook/rocksdb/wiki/Leveled-Compaction#levels-target-size
+# level-compaction-dynamic-level-bytes: no
+
+################################## RocksDB Rate Limiter #######################
+# rocksdb rate limiter
+# https://rocksdb.org/blog/2017/12/18/17-auto-tuned-rate-limiter.html
+# https://github.com/EighteenZi/rocksdb_wiki/blob/master/Rate-Limiter.md
+#######################################################################E#######
+
+# rate limiter mode
+# 0: Read 1: Write 2: ReadAndWrite
+# rate-limiter-mode : default 1
+
+# rate limiter bandwidth, unit in bytes, default 1024GB/s (no limit)
+# [Dynamic Change Supported] sending 'config set rate-limiter-bandwidth' to a running Pika can change its value dynamically.
+#rate-limiter-bandwidth : 1099511627776
+
+#rate-limiter-refill-period-us : 100000
+#
+#rate-limiter-fairness: 10
+
+# If auto-tuned is true: enables dynamic adjustment of the rate limit within the range
+# `[rate-limiter-bandwidth / 20, rate-limiter-bandwidth]`, according to the recent demand for background I/O.
+# Rate limiter auto-tune: https://rocksdb.org/blog/2017/12/18/17-auto-tuned-rate-limiter.html. The default value is true.
+#rate-limiter-auto-tuned : true
+
+################################## RocksDB Blob Configure #####################
+# rocksdb blob configure
+# https://rocksdb.org/blog/2021/05/26/integrated-blob-db.html
+# wiki https://github.com/facebook/rocksdb/wiki/BlobDB
+#######################################################################E#######
+
+# enable rocksdb blob, default no
+# enable-blob-files : yes
+
+# Values at or above this threshold will be written to blob files during flush or compaction.
+# Supported Units [K|M|G], default unit is [bytes].
+# min-blob-size : 4K
+
+# the size limit for blob files
+# Supported Units [K|M|G], default unit is [bytes].
+# blob-file-size : 256M
+
+# The compression type to use for blob files. All blobs in the same file are compressed using the same algorithm.
+# Supported types: [snappy, zlib, lz4, zstd]. If you do not want to compress the blob files, please set its value to none.
+# [NOTICE] The Pika official binary release only links the snappy library statically, which means that
+# to use another compression algorithm you should compile Pika from source and link it with that library statically yourself.
+# blob-compression-type : lz4
+
+# Set this to yes to make BlobDB actively relocate valid blobs from the oldest blob files as they are encountered during compaction.
+# The value options are [yes | no].
+# enable-blob-garbage-collection : no
+
+# The cutoff that the GC logic uses to determine which blob files should be considered "old".
+# This parameter can be tuned to adjust the trade-off between write amplification and space amplification.
+# blob-garbage-collection-age-cutoff : 0.25
+
+# If the ratio of garbage in the oldest blob files exceeds this threshold,
+# targeted compactions are scheduled in order to force garbage-collecting the blob files in question.
+# blob_garbage_collection_force_threshold : 1.0
+
+# the Cache object to use for blobs, not enabled by default
+# blob-cache : 0
+
+# blob-num-shard-bits, default -1, the number of bits from cache keys to be used as shard id.
+# The cache will be sharded into 2^blob-num-shard-bits shards.
+# blob-num-shard-bits : -1
+
+# Rsync rate-limiting configuration [default value is 200MB/s].
+# [USED BY SLAVE] The transmission speed (Rsync rate) in full replication is controlled BY THE SLAVE NODE. You should modify throttle-bytes-per-second in the slave's pika.conf if you want to change the rsync rate limit.
+# [Dynamic Change Supported] sending the command 'config set throttle-bytes-per-second new_value' to the SLAVE NODE can dynamically adjust the rsync rate during full sync (use config rewrite to persist the changes).
+throttle-bytes-per-second : 207200000
+# Rsync timeout in the full sync stage [default value is 1000 ms]; unnecessary retries will happen if this value is too small.
+# [Dynamic Change Supported] similar to throttle-bytes-per-second, rsync-timeout-ms can be dynamically changed with the config set command.
+# [USED BY SLAVE] Similar to throttle-bytes-per-second, you should change rsync-timeout-ms's value in the slave's conf file if it needs to be adjusted.
+rsync-timeout-ms : 1000
+# The valid range for max-rsync-parallel-num is [1, 4].
+# If an invalid value is provided, max-rsync-parallel-num will automatically be reset to 4.
+max-rsync-parallel-num : 4
+
+# The synchronization mode of Pika primary/secondary replication is determined by ReplicationID. The ReplicationIDs within one replication cluster are the same.
+# replication-id :
+
+###################
+## Cache Settings
+###################
+# the number of caches for every db
+cache-num : 16
+
+# cache-model 0:cache_none 1:cache_read
+cache-model : 1
+# cache-type: string, set, zset, list, hash, bit
+cache-type: string, set, zset, list, hash, bit
+
+# Maximum number of members in the zset redis cache.
+# In the disk DB, a zset may have many members. In the memory cache, we limit the maximum
+# number of members that can exist in a zset, which is zset-cache-field-num-per-key, with a
+# default value of 512.
+zset-cache-field-num-per-key : 512
+
+# If the number of elements in a zset in the DB exceeds zset-cache-field-num-per-key,
+# we determine whether to cache the first 512 [zset-cache-field-num-per-key] elements
+# or the last 512 [zset-cache-field-num-per-key] elements in the zset based on zset-cache-start-direction.
+#
+# If zset-cache-start-direction is 0, cache the first 512 [zset-cache-field-num-per-key] elements from the head.
+# If zset-cache-start-direction is -1, cache the last 512 [zset-cache-field-num-per-key] elements.
+zset-cache-start-direction : 0
+
+
+# the cache maxmemory of every db, default 10G
+cache-maxmemory : 10737418240
+
+# cache-maxmemory-policy
+# 0: volatile-lru -> Evict using approximated LRU among the keys with an expire set.
+# 1: allkeys-lru -> Evict any key using approximated LRU.
+# 2: volatile-lfu -> Evict using approximated LFU among the keys with an expire set.
+# 3: allkeys-lfu -> Evict any key using approximated LFU.
+# 4: volatile-random -> Remove a random key among the ones with an expire set.
+# 5: allkeys-random -> Remove a random key, any key.
+# 6: volatile-ttl -> Remove the key with the nearest expire time (minor TTL).
+# 7: noeviction -> Don't evict anything, just return an error on write operations.
+cache-maxmemory-policy : 1
+
+# cache-maxmemory-samples
+cache-maxmemory-samples: 5
+
+# cache-lfu-decay-time
+cache-lfu-decay-time: 1
+
+
+# It is possible to manage access to Pub/Sub channels with ACL rules as well. The
+# default Pub/Sub channel permission for new users is controlled by the
+# acl-pubsub-default configuration directive, which accepts one of these values:
+#
+# allchannels: grants access to all Pub/Sub channels
+# resetchannels: revokes access to all Pub/Sub channels
+#
+# acl-pubsub-default defaults to the 'resetchannels' permission.
+# acl-pubsub-default : resetchannels
+
+# ACL users are defined in the following format:
+# user : ... acl rules ...
+#
+# For example:
+#
+# user : worker on >password ~key* +@all
+
+# Using an external ACL file
+#
+# Instead of configuring users here in this file, it is possible to use
+# a stand-alone file just listing users. The two methods cannot be mixed:
+# if you configure users here and at the same time you activate the external
+# ACL file, the server will refuse to start.
+#
+# The format of the external ACL user file is exactly the same as the
+# format that is used inside pika.conf to describe users.
+#
+# aclfile : ../conf/users.acl
+
+# (experimental)
+# It is possible to change the name of dangerous commands in a shared environment.
+# For instance, the CONFIG command may be renamed into something hard to guess.
+# Warning: To prevent data inconsistency caused by different configuration files, do not use
+# rename-command to modify write commands on the primary and secondary servers. If necessary,
+# ensure that the configuration files of the primary and secondary servers are consistent.
+# In addition, when using rename-command, you must not wrap the new name in "";
+# for example, rename-command : FLUSHDB "360flushdb" is incorrect, while
+# rename-command : FLUSHDB 360flushdb is correct. After the rename command is configured,
+# it is most appropriate to use a numeric string with uppercase or lowercase letters,
+# for example: rename-command : FLUSHDB joYAPNXRPmcarcR4ZDgC81TbdkSmLAzRPmcarcR
+# Warning: Currently this only applies to the flushdb, slaveof, bgsave, shutdown, and config commands.
+# Warning: Ensure that the settings of rename-command on the master and slave servers are consistent.
+#
+# Example:
+# rename-command : FLUSHDB 360flushdb
+
+# [You can ignore this item]
+# This is NOT a regular conf item; it is an internal metric that relies on pika.conf for persistent storage.
+# 'internal-used-unfinished-full-sync' is used to generate the metric 'is_eligible_for_master_election',
+# which serves the scenario of codis-pika cluster re-election.
+# You'd better [DO NOT MODIFY IT UNLESS YOU KNOW WHAT YOU ARE DOING].
+internal-used-unfinished-full-sync :
+
+# for washing data from 4.0.0 to 4.0.1
+# https://github.com/OpenAtomFoundation/pika/issues/2886
+# default value: true
+wash-data: true
+
+# Pika automatic compaction strategy, a complement to RocksDB compaction.
+# Triggers the compaction background task periodically according to `compact-interval`.
+# Can choose `full-compact` or `obd-compact`.
+# obd-compact https://github.com/OpenAtomFoundation/pika/issues/2255
+compaction-strategy : obd-compact
+
+# For OBD_Compact
+# According to the number of sst files in RocksDB,
+# compact every `compact-every-num-of-files` files.
+compact-every-num-of-files : 10
+
+# For OBD_Compact
+# In addition, if a file's creation time is older than
+# `force-compact-file-age-seconds`, a compaction of the upper and lower
+# boundaries of the file will be performed at the same time, together with
+# `compact-every-num-of-files` - 1 of its neighbors.
+force-compact-file-age-seconds : 300
+
+# For OBD_Compact
+# The minimum delete ratio a file must reach before it is force-compacted.
+force-compact-min-delete-ratio : 10
+
+# For OBD_Compact
+# SST files created within the last `dont-compact-sst-created-in-seconds` are not compacted.
+dont-compact-sst-created-in-seconds : 20
+
+# For OBD_Compact
+# The preferred minimum delete ratio when selecting files to compact.
+best-delete-min-ratio : 10
\ No newline at end of file
diff --git a/tools/pika_migrate/include/acl.h b/tools/pika_migrate/include/acl.h
new file mode 100644
index 0000000000..77bd5ba8a3
--- /dev/null
+++ b/tools/pika_migrate/include/acl.h
@@ -0,0 +1,435 @@
+// Copyright (c) 2015-present, Qihoo, Inc. All rights reserved.
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree. An additional grant
+// of patent rights can be found in the PATENTS file in the same directory.
+
+#ifndef PIKA_ACL_H
+#define PIKA_ACL_H
+
+#include <array>
+#include <atomic>
+#include <bitset>
+#include <cstdint>
+#include <list>
+#include <map>
+#include <memory>
+#include <set>
+#include <shared_mutex>
+#include <string>
+#include <utility>
+#include <vector>
+
+#include "pika_command.h"
+#include "pstd_status.h"
+
+static const int USER_COMMAND_BITS_COUNT = 1024;
+
+enum class AclSelectorFlag {
+  ROOT = (1 << 0),          // This is the root user permission selector
+  ALL_KEYS = (1 << 1),      // The user can mention any key
+  ALL_COMMANDS = (1 << 2),  // The user can run all commands
+  ALL_CHANNELS = (1 << 3),  // The user can mention any Pub/Sub channel
+};
+
+enum class AclCategory {
+  KEYSPACE = (1ULL << 0),
+  READ = (1ULL << 1),
+  WRITE = (1ULL << 2),
+  SET = (1ULL << 3),
+  SORTEDSET = (1ULL << 4),
+  LIST = (1ULL << 5),
+  HASH = (1ULL << 6),
+  STRING = (1ULL << 7),
+  BITMAP = (1ULL << 8),
+  HYPERLOGLOG = (1ULL << 9),
+  GEO = (1ULL << 10),
+  STREAM = (1ULL << 11),
+  PUBSUB = (1ULL << 12),
+  ADMIN = (1ULL << 13),
+  FAST = (1ULL << 14),
+  SLOW = (1ULL << 15),
+  BLOCKING = (1ULL << 16),
+  DANGEROUS = (1ULL << 17),
+  CONNECTION = (1ULL << 18),
+  TRANSACTION = (1ULL << 19),
+  SCRIPTING = (1ULL << 20),
+};
+
+enum class AclUserFlag {
+  ENABLED = (1 << 0),   // The user is active
+  DISABLED = (1 << 1),  // The user is disabled
+  NO_PASS = (1 << 2),   /* The user requires no password, any provided password will work. For the
+                           default user, this also means that no AUTH is needed, and every
+                           connection is immediately authenticated.
+                        */
+};
+
+enum class AclDeniedCmd { OK, CMD, KEY, CHANNEL, NUMBER, NO_SUB_CMD, NO_AUTH };
+
+enum class AclLogCtx {
+  TOPLEVEL,
+  MULTI,
+  LUA,
+};
+
+// ACL key permission types
+enum class AclPermission {
+  READ = (1 << 0),
+  WRITE = (1 << 1),
+  ALL = (READ | WRITE),
+};
+
+struct AclKeyPattern {
+  void ToString(std::string* str) {
+    // Test for READ|WRITE together first (as an exact match), so that a pattern
+    // carrying both permissions prints as plain "~" rather than "%W~" or "%R~".
+    if ((flags & static_cast<uint32_t>(AclPermission::ALL)) == static_cast<uint32_t>(AclPermission::ALL)) {
+      str->append("~");
+    } else if (flags & static_cast<uint32_t>(AclPermission::WRITE)) {
+      str->append("%W~");
+    } else if (flags & static_cast<uint32_t>(AclPermission::READ)) {
+      str->append("%R~");
+    }
+    str->append(pattern);
+  }
+
+  uint32_t flags;      /* The CMD_KEYS_* flags for this key pattern */
+  std::string pattern; /* The pattern to match keys against */
+};
+
+class ACLLogEntry {
+ public:
+  ACLLogEntry() = delete;
+  ACLLogEntry(int32_t reason, int32_t context, const std::string& object, const std::string& username, int64_t ctime,
+              const std::string& cinfo)
+      : count_(1),
+        reason_(reason),
+        context_(context),
+        object_(object),
+        username_(username),
+        ctime_(ctime),
+        cinfo_(cinfo) {}
+
+  bool Match(int32_t reason, int32_t context, int64_t ctime, const std::string& object, const std::string& username);
+
+  void AddEntry(const std::string& cinfo, u_int64_t ctime);
+
+  void GetReplyInfo(std::vector<std::string>* vector);
+
+ private:
+  uint64_t count_;
+  int32_t reason_;
+  int32_t context_;
+  std::string object_;
+  std::string username_;
+  int64_t ctime_;
+  std::string cinfo_;
+};
+
+class User;
+class Acl;
+
+class AclSelector {
+  friend User;
+
+ public:
+  explicit AclSelector() : AclSelector(0){};
+  explicit AclSelector(uint32_t flag);
+  explicit AclSelector(const AclSelector& selector);
+  ~AclSelector() = default;
+
+  inline uint32_t Flags() const { return flags_; };
+  inline bool HasFlags(uint32_t flag) const { return flags_ & flag; };
+  inline void AddFlags(uint32_t flag) { flags_ |= flag; };
+  inline void DecFlags(uint32_t flag) { flags_ &= ~flag; };
+  bool EqualChannel(const std::vector<std::string>& allChannel);
+
+ private:
+  pstd::Status SetSelector(const std::string& op);
+
+  pstd::Status SetSelectorFromOpSet(const std::string& opSet);
+
+  void ACLDescribeSelector(std::string* str);
+
+  void ACLDescribeSelector(std::vector<std::string>& vector);
+
+  AclDeniedCmd CheckCanExecCmd(std::shared_ptr<Cmd>& cmd, int8_t subCmdIndex, const std::vector<std::string>& keys,
+                               std::string* errKey);
+
+  bool SetSelectorCommandBitsForCategory(const std::string& categoryName, bool allow);
+  void SetAllCommandSelector();
+  void RestAllCommandSelector();
+
+  void InsertKeyPattern(const std::string& str, uint32_t flags);
+
+  void InsertChannel(const std::string& str);
+
+  void ChangeSelector(const Cmd* cmd, bool allow);
+  void ChangeSelector(const std::shared_ptr<Cmd>& cmd, bool allow);
+  pstd::Status ChangeSelector(const std::shared_ptr<Cmd>& cmd, const std::string& subCmd, bool allow);
+
+  void SetSubCommand(uint32_t cmdId);
+  void SetSubCommand(uint32_t cmdId, uint32_t subCmdIndex);
+  void ResetSubCommand();
+  void ResetSubCommand(uint32_t cmdId);
+  void ResetSubCommand(uint32_t cmdId, uint32_t subCmdIndex);
+
+  bool CheckSubCommand(uint32_t cmdId, uint32_t subCmdIndex);
+
+  void DescribeSelectorCommandRules(std::string* str);
+
+  // process an acl command op, and its sub command
+  pstd::Status SetCommandOp(const std::string& op, bool allow);
+
+  // when a command is modified, update the selector's commandRules string
+  void UpdateCommonRule(const std::string& rule, bool allow);
+
+  // remove a rule string from the selector's commandRules
+  void RemoveCommonRule(const std::string& rule);
+
+  // clean commandRules
+  void
CleanCommandRule();
+
+  bool CheckKey(const std::string& key, const uint32_t cmdFlag);
+
+  bool CheckChannel(const std::string& key, bool isPattern);
+
+  uint32_t flags_;  // See SELECTOR_FLAG_*
+
+  /* The bit in allowedCommands_ is set if this user has the right to
+   * execute this command. */
+  std::bitset<USER_COMMAND_BITS_COUNT> allowedCommands_;
+
+  // records subcommands; the key is the commandId, the value is the subcommand bit index
+  std::map<uint32_t, uint32_t> subCommand_;
+
+  /* A list of allowed key patterns. If this field is empty the user cannot mention any key in a command,
+   * unless the flag ALLKEYS is set in the user. */
+  std::list<std::shared_ptr<AclKeyPattern>> patterns_;
+
+  /* A list of allowed Pub/Sub channel patterns. If this field is empty the user cannot mention any
+   * channel in a `PUBLISH` or [P][UNSUBSCRIBE] command, unless the flag ALLCHANNELS is set in the user. */
+  std::list<std::string> channels_;
+
+  /* A string representation of the ordered categories and commands, this
+   * is used to regenerate the original ACL string for display.
+   */
+  std::string commandRules_;
+};
+
+// acl user
+class User {
+  friend Acl;
+
+ public:
+  User() = delete;
+  explicit User(std::string name);
+  explicit User(const User& user);
+  ~User() = default;
+
+  std::string Name() const;
+  // inline uint32_t Flags() const { return flags_; };
+  inline bool HasFlags(uint32_t flag) const { return flags_ & flag; };
+  inline void AddFlags(uint32_t flag) { flags_ |= flag; };
+  inline void DecFlags(uint32_t flag) { flags_ &= ~flag; };
+
+  void CleanAclString();
+
+  /**
+   * store a password
+   * A lock is required before the call
+   * @param password
+   */
+  void AddPassword(const std::string& password);
+
+  /**
+   * delete a stored password
+   * A lock is required before the call
+   * @param password
+   */
+  void RemovePassword(const std::string& password);
+
+  // clean the user's passwords
+  // A lock is required before the call
+  void CleanPassword();
+
+  // Add a selector to the user
+  // A lock is required before the call
+  void AddSelector(const std::shared_ptr<AclSelector>& selector);
+
+  // Set rules for the user based on the given parameters.
+  // Use this function to handle it because it allows locking the specified user.
+  pstd::Status SetUser(const std::vector<std::string>& rules);
+
+  // Set the user rule with the given string
+  // A lock is required before the call
+  pstd::Status SetUser(const std::string& op);
+
+  pstd::Status CreateSelectorFromOpSet(const std::string& opSet);
+
+  // Get the user's default selector
+  // A lock is required before the call
+  std::shared_ptr<AclSelector> GetRootSelector();
+
+  void DescribeUser(std::string* str);
+
+  // match the user password when doing auth;
+  // returns true on a match, false otherwise
+  bool MatchPassword(const std::string& password);
+
+  // handle Cmd Acl|get
+  void GetUserDescribe(CmdRes* res);
+
+  // Get the user's channel keys
+  // A lock is required before the call
+  std::vector<std::string> AllChannelKey();
+
+  // check whether the user can exec the cmd
+  AclDeniedCmd CheckUserPermission(std::shared_ptr<Cmd>& cmd, const PikaCmdArgsType& argv, int8_t& subCmdIndex,
+                                   std::string* errKey);
+
+ private:
+  mutable std::shared_mutex mutex_;
+
+  const std::string name_;  // The username
+
+  std::atomic<uint32_t> flags_ = static_cast<uint32_t>(AclUserFlag::DISABLED);  // See USER_FLAG_*
+
+  std::set<std::string> passwords_;  // passwords for this user
+
+  std::list<std::shared_ptr<AclSelector>> selectors_; /* A set of selectors this user validates commands
+                                                         against. This list will always contain at least
+                                                         one selector for backwards compatibility.
+                                                      */
+
+  std::string aclString_; /* cached string representation of the ACLs */
+};
+
+class Acl {
+  friend User;
+  friend AclSelector;
+
+ public:
+  explicit Acl() = default;
+  ~Acl() = default;
+
+  /**
+   * Initialize all ACLs
+   * @return
+   */
+  pstd::Status Initialization();
+
+  /**
+   * create the acl default user
+   * @return
+   */
+  std::shared_ptr<User> CreateDefaultUser();
+
+  std::shared_ptr<User> CreatedUser(const std::string& name);
+
+  /**
+   * Set user properties according to the string "op".
+   * @param op acl rule string
+   */
+  pstd::Status SetUser(const std::string& userName, std::vector<std::string>& op);
+
+  /**
+   * get a user from the users_ map
+   * @param userName
+   * @return
+   */
+  std::shared_ptr<User> GetUser(const std::string& userName);
+
+  std::shared_ptr<User> GetUserLock(const std::string& userName);
+
+  /**
+   * store a user into the users_ map
+   * @param user
+   */
+  void AddUser(const std::shared_ptr<User>& user);
+
+  void AddUserLock(const std::shared_ptr<User>& user);
+
+  // do user auth; the password passed in is plaintext, not sha256
+  std::shared_ptr<User> Auth(const std::string& userName, const std::string& password);
+
+  // get all users
+  std::vector<std::string> Users();
+
+  void DescribeAllUser(std::vector<std::string>* content);
+
+  // save acl rules to file
+  pstd::Status SaveToFile();
+
+  // delete users from users_
+  std::set<std::string> DeleteUser(const std::vector<std::string>& userNames);
+
+  // reload users from the acl file when the acl|load command is executed
+  pstd::Status LoadUserFromFile(std::set<std::string>* toUnAuthUsers);
+
+  void UpdateDefaultUserPassword(const std::string& pass);
+
+  void InitLimitUser(const std::string& bl, bool limit_exist);
+
+  // After a user's channels are modified, determine whether the current channel connections need to be disconnected
+  void KillPubsubClientsIfNeeded(const std::shared_ptr<User>& origin, const std::shared_ptr<User>& newUser);
+
+  // check whether the user can exec the command, after exec command
+  // bool CheckUserCanExec(const std::shared_ptr<Cmd>& cmd, const PikaCmdArgsType& argv);
+
+  // Gets the category flag value based on the cmd category name
+  static uint32_t GetCommandCategoryFlagByName(const std::string& name);
+
+  // Obtain the corresponding name based on the category
+  static std::string GetCommandCategoryFlagByName(const uint32_t category);
+
+  static std::vector<std::string> GetAllCategoryName();
+
+  static const std::string DefaultUser;
+  static const std::string DefaultLimitUser;
+  static const int64_t LogGroupingMaxTimeDelta;
+
+  // Adds a new entry in the ACL log, making sure to delete the old entry
+  // if we reach the maximum length allowed for the log.
+  void AddLogEntry(int32_t reason, int32_t context, const std::string& username, const std::string& object,
+                   const std::string& cInfo);
+
+  void GetLog(long count, CmdRes* res);
+  void ResetLog();
+
+ private:
+  /**
+   * This function is called once the server is already running and we are ready to start:
+   * it loads the ACLs either from the pending list of users defined in redis.conf,
+   * or from the ACL file. The function will just exit with an error if the user is trying to mix
+   * both loading methods.
+   */
+  pstd::Status LoadUsersAtStartup();
+
+  /**
+   * Loads the ACL from the specified filename: every line
+   * is validated and should be either empty or in the format used to specify
+   * users in the pika.conf configuration or in the ACL file, that is:
+   *
+   * user ... rules ...
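+   *
+   * For example (illustrative line, assuming the same rule syntax as the
+   * pika.conf example above; the username and rules here are hypothetical):
+   *   user limituser on >somepass ~cache:* +get +set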
+   *
+   * @param users pika.conf users rule
+   */
+  pstd::Status LoadUserConfigured(std::vector<std::string>& users);
+
+  /**
+   * Load the ACL from an acl rule file
+   * @param fileName full file name
+   */
+  pstd::Status LoadUserFromFile(const std::string& fileName);
+
+  void ACLMergeSelectorArguments(std::vector<std::string>& argv, std::vector<std::string>* merged);
+  mutable std::shared_mutex mutex_;
+
+  static std::array<std::pair<std::string, uint32_t>, 21> CommandCategories;
+
+  static std::array<std::pair<std::string, uint32_t>, 3> UserFlags;
+
+  static std::array<std::pair<std::string, uint32_t>, 3> SelectorFlags;
+
+  std::map<std::string, std::shared_ptr<User>> users_;
+
+  std::list<std::shared_ptr<ACLLogEntry>> logEntries_;
+};
+
+#endif  // PIKA_ACL_H
diff --git a/tools/pika_migrate/include/build_version.h b/tools/pika_migrate/include/build_version.h
new file mode 100644
index 0000000000..52e583c3a3
--- /dev/null
+++ b/tools/pika_migrate/include/build_version.h
@@ -0,0 +1,15 @@
+// Copyright (c) 2015-present, Qihoo, Inc. All rights reserved.
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree. An additional grant
+// of patent rights can be found in the PATENTS file in the same directory.
+
+#ifndef INCLUDE_BUILD_VERSION_H_
+#define INCLUDE_BUILD_VERSION_H_
+
+// this variable tells us about the git revision
+extern const char* pika_build_git_sha;
+
+// Date on which the code was compiled:
+extern const char* pika_build_compile_date;
+
+#endif  // INCLUDE_BUILD_VERSION_H_
diff --git a/tools/pika_migrate/include/pika_acl.h b/tools/pika_migrate/include/pika_acl.h
new file mode 100644
index 0000000000..8d830581f8
--- /dev/null
+++ b/tools/pika_migrate/include/pika_acl.h
@@ -0,0 +1,48 @@
+// Copyright (c) 2015-present, Qihoo, Inc. All rights reserved.
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree. An additional grant
+// of patent rights can be found in the PATENTS file in the same directory.
+
+// pika ACL command
+#ifndef PIKA_ACL_CMD_H
+#define PIKA_ACL_CMD_H
+
+#include "include/pika_command.h"
+#include "include/pika_server.h"
+
+extern PikaServer* g_pika_server;
+
+class PikaAclCmd : public Cmd {
+ public:
+  PikaAclCmd(const std::string& name, int arity, uint32_t flag)
+      : Cmd(name, arity, flag, static_cast<uint32_t>(AclCategory::ADMIN)) {
+    subCmdName_ = {"cat", "deluser", "dryrun", "genpass", "getuser", "list", "load",
+                   "log", "save", "setuser", "users", "whoami", "help"};
+  }
+  void Do() override;
+  void Split(const HintKeys& hint_keys) override{};
+  void Merge() override{};
+  Cmd* Clone() override { return new PikaAclCmd(*this); }
+
+ private:
+  void DoInitial() override;
+  void Clear() override {}
+
+  void Cat();
+  void DelUser();
+  void DryRun();
+  void GenPass();
+  void GetUser();
+  void List();
+  void Load();
+  void Log();
+  void Save();
+  void SetUser();
+  void Users();
+  void WhoAmI();
+  void Help();
+
+  std::string subCmd_;
+};
+
+#endif  // PIKA_ACL_CMD_H
diff --git a/tools/pika_migrate/include/pika_admin.h b/tools/pika_migrate/include/pika_admin.h
new file mode 100644
index 0000000000..1b1aa1bad3
--- /dev/null
+++ b/tools/pika_migrate/include/pika_admin.h
@@ -0,0 +1,750 @@
+// Copyright (c) 2015-present, Qihoo, Inc. All rights reserved.
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree. An additional grant
+// of patent rights can be found in the PATENTS file in the same directory.
+
+#ifndef PIKA_ADMIN_H_
+#define PIKA_ADMIN_H_
+
+#include <cstdint>
+#include <map>
+#include <memory>
+#include <set>
+#include <string>
+#include <unordered_map>
+#include <utility>
+#include <vector>
+
+#include "include/acl.h"
+#include "include/pika_command.h"
+#include "storage/storage.h"
+#include "pika_db.h"
+
+/*
+ * Admin
+ */
+class SlaveofCmd : public Cmd {
+ public:
+  SlaveofCmd(const std::string& name, int arity, uint32_t flag)
+      : Cmd(name, arity, flag, static_cast<uint32_t>(AclCategory::ADMIN)) {}
+  void Do() override;
+  void Split(const HintKeys& hint_keys) override {};
+  void Merge() override {};
+  Cmd* Clone() override { return new SlaveofCmd(*this); }
+
+ private:
+  std::string master_ip_;
+  int64_t master_port_ = -1;
+  bool is_none_ = false;
+  void DoInitial() override;
+  void Clear() override {
+    is_none_ = false;
+    master_ip_.clear();
+    master_port_ = 0;
+  }
+};
+
+class DbSlaveofCmd : public Cmd {
+ public:
+  DbSlaveofCmd(const std::string& name, int arity, uint32_t flag)
+      : Cmd(name, arity, flag, static_cast<uint32_t>(AclCategory::ADMIN)) {}
+  void Do() override;
+  void Split(const HintKeys& hint_keys) override {};
+  void Merge() override {};
+  Cmd* Clone() override { return new DbSlaveofCmd(*this); }
+
+ private:
+  std::string db_name_;
+  bool force_sync_ = false;
+  bool is_none_ = false;
+  bool have_offset_ = false;
+  int64_t filenum_ = 0;
+  int64_t offset_ = 0;
+  void DoInitial() override;
+  void Clear() override {
+    db_name_.clear();
+    force_sync_ = false;
+    is_none_ = false;
+    have_offset_ = false;
+  }
+};
+
+class AuthCmd : public Cmd {
+ public:
+  AuthCmd(const std::string& name, int arity, uint32_t flag) : Cmd(name, arity, flag) {}
+  void Do() override;
+  void Split(const HintKeys& hint_keys) override {};
+  void Merge() override {};
+  Cmd* Clone() override { return new AuthCmd(*this); }
+
+ private:
+  void DoInitial() override;
+};
+
+class BgsaveCmd : public Cmd {
+ public:
+  BgsaveCmd(const std::string& name, int arity, uint32_t flag)
+      : Cmd(name, arity, flag, static_cast<uint32_t>(AclCategory::ADMIN)) {}
+  void Do() override;
+  void Split(const HintKeys& hint_keys) override {};
+  void Merge() override {};
+  Cmd* Clone() override { return new BgsaveCmd(*this); }
+
+ private:
+  void DoInitial() override;
+  void Clear() override { bgsave_dbs_.clear(); }
+  std::set<std::string> bgsave_dbs_;
+};
+
+class CompactCmd : public Cmd {
+ public:
+  CompactCmd(const std::string& name, int arity, uint32_t flag)
+      : Cmd(name, arity, flag, static_cast<uint32_t>(AclCategory::ADMIN)) {}
+  void Do() override;
+  void Split(const HintKeys& hint_keys) override {};
+  void Merge() override {};
+  Cmd* Clone() override { return new CompactCmd(*this); }
+
+ private:
+  void DoInitial() override;
+  void Clear() override {
+    compact_dbs_.clear();
+  }
+  std::set<std::string> compact_dbs_;
+};
+
+// we can use pika/tests/helpers/test_queue.py to test this command
+class CompactRangeCmd : public Cmd {
+ public:
+  CompactRangeCmd(const std::string& name, int arity, uint32_t flag) : Cmd(name, arity, flag) {}
+  void Do() override;
+  void Split(const HintKeys& hint_keys) override {};
+  void Merge() override {};
+  Cmd* Clone() override { return new CompactRangeCmd(*this); }
+
+ private:
+  void DoInitial() override;
+  void Clear() override {
+    compact_dbs_.clear();
+    start_key_.clear();
+    end_key_.clear();
+  }
+  std::set<std::string> compact_dbs_;
+  std::string start_key_;
+  std::string end_key_;
+};
+
+class PurgelogstoCmd : public Cmd {
+ public:
+  PurgelogstoCmd(const std::string& name, int arity, uint32_t flag)
+      : Cmd(name, arity, flag, static_cast<uint32_t>(AclCategory::ADMIN)) {}
+  void Do() override;
+  void Split(const HintKeys& hint_keys) override {};
override {}; + void Merge() override {}; + Cmd* Clone() override { return new PurgelogstoCmd(*this); } + + private: + uint32_t num_ = 0; + std::string db_; + void DoInitial() override; +}; + +class PingCmd : public Cmd { + public: + PingCmd(const std::string& name, int arity, uint32_t flag) : Cmd(name, arity, flag) {} + void Do() override; + void Split(const HintKeys& hint_keys) override {}; + void Merge() override {}; + Cmd* Clone() override { return new PingCmd(*this); } + + private: + void DoInitial() override; +}; + +class SelectCmd : public Cmd { + public: + SelectCmd(const std::string& name, int arity, uint32_t flag) : Cmd(name, arity, flag) {} + void Do() override; + void Split(const HintKeys& hint_keys) override {}; + void Merge() override {}; + Cmd* Clone() override { return new SelectCmd(*this); } + + private: + void DoInitial() override; + void Clear() override { db_name_.clear(); } + std::string db_name_; +}; + +class FlushallCmd : public Cmd { + public: + FlushallCmd(const std::string& name, int arity, uint32_t flag) + : Cmd(name, arity, flag, static_cast(AclCategory::KEYSPACE)) {} + void Do() override; + void DoThroughDB() override; + void Split(const HintKeys& hint_keys) override{}; + void Merge() override{}; + Cmd* Clone() override { return new FlushallCmd(*this); } + bool FlushAllWithoutLock(); + void DoBinlog() override; + void DoBinlogByDB(const std::shared_ptr& sync_db); + + private: + void DoInitial() override; + bool DoWithoutLock(std::shared_ptr db); + void DoFlushCache(std::shared_ptr db); + void Clear() override { flushall_succeed_ = false; } + std::string ToRedisProtocol() override; + + bool flushall_succeed_{false}; +}; + +class FlushdbCmd : public Cmd { + public: + FlushdbCmd(const std::string& name, int arity, uint32_t flag) + : Cmd(name, arity, flag, static_cast(AclCategory::KEYSPACE)) {} + // The flush command belongs to the write categories, so the key cannot be empty + std::vector current_key() const override { return {""}; } + void Do() override; + void DoThroughDB() override; + void DoUpdateCache() override; + void Split(const HintKeys& hint_keys) override{}; + void Merge() override{}; + Cmd* Clone() override { return new FlushdbCmd(*this); } + std::string GetFlushDBname() { return db_name_; } + void DoBinlog() override; + bool DoWithoutLock(); + + private: + void DoInitial() override; + void Clear() override { + db_name_.clear(); + flush_succeed_ = false; + } + + bool flush_succeed_{false}; + std::string db_name_; +}; + +class ClientCmd : public Cmd { + public: + ClientCmd(const std::string& name, int arity, uint32_t flag) : Cmd(name, arity, flag) { + subCmdName_ = {"getname", "setname", "list", "addr", "kill"}; + } + void Do() override; + const static std::string CLIENT_LIST_S; + const static std::string CLIENT_KILL_S; + void Split(const HintKeys& hint_keys) override {}; + void Merge() override {}; + Cmd* Clone() override { return new ClientCmd(*this); } + + private: + const static std::string KILLTYPE_NORMAL; + const static std::string KILLTYPE_PUBSUB; + + std::string operation_, info_, kill_type_; + void DoInitial() override; +}; + +class InfoCmd : public Cmd { + public: + enum InfoSection { + kInfoErr = 0x0, + kInfoServer, + kInfoClients, + kInfoStats, + kInfoExecCount, + kInfoCPU, + kInfoReplication, + kInfoKeyspace, + kInfoLog, + kInfoData, + kInfoRocksDB, + kInfo, + kInfoAll, + kInfoDebug, + kInfoCommandStats, + kInfoCache + }; + InfoCmd(const std::string& name, int arity, uint32_t flag) : Cmd(name, arity, flag) {} + void Do() override; + void 
Split(const HintKeys& hint_keys) override {}; + void Merge() override {}; + Cmd* Clone() override { return new InfoCmd(*this); } + void Execute() override; + + private: + InfoSection info_section_; + bool rescan_ = false; // whether to rescan the keyspace + bool off_ = false; + std::set keyspace_scan_dbs_; + const static std::string kInfoSection; + const static std::string kAllSection; + const static std::string kServerSection; + const static std::string kClientsSection; + const static std::string kStatsSection; + const static std::string kExecCountSection; + const static std::string kCPUSection; + const static std::string kReplicationSection; + const static std::string kKeyspaceSection; + const static std::string kDataSection; + const static std::string kRocksDBSection; + const static std::string kDebugSection; + const static std::string kCommandStatsSection; + const static std::string kCacheSection; + + void DoInitial() override; + void Clear() override { + rescan_ = false; + off_ = false; + keyspace_scan_dbs_.clear(); + } + + void InfoServer(std::string& info); + void InfoClients(std::string& info); + void InfoStats(std::string& info); + void InfoExecCount(std::string& info); + void InfoCPU(std::string& info); + void InfoReplication(std::string& info); + void InfoKeyspace(std::string& info); + void InfoData(std::string& info); + void InfoRocksDB(std::string& info); + void InfoDebug(std::string& info); + void InfoCommandStats(std::string& info); + void InfoCache(std::string& info, std::shared_ptr db); + + std::string CacheStatusToString(int status); +}; + +class ShutdownCmd : public Cmd { + public: + ShutdownCmd(const std::string& name, int arity, uint32_t flag) + : Cmd(name, arity, flag, static_cast(AclCategory::ADMIN)) {} + void Do() override; + void Split(const HintKeys& hint_keys) override {}; + void Merge() override {}; + Cmd* Clone() override { return new ShutdownCmd(*this); } + + private: + void DoInitial() override; +}; + +class ConfigCmd : public Cmd { + public: + ConfigCmd(const std::string& name, int arity, uint32_t flag) + : Cmd(name, arity, flag, static_cast(AclCategory::ADMIN)) { + subCmdName_ = {"get", "set", "rewrite", "resetstat"}; + } + void Do() override; + void Split(const HintKeys& hint_keys) override {}; + void Merge() override {}; + Cmd* Clone() override { return new ConfigCmd(*this); } + void Execute() override; + + private: + std::vector config_args_v_; + void DoInitial() override; + void ConfigGet(std::string& ret); + void ConfigSet(std::shared_ptr db); + void ConfigRewrite(std::string& ret); + void ConfigResetstat(std::string& ret); + void ConfigRewriteReplicationID(std::string& ret); +}; + +class MonitorCmd : public Cmd { + public: + MonitorCmd(const std::string& name, int arity, uint32_t flag) + : Cmd(name, arity, flag, static_cast(AclCategory::ADMIN)) {} + void Do() override; + void Split(const HintKeys& hint_keys) override {}; + void Merge() override {}; + Cmd* Clone() override { return new MonitorCmd(*this); } + + private: + void DoInitial() override; +}; + +class DbsizeCmd : public Cmd { + public: + DbsizeCmd(const std::string& name, int arity, uint32_t flag) + : Cmd(name, arity, flag, static_cast(AclCategory::ADMIN)) {} + void Do() override; + void Split(const HintKeys& hint_keys) override {}; + void Merge() override {}; + Cmd* Clone() override { return new DbsizeCmd(*this); } + + private: + void DoInitial() override; +}; + +class TimeCmd : public Cmd { + public: + TimeCmd(const std::string& name, int arity, uint32_t flag) : Cmd(name, arity, flag) {} + 
void Do() override; + void Split(const HintKeys& hint_keys) override {}; + void Merge() override {}; + Cmd* Clone() override { return new TimeCmd(*this); } + + private: + void DoInitial() override; +}; + +class LastsaveCmd : public Cmd { + public: + LastsaveCmd(const std::string& name, int arity, uint32_t flag) : Cmd(name, arity, flag) {} + void Do() override; + void Split(const HintKeys& hint_keys) override {}; + void Merge() override {}; + Cmd* Clone() override { return new LastsaveCmd(*this); } + + private: + void DoInitial() override; +}; + +class DelbackupCmd : public Cmd { + public: + DelbackupCmd(const std::string& name, int arity, uint32_t flag) + : Cmd(name, arity, flag, static_cast(AclCategory::ADMIN)) {} + void Do() override; + void Split(const HintKeys& hint_keys) override {}; + void Merge() override {}; + Cmd* Clone() override { return new DelbackupCmd(*this); } + + private: + void DoInitial() override; +}; + +class EchoCmd : public Cmd { + public: + EchoCmd(const std::string& name, int arity, uint32_t flag) : Cmd(name, arity, flag) {} + void Merge() override{}; + void Do() override; + void Split(const HintKeys& hint_keys) override {}; + Cmd* Clone() override { return new EchoCmd(*this); } + + private: + std::string body_; + void DoInitial() override; +}; + +class ScandbCmd : public Cmd { + public: + ScandbCmd(const std::string& name, int arity, uint32_t flag) : Cmd(name, arity, flag) {} + void Do() override; + void Split(const HintKeys& hint_keys) override {}; + void Merge() override {}; + Cmd* Clone() override { return new ScandbCmd(*this); } + + private: + storage::DataType type_ = storage::DataType::kAll; + void DoInitial() override; + void Clear() override { type_ = storage::DataType::kAll; } +}; + +class SlowlogCmd : public Cmd { + public: + enum SlowlogCondition { kGET, kLEN, kRESET }; + SlowlogCmd(const std::string& name, int arity, uint32_t flag) + : Cmd(name, arity, flag, static_cast(AclCategory::ADMIN)) {} + void Do() override; + void Split(const HintKeys& hint_keys) override {}; + void Merge() override {}; + Cmd* Clone() override { return new SlowlogCmd(*this); } + + private: + int64_t number_ = 10; + SlowlogCmd::SlowlogCondition condition_ = kGET; + void DoInitial() override; + void Clear() override { + number_ = 10; + condition_ = kGET; + } +}; + +class PaddingCmd : public Cmd { + public: + PaddingCmd(const std::string& name, int arity, uint32_t flag) + : Cmd(name, arity, flag, static_cast(AclCategory::ADMIN)) {} + void Do() override; + void Split(const HintKeys& hint_keys) override {}; + void Merge() override {}; + Cmd* Clone() override { return new PaddingCmd(*this); } + + private: + void DoInitial() override; + std::string ToRedisProtocol() override; +}; + +class PKPatternMatchDelCmd : public Cmd { + public: + PKPatternMatchDelCmd(const std::string& name, int arity, uint32_t flag) + : Cmd(name, arity, flag, static_cast(AclCategory::ADMIN)) {} + void Do() override; + void DoThroughDB() override; + void DoUpdateCache() override; + void Split(const HintKeys& hint_keys) override {}; + void Merge() override {}; + Cmd* Clone() override { return new PKPatternMatchDelCmd(*this); } + void DoBinlog() override; + + private: + storage::DataType type_; + std::vector remove_keys_; + std::string pattern_; + int64_t max_count_; + void DoInitial() override; +}; + +class DummyCmd : public Cmd { + public: + DummyCmd() : Cmd("", 0, 0) {} + DummyCmd(const std::string& name, int arity, uint32_t flag) + : Cmd(name, arity, flag, static_cast(AclCategory::ADMIN)) {} + void Do() 
override; + void Split(const HintKeys& hint_keys) override {}; + void Merge() override {}; + Cmd* Clone() override { return new DummyCmd(*this); } + + private: + void DoInitial() override; +}; + +class QuitCmd : public Cmd { + public: + QuitCmd(const std::string& name, int arity, uint32_t flag) : Cmd(name, arity, flag) {} + void Do() override; + void Split(const HintKeys& hint_keys) override {}; + void Merge() override {}; + Cmd* Clone() override { return new QuitCmd(*this); } + + private: + void DoInitial() override; +}; + +class HelloCmd : public Cmd { + public: + HelloCmd(const std::string& name, int arity, uint32_t flag) : Cmd(name, arity, flag) {} + void Do() override; + void Split(const HintKeys& hint_keys) override {}; + void Merge() override {}; + Cmd* Clone() override { return new HelloCmd(*this); } + + private: + void DoInitial() override; +}; + +class DiskRecoveryCmd : public Cmd { + public: + DiskRecoveryCmd(const std::string& name, int arity, uint32_t flag) + : Cmd(name, arity, flag, static_cast(AclCategory::ADMIN)) {} + void Do() override; + void Split(const HintKeys& hint_keys) override {}; + void Merge() override {}; + Cmd* Clone() override { return new DiskRecoveryCmd(*this); } + + private: + void DoInitial() override; + std::map background_errors_; +}; + +class ClearReplicationIDCmd : public Cmd { + public: + ClearReplicationIDCmd(const std::string& name, int arity, uint32_t flag) + : Cmd(name, arity, flag, static_cast(AclCategory::ADMIN)) {} + void Do() override; + void Split(const HintKeys& hint_keys) override {}; + void Merge() override {}; + Cmd* Clone() override { return new ClearReplicationIDCmd(*this); } + + private: + void DoInitial() override; +}; + +class DisableWalCmd : public Cmd { + public: + DisableWalCmd(const std::string& name, int arity, uint32_t flag) : Cmd(name, arity, flag) {} + void Do() override; + void Split(const HintKeys& hint_keys) override {}; + void Merge() override {}; + Cmd* Clone() override { return new DisableWalCmd(*this); } + + private: + void DoInitial() override; +}; + +class CacheCmd : public Cmd { + public: + enum CacheCondition {kCLEAR_DB, kCLEAR_HITRATIO, kDEL_KEYS, kRANDOM_KEY}; + CacheCmd(const std::string& name, int arity, uint32_t flag) : Cmd(name, arity, flag) {} + void Do() override; + void Split(const HintKeys& hint_keys) override{}; + void Merge() override{}; + Cmd* Clone() override { return new CacheCmd(*this); } + + private: + CacheCondition condition_; + std::vector keys_; + rocksdb::Status s_; + void DoInitial() override; + void Clear() override { + keys_.clear(); + } +}; + +class ClearCacheCmd : public Cmd { + public: + ClearCacheCmd(const std::string& name, int arity, uint32_t flag) : Cmd(name, arity, flag) {} + void Do() override; + void Split(const HintKeys& hint_keys) override{}; + void Merge() override{}; + Cmd* Clone() override { return new ClearCacheCmd(*this); } + + private: + void DoInitial() override; +}; + +#ifdef WITH_COMMAND_DOCS +class CommandCmd : public Cmd { + public: + CommandCmd(const std::string& name, int arity, uint32_t flag) : Cmd(name, arity, flag) {} + void Do() override; + void Split(const HintKeys& hint_keys) override {}; + void Merge() override {}; + Cmd* Clone() override { return new CommandCmd(*this); } + + class CommandFieldCompare { + public: + CommandFieldCompare() = default; + bool operator()(const std::string&, const std::string&) const; + + private: + const static std::unordered_map kFieldNameOrder; + }; + + class Encodable; + using EncodablePtr = std::shared_ptr; + + class Encodable 
{ + public: + friend CmdRes& operator<<(CmdRes& res, const Encodable& e) { return e.EncodeTo(res); } + EncodablePtr operator+(const EncodablePtr& other) { return MergeFrom(other); } + + protected: + virtual CmdRes& EncodeTo(CmdRes&) const = 0; + virtual EncodablePtr MergeFrom(const EncodablePtr& other) const = 0; + }; + + class EncodableInt : public Encodable { + public: + EncodableInt(int value) : value_(value) {} + EncodableInt(unsigned long long value) : value_(value) {} + + protected: + CmdRes& EncodeTo(CmdRes& res) const override; + EncodablePtr MergeFrom(const EncodablePtr& other) const override; + + private: + int value_; + }; + + class EncodableString : public Encodable { + public: + EncodableString(std::string value) : value_(std::move(value)) {} + + protected: + CmdRes& EncodeTo(CmdRes& res) const override; + EncodablePtr MergeFrom(const EncodablePtr& other) const override; + + private: + std::string value_; + }; + + class EncodableMap : public Encodable { + public: + using RedisMap = std::map; + EncodableMap(RedisMap values) : values_(std::move(values)) {} + template + static CmdRes& EncodeTo(CmdRes& res, const Map& map, const Map& specialization = Map()); + + protected: + CmdRes& EncodeTo(CmdRes& res) const override; + EncodablePtr MergeFrom(const EncodablePtr& other) const override; + + private: + RedisMap values_; + + const static std::string kPrefix; + }; + + class EncodableSet : public Encodable { + public: + EncodableSet(std::vector values) : values_(std::move(values)) {} + + protected: + CmdRes& EncodeTo(CmdRes& res) const override; + EncodablePtr MergeFrom(const EncodablePtr& other) const override; + + private: + std::vector values_; + + const static std::string kPrefix; + }; + + class EncodableArray : public Encodable { + public: + EncodableArray(std::vector values) : values_(std::move(values)) {} + + protected: + CmdRes& EncodeTo(CmdRes& res) const override; + EncodablePtr MergeFrom(const EncodablePtr& other) const override; + + private: + std::vector values_; + }; + + class EncodableStatus : public Encodable { + public: + EncodableStatus(std::string value) : value_(std::move(value)) {} + + protected: + CmdRes& EncodeTo(CmdRes& res) const override; + EncodablePtr MergeFrom(const EncodablePtr& other) const override; + + private: + std::string value_; + + const static std::string kPrefix; + }; + + private: + void DoInitial() override; + + std::string command_; + std::vector::const_iterator cmds_begin_, cmds_end_; + + const static std::string kPikaField; + const static EncodablePtr kNotSupportedLiteral; + const static EncodablePtr kCompatibleLiteral; + const static EncodablePtr kBitSpecLiteral; + const static EncodablePtr kHyperLogLiteral; + const static EncodablePtr kPubSubLiteral; + + const static EncodablePtr kNotSupportedSpecialization; + const static EncodablePtr kCompatibleSpecialization; + const static EncodablePtr kBitSpecialization; + const static EncodablePtr kHyperLogSpecialization; + const static EncodablePtr kPubSubSpecialization; + + const static std::unordered_map kPikaSpecialization; + const static std::unordered_map kCommandDocs; +}; + +static CommandCmd::EncodablePtr operator""_RedisInt(unsigned long long value); +static CommandCmd::EncodablePtr operator""_RedisString(const char* value); +static CommandCmd::EncodablePtr operator""_RedisStatus(const char* value); +static CommandCmd::EncodablePtr RedisMap(CommandCmd::EncodableMap::RedisMap values); +static CommandCmd::EncodablePtr RedisSet(std::vector values); +static CommandCmd::EncodablePtr 
RedisArray(std::vector<CommandCmd::EncodablePtr> values);
+
+#endif  // WITH_COMMAND_DOCS
+
+#endif  // PIKA_ADMIN_H_
diff --git a/tools/pika_migrate/include/pika_auxiliary_thread.h b/tools/pika_migrate/include/pika_auxiliary_thread.h
new file mode 100644
index 0000000000..ab0fa6aea2
--- /dev/null
+++ b/tools/pika_migrate/include/pika_auxiliary_thread.h
@@ -0,0 +1,24 @@
+// Copyright (c) 2019-present, Qihoo, Inc. All rights reserved.
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree. An additional grant
+// of patent rights can be found in the PATENTS file in the same directory.
+
+#ifndef PIKA_AUXILIARY_THREAD_H_
+#define PIKA_AUXILIARY_THREAD_H_
+
+#include "net/include/net_thread.h"
+
+#include "pstd/include/pstd_mutex.h"
+
+class PikaAuxiliaryThread : public net::Thread {
+ public:
+  PikaAuxiliaryThread() { set_thread_name("AuxiliaryThread"); }
+  ~PikaAuxiliaryThread() override;
+  pstd::Mutex mu_;
+  pstd::CondVar cv_;
+
+ private:
+  void* ThreadMain() override;
+};
+
+#endif
diff --git a/tools/pika_migrate/include/pika_binlog.h b/tools/pika_migrate/include/pika_binlog.h
new file mode 100644
index 0000000000..851de88746
--- /dev/null
+++ b/tools/pika_migrate/include/pika_binlog.h
@@ -0,0 +1,113 @@
+// Copyright (c) 2015-present, Qihoo, Inc. All rights reserved.
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree. An additional grant
+// of patent rights can be found in the PATENTS file in the same directory.
+
+#ifndef PIKA_BINLOG_H_
+#define PIKA_BINLOG_H_
+
+#include <atomic>
+
+#include "pstd/include/env.h"
+#include "pstd/include/pstd_mutex.h"
+#include "pstd/include/pstd_status.h"
+#include "pstd/include/noncopyable.h"
+#include "include/pika_define.h"
+
+std::string NewFileName(const std::string& name, uint32_t current);
+
+class Version final : public pstd::noncopyable {
+ public:
+  Version(const std::shared_ptr<pstd::RWFile>& save);
+  ~Version();
+
+  pstd::Status Init();
+
+  // rwlock_ should be held when accessing members.
+  pstd::Status StableSave();
+
+  uint32_t pro_num_ = 0;
+  uint64_t pro_offset_ = 0;
+  uint64_t logic_id_ = 0;
+  uint32_t term_ = 0;
+
+  std::shared_mutex rwlock_;
+
+  void debug() {
+    std::shared_lock l(rwlock_);
+    printf("Current pro_num %u pro_offset %llu\n", pro_num_, pro_offset_);
+  }
+
+ private:
+  // shared with versionfile_
+  std::shared_ptr<pstd::RWFile> save_;
+};
+
+class Binlog : public pstd::noncopyable {
+ public:
+  Binlog(std::string Binlog_path, int file_size = 100 * 1024 * 1024);
+  ~Binlog();
+
+  void Lock() { mutex_.lock(); }
+  void Unlock() { mutex_.unlock(); }
+
+  pstd::Status Put(const std::string& item);
+  pstd::Status IsOpened();
+  pstd::Status GetProducerStatus(uint32_t* filenum, uint64_t* pro_offset, uint32_t* term = nullptr, uint64_t* logic_id = nullptr);
+  /*
+   * Set producer pro_num and pro_offset with lock held
+   */
+  pstd::Status SetProducerStatus(uint32_t pro_num, uint64_t pro_offset, uint32_t term = 0, uint64_t index = 0);
+  // Need to hold Lock();
+  pstd::Status Truncate(uint32_t pro_num, uint64_t pro_offset, uint64_t index);
+
+  std::string filename() { return filename_; }
+
+  // need to hold mutex_
+  void SetTerm(uint32_t term) {
+    std::lock_guard l(version_->rwlock_);
+    version_->term_ = term;
+    version_->StableSave();
+  }
+
+  uint32_t term() {
+    std::shared_lock l(version_->rwlock_);
+    return version_->term_;
+  }
+
+  void Close();
+
+ private:
+  pstd::Status Put(const char* item, int len);
+  pstd::Status EmitPhysicalRecord(RecordType t, const char* ptr, size_t n, int* temp_pro_offset);
+  static pstd::Status AppendPadding(pstd::WritableFile* file, uint64_t* len);
+  void InitLogFile();
+
+  /*
+   * Produce
+   */
+  pstd::Status Produce(const pstd::Slice& item, int* pro_offset);
+
+  std::atomic<bool> opened_;
+
+  std::unique_ptr<Version> version_;
+  std::unique_ptr<pstd::WritableFile> queue_;
+  // versionfile_ must remain a shared_ptr: Version keeps a reference to it
+  // (as save_) that is still used inside ~Version().
+  std::shared_ptr<pstd::RWFile> versionfile_;
+
+  pstd::Mutex mutex_;
+
+  uint32_t pro_num_ = 0;
+
+  int block_offset_ = 0;
+
+  const std::string binlog_path_;
+
+  uint64_t file_size_ = 0;
+
+  std::string filename_;
+
+  std::atomic<bool> binlog_io_error_;
+};
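+
+// Usage sketch (illustrative only): a producer appends one encoded item and
+// reads back its position; Put() rolls to the next file once the current one
+// exceeds file_size:
+//
+//   Binlog log("./log", 100 * 1024 * 1024);
+//   log.Put(encoded_item);                      // encoded_item: std::string
+//   uint32_t filenum = 0;
+//   uint64_t offset = 0;
+//   log.GetProducerStatus(&filenum, &offset);   // where the next write lands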
+
+#endif
diff --git a/tools/pika_migrate/include/pika_binlog_reader.h b/tools/pika_migrate/include/pika_binlog_reader.h
new file mode 100644
index 0000000000..1d604b02f7
--- /dev/null
+++ b/tools/pika_migrate/include/pika_binlog_reader.h
@@ -0,0 +1,48 @@
+// Copyright (c) 2015-present, Qihoo, Inc. All rights reserved.
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree. An additional grant
+// of patent rights can be found in the PATENTS file in the same directory.
+
+#ifndef PIKA_BINLOG_READER_H_
+#define PIKA_BINLOG_READER_H_
+
+#include <memory>
+#include <shared_mutex>
+#include <string>
+
+#include "pstd/include/env.h"
+#include "pstd/include/pstd_slice.h"
+#include "pstd/include/pstd_status.h"
+
+#include "include/pika_binlog.h"
+
+class PikaBinlogReader {
+ public:
+  PikaBinlogReader(uint32_t cur_filenum, uint64_t cur_offset);
+  PikaBinlogReader();
+  ~PikaBinlogReader() = default;
+
+  pstd::Status Get(std::string* scratch, uint32_t* filenum, uint64_t* offset);
+  int Seek(const std::shared_ptr<Binlog>& logger, uint32_t filenum, uint64_t offset);
+  bool ReadToTheEnd();
+  void GetReaderStatus(uint32_t* cur_filenum, uint64_t* cur_offset);
+
+ private:
+  bool GetNext(uint64_t* size);
+  unsigned int ReadPhysicalRecord(pstd::Slice* result, uint32_t* filenum, uint64_t* offset);
+  // Returns the binlog content in scratch and the corresponding offset
+  pstd::Status Consume(std::string* scratch, uint32_t* filenum, uint64_t* offset);
+
+  std::shared_mutex rwlock_;
+  uint32_t cur_filenum_ = 0;
+  uint64_t cur_offset_ = 0;
+  uint64_t last_record_offset_ = 0;
+
+  std::shared_ptr<Binlog> logger_;
+  std::unique_ptr<pstd::SequentialFile> queue_;
+
+  std::unique_ptr<char[]> const backing_store_;
+  pstd::Slice buffer_;
+};
+
+#endif  // PIKA_BINLOG_READER_H_
diff --git a/tools/pika_migrate/include/pika_binlog_transverter.h b/tools/pika_migrate/include/pika_binlog_transverter.h
new file mode 100644
index 0000000000..d85d958667
--- /dev/null
+++ b/tools/pika_migrate/include/pika_binlog_transverter.h
@@ -0,0 +1,77 @@
+// Copyright (c) 2018-present, Qihoo, Inc. All rights reserved.
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree. An additional grant
+// of patent rights can be found in the PATENTS file in the same directory.
+
+#ifndef PIKA_BINLOG_TRANSVERTER_H_
+#define PIKA_BINLOG_TRANSVERTER_H_
+
+#include <cstdint>
+#include <iostream>
+#include <string>
+#include <vector>
+
+/******************* Type First Binlog Item Format ******************
+ * +-----------------------------------------------------------------+
+ * | Type (2 bytes) | Create Time (4 bytes) | Term Id (4 bytes)      |
+ * |-----------------------------------------------------------------|
+ * | Logic Id (8 bytes) | File Num (4 bytes) | Offset (8 bytes)      |
+ * |-----------------------------------------------------------------|
+ * | Content Length (4 bytes) | Content (content length bytes)       |
+ * +-----------------------------------------------------------------+
+ */
+#define BINLOG_ENCODE_LEN 34
+
+enum BinlogType {
+  TypeFirst = 1,
+};
+
+const int BINLOG_ITEM_HEADER_SIZE = 34;
+const int PADDING_BINLOG_PROTOCOL_SIZE = 22;
+const int SPACE_STROE_PARAMETER_LENGTH = 5;
+
+class BinlogItem {
+ public:
+  BinlogItem() = default;
+
+  friend class PikaBinlogTransverter;
+
+  uint32_t exec_time() const;
+  uint32_t term_id() const;
+  uint64_t logic_id() const;
+  uint32_t filenum() const;
+  uint64_t offset() const;
+  std::string content() const;
+  std::string ToString() const;
+
+  void set_exec_time(uint32_t exec_time);
+  void set_term_id(uint32_t term_id);
+  void set_logic_id(uint64_t logic_id);
+  void set_filenum(uint32_t filenum);
+  void set_offset(uint64_t offset);
+
+ private:
+  uint32_t exec_time_ = 0;
+  uint32_t term_id_ = 0;
+  uint64_t logic_id_ = 0;
+  uint32_t filenum_ = 0;
+  uint64_t offset_ = 0;
+  std::string content_;
+  std::vector<std::string> extends_;
+};
+
+class PikaBinlogTransverter {
+ public:
+  PikaBinlogTransverter() = default;
+  static std::string BinlogEncode(BinlogType type, uint32_t exec_time, uint32_t term_id, uint64_t logic_id,
+                                  uint32_t filenum, uint64_t offset, const std::string& content,
+                                  const std::vector<std::string>& extends);
+
+  static bool BinlogDecode(BinlogType type, const std::string& binlog, BinlogItem* binlog_item);
+
+  static std::string ConstructPaddingBinlog(BinlogType type, uint32_t size);
+
+  static bool BinlogItemWithoutContentDecode(BinlogType type, const std::string& binlog, BinlogItem* binlog_item);
+};
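+
+// Round-trip sketch (illustrative only; term_id, logic_id, filenum and offset
+// are assumed to be tracked by the caller):
+//
+//   std::string binlog = PikaBinlogTransverter::BinlogEncode(
+//       BinlogType::TypeFirst, static_cast<uint32_t>(time(nullptr)),
+//       term_id, logic_id, filenum, offset, content, {});
+//   BinlogItem item;
+//   PikaBinlogTransverter::BinlogDecode(BinlogType::TypeFirst, binlog, &item);
+//   // item.content() == content; the fixed header adds
+//   // BINLOG_ITEM_HEADER_SIZE (34) bytes: 2 + 4 + 4 + 8 + 4 + 8 + 4.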
+
+#endif
diff --git a/tools/pika_migrate/include/pika_bit.h b/tools/pika_migrate/include/pika_bit.h
new file mode 100644
index 0000000000..94e7767b16
--- /dev/null
+++ b/tools/pika_migrate/include/pika_bit.h
@@ -0,0 +1,182 @@
+// Copyright (c) 2015-present, Qihoo, Inc. All rights reserved.
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree. An additional grant
+// of patent rights can be found in the PATENTS file in the same directory.
+
+#ifndef PIKA_BIT_H_
+#define PIKA_BIT_H_
+
+#include "storage/storage.h"
+
+#include "include/acl.h"
+#include "include/pika_command.h"
+#include "include/pika_kv.h"
+
+/*
+ * bitoperation
+ */
+class BitGetCmd : public Cmd {
+ public:
+  BitGetCmd(const std::string& name, int arity, uint32_t flag)
+      : Cmd(name, arity, flag, static_cast<uint32_t>(AclCategory::BITMAP)){};
+  std::vector<std::string> current_key() const override {
+    std::vector<std::string> res;
+    res.push_back(key_);
+    return res;
+  }
+  void Do() override;
+  void ReadCache() override;
+  void DoUpdateCache() override;
+  void DoThroughDB() override;
+  void Split(const HintKeys& hint_keys) override {};
+  void Merge() override {};
+  Cmd* Clone() override { return new BitGetCmd(*this); }
+
+ private:
+  std::string key_;
+  int64_t bit_offset_ = -1;
+  rocksdb::Status s_;
+  void Clear() override {
+    key_ = "";
+    bit_offset_ = -1;
+  }
+  void DoInitial() override;
+};
+
+class BitSetCmd : public Cmd {
+ public:
+  BitSetCmd(const std::string& name, int arity, uint32_t flag)
+      : Cmd(name, arity, flag, static_cast<uint32_t>(AclCategory::BITMAP)){};
+  std::vector<std::string> current_key() const override {
+    std::vector<std::string> res;
+    res.push_back(key_);
+    return res;
+  }
+  void Do() override;
+  void DoUpdateCache() override;
+  void DoThroughDB() override;
+  void Split(const HintKeys& hint_keys) override {};
+  void Merge() override {};
+  Cmd* Clone() override { return new BitSetCmd(*this); }
+
+ private:
+  std::string key_;
+  int64_t bit_offset_;
+  int64_t on_;
+  rocksdb::Status s_;
+  void Clear() override {
+    key_ = "";
+    bit_offset_ = -1;
+    on_ = -1;
+  }
+  void DoInitial() override;
+};
+
+class BitCountCmd : public Cmd {
+ public:
+  BitCountCmd(const std::string& name, int arity, uint32_t flag)
+      : Cmd(name, arity, flag, static_cast<uint32_t>(AclCategory::BITMAP)){};
+  std::vector<std::string> current_key() const override {
+    std::vector<std::string> res;
+    res.push_back(key_);
+    return res;
+  }
+  void Do() override;
+  void ReadCache() override;
+  void DoUpdateCache() override;
+  void DoThroughDB() override;
+  void Split(const HintKeys& hint_keys) override {};
+  void Merge() override {};
+  Cmd* Clone() override { return new BitCountCmd(*this); }
+
+ private:
+  std::string key_;
+  bool count_all_;
+  int64_t start_offset_;
+  int64_t end_offset_;
+  rocksdb::Status s_;
+  void Clear() override {
+    key_ = "";
+    count_all_ = false;
+    start_offset_ = -1;
+    end_offset_ = -1;
+  }
+  void DoInitial() override;
+};
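+
+// Offset semantics follow Redis: SETBIT/GETBIT address single bits (bit 0 is
+// the most significant bit of byte 0), while BITCOUNT/BITPOS ranges are byte
+// offsets and may be negative to count from the end of the value.
+// Illustrative session (assuming key k holds "\xff"):
+//
+//   SETBIT k 0 0    -> replies 1 (the old bit); k becomes "\x7f"
+//   BITCOUNT k      -> 7
+//   BITPOS k 0      -> 0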
+
+class BitPosCmd : public Cmd {
+ public:
+  BitPosCmd(const std::string& name, int arity, uint32_t flag)
+      : Cmd(name, arity, flag, static_cast<uint32_t>(AclCategory::BITMAP)){};
+  std::vector<std::string> current_key() const override {
+    std::vector<std::string> res;
+    res.push_back(key_);
+    return res;
+  }
+  void Do() override;
+  void ReadCache() override;
+  void DoUpdateCache() override;
+  void DoThroughDB() override;
+  void Split(const HintKeys& hint_keys) override {};
+  void Merge() override {};
+  Cmd* Clone() override { return new BitPosCmd(*this); }
+
+ private:
+  std::string key_;
+  bool pos_all_;
+  bool endoffset_set_;
+  int64_t bit_val_;
+  int64_t start_offset_;
+  int64_t end_offset_;
+  rocksdb::Status s_;
+  void Clear() override {
+    key_ = "";
+    pos_all_ = false;
+    endoffset_set_ = false;
+    bit_val_ = -1;
+    start_offset_ = -1;
+    end_offset_ = -1;
+  }
+  void DoInitial() override;
+};
+
+class BitOpCmd : public Cmd {
+ public:
+  BitOpCmd(const std::string& name, int arity, uint32_t flag)
+      : Cmd(name, arity, flag, static_cast<uint32_t>(AclCategory::BITMAP)) {
+    set_cmd_ = std::make_shared<SetCmd>(kCmdNameSet, -3, kCmdFlagsWrite | kCmdFlagsKv);
+  };
+  BitOpCmd(const BitOpCmd& other)
+      : Cmd(other),
+        dest_key_(other.dest_key_),
+        src_keys_(other.src_keys_),
+        op_(other.op_),
+        value_to_dest_(other.value_to_dest_) {
+    set_cmd_ = std::make_shared<SetCmd>(kCmdNameSet, -3, kCmdFlagsWrite | kCmdFlagsKv);
+  }
+
+  std::vector<std::string> current_key() const override { return {dest_key_}; }
+  void Do() override;
+  void DoUpdateCache() override;
+  void DoThroughDB() override;
+  void Split(const HintKeys& hint_keys) override{};
+  void Merge() override{};
+  Cmd* Clone() override { return new BitOpCmd(*this); }
+  void DoBinlog() override;
+
+ private:
+  std::string dest_key_;
+  rocksdb::Status s_;
+  std::vector<std::string> src_keys_;
+  storage::BitOpType op_;
+  void Clear() override {
+    dest_key_ = "";
+    src_keys_.clear();
+    op_ = storage::kBitOpDefault;
+  }
+  void DoInitial() override;
+  // used to write binlog
+  std::string value_to_dest_;
+  std::shared_ptr<SetCmd> set_cmd_;
+};
+#endif
diff --git a/tools/pika_migrate/include/pika_cache.h b/tools/pika_migrate/include/pika_cache.h
new file mode 100644
index 0000000000..d82627ced7
--- /dev/null
+++ b/tools/pika_migrate/include/pika_cache.h
@@ -0,0 +1,226 @@
+// Copyright (c) 2023-present, Qihoo, Inc. All rights reserved.
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree. An additional grant
+// of patent rights can be found in the PATENTS file in the same directory.
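+
+// PikaCache shards hot keys across cache_num RedisCache instances selected by
+// CacheIndex(key); reads consult the cache first and fall back to RocksDB,
+// queueing the key for asynchronous loading on a miss. Illustrative read path
+// (simplified sketch; 'k' is assumed here to tag a string/kv key):
+//
+//   std::string value;
+//   rocksdb::Status s = cache->Get(key, &value);      // try the cache
+//   if (!s.ok()) {
+//     s = db->storage()->Get(key, &value);            // fall back to RocksDB
+//     cache->PushKeyToAsyncLoadQueue('k', key, db);   // warm the cache
+//   }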
+ +#ifndef PIKA_CACHE_H_ +#define PIKA_CACHE_H_ + +#include +#include +#include + +#include "include/pika_define.h" +#include "include/pika_zset.h" +#include "include/pika_command.h" +#include "pstd/include/pstd_mutex.h" +#include "pstd/include/pstd_status.h" +#include "cache/include/cache.h" +#include "storage/storage.h" + +class PikaCacheLoadThread; +class ZIncrbyCmd; +class ZRangebyscoreCmd; +class ZRevrangebyscoreCmd; +class ZCountCmd; +enum RangeStatus { RangeError = 1, RangeHit, RangeMiss }; + +struct CacheInfo { + int status = PIKA_CACHE_STATUS_NONE; + uint32_t cache_num = 0; + int64_t keys_num = 0; + size_t used_memory = 0; + int64_t hits = 0; + int64_t misses = 0; + uint64_t async_load_keys_num = 0; + uint32_t waitting_load_keys_num = 0; + void clear() { + status = PIKA_CACHE_STATUS_NONE; + cache_num = 0; + keys_num = 0; + used_memory = 0; + hits = 0; + misses = 0; + async_load_keys_num = 0; + waitting_load_keys_num = 0; + } +}; + +class PikaCache : public pstd::noncopyable, public std::enable_shared_from_this { + public: + PikaCache(int zset_cache_start_direction, int zset_cache_field_num_per_key); + ~PikaCache(); + + rocksdb::Status Init(uint32_t cache_num, cache::CacheConfig *cache_cfg); + rocksdb::Status Reset(uint32_t cache_num, cache::CacheConfig *cache_cfg = nullptr); + int64_t TTL(std::string &key); + void ResetConfig(cache::CacheConfig *cache_cfg); + void Destroy(void); + void SetCacheStatus(int status); + int CacheStatus(void); + void ClearHitRatio(void); + // Normal Commands + void Info(CacheInfo& info); + bool Exists(std::string& key); + void FlushCache(void); + void ProcessCronTask(void); + + rocksdb::Status Del(const std::vector& keys); + rocksdb::Status Expire(std::string& key, int64_t ttl); + rocksdb::Status Expireat(std::string& key, int64_t ttl_sec); + rocksdb::Status TTL(std::string& key, int64_t* ttl); + rocksdb::Status Persist(std::string& key); + rocksdb::Status Type(std::string& key, std::string* value); + rocksdb::Status RandomKey(std::string* key); + rocksdb::Status GetType(const std::string& key, bool single, std::vector& types); + + // String Commands + rocksdb::Status Set(std::string& key, std::string& value, int64_t ttl); + rocksdb::Status Setnx(std::string& key, std::string& value, int64_t ttl); + rocksdb::Status SetnxWithoutTTL(std::string& key, std::string& value); + rocksdb::Status Setxx(std::string& key, std::string& value, int64_t ttl); + rocksdb::Status SetxxWithoutTTL(std::string& key, std::string& value); + rocksdb::Status MSet(const std::vector& kvs); + rocksdb::Status Get(std::string& key, std::string* value); + rocksdb::Status MGet(const std::vector& keys, std::vector* vss); + rocksdb::Status Incrxx(std::string& key); + rocksdb::Status Decrxx(std::string& key); + rocksdb::Status IncrByxx(std::string& key, uint64_t incr); + rocksdb::Status DecrByxx(std::string& key, uint64_t incr); + rocksdb::Status Incrbyfloatxx(std::string& key, long double incr); + rocksdb::Status Appendxx(std::string& key, std::string& value); + rocksdb::Status GetRange(std::string& key, int64_t start, int64_t end, std::string* value); + rocksdb::Status SetRangexx(std::string& key, int64_t start, std::string& value); + rocksdb::Status Strlen(std::string& key, int32_t* len); + + // Hash Commands + rocksdb::Status HDel(std::string& key, std::vector& fields); + rocksdb::Status HSet(std::string& key, std::string& field, std::string& value); + rocksdb::Status HSetIfKeyExist(std::string& key, std::string& field, std::string& value); + rocksdb::Status 
HSetIfKeyExistAndFieldNotExist(std::string& key, std::string& field, std::string& value); + rocksdb::Status HMSet(std::string& key, std::vector& fvs); + rocksdb::Status HMSetnx(std::string& key, std::vector& fvs, int64_t ttl); + rocksdb::Status HMSetnxWithoutTTL(std::string& key, std::vector& fvs); + rocksdb::Status HMSetxx(std::string& key, std::vector& fvs); + rocksdb::Status HGet(std::string& key, std::string& field, std::string* value); + rocksdb::Status HMGet(std::string& key, std::vector& fields, std::vector* vss); + rocksdb::Status HGetall(std::string& key, std::vector* fvs); + rocksdb::Status HKeys(std::string& key, std::vector* fields); + rocksdb::Status HVals(std::string& key, std::vector* values); + rocksdb::Status HExists(std::string& key, std::string& field); + rocksdb::Status HIncrbyxx(std::string& key, std::string& field, int64_t value); + rocksdb::Status HIncrbyfloatxx(std::string& key, std::string& field, long double value); + rocksdb::Status HLen(std::string& key, uint64_t* len); + rocksdb::Status HStrlen(std::string& key, std::string& field, uint64_t* len); + + // List Commands + rocksdb::Status LIndex(std::string& key, int64_t index, std::string* element); + rocksdb::Status LInsert(std::string& key, storage::BeforeOrAfter& before_or_after, std::string& pivot, std::string& value); + rocksdb::Status LLen(std::string& key, uint64_t* len); + rocksdb::Status LPop(std::string& key, std::string* element); + rocksdb::Status LPush(std::string& key, std::vector &values); + rocksdb::Status LPushx(std::string& key, std::vector &values); + rocksdb::Status LRange(std::string& key, int64_t start, int64_t stop, std::vector* values); + rocksdb::Status LRem(std::string& key, int64_t count, std::string& value); + rocksdb::Status LSet(std::string& key, int64_t index, std::string& value); + rocksdb::Status LTrim(std::string& key, int64_t start, int64_t stop); + rocksdb::Status RPop(std::string& key, std::string* element); + rocksdb::Status RPush(std::string& key, std::vector &values); + rocksdb::Status RPushx(std::string& key, std::vector &values); + rocksdb::Status RPushnx(std::string& key, std::vector &values, int64_t ttl); + rocksdb::Status RPushnxWithoutTTL(std::string& key, std::vector &values); + + // Set Commands + rocksdb::Status SAdd(std::string& key, std::vector& members); + rocksdb::Status SAddIfKeyExist(std::string& key, std::vector& members); + rocksdb::Status SAddnx(std::string& key, std::vector& members, int64_t ttl); + rocksdb::Status SAddnxWithoutTTL(std::string& key, std::vector& members); + rocksdb::Status SCard(std::string& key, uint64_t* len); + rocksdb::Status SIsmember(std::string& key, std::string& member); + rocksdb::Status SMembers(std::string& key, std::vector* members); + rocksdb::Status SRem(std::string& key, std::vector& members); + rocksdb::Status SRandmember(std::string& key, int64_t count, std::vector* members); + + // ZSet Commands + rocksdb::Status ZAdd(std::string& key, std::vector& score_members); + rocksdb::Status ZAddIfKeyExist(std::string& key, std::vector& score_members); + rocksdb::Status ZAddnx(std::string& key, std::vector& score_members, int64_t ttl); + rocksdb::Status ZAddnxWithoutTTL(std::string& key, std::vector& score_members); + rocksdb::Status ZCard(std::string& key, uint32_t* len, const std::shared_ptr& db); + rocksdb::Status ZCount(std::string& key, std::string& min, std::string& max, uint64_t* len, ZCountCmd* cmd); + rocksdb::Status ZIncrby(std::string& key, std::string& member, double increment); + rocksdb::Status 
ZIncrbyIfKeyExist(std::string& key, std::string& member, double increment, ZIncrbyCmd* cmd, const std::shared_ptr& db); + rocksdb::Status ZRange(std::string& key, int64_t start, int64_t stop, std::vector* score_members, + const std::shared_ptr& db); + rocksdb::Status ZRangebyscore(std::string& key, std::string& min, std::string& max, + std::vector* score_members, ZRangebyscoreCmd* cmd); + rocksdb::Status ZRank(std::string& key, std::string& member, int64_t* rank, const std::shared_ptr& db); + rocksdb::Status ZRem(std::string& key, std::vector& members, std::shared_ptr db); + rocksdb::Status ZRemrangebyrank(std::string& key, std::string& min, std::string& max, int32_t ele_deleted = 0, + const std::shared_ptr& db = nullptr); + rocksdb::Status ZRemrangebyscore(std::string& key, std::string& min, std::string& max, const std::shared_ptr& db); + rocksdb::Status ZRevrange(std::string& key, int64_t start, int64_t stop, std::vector* score_members, + const std::shared_ptr& db); + rocksdb::Status ZRevrangebyscore(std::string& key, std::string& min, std::string& max, + std::vector* score_members, ZRevrangebyscoreCmd* cmd, + const std::shared_ptr& db); + rocksdb::Status ZRevrangebylex(std::string& key, std::string& min, std::string& max, std::vector* members, + const std::shared_ptr& db); + rocksdb::Status ZRevrank(std::string& key, std::string& member, int64_t *rank, const std::shared_ptr& db); + rocksdb::Status ZScore(std::string& key, std::string& member, double* score, const std::shared_ptr& db); + rocksdb::Status ZRangebylex(std::string& key, std::string& min, std::string& max, std::vector* members, const std::shared_ptr& db); + rocksdb::Status ZLexcount(std::string& key, std::string& min, std::string& max, uint64_t* len, + const std::shared_ptr& db); + rocksdb::Status ZRemrangebylex(std::string& key, std::string& min, std::string& max, const std::shared_ptr& db); + + // Bit Commands + rocksdb::Status SetBit(std::string& key, size_t offset, int64_t value); + rocksdb::Status SetBitIfKeyExist(std::string& key, size_t offset, int64_t value); + rocksdb::Status GetBit(std::string& key, size_t offset, int64_t* value); + rocksdb::Status BitCount(std::string& key, int64_t start, int64_t end, int64_t* value, bool have_offset); + rocksdb::Status BitPos(std::string& key, int64_t bit, int64_t* value); + rocksdb::Status BitPos(std::string& key, int64_t bit, int64_t start, int64_t* value); + rocksdb::Status BitPos(std::string& key, int64_t bit, int64_t start, int64_t end, int64_t* value); + + // Cache + rocksdb::Status WriteKVToCache(std::string& key, std::string& value, int64_t ttl); + rocksdb::Status WriteHashToCache(std::string& key, std::vector& fvs, int64_t ttl); + rocksdb::Status WriteListToCache(std::string& key, std::vector &values, int64_t ttl); + rocksdb::Status WriteSetToCache(std::string& key, std::vector& members, int64_t ttl); + rocksdb::Status WriteZSetToCache(std::string& key, std::vector& score_members, int64_t ttl); + void PushKeyToAsyncLoadQueue(const char key_type, std::string& key, const std::shared_ptr& db); + rocksdb::Status CacheZCard(std::string& key, uint64_t* len); + + private: + + rocksdb::Status InitWithoutLock(uint32_t cache_num, cache::CacheConfig* cache_cfg); + void DestroyWithoutLock(void); + int CacheIndex(const std::string& key); + RangeStatus CheckCacheRange(int32_t cache_len, int32_t db_len, int64_t start, int64_t stop, int64_t& out_start, + int64_t& out_stop); + RangeStatus CheckCacheRevRange(int32_t cache_len, int32_t db_len, int64_t start, int64_t stop, int64_t& 
out_start, + int64_t& out_stop); + RangeStatus CheckCacheRangeByScore(uint64_t cache_len, double cache_min, double cache_max, double min, + double max, bool left_close, bool right_close); + bool CacheSizeEqsDB(std::string& key, const std::shared_ptr& db); + void GetMinMaxScore(std::vector& score_members, double &min, double &max); + bool GetCacheMinMaxSM(cache::RedisCache* cache_obj, std::string& key, storage::ScoreMember &min_m, + storage::ScoreMember &max_m); + bool ReloadCacheKeyIfNeeded(cache::RedisCache* cache_obj, std::string& key, int mem_len = -1, int db_len = -1, + const std::shared_ptr& db = nullptr); + rocksdb::Status CleanCacheKeyIfNeeded(cache::RedisCache* cache_obj, std::string& key); + + private: + std::atomic cache_status_; + uint32_t cache_num_ = 0; + + // currently only take effects to zset + int zset_cache_start_direction_ = 0; + int zset_cache_field_num_per_key_ = 0; + std::shared_mutex rwlock_; + std::unique_ptr cache_load_thread_; + std::vector caches_; + std::vector> cache_mutexs_; +}; + +#endif diff --git a/tools/pika_migrate/include/pika_cache_load_thread.h b/tools/pika_migrate/include/pika_cache_load_thread.h new file mode 100644 index 0000000000..fa949e8d81 --- /dev/null +++ b/tools/pika_migrate/include/pika_cache_load_thread.h @@ -0,0 +1,55 @@ +// Copyright (c) 2023-present, Qihoo, Inc. All rights reserved. +// This source code is licensed under the BSD-style license found in the +// LICENSE file in the root directory of this source tree. An additional grant +// of patent rights can be found in the PATENTS file in the same directory. + + +#ifndef PIKA_CACHE_LOAD_THREAD_H_ +#define PIKA_CACHE_LOAD_THREAD_H_ + +#include +#include +#include +#include + +#include "include/pika_cache.h" +#include "include/pika_define.h" +#include "net/include/net_thread.h" +#include "storage/storage.h" + +class PikaCacheLoadThread : public net::Thread { + public: + PikaCacheLoadThread(int zset_cache_start_direction, int zset_cache_field_num_per_key); + ~PikaCacheLoadThread() override; + + uint64_t AsyncLoadKeysNum(void) { return async_load_keys_num_; } + uint32_t WaittingLoadKeysNum(void) { return waitting_load_keys_num_; } + void Push(const char key_type, std::string& key, const std::shared_ptr& db); + + private: + bool LoadKV(std::string& key, const std::shared_ptr& db); + bool LoadHash(std::string& key, const std::shared_ptr& db); + bool LoadList(std::string& key, const std::shared_ptr& db); + bool LoadSet(std::string& key, const std::shared_ptr& db); + bool LoadZset(std::string& key, const std::shared_ptr& db); + bool LoadKey(const char key_type, std::string& key, const std::shared_ptr& db); + virtual void* ThreadMain() override; + + private: + std::atomic_bool should_exit_; + std::deque>> loadkeys_queue_; + + pstd::CondVar loadkeys_cond_; + pstd::Mutex loadkeys_mutex_; + + std::unordered_map loadkeys_map_; + pstd::Mutex loadkeys_map_mutex_; + std::atomic_uint64_t async_load_keys_num_; + std::atomic_uint32_t waitting_load_keys_num_; + // currently only take effects to zset + int zset_cache_start_direction_; + int zset_cache_field_num_per_key_; + std::shared_ptr cache_; +}; + +#endif // PIKA_CACHE_LOAD_THREAD_H_ diff --git a/tools/pika_migrate/include/pika_client_conn.h b/tools/pika_migrate/include/pika_client_conn.h new file mode 100644 index 0000000000..3124d2036c --- /dev/null +++ b/tools/pika_migrate/include/pika_client_conn.h @@ -0,0 +1,150 @@ +// Copyright (c) 2015-present, Qihoo, Inc. All rights reserved. 
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree. An additional grant
+// of patent rights can be found in the PATENTS file in the same directory.
+
+#ifndef PIKA_CLIENT_CONN_H_
+#define PIKA_CLIENT_CONN_H_
+
+#include <bitset>
+#include <queue>
+
+#include "acl.h"
+#include "include/pika_command.h"
+#include "include/pika_define.h"
+
+// TODO: measure the time cost of writing response data back to the client fd
+struct TimeStat {
+  TimeStat() = default;
+  void Reset() {
+    enqueue_ts_ = dequeue_ts_ = 0;
+    process_done_ts_ = 0;
+    before_queue_ts_ = 0;
+  }
+
+  uint64_t start_ts() const { return enqueue_ts_; }
+
+  uint64_t total_time() const { return process_done_ts_ > enqueue_ts_ ? process_done_ts_ - enqueue_ts_ : 0; }
+
+  uint64_t queue_time() const { return dequeue_ts_ > enqueue_ts_ ? dequeue_ts_ - enqueue_ts_ : 0; }
+
+  uint64_t process_time() const { return process_done_ts_ > dequeue_ts_ ? process_done_ts_ - dequeue_ts_ : 0; }
+
+  uint64_t before_queue_time() const { return before_queue_ts_ > enqueue_ts_ ? before_queue_ts_ - enqueue_ts_ : 0; }
+
+  uint64_t enqueue_ts_;
+  uint64_t dequeue_ts_;
+  uint64_t before_queue_ts_;
+  uint64_t process_done_ts_;
+};
+
+class PikaClientConn : public net::RedisConn {
+ public:
+  using WriteCompleteCallback = std::function<void()>;
+
+  struct BgTaskArg {
+    std::shared_ptr<Cmd> cmd_ptr;
+    std::shared_ptr<PikaClientConn> conn_ptr;
+    std::vector<net::RedisCmdArgsType> redis_cmds;
+    std::shared_ptr<std::string> resp_ptr;
+    LogOffset offset;
+    std::string db_name;
+    bool cache_miss_in_rtc_;
+  };
+
+  struct TxnStateBitMask {
+   public:
+    static constexpr uint8_t Start = 0;
+    static constexpr uint8_t InitCmdFailed = 1;
+    static constexpr uint8_t WatchFailed = 2;
+    static constexpr uint8_t Execing = 3;
+  };
+
+  PikaClientConn(int fd, const std::string& ip_port, net::Thread* server_thread, net::NetMultiplexer* mpx,
+                 const net::HandleType& handle_type, int max_conn_rbuf_size);
+  ~PikaClientConn() = default;
+
+  bool IsInterceptedByRTC(std::string& opt);
+
+  void ProcessRedisCmds(const std::vector<net::RedisCmdArgsType>& argvs, bool async, std::string* response) override;
+
+  bool ReadCmdInCache(const net::RedisCmdArgsType& argv, const std::string& opt);
+  void BatchExecRedisCmd(const std::vector<net::RedisCmdArgsType>& argvs, bool cache_miss_in_rtc);
+  int DealMessage(const net::RedisCmdArgsType& argv, std::string* response) override { return 0; }
+  static void DoBackgroundTask(void* arg);
+
+  bool IsPubSub() { return is_pubsub_; }
+  void SetIsPubSub(bool is_pubsub) { is_pubsub_ = is_pubsub; }
+  void SetCurrentDb(const std::string& db_name) { current_db_ = db_name; }
+  void SetWriteCompleteCallback(WriteCompleteCallback cb) { write_completed_cb_ = std::move(cb); }
+  const std::string& GetCurrentTable() override { return current_db_; }
+
+  void DoAuth(const std::shared_ptr<User>& user);
+
+  void UnAuth(const std::shared_ptr<User>& user);
+
+  bool IsAuthed() const;
+  void InitUser();
+  bool AuthRequired() const;
+
+  std::string UserName() const;
+
+  // Txn
+  std::queue<std::shared_ptr<Cmd>> GetTxnCmdQue();
+  void PushCmdToQue(std::shared_ptr<Cmd> cmd);
+  void ClearTxnCmdQue();
+  void SetTxnWatchFailState(bool is_failed);
+  void SetTxnInitFailState(bool is_failed);
+  void SetTxnStartState(bool is_start);
+  void AddKeysToWatch(const std::vector<std::string>& db_keys);
+  void RemoveWatchedKeys();
+  void SetTxnFailedFromKeys(const std::vector<std::string>& db_keys);
+  void SetTxnFailedIfKeyExists(const std::string target_db_name = "");
+  void ExitTxn();
+  bool IsInTxn();
+  bool IsTxnInitFailed();
+  bool IsTxnWatchFailed();
+  bool IsTxnExecing(void);
+
+  net::ServerThread* server_thread() { return server_thread_; }
+  void ClientInfoToString(std::string* info, const std::string& cmdName);
+
+  std::atomic<int> resp_num;
+  std::vector<std::shared_ptr<std::string>> resp_array;
+
+  std::shared_ptr<TimeStat> time_stat_;
+
+ private:
+  net::ServerThread* const server_thread_;
+  std::string current_db_;
+  WriteCompleteCallback write_completed_cb_;
+  bool is_pubsub_ = false;
+  std::queue<std::shared_ptr<Cmd>> txn_cmd_que_;
+  std::bitset<16> txn_state_;
+  std::unordered_set<std::string> watched_db_keys_;
+  std::mutex txn_state_mu_;
+
+  bool authenticated_ = false;
+  std::shared_ptr<User> user_;
+
+  std::shared_ptr<Cmd> DoCmd(const PikaCmdArgsType& argv, const std::string& opt,
+                             const std::shared_ptr<std::string>& resp_ptr, bool cache_miss_in_rtc);
+
+  void ProcessSlowlog(const PikaCmdArgsType& argv, uint64_t do_duration);
+  void ProcessMonitor(const PikaCmdArgsType& argv);
+
+  void ExecRedisCmd(const PikaCmdArgsType& argv, std::shared_ptr<std::string>& resp_ptr, bool cache_miss_in_rtc);
+  void TryWriteResp();
+};
+
+struct ClientInfo {
+  int fd;
+  std::string ip_port;
+  int64_t last_interaction = 0;
+  std::shared_ptr<net::NetConn> conn;
+};
+
+extern bool AddrCompare(const ClientInfo& lhs, const ClientInfo& rhs);
+extern bool IdleCompare(const ClientInfo& lhs, const ClientInfo& rhs);
+
+#endif
diff --git a/tools/pika_migrate/include/pika_client_processor.h b/tools/pika_migrate/include/pika_client_processor.h
new file mode 100644
index 0000000000..dccd4ef96c
--- /dev/null
+++ b/tools/pika_migrate/include/pika_client_processor.h
@@ -0,0 +1,28 @@
+// Copyright (c) 2015-present, Qihoo, Inc. All rights reserved.
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree. An additional grant
+// of patent rights can be found in the PATENTS file in the same directory.
+
+#ifndef PIKA_CLIENT_PROCESSOR_H_
+#define PIKA_CLIENT_PROCESSOR_H_
+
+#include <cstddef>
+#include <memory>
+#include <string>
+#include <vector>
+
+#include "net/include/bg_thread.h"
+#include "net/include/thread_pool.h"
+
+class PikaClientProcessor {
+ public:
+  PikaClientProcessor(size_t worker_num, size_t max_queue_size, const std::string& name_prefix = "CliProcessor");
+  ~PikaClientProcessor();
+  int Start();
+  void Stop();
+  void SchedulePool(net::TaskFunc func, void* arg);
+  size_t ThreadPoolCurQueueSize();
+  size_t ThreadPoolMaxQueueSize();
+
+ private:
+  std::unique_ptr<net::ThreadPool> pool_;
+};
+#endif  // PIKA_CLIENT_PROCESSOR_H_
diff --git a/tools/pika_migrate/include/pika_cmd_table_manager.h b/tools/pika_migrate/include/pika_cmd_table_manager.h
new file mode 100644
index 0000000000..8177fa63b9
--- /dev/null
+++ b/tools/pika_migrate/include/pika_cmd_table_manager.h
@@ -0,0 +1,64 @@
+// Copyright (c) 2018-present, Qihoo, Inc. All rights reserved.
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree. An additional grant
+// of patent rights can be found in the PATENTS file in the same directory.
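+
+// The manager owns the command table consulted on every dispatch and keeps
+// per-command count/latency statistics in cmdstat_map_. Illustrative lookup
+// (simplified sketch, not the actual dispatch code):
+//
+//   std::shared_ptr<Cmd> cmd = g_pika_cmd_table_manager->GetCmd(opt);
+//   if (!cmd) {
+//     // unknown command: reply with an error to the client
+//   }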
+ +#ifndef PIKA_CMD_TABLE_MANAGER_H_ +#define PIKA_CMD_TABLE_MANAGER_H_ + +#include +#include + +#include "include/acl.h" +#include "include/pika_command.h" +#include "include/pika_data_distribution.h" + +struct CommandStatistics { + CommandStatistics() = default; + CommandStatistics(const CommandStatistics& other) { + cmd_time_consuming.store(other.cmd_time_consuming.load()); + cmd_count.store(other.cmd_count.load()); + } + std::atomic cmd_count = 0; + std::atomic cmd_time_consuming = 0; +}; + +class PikaCmdTableManager { + friend AclSelector; + + public: + PikaCmdTableManager(); + virtual ~PikaCmdTableManager() = default; + void InitCmdTable(void); + void RenameCommand(const std::string before, const std::string after); + std::shared_ptr GetCmd(const std::string& opt); + bool CmdExist(const std::string& cmd) const; + CmdTable* GetCmdTable(); + uint32_t GetMaxCmdId(); + + std::vector GetAclCategoryCmdNames(uint32_t flag); + + /* + * Info Commandstats used + */ + std::unordered_map* GetCommandStatMap(); + + private: + std::shared_ptr NewCommand(const std::string& opt); + + void InsertCurrentThreadDistributionMap(); + bool CheckCurrentThreadDistributionMapExist(const std::thread::id& tid); + + std::unique_ptr cmds_; + + uint32_t cmdId_ = 0; + + std::shared_mutex map_protector_; + std::unordered_map> thread_distribution_map_; + + /* + * Info Commandstats used + */ + std::unordered_map cmdstat_map_; +}; +#endif diff --git a/tools/pika_migrate/include/pika_command.h b/tools/pika_migrate/include/pika_command.h new file mode 100644 index 0000000000..c132eae9c5 --- /dev/null +++ b/tools/pika_migrate/include/pika_command.h @@ -0,0 +1,641 @@ +// Copyright (c) 2015-present, Qihoo, Inc. All rights reserved. +// This source code is licensed under the BSD-style license found in the +// LICENSE file in the root directory of this source tree. An additional grant +// of patent rights can be found in the PATENTS file in the same directory. 
+ +#ifndef PIKA_COMMAND_H_ +#define PIKA_COMMAND_H_ + +#include +#include +#include +#include +#include + +#include "rocksdb/status.h" + +#include "net/include/net_conn.h" +#include "net/include/redis_conn.h" +#include "pstd/include/pstd_string.h" + +#include "net/src/dispatch_thread.h" + +class SyncMasterDB; +class SyncSlaveDB; +class DB; +// Constant for command name +// Admin +const std::string kCmdNameSlaveof = "slaveof"; +const std::string kCmdNameDbSlaveof = "dbslaveof"; +const std::string kCmdNameAuth = "auth"; +const std::string kCmdNameBgsave = "bgsave"; +const std::string kCmdNameCompact = "compact"; +const std::string kCmdNameCompactRange = "compactrange"; +const std::string kCmdNamePurgelogsto = "purgelogsto"; +const std::string kCmdNamePing = "ping"; +const std::string kCmdNameSelect = "select"; +const std::string kCmdNameFlushall = "flushall"; +const std::string kCmdNameFlushdb = "flushdb"; +const std::string kCmdNameClient = "client"; +const std::string kCmdNameShutdown = "shutdown"; +const std::string kCmdNameInfo = "info"; +const std::string kCmdNameConfig = "config"; +const std::string kCmdNameMonitor = "monitor"; +const std::string kCmdNameDbsize = "dbsize"; +const std::string kCmdNameTime = "time"; +const std::string kCmdNameDelbackup = "delbackup"; +const std::string kCmdNameEcho = "echo"; +const std::string kCmdNameScandb = "scandb"; +const std::string kCmdNameSlowlog = "slowlog"; +const std::string kCmdNamePadding = "padding"; +const std::string kCmdNamePKPatternMatchDel = "pkpatternmatchdel"; +const std::string kCmdDummy = "dummy"; +const std::string kCmdNameQuit = "quit"; +const std::string kCmdNameHello = "hello"; +const std::string kCmdNameCommand = "command"; +const std::string kCmdNameDiskRecovery = "diskrecovery"; +const std::string kCmdNameClearReplicationID = "clearreplicationid"; +const std::string kCmdNameDisableWal = "disablewal"; +const std::string kCmdNameLastSave = "lastsave"; +const std::string kCmdNameCache = "cache"; +const std::string kCmdNameClearCache = "clearcache"; + +// Migrate slot +const std::string kCmdNameSlotsMgrtSlot = "slotsmgrtslot"; +const std::string kCmdNameSlotsMgrtTagSlot = "slotsmgrttagslot"; +const std::string kCmdNameSlotsMgrtOne = "slotsmgrtone"; +const std::string kCmdNameSlotsMgrtTagOne = "slotsmgrttagone"; +const std::string kCmdNameSlotsInfo = "slotsinfo"; +const std::string kCmdNameSlotsHashKey = "slotshashkey"; +const std::string kCmdNameSlotsReload = "slotsreload"; +const std::string kCmdNameSlotsReloadOff = "slotsreloadoff"; +const std::string kCmdNameSlotsDel = "slotsdel"; +const std::string kCmdNameSlotsScan = "slotsscan"; +const std::string kCmdNameSlotsCleanup = "slotscleanup"; +const std::string kCmdNameSlotsCleanupOff = "slotscleanupoff"; +const std::string kCmdNameSlotsMgrtTagSlotAsync = "slotsmgrttagslot-async"; +const std::string kCmdNameSlotsMgrtSlotAsync = "slotsmgrtslot-async"; +const std::string kCmdNameSlotsMgrtExecWrapper = "slotsmgrt-exec-wrapper"; +const std::string kCmdNameSlotsMgrtAsyncStatus = "slotsmgrt-async-status"; +const std::string kCmdNameSlotsMgrtAsyncCancel = "slotsmgrt-async-cancel"; + +// Kv +const std::string kCmdNameSet = "set"; +const std::string kCmdNameGet = "get"; +const std::string kCmdNameDel = "del"; +const std::string kCmdNameUnlink = "unlink"; +const std::string kCmdNameIncr = "incr"; +const std::string kCmdNameIncrby = "incrby"; +const std::string kCmdNameIncrbyfloat = "incrbyfloat"; +const std::string kCmdNameDecr = "decr"; +const std::string kCmdNameDecrby = "decrby"; +const 
std::string kCmdNameGetset = "getset"; +const std::string kCmdNameAppend = "append"; +const std::string kCmdNameMget = "mget"; +const std::string kCmdNameKeys = "keys"; +const std::string kCmdNameSetnx = "setnx"; +const std::string kCmdNameSetex = "setex"; +const std::string kCmdNamePsetex = "psetex"; +const std::string kCmdNameDelvx = "delvx"; +const std::string kCmdNameMset = "mset"; +const std::string kCmdNameMsetnx = "msetnx"; +const std::string kCmdNameGetrange = "getrange"; +const std::string kCmdNameSetrange = "setrange"; +const std::string kCmdNameStrlen = "strlen"; +const std::string kCmdNameExists = "exists"; +const std::string kCmdNameExpire = "expire"; +const std::string kCmdNamePexpire = "pexpire"; +const std::string kCmdNameExpireat = "expireat"; +const std::string kCmdNamePexpireat = "pexpireat"; +const std::string kCmdNameTtl = "ttl"; +const std::string kCmdNamePttl = "pttl"; +const std::string kCmdNamePersist = "persist"; +const std::string kCmdNameType = "type"; +const std::string kCmdNameScan = "scan"; +const std::string kCmdNameScanx = "scanx"; +const std::string kCmdNamePKSetexAt = "pksetexat"; +const std::string kCmdNamePKScanRange = "pkscanrange"; +const std::string kCmdNamePKRScanRange = "pkrscanrange"; + +// Hash +const std::string kCmdNameHDel = "hdel"; +const std::string kCmdNameHSet = "hset"; +const std::string kCmdNameHGet = "hget"; +const std::string kCmdNameHGetall = "hgetall"; +const std::string kCmdNameHExists = "hexists"; +const std::string kCmdNameHIncrby = "hincrby"; +const std::string kCmdNameHIncrbyfloat = "hincrbyfloat"; +const std::string kCmdNameHKeys = "hkeys"; +const std::string kCmdNameHLen = "hlen"; +const std::string kCmdNameHMget = "hmget"; +const std::string kCmdNameHMset = "hmset"; +const std::string kCmdNameHSetnx = "hsetnx"; +const std::string kCmdNameHStrlen = "hstrlen"; +const std::string kCmdNameHVals = "hvals"; +const std::string kCmdNameHScan = "hscan"; +const std::string kCmdNameHScanx = "hscanx"; +const std::string kCmdNamePKHScanRange = "pkhscanrange"; +const std::string kCmdNamePKHRScanRange = "pkhrscanrange"; + +// List +const std::string kCmdNameLIndex = "lindex"; +const std::string kCmdNameLInsert = "linsert"; +const std::string kCmdNameLLen = "llen"; +const std::string kCmdNameBLPop = "blpop"; +const std::string kCmdNameLPop = "lpop"; +const std::string kCmdNameLPush = "lpush"; +const std::string kCmdNameLPushx = "lpushx"; +const std::string kCmdNameLRange = "lrange"; +const std::string kCmdNameLRem = "lrem"; +const std::string kCmdNameLSet = "lset"; +const std::string kCmdNameLTrim = "ltrim"; +const std::string kCmdNameBRpop = "brpop"; +const std::string kCmdNameRPop = "rpop"; +const std::string kCmdNameRPopLPush = "rpoplpush"; +const std::string kCmdNameRPush = "rpush"; +const std::string kCmdNameRPushx = "rpushx"; + +// BitMap +const std::string kCmdNameBitSet = "setbit"; +const std::string kCmdNameBitGet = "getbit"; +const std::string kCmdNameBitPos = "bitpos"; +const std::string kCmdNameBitOp = "bitop"; +const std::string kCmdNameBitCount = "bitcount"; + +// Zset +const std::string kCmdNameZAdd = "zadd"; +const std::string kCmdNameZCard = "zcard"; +const std::string kCmdNameZScan = "zscan"; +const std::string kCmdNameZIncrby = "zincrby"; +const std::string kCmdNameZRange = "zrange"; +const std::string kCmdNameZRangebyscore = "zrangebyscore"; +const std::string kCmdNameZCount = "zcount"; +const std::string kCmdNameZRem = "zrem"; +const std::string kCmdNameZUnionstore = "zunionstore"; +const std::string kCmdNameZInterstore 
= "zinterstore";
+const std::string kCmdNameZRank = "zrank";
+const std::string kCmdNameZRevrank = "zrevrank";
+const std::string kCmdNameZScore = "zscore";
+const std::string kCmdNameZRevrange = "zrevrange";
+const std::string kCmdNameZRevrangebyscore = "zrevrangebyscore";
+const std::string kCmdNameZRangebylex = "zrangebylex";
+const std::string kCmdNameZRevrangebylex = "zrevrangebylex";
+const std::string kCmdNameZLexcount = "zlexcount";
+const std::string kCmdNameZRemrangebyrank = "zremrangebyrank";
+const std::string kCmdNameZRemrangebylex = "zremrangebylex";
+const std::string kCmdNameZRemrangebyscore = "zremrangebyscore";
+const std::string kCmdNameZPopmax = "zpopmax";
+const std::string kCmdNameZPopmin = "zpopmin";
+
+// Set
+const std::string kCmdNameSAdd = "sadd";
+const std::string kCmdNameSPop = "spop";
+const std::string kCmdNameSCard = "scard";
+const std::string kCmdNameSMembers = "smembers";
+const std::string kCmdNameSScan = "sscan";
+const std::string kCmdNameSRem = "srem";
+const std::string kCmdNameSUnion = "sunion";
+const std::string kCmdNameSUnionstore = "sunionstore";
+const std::string kCmdNameSInter = "sinter";
+const std::string kCmdNameSInterstore = "sinterstore";
+const std::string kCmdNameSIsmember = "sismember";
+const std::string kCmdNameSDiff = "sdiff";
+const std::string kCmdNameSDiffstore = "sdiffstore";
+const std::string kCmdNameSMove = "smove";
+const std::string kCmdNameSRandmember = "srandmember";
+
+// Transaction
+const std::string kCmdNameMulti = "multi";
+const std::string kCmdNameExec = "exec";
+const std::string kCmdNameDiscard = "discard";
+const std::string kCmdNameWatch = "watch";
+const std::string kCmdNameUnWatch = "unwatch";
+
+// HyperLogLog
+const std::string kCmdNamePfAdd = "pfadd";
+const std::string kCmdNamePfCount = "pfcount";
+const std::string kCmdNamePfMerge = "pfmerge";
+
+// GEO
+const std::string kCmdNameGeoAdd = "geoadd";
+const std::string kCmdNameGeoPos = "geopos";
+const std::string kCmdNameGeoDist = "geodist";
+const std::string kCmdNameGeoHash = "geohash";
+const std::string kCmdNameGeoRadius = "georadius";
+const std::string kCmdNameGeoRadiusByMember = "georadiusbymember";
+
+// Pub/Sub
+const std::string kCmdNamePublish = "publish";
+const std::string kCmdNameSubscribe = "subscribe";
+const std::string kCmdNameUnSubscribe = "unsubscribe";
+const std::string kCmdNamePubSub = "pubsub";
+const std::string kCmdNamePSubscribe = "psubscribe";
+const std::string kCmdNamePUnSubscribe = "punsubscribe";
+
+// ACL
+const std::string KCmdNameAcl = "acl";
+
+// Stream
+const std::string kCmdNameXAdd = "xadd";
+const std::string kCmdNameXDel = "xdel";
+const std::string kCmdNameXRead = "xread";
+const std::string kCmdNameXLen = "xlen";
+const std::string kCmdNameXRange = "xrange";
+const std::string kCmdNameXRevrange = "xrevrange";
+const std::string kCmdNameXTrim = "xtrim";
+const std::string kCmdNameXInfo = "xinfo";
+
+const std::string kClusterPrefix = "pkcluster";
+
+
+/*
+ * Returned when a key already holds a value of one type and a command
+ * for a different data structure is applied to it
+ */
+constexpr const char* ErrTypeMessage = "Invalid argument: WRONGTYPE";
+
+using PikaCmdArgsType = net::RedisCmdArgsType;
+static const int RAW_ARGS_LEN = 1024 * 1024;
+
+enum CmdFlagsMask {
+  kCmdFlagsMaskRW = 1,
+  kCmdFlagsMaskLocal = (1 << 1),
+  kCmdFlagsMaskSuspend = (1 << 2),
+  kCmdFlagsMaskReadCache = (1 << 3),
+  kCmdFlagsMaskAdminRequire = (1 << 4),
+  kCmdFlagsMaskUpdateCache = (1 << 5),
+  kCmdFlagsMaskDoThrouhDB = (1 << 6),
+};
+
+enum CmdFlags {
+  kCmdFlagsRead
= 1,  // default rw
+  kCmdFlagsWrite = (1 << 1),
+  kCmdFlagsAdmin = (1 << 2),  // default type
+  kCmdFlagsKv = (1 << 3),
+  kCmdFlagsHash = (1 << 4),
+  kCmdFlagsList = (1 << 5),
+  kCmdFlagsSet = (1 << 6),
+  kCmdFlagsZset = (1 << 7),
+  kCmdFlagsBit = (1 << 8),
+  kCmdFlagsHyperLogLog = (1 << 9),
+  kCmdFlagsGeo = (1 << 10),
+  kCmdFlagsPubSub = (1 << 11),
+  kCmdFlagsLocal = (1 << 12),
+  kCmdFlagsSuspend = (1 << 13),
+  kCmdFlagsAdminRequire = (1 << 14),
+  kCmdFlagsNoAuth = (1 << 15),  // command can be executed without auth
+  kCmdFlagsReadCache = (1 << 16),
+  kCmdFlagsUpdateCache = (1 << 17),
+  kCmdFlagsDoThroughDB = (1 << 18),
+  kCmdFlagsOperateKey = (1 << 19),  // redis keySpace
+  kCmdFlagsStream = (1 << 20),
+  kCmdFlagsFast = (1 << 21),
+  kCmdFlagsSlow = (1 << 22)
+};
+
+void inline RedisAppendContent(std::string& str, const std::string& value);
+void inline RedisAppendLen(std::string& str, int64_t ori, const std::string& prefix);
+void inline RedisAppendLenUint64(std::string& str, uint64_t ori, const std::string& prefix) {
+  RedisAppendLen(str, static_cast<int64_t>(ori), prefix);
+}
+
+const std::string kNewLine = "\r\n";
+
+class CmdRes {
+ public:
+  enum CmdRet {
+    kNone = 0,
+    kOk,
+    kPong,
+    kSyntaxErr,
+    kInvalidInt,
+    kInvalidBitInt,
+    kInvalidBitOffsetInt,
+    kInvalidBitPosArgument,
+    kWrongBitOpNotNum,
+    kInvalidFloat,
+    kOverFlow,
+    kNotFound,
+    kOutOfRange,
+    kInvalidPwd,
+    kNoneBgsave,
+    kPurgeExist,
+    kInvalidParameter,
+    kWrongNum,
+    kInvalidIndex,
+    kInvalidDbType,
+    kInvalidDB,
+    kInconsistentHashTag,
+    kErrOther,
+    kCacheMiss,
+    KIncrByOverFlow,
+    kInvalidTransaction,
+    kTxnQueued,
+    kTxnAbort,
+    kMultiKey,
+    kNoExists,
+  };
+
+  CmdRes() = default;
+
+  bool none() const { return ret_ == kNone && message_.empty(); }
+  bool noexist() const { return ret_ == kNoExists; }
+  bool ok() const { return ret_ == kOk || ret_ == kNone || ret_ == kNoExists; }
+  CmdRet ret() const { return ret_; }
+  void clear() {
+    message_.clear();
+    ret_ = kNone;
+  }
+  bool CacheMiss() const { return ret_ == kCacheMiss; }
+  std::string raw_message() const { return message_; }
+  std::string message() const {
+    std::string result;
+    switch (ret_) {
+      case kNone:
+        return message_;
+      case kOk:
+        return "+OK\r\n";
+      case kPong:
+        return "+PONG\r\n";
+      case kSyntaxErr:
+        return "-ERR syntax error\r\n";
+      case kInvalidInt:
+        return "-ERR value is not an integer or out of range\r\n";
+      case kInvalidBitInt:
+        return "-ERR bit is not an integer or out of range\r\n";
+      case kInvalidBitOffsetInt:
+        return "-ERR bit offset is not an integer or out of range\r\n";
+      case kWrongBitOpNotNum:
+        return "-ERR BITOP NOT must be called with a single source key.\r\n";
+      case kInvalidBitPosArgument:
+        return "-ERR The bit argument must be 1 or 0.\r\n";
+      case kInvalidFloat:
+        return "-ERR value is not a valid float\r\n";
+      case kOverFlow:
+        return "-ERR increment or decrement would overflow\r\n";
+      case kNotFound:
+        return "-ERR no such key\r\n";
+      case kOutOfRange:
+        return "-ERR index out of range\r\n";
+      case kInvalidPwd:
+        return "-ERR invalid password\r\n";
+      case kNoneBgsave:
+        return "-ERR No BGSave Works now\r\n";
+      case kPurgeExist:
+        return "-ERR binlog already in purging...\r\n";
+      case kInvalidParameter:
+        return "-ERR Invalid Argument\r\n";
+      case kWrongNum:
+        result = "-ERR wrong number of arguments for '";
+        result.append(message_);
+        result.append("' command\r\n");
+        break;
+      case kInvalidIndex:
+        result = "-ERR invalid DB index for '";
+        result.append(message_);
+        result.append("'\r\n");
+        break;
+      case kInvalidDbType:
result = "-ERR invalid DB for '";
+        result.append(message_);
+        result.append("'\r\n");
+        break;
+      case kInconsistentHashTag:
+        return "-ERR parameters hashtag is inconsistent\r\n";
+      case kInvalidDB:
+        result = "-ERR invalid DB for '";
+        result.append(message_);
+        result.append("'\r\n");
+        break;
+      case kInvalidTransaction:
+        return "-ERR WATCH inside MULTI is not allowed\r\n";
+      case kTxnQueued:
+        result = "+QUEUED";
+        result.append("\r\n");
+        break;
+      case kTxnAbort:
+        result = "-EXECABORT ";
+        result.append(message_);
+        result.append(kNewLine);
+        break;
+      case kErrOther:
+        result = "-ERR ";
+        result.append(message_);
+        result.append(kNewLine);
+        break;
+      case KIncrByOverFlow:
+        result = "-ERR increment would produce NaN or Infinity";
+        result.append(message_);
+        result.append(kNewLine);
+        break;
+      case kMultiKey:
+        result = "-WRONGTYPE Operation against a key holding the wrong kind of value";
+        result.append(kNewLine);
+        break;
+      case kNoExists:
+        return message_;
+      default:
+        break;
+    }
+    return result;
+  }
+
+  // Inline functions for building Redis protocol replies
+  void AppendStringLen(int64_t ori) { RedisAppendLen(message_, ori, "$"); }
+  void AppendStringLenUint64(uint64_t ori) { RedisAppendLenUint64(message_, ori, "$"); }
+  void AppendArrayLen(int64_t ori) { RedisAppendLen(message_, ori, "*"); }
+  void AppendArrayLenUint64(uint64_t ori) { RedisAppendLenUint64(message_, ori, "*"); }
+  void AppendInteger(int64_t ori) { RedisAppendLen(message_, ori, ":"); }
+  void AppendContent(const std::string& value) { RedisAppendContent(message_, value); }
+  void AppendString(const std::string& value) {
+    AppendStringLenUint64(value.size());
+    AppendContent(value);
+  }
+  void AppendStringRaw(const std::string& value) { message_.append(value); }
+
+  void AppendStringVector(const std::vector<std::string>& strArray) {
+    if (strArray.empty()) {
+      AppendArrayLen(0);
+      return;
+    }
+    AppendArrayLen(strArray.size());
+    for (const auto& item : strArray) {
+      AppendString(item);
+    }
+  }
+
+  void SetRes(CmdRet _ret, const std::string& content = "") {
+    ret_ = _ret;
+    if (!content.empty()) {
+      message_ = content;
+    }
+  }
+
+ private:
+  std::string message_;
+  CmdRet ret_ = kNone;
+};
+
+/**
+ * Currently used by: blpop, brpop
+ */
+struct UnblockTaskArgs {
+  std::string key;
+  std::shared_ptr<DB> db;
+  net::DispatchThread* dispatchThread{ nullptr };
+  UnblockTaskArgs(std::string key_, std::shared_ptr<DB> db_, net::DispatchThread* dispatchThread_)
+      : key(std::move(key_)), db(db_), dispatchThread(dispatchThread_) {}
+};
+
+class PikaClientConn;
+
+class Cmd : public std::enable_shared_from_this<Cmd> {
+ public:
+  friend class PikaClientConn;
+  enum CmdStage { kNone, kBinlogStage, kExecuteStage };
+  struct HintKeys {
+    HintKeys() = default;
+
+    bool empty() const { return keys.empty() && hints.empty(); }
+    std::vector<std::string> keys;
+    std::vector<int> hints;
+  };
+  struct ProcessArg {
+    ProcessArg() = default;
+    ProcessArg(std::shared_ptr<DB> _db, std::shared_ptr<SyncMasterDB> _sync_db, HintKeys _hint_keys)
+        : db(std::move(_db)), sync_db(std::move(_sync_db)), hint_keys(std::move(_hint_keys)) {}
+    std::shared_ptr<DB> db;
+    std::shared_ptr<SyncMasterDB> sync_db;
+    HintKeys hint_keys;
+  };
+  struct CommandStatistics {
+    CommandStatistics() = default;
+    CommandStatistics(const CommandStatistics& other) {
+      cmd_time_consuming.store(other.cmd_time_consuming.load());
+      cmd_count.store(other.cmd_count.load());
+    }
+    std::atomic<int32_t> cmd_count = {0};
+    std::atomic<int64_t> cmd_time_consuming = {0};
+  };
+  CommandStatistics state;
+  Cmd(std::string name, int arity, uint32_t flag, uint32_t aclCategory = 0);
+  virtual
~Cmd() = default;
+
+  virtual std::vector<std::string> current_key() const;
+  virtual void Execute();
+  virtual void Do() {};
+  virtual void DoThroughDB() {}
+  virtual void DoUpdateCache() {}
+  virtual void ReadCache() {}
+  virtual Cmd* Clone() = 0;
+  // used to execute a multi-key command across different slots
+  virtual void Split(const HintKeys& hint_keys) = 0;
+  virtual void Merge() = 0;
+  virtual bool IsTooLargeKey(const int& max_sz) { return false; }
+
+  int8_t SubCmdIndex(const std::string& cmdName);  // returns -1 if the command has no subcommand
+
+  void Initial(const PikaCmdArgsType& argv, const std::string& db_name);
+  uint32_t flag() const;
+  bool hasFlag(uint32_t flag) const;
+  bool is_read() const;
+  bool is_write() const;
+  bool isCacheRead() const;
+
+  bool IsLocal() const;
+  bool IsSuspend() const;
+  bool IsAdmin() const;
+  bool HasSubCommand() const;  // whether the command has subcommands
+  std::vector<std::string> SubCommand() const;  // the subcommand names, if any
+  bool IsNeedUpdateCache() const;
+  bool IsNeedReadCache() const;
+  bool IsNeedCacheDo() const;
+  bool HashtagIsConsistent(const std::string& lhs, const std::string& rhs) const;
+  uint64_t GetDoDuration() const { return do_duration_; };
+  std::shared_ptr<DB> GetDB() const { return db_; };
+  uint32_t AclCategory() const;
+  void AddAclCategory(uint32_t aclCategory);
+  void SetDbName(const std::string& db_name) { db_name_ = db_name; }
+  std::string GetDBName() { return db_name_; }
+
+  std::string name() const;
+  CmdRes& res();
+  std::string db_name() const;
+  PikaCmdArgsType& argv();
+  virtual std::string ToRedisProtocol();
+
+  void SetConn(const std::shared_ptr<net::NetConn>& conn);
+  std::shared_ptr<net::NetConn> GetConn();
+
+  void SetResp(const std::shared_ptr<std::string>& resp);
+  std::shared_ptr<std::string> GetResp();
+
+  void SetStage(CmdStage stage);
+  void SetCmdId(uint32_t cmdId) { cmdId_ = cmdId; }
+
+  virtual void DoBinlog();
+
+  uint32_t GetCmdId() const { return cmdId_; };
+  bool CheckArg(uint64_t num) const;
+
+  bool IsCacheMissedInRtc() const;
+  void SetCacheMissedInRtc(bool value);
+
+ protected:
+  // enable copy, used default copy
+  // Cmd(const Cmd&);
+  void ProcessCommand(const HintKeys& hint_key = HintKeys());
+  void InternalProcessCommand(const HintKeys& hint_key);
+  void DoCommand(const HintKeys& hint_key);
+  bool DoReadCommandInCache();
+  void LogCommand() const;
+
+  std::string name_;
+  int arity_ = -2;
+  uint32_t flag_ = 0;
+
+  std::vector<std::string> subCmdName_;  // subcommand names, may be empty
+
+ protected:
+  CmdRes res_;
+  PikaCmdArgsType argv_;
+  std::string db_name_;
+  rocksdb::Status s_;
+  std::shared_ptr<DB> db_;
+  std::shared_ptr<SyncMasterDB> sync_db_;
+  std::weak_ptr<net::NetConn> conn_;
+  std::weak_ptr<std::string> resp_;
+  CmdStage stage_ = kNone;
+  uint64_t do_duration_ = 0;
+  uint32_t cmdId_ = 0;
+  uint32_t aclCategory_ = 0;
+  bool cache_missed_in_rtc_{false};
+
+ private:
+  virtual void DoInitial() = 0;
+  virtual void Clear(){};
+
+  Cmd& operator=(const Cmd&);
+};
+
+using CmdTable = std::unordered_map<std::string, std::unique_ptr<Cmd>>;
+
+// Methods for the Cmd table
+void InitCmdTable(CmdTable* cmd_table);
+Cmd* GetCmdFromDB(const std::string& opt, const CmdTable& cmd_table);
+
+void RedisAppendContent(std::string& str, const std::string& value) {
+  str.append(value.data(), value.size());
+  str.append(kNewLine);
+}
+
+void RedisAppendLen(std::string& str, int64_t ori, const std::string& prefix) {
+  char buf[32];
+  pstd::ll2string(buf, 32, static_cast<long long>(ori));
+  str.append(prefix);
+  str.append(buf);
+  str.append(kNewLine);
+}
+
+#endif
diff --git a/tools/pika_migrate/include/pika_conf.h b/tools/pika_migrate/include/pika_conf.h
new file mode 100644
index 0000000000..5aa0c790c2
--- /dev/null
+++ b/tools/pika_migrate/include/pika_conf.h
@@ -0,0 +1,1122 @@
+// Copyright (c) 2015-present, Qihoo, Inc. All rights reserved.
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree. An additional grant
+// of patent rights can be found in the PATENTS file in the same directory.
+
+#ifndef PIKA_CONF_H_
+#define PIKA_CONF_H_
+
+#include <atomic>
+#include <map>
+#include <shared_mutex>
+#include <unordered_set>
+
+#include "rocksdb/compression_type.h"
+
+#include "pstd/include/base_conf.h"
+#include "pstd/include/pstd_mutex.h"
+#include "pstd/include/pstd_string.h"
+
+#include "acl.h"
+#include "include/pika_define.h"
+#include "rocksdb/compression_type.h"
+
+#define kBinlogReadWinDefaultSize 9000
+#define kBinlogReadWinMaxSize 90000
+const uint32_t configRunIDSize = 40;
+const uint32_t configReplicationIDSize = 50;
+
+// Global config class; members are initialized at load time
+class PikaConf : public pstd::BaseConf {
+ public:
+  enum CompactionStrategy {
+    NONE,
+    FullCompact,
+    OldestOrBestDeleteRatioSstCompact
+  };
+  PikaConf(const std::string& path);
+  ~PikaConf() override = default;
+
+  // Getter
+  int port() {
+    std::shared_lock l(rwlock_);
+    return port_;
+  }
+  std::string slaveof() {
+    std::shared_lock l(rwlock_);
+    return slaveof_;
+  }
+  int slave_priority() {
+    std::shared_lock l(rwlock_);
+    return slave_priority_;
+  }
+  bool write_binlog() {
+    std::shared_lock l(rwlock_);
+    return write_binlog_;
+  }
+  int thread_num() {
+    std::shared_lock l(rwlock_);
+    return thread_num_;
+  }
+  int thread_pool_size() {
+    std::shared_lock l(rwlock_);
+    return thread_pool_size_;
+  }
+  int slow_cmd_thread_pool_size() {
+    std::shared_lock l(rwlock_);
+    return slow_cmd_thread_pool_size_;
+  }
+  int admin_thread_pool_size() {
+    std::shared_lock l(rwlock_);
+    return admin_thread_pool_size_;
+  }
+  int sync_thread_num() {
+    std::shared_lock l(rwlock_);
+    return sync_thread_num_;
+  }
+  int sync_binlog_thread_num() {
+    std::shared_lock l(rwlock_);
+    return sync_binlog_thread_num_;
+  }
+  std::string log_path() {
+    std::shared_lock l(rwlock_);
+    return log_path_;
+  }
+  int log_retention_time() {
+    std::shared_lock l(rwlock_);
+    return log_retention_time_;
+  }
+  std::string log_level() {
+    std::shared_lock l(rwlock_);
+    return log_level_;
+  }
+  std::string db_path() {
+    std::shared_lock l(rwlock_);
+    return db_path_;
+  }
+  int db_instance_num() {
+    return db_instance_num_;
+  }
+  uint64_t rocksdb_ttl_second() {
+    return rocksdb_ttl_second_.load();
+  }
+  uint64_t rocksdb_periodic_compaction_second() {
+    return rocksdb_periodic_second_.load();
+  }
+  std::string db_sync_path() {
+    std::shared_lock l(rwlock_);
+    return db_sync_path_;
+  }
+  int db_sync_speed() {
+    std::shared_lock l(rwlock_);
+    return db_sync_speed_;
+  }
+  std::string compact_cron() {
+    std::shared_lock l(rwlock_);
+    return compact_cron_;
+  }
+  std::string compact_interval() {
+    std::shared_lock l(rwlock_);
+    return compact_interval_;
+  }
+  int max_subcompactions() {
+    std::shared_lock l(rwlock_);
+    return max_subcompactions_;
+  }
+  int compact_every_num_of_files() {
+    std::shared_lock l(rwlock_);
+    return compact_every_num_of_files_;
+  }
+  int force_compact_file_age_seconds() {
+    std::shared_lock l(rwlock_);
+    return force_compact_file_age_seconds_;
+  }
+  int force_compact_min_delete_ratio() {
+    std::shared_lock l(rwlock_);
+    return force_compact_min_delete_ratio_;
+  }
+  int dont_compact_sst_created_in_seconds() {
+    std::shared_lock l(rwlock_);
+    return
dont_compact_sst_created_in_seconds_; + } + int best_delete_min_ratio() { + std::shared_lock l(rwlock_); + return best_delete_min_ratio_; + } + CompactionStrategy compaction_strategy() { + std::shared_lock l(rwlock_); + return compaction_strategy_; + } + bool disable_auto_compactions() { + std::shared_lock l(rwlock_); + return disable_auto_compactions_; + } + int64_t least_resume_free_disk_size() { + std::shared_lock l(rwlock_); + return least_free_disk_to_resume_; + } + int64_t resume_interval() { + std::shared_lock l(rwlock_); + return resume_check_interval_; + } + double min_check_resume_ratio() { + std::shared_lock l(rwlock_); + return min_check_resume_ratio_; + } + int64_t write_buffer_size() { + std::shared_lock l(rwlock_); + return write_buffer_size_; + } + int min_write_buffer_number_to_merge() { + std::shared_lock l(rwlock_); + return min_write_buffer_number_to_merge_; + } + int level0_stop_writes_trigger() { + std::shared_lock l(rwlock_); + return level0_stop_writes_trigger_; + } + int level0_slowdown_writes_trigger() { + std::shared_lock l(rwlock_); + return level0_slowdown_writes_trigger_; + } + int level0_file_num_compaction_trigger() { + std::shared_lock l(rwlock_); + return level0_file_num_compaction_trigger_; + } + int64_t arena_block_size() { + std::shared_lock l(rwlock_); + return arena_block_size_; + } + int64_t slotmigrate_thread_num() { + std::shared_lock l(rwlock_); + return slotmigrate_thread_num_; + } + int64_t thread_migrate_keys_num() { + std::shared_lock l(rwlock_); + return thread_migrate_keys_num_; + } + int64_t max_write_buffer_size() { + std::shared_lock l(rwlock_); + return max_write_buffer_size_; + } + int max_write_buffer_number() { + std::shared_lock l(rwlock_); + return max_write_buffer_num_; + } + uint64_t MaxTotalWalSize() { + std::shared_lock l(rwlock_); + return max_total_wal_size_; + } + bool enable_db_statistics() { + return enable_db_statistics_; + } + int db_statistics_level() { + std::shared_lock l(rwlock_); + return db_statistics_level_; + } + int64_t max_client_response_size() { + std::shared_lock l(rwlock_); + return max_client_response_size_; + } + int timeout() { + std::shared_lock l(rwlock_); + return timeout_; + } + int binlog_writer_num() { + std::shared_lock l(rwlock_); + return binlog_writer_num_; + } + bool slotmigrate() { + std::shared_lock l(rwlock_); + return slotmigrate_; + } + bool slow_cmd_pool() { + std::shared_lock l(rwlock_); + return slow_cmd_pool_; + } + std::string server_id() { + std::shared_lock l(rwlock_); + return server_id_; + } + std::string run_id() { + std::shared_lock l(rwlock_); + return run_id_; + } + std::string replication_id() { + std::shared_lock l(rwlock_); + return replication_id_; + } + std::string requirepass() { + std::shared_lock l(rwlock_); + return requirepass_; + } + std::string masterauth() { + std::shared_lock l(rwlock_); + return masterauth_; + } + std::string userpass() { + std::shared_lock l(rwlock_); + return userpass_; + } + std::string bgsave_path() { + std::shared_lock l(rwlock_); + return bgsave_path_; + } + int expire_dump_days() { + std::shared_lock l(rwlock_); + return expire_dump_days_; + } + std::string bgsave_prefix() { + std::shared_lock l(rwlock_); + return bgsave_prefix_; + } + std::string user_blacklist_string() { + std::shared_lock l(rwlock_); + return pstd::StringConcat(user_blacklist_, COMMA); + } + const std::vector& user_blacklist_vector() { + std::shared_lock l(rwlock_); + return user_blacklist_; + } + bool classic_mode() { return classic_mode_.load(); } + int databases() { 
+    std::shared_lock l(rwlock_);
+    return databases_;
+  }
+  int default_slot_num() {
+    std::shared_lock l(rwlock_);
+    return default_slot_num_;
+  }
+  const std::vector<DBStruct>& db_structs() {
+    std::shared_lock l(rwlock_);
+    return db_structs_;
+  }
+  std::string default_db() {
+    std::shared_lock l(rwlock_);
+    return default_db_;
+  }
+  std::string compression() {
+    std::shared_lock l(rwlock_);
+    return compression_;
+  }
+  int64_t target_file_size_base() {
+    std::shared_lock l(rwlock_);
+    return target_file_size_base_;
+  }
+
+  uint64_t max_compaction_bytes() {
+    std::shared_lock l(rwlock_);
+    return static_cast<uint64_t>(max_compaction_bytes_);
+  }
+
+  int max_cache_statistic_keys() {
+    std::shared_lock l(rwlock_);
+    return max_cache_statistic_keys_;
+  }
+  int small_compaction_threshold() {
+    std::shared_lock l(rwlock_);
+    return small_compaction_threshold_;
+  }
+  int small_compaction_duration_threshold() {
+    std::shared_lock l(rwlock_);
+    return small_compaction_duration_threshold_;
+  }
+  int max_background_flushes() {
+    std::shared_lock l(rwlock_);
+    return max_background_flushes_;
+  }
+  int max_background_compactions() {
+    std::shared_lock l(rwlock_);
+    return max_background_compactions_;
+  }
+  int max_background_jobs() {
+    std::shared_lock l(rwlock_);
+    return max_background_jobs_;
+  }
+  uint64_t delayed_write_rate() {
+    std::shared_lock l(rwlock_);
+    return static_cast<uint64_t>(delayed_write_rate_);
+  }
+  int max_cache_files() {
+    std::shared_lock l(rwlock_);
+    return max_cache_files_;
+  }
+  int max_bytes_for_level_multiplier() {
+    std::shared_lock l(rwlock_);
+    return max_bytes_for_level_multiplier_;
+  }
+  int64_t block_size() {
+    std::shared_lock l(rwlock_);
+    return block_size_;
+  }
+  int64_t block_cache() {
+    std::shared_lock l(rwlock_);
+    return block_cache_;
+  }
+  int64_t num_shard_bits() {
+    std::shared_lock l(rwlock_);
+    return num_shard_bits_;
+  }
+  bool share_block_cache() {
+    std::shared_lock l(rwlock_);
+    return share_block_cache_;
+  }
+  bool wash_data() {
+    std::shared_lock l(rwlock_);
+    return wash_data_;
+  }
+  bool enable_partitioned_index_filters() {
+    std::shared_lock l(rwlock_);
+    return enable_partitioned_index_filters_;
+  }
+  bool cache_index_and_filter_blocks() {
+    std::shared_lock l(rwlock_);
+    return cache_index_and_filter_blocks_;
+  }
+  bool pin_l0_filter_and_index_blocks_in_cache() {
+    std::shared_lock l(rwlock_);
+    return pin_l0_filter_and_index_blocks_in_cache_;
+  }
+  bool optimize_filters_for_hits() {
+    std::shared_lock l(rwlock_);
+    return optimize_filters_for_hits_;
+  }
+  bool level_compaction_dynamic_level_bytes() {
+    std::shared_lock l(rwlock_);
+    return level_compaction_dynamic_level_bytes_;
+  }
+  int expire_logs_nums() {
+    std::shared_lock l(rwlock_);
+    return expire_logs_nums_;
+  }
+  int expire_logs_days() {
+    std::shared_lock l(rwlock_);
+    return expire_logs_days_;
+  }
+  std::string conf_path() {
+    std::shared_lock l(rwlock_);
+    return conf_path_;
+  }
+  bool slave_read_only() {
+    std::shared_lock l(rwlock_);
+    return slave_read_only_;
+  }
+  int maxclients() {
+    std::shared_lock l(rwlock_);
+    return maxclients_;
+  }
+  int root_connection_num() {
+    std::shared_lock l(rwlock_);
+    return root_connection_num_;
+  }
+  bool slowlog_write_errorlog() { return slowlog_write_errorlog_.load(); }
+  int slowlog_slower_than() { return slowlog_log_slower_than_.load(); }
+  int slowlog_max_len() {
+    std::shared_lock l(rwlock_);
+    return slowlog_max_len_;
+  }
+  std::string network_interface() {
+    std::shared_lock l(rwlock_);
+    return network_interface_;
+  }
+  int cache_mode() { return cache_mode_; }
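+  // All getters above follow one reader-writer idiom: reads take a
+  // std::shared_lock on rwlock_ so they can run concurrently, while the
+  // setters further down take an exclusive lock. A minimal free-standing
+  // sketch of the idiom (names here are illustrative, not part of Pika):
+  //
+  //   class ConfSketch {
+  //    public:
+  //     int port() {
+  //       std::shared_lock l(rwlock_);  // shared: readers run concurrently
+  //       return port_;
+  //     }
+  //     void SetPort(int v) {
+  //       std::lock_guard l(rwlock_);   // exclusive: blocks readers and writers
+  //       port_ = v;
+  //     }
+  //    private:
+  //     std::shared_mutex rwlock_;
+  //     int port_ = 0;
+  //   };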
+ int sync_window_size() { return sync_window_size_.load(); } + int max_conn_rbuf_size() { return max_conn_rbuf_size_.load(); } + int consensus_level() { return consensus_level_.load(); } + int replication_num() { return replication_num_.load(); } + int rate_limiter_mode() { + std::shared_lock l(rwlock_); + return rate_limiter_mode_; + } + int64_t rate_limiter_bandwidth() { + std::shared_lock l(rwlock_); + return rate_limiter_bandwidth_; + } + int64_t rate_limiter_refill_period_us() { + std::shared_lock l(rwlock_); + return rate_limiter_refill_period_us_; + } + int64_t rate_limiter_fairness() { + std::shared_lock l(rwlock_); + return rate_limiter_fairness_; + } + bool rate_limiter_auto_tuned() { + std::shared_lock l(rwlock_); + return rate_limiter_auto_tuned_; + } + bool IsCacheDisabledTemporarily() { return tmp_cache_disable_flag_; } + int GetCacheString() { return cache_string_; } + int GetCacheSet() { return cache_set_; } + int GetCacheZset() { return cache_zset_; } + int GetCacheHash() { return cache_hash_; } + int GetCacheList() { return cache_list_; } + int GetCacheBit() { return cache_bit_; } + int GetCacheNum() { return cache_num_; } + void SetCacheNum(const int value) { cache_num_ = value; } + void SetCacheMode(const int value) { cache_mode_ = value; } + void SetCacheStartDirection(const int value) { zset_cache_start_direction_ = value; } + void SetCacheItemsPerKey(const int value) { zset_cache_field_num_per_key_ = value; } + void SetCacheMaxKeySize(const int value) { max_key_size_in_cache_ = value; } + void SetCacheMaxmemory(const int64_t value) { cache_maxmemory_ = value; } + void SetCacheMaxmemoryPolicy(const int value) { cache_maxmemory_policy_ = value; } + void SetCacheMaxmemorySamples(const int value) { cache_maxmemory_samples_ = value; } + void SetCacheLFUDecayTime(const int value) { cache_lfu_decay_time_ = value; } + void UnsetCacheDisableFlag() { tmp_cache_disable_flag_ = false; } + bool enable_blob_files() { return enable_blob_files_; } + int64_t min_blob_size() { return min_blob_size_; } + int64_t blob_file_size() { return blob_file_size_; } + std::string blob_compression_type() { return blob_compression_type_; } + bool enable_blob_garbage_collection() { return enable_blob_garbage_collection_; } + double blob_garbage_collection_age_cutoff() { return blob_garbage_collection_age_cutoff_; } + double blob_garbage_collection_force_threshold() { return blob_garbage_collection_force_threshold_; } + int64_t blob_cache() { return blob_cache_; } + int64_t blob_num_shard_bits() { return blob_num_shard_bits_; } + + // Rsync Rate limiting configuration + int throttle_bytes_per_second() { + std::shared_lock l(rwlock_); + return throttle_bytes_per_second_; + } + int max_rsync_parallel_num() { + std::shared_lock l(rwlock_); + return max_rsync_parallel_num_; + } + int64_t rsync_timeout_ms() { + return rsync_timeout_ms_.load(std::memory_order::memory_order_relaxed); + } + + // Slow Commands configuration + const std::string GetSlowCmd() { + std::shared_lock l(rwlock_); + return pstd::Set2String(slow_cmd_set_, ','); + } + + // Admin Commands configuration + const std::string GetAdminCmd() { + std::shared_lock l(rwlock_); + return pstd::Set2String(admin_cmd_set_, ','); + } + + const std::string GetUserBlackList() { + std::shared_lock l(rwlock_); + return userblacklist_; + } + + bool is_slow_cmd(const std::string& cmd) { + std::shared_lock l(rwlock_); + return slow_cmd_set_.find(cmd) != slow_cmd_set_.end(); + } + + bool is_admin_cmd(const std::string& cmd) { + return 
admin_cmd_set_.find(cmd) != admin_cmd_set_.end(); + } + + // Immutable config items, we don't use lock. + bool daemonize() { return daemonize_; } + bool rtc_cache_read_enabled() { return rtc_cache_read_enabled_; } + std::string pidfile() { return pidfile_; } + int binlog_file_size() { return binlog_file_size_; } + std::vector compression_per_level(); + std::string compression_all_levels() const { return compression_per_level_; }; + static rocksdb::CompressionType GetCompression(const std::string& value); + + std::vector& users() { return users_; }; + std::string acl_file() { return aclFile_; }; + + uint32_t acl_pubsub_default() { return acl_pubsub_default_.load(); } + uint32_t acl_log_max_len() { return acl_Log_max_len_.load(); } + + // Setter + void SetPort(const int value) { + std::lock_guard l(rwlock_); + port_ = value; + } + void SetThreadNum(const int value) { + std::lock_guard l(rwlock_); + thread_num_ = value; + } + void SetTimeout(const int value) { + std::lock_guard l(rwlock_); + TryPushDiffCommands("timeout", std::to_string(value)); + timeout_ = value; + } + void SetThreadPoolSize(const int value) { + std::lock_guard l(rwlock_); + thread_pool_size_ = value; + } + + void SetLowLevelThreadPoolSize(const int value) { + std::lock_guard l(rwlock_); + slow_cmd_thread_pool_size_ = value; + } + + void SetAdminThreadPoolSize(const int value) { + std::lock_guard l(rwlock_); + admin_thread_pool_size_ = value; + } + + void SetSlaveof(const std::string& value) { + std::lock_guard l(rwlock_); + TryPushDiffCommands("slaveof", value); + slaveof_ = value; + } + + void SetRocksdbTTLSecond(uint64_t ttl) { + rocksdb_ttl_second_.store(ttl); + } + + void SetRocksdbPeriodicSecond(uint64_t value) { + rocksdb_periodic_second_.store(value); + } + + void SetReplicationID(const std::string& value) { + std::lock_guard l(rwlock_); + TryPushDiffCommands("replication-id", value); + replication_id_ = value; + } + void SetSlavePriority(const int value) { + std::lock_guard l(rwlock_); + TryPushDiffCommands("slave-priority", std::to_string(value)); + slave_priority_ = value; + } + void SetWriteBinlog(const std::string& value) { + std::lock_guard l(rwlock_); + TryPushDiffCommands("write-binlog", value); + write_binlog_ = value == "yes"; + } + void SetMaxCacheStatisticKeys(const int value) { + std::lock_guard l(rwlock_); + TryPushDiffCommands("max-cache-statistic-keys", std::to_string(value)); + max_cache_statistic_keys_ = value; + } + void SetSmallCompactionThreshold(const int value) { + std::lock_guard l(rwlock_); + TryPushDiffCommands("small-compaction-threshold", std::to_string(value)); + small_compaction_threshold_ = value; + } + void SetSmallCompactionDurationThreshold(const int value) { + std::lock_guard l(rwlock_); + TryPushDiffCommands("small-compaction-duration-threshold", std::to_string(value)); + small_compaction_duration_threshold_ = value; + } + void SetMaxClientResponseSize(const int value) { + std::lock_guard l(rwlock_); + TryPushDiffCommands("max-client-response-size", std::to_string(value)); + max_client_response_size_ = value; + } + void SetBgsavePath(const std::string& value) { + std::lock_guard l(rwlock_); + bgsave_path_ = value; + if (value[value.length() - 1] != '/') { + bgsave_path_ += "/"; + } + } + void SetExpireDumpDays(const int value) { + std::lock_guard l(rwlock_); + TryPushDiffCommands("dump-expire", std::to_string(value)); + expire_dump_days_ = value; + } + void SetBgsavePrefix(const std::string& value) { + std::lock_guard l(rwlock_); + TryPushDiffCommands("dump-prefix", value); + 
bgsave_prefix_ = value; + } + void SetRunID(const std::string& value) { + std::lock_guard l(rwlock_); + TryPushDiffCommands("run-id", value); + run_id_ = value; + } + void SetRequirePass(const std::string& value) { + std::lock_guard l(rwlock_); + TryPushDiffCommands("requirepass", value); + requirepass_ = value; + } + void SetMasterAuth(const std::string& value) { + std::lock_guard l(rwlock_); + TryPushDiffCommands("masterauth", value); + masterauth_ = value; + } + void SetUserPass(const std::string& value) { + std::lock_guard l(rwlock_); + TryPushDiffCommands("userpass", value); + userpass_ = value; + } + void SetUserBlackList(const std::string& value) { + std::lock_guard l(rwlock_); + TryPushDiffCommands("userblacklist", value); + pstd::StringSplit(value, COMMA, user_blacklist_); + for (auto& item : user_blacklist_) { + pstd::StringToLower(item); + } + } + void SetSlotMigrate(const bool value) { + std::lock_guard l(rwlock_); + TryPushDiffCommands("slotmigrate", value ? "yes" : "no"); + slotmigrate_.store(value); + } + void SetSlowCmdPool(const bool value) { + std::lock_guard l(rwlock_); + TryPushDiffCommands("slow-cmd-pool", value ? "yes" : "no"); + slow_cmd_pool_.store(value); + } + void SetSlotMigrateThreadNum(const int value) { + std::lock_guard l(rwlock_); + TryPushDiffCommands("slotmigrate-thread-num", std::to_string(value)); + slotmigrate_thread_num_ = value; + } + void SetThreadMigrateKeysNum(const int value) { + std::lock_guard l(rwlock_); + TryPushDiffCommands("thread-migrate-keys-num", std::to_string(value)); + thread_migrate_keys_num_ = value; + } + void SetExpireLogsNums(const int value) { + std::lock_guard l(rwlock_); + TryPushDiffCommands("expire-logs-nums", std::to_string(value)); + expire_logs_nums_ = value; + } + void SetExpireLogsDays(const int value) { + std::lock_guard l(rwlock_); + TryPushDiffCommands("expire-logs-days", std::to_string(value)); + expire_logs_days_ = value; + } + void SetMaxConnection(const int value) { + std::lock_guard l(rwlock_); + TryPushDiffCommands("maxclients", std::to_string(value)); + maxclients_ = value; + } + void SetRootConnectionNum(const int value) { + std::lock_guard l(rwlock_); + TryPushDiffCommands("root-connection-num", std::to_string(value)); + root_connection_num_ = value; + } + void SetSlowlogWriteErrorlog(const bool value) { + std::lock_guard l(rwlock_); + TryPushDiffCommands("slowlog-write-errorlog", value ? 
"yes" : "no"); + slowlog_write_errorlog_.store(value); + } + void SetSlowlogSlowerThan(const int value) { + std::lock_guard l(rwlock_); + TryPushDiffCommands("slowlog-log-slower-than", std::to_string(value)); + slowlog_log_slower_than_.store(value); + } + void SetSlowlogMaxLen(const int value) { + std::lock_guard l(rwlock_); + TryPushDiffCommands("slowlog-max-len", std::to_string(value)); + slowlog_max_len_ = value; + } + void SetDbSyncSpeed(const int value) { + std::lock_guard l(rwlock_); + TryPushDiffCommands("db-sync-speed", std::to_string(value)); + db_sync_speed_ = value; + } + void SetCompactCron(const std::string& value) { + std::lock_guard l(rwlock_); + TryPushDiffCommands("compact-cron", value); + compact_cron_ = value; + } + void SetCompactInterval(const std::string& value) { + std::lock_guard l(rwlock_); + TryPushDiffCommands("compact-interval", value); + compact_interval_ = value; + } + void SetDisableAutoCompaction(const std::string& value) { + std::lock_guard l(rwlock_); + TryPushDiffCommands("disable_auto_compactions", value); + disable_auto_compactions_ = value == "true"; + } + void SetMaxSubcompactions(const int& value) { + std::lock_guard l(rwlock_); + TryPushDiffCommands("max-subcompactions", std::to_string(value)); + max_subcompactions_ = value; + } + void SetLeastResumeFreeDiskSize(const int64_t& value) { + std::lock_guard l(rwlock_); + TryPushDiffCommands("least-free-disk-resume-size", std::to_string(value)); + least_free_disk_to_resume_ = value; + } + void SetResumeInterval(const int64_t& value) { + std::lock_guard l(rwlock_); + TryPushDiffCommands("manually-resume-interval", std::to_string(value)); + resume_check_interval_ = value; + } + void SetMinCheckResumeRatio(const double& value) { + std::lock_guard l(rwlock_); + TryPushDiffCommands("min-check-resume-ratio", std::to_string(value)); + min_check_resume_ratio_ = value; + } + void SetSyncWindowSize(const int& value) { + TryPushDiffCommands("sync-window-size", std::to_string(value)); + sync_window_size_.store(value); + } + void SetMaxConnRbufSize(const int& value) { + TryPushDiffCommands("max-conn-rbuf-size", std::to_string(value)); + max_conn_rbuf_size_.store(value); + } + void SetMaxCacheFiles(const int& value) { + std::lock_guard l(rwlock_); + TryPushDiffCommands("max-cache-files", std::to_string(value)); + max_cache_files_ = value; + } + void SetMaxBackgroudCompactions(const int& value) { + std::lock_guard l(rwlock_); + TryPushDiffCommands("max-background-compactions", std::to_string(value)); + max_background_compactions_ = value; + } + void SetMaxBackgroudJobs(const int& value) { + std::lock_guard l(rwlock_); + TryPushDiffCommands("max-background-jobs", std::to_string(value)); + max_background_jobs_ = value; + } + void SetWriteBufferSize(const int& value) { + std::lock_guard l(rwlock_); + TryPushDiffCommands("write-buffer-size", std::to_string(value)); + write_buffer_size_ = value; + } + void SetMinWriteBufferNumberToMerge(const int& value) { + std::lock_guard l(rwlock_); + TryPushDiffCommands("min-write-buffer-number-to-merge", std::to_string(value)); + min_write_buffer_number_to_merge_ = value; + } + void SetLevel0StopWritesTrigger(const int& value) { + std::lock_guard l(rwlock_); + TryPushDiffCommands("level0-stop-writes-trigger", std::to_string(value)); + level0_stop_writes_trigger_ = value; + } + void SetLevel0SlowdownWritesTrigger(const int& value) { + std::lock_guard l(rwlock_); + TryPushDiffCommands("level0-slowdown-writes-trigger", std::to_string(value)); + level0_slowdown_writes_trigger_ = value; + } 
+ void SetLevel0FileNumCompactionTrigger(const int& value) { + std::lock_guard l(rwlock_); + TryPushDiffCommands("level0-file-num-compaction-trigger", std::to_string(value)); + level0_file_num_compaction_trigger_ = value; + } + void SetMaxWriteBufferNumber(const int& value) { + std::lock_guard l(rwlock_); + TryPushDiffCommands("max-write-buffer-num", std::to_string(value)); + max_write_buffer_num_ = value; + } + void SetMaxTotalWalSize(uint64_t value) { + std::lock_guard l(rwlock_); + TryPushDiffCommands("max-total-wal-size", std::to_string(value)); + max_total_wal_size_ = value; + } + void SetArenaBlockSize(const int& value) { + std::lock_guard l(rwlock_); + TryPushDiffCommands("arena-block-size", std::to_string(value)); + arena_block_size_ = value; + } + + void SetRateLmiterBandwidth(int64_t value) { + std::lock_guard l(rwlock_); + TryPushDiffCommands("rate-limiter-bandwidth", std::to_string(value)); + rate_limiter_bandwidth_ = value; + } + + void SetDelayedWriteRate(int64_t value) { + std::lock_guard l(rwlock_); + TryPushDiffCommands("delayed-write-rate", std::to_string(value)); + delayed_write_rate_ = value; + } + + void SetMaxCompactionBytes(int64_t value) { + std::lock_guard l(rwlock_); + TryPushDiffCommands("max-compaction-bytes", std::to_string(value)); + max_compaction_bytes_ = value; + } + + void SetLogLevel(const std::string& value) { + std::lock_guard l(rwlock_); + TryPushDiffCommands("loglevel", value); + log_level_ = value; + } + + // Rsync Rate limiting configuration + void SetThrottleBytesPerSecond(const int value) { + std::lock_guard l(rwlock_); + TryPushDiffCommands("throttle-bytes-per-second", std::to_string(value)); + throttle_bytes_per_second_ = value; + } + + void SetMaxRsyncParallelNum(const int value) { + std::lock_guard l(rwlock_); + TryPushDiffCommands("max-rsync-parallel-num", std::to_string(value)); + max_rsync_parallel_num_ = value; + } + + void SetRsyncTimeoutMs(int64_t value){ + std::lock_guard l(rwlock_); + TryPushDiffCommands("rsync-timeout-ms", std::to_string(value)); + rsync_timeout_ms_.store(value); + } + + void SetAclPubsubDefault(const std::string& value) { + std::lock_guard l(rwlock_); + TryPushDiffCommands("acl-pubsub-default", value); + if (value == "resetchannels") { + acl_pubsub_default_ = 0; + } else { + acl_pubsub_default_ = static_cast(AclSelectorFlag::ALL_CHANNELS); + } + } + void SetAclLogMaxLen(const int value) { + std::lock_guard l(rwlock_); + TryPushDiffCommands("acllog-max-len", std::to_string(value)); + acl_Log_max_len_ = value; + } + + const std::string scache_type() { + std::shared_lock l(rwlock_); + return pstd::StringConcat(cache_type_, COMMA); + } + + int64_t cache_maxmemory() { return cache_maxmemory_; } + + void SetSlowCmd(const std::string& value) { + std::lock_guard l(rwlock_); + std::string lower_value = value; + pstd::StringToLower(lower_value); + TryPushDiffCommands("slow-cmd-list", lower_value); + pstd::StringSplit2Set(lower_value, ',', slow_cmd_set_); + } + + void SetAdminCmd(const std::string& value) { + std::lock_guard l(rwlock_); + std::string lower_value = value; + pstd::StringToLower(lower_value); + TryPushDiffCommands("admin-cmd-list", lower_value); + pstd::StringSplit2Set(lower_value, ',', admin_cmd_set_); + } + + void SetInternalUsedUnFinishedFullSync(const std::string& value) { + std::lock_guard l(rwlock_); + std::string lower_value = value; + pstd::StringToLower(lower_value); + TryPushDiffCommands("internal-used-unfinished-full-sync", lower_value); + pstd::StringSplit2Set(lower_value, ',', 
internal_used_unfinished_full_sync_);
+  }
+
+  void AddInternalUsedUnfinishedFullSync(const std::string& db_name) {
+    {
+      std::lock_guard l(rwlock_);
+      internal_used_unfinished_full_sync_.insert(db_name);
+      std::string lower_value = pstd::Set2String(internal_used_unfinished_full_sync_, ',');
+      pstd::StringToLower(lower_value);
+      TryPushDiffCommands("internal-used-unfinished-full-sync", lower_value);
+    }
+    ConfigRewrite();
+  }
+
+  void RemoveInternalUsedUnfinishedFullSync(const std::string& db_name) {
+    {
+      std::lock_guard l(rwlock_);
+      internal_used_unfinished_full_sync_.erase(db_name);
+      std::string lower_value = pstd::Set2String(internal_used_unfinished_full_sync_, ',');
+      pstd::StringToLower(lower_value);
+      TryPushDiffCommands("internal-used-unfinished-full-sync", lower_value);
+    }
+    ConfigRewrite();
+  }
+
+  size_t GetUnfinishedFullSyncCount() {
+    std::shared_lock l(rwlock_);
+    return internal_used_unfinished_full_sync_.size();
+  }
+  void SetCacheType(const std::string& value);
+  void SetCacheDisableFlag() { tmp_cache_disable_flag_ = true; }
+  int zset_cache_start_direction() { return zset_cache_start_direction_; }
+  int zset_cache_field_num_per_key() { return zset_cache_field_num_per_key_; }
+  int max_key_size_in_cache() { return max_key_size_in_cache_; }
+  int cache_maxmemory_policy() { return cache_maxmemory_policy_; }
+  int cache_maxmemory_samples() { return cache_maxmemory_samples_; }
+  int cache_lfu_decay_time() { return cache_lfu_decay_time_; }
+  int Load();
+  int ConfigRewrite();
+  int ConfigRewriteReplicationID();
+
+ private:
+  // TODO: replace mutex with atomic value
+  int port_ = 0;
+  int slave_priority_ = 100;
+  int thread_num_ = 0;
+  int thread_pool_size_ = 0;
+  int slow_cmd_thread_pool_size_ = 0;
+  int admin_thread_pool_size_ = 0;
+  std::unordered_set<std::string> slow_cmd_set_;
+  std::unordered_set<std::string> admin_cmd_set_ = {"info", "ping", "monitor"};
+  int sync_thread_num_ = 0;
+  int sync_binlog_thread_num_ = 0;
+  int expire_dump_days_ = 3;
+  int db_sync_speed_ = 0;
+  std::string slaveof_;
+  std::string log_path_;
+  int log_retention_time_;
+  std::string log_level_;
+  std::string db_path_;
+  int db_instance_num_ = 0;
+  std::string db_sync_path_;
+
+  // compact
+  std::string compact_cron_;
+  std::string compact_interval_;
+  int max_subcompactions_ = 1;
+  bool disable_auto_compactions_ = false;
+
+  // for obd_compact
+  int compact_every_num_of_files_;
+  int force_compact_file_age_seconds_;
+  int force_compact_min_delete_ratio_;
+  int dont_compact_sst_created_in_seconds_;
+  int best_delete_min_ratio_;
+  CompactionStrategy compaction_strategy_;
+
+  int64_t resume_check_interval_ = 60;  // seconds
+  int64_t least_free_disk_to_resume_ = 268435456;  // 256 MB
+  double min_check_resume_ratio_ = 0.7;
+  int64_t write_buffer_size_ = 0;
+  int64_t arena_block_size_ = 0;
+  int64_t slotmigrate_thread_num_ = 0;
+  int64_t thread_migrate_keys_num_ = 0;
+  int64_t max_write_buffer_size_ = 0;
+  int64_t max_total_wal_size_ = 0;
+  bool enable_db_statistics_ = false;
+  int db_statistics_level_ = 0;
+  int max_write_buffer_num_ = 0;
+  int min_write_buffer_number_to_merge_ = 1;
+  int level0_stop_writes_trigger_ = 36;
+  int level0_slowdown_writes_trigger_ = 20;
+  int level0_file_num_compaction_trigger_ = 4;
+  int64_t max_client_response_size_ = 0;
+  bool daemonize_ = false;
+  bool rtc_cache_read_enabled_ = false;
+  int timeout_ = 0;
+  std::string server_id_;
+  std::string run_id_;
+  std::string replication_id_;
+  std::string requirepass_;
+  std::string masterauth_;
+  std::string userpass_;
+  std::vector<std::string> user_blacklist_;
+  std::atomic<bool> classic_mode_;
+  int databases_ = 0;
+  int default_slot_num_ = 1;
+  std::vector<DBStruct> db_structs_;
+  std::string default_db_;
+  std::string bgsave_path_;
+  std::string bgsave_prefix_;
+  std::string pidfile_;
+  std::atomic<bool> slow_cmd_pool_;
+
+  std::string compression_;
+  std::string compression_per_level_;
+  int maxclients_ = 0;
+  int root_connection_num_ = 0;
+  std::atomic<bool> slowlog_write_errorlog_;
+  std::atomic<int> slowlog_log_slower_than_;
+  std::atomic<bool> slotmigrate_;
+  std::atomic<int> binlog_writer_num_;
+  int slowlog_max_len_ = 0;
+  int expire_logs_days_ = 0;
+  int expire_logs_nums_ = 0;
+  bool slave_read_only_ = false;
+  std::string conf_path_;
+
+  int max_cache_statistic_keys_ = 0;
+  int small_compaction_threshold_ = 0;
+  int small_compaction_duration_threshold_ = 0;
+  int max_background_flushes_ = -1;
+  int max_background_compactions_ = -1;
+  int max_background_jobs_ = 0;
+  int64_t delayed_write_rate_ = 0;
+  int max_cache_files_ = 0;
+  std::atomic<uint64_t> rocksdb_ttl_second_ = 0;
+  std::atomic<uint64_t> rocksdb_periodic_second_ = 0;
+  int max_bytes_for_level_multiplier_ = 0;
+  int64_t block_size_ = 0;
+  int64_t block_cache_ = 0;
+  int64_t num_shard_bits_ = 0;
+  bool share_block_cache_ = false;
+  bool enable_partitioned_index_filters_ = false;
+  bool cache_index_and_filter_blocks_ = false;
+  bool pin_l0_filter_and_index_blocks_in_cache_ = false;
+  bool optimize_filters_for_hits_ = false;
+  bool level_compaction_dynamic_level_bytes_ = true;
+  int rate_limiter_mode_ = 0;  // kReadsOnly = 0, kWritesOnly = 1, kAllIo = 2
+  int64_t rate_limiter_bandwidth_ = 0;
+  int64_t rate_limiter_refill_period_us_ = 0;
+  int64_t rate_limiter_fairness_ = 0;
+  bool rate_limiter_auto_tuned_ = true;
+
+  std::atomic<int> sync_window_size_;
+  std::atomic<int> max_conn_rbuf_size_;
+  std::atomic<int> consensus_level_;
+  std::atomic<int> replication_num_;
+
+  std::string network_interface_;
+
+  std::string userblacklist_;
+  std::vector<std::string> users_;  // acl user rules
+
+  std::string aclFile_;
+  std::vector<std::string> cmds_;
+  std::atomic<uint32_t> acl_pubsub_default_ = 0;  // default channel pub/sub permission
+  std::atomic<uint32_t> acl_Log_max_len_ = 0;  // default acl log max len
+
+  // Runtime overrides recorded by TryPushDiffCommands(): the diff between the
+  // cached (in-memory) config and the config file, merged back into pika.conf
+  // by ConfigRewrite()
+  std::map<std::string, std::string> diff_commands_;
+  void TryPushDiffCommands(const std::string& command, const std::string& value);
+
+  //
+  // Critical configure items
+  //
+  bool write_binlog_ = false;
+  int64_t target_file_size_base_ = 0;
+  int64_t max_compaction_bytes_ = 0;
+  int binlog_file_size_ = 0;
+
+  // cache
+  std::vector<std::string> cache_type_;
+  std::atomic_bool tmp_cache_disable_flag_ = false;
+  std::atomic_int64_t cache_maxmemory_ = 10737418240;
+  std::atomic_int cache_num_ = 5;
+  std::atomic_int cache_mode_ = 1;
+  std::atomic_int cache_string_ = 1;
+  std::atomic_int cache_set_ = 1;
+  std::atomic_int cache_zset_ = 1;
+  std::atomic_int cache_hash_ = 1;
+  std::atomic_int cache_list_ = 1;
+  std::atomic_int cache_bit_ = 1;
+  std::atomic_int zset_cache_start_direction_ = 0;
+  std::atomic_int zset_cache_field_num_per_key_ = 512;
+  std::atomic_int max_key_size_in_cache_ = 512;
+  std::atomic_int cache_maxmemory_policy_ = 1;
+  std::atomic_int cache_maxmemory_samples_ = 5;
+  std::atomic_int cache_lfu_decay_time_ = 1;
+
+  // rocksdb blob
+  bool enable_blob_files_ = false;
+  bool enable_blob_garbage_collection_ = false;
+  double blob_garbage_collection_age_cutoff_ = 0.25;
+  double blob_garbage_collection_force_threshold_ = 1.0;
+  int64_t min_blob_size_ = 4096;  // 4K
+  int64_t blob_cache_ = 0;
+  int64_t blob_num_shard_bits_ = 0;
+  int64_t blob_file_size_ = 256 * 1024 * 1024;
// 256M
+  std::string blob_compression_type_ = "none";
+
+  std::shared_mutex rwlock_;
+
+  // Rsync rate-limiting configuration
+  int throttle_bytes_per_second_ = 200 << 20;  // 200MB/s
+  int max_rsync_parallel_num_ = kMaxRsyncParallelNum;
+  std::atomic_int64_t rsync_timeout_ms_ = 1000;
+
+  // Internal-use metrics, persisted in pika.conf
+  std::unordered_set<std::string> internal_used_unfinished_full_sync_;
+
+  // for washing data when upgrading from 4.0.0 to 4.0.1
+  bool wash_data_;
+};
+
+#endif
diff --git a/tools/pika_migrate/include/pika_consensus.h b/tools/pika_migrate/include/pika_consensus.h
new file mode 100644
index 0000000000..bb774b5e3b
--- /dev/null
+++ b/tools/pika_migrate/include/pika_consensus.h
@@ -0,0 +1,203 @@
+// Copyright (c) 2015-present, Qihoo, Inc. All rights reserved.
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree. An additional grant
+// of patent rights can be found in the PATENTS file in the same directory.
+#ifndef PIKA_CONSENSUS_H_
+#define PIKA_CONSENSUS_H_
+
+#include <utility>
+
+#include "include/pika_define.h"
+#include "pstd/include/env.h"
+#include "include/pika_binlog_transverter.h"
+#include "include/pika_client_conn.h"
+#include "include/pika_slave_node.h"
+#include "include/pika_stable_log.h"
+
+class Context : public pstd::noncopyable {
+ public:
+  Context(std::string path);
+
+  pstd::Status Init();
+  // rwlock_ must be held when accessing members.
+  pstd::Status StableSave();
+  void UpdateAppliedIndex(const LogOffset& offset);
+  void Reset(const LogOffset& offset);
+
+  std::shared_mutex rwlock_;
+  LogOffset applied_index_;
+  SyncWindow applied_win_;
+
+  std::string ToString() {
+    std::stringstream tmp_stream;
+    std::shared_lock l(rwlock_);
+    tmp_stream << "  Applied_index " << applied_index_.ToString() << "\r\n";
+    tmp_stream << "  Applied window " << applied_win_.ToStringStatus();
+    return tmp_stream.str();
+  }
+
+ private:
+  std::string path_;
+  std::unique_ptr<pstd::RWFile> save_;
+};
+
+class SyncProgress {
+ public:
+  SyncProgress() = default;
+  ~SyncProgress() = default;
+  std::shared_ptr<SlaveNode> GetSlaveNode(const std::string& ip, int port);
+  std::unordered_map<std::string, std::shared_ptr<SlaveNode>> GetAllSlaveNodes();
+  pstd::Status AddSlaveNode(const std::string& ip, int port, const std::string& db_name, int session_id);
+  pstd::Status RemoveSlaveNode(const std::string& ip, int port);
+  pstd::Status Update(const std::string& ip, int port, const LogOffset& start, const LogOffset& end,
+                      LogOffset* committed_index);
+  int SlaveSize();
+
+ private:
+  std::shared_mutex rwlock_;
+  std::unordered_map<std::string, std::shared_ptr<SlaveNode>> slaves_;
+  std::unordered_map<std::string, LogOffset> match_index_;
+};
+
+class MemLog {
+ public:
+  struct LogItem {
+    LogItem(const LogOffset& _offset, std::shared_ptr<Cmd> _cmd_ptr, std::shared_ptr<PikaClientConn> _conn_ptr,
+            std::shared_ptr<std::string> _resp_ptr)
+        : offset(_offset), cmd_ptr(std::move(_cmd_ptr)), conn_ptr(std::move(_conn_ptr)), resp_ptr(std::move(_resp_ptr)) {}
+    LogOffset offset;
+    std::shared_ptr<Cmd> cmd_ptr;
+    std::shared_ptr<PikaClientConn> conn_ptr;
+    std::shared_ptr<std::string> resp_ptr;
+  };
+
+  MemLog();
+  int Size();
+  void AppendLog(const LogItem& item) {
+    std::lock_guard lock(logs_mu_);
+    logs_.push_back(item);
+    last_offset_ = item.offset;
+  }
+  pstd::Status TruncateTo(const LogOffset& offset);
+
+  void Reset(const LogOffset& offset);
+
+  LogOffset last_offset() {
+    std::lock_guard lock(logs_mu_);
+    return last_offset_;
+  }
+  void SetLastOffset(const LogOffset& offset) {
+    std::lock_guard lock(logs_mu_);
+    last_offset_ = offset;
+  }
+  bool FindLogItem(const LogOffset& offset, LogOffset* found_offset);
+
+ private:
+  int InternalFindLogByBinlogOffset(const LogOffset& offset);
+  int InternalFindLogByLogicIndex(const LogOffset& offset);
+  pstd::Mutex logs_mu_;
+  std::vector<LogItem> logs_;
+  LogOffset last_offset_;
+};
+
+class ConsensusCoordinator {
+ public:
+  ConsensusCoordinator(const std::string& db_name);
+  ~ConsensusCoordinator();
+  // invoked from the constructor, so no locks are held
+  void Init();
+  // invoked by the dbsync process
+  pstd::Status Reset(const LogOffset& offset);
+
+  pstd::Status ProposeLog(const std::shared_ptr<Cmd>& cmd_ptr);
+  pstd::Status UpdateSlave(const std::string& ip, int port, const LogOffset& start, const LogOffset& end);
+  pstd::Status AddSlaveNode(const std::string& ip, int port, int session_id);
+  pstd::Status RemoveSlaveNode(const std::string& ip, int port);
+  void UpdateTerm(uint32_t term);
+  uint32_t term();
+
+  // invoked by the follower
+  pstd::Status ProcessLeaderLog(const std::shared_ptr<Cmd>& cmd_ptr, const BinlogItem& attribute);
+
+  // negotiate
+  pstd::Status LeaderNegotiate(const LogOffset& f_last_offset, bool* reject, std::vector<LogOffset>* hints);
+  pstd::Status FollowerNegotiate(const std::vector<LogOffset>& hints, LogOffset* reply_offset);
+
+  SyncProgress& SyncPros() { return sync_pros_; }
+  std::shared_ptr<StableLog> StableLogger() { return stable_logger_; }
+  std::shared_ptr<MemLog> MemLogger() { return mem_logger_; }
+
+  LogOffset committed_index() {
+    std::lock_guard lock(index_mu_);
+    return committed_index_;
+  }
+
+  std::shared_ptr<Context> context() { return context_; }
+
+  // redis parser callback
+  struct CmdPtrArg {
+    CmdPtrArg(std::shared_ptr<Cmd> ptr) : cmd_ptr(std::move(ptr)) {}
+    std::shared_ptr<Cmd> cmd_ptr;
+  };
+  static int InitCmd(net::RedisParser* parser, const net::RedisCmdArgsType& argv);
+
+  std::string ToStringStatus() {
+    std::stringstream tmp_stream;
+    {
+      std::lock_guard lock(index_mu_);
+      tmp_stream << "  Committed_index: " << committed_index_.ToString() << "\r\n";
+    }
+    tmp_stream << "  Context: "
+               << "\r\n"
+               << context_->ToString();
+    {
+      std::shared_lock lock(term_rwlock_);
+      tmp_stream << "  Term: " << term_ << "\r\n";
+    }
+    tmp_stream << "  Mem_logger size: " << mem_logger_->Size() << " last offset "
+               << mem_logger_->last_offset().ToString() << "\r\n";
+    tmp_stream << "  Stable_logger first offset " << stable_logger_->first_offset().ToString() << "\r\n";
+    LogOffset log_status;
+    stable_logger_->Logger()->GetProducerStatus(&(log_status.b_offset.filenum), &(log_status.b_offset.offset),
+                                                &(log_status.l_offset.term), &(log_status.l_offset.index));
+    tmp_stream << "  Physical Binlog Status: " << log_status.ToString() << "\r\n";
+    return tmp_stream.str();
+  }
+
+ private:
+  pstd::Status TruncateTo(const LogOffset& offset);
+
+  pstd::Status InternalAppendLog(const std::shared_ptr<Cmd>& cmd_ptr);
+  pstd::Status InternalAppendBinlog(const std::shared_ptr<Cmd>& cmd_ptr);
+  void InternalApply(const MemLog::LogItem& log);
+  void InternalApplyFollower(const std::shared_ptr<Cmd>& cmd_ptr);
+
+  pstd::Status GetBinlogOffset(const BinlogOffset& start_offset, LogOffset* log_offset);
+  pstd::Status GetBinlogOffset(const BinlogOffset& start_offset, const BinlogOffset& end_offset,
+                               std::vector<LogOffset>* log_offset);
+  pstd::Status FindBinlogFileNum(const std::map<uint32_t, std::string>& binlogs, uint64_t target_index,
+                                 uint32_t start_filenum, uint32_t* founded_filenum);
+  pstd::Status FindLogicOffsetBySearchingBinlog(const BinlogOffset& hint_offset, uint64_t target_index,
+                                                LogOffset* found_offset);
+  pstd::Status FindLogicOffset(const BinlogOffset& start_offset, uint64_t target_index, LogOffset* found_offset);
+  pstd::Status GetLogsBefore(const BinlogOffset& start_offset, std::vector<LogOffset>* hints);
+
+ private:
+  // serializes operations on the members of this class
+  pstd::Mutex order_mu_;
+
+  pstd::Mutex index_mu_;
+  LogOffset committed_index_;
+
+  std::shared_ptr<Context> context_;
+
+  std::shared_mutex term_rwlock_;
+  uint32_t term_ = 0;
+
+  std::string db_name_;
+
+  SyncProgress sync_pros_;
+  std::shared_ptr<StableLog> stable_logger_;
+  std::shared_ptr<MemLog> mem_logger_;
+};
+#endif  // INCLUDE_PIKA_CONSENSUS_H_
diff --git a/tools/pika_migrate/include/pika_data_distribution.h b/tools/pika_migrate/include/pika_data_distribution.h
new file mode 100644
index 0000000000..7f8d494fe0
--- /dev/null
+++ b/tools/pika_migrate/include/pika_data_distribution.h
@@ -0,0 +1,28 @@
+// Copyright (c) 2015-present, Qihoo, Inc. All rights reserved.
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree. An additional grant
+// of patent rights can be found in the PATENTS file in the same directory.
+
+#ifndef PIKA_DATA_DISTRIBUTION_H_
+#define PIKA_DATA_DISTRIBUTION_H_
+
+#include <cstdint>
+#include <string>
+
+// reversed CRC32 polynomial magic number (IEEE 802.3)
+const uint32_t IEEE_POLY = 0xedb88320;
+
+class PikaDataDistribution {
+ public:
+  virtual ~PikaDataDistribution() = default;
+  // Initialization
+  virtual void Init() = 0;
+};
+
+class HashModulo : public PikaDataDistribution {
+ public:
+  ~HashModulo() override = default;
+  void Init() override;
+};
+
+#endif
diff --git a/tools/pika_migrate/include/pika_db.h b/tools/pika_migrate/include/pika_db.h
new file mode 100644
index 0000000000..3dfe3b69f5
--- /dev/null
+++ b/tools/pika_migrate/include/pika_db.h
@@ -0,0 +1,206 @@
+// Copyright (c) 2018-present, Qihoo, Inc. All rights reserved.
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree. An additional grant
+// of patent rights can be found in the PATENTS file in the same directory.
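
The `HashModulo` distribution declared above in pika_data_distribution.h pairs `IEEE_POLY`, the reversed IEEE 802.3 CRC32 polynomial, with a plain modulo to place keys. A minimal self-contained sketch of that placement scheme, assuming the usual reflected (LSB-first) CRC32; the `PickInstance` helper is illustrative and not part of the patch:

```cpp
#include <cstdint>
#include <string>

// Reflected CRC32 using the reversed polynomial 0xedb88320,
// i.e. the IEEE_POLY constant declared in pika_data_distribution.h.
uint32_t Crc32(const std::string& key) {
  uint32_t crc = 0xFFFFFFFFu;
  for (unsigned char c : key) {
    crc ^= c;
    for (int i = 0; i < 8; ++i) {
      crc = (crc >> 1) ^ ((crc & 1u) ? 0xedb88320u : 0u);
    }
  }
  return ~crc;
}

// Hash-modulo placement: map a key onto one of `instance_num` DB instances.
uint32_t PickInstance(const std::string& key, uint32_t instance_num) {
  return Crc32(key) % instance_num;
}
```
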
+ +#ifndef PIKA_DB_H_ +#define PIKA_DB_H_ + +#include + +#include "storage/storage.h" +#include "include/pika_command.h" +#include "lock_mgr.h" +#include "pika_cache.h" +#include "pika_define.h" +#include "storage/backupable.h" + +class PikaCache; +class CacheInfo; +/* + *Keyscan used + */ +struct KeyScanInfo { + time_t start_time = 0; + std::string s_start_time; + int32_t duration = -3; + std::vector key_infos; // the order is strings, hashes, lists, zsets, sets, streams + bool key_scaning_ = false; + KeyScanInfo() : + s_start_time("0"), + key_infos({{0, 0, 0, 0}, {0, 0, 0, 0}, {0, 0, 0, 0}, {0, 0, 0, 0}, {0, 0, 0, 0}, {0, 0, 0, 0}}) + {} +}; + +struct BgSaveInfo { + bool bgsaving = false; + time_t start_time = 0; + std::string s_start_time; + std::string path; + LogOffset offset; + BgSaveInfo() = default; + void Clear() { + bgsaving = false; + path.clear(); + offset = LogOffset(); + } +}; + +struct DisplayCacheInfo { + int status = 0; + uint32_t cache_num = 0; + uint64_t keys_num = 0; + uint64_t used_memory = 0; + uint64_t hits = 0; + uint64_t misses = 0; + uint64_t hits_per_sec = 0; + uint64_t read_cmd_per_sec = 0; + double hitratio_per_sec = 0.0; + double hitratio_all = 0.0; + uint64_t load_keys_per_sec = 0; + uint64_t last_time_us = 0; + uint64_t last_load_keys_num = 0; + uint32_t waitting_load_keys_num = 0; + DisplayCacheInfo& operator=(const DisplayCacheInfo &obj) { + status = obj.status; + cache_num = obj.cache_num; + keys_num = obj.keys_num; + used_memory = obj.used_memory; + hits = obj.hits; + misses = obj.misses; + hits_per_sec = obj.hits_per_sec; + read_cmd_per_sec = obj.read_cmd_per_sec; + hitratio_per_sec = obj.hitratio_per_sec; + hitratio_all = obj.hitratio_all; + load_keys_per_sec = obj.load_keys_per_sec; + last_time_us = obj.last_time_us; + last_load_keys_num = obj.last_load_keys_num; + waitting_load_keys_num = obj.waitting_load_keys_num; + return *this; + } +}; + +class DB : public std::enable_shared_from_this, public pstd::noncopyable { + public: + DB(std::string db_name, const std::string& db_path, const std::string& log_path); + virtual ~DB(); + + friend class Cmd; + friend class InfoCmd; + friend class PkClusterInfoCmd; + friend class PikaServer; + + /** + * When it is the first time for upgrading version from 4.0.0 to 4.0.1, you should call + * this function to wash data. true if successful, false otherwise. 
+ * @see https://github.com/OpenAtomFoundation/pika/issues/2886 + */ + bool WashData(); + + std::string GetDBName(); + std::shared_ptr storage() const; + void GetBgSaveMetaData(std::vector* fileNames, std::string* snapshot_uuid); + void BgSaveDB(); + void SetBinlogIoError(); + void SetBinlogIoErrorrelieve(); + bool IsBinlogIoError(); + std::shared_ptr cache() const; + std::shared_mutex& GetDBLock() { + return dbs_rw_; + } + void DBLock() { + dbs_rw_.lock(); + } + void DBLockShared() { + dbs_rw_.lock_shared(); + } + void DBUnlock() { + dbs_rw_.unlock(); + } + void DBUnlockShared() { + dbs_rw_.unlock_shared(); + } + + // KeyScan use; + void KeyScan(); + bool IsKeyScaning(); + void RunKeyScan(); + void StopKeyScan(); + void ScanDatabase(const storage::DataType& type); + KeyScanInfo GetKeyScanInfo(); + + // Compact use; + void Compact(const storage::DataType& type); + void CompactRange(const storage::DataType& type, const std::string& start, const std::string& end); + void LongestNotCompactionSstCompact(const storage::DataType& type); + + void SetCompactRangeOptions(const bool is_canceled); + + std::shared_ptr LockMgr(); + /* + * Cache used + */ + DisplayCacheInfo GetCacheInfo(); + void UpdateCacheInfo(CacheInfo& cache_info); + void ResetDisplayCacheInfo(int status); + uint64_t cache_usage_; + void Init(); + bool TryUpdateMasterOffset(); + /* + * FlushDB used + */ + bool FlushDBWithoutLock(); + bool ChangeDb(const std::string& new_path); + pstd::Status GetBgSaveUUID(std::string* snapshot_uuid); + void PrepareRsync(); + bool IsBgSaving(); + BgSaveInfo bgsave_info(); + pstd::Status GetKeyNum(std::vector* key_info); + + private: + bool opened_ = false; + std::string dbsync_path_; + std::string db_name_; + std::string db_path_; + std::string snapshot_uuid_; + std::string log_path_; + std::string bgsave_sub_path_; + pstd::Mutex key_info_protector_; + std::atomic binlog_io_error_; + std::shared_mutex dbs_rw_; + // class may be shared, using shared_ptr would be a better choice + std::shared_ptr lock_mgr_; + std::shared_ptr storage_; + std::shared_ptr cache_; + /* + * KeyScan use + */ + static void DoKeyScan(void* arg); + void InitKeyScan(); + pstd::Mutex key_scan_protector_; + KeyScanInfo key_scan_info_; + /* + * Cache used + */ + DisplayCacheInfo cache_info_; + std::shared_mutex cache_info_rwlock_; + /* + * BgSave use + */ + static void DoBgSave(void* arg); + bool RunBgsaveEngine(); + + bool InitBgsaveEnv(); + bool InitBgsaveEngine(); + void ClearBgsave(); + void FinishBgsave(); + BgSaveInfo bgsave_info_; + pstd::Mutex bgsave_protector_; + std::shared_ptr bgsave_engine_; +}; + +struct BgTaskArg { + std::shared_ptr db; +}; + +#endif diff --git a/tools/pika_migrate/include/pika_define.h b/tools/pika_migrate/include/pika_define.h new file mode 100644 index 0000000000..3968f9072f --- /dev/null +++ b/tools/pika_migrate/include/pika_define.h @@ -0,0 +1,412 @@ +// Copyright (c) 2015-present, Qihoo, Inc. All rights reserved. +// This source code is licensed under the BSD-style license found in the +// LICENSE file in the root directory of this source tree. An additional grant +// of patent rights can be found in the PATENTS file in the same directory. 
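
`DB` exposes raw lock helpers (`DBLock()`, `DBLockShared()`, `DBUnlock()`, `DBUnlockShared()`) alongside the underlying `std::shared_mutex` via `GetDBLock()`. A usage sketch with hypothetical call sites: the RAII guards below are equivalent to the helper pairs but release the lock on every exit path, including exceptions:

```cpp
#include <shared_mutex>

void ReadOnlyWork(DB& db) {
  // Equivalent to DBLockShared()/DBUnlockShared().
  std::shared_lock<std::shared_mutex> guard(db.GetDBLock());
  // ... read DB state ...
}  // lock released on scope exit

void MutatingWork(DB& db) {
  // Equivalent to DBLock()/DBUnlock().
  std::unique_lock<std::shared_mutex> guard(db.GetDBLock());
  // ... modify DB state ...
}
```
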
+ +#ifndef PIKA_DEFINE_H_ +#define PIKA_DEFINE_H_ + +#include +#include +#include + +#include "net/include/redis_cli.h" + +/* + * TTL type + */ +#define PIKA_TTL_ZERO 0 +#define PIKA_TTL_NONE (-1) +#define PIKA_TTL_STALE (-2) + +#define PIKA_SYNC_BUFFER_SIZE 1000 +#define PIKA_MAX_WORKER_THREAD_NUM 24 +#define PIKA_REPL_SERVER_TP_SIZE 3 +#define PIKA_META_SYNC_MAX_WAIT_TIME 10 +#define PIKA_SCAN_STEP_LENGTH 1000 +#define PIKA_MAX_CONN_RBUF (1 << 28) // 256MB +#define PIKA_MAX_CONN_RBUF_LB (1 << 26) // 64MB +#define PIKA_MAX_CONN_RBUF_HB (1 << 29) // 512MB +#define PIKA_SERVER_ID_MAX 65535 + +class PikaServer; +/* Global Const */ +constexpr int MAX_DB_NUM = 8; + +/* Port shift */ +const int kPortShiftRSync = 1000; +const int kPortShiftReplServer = 2000; +const int kPortShiftRsync2 = 10001; +const std::string kPikaPidFile = "pika.pid"; +const std::string kPikaSecretFile = "rsync.secret"; +const std::string kDefaultRsyncAuth = "default"; + +/* Rsync */ +const int kMaxRsyncParallelNum = 4; +constexpr int kMaxRsyncInitReTryTimes = 64; + +struct DBStruct { + DBStruct(std::string tn, int32_t inst_num) + : db_name(std::move(tn)), db_instance_num(inst_num) {} + + bool operator==(const DBStruct& db_struct) const { + return db_name == db_struct.db_name && db_instance_num == db_struct.db_instance_num; + } + std::string db_name; + int32_t db_instance_num = 0; +}; + +struct SlaveItem { + std::string ip_port; + std::string ip; + int port; + int conn_fd; + int stage; + std::vector db_structs; + struct timeval create_time; +}; + +enum ReplState { + kNoConnect = 0, + kTryConnect = 1, + kTryDBSync = 2, + kWaitDBSync = 3, + kWaitReply = 4, + kConnected = 5, + kError = 6, + // set to kDBNoConnect if execute cmd 'dbslaveof db no one' + kDBNoConnect = 7 +}; + +// debug only +const std::string ReplStateMsg[] = {"kNoConnect", "kTryConnect", "kTryDBSync", "kWaitDBSync", + "kWaitReply", "kConnected", "kError", "kDBNoConnect"}; + +struct LogicOffset { + uint32_t term{0}; + uint64_t index{0}; + LogicOffset() = default; + LogicOffset(uint32_t _term, uint64_t _index) : term(_term), index(_index) {} + LogicOffset(const LogicOffset& other) { + term = other.term; + index = other.index; + } + bool operator==(const LogicOffset& other) const { return term == other.term && index == other.index; } + bool operator!=(const LogicOffset& other) const { return term != other.term || index != other.index; } + + std::string ToString() const { return "term: " + std::to_string(term) + " index: " + std::to_string(index); } +}; + +struct BinlogOffset { + uint32_t filenum{0}; + uint64_t offset{0}; + BinlogOffset() = default; + BinlogOffset(uint32_t num, uint64_t off) : filenum(num), offset(off) {} + BinlogOffset(const BinlogOffset& other) { + filenum = other.filenum; + offset = other.offset; + } + std::string ToString() const { return "filenum: " + std::to_string(filenum) + " offset: " + std::to_string(offset); } + bool operator==(const BinlogOffset& other) const { + return filenum == other.filenum && offset == other.offset; + } + bool operator!=(const BinlogOffset& other) const { + return filenum != other.filenum || offset != other.offset; + } + + bool operator>(const BinlogOffset& other) const { + return filenum > other.filenum || (filenum == other.filenum && offset > other.offset); + } + bool operator<(const BinlogOffset& other) const { + return filenum < other.filenum || (filenum == other.filenum && offset < other.offset); + } + bool operator<=(const BinlogOffset& other) const { + return filenum < other.filenum || (filenum == 
other.filenum && offset <= other.offset); + } + bool operator>=(const BinlogOffset& other) const { + return filenum > other.filenum || (filenum == other.filenum && offset >= other.offset); + } +}; + +struct LogOffset { + LogOffset(const LogOffset& _log_offset) { + b_offset = _log_offset.b_offset; + l_offset = _log_offset.l_offset; + } + LogOffset() = default; + LogOffset(const BinlogOffset& _b_offset, const LogicOffset& _l_offset) : b_offset(_b_offset), l_offset(_l_offset) {} + bool operator<(const LogOffset& other) const { return b_offset < other.b_offset; } + bool operator==(const LogOffset& other) const { return b_offset == other.b_offset; } + bool operator<=(const LogOffset& other) const { return b_offset <= other.b_offset; } + bool operator>=(const LogOffset& other) const { return b_offset >= other.b_offset; } + bool operator>(const LogOffset& other) const { return b_offset > other.b_offset; } + std::string ToString() const { return b_offset.ToString() + " " + l_offset.ToString(); } + BinlogOffset b_offset; + LogicOffset l_offset; +}; + +// dbsync arg +struct DBSyncArg { + PikaServer* p; + std::string ip; + int port; + std::string db_name; + DBSyncArg(PikaServer* const _p, std::string _ip, int _port, std::string _db_name) + : p(_p), ip(std::move(_ip)), port(_port), db_name(std::move(_db_name)) {} +}; + +// rm define +enum SlaveState { + kSlaveNotSync = 0, + kSlaveDbSync = 1, + kSlaveBinlogSync = 2, +}; + +// debug only +const std::string SlaveStateMsg[] = {"SlaveNotSync", "SlaveDbSync", "SlaveBinlogSync"}; + +enum BinlogSyncState { + kNotSync = 0, + kReadFromCache = 1, + kReadFromFile = 2, +}; + +// debug only +const std::string BinlogSyncStateMsg[] = {"NotSync", "ReadFromCache", "ReadFromFile"}; + +struct BinlogChip { + LogOffset offset_; + std::string binlog_; + BinlogChip(const LogOffset& offset, std::string binlog) : offset_(offset), binlog_(std::move(binlog)) {} + BinlogChip(const BinlogChip& binlog_chip) { + offset_ = binlog_chip.offset_; + binlog_ = binlog_chip.binlog_; + } +}; + +struct DBInfo { + DBInfo(std::string db_name) + : db_name_(std::move(db_name)) {} + + DBInfo() = default; + + bool operator==(const DBInfo& other) const { + return db_name_ == other.db_name_; + } + + bool operator<(const DBInfo& other) const { + return db_name_ < other.db_name_ || (db_name_ == other.db_name_); + } + + std::string ToString() const { return "(" + db_name_ + ")"; } + std::string db_name_; +}; + +/* + * Used to define the sorting rule of the db in the map + */ +struct hash_db_info { + size_t operator()(const DBInfo& n) const { + return std::hash()(n.db_name_); + } +}; + +class Node { + public: + Node(std::string ip, int port) : ip_(std::move(ip)), port_(port) {} + virtual ~Node() = default; + Node() = default; + const std::string& Ip() const { return ip_; } + int Port() const { return port_; } + std::string ToString() const { return ip_ + ":" + std::to_string(port_); } + + private: + std::string ip_; + int port_ = 0; +}; + +class RmNode : public Node { + public: + RmNode(const std::string& ip, int port, DBInfo db_info) + : Node(ip, port), db_info_(std::move(db_info)) {} + + RmNode(const std::string& ip, int port, const std::string& db_name) + : Node(ip, port), + db_info_(db_name) + {} + + RmNode(const std::string& ip, int port, const std::string& db_name, int32_t session_id) + : Node(ip, port), + db_info_(db_name), + session_id_(session_id) + {} + + RmNode(const std::string& db_name) + : db_info_(db_name) {} + RmNode() = default; + + ~RmNode() override = default; + bool operator==(const 
RmNode& other) const { + return db_info_.db_name_ == other.DBName() && + Ip() == other.Ip() && Port() == other.Port(); + } + + const std::string& DBName() const { return db_info_.db_name_; } + const DBInfo& NodeDBInfo() const { return db_info_; } + void SetSessionId(int32_t session_id) { session_id_ = session_id; } + int32_t SessionId() const { return session_id_; } + std::string ToString() const { + return "db=" + DBName() + "_,ip_port=" + Ip() + ":" + + std::to_string(Port()) + ",session id=" + std::to_string(SessionId()); + } + void SetLastSendTime(uint64_t last_send_time) { last_send_time_ = last_send_time; } + uint64_t LastSendTime() const { return last_send_time_; } + void SetLastRecvTime(uint64_t last_recv_time) { last_recv_time_ = last_recv_time; } + uint64_t LastRecvTime() const { return last_recv_time_; } + + private: + DBInfo db_info_; + int32_t session_id_ = 0; + uint64_t last_send_time_ = 0; + uint64_t last_recv_time_ = 0; +}; + +struct WriteTask { + struct RmNode rm_node_; + struct BinlogChip binlog_chip_; + LogOffset prev_offset_; + WriteTask(const RmNode& rm_node, const BinlogChip& binlog_chip, const LogOffset& prev_offset) + : rm_node_(rm_node), binlog_chip_(binlog_chip), prev_offset_(prev_offset) {} +}; + +// slowlog define +#define SLOWLOG_ENTRY_MAX_ARGC 32 +#define SLOWLOG_ENTRY_MAX_STRING 128 + +// slowlog entry +struct SlowlogEntry { + int64_t id; + int64_t start_time; + int64_t duration; + net::RedisCmdArgsType argv; +}; + +#define PIKA_MIN_RESERVED_FDS 5000 + +const int SLAVE_ITEM_STAGE_ONE = 1; +const int SLAVE_ITEM_STAGE_TWO = 2; + +// repl_state_ +const int PIKA_REPL_NO_CONNECT = 0; +const int PIKA_REPL_SHOULD_META_SYNC = 1; +const int PIKA_REPL_META_SYNC_DONE = 2; +const int PIKA_REPL_ERROR = 3; + +// role +const int PIKA_ROLE_SINGLE = 0; +const int PIKA_ROLE_SLAVE = 1; +const int PIKA_ROLE_MASTER = 2; + +/* + * cache mode + */ +constexpr int PIKA_CACHE_NONE = 0; +constexpr int PIKA_CACHE_READ = 1; + +/* + * cache size + */ +#define PIKA_CACHE_SIZE_MIN 536870912 // 512M +#define PIKA_CACHE_SIZE_DEFAULT 10737418240 // 10G + +enum RecordType { + kZeroType = 0, + kFullType = 1, + kFirstType = 2, + kMiddleType = 3, + kLastType = 4, + kEof = 5, + kBadRecord = 6, + kOldRecord = 7 +}; + +/* + * the block size that we read and write from write2file + * the default size is 64KB + */ +static const size_t kBlockSize = 64 * 1024; + +/* + * Header is Type(1 byte), length (3 bytes), time (4 bytes) + */ +static const size_t kHeaderSize = 1 + 3 + 4; + +/* + * the size of memory when we use memory mode + * the default memory size is 2GB + */ +const int64_t kPoolSize = 1073741824; + +const std::string kBinlogPrefix = "write2file"; +const size_t kBinlogPrefixLen = 10; + +const std::string kPikaMeta = "meta"; +const std::string kManifest = "manifest"; +const std::string kContext = "context"; + +/* + * define common character + */ +#define COMMA ',' + +/* + * define reply between master and slave + */ +const std::string kInnerReplOk = "ok"; +const std::string kInnerReplWait = "wait"; + +const unsigned int kMaxBitOpInputKey = 12800; +const int kMaxBitOpInputBit = 21; +/* + * db sync + */ +const uint32_t kDBSyncMaxGap = 50; +const std::string kDBSyncModule = "document"; + +const std::string kBgsaveInfoFile = "info"; + +/* + * cache status + */ +const int PIKA_CACHE_STATUS_NONE = 0; +const int PIKA_CACHE_STATUS_INIT = 1; +const int PIKA_CACHE_STATUS_OK = 2; +const int PIKA_CACHE_STATUS_RESET = 3; +const int PIKA_CACHE_STATUS_DESTROY = 4; +const int PIKA_CACHE_STATUS_CLEAR = 5; +const 
int CACHE_START_FROM_BEGIN = 0; +const int CACHE_START_FROM_END = -1; + +/* + * key type + */ +const char PIKA_KEY_TYPE_KV = 'k'; +const char PIKA_KEY_TYPE_HASH = 'h'; +const char PIKA_KEY_TYPE_LIST = 'l'; +const char PIKA_KEY_TYPE_SET = 's'; +const char PIKA_KEY_TYPE_ZSET = 'z'; + +/* + * cache task type + */ +enum CacheBgTask { + CACHE_BGTASK_CLEAR = 0, + CACHE_BGTASK_RESET_NUM = 1, + CACHE_BGTASK_RESET_CFG = 2 +}; + +const int64_t CACHE_LOAD_QUEUE_MAX_SIZE = 2048; +const int64_t CACHE_VALUE_ITEM_MAX_SIZE = 2048; +const int64_t CACHE_LOAD_NUM_ONE_TIME = 256; + +#endif diff --git a/tools/pika_migrate/include/pika_dispatch_thread.h b/tools/pika_migrate/include/pika_dispatch_thread.h new file mode 100644 index 0000000000..01a6fe96b0 --- /dev/null +++ b/tools/pika_migrate/include/pika_dispatch_thread.h @@ -0,0 +1,56 @@ +// Copyright (c) 2015-present, Qihoo, Inc. All rights reserved. +// This source code is licensed under the BSD-style license found in the +// LICENSE file in the root directory of this source tree. An additional grant +// of patent rights can be found in the PATENTS file in the same directory. + +#ifndef PIKA_DISPATCH_THREAD_H_ +#define PIKA_DISPATCH_THREAD_H_ + +#include "include/pika_client_conn.h" + +class PikaDispatchThread { + public: + PikaDispatchThread(std::set& ips, int port, int work_num, int cron_interval, int queue_limit, + int max_conn_rbuf_size); + ~PikaDispatchThread(); + int StartThread(); + void StopThread(); + uint64_t ThreadClientList(std::vector* clients); + + bool ClientKill(const std::string& ip_port); + void ClientKillAll(); + + void SetQueueLimit(int queue_limit) { thread_rep_->SetQueueLimit(queue_limit); } + + void UnAuthUserAndKillClient(const std::set &users, const std::shared_ptr& defaultUser); + net::ServerThread* server_thread() { return thread_rep_; } + + private: + class ClientConnFactory : public net::ConnFactory { + public: + explicit ClientConnFactory(int max_conn_rbuf_size) : max_conn_rbuf_size_(max_conn_rbuf_size) {} + std::shared_ptr NewNetConn(int connfd, const std::string& ip_port, net::Thread* server_thread, + void* worker_specific_data, net::NetMultiplexer* net) const override { + return std::make_shared(connfd, ip_port, server_thread, net, net::HandleType::kAsynchronous, max_conn_rbuf_size_); + } + + private: + int max_conn_rbuf_size_ = 0; + }; + + class Handles : public net::ServerHandle { + public: + explicit Handles(PikaDispatchThread* pika_disptcher) : pika_disptcher_(pika_disptcher) {} + using net::ServerHandle::AccessHandle; + bool AccessHandle(std::string& ip) const override; + void CronHandle() const override; + + private: + PikaDispatchThread* pika_disptcher_ = nullptr; + }; + + ClientConnFactory conn_factory_; + Handles handles_; + net::ServerThread* thread_rep_ = nullptr; +}; +#endif diff --git a/tools/pika_migrate/include/pika_geo.h b/tools/pika_migrate/include/pika_geo.h new file mode 100644 index 0000000000..70b287da03 --- /dev/null +++ b/tools/pika_migrate/include/pika_geo.h @@ -0,0 +1,184 @@ +// Copyright (c) 2015-present, Qihoo, Inc. All rights reserved. +// This source code is licensed under the BSD-style license found in the +// LICENSE file in the root directory of this source tree. An additional grant +// of patent rights can be found in the PATENTS file in the same directory. 
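
The `PIKA_KEY_TYPE_*` constants above encode each data type as a single lowercase character wherever a key's type has to fit in one byte. A trivial illustrative decoder (not part of the patch):

```cpp
#include <string>

std::string KeyTypeName(char type) {
  switch (type) {
    case 'k': return "string";  // PIKA_KEY_TYPE_KV
    case 'h': return "hash";    // PIKA_KEY_TYPE_HASH
    case 'l': return "list";    // PIKA_KEY_TYPE_LIST
    case 's': return "set";     // PIKA_KEY_TYPE_SET
    case 'z': return "zset";    // PIKA_KEY_TYPE_ZSET
    default:  return "unknown";
  }
}
```
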
+ +#ifndef PIKA_GEO_H_ +#define PIKA_GEO_H_ + +#include "include/pika_db.h" +#include "include/acl.h" +#include "include/pika_command.h" +#include "storage/storage.h" + +/* + * zset + */ +enum Sort { + Unsort, // default + Asc, + Desc +}; + +struct GeoPoint { + std::string member; + double longitude; + double latitude; +}; + +struct NeighborPoint { + std::string member; + double score; + double distance; +}; + +struct GeoRange { + std::string member; + double longitude; + double latitude; + double distance; + std::string unit; + bool withdist; + bool withhash; + bool withcoord; + int option_num; + bool count; + int count_limit; + bool store; + bool storedist; + std::string storekey; + Sort sort; +}; + +class GeoAddCmd : public Cmd { + public: + GeoAddCmd(const std::string& name, int arity, uint32_t flag) + : Cmd(name, arity, flag, static_cast(AclCategory::GEO)) {} + std::vector current_key() const override { + std::vector res; + res.push_back(key_); + return res; + } + void Do() override; + void Split(const HintKeys& hint_keys) override {}; + void Merge() override {}; + Cmd* Clone() override { return new GeoAddCmd(*this); } + + private: + std::string key_; + std::vector pos_; + void DoInitial() override; +}; + +class GeoPosCmd : public Cmd { + public: + GeoPosCmd(const std::string& name, int arity, uint32_t flag) + : Cmd(name, arity, flag, static_cast(AclCategory::GEO)) {} + std::vector current_key() const override { + std::vector res; + res.push_back(key_); + return res; + } + void Do() override; + void Split(const HintKeys& hint_keys) override{}; + void Merge() override{}; + Cmd* Clone() override { return new GeoPosCmd(*this); } + + private: + std::string key_; + std::vector members_; + void DoInitial() override; +}; + +class GeoDistCmd : public Cmd { + public: + GeoDistCmd(const std::string& name, int arity, uint32_t flag) + : Cmd(name, arity, flag, static_cast(AclCategory::GEO)) {} + std::vector current_key() const override { + std::vector res; + res.push_back(key_); + return res; + } + void Do() override; + void Split(const HintKeys& hint_keys) override{}; + void Merge() override{}; + Cmd* Clone() override { return new GeoDistCmd(*this); } + + private: + std::string key_, first_pos_, second_pos_, unit_; + void DoInitial() override; +}; + +class GeoHashCmd : public Cmd { + public: + GeoHashCmd(const std::string& name, int arity, uint32_t flag) + : Cmd(name, arity, flag, static_cast(AclCategory::GEO)) {} + std::vector current_key() const override { + std::vector res; + res.push_back(key_); + return res; + } + void Do() override; + void Split(const HintKeys& hint_keys) override {}; + void Merge() override{}; + Cmd* Clone() override { return new GeoHashCmd(*this); } + + private: + std::string key_; + std::vector members_; + void DoInitial() override; +}; + +class GeoRadiusCmd : public Cmd { + public: + GeoRadiusCmd(const std::string& name, int arity, uint32_t flag) + : Cmd(name, arity, flag, static_cast(AclCategory::GEO)) {} + void Do() override; + void Split(const HintKeys& hint_keys) override {}; + void Merge() override {}; + Cmd* Clone() override { return new GeoRadiusCmd(*this); } + + private: + std::string key_; + GeoRange range_; + void DoInitial() override; + void Clear() override { + range_.withdist = false; + range_.withcoord = false; + range_.withhash = false; + range_.count = false; + range_.store = false; + range_.storedist = false; + range_.option_num = 0; + range_.count_limit = 0; + range_.sort = Unsort; + } +}; + +class GeoRadiusByMemberCmd : public Cmd { + public: + 
GeoRadiusByMemberCmd(const std::string& name, int arity, uint32_t flag) + : Cmd(name, arity, flag, static_cast(AclCategory::GEO)) {} + void Do() override; + void Split(const HintKeys& hint_keys) override {}; + void Merge() override {}; + Cmd* Clone() override { return new GeoRadiusByMemberCmd(*this); } + + private: + std::string key_; + GeoRange range_; + void DoInitial() override; + void Clear() override { + range_.withdist = false; + range_.withcoord = false; + range_.withhash = false; + range_.count = false; + range_.store = false; + range_.storedist = false; + range_.option_num = 0; + range_.count_limit = 0; + range_.sort = Unsort; + } +}; + +#endif diff --git a/tools/pika_migrate/include/pika_geohash.h b/tools/pika_migrate/include/pika_geohash.h new file mode 100644 index 0000000000..1ba348515e --- /dev/null +++ b/tools/pika_migrate/include/pika_geohash.h @@ -0,0 +1,101 @@ +/* + * Copyright (c) 2013-2014, yinqiwen + * Copyright (c) 2014, Matt Stancliff . + * Copyright (c) 2015, Salvatore Sanfilippo . + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * * Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of Redis nor the names of its contributors may be used + * to endorse or promote products derived from this software without + * specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS + * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF + * THE POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef PIKA_GEOHASH_H_ +#define PIKA_GEOHASH_H_ + +#include +#include + +#if defined(__cplusplus) +extern "C" { +#endif + +#define HASHISZERO(r) (!(r).bits && !(r).step) +#define RANGEISZERO(r) (!(r).max && !(r).min) +#define RANGEPISZERO(r) ((r) == nullptr || RANGEISZERO(*(r))) + +#define GEO_STEP_MAX 26 /* 26 * 2 = 52 bits. 
*/ + +/* Limits from EPSG:900913 / EPSG:3785 / OSGEO:41001 */ +constexpr double GEO_LAT_MIN{-85.05112878}; +constexpr double GEO_LAT_MAX{85.05112878}; +constexpr int64_t GEO_LONG_MIN{-180}; +constexpr int64_t GEO_LONG_MAX{180}; + +struct GeoHashBits { + uint64_t bits; + uint8_t step; +}; + +struct GeoHashRange { + double min; + double max; +}; + +struct GeoHashArea { + GeoHashBits hash; + GeoHashRange longitude; + GeoHashRange latitude; +}; + +struct GeoHashNeighbors { + GeoHashBits north; + GeoHashBits east; + GeoHashBits west; + GeoHashBits south; + GeoHashBits north_east; + GeoHashBits south_east; + GeoHashBits north_west; + GeoHashBits south_west; +}; + +/* + * 0:success + * -1:failed + */ +void geohashGetCoordRange(GeoHashRange* long_range, GeoHashRange* lat_range); +int geohashEncode(const GeoHashRange* long_range, const GeoHashRange* lat_range, double longitude, double latitude, + uint8_t step, GeoHashBits* hash); +int geohashEncodeType(double longitude, double latitude, uint8_t step, GeoHashBits* hash); +int geohashEncodeWGS84(double longitude, double latitude, uint8_t step, GeoHashBits* hash); +int geohashDecode(GeoHashRange long_range, GeoHashRange lat_range, GeoHashBits hash, + GeoHashArea* area); +int geohashDecodeType(GeoHashBits hash, GeoHashArea* area); +int geohashDecodeAreaToLongLat(const GeoHashArea* area, double* xy); +int geohashDecodeToLongLatType(GeoHashBits hash, double* xy); +int geohashDecodeToLongLatWGS84(GeoHashBits hash, double* xy); +void geohashNeighbors(const GeoHashBits* hash, GeoHashNeighbors* neighbors); + +#if defined(__cplusplus) +} +#endif +#endif /* PIKA_GEOHASH_H_ */ diff --git a/tools/pika_migrate/include/pika_geohash_helper.h b/tools/pika_migrate/include/pika_geohash_helper.h new file mode 100644 index 0000000000..63ad4782a2 --- /dev/null +++ b/tools/pika_migrate/include/pika_geohash_helper.h @@ -0,0 +1,56 @@ +/* + * Copyright (c) 2013-2014, yinqiwen + * Copyright (c) 2014, Matt Stancliff . + * Copyright (c) 2015, Salvatore Sanfilippo . + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * * Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of Redis nor the names of its contributors may be used + * to endorse or promote products derived from this software without + * specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS + * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF + * THE POSSIBILITY OF SUCH DAMAGE. 
+ */
+
+#ifndef PIKA_GEOHASH_HELPER_HPP_
+#define PIKA_GEOHASH_HELPER_HPP_
+
+#include "include/pika_geohash.h"
+
+#define GZERO(s) (s).bits = (s).step = 0;
+
+using GeoHashFix52Bits = uint64_t;
+
+struct GeoHashRadius {
+  GeoHashBits hash;
+  GeoHashArea area;
+  GeoHashNeighbors neighbors;
+};
+
+uint8_t geohashEstimateStepsByRadius(double range_meters, double lat);
+int geohashBoundingBox(double longitude, double latitude, double radius_meters, double* bounds);
+GeoHashRadius geohashGetAreasByRadius(double longitude, double latitude, double radius_meters);
+GeoHashRadius geohashGetAreasByRadiusWGS84(double longitude, double latitude, double radius_meters);
+GeoHashFix52Bits geohashAlign52Bits(const GeoHashBits& hash);
+double geohashGetDistance(double lon1d, double lat1d, double lon2d, double lat2d);
+int geohashGetDistanceIfInRadius(double x1, double y1, double x2, double y2, double radius, double* distance);
+int geohashGetDistanceIfInRadiusWGS84(double x1, double y1, double x2, double y2, double radius, double* distance);
+
+#endif /* PIKA_GEOHASH_HELPER_HPP_ */
diff --git a/tools/pika_migrate/include/pika_hash.h b/tools/pika_migrate/include/pika_hash.h
new file mode 100644
index 0000000000..1362040682
--- /dev/null
+++ b/tools/pika_migrate/include/pika_hash.h
@@ -0,0 +1,445 @@
+// Copyright (c) 2015-present, Qihoo, Inc. All rights reserved.
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree. An additional grant
+// of patent rights can be found in the PATENTS file in the same directory.
+
+#ifndef PIKA_HASH_H_
+#define PIKA_HASH_H_
+
+#include "storage/storage.h"
+#include "include/acl.h"
+#include "include/pika_command.h"
+#include "include/pika_db.h"
+
+/*
+ * hash
+ */
+class HDelCmd : public Cmd {
+ public:
+  HDelCmd(const std::string& name, int arity, uint32_t flag)
+      : Cmd(name, arity, flag, static_cast<uint32_t>(AclCategory::HASH)) {}
+  std::vector<std::string> current_key() const override {
+    std::vector<std::string> res;
+    res.push_back(key_);
+    return res;
+  }
+  void Do() override;
+  void DoThroughDB() override;
+  void DoUpdateCache() override;
+  void Split(const HintKeys& hint_keys) override {};
+  void Merge() override {};
+  Cmd* Clone() override { return new HDelCmd(*this); }
+
+ private:
+  std::string key_;
+  std::vector<std::string> fields_;
+  int32_t deleted_ = 0;
+  void DoInitial() override;
+  rocksdb::Status s_;
+};
+
+class HGetCmd : public Cmd {
+ public:
+  HGetCmd(const std::string& name, int arity, uint32_t flag)
+      : Cmd(name, arity, flag, static_cast<uint32_t>(AclCategory::HASH)) {}
+  std::vector<std::string> current_key() const override {
+    std::vector<std::string> res;
+    res.push_back(key_);
+    return res;
+  }
+  void Do() override;
+  void ReadCache() override;
+  void DoThroughDB() override;
+  void DoUpdateCache() override;
+  void Split(const HintKeys& hint_keys) override {};
+  void Merge() override {};
+  bool IsTooLargeKey(const int& max_sz) override { return key_.size() > static_cast<uint32_t>(max_sz); }
+  Cmd* Clone() override { return new HGetCmd(*this); }
+
+ private:
+  std::string key_, field_;
+  void DoInitial() override;
+  rocksdb::Status s_;
+};
+
+class HGetallCmd : public Cmd {
+ public:
+  HGetallCmd(const std::string& name, int arity, uint32_t flag)
+      : Cmd(name, arity, flag, static_cast<uint32_t>(AclCategory::HASH)) {}
+  std::vector<std::string> current_key() const override {
+    std::vector<std::string> res;
+    res.push_back(key_);
+    return res;
+  }
+  void Do() override;
+  void ReadCache() override;
+  void DoThroughDB() override;
+  void DoUpdateCache() override;
+  void
Split(const HintKeys& hint_keys) override {}; + void Merge() override {}; + Cmd* Clone() override { return new HGetallCmd(*this); } + + private: + std::string key_; + void DoInitial() override; + rocksdb::Status s_; +}; + +class HSetCmd : public Cmd { + public: + HSetCmd(const std::string& name, int arity, uint32_t flag) + : Cmd(name, arity, flag, static_cast(AclCategory::HASH)) {} + std::vector current_key() const override { + std::vector res; + res.push_back(key_); + return res; + } + void Do() override; + void DoThroughDB() override; + void DoUpdateCache() override; + void Split(const HintKeys& hint_keys) override {}; + void Merge() override {}; + Cmd* Clone() override { return new HSetCmd(*this); } + + private: + std::string key_, field_, value_; + void DoInitial() override; + rocksdb::Status s_; +}; + +class HExistsCmd : public Cmd { + public: + HExistsCmd(const std::string& name, int arity, uint32_t flag) + : Cmd(name, arity, flag, static_cast(AclCategory::HASH)) {} + std::vector current_key() const override { + std::vector res; + res.push_back(key_); + return res; + } + void Do() override; + void ReadCache() override; + void DoThroughDB() override; + void DoUpdateCache() override; + void Split(const HintKeys& hint_keys) override {}; + void Merge() override {}; + Cmd* Clone() override { return new HExistsCmd(*this); } + + private: + std::string key_, field_; + void DoInitial() override; + rocksdb::Status s_; +}; + +class HIncrbyCmd : public Cmd { + public: + HIncrbyCmd(const std::string& name, int arity, uint32_t flag) + : Cmd(name, arity, flag, static_cast(AclCategory::HASH)) {} + std::vector current_key() const override { + std::vector res; + res.push_back(key_); + return res; + } + void Do() override; + void DoThroughDB() override; + void DoUpdateCache() override; + void Split(const HintKeys& hint_keys) override {}; + void Merge() override {}; + Cmd* Clone() override { return new HIncrbyCmd(*this); } + + private: + std::string key_, field_; + int64_t by_ = 0; + void DoInitial() override; + rocksdb::Status s_; +}; + +class HIncrbyfloatCmd : public Cmd { + public: + HIncrbyfloatCmd(const std::string& name, int arity, uint32_t flag) + : Cmd(name, arity, flag, static_cast(AclCategory::HASH)) {} + std::vector current_key() const override { + std::vector res; + res.push_back(key_); + return res; + } + void Do() override; + void DoThroughDB() override; + void DoUpdateCache() override; + void Split(const HintKeys& hint_keys) override {}; + void Merge() override {}; + Cmd* Clone() override { return new HIncrbyfloatCmd(*this); } + + private: + std::string key_, field_, by_; + void DoInitial() override; + rocksdb::Status s_; +}; + +class HKeysCmd : public Cmd { + public: + HKeysCmd(const std::string& name, int arity, uint32_t flag) + : Cmd(name, arity, flag, static_cast(AclCategory::HASH)) {} + std::vector current_key() const override { + std::vector res; + res.push_back(key_); + return res; + } + void Do() override; + void ReadCache() override; + void DoThroughDB() override; + void DoUpdateCache() override; + void Split(const HintKeys& hint_keys) override {}; + void Merge() override {}; + Cmd* Clone() override { return new HKeysCmd(*this); } + + private: + std::string key_; + void DoInitial() override; + rocksdb::Status s_; +}; + +class HLenCmd : public Cmd { + public: + HLenCmd(const std::string& name, int arity, uint32_t flag) + : Cmd(name, arity, flag, static_cast(AclCategory::HASH)) {} + std::vector current_key() const override { + std::vector res; + res.push_back(key_); + return res; + 
} + void Do() override; + void ReadCache() override; + void DoThroughDB() override; + void DoUpdateCache() override; + void Split(const HintKeys& hint_keys) override {}; + void Merge() override {}; + Cmd* Clone() override { return new HLenCmd(*this); } + + private: + std::string key_; + void DoInitial() override; + rocksdb::Status s_; +}; + +class HMgetCmd : public Cmd { + public: + HMgetCmd(const std::string& name, int arity, uint32_t flag) + : Cmd(name, arity, flag, static_cast(AclCategory::HASH)) {} + std::vector current_key() const override { + std::vector res; + res.push_back(key_); + return res; + } + void Do() override; + void ReadCache() override; + void DoThroughDB() override; + void DoUpdateCache() override; + void Split(const HintKeys& hint_keys) override {}; + void Merge() override {}; + Cmd* Clone() override { return new HMgetCmd(*this); } + + private: + std::string key_; + std::vector fields_; + void DoInitial() override; + rocksdb::Status s_; +}; + +class HMsetCmd : public Cmd { + public: + HMsetCmd(const std::string& name, int arity, uint32_t flag) + : Cmd(name, arity, flag, static_cast(AclCategory::HASH)) {} + std::vector current_key() const override { + std::vector res; + res.push_back(key_); + return res; + } + void Do() override; + void DoThroughDB() override; + void DoUpdateCache() override; + void Split(const HintKeys& hint_keys) override {}; + void Merge() override {}; + Cmd* Clone() override { return new HMsetCmd(*this); } + + private: + std::string key_; + std::vector fvs_; + void DoInitial() override; + rocksdb::Status s_; +}; + +class HSetnxCmd : public Cmd { + public: + HSetnxCmd(const std::string& name, int arity, uint32_t flag) + : Cmd(name, arity, flag, static_cast(AclCategory::HASH)) {} + std::vector current_key() const override { + std::vector res; + res.push_back(key_); + return res; + } + void Do() override; + void DoThroughDB() override; + void DoUpdateCache() override; + void Split(const HintKeys& hint_keys) override {}; + void Merge() override {}; + Cmd* Clone() override { return new HSetnxCmd(*this); } + + private: + std::string key_, field_, value_; + void DoInitial() override; + rocksdb::Status s_; +}; + +class HStrlenCmd : public Cmd { + public: + HStrlenCmd(const std::string& name, int arity, uint32_t flag) + : Cmd(name, arity, flag, static_cast(AclCategory::HASH)) {} + std::vector current_key() const override { + std::vector res; + res.push_back(key_); + return res; + } + void Do() override; + void ReadCache() override; + void DoThroughDB() override; + void DoUpdateCache() override; + void Split(const HintKeys& hint_keys) override {}; + void Merge() override {}; + Cmd* Clone() override { return new HStrlenCmd(*this); } + + private: + std::string key_, field_; + void DoInitial() override; + rocksdb::Status s_; +}; + +class HValsCmd : public Cmd { + public: + HValsCmd(const std::string& name, int arity, uint32_t flag) + : Cmd(name, arity, flag, static_cast(AclCategory::HASH)) {} + std::vector current_key() const override { + std::vector res; + res.push_back(key_); + return res; + } + void Do() override; + void ReadCache() override; + void DoThroughDB() override; + void DoUpdateCache() override; + void Split(const HintKeys& hint_keys) override {}; + void Merge() override {}; + Cmd* Clone() override { return new HValsCmd(*this); } + + private: + std::string key_, field_; + void DoInitial() override; + rocksdb::Status s_; +}; + +class HScanCmd : public Cmd { + public: + HScanCmd(const std::string& name, int arity, uint32_t flag) + : Cmd(name, arity, 
flag, static_cast(AclCategory::HASH)), pattern_("*") {} + std::vector current_key() const override { + std::vector res; + res.push_back(key_); + return res; + } + void Do() override; + void Split(const HintKeys& hint_keys) override {}; + void Merge() override {}; + Cmd* Clone() override { return new HScanCmd(*this); } + + private: + std::string key_; + std::string pattern_; + int64_t cursor_; + int64_t count_{10}; + void DoInitial() override; + void Clear() override { + pattern_ = "*"; + count_ = 10; + } +}; + +class HScanxCmd : public Cmd { + public: + HScanxCmd(const std::string& name, int arity, uint32_t flag) + : Cmd(name, arity, flag, static_cast(AclCategory::HASH)), pattern_("*") {} + std::vector current_key() const override { + std::vector res; + res.push_back(key_); + return res; + } + void Do() override; + void Split(const HintKeys& hint_keys) override {}; + void Merge() override {}; + Cmd* Clone() override { return new HScanxCmd(*this); } + + private: + std::string key_; + std::string start_field_; + std::string pattern_; + int64_t count_{10}; + void DoInitial() override; + void Clear() override { + pattern_ = "*"; + count_ = 10; + } +}; + +class PKHScanRangeCmd : public Cmd { + public: + PKHScanRangeCmd(const std::string& name, int arity, uint32_t flag) + : Cmd(name, arity, flag, static_cast(AclCategory::HASH)), pattern_("*") {} + std::vector current_key() const override { + std::vector res; + res.push_back(key_); + return res; + } + void Do() override; + void Split(const HintKeys& hint_keys) override {}; + void Merge() override {}; + Cmd* Clone() override { return new PKHScanRangeCmd(*this); } + + private: + std::string key_; + std::string field_start_; + std::string field_end_; + std::string pattern_; + int64_t limit_ = 10; + void DoInitial() override; + void Clear() override { + pattern_ = "*"; + limit_ = 10; + } +}; + +class PKHRScanRangeCmd : public Cmd { + public: + PKHRScanRangeCmd(const std::string& name, int arity, uint32_t flag) + : Cmd(name, arity, flag, static_cast(AclCategory::HASH)), pattern_("*") {} + std::vector current_key() const override { + std::vector res; + res.push_back(key_); + return res; + } + void Do() override; + void Split(const HintKeys& hint_keys) override {}; + void Merge() override {}; + Cmd* Clone() override { return new PKHRScanRangeCmd(*this); } + + private: + std::string key_; + std::string field_start_; + std::string field_end_; + std::string pattern_ = "*"; + int64_t limit_ = 10; + void DoInitial() override; + void Clear() override { + pattern_ = "*"; + limit_ = 10; + } +}; +#endif diff --git a/tools/pika_migrate/include/pika_hyperloglog.h b/tools/pika_migrate/include/pika_hyperloglog.h new file mode 100644 index 0000000000..77c374642f --- /dev/null +++ b/tools/pika_migrate/include/pika_hyperloglog.h @@ -0,0 +1,75 @@ +// Copyright (c) 2015-present, Qihoo, Inc. All rights reserved. +// This source code is licensed under the BSD-style license found in the +// LICENSE file in the root directory of this source tree. An additional grant +// of patent rights can be found in the PATENTS file in the same directory. 
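
Each scan command above restores `pattern_` and its count/limit field in `Clear()`: a command object can be reused across invocations, so optional arguments parsed for one request must not leak into the next. A minimal sketch of the same reset-on-reuse pattern, with illustrative names outside the `Cmd` hierarchy:

```cpp
#include <cstdint>
#include <string>

class ScanArgs {
 public:
  // Analogous to Clear(): restore defaults before parsing the next request.
  void Clear() {
    pattern_ = "*";
    count_ = 10;
  }
  // Optional MATCH/COUNT arguments override the defaults when present.
  void Parse(const std::string* match, const int64_t* count) {
    if (match != nullptr) pattern_ = *match;
    if (count != nullptr) count_ = *count;
  }
  const std::string& pattern() const { return pattern_; }
  int64_t count() const { return count_; }

 private:
  std::string pattern_ = "*";
  int64_t count_ = 10;
};
```
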
+
+#ifndef PIKA_HYPERLOGLOG_H_
+#define PIKA_HYPERLOGLOG_H_
+
+#include "include/pika_command.h"
+#include "include/pika_kv.h"
+/*
+ * hyperloglog
+ */
+class PfAddCmd : public Cmd {
+ public:
+  PfAddCmd(const std::string& name, int arity, uint32_t flag) : Cmd(name, arity, flag) {}
+  std::vector<std::string> current_key() const override {
+    std::vector<std::string> res;
+    res.push_back(key_);
+    return res;
+  }
+  void Do() override;
+  void Split(const HintKeys& hint_keys) override {};
+  void Merge() override {};
+  Cmd* Clone() override { return new PfAddCmd(*this); }
+
+ private:
+  std::string key_;
+  std::vector<std::string> values_;
+  void DoInitial() override;
+  void Clear() override { values_.clear(); }
+};
+
+class PfCountCmd : public Cmd {
+ public:
+  PfCountCmd(const std::string& name, int arity, uint32_t flag) : Cmd(name, arity, flag) {}
+  void Do() override;
+  void Split(const HintKeys& hint_keys) override {};
+  void Merge() override {};
+  Cmd* Clone() override { return new PfCountCmd(*this); }
+
+ private:
+  std::vector<std::string> keys_;
+  void DoInitial() override;
+  void Clear() override { keys_.clear(); }
+};
+
+class PfMergeCmd : public Cmd {
+ public:
+  PfMergeCmd(const std::string& name, int arity, uint32_t flag) : Cmd(name, arity, flag) {
+    set_cmd_ = std::make_shared<SetCmd>(kCmdNameSet, -3, kCmdFlagsWrite | kCmdFlagsKv);
+  }
+  PfMergeCmd(const PfMergeCmd& other)
+      : Cmd(other), keys_(other.keys_), value_to_dest_(other.value_to_dest_) {
+    set_cmd_ = std::make_shared<SetCmd>(kCmdNameSet, -3, kCmdFlagsWrite | kCmdFlagsKv);
+  }
+  std::vector<std::string> current_key() const override { return keys_; }
+  void Do() override;
+  void Split(const HintKeys& hint_keys) override {};
+  void Merge() override {};
+  Cmd* Clone() override { return new PfMergeCmd(*this); }
+  void DoBinlog() override;
+
+ private:
+  std::vector<std::string> keys_;
+  void DoInitial() override;
+  void Clear() override { keys_.clear(); }
+  // used for writing the binlog
+  std::string value_to_dest_;
+  std::shared_ptr<SetCmd> set_cmd_;
+};
+
+#endif
diff --git a/tools/pika_migrate/include/pika_instant.h b/tools/pika_migrate/include/pika_instant.h
new file mode 100644
index 0000000000..630e5478a0
--- /dev/null
+++ b/tools/pika_migrate/include/pika_instant.h
@@ -0,0 +1,39 @@
+// Copyright (c) 2023-present, Qihoo, Inc. All rights reserved.
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree. An additional grant
+// of patent rights can be found in the PATENTS file in the same directory.
+
+#ifndef PIKA_PIKA_INSTANT_H
+#define PIKA_PIKA_INSTANT_H
+
+#include <string>
+#include <unordered_map>
+
+inline constexpr size_t STATS_METRIC_SAMPLES = 16; /* Number of samples per metric. */
+inline const std::string STATS_METRIC_NET_INPUT = "stats_metric_net_input";
+inline const std::string STATS_METRIC_NET_OUTPUT = "stats_metric_net_output";
+inline const std::string STATS_METRIC_NET_INPUT_REPLICATION = "stats_metric_net_input_replication";
+inline const std::string STATS_METRIC_NET_OUTPUT_REPLICATION = "stats_metric_net_output_replication";
+
+/* The following two are used to track instantaneous metrics, like
+ * number of operations per second, network traffic.
*/ +struct InstMetric{ + size_t last_sample_base; /* The divisor of last sample window */ + size_t last_sample_value; /* The dividend of last sample window */ + double samples[STATS_METRIC_SAMPLES]; + int idx; +}; + +class Instant { + public: + Instant() = default; + ~Instant() = default; + + void trackInstantaneousMetric(std::string metric, size_t current_value, size_t current_base, size_t factor); + double getInstantaneousMetric(std::string metric); + + private: + std::unordered_map inst_metrics_; +}; + +#endif // PIKA_PIKA_INSTANT_H diff --git a/tools/pika_migrate/include/pika_kv.h b/tools/pika_migrate/include/pika_kv.h new file mode 100644 index 0000000000..7da694705b --- /dev/null +++ b/tools/pika_migrate/include/pika_kv.h @@ -0,0 +1,879 @@ +// Copyright (c) 2015-present, Qihoo, Inc. All rights reserved. +// This source code is licensed under the BSD-style license found in the +// LICENSE file in the root directory of this source tree. An additional grant +// of patent rights can be found in the PATENTS file in the same directory. + +#ifndef PIKA_KV_H_ +#define PIKA_KV_H_ + +#include "storage/storage.h" +#include "include/pika_db.h" +#include "include/acl.h" +#include "include/pika_command.h" + +/* + * kv + */ +class SetCmd : public Cmd { + public: + enum SetCondition { kNONE, kNX, kXX, kVX, kEXORPX }; + SetCmd(const std::string& name, int arity, uint32_t flag) + : Cmd(name, arity, flag, static_cast(AclCategory::STRING)){}; + std::vector current_key() const override { + std::vector res; + res.push_back(key_); + return res; + } + void Do() override; + void DoUpdateCache() override; + void DoThroughDB() override; + void Split(const HintKeys& hint_keys) override{}; + void Merge() override{}; + bool IsTooLargeKey(const int& max_sz) override { return key_.size() > static_cast(max_sz); } + Cmd* Clone() override { return new SetCmd(*this); } + + private: + std::string key_; + std::string value_; + std::string target_; + int32_t success_ = 0; + int64_t ttl_millsec = 0; + bool has_ttl_ = false; + SetCmd::SetCondition condition_{kNONE}; + void DoInitial() override; + void Clear() override { + ttl_millsec = 0; + success_ = 0; + condition_ = kNONE; + } + std::string ToRedisProtocol() override; + rocksdb::Status s_; +}; + +class GetCmd : public Cmd { + public: + GetCmd(const std::string& name, int arity, uint32_t flag) + : Cmd(name, arity, flag, static_cast(AclCategory::STRING)){}; + std::vector current_key() const override { + std::vector res; + res.push_back(key_); + return res; + } + void Do() override; + void DoThroughDB() override; + void DoUpdateCache() override; + void ReadCache() override; + void Split(const HintKeys& hint_keys) override{}; + void Merge() override{}; + bool IsTooLargeKey(const int &max_sz) override { return key_.size() > static_cast(max_sz); } + Cmd* Clone() override { return new GetCmd(*this); } + + private: + std::string key_; + std::string value_; + int64_t ttl_millsec_ = 0; + void DoInitial() override; + rocksdb::Status s_; +}; + +class DelCmd : public Cmd { + public: + DelCmd(const std::string& name, int arity, uint32_t flag) + : Cmd(name, arity, flag, static_cast(AclCategory::KEYSPACE)){}; + void Do() override; + void DoThroughDB() override; + void DoUpdateCache() override; + std::vector current_key() const override { return keys_; } + void Split(const HintKeys& hint_keys) override; + void Merge() override; + Cmd* Clone() override { return new DelCmd(*this); } + void DoBinlog() override; + + private: + std::vector keys_; + int64_t split_res_ = 0; + void DoInitial() 
override; + rocksdb::Status s_; +}; + +class IncrCmd : public Cmd { + public: + IncrCmd(const std::string& name, int arity, uint32_t flag) + : Cmd(name, arity, flag, static_cast(AclCategory::STRING)){}; + std::vector current_key() const override { + std::vector res; + res.push_back(key_); + return res; + } + void Do() override; + void DoThroughDB() override; + void DoUpdateCache() override; + void Split(const HintKeys& hint_keys) override{}; + void Merge() override{}; + Cmd* Clone() override { return new IncrCmd(*this); } + + private: + std::string key_; + int64_t new_value_ = 0; + void DoInitial() override; + rocksdb::Status s_; + int64_t expired_timestamp_millsec_ = 0; + std::string ToRedisProtocol() override; +}; + +class IncrbyCmd : public Cmd { + public: + IncrbyCmd(const std::string& name, int arity, uint32_t flag) + : Cmd(name, arity, flag, static_cast(AclCategory::STRING)){}; + std::vector current_key() const override { + std::vector res; + res.push_back(key_); + return res; + } + void Do() override; + void DoThroughDB() override; + void DoUpdateCache() override; + void Split(const HintKeys& hint_keys) override{}; + void Merge() override{}; + Cmd* Clone() override { return new IncrbyCmd(*this); } + + private: + std::string key_; + int64_t by_ = 0, new_value_ = 0; + void DoInitial() override; + rocksdb::Status s_; + int64_t expired_timestamp_millsec_ = 0; + std::string ToRedisProtocol() override; +}; + +class IncrbyfloatCmd : public Cmd { + public: + IncrbyfloatCmd(const std::string& name, int arity, uint32_t flag) + : Cmd(name, arity, flag, static_cast(AclCategory::STRING)){}; + std::vector current_key() const override { + std::vector res; + res.push_back(key_); + return res; + } + void Do() override; + void DoThroughDB() override; + void DoUpdateCache() override; + void Split(const HintKeys& hint_keys) override{}; + void Merge() override{}; + Cmd* Clone() override { return new IncrbyfloatCmd(*this); } + + private: + std::string key_, value_, new_value_; + double by_ = 0; + void DoInitial() override; + rocksdb::Status s_; + int64_t expired_timestamp_millsec_ = 0; + std::string ToRedisProtocol() override; +}; + +class DecrCmd : public Cmd { + public: + DecrCmd(const std::string& name, int arity, uint32_t flag) + : Cmd(name, arity, flag, static_cast(AclCategory::STRING)){}; + std::vector current_key() const override { + std::vector res; + res.push_back(key_); + return res; + } + void Do() override; + void DoThroughDB() override; + void DoUpdateCache() override; + void Split(const HintKeys& hint_keys) override{}; + void Merge() override{}; + Cmd* Clone() override { return new DecrCmd(*this); } + + private: + std::string key_; + int64_t new_value_ = 0; + void DoInitial() override; + rocksdb::Status s_; +}; + +class DecrbyCmd : public Cmd { + public: + DecrbyCmd(const std::string& name, int arity, uint32_t flag) + : Cmd(name, arity, flag, static_cast(AclCategory::STRING)){}; + std::vector current_key() const override { + std::vector res; + res.push_back(key_); + return res; + } + void Do() override; + void DoThroughDB() override; + void DoUpdateCache() override; + void Split(const HintKeys& hint_keys) override{}; + void Merge() override{}; + Cmd* Clone() override { return new DecrbyCmd(*this); } + + private: + std::string key_; + int64_t by_ = 0, new_value_ = 0; + void DoInitial() override; + rocksdb::Status s_; +}; + +class GetsetCmd : public Cmd { + public: + GetsetCmd(const std::string& name, int arity, uint32_t flag) + : Cmd(name, arity, flag, static_cast(AclCategory::STRING)){}; + 
std::vector current_key() const override { + std::vector res; + res.push_back(key_); + return res; + } + void Do() override; + void DoThroughDB() override; + void DoUpdateCache() override; + void Split(const HintKeys& hint_keys) override{}; + void Merge() override{}; + Cmd* Clone() override { return new GetsetCmd(*this); } + + private: + std::string key_; + std::string new_value_; + void DoInitial() override; + rocksdb::Status s_; +}; + +class AppendCmd : public Cmd { + public: + AppendCmd(const std::string& name, int arity, uint32_t flag) + : Cmd(name, arity, flag, static_cast(AclCategory::STRING)){}; + std::vector current_key() const override { + std::vector res; + res.push_back(key_); + return res; + } + void Do() override; + void DoThroughDB() override; + void DoUpdateCache() override; + void Split(const HintKeys& hint_keys) override{}; + void Merge() override{}; + Cmd* Clone() override { return new AppendCmd(*this); } + + private: + std::string key_; + std::string value_; + std::string new_value_; + void DoInitial() override; + rocksdb::Status s_; + int64_t expired_timestamp_millsec_ = 0; + std::string ToRedisProtocol() override; +}; + +class MgetCmd : public Cmd { + public: + MgetCmd(const std::string& name, int arity, uint32_t flag) + : Cmd(name, arity, flag, static_cast(AclCategory::STRING)){}; + void Do() override; + void ReadCache() override; + void DoThroughDB() override; + void DoUpdateCache() override; + std::vector current_key() const override { return keys_; } + void Split(const HintKeys& hint_keys) override; + void Merge() override; + Cmd* Clone() override { return new MgetCmd(*this); } + + private: + void DoInitial() override; + void MergeCachedAndDbResults(); + void AssembleResponseFromCache(); + + private: + std::vector keys_; + std::vector cache_miss_keys_; + std::string value_; + std::unordered_map cache_hit_values_; + std::vector split_res_; + std::vector db_value_status_array_; + std::vector cache_value_status_array_; + rocksdb::Status s_; +}; + +class KeysCmd : public Cmd { + public: + KeysCmd(const std::string& name, int arity, uint32_t flag) + : Cmd(name, arity, flag, static_cast(AclCategory::STRING)) {} + void Do() override; + void Split(const HintKeys& hint_keys) override {}; + void Merge() override {}; + Cmd* Clone() override { return new KeysCmd(*this); } + + private: + std::string pattern_; + storage::DataType type_{storage::DataType::kAll}; + void DoInitial() override; + void Clear() override { type_ = storage::DataType::kAll; } + rocksdb::Status s_; +}; + +class SetnxCmd : public Cmd { + public: + SetnxCmd(const std::string& name, int arity, uint32_t flag) + : Cmd(name, arity, flag, static_cast(AclCategory::STRING)) {} + std::vector current_key() const override { + std::vector res; + res.push_back(key_); + return res; + } + void Do() override; + void Split(const HintKeys& hint_keys) override {}; + void Merge() override {}; + Cmd* Clone() override { return new SetnxCmd(*this); } + + private: + std::string key_; + std::string value_; + int32_t success_ = 0; + void DoInitial() override; + rocksdb::Status s_; + std::string ToRedisProtocol() override; +}; + +class SetexCmd : public Cmd { + public: + SetexCmd(const std::string& name, int arity, uint32_t flag) + : Cmd(name, arity, flag, static_cast(AclCategory::STRING)) {} + std::vector current_key() const override { + std::vector res; + res.push_back(key_); + return res; + } + void Do() override; + void DoThroughDB() override; + void DoUpdateCache() override; + void Split(const HintKeys& hint_keys) override{}; + 
void Merge() override{}; + Cmd* Clone() override { return new SetexCmd(*this); } + + private: + std::string key_; + int64_t ttl_sec_ = 0; + std::string value_; + void DoInitial() override; + rocksdb::Status s_; + std::string ToRedisProtocol() override; +}; + +class PsetexCmd : public Cmd { + public: + PsetexCmd(const std::string& name, int arity, uint32_t flag) + : Cmd(name, arity, flag, static_cast(AclCategory::STRING)) {} + std::vector current_key() const override { + std::vector res; + res.push_back(key_); + return res; + } + void Do() override; + void DoThroughDB() override; + void DoUpdateCache() override; + void Split(const HintKeys& hint_keys) override{}; + void Merge() override{}; + Cmd* Clone() override { return new PsetexCmd(*this); } + + private: + std::string key_; + int64_t ttl_millsec = 0; + std::string value_; + void DoInitial() override; + rocksdb::Status s_; + std::string ToRedisProtocol() override; +}; + +class DelvxCmd : public Cmd { + public: + DelvxCmd(const std::string& name, int arity, uint32_t flag) + : Cmd(name, arity, flag, static_cast(AclCategory::STRING)) {} + std::vector current_key() const override { + std::vector res; + res.push_back(key_); + return res; + } + void Do() override; + void Split(const HintKeys& hint_keys) override {}; + void Merge() override {}; + Cmd* Clone() override { return new DelvxCmd(*this); } + + private: + std::string key_; + std::string value_; + int32_t success_ = 0; + void DoInitial() override; + rocksdb::Status s_; +}; + +class MsetCmd : public Cmd { + public: + MsetCmd(const std::string& name, int arity, uint32_t flag) + : Cmd(name, arity, flag, static_cast(AclCategory::STRING)) { + set_cmd_ = std::make_shared(kCmdNameSet, -3, kCmdFlagsWrite | kCmdFlagsKv); + } + MsetCmd(const MsetCmd& other) : Cmd(other), kvs_(other.kvs_) { + set_cmd_ = std::make_shared(kCmdNameSet, -3, kCmdFlagsWrite | kCmdFlagsKv); + } + + void Do() override; + void DoThroughDB() override; + void DoUpdateCache() override; + std::vector current_key() const override { + std::vector res; + for (auto& kv : kvs_) { + res.push_back(kv.key); + } + return res; + } + void Split(const HintKeys& hint_keys) override; + void Merge() override; + Cmd* Clone() override { return new MsetCmd(*this); } + void DoBinlog() override; + + private: + std::vector kvs_; + void DoInitial() override; + // used for write binlog + std::shared_ptr set_cmd_; + rocksdb::Status s_; +}; + +class MsetnxCmd : public Cmd { + public: + MsetnxCmd(const std::string& name, int arity, uint32_t flag) + : Cmd(name, arity, flag, static_cast(AclCategory::STRING)) { + set_cmd_ = std::make_shared(kCmdNameSet, -3, kCmdFlagsWrite | kCmdFlagsKv); + } + MsetnxCmd(const MsetnxCmd& other) + : Cmd(other), kvs_(other.kvs_), success_(other.success_) { + set_cmd_ = std::make_shared(kCmdNameSet, -3, kCmdFlagsWrite | kCmdFlagsKv); + } + std::vector current_key() const override { + std::vector res; + for (auto& kv : kvs_) { + res.push_back(kv.key); + } + return res; + } + void Do() override; + void Split(const HintKeys& hint_keys) override {}; + void Merge() override {}; + Cmd* Clone() override { return new MsetnxCmd(*this); } + void DoBinlog() override; + + private: + std::vector kvs_; + int32_t success_ = 0; + void DoInitial() override; + // used for write binlog + std::shared_ptr set_cmd_; +}; + +class GetrangeCmd : public Cmd { + public: + GetrangeCmd(const std::string& name, int arity, uint32_t flag) + : Cmd(name, arity, flag, static_cast(AclCategory::STRING)) {} + std::vector current_key() const override { + 
std::vector res; + res.push_back(key_); + return res; + } + void Do() override; + void ReadCache() override; + void DoThroughDB() override; + void DoUpdateCache() override; + void Split(const HintKeys& hint_keys) override{}; + void Merge() override{}; + Cmd* Clone() override { return new GetrangeCmd(*this); } + + private: + std::string key_; + int64_t start_ = 0; + int64_t end_ = 0; + std::string value_; + int64_t sec_ = 0; + rocksdb::Status s_; + void DoInitial() override; +}; + +class SetrangeCmd : public Cmd { + public: + SetrangeCmd(const std::string& name, int arity, uint32_t flag) + : Cmd(name, arity, flag, static_cast(AclCategory::STRING)) {} + std::vector current_key() const override { + std::vector res; + res.push_back(key_); + return res; + } + void Do() override; + void DoThroughDB() override; + void DoUpdateCache() override; + void Split(const HintKeys& hint_keys) override{}; + void Merge() override{}; + Cmd* Clone() override { return new SetrangeCmd(*this); } + + private: + std::string key_; + int64_t offset_ = 0; + std::string value_; + void DoInitial() override; + rocksdb::Status s_; +}; + +class StrlenCmd : public Cmd { + public: + StrlenCmd(const std::string& name, int arity, uint32_t flag) + : Cmd(name, arity, flag, static_cast(AclCategory::STRING)) {} + std::vector current_key() const override { + std::vector res; + res.push_back(key_); + return res; + } + void Do() override; + void ReadCache() override; + void DoThroughDB() override; + void DoUpdateCache() override; + void Split(const HintKeys& hint_keys) override{}; + void Merge() override{}; + Cmd* Clone() override { return new StrlenCmd(*this); } + + private: + std::string key_; + std::string value_; + int64_t ttl_millsec = 0; + void DoInitial() override; + rocksdb::Status s_; +}; + +class ExistsCmd : public Cmd { + public: + ExistsCmd(const std::string& name, int arity, uint32_t flag) + : Cmd(name, arity, flag, static_cast(AclCategory::KEYSPACE)) {} + void Do() override; + void ReadCache() override; + void DoThroughDB() override; + std::vector current_key() const override { return keys_; } + void Split(const HintKeys& hint_keys) override; + void Merge() override; + Cmd* Clone() override { return new ExistsCmd(*this); } + + private: + std::vector keys_; + int64_t split_res_ = 0; + void DoInitial() override; +}; + +class ExpireCmd : public Cmd { + public: + ExpireCmd(const std::string& name, int arity, uint32_t flag) + : Cmd(name, arity, flag, static_cast(AclCategory::KEYSPACE)) {} + std::vector current_key() const override { + std::vector res; + res.push_back(key_); + return res; + } + void Do() override; + void DoThroughDB() override; + void DoUpdateCache() override; + void Split(const HintKeys& hint_keys) override{}; + void Merge() override{}; + Cmd* Clone() override { return new ExpireCmd(*this); } + + private: + std::string key_; + int64_t ttl_sec_ = 0; + void DoInitial() override; + std::string ToRedisProtocol() override; + rocksdb::Status s_; +}; + +class PexpireCmd : public Cmd { + public: + PexpireCmd(const std::string& name, int arity, uint32_t flag) + : Cmd(name, arity, flag, static_cast(AclCategory::KEYSPACE)) {} + std::vector current_key() const override { + std::vector res; + res.push_back(key_); + return res; + } + void Do() override; + void DoThroughDB() override; + void DoUpdateCache() override; + void Split(const HintKeys& hint_keys) override{}; + void Merge() override{}; + Cmd* Clone() override { return new PexpireCmd(*this); } + + private: + std::string key_; + int64_t ttl_millsec = 0; + void 
DoInitial() override; + std::string ToRedisProtocol() override; + rocksdb::Status s_; +}; + +class ExpireatCmd : public Cmd { + public: + ExpireatCmd(const std::string& name, int arity, uint32_t flag) + : Cmd(name, arity, flag, static_cast(AclCategory::KEYSPACE)) {} + std::vector current_key() const override { + std::vector res; + res.push_back(key_); + return res; + } + void Do() override; + void DoThroughDB() override; + void DoUpdateCache() override; + void Split(const HintKeys& hint_keys) override{}; + void Merge() override{}; + Cmd* Clone() override { return new ExpireatCmd(*this); } + + private: + std::string key_; + int64_t time_stamp_sec_ = 0; + void DoInitial() override; + rocksdb::Status s_; +}; + +class PexpireatCmd : public Cmd { + public: + PexpireatCmd(const std::string& name, int arity, uint32_t flag) + : Cmd(name, arity, flag, static_cast(AclCategory::KEYSPACE)) {} + std::vector current_key() const override { + std::vector res; + res.push_back(key_); + return res; + } + void Do() override; + void DoThroughDB() override; + void DoUpdateCache() override; + void Split(const HintKeys& hint_keys) override{}; + void Merge() override{}; + Cmd* Clone() override { return new PexpireatCmd(*this); } + + private: + std::string key_; + int64_t time_stamp_millsec_ = 0; + void DoInitial() override; + rocksdb::Status s_; +}; + +class TtlCmd : public Cmd { + public: + TtlCmd(const std::string& name, int arity, uint32_t flag) + : Cmd(name, arity, flag, static_cast(AclCategory::KEYSPACE)) {} + std::vector current_key() const override { + std::vector res; + res.push_back(key_); + return res; + } + void Do() override; + void ReadCache() override; + void DoThroughDB() override; + void Split(const HintKeys& hint_keys) override{}; + void Merge() override{}; + Cmd* Clone() override { return new TtlCmd(*this); } + + private: + std::string key_; + void DoInitial() override; + rocksdb::Status s_; +}; + +class PttlCmd : public Cmd { + public: + PttlCmd(const std::string& name, int arity, uint32_t flag) + : Cmd(name, arity, flag, static_cast(AclCategory::KEYSPACE)) {} + std::vector current_key() const override { + std::vector res; + res.push_back(key_); + return res; + } + void Do() override; + void ReadCache() override; + void DoThroughDB() override; + void Split(const HintKeys& hint_keys) override{}; + void Merge() override{}; + Cmd* Clone() override { return new PttlCmd(*this); } + + private: + std::string key_; + void DoInitial() override; + rocksdb::Status s_; +}; + +class PersistCmd : public Cmd { + public: + PersistCmd(const std::string& name, int arity, uint32_t flag) + : Cmd(name, arity, flag, static_cast(AclCategory::KEYSPACE)) {} + std::vector current_key() const override { + std::vector res; + res.push_back(key_); + return res; + } + void Do() override; + void DoThroughDB() override; + void DoUpdateCache() override; + void Split(const HintKeys& hint_keys) override{}; + void Merge() override{}; + Cmd* Clone() override { return new PersistCmd(*this); } + + private: + std::string key_; + void DoInitial() override; + rocksdb::Status s_; +}; + +class TypeCmd : public Cmd { + public: + TypeCmd(const std::string& name, int arity, uint32_t flag) + : Cmd(name, arity, flag, static_cast(AclCategory::KEYSPACE)) {} + std::vector current_key() const override { + std::vector res; + res.push_back(key_); + return res; + } + void Do() override; + void ReadCache() override; + void DoThroughDB() override; + void Split(const HintKeys& hint_keys) override{}; + void Merge() override{}; + Cmd* Clone() override { 
return new TypeCmd(*this); } + + private: + std::string key_; + void DoInitial() override; + rocksdb::Status s_; +}; + +class ScanCmd : public Cmd { + public: + ScanCmd(const std::string& name, int arity, uint32_t flag) + : Cmd(name, arity, flag, static_cast(AclCategory::KEYSPACE)), pattern_("*") {} + void Do() override; + void Split(const HintKeys& hint_keys) override {}; + void Merge() override {}; + Cmd* Clone() override { return new ScanCmd(*this); } + + private: + int64_t cursor_ = 0; + std::string pattern_ = "*"; + int64_t count_ = 10; + storage::DataType type_ = storage::DataType::kAll; + void DoInitial() override; + void Clear() override { + pattern_ = "*"; + count_ = 10; + type_ = storage::DataType::kAll; + } + rocksdb::Status s_; +}; + +class ScanxCmd : public Cmd { + public: + ScanxCmd(const std::string& name, int arity, uint32_t flag) + : Cmd(name, arity, flag, static_cast(AclCategory::KEYSPACE)), pattern_("*") {} + void Do() override; + void Split(const HintKeys& hint_keys) override {}; + void Merge() override {}; + Cmd* Clone() override { return new ScanxCmd(*this); } + + private: + storage::DataType type_; + std::string start_key_; + std::string pattern_ = "*"; + int64_t count_ = 10; + void DoInitial() override; + void Clear() override { + pattern_ = "*"; + count_ = 10; + } + rocksdb::Status s_; +}; + +class PKSetexAtCmd : public Cmd { + public: + PKSetexAtCmd(const std::string& name, int arity, uint32_t flag) + : Cmd(name, arity, flag, static_cast(AclCategory::KEYSPACE)) {} + std::vector current_key() const override { + std::vector res; + res.push_back(key_); + return res; + } + void Do() override; + void DoThroughDB() override; + void DoUpdateCache() override; + void Split(const HintKeys& hint_keys) override {}; + void Merge() override {}; + Cmd* Clone() override { return new PKSetexAtCmd(*this); } + + private: + std::string key_; + std::string value_; + int64_t time_stamp_sec_ = 0; + void DoInitial() override; + void Clear() override { time_stamp_sec_ = 0; } + rocksdb::Status s_; +}; + +class PKScanRangeCmd : public Cmd { + public: + PKScanRangeCmd(const std::string& name, int arity, uint32_t flag) + : Cmd(name, arity, flag, static_cast(AclCategory::KEYSPACE)), pattern_("*") {} + std::vector current_key() const override { + std::vector res; + res.push_back(key_start_); + return res; + } + void Do() override; + void Split(const HintKeys& hint_keys) override {}; + void Merge() override {}; + Cmd* Clone() override { return new PKScanRangeCmd(*this); } + + private: + storage::DataType type_; + std::string key_start_; + std::string key_end_; + std::string pattern_ = "*"; + int64_t limit_ = 10; + bool string_with_value = false; + void DoInitial() override; + void Clear() override { + pattern_ = "*"; + limit_ = 10; + string_with_value = false; + } + rocksdb::Status s_; +}; + +class PKRScanRangeCmd : public Cmd { + public: + PKRScanRangeCmd(const std::string& name, int arity, uint32_t flag) + : Cmd(name, arity, flag, static_cast(AclCategory::KEYSPACE)), pattern_("*") {} + std::vector current_key() const override { + std::vector res; + res.push_back(key_start_); + return res; + } + void Do() override; + void Split(const HintKeys& hint_keys) override {}; + void Merge() override {}; + Cmd* Clone() override { return new PKRScanRangeCmd(*this); } + + private: + storage::DataType type_ = storage::DataType::kAll; + std::string key_start_; + std::string key_end_; + std::string pattern_ = "*"; + int64_t limit_ = 10; + bool string_with_value = false; + void DoInitial() override; + void 
Clear() override { + pattern_ = "*"; + limit_ = 10; + string_with_value = false; + } + rocksdb::Status s_; +}; +#endif diff --git a/tools/pika_migrate/include/pika_list.h b/tools/pika_migrate/include/pika_list.h new file mode 100644 index 0000000000..1591e76c32 --- /dev/null +++ b/tools/pika_migrate/include/pika_list.h @@ -0,0 +1,429 @@ +// Copyright (c) 2015-present, Qihoo, Inc. All rights reserved. +// This source code is licensed under the BSD-style license found in the +// LICENSE file in the root directory of this source tree. An additional grant +// of patent rights can be found in the PATENTS file in the same directory. + +#ifndef PIKA_LIST_H_ +#define PIKA_LIST_H_ + +#include "include/acl.h" +#include "include/pika_command.h" +#include "storage/storage.h" + +/* + * list + */ +class LIndexCmd : public Cmd { + public: + LIndexCmd(const std::string& name, int arity, uint32_t flag) + : Cmd(name, arity, flag, static_cast(AclCategory::LIST)){}; + std::vector current_key() const override { + std::vector res; + res.push_back(key_); + return res; + } + void Do() override; + void ReadCache() override; + void DoThroughDB() override; + void DoUpdateCache() override; + void Split(const HintKeys& hint_keys) override{}; + void Merge() override{}; + Cmd* Clone() override { return new LIndexCmd(*this); } + + private: + std::string key_; + int64_t index_ = 0; + void DoInitial() override; + void Clear() override { index_ = 0; } + rocksdb::Status s_; +}; + +class LInsertCmd : public Cmd { + public: + LInsertCmd(const std::string& name, int arity, uint32_t flag) + : Cmd(name, arity, flag, static_cast(AclCategory::LIST)){}; + std::vector current_key() const override { + std::vector res; + res.push_back(key_); + return res; + } + void Do() override; + void DoThroughDB() override; + void DoUpdateCache() override; + void Split(const HintKeys& hint_keys) override{}; + void Merge() override{}; + Cmd* Clone() override { return new LInsertCmd(*this); } + + private: + std::string key_; + storage::BeforeOrAfter dir_{storage::After}; + std::string pivot_; + std::string value_; + void DoInitial() override; + rocksdb::Status s_; +}; + +class LLenCmd : public Cmd { + public: + LLenCmd(const std::string& name, int arity, uint32_t flag) + : Cmd(name, arity, flag, static_cast(AclCategory::LIST)){}; + std::vector current_key() const override { + std::vector res; + res.push_back(key_); + return res; + } + void Do() override; + void ReadCache() override; + void DoThroughDB() override; + void DoUpdateCache() override; + void Split(const HintKeys& hint_keys) override{}; + void Merge() override{}; + Cmd* Clone() override { return new LLenCmd(*this); } + + private: + std::string key_; + void DoInitial() override; + rocksdb::Status s_; +}; + +class BlockingBaseCmd : public Cmd { + public: + BlockingBaseCmd(const std::string& name, int arity, uint32_t flag, uint32_t category = 0) + : Cmd(name, arity, flag, static_cast(AclCategory::LIST) | category) {} + + // blpop/brpop used start + struct WriteBinlogOfPopArgs { + BlockKeyType block_type; + std::string key; + std::shared_ptr db; + std::shared_ptr conn; + WriteBinlogOfPopArgs() = default; + WriteBinlogOfPopArgs(BlockKeyType block_type_, const std::string& key_, std::shared_ptr db_, + std::shared_ptr conn_) + : block_type(block_type_), key(key_), db(db_), conn(conn_) {} + }; + void BlockThisClientToWaitLRPush(BlockKeyType block_pop_type, std::vector& keys, int64_t expire_time); + void TryToServeBLrPopWithThisKey(const std::string& key, std::shared_ptr db); + static void 
ServeAndUnblockConns(void* args); + static void WriteBinlogOfPopAndUpdateCache(std::vector& pop_args); + void removeDuplicates(std::vector& keys_); + // blpop/brpop used functions end +}; + +class BLPopCmd final : public BlockingBaseCmd { + public: + BLPopCmd(const std::string& name, int arity, uint32_t flag) + : BlockingBaseCmd(name, arity, flag, static_cast(AclCategory::BLOCKING)){}; + std::vector current_key() const override { return {keys_}; } + void Do() override; + void Split(const HintKeys& hint_keys) override{}; + void Merge() override{}; + Cmd* Clone() override { return new BLPopCmd(*this); } + void DoInitial() override; + void DoBinlog() override; + + private: + std::vector keys_; + int64_t expire_time_{0}; + WriteBinlogOfPopArgs binlog_args_; + bool is_binlog_deferred_{false}; + rocksdb::Status s_; +}; + +class LPopCmd : public Cmd { + public: + LPopCmd(const std::string& name, int arity, uint32_t flag) + : Cmd(name, arity, flag, static_cast(AclCategory::LIST)){}; + std::vector current_key() const override { + std::vector res; + res.push_back(key_); + return res; + } + void Do() override; + void DoThroughDB() override; + void DoUpdateCache() override; + void Split(const HintKeys& hint_keys) override{}; + void Merge() override{}; + Cmd* Clone() override { return new LPopCmd(*this); } + + private: + std::string key_; + std::int64_t count_ = 1; + void DoInitial() override; + rocksdb::Status s_; +}; + +class LPushCmd : public BlockingBaseCmd { + public: + LPushCmd(const std::string& name, int arity, uint32_t flag) : BlockingBaseCmd(name, arity, flag){}; + std::vector current_key() const override { + std::vector res; + res.push_back(key_); + return res; + } + void Do() override; + void DoThroughDB() override; + void DoUpdateCache() override; + void Split(const HintKeys& hint_keys) override{}; + void Merge() override{}; + Cmd* Clone() override { return new LPushCmd(*this); } + + private: + std::string key_; + std::vector values_; + rocksdb::Status s_; + void DoInitial() override; + void Clear() override { values_.clear(); } +}; + +class LPushxCmd : public Cmd { + public: + LPushxCmd(const std::string& name, int arity, uint32_t flag) + : Cmd(name, arity, flag, static_cast(AclCategory::LIST)){}; + std::vector current_key() const override { + std::vector res; + res.push_back(key_); + return res; + } + void Do() override; + void DoThroughDB() override; + void DoUpdateCache() override; + void Split(const HintKeys& hint_keys) override{}; + void Merge() override{}; + Cmd* Clone() override { return new LPushxCmd(*this); } + + private: + std::string key_; + rocksdb::Status s_; + std::vector values_; + void DoInitial() override; +}; + +class LRangeCmd : public Cmd { + public: + LRangeCmd(const std::string& name, int arity, uint32_t flag) + : Cmd(name, arity, flag, static_cast(AclCategory::LIST)){}; + std::vector current_key() const override { + std::vector res; + res.push_back(key_); + return res; + } + void Do() override; + void ReadCache() override; + void DoThroughDB() override; + void DoUpdateCache() override; + void Split(const HintKeys& hint_keys) override{}; + void Merge() override{}; + Cmd* Clone() override { return new LRangeCmd(*this); } + + private: + std::string key_; + int64_t left_ = 0; + int64_t right_ = 0; + rocksdb::Status s_; + void DoInitial() override; +}; + +class LRemCmd : public Cmd { + public: + LRemCmd(const std::string& name, int arity, uint32_t flag) + : Cmd(name, arity, flag, static_cast(AclCategory::LIST)){}; + std::vector current_key() const override { + 
std::vector res; + res.push_back(key_); + return res; + } + void Do() override; + void DoThroughDB() override; + void DoUpdateCache() override; + void Split(const HintKeys& hint_keys) override{}; + void Merge() override{}; + Cmd* Clone() override { return new LRemCmd(*this); } + + private: + std::string key_; + int64_t count_ = 0; + std::string value_; + rocksdb::Status s_; + void DoInitial() override; +}; + +class LSetCmd : public Cmd { + public: + LSetCmd(const std::string& name, int arity, uint32_t flag) + : Cmd(name, arity, flag, static_cast(AclCategory::LIST)){}; + std::vector current_key() const override { + std::vector res; + res.push_back(key_); + return res; + } + void Do() override; + void DoThroughDB() override; + void DoUpdateCache() override; + void Split(const HintKeys& hint_keys) override{}; + void Merge() override{}; + Cmd* Clone() override { return new LSetCmd(*this); } + + private: + std::string key_; + int64_t index_ = 0; + rocksdb::Status s_; + std::string value_; + void DoInitial() override; +}; + +class LTrimCmd : public Cmd { + public: + LTrimCmd(const std::string& name, int arity, uint32_t flag) + : Cmd(name, arity, flag, static_cast(AclCategory::LIST)){}; + std::vector current_key() const override { + std::vector res; + res.push_back(key_); + return res; + } + void Do() override; + void DoThroughDB() override; + void DoUpdateCache() override; + void Split(const HintKeys& hint_keys) override{}; + void Merge() override{}; + Cmd* Clone() override { return new LTrimCmd(*this); } + + private: + std::string key_; + int64_t start_ = 0; + int64_t stop_ = 0; + rocksdb::Status s_; + void DoInitial() override; +}; + +class BRPopCmd final : public BlockingBaseCmd { + public: + BRPopCmd(const std::string& name, int arity, uint32_t flag) + : BlockingBaseCmd(name, arity, flag, static_cast(AclCategory::BLOCKING)){}; + std::vector current_key() const override { return {keys_}; } + void Do() override; + void Split(const HintKeys& hint_keys) override{}; + void Merge() override{}; + Cmd* Clone() override { return new BRPopCmd(*this); } + void DoInitial() override; + void DoBinlog() override; + + private: + std::vector keys_; + int64_t expire_time_{0}; + WriteBinlogOfPopArgs binlog_args_; + bool is_binlog_deferred_{false}; +}; + +class RPopCmd : public Cmd { + public: + RPopCmd(const std::string& name, int arity, uint32_t flag) + : Cmd(name, arity, flag, static_cast(AclCategory::LIST)){}; + std::vector current_key() const override { + std::vector res; + res.push_back(key_); + return res; + } + void Do() override; + void DoThroughDB() override; + void DoUpdateCache() override; + void Split(const HintKeys& hint_keys) override{}; + void Merge() override{}; + Cmd* Clone() override { return new RPopCmd(*this); } + + private: + std::string key_; + std::int64_t count_ = 1; + void DoInitial() override; + rocksdb::Status s_; +}; + +class RPopLPushCmd : public BlockingBaseCmd { + public: + RPopLPushCmd(const std::string& name, int arity, uint32_t flag) + : BlockingBaseCmd(name, arity, flag, static_cast(AclCategory::BLOCKING)) { + rpop_cmd_ = std::make_shared(kCmdNameRPop, 2, kCmdFlagsWrite | kCmdFlagsList); + lpush_cmd_ = std::make_shared(kCmdNameLPush, -3, kCmdFlagsWrite | kCmdFlagsList); + }; + RPopLPushCmd(const RPopLPushCmd& other) + : BlockingBaseCmd(other), + source_(other.source_), + receiver_(other.receiver_), + value_poped_from_source_(other.value_poped_from_source_), + is_write_binlog_(other.is_write_binlog_) { + rpop_cmd_ = std::make_shared(kCmdNameRPop, 2, kCmdFlagsWrite | 
kCmdFlagsList); + lpush_cmd_ = std::make_shared(kCmdNameLPush, -3, kCmdFlagsWrite | kCmdFlagsList); + } + std::vector current_key() const override { + std::vector res; + res.push_back(receiver_); + res.push_back(source_); + return res; + } + void Do() override; + void ReadCache() override; + void Split(const HintKeys& hint_keys) override{}; + void Merge() override{}; + Cmd* Clone() override { return new RPopLPushCmd(*this); } + void DoBinlog() override; + + private: + std::string source_; + std::string receiver_; + std::string value_poped_from_source_; + bool is_write_binlog_ = false; + // used for write binlog + std::shared_ptr rpop_cmd_; + std::shared_ptr lpush_cmd_; + rocksdb::Status s_; + void DoInitial() override; +}; + +class RPushCmd : public BlockingBaseCmd { + public: + RPushCmd(const std::string& name, int arity, uint32_t flag) : BlockingBaseCmd(name, arity, flag){}; + std::vector current_key() const override { + std::vector res; + res.push_back(key_); + return res; + } + void Do() override; + + void DoThroughDB() override; + void DoUpdateCache() override; + void Split(const HintKeys& hint_keys) override{}; + void Merge() override{}; + Cmd* Clone() override { return new RPushCmd(*this); } + + private: + std::string key_; + std::vector values_; + rocksdb::Status s_; + void DoInitial() override; + void Clear() override { values_.clear(); } +}; + +class RPushxCmd : public Cmd { + public: + RPushxCmd(const std::string& name, int arity, uint32_t flag) + : Cmd(name, arity, flag, static_cast(AclCategory::LIST)){}; + std::vector current_key() const override { + std::vector res; + res.push_back(key_); + return res; + } + void Do() override; + void DoThroughDB() override; + void DoUpdateCache() override; + void Split(const HintKeys& hint_keys) override{}; + void Merge() override{}; + Cmd* Clone() override { return new RPushxCmd(*this); } + + private: + std::string key_; + std::string value_; + std::vector values_; + rocksdb::Status s_; + void DoInitial() override; +}; +#endif diff --git a/tools/pika_migrate/include/pika_migrate_thread.h b/tools/pika_migrate/include/pika_migrate_thread.h new file mode 100644 index 0000000000..50a3658eca --- /dev/null +++ b/tools/pika_migrate/include/pika_migrate_thread.h @@ -0,0 +1,118 @@ +#ifndef PIKA_MIGRATE_THREAD_H_ +#define PIKA_MIGRATE_THREAD_H_ + +#include "include/pika_client_conn.h" +#include "include/pika_command.h" +#include "net/include/net_cli.h" +#include "net/include/net_thread.h" +#include "pika_client_conn.h" +#include "pika_db.h" +#include "storage/storage.h" +#include "storage/src/base_data_key_format.h" +#include "strings.h" + +void WriteDelKeyToBinlog(const std::string& key, const std::shared_ptr& db); + +class PikaMigrateThread; +class DB; +class PikaParseSendThread : public net::Thread { + public: + PikaParseSendThread(PikaMigrateThread* migrate_thread, const std::shared_ptr& db_); + ~PikaParseSendThread() override; + bool Init(const std::string& ip, int64_t port, int64_t timeout_ms, int64_t mgrtkeys_num); + void ExitThread(void); + + private: + int MigrateOneKey(net::NetCli* cli, const std::string& key, const char key_type, bool async); + void DelKeysAndWriteBinlog(std::deque>& send_keys, const std::shared_ptr& db); + bool CheckMigrateRecv(int64_t need_receive_num); + void *ThreadMain() override; + + + private: + std::string dest_ip_; + int64_t dest_port_ = 0; + int64_t timeout_ms_ = 60; + int32_t mgrtkeys_num_ = 0; + std::atomic should_exit_; + PikaMigrateThread *migrate_thread_ = nullptr; + net::NetCli *cli_ = nullptr; + 
pstd::Mutex working_mutex_; + std::shared_ptr db_; +}; + +class PikaMigrateThread : public net::Thread { + public: + PikaMigrateThread(); + ~PikaMigrateThread() override; + bool ReqMigrateBatch(const std::string& ip, int64_t port, int64_t time_out, int64_t keys_num, int64_t slot_id, + const std::shared_ptr& db); + int ReqMigrateOne(const std::string& key, const std::shared_ptr& db); + void GetMigrateStatus(std::string* ip, int64_t* port, int64_t* slot, bool* migrating, int64_t* moved, + int64_t* remained); + void CancelMigrate(void); + void IncWorkingThreadNum(void); + void DecWorkingThreadNum(void); + void OnTaskFailed(void); + void AddResponseNum(int32_t response_num); + bool IsMigrating(void) {return is_migrating_.load();} + time_t GetStartTime(void) {return start_time_;} + time_t GetEndTime(void) {return end_time_;} + std::string GetStartTimeStr(void) {return s_start_time_;} + + private: + void ResetThread(void); + void DestroyThread(bool is_self_exit); + void NotifyRequestMigrate(void); + bool IsMigrating(std::pair& kpair); + void ReadSlotKeys(const std::string& slotKey, int64_t need_read_num, int64_t& real_read_num, int32_t* finish); + bool CreateParseSendThreads(int32_t dispatch_num); + void DestroyParseSendThreads(void); + void *ThreadMain() override; + + private: + std::string dest_ip_; + int64_t dest_port_ = 0; + int64_t timeout_ms_ = 60; + int64_t keys_num_ = 0; + time_t start_time_ = 0; + time_t end_time_ = 0; + std::string s_start_time_; + std::shared_ptr db_; + std::atomic is_migrating_; + std::atomic should_exit_; + std::atomic is_task_success_; + std::atomic send_num_; + std::atomic response_num_; + std::atomic moved_num_; + + bool request_migrate_ = false; + pstd::CondVar request_migrate_cond_; + std::mutex request_migrate_mutex_; + + int32_t workers_num_ = 0; + std::vector workers_; + + std::atomic working_thread_num_; + pstd::CondVar workers_cond_; + std::mutex workers_mutex_; + int64_t slot_id_ = 0; + std::deque> mgrtone_queue_; + std::mutex mgrtone_queue_mutex_; + + int64_t cursor_ = 0; + std::deque> mgrtkeys_queue_; + pstd::CondVar mgrtkeys_cond_; + std::mutex mgrtkeys_queue_mutex_; + + std::map, std::string> mgrtkeys_map_; + std::mutex mgrtkeys_map_mutex_; + + std::mutex migrator_mutex_; + + friend class PikaParseSendThread; +}; + +#endif + +/* EOF */ \ No newline at end of file diff --git a/tools/pika_migrate/include/pika_monitor_thread.h b/tools/pika_migrate/include/pika_monitor_thread.h new file mode 100644 index 0000000000..27bfa24050 --- /dev/null +++ b/tools/pika_migrate/include/pika_monitor_thread.h @@ -0,0 +1,47 @@ +// Copyright (c) 2015-present, Qihoo, Inc. All rights reserved. +// This source code is licensed under the BSD-style license found in the +// LICENSE file in the root directory of this source tree. An additional grant +// of patent rights can be found in the PATENTS file in the same directory. 
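Before the next header, a minimal usage sketch of the PikaMigrateThread interface declared above. This is not part of the patch: the endpoint, slot id, batch size, and polling loop are illustrative assumptions; only ReqMigrateBatch and GetMigrateStatus come from the header, and a live migrator plus a std::shared_ptr<DB> are assumed to be wired up elsewhere (as this patch's pika.cc does).

#include <chrono>
#include <iostream>
#include <memory>
#include <string>
#include <thread>

// Sketch only: ask the migrator to move up to 100 keys of slot 3 to
// 127.0.0.1:9221 with a 60s timeout, then poll progress until it stops.
void MigrateSlotBatch(PikaMigrateThread* migrator, const std::shared_ptr<DB>& db) {
  if (!migrator->ReqMigrateBatch("127.0.0.1", 9221, 60, 100, 3, db)) {
    // Presumably another migration to a different target is in flight.
    std::cout << "migrate request rejected" << std::endl;
    return;
  }
  std::string ip;
  int64_t port = 0, slot = 0, moved = 0, remained = 0;
  bool migrating = true;
  while (migrating) {
    std::this_thread::sleep_for(std::chrono::milliseconds(100));
    migrator->GetMigrateStatus(&ip, &port, &slot, &migrating, &moved, &remained);
    std::cout << "slot " << slot << ": moved=" << moved
              << " remained=" << remained << std::endl;
  }
}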
+
+#ifndef PIKA_MONITOR_THREAD_H_
+#define PIKA_MONITOR_THREAD_H_
+
+#include <atomic>
+#include <deque>
+#include <list>
+#include <queue>
+
+#include "net/include/net_thread.h"
+#include "pstd/include/pstd_mutex.h"
+#include "include/pika_define.h"
+#include "include/pika_client_conn.h"
+
+class PikaMonitorThread : public net::Thread {
+ public:
+  PikaMonitorThread();
+  ~PikaMonitorThread() override;
+
+  void AddMonitorClient(const std::shared_ptr<PikaClientConn>& client_ptr);
+  void AddMonitorMessage(const std::string& monitor_message);
+  int32_t ThreadClientList(std::vector<ClientInfo>* client = nullptr);
+  bool ThreadClientKill(const std::string& ip_port = "all");
+  bool HasMonitorClients();
+
+ private:
+  void AddCronTask(const MonitorCronTask& task);
+  bool FindClient(const std::string& ip_port);
+  net::WriteStatus SendMessage(int32_t fd, std::string& message);
+  void RemoveMonitorClient(const std::string& ip_port);
+
+  std::atomic<bool> has_monitor_clients_;
+  pstd::Mutex monitor_mutex_protector_;
+  pstd::CondVar monitor_cond_;
+
+  std::list<ClientInfo> monitor_clients_;
+  std::deque<std::string> monitor_messages_;
+  std::queue<MonitorCronTask> cron_tasks_;
+
+  void* ThreadMain() override;
+  void RemoveMonitorClient(int32_t client_fd);
+};
+#endif
diff --git a/tools/pika_migrate/include/pika_monotonic_time.h b/tools/pika_migrate/include/pika_monotonic_time.h
new file mode 100644
index 0000000000..909fadfaec
--- /dev/null
+++ b/tools/pika_migrate/include/pika_monotonic_time.h
@@ -0,0 +1,20 @@
+// Copyright (c) 2023-present, Qihoo, Inc. All rights reserved.
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree. An additional grant
+// of patent rights can be found in the PATENTS file in the same directory.
+
+#ifndef PIKA_MONOTONIC_TIME_H
+#define PIKA_MONOTONIC_TIME_H
+
+#include <cstdint>
+
+/* A counter in micro-seconds. The 'monotime' type is provided for variables
+ * holding a monotonic time. This will help distinguish & document that the
+ * variable is associated with the monotonic clock and should not be confused
+ * with other types of time.*/
+using monotime = uint64_t;
+
+// Get monotonic time in microseconds
+monotime getMonotonicUs();
+
+#endif // PIKA_MONOTONIC_TIME_H
\ No newline at end of file
diff --git a/tools/pika_migrate/include/pika_pubsub.h b/tools/pika_migrate/include/pika_pubsub.h
new file mode 100644
index 0000000000..f9f7d85a30
--- /dev/null
+++ b/tools/pika_migrate/include/pika_pubsub.h
@@ -0,0 +1,107 @@
+// Copyright (c) 2015-present, Qihoo, Inc. All rights reserved.
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree. An additional grant
+// of patent rights can be found in the PATENTS file in the same directory.
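The header above only declares getMonotonicUs(); its definition lives in src/pika_monotonic_time.cc, which is outside this hunk. As a sketch, a POSIX-only implementation consistent with the comment (the real source may branch per platform, e.g. for macOS) could be:

#include <cstdint>
#include <ctime>

using monotime = uint64_t;

// Monotonic microseconds since an arbitrary epoch: unaffected by wall-clock
// adjustments, which is what timeout and cron bookkeeping wants.
monotime getMonotonicUs() {
  struct timespec ts;
  clock_gettime(CLOCK_MONOTONIC, &ts);
  return static_cast<monotime>(ts.tv_sec) * 1000000ULL +
         static_cast<monotime>(ts.tv_nsec) / 1000;
}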
+
+#ifndef PIKA_PUBSUB_H_
+#define PIKA_PUBSUB_H_
+
+#include "acl.h"
+#include "pika_command.h"
+
+/*
+ * pubsub
+ */
+class PublishCmd : public Cmd {
+ public:
+  PublishCmd(const std::string& name, int arity, uint32_t flag)
+      : Cmd(name, arity, flag, static_cast<uint32_t>(AclCategory::PUBSUB)) {}
+  void Do() override;
+  void Split(const HintKeys& hint_keys) override{};
+  void Merge() override{};
+  Cmd* Clone() override { return new PublishCmd(*this); }
+  std::vector<std::string> current_key() const override { return {channel_}; }
+
+ private:
+  std::string channel_;
+  std::string msg_;
+  void DoInitial() override;
+};
+
+class SubscribeCmd : public Cmd {
+ public:
+  SubscribeCmd(const std::string& name, int arity, uint32_t flag)
+      : Cmd(name, arity, flag, static_cast<uint32_t>(AclCategory::PUBSUB)) {}
+  void Do() override;
+  void Split(const HintKeys& hint_keys) override{};
+  void Merge() override{};
+  Cmd* Clone() override { return new SubscribeCmd(*this); }
+  std::vector<std::string> current_key() const override { return channels_; }
+
+ private:
+  std::vector<std::string> channels_;
+  void DoInitial() override;
+};
+
+class UnSubscribeCmd : public Cmd {
+ public:
+  UnSubscribeCmd(const std::string& name, int arity, uint32_t flag)
+      : Cmd(name, arity, flag, static_cast<uint32_t>(AclCategory::PUBSUB)) {}
+  void Do() override;
+  void Split(const HintKeys& hint_keys) override{};
+  void Merge() override{};
+  Cmd* Clone() override { return new UnSubscribeCmd(*this); }
+  std::vector<std::string> current_key() const override { return channels_; }
+
+ private:
+  std::vector<std::string> channels_;
+  void DoInitial() override;
+};
+
+class PUnSubscribeCmd : public Cmd {
+ public:
+  PUnSubscribeCmd(const std::string& name, int arity, uint32_t flag)
+      : Cmd(name, arity, flag, static_cast<uint32_t>(AclCategory::PUBSUB)) {}
+  void Do() override;
+  void Split(const HintKeys& hint_keys) override{};
+  void Merge() override{};
+  Cmd* Clone() override { return new PUnSubscribeCmd(*this); }
+  std::vector<std::string> current_key() const override { return {channels_}; }
+
+ private:
+  std::vector<std::string> channels_;
+  void DoInitial() override;
+};
+
+class PSubscribeCmd : public Cmd {
+ public:
+  PSubscribeCmd(const std::string& name, int arity, uint32_t flag)
+      : Cmd(name, arity, flag, static_cast<uint32_t>(AclCategory::PUBSUB)) {}
+  void Do() override;
+  void Split(const HintKeys& hint_keys) override{};
+  void Merge() override{};
+  Cmd* Clone() override { return new PSubscribeCmd(*this); }
+  std::vector<std::string> current_key() const override { return {channels_}; }
+
+  std::vector<std::string> channels_;
+ private:
+  void DoInitial() override;
+};
+
+class PubSubCmd : public Cmd {
+ public:
+  PubSubCmd(const std::string& name, int arity, uint32_t flag)
+      : Cmd(name, arity, flag, static_cast<uint32_t>(AclCategory::PUBSUB)) {}
+  void Do() override;
+  void Split(const HintKeys& hint_keys) override{};
+  void Merge() override{};
+  Cmd* Clone() override { return new PubSubCmd(*this); }
+
+ private:
+  std::string subcommand_;
+  std::vector<std::string> arguments_;
+  void DoInitial() override;
+  void Clear() override { arguments_.clear(); }
+};
+
+#endif // INCLUDE_PIKA_PUBSUB_H_
diff --git a/tools/pika_migrate/include/pika_repl_bgworker.h b/tools/pika_migrate/include/pika_repl_bgworker.h
new file mode 100644
index 0000000000..dd62622fb9
--- /dev/null
+++ b/tools/pika_migrate/include/pika_repl_bgworker.h
@@ -0,0 +1,52 @@
+// Copyright (c) 2019-present, Qihoo, Inc. All rights reserved.
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree. An additional grant
+// of patent rights can be found in the PATENTS file in the same directory.
+
+#ifndef PIKA_REPL_BGWROKER_H_
+#define PIKA_REPL_BGWROKER_H_
+
+#include <functional>
+#include <memory>
+#include <string>
+#include "net/include/bg_thread.h"
+#include "net/include/pb_conn.h"
+#include "net/include/thread_pool.h"
+
+#include "pika_inner_message.pb.h"
+
+#include "include/pika_binlog_transverter.h"
+#include "include/pika_define.h"
+#include "include/pika_command.h"
+
+class PikaReplBgWorker {
+ public:
+  explicit PikaReplBgWorker(int queue_size);
+  int StartThread();
+  int StopThread();
+  int TaskQueueSize() {
+    int pri_size = 0;
+    int qu_size = 0;
+    bg_thread_.QueueSize(&pri_size, &qu_size);
+    return pri_size + qu_size;
+  }
+  void Schedule(net::TaskFunc func, void* arg);
+  void Schedule(net::TaskFunc func, void* arg, std::function<void()>& call_back);
+  static void HandleBGWorkerWriteBinlog(void* arg);
+  static void HandleBGWorkerWriteDB(void* arg);
+  static void WriteDBInSyncWay(const std::shared_ptr<Cmd>& c_ptr);
+  void SetThreadName(const std::string& thread_name) {
+    bg_thread_.set_thread_name(thread_name);
+  }
+  BinlogItem binlog_item_;
+  net::RedisParser redis_parser_;
+  std::string ip_port_;
+  std::string db_name_;
+
+ private:
+  net::BGThread bg_thread_;
+  static int HandleWriteBinlog(net::RedisParser* parser, const net::RedisCmdArgsType& argv);
+  static void ParseBinlogOffset(const InnerMessage::BinlogOffset& pb_offset, LogOffset* offset);
+};
+
+#endif // PIKA_REPL_BGWROKER_H_
diff --git a/tools/pika_migrate/include/pika_repl_client.h b/tools/pika_migrate/include/pika_repl_client.h
new file mode 100644
index 0000000000..73fb897a62
--- /dev/null
+++ b/tools/pika_migrate/include/pika_repl_client.h
@@ -0,0 +1,117 @@
+// Copyright (c) 2015-present, Qihoo, Inc. All rights reserved.
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree. An additional grant
+// of patent rights can be found in the PATENTS file in the same directory.
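The bg worker above is a thin wrapper over net::BGThread: callers enqueue a function pointer plus an owned argument. A sketch of that calling convention follows, assuming net::TaskFunc is the plain void(*)(void*) callback type used throughout this codebase; BinlogChunk, ApplyBinlogChunk, and EnqueueChunk are illustrative names, not patch content.

#include <iostream>
#include <memory>
#include <string>

struct BinlogChunk {
  std::string payload;
};

// Task body: takes ownership of the heap-allocated argument, mirroring how
// the Handle* static entry points above consume their void* arg.
static void ApplyBinlogChunk(void* arg) {
  std::unique_ptr<BinlogChunk> chunk(static_cast<BinlogChunk*>(arg));
  std::cout << "applied " << chunk->payload.size() << " bytes" << std::endl;
}

void EnqueueChunk(PikaReplBgWorker* worker, std::string payload) {
  auto* chunk = new BinlogChunk{std::move(payload)};  // freed inside the task
  worker->Schedule(ApplyBinlogChunk, chunk);
  std::cout << "pending tasks: " << worker->TaskQueueSize() << std::endl;
}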
+
+#ifndef PIKA_REPL_CLIENT_H_
+#define PIKA_REPL_CLIENT_H_
+
+#include <memory>
+#include <string>
+#include <vector>
+
+#include "net/include/client_thread.h"
+#include "net/include/net_conn.h"
+#include "net/include/thread_pool.h"
+#include "pstd/include/pstd_status.h"
+#include "include/pika_define.h"
+
+#include "include/pika_binlog_reader.h"
+#include "include/pika_repl_bgworker.h"
+#include "include/pika_repl_client_thread.h"
+
+#include "net/include/thread_pool.h"
+#include "pika_inner_message.pb.h"
+
+
+struct ReplClientTaskArg {
+  std::shared_ptr<InnerMessage::InnerResponse> res;
+  std::shared_ptr<net::PbConn> conn;
+  ReplClientTaskArg(const std::shared_ptr<InnerMessage::InnerResponse>& _res,
+                    const std::shared_ptr<net::PbConn>& _conn)
+      : res(_res), conn(_conn) {}
+};
+
+struct ReplClientWriteBinlogTaskArg {
+  std::shared_ptr<InnerMessage::InnerResponse> res;
+  std::shared_ptr<net::PbConn> conn;
+  void* res_private_data;
+  PikaReplBgWorker* worker;
+  ReplClientWriteBinlogTaskArg(const std::shared_ptr<InnerMessage::InnerResponse>& _res,
+                               const std::shared_ptr<net::PbConn>& _conn,
+                               void* _res_private_data, PikaReplBgWorker* _worker)
+      : res(_res), conn(_conn), res_private_data(_res_private_data), worker(_worker) {}
+};
+
+struct ReplClientWriteDBTaskArg {
+  const std::shared_ptr<Cmd> cmd_ptr;
+  explicit ReplClientWriteDBTaskArg(std::shared_ptr<Cmd> _cmd_ptr)
+      : cmd_ptr(std::move(_cmd_ptr)) {}
+  ~ReplClientWriteDBTaskArg() = default;
+};
+
+class PikaReplClient {
+ public:
+  PikaReplClient(int cron_interval, int keepalive_timeout);
+  ~PikaReplClient();
+
+  int Start();
+  int Stop();
+
+  pstd::Status Write(const std::string& ip, int port, const std::string& msg);
+  pstd::Status Close(const std::string& ip, int port);
+
+  void Schedule(net::TaskFunc func, void* arg);
+  void ScheduleByDBName(net::TaskFunc func, void* arg, const std::string& db_name);
+  void ScheduleWriteBinlogTask(const std::string& db_name,
+                               const std::shared_ptr<InnerMessage::InnerResponse>& res,
+                               const std::shared_ptr<net::PbConn>& conn, void* res_private_data);
+  void ScheduleWriteDBTask(const std::shared_ptr<Cmd>& cmd_ptr, const std::string& db_name);
+
+  pstd::Status SendMetaSync();
+  pstd::Status SendDBSync(const std::string& ip, uint32_t port, const std::string& db_name,
+                          const BinlogOffset& boffset, const std::string& local_ip);
+  pstd::Status SendTrySync(const std::string& ip, uint32_t port, const std::string& db_name,
+                           const BinlogOffset& boffset, const std::string& local_ip);
+  pstd::Status SendBinlogSync(const std::string& ip, uint32_t port, const std::string& db_name,
+                              const LogOffset& ack_start, const LogOffset& ack_end,
+                              const std::string& local_ip, bool is_first_send);
+  pstd::Status SendRemoveSlaveNode(const std::string& ip, uint32_t port, const std::string& db_name,
+                                   const std::string& local_ip);
+
+  void IncrAsyncWriteDBTaskCount(const std::string& db_name, int32_t incr_step) {
+    int32_t db_index = db_name.back() - '0';
+    assert(db_index >= 0 && db_index <= 7);
+    async_write_db_task_counts_[db_index].fetch_add(incr_step, std::memory_order::memory_order_seq_cst);
+  }
+
+  void DecrAsyncWriteDBTaskCount(const std::string& db_name, int32_t incr_step) {
+    int32_t db_index = db_name.back() - '0';
+    assert(db_index >= 0 && db_index <= 7);
+    async_write_db_task_counts_[db_index].fetch_sub(incr_step, std::memory_order::memory_order_seq_cst);
+  }
+
+  int32_t GetUnfinishedAsyncWriteDBTaskCount(const std::string& db_name) {
+    int32_t db_index = db_name.back() - '0';
+    assert(db_index >= 0 && db_index <= 7);
+    return async_write_db_task_counts_[db_index].load(std::memory_order_seq_cst);
+  }
+
+ private:
+  size_t GetBinlogWorkerIndexByDBName(const std::string &db_name);
+  size_t GetHashIndexByKey(const std::string& key);
+  void UpdateNextAvail() {
+    next_avail_ = (next_avail_ + 1) % static_cast<int>(write_binlog_workers_.size());
+  }
+
+  std::unique_ptr<PikaReplClientThread> client_thread_;
+  int next_avail_ = 0;
+  std::hash<std::string> str_hash;
+
+  // async_write_db_task_counts_ is used when consuming binlog: it tracks the number of async
+  // write-DB tasks that are queued or being executed by the write-DB workers. If a flushdb
+  // binlog needs to be applied to the DB, it must wait until this count drops to zero (see
+  // pika discussion #2807 for details). It is only used on a slave node while consuming binlog.
+  std::atomic<int32_t> async_write_db_task_counts_[MAX_DB_NUM];
+  // [NOTICE] write_db_workers_ must be declared after async_write_db_task_counts_, so that the
+  // workers are destroyed before the counters when PikaReplClient is destructed: some of the
+  // async tasks executed by write_db_workers_ manipulate async_write_db_task_counts_.
+  std::vector<std::unique_ptr<PikaReplBgWorker>> write_binlog_workers_;
+  std::vector<std::unique_ptr<PikaReplBgWorker>> write_db_workers_;
+};
+
+#endif
diff --git a/tools/pika_migrate/include/pika_repl_client_conn.h b/tools/pika_migrate/include/pika_repl_client_conn.h
new file mode 100644
index 0000000000..bfd697dfa0
--- /dev/null
+++ b/tools/pika_migrate/include/pika_repl_client_conn.h
@@ -0,0 +1,39 @@
+// Copyright (c) 2015-present, Qihoo, Inc. All rights reserved.
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree. An additional grant
+// of patent rights can be found in the PATENTS file in the same directory.
+
+#ifndef PIKA_REPL_CLIENT_CONN_H_
+#define PIKA_REPL_CLIENT_CONN_H_
+
+#include "net/include/pb_conn.h"
+
+#include <memory>
+#include <vector>
+
+#include "include/pika_conf.h"
+#include "pika_inner_message.pb.h"
+
+class SyncMasterDB;
+class SyncSlaveDB;
+
+class PikaReplClientConn : public net::PbConn {
+ public:
+  PikaReplClientConn(int fd, const std::string& ip_port, net::Thread* thread, void* worker_specific_data,
+                     net::NetMultiplexer* mpx);
+  ~PikaReplClientConn() override = default;
+
+  static void HandleMetaSyncResponse(void* arg);
+  static void HandleDBSyncResponse(void* arg);
+  static void HandleTrySyncResponse(void* arg);
+  static void HandleRemoveSlaveNodeResponse(void* arg);
+  static bool IsDBStructConsistent(const std::vector<DBStruct>& current_dbs,
+                                   const std::vector<DBStruct>& expect_tables);
+  int DealMessage() override;
+
+ private:
+  // dispatch binlog by its db_name
+  void DispatchBinlogRes(const std::shared_ptr<InnerMessage::InnerResponse>& response);
+};
+
+#endif
diff --git a/tools/pika_migrate/include/pika_repl_client_thread.h b/tools/pika_migrate/include/pika_repl_client_thread.h
new file mode 100644
index 0000000000..fe8213b090
--- /dev/null
+++ b/tools/pika_migrate/include/pika_repl_client_thread.h
@@ -0,0 +1,49 @@
+// Copyright (c) 2015-present, Qihoo, Inc. All rights reserved.
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree. An additional grant
+// of patent rights can be found in the PATENTS file in the same directory.
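The counter comments above describe a bracket protocol around the write-DB workers. The sketch below shows how the pieces fit, using only members declared in PikaReplClient; WaitUntilWriteDBDrained is a hypothetical helper standing in for the flushdb gating described in the comment, and db_name must end in a digit 0-7 because the counters are indexed by its last character.

#include <chrono>
#include <memory>
#include <string>
#include <thread>

// Count first, then queue: the worker side decrements when the task finishes,
// so the counter never under-reports in-flight work.
void ScheduleOneWriteDBTask(PikaReplClient* client, const std::shared_ptr<Cmd>& cmd,
                            const std::string& db_name) {
  client->IncrAsyncWriteDBTaskCount(db_name, 1);
  client->ScheduleWriteDBTask(cmd, db_name);
}

// A flushdb binlog must not apply while older async writes are in flight.
void WaitUntilWriteDBDrained(PikaReplClient* client, const std::string& db_name) {
  while (client->GetUnfinishedAsyncWriteDBTaskCount(db_name) > 0) {
    std::this_thread::sleep_for(std::chrono::milliseconds(1));
  }
}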
+
+#ifndef PIKA_REPL_CLIENT_THREAD_H_
+#define PIKA_REPL_CLIENT_THREAD_H_
+
+#include <memory>
+#include <string>
+
+#include "include/pika_repl_client_conn.h"
+
+#include "net/include/client_thread.h"
+#include "net/include/net_conn.h"
+
+class PikaReplClientThread : public net::ClientThread {
+ public:
+  PikaReplClientThread(int cron_interval, int keepalive_timeout);
+  ~PikaReplClientThread() override = default;
+
+ private:
+  class ReplClientConnFactory : public net::ConnFactory {
+   public:
+    std::shared_ptr<net::NetConn> NewNetConn(int connfd, const std::string& ip_port, net::Thread* thread,
+                                             void* worker_specific_data,
+                                             net::NetMultiplexer* net) const override {
+      return std::static_pointer_cast<net::NetConn>(
+          std::make_shared<PikaReplClientConn>(connfd, ip_port, thread, worker_specific_data, net));
+    }
+  };
+  class ReplClientHandle : public net::ClientHandle {
+   public:
+    void CronHandle() const override {}
+    void FdTimeoutHandle(int fd, const std::string& ip_port) const override;
+    void FdClosedHandle(int fd, const std::string& ip_port) const override;
+    bool AccessHandle(std::string& ip) const override {
+      return true;
+    }
+    int CreateWorkerSpecificData(void** data) const override { return 0; }
+    int DeleteWorkerSpecificData(void* data) const override { return 0; }
+    void DestConnectFailedHandle(const std::string& ip_port, const std::string& reason) const override {}
+  };
+
+  ReplClientConnFactory conn_factory_;
+  ReplClientHandle handle_;
+};
+
+#endif // PIKA_REPL_CLIENT_THREAD_H_
diff --git a/tools/pika_migrate/include/pika_repl_server.h b/tools/pika_migrate/include/pika_repl_server.h
new file mode 100644
index 0000000000..4a12f99cb9
--- /dev/null
+++ b/tools/pika_migrate/include/pika_repl_server.h
@@ -0,0 +1,51 @@
+// Copyright (c) 2019-present, Qihoo, Inc. All rights reserved.
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree. An additional grant
+// of patent rights can be found in the PATENTS file in the same directory.
+
+#ifndef PIKA_REPL_SERVER_H_
+#define PIKA_REPL_SERVER_H_
+
+#include "net/include/thread_pool.h"
+
+#include <memory>
+#include <set>
+#include <string>
+
+#include "include/pika_command.h"
+#include "include/pika_repl_bgworker.h"
+#include "include/pika_repl_server_thread.h"
+
+struct ReplServerTaskArg {
+  std::shared_ptr<InnerMessage::InnerRequest> req;
+  std::shared_ptr<net::PbConn> conn;
+  ReplServerTaskArg(std::shared_ptr<InnerMessage::InnerRequest> _req, std::shared_ptr<net::PbConn> _conn)
+      : req(std::move(_req)), conn(std::move(_conn)) {}
+};
+
+class PikaReplServer {
+ public:
+  PikaReplServer(const std::set<std::string>& ips, int port, int cron_interval);
+  ~PikaReplServer();
+
+  int Start();
+  int Stop();
+
+  pstd::Status SendSlaveBinlogChips(const std::string& ip, int port, const std::vector<WriteTask>& tasks);
+  pstd::Status Write(const std::string& ip, int port, const std::string& msg);
+
+  void BuildBinlogOffset(const LogOffset& offset, InnerMessage::BinlogOffset* boffset);
+  void BuildBinlogSyncResp(const std::vector<WriteTask>& tasks, InnerMessage::InnerResponse* resp);
+  void Schedule(net::TaskFunc func, void* arg);
+  void UpdateClientConnMap(const std::string& ip_port, int fd);
+  void RemoveClientConn(int fd);
+  void KillAllConns();
+
+ private:
+  std::unique_ptr<net::ThreadPool> server_tp_ = nullptr;
+  std::unique_ptr<PikaReplServerThread> pika_repl_server_thread_ = nullptr;
+  std::shared_mutex client_conn_rwlock_;
+  std::map<std::string, int> client_conn_map_;
+};
+
+#endif
diff --git a/tools/pika_migrate/include/pika_repl_server_conn.h b/tools/pika_migrate/include/pika_repl_server_conn.h
new file mode 100644
index 0000000000..c96159e0fe
--- /dev/null
+++ b/tools/pika_migrate/include/pika_repl_server_conn.h
@@ -0,0 +1,42 @@
+// Copyright (c) 2019-present, Qihoo, Inc. All rights reserved.
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree. An additional grant
+// of patent rights can be found in the PATENTS file in the same directory.
+
+#ifndef PIKA_REPL_SERVER_CONN_H_
+#define PIKA_REPL_SERVER_CONN_H_
+
+#include <memory>
+
+#include "net/include/net_thread.h"
+#include "net/include/pb_conn.h"
+
+#include "include/pika_define.h"
+#include "pika_inner_message.pb.h"
+
+class SyncMasterDB;
+
+class PikaReplServerConn : public net::PbConn {
+ public:
+  PikaReplServerConn(int fd, const std::string& ip_port, net::Thread* thread, void* worker_specific_data,
+                     net::NetMultiplexer* mpx);
+  ~PikaReplServerConn() override;
+
+  static void HandleMetaSyncRequest(void* arg);
+  static void HandleTrySyncRequest(void* arg);
+
+  static bool TrySyncOffsetCheck(const std::shared_ptr<SyncMasterDB>& db,
+                                 const InnerMessage::InnerRequest::TrySync& try_sync_request,
+                                 InnerMessage::InnerResponse::TrySync* try_sync_response);
+  static bool TrySyncUpdateSlaveNode(const std::shared_ptr<SyncMasterDB>& db,
+                                     const InnerMessage::InnerRequest::TrySync& try_sync_request,
+                                     const std::shared_ptr<net::NetConn>& conn,
+                                     InnerMessage::InnerResponse::TrySync* try_sync_response);
+  static void HandleDBSyncRequest(void* arg);
+  static void HandleBinlogSyncRequest(void* arg);
+  static void HandleRemoveSlaveNodeRequest(void* arg);
+
+  int DealMessage() override;
+};
+
+#endif // INCLUDE_PIKA_REPL_SERVER_CONN_H_
diff --git a/tools/pika_migrate/include/pika_repl_server_thread.h b/tools/pika_migrate/include/pika_repl_server_thread.h
new file mode 100644
index 0000000000..c4e356839b
--- /dev/null
+++ b/tools/pika_migrate/include/pika_repl_server_thread.h
@@ -0,0 +1,46 @@
+// Copyright (c) 2019-present, Qihoo, Inc. All rights reserved.
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree. An additional grant
+// of patent rights can be found in the PATENTS file in the same directory.
+
+#ifndef PIKA_REPL_SERVER_THREAD_H_
+#define PIKA_REPL_SERVER_THREAD_H_
+
+#include "net/src/holy_thread.h"
+
+#include "include/pika_repl_server_conn.h"
+
+class PikaReplServerThread : public net::HolyThread {
+ public:
+  PikaReplServerThread(const std::set<std::string>& ips, int port, int cron_interval);
+  ~PikaReplServerThread() override = default;
+  int ListenPort();
+
+ private:
+  class ReplServerConnFactory : public net::ConnFactory {
+   public:
+    explicit ReplServerConnFactory(PikaReplServerThread* binlog_receiver) : binlog_receiver_(binlog_receiver) {}
+
+    std::shared_ptr<net::NetConn> NewNetConn(int connfd, const std::string& ip_port, net::Thread* thread,
+                                             void* worker_specific_data,
+                                             net::NetMultiplexer* net) const override {
+      return std::static_pointer_cast<net::NetConn>(
+          std::make_shared<PikaReplServerConn>(connfd, ip_port, thread, binlog_receiver_, net));
+    }
+
+   private:
+    PikaReplServerThread* binlog_receiver_ = nullptr;
+  };
+
+  class ReplServerHandle : public net::ServerHandle {
+   public:
+    void FdClosedHandle(int fd, const std::string& ip_port) const override;
+  };
+
+  ReplServerConnFactory conn_factory_;
+  ReplServerHandle handle_;
+  int port_ = 0;
+  uint64_t serial_ = 0;
+};
+
+#endif
diff --git a/tools/pika_migrate/include/pika_rm.h b/tools/pika_migrate/include/pika_rm.h
new file mode 100644
index 0000000000..ec80c1ff58
--- /dev/null
+++ b/tools/pika_migrate/include/pika_rm.h
@@ -0,0 +1,228 @@
+// Copyright (c) 2015-present, Qihoo, Inc. All rights reserved.
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree. An additional grant
+// of patent rights can be found in the PATENTS file in the same directory.
+
+#ifndef PIKA_RM_H_
+#define PIKA_RM_H_
+
+#include <memory>
+#include <queue>
+#include <string>
+#include <unordered_map>
+#include <utility>
+#include <vector>
+
+#include "pstd/include/pstd_status.h"
+
+#include "include/pika_binlog_reader.h"
+#include "include/pika_consensus.h"
+#include "include/pika_repl_client.h"
+#include "include/pika_repl_server.h"
+#include "include/pika_slave_node.h"
+#include "include/pika_stable_log.h"
+#include "include/rsync_client.h"
+
+#define kBinlogSendPacketNum 40
+#define kBinlogSendBatchNum 100
+
+// unit: microseconds (2 seconds / 20 seconds)
+#define kSendKeepAliveTimeout (2 * 1000000)
+#define kRecvKeepAliveTimeout (20 * 1000000)
+
+
+class SyncDB {
+ public:
+  SyncDB(const std::string& db_name);
+  virtual ~SyncDB() = default;
+  DBInfo& SyncDBInfo() { return db_info_; }
+  std::string DBName();
+
+ protected:
+  DBInfo db_info_;
+};
+
+class SyncMasterDB : public SyncDB {
+ public:
+  SyncMasterDB(const std::string& db_name);
+  pstd::Status AddSlaveNode(const std::string& ip, int port, int session_id);
+  pstd::Status RemoveSlaveNode(const std::string& ip, int port);
+  pstd::Status ActivateSlaveBinlogSync(const std::string& ip, int port, const LogOffset& offset);
+  pstd::Status ActivateSlaveDbSync(const std::string& ip, int port);
+  pstd::Status SyncBinlogToWq(const std::string& ip, int port);
+  pstd::Status GetSlaveSyncBinlogInfo(const std::string& ip, int port, BinlogOffset* sent_offset,
+                                      BinlogOffset* acked_offset);
+  pstd::Status GetSlaveState(const std::string& ip, int port, SlaveState* slave_state);
+  pstd::Status SetLastRecvTime(const std::string& ip, int port, uint64_t time);
+  pstd::Status GetSafetyPurgeBinlog(std::string* safety_purge);
+  pstd::Status WakeUpSlaveBinlogSync();
+  pstd::Status CheckSyncTimeout(uint64_t now);
+  pstd::Status GetSlaveNodeSession(const std::string& ip, int port, int32_t* session);
+  int GetNumberOfSlaveNode();
+  bool BinlogCloudPurge(uint32_t index);
+  bool CheckSlaveNodeExist(const std::string& ip, int port);
+
+  // debug use
+  std::string ToStringStatus();
+  int32_t GenSessionId();
+  bool CheckSessionId(const std::string& ip, int port, const std::string& db_name, int session_id);
+
+  // consensus use
+  pstd::Status ConsensusUpdateSlave(const std::string& ip, int port, const LogOffset& start, const LogOffset& end);
+  pstd::Status ConsensusProposeLog(const std::shared_ptr<Cmd>& cmd_ptr);
+  pstd::Status ConsensusProcessLeaderLog(const std::shared_ptr<Cmd>& cmd_ptr, const BinlogItem& attribute);
+  LogOffset ConsensusCommittedIndex();
+  LogOffset ConsensusLastIndex();
+
+  std::shared_ptr<StableLog> StableLogger() { return coordinator_.StableLogger(); }
+
+  std::shared_ptr<Binlog> Logger() {
+    if (!coordinator_.StableLogger()) {
+      return nullptr;
+    }
+    return coordinator_.StableLogger()->Logger();
+  }
+
+ private:
+  // invoker needs to hold slave_mu_
+  pstd::Status ReadBinlogFileToWq(const std::shared_ptr<SlaveNode>& slave_ptr);
+
+  std::shared_ptr<SlaveNode> GetSlaveNode(const std::string& ip, int port);
+  std::unordered_map<std::string, std::shared_ptr<SlaveNode>> GetAllSlaveNodes();
+
+  pstd::Mutex session_mu_;
+  int32_t session_id_ = 0;
+  ConsensusCoordinator coordinator_;
+};
+
+class SyncSlaveDB : public SyncDB {
+ public:
+  SyncSlaveDB(const std::string& db_name);
+  void Activate(const RmNode& master, const ReplState& repl_state);
+  void Deactivate();
+  void SetLastRecvTime(uint64_t time);
+  void SetReplState(const ReplState& repl_state);
+  ReplState State();
+  pstd::Status CheckSyncTimeout(uint64_t now);
+
+  // For display
+  pstd::Status GetInfo(std::string* info);
+  // For debug
+  std::string ToStringStatus();
+  std::string LocalIp();
+  int32_t MasterSessionId();
+  const std::string& MasterIp();
+  int MasterPort();
+  void SetMasterSessionId(int32_t session_id);
+  void SetLocalIp(const std::string& local_ip);
+  void StopRsync();
+  pstd::Status ActivateRsync();
+  bool IsRsyncExited() { return rsync_cli_->IsExitedFromRunning(); }
+
+ private:
+  std::unique_ptr<rsync::RsyncClient> rsync_cli_;
+  int32_t rsync_init_retry_count_{0};
+  pstd::Mutex db_mu_;
+  RmNode m_info_;
+  ReplState repl_state_{kNoConnect};
+  std::string local_ip_;
+};
+
+class PikaReplicaManager {
+ public:
+  PikaReplicaManager();
+  ~PikaReplicaManager() = default;
+  friend Cmd;
+  void Start();
+  void Stop();
+  bool CheckMasterSyncFinished();
+  pstd::Status ActivateSyncSlaveDB(const RmNode& node, const ReplState& repl_state);
+
+  // For Pika Repl Client Thread
+  pstd::Status SendMetaSyncRequest();
+  pstd::Status SendRemoveSlaveNodeRequest(const std::string& table);
+  pstd::Status SendTrySyncRequest(const std::string& db_name);
+  pstd::Status SendDBSyncRequest(const std::string& db_name);
+  pstd::Status SendBinlogSyncAckRequest(const std::string& table, const LogOffset& ack_start,
+                                        const LogOffset& ack_end, bool is_first_send = false);
+  pstd::Status CloseReplClientConn(const std::string& ip, int32_t port);
+
+  // For Pika Repl Server Thread
+  pstd::Status SendSlaveBinlogChipsRequest(const std::string& ip, int port, const std::vector<WriteTask>& tasks);
+
+  // For SyncMasterDB
+  std::shared_ptr<SyncMasterDB> GetSyncMasterDBByName(const DBInfo& p_info);
+
+  // For SyncSlaveDB
+  std::shared_ptr<SyncSlaveDB> GetSyncSlaveDBByName(const DBInfo& p_info);
+
+  pstd::Status RunSyncSlaveDBStateMachine();
+
+  pstd::Status CheckSyncTimeout(uint64_t now);
+
+  // To check db info
+  // For pkcluster info command
+  static bool CheckSlaveDBState(const std::string& ip, int port);
+  void FindCommonMaster(std::string* master);
+  void
RmStatus(std::string* debug_info); + pstd::Status CheckDBRole(const std::string& table, int* role); + pstd::Status LostConnection(const std::string& ip, int port); + pstd::Status DeactivateSyncSlaveDB(const std::string& ip, int port); + + // Update binlog win and try to send next binlog + pstd::Status UpdateSyncBinlogStatus(const RmNode& slave, const LogOffset& offset_start, const LogOffset& offset_end); + pstd::Status WakeUpBinlogSync(); + + // write_queue related + void ProduceWriteQueue(const std::string& ip, int port, std::string db_name, const std::vector& tasks); + void DropItemInOneWriteQueue(const std::string& ip, int port, const std::string& db_name); + void DropItemInWriteQueue(const std::string& ip, int port); + int ConsumeWriteQueue(); + + // Schedule Task + void ScheduleReplServerBGTask(net::TaskFunc func, void* arg); + void ScheduleReplClientBGTask(net::TaskFunc func, void* arg); + void ScheduleWriteBinlogTask(const std::string& db_name, + const std::shared_ptr& res, + const std::shared_ptr& conn, void* res_private_data); + void ScheduleWriteDBTask(const std::shared_ptr& cmd_ptr, const std::string& db_name); + void ScheduleReplClientBGTaskByDBName(net::TaskFunc , void* arg, const std::string &db_name); + void ReplServerRemoveClientConn(int fd); + void ReplServerUpdateClientConnMap(const std::string& ip_port, int fd); + + std::shared_mutex& GetDBLock() { return dbs_rw_; } + + void DBLock() { + dbs_rw_.lock(); + } + void DBUnlock() { + dbs_rw_.unlock(); + } + + std::unordered_map, hash_db_info>& GetSyncMasterDBs() { + return sync_master_dbs_; + } + std::unordered_map, hash_db_info>& GetSyncSlaveDBs() { + return sync_slave_dbs_; + } + + int32_t GetUnfinishedAsyncWriteDBTaskCount(const std::string& db_name) { + return pika_repl_client_->GetUnfinishedAsyncWriteDBTaskCount(db_name); + } + + private: + void InitDB(); + pstd::Status SelectLocalIp(const std::string& remote_ip, int remote_port, std::string* local_ip); + + std::shared_mutex dbs_rw_; + std::unordered_map, hash_db_info> sync_master_dbs_; + std::unordered_map, hash_db_info> sync_slave_dbs_; + + pstd::Mutex write_queue_mu_; + + // every host owns a queue, the key is "ip + port" + std::unordered_map>> write_queues_; + std::unique_ptr pika_repl_client_; + std::unique_ptr pika_repl_server_; +}; + +#endif // PIKA_RM_H diff --git a/tools/pika_migrate/include/pika_rsync_service.h b/tools/pika_migrate/include/pika_rsync_service.h new file mode 100644 index 0000000000..ccd4605a15 --- /dev/null +++ b/tools/pika_migrate/include/pika_rsync_service.h @@ -0,0 +1,27 @@ +// Copyright (c) 2019-present, Qihoo, Inc. All rights reserved. +// This source code is licensed under the BSD-style license found in the +// LICENSE file in the root directory of this source tree. An additional grant +// of patent rights can be found in the PATENTS file in the same directory. + +#ifndef PIKA_RSYNC_SERVICE_H_ +#define PIKA_RSYNC_SERVICE_H_ + +#include + +class PikaRsyncService { + public: + PikaRsyncService(const std::string& raw_path, int port); + ~PikaRsyncService(); + int StartRsync(); + bool CheckRsyncAlive(); + int ListenPort(); + + private: + int CreateSecretFile(); + std::string raw_path_; + std::string rsync_path_; + std::string pid_path_; + int port_ = 0; +}; + +#endif diff --git a/tools/pika_migrate/include/pika_server.h b/tools/pika_migrate/include/pika_server.h new file mode 100644 index 0000000000..8c24a07a36 --- /dev/null +++ b/tools/pika_migrate/include/pika_server.h @@ -0,0 +1,662 @@ +// Copyright (c) 2015-present, Qihoo, Inc. 
All rights reserved. +// This source code is licensed under the BSD-style license found in the +// LICENSE file in the root directory of this source tree. An additional grant +// of patent rights can be found in the PATENTS file in the same directory. + +#ifndef PIKA_SERVER_H_ +#define PIKA_SERVER_H_ + +#include + +#if defined(__APPLE__) || defined(__FreeBSD__) +# include +# include +#else +# include +#endif + +#include +#include + +#include "src/cache/include/config.h" +#include "net/include/bg_thread.h" +#include "net/include/net_pubsub.h" +#include "net/include/thread_pool.h" +#include "pstd/include/pstd_mutex.h" +#include "pstd/include/pstd_status.h" +#include "pstd/include/pstd_string.h" +#include "storage/backupable.h" +#include "storage/storage.h" + +#include "acl.h" +#include "include/pika_auxiliary_thread.h" +#include "include/pika_binlog.h" +#include "include/pika_cache.h" +#include "include/pika_client_processor.h" +#include "include/pika_cmd_table_manager.h" +#include "include/pika_command.h" +#include "include/pika_conf.h" +#include "include/pika_db.h" +#include "include/pika_define.h" +#include "include/pika_dispatch_thread.h" +#include "include/pika_instant.h" +#include "include/pika_migrate_thread.h" +#include "include/pika_repl_client.h" +#include "include/pika_repl_server.h" +#include "include/pika_rsync_service.h" +#include "include/pika_slot_command.h" +#include "include/pika_statistic.h" +#include "include/pika_transaction.h" +#include "include/rsync_server.h" + +extern std::unique_ptr g_pika_conf; + +enum TaskType { + kCompactAll, + kResetReplState, + kPurgeLog, + kStartKeyScan, + kStopKeyScan, + kBgSave, + kCompactRangeAll, + kCompactOldestOrBestDeleteRatioSst, +}; + +struct TaskArg { + TaskType type; + std::vector argv; + TaskArg(TaskType t) : type(t) {} + TaskArg(TaskType t, const std::vector& a) : type(t), argv(a) {} +}; + +void DoBgslotscleanup(void* arg); +void DoBgslotsreload(void* arg); + +class PikaServer : public pstd::noncopyable { + public: + PikaServer(); + ~PikaServer(); + + /* + * Server init info + */ + bool ServerInit(); + void Start(); + void Exit(); + + std::string host(); + int port(); + time_t start_time_s(); + std::string master_ip(); + int master_port(); + int role(); + bool leader_protected_mode(); + void CheckLeaderProtectedMode(); + bool readonly(const std::string& table); + int repl_state(); + std::string repl_state_str(); + bool force_full_sync(); + void SetForceFullSync(bool v); + void SetDispatchQueueLimit(int queue_limit); + void SetSlowCmdThreadPoolFlag(bool flag); + storage::StorageOptions storage_options(); + std::unique_ptr& pika_dispatch_thread() { + return pika_dispatch_thread_; + } + + /* + * DB use + */ + void InitDBStruct(); + bool IsBgSaving(); + bool IsKeyScaning(); + bool IsCompacting(); + bool IsDBExist(const std::string& db_name); + bool IsDBBinlogIoError(const std::string& db_name); + std::shared_ptr GetDB(const std::string& db_name); + std::set GetAllDBName(); + pstd::Status DoSameThingSpecificDB(const std::set& dbs, const TaskArg& arg); + std::shared_mutex& GetDBLock() { + return dbs_rw_; + } + void DBLockShared() { + dbs_rw_.lock_shared(); + } + void DBLock() { + dbs_rw_.lock(); + } + void DBUnlock() { + dbs_rw_.unlock(); + } + void DBUnlockShared() { + dbs_rw_.unlock_shared(); + } + + /* + * DB use + */ + void PrepareDBTrySync(); + void DBSetMaxCacheStatisticKeys(uint32_t max_cache_statistic_keys); + void DBSetSmallCompactionThreshold(uint32_t small_compaction_threshold); + void 
DBSetSmallCompactionDurationThreshold(uint32_t small_compaction_duration_threshold); + bool GetDBBinlogOffset(const std::string& db_name, BinlogOffset* boffset); + pstd::Status DoSameThingEveryDB(const TaskType& type); + + /* + * Master use + */ + void BecomeMaster(); + void DeleteSlave(int fd); // conn fd + int32_t CountSyncSlaves(); + int32_t GetSlaveListString(std::string& slave_list_str); + bool TryAddSlave(const std::string& ip, int64_t port, int fd, const std::vector& table_structs); + pstd::Mutex slave_mutex_; // protect slaves_; + std::vector slaves_; + + /** + * Sotsmgrt use + */ + std::unique_ptr pika_migrate_; + + /* + * Slave use + */ + void SyncError(); + void RemoveMaster(); + bool SetMaster(std::string& master_ip, int master_port); + + /* + * Slave State Machine + */ + bool ShouldMetaSync(); + void FinishMetaSync(); + bool MetaSyncDone(); + void ResetMetaSyncStatus(); + int GetMetaSyncTimestamp(); + void UpdateMetaSyncTimestamp(); + void UpdateMetaSyncTimestampWithoutLock(); + bool IsFirstMetaSync(); + void SetFirstMetaSync(bool v); + + /* + * PikaClientProcessor Process Task + */ + void ScheduleClientPool(net::TaskFunc func, void* arg, bool is_slow_cmd, bool is_admin_cmd); + + // for info debug + size_t ClientProcessorThreadPoolCurQueueSize(); + size_t ClientProcessorThreadPoolMaxQueueSize(); + size_t SlowCmdThreadPoolCurQueueSize(); + size_t SlowCmdThreadPoolMaxQueueSize(); + + /* + * BGSave used + */ + void BGSaveTaskSchedule(net::TaskFunc func, void* arg); + + /* + * PurgeLog used + */ + void PurgelogsTaskSchedule(net::TaskFunc func, void* arg); + + /* + * Flushall & Flushdb used + */ + void PurgeDir(const std::string& path); + void PurgeDirTaskSchedule(void (*function)(void*), void* arg); + + /* + * DBSync used + */ + pstd::Status GetDumpUUID(const std::string& db_name, std::string* snapshot_uuid); + pstd::Status GetDumpMeta(const std::string& db_name, std::vector* files, std::string* snapshot_uuid); + void TryDBSync(const std::string& ip, int port, const std::string& db_name, int32_t top); + + /* + * Keyscan used + */ + void KeyScanTaskSchedule(net::TaskFunc func, void* arg); + + /* + * Client used + */ + void ClientKillAll(); + int ClientKill(const std::string& ip_port); + int64_t ClientList(std::vector* clients = nullptr); + void ClientKillPubSub(); + void ClientKillAllNormal(); + + /* + * Monitor used + */ + bool HasMonitorClients() const; + bool ClientIsMonitor(const std::shared_ptr& client_ptr) const; + void AddMonitorMessage(const std::string& monitor_message); + void AddMonitorClient(const std::shared_ptr& client_ptr); + + /* + * Slowlog used + */ + void SlowlogTrim(); + void SlowlogReset(); + void SlowlogObtain(int64_t number, std::vector* slowlogs); + void SlowlogPushEntry(const std::vector& argv, int64_t time, int64_t duration); + uint32_t SlowlogLen(); + uint64_t SlowlogCount(); + + /* + * Statistic used + */ + uint64_t ServerQueryNum(); + uint64_t ServerCurrentQps(); + uint64_t accumulative_connections(); + long long ServerKeyspaceHits(); + long long ServerKeyspaceMisses(); + void ResetStat(); + void incr_accumulative_connections(); + void incr_server_keyspace_hits(); + void incr_server_keyspace_misses(); + void ResetLastSecQuerynum(); + void UpdateQueryNumAndExecCountDB(const std::string& db_name, const std::string& command, bool is_write); + std::unordered_map ServerExecCountDB(); + std::unordered_map ServerAllDBStat(); + + /* + * Disk usage statistic + */ + uint64_t GetDBSize() const { + return disk_statistic_.db_size_.load(); + } + uint64_t GetLogSize() 
const { + return disk_statistic_.log_size_.load(); + } + + /* + * Network Statistic used + */ + size_t NetInputBytes(); + size_t NetOutputBytes(); + size_t NetReplInputBytes(); + size_t NetReplOutputBytes(); + float InstantaneousInputKbps(); + float InstantaneousOutputKbps(); + float InstantaneousInputReplKbps(); + float InstantaneousOutputReplKbps(); + + /* + * Slave to Master communication used + */ + int SendToPeer(); + void SignalAuxiliary(); + pstd::Status TriggerSendBinlogSync(); + + /* + * PubSub used + */ + int PubSubNumPat(); + int Publish(const std::string& channel, const std::string& msg); + void EnablePublish(int fd); + int UnSubscribe(const std::shared_ptr& conn, const std::vector& channels, bool pattern, + std::vector>* result); + void Subscribe(const std::shared_ptr& conn, const std::vector& channels, bool pattern, + std::vector>* result); + void PubSubChannels(const std::string& pattern, std::vector* result); + void PubSubNumSub(const std::vector& channels, std::vector>* result); + int ClientPubSubChannelSize(const std::shared_ptr& conn); + int ClientPubSubChannelPatternSize(const std::shared_ptr& conn); + + pstd::Status GetCmdRouting(std::vector& redis_cmds, std::vector* dst, bool* all_local); + + // info debug use + void ServerStatus(std::string* info); + + /* + * Async migrate used + */ + int SlotsMigrateOne(const std::string& key, const std::shared_ptr &db); + bool SlotsMigrateBatch(const std::string &ip, int64_t port, int64_t time_out, int64_t slots, int64_t keys_num, const std::shared_ptr& db); + void GetSlotsMgrtSenderStatus(std::string *ip, int64_t* port, int64_t *slot, bool *migrating, int64_t *moved, int64_t *remained); + bool SlotsMigrateAsyncCancel(); + std::shared_mutex bgslots_protector_; + + /* + * BGSlotsReload used + */ + struct BGSlotsReload { + bool reloading = false; + time_t start_time = 0; + time_t end_time = 0; + std::string s_start_time; + int64_t cursor = 0; + std::string pattern = "*"; + int64_t count = 100; + std::shared_ptr db; + BGSlotsReload() = default; + void Clear() { + reloading = false; + pattern = "*"; + count = 100; + cursor = 0; + } + }; + + BGSlotsReload bgslots_reload_; + + BGSlotsReload bgslots_reload() { + std::lock_guard ml(bgslots_protector_); + return bgslots_reload_; + } + bool GetSlotsreloading() { + std::lock_guard ml(bgslots_protector_); + return bgslots_reload_.reloading; + } + void SetSlotsreloading(bool reloading) { + std::lock_guard ml(bgslots_protector_); + bgslots_reload_.reloading = reloading; + } + void SetSlotsreloadingCursor(int64_t cursor) { + std::lock_guard ml(bgslots_protector_); + bgslots_reload_.cursor = cursor; + } + int64_t GetSlotsreloadingCursor() { + std::lock_guard ml(bgslots_protector_); + return bgslots_reload_.cursor; + } + + void SetSlotsreloadingEndTime() { + std::lock_guard ml(bgslots_protector_); + bgslots_reload_.end_time = time(nullptr); + } + void Bgslotsreload(const std::shared_ptr& db); + + // Revoke the authorization of the specified account, when handle Cmd deleteUser + void AllClientUnAuth(const std::set& users); + + // Determine whether the user's conn can continue to subscribe to the channel + void CheckPubsubClientKill(const std::string& userName, const std::vector& allChannel); + + /* + * BGSlotsCleanup used + */ + struct BGSlotsCleanup { + bool cleaningup = false; + time_t start_time = 0; + time_t end_time = 0; + std::string s_start_time; + int64_t cursor = 0; + std::string pattern = "*"; + int64_t count = 100; + std::shared_ptr db; + storage::DataType type_; + std::vector 
cleanup_slots; + BGSlotsCleanup() = default; + void Clear() { + cleaningup = false; + pattern = "*"; + count = 100; + cursor = 0; + } + }; + + /* + * BGSlotsCleanup use + */ + BGSlotsCleanup bgslots_cleanup_; + net::BGThread bgslots_cleanup_thread_; + + BGSlotsCleanup bgslots_cleanup() { + std::lock_guard ml(bgslots_protector_); + return bgslots_cleanup_; + } + bool GetSlotscleaningup() { + std::lock_guard ml(bgslots_protector_); + return bgslots_cleanup_.cleaningup; + } + void SetSlotscleaningup(bool cleaningup) { + std::lock_guard ml(bgslots_protector_); + bgslots_cleanup_.cleaningup = cleaningup; + } + void SetSlotscleaningupCursor(int64_t cursor) { + std::lock_guard ml(bgslots_protector_); + bgslots_cleanup_.cursor = cursor; + } + void SetCleanupSlots(std::vector cleanup_slots) { + std::lock_guard ml(bgslots_protector_); + bgslots_cleanup_.cleanup_slots.swap(cleanup_slots); + } + std::vector GetCleanupSlots() { + std::lock_guard ml(bgslots_protector_); + return bgslots_cleanup_.cleanup_slots; + } + + void Bgslotscleanup(std::vector cleanup_slots, const std::shared_ptr& db); + void StopBgslotscleanup() { + std::lock_guard ml(bgslots_protector_); + bgslots_cleanup_.cleaningup = false; + std::vector cleanup_slots; + bgslots_cleanup_.cleanup_slots.swap(cleanup_slots); + } + + /* + * StorageOptions used + */ + storage::Status RewriteStorageOptions(const storage::OptionType& option_type, + const std::unordered_map& options); + + /* + * Instantaneous Metric used + */ + std::unique_ptr instant_; + + /* + * Diskrecovery used + */ + std::map> GetDB() { + return dbs_; + } + + /* + * acl init + */ + pstd::Status InitAcl() { return acl_->Initialization(); } + + std::unique_ptr<::Acl>& Acl() { return acl_; } + + friend class Cmd; + friend class InfoCmd; + friend class PikaReplClientConn; + friend class PkClusterInfoCmd; + + struct BGCacheTaskArg { + BGCacheTaskArg() : conf(nullptr), reenable_cache(false) {} + int task_type; + std::shared_ptr db; + uint32_t cache_num; + cache::CacheConfig cache_cfg; + std::unique_ptr conf; + bool reenable_cache; + }; + + /* + * Cache used + */ + static void DoCacheBGTask(void* arg); + void ResetCacheAsync(uint32_t cache_num, std::shared_ptr db, cache::CacheConfig *cache_cfg = nullptr); + void ClearCacheDbAsync(std::shared_ptr db); + void ClearCacheDbAsyncV2(std::shared_ptr db); + void ResetCacheConfig(std::shared_ptr db); + void ClearHitRatio(std::shared_ptr db); + void OnCacheStartPosChanged(int zset_cache_start_direction, std::shared_ptr db); + void UpdateCacheInfo(void); + void ResetDisplayCacheInfo(int status, std::shared_ptr db); + void CacheConfigInit(cache::CacheConfig &cache_cfg); + void ProcessCronTask(); + double HitRatio(); + + /* + * disable compact + */ + void DisableCompact(); + + /* + * lastsave used + */ + int64_t GetLastSave() const {return lastsave_;} + void UpdateLastSave(int64_t lastsave) {lastsave_ = lastsave;} + void InitStatistic(CmdTable *inited_cmd_table) { + // we insert all cmd name to statistic_.server_stat.exec_count_db, + // then when we can call PikaServer::UpdateQueryNumAndExecCountDB(const std::string&, const std::string&, bool) in parallel without lock + // although exec_count_db(unordered_map) is not thread-safe, but we won't trigger any insert or erase operation toward exec_count_db(unordered_map) during the running of pika + auto &exec_stat_map = statistic_.server_stat.exec_count_db; + for (auto& it : *inited_cmd_table) { + std::string cmd_name = it.first; //value copy is needed + pstd::StringToUpper(cmd_name); //cmd_name now is 
all uppercase + exec_stat_map.insert(std::make_pair(cmd_name, 0)); + } + } + private: + /* + * TimingTask use + */ + void DoTimingTask(); + void AutoCompactRange(); + void AutoBinlogPurge(); + void AutoServerlogPurge(); + void AutoDeleteExpiredDump(); + void AutoUpdateNetworkMetric(); + void PrintThreadPoolQueueStatus(); + void StatDiskUsage(); + int64_t GetLastSaveTime(const std::string& dump_dir); + + std::string host_; + int port_ = 0; + time_t start_time_s_ = 0; + + std::shared_mutex storage_options_rw_; + storage::StorageOptions storage_options_; + void InitStorageOptions(); + + std::atomic exit_; + std::timed_mutex exit_mutex_; + + /* + * DB used + */ + std::shared_mutex dbs_rw_; + std::map> dbs_; + + /* + * CronTask used + */ + bool have_scheduled_crontask_ = false; + struct timeval last_check_compact_time_; + + /* + * ResumeDB used + */ + struct timeval last_check_resume_time_; + + /* + * Communicate with the client used + */ + int worker_num_ = 0; + std::unique_ptr pika_client_processor_; + std::unique_ptr pika_slow_cmd_thread_pool_; + std::unique_ptr pika_admin_cmd_thread_pool_; + std::unique_ptr pika_dispatch_thread_ = nullptr; + + /* + * Slave used + */ + std::string master_ip_; + int master_port_ = 0; + int repl_state_ = PIKA_REPL_NO_CONNECT; + int role_ = PIKA_ROLE_SINGLE; + int last_meta_sync_timestamp_ = 0; + bool first_meta_sync_ = false; + bool force_full_sync_ = false; + bool leader_protected_mode_ = false; // reject request after master slave sync done + std::shared_mutex state_protector_; // protect below, use for master-slave mode + + /* + * Bgsave used + */ + net::BGThread bgsave_thread_; + + /* + * Purgelogs use + */ + net::BGThread purge_thread_; + + /* + * Keyscan used + */ + net::BGThread key_scan_thread_; + + /* + * Monitor used + */ + mutable pstd::Mutex monitor_mutex_protector_; + std::set, std::owner_less>> pika_monitor_clients_; + + /* + * Rsync used + */ + std::unique_ptr pika_rsync_service_; + std::unique_ptr rsync_server_; + + /* + * Pubsub used + */ + std::unique_ptr pika_pubsub_thread_; + + /* + * Communication used + */ + std::unique_ptr pika_auxiliary_thread_; + + /* + * Async slotsMgrt use + */ + std::unique_ptr pika_migrate_thread_; + + /* + * Slowlog used + */ + uint64_t slowlog_entry_id_ = 0; + uint64_t slowlog_counter_ = 0; + std::shared_mutex slowlog_protector_; + std::list slowlog_list_; + + /* + * Statistic used + */ + Statistic statistic_; + + DiskStatistic disk_statistic_; + + net::BGThread common_bg_thread_; + + /* + * Cache used + */ + std::shared_mutex mu_; + std::shared_mutex cache_info_rwlock_; + + /* + * lastsave used + */ + int64_t lastsave_ = 0; + + /* + * acl + */ + std::unique_ptr<::Acl> acl_ = nullptr; + + /* + * fast and slow thread pools + */ + bool slow_cmd_thread_pool_flag_; +}; + +#endif diff --git a/tools/pika_migrate/include/pika_set.h b/tools/pika_migrate/include/pika_set.h new file mode 100644 index 0000000000..c4b8eb2031 --- /dev/null +++ b/tools/pika_migrate/include/pika_set.h @@ -0,0 +1,371 @@ +// Copyright (c) 2015-present, Qihoo, Inc. All rights reserved. +// This source code is licensed under the BSD-style license found in the +// LICENSE file in the root directory of this source tree. An additional grant +// of patent rights can be found in the PATENTS file in the same directory. 
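// Aside on InitStatistic() in pika_server.h above: it pre-inserts every
// command name into exec_count_db while the server is still single-threaded,
// so later updates only touch the mapped std::atomic values and never change
// the shape of the unordered_map; lookups and increments can then run in
// parallel without a lock. A minimal sketch of that idea (names illustrative):

#include <atomic>
#include <cstdint>
#include <iostream>
#include <string>
#include <thread>
#include <unordered_map>
#include <vector>

int main() {
  std::unordered_map<std::string, std::atomic<uint64_t>> exec_count;

  // Phase 1: single-threaded setup. The key set is frozen after this point.
  for (const char* cmd : {"GET", "SET", "SADD"}) exec_count.try_emplace(cmd, 0);

  // Phase 2: concurrent updates mutate only the atomics, never the map
  // structure, so no rehash can happen under a reader's feet.
  std::vector<std::thread> workers;
  for (int i = 0; i < 4; ++i) {
    workers.emplace_back([&exec_count] {
      for (int n = 0; n < 1000; ++n) {
        exec_count.at("GET").fetch_add(1, std::memory_order_relaxed);
      }
    });
  }
  for (auto& t : workers) t.join();

  std::cout << "GET executed " << exec_count.at("GET").load() << " times\n";  // 4000
}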
+ +#ifndef PIKA_SET_H_ +#define PIKA_SET_H_ + +#include "include/acl.h" +#include "include/pika_command.h" +#include "pika_kv.h" + +/* + * set + */ +class SAddCmd : public Cmd { + public: + SAddCmd(const std::string& name, int arity, uint32_t flag) + : Cmd(name, arity, flag, static_cast(AclCategory::SET)) {} + std::vector current_key() const override { + std::vector res; + res.push_back(key_); + return res; + } + void Do() override; + void DoUpdateCache() override; + void DoThroughDB() override; + void Split(const HintKeys& hint_keys) override{}; + void Merge() override{}; + Cmd* Clone() override { return new SAddCmd(*this); } + + private: + std::string key_; + std::vector members_; + rocksdb::Status s_; + void DoInitial() override; +}; + +class SRemCmd : public Cmd { + public: + SRemCmd(const std::string& name, int arity, uint32_t flag) + : Cmd(name, arity, flag, static_cast(AclCategory::SET)) {} + std::vector current_key() const override { + std::vector res; + res.push_back(key_); + return res; + } + void Do() override; + void DoUpdateCache() override; + void DoThroughDB() override; + void Split(const HintKeys& hint_keys) override{}; + void Merge() override{}; + Cmd* Clone() override { return new SRemCmd(*this); } + + private: + void DoInitial() override; + + private: + std::string key_; + std::vector members_; + rocksdb::Status s_; + int32_t deleted_ = 0; +}; + +class SPopCmd : public Cmd { + public: + SPopCmd(const std::string& name, int arity, uint32_t flag) + : Cmd(name, arity, flag, static_cast(AclCategory::SET)) { + srem_cmd_ = std::make_shared(kCmdNameSRem, -3, kCmdFlagsWrite | kCmdFlagsSet); + } + SPopCmd(const SPopCmd& other) + : Cmd(other), key_(other.key_), members_(other.members_), count_(other.count_), s_(other.s_) { + srem_cmd_ = std::make_shared(kCmdNameSRem, -3, kCmdFlagsWrite | kCmdFlagsSet); + } + std::vector current_key() const override { + std::vector res; + res.push_back(key_); + return res; + } + void Do() override; + void DoUpdateCache() override; + void DoThroughDB() override; + void Split(const HintKeys& hint_keys) override{}; + void Merge() override{}; + Cmd* Clone() override { return new SPopCmd(*this); } + void DoBinlog() override; + + private: + void DoInitial() override; + + private: + std::string key_; + std::vector members_; + // used for write binlog + std::shared_ptr srem_cmd_; + int64_t count_ = 1; + rocksdb::Status s_; +}; + +class SCardCmd : public Cmd { + public: + SCardCmd(const std::string& name, int arity, uint32_t flag) + : Cmd(name, arity, flag, static_cast(AclCategory::SET)) {} + std::vector current_key() const override { + std::vector res; + res.push_back(key_); + return res; + } + void Do() override; + void ReadCache() override; + void DoUpdateCache() override; + void DoThroughDB() override; + void Split(const HintKeys& hint_keys) override{}; + void Merge() override{}; + Cmd* Clone() override { return new SCardCmd(*this); } + + private: + std::string key_; + rocksdb::Status s_; + void DoInitial() override; +}; + +class SMembersCmd : public Cmd { + public: + SMembersCmd(const std::string& name, int arity, uint32_t flag) + : Cmd(name, arity, flag, static_cast(AclCategory::SET)) {} + std::vector current_key() const override { + std::vector res; + res.push_back(key_); + return res; + } + void Do() override; + void ReadCache() override; + void DoUpdateCache() override; + void DoThroughDB() override; + void Split(const HintKeys& hint_keys) override{}; + void Merge() override{}; + Cmd* Clone() override { return new SMembersCmd(*this); } + + private: 
+ std::string key_; + rocksdb::Status s_; + void DoInitial() override; +}; + +class SScanCmd : public Cmd { + public: + SScanCmd(const std::string& name, int arity, uint32_t flag) + : Cmd(name, arity, flag, static_cast(AclCategory::SET)), pattern_("*") {} + std::vector current_key() const override { + std::vector res; + res.push_back(key_); + return res; + } + void Do() override; + void Split(const HintKeys& hint_keys) override {}; + void Merge() override {}; + Cmd* Clone() override { return new SScanCmd(*this); } + + private: + std::string key_, pattern_ = "*"; + int64_t cursor_ = 0; + int64_t count_ = 10; + void DoInitial() override; + void Clear() override { + pattern_ = "*"; + count_ = 10; + } +}; + +class SUnionCmd : public Cmd { + public: + SUnionCmd(const std::string& name, int arity, uint32_t flag) + : Cmd(name, arity, flag, static_cast(AclCategory::SET)) {} + void Do() override; + void Split(const HintKeys& hint_keys) override {}; + void Merge() override {}; + Cmd* Clone() override { return new SUnionCmd(*this); } + + private: + std::vector keys_; + void DoInitial() override; +}; + +class SetOperationCmd : public Cmd { + public: + SetOperationCmd(const std::string& name, int arity, uint32_t flag) + : Cmd(name, arity, flag, static_cast(AclCategory::SET)) { + sadd_cmd_ = std::make_shared(kCmdNameSAdd, -3, kCmdFlagsWrite | kCmdFlagsSet); + del_cmd_ = std::make_unique(kCmdNameDel, -2, kCmdFlagsWrite | kCmdFlagsKv | kCmdFlagsDoThroughDB | kCmdFlagsUpdateCache); + } + SetOperationCmd(const SetOperationCmd& other) + : Cmd(other), dest_key_(other.dest_key_), value_to_dest_(other.value_to_dest_) { + sadd_cmd_ = std::make_shared(kCmdNameSAdd, -3, kCmdFlagsWrite | kCmdFlagsSet); + del_cmd_ = std::make_unique(kCmdNameDel, -2, kCmdFlagsWrite | kCmdFlagsKv | kCmdFlagsDoThroughDB | kCmdFlagsUpdateCache); + } + + std::vector current_key() const override { return {dest_key_}; } + void DoBinlog() override; + + protected: + std::string dest_key_; + std::vector keys_; + // used for write binlog + std::shared_ptr sadd_cmd_; + std::shared_ptr del_cmd_; + std::vector value_to_dest_; +}; + +class SUnionstoreCmd : public SetOperationCmd { + public: + SUnionstoreCmd(const std::string& name, int arity, uint32_t flag) : SetOperationCmd(name, arity, flag) {} + // current_key() is override in base class : SetOperationCmd + void Do() override; + void DoUpdateCache() override; + void DoThroughDB() override; + void Split(const HintKeys& hint_keys) override{}; + void Merge() override{}; + Cmd* Clone() override { return new SUnionstoreCmd(*this); } + + private: + void DoInitial() override; + rocksdb::Status s_; +}; + +class SInterCmd : public Cmd { + public: + SInterCmd(const std::string& name, int arity, uint32_t flag) + : Cmd(name, arity, flag, static_cast(AclCategory::SET)) {} + void Do() override; + void Split(const HintKeys& hint_keys) override {}; + void Merge() override {}; + Cmd* Clone() override { return new SInterCmd(*this); } + + private: + std::vector keys_; + void DoInitial() override; +}; + +class SInterstoreCmd : public SetOperationCmd { + public: + SInterstoreCmd(const std::string& name, int arity, uint32_t flag) : SetOperationCmd(name, arity, flag) {} + void Do() override; + void DoUpdateCache() override; + void DoThroughDB() override; + void Split(const HintKeys& hint_keys) override{}; + void Merge() override{}; + Cmd* Clone() override { return new SInterstoreCmd(*this); } + + private: + void DoInitial() override; + rocksdb::Status s_; +}; + +class SIsmemberCmd : public Cmd { + public: + 
SIsmemberCmd(const std::string& name, int arity, uint32_t flag) + : Cmd(name, arity, flag, static_cast(AclCategory::SET)) {} + std::vector current_key() const override { + std::vector res; + res.push_back(key_); + return res; + } + void Do() override; + void ReadCache() override; + void DoUpdateCache() override; + void DoThroughDB() override; + void Split(const HintKeys& hint_keys) override{}; + void Merge() override{}; + Cmd* Clone() override { return new SIsmemberCmd(*this); } + + private: + std::string key_; + std::string member_; + rocksdb::Status s_; + void DoInitial() override; +}; + +class SDiffCmd : public Cmd { + public: + SDiffCmd(const std::string& name, int arity, uint32_t flag) + : Cmd(name, arity, flag, static_cast(AclCategory::SET)) {} + void Do() override; + void Split(const HintKeys& hint_keys) override {}; + void Merge() override {}; + Cmd* Clone() override { return new SDiffCmd(*this); } + + private: + std::vector keys_; + void DoInitial() override; +}; + +class SDiffstoreCmd : public SetOperationCmd { + public: + SDiffstoreCmd(const std::string& name, int arity, uint32_t flag) : SetOperationCmd(name, arity, flag) {} + void Do() override; + void DoUpdateCache() override; + void DoThroughDB() override; + void Split(const HintKeys& hint_keys) override{}; + void Merge() override{}; + Cmd* Clone() override { return new SDiffstoreCmd(*this); } + + private: + rocksdb::Status s_; + void DoInitial() override; +}; + +class SMoveCmd : public Cmd { + public: + SMoveCmd(const std::string& name, int arity, uint32_t flag) + : Cmd(name, arity, flag, static_cast(AclCategory::SET)) { + srem_cmd_ = std::make_shared(kCmdNameSRem, -3, kCmdFlagsWrite | kCmdFlagsSet); + sadd_cmd_ = std::make_shared(kCmdNameSAdd, -3, kCmdFlagsWrite | kCmdFlagsSet); + } + SMoveCmd(const SMoveCmd& other) + : Cmd(other), + src_key_(other.src_key_), + dest_key_(other.dest_key_), + member_(other.member_), + move_success_(other.move_success_) { + srem_cmd_ = std::make_shared(kCmdNameSRem, -3, kCmdFlagsWrite | kCmdFlagsSet); + sadd_cmd_ = std::make_shared(kCmdNameSAdd, -3, kCmdFlagsWrite | kCmdFlagsSet); + } + std::vector current_key() const override { return {src_key_, dest_key_}; } + void Do() override; + void DoUpdateCache() override; + void DoThroughDB() override; + void Split(const HintKeys& hint_keys) override{}; + void Merge() override{}; + Cmd* Clone() override { return new SMoveCmd(*this); } + void DoBinlog() override; + + private: + std::string src_key_, dest_key_, member_; + void DoInitial() override; + // used for write binlog + std::shared_ptr srem_cmd_; + std::shared_ptr sadd_cmd_; + int32_t move_success_{0}; +}; + +class SRandmemberCmd : public Cmd { + public: + SRandmemberCmd(const std::string& name, int arity, uint32_t flag) + : Cmd(name, arity, flag, static_cast(AclCategory::SET)) {} + std::vector current_key() const override { + std::vector res; + res.push_back(key_); + return res; + } + void Do() override; + void ReadCache() override; + void DoUpdateCache() override; + void DoThroughDB() override; + void Split(const HintKeys& hint_keys) override{}; + void Merge() override{}; + Cmd* Clone() override { return new SRandmemberCmd(*this); } + + private: + std::string key_; + int64_t count_ = 1; + bool reply_arr = false; + rocksdb::Status s_; + void DoInitial() override; + void Clear() override { + count_ = 1; + reply_arr = false; + } +}; + +#endif diff --git a/tools/pika_migrate/include/pika_slave_node.h b/tools/pika_migrate/include/pika_slave_node.h new file mode 100644 index 0000000000..e37325b521 
--- /dev/null
+++ b/tools/pika_migrate/include/pika_slave_node.h
@@ -0,0 +1,82 @@
+// Copyright (c) 2015-present, Qihoo, Inc. All rights reserved.
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree. An additional grant
+// of patent rights can be found in the PATENTS file in the same directory.
+
+#ifndef PIKA_SLAVE_NODE_H_
+#define PIKA_SLAVE_NODE_H_
+
+#include <deque>
+#include <memory>
+
+#include "include/pika_binlog_reader.h"
+#include "include/pika_define.h"
+
+struct SyncWinItem {
+  LogOffset offset_;
+  std::size_t binlog_size_ = 0;
+  bool acked_ = false;
+  bool operator==(const SyncWinItem& other) const {
+    return offset_.b_offset.filenum == other.offset_.b_offset.filenum &&
+           offset_.b_offset.offset == other.offset_.b_offset.offset;
+  }
+  explicit SyncWinItem(const LogOffset& offset, std::size_t binlog_size = 0)
+      : offset_(offset), binlog_size_(binlog_size) {}
+  std::string ToString() const {
+    return offset_.ToString() + " binlog size: " + std::to_string(binlog_size_) +
+           " acked: " + std::to_string(static_cast<int>(acked_));
+  }
+};
+
+class SyncWindow {
+ public:
+  SyncWindow() = default;
+  void Push(const SyncWinItem& item);
+  bool Update(const SyncWinItem& start_item, const SyncWinItem& end_item, LogOffset* acked_offset);
+  int Remaining();
+  std::string ToStringStatus() const {
+    if (win_.empty()) {
+      return "  Size: " + std::to_string(win_.size()) + "\r\n";
+    } else {
+      std::string res;
+      res += "  Size: " + std::to_string(win_.size()) + "\r\n";
+      res += ("  Begin_item: " + win_.begin()->ToString() + "\r\n");
+      res += ("  End_item: " + win_.rbegin()->ToString() + "\r\n");
+      return res;
+    }
+  }
+  std::size_t GetTotalBinlogSize() { return total_size_; }
+  void Reset() {
+    win_.clear();
+    total_size_ = 0;
+  }
+
+ private:
+  // TODO(whoiami) ring buffer maybe
+  std::deque<SyncWinItem> win_;
+  std::size_t total_size_ = 0;
+};
+
+// role master use
+class SlaveNode : public RmNode {
+ public:
+  SlaveNode(const std::string& ip, int port, const std::string& db_name, int session_id);
+  ~SlaveNode() override;
+  void Lock() { slave_mu.lock(); }
+  void Unlock() { slave_mu.unlock(); }
+  SlaveState slave_state{kSlaveNotSync};
+
+  BinlogSyncState b_state{kNotSync};
+  SyncWindow sync_win;
+  LogOffset sent_offset;
+  LogOffset acked_offset;
+
+  std::string ToStringStatus();
+
+  std::shared_ptr<PikaBinlogReader> binlog_reader;
+  pstd::Status InitBinlogFileReader(const std::shared_ptr<Binlog>& binlog, const BinlogOffset& offset);
+  pstd::Status Update(const LogOffset& start, const LogOffset& end, LogOffset* updated_offset);
+
+  pstd::Mutex slave_mu;
+};
+
+#endif  // PIKA_SLAVE_NODE_H
diff --git a/tools/pika_migrate/include/pika_slaveping_thread.h b/tools/pika_migrate/include/pika_slaveping_thread.h
new file mode 100644
index 0000000000..a79200782e
--- /dev/null
+++ b/tools/pika_migrate/include/pika_slaveping_thread.h
@@ -0,0 +1,41 @@
+// Copyright (c) 2019-present, Qihoo, Inc. All rights reserved.
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree. An additional grant
+// of patent rights can be found in the PATENTS file in the same directory.
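// Aside on SyncWindow in pika_slave_node.h above: it is a classic send
// window. The master Push()es an item for every binlog chunk it ships, the
// slave acks a contiguous [start, end] range, and Update() pops the acked
// prefix so the sender knows how much is still in flight. A minimal sketch
// of that bookkeeping over plain sequence numbers (illustrative; Pika keys
// items by LogOffset rather than a single integer):

#include <cassert>
#include <cstdint>
#include <deque>

class AckWindow {
 public:
  void Push(uint64_t seq) { win_.push_back({seq, false}); }

  // Mark [start, end] acked, pop the acked prefix, and report the highest
  // sequence number that is now fully acknowledged.
  bool Ack(uint64_t start, uint64_t end, uint64_t* acked_upto) {
    for (auto& item : win_) {
      if (item.seq >= start && item.seq <= end) item.acked = true;
    }
    bool popped = false;
    while (!win_.empty() && win_.front().acked) {
      *acked_upto = win_.front().seq;
      win_.pop_front();
      popped = true;
    }
    return popped;
  }

  std::size_t Remaining() const { return win_.size(); }

 private:
  struct Item {
    uint64_t seq;
    bool acked;
  };
  std::deque<Item> win_;
};

int main() {
  AckWindow w;
  for (uint64_t s = 1; s <= 5; ++s) w.Push(s);
  uint64_t upto = 0;
  w.Ack(1, 3, &upto);
  assert(upto == 3 && w.Remaining() == 2);  // items 4 and 5 still in flight
}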
+ +#ifndef PIKA_SLAVEPING_THREAD_H_ +#define PIKA_SLAVEPING_THREAD_H_ + +#include + +#include "net/include/net_cli.h" +#include "net/include/net_thread.h" +#include "pstd/include/pstd_status.h" + +using pstd::Status; + +class PikaSlavepingThread : public net::Thread { + public: + PikaSlavepingThread(int64_t sid) : sid_(sid), is_first_send_(true) { + cli_ = net::NewPbCli(); + cli_->set_connect_timeout(1500); + set_thread_name("SlavePingThread"); + }; + virtual ~PikaSlavepingThread() { + StopThread(); + delete cli_; + LOG(INFO) << "SlavepingThread " << thread_id() << " exit!!!"; + }; + + Status Send(); + Status RecvProc(); + + private: + int64_t sid_ = 0; + bool is_first_send_ = true; + int sockfd_ = -1; + net::NetCli* cli_ = nullptr; + virtual void* ThreadMain(); +}; + +#endif diff --git a/tools/pika_migrate/include/pika_slot_command.h b/tools/pika_migrate/include/pika_slot_command.h new file mode 100644 index 0000000000..53937d6172 --- /dev/null +++ b/tools/pika_migrate/include/pika_slot_command.h @@ -0,0 +1,273 @@ +#ifndef PIKA_SLOT_COMMAND_H_ +#define PIKA_SLOT_COMMAND_H_ + +#include "include/pika_client_conn.h" +#include "include/pika_command.h" +#include "net/include/net_cli.h" +#include "net/include/net_thread.h" +#include "storage/storage.h" +#include "storage/src/base_data_key_format.h" +#include "strings.h" + +const std::string SlotKeyPrefix = "_internal:slotkey:4migrate:"; +const std::string SlotTagPrefix = "_internal:slottag:4migrate:"; + +const size_t MaxKeySendSize = 10 * 1024; + +int GetKeyType(const std::string& key, std::string &key_type, const std::shared_ptr& db); +void AddSlotKey(const std::string& type, const std::string& key, const std::shared_ptr& db); +void RemSlotKey(const std::string& key, const std::shared_ptr& db); +int DeleteKey(const std::string& key, const char key_type, const std::shared_ptr& db); +void RemSlotKeyByType(const std::string& type, const std::string& key, const std::shared_ptr& db); +std::string GetSlotKey(uint32_t slot); +std::string GetSlotsTagKey(uint32_t crc); + +class PikaMigrate { + public: + PikaMigrate(); + virtual ~PikaMigrate(); + + int MigrateKey(const std::string& host, const int port, int timeout, const std::string& key, const char type, + std::string& detail, const std::shared_ptr& db); + void CleanMigrateClient(); + + void Lock() { + mutex_.lock(); + } + int Trylock() { + return mutex_.try_lock(); + } + void Unlock() { + mutex_.unlock(); + } + net::NetCli* GetMigrateClient(const std::string& host, const int port, int timeout); + + private: + std::map migrate_clients_; + pstd::Mutex mutex_; + void KillMigrateClient(net::NetCli* migrate_cli); + void KillAllMigrateClient(); + int64_t TTLByType(const char key_type, const std::string& key, const std::shared_ptr& db); + int MigrateSend(net::NetCli* migrate_cli, const std::string& key, const char type, std::string& detail, + const std::shared_ptr& db); + bool MigrateRecv(net::NetCli* migrate_cli, int need_receive, std::string& detail); + int ParseKey(const std::string& key, const char type, std::string& wbuf_str, const std::shared_ptr& db); + int ParseKKey(const std::string& key, std::string& wbuf_str, const std::shared_ptr& db); + int ParseZKey(const std::string& key, std::string& wbuf_str, const std::shared_ptr& db); + int ParseSKey(const std::string& key, std::string& wbuf_str, const std::shared_ptr& db); + int ParseHKey(const std::string& key, std::string& wbuf_str, const std::shared_ptr& db); + int ParseLKey(const std::string& key, std::string& wbuf_str, const std::shared_ptr& db); + 
int ParseMKey(const std::string& key, std::string& wbuf_str, const std::shared_ptr& db); + bool SetTTL(const std::string& key, std::string& wbuf_str, int64_t ttl); +}; + +class SlotsMgrtTagSlotCmd : public Cmd { + public: + SlotsMgrtTagSlotCmd(const std::string& name, int arity, uint32_t flag) : Cmd(name, arity, flag) {} + void Do() override; + void Split(const HintKeys& hint_keys) override {}; + void Merge() override {}; + Cmd* Clone() override { return new SlotsMgrtTagSlotCmd(*this); } + + private: + std::string dest_ip_; + int64_t dest_port_ = 0; + int64_t timeout_ms_ = 60; + int64_t slot_id_ = 0; + std::basic_string, std::allocator> key_; + void DoInitial() override; +}; + +class SlotsMgrtTagSlotAsyncCmd : public Cmd { + public: + SlotsMgrtTagSlotAsyncCmd(const std::string& name, int arity, uint32_t flag) : Cmd(name, arity, flag){} + void Do() override; + void Split(const HintKeys& hint_keys) override {}; + void Merge() override {}; + Cmd* Clone() override { return new SlotsMgrtTagSlotAsyncCmd(*this); } + + private: + std::string dest_ip_; + int64_t dest_port_ = 0; + int64_t timeout_ms_ = 60; + int64_t max_bulks_ = 0; + int64_t max_bytes_ = 0; + int64_t slot_id_ = 0; + int64_t keys_num_ = 0; + void DoInitial() override; +}; + +class SlotsMgrtTagOneCmd : public Cmd { + public: + SlotsMgrtTagOneCmd(const std::string& name, int arity, uint32_t flag) : Cmd(name, arity, flag) {} + void Do() override; + void Split(const HintKeys& hint_keys) override {}; + void Merge() override {}; + Cmd* Clone() override { return new SlotsMgrtTagOneCmd(*this); } + + private: + std::string dest_ip_; + int64_t dest_port_ = 0; + int64_t timeout_ms_ = 60; + std::string key_; + int64_t slot_id_ = 0; + char key_type_ = '\0'; + void DoInitial() override; + int KeyTypeCheck(const std::shared_ptr& db); +}; + +class SlotsMgrtAsyncStatusCmd : public Cmd { + public: + SlotsMgrtAsyncStatusCmd(const std::string& name, int arity, uint32_t flag) : Cmd(name, arity, flag) {} + void Do() override; + void Split(const HintKeys& hint_keys) override {}; + void Merge() override {}; + Cmd* Clone() override { return new SlotsMgrtAsyncStatusCmd(*this); } + + private: + void DoInitial() override; +}; + +class SlotsInfoCmd : public Cmd { + public: + SlotsInfoCmd(const std::string& name, int arity, uint32_t flag) : Cmd(name, arity, flag) {} + void Do() override; + void Split(const HintKeys& hint_keys) override {}; + void Merge() override {}; + Cmd* Clone() override { return new SlotsInfoCmd(*this); } + + private: + void DoInitial() override; + + int64_t begin_ = 0; + int64_t end_ = 1024; +}; + +class SlotsMgrtAsyncCancelCmd : public Cmd { + public: + SlotsMgrtAsyncCancelCmd(const std::string& name, int arity, uint32_t flag) : Cmd(name, arity, flag) {} + void Do() override; + void Split(const HintKeys& hint_keys) override {}; + void Merge() override {}; + Cmd* Clone() override { return new SlotsMgrtAsyncCancelCmd(*this); } + + private: + void DoInitial() override; +}; + +class SlotsDelCmd : public Cmd { + public: + SlotsDelCmd(const std::string& name, int arity, uint32_t flag) : Cmd(name, arity, flag) {} + void Do() override; + void Split(const HintKeys& hint_keys) override {}; + void Merge() override {}; + Cmd* Clone() override { return new SlotsDelCmd(*this); } + + private: + std::vector slots_; + void DoInitial() override; +}; + +class SlotsHashKeyCmd : public Cmd { + public: + SlotsHashKeyCmd(const std::string& name, int arity, uint32_t flag) : Cmd(name, arity, flag) {} + void Do() override; + void Split(const HintKeys& hint_keys) 
override {}; + void Merge() override {}; + Cmd* Clone() override { return new SlotsHashKeyCmd(*this); } + + private: + std::vector keys_; + void DoInitial() override; +}; + +class SlotsScanCmd : public Cmd { + public: + SlotsScanCmd(const std::string& name, int arity, uint32_t flag) : Cmd(name, arity, flag) {} + void Do() override; + void Split(const HintKeys& hint_keys) override {}; + void Merge() override {}; + Cmd* Clone() override { return new SlotsScanCmd(*this); } + + private: + std::string key_; + std::string pattern_ = "*"; + int64_t cursor_ = 0; + int64_t count_ = 10; + void DoInitial() override; + void Clear() override { + pattern_ = "*"; + count_ = 10; + } +}; + +/* * +* SLOTSMGRT-EXEC-WRAPPER $hashkey $command [$arg1 ...] +* SLOTSMGRT-EXEC-WRAPPER $hashkey $command [$key1 $arg1 ...] +* SLOTSMGRT-EXEC-WRAPPER $hashkey $command [$key1 $arg1 ...] [$key2 $arg2 ...] +* */ +class SlotsMgrtExecWrapperCmd : public Cmd { + public: + SlotsMgrtExecWrapperCmd(const std::string& name, int arity, uint32_t flag) : Cmd(name, arity, flag) {} + void Do() override; + void Split(const HintKeys& hint_keys) override {}; + void Merge() override {}; + Cmd* Clone() override { return new SlotsMgrtExecWrapperCmd(*this); } + + private: + std::string key_; + std::vector args; + void DoInitial() override; +}; + + +class SlotsReloadCmd : public Cmd { + public: + SlotsReloadCmd(const std::string& name, int arity, uint32_t flag) : Cmd(name, arity, flag) {} + void Do() override; + void Split(const HintKeys& hint_keys) override {}; + void Merge() override {}; + Cmd* Clone() override { return new SlotsReloadCmd(*this); } + + private: + void DoInitial() override; +}; + +class SlotsReloadOffCmd : public Cmd { + public: + SlotsReloadOffCmd(const std::string& name, int arity, uint32_t flag) : Cmd(name, arity, flag) {} + void Do() override; + void Split(const HintKeys& hint_keys) override {}; + void Merge() override {}; + Cmd* Clone() override { return new SlotsReloadOffCmd(*this); } + + private: + void DoInitial() override; +}; + +class SlotsCleanupCmd : public Cmd { + public: + SlotsCleanupCmd(const std::string& name, int arity, uint32_t flag) : Cmd(name, arity, flag) {} + void Do() override; + void Split(const HintKeys& hint_keys) override {}; + void Merge() override {}; + Cmd* Clone() override { return new SlotsCleanupCmd(*this); } + std::vector cleanup_slots_; + + private: + void DoInitial() override; +}; + +class SlotsCleanupOffCmd : public Cmd { + public: + SlotsCleanupOffCmd(const std::string& name, int arity, uint32_t flag) : Cmd(name, arity, flag) {} + void Do() override; + void Split(const HintKeys& hint_keys) override {}; + void Merge() override {}; + Cmd* Clone() override { return new SlotsCleanupOffCmd(*this); } + + private: + void DoInitial() override; +}; + +#endif diff --git a/tools/pika_migrate/include/pika_stable_log.h b/tools/pika_migrate/include/pika_stable_log.h new file mode 100644 index 0000000000..300e0d0fc5 --- /dev/null +++ b/tools/pika_migrate/include/pika_stable_log.h @@ -0,0 +1,63 @@ +// Copyright (c) 2015-present, Qihoo, Inc. All rights reserved. +// This source code is licensed under the BSD-style license found in the +// LICENSE file in the root directory of this source tree. An additional grant +// of patent rights can be found in the PATENTS file in the same directory. 
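// Aside on the slot commands in pika_slot_command.h above: keys are routed
// Codis-style, i.e. slot = crc32(key) % slot_count, and a "{tag}" inside the
// key restricts hashing to the tag so related keys land in the same slot
// (that is what the crc argument of GetSlotsTagKey refers to). A minimal
// sketch with a bit-by-bit CRC32; Pika ships its own crc32 implementation
// and defaults to 1024 slots, and the helper names below are illustrative:

#include <cstdint>
#include <iostream>
#include <string>

uint32_t Crc32(const std::string& s) {
  uint32_t crc = 0xFFFFFFFFu;
  for (unsigned char c : s) {
    crc ^= c;
    for (int i = 0; i < 8; ++i) {
      // Branch-free reflected CRC-32: apply the polynomial iff the low bit is set.
      crc = (crc >> 1) ^ (0xEDB88320u & (0u - (crc & 1u)));
    }
  }
  return ~crc;
}

// If the key looks like "user:{1000}:profile", hash only "1000".
std::string HashTagOrKey(const std::string& key) {
  auto l = key.find('{');
  if (l == std::string::npos) return key;
  auto r = key.find('}', l + 1);
  if (r == std::string::npos || r == l + 1) return key;  // no tag or empty tag
  return key.substr(l + 1, r - l - 1);
}

uint32_t SlotNum(const std::string& key, uint32_t slot_count = 1024) {
  return Crc32(HashTagOrKey(key)) % slot_count;
}

int main() {
  // Both keys share the "{1000}" tag, so they map to the same slot.
  std::cout << SlotNum("user:{1000}:profile") << " == " << SlotNum("cart:{1000}") << "\n";
}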
+
+#ifndef PIKA_STABLE_LOG_H_
+#define PIKA_STABLE_LOG_H_
+
+#include <map>
+#include <memory>
+
+#include "include/pika_binlog.h"
+
+class StableLog : public std::enable_shared_from_this<StableLog> {
+ public:
+  StableLog(std::string table_name, std::string log_path);
+  ~StableLog();
+  std::shared_ptr<Binlog> Logger() { return stable_logger_; }
+  void Leave();
+  void SetFirstOffset(const LogOffset& offset) {
+    std::lock_guard l(offset_rwlock_);
+    first_offset_ = offset;
+  }
+  LogOffset first_offset() {
+    std::shared_lock l(offset_rwlock_);
+    return first_offset_;
+  }
+  // Need to hold binlog lock
+  pstd::Status TruncateTo(const LogOffset& offset);
+
+  // Purgelogs use
+  bool PurgeStableLogs(uint32_t to = 0, bool manual = false);
+  void ClearPurge();
+  bool GetBinlogFiles(std::map<uint32_t, std::string>* binlogs);
+  pstd::Status PurgeFileAfter(uint32_t filenum);
+
+ private:
+  void Close();
+  void RemoveStableLogDir();
+  void UpdateFirstOffset(uint32_t filenum);
+  /*
+   * Purgelogs use
+   */
+  static void DoPurgeStableLogs(void* arg);
+  bool PurgeFiles(uint32_t to, bool manual);
+  std::atomic<bool> purging_;
+
+  std::string db_name_;
+  std::string log_path_;
+  std::shared_ptr<Binlog> stable_logger_;
+
+  std::shared_mutex offset_rwlock_;
+  LogOffset first_offset_;
+};
+
+struct PurgeStableLogArg {
+  std::shared_ptr<StableLog> logger;
+  uint32_t to = 0;
+  bool manual = false;
+  bool force = false;  // Ignore the delete window
+};
+
+#endif  // PIKA_STABLE_LOG_H_
diff --git a/tools/pika_migrate/include/pika_statistic.h b/tools/pika_migrate/include/pika_statistic.h
new file mode 100644
index 0000000000..9ea824ca13
--- /dev/null
+++ b/tools/pika_migrate/include/pika_statistic.h
@@ -0,0 +1,67 @@
+// Copyright (c) 2015-present, Qihoo, Inc. All rights reserved.
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree. An additional grant
+// of patent rights can be found in the PATENTS file in the same directory.
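// Aside on StableLog::PurgeStableLogs() above: binlogs live as a sequence of
// write2file<N> files keyed by filenum, and purging deletes every file whose
// number falls below the purge point while keeping a minimum tail window.
// A minimal sketch of that retention policy over an in-memory map
// (illustrative; the real code also consults slave ack offsets and the
// expire-logs settings before deleting anything):

#include <cstddef>
#include <cstdint>
#include <iostream>
#include <map>
#include <string>

// filenum -> file name, mirroring what StableLog::GetBinlogFiles() reports.
using BinlogFiles = std::map<uint32_t, std::string>;

std::size_t PurgeFilesBelow(BinlogFiles& binlogs, uint32_t to, std::size_t keep_at_least) {
  if (binlogs.size() <= keep_at_least) return 0;  // never shrink below the tail window
  std::size_t purged = 0;
  for (auto it = binlogs.begin(); it != binlogs.end() && binlogs.size() > keep_at_least;) {
    if (it->first < to) {
      std::cout << "purge " << it->second << "\n";  // stand-in for deleting the file
      it = binlogs.erase(it);
      ++purged;
    } else {
      break;  // std::map iterates in filenum order, nothing further qualifies
    }
  }
  return purged;
}

int main() {
  BinlogFiles files = {{0, "write2file0"}, {1, "write2file1"},
                       {2, "write2file2"}, {3, "write2file3"}};
  PurgeFilesBelow(files, /*to=*/2, /*keep_at_least=*/1);  // removes write2file0 and write2file1
}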
+
+#ifndef PIKA_STATISTIC_H_
+#define PIKA_STATISTIC_H_
+
+#include <atomic>
+#include <shared_mutex>
+#include <string>
+#include <unordered_map>
+
+class QpsStatistic {
+ public:
+  QpsStatistic();
+  QpsStatistic(const QpsStatistic& other);
+  ~QpsStatistic() = default;
+  void IncreaseQueryNum(bool is_write);
+  void ResetLastSecQuerynum();
+
+  std::atomic<uint64_t> querynum;
+  std::atomic<uint64_t> write_querynum;
+
+  std::atomic<uint64_t> last_querynum;
+  std::atomic<uint64_t> last_write_querynum;
+
+  std::atomic<uint64_t> last_sec_querynum;
+  std::atomic<uint64_t> last_sec_write_querynum;
+
+  std::atomic<uint64_t> last_time_us;
+};
+
+struct ServerStatistic {
+  ServerStatistic() = default;
+  ~ServerStatistic() = default;
+
+  std::atomic<uint64_t> accumulative_connections;
+  std::unordered_map<std::string, std::atomic<uint64_t>> exec_count_db;
+  std::atomic<uint64_t> keyspace_hits;
+  std::atomic<uint64_t> keyspace_misses;
+  QpsStatistic qps;
+};
+
+struct Statistic {
+  Statistic();
+
+  QpsStatistic DBStat(const std::string& db_name);
+  std::unordered_map<std::string, QpsStatistic> AllDBStat();
+
+  void UpdateDBQps(const std::string& db_name, const std::string& command, bool is_write);
+  void ResetDBLastSecQuerynum();
+
+  // statistic shows accumulated data of all tables
+  ServerStatistic server_stat;
+
+  // statistic shows accumulated data of every single table
+  std::shared_mutex db_stat_rw;
+  std::unordered_map<std::string, QpsStatistic> db_stat;
+};
+
+struct DiskStatistic {
+  std::atomic<uint64_t> db_size_ = 0;
+  std::atomic<uint64_t> log_size_ = 0;
+};
+
+#endif  // PIKA_STATISTIC_H_
diff --git a/tools/pika_migrate/include/pika_stream.h b/tools/pika_migrate/include/pika_stream.h
new file mode 100644
index 0000000000..bf61a96c6b
--- /dev/null
+++ b/tools/pika_migrate/include/pika_stream.h
@@ -0,0 +1,163 @@
+// Copyright (c) 2023-present, Qihoo, Inc. All rights reserved.
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree. An additional grant
+// of patent rights can be found in the PATENTS file in the same directory.
+
+#ifndef PIKA_STREAM_H_
+#define PIKA_STREAM_H_
+
+#include "include/acl.h"
+#include "include/pika_command.h"
+#include "storage/src/redis_streams.h"
+#include "storage/storage.h"
+
+/*
+ * stream
+ */
+
+inline void ParseAddOrTrimArgsOrReply(CmdRes& res, const PikaCmdArgsType& argv, storage::StreamAddTrimArgs& args,
+                                      int* idpos, bool is_xadd);
+
+inline void ParseReadOrReadGroupArgsOrReply(CmdRes& res, const PikaCmdArgsType& argv,
+                                            storage::StreamReadGroupReadArgs& args, bool is_xreadgroup);
+
+// @field_values is the result of ScanStream.
+// field is the serialized message id,
+// value is the serialized message.
+inline void AppendMessagesToRes(CmdRes& res, std::vector& field_values, const DB* db); + +class XAddCmd : public Cmd { + public: + XAddCmd(const std::string& name, int arity, uint32_t flag) + : Cmd(name, arity, flag, static_cast(AclCategory::STREAM)){}; + std::vector current_key() const override { return {key_}; } + void Do() override; + void Split(const HintKeys& hint_keys) override{}; + void Merge() override{}; + Cmd* Clone() override { return new XAddCmd(*this); } + + private: + std::string key_; + storage::StreamAddTrimArgs args_; + int field_pos_{0}; + + void DoInitial() override; +}; + +class XDelCmd : public Cmd { + public: + XDelCmd(const std::string& name, int arity, uint32_t flag) + : Cmd(name, arity, flag, static_cast(AclCategory::STREAM)){}; + std::vector current_key() const override { return {key_}; } + void Do() override; + void Split(const HintKeys& hint_keys) override{}; + void Merge() override{}; + Cmd* Clone() override { return new XDelCmd(*this); } + + private: + std::string key_; + std::vector ids_; + + void DoInitial() override; + void Clear() override { ids_.clear(); } +}; + +class XReadCmd : public Cmd { + public: + XReadCmd(const std::string& name, int arity, uint32_t flag) + : Cmd(name, arity, flag, static_cast(AclCategory::STREAM)){}; + void Do() override; + void Split(const HintKeys& hint_keys) override{}; + void Merge() override{}; + Cmd* Clone() override { return new XReadCmd(*this); } + + private: + storage::StreamReadGroupReadArgs args_; + + void DoInitial() override; + void Clear() override { + args_.unparsed_ids.clear(); + args_.keys.clear(); + } +}; + +class XRangeCmd : public Cmd { + public: + XRangeCmd(const std::string& name, int arity, uint32_t flag) + : Cmd(name, arity, flag, static_cast(AclCategory::STREAM)){}; + void Do() override; + void Split(const HintKeys& hint_keys) override{}; + void Merge() override{}; + Cmd* Clone() override { return new XRangeCmd(*this); } + + protected: + std::string key_; + storage::StreamScanArgs args_; + + void DoInitial() override; +}; + +class XRevrangeCmd : public XRangeCmd { + public: + XRevrangeCmd(const std::string& name, int arity, uint32_t flag) : XRangeCmd(name, arity, flag){}; + void Do() override; + void Split(const HintKeys& hint_keys) override{}; + void Merge() override{}; + Cmd* Clone() override { return new XRevrangeCmd(*this); } +}; + +class XLenCmd : public Cmd { + public: + XLenCmd(const std::string& name, int arity, uint32_t flag) + : Cmd(name, arity, flag, static_cast(AclCategory::STREAM)){}; + void Do() override; + void Split(const HintKeys& hint_keys) override{}; + void Merge() override{}; + Cmd* Clone() override { return new XLenCmd(*this); } + + private: + std::string key_; + + void DoInitial() override; +}; + +class XTrimCmd : public Cmd { + public: + XTrimCmd(const std::string& name, int arity, uint32_t flag) : Cmd(name, arity, flag){}; + std::vector current_key() const override { return {key_}; } + void Do() override; + void Split(const HintKeys& hint_keys) override{}; + void Merge() override{}; + Cmd* Clone() override { return new XTrimCmd(*this); } + + private: + std::string key_; + storage::StreamAddTrimArgs args_; + + void DoInitial() override; +}; + +class XInfoCmd : public Cmd { + public: + XInfoCmd(const std::string& name, int arity, uint32_t flag) + : Cmd(name, arity, flag, static_cast(AclCategory::STREAM)){}; + void Do() override; + void Split(const HintKeys& hint_keys) override{}; + void Merge() override{}; + Cmd* Clone() override { return new XInfoCmd(*this); } + + private: + 
std::string key_; + std::string cgroupname_; + std::string consumername_; + std::string subcmd_; + uint64_t count_{0}; + bool is_full_{false}; + + void DoInitial() override; + void StreamInfo(std::shared_ptr& db); + void GroupsInfo(std::shared_ptr& db); + void ConsumersInfo(std::shared_ptr& db); +}; + +#endif // PIKA_STREAM_H_ diff --git a/tools/pika_migrate/include/pika_transaction.h b/tools/pika_migrate/include/pika_transaction.h new file mode 100644 index 0000000000..f772ef4e90 --- /dev/null +++ b/tools/pika_migrate/include/pika_transaction.h @@ -0,0 +1,107 @@ +// Copyright (c) 2015-present, Qihoo, Inc. All rights reserved. +// This source code is licensed under the BSD-style license found in the +// LICENSE file in the root directory of this source tree. An additional grant +// of patent rights can be found in the PATENTS file in the same directory. + +#ifndef PIKA_TRANSACTION_H_ +#define PIKA_TRANSACTION_H_ + +#include "acl.h" +#include "include/pika_command.h" +#include "net/include/redis_conn.h" +#include "pika_db.h" +#include "storage/storage.h" + +class MultiCmd : public Cmd { + public: + MultiCmd(const std::string& name, int arity, uint32_t flag) + : Cmd(name, arity, flag, static_cast(AclCategory::TRANSACTION)) {} + void Do() override; + Cmd* Clone() override { return new MultiCmd(*this); } + void Split(const HintKeys& hint_keys) override {} + void Merge() override {} + + private: + void DoInitial() override; +}; + +class ExecCmd : public Cmd { + public: + ExecCmd(const std::string& name, int arity, uint32_t flag) + : Cmd(name, arity, flag, static_cast(AclCategory::TRANSACTION)) {} + void Do() override; + Cmd* Clone() override { return new ExecCmd(*this); } + void Split(const HintKeys& hint_keys) override {} + void Merge() override {} + std::vector current_key() const override { return {}; } + void Execute() override; + private: + struct CmdInfo { + public: + CmdInfo(std::shared_ptr cmd, std::shared_ptr db, + std::shared_ptr sync_db) : cmd_(cmd), db_(db), sync_db_(sync_db) {} + std::shared_ptr cmd_; + std::shared_ptr db_; + std::shared_ptr sync_db_; + }; + void DoInitial() override; + void Lock(); + void Unlock(); + bool IsTxnFailedAndSetState(); + void SetCmdsVec(); + void ServeToBLrPopWithKeys(); + std::unordered_set> lock_db_{}; + std::unordered_map, std::vector> lock_db_keys_{}; + std::unordered_set> r_lock_dbs_ {}; + bool is_lock_rm_dbs_{false}; // g_pika_rm->dbs_rw_; + std::vector cmds_; + std::vector list_cmd_; + std::vector keys_; +}; + +class DiscardCmd : public Cmd { + public: + DiscardCmd(const std::string& name, int arity, uint32_t flag) + : Cmd(name, arity, flag, static_cast(AclCategory::TRANSACTION)) {} + void Do() override; + Cmd* Clone() override { return new DiscardCmd(*this); } + void Split(const HintKeys& hint_keys) override {} + void Merge() override {} + + private: + void DoInitial() override; +}; + +class WatchCmd : public Cmd { + public: + WatchCmd(const std::string& name, int arity, uint32_t flag) + : Cmd(name, arity, flag, static_cast(AclCategory::TRANSACTION)) {} + + void Do() override; + void Split(const HintKeys& hint_keys) override {} + Cmd* Clone() override { return new WatchCmd(*this); } + void Merge() override {} + std::vector current_key() const override { return keys_; } + void Execute() override; + + private: + void DoInitial() override; + std::vector keys_; + std::vector db_keys_; // cause the keys watched may cross different dbs, so add dbname as keys prefix +}; + +class UnwatchCmd : public Cmd { + public: + UnwatchCmd(const std::string& name, 
int arity, uint32_t flag) + : Cmd(name, arity, flag, static_cast(AclCategory::TRANSACTION)) {} + + void Do() override; + Cmd* Clone() override { return new UnwatchCmd(*this); } + void Split(const HintKeys& hint_keys) override {} + void Merge() override {} + + private: + void DoInitial() override; +}; + +#endif // PIKA_TRANSACTION_H_ diff --git a/tools/pika_migrate/include/pika_version.h b/tools/pika_migrate/include/pika_version.h new file mode 100644 index 0000000000..9c0b2a1732 --- /dev/null +++ b/tools/pika_migrate/include/pika_version.h @@ -0,0 +1,13 @@ +// Copyright (c) 2015-present, Qihoo, Inc. All rights reserved. +// This source code is licensed under the BSD-style license found in the +// LICENSE file in the root directory of this source tree. An additional grant +// of patent rights can be found in the PATENTS file in the same directory. + +#ifndef INCLUDE_PIKA_VERSION_H_ +#define INCLUDE_PIKA_VERSION_H_ + +#define PIKA_MAJOR 4 +#define PIKA_MINOR 0 +#define PIKA_PATCH 0 + +#endif // INCLUDE_PIKA_VERSION_H_ diff --git a/tools/pika_migrate/include/pika_zset.h b/tools/pika_migrate/include/pika_zset.h new file mode 100644 index 0000000000..a74ee026fc --- /dev/null +++ b/tools/pika_migrate/include/pika_zset.h @@ -0,0 +1,634 @@ +// Copyright (c) 2015-present, Qihoo, Inc. All rights reserved. +// This source code is licensed under the BSD-style license found in the +// LICENSE file in the root directory of this source tree. An additional grant +// of patent rights can be found in the PATENTS file in the same directory. + +#ifndef PIKA_ZSET_H_ +#define PIKA_ZSET_H_ + +#include "storage/storage.h" +#include "include/acl.h" +#include "include/pika_command.h" +#include "pika_kv.h" + +/* + * zset + */ +class ZAddCmd : public Cmd { + public: + ZAddCmd(const std::string& name, int arity, uint32_t flag) + : Cmd(name, arity, flag, static_cast(AclCategory::SORTEDSET)) {} + std::vector current_key() const override { + std::vector res; + res.push_back(key_); + return res; + } + void Do() override; + void DoUpdateCache() override; + void DoThroughDB() override; + void Split(const HintKeys& hint_keys) override{}; + void Merge() override{}; + Cmd* Clone() override { return new ZAddCmd(*this); } + + private: + std::string key_; + std::vector score_members; + rocksdb::Status s_; + void DoInitial() override; +}; + +class ZCardCmd : public Cmd { + public: + ZCardCmd(const std::string& name, int arity, uint32_t flag) + : Cmd(name, arity, flag, static_cast(AclCategory::SORTEDSET)) {} + std::vector current_key() const override { + std::vector res; + res.push_back(key_); + return res; + } + void Do() override; + void ReadCache() override; + void DoUpdateCache() override; + void DoThroughDB() override; + void Split(const HintKeys& hint_keys) override{}; + void Merge() override{}; + Cmd* Clone() override { return new ZCardCmd(*this); } + + private: + std::string key_; + void DoInitial() override; +}; + +class ZScanCmd : public Cmd { + public: + ZScanCmd(const std::string& name, int arity, uint32_t flag) + : Cmd(name, arity, flag, static_cast(AclCategory::SORTEDSET)), pattern_("*") {} + std::vector current_key() const override { + std::vector res; + res.push_back(key_); + return res; + } + void Do() override; + void Split(const HintKeys& hint_keys) override {}; + void Merge() override {}; + Cmd* Clone() override { return new ZScanCmd(*this); } + + private: + std::string key_, pattern_ = "*"; + int64_t cursor_ = 0, count_ = 10; + void DoInitial() override; + void Clear() override { + pattern_ = "*"; + count_ = 
10; + } +}; + +class ZIncrbyCmd : public Cmd { + public: + ZIncrbyCmd(const std::string& name, int arity, uint32_t flag) + : Cmd(name, arity, flag, static_cast(AclCategory::SORTEDSET)) {} + std::vector current_key() const override { + std::vector res; + res.push_back(key_); + return res; + } + void Do() override; + void DoUpdateCache() override; + void DoThroughDB() override; + void Split(const HintKeys& hint_keys) override{}; + void Merge() override{}; + Cmd* Clone() override { return new ZIncrbyCmd(*this); } + double Score() { return score_; } + + private: + std::string key_, member_; + double by_ = .0f; + double score_ = .0f; + void DoInitial() override; +}; + +class ZsetRangeParentCmd : public Cmd { + public: + ZsetRangeParentCmd(const std::string& name, int arity, uint32_t flag) + : Cmd(name, arity, flag, static_cast(AclCategory::SORTEDSET)) {} + + protected: + std::string key_; + int64_t start_ = 0; + int64_t stop_ = -1; + bool is_ws_ = false; + void DoInitial() override; + void Clear() override { is_ws_ = false; } +}; + +class ZRangeCmd : public ZsetRangeParentCmd { + public: + ZRangeCmd(const std::string& name, int arity, uint32_t flag) : ZsetRangeParentCmd(name, arity, flag) {} + std::vector current_key() const override { + std::vector res; + res.push_back(key_); + return res; + } + void Do() override; + void ReadCache() override; + void DoUpdateCache() override; + void DoThroughDB() override; + void Split(const HintKeys& hint_keys) override{}; + void Merge() override{}; + Cmd* Clone() override { return new ZRangeCmd(*this); } + + private: + rocksdb::Status s_; + void DoInitial() override; +}; + +class ZRevrangeCmd : public ZsetRangeParentCmd { + public: + ZRevrangeCmd(const std::string& name, int arity, uint32_t flag) : ZsetRangeParentCmd(name, arity, flag) {} + std::vector current_key() const override { + std::vector res; + res.push_back(key_); + return res; + } + void Do() override; + void ReadCache() override; + void DoUpdateCache() override; + void DoThroughDB() override; + void Split(const HintKeys& hint_keys) override{}; + void Merge() override{}; + Cmd* Clone() override { return new ZRevrangeCmd(*this); } + + private: + rocksdb::Status s_; + void DoInitial() override; +}; + +class ZsetRangebyscoreParentCmd : public Cmd { + public: + ZsetRangebyscoreParentCmd(const std::string& name, int arity, uint32_t flag) + : Cmd(name, arity, flag, static_cast(AclCategory::SORTEDSET)) {} + + double MinScore() { return min_score_; } + double MaxScore() { return max_score_; } + bool LeftClose() { return left_close_; } + bool RightClose() { return right_close_; } + int64_t Offset() { return offset_; } + int64_t Count() { return count_; } + + protected: + std::string key_; + std::string min_, max_; + double min_score_ = 0, max_score_ = 0; + bool left_close_ = true, right_close_ = true, with_scores_ = false; + int64_t offset_ = 0, count_ = -1; + void DoInitial() override; + void Clear() override { + left_close_ = right_close_ = true; + with_scores_ = false; + offset_ = 0; + count_ = -1; + } +}; + +class ZRangebyscoreCmd : public ZsetRangebyscoreParentCmd { + public: + ZRangebyscoreCmd(const std::string& name, int arity, uint32_t flag) : ZsetRangebyscoreParentCmd(name, arity, flag) {} + std::vector current_key() const override { + std::vector res; + res.push_back(key_); + return res; + } + void Do() override; + void ReadCache() override; + void DoUpdateCache() override; + void DoThroughDB() override; + void Split(const HintKeys& hint_keys) override{}; + void Merge() override{}; + Cmd* Clone() 
override { return new ZRangebyscoreCmd(*this); } + + private: + rocksdb::Status s_; + void DoInitial() override; +}; + +class ZRevrangebyscoreCmd : public ZsetRangebyscoreParentCmd { + public: + ZRevrangebyscoreCmd(const std::string& name, int arity, uint32_t flag) + : ZsetRangebyscoreParentCmd(name, arity, flag) {} + std::vector current_key() const override { + std::vector res; + res.push_back(key_); + return res; + } + void Do() override; + void ReadCache() override; + void DoUpdateCache() override; + void DoThroughDB() override; + void Split(const HintKeys& hint_keys) override{}; + void Merge() override{}; + Cmd* Clone() override { return new ZRevrangebyscoreCmd(*this); } + + private: + rocksdb::Status s_; + void DoInitial() override; +}; + +class ZCountCmd : public Cmd { + public: + ZCountCmd(const std::string& name, int arity, uint32_t flag) + : Cmd(name, arity, flag, static_cast(AclCategory::SORTEDSET)) {} + std::vector current_key() const override { + std::vector res; + res.push_back(key_); + return res; + } + void Do() override; + void ReadCache() override; + void DoUpdateCache() override; + void DoThroughDB() override; + void Split(const HintKeys& hint_keys) override{}; + void Merge() override{}; + Cmd* Clone() override { return new ZCountCmd(*this); } + double MinScore() { return min_score_; } + double MaxScore() { return max_score_; } + bool LeftClose() { return left_close_; } + bool RightClose() { return right_close_; } + + private: + std::string key_; + std::string min_ , max_; + double min_score_ = 0, max_score_ = 0; + bool left_close_ = true, right_close_ = true; + rocksdb::Status s_; + void DoInitial() override; + void Clear() override { + left_close_ = true; + right_close_ = true; + } +}; + +class ZRemCmd : public Cmd { + public: + ZRemCmd(const std::string& name, int arity, uint32_t flag) + : Cmd(name, arity, flag, static_cast(AclCategory::SORTEDSET)) {} + std::vector current_key() const override { + std::vector res; + res.push_back(key_); + return res; + } + void Do() override; + void DoUpdateCache() override; + void DoThroughDB() override; + void Split(const HintKeys& hint_keys) override{}; + void Merge() override{}; + Cmd* Clone() override { return new ZRemCmd(*this); } + + private: + std::string key_; + std::vector members_; + int32_t deleted_ = 0; + rocksdb::Status s_; + void DoInitial() override; +}; + +class ZsetUIstoreParentCmd : public Cmd { + public: + ZsetUIstoreParentCmd(const std::string& name, int arity, uint32_t flag) + : Cmd(name, arity, flag, static_cast(AclCategory::SORTEDSET)) { + zadd_cmd_ = std::make_unique(kCmdNameZAdd, -4, kCmdFlagsWrite | kCmdFlagsZset); + } + ZsetUIstoreParentCmd(const ZsetUIstoreParentCmd& other) + : Cmd(other), + dest_key_(other.dest_key_), + num_keys_(other.num_keys_), + aggregate_(other.aggregate_), + keys_(other.keys_), + weights_(other.weights_) { + zadd_cmd_ = std::make_unique(kCmdNameZAdd, -4, kCmdFlagsWrite | kCmdFlagsZset); + } + + std::vector current_key() const override { return {dest_key_}; } + + protected: + std::string dest_key_; + int64_t num_keys_ = 0; + storage::AGGREGATE aggregate_{storage::SUM}; + std::vector keys_; + std::vector weights_; + void DoInitial() override; + void Clear() override { aggregate_ = storage::SUM; } + // used for write binlog + std::shared_ptr zadd_cmd_; +}; + +class ZUnionstoreCmd : public ZsetUIstoreParentCmd { + public: + ZUnionstoreCmd(const std::string& name, int arity, uint32_t flag) : ZsetUIstoreParentCmd(name, arity, flag) {} + void Do() override; + void DoUpdateCache() override; 
+ void DoThroughDB() override; + void Split(const HintKeys& hint_keys) override{}; + void Merge() override{}; + Cmd* Clone() override { return new ZUnionstoreCmd(*this); } + + private: + void DoInitial() override; + // used for write binlog + std::map value_to_dest_; + rocksdb::Status s_; + void DoBinlog() override; +}; + +class ZInterstoreCmd : public ZsetUIstoreParentCmd { + public: + ZInterstoreCmd(const std::string& name, int arity, uint32_t flag) : ZsetUIstoreParentCmd(name, arity, flag) {} + void Do() override; + void DoUpdateCache() override; + void DoThroughDB() override; + void Split(const HintKeys& hint_keys) override{}; + void Merge() override{}; + Cmd* Clone() override { return new ZInterstoreCmd(*this); } + void DoBinlog() override; + + private: + void DoInitial() override; + rocksdb::Status s_; + // used for write binlog + std::vector value_to_dest_; +}; + +class ZsetRankParentCmd : public Cmd { + public: + ZsetRankParentCmd(const std::string& name, int arity, uint32_t flag) + : Cmd(name, arity, flag, static_cast(AclCategory::SORTEDSET)) {} + + protected: + std::string key_, member_; + void DoInitial() override; +}; + +class ZRankCmd : public ZsetRankParentCmd { + public: + ZRankCmd(const std::string& name, int arity, uint32_t flag) : ZsetRankParentCmd(name, arity, flag) {} + std::vector current_key() const override { + std::vector res; + res.push_back(key_); + return res; + } + void Do() override; + void ReadCache() override; + void DoUpdateCache() override; + void DoThroughDB() override; + void Split(const HintKeys& hint_keys) override{}; + void Merge() override{}; + Cmd* Clone() override { return new ZRankCmd(*this); } + + private: + rocksdb::Status s_; + void DoInitial() override; +}; + +class ZRevrankCmd : public ZsetRankParentCmd { + public: + ZRevrankCmd(const std::string& name, int arity, uint32_t flag) : ZsetRankParentCmd(name, arity, flag) {} + std::vector current_key() const override { + std::vector res; + res.push_back(key_); + return res; + } + void Do() override; + void ReadCache() override; + void DoUpdateCache() override; + void DoThroughDB() override; + void Split(const HintKeys& hint_keys) override{}; + void Merge() override{}; + Cmd* Clone() override { return new ZRevrankCmd(*this); } + + private: + rocksdb::Status s_; + void DoInitial() override; +}; + +class ZScoreCmd : public ZsetRankParentCmd { + public: + ZScoreCmd(const std::string& name, int arity, uint32_t flag) : ZsetRankParentCmd(name, arity, flag) {} + std::vector current_key() const override { + std::vector res; + res.push_back(key_); + return res; + } + void Do() override; + void ReadCache() override; + void DoUpdateCache() override; + void DoThroughDB() override; + void Split(const HintKeys& hint_keys) override{}; + void Merge() override{}; + Cmd* Clone() override { return new ZScoreCmd(*this); } + + private: + std::string key_, member_; + rocksdb::Status s_; + void DoInitial() override; +}; + +class ZsetRangebylexParentCmd : public Cmd { + public: + ZsetRangebylexParentCmd(const std::string& name, int arity, uint32_t flag) + : Cmd(name, arity, flag, static_cast(AclCategory::SORTEDSET)) {} + + protected: + std::string key_, min_member_, max_member_; + std::string min_, max_; + bool left_close_ = true, right_close_ = true; + int64_t offset_ = 0, count_ = -1; + void DoInitial() override; + void Clear() override { + left_close_ = right_close_ = true; + offset_ = 0; + count_ = -1; + } +}; + +class ZRangebylexCmd : public ZsetRangebylexParentCmd { + public: + ZRangebylexCmd(const std::string& name, 
int arity, uint32_t flag) : ZsetRangebylexParentCmd(name, arity, flag) {} + std::vector current_key() const override { + std::vector res; + res.push_back(key_); + return res; + } + void Do() override; + void ReadCache() override; + void DoUpdateCache() override; + void DoThroughDB() override; + void Split(const HintKeys& hint_keys) override{}; + void Merge() override{}; + Cmd* Clone() override { return new ZRangebylexCmd(*this); } + + private: + rocksdb::Status s_; + void DoInitial() override; +}; + +class ZRevrangebylexCmd : public ZsetRangebylexParentCmd { + public: + ZRevrangebylexCmd(const std::string& name, int arity, uint32_t flag) : ZsetRangebylexParentCmd(name, arity, flag) {} + std::vector current_key() const override { + std::vector res; + res.push_back(key_); + return res; + } + void Do() override; + void ReadCache() override; + void DoUpdateCache() override; + void DoThroughDB() override; + void Split(const HintKeys& hint_keys) override{}; + void Merge() override{}; + Cmd* Clone() override { return new ZRevrangebylexCmd(*this); } + + private: + void DoInitial() override; + rocksdb::Status s_; +}; + +class ZLexcountCmd : public Cmd { + public: + ZLexcountCmd(const std::string& name, int arity, uint32_t flag) + : Cmd(name, arity, flag, static_cast(AclCategory::SORTEDSET)) {} + std::vector current_key() const override { + std::vector res; + res.push_back(key_); + return res; + } + void Do() override; + void ReadCache() override; + void DoUpdateCache() override; + void DoThroughDB() override; + void Split(const HintKeys& hint_keys) override{}; + void Merge() override{}; + Cmd* Clone() override { return new ZLexcountCmd(*this); } + + private: + std::string key_, min_member_, max_member_; + std::string min_, max_; + bool left_close_ = true, right_close_ = true; + rocksdb::Status s_; + void DoInitial() override; + void Clear() override { left_close_ = right_close_ = true; } +}; + +class ZRemrangebyrankCmd : public Cmd { + public: + ZRemrangebyrankCmd(const std::string& name, int arity, uint32_t flag) + : Cmd(name, arity, flag, static_cast(AclCategory::SORTEDSET)) {} + std::vector current_key() const override { + std::vector res; + res.push_back(key_); + return res; + } + void Do() override; + void DoUpdateCache() override; + void DoThroughDB() override; + void Split(const HintKeys& hint_keys) override{}; + void Merge() override{}; + Cmd* Clone() override { return new ZRemrangebyrankCmd(*this); } + + private: + std::string key_, min_, max_; + int64_t start_rank_ = 0, stop_rank_ = -1; + int32_t ele_deleted_; + rocksdb::Status s_; + void DoInitial() override; +}; + +class ZRemrangebyscoreCmd : public Cmd { + public: + ZRemrangebyscoreCmd(const std::string& name, int arity, uint32_t flag) + : Cmd(name, arity, flag, static_cast(AclCategory::SORTEDSET)) {} + std::vector current_key() const override { + std::vector res; + res.push_back(key_); + return res; + } + void Do() override; + void DoUpdateCache() override; + void DoThroughDB() override; + void Split(const HintKeys& hint_keys) override{}; + void Merge() override{}; + Cmd* Clone() override { return new ZRemrangebyscoreCmd(*this); } + + private: + std::string key_, min_, max_; + double min_score_ = 0, max_score_ = 0; + bool left_close_ = true, right_close_ = true; + rocksdb::Status s_; + void DoInitial() override; + void Clear() override { left_close_ = right_close_ = true; } +}; + +class ZRemrangebylexCmd : public Cmd { + public: + ZRemrangebylexCmd(const std::string& name, int arity, uint32_t flag) + : Cmd(name, arity, flag, 
static_cast(AclCategory::SORTEDSET)) {} + std::vector current_key() const override { + std::vector res; + res.push_back(key_); + return res; + } + void Do() override; + void DoUpdateCache() override; + void DoThroughDB() override; + void Split(const HintKeys& hint_keys) override{}; + void Merge() override{}; + Cmd* Clone() override { return new ZRemrangebylexCmd(*this); } + + private: + std::string key_, min_, max_; + std::string min_member_, max_member_; + bool left_close_ = true, right_close_ = true; + rocksdb::Status s_; + void DoInitial() override; + void Clear() override { left_close_ = right_close_ = true; } +}; + +class ZPopmaxCmd : public Cmd { + public: + ZPopmaxCmd(const std::string& name, int arity, uint32_t flag) + : Cmd(name, arity, flag, static_cast(AclCategory::SORTEDSET)) {} + std::vector current_key() const override { + std::vector res; + res.emplace_back(key_); + return res; + } + void Do() override; + void Split(const HintKeys& hint_keys) override {}; + void Merge() override {}; + Cmd* Clone() override { return new ZPopmaxCmd(*this); } + + private: + void DoInitial() override; + std::string key_; + int64_t count_ = 0; +}; + +class ZPopminCmd : public Cmd { + public: + ZPopminCmd(const std::string& name, int arity, uint32_t flag) + : Cmd(name, arity, flag, static_cast(AclCategory::SORTEDSET)) {} + std::vector current_key() const override { + std::vector res; + res.push_back(key_); + return res; + } + void Do() override; + void Split(const HintKeys& hint_keys) override {}; + void Merge() override {}; + Cmd* Clone() override { return new ZPopminCmd(*this); } + + private: + void DoInitial() override; + std::string key_; + int64_t count_ = 0; +}; + +#endif diff --git a/tools/pika_migrate/include/rsync_client.h b/tools/pika_migrate/include/rsync_client.h new file mode 100644 index 0000000000..657407218f --- /dev/null +++ b/tools/pika_migrate/include/rsync_client.h @@ -0,0 +1,247 @@ +// Copyright (c) 2023-present, Qihoo, Inc. All rights reserved. +// This source code is licensed under the BSD-style license found in the +// LICENSE file in the root directory of this source tree. An additional grant +// of patent rights can be found in the PATENTS file in the same directory. 
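+//
+// RsyncClient (below) drives full-sync on the slave side: it pulls the
+// snapshot manifest from the master (PullRemoteMeta), diffs it against the
+// local meta file, and copies the missing files with parallel, throttled
+// worker threads.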
+ +#ifndef RSYNC_CLIENT_H_ +#define RSYNC_CLIENT_H_ + +#include +#include +#include +#include +#include +#include +#include +#include + +#include + +#include "net/include/bg_thread.h" +#include "net/include/net_cli.h" +#include "pstd/include/env.h" +#include "pstd/include/pstd_status.h" +#include "pstd/include/pstd_hash.h" +#include "pstd/include/pstd_string.h" +#include "pstd/include/pstd_status.h" +#include "include/pika_define.h" +#include "include/rsync_client_thread.h" +#include "include/throttle.h" +#include "rsync_service.pb.h" + +extern std::unique_ptr g_pika_conf; + +const std::string kDumpMetaFileName = "DUMP_META_DATA"; +const std::string kUuidPrefix = "snapshot-uuid:"; +const size_t kInvalidOffset = 0xFFFFFFFF; + +namespace rsync { + +class RsyncWriter; +class Session; +class WaitObject; +class WaitObjectManager; + +using pstd::Status; + +using ResponseSPtr = std::shared_ptr; +class RsyncClient : public net::Thread { + public: + enum State { + IDLE, + RUNNING, + STOP, + }; + RsyncClient(const std::string& dir, const std::string& db_name); + void* ThreadMain() override; + void Copy(const std::set& file_set, int index); + bool Init(); + int GetParallelNum(); + Status Start(); + Status Stop(); + bool IsRunning() { + return state_.load() == RUNNING; + } + bool IsExitedFromRunning() { + return state_.load() == STOP && all_worker_exited_.load(); + } + bool IsStop() { + return state_.load() == STOP; + } + bool IsIdle() { return state_.load() == IDLE;} + void OnReceive(RsyncService::RsyncResponse* resp); +private: + bool ComparisonUpdate(); + Status CopyRemoteFile(const std::string& filename, int index); + Status PullRemoteMeta(std::string* snapshot_uuid, std::set* file_set); + Status LoadLocalMeta(std::string* snapshot_uuid, std::map* file_map); + std::string GetLocalMetaFilePath(); + Status FlushMetaTable(); + Status CleanUpExpiredFiles(bool need_reset_path, const std::set& files); + Status UpdateLocalMeta(const std::string& snapshot_uuid, const std::set& expired_files, + std::map* localFileMap); + void HandleRsyncMetaResponse(RsyncService::RsyncResponse* response); + +private: + typedef std::unique_ptr NetThreadUPtr; + std::map meta_table_; + std::set file_set_; + std::string snapshot_uuid_; + std::string dir_; + std::string db_name_; + + NetThreadUPtr client_thread_; + std::vector work_threads_; + std::atomic finished_work_cnt_ = 0; + + std::atomic state_; + std::atomic error_stopped_{false}; + std::atomic all_worker_exited_{true}; + int max_retries_ = 10; + std::unique_ptr wo_mgr_; + std::condition_variable cond_; + std::mutex mu_; + + + std::string master_ip_; + int master_port_; + int parallel_num_; +}; + +class RsyncWriter { + public: + RsyncWriter(const std::string& filepath) { + filepath_ = filepath; + fd_ = open(filepath.c_str(), O_RDWR | O_APPEND | O_CREAT, 0644); + } + ~RsyncWriter() {} + Status Write(uint64_t offset, size_t n, const char* data) { + const char* ptr = data; + size_t left = n; + Status s; + while (left != 0) { + ssize_t done = write(fd_, ptr, left); + if (done < 0) { + if (errno == EINTR) { + continue; + } + LOG(WARNING) << "pwrite failed, filename: " << filepath_ << "errno: " << strerror(errno) << "n: " << n; + return Status::IOError(filepath_, "pwrite failed"); + } + left -= done; + ptr += done; + offset += done; + } + return Status::OK(); + } + Status Close() { + close(fd_); + return Status::OK(); + } + Status Fsync() { + fsync(fd_); + return Status::OK(); + } + + private: + std::string filepath_; + int fd_ = -1; +}; + +class WaitObject { + public: + 
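+  // A per-worker rendezvous slot: a copy thread parks in Wait() (bounded by
+  // rsync_timeout_ms) until the matching response is delivered via WakeUp().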
WaitObject() : filename_(""), type_(RsyncService::kRsyncMeta), offset_(0), resp_(nullptr) {} + ~WaitObject() {} + + void Reset(const std::string& filename, RsyncService::Type t, size_t offset) { + std::lock_guard guard(mu_); + resp_.reset(); + filename_ = filename; + type_ = t; + offset_ = offset; + } + + pstd::Status Wait(ResponseSPtr& resp) { + auto timeout = g_pika_conf->rsync_timeout_ms(); + std::unique_lock lock(mu_); + auto cv_s = cond_.wait_for(lock, std::chrono::milliseconds(timeout), [this] { + return resp_.get() != nullptr; + }); + if (!cv_s) { + std::string timout_info("timeout during(in ms) is "); + timout_info.append(std::to_string(timeout)); + return pstd::Status::Timeout("rsync timeout", timout_info); + } + resp = resp_; + return pstd::Status::OK(); + } + + void WakeUp(RsyncService::RsyncResponse* resp) { + std::unique_lock lock(mu_); + resp_.reset(resp); + offset_ = kInvalidOffset; + cond_.notify_all(); + } + + std::string Filename() {return filename_;} + RsyncService::Type Type() {return type_;} + size_t Offset() {return offset_;} + private: + std::string filename_; + RsyncService::Type type_; + size_t offset_ = kInvalidOffset; + ResponseSPtr resp_ = nullptr; + std::condition_variable cond_; + std::mutex mu_; +}; + +class WaitObjectManager { + public: + WaitObjectManager() { + wo_vec_.resize(kMaxRsyncParallelNum); + for (int i = 0; i < kMaxRsyncParallelNum; i++) { + wo_vec_[i] = new WaitObject(); + } + } + ~WaitObjectManager() { + for (int i = 0; i < wo_vec_.size(); i++) { + delete wo_vec_[i]; + wo_vec_[i] = nullptr; + } + } + + WaitObject* UpdateWaitObject(int worker_index, const std::string& filename, + RsyncService::Type type, size_t offset) { + std::lock_guard guard(mu_); + wo_vec_[worker_index]->Reset(filename, type, offset); + return wo_vec_[worker_index]; + } + + void WakeUp(RsyncService::RsyncResponse* resp) { + std::lock_guard guard(mu_); + int index = resp->reader_index(); + if (wo_vec_[index] == nullptr || resp->type() != wo_vec_[index]->Type()) { + delete resp; + return; + } + if (resp->code() != RsyncService::kOk) { + LOG(WARNING) << "rsync response error"; + wo_vec_[index]->WakeUp(resp); + return; + } + + if (resp->type() == RsyncService::kRsyncFile && + ((resp->file_resp().filename() != wo_vec_[index]->Filename()) || + (resp->file_resp().offset() != wo_vec_[index]->Offset()))) { + delete resp; + return; + } + wo_vec_[index]->WakeUp(resp); + } + private: + std::vector wo_vec_; + std::mutex mu_; +}; + +} // end namespace rsync +#endif diff --git a/tools/pika_migrate/include/rsync_client_thread.h b/tools/pika_migrate/include/rsync_client_thread.h new file mode 100644 index 0000000000..19bebcb56d --- /dev/null +++ b/tools/pika_migrate/include/rsync_client_thread.h @@ -0,0 +1,55 @@ +// Copyright (c) 2023-present, Qihoo, Inc. All rights reserved. +// This source code is licensed under the BSD-style license found in the +// LICENSE file in the root directory of this source tree. An additional grant +// of patent rights can be found in the PATENTS file in the same directory. 
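+//
+// Network plumbing for RsyncClient: RsyncClientConn receives protobuf-encoded
+// responses and hands them back through the opaque cb_handler pointer.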
+ +#ifndef RSYNC_CLIENT_THREAD_H_ +#define RSYNC_CLIENT_THREAD_H_ + +#include "net/include/client_thread.h" +#include "net/include/net_conn.h" +#include "net/include/pb_conn.h" +#include "rsync_service.pb.h" + +using namespace pstd; +using namespace net; + +namespace rsync { + +class RsyncClientConn : public PbConn { + public: + RsyncClientConn(int fd, const std::string& ip_port, + net::Thread* thread, void* cb_handler, + NetMultiplexer* mpx); + ~RsyncClientConn() override; + int DealMessage() override; + + private: + void* cb_handler_ = nullptr; +}; + +class RsyncClientConnFactory : public ConnFactory { + public: + RsyncClientConnFactory(void* scheduler) : cb_handler_(scheduler) {} + std::shared_ptr NewNetConn(int connfd, const std::string& ip_port, + net::Thread* thread, void* cb_handler, + net::NetMultiplexer* net) const override { + return std::static_pointer_cast( + std::make_shared(connfd, ip_port, thread, cb_handler_, net)); + } + private: + void* cb_handler_ = nullptr; +}; + +class RsyncClientThread : public ClientThread { + public: + RsyncClientThread(int cron_interval, int keepalive_timeout, void* scheduler); + ~RsyncClientThread() override; + private: + RsyncClientConnFactory conn_factory_; + ClientHandle handle_; +}; + +} //end namespace rsync +#endif + diff --git a/tools/pika_migrate/include/rsync_server.h b/tools/pika_migrate/include/rsync_server.h new file mode 100644 index 0000000000..560585f3c8 --- /dev/null +++ b/tools/pika_migrate/include/rsync_server.h @@ -0,0 +1,187 @@ +// Copyright (c) 2023-present, Qihoo, Inc. All rights reserved. +// This source code is licensed under the BSD-style license found in the +// LICENSE file in the root directory of this source tree. An additional grant +// of patent rights can be found in the PATENTS file in the same directory. 
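+//
+// Master-side counterpart of rsync_client.h: RsyncServer schedules meta and
+// file requests onto a worker thread pool, and RsyncReader serves file
+// content out of a 16MB read-ahead block.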
+ +#ifndef RSYNC_SERVER_H_ +#define RSYNC_SERVER_H_ + +#include +#include +#include +#include + +#include "net/include/net_conn.h" +#include "net/include/net_thread.h" +#include "net/include/pb_conn.h" +#include "net/include/server_thread.h" +#include "net/include/thread_pool.h" +#include "net/src/holy_thread.h" +#include "net/src/net_multiplexer.h" +#include "pstd/include/env.h" +#include "pstd_hash.h" +#include "rsync_service.pb.h" + +namespace rsync { +class RsyncServerConn; +struct RsyncServerTaskArg { + std::shared_ptr req; + std::shared_ptr conn; + RsyncServerTaskArg(std::shared_ptr _req, std::shared_ptr _conn) + : req(std::move(_req)), conn(std::move(_conn)) {} +}; +class RsyncReader; +class RsyncServerThread; + +class RsyncServer { + public: + RsyncServer(const std::set& ips, const int port); + ~RsyncServer(); + void Schedule(net::TaskFunc func, void* arg); + int Start(); + int Stop(); + private: + std::unique_ptr work_thread_; + std::unique_ptr rsync_server_thread_; +}; + +class RsyncServerConn : public net::PbConn { + public: + RsyncServerConn(int connfd, const std::string& ip_port, + net::Thread* thread, void* worker_specific_data, + net::NetMultiplexer* mpx); + virtual ~RsyncServerConn() override; + int DealMessage() override; + static void HandleMetaRsyncRequest(void* arg); + static void HandleFileRsyncRequest(void* arg); + private: + std::vector > readers_; + std::mutex mu_; + void* data_ = nullptr; +}; + +class RsyncServerThread : public net::HolyThread { + public: + RsyncServerThread(const std::set& ips, int port, int cron_internal, RsyncServer* arg); + ~RsyncServerThread(); + + private: + class RsyncServerConnFactory : public net::ConnFactory { + public: + explicit RsyncServerConnFactory(RsyncServer* sched) : scheduler_(sched) {} + + std::shared_ptr NewNetConn(int connfd, const std::string& ip_port, + net::Thread* thread, void* worker_specific_data, + net::NetMultiplexer* net) const override { + return std::static_pointer_cast( + std::make_shared(connfd, ip_port, thread, scheduler_, net)); + } + private: + RsyncServer* scheduler_ = nullptr; + }; + class RsyncServerHandle : public net::ServerHandle { + public: + void FdClosedHandle(int fd, const std::string& ip_port) const override; + void FdTimeoutHandle(int fd, const std::string& ip_port) const override; + bool AccessHandle(int fd, std::string& ip) const override; + void CronHandle() const override; + }; + private: + RsyncServerConnFactory conn_factory_; + RsyncServerHandle handle_; +}; + +class RsyncReader { + public: + RsyncReader() { + block_data_ = new char[kBlockSize]; + } + ~RsyncReader() { + if (!filepath_.empty()) { + Reset(); + } + delete []block_data_; + } + pstd::Status Read(const std::string filepath, const size_t offset, + const size_t count, char* data, size_t* bytes_read, + std::string* checksum, bool* is_eof) { + std::lock_guard guard(mu_); + pstd::Status s = readAhead(filepath, offset); + if (!s.ok()) { + return s; + } + size_t offset_in_block = offset % kBlockSize; + size_t copy_count = count > (end_offset_ - offset) ? 
end_offset_ - offset : count; + memcpy(data, block_data_ + offset_in_block, copy_count); + *bytes_read = copy_count; + *is_eof = (offset + copy_count == total_size_); + return pstd::Status::OK(); + } + +private: + pstd::Status readAhead(const std::string filepath, const size_t offset) { + if (filepath == filepath_ && offset >= start_offset_ && offset < end_offset_) { + return pstd::Status::OK(); + } + if (filepath != filepath_) { + Reset(); + fd_ = open(filepath.c_str(), O_RDONLY); + if (fd_ < 0) { + LOG(ERROR) << "open file [" << filepath << "] failed! error: " << strerror(errno); + return pstd::Status::IOError("open file [" + filepath + "] failed! error: " + strerror(errno)); + } + filepath_ = filepath; + struct stat buf; + stat(filepath.c_str(), &buf); + total_size_ = buf.st_size; + } + start_offset_ = (offset / kBlockSize) * kBlockSize; + + size_t read_offset = start_offset_; + size_t read_count = kBlockSize > (total_size_ - read_offset) ? (total_size_ - read_offset) : kBlockSize; + ssize_t bytesin = 0; + char* ptr = block_data_; + while ((bytesin = pread(fd_, ptr, read_count, read_offset)) > 0) { + read_count -= bytesin; + read_offset += bytesin; + ptr += bytesin; + if (read_count <= 0) { + break; + } + } + if (bytesin < 0) { + LOG(ERROR) << "unable to read from " << filepath << ". error: " << strerror(errno); + Reset(); + return pstd::Status::IOError("unable to read from " + filepath + ". error: " + strerror(errno)); + } + end_offset_ = start_offset_ + (ptr - block_data_); + return pstd::Status::OK(); + } + void Reset() { + total_size_ = -1; + start_offset_ = 0xFFFFFFFF; + end_offset_ = 0xFFFFFFFF; + memset(block_data_, 0, kBlockSize); + md5_.reset(new pstd::MD5()); + filepath_ = ""; + close(fd_); + fd_ = -1; + } + + private: + std::mutex mu_; + const size_t kBlockSize = 16 << 20; + + char* block_data_; + size_t start_offset_ = -1; + size_t end_offset_ = -1; + size_t total_size_ = -1; + + int fd_ = -1; + std::string filepath_; + std::unique_ptr md5_; +}; + +} //end namespace rsync +#endif + diff --git a/tools/pika_migrate/include/throttle.h b/tools/pika_migrate/include/throttle.h new file mode 100644 index 0000000000..73184d6c29 --- /dev/null +++ b/tools/pika_migrate/include/throttle.h @@ -0,0 +1,45 @@ +// Copyright (c) 2023-present, Qihoo, Inc. All rights reserved. +// This source code is licensed under the BSD-style license found in the +// LICENSE file in the root directory of this source tree. An additional grant +// of patent rights can be found in the PATENTS file in the same directory. 
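+//
+// Process-wide limiter for rsync traffic: each second is split into
+// check_cycle_ windows, ThrottledByThroughput() grants bytes per window, and
+// ReturnUnusedThroughput() refunds what a caller acquired but did not use.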
+
+#ifndef THROTTLE_H_
+#define THROTTLE_H_
+
+#include <atomic>
+
+#include "pstd/include/pstd_mutex.h"
+#include "pika_conf.h"
+
+extern std::unique_ptr<PikaConf> g_pika_conf;
+
+namespace rsync {
+class Throttle {
+ public:
+  Throttle() {}
+  Throttle(size_t throttle_throughput_bytes, size_t check_cycle);
+  ~Throttle();
+
+  void ResetThrottleThroughputBytes(size_t new_throughput_bytes_per_s) {
+    throttle_throughput_bytes_.store(new_throughput_bytes_per_s);
+  };
+  size_t ThrottledByThroughput(size_t bytes);
+  void ReturnUnusedThroughput(size_t acquired, size_t consumed, size_t elaspe_time_us);
+  static Throttle& GetInstance() {
+    static Throttle instance(g_pika_conf->throttle_bytes_per_second(), 10);
+    return instance;
+  }
+ private:
+  std::atomic<size_t> throttle_throughput_bytes_ = 100 * 1024 * 1024;
+  std::atomic<int64_t> last_throughput_check_time_us_;
+  std::atomic<size_t> cur_throughput_bytes_;
+  // number of throughput check windows per second
+  size_t check_cycle_ = 10;
+  pstd::Mutex keys_mutex_;
+  // align a timestamp down to the start of its check window
+  size_t caculate_check_time_us_(int64_t current_time_us, int64_t check_cycle) {
+    size_t base_aligning_time_us = 1000 * 1000 / check_cycle;
+    return current_time_us / base_aligning_time_us * base_aligning_time_us;
+  }
+};
+}  // end namespace rsync
+#endif
+
diff --git a/tools/pika_migrate/pika-migrate.md b/tools/pika_migrate/pika-migrate.md
new file mode 100644
index 0000000000..9236cce658
--- /dev/null
+++ b/tools/pika_migrate/pika-migrate.md
@@ -0,0 +1,43 @@
+## Pika 3.5 to Redis migration tool
+
+### Supported versions:
+Pika 3.5-4.0, standalone mode, single DB only
+
+### Function
+Migrates data from Pika to Pika or Redis online (both full and incremental sync are supported).
+
+### Background
+The pika_to_redis tool previously provided by the Pika project could only migrate the data in a Pika DB to Pika or Redis offline, with no incremental sync. This tool is essentially a special Pika: after becoming a slave of the source instance, it forwards the data received from the master to Redis, and it also supports incremental sync, which makes hot migration possible.
+
+### How hot migration works
+1. pika-port issues a dbsync request to fetch the master's current full DB snapshot, together with the binlog offset that snapshot corresponds to
+2. Once the full snapshot has been received, it scans the DB and forwards the data to Redis
+3. It then requests incremental sync from the master starting at the recorded binlog offset; during incremental sync, the binlog entries received from the master are reassembled into Redis commands and forwarded to Redis
+
+### New configuration options
+```cpp
+###################
+## Migrate Settings
+###################
+
+target-redis-host : 127.0.0.1
+target-redis-port : 6379
+target-redis-pwd : abc
+
+sync-batch-num : 100
+redis-sender-num : 10
+```
+
+### Steps
+1. Writing the full data set to Redis may take a long time, during which the master's original binlog position could be purged. To guard against this, first run `config set expire-logs-nums 10000` on the master so that it retains 10000 binlog files (binlog files consume disk space, so pick a retention count that suits your environment). This ensures the binlog files still exist when the tool later requests incremental sync.
+2. Edit the tool's configuration items `target-redis-host`, `target-redis-port`, `target-redis-pwd`, `sync-batch-num` and `redis-sender-num`. (`sync-batch-num` controls batching: after receiving the master's full data set, the tool packs `sync-batch-num` records into one batch to Redis to improve forwarding throughput. The tool also runs `redis-sender-num` forwarding threads internally; commands are assigned to threads by key hash, so multi-threaded sending cannot corrupt per-key ordering.)
+3. Start the tool with `pika -c pika.conf` and check the log for errors.
+4. Run `slaveof ip port force` against the tool to request sync from the master, and watch for errors.
+5. Once the master-slave relationship is confirmed (at this point pika-port is already forwarding data to the target Redis), run `info Replication` on the master to check the replication lag. (You can also write a sentinel key to the master and check whether it shows up on the Redis side right away to judge whether the data is essentially in sync; a minimal sketch of such a check follows the notes below.)
+
+### Notes
+1. Pika allows different data structures to use the same key, but Redis does not. When the same key exists in several structures, the structure migrated to Redis first wins, and the other structures under that key are lost.
+2. The tool only supports hot-migrating a standalone, single-DB Pika. In cluster mode, or with multiple DBs, it reports an error and exits.
+3. To avoid writing dirty data to Redis through repeated full syncs triggered by purged master binlogs, the tool protects itself: if a second full sync is triggered, it reports an error and exits.
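+
+Below is a minimal sketch of the sentinel-key check mentioned in step 5. It is
+not part of the tool itself: it assumes hiredis is available, and the
+endpoints, the Pika port (9221) and the key name (`migrate:sentinel`) are
+placeholders to adapt to your deployment.
+
+```cpp
+// Smoke test: write a sentinel key to the Pika master, then poll the target
+// Redis until the key becomes visible. Build with: g++ check.cc -lhiredis
+#include <hiredis/hiredis.h>
+
+#include <chrono>
+#include <cstdio>
+#include <thread>
+
+int main() {
+  redisContext* pika = redisConnect("127.0.0.1", 9221);   // Pika master (placeholder)
+  redisContext* redis = redisConnect("127.0.0.1", 6379);  // target Redis (placeholder)
+  if (pika == nullptr || pika->err != 0 || redis == nullptr || redis->err != 0) {
+    std::fprintf(stderr, "connect failed\n");
+    return 1;
+  }
+  redisReply* w = static_cast<redisReply*>(redisCommand(pika, "SET migrate:sentinel done"));
+  if (w != nullptr) freeReplyObject(w);
+  for (int i = 0; i < 30; ++i) {  // poll for up to ~30 seconds
+    redisReply* r = static_cast<redisReply*>(redisCommand(redis, "GET migrate:sentinel"));
+    bool found = (r != nullptr && r->type == REDIS_REPLY_STRING);
+    if (r != nullptr) freeReplyObject(r);
+    if (found) {
+      std::printf("sentinel visible on Redis, sync has caught up\n");
+      return 0;
+    }
+    std::this_thread::sleep_for(std::chrono::seconds(1));
+  }
+  std::fprintf(stderr, "sentinel still missing, replication is lagging\n");
+  return 1;
+}
+```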
+ + diff --git a/tools/pika_migrate/protogen.cmake b/tools/pika_migrate/protogen.cmake new file mode 100644 index 0000000000..895a15b175 --- /dev/null +++ b/tools/pika_migrate/protogen.cmake @@ -0,0 +1,41 @@ +function(CUSTOM_PROTOBUF_GENERATE_CPP SRCS HDRS) + if (NOT ARGN) + message(SEND_ERROR "Error: CUSTOM_PROTOBUF_GENERATE_CPP() called without any proto files") + return() + endif () + + # Create an include path for each file specified + foreach (FIL ${ARGN}) + get_filename_component(ABS_FIL ${FIL} ABSOLUTE) + get_filename_component(ABS_PATH ${ABS_FIL} PATH) + list(FIND _protobuf_include_path ${ABS_PATH} _contains_already) + if (${_contains_already} EQUAL -1) + list(APPEND _protobuf_include_path -I ${ABS_PATH}) + endif () + endforeach () + + set(${SRCS}) + set(${HDRS}) + foreach (FIL ${ARGN}) + get_filename_component(ABS_FIL ${FIL} ABSOLUTE) + get_filename_component(FIL_WE ${FIL} NAME_WE) + + list(APPEND ${SRCS} "${CMAKE_CURRENT_BINARY_DIR}/${FIL_WE}.pb.cc") + list(APPEND ${HDRS} "${CMAKE_CURRENT_BINARY_DIR}/${FIL_WE}.pb.h") + + execute_process(COMMAND ${CMAKE_COMMAND} -E make_directory ${CMAKE_CURRENT_BINARY_DIR}) + + add_custom_command( + OUTPUT "${CMAKE_CURRENT_BINARY_DIR}/${FIL_WE}.pb.cc" + "${CMAKE_CURRENT_BINARY_DIR}/${FIL_WE}.pb.h" + COMMAND ${PROTOBUF_PROTOC} + ARGS --cpp_out ${CMAKE_CURRENT_BINARY_DIR} ${_protobuf_include_path} ${ABS_FIL} + DEPENDS ${ABS_FIL} + COMMENT "Running C++ protocol buffer compiler on ${FIL}" + VERBATIM) + endforeach () + + set_source_files_properties(${${SRCS}} ${${HDRS}} PROPERTIES GENERATED TRUE) + set(${SRCS} ${${SRCS}} PARENT_SCOPE) + set(${HDRS} ${${HDRS}} PARENT_SCOPE) +endfunction() \ No newline at end of file diff --git a/tools/pika_migrate/src/acl.cc b/tools/pika_migrate/src/acl.cc new file mode 100644 index 0000000000..dad50f73e6 --- /dev/null +++ b/tools/pika_migrate/src/acl.cc @@ -0,0 +1,1418 @@ +// Copyright (c) 2015-present, Qihoo, Inc. All rights reserved. +// This source code is licensed under the BSD-style license found in the +// LICENSE file in the root directory of this source tree. An additional grant +// of patent rights can be found in the PATENTS file in the same directory. 
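+//
+// Redis-style ACL implementation: User holds flags, password hashes and a
+// list of selectors; Acl manages the user table, loads rules from pika.conf
+// or an acl file, and keeps an in-memory log of denied operations.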
+ +#include +#include +#include +#include + +#include "include/acl.h" +#include "include/pika_cmd_table_manager.h" +#include "include/pika_command.h" +#include "include/pika_server.h" +#include "pstd_defer.h" +#include "pstd_hash.h" + +extern PikaServer* g_pika_server; + +extern std::unique_ptr g_pika_cmd_table_manager; + +// class User +User::User(std::string name) : name_(std::move(name)) { + selectors_.emplace_back(std::make_shared(static_cast(AclSelectorFlag::ROOT))); +} + +User::User(const User& user) : name_(user.Name()) { + flags_ = user.flags_.load(); + passwords_ = user.passwords_; + aclString_ = user.aclString_; + for (const auto& item : user.selectors_) { + selectors_.emplace_back(std::make_shared(*item)); + } +} + +std::string User::Name() const { return name_; } + +void User::CleanAclString() { aclString_.clear(); } + +void User::AddPassword(const std::string& password) { passwords_.insert(password); } + +void User::RemovePassword(const std::string& password) { passwords_.erase(password); } + +void User::CleanPassword() { passwords_.clear(); } + +void User::AddSelector(const std::shared_ptr& selector) { selectors_.push_back(selector); } + +pstd::Status User::SetUser(const std::vector& rules) { + std::unique_lock wl(mutex_); + + for (const auto& rule : rules) { + auto status = SetUser(rule); + if (!status.ok()) { + LOG(ERROR) << "SetUser rule:" << rule << status.ToString(); + return status; + } + } + + return pstd::Status::OK(); +} + +pstd::Status User::SetUser(const std::string& op) { + CleanAclString(); + if (op.empty()) { + return pstd::Status::OK(); + } + if (!strcasecmp(op.data(), "on")) { + AddFlags(static_cast(AclUserFlag::ENABLED)); + DecFlags(static_cast(AclUserFlag::DISABLED)); + } else if (!strcasecmp(op.data(), "off")) { + AddFlags(static_cast(AclUserFlag::DISABLED)); + DecFlags(static_cast(AclUserFlag::ENABLED)); + } else if (!strcasecmp(op.data(), "nopass")) { + AddFlags(static_cast(AclUserFlag::NO_PASS)); + CleanPassword(); + } else if (!strcasecmp(op.data(), "resetpass")) { + DecFlags(static_cast(AclUserFlag::NO_PASS)); + CleanPassword(); + } else if (op[0] == '>' || op[0] == '#') { + std::string newpass; + if (op[0] == '>') { + newpass = pstd::sha256(op.data() + 1); + } else { + if (!pstd::isSha256(op.data() + 1)) { + return pstd::Status::Error("password not sha256"); + } + newpass = op.data() + 1; + } + AddPassword(newpass); + DecFlags(static_cast(AclUserFlag::NO_PASS)); + } else if (op[0] == '<' || op[0] == '!') { + std::string delpass; + if (op[0] == '<') { + delpass = pstd::sha256(op.data() + 1); + } else { + if (!pstd::isSha256(op.data() + 1)) { + return pstd::Status::Error("password not sha256"); + } + delpass = op.data() + 1; + } + // passwords_.erase(delpass); + RemovePassword(delpass); + } else if (op[0] == '(' && op[op.size() - 1] == ')') { + auto status = CreateSelectorFromOpSet(op); + if (!status.ok()) { + return status; + } + } else if (!strcasecmp(op.data(), "clearselectors")) { + selectors_.clear(); + return pstd::Status::OK(); + } else if (!strcasecmp(op.data(), "reset")) { + auto status = SetUser("resetpass"); + if (!status.ok()) { + return status; + } + status = SetUser("resetkeys"); + if (!status.ok()) { + return status; + } + status = SetUser("resetchannels"); + if (!status.ok()) { + return status; + } + if (g_pika_conf->acl_pubsub_default() & static_cast(AclSelectorFlag::ALL_CHANNELS)) { + status = SetUser("allchannels"); + if (!status.ok()) { + return status; + } + } + status = SetUser("off"); + if (!status.ok()) { + return status; + } + 
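+    // finish the reset by revoking every command, leaving the user fully locked down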
status = SetUser("-@all"); + if (!status.ok()) { + return status; + } + } else { + auto root = GetRootSelector(); + if (!root) { // does not appear under normal circumstances + LOG(ERROR) << "set user:" << Name() << " not find root selector"; + return pstd::Status::Error("set user error,See pika log for details"); + } + auto status = root->SetSelector(op); + if (!status.ok()) { + return status; + } + } + + return pstd::Status::OK(); +} + +pstd::Status User::CreateSelectorFromOpSet(const std::string& opSet) { + auto selector = std::make_shared(); + auto status = selector->SetSelectorFromOpSet(opSet); + if (!status.ok()) { + return status; + } + AddSelector(selector); + return status; +} + +std::shared_ptr User::GetRootSelector() { + for (const auto& item : selectors_) { + if (item->HasFlags(static_cast(AclSelectorFlag::ROOT))) { + return item; + } + } + return nullptr; +} + +void User::DescribeUser(std::string* str) { + std::unique_lock wl(mutex_); + + if (!aclString_.empty()) { + str->append(aclString_); + return; + } + + // flag + for (const auto& item : Acl::UserFlags) { + if (HasFlags(item.second)) { + aclString_ += " "; + aclString_ += item.first; + } + } + + // password + for (const auto& item : passwords_) { + aclString_ += " #" + item; + } + + // selector + std::string selectorStr; + for (const auto& item : selectors_) { + selectorStr.clear(); + item->ACLDescribeSelector(&selectorStr); + + if (item->HasFlags(static_cast(AclSelectorFlag::ROOT))) { + aclString_ += selectorStr; + } else { + aclString_ += fmt::format(" ({})", selectorStr.data() + 1); + } + } + + str->append(aclString_); +} + +bool User::MatchPassword(const std::string& password) { + std::shared_lock l(mutex_); + return passwords_.find(password) != passwords_.end(); +} + +void User::GetUserDescribe(CmdRes* res) { + std::shared_lock l(mutex_); + + res->AppendArrayLen(12); + + res->AppendString("flags"); + std::vector vector; + for (const auto& item : Acl::UserFlags) { + if (HasFlags(item.second)) { + vector.emplace_back(item.first); + } + } + res->AppendStringVector(vector); + + vector.clear(); + res->AppendString("passwords"); + for (const auto& item : passwords_) { + vector.emplace_back(item); + } + res->AppendStringVector(vector); + + size_t i = 0; + for (const auto& selector : selectors_) { + vector.clear(); + if (i == 0) { // root selector + selector->ACLDescribeSelector(vector); + for (const auto& item : vector) { + res->AppendString(item); + } + + res->AppendString("selectors"); + if (selectors_.size() == 1) { + res->AppendArrayLen(0); + } + ++i; + continue; + } + if (i == 1) { + res->AppendArrayLen(static_cast(selectors_.size()) - 1); + } + selector->ACLDescribeSelector(vector); + res->AppendStringVector(vector); + ++i; + } +} + +AclDeniedCmd User::CheckUserPermission(std::shared_ptr& cmd, const PikaCmdArgsType& argv, int8_t& subCmdIndex, + std::string* errKey) { + std::shared_lock l(mutex_); + + subCmdIndex = -1; + if (cmd->HasSubCommand()) { + subCmdIndex = cmd->SubCmdIndex(argv[1]); + if (subCmdIndex < 0) { + return AclDeniedCmd::NO_SUB_CMD; + } + } + auto keys = cmd->current_key(); + AclDeniedCmd res = AclDeniedCmd::OK; + for (const auto& selector : selectors_) { + res = selector->CheckCanExecCmd(cmd, subCmdIndex, keys, errKey); + if (res == AclDeniedCmd::OK) { + return AclDeniedCmd::OK; + } + } + return res; +} + +std::vector User::AllChannelKey() { + std::vector result; + for (const auto& selector : selectors_) { + for (const auto& item : selector->channels_) { + result.emplace_back(item); + } + } + return 
result; +} +// class User end + +// class Acl +pstd::Status Acl::Initialization() { + AddUser(CreateDefaultUser()); + UpdateDefaultUserPassword(g_pika_conf->requirepass()); + + auto status = LoadUsersAtStartup(); + auto u = GetUser(DefaultLimitUser); + bool limit_exist = true; + if (nullptr == u) { + AddUser(CreatedUser(DefaultLimitUser)); + limit_exist = false; + } + InitLimitUser(g_pika_conf->GetUserBlackList(), limit_exist); + + if (!status.ok()) { + return status; + } + return status; +} + +std::shared_ptr Acl::GetUser(const std::string& userName) { + auto u = users_.find(userName); + if (u == users_.end()) { + return nullptr; + } + return u->second; +} + +std::shared_ptr Acl::GetUserLock(const std::string& userName) { + std::shared_lock rl(mutex_); + auto u = users_.find(userName); + if (u == users_.end()) { + return nullptr; + } + return u->second; +} + +void Acl::AddUser(const std::shared_ptr& user) { users_[user->Name()] = user; } + +void Acl::AddUserLock(const std::shared_ptr& user) { + std::unique_lock wl(mutex_); + users_[user->Name()] = user; +} + +pstd::Status Acl::LoadUsersAtStartup() { + if (!g_pika_conf->users().empty() && !g_pika_conf->acl_file().empty()) { + return pstd::Status::NotSupported("Only one configuration file and acl file can be used", ""); + } + + if (g_pika_conf->users().empty()) { + return LoadUserFromFile(g_pika_conf->acl_file()); + } else { + return LoadUserConfigured(g_pika_conf->users()); + } +} + +pstd::Status Acl::LoadUserConfigured(std::vector& users) { + std::vector userRules; + for (const auto& item : users) { + userRules.clear(); + pstd::StringSplit(item, ' ', userRules); + if (userRules.size() < 2) { + return pstd::Status::Error("acl from configuration file read rules error"); + } + auto user = GetUser(userRules[0]); + if (user) { + if (user->Name() != DefaultUser) { // only `default` users are allowed to repeat + return pstd::Status::Error("acl user: " + user->Name() + " is repeated"); + } else { + user->SetUser("reset"); + } + } else { + user = CreatedUser(userRules[0]); + } + std::vector aclArgc; + auto subRule = std::vector(userRules.begin() + 1, userRules.end()); + ACLMergeSelectorArguments(subRule, &aclArgc); + + for (const auto& rule : aclArgc) { + auto status = user->SetUser(rule); + if (!status.ok()) { + LOG(ERROR) << "load user from configured file error," << status.ToString(); + return status; + } + } + AddUser(user); + } + + return pstd::Status().OK(); +} + +pstd::Status Acl::LoadUserFromFile(std::set* toUnAuthUsers) { + std::unique_lock wl(mutex_); + + for (const auto& item : users_) { + if (item.first != DefaultUser) { + toUnAuthUsers->insert(item.first); + } + } + + auto status = LoadUserFromFile(g_pika_conf->acl_file()); + if (!status.ok()) { + return status; + } + + return status; +} + +pstd::Status Acl::LoadUserFromFile(const std::string& fileName) { + if (fileName.empty()) { + return pstd::Status::OK(); + } + + std::map> users; + std::vector rules; + + bool hasDefaultUser = false; + + std::ifstream ruleFile(fileName); + if (!ruleFile) { + return pstd::Status::IOError(fmt::format("open file {} fail"), fileName); + } + + DEFER { ruleFile.close(); }; + + int lineNum = 0; + std::string lineContent; + while (std::getline(ruleFile, lineContent)) { + ++lineNum; + if (lineContent.empty()) { + continue; + } + + lineContent = pstd::StringTrim(lineContent, "\r\n "); + rules.clear(); + pstd::StringSplit(lineContent, ' ', rules); + if (rules.empty()) { + continue; + } + + if (rules[0] != "user" || rules.size() < 2) { + LOG(ERROR) << 
fmt::format("load user from acl file,line:{} '{}' illegal", lineNum, lineContent); + return pstd::Status::Error(fmt::format("line:{} '{}' illegal", lineNum, lineContent)); + } + + auto user = users.find(rules[1]); + if (user != users.end()) { + // if user is exists, exit + auto err = fmt::format("Duplicate user '{}' found on line {}.", rules[1], lineNum); + LOG(ERROR) << err; + return pstd::Status::Error(err); + } + + std::vector aclArgc; + auto subRule = std::vector(rules.begin() + 2, rules.end()); + ACLMergeSelectorArguments(subRule, &aclArgc); + + auto u = CreatedUser(rules[1]); + for (const auto& item : aclArgc) { + auto status = u->SetUser(item); + if (!status.ok()) { + LOG(ERROR) << "load user from acl file error," << status.ToString(); + return status; + } + } + if (rules[1] == DefaultUser) { + hasDefaultUser = true; + } + users[rules[1]] = u; + } + + if (!hasDefaultUser) { + users[DefaultUser] = GetUser(DefaultUser); + } + + users_ = std::move(users); + + return pstd::Status().OK(); +} + +void Acl::UpdateDefaultUserPassword(const std::string& pass) { + std::unique_lock wl(mutex_); + auto u = GetUser(DefaultUser); + u->SetUser("resetpass"); + if (pass.empty()) { + u->SetUser("nopass"); + } else { + if (g_pika_conf->userpass().empty()) { + u->SetUser("nopass"); + } else { + u->SetUser(">" + pass); + } + } +} + +void Acl::InitLimitUser(const std::string& bl, bool limit_exist) { + auto pass = g_pika_conf->userpass(); + std::vector blacklist; + pstd::StringSplit(bl, ',', blacklist); + std::unique_lock wl(mutex_); + auto u = GetUser(DefaultLimitUser); + if (limit_exist) { + if (!bl.empty()) { + for (auto& cmd : blacklist) { + cmd = pstd::StringTrim(cmd, " "); + u->SetUser("-" + cmd); + } + u->SetUser("on"); + } + if (!pass.empty()) { + u->SetUser(">" + pass); + } + } else { + if (pass.empty()) { + u->SetUser("nopass"); + } else { + u->SetUser(">" + pass); + } + u->SetUser("on"); + u->SetUser("+@all"); + u->SetUser("~*"); + u->SetUser("&*"); + + for (auto& cmd : blacklist) { + cmd = pstd::StringTrim(cmd, " "); + u->SetUser("-" + cmd); + } + } +} +// bool Acl::CheckUserCanExec(const std::shared_ptr& cmd, const PikaCmdArgsType& argv) { cmd->name(); } + +std::shared_ptr Acl::CreateDefaultUser() { + auto defaultUser = std::make_shared(DefaultUser); + defaultUser->SetUser("+@all"); + defaultUser->SetUser("~*"); + defaultUser->SetUser("&*"); + defaultUser->SetUser("on"); + defaultUser->SetUser("nopass"); + return defaultUser; +} + +std::shared_ptr Acl::CreatedUser(const std::string& name) { return std::make_shared(name); } + +pstd::Status Acl::SetUser(const std::string& userName, std::vector& op) { + auto user = GetUserLock(userName); + + std::shared_ptr tempUser = nullptr; + bool add = false; + if (!user) { // if the user not exist, create new user + user = CreatedUser(userName); + add = true; + } else { + tempUser = std::make_shared(*user); + } + + std::vector aclArgc; + ACLMergeSelectorArguments(op, &aclArgc); + + auto status = user->SetUser(aclArgc); + if (!status.ok()) { + return status; + } + + if (add) { + AddUserLock(user); + } else { + KillPubsubClientsIfNeeded(tempUser, user); + } + return pstd::Status::OK(); +} + +void Acl::KillPubsubClientsIfNeeded(const std::shared_ptr& origin, const std::shared_ptr& newUser) { + std::shared_lock l(mutex_); + bool match = true; + for (const auto& newUserSelector : newUser->selectors_) { + if (newUserSelector->HasFlags(static_cast(AclSelectorFlag::ALL_CHANNELS))) { // new user has all channels + return; + } + } + auto newChKey = 
newUser->AllChannelKey(); + + for (const auto& selector : origin->selectors_) { + if (selector->HasFlags(static_cast(AclSelectorFlag::ALL_CHANNELS))) { + match = false; + break; + } + if (!selector->EqualChannel(newChKey)) { + match = false; + break; + } + } + if (match) { + return; + } + g_pika_server->CheckPubsubClientKill(newUser->Name(), newChKey); +} + +uint32_t Acl::GetCommandCategoryFlagByName(const std::string& name) { + for (const auto& item : CommandCategories) { + if (item.first == name) { + return item.second; + } + } + return 0; +} + +std::string Acl::GetCommandCategoryFlagByName(const uint32_t category) { + for (const auto& item : CommandCategories) { + if (item.second == category) { + return item.first; + } + } + + return ""; +} + +std::vector Acl::GetAllCategoryName() { + std::vector result; + result.reserve(CommandCategories.size()); + for (const auto& item : CommandCategories) { + result.emplace_back(item.first); + } + return result; +} + +void Acl::ACLMergeSelectorArguments(std::vector& argv, std::vector* merged) { + bool openBracketStart = false; + std::string selector; + for (const auto& item : argv) { + if (item[0] == '(' && item[item.size() - 1] != ')') { + selector = item; + openBracketStart = true; + continue; + } + + if (openBracketStart) { + selector += " " + item; + if (item[item.size() - 1] == ')') { + openBracketStart = false; + merged->emplace_back(selector); + } + continue; + } + + merged->emplace_back(item); + } +} + +std::shared_ptr Acl::Auth(const std::string& userName, const std::string& password) { + std::shared_lock l(mutex_); + + auto user = GetUser(userName); + if (!user) { + return nullptr; + } + if (user->HasFlags(static_cast(AclUserFlag::DISABLED))) { + return nullptr; + } + + if (user->HasFlags(static_cast(AclUserFlag::NO_PASS))) { + return user; + } + + if (user->MatchPassword(pstd::sha256(password))) { + return user; + } + return nullptr; +} + +std::vector Acl::Users() { + std::shared_lock l(mutex_); + std::vector result; + result.reserve(users_.size()); + + for (const auto& item : users_) { + result.emplace_back(item.first); + } + + return result; +} + +void Acl::DescribeAllUser(std::vector* content) { + std::shared_lock l(mutex_); + content->reserve(users_.size()); + + for (const auto& item : users_) { + std::string saveContent; + saveContent += "user "; + saveContent += item.first; + + item.second->DescribeUser(&saveContent); + content->emplace_back(saveContent); + } +} + +pstd::Status Acl::SaveToFile() { + std::string aclFileName = g_pika_conf->acl_file(); + if (aclFileName.empty()) { + LOG(ERROR) << "save user to acl file, file name is empty"; + return pstd::Status::Error("acl file name is empty"); + } + + std::unique_lock wl(mutex_); + + std::unique_ptr file; + const std::string tmpFile = aclFileName + ".tmp"; + auto status = pstd::NewWritableFile(tmpFile, file); + if (!status.ok()) { + auto error = fmt::format("open acl user file:{} fail, error:{}", aclFileName, status.ToString()); + LOG(ERROR) << error; + return pstd::Status::Error(error); + } + + std::string saveContent; + for (const auto& item : users_) { + saveContent += "user "; + saveContent += item.first; + + item.second->DescribeUser(&saveContent); + saveContent += "\n"; + } + + file->Append(saveContent); + file->Sync(); + file->Close(); + + if (pstd::RenameFile(tmpFile, aclFileName) < 0) { // rename fail + return pstd::Status::Error("save acl rule to file fail. 
specific information see pika log"); + } + return pstd::Status::OK(); +} + +std::set Acl::DeleteUser(const std::vector& userNames) { + std::unique_lock wl(mutex_); + + std::set delUserNames; + for (const auto& userName : userNames) { + if (users_.erase(userName)) { + delUserNames.insert(userName); + } + } + + return delUserNames; +} + +std::array, 21> Acl::CommandCategories = {{ + {"keyspace", static_cast(AclCategory::KEYSPACE)}, + {"read", static_cast(AclCategory::READ)}, + {"write", static_cast(AclCategory::WRITE)}, + {"set", static_cast(AclCategory::SET)}, + {"sortedset", static_cast(AclCategory::SORTEDSET)}, + {"list", static_cast(AclCategory::LIST)}, + {"hash", static_cast(AclCategory::HASH)}, + {"string", static_cast(AclCategory::STRING)}, + {"bitmap", static_cast(AclCategory::BITMAP)}, + {"hyperloglog", static_cast(AclCategory::HYPERLOGLOG)}, + {"geo", static_cast(AclCategory::GEO)}, + {"stream", static_cast(AclCategory::STREAM)}, + {"pubsub", static_cast(AclCategory::PUBSUB)}, + {"admin", static_cast(AclCategory::ADMIN)}, + {"fast", static_cast(AclCategory::FAST)}, + {"slow", static_cast(AclCategory::SLOW)}, + {"blocking", static_cast(AclCategory::BLOCKING)}, + {"dangerous", static_cast(AclCategory::DANGEROUS)}, + {"connection", static_cast(AclCategory::CONNECTION)}, + {"transaction", static_cast(AclCategory::TRANSACTION)}, + {"scripting", static_cast(AclCategory::SCRIPTING)}, +}}; + +std::array, 3> Acl::UserFlags = {{ + {"on", static_cast(AclUserFlag::ENABLED)}, + {"off", static_cast(AclUserFlag::DISABLED)}, + {"nopass", static_cast(AclUserFlag::NO_PASS)}, +}}; + +std::array, 3> Acl::SelectorFlags = {{ + {"allkeys", static_cast(AclSelectorFlag::ALL_KEYS)}, + {"allchannels", static_cast(AclSelectorFlag::ALL_CHANNELS)}, + {"allcommands", static_cast(AclSelectorFlag::ALL_COMMANDS)}, +}}; + +const std::string Acl::DefaultUser = "default"; +const std::string Acl::DefaultLimitUser = "limit"; +const int64_t Acl::LogGroupingMaxTimeDelta = 60000; + +void Acl::AddLogEntry(int32_t reason, int32_t context, const std::string& username, const std::string& object, + const std::string& cInfo) { + int64_t nowUnix = + std::chrono::duration_cast(std::chrono::system_clock::now().time_since_epoch()) + .count(); + { + std::unique_lock wl(mutex_); + for (const auto& item : logEntries_) { + if (item->Match(reason, context, nowUnix, object, username)) { + item->AddEntry(cInfo, nowUnix); + return; + } + } + auto entry = std::make_unique(reason, context, object, username, nowUnix, cInfo); + logEntries_.push_front(std::move(entry)); + + auto maxLen = g_pika_conf->acl_log_max_len(); + if (logEntries_.size() > maxLen) { // remove overflow log + if (maxLen == 0) { + logEntries_.clear(); + } else { + logEntries_.erase(std::next(logEntries_.begin(), maxLen), logEntries_.end()); + } + } + } +} + +void Acl::GetLog(long count, CmdRes* res) { + std::shared_lock rl(mutex_); + auto size = static_cast(logEntries_.size()); + if (count == -1) { + count = size; + } + if (count > size) { + count = size; + } + if (count == 0) { + res->AppendArrayLen(0); + return; + } + + std::vector items; + res->AppendArrayLen(static_cast(count)); + items.reserve(14); + for (const auto& item : logEntries_) { + items.clear(); + item->GetReplyInfo(&items); + res->AppendStringVector(items); + count--; + if (count == 0) { + break; + } + } +} + +void Acl::ResetLog() { + std::unique_lock wl(mutex_); + logEntries_.clear(); +} +// class Acl end + +// class ACLLogEntry +bool ACLLogEntry::Match(int32_t reason, int32_t context, int64_t ctime, const 
std::string& object, + const std::string& username) { + if (reason_ != reason) { + return false; + } + if (context_ != context) { + return false; + } + auto delta = ctime_ - ctime; + if (delta > Acl::LogGroupingMaxTimeDelta) { + return false; + }; + if (object_ != object) { + return false; + } + if (username_ != username) { + return false; + } + return true; +} + +void ACLLogEntry::AddEntry(const std::string& cinfo, u_int64_t ctime) { + cinfo_ = cinfo; + ctime_ = ctime; + ++count_; +} + +void ACLLogEntry::GetReplyInfo(std::vector* vector) { + vector->emplace_back("count"); + vector->emplace_back(std::to_string(count_)); + vector->emplace_back("reason"); + switch (reason_) { + case static_cast(AclDeniedCmd::CMD): + vector->emplace_back("command"); + break; + case static_cast(AclDeniedCmd::KEY): + vector->emplace_back("key"); + break; + case static_cast(AclDeniedCmd::CHANNEL): + vector->emplace_back("channel"); + break; + case static_cast(AclDeniedCmd::NO_AUTH): + vector->emplace_back("auth"); + break; + default: + vector->emplace_back("unknown"); + break; + } + + vector->emplace_back("context"); + switch (context_) { + case static_cast(AclLogCtx::TOPLEVEL): + vector->emplace_back("toplevel"); + break; + case static_cast(AclLogCtx::MULTI): + vector->emplace_back("multi"); + break; + case static_cast(AclLogCtx::LUA): + vector->emplace_back("lua"); + break; + default: + vector->emplace_back("unknown"); + break; + } + + vector->emplace_back("object"); + vector->emplace_back(object_); + vector->emplace_back("username"); + vector->emplace_back(username_); + vector->emplace_back("age-seconds"); + int64_t nowUnix = + std::chrono::duration_cast(std::chrono::system_clock::now().time_since_epoch()) + .count(); + + char latitude[32]; + pstd::d2string(latitude, 32, static_cast(nowUnix - ctime_) / 1000); + vector->emplace_back(latitude); + vector->emplace_back("client-info"); + vector->emplace_back(cinfo_); +} + +// class ACLLogEntry end + +// class AclSelector +AclSelector::AclSelector(uint32_t flag) : flags_(flag) { + if (g_pika_conf->acl_pubsub_default()) { + AddFlags(static_cast(AclSelectorFlag::ALL_CHANNELS)); + } +} + +AclSelector::AclSelector(const AclSelector& selector) { + flags_ = selector.Flags(); + allowedCommands_ = selector.allowedCommands_; + subCommand_ = selector.subCommand_; + channels_ = selector.channels_; + commandRules_ = selector.commandRules_; + + for (const auto& item : selector.patterns_) { + auto pattern = std::make_shared(); + pattern->flags = item->flags; + pattern->pattern = item->pattern; + patterns_.emplace_back(pattern); + } +} + +pstd::Status AclSelector::SetSelector(const std::string& op) { + if (!strcasecmp(op.data(), "allkeys") || op == "~*") { + AddFlags(static_cast(AclSelectorFlag::ALL_KEYS)); + patterns_.clear(); + } else if (!strcasecmp(op.data(), "resetkeys")) { + DecFlags(static_cast(AclSelectorFlag::ALL_KEYS)); + patterns_.clear(); + } else if (!strcasecmp(op.data(), "allchannels") || !strcasecmp(op.data(), "&*")) { + AddFlags(static_cast(AclSelectorFlag::ALL_CHANNELS)); + channels_.clear(); + } else if (!strcasecmp(op.data(), "resetchannels")) { + DecFlags(static_cast(AclSelectorFlag::ALL_CHANNELS)); + channels_.clear(); + } else if (!strcasecmp(op.data(), "allcommands") || !strcasecmp(op.data(), "+@all")) { + SetAllCommandSelector(); + } else if (!strcasecmp(op.data(), "nocommands") || !strcasecmp(op.data(), "-@all")) { + RestAllCommandSelector(); + } else if (op[0] == '~' || op[0] == '%') { + if (HasFlags(static_cast(AclSelectorFlag::ALL_KEYS))) { + return 
+
+pstd::Status AclSelector::SetSelector(const std::string& op) {
+  if (!strcasecmp(op.data(), "allkeys") || op == "~*") {
+    AddFlags(static_cast<uint32_t>(AclSelectorFlag::ALL_KEYS));
+    patterns_.clear();
+  } else if (!strcasecmp(op.data(), "resetkeys")) {
+    DecFlags(static_cast<uint32_t>(AclSelectorFlag::ALL_KEYS));
+    patterns_.clear();
+  } else if (!strcasecmp(op.data(), "allchannels") || !strcasecmp(op.data(), "&*")) {
+    AddFlags(static_cast<uint32_t>(AclSelectorFlag::ALL_CHANNELS));
+    channels_.clear();
+  } else if (!strcasecmp(op.data(), "resetchannels")) {
+    DecFlags(static_cast<uint32_t>(AclSelectorFlag::ALL_CHANNELS));
+    channels_.clear();
+  } else if (!strcasecmp(op.data(), "allcommands") || !strcasecmp(op.data(), "+@all")) {
+    SetAllCommandSelector();
+  } else if (!strcasecmp(op.data(), "nocommands") || !strcasecmp(op.data(), "-@all")) {
+    RestAllCommandSelector();
+  } else if (op[0] == '~' || op[0] == '%') {
+    if (HasFlags(static_cast<uint32_t>(AclSelectorFlag::ALL_KEYS))) {
+      return pstd::Status::Error(
+          fmt::format("Error in ACL SETUSER modifier '{}': Adding a pattern after the * "
+                      "pattern (or the 'allkeys' flag) is not valid and does not have any effect."
+                      " Try 'resetkeys' to start with an empty list of patterns",
+                      op));
+    }
+    uint32_t flags = 0;
+    size_t offset = 1;
+    if (op[0] == '%') {
+      for (; offset < op.size(); offset++) {
+        if (toupper(op[offset]) == 'R' && !(flags & static_cast<uint32_t>(AclPermission::READ))) {
+          flags |= static_cast<uint32_t>(AclPermission::READ);
+        } else if (toupper(op[offset]) == 'W' && !(flags & static_cast<uint32_t>(AclPermission::WRITE))) {
+          flags |= static_cast<uint32_t>(AclPermission::WRITE);
+        } else if (op[offset] == '~') {
+          offset++;
+          break;
+        } else {
+          return pstd::Status::Error("Syntax error");
+        }
+      }
+    } else {
+      flags = static_cast<uint32_t>(AclPermission::ALL);
+    }
+
+    if (pstd::isspace(op)) {
+      return pstd::Status::Error("Syntax error");
+    }
+
+    InsertKeyPattern(op.substr(offset, std::string::npos), flags);
+    DecFlags(static_cast<uint32_t>(AclSelectorFlag::ALL_KEYS));
+  } else if (op[0] == '&') {
+    if (HasFlags(static_cast<uint32_t>(AclSelectorFlag::ALL_CHANNELS))) {
+      return pstd::Status::Error(
+          "Adding a pattern after the * pattern (or the 'allchannels' flag) is not valid and does not have any effect. "
+          "Try 'resetchannels' to start with an empty list of channels");
+    }
+    if (pstd::isspace(op)) {
+      return pstd::Status::Error("Syntax error");
+    }
+    InsertChannel(op.substr(1, std::string::npos));
+    DecFlags(static_cast<uint32_t>(AclSelectorFlag::ALL_CHANNELS));
+  } else if (op[0] == '+' && op[1] != '@') {
+    auto status = SetCommandOp(op, true);
+    if (!status.ok()) {
+      return status;
+    }
+    UpdateCommonRule(op.data() + 1, true);
+  } else if (op[0] == '-' && op[1] != '@') {
+    auto status = SetCommandOp(op, false);
+    if (!status.ok()) {
+      return status;
+    }
+    UpdateCommonRule(op.data() + 1, false);
+  } else if ((op[0] == '+' || op[0] == '-') && op[1] == '@') {
+    bool allow = (op[0] == '+');
+    if (!SetSelectorCommandBitsForCategory(op.data() + 1, allow)) {
+      return pstd::Status::Error("Unknown command or category name in ACL");
+    }
+  } else {
+    return pstd::Status::Error("Syntax error");
+  }
+  return pstd::Status();
+}
+
+pstd::Status AclSelector::SetSelectorFromOpSet(const std::string& opSet) {
+  if (opSet[0] != '(' || opSet[opSet.size() - 1] != ')') {
+    return pstd::Status::Error("Unmatched parenthesis in acl selector starting at: " + opSet);
+  }
+
+  std::vector<std::string> args;
+  pstd::StringSplit(opSet.substr(1, opSet.size() - 2), ' ', args);
+
+  for (const auto& item : args) {
+    auto status = SetSelector(item);
+    if (!status.ok()) {
+      return status;
+    }
+  }
+  return pstd::Status::OK();
+}
+
+bool AclSelector::SetSelectorCommandBitsForCategory(const std::string& categoryName, bool allow) {
+  std::string lowerCategoryName(categoryName);
+  std::transform(categoryName.begin(), categoryName.end(), lowerCategoryName.begin(), ::tolower);
+  auto category = Acl::GetCommandCategoryFlagByName(lowerCategoryName.data() + 1);  // skip the leading '@'
+  if (!category) {  // category not found
+    return false;
+  }
+  UpdateCommonRule(categoryName, allow);
+  for (const auto& cmd : *g_pika_cmd_table_manager->cmds_) {
+    if (cmd.second->AclCategory() & category) {  // this cmd belongs to the category
+      ChangeSelector(cmd.second.get(), allow);
+    }
+  }
+  return true;
+}
+
+void AclSelector::SetAllCommandSelector() {
+  AddFlags(static_cast<uint32_t>(AclSelectorFlag::ALL_COMMANDS));
+  allowedCommands_.set();
+  for (const auto& cmd : *g_pika_cmd_table_manager->cmds_) {
+    if (cmd.second->HasSubCommand()) {
+      SetSubCommand(cmd.second->GetCmdId());
+    }
+  }
+  CleanCommandRule();
+}
+
+void AclSelector::RestAllCommandSelector() {
+  DecFlags(static_cast<uint32_t>(AclSelectorFlag::ALL_COMMANDS));
+  allowedCommands_.reset();
+  ResetSubCommand();
+  CleanCommandRule();
+}
+
+void AclSelector::InsertKeyPattern(const std::string& str, uint32_t flags) {
+  for (const auto& item : patterns_) {
+    if (item->pattern == str) {
+      item->flags |= flags;
+      return;
+    }
+  }
+  auto pattern = std::make_shared<AclKeyPattern>();
+  pattern->flags = flags;
+  pattern->pattern = str;
+  patterns_.emplace_back(pattern);
+}
+
+void AclSelector::InsertChannel(const std::string& str) {
+  for (const auto& item : channels_) {
+    if (item == str) {
+      return;
+    }
+  }
+  channels_.emplace_back(str);
+}
+
+void AclSelector::ChangeSelector(const Cmd* cmd, bool allow) {
+  if (allow) {
+    allowedCommands_.set(cmd->GetCmdId());
+    if (cmd->HasSubCommand()) {
+      SetSubCommand(cmd->GetCmdId());
+    }
+  } else {
+    allowedCommands_.reset(cmd->GetCmdId());
+    DecFlags(static_cast<uint32_t>(AclSelectorFlag::ALL_COMMANDS));
+    if (cmd->HasSubCommand()) {
+      ResetSubCommand(cmd->GetCmdId());
+    }
+  }
+}
+
+void AclSelector::ChangeSelector(const std::shared_ptr<Cmd>& cmd, bool allow) { ChangeSelector(cmd.get(), allow); }
+
+pstd::Status AclSelector::ChangeSelector(const std::shared_ptr<Cmd>& cmd, const std::string& subCmd, bool allow) {
+  if (cmd->HasSubCommand()) {
+    auto index = cmd->SubCmdIndex(subCmd);
+    if (index == -1) {
+      return pstd::Status::Error("Unknown command or category name in ACL");
+    }
+    if (allow) {
+      SetSubCommand(cmd->GetCmdId(), index);
+    } else {
+      ResetSubCommand(cmd->GetCmdId(), index);
+    }
+  }
+  return pstd::Status::OK();
+}
+
+void AclSelector::SetSubCommand(uint32_t cmdId) { subCommand_[cmdId] = 0xFFFFFFFF; }
+
+void AclSelector::SetSubCommand(uint32_t cmdId, uint32_t subCmdIndex) { subCommand_[cmdId] |= (1 << subCmdIndex); }
+
+void AclSelector::ResetSubCommand() { subCommand_.clear(); }
+
+void AclSelector::ResetSubCommand(uint32_t cmdId) { subCommand_[cmdId] = 0; }
+
+void AclSelector::ResetSubCommand(uint32_t cmdId, uint32_t subCmdIndex) {
+  DecFlags(static_cast<uint32_t>(AclSelectorFlag::ALL_COMMANDS));
+  subCommand_[cmdId] &= ~(1 << subCmdIndex);
+}
+
+bool AclSelector::CheckSubCommand(uint32_t cmdId, uint32_t subCmdIndex) {
+  if (static_cast<int32_t>(subCmdIndex) < 0) {  // guard against a negative index converted on the way in
+    return false;
+  }
+  auto bit = subCommand_.find(cmdId);
+  if (bit == subCommand_.end()) {
+    return false;
+  }
+
+  return bit->second & (1 << subCmdIndex);
+}
+
+void AclSelector::ACLDescribeSelector(std::string* str) {
+  if (HasFlags(static_cast<uint32_t>(AclSelectorFlag::ALL_KEYS))) {
+    str->append(" ~*");
+  } else {
+    for (const auto& item : patterns_) {
+      str->append(" ");
+      item->ToString(str);
+    }
+  }
+
+  // Pub/sub channel patterns
+  if (HasFlags(static_cast<uint32_t>(AclSelectorFlag::ALL_CHANNELS))) {
+    str->append(" &*");
+  } else if (channels_.empty()) {
+    str->append(" resetchannels");
+  } else {
+    for (const auto& item : channels_) {
+      str->append(" &" + item);
+    }
+  }
+
+  // Command rules
+  DescribeSelectorCommandRules(str);
+}
+
+void AclSelector::ACLDescribeSelector(std::vector<std::string>& vector) {
+  vector.emplace_back("commands");
+  if (allowedCommands_.test(USER_COMMAND_BITS_COUNT - 1)) {
+    if (commandRules_.empty()) {
+      vector.emplace_back("+@all");
+    } else {
+      vector.emplace_back("+@all " + commandRules_);
+    }
+  } else {
+    if (commandRules_.empty()) {
+      vector.emplace_back("-@all");
+    } else {
+      vector.emplace_back("-@all " + commandRules_);
+    }
+  }
+
+  vector.emplace_back("key");
+  if (HasFlags(static_cast<uint32_t>(AclSelectorFlag::ALL_KEYS))) {
+    vector.emplace_back("~*");
+  } else if (patterns_.empty()) {
+    vector.emplace_back("");
+  } else {
+    std::string keys;
+    for (auto it = patterns_.begin(); it != patterns_.end(); ++it) {
+      if (it != patterns_.begin()) {
+        keys += " ";
+      }
+      (*it)->ToString(&keys);
+    }
+    vector.emplace_back(keys);
+  }
+
+  vector.emplace_back("channels");
+  if (HasFlags(static_cast<uint32_t>(AclSelectorFlag::ALL_CHANNELS))) {
+    vector.emplace_back("&*");
+  } else if (channels_.empty()) {
+    vector.emplace_back("");
+  } else if (channels_.size() == 1) {
+    vector.emplace_back("&" + channels_.front());
+  } else {
+    vector.emplace_back(fmt::format("&{}", fmt::join(channels_, " &")));
+  }
+}
+
+AclDeniedCmd AclSelector::CheckCanExecCmd(std::shared_ptr<Cmd>& cmd, int8_t subCmdIndex,
+                                          const std::vector<std::string>& keys, std::string* errKey) {
+  if (!HasFlags(static_cast<uint32_t>(AclSelectorFlag::ALL_COMMANDS)) && !(cmd->flag() & kCmdFlagsNoAuth)) {
+    if (subCmdIndex < 0) {
+      if (!allowedCommands_.test(cmd->GetCmdId())) {
+        return AclDeniedCmd::CMD;
+      }
+    } else {  // the command has subcommands
+      if (!CheckSubCommand(cmd->GetCmdId(), subCmdIndex)) {
+        return AclDeniedCmd::CMD;
+      }
+    }
+  }
+
+  // key match
+  if (!HasFlags(static_cast<uint32_t>(AclSelectorFlag::ALL_KEYS)) && !keys.empty() && !cmd->hasFlag(kCmdFlagsPubSub)) {
+    for (const auto& key : keys) {
+      // skip empty keys: some write-category commands carry empty key slots
+      if (!key.empty() && !CheckKey(key, cmd->flag())) {
+        if (errKey) {
+          *errKey = key;
+        }
+        return AclDeniedCmd::KEY;
+      }
+    }
+  }
+
+  // channel match
+  if (!HasFlags(static_cast<uint32_t>(AclSelectorFlag::ALL_CHANNELS)) && cmd->hasFlag(kCmdFlagsPubSub)) {
+    bool isPattern = cmd->name() == kCmdNamePSubscribe || cmd->name() == kCmdNamePUnSubscribe;
+    for (const auto& key : keys) {
+      if (!CheckChannel(key, isPattern)) {
+        if (errKey) {
+          *errKey = key;
+        }
+        return AclDeniedCmd::CHANNEL;
+      }
+    }
+  }
+  return AclDeniedCmd::OK;
+}
+
+bool AclSelector::EqualChannel(const std::vector<std::string>& allChannel) {
+  for (const auto& item : channels_) {
+    if (std::count(allChannel.begin(), allChannel.end(), item) == 0) {
+      return false;
+    }
+  }
+  return true;
+}
+
+void AclSelector::DescribeSelectorCommandRules(std::string* str) {
+  allowedCommands_.test(USER_COMMAND_BITS_COUNT - 1) ? str->append(" +@all") : str->append(" -@all");
+
+  // category rules
+  if (!commandRules_.empty()) {
+    str->append(" ");
+    str->append(commandRules_);
+  }
+}
+
+pstd::Status AclSelector::SetCommandOp(const std::string& op, bool allow) {
+  std::string _op(op.data() + 1);
+  pstd::StringToLower(_op);
+  if (_op.find('|') == std::string::npos) {
+    auto cmd = g_pika_cmd_table_manager->GetCmd(_op);
+    if (!cmd) {
+      return pstd::Status::Error("Unknown command or category name in ACL");
+    }
+    ChangeSelector(cmd, allow);
+    return pstd::Status::OK();
+  } else {
+    /* Split the command and subcommand parts. */
+    std::vector<std::string> cmds;
+    pstd::StringSplit(_op, '|', cmds);
+
+    /* The subcommand cannot be empty, so things like CONFIG|
+     * are syntax errors of course. */
+    if (cmds.size() != 2) {
+      return pstd::Status::Error("Allowing first-arg of a subcommand is not supported");
+    }
+
+    auto parentCmd = g_pika_cmd_table_manager->GetCmd(cmds[0]);
+    if (!parentCmd) {
+      return pstd::Status::Error("Unknown command or category name in ACL");
+    }
+
+    return ChangeSelector(parentCmd, cmds[1], allow);
+
+    // Redis' ACL `first-arg` feature is not supported
+  }
+}
+
+void AclSelector::UpdateCommonRule(const std::string& rule, bool allow) {
+  std::string _rule(rule);
+  pstd::StringToLower(_rule);
+  RemoveCommonRule(_rule);
+  if (commandRules_.empty()) {
+    commandRules_ += allow ? "+" : "-";
+  } else {
+    commandRules_ += allow ? " +" : " -";
+  }
+  commandRules_ += _rule;
+}
+
+void AclSelector::RemoveCommonRule(const std::string& rule) {
+  if (commandRules_.empty()) {
+    return;
+  }
+
+  const size_t ruleLen = rule.size();
+
+  size_t start = 0;
+  while (true) {
+    start = commandRules_.find(rule, start);
+    if (start == std::string::npos) {
+      return;
+    }
+
+    size_t delNum = 0;  // length to delete this round
+    if (start + ruleLen >= commandRules_.size()) {  // the rest of commandRules_ equals rule, delete to the end
+      delNum = ruleLen;
+      --start;
+      ++delNum;
+    } else {
+      if (commandRules_[start + ruleLen] == ' ') {
+        delNum = ruleLen + 1;
+      } else if (commandRules_[start + ruleLen] == '|') {
+        size_t end = commandRules_.find(' ', start);  // find the next ' '
+        if (end == std::string::npos) {  // no ' ' found, delete to the end
+          delNum = commandRules_.size() - start;
+          --start;
+          ++delNum;
+        } else {
+          delNum = end + 1 - start;
+        }
+      } else {
+        start += ruleLen;
+        continue;  // no match
+      }
+    }
+
+    if (start > 0) {  // also strip the preceding '+'/'-' sign
+      --start;
+      ++delNum;  // start moved one back, so delNum grows by one
+    }
+
+    commandRules_.erase(start, delNum);
+  }
+}
+
+void AclSelector::CleanCommandRule() { commandRules_.clear(); }
+
+bool AclSelector::CheckKey(const std::string& key, const uint32_t cmdFlag) {
+  uint32_t selectorFlag = 0;
+  if (cmdFlag & kCmdFlagsRead) {
+    selectorFlag |= static_cast<uint32_t>(AclPermission::READ);
+  }
+  if (cmdFlag & kCmdFlagsWrite) {
+    selectorFlag |= static_cast<uint32_t>(AclPermission::WRITE);
+  }
+  if ((selectorFlag & static_cast<uint32_t>(AclPermission::WRITE)) &&
+      (selectorFlag & static_cast<uint32_t>(AclPermission::READ))) {
+    selectorFlag |= static_cast<uint32_t>(AclPermission::ALL);
+  }
+
+  for (const auto& item : patterns_) {
+    if ((item->flags & selectorFlag) != selectorFlag) {
+      continue;
+    }
+
+    if (pstd::stringmatchlen(item->pattern.data(), static_cast<int>(item->pattern.size()), key.data(),
+                             static_cast<int>(key.size()), 0)) {
+      return true;
+    }
+  }
+  return false;
+}
+
+bool AclSelector::CheckChannel(const std::string& key, bool isPattern) {
+  for (const auto& channel : channels_) {
+    if (isPattern ? (channel == key)
+                  : (pstd::stringmatchlen(channel.data(), static_cast<int>(channel.size()), key.data(),
+                                          static_cast<int>(key.size()), 0))) {
+      return true;
+    }
+  }
+  return false;
+}
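+
+// NOTE(editor): worked example of the flag math in CheckKey, using a
+// hypothetical pattern. "%R~cache:*" stores a pattern whose flags are
+// AclPermission::READ, so a read-flagged command on "cache:1" passes while a
+// write-flagged command on the same key is denied with AclDeniedCmd::KEY:
+//
+//   AclSelector sel(0);
+//   sel.SetSelector("%R~cache:*");            // read-only grant on cache:*
+//   sel.CheckKey("cache:1", kCmdFlagsRead);   // -> true
+//   sel.CheckKey("cache:1", kCmdFlagsWrite);  // -> false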
+// class AclSelector end
\ No newline at end of file
diff --git a/tools/pika_migrate/src/build_version.cc.in b/tools/pika_migrate/src/build_version.cc.in
new file mode 100644
index 0000000000..1d341ef321
--- /dev/null
+++ b/tools/pika_migrate/src/build_version.cc.in
@@ -0,0 +1,8 @@
+// Copyright (c) 2015-present, Qihoo, Inc. All rights reserved.
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree. An additional grant
+// of patent rights can be found in the PATENTS file in the same directory.
+
+const char* pika_build_git_sha =
+    "pika_git_sha:@PIKA_GIT_SHA@";
+const char* pika_build_compile_date = "@PIKA_BUILD_DATE@";
diff --git a/tools/pika_migrate/src/cache/CMakeLists.txt b/tools/pika_migrate/src/cache/CMakeLists.txt
new file mode 100644
index 0000000000..e61b2eacdc
--- /dev/null
+++ b/tools/pika_migrate/src/cache/CMakeLists.txt
@@ -0,0 +1,20 @@
+cmake_minimum_required (VERSION 3.18)
+
+set (CMAKE_CXX_STANDARD 17)
+project (cache)
+
+aux_source_directory(./src DIR_SRCS)
+include_directories(include)
+add_library(cache STATIC ${DIR_SRCS})
+add_dependencies(cache net protobuf glog gflags rediscache ${LIBUNWIND_NAME})
+
+target_link_libraries(cache
+    PUBLIC ${GTEST_LIBRARY}
+    PUBLIC pstd
+    PUBLIC ${ROCKSDB_LIBRARY}
+    PUBLIC storage
+    PUBLIC ${GLOG_LIBRARY}
+    PUBLIC ${GFLAGS_LIBRARY}
+    PUBLIC ${LIBUNWIND_LIBRARY}
+    PUBLIC ${REDISCACHE_LIBRARY}
+    )
\ No newline at end of file
diff --git a/tools/pika_migrate/src/cache/include/cache.h b/tools/pika_migrate/src/cache/include/cache.h
new file mode 100644
index 0000000000..5f5d6a2959
--- /dev/null
+++ b/tools/pika_migrate/src/cache/include/cache.h
@@ -0,0 +1,179 @@
+// Copyright (c) 2023-present, Qihoo, Inc. All rights reserved.
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree. An additional grant
+// of patent rights can be found in the PATENTS file in the same directory.
+
+
+#ifndef __CACHE_H__
+#define __CACHE_H__
+
+#include <unistd.h>
+
+// NOTE(editor): the six standard headers below are reconstructed; the exact
+// original names were lost in extraction.
+#include <atomic>
+#include <list>
+#include <map>
+#include <memory>
+#include <string>
+#include <vector>
+
+extern "C" {
+  #include "rediscache/redis.h"
+}
+
+#include "config.h"
+#include "pstd_status.h"
+#include "storage/storage.h"
+
+namespace cache {
+
+using Status = rocksdb::Status;
+
+class RedisCache {
+public:
+  RedisCache();
+  ~RedisCache();
+
+  // Server APIs
+  static void SetConfig(CacheConfig *cfg);
+  static uint64_t GetUsedMemory(void);
+  static void GetHitAndMissNum(int64_t *hits, int64_t *misses);
+  static void ResetHitAndMissNum(void);
+  Status Open(void);
+  int32_t ActiveExpireCycle(void);
+
+  // Normal Commands
+  bool Exists(std::string& key);
+  int64_t DbSize(void);
+  void FlushCache(void);
+
+  Status Del(const std::string& key);
+  Status Expire(std::string& key, int64_t ttl);
+  Status Expireat(std::string& key, int64_t ttl);
+  Status TTL(std::string& key, int64_t *ttl);
+  Status Persist(std::string& key);
+  Status Type(std::string& key, std::string *value);
+  Status RandomKey(std::string *key);
+
+  // String Commands
+  Status Set(std::string& key, std::string &value, int64_t ttl);
+  Status SetWithoutTTL(std::string& key, std::string &value);
+  Status Setnx(std::string& key, std::string &value, int64_t ttl);
+  Status SetnxWithoutTTL(std::string& key, std::string &value);
+  Status Setxx(std::string& key, std::string &value, int64_t ttl);
+  Status SetxxWithoutTTL(std::string& key, std::string &value);
+  Status Get(const std::string& key, std::string *value);
+  Status Incr(std::string& key);
+  Status Decr(std::string& key);
+  Status IncrBy(std::string& key, int64_t incr);
+  Status DecrBy(std::string& key, int64_t incr);
+  Status Incrbyfloat(std::string& key, double incr);
+  Status Append(std::string& key, std::string &value);
+  Status GetRange(std::string& key, int64_t start, int64_t end, std::string *value);
+  Status SetRange(std::string& key, int64_t start, std::string &value);
+  Status Strlen(std::string& key, int32_t *len);
+
+  // Hash Commands
+  Status HDel(std::string& key, std::vector<std::string> &fields);
+  Status HSet(std::string& key, std::string &field, std::string &value);
+  Status HSetnx(std::string& key, std::string &field, std::string &value);
+  Status HMSet(std::string& key, std::vector<storage::FieldValue> &fvs);
+  Status HGet(std::string& key, std::string &field, std::string *value);
+  Status HMGet(std::string& key,
+               std::vector<std::string> &fields,
+               std::vector<storage::ValueStatus>* vss);
+  Status HGetall(std::string& key, std::vector<storage::FieldValue> *fvs);
+  Status HKeys(std::string& key, std::vector<std::string> *fields);
+  Status HVals(std::string& key, std::vector<std::string> *values);
+  Status HExists(std::string& key, std::string &field);
+  Status HIncrby(std::string& key, std::string &field, int64_t value);
+  Status HIncrbyfloat(std::string& key, std::string &field, double value);
+  Status HLen(const std::string& key, uint64_t *len);
+  Status HStrlen(std::string& key, std::string &field, uint64_t *len);
+
+  // List Commands
+  Status LIndex(std::string& key, int64_t index, std::string *element);
+  Status LInsert(std::string& key, storage::BeforeOrAfter &before_or_after,
+                 std::string &pivot, std::string &value);
+  Status LLen(const std::string& key, uint64_t *len);
+  Status LPop(std::string& key, std::string *element);
+  Status LPush(std::string& key, std::vector<std::string> &values);
+  Status LPushx(std::string& key, std::vector<std::string> &values);
+  Status LRange(std::string& key, int64_t start, int64_t stop, std::vector<std::string> *values);
+  Status LRem(std::string& key, int64_t count, std::string &value);
+  Status LSet(std::string& key, int64_t index, std::string &value);
&value); + Status LTrim(std::string& key, int64_t start, int64_t stop); + Status RPop(std::string& key, std::string *element); + Status RPush(std::string& key, std::vector &values); + Status RPushx(std::string& key, std::vector &values); + + // Set Commands + Status SAdd(std::string& key, std::vector &members); + Status SCard(const std::string& key, uint64_t *len); + Status SIsmember(std::string& key, std::string& member); + Status SMembers(std::string& key, std::vector *members); + Status SRem(std::string& key, std::vector &members); + Status SRandmember(std::string& key, int64_t count, std::vector *members); + + // Zset Commands + Status ZAdd(std::string& key, std::vector &score_members); + Status ZCard(const std::string& key, uint64_t *len); + Status ZCount(std::string& key, std::string &min, std::string &max, uint64_t *len); + Status ZIncrby(std::string& key, std::string& member, double increment); + Status ZRange(std::string& key, + int64_t start, int64_t stop, + std::vector *score_members); + Status ZRangebyscore(std::string& key, + std::string &min, std::string &max, + std::vector *score_members, + int64_t offset = 0, int64_t count = -1); + Status ZRank(std::string& key, std::string& member, int64_t *rank); + Status ZRem(std::string& key, std::vector &members); + Status ZRemrangebyrank(std::string& key, std::string &min, std::string &max); + Status ZRemrangebyscore(std::string& key, std::string &min, std::string &max); + Status ZRevrange(std::string& key, + int64_t start, int64_t stop, + std::vector *score_members); + Status ZRevrangebyscore(std::string& key, + std::string &min, std::string &max, + std::vector *score_members, + int64_t offset = 0, int64_t count = -1); + Status ZRevrangebylex(std::string& key, + std::string &min, std::string &max, + std::vector *members); + Status ZRevrank(std::string& key, std::string& member, int64_t *rank); + Status ZScore(std::string& key, std::string& member, double *score); + Status ZRangebylex(std::string& key, + std::string &min, std::string &max, + std::vector *members); + Status ZLexcount(std::string& key, std::string &min, std::string &max, uint64_t *len); + Status ZRemrangebylex(std::string& key, std::string &min, std::string &max); + + // Bit Commands + Status SetBit(std::string& key, size_t offset, int64_t value); + Status GetBit(std::string& key, size_t offset, int64_t *value); + Status BitCount(std::string& key, int64_t start, int64_t end, int64_t *value, bool have_offset); + Status BitPos(std::string& key, int64_t bit, int64_t *value); + Status BitPos(std::string& key, int64_t bit, int64_t start, int64_t *value); + Status BitPos(std::string& key, int64_t bit, int64_t start, int64_t end, int64_t *value); + +protected: + void DecrObjectsRefCount(robj *argv1, robj *argv2 = nullptr, robj *argv3 = nullptr); + void FreeSdsList(sds *items, uint32_t size); + void FreeObjectList(robj **items, uint32_t size); + void FreeHitemList(hitem *items, uint32_t size); + void FreeZitemList(zitem *items, uint32_t size); + void ConvertObjectToString(robj *obj, std::string *value); + +private: + RedisCache(const RedisCache&); + RedisCache& operator=(const RedisCache&); + +private: + redisCache cache_; +}; + +} // namespace cache + +#endif + +/* EOF */ diff --git a/tools/pika_migrate/src/cache/include/config.h b/tools/pika_migrate/src/cache/include/config.h new file mode 100644 index 0000000000..e0a261542c --- /dev/null +++ b/tools/pika_migrate/src/cache/include/config.h @@ -0,0 +1,72 @@ +// Copyright (c) 2023-present, Qihoo, Inc. All rights reserved. 
diff --git a/tools/pika_migrate/src/cache/include/config.h b/tools/pika_migrate/src/cache/include/config.h
new file mode 100644
index 0000000000..e0a261542c
--- /dev/null
+++ b/tools/pika_migrate/src/cache/include/config.h
@@ -0,0 +1,72 @@
+// Copyright (c) 2023-present, Qihoo, Inc. All rights reserved.
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree. An additional grant
+// of patent rights can be found in the PATENTS file in the same directory.
+
+
+#ifndef __CACHE_CONFIG_H__
+#define __CACHE_CONFIG_H__
+
+#include <cstdint>  // NOTE(editor): header name reconstructed; the original was lost in extraction
+
+#include "rediscache/commondef.h"
+
+namespace cache {
+
+/* Redis maxmemory strategies */
+enum RedisMaxmemoryPolicy {
+  CACHE_VOLATILE_LRU = 0,
+  CACHE_ALLKEYS_LRU = 1,
+  CACHE_VOLATILE_LFU = 2,
+  CACHE_ALLKEYS_LFU = 3,
+  CACHE_VOLATILE_RANDOM = 4,
+  CACHE_ALLKEYS_RANDOM = 5,
+  CACHE_VOLATILE_TTL = 6,
+  CACHE_NO_EVICTION = 7
+};
+
+#define CACHE_DEFAULT_MAXMEMORY CONFIG_DEFAULT_MAXMEMORY  // 10G
+#define CACHE_DEFAULT_MAXMEMORY_SAMPLES CONFIG_DEFAULT_MAXMEMORY_SAMPLES
+#define CACHE_DEFAULT_LFU_DECAY_TIME CONFIG_DEFAULT_LFU_DECAY_TIME
+
+/*
+ * cache start pos
+ */
+constexpr int CACHE_START_FROM_BEGIN = 0;
+constexpr int CACHE_START_FROM_END = -1;
+/*
+ * cache items per key
+ */
+#define DEFAULT_CACHE_ITEMS_PER_KEY 512
+#define DEFAULT_CACHE_MAX_KEY_SIZE 512
+
+struct CacheConfig {
+  uint64_t maxmemory;                   /* Maximum memory the cache may use */
+  int32_t maxmemory_policy;             /* Policy for key eviction */
+  int32_t maxmemory_samples;            /* Precision of random sampling */
+  int32_t lfu_decay_time;               /* LFU counter decay factor */
+  int32_t zset_cache_start_direction;
+  int32_t zset_cache_field_num_per_key;
+
+  CacheConfig()
+      : maxmemory(CACHE_DEFAULT_MAXMEMORY)
+      , maxmemory_policy(CACHE_NO_EVICTION)
+      , maxmemory_samples(CACHE_DEFAULT_MAXMEMORY_SAMPLES)
+      , lfu_decay_time(CACHE_DEFAULT_LFU_DECAY_TIME)
+      , zset_cache_start_direction(CACHE_START_FROM_BEGIN)
+      , zset_cache_field_num_per_key(DEFAULT_CACHE_ITEMS_PER_KEY) {}
+
+  CacheConfig& operator=(const CacheConfig& obj) {
+    maxmemory = obj.maxmemory;
+    maxmemory_policy = obj.maxmemory_policy;
+    maxmemory_samples = obj.maxmemory_samples;
+    lfu_decay_time = obj.lfu_decay_time;
+    zset_cache_start_direction = obj.zset_cache_start_direction;
+    zset_cache_field_num_per_key = obj.zset_cache_field_num_per_key;
+    return *this;
+  }
+};
+
+} // namespace cache
+
+#endif
diff --git a/tools/pika_migrate/src/cache/src/bit.cc b/tools/pika_migrate/src/cache/src/bit.cc
new file mode 100644
index 0000000000..d8955875a2
--- /dev/null
+++ b/tools/pika_migrate/src/cache/src/bit.cc
@@ -0,0 +1,115 @@
+// Copyright (c) 2023-present, Qihoo, Inc. All rights reserved.
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree. An additional grant
+// of patent rights can be found in the PATENTS file in the same directory.
+
+#include "cache/include/cache.h"
+
+#include "pstd_defer.h"
+
+namespace cache {
+
+Status RedisCache::SetBit(std::string& key, size_t offset, int64_t value) {
+  int res = RcFreeMemoryIfNeeded(cache_);
+  if (C_OK != res) {
+    return Status::Corruption("[error] Free memory failed!");
+  }
+
+  // createObject is a redis function; a fresh robj starts with refcount 1
+  robj *kobj = createObject(OBJ_STRING, sdsnewlen(key.data(), key.size()));
+  DEFER {
+    DecrObjectsRefCount(kobj);
+  };
+  int ret = RcSetBit(cache_, kobj, offset, value);
+  if (C_OK != ret) {
+    return Status::Corruption("RcSetBit failed");
+  }
+
+  return Status::OK();
+}
+
+Status RedisCache::GetBit(std::string& key, size_t offset, int64_t *value) {
+  robj *kobj = createObject(OBJ_STRING, sdsnewlen(key.data(), key.size()));
+  DEFER {
+    DecrObjectsRefCount(kobj);
+  };
+  int ret = RcGetBit(cache_, kobj, offset, (long*)value);
+  if (C_OK != ret) {
+    if (REDIS_KEY_NOT_EXIST == ret) {
+      return Status::NotFound("key not in cache");
+    }
+
+    return Status::Corruption("RcGetBit failed");
+  }
+
+  return Status::OK();
+}
+
+Status RedisCache::BitCount(std::string& key, int64_t start, int64_t end, int64_t *value, bool have_offset) {
+  robj *kobj = createObject(OBJ_STRING, sdsnewlen(key.data(), key.size()));
+  DEFER {
+    DecrObjectsRefCount(kobj);
+  };
+  int ret = RcBitCount(cache_, kobj, start, end, (long*)value, (int)have_offset);
+  if (C_OK != ret) {
+    if (REDIS_KEY_NOT_EXIST == ret) {
+      return Status::NotFound("key not in cache");
+    }
+
+    return Status::Corruption("RcBitCount failed");
+  }
+
+  return Status::OK();
+}
+
+Status RedisCache::BitPos(std::string& key, int64_t bit, int64_t *value) {
+  robj *kobj = createObject(OBJ_STRING, sdsnewlen(key.data(), key.size()));
+  DEFER {
+    DecrObjectsRefCount(kobj);
+  };
+  int ret = RcBitPos(cache_, kobj, bit, -1, -1, (long*)value, BIT_POS_NO_OFFSET);
+  if (C_OK != ret) {
+    if (REDIS_KEY_NOT_EXIST == ret) {
+      return Status::NotFound("key not in cache");
+    }
+    return Status::Corruption("RcBitPos failed");
+  }
+
+  return Status::OK();
+}
+
+Status RedisCache::BitPos(std::string& key, int64_t bit, int64_t start, int64_t *value) {
+  robj *kobj = createObject(OBJ_STRING, sdsnewlen(key.data(), key.size()));
+  DEFER {
+    DecrObjectsRefCount(kobj);
+  };
+  int ret = RcBitPos(cache_, kobj, bit, start, -1, (long*)value, BIT_POS_START_OFFSET);
+  if (C_OK != ret) {
+    if (REDIS_KEY_NOT_EXIST == ret) {
+      return Status::NotFound("key not in cache");
+    }
+    return Status::Corruption("RcBitPos failed");
+  }
+
+  return Status::OK();
+}
+
+Status RedisCache::BitPos(std::string& key, int64_t bit, int64_t start, int64_t end, int64_t *value) {
+  robj *kobj = createObject(OBJ_STRING, sdsnewlen(key.data(), key.size()));
+  DEFER {
+    DecrObjectsRefCount(kobj);
+  };
+  int ret = RcBitPos(cache_, kobj, bit, start, end, (long*)value, BIT_POS_START_END_OFFSET);
+  if (C_OK != ret) {
+    if (REDIS_KEY_NOT_EXIST == ret) {
+      return Status::NotFound("key not in cache");
+    }
+    return Status::Corruption("RcBitPos failed");
+  }
+
+  return Status::OK();
+}
+
+} // namespace cache
+
+/* EOF */
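+
+// NOTE(editor): the three BitPos overloads differ only in the range mode
+// passed to RcBitPos -- BIT_POS_NO_OFFSET scans the whole value,
+// BIT_POS_START_OFFSET scans from `start` to the end, and
+// BIT_POS_START_END_OFFSET scans the closed byte range [start, end].
+// Hypothetical usage:
+//
+//   int64_t pos = 0;
+//   rc.BitPos(key, 1, &pos);         // first set bit anywhere
+//   rc.BitPos(key, 1, 2, &pos);      // first set bit from byte 2 onward
+//   rc.BitPos(key, 1, 2, 5, &pos);   // first set bit within bytes [2, 5]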
diff --git a/tools/pika_migrate/src/cache/src/cache.cc b/tools/pika_migrate/src/cache/src/cache.cc
new file mode 100644
index 0000000000..ef0b3103f3
--- /dev/null
+++ b/tools/pika_migrate/src/cache/src/cache.cc
@@ -0,0 +1,272 @@
+// Copyright (c) 2023-present, Qihoo, Inc. All rights reserved.
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree. An additional grant
+// of patent rights can be found in the PATENTS file in the same directory.
+
+
+#include "cache/include/cache.h"
+#include "pstd/include/pstd_string.h"
+#include "pstd_defer.h"
+
+namespace cache {
+
+static int32_t GetRedisLRUPolicy(int32_t cache_lru_policy) {
+  switch (cache_lru_policy) {
+    case CACHE_VOLATILE_LRU:
+      return MAXMEMORY_VOLATILE_LRU;
+    case CACHE_ALLKEYS_LRU:
+      return MAXMEMORY_ALLKEYS_LRU;
+    case CACHE_VOLATILE_LFU:
+      return MAXMEMORY_VOLATILE_LFU;
+    case CACHE_ALLKEYS_LFU:
+      return MAXMEMORY_ALLKEYS_LFU;
+    case CACHE_VOLATILE_RANDOM:
+      return MAXMEMORY_VOLATILE_RANDOM;
+    case CACHE_ALLKEYS_RANDOM:
+      return MAXMEMORY_ALLKEYS_RANDOM;
+    case CACHE_VOLATILE_TTL:
+      return MAXMEMORY_VOLATILE_TTL;
+    case CACHE_NO_EVICTION:
+      return MAXMEMORY_NO_EVICTION;
+    default:
+      return MAXMEMORY_NO_EVICTION;
+  }
+}
+
+static void ConvertCfg(CacheConfig *cache_cfg, db_config *db_cfg) {
+  if (nullptr == cache_cfg || nullptr == db_cfg) {
+    return;
+  }
+
+  db_cfg->maxmemory = cache_cfg->maxmemory;
+  db_cfg->maxmemory_policy = GetRedisLRUPolicy(cache_cfg->maxmemory_policy);
+  db_cfg->maxmemory_samples = cache_cfg->maxmemory_samples;
+  db_cfg->lfu_decay_time = cache_cfg->lfu_decay_time;
+}
+
+RedisCache::RedisCache() {}
+
+RedisCache::~RedisCache() {
+  if (cache_) {
+    RcDestroyCacheHandle(cache_);
+    cache_ = nullptr;
+  }
+}
+
+/*-----------------------------------------------------------------------------
+ * Server APIs
+ *----------------------------------------------------------------------------*/
+void RedisCache::SetConfig(CacheConfig *cfg) {
+  db_config db_cfg;
+  ConvertCfg(cfg, &db_cfg);
+  RcSetConfig(&db_cfg);
+}
+
+uint64_t RedisCache::GetUsedMemory(void) { return RcGetUsedMemory(); }
+
+void RedisCache::GetHitAndMissNum(int64_t *hits, int64_t *misses) {
+  RcGetHitAndMissNum((long long int*)hits, (long long int*)misses);
+}
+
+void RedisCache::ResetHitAndMissNum(void) { RcResetHitAndMissNum(); }
+
+Status RedisCache::Open(void) {
+  cache_ = RcCreateCacheHandle();
+  if (nullptr == cache_) {
+    return Status::Corruption("RcCreateCacheHandle failed!");
+  }
+
+  return Status::OK();
+}
+
+int32_t RedisCache::ActiveExpireCycle(void) { return RcActiveExpireCycle(cache_); }
+
+/*-----------------------------------------------------------------------------
+ * Normal Commands
+ *----------------------------------------------------------------------------*/
+bool RedisCache::Exists(std::string& key) {
+  robj *kobj = createObject(OBJ_STRING, sdsnewlen(key.data(), key.size()));
+  DEFER {
+    decrRefCount(kobj);
+  };
+  bool is_exist = RcExists(cache_, kobj);
+
+  return is_exist;
+}
+
+int64_t RedisCache::DbSize(void) {
+  int64_t dbsize = 0;
+  RcCacheSize(cache_, (long long int*)&dbsize);
+  return dbsize;
+}
+
+void RedisCache::FlushCache(void) { RcFlushCache(cache_); }
+
+Status RedisCache::Del(const std::string& key) {
+  robj *kobj = createObject(OBJ_STRING, sdsnewlen(key.data(), key.size()));
+  DEFER {
+    decrRefCount(kobj);
+  };
+  int ret = RcDel(cache_, kobj);
+  if (C_OK != ret) {
+    if (REDIS_KEY_NOT_EXIST == ret) {
+      return Status::NotFound("key not in cache");
+    } else {
+      return Status::Corruption("RcDel failed");
+    }
+  }
+
+  return Status::OK();
+}
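+
+// NOTE(editor): every command below follows the same ownership pattern --
+// createObject() hands back a robj with refcount 1 and the DEFER block
+// releases it on every exit path, so early returns cannot leak:
+//
+//   robj *kobj = createObject(OBJ_STRING, sdsnewlen(key.data(), key.size()));
+//   DEFER { DecrObjectsRefCount(kobj); };  // runs at scope exit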
+
+Status RedisCache::Expire(std::string& key, int64_t ttl) {
+  robj *kobj = createObject(OBJ_STRING, sdsnewlen(key.data(), key.size()));
+  robj *tobj = createStringObjectFromLongLong(ttl);
+  DEFER {
+    DecrObjectsRefCount(kobj, tobj);
+  };
+  int ret = RcExpire(cache_, kobj, tobj);
+  if (C_OK != ret) {
+    if (REDIS_KEY_NOT_EXIST == ret) {
+      return Status::NotFound("key not in cache");
+    } else {
+      return Status::Corruption("RcExpire failed");
+    }
+  }
+
+  return Status::OK();
+}
+
+Status RedisCache::Expireat(std::string& key, int64_t ttl) {
+  robj *kobj = createObject(OBJ_STRING, sdsnewlen(key.data(), key.size()));
+  robj *tobj = createStringObjectFromLongLong(ttl);
+  DEFER {
+    DecrObjectsRefCount(kobj, tobj);
+  };
+  int ret = RcExpireat(cache_, kobj, tobj);
+  if (C_OK != ret) {
+    if (REDIS_KEY_NOT_EXIST == ret) {
+      return Status::NotFound("key not in cache");
+    }
+    return Status::Corruption("RcExpireat failed");
+  }
+
+  return Status::OK();
+}
+
+Status RedisCache::TTL(std::string& key, int64_t *ttl) {
+  robj *kobj = createObject(OBJ_STRING, sdsnewlen(key.data(), key.size()));
+  DEFER {
+    DecrObjectsRefCount(kobj);
+  };
+  int ret = RcTTL(cache_, kobj, ttl);
+  if (C_OK != ret) {
+    if (REDIS_KEY_NOT_EXIST == ret) {
+      return Status::NotFound("key not in cache");
+    }
+    return Status::Corruption("RcTTL failed");
+  }
+
+  return Status::OK();
+}
+
+Status RedisCache::Persist(std::string& key) {
+  robj *kobj = createObject(OBJ_STRING, sdsnewlen(key.data(), key.size()));
+  DEFER {
+    DecrObjectsRefCount(kobj);
+  };
+  int ret = RcPersist(cache_, kobj);
+  if (C_OK != ret) {
+    if (REDIS_KEY_NOT_EXIST == ret) {
+      return Status::NotFound("key not in cache");
+    }
+    return Status::Corruption("RcPersist failed");
+  }
+
+  return Status::OK();
+}
+
+Status RedisCache::Type(std::string& key, std::string *value) {
+  sds val;
+  robj *kobj = createObject(OBJ_STRING, sdsnewlen(key.data(), key.size()));
+  DEFER {
+    DecrObjectsRefCount(kobj);
+  };
+  int ret = RcType(cache_, kobj, &val);
+  if (C_OK != ret) {
+    if (REDIS_KEY_NOT_EXIST == ret) {
+      return Status::NotFound("key not in cache");
+    }
+    return Status::Corruption("RcType failed");
+  }
+
+  value->clear();
+  value->assign(val, sdslen(val));
+  sdsfree(val);
+
+  return Status::OK();
+}
+
+Status RedisCache::RandomKey(std::string *key) {
+  sds val;
+  int ret = RcRandomkey(cache_, &val);
+  if (C_OK != ret) {
+    if (REDIS_NO_KEYS == ret) {
+      return Status::NotFound("no keys in cache");
+    }
+    return Status::Corruption("RcRandomkey failed");
+  }
+
+  key->clear();
+  key->assign(val, sdslen(val));
+  sdsfree(val);
+
+  return Status::OK();
+}
+
+void RedisCache::DecrObjectsRefCount(robj *argv1, robj *argv2, robj *argv3) {
+  if (nullptr != argv1) decrRefCount(argv1);
+  if (nullptr != argv2) decrRefCount(argv2);
+  if (nullptr != argv3) decrRefCount(argv3);
+}
+
+void RedisCache::FreeSdsList(sds *items, uint32_t size) {
+  for (uint32_t i = 0; i < size; ++i) {
+    sdsfree(items[i]);
+  }
+  zfree(items);
+}
+
+void RedisCache::FreeObjectList(robj **items, uint32_t size) {
+  for (uint32_t i = 0; i < size; ++i) {
+    decrRefCount(items[i]);
+  }
+  zfree(items);
+}
+
+void RedisCache::FreeHitemList(hitem *items, uint32_t size) {
+  for (uint32_t i = 0; i < size; ++i) {
+    sdsfree(items[i].field);
+    sdsfree(items[i].value);
+  }
+  zfree(items);
+}
+
+void RedisCache::FreeZitemList(zitem *items, uint32_t size) {
+  for (uint32_t i = 0; i < size; ++i) {
+    sdsfree(items[i].member);
+  }
+  zfree(items);
+}
+
+void RedisCache::ConvertObjectToString(robj *obj, std::string *value) {
+  if (sdsEncodedObject(obj)) {
+    value->assign((char *)obj->ptr, sdslen((sds)obj->ptr));
+  } else if (obj->encoding == OBJ_ENCODING_INT) {
+    char buf[64];
+    int len = pstd::ll2string(buf, 64, (long)obj->ptr);
+    value->assign(buf, len);
+  }
+}
+
+} // namespace cache
+
+/* EOF */
\ No newline at end of file
diff --git a/tools/pika_migrate/src/cache/src/hash.cc b/tools/pika_migrate/src/cache/src/hash.cc
new file mode 100644
index 0000000000..19fc37092b
--- /dev/null
+++ b/tools/pika_migrate/src/cache/src/hash.cc
@@ -0,0 +1,312 @@
+// Copyright (c) 2023-present, Qihoo, Inc. All rights reserved.
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree. An additional grant
+// of patent rights can be found in the PATENTS file in the same directory.
+
+
+#include "cache/include/cache.h"
+#include "pstd_defer.h"
+
+namespace cache {
+
+Status RedisCache::HDel(std::string& key, std::vector<std::string> &fields) {
+  robj *kobj = createObject(OBJ_STRING, sdsnewlen(key.data(), key.size()));
+  robj **fields_obj = (robj **)zcallocate(sizeof(robj *) * fields.size());
+  for (unsigned int i = 0; i < fields.size(); ++i) {
+    fields_obj[i] = createObject(OBJ_STRING, sdsnewlen(fields[i].data(), fields[i].size()));
+  }
+  DEFER {
+    DecrObjectsRefCount(kobj);
+    FreeObjectList(fields_obj, fields.size());
+  };
+  unsigned long deleted;
+  int ret = RcHDel(cache_, kobj, fields_obj, fields.size(), &deleted);
+  if (C_OK != ret) {
+    if (REDIS_KEY_NOT_EXIST == ret) {
+      return Status::NotFound("key not in cache");
+    }
+    return Status::Corruption("RcHDel failed");
+  }
+
+  return Status::OK();
+}
+
+Status RedisCache::HSet(std::string& key, std::string &field, std::string &value) {
+  int res = RcFreeMemoryIfNeeded(cache_);
+  if (C_OK != res) {
+    return Status::Corruption("[error] Free memory failed!");
+  }
+
+  robj *kobj = createObject(OBJ_STRING, sdsnewlen(key.data(), key.size()));
+  robj *fobj = createObject(OBJ_STRING, sdsnewlen(field.data(), field.size()));
+  robj *vobj = createObject(OBJ_STRING, sdsnewlen(value.data(), value.size()));
+  DEFER {
+    DecrObjectsRefCount(kobj, fobj, vobj);
+  };
+  int ret = RcHSet(cache_, kobj, fobj, vobj);
+  if (C_OK != ret) {
+    return Status::Corruption("RcHSet failed");
+  }
+
+  return Status::OK();
+}
+
+Status RedisCache::HSetnx(std::string& key, std::string &field, std::string &value) {
+  if (C_OK != RcFreeMemoryIfNeeded(cache_)) {
+    return Status::Corruption("[error] Free memory failed!");
+  }
+
+  robj *kobj = createObject(OBJ_STRING, sdsnewlen(key.data(), key.size()));
+  robj *fobj = createObject(OBJ_STRING, sdsnewlen(field.data(), field.size()));
+  robj *vobj = createObject(OBJ_STRING, sdsnewlen(value.data(), value.size()));
+  DEFER {
+    DecrObjectsRefCount(kobj, fobj, vobj);
+  };
+  if (C_OK != RcHSetnx(cache_, kobj, fobj, vobj)) {
+    return Status::Corruption("RcHSetnx failed");
+  }
+
+  return Status::OK();
+}
+
+Status RedisCache::HMSet(std::string& key, std::vector<storage::FieldValue> &fvs) {
+  int res = RcFreeMemoryIfNeeded(cache_);
+  if (C_OK != res) {
+    return Status::Corruption("[error] Free memory failed!");
+  }
+
+  robj *kobj = createObject(OBJ_STRING, sdsnewlen(key.data(), key.size()));
+  unsigned int items_size = fvs.size() * 2;
+  robj **items = (robj **)zcallocate(sizeof(robj *) * items_size);
+  for (unsigned int i = 0; i < fvs.size(); ++i) {
+    items[i * 2] = createObject(OBJ_STRING, sdsnewlen(fvs[i].field.data(), fvs[i].field.size()));
+    items[i * 2 + 1] = createObject(OBJ_STRING, sdsnewlen(fvs[i].value.data(), fvs[i].value.size()));
+  }
+  DEFER {
+    FreeObjectList(items, items_size);
+    DecrObjectsRefCount(kobj);
+  };
+  int ret = RcHMSet(cache_, kobj, items, items_size);
+  if (C_OK != ret) {
+    return Status::Corruption("RcHMSet failed");
+  }
+  return Status::OK();
+}
+
+Status RedisCache::HGet(std::string& key, std::string &field, std::string *value) {
+  sds val;
+  robj *kobj = createObject(OBJ_STRING, sdsnewlen(key.data(), key.size()));
+  robj *fobj = createObject(OBJ_STRING, sdsnewlen(field.data(), field.size()));
+  DEFER {
+    DecrObjectsRefCount(kobj, fobj);
+  };
+  int ret = RcHGet(cache_, kobj, fobj, &val);
+  if (C_OK != ret) {
+    if (REDIS_KEY_NOT_EXIST == ret) {
+      return Status::NotFound("key not in cache");
+    } else if (REDIS_ITEM_NOT_EXIST == ret) {
+      return Status::NotFound("field not exist");
+    }
+    return Status::Corruption("RcHGet failed");
+  }
+
+  value->clear();
+  value->assign(val, sdslen(val));
+  sdsfree(val);
+
+  return Status::OK();
+}
+
+Status RedisCache::HMGet(std::string& key, std::vector<std::string> &fields, std::vector<storage::ValueStatus> *vss) {
+  robj *kobj = createObject(OBJ_STRING, sdsnewlen(key.data(), key.size()));
+  hitem *items = (hitem *)zcallocate(sizeof(hitem) * fields.size());
+  for (unsigned int i = 0; i < fields.size(); ++i) {
+    items[i].field = sdsnewlen(fields[i].data(), fields[i].size());
+  }
+  DEFER {
+    FreeHitemList(items, fields.size());
+    DecrObjectsRefCount(kobj);
+  };
+
+  int ret = RcHMGet(cache_, kobj, items, fields.size());
+  if (C_OK != ret) {
+    if (REDIS_KEY_NOT_EXIST == ret) {
+      return Status::NotFound("key not in cache");
+    }
+    return Status::Corruption("RcHMGet failed");
+  }
+
+  vss->clear();
+  for (unsigned int i = 0; i < fields.size(); ++i) {
+    if (C_OK == items[i].status) {
+      vss->push_back({std::string(items[i].value, sdslen(items[i].value)), rocksdb::Status::OK()});
+    } else {
+      vss->push_back({std::string(), rocksdb::Status::NotFound()});
+    }
+  }
+
+  return Status::OK();
+}
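+
+// NOTE(editor): illustrative sketch of HMGet's per-field results -- the key
+// may exist while individual fields miss, so each slot carries its own
+// status; the field names are hypothetical:
+//
+//   std::vector<std::string> fields{"f1", "missing"};
+//   std::vector<storage::ValueStatus> vss;
+//   rc.HMGet(key, fields, &vss);
+//   // vss[0].status.ok() and vss[1].status.IsNotFound(), if only f1 is set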
+
+Status RedisCache::HGetall(std::string& key, std::vector<storage::FieldValue> *fvs) {
+  hitem *items = nullptr;
+  unsigned long items_size = 0;
+  robj *kobj = createObject(OBJ_STRING, sdsnewlen(key.data(), key.size()));
+  DEFER {
+    DecrObjectsRefCount(kobj);
+  };
+  int ret = RcHGetAll(cache_, kobj, &items, &items_size);
+  if (C_OK != ret) {
+    if (REDIS_KEY_NOT_EXIST == ret) {
+      return Status::NotFound("key not in cache");
+    }
+    return Status::Corruption("RcHGetAll failed");
+  }
+
+  for (uint64_t i = 0; i < items_size; ++i) {
+    storage::FieldValue fv;
+    fv.field.assign(items[i].field, sdslen(items[i].field));
+    fv.value.assign(items[i].value, sdslen(items[i].value));
+    fvs->push_back(fv);
+  }
+
+  FreeHitemList(items, items_size);
+  return Status::OK();
+}
+
+Status RedisCache::HKeys(std::string& key, std::vector<std::string> *fields) {
+  hitem *items = nullptr;
+  unsigned long items_size = 0;
+  robj *kobj = createObject(OBJ_STRING, sdsnewlen(key.data(), key.size()));
+  DEFER {
+    DecrObjectsRefCount(kobj);
+  };
+  int ret = RcHKeys(cache_, kobj, &items, &items_size);
+  if (C_OK != ret) {
+    if (REDIS_KEY_NOT_EXIST == ret) {
+      return Status::NotFound("key not in cache");
+    }
+    return Status::Corruption("RcHKeys failed");
+  }
+
+  for (uint64_t i = 0; i < items_size; ++i) {
+    fields->push_back(std::string(items[i].field, sdslen(items[i].field)));
+  }
+
+  FreeHitemList(items, items_size);
+  return Status::OK();
+}
+
+Status RedisCache::HVals(std::string& key, std::vector<std::string> *values) {
+  hitem *items = nullptr;
+  unsigned long items_size = 0;
+  robj *kobj = createObject(OBJ_STRING, sdsnewlen(key.data(), key.size()));
+  DEFER {
+    DecrObjectsRefCount(kobj);
+  };
+  int ret = RcHVals(cache_, kobj, &items, &items_size);
+  if (C_OK != ret) {
+    if (REDIS_KEY_NOT_EXIST == ret) {
+      return Status::NotFound("key not in cache");
+    }
+    return Status::Corruption("RcHVals failed");
+  }
+
+  for (uint64_t i = 0; i < items_size; ++i) {
+    values->push_back(std::string(items[i].value, sdslen(items[i].value)));
+  }
+
+  FreeHitemList(items, items_size);
+  return Status::OK();
+}
+
+Status RedisCache::HExists(std::string& key, std::string &field) {
+  int is_exist = 0;
+  robj *kobj = createObject(OBJ_STRING, sdsnewlen(key.data(), key.size()));
+  robj *fobj = createObject(OBJ_STRING, sdsnewlen(field.data(), field.size()));
+  DEFER {
+    DecrObjectsRefCount(kobj, fobj);
+  };
+  int ret = RcHExists(cache_, kobj, fobj, &is_exist);
+  if (C_OK != ret) {
+    if (REDIS_KEY_NOT_EXIST == ret) {
+      return Status::NotFound("key not in cache");
+    }
+    return Status::Corruption("RcHExists failed");
+  }
+
+  return is_exist ? Status::OK() : Status::NotFound("field not exist");
+}
+
+Status RedisCache::HIncrby(std::string& key, std::string &field, int64_t value) {
+  int64_t result = 0;
+  robj *kobj = createObject(OBJ_STRING, sdsnewlen(key.data(), key.size()));
+  robj *fobj = createObject(OBJ_STRING, sdsnewlen(field.data(), field.size()));
+  DEFER {
+    DecrObjectsRefCount(kobj, fobj);
+  };
+  int ret = RcHIncrby(cache_, kobj, fobj, value, (long long int*)&result);
+  if (C_OK != ret) {
+    if (REDIS_KEY_NOT_EXIST == ret) {
+      return Status::NotFound("key not in cache");
+    }
+    return Status::Corruption("RcHIncrby failed");
+  }
+
+  return Status::OK();
+}
+
+Status RedisCache::HIncrbyfloat(std::string& key, std::string &field, double value) {
+  long double result = 0.0L;
+  robj *kobj = createObject(OBJ_STRING, sdsnewlen(key.data(), key.size()));
+  robj *fobj = createObject(OBJ_STRING, sdsnewlen(field.data(), field.size()));
+  DEFER {
+    DecrObjectsRefCount(kobj, fobj);
+  };
+  int ret = RcHIncrbyfloat(cache_, kobj, fobj, value, &result);
+  if (C_OK != ret) {
+    if (REDIS_KEY_NOT_EXIST == ret) {
+      return Status::NotFound("key not in cache");
+    }
+    return Status::Corruption("RcHIncrbyfloat failed");
+  }
+
+  return Status::OK();
+}
+
+Status RedisCache::HLen(const std::string& key, uint64_t *len) {
+  robj *kobj = createObject(OBJ_STRING, sdsnewlen(key.data(), key.size()));
+  DEFER {
+    DecrObjectsRefCount(kobj);
+  };
+  int ret = RcHlen(cache_, kobj, reinterpret_cast<unsigned long *>(len));
+  if (C_OK != ret) {
+    if (REDIS_KEY_NOT_EXIST == ret) {
+      return Status::NotFound("key not in cache");
+    }
+    return Status::Corruption("RcHlen failed");
+  }
+
+  return Status::OK();
+}
+
+Status RedisCache::HStrlen(std::string& key, std::string &field, uint64_t *len) {
+  robj *kobj = createObject(OBJ_STRING, sdsnewlen(key.data(), key.size()));
+  robj *fobj = createObject(OBJ_STRING, sdsnewlen(field.data(), field.size()));
+  DEFER {
+    DecrObjectsRefCount(kobj, fobj);
+  };
+  int ret = RcHStrlen(cache_, kobj, fobj, reinterpret_cast<unsigned long *>(len));
+  if (C_OK != ret) {
+    if (REDIS_KEY_NOT_EXIST == ret) {
+      return Status::NotFound("key not in cache");
+    }
+    return Status::Corruption("RcHStrlen failed");
+  }
+
+  return Status::OK();
+}
+
+} // namespace cache
+
+/* EOF */
diff --git a/tools/pika_migrate/src/cache/src/list.cc b/tools/pika_migrate/src/cache/src/list.cc
new file mode 100644
index 0000000000..e063eed150
--- /dev/null
+++ b/tools/pika_migrate/src/cache/src/list.cc
@@ -0,0 +1,293 @@
+// Copyright (c) 2023-present, Qihoo, Inc. All rights reserved.
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree. An additional grant
+// of patent rights can be found in the PATENTS file in the same directory.
+
+#include "cache/include/cache.h"
+#include "pstd_defer.h"
+
+namespace cache {
+
+Status RedisCache::LIndex(std::string& key, int64_t index, std::string *element) {
+  sds val;
+  robj *kobj = createObject(OBJ_STRING, sdsnewlen(key.data(), key.size()));
+  DEFER {
+    DecrObjectsRefCount(kobj);
+  };
+  int ret = RcLIndex(cache_, kobj, index, &val);
+  if (C_OK != ret) {
+    if (REDIS_KEY_NOT_EXIST == ret) {
+      return Status::NotFound("key not in cache");
+    } else if (REDIS_ITEM_NOT_EXIST == ret) {
+      return Status::NotFound("index not exist");
+    }
+    return Status::Corruption("RcLIndex failed");
+  }
+
+  element->clear();
+  element->assign(val, sdslen(val));
+  sdsfree(val);
+
+  return Status::OK();
+}
+
+Status RedisCache::LInsert(std::string& key, storage::BeforeOrAfter &before_or_after, std::string &pivot,
+                           std::string &value) {
+  int ret = RcFreeMemoryIfNeeded(cache_);
+  if (C_OK != ret) {
+    return Status::Corruption("[error] Free memory failed!");
+  }
+
+  int where = (before_or_after == storage::Before) ? REDIS_LIST_HEAD : REDIS_LIST_TAIL;
+  robj *kobj = createObject(OBJ_STRING, sdsnewlen(key.data(), key.size()));
+  robj *pobj = createObject(OBJ_STRING, sdsnewlen(pivot.data(), pivot.size()));
+  robj *vobj = createObject(OBJ_STRING, sdsnewlen(value.data(), value.size()));
+  DEFER {
+    DecrObjectsRefCount(kobj, pobj, vobj);
+  };
+  int res = RcLInsert(cache_, kobj, where, pobj, vobj);
+  if (C_OK != res) {
+    if (REDIS_KEY_NOT_EXIST == res) {
+      return Status::NotFound("key not in cache");
+    }
+    return Status::Corruption("RcLInsert failed");
+  }
+
+  return Status::OK();
+}
+
+Status RedisCache::LLen(const std::string& key, uint64_t *len) {
+  robj *kobj = createObject(OBJ_STRING, sdsnewlen(key.data(), key.size()));
+  DEFER {
+    DecrObjectsRefCount(kobj);
+  };
+  int ret = RcLLen(cache_, kobj, reinterpret_cast<unsigned long *>(len));
+  if (C_OK != ret) {
+    if (REDIS_KEY_NOT_EXIST == ret) {
+      return Status::NotFound("key not in cache");
+    }
+    return Status::Corruption("RcLLen failed");
+  }
+
+  return Status::OK();
+}
+
+Status RedisCache::LPop(std::string& key, std::string *element) {
+  sds val;
+  robj *kobj = createObject(OBJ_STRING, sdsnewlen(key.data(), key.size()));
+  DEFER {
+    DecrObjectsRefCount(kobj);
+  };
+  int ret = RcLPop(cache_, kobj, &val);
+  if (C_OK != ret) {
+    if (REDIS_KEY_NOT_EXIST == ret) {
+      return Status::NotFound("key not in cache");
+    }
+    return Status::Corruption("RcLPop failed");
+  }
+
+  element->clear();
+  element->assign(val, sdslen(val));
+  sdsfree(val);
+
+  return Status::OK();
+}
+
+Status RedisCache::LPush(std::string& key, std::vector<std::string> &values) {
+  int ret = RcFreeMemoryIfNeeded(cache_);
+  if (C_OK != ret) {
+    return Status::Corruption("[error] Free memory failed!");
+  }
+
+  robj *kobj = createObject(OBJ_STRING, sdsnewlen(key.data(), key.size()));
+  robj **vals = (robj **)zcallocate(sizeof(robj *) * values.size());
+  for (unsigned int i = 0; i < values.size(); ++i) {
+    vals[i] = createObject(OBJ_STRING, sdsnewlen(values[i].data(), values[i].size()));
+  }
+  DEFER {
+    FreeObjectList(vals, values.size());
+    DecrObjectsRefCount(kobj);
+  };
+  int res = RcLPush(cache_, kobj, vals, values.size());
+  if (C_OK != res) {
+    return Status::Corruption("RcLPush failed");
+  }
+
+  return Status::OK();
+}
+
+Status RedisCache::LPushx(std::string& key, std::vector<std::string> &values) {
+  int ret = RcFreeMemoryIfNeeded(cache_);
+  if (C_OK != ret) {
+    return Status::Corruption("[error] Free memory failed!");
+  }
+
+  robj *kobj = createObject(OBJ_STRING, sdsnewlen(key.data(), key.size()));
+  robj **vals = (robj **)zcallocate(sizeof(robj *) * values.size());
+  for (unsigned int i = 0; i < values.size(); ++i) {
+    vals[i] = createObject(OBJ_STRING, sdsnewlen(values[i].data(), values[i].size()));
+  }
+  DEFER {
+    FreeObjectList(vals, values.size());
+    DecrObjectsRefCount(kobj);
+  };
+  int res = RcLPushx(cache_, kobj, vals, values.size());
+  if (C_OK != res) {
+    if (REDIS_KEY_NOT_EXIST == res) {
+      return Status::NotFound("key not in cache");
+    }
+    return Status::Corruption("RcLPushx failed");
+  }
+
+  return Status::OK();
+}
+
+Status RedisCache::LRange(std::string& key, int64_t start, int64_t stop, std::vector<std::string> *values) {
+  sds *vals = nullptr;
+  uint64_t vals_size = 0;
+  robj *kobj = createObject(OBJ_STRING, sdsnewlen(key.data(), key.size()));
+  DEFER {
+    DecrObjectsRefCount(kobj);
+  };
+  int ret = RcLRange(cache_, kobj, start, stop, &vals, reinterpret_cast<unsigned long *>(&vals_size));
+  if (C_OK != ret) {
+    if (REDIS_KEY_NOT_EXIST == ret) {
+      return Status::NotFound("key not in cache");
+    }
+    return Status::Corruption("RcLRange failed");
+  }
+
+  for (uint64_t i = 0; i < vals_size; ++i) {
+    values->push_back(std::string(vals[i], sdslen(vals[i])));
+  }
+
+  FreeSdsList(vals, vals_size);
+  return Status::OK();
+}
+
+Status RedisCache::LRem(std::string& key, int64_t count, std::string &value) {
+  robj *kobj = createObject(OBJ_STRING, sdsnewlen(key.data(), key.size()));
+  robj *vobj = createObject(OBJ_STRING, sdsnewlen(value.data(), value.size()));
+  DEFER {
+    DecrObjectsRefCount(kobj, vobj);
+  };
+  int ret = RcLRem(cache_, kobj, count, vobj);
+  if (C_OK != ret) {
+    if (REDIS_KEY_NOT_EXIST == ret) {
+      return Status::NotFound("key not in cache");
+    }
+    return Status::Corruption("RcLRem failed");
+  }
+
+  return Status::OK();
+}
+
+Status RedisCache::LSet(std::string& key, int64_t index, std::string &value) {
+  robj *kobj = createObject(OBJ_STRING, sdsnewlen(key.data(), key.size()));
+  robj *vobj = createObject(OBJ_STRING, sdsnewlen(value.data(), value.size()));
+  DEFER {
+    DecrObjectsRefCount(kobj, vobj);
+  };
+  int ret = RcLSet(cache_, kobj, index, vobj);
+  if (C_OK != ret) {
+    if (REDIS_KEY_NOT_EXIST == ret) {
+      return Status::NotFound("key not in cache");
+    } else if (REDIS_ITEM_NOT_EXIST == ret) {
+      return Status::NotFound("item not exist");
+    }
+    return Status::Corruption("RcLSet failed");
+  }
+
+  return Status::OK();
+}
+
+Status RedisCache::LTrim(std::string& key, int64_t start, int64_t stop) {
+  robj *kobj = createObject(OBJ_STRING, sdsnewlen(key.data(), key.size()));
+  DEFER {
+    DecrObjectsRefCount(kobj);
+  };
+  int ret = RcLTrim(cache_, kobj, start, stop);
+  if (C_OK != ret) {
+    if (REDIS_KEY_NOT_EXIST == ret) {
+      return Status::NotFound("key not in cache");
+    } else {
+      return Status::Corruption("RcLTrim failed");
+    }
+  }
+
+  return Status::OK();
+}
+
+Status RedisCache::RPop(std::string& key, std::string *element) {
+  sds val;
+  robj *kobj = createObject(OBJ_STRING, sdsnewlen(key.data(), key.size()));
+  DEFER {
+    DecrObjectsRefCount(kobj);
+  };
+  int ret = RcRPop(cache_, kobj, &val);
+  if (C_OK != ret) {
+    if (REDIS_KEY_NOT_EXIST == ret) {
+      return Status::NotFound("key not in cache");
+    }
+    return Status::Corruption("RcRPop failed");
+  }
+
+  element->clear();
+  element->assign(val, sdslen(val));
+  sdsfree(val);
+
+  return Status::OK();
+}
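+
+// NOTE(editor): illustrative sketch -- LPush inserts at the head and RPop
+// removes from the tail, so the pair behaves as a FIFO queue; error handling
+// elided and the names are hypothetical:
+//
+//   std::vector<std::string> vals{"job1", "job2"};
+//   rc.LPush(key, vals);      // list is now: job2, job1
+//   std::string job;
+//   rc.RPop(key, &job);       // job == "job1"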
+
+Status RedisCache::RPush(std::string& key, std::vector<std::string> &values) {
+  int res = RcFreeMemoryIfNeeded(cache_);
+  if (C_OK != res) {
+    return Status::Corruption("[error] Free memory failed!");
+  }
+
+  robj *kobj = createObject(OBJ_STRING, sdsnewlen(key.data(), key.size()));
+  robj **vals = (robj **)zcallocate(sizeof(robj *) * values.size());
+  for (unsigned int i = 0; i < values.size(); ++i) {
+    vals[i] = createObject(OBJ_STRING, sdsnewlen(values[i].data(), values[i].size()));
+  }
+  DEFER {
+    FreeObjectList(vals, values.size());
+    DecrObjectsRefCount(kobj);
+  };
+  int ret = RcRPush(cache_, kobj, vals, values.size());
+  if (C_OK != ret) {
+    return Status::Corruption("RcRPush failed");
+  }
+
+  return Status::OK();
+}
+
+Status RedisCache::RPushx(std::string& key, std::vector<std::string> &values) {
+  int res = RcFreeMemoryIfNeeded(cache_);
+  if (C_OK != res) {
+    return Status::Corruption("[error] Free memory failed!");
+  }
+
+  robj *kobj = createObject(OBJ_STRING, sdsnewlen(key.data(), key.size()));
+  robj **vals = (robj **)zcallocate(sizeof(robj *) * values.size());
+  for (unsigned int i = 0; i < values.size(); ++i) {
+    vals[i] = createObject(OBJ_STRING, sdsnewlen(values[i].data(), values[i].size()));
+  }
+  DEFER {
+    FreeObjectList(vals, values.size());
+    DecrObjectsRefCount(kobj);
+  };
+  int ret = RcRPushx(cache_, kobj, vals, values.size());
+  if (C_OK != ret) {
+    if (REDIS_KEY_NOT_EXIST == ret) {
+      return Status::NotFound("key not in cache");
+    }
+    return Status::Corruption("RcRPushx failed");
+  }
+
+  return Status::OK();
+}
+
+} // namespace cache
+
+/* EOF */
\ No newline at end of file
diff --git a/tools/pika_migrate/src/cache/src/set.cc b/tools/pika_migrate/src/cache/src/set.cc
new file mode 100644
index 0000000000..3c2fcd2598
--- /dev/null
+++ b/tools/pika_migrate/src/cache/src/set.cc
@@ -0,0 +1,138 @@
+// Copyright (c) 2023-present, Qihoo, Inc. All rights reserved.
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree. An additional grant
+// of patent rights can be found in the PATENTS file in the same directory.
+
+#include "cache/include/cache.h"
+#include "pstd_defer.h"
+
+namespace cache {
+
+Status RedisCache::SAdd(std::string& key, std::vector<std::string> &members) {
+  int ret = RcFreeMemoryIfNeeded(cache_);
+  if (C_OK != ret) {
+    return Status::Corruption("[error] Free memory failed!");
+  }
+
+  robj *kobj = createObject(OBJ_STRING, sdsnewlen(key.data(), key.size()));
+  robj **vals = (robj **)zcallocate(sizeof(robj *) * members.size());
+  for (unsigned int i = 0; i < members.size(); ++i) {
+    vals[i] = createObject(OBJ_STRING, sdsnewlen(members[i].data(), members[i].size()));
+  }
+  DEFER {
+    FreeObjectList(vals, members.size());
+    DecrObjectsRefCount(kobj);
+  };
+  int res = RcSAdd(cache_, kobj, vals, members.size());
+  if (C_OK != res) {
+    return Status::Corruption("RcSAdd failed");
+  }
+
+  return Status::OK();
+}
+
+Status RedisCache::SCard(const std::string& key, uint64_t *len) {
+  robj *kobj = createObject(OBJ_STRING, sdsnewlen(key.data(), key.size()));
+  DEFER {
+    DecrObjectsRefCount(kobj);
+  };
+  int ret = RcSCard(cache_, kobj, reinterpret_cast<unsigned long *>(len));
+  if (C_OK != ret) {
+    if (REDIS_KEY_NOT_EXIST == ret) {
+      return Status::NotFound("key not in cache");
+    }
+    return Status::Corruption("RcSCard failed");
+  }
+
+  return Status::OK();
+}
+
+Status RedisCache::SIsmember(std::string& key, std::string& member) {
+  int is_member = 0;
+  robj *kobj = createObject(OBJ_STRING, sdsnewlen(key.data(), key.size()));
+  robj *mobj = createObject(OBJ_STRING, sdsnewlen(member.data(), member.size()));
+  DEFER {
+    DecrObjectsRefCount(kobj, mobj);
+  };
+  int ret = RcSIsmember(cache_, kobj, mobj, &is_member);
+  if (C_OK != ret) {
+    if (REDIS_KEY_NOT_EXIST == ret) {
+      return Status::NotFound("key not in cache");
+    }
+    return Status::Corruption("RcSIsmember failed");
+  }
+
+  return is_member ? Status::OK() : Status::NotFound("member not exist");
+}
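+
+// NOTE(editor): SIsmember reports membership through the Status rather than
+// an out-parameter -- Status::OK() means the member is present; NotFound
+// covers both a missing key ("key not in cache") and a missing member:
+//
+//   if (rc.SIsmember(key, member).ok()) { /* member present */ }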
+
+Status RedisCache::SMembers(std::string& key, std::vector<std::string> *members) {
+  sds *vals = nullptr;
+  unsigned long vals_size = 0;
+  robj *kobj = createObject(OBJ_STRING, sdsnewlen(key.data(), key.size()));
+  DEFER {
+    DecrObjectsRefCount(kobj);
+  };
+  int ret = RcSMembers(cache_, kobj, &vals, &vals_size);
+  if (C_OK != ret) {
+    if (REDIS_KEY_NOT_EXIST == ret) {
+      return Status::NotFound("key not in cache");
+    }
+    return Status::Corruption("RcSMembers failed");
+  }
+
+  for (unsigned long i = 0; i < vals_size; ++i) {
+    members->push_back(std::string(vals[i], sdslen(vals[i])));
+  }
+
+  FreeSdsList(vals, vals_size);
+  return Status::OK();
+}
+
+Status RedisCache::SRem(std::string& key, std::vector<std::string> &members) {
+  robj *kobj = createObject(OBJ_STRING, sdsnewlen(key.data(), key.size()));
+  robj **vals = (robj **)zcallocate(sizeof(robj *) * members.size());
+  for (unsigned int i = 0; i < members.size(); ++i) {
+    vals[i] = createObject(OBJ_STRING, sdsnewlen(members[i].data(), members[i].size()));
+  }
+  DEFER {
+    FreeObjectList(vals, members.size());
+    DecrObjectsRefCount(kobj);
+  };
+
+  int ret = RcSRem(cache_, kobj, vals, members.size());
+  if (C_OK != ret) {
+    if (REDIS_KEY_NOT_EXIST == ret) {
+      return Status::NotFound("key not in cache");
+    }
+    return Status::Corruption("RcSRem failed");
+  }
+
+  return Status::OK();
+}
+
+Status RedisCache::SRandmember(std::string& key, int64_t count, std::vector<std::string> *members) {
+  sds *vals = nullptr;
+  unsigned long vals_size = 0;
+  robj *kobj = createObject(OBJ_STRING, sdsnewlen(key.data(), key.size()));
+  DEFER {
+    DecrObjectsRefCount(kobj);
+  };
+  int ret = RcSRandmember(cache_, kobj, count, &vals, &vals_size);
+  if (C_OK != ret) {
+    if (REDIS_KEY_NOT_EXIST == ret) {
+      return Status::NotFound("key not in cache");
+    }
+    return Status::Corruption("RcSRandmember failed");
+  }
+
+  for (unsigned long i = 0; i < vals_size; ++i) {
+    members->push_back(std::string(vals[i], sdslen(vals[i])));
+  }
+
+  FreeSdsList(vals, vals_size);
+  return Status::OK();
+}
+
+} // namespace cache
+
+/* EOF */
\ No newline at end of file
diff --git a/tools/pika_migrate/src/cache/src/string.cc b/tools/pika_migrate/src/cache/src/string.cc
new file mode 100644
index 0000000000..4195fe7b6e
--- /dev/null
+++ b/tools/pika_migrate/src/cache/src/string.cc
@@ -0,0 +1,295 @@
+// Copyright (c) 2023-present, Qihoo, Inc. All rights reserved.
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree. An additional grant
+// of patent rights can be found in the PATENTS file in the same directory.
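+
+// NOTE(editor): summary of the Set* variants below, inferred from the
+// RcSet/RcSetnx/RcSetxx calls they wrap: Set always writes; Setnx writes only
+// when the key is absent; Setxx writes only when the key already exists; the
+// *WithoutTTL forms pass a null TTL object so the cached value never expires.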
+ +#include + +#include "cache/include/cache.h" +#include "pstd_defer.h" + +namespace cache { + +Status RedisCache::Set(std::string& key, std::string &value, int64_t ttl) { + int res = RcFreeMemoryIfNeeded(cache_); + if (C_OK != res) { + return Status::Corruption("[error] Free memory faild !"); + } + + robj *kobj = createObject(OBJ_STRING, sdsnewlen(key.data(), key.size())); + robj *vobj = createObject(OBJ_STRING, sdsnewlen(value.data(), value.size())); + robj *tobj = createStringObjectFromLongLong(ttl); + DEFER { + DecrObjectsRefCount(kobj, vobj, tobj); + }; + int ret = RcSet(cache_, kobj, vobj, tobj); + if (C_OK != ret) { + return Status::Corruption("RcSet failed"); + } + + return Status::OK(); +} + +Status RedisCache::SetWithoutTTL(std::string& key, std::string &value) { + int ret = RcFreeMemoryIfNeeded(cache_); + if (C_OK != ret) { + return Status::Corruption("[error] Free memory faild !"); + } + + robj *kobj = createObject(OBJ_STRING, sdsnewlen(key.data(), key.size())); + robj *vobj = createObject(OBJ_STRING, sdsnewlen(value.data(), value.size())); + DEFER { + DecrObjectsRefCount(kobj, vobj); + }; + int res = RcSet(cache_, kobj, vobj, nullptr); + if (C_OK != res) { + return Status::Corruption("RcSetnx failed, key exists!"); + } + + return Status::OK(); +} + +Status RedisCache::Setnx(std::string& key, std::string &value, int64_t ttl) { + int ret = RcFreeMemoryIfNeeded(cache_); + if (C_OK != ret) { + return Status::Corruption("[error] Free memory faild !"); + } + + robj *kobj = createObject(OBJ_STRING, sdsnewlen(key.data(), key.size())); + robj *vobj = createObject(OBJ_STRING, sdsnewlen(value.data(), value.size())); + robj *tobj = createStringObjectFromLongLong(ttl); + DEFER { + DecrObjectsRefCount(kobj, vobj, tobj); + }; + int res = RcSetnx(cache_, kobj, vobj, tobj); + if (C_OK != res) { + return Status::Corruption("RcSetnx failed, key exists!"); + } + + return Status::OK(); +} + +Status RedisCache::SetnxWithoutTTL(std::string& key, std::string &value) { + int res = RcFreeMemoryIfNeeded(cache_); + if (C_OK != res) { + return Status::Corruption("[error] Free memory faild !"); + } + + robj *kobj = createObject(OBJ_STRING, sdsnewlen(key.data(), key.size())); + robj *vobj = createObject(OBJ_STRING, sdsnewlen(value.data(), value.size())); + DEFER { + DecrObjectsRefCount(kobj, vobj); + }; + int ret = RcSetnx(cache_, kobj, vobj, nullptr); + if (C_OK != ret) { + return Status::Corruption("RcSetnx failed, key exists!"); + } + + return Status::OK(); +} + +Status RedisCache::Setxx(std::string& key, std::string &value, int64_t ttl) { + int ret = RcFreeMemoryIfNeeded(cache_); + if (C_OK != ret) { + return Status::Corruption("[error] Free memory faild !"); + } + + robj *kobj = createObject(OBJ_STRING, sdsnewlen(key.data(), key.size())); + robj *vobj = createObject(OBJ_STRING, sdsnewlen(value.data(), value.size())); + robj *tobj = createStringObjectFromLongLong(ttl); + DEFER { + DecrObjectsRefCount(kobj, vobj, tobj); + }; + int res = RcSetxx(cache_, kobj, vobj, tobj); + if (C_OK != res) { + return Status::Corruption("RcSetxx failed, key not exists!"); + } + + return Status::OK(); +} + +Status RedisCache::SetxxWithoutTTL(std::string& key, std::string &value) { + int res = RcFreeMemoryIfNeeded(cache_); + if (C_OK != res) { + return Status::Corruption("[error] Free memory faild !"); + } + + robj *kobj = createObject(OBJ_STRING, sdsnewlen(key.data(), key.size())); + robj *vobj = createObject(OBJ_STRING, sdsnewlen(value.data(), value.size())); + DEFER { + DecrObjectsRefCount(kobj, vobj); + }; + int ret = 
RcSetxx(cache_, kobj, vobj, nullptr); + if (C_OK != ret) { + return Status::Corruption("RcSetxx failed, key not exists!"); + } + + return Status::OK(); +} + +Status RedisCache::Get(const std::string& key, std::string *value) { + robj *val; + robj *kobj = createObject(OBJ_STRING, sdsnewlen(key.data(), key.size())); + DEFER { + DecrObjectsRefCount(kobj); + }; + int ret = RcGet(cache_, kobj, &val); + if (C_OK != ret) { + if (REDIS_KEY_NOT_EXIST == ret) { + return Status::NotFound("key not in cache"); + } else { + return Status::Corruption("RcGet failed"); + } + } + + value->clear(); + ConvertObjectToString(val, value); + + return Status::OK(); +} + +Status RedisCache::Incr(std::string& key) { + robj *kobj = createObject(OBJ_STRING, sdsnewlen(key.data(), key.size())); + DEFER { + DecrObjectsRefCount(kobj); + }; + long long int ret; + int res = RcIncr(cache_, kobj, &ret); + if (C_OK != res) { + return Status::Corruption("RcIncr failed"); + } + + return Status::OK(); +} + +Status RedisCache::Decr(std::string& key) { + robj *kobj = createObject(OBJ_STRING, sdsnewlen(key.data(), key.size())); + DEFER { + DecrObjectsRefCount(kobj); + }; + long long int ret; + int res = RcDecr(cache_, kobj, &ret); + if (C_OK != res) { + return Status::Corruption("RcDecr failed!"); + } + + return Status::OK(); +} + +Status RedisCache::IncrBy(std::string& key, int64_t incr) { + robj *kobj = createObject(OBJ_STRING, sdsnewlen(key.data(), key.size())); + DEFER { + DecrObjectsRefCount(kobj); + }; + long long int ret; + int res = RcIncrBy(cache_, kobj, incr, &ret); + if (C_OK != res) { + return Status::Corruption("RcIncrBy failed!"); + } + + return Status::OK(); +} + +Status RedisCache::DecrBy(std::string& key, int64_t incr) { + robj *kobj = createObject(OBJ_STRING, sdsnewlen(key.data(), key.size())); + DEFER { + DecrObjectsRefCount(kobj); + }; + long long int ret; + int res = RcDecrBy(cache_, kobj, incr, &ret); + if (C_OK != res) { + return Status::Corruption("RcDecrBy failed!"); + } + + return Status::OK(); +} + +Status RedisCache::Incrbyfloat(std::string& key, double incr) { + long double ret = .0f; + robj *kobj = createObject(OBJ_STRING, sdsnewlen(key.data(), key.size())); + DEFER { + DecrObjectsRefCount(kobj); + }; + int res = RcIncrByFloat(cache_, kobj, incr, &ret); + if (C_OK != res) { + return Status::Corruption("RcIncrByFloat failed!"); + } + + return Status::OK(); +} + +Status RedisCache::Append(std::string& key, std::string &value) { + uint64_t ret = 0; + robj *kobj = createObject(OBJ_STRING, sdsnewlen(key.data(), key.size())); + robj *vobj = createObject(OBJ_STRING, sdsnewlen(value.data(), value.size())); + DEFER { + DecrObjectsRefCount(kobj, vobj); + }; + int res = RcAppend(cache_, kobj, vobj, reinterpret_cast(&ret)); + if (C_OK != res) { + return Status::Corruption("RcAppend failed!"); + } + + return Status::OK(); +} + +Status RedisCache::GetRange(std::string& key, int64_t start, int64_t end, std::string *value) { + sds val; + robj *kobj = createObject(OBJ_STRING, sdsnewlen(key.data(), key.size())); + DEFER { + DecrObjectsRefCount(kobj); + }; + int ret = RcGetRange(cache_, kobj, start, end, &val); + if (C_OK != ret) { + if (REDIS_KEY_NOT_EXIST == ret) { + return Status::NotFound("key not in cache"); + } else { + return Status::Corruption("RcGetRange failed"); + } + } + + value->clear(); + value->assign(val, sdslen(val)); + sdsfree(val); + + return Status::OK(); +} + +Status RedisCache::SetRange(std::string& key, int64_t start, std::string &value) { + if (C_OK != RcFreeMemoryIfNeeded(cache_)) { + return 
Status::Corruption("[error] Free memory faild !"); + } + + uint64_t ret = 0; + robj *kobj = createObject(OBJ_STRING, sdsnewlen(key.data(), key.size())); + robj *vobj = createObject(OBJ_STRING, sdsnewlen(value.data(), value.size())); + DEFER { + DecrObjectsRefCount(kobj, vobj); + }; + int res = RcSetRange(cache_, kobj, start, vobj, reinterpret_cast(&ret)); + if (C_OK != res) { + return Status::Corruption("SetRange failed!"); + } + + return Status::OK(); +} + +Status RedisCache::Strlen(std::string& key, int32_t *len) { + robj *kobj = createObject(OBJ_STRING, sdsnewlen(key.data(), key.size())); + DEFER { + DecrObjectsRefCount(kobj); + }; + int ret = RcStrlen(cache_, kobj, len); + if (C_OK != ret) { + if (REDIS_KEY_NOT_EXIST == ret) { + return Status::NotFound("key not in cache"); + } + return Status::Corruption("RcStrlen failed"); + } + + return Status::OK(); +} + +} // namespace cache + +/* EOF */ \ No newline at end of file diff --git a/tools/pika_migrate/src/cache/src/zset.cc b/tools/pika_migrate/src/cache/src/zset.cc new file mode 100644 index 0000000000..9a83c018c5 --- /dev/null +++ b/tools/pika_migrate/src/cache/src/zset.cc @@ -0,0 +1,409 @@ +// Copyright (c) 2023-present, Qihoo, Inc. All rights reserved. +// This source code is licensed under the BSD-style license found in the +// LICENSE file in the root directory of this source tree. An additional grant +// of patent rights can be found in the PATENTS file in the same directory. + +#include "cache/include/cache.h" +#include "pstd_defer.h" + +namespace cache { + +Status RedisCache::ZAdd(std::string& key, std::vector &score_members) { + int res = RcFreeMemoryIfNeeded(cache_); + if (C_OK != res) { + return Status::Corruption("[error] Free memory faild !"); + } + + robj *kobj = createObject(OBJ_STRING, sdsnewlen(key.data(), key.size())); + unsigned int items_size = score_members.size() * 2; + robj **items = (robj **)zcallocate(sizeof(robj *) * items_size); + for (unsigned int i = 0; i < score_members.size(); ++i) { + items[i * 2] = createStringObjectFromLongDouble(score_members[i].score, 0); + items[i * 2 + 1] = + createObject(OBJ_STRING, sdsnewlen(score_members[i].member.data(), score_members[i].member.size())); + } + DEFER { + FreeObjectList(items, items_size); + DecrObjectsRefCount(kobj); + }; + int ret = RcZAdd(cache_, kobj, items, items_size); + if (C_OK != ret) { + return Status::Corruption("RcZAdd failed"); + } + + return Status::OK(); +} + +Status RedisCache::ZCard(const std::string& key, uint64_t *len) { + robj *kobj = createObject(OBJ_STRING, sdsnewlen(key.data(), key.size())); + DEFER { + DecrObjectsRefCount(kobj); + }; + int ret = RcZCard(cache_, kobj, reinterpret_cast(len)); + if (C_OK != ret) { + if (REDIS_KEY_NOT_EXIST == ret) { + return Status::NotFound("key not in cache"); + } + return Status::Corruption("RcZCard failed"); + } + + return Status::OK(); +} + +Status RedisCache::ZCount(std::string& key, std::string &min, std::string &max, uint64_t *len) { + robj *kobj = createObject(OBJ_STRING, sdsnewlen(key.data(), key.size())); + robj *minobj = createObject(OBJ_STRING, sdsnewlen(min.data(), min.size())); + robj *maxobj = createObject(OBJ_STRING, sdsnewlen(max.data(), max.size())); + DEFER { + DecrObjectsRefCount(kobj, minobj, maxobj); + }; + int ret = RcZCount(cache_, kobj, minobj, maxobj, reinterpret_cast(len)); + if (C_OK != ret) { + if (REDIS_KEY_NOT_EXIST == ret) { + return Status::NotFound("key not in cache"); + } + return Status::Corruption("RcZCount failed"); + } + + return Status::OK(); +} + +Status 
RedisCache::ZIncrby(std::string& key, std::string& member, double increment) { + if (C_OK != RcFreeMemoryIfNeeded(cache_)) { + return Status::Corruption("[error] Free memory faild !"); + } + + robj *kobj = createObject(OBJ_STRING, sdsnewlen(key.data(), key.size())); + robj **items = (robj **)zcallocate(sizeof(robj *) * 2); + items[0] = createStringObjectFromLongDouble(increment, 0); + items[1] = createObject(OBJ_STRING, sdsnewlen(member.data(), member.size())); + DEFER { + FreeObjectList(items, 2); + DecrObjectsRefCount(kobj); + }; + int ret = RcZIncrby(cache_, kobj, items, 2); + if (C_OK != ret) { + return Status::Corruption("RcZIncrby failed"); + } + + return Status::OK(); +} + +Status RedisCache::ZRange(std::string& key, int64_t start, int64_t stop, std::vector *score_members) { + zitem *items = nullptr; + uint64_t items_size = 0; + robj *kobj = createObject(OBJ_STRING, sdsnewlen(key.data(), key.size())); + DEFER { + DecrObjectsRefCount(kobj); + }; + int ret = RcZrange(cache_, kobj, start, stop, &items, reinterpret_cast(&items_size)); + if (C_OK != ret) { + if (REDIS_KEY_NOT_EXIST == ret) { + return Status::NotFound("key not in cache"); + } + return Status::Corruption("RcZrange failed"); + } + + for (uint64_t i = 0; i < items_size; ++i) { + storage::ScoreMember sm; + sm.score = items[i].score; + sm.member.assign(items[i].member, sdslen(items[i].member)); + score_members->push_back(sm); + } + + FreeZitemList(items, items_size); + return Status::OK(); +} + +Status RedisCache::ZRangebyscore(std::string& key, std::string &min, std::string &max, + std::vector *score_members, int64_t offset, int64_t count) { + zitem *items = nullptr; + uint64_t items_size = 0; + robj *kobj = createObject(OBJ_STRING, sdsnewlen(key.data(), key.size())); + robj *minobj = createObject(OBJ_STRING, sdsnewlen(min.data(), min.size())); + robj *maxobj = createObject(OBJ_STRING, sdsnewlen(max.data(), max.size())); + DEFER { + DecrObjectsRefCount(kobj, minobj, maxobj); + }; + int ret = RcZRangebyscore(cache_, kobj, minobj, maxobj, &items, + reinterpret_cast(&items_size), offset, count); + if (C_OK != ret) { + if (REDIS_KEY_NOT_EXIST == ret) { + return Status::NotFound("key not in cache"); + } + return Status::Corruption("RcZRangebyscore failed"); + } + + for (uint64_t i = 0; i < items_size; ++i) { + storage::ScoreMember sm; + sm.score = items[i].score; + sm.member.assign(items[i].member, sdslen(items[i].member)); + score_members->push_back(sm); + } + + FreeZitemList(items, items_size); + return Status::OK(); +} + +Status RedisCache::ZRank(std::string& key, std::string& member, int64_t *rank) { + robj *kobj = createObject(OBJ_STRING, sdsnewlen(key.data(), key.size())); + robj *mobj = createObject(OBJ_STRING, sdsnewlen(member.data(), member.size())); + DEFER { + DecrObjectsRefCount(kobj, mobj); + }; + int ret = RcZRank(cache_, kobj, mobj, (long*)rank); + if (C_OK != ret) { + if (REDIS_KEY_NOT_EXIST == ret) { + return Status::NotFound("key not in cache"); + } else if (REDIS_ITEM_NOT_EXIST == ret) { + return Status::NotFound("member not exist"); + } + return Status::Corruption("RcZRank failed"); + } + + return Status::OK(); +} + +Status RedisCache::ZRem(std::string& key, std::vector &members) { + robj *kobj = createObject(OBJ_STRING, sdsnewlen(key.data(), key.size())); + robj **members_obj = (robj **)zcallocate(sizeof(robj *) * members.size()); + for (unsigned int i = 0; i < members.size(); ++i) { + members_obj[i] = createObject(OBJ_STRING, sdsnewlen(members[i].data(), members[i].size())); + } + DEFER { + 
FreeObjectList(members_obj, members.size()); + DecrObjectsRefCount(kobj); + }; + + int ret = RcZRem(cache_, kobj, members_obj, members.size()); + if (C_OK != ret) { + if (REDIS_KEY_NOT_EXIST == ret) { + return Status::NotFound("key not in cache"); + } + return Status::Corruption("RcZRem failed"); + } + + return Status::OK(); +} + +Status RedisCache::ZRemrangebyrank(std::string& key, std::string &min, std::string &max) { + robj *kobj = createObject(OBJ_STRING, sdsnewlen(key.data(), key.size())); + robj *minobj = createObject(OBJ_STRING, sdsnewlen(min.data(), min.size())); + robj *maxobj = createObject(OBJ_STRING, sdsnewlen(max.data(), max.size())); + DEFER { + DecrObjectsRefCount(kobj, minobj, maxobj); + }; + int ret = RcZRemrangebyrank(cache_, kobj, minobj, maxobj); + if (C_OK != ret) { + if (REDIS_KEY_NOT_EXIST == ret) { + return Status::NotFound("key not in cache"); + } + return Status::Corruption("RcZRemrangebyrank failed"); + } + + return Status::OK(); +} + +Status RedisCache::ZRemrangebyscore(std::string& key, std::string &min, std::string &max) { + robj *kobj = createObject(OBJ_STRING, sdsnewlen(key.data(), key.size())); + robj *minobj = createObject(OBJ_STRING, sdsnewlen(min.data(), min.size())); + robj *maxobj = createObject(OBJ_STRING, sdsnewlen(max.data(), max.size())); + DEFER { + DecrObjectsRefCount(kobj, minobj, maxobj); + }; + int ret = RcZRemrangebyscore(cache_, kobj, minobj, maxobj); + if (C_OK != ret) { + if (REDIS_KEY_NOT_EXIST == ret) { + return Status::NotFound("key not in cache"); + } + return Status::Corruption("RcZRemrangebyscore failed"); + } + + return Status::OK(); +} + +Status RedisCache::ZRevrange(std::string& key, int64_t start, int64_t stop, + std::vector *score_members) { + zitem *items = nullptr; + uint64_t items_size = 0; + robj *kobj = createObject(OBJ_STRING, sdsnewlen(key.data(), key.size())); + DEFER { + DecrObjectsRefCount(kobj); + }; + int ret = RcZRevrange(cache_, kobj, start, stop, &items, reinterpret_cast(&items_size)); + if (C_OK != ret) { + if (REDIS_KEY_NOT_EXIST == ret) { + return Status::NotFound("key not in cache"); + } + return Status::Corruption("RcZRevrange failed"); + } + + for (uint64_t i = 0; i < items_size; ++i) { + storage::ScoreMember sm; + sm.score = items[i].score; + sm.member.assign(items[i].member, sdslen(items[i].member)); + score_members->push_back(sm); + } + + FreeZitemList(items, items_size); + return Status::OK(); +} + +Status RedisCache::ZRevrangebyscore(std::string& key, std::string &min, std::string &max, + std::vector *score_members, int64_t offset, int64_t count) { + zitem *items = nullptr; + uint64_t items_size = 0; + robj *kobj = createObject(OBJ_STRING, sdsnewlen(key.data(), key.size())); + robj *minobj = createObject(OBJ_STRING, sdsnewlen(min.data(), min.size())); + robj *maxobj = createObject(OBJ_STRING, sdsnewlen(max.data(), max.size())); + DEFER { + DecrObjectsRefCount(kobj, minobj, maxobj); + }; + int ret = RcZRevrangebyscore(cache_, kobj, minobj, maxobj, &items, + reinterpret_cast(&items_size), offset, (long)count); + if (C_OK != ret) { + if (REDIS_KEY_NOT_EXIST == ret) { + return Status::NotFound("key not in cache"); + } + return Status::Corruption("RcZRevrangebyscore failed"); + } + + for (uint64_t i = 0; i < items_size; ++i) { + storage::ScoreMember sm; + sm.score = items[i].score; + sm.member.assign(items[i].member, sdslen(items[i].member)); + score_members->push_back(sm); + } + + FreeZitemList(items, items_size); + return Status::OK(); +} + +Status RedisCache::ZRevrangebylex(std::string& key, std::string 
&min, std::string &max, + std::vector *members) { + sds *vals = nullptr; + uint64_t vals_size = 0; + robj *kobj = createObject(OBJ_STRING, sdsnewlen(key.data(), key.size())); + robj *minobj = createObject(OBJ_STRING, sdsnewlen(min.data(), min.size())); + robj *maxobj = createObject(OBJ_STRING, sdsnewlen(max.data(), max.size())); + DEFER { + DecrObjectsRefCount(kobj, minobj, maxobj); + }; + int ret = RcZRevrangebylex(cache_, kobj, minobj, maxobj, &vals, (unsigned long*)&vals_size); + if (C_OK != ret) { + if (REDIS_KEY_NOT_EXIST == ret) { + return Status::NotFound("key not in cache"); + } + return Status::Corruption("RcZRevrangebylex failed"); + } + + for (uint64_t i = 0; i < vals_size; ++i) { + members->push_back(std::string(vals[i], sdslen(vals[i]))); + } + + FreeSdsList(vals, vals_size); + return Status::OK(); +} + +Status RedisCache::ZRevrank(std::string& key, std::string& member, int64_t *rank) { + robj *kobj = createObject(OBJ_STRING, sdsnewlen(key.data(), key.size())); + robj *mobj = createObject(OBJ_STRING, sdsnewlen(member.data(), member.size())); + DEFER { + DecrObjectsRefCount(kobj, mobj); + }; + int ret = RcZRevrank(cache_, kobj, mobj, reinterpret_cast(rank)); + if (C_OK != ret) { + if (REDIS_KEY_NOT_EXIST == ret) { + return Status::NotFound("key not in cache"); + } else if (REDIS_ITEM_NOT_EXIST == ret) { + return Status::NotFound("member not exist"); + } + return Status::Corruption("RcZRevrank failed"); + } + + return Status::OK(); +} + +Status RedisCache::ZScore(std::string& key, std::string& member, double *score) { + robj *kobj = createObject(OBJ_STRING, sdsnewlen(key.data(), key.size())); + robj *mobj = createObject(OBJ_STRING, sdsnewlen(member.data(), member.size())); + DEFER { + DecrObjectsRefCount(kobj, mobj); + }; + int ret = RcZScore(cache_, kobj, mobj, score); + if (C_OK != ret) { + if (REDIS_KEY_NOT_EXIST == ret) { + return Status::NotFound("key not in cache"); + } else if (REDIS_ITEM_NOT_EXIST == ret) { + return Status::NotFound("member not exist"); + } + return Status::Corruption("RcZScore failed"); + } + + return Status::OK(); +} + +Status RedisCache::ZRangebylex(std::string& key, std::string &min, std::string &max, + std::vector *members) { + sds *vals = nullptr; + uint64_t vals_size = 0; + robj *kobj = createObject(OBJ_STRING, sdsnewlen(key.data(), key.size())); + robj *minobj = createObject(OBJ_STRING, sdsnewlen(min.data(), min.size())); + robj *maxobj = createObject(OBJ_STRING, sdsnewlen(max.data(), max.size())); + DEFER { + DecrObjectsRefCount(kobj, minobj, maxobj); + }; + int ret = RcZRangebylex(cache_, kobj, minobj, maxobj, &vals, (unsigned long*)&vals_size); + if (C_OK != ret) { + if (REDIS_KEY_NOT_EXIST == ret) { + return Status::NotFound("key not in cache"); + } + return Status::Corruption("RcZRangebylex failed"); + } + + for (uint64_t i = 0; i < vals_size; ++i) { + members->push_back(std::string(vals[i], sdslen(vals[i]))); + } + + FreeSdsList(vals, vals_size); + return Status::OK(); +} + +Status RedisCache::ZLexcount(std::string& key, std::string &min, std::string &max, uint64_t *len) { + robj *kobj = createObject(OBJ_STRING, sdsnewlen(key.data(), key.size())); + robj *minobj = createObject(OBJ_STRING, sdsnewlen(min.data(), min.size())); + robj *maxobj = createObject(OBJ_STRING, sdsnewlen(max.data(), max.size())); + DEFER { + DecrObjectsRefCount(kobj, minobj, maxobj); + }; + int ret = RcZLexcount(cache_, kobj, minobj, maxobj, (unsigned long*)len); + if (C_OK != ret) { + if (REDIS_KEY_NOT_EXIST == ret) { + return Status::NotFound("key not in cache"); + } 
+ return Status::Corruption("RcZLexcount failed"); + } + + return Status::OK(); +} + +Status RedisCache::ZRemrangebylex(std::string& key, std::string &min, std::string &max) { + robj *kobj = createObject(OBJ_STRING, sdsnewlen(key.data(), key.size())); + robj *minobj = createObject(OBJ_STRING, sdsnewlen(min.data(), min.size())); + robj *maxobj = createObject(OBJ_STRING, sdsnewlen(max.data(), max.size())); + DEFER { + DecrObjectsRefCount(kobj, minobj, maxobj); + }; + int ret = RcZRemrangebylex(cache_, kobj, minobj, maxobj); + if (C_OK != ret) { + if (REDIS_KEY_NOT_EXIST == ret) { + return Status::NotFound("key not in cache"); + } + return Status::Corruption("RcZRemrangebylex failed"); + } + + return Status::OK(); +} + +} // namespace cache +/* EOF */ diff --git a/tools/pika_migrate/src/net/CMakeLists.txt b/tools/pika_migrate/src/net/CMakeLists.txt new file mode 100644 index 0000000000..dc38d0d3d8 --- /dev/null +++ b/tools/pika_migrate/src/net/CMakeLists.txt @@ -0,0 +1,35 @@ +cmake_minimum_required (VERSION 3.18) + +set (CMAKE_CXX_STANDARD 17) + +project (net) + +aux_source_directory(./src DIR_SRCS) + +if(USE_SSL) + add_definitions("-D__ENABLE_SSL") +endif() + +add_subdirectory(test) +add_subdirectory(examples) + +if(${CMAKE_SYSTEM_NAME} MATCHES "Linux") + list(FILTER DIR_SRCS EXCLUDE REGEX ".net_kqueue.*") +elseif(${CMAKE_SYSTEM_NAME} MATCHES "Darwin" OR ${CMAKE_SYSTEM_NAME} MATCHES "FreeBSD") + list(FILTER DIR_SRCS EXCLUDE REGEX ".net_epoll.*") +endif() + +add_library(net STATIC ${DIR_SRCS} ) + +add_dependencies(net protobuf glog gflags ${LIBUNWIND_NAME}) + + +target_include_directories(net + PUBLIC ${PROJECT_SOURCE_DIR}/.. + PUBLIC ${INSTALL_INCLUDEDIR}) + +target_link_libraries(net + PUBLIC ${GLOG_LIBRARY} + ${GFLAGS_LIBRARY} + ${LIBUNWIND_LIBRARY} +) diff --git a/tools/pika_migrate/src/net/examples/CMakeLists.txt b/tools/pika_migrate/src/net/examples/CMakeLists.txt new file mode 100644 index 0000000000..a5738bc56e --- /dev/null +++ b/tools/pika_migrate/src/net/examples/CMakeLists.txt @@ -0,0 +1,38 @@ +cmake_minimum_required(VERSION 3.18) + +set (CMAKE_CXX_STANDARD 17) +add_subdirectory(performance) + +aux_source_directory(../src DIR_SRCS) + +file(GLOB NET_EXAMPLES_SOURCE "${PROJECT_SOURCE_DIR}/examples/*.cc") + +set(PROTO_FILES ${PROJECT_SOURCE_DIR}/examples/myproto.proto) +custom_protobuf_generate_cpp(PROTO_SRCS PROTO_HDRS ${PROTO_FILES}) + + +foreach(net_example_source ${NET_EXAMPLES_SOURCE}) + get_filename_component(net_example_filename ${net_example_source} NAME) + string(REPLACE ".cc" "" net_example_name ${net_example_filename}) + + add_executable(${net_example_name} EXCLUDE_FROM_ALL ${net_example_source} ${PROTO_SRCS} ${PROTO_HDRS}) + target_include_directories(${net_example_name} + PUBLIC ${CMAKE_CURRENT_BINARY_DIR} + PUBLIC ${PROJECT_SOURCE_DIR}/include + PUBLIC ${PROJECT_SOURCE_DIR}/src + ${ROCKSDB_INCLUDE_DIR} + ${ROCKSDB_SOURCE_DIR} + ) + add_dependencies(${net_example_name} net pstd storage glog gflags ${LIBUNWIND_NAME} protobuf) + + target_link_libraries(${net_example_name} + PUBLIC net + PUBLIC pstd + PUBLIC storage + PUBLIC ${GLOG_LIBRARY} + PUBLIC ${GFLAGS_LIBRARY} + PUBLIC ${LIBUNWIND_LIBRARY} + PUBLIC pthread + PUBLIC ${PROTOBUF_LIBRARY} + ) +endforeach() diff --git a/tools/pika_migrate/src/net/examples/README.md b/tools/pika_migrate/src/net/examples/README.md new file mode 100644 index 0000000000..a5055c8b5c --- /dev/null +++ b/tools/pika_migrate/src/net/examples/README.md @@ -0,0 +1,11 @@ +myproto.proto the proto buffer file used to test pb protocol + +myholy_srv.cc 
server side of myproto.proto with holy thread + +mydispatch_srv.cc server side of myproto.proto with dispatch thread and worker thread + +myproto_cli.cc client support myproto.proto + +myredis_srv.cc A simple server support redis protocol, it can be used to test the performance of net with redis protocol + +performance/ client and server code used to get performance benchmark diff --git a/tools/pika_migrate/src/net/examples/bg_thread.cc b/tools/pika_migrate/src/net/examples/bg_thread.cc new file mode 100644 index 0000000000..a8bc75c2bc --- /dev/null +++ b/tools/pika_migrate/src/net/examples/bg_thread.cc @@ -0,0 +1,102 @@ +// Copyright (c) 2015-present, Qihoo, Inc. All rights reserved. +// This source code is licensed under the BSD-style license found in the +// LICENSE file in the root directory of this source tree. An additional grant +// of patent rights can be found in the PATENTS file in the same directory. + +#include "net/include/bg_thread.h" +#include +#include +#include "pstd/include/pstd_mutex.h" +#include "unistd.h" + +using namespace std; + +static pstd::Mutex print_lock; + +void task(void* arg) { + std::unique_ptr int_arg(static_cast(arg)); + { + std::lock_guard l(print_lock); + std::cout << " task : " << *int_arg << std::endl; + } + sleep(1); +} + +struct TimerItem { + uint64_t exec_time; + void (*function)(void*); + void* arg; + TimerItem(uint64_t _exec_time, void (*_function)(void*), void* _arg) + : exec_time(_exec_time), function(_function), arg(_arg) {} + bool operator<(const TimerItem& item) const { return exec_time > item.exec_time; } +}; + +int main() { + net::BGThread t, t2(5); + t.StartThread(); + t2.StartThread(); + int qsize = 0, pqsize = 0; + + std::cout << "Normal BGTask... " << std::endl; + for (int i = 0; i < 10; i++) { + int* pi = new int(i); + t.Schedule(task, (void*)pi); + t.QueueSize(&pqsize, &qsize); + std::lock_guard l(print_lock); + std::cout << " current queue size:" << qsize << ", " << pqsize << std::endl; + } + std::cout << std::endl << std::endl; + + while (qsize > 0) { + t.QueueSize(&pqsize, &qsize); + sleep(1); + } + + qsize = pqsize = 0; + std::cout << "Limit queue BGTask... " << std::endl; + for (int i = 0; i < 10; i++) { + int* pi = new int(i); + t2.Schedule(task, (void*)pi); + t2.QueueSize(&pqsize, &qsize); + std::lock_guard l(print_lock); + std::cout << " current queue size:" << qsize << ", " << pqsize << std::endl; + } + std::cout << std::endl << std::endl; + + while (qsize > 0) { + t2.QueueSize(&pqsize, &qsize); + sleep(1); + } + + std::cout << "TimerItem Struct... " << std::endl; + std::priority_queue pq; + pq.push(TimerItem(1, task, nullptr)); + pq.push(TimerItem(5, task, nullptr)); + pq.push(TimerItem(3, task, nullptr)); + pq.push(TimerItem(2, task, nullptr)); + pq.push(TimerItem(4, task, nullptr)); + + while (!pq.empty()) { + printf("%ld\n", pq.top().exec_time); + pq.pop(); + } + std::cout << std::endl << std::endl; + + std::cout << "Restart BGThread" << std::endl; + t.StopThread(); + t.StartThread(); + std::cout << "Time BGTask... " << std::endl; + for (int i = 0; i < 10; i++) { + int* pi = new int(i); + t.DelaySchedule(i * 1000, task, (void*)pi); + t.QueueSize(&pqsize, &qsize); + std::lock_guard l(print_lock); + std::cout << " current queue size:" << qsize << ", " << pqsize << std::endl; + } + sleep(3); + std::cout << "QueueClear..." 
<< std::endl; + t.QueueClear(); + sleep(10); + + return 0; +} diff --git a/tools/pika_migrate/src/net/examples/binlog_parser_test.cc b/tools/pika_migrate/src/net/examples/binlog_parser_test.cc new file mode 100644 index 0000000000..9077db4c17 --- /dev/null +++ b/tools/pika_migrate/src/net/examples/binlog_parser_test.cc @@ -0,0 +1,67 @@ +#include +#include +#include "net/include/net_cli.h" +#include "net/include/redis_cli.h" +#include "pstd/include/pstd_coding.h" +#include "pstd/include/xdebug.h" + +using namespace net; + +int main(int argc, char* argv[]) { + if (argc < 3) { + printf("Usage: ./redis_cli ip port\n"); + exit(0); + } + + std::string ip(argv[1]); + int port = atoi(argv[2]); + + std::unique_ptr rcli(NewRedisCli()); + rcli->set_connect_timeout(3000); + + Status s = rcli->Connect(ip, port, "127.0.0.1"); + printf(" RedisCli Connect(%s:%d) return %s\n", ip.c_str(), port, s.ToString().c_str()); + if (!s.ok()) { + printf("Connect failed, %s\n", s.ToString().c_str()); + exit(-1); + } + + net::RedisCmdArgsType redis_argv; + std::string one_command = "*3\r\n$3\r\nSET\r\n$1\r\na\r\n$2\r\nab\r\n"; + + std::string binlog_body; + pstd::PutFixed16(&binlog_body, 1); // type + pstd::PutFixed32(&binlog_body, 0); // exec_time + pstd::PutFixed32(&binlog_body, 10); // server_id + pstd::PutFixed64(&binlog_body, 0); // logic_id + pstd::PutFixed32(&binlog_body, 0); // filenum + pstd::PutFixed64(&binlog_body, 0); // offset + uint32_t content_length = one_command.size(); + pstd::PutFixed32(&binlog_body, content_length); + binlog_body.append(one_command); + + std::string header; + pstd::PutFixed16(&header, 2); + pstd::PutFixed32(&header, binlog_body.size()); + + std::string command = header + binlog_body; + { + for (size_t i = 0; i < command.size(); ++i) { + sleep(1); + std::string one_char_str(command, i, 1); + s = rcli->Send(&one_char_str); + printf("Send %d %s\n", i, s.ToString().c_str()); + } + + s = rcli->Recv(&redis_argv); + printf("Recv return %s\n", s.ToString().c_str()); + if (redis_argv.size() > 0) { + printf(" redis_argv[0] is (%s)\n", redis_argv[0].c_str()); + } + } + + char ch; + scanf("%c", &ch); + + return 0; +} diff --git a/tools/pika_migrate/src/net/examples/http_server.cc b/tools/pika_migrate/src/net/examples/http_server.cc new file mode 100644 index 0000000000..634083a43f --- /dev/null +++ b/tools/pika_migrate/src/net/examples/http_server.cc @@ -0,0 +1,113 @@ +// Copyright (c) 2015-present, Qihoo, Inc. All rights reserved. +// This source code is licensed under the BSD-style license found in the +// LICENSE file in the root directory of this source tree. An additional grant +// of patent rights can be found in the PATENTS file in the same directory. 
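+
+// Usage sketch (assumed invocation, not part of the patch): the handler
+// below buffers the whole request body and answers with its MD5 digest, so
+// once the example target is built it can be exercised with curl, e.g.:
+//
+//   ./http_server 8089
+//   curl --data-binary @somefile http://127.0.0.1:8089/
+//
+// The response body should be the hex MD5 of somefile; the port number here
+// is arbitrary.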
+ +#include +#include +#include +#include + +#include "net/include/http_conn.h" +#include "net/include/net_thread.h" +#include "net/include/server_thread.h" +#include "pstd/include/pstd_hash.h" +#include "pstd/include/pstd_status.h" +#include "net/src/net_multiplexer.h" + +using namespace net; + +class MyHTTPHandles : public net::HTTPHandles { + public: + std::string body_data; + std::string body_md5; + std::string zero_space; + size_t write_pos = 0; + std::chrono::system_clock::time_point start, end; + std::chrono::duration diff; + + // Request handles + virtual bool HandleRequest(const HTTPRequest* req) { + req->Dump(); + body_data.clear(); + + start = std::chrono::system_clock::now(); + + // Continue receive body + return false; + } + virtual void HandleBodyData(const char* data, size_t size) { + std::cout << "ReqBodyPartHandle: " << size << std::endl; + body_data.append(data, size); + } + + // Response handles + virtual void PrepareResponse(HTTPResponse* resp) { + body_md5.assign(pstd::md5(body_data)); + + resp->SetStatusCode(200); + resp->SetContentLength(body_md5.size()); + write_pos = 0; + end = std::chrono::system_clock::now(); + diff = end - start; + std::cout << "Use: " << diff.count() << " ms" << std::endl; + } + + virtual int WriteResponseBody(char* buf, size_t max_size) { + size_t size = std::min(max_size, body_md5.size() - write_pos); + memcpy(buf, body_md5.data() + write_pos, size); + write_pos += size; + return size; + } +}; + +class MyConnFactory : public ConnFactory { + public: + virtual std::shared_ptr NewNetConn(int connfd, const std::string& ip_port, Thread* thread, + void* worker_specific_data, NetMultiplexer* net_epoll) const override { + auto my_handles = std::make_shared(); + return std::make_shared(connfd, ip_port, thread, my_handles, worker_specific_data); + } +}; + +static std::atomic running(false); + +static void IntSigHandle(const int sig) { + printf("Catch Signal %d, cleanup...\n", sig); + running.store(false); + printf("server Exit"); +} + +static void SignalSetup() { + signal(SIGHUP, SIG_IGN); + signal(SIGPIPE, SIG_IGN); + signal(SIGINT, &IntSigHandle); + signal(SIGQUIT, &IntSigHandle); + signal(SIGTERM, &IntSigHandle); +} + +int main(int argc, char* argv[]) { + int port; + if (argc < 2) { + printf("Usage: ./http_server port"); + } else { + port = atoi(argv[1]); + } + + SignalSetup(); + + std::unique_ptr my_conn_factory = std::make_unique(); + std::unique_ptr st(NewDispatchThread(port, 4, my_conn_factory.get(), 1000)); + + if (st->StartThread() != 0) { + printf("StartThread error happened!\n"); + exit(-1); + } + running.store(true); + while (running.load()) { + sleep(1); + } + st->StopThread(); + + return 0; +} diff --git a/tools/pika_migrate/src/net/examples/https_server.cc b/tools/pika_migrate/src/net/examples/https_server.cc new file mode 100644 index 0000000000..7b7243a825 --- /dev/null +++ b/tools/pika_migrate/src/net/examples/https_server.cc @@ -0,0 +1,121 @@ +// Copyright (c) 2015-present, Qihoo, Inc. All rights reserved. +// This source code is licensed under the BSD-style license found in the +// LICENSE file in the root directory of this source tree. An additional grant +// of patent rights can be found in the PATENTS file in the same directory. 
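+
+// Certificate sketch (assumption: any self-signed pair works for local
+// testing; the paths passed to EnableSecurity() in main() below are
+// placeholders). One way to generate such a pair:
+//
+//   openssl req -x509 -newkey rsa:2048 -nodes -days 365 \
+//     -keyout host.key -out host.crt -subj "/CN=localhost"
+//
+// Build with USE_SSL enabled so that __ENABLE_SSL is defined, then run
+// ./https_server <port> just like the plain HTTP example.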
+ +#include +#include +#include +#include + +#include "net/include/http_conn.h" +#include "net/include/net_thread.h" +#include "net/include/server_thread.h" +#include "net/src/net_multiplexer.h" +#include "pstd/include/pstd_hash.h" +#include "pstd/include/pstd_status.h" + +using namespace net; + +class MyHTTPHandles : public net::HTTPHandles { + public: + std::string body_data; + std::string body_md5; + std::string zero_space; + size_t write_pos = 0; + std::chrono::system_clock::time_point start, end; + std::chrono::duration diff; + + // Request handles + virtual bool HandleRequest(const HTTPRequest* req) { + req->Dump(); + body_data.clear(); + + start = std::chrono::system_clock::now(); + + // Continue receive body + return false; + } + virtual void HandleBodyData(const char* data, size_t size) { + std::cout << "ReqBodyPartHandle: " << size << std::endl; + body_data.append(data, size); + } + + // Response handles + virtual void PrepareResponse(HTTPResponse* resp) { + body_md5.assign(pstd::md5(body_data)); + + resp->SetStatusCode(200); + resp->SetContentLength(body_md5.size()); + write_pos = 0; + end = std::chrono::system_clock::now(); + diff = end - start; + std::cout << "Use: " << diff.count() << " ms" << std::endl; + } + + virtual int WriteResponseBody(char* buf, size_t max_size) { + size_t size = std::min(max_size, body_md5.size() - write_pos); + memcpy(buf, body_md5.data() + write_pos, size); + write_pos += size; + return size; + } +}; + +class MyConnFactory : public ConnFactory { + public: + virtual std::shared_ptr NewNetConn(int connfd, const std::string& ip_port, Thread* thread, + void* worker_specific_data, + NetMultiplexer* net_mpx = nullptr) const override { + auto my_handles = std::make_shared(); + return std::make_shared(connfd, ip_port, thread, my_handles, worker_specific_data); + } +}; + +static std::atomic running(false); + +static void IntSigHandle(const int sig) { + printf("Catch Signal %d, cleanup...\n", sig); + running.store(false); + printf("server Exit"); +} + +static void SignalSetup() { + signal(SIGHUP, SIG_IGN); + signal(SIGPIPE, SIG_IGN); + signal(SIGINT, &IntSigHandle); + signal(SIGQUIT, &IntSigHandle); + signal(SIGTERM, &IntSigHandle); +} + +int main(int argc, char* argv[]) { + int port; + if (argc < 2) { + printf("Usage: ./http_server port"); + } else { + port = atoi(argv[1]); + } + + SignalSetup(); + + std::unique_ptr my_conn_factory = std::make_unique(); + std::unique_ptr st(NewDispatchThread(port, 4, my_conn_factory.get(), 1000)); + +#if __ENABLE_SSL + if (st->EnableSecurity("/complete_path_to/host.crt", "/complete_path_to/host.key") != 0) { + printf("EnableSecurity error happened!\n"); + exit(-1); + } +#endif + + if (st->StartThread() != 0) { + printf("StartThread error happened!\n"); + exit(-1); + } + running.store(true); + while (running.load()) { + sleep(1); + } + st->StopThread(); + + return 0; +} diff --git a/tools/pika_migrate/src/net/examples/mydispatch_srv.cc b/tools/pika_migrate/src/net/examples/mydispatch_srv.cc new file mode 100644 index 0000000000..23fc591b91 --- /dev/null +++ b/tools/pika_migrate/src/net/examples/mydispatch_srv.cc @@ -0,0 +1,92 @@ +#include +#include +#include +#include + +#include "net/include/net_thread.h" +#include "net/include/server_thread.h" +#include "pstd/include/xdebug.h" + +#include "myproto.pb.h" +#include "net/include/pb_conn.h" + +#include +#include +#include + +using namespace net; + +class MyConn : public PbConn { + public: + MyConn(int fd, const std::string& ip_port, Thread* thread, void* worker_specific_data); 
+ virtual ~MyConn(); + + protected: + virtual int DealMessage(); + + private: + myproto::Ping ping_; + myproto::PingRes ping_res_; +}; + +MyConn::MyConn(int fd, const std::string& ip_port, Thread* thread, void* worker_specific_data) + : PbConn(fd, ip_port, thread) { + // Handle worker_specific_data ... +} + +MyConn::~MyConn() {} + +int MyConn::DealMessage() { + printf("In the myconn DealMessage branch\n"); + ping_.ParseFromArray(rbuf_ + cur_pos_ - header_len_, header_len_); + ping_res_.Clear(); + ping_res_.set_res(11234); + ping_res_.set_mess("heiheidfdfdf"); + printf("DealMessage receive (%s)\n", ping_res_.mess().c_str()); + std::string res; + ping_res_.SerializeToString(&res); + WriteResp(res); + return 0; +} + +class MyConnFactory : public ConnFactory { + public: + virtual std::shared_ptr NewNetConn(int connfd, const std::string& ip_port, Thread* thread, + void* worker_specific_data, NetMultiplexer* net_epoll) const override { + return std::make_shared(connfd, ip_port, thread, worker_specific_data); + } +}; + +static std::atomic running(false); + +static void IntSigHandle(const int sig) { + printf("Catch Signal %d, cleanup...\n", sig); + running.store(false); + printf("server Exit"); +} + +static void SignalSetup() { + signal(SIGHUP, SIG_IGN); + signal(SIGPIPE, SIG_IGN); + signal(SIGINT, &IntSigHandle); + signal(SIGQUIT, &IntSigHandle); + signal(SIGTERM, &IntSigHandle); +} + +int main() { + SignalSetup(); + std::unique_ptr my_conn_factory = std::make_unique(); + std::unique_ptr st(NewDispatchThread(9211, 10, my_conn_factory.get(), 1000)); + + if (st->StartThread() != 0) { + printf("StartThread error happened!\n"); + exit(-1); + } + running.store(true); + while (running.load()) { + sleep(1); + } + st->StopThread(); + + return 0; +} diff --git a/tools/pika_migrate/src/net/examples/myholy_srv.cc b/tools/pika_migrate/src/net/examples/myholy_srv.cc new file mode 100644 index 0000000000..27607e9a4e --- /dev/null +++ b/tools/pika_migrate/src/net/examples/myholy_srv.cc @@ -0,0 +1,97 @@ +#include +#include +#include +#include + +#include "myproto.pb.h" +#include "net/include/net_conn.h" +#include "net/include/net_thread.h" +#include "net/include/pb_conn.h" +#include "net/include/server_thread.h" +#include "net/src/net_multiplexer.h" + +using namespace net; + +class MyConn : public PbConn { + public: + MyConn(int fd, const std::string& ip_port, Thread* thread, void* worker_specific_data); + virtual ~MyConn(); + + protected: + virtual int DealMessage(); + + private: + myproto::Ping ping_; + myproto::PingRes ping_res_; +}; + +MyConn::MyConn(int fd, const std::string& ip_port, Thread* thread, void* worker_specific_data) + : PbConn(fd, ip_port, thread) { + // Handle worker_specific_data ... 
+} + +MyConn::~MyConn() {} + +int MyConn::DealMessage() { + printf("In the myconn DealMessage branch\n"); + ping_.ParseFromArray(rbuf_ + cur_pos_ - header_len_, header_len_); + printf("DealMessage receive (%s) port %d \n", ping_.address().c_str(), ping_.port()); + + ping_res_.Clear(); + ping_res_.set_res(11234); + ping_res_.set_mess("heiheidfdfdf"); + std::string res; + ping_res_.SerializeToString(&res); + WriteResp(res); + return 0; +} + +class MyConnFactory : public ConnFactory { + public: + virtual std::shared_ptr NewNetConn(int connfd, const std::string& ip_port, Thread* thread, + void* worker_specific_data, NetMultiplexer* net_epoll) const override { + return std::make_shared(connfd, ip_port, thread, worker_specific_data); + } +}; + +static std::atomic running(false); + +static void IntSigHandle(const int sig) { + printf("Catch Signal %d, cleanup...\n", sig); + running.store(false); + printf("server Exit"); +} + +static void SignalSetup() { + signal(SIGHUP, SIG_IGN); + signal(SIGPIPE, SIG_IGN); + signal(SIGINT, &IntSigHandle); + signal(SIGQUIT, &IntSigHandle); + signal(SIGTERM, &IntSigHandle); +} + +int main(int argc, char* argv[]) { + if (argc < 2) { + printf("Usage: ./server port\n"); + exit(0); + } + + int my_port = (argc > 1) ? atoi(argv[1]) : 8221; + + SignalSetup(); + + std::unique_ptr conn_factory = std::make_unique(); + + std::unique_ptr my_thread(NewHolyThread(my_port, conn_factory.get())); + if (my_thread->StartThread() != 0) { + printf("StartThread error happened!\n"); + exit(-1); + } + running.store(true); + while (running.load()) { + sleep(1); + } + my_thread->StopThread(); + + return 0; +} diff --git a/tools/pika_migrate/src/net/examples/myholy_srv_chandle.cc b/tools/pika_migrate/src/net/examples/myholy_srv_chandle.cc new file mode 100644 index 0000000000..a6f6b6cd97 --- /dev/null +++ b/tools/pika_migrate/src/net/examples/myholy_srv_chandle.cc @@ -0,0 +1,122 @@ +#include +#include +#include +#include + +#include "myproto.pb.h" +#include "net/include/net_conn.h" +#include "net/include/net_thread.h" +#include "net/include/pb_conn.h" +#include "net/include/server_thread.h" +#include "net/src/net_multiplexer.h" + +using namespace net; + +class MyConn : public PbConn { + public: + MyConn(int fd, std::string ip_port, Thread* thread, void* private_data); + virtual ~MyConn(); + + Thread* thread() { return thread_; } + + protected: + virtual int DealMessage(); + + private: + Thread* thread_; + int* private_data_; + myproto::Ping ping_; + myproto::PingRes ping_res_; +}; + +MyConn::MyConn(int fd, ::std::string ip_port, Thread* thread, void* worker_specific_data) + : PbConn(fd, ip_port, thread), thread_(thread), private_data_(static_cast(worker_specific_data)) {} + +MyConn::~MyConn() {} + +int MyConn::DealMessage() { + printf("In the myconn DealMessage branch\n"); + ping_.ParseFromArray(rbuf_ + cur_pos_ - header_len_, header_len_); + printf("DealMessage receive (%s) port %d \n", ping_.address().c_str(), ping_.port()); + + int* data = static_cast(private_data_); + printf("Worker's Env: %d\n", *data); + + ping_res_.Clear(); + ping_res_.set_res(11234); + ping_res_.set_mess("heiheidfdfdf"); + std::string res; + ping_res_.SerializeToString(&res); + WriteResp(res); + return 0; +} + +class MyConnFactory : public ConnFactory { + public: + virtual std::shared_ptr NewNetConn(int connfd, const std::string& ip_port, Thread* thread, + void* worker_specific_data, NetMultiplexer* net_epoll) const override { + return std::make_shared(connfd, ip_port, thread, worker_specific_data); + } +}; + +class 
MyServerHandle : public ServerHandle { + public: + virtual void CronHandle() const override { printf("Cron operation\n"); } + using ServerHandle::AccessHandle; + virtual bool AccessHandle(std::string& ip) const override { + printf("Access operation, receive:%s\n", ip.c_str()); + return true; + } + virtual int CreateWorkerSpecificData(void** data) const { + int* num = new int(1234); + *data = static_cast(num); + return 0; + } + virtual int DeleteWorkerSpecificData(void* data) const { + delete static_cast(data); + return 0; + } +}; + +static std::atomic running(false); + +static void IntSigHandle(const int sig) { + printf("Catch Signal %d, cleanup...\n", sig); + running.store(false); + printf("server Exit"); +} + +static void SignalSetup() { + signal(SIGHUP, SIG_IGN); + signal(SIGPIPE, SIG_IGN); + signal(SIGINT, &IntSigHandle); + signal(SIGQUIT, &IntSigHandle); + signal(SIGTERM, &IntSigHandle); +} + +int main(int argc, char* argv[]) { + if (argc < 2) { + printf("Usage: ./server port\n"); + exit(0); + } + + int my_port = (argc > 1) ? atoi(argv[1]) : 8221; + + SignalSetup(); + + MyConnFactory conn_factory; + MyServerHandle handle; + + std::unique_ptr my_thread(NewHolyThread(my_port, &conn_factory, 1000, &handle)); + if (my_thread->StartThread() != 0) { + printf("StartThread error happened!\n"); + exit(-1); + } + running.store(true); + while (running.load()) { + sleep(1); + } + my_thread->StopThread(); + + return 0; +} diff --git a/tools/pika_migrate/src/net/examples/myproto.proto b/tools/pika_migrate/src/net/examples/myproto.proto new file mode 100644 index 0000000000..dea350cfa5 --- /dev/null +++ b/tools/pika_migrate/src/net/examples/myproto.proto @@ -0,0 +1,11 @@ +package myproto; + +message Ping { + required string address = 2; + required int32 port = 3; +} + +message PingRes { + required int32 res = 1; + required string mess = 2; +} diff --git a/tools/pika_migrate/src/net/examples/myproto_cli.cc b/tools/pika_migrate/src/net/examples/myproto_cli.cc new file mode 100644 index 0000000000..881b2b4f74 --- /dev/null +++ b/tools/pika_migrate/src/net/examples/myproto_cli.cc @@ -0,0 +1,53 @@ +#include +#include +#include +#include +#include + +#include "myproto.pb.h" +#include "net/include/net_cli.h" +#include "net/include/net_define.h" + +using namespace net; + +int main(int argc, char* argv[]) { + if (argc < 3) { + printf("Usage: ./client ip port\n"); + exit(0); + } + + std::string ip(argv[1]); + int port = atoi(argv[2]); + + std::unique_ptr cli(NewPbCli()); + + Status s = cli->Connect(ip, port); + if (!s.ok()) { + printf("Connect (%s:%d) failed, %s\n", ip.c_str(), port, s.ToString().c_str()); + } + printf("Connect (%s:%d) ok, fd is %d\n", ip.c_str(), port, cli->fd()); + + for (int i = 0; i < 100000; i++) { + myproto::Ping msg; + msg.set_address("127.00000"); + msg.set_port(2222); + + s = cli->Send((void*)&msg); + if (!s.ok()) { + printf("Send failed %s\n", s.ToString().c_str()); + break; + } + + printf("Send sussces\n"); + myproto::PingRes req; + s = cli->Recv((void*)&req); + if (!s.ok()) { + printf("Recv failed %s\n", s.ToString().c_str()); + break; + } + printf("Recv res %d mess (%s)\n", req.res(), req.mess().c_str()); + } + cli->Close(); + + return 0; +} diff --git a/tools/pika_migrate/src/net/examples/myredis_cli.cc b/tools/pika_migrate/src/net/examples/myredis_cli.cc new file mode 100644 index 0000000000..2fb053d076 --- /dev/null +++ b/tools/pika_migrate/src/net/examples/myredis_cli.cc @@ -0,0 +1,117 @@ +#include +#include +#include +#include +#include +#include + +#include 
"net/include/client_thread.h" +#include "net/include/net_conn.h" +#include "net/include/net_thread.h" +#include "net/include/redis_conn.h" +#include "net/src/net_multiplexer.h" + +using namespace net; + +class MyConn : public RedisConn { + public: + MyConn(int fd, const std::string& ip_port, Thread* thread, void* worker_specific_data); + virtual ~MyConn() = default; + + protected: + int DealMessage(const RedisCmdArgsType& argv, std::string* response) override; + + private: +}; + +MyConn::MyConn(int fd, const std::string& ip_port, Thread* thread, void* worker_specific_data) + : RedisConn(fd, ip_port, thread) { + // Handle worker_specific_data ... +} + +std::unique_ptr client; +int sendto_port; +int MyConn::DealMessage(const RedisCmdArgsType& argv, std::string* response) { + sleep(1); + std::cout << "DealMessage" << std::endl; + std::string set = "*3\r\n$3\r\nSet\r\n$3\r\nabc\r\n$3\r\nabc\r\n"; + client->Write("127.0.0.1", sendto_port, set); + return 0; +} + +class MyConnFactory : public ConnFactory { + public: + virtual std::shared_ptr NewNetConn(int connfd, const std::string& ip_port, Thread* thread, + void* worker_specific_data, + net::NetMultiplexer* net_epoll = nullptr) const override { + return std::make_shared(connfd, ip_port, thread, worker_specific_data); + } +}; + +class MyClientHandle : public net::ClientHandle { + public: + void CronHandle() const override {} + void FdTimeoutHandle(int fd, const std::string& ip_port) const override; + void FdClosedHandle(int fd, const std::string& ip_port) const override; + bool AccessHandle(std::string& ip) const override { return true; } + int CreateWorkerSpecificData(void** data) const override { return 0; } + int DeleteWorkerSpecificData(void* data) const override { return 0; } + void DestConnectFailedHandle(std::string ip_port, std::string reason) const override {} +}; + +static std::atomic running(false); + +static void IntSigHandle(const int sig) { + printf("Catch Signal %d, cleanup...\n", sig); + running.store(false); + printf("server Exit"); +} + +static void SignalSetup() { + signal(SIGHUP, SIG_IGN); + signal(SIGPIPE, SIG_IGN); + signal(SIGINT, &IntSigHandle); + signal(SIGQUIT, &IntSigHandle); + signal(SIGTERM, &IntSigHandle); +} + +bool first_time = true; +void DoCronWork(ClientThread* client, int port) { + if (first_time) { + first_time = false; + std::string ping = "*1\r\n$4\r\nPING\r\n"; + client->Write("127.0.0.1", port, ping); + } +} + +int main(int argc, char* argv[]) { + if (argc < 2) { + printf("client will send to 6379\n"); + } else { + printf("client will send to %d\n", atoi(argv[1])); + } + + sendto_port = (argc > 1) ? 
atoi(argv[1]) : 6379; + + SignalSetup(); + + std::unique_ptr conn_factory = std::make_unique(); + //"handle" will be deleted within "client->StopThread()" + ClientHandle* handle = new ClientHandle(); + + client = std::make_unique(conn_factory.get(), 3000, 60, handle, nullptr); + + if (client->StartThread() != 0) { + printf("StartThread error happened!\n"); + exit(-1); + } + running.store(true); + while (running.load()) { + sleep(1); + DoCronWork(client.get(), sendto_port); + } + + client->StopThread(); + client.reset(); + return 0; +} diff --git a/tools/pika_migrate/src/net/examples/myredis_srv.cc b/tools/pika_migrate/src/net/examples/myredis_srv.cc new file mode 100644 index 0000000000..6672a412bb --- /dev/null +++ b/tools/pika_migrate/src/net/examples/myredis_srv.cc @@ -0,0 +1,114 @@ +#include +#include +#include +#include +#include + +#include "net/include/net_conn.h" +#include "net/include/net_thread.h" +#include "net/include/redis_conn.h" +#include "net/include/server_thread.h" +#include "net/src/holy_thread.h" +#include "net/src/net_multiplexer.h" + +using namespace net; + +std::map db; + +class MyConn : public RedisConn { + public: + MyConn(int fd, const std::string& ip_port, Thread* thread, void* worker_specific_data); + virtual ~MyConn() = default; + + protected: + int DealMessage(const RedisCmdArgsType& argv, std::string* response) override; + + private: +}; + +MyConn::MyConn(int fd, const std::string& ip_port, Thread* thread, void* worker_specific_data) + : RedisConn(fd, ip_port, thread) { + // Handle worker_specific_data ... +} + +int MyConn::DealMessage(const RedisCmdArgsType& argv, std::string* response) { + printf("Get redis message "); + for (int i = 0; i < argv.size(); i++) { + printf("%s ", argv[i].c_str()); + } + printf("\n"); + + std::string val = "result"; + std::string res; + // set command + if (argv.size() == 3) { + response->append("+OK\r\n"); + db[argv[1]] = argv[2]; + } else if (argv.size() == 2) { + std::map::iterator iter = db.find(argv[1]); + if (iter != db.end()) { + const std::string& val = iter->second; + response->append("*1\r\n$"); + response->append(std::to_string(val.length())); + response->append("\r\n"); + response->append(val); + response->append("\r\n"); + } else { + response->append("$-1\r\n"); + } + } else { + response->append("+OK\r\n"); + } + return 0; +} + +class MyConnFactory : public ConnFactory { + public: + virtual std::shared_ptr NewNetConn(int connfd, const std::string& ip_port, Thread* thread, + void* worker_specific_data, net::NetMultiplexer* net_epoll = nullptr) const { + return std::make_shared(connfd, ip_port, thread, worker_specific_data); + } +}; + +static std::atomic running(false); + +static void IntSigHandle(const int sig) { + printf("Catch Signal %d, cleanup...\n", sig); + running.store(false); + printf("server Exit"); +} + +static void SignalSetup() { + signal(SIGHUP, SIG_IGN); + signal(SIGPIPE, SIG_IGN); + signal(SIGINT, &IntSigHandle); + signal(SIGQUIT, &IntSigHandle); + signal(SIGTERM, &IntSigHandle); +} + +int main(int argc, char* argv[]) { + if (argc < 2) { + printf("server will listen to 6379\n"); + } else { + printf("server will listen to %d\n", atoi(argv[1])); + } + int my_port = (argc > 1) ? 
atoi(argv[1]) : 6379; + + SignalSetup(); + + std::unique_ptr conn_factory = std::make_unique(); + + std::unique_ptr my_thread = + std::make_unique(my_port, conn_factory.get(), 1000, nullptr, false); + if (my_thread->StartThread() != 0) { + printf("StartThread error happened!\n"); + exit(-1); + } + running.store(true); + while (running.load()) { + sleep(1); + } + my_thread->StopThread(); + + return 0; +} diff --git a/tools/pika_migrate/src/net/examples/performance/CMakeLists.txt b/tools/pika_migrate/src/net/examples/performance/CMakeLists.txt new file mode 100644 index 0000000000..be83188cc7 --- /dev/null +++ b/tools/pika_migrate/src/net/examples/performance/CMakeLists.txt @@ -0,0 +1,46 @@ +cmake_minimum_required (VERSION 3.18) + +aux_source_directory(../src DIR_SRCS) +set(CMAKE_CXX_STANDARD 17) + + +file(GLOB PERFORMANCE_PROTO_FILES ${PROJECT_SOURCE_DIR}/examples/performance/*.proto) +message(PERFORMANCE_PROTO_FILES: ${PERFORMANCE_PROTO_FILES}) +set(proto_cxx_files "") +set(proto_h_files "") + +foreach(proto_files ${PERFORMANCE_PROTO_FILES}) + custom_protobuf_generate_cpp(PROTO_SRCS PROTO_HDRS ${proto_files}) + list(APPEND proto_cxx_files ${PROTO_SRCS}) + list(APPEND proto_h_files ${PROTO_HDRS}) +endforeach() + + +file(GLOB NET_EXAMPLES_PERFORMANCE_SOURCE ${PROJECT_SOURCE_DIR}/examples/performance/*.cc) + + +foreach(net_example_performance_source ${NET_EXAMPLES_PERFORMANCE_SOURCE}) + get_filename_component(net_example_performance_filename ${net_example_performance_source} NAME) + string(REPLACE ".cc" "" net_example_performance_name ${net_example_performance_filename}) + + add_executable(${net_example_performance_name} EXCLUDE_FROM_ALL ${net_example_performance_source} ${proto_cxx_files} ${proto_h_files}) + target_include_directories(${net_example_performance_name} + PUBLIC ${CMAKE_CURRENT_BINARY_DIR} + PUBLIC ${PROJECT_SOURCE_DIR}/include + PUBLIC ${PROJECT_SOURCE_DIR}/.. 
+  ${ROCKSDB_INCLUDE_DIR}
+  ${ROCKSDB_SOURCE_DIR}
+  )
+  add_dependencies(${net_example_performance_name} net pstd glog gflags ${LIBUNWIND_NAME} protobuf)
+
+  target_link_libraries(${net_example_performance_name}
+    PUBLIC net
+    PUBLIC storage
+    PUBLIC pstd
+    PUBLIC ${GLOG_LIBRARY}
+    PUBLIC ${GFLAGS_LIBRARY}
+    PUBLIC ${LIBUNWIND_LIBRARY}
+    PUBLIC pthread
+    PUBLIC ${PROTOBUF_LIBRARY}
+  )
+endforeach()
diff --git a/tools/pika_migrate/src/net/examples/performance/README.md b/tools/pika_migrate/src/net/examples/performance/README.md
new file mode 100644
index 0000000000..acd332bc99
--- /dev/null
+++ b/tools/pika_migrate/src/net/examples/performance/README.md
@@ -0,0 +1,13 @@
+Client and server code used to benchmark net's performance.
+
+### usage
+
+After compiling you will get two executable programs, server and client.
+
+Start the server:
+./server 127.0.0.1 (your ip) port (listen port)
+
+./client 127.0.0.1 (server ip) port (server port)
+
+Since many clients are needed to push net to its performance limit,
+we usually run 10~20 clients to stress the server.
diff --git a/tools/pika_migrate/src/net/examples/performance/client.cc b/tools/pika_migrate/src/net/examples/performance/client.cc
new file mode 100644
index 0000000000..a408b03308
--- /dev/null
+++ b/tools/pika_migrate/src/net/examples/performance/client.cc
@@ -0,0 +1,48 @@
+#include
+#include
+#include
+#include
+
+#include "message.pb.h"
+#include "net/include/net_cli.h"
+#include "net/include/net_define.h"
+
+using namespace net;
+
+int main(int argc, char* argv[]) {
+  if (argc < 3) {
+    printf("Usage: ./client ip port\n");
+    exit(0);
+  }
+
+  std::string ip(argv[1]);
+  int port = atoi(argv[2]);
+
+  std::unique_ptr<NetCli> cli(NewPbCli());
+
+  Status s = cli->Connect(ip, port);
+  if (!s.ok()) {
+    printf("Connect (%s:%d) failed, %s\n", ip.c_str(), port, s.ToString().c_str());
+  }
+  for (int i = 0; i < 100000000; i++) {
+    Ping msg;
+    msg.set_ping("ping");
+
+    s = cli->Send((void*)&msg);
+    if (!s.ok()) {
+      printf("Send failed %s\n", s.ToString().c_str());
+      break;
+    }
+
+    Pong req;
+    s = cli->Recv((void*)&req);
+    if (!s.ok()) {
+      printf("Recv failed %s\n", s.ToString().c_str());
+      break;
+    }
+    // printf ("Recv (%s)\n", req.pong().c_str());
+  }
+  cli->Close();
+
+  return 0;
+}
diff --git a/tools/pika_migrate/src/net/examples/performance/message.proto b/tools/pika_migrate/src/net/examples/performance/message.proto
new file mode 100644
index 0000000000..e8bb68daad
--- /dev/null
+++ b/tools/pika_migrate/src/net/examples/performance/message.proto
@@ -0,0 +1,7 @@
+message Ping {
+  required string ping = 1;
+}
+
+message Pong {
+  required string pong = 1;
+}
diff --git a/tools/pika_migrate/src/net/examples/performance/server.cc b/tools/pika_migrate/src/net/examples/performance/server.cc
new file mode 100644
index 0000000000..ce70abddcc
--- /dev/null
+++ b/tools/pika_migrate/src/net/examples/performance/server.cc
@@ -0,0 +1,103 @@
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+
+#include "message.pb.h"
+#include "net/include/net_conn.h"
+#include "net/include/net_thread.h"
+#include "net/include/pb_conn.h"
+#include "net/include/server_thread.h"
+
+using namespace net;
+using namespace std;
+
+uint64_t NowMicros() {
+  struct timeval tv;
+  gettimeofday(&tv, nullptr);
+  return static_cast<uint64_t>(tv.tv_sec) * 1000000 + tv.tv_usec;
+}
+
+static atomic<int> num(0);
+
+class PingConn : public PbConn {
+ public:
+  PingConn(int fd, std::string ip_port, net::ServerThread* pself_thread = nullptr) : PbConn(fd, ip_port,
pself_thread) {} + virtual ~PingConn() {} + + int DealMessage() { + num++; + request_.ParseFromArray(rbuf_ + cur_pos_ - header_len_, header_len_); + + response_.Clear(); + response_.set_pong("hello " + request_.ping()); + // res_ = &response_; + + set_is_reply(true); + + return 0; + } + + private: + Ping request_; + Pong response_; + + PingConn(PingConn&); + PingConn& operator=(PingConn&); +}; + +class PingConnFactory : public ConnFactory { + public: + virtual std::shared_ptr NewNetConn(int connfd, const std::string& ip_port, Thread* thread, + void* worker_specific_data, + NetMultiplexer* net_mpx = nullptr) const override { + return std::make_shared(connfd, ip_port, dynamic_cast(thread)); + } +}; + +std::atomic should_stop(false); + +static void IntSigHandle(const int sig) { should_stop.store(true); } + +static void SignalSetup() { + signal(SIGHUP, SIG_IGN); + signal(SIGPIPE, SIG_IGN); + signal(SIGINT, &IntSigHandle); + signal(SIGQUIT, &IntSigHandle); + signal(SIGTERM, &IntSigHandle); +} + +int main(int argc, char* argv[]) { + if (argc < 2) { + printf("Usage: ./server ip port\n"); + exit(0); + } + + std::string ip(argv[1]); + int port = atoi(argv[2]); + + PingConnFactory conn_factory; + + SignalSetup(); + + std::unique_ptr st_thread(NewDispatchThread(ip, port, 24, &conn_factory, 1000)); + st_thread->StartThread(); + pstd::TimeType st, ed; + + while (!should_stop) { + st = NowMicros(); + int prv = num.load(); + sleep(1); + printf("num %d\n", num.load()); + ed = NowMicros(); + printf("mmap cost time microsecond(us) %lld\n", ed - st); + printf("average qps %lf\n", (double)(num.load() - prv) / ((double)(ed - st) / 1000000)); + } + st_thread->StopThread(); + + return 0; +} diff --git a/tools/pika_migrate/src/net/examples/redis_cli_test.cc b/tools/pika_migrate/src/net/examples/redis_cli_test.cc new file mode 100644 index 0000000000..c2b40c33dd --- /dev/null +++ b/tools/pika_migrate/src/net/examples/redis_cli_test.cc @@ -0,0 +1,123 @@ +#include "net/include/redis_cli.h" +#include +#include +#include "net/include/net_cli.h" +#include "pstd/include/xdebug.h" + +using namespace net; + +int main(int argc, char* argv[]) { + if (argc < 3) { + printf("Usage: ./redis_cli ip port\n"); + exit(0); + } + + std::string ip(argv[1]); + int port = atoi(argv[2]); + + std::string str; + int i = 5; + + printf("\nTest Serialize\n"); + int ret = net::SerializeRedisCommand(&str, "HSET %s %d", "key", i); + printf(" 1. Serialize by va return %d, (%s)\n", ret, str.c_str()); + + RedisCmdArgsType vec; + vec.push_back("hset"); + vec.push_back("key"); + vec.push_back(std::to_string(5)); + + ret = net::SerializeRedisCommand(vec, &str); + printf(" 2. 
Serialize by vec return %d, (%s)\n", ret, str.c_str()); + + std::unique_ptr rcli(NewRedisCli()); + rcli->set_connect_timeout(3000); + + // redis v3.2+ protect mode will block other ip + // printf (" Connect with bind_ip(101.199.114.205)\n"); + // Status s = rcli->Connect(ip, port, "101.199.114.205"); + + Status s = rcli->Connect(ip, port, "101.199.114.205"); + // Test connect timeout with a non-routable IP + // Status s = rcli->Connect("10.255.255.1", 9824); + + printf(" RedisCli Connect(%s:%d) return %s\n", ip.c_str(), port, s.ToString().c_str()); + if (!s.ok()) { + printf("Connect failed, %s\n", s.ToString().c_str()); + exit(-1); + } + + ret = rcli->set_send_timeout(100); + printf("set send timeout 100 ms, return %d\n", ret); + + ret = rcli->set_recv_timeout(100); + printf("set recv timeout 100 ms, return %d\n", ret); + + /* + char ch; + scanf ("%c", &ch); + */ + + net::RedisCmdArgsType redis_argv; + printf("\nTest Send and Recv Ping\n"); + std::string ping = "*1\r\n$4\r\nping\r\n"; + for (int i = 0; i < 1; i++) { + s = rcli->Send(&ping); + printf("Send %d: %s\n", i, s.ToString().c_str()); + + s = rcli->Recv(&redis_argv); + printf("Recv %d: return %s\n", i, s.ToString().c_str()); + if (redis_argv.size() > 0) { + printf(" redis_argv[0] is (%s)\n", redis_argv[0].c_str()); + } + } + + printf("\nTest Send and Recv Mutli\n"); + net::SerializeRedisCommand(&str, "MSET a 1 b 2 c 3 d 4"); + printf("Send mset parse (%s)\n", str.c_str()); + s = rcli->Send(&str); + printf("Send mset return %s\n", s.ToString().c_str()); + + s = rcli->Recv(&redis_argv); + printf("Recv mset return %s with %lu elements\n", s.ToString().c_str(), redis_argv.size()); + for (size_t i = 0; i < redis_argv.size(); i++) { + printf(" redis_argv[%lu] = (%s)", i, redis_argv[i].c_str()); + } + + printf("\n\nTest Mget case 1: send 1 time, and recv 1 time\n"); + net::SerializeRedisCommand(&str, "MGET a b c d "); + printf("Send mget parse (%s)\n", str.c_str()); + + for (int si = 0; si < 2; si++) { + s = rcli->Send(&str); + printf("Send mget case 1: i=%d, return %s\n", si, s.ToString().c_str()); + + s = rcli->Recv(&redis_argv); + printf("Recv mget case 1: i=%d, return %s with %lu elements\n", si, s.ToString().c_str(), redis_argv.size()); + for (size_t i = 0; i < redis_argv.size(); i++) { + printf(" redis_argv[%lu] = (%s)\n", i, redis_argv[i].c_str()); + } + } + + printf("\nTest Mget case 2: send 2 times, then recv 2 times\n"); + net::SerializeRedisCommand(&str, "MGET a b c d "); + printf("\nSend mget parse (%s)\n", str.c_str()); + + for (int si = 0; si < 2; si++) { + s = rcli->Send(&str); + printf("Send mget case 2: i=%d, return %s\n", si, s.ToString().c_str()); + } + + for (int si = 0; si < 2; si++) { + s = rcli->Recv(&redis_argv); + printf("Recv mget case 1: i=%d, return %s with %lu elements\n", si, s.ToString().c_str(), redis_argv.size()); + for (size_t i = 0; i < redis_argv.size(); i++) { + printf(" redis_argv[%lu] = (%s)\n", i, redis_argv[i].c_str()); + } + } + + char ch; + scanf("%c", &ch); + + return 0; +} diff --git a/tools/pika_migrate/src/net/examples/redis_parser_test.cc b/tools/pika_migrate/src/net/examples/redis_parser_test.cc new file mode 100644 index 0000000000..90bee28692 --- /dev/null +++ b/tools/pika_migrate/src/net/examples/redis_parser_test.cc @@ -0,0 +1,108 @@ +#include +#include +#include "net/include/net_cli.h" +#include "net/include/redis_cli.h" +#include "pstd/include/xdebug.h" + +using namespace net; + +int main(int argc, char* argv[]) { + if (argc < 3) { + printf("Usage: ./redis_parser_test ip port\n"); + 
exit(0); + } + + std::string ip(argv[1]); + int port = atoi(argv[2]); + + std::unique_ptr rcli(NewRedisCli()); + rcli->set_connect_timeout(3000); + + Status s = rcli->Connect(ip, port, "127.0.0.1"); + printf(" RedisCli Connect(%s:%d) return %s\n", ip.c_str(), port, s.ToString().c_str()); + if (!s.ok()) { + printf("Connect failed, %s\n", s.ToString().c_str()); + exit(-1); + } + + net::RedisCmdArgsType redis_argv; + + std::string one_command = "*3\r\n$3\r\nSET\r\n$1\r\na\r\n$2\r\nab\r\n"; + + { + printf("\nTest Send One whole command\n"); + std::string one_command = "*3\r\n$3\r\nSET\r\n$1\r\na\r\n$2\r\nab\r\n"; + s = rcli->Send(&one_command); + printf("Send %s\n", s.ToString().c_str()); + + s = rcli->Recv(&redis_argv); + printf("Recv return %s\n", s.ToString().c_str()); + if (redis_argv.size() > 0) { + printf(" redis_argv[0] is (%s)\n", redis_argv[0].c_str()); + } + } + + { + printf("\nTest Send command into two times bulk itself break\n"); + std::string half_command = "*3\r\n$3\r\nSET\r\n$3\r\nabc\r\n$10\r\n12345"; + std::string another_half_command = "67890\r\n"; + std::string one_command_and_a_half = one_command + half_command; + s = rcli->Send(&one_command_and_a_half); + printf("Send %s\n", s.ToString().c_str()); + sleep(1); + s = rcli->Send(&another_half_command); + printf("Send %s\n", s.ToString().c_str()); + + s = rcli->Recv(&redis_argv); + printf("Recv return %s\n", s.ToString().c_str()); + if (redis_argv.size() > 0) { + printf(" redis_argv[0] is (%s)\n", redis_argv[0].c_str()); + } + } + + { + printf("\nTest Send command into two times bulk num break\n"); + std::string half_command = "*3\r\n$3\r\nSET\r\n$1"; + std::string another_half_command = "0\r\n0123456789\r\n$10\r\n1234567890\r\n"; + std::string one_command_and_a_half = one_command + half_command; + s = rcli->Send(&one_command_and_a_half); + printf("Send %s\n", s.ToString().c_str()); + sleep(1); + s = rcli->Send(&another_half_command); + printf("Send %s\n", s.ToString().c_str()); + + s = rcli->Recv(&redis_argv); + printf("Recv return %s\n", s.ToString().c_str()); + if (redis_argv.size() > 0) { + printf(" redis_argv[0] is (%s)\n", redis_argv[0].c_str()); + } + } + + { + printf("\nTest Send command byte by byte\n"); + std::string half_command = "*"; + std::string another_half_command = + "11\r\n$4\r\nMSET\r\n$10\r\n0123456789\r\n$10\r\n1234567890\r\n$1\r\na\r\n$1\r\na\r\n$1\r\na\r\n$1\r\na\r\n$" + "1\r\na\r\n$1\r\na\r\n$1\r\na\r\n$1\r\na\r\n"; + std::string one_command_and_a_half = one_command + half_command; + s = rcli->Send(&one_command_and_a_half); + printf("Send %s\n", s.ToString().c_str()); + for (size_t i = 0; i < another_half_command.size(); ++i) { + sleep(1); + std::string one_char_str(another_half_command, i, 1); + s = rcli->Send(&one_char_str); + printf("Send %d %s\n", i, s.ToString().c_str()); + } + + s = rcli->Recv(&redis_argv); + printf("Recv return %s\n", s.ToString().c_str()); + if (redis_argv.size() > 0) { + printf(" redis_argv[0] is (%s)\n", redis_argv[0].c_str()); + } + } + + char ch; + scanf("%c", &ch); + + return 0; +} diff --git a/tools/pika_migrate/src/net/examples/simple_http_server.cc b/tools/pika_migrate/src/net/examples/simple_http_server.cc new file mode 100644 index 0000000000..73751c95e3 --- /dev/null +++ b/tools/pika_migrate/src/net/examples/simple_http_server.cc @@ -0,0 +1,93 @@ +// Copyright (c) 2015-present, Qihoo, Inc. All rights reserved. +// This source code is licensed under the BSD-style license found in the +// LICENSE file in the root directory of this source tree. 
An additional grant +// of patent rights can be found in the PATENTS file in the same directory. + +#include +#include +#include + +#include "net/include/net_thread.h" +#include "net/include/server_thread.h" +#include "net/include/simple_http_conn.h" +#include "net_multiplexer.h" +#include "pstd/include/pstd_status.h" + +using namespace net; + +class MyHTTPConn : public net::SimpleHTTPConn { + public: + MyHTTPConn(const int fd, const std::string& ip_port, Thread* worker) : SimpleHTTPConn(fd, ip_port, worker) {} + virtual void DealMessage(const net::Request* req, net::Response* res) { + std::cout << "handle get" << std::endl; + std::cout << " + method: " << req->method << std::endl; + std::cout << " + path: " << req->path << std::endl; + std::cout << " + version: " << req->version << std::endl; + std::cout << " + content: " << req->content << std::endl; + std::cout << " + headers: " << std::endl; + for (auto& h : req->headers) { + std::cout << " + " << h.first << ":" << h.second << std::endl; + } + std::cout << " + query_params: " << std::endl; + for (auto& q : req->query_params) { + std::cout << " + " << q.first << ":" << q.second << std::endl; + } + std::cout << " + post_params: " << std::endl; + for (auto& q : req->post_params) { + std::cout << " + " << q.first << ":" << q.second << std::endl; + } + + res->SetStatusCode(200); + res->SetBody("china"); + } +}; + +class MyConnFactory : public ConnFactory { + public: + virtual std::shared_ptr NewNetConn(int connfd, const std::string& ip_port, Thread* thread, + void* worker_specific_data, NetMultiplexer* net_epoll) const { + return std::make_shared(connfd, ip_port, thread); + } +}; + +static std::atomic running(false); + +static void IntSigHandle(const int sig) { + printf("Catch Signal %d, cleanup...\n", sig); + running.store(false); + printf("server Exit"); +} + +static void SignalSetup() { + signal(SIGHUP, SIG_IGN); + signal(SIGPIPE, SIG_IGN); + signal(SIGINT, &IntSigHandle); + signal(SIGQUIT, &IntSigHandle); + signal(SIGTERM, &IntSigHandle); +} + +int main(int argc, char* argv[]) { + int port; + if (argc < 2) { + printf("Usage: ./simple_http_server port"); + } else { + port = atoi(argv[1]); + } + + SignalSetup(); + + std::unique_ptr my_conn_factory = std::make_unique(); + std::unique_ptr st(NewDispatchThread(port, 4, my_conn_factory.get(), 1000)); + + if (st->StartThread() != 0) { + printf("StartThread error happened!\n"); + exit(-1); + } + running.store(true); + while (running.load()) { + sleep(1); + } + st->StopThread(); + + return 0; +} diff --git a/tools/pika_migrate/src/net/examples/thread_pool_test.cc b/tools/pika_migrate/src/net/examples/thread_pool_test.cc new file mode 100644 index 0000000000..d220b7f695 --- /dev/null +++ b/tools/pika_migrate/src/net/examples/thread_pool_test.cc @@ -0,0 +1,97 @@ +// Copyright (c) 2015-present, Qihoo, Inc. All rights reserved. +// This source code is licensed under the BSD-style license found in the +// LICENSE file in the root directory of this source tree. An additional grant +// of patent rights can be found in the PATENTS file in the same directory. 
+ +#include "unistd.h" + +#include +#include +#include +#include + +#include "net/include/thread_pool.h" +#include "pstd/include/pstd_mutex.h" + +using namespace std; + +uint64_t NowMicros() { + struct timeval tv; + gettimeofday(&tv, nullptr); + return static_cast(tv.tv_sec) * 1000000 + tv.tv_usec; +} + +static pstd::Mutex print_lock; + +void task(void* arg) { + std::unique_ptr int_arg(static_cast(arg)); + { + std::lock_guard l(print_lock); + std::cout << " task : " << *int_arg << " time(micros) " << NowMicros() << " thread id: " << pthread_self() + << std::endl; + } + sleep(1); +} + +int main() { + // 10 threads + net::ThreadPool t(10, 1000), t2(10, 5); + t.start_thread_pool(); + t2.start_thread_pool(); + size_t qsize = 0, pqsize = 0; + + std::cout << "Test Normal Task... " << std::endl; + for (int i = 0; i < 10; i++) { + int* pi = new int(i); + t.Schedule(task, (void*)pi); + t.cur_queue_size(&qsize); + t.cur_time_queue_size(&pqsize); + std::lock_guard l(print_lock); + std::cout << " current queue size:" << qsize << ", " << pqsize << std::endl; + } + + while (qsize > 0) { + t.cur_queue_size(&qsize); + sleep(1); + } + + std::cout << std::endl << std::endl << std::endl; + + qsize = pqsize = 0; + std::cout << "Test Time Task" << std::endl; + t.stop_thread_pool(); + t.start_thread_pool(); + for (int i = 0; i < 10; i++) { + int* pi = new int(i); + t.DelaySchedule(i * 1000, task, (void*)pi); + t.cur_queue_size(&qsize); + t.cur_time_queue_size(&pqsize); + std::lock_guard l(print_lock); + std::cout << "Schedule task " << i << " time(micros) " << NowMicros() << " for " << i * 1000 * 1000 << " micros " + << std::endl; + } + while (pqsize > 0) { + t.cur_time_queue_size(&pqsize); + sleep(1); + } + std::cout << std::endl << std::endl; + + qsize = pqsize = 0; + t.stop_thread_pool(); + t.start_thread_pool(); + std::cout << "Test Drop Task... " << std::endl; + for (int i = 0; i < 10; i++) { + int* pi = new int(i); + t.DelaySchedule(i * 1000, task, (void*)pi); + t.cur_queue_size(&qsize); + t.cur_time_queue_size(&pqsize); + std::lock_guard l(print_lock); + std::cout << " current queue size:" << qsize << ", " << pqsize << std::endl; + } + sleep(3); + std::cout << "QueueClear..." << std::endl; + t.stop_thread_pool(); + sleep(10); + + return 0; +} diff --git a/tools/pika_migrate/src/net/include/backend_thread.h b/tools/pika_migrate/src/net/include/backend_thread.h new file mode 100644 index 0000000000..b374ec86c6 --- /dev/null +++ b/tools/pika_migrate/src/net/include/backend_thread.h @@ -0,0 +1,161 @@ +// Copyright (c) 2015-present, Qihoo, Inc. All rights reserved. +// This source code is licensed under the BSD-style license found in the +// LICENSE file in the root directory of this source tree. An additional grant +// of patent rights can be found in the PATENTS file in the same directory. + +#ifndef NET_INCLUDE_BACKEND_THREAD_H_ +#define NET_INCLUDE_BACKEND_THREAD_H_ + +#include +#include +#include +#include +#include + +#include "net/include/net_thread.h" +#include "net/src/net_multiplexer.h" +#include "pstd/include/pstd_mutex.h" +#include "pstd/include/pstd_status.h" + +// remove 'unused parameter' warning +#define UNUSED(expr) \ + do { \ + (void)(expr); \ + } while (0) + +#define kConnWriteBuf (1024 * 1024 * 100) // cache 100 MB data per connection + +namespace net { + +struct NetFiredEvent; +class ConnFactory; +class NetConn; + +/* + * BackendHandle will be invoked at appropriate occasion + * in client thread's main loop. 
+ */ +class BackendHandle { + public: + BackendHandle() = default; + virtual ~BackendHandle() = default; + + /* + * CronHandle() will be invoked on every cron_interval elapsed. + */ + virtual void CronHandle() const {} + + /* + * FdTimeoutHandle(...) will be invoked after connection timeout. + */ + virtual void FdTimeoutHandle(int fd, const std::string& ip_port) const { + UNUSED(fd); + UNUSED(ip_port); + } + + /* + * FdClosedHandle(...) will be invoked before connection closed. + */ + virtual void FdClosedHandle(int fd, const std::string& ip_port) const { + UNUSED(fd); + UNUSED(ip_port); + } + + /* + * AccessHandle(...) will be invoked after Write invoked + * but before handled. + */ + virtual bool AccessHandle(std::string& ip) const { + UNUSED(ip); + return true; + } + + /* + * CreateWorkerSpecificData(...) will be invoked in StartThread() routine. + * 'data' pointer should be assigned. + */ + virtual int CreateWorkerSpecificData(void** data) const { + UNUSED(data); + return 0; + } + + /* + * DeleteWorkerSpecificData(...) is related to CreateWorkerSpecificData(...), + * it will be invoked in StopThread(...) routine, + * resources assigned in CreateWorkerSpecificData(...) should be deleted in + * this handle + */ + virtual int DeleteWorkerSpecificData(void* data) const { + UNUSED(data); + return 0; + } + + /* + * DestConnectFailedHandle(...) will run the invoker's logic when socket connect failed + */ + virtual void DestConnectFailedHandle(const std::string& ip_port, const std::string& reason) const { + UNUSED(ip_port); + UNUSED(reason); + } +}; + +class BackendThread : public Thread { + public: + BackendThread(ConnFactory* conn_factory, int cron_interval, int keepalive_timeout, BackendHandle* handle, + void* private_data); + ~BackendThread() override; + /* + * StartThread will return the error code as pthread_create return + * Return 0 if success + */ + int StartThread() override; + int StopThread() override; + void set_thread_name(const std::string& name) override { Thread::set_thread_name(name); } + pstd::Status Write(int fd, const std::string& msg); + pstd::Status Close(int fd); + // Try to connect fd noblock, if return EINPROGRESS or EAGAIN or EWOULDBLOCK + // put this fd in epoll (SetWaitConnectOnEpoll), process in ProcessConnectStatus + pstd::Status Connect(const std::string& dst_ip, int dst_port, int* fd); + std::shared_ptr GetConn(int fd); + + private: + void* ThreadMain() override; + + void InternalDebugPrint(); + // Set connect fd into epoll + // connect condition: no EPOLLERR EPOLLHUP events, no error in socket opt + pstd::Status ProcessConnectStatus(NetFiredEvent* pfe, int* should_close); + void SetWaitConnectOnEpoll(int sockfd); + + void AddConnection(const std::string& peer_ip, int peer_port, int sockfd); + void CloseFd(const std::shared_ptr& conn); + void CloseFd(int fd); + void CleanUpConnRemaining(int fd); + void DoCronTask(); + void NotifyWrite(std::string& ip_port); + void NotifyWrite(int fd); + void NotifyClose(int fd); + void ProcessNotifyEvents(const NetFiredEvent* pfe); + + int keepalive_timeout_; + int cron_interval_; + BackendHandle* handle_; + bool own_handle_{false}; + void* private_data_; + + /* + * The Epoll event handler + */ + std::unique_ptr net_multiplexer_; + + ConnFactory* conn_factory_; + + pstd::Mutex mu_; + std::map> to_send_; // ip+":"+port, to_send_msg + + std::map> conns_; + std::set connecting_fds_; +}; + +} // namespace net +#endif // NET_INCLUDE_CLIENT_THREAD_H_ diff --git a/tools/pika_migrate/src/net/include/bg_thread.h 
b/tools/pika_migrate/src/net/include/bg_thread.h
new file mode 100644
index 0000000000..b9c5259273
--- /dev/null
+++ b/tools/pika_migrate/src/net/include/bg_thread.h
@@ -0,0 +1,81 @@
+// Copyright (c) 2015-present, Qihoo, Inc.  All rights reserved.
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree. An additional grant
+// of patent rights can be found in the PATENTS file in the same directory.
+
+#ifndef NET_INCLUDE_BG_THREAD_H_
+#define NET_INCLUDE_BG_THREAD_H_
+
+#include <functional>
+#include <memory>
+#include <queue>
+#include "net/include/net_thread.h"
+
+#include "pstd/include/pstd_mutex.h"
+
+namespace net {
+
+struct TimerItem {
+  uint64_t exec_time;
+  void (*function)(void*);
+  void* arg;
+  TimerItem(uint64_t _exec_time, void (*_function)(void*), void* _arg)
+      : exec_time(_exec_time), function(_function), arg(_arg) {}
+  bool operator<(const TimerItem& item) const { return exec_time > item.exec_time; }
+};
+
+class BGThread final : public Thread {
+ public:
+  explicit BGThread(int full = 100000) : full_(full) {}
+
+  ~BGThread() override {
+    // calling the virtual StopThread() here is safe only because BGThread is final
+    StopThread();
+  }
+
+  int StopThread() override {
+    should_stop_ = true;
+    rsignal_.notify_one();
+    wsignal_.notify_one();
+    return Thread::StopThread();
+  }
+
+  void Schedule(void (*function)(void*), void* arg);
+  void Schedule(void (*function)(void*), void* arg, std::function<void()>& call_back);
+  /*
+   * timeout is in milliseconds
+   */
+  void DelaySchedule(uint64_t timeout, void (*function)(void*), void* arg);
+
+  void QueueSize(int* pri_size, int* qu_size);
+  void QueueClear();
+  void SwallowReadyTasks();
+
+ private:
+  class BGItem {
+   public:
+    void (*function)(void*);
+    void* arg;
+    // dtor_call_back is an optional callback, invoked when the item is destroyed
+    std::function<void()> dtor_call_back;
+    BGItem(void (*_function)(void*), void* _arg) : function(_function), arg(_arg) {}
+    BGItem(void (*_function)(void*), void* _arg, std::function<void()>& _dtor_call_back) : function(_function), arg(_arg), dtor_call_back(_dtor_call_back) {}
+    ~BGItem() {
+      if (dtor_call_back) {
+        dtor_call_back();
+      }
+    }
+  };
+
+  std::queue<std::unique_ptr<BGItem>> queue_;
+  std::priority_queue<TimerItem> timer_queue_;
+
+  size_t full_;
+  pstd::Mutex mu_;
+  pstd::CondVar rsignal_;
+  pstd::CondVar wsignal_;
+  void* ThreadMain() override;
+};
+
+}  // namespace net
+#endif  // NET_INCLUDE_BG_THREAD_H_
diff --git a/tools/pika_migrate/src/net/include/build_version.h b/tools/pika_migrate/src/net/include/build_version.h
new file mode 100644
index 0000000000..f3726d8e7b
--- /dev/null
+++ b/tools/pika_migrate/src/net/include/build_version.h
@@ -0,0 +1,20 @@
+// Copyright (c) 2015-present, Qihoo, Inc.  All rights reserved.
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree. An additional grant
+// of patent rights can be found in the PATENTS file in the same directory.
+//
+// Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree. An additional grant
+// of patent rights can be found in the PATENTS file in the same directory.
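+// ---------------------------------------------------------------------------
+// Usage sketch for the BGThread API declared in bg_thread.h above. This is
+// illustrative only: `WriteLog` and its heap-allocated std::string argument
+// are hypothetical, not part of this patch.
+//
+//   static void WriteLog(void* arg) {
+//     std::unique_ptr<std::string> line(static_cast<std::string*>(arg));
+//     // ... persist *line; the task owns and frees its argument ...
+//   }
+//
+//   net::BGThread bg;
+//   bg.StartThread();
+//   bg.Schedule(&WriteLog, new std::string("run asap on the worker"));
+//   bg.DelaySchedule(500, &WriteLog, new std::string("run ~500ms later"));
+//   bg.StopThread();  // also invoked by ~BGThread()
+// ---------------------------------------------------------------------------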
+// +#ifndef NET_INCLUDE_BUILD_VERSION_H_ +#define NET_INCLUDE_BUILD_VERSION_H_ + +// this variable tells us about the git revision +extern const char* net_build_git_sha; + +// Date on which the code was compiled: +extern const char* net_build_compile_date; + +#endif // NET_INCLUDE_BUILD_VERSION_H_ diff --git a/tools/pika_migrate/src/net/include/client_thread.h b/tools/pika_migrate/src/net/include/client_thread.h new file mode 100644 index 0000000000..c57174724d --- /dev/null +++ b/tools/pika_migrate/src/net/include/client_thread.h @@ -0,0 +1,162 @@ +// Copyright (c) 2015-present, Qihoo, Inc. All rights reserved. +// This source code is licensed under the BSD-style license found in the +// LICENSE file in the root directory of this source tree. An additional grant +// of patent rights can be found in the PATENTS file in the same directory. + +#ifndef NET_INCLUDE_CLIENT_THREAD_H_ +#define NET_INCLUDE_CLIENT_THREAD_H_ + +#include +#include +#include +#include +#include + +#include "net/include/net_thread.h" +#include "net/src/net_multiplexer.h" +#include "pstd/include/pstd_mutex.h" +#include "pstd/include/pstd_status.h" + +// remove 'unused parameter' warning +#define UNUSED(expr) \ + do { \ + (void)(expr); \ + } while (0) + +#define kConnWriteBuf (1024 * 1024 * 100) // cache 100 MB data per connection + +namespace net { + +struct NetFiredEvent; +class ConnFactory; +class NetConn; + +/* + * ClientHandle will be invoked at appropriate occasion + * in client thread's main loop. + */ +class ClientHandle { + public: + ClientHandle() = default; + virtual ~ClientHandle() = default; + + /* + * CronHandle() will be invoked on every cron_interval elapsed. + */ + virtual void CronHandle() const {} + + /* + * FdTimeoutHandle(...) will be invoked after connection timeout. + */ + virtual void FdTimeoutHandle(int fd, const std::string& ip_port) const { + UNUSED(fd); + UNUSED(ip_port); + } + + /* + * FdClosedHandle(...) will be invoked before connection closed. + */ + virtual void FdClosedHandle(int fd, const std::string& ip_port) const { + UNUSED(fd); + UNUSED(ip_port); + } + + /* + * AccessHandle(...) will be invoked after Write invoked + * but before handled. + */ + virtual bool AccessHandle(std::string& ip) const { + UNUSED(ip); + return true; + } + + /* + * CreateWorkerSpecificData(...) will be invoked in StartThread() routine. + * 'data' pointer should be assigned. + */ + virtual int CreateWorkerSpecificData(void** data) const { + UNUSED(data); + return 0; + } + + /* + * DeleteWorkerSpecificData(...) is related to CreateWorkerSpecificData(...), + * it will be invoked in StopThread(...) routine, + * resources assigned in CreateWorkerSpecificData(...) should be deleted in + * this handle + */ + virtual int DeleteWorkerSpecificData(void* data) const { + UNUSED(data); + return 0; + } + + /* + * DestConnectFailedHandle(...) 
will run the invoker's logic when socket connect failed + */ + virtual void DestConnectFailedHandle(const std::string& ip_port, const std::string& reason) const { + UNUSED(ip_port); + UNUSED(reason); + } +}; + +class ClientThread : public Thread { + public: + ClientThread(ConnFactory* conn_factory, int cron_interval, int keepalive_timeout, ClientHandle* handle, + void* private_data); + ~ClientThread() override; + /* + * StartThread will return the error code as pthread_create return + * Return 0 if success + */ + int StartThread() override; + int StopThread() override; + void set_thread_name(const std::string& name) override { Thread::set_thread_name(name); } + pstd::Status Write(const std::string& ip, int port, const std::string& msg); + pstd::Status Close(const std::string& ip, int port); + + private: + void* ThreadMain() override; + + void InternalDebugPrint(); + // Set connect fd into epoll + // connect condition: no EPOLLERR EPOLLHUP events, no error in socket opt + pstd::Status ProcessConnectStatus(NetFiredEvent* pfe, int* should_close); + void SetWaitConnectOnEpoll(int sockfd); + + void NewConnection(const std::string& peer_ip, int peer_port, int sockfd); + // Try to connect fd noblock, if return EINPROGRESS or EAGAIN or EWOULDBLOCK + // put this fd in epoll (SetWaitConnectOnEpoll), process in ProcessConnectStatus + pstd::Status ScheduleConnect(const std::string& dst_ip, int dst_port); + void CloseFd(const std::shared_ptr& conn); + void CloseFd(int fd, const std::string& ip_port); + void CleanUpConnRemaining(const std::string& ip_port); + void DoCronTask(); + void NotifyWrite(const std::string& ip_port); + void ProcessNotifyEvents(const NetFiredEvent* pfe); + + int keepalive_timeout_; + int cron_interval_; + ClientHandle* handle_; + bool own_handle_{false}; + void* private_data_; + + /* + * The event handler + */ + std::unique_ptr net_multiplexer_; + + ConnFactory* conn_factory_; + + pstd::Mutex mu_; + std::map> to_send_; // ip+":"+port, to_send_msg + + std::map> fd_conns_; + std::map> ipport_conns_; + std::set connecting_fds_; + + pstd::Mutex to_del_mu_; + std::vector to_del_; +}; + +} // namespace net +#endif // NET_INCLUDE_CLIENT_THREAD_H_ diff --git a/tools/pika_migrate/src/net/include/http_conn.h b/tools/pika_migrate/src/net/include/http_conn.h new file mode 100644 index 0000000000..5996bdeee2 --- /dev/null +++ b/tools/pika_migrate/src/net/include/http_conn.h @@ -0,0 +1,204 @@ +// Copyright (c) 2015-present, Qihoo, Inc. All rights reserved. +// This source code is licensed under the BSD-style license found in the +// LICENSE file in the root directory of this source tree. An additional grant +// of patent rights can be found in the PATENTS file in the same directory. 
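+// ---------------------------------------------------------------------------
+// Usage sketch for the ClientThread API declared in client_thread.h above
+// (illustrative only; `MyConnFactory` is a hypothetical ConnFactory subclass,
+// e.g. like the factories in the examples directory):
+//
+//   MyConnFactory factory;
+//   net::ClientHandle handle;  // default no-op callbacks
+//   net::ClientThread client(&factory, 1000 /*cron ms*/, 60 /*keepalive s*/,
+//                            &handle, nullptr /*private data*/);
+//   client.StartThread();
+//   // Queues the message for 127.0.0.1:9221; the thread connects on demand
+//   // and flushes pending data from its event loop.
+//   pstd::Status s = client.Write("127.0.0.1", 9221, "ping\r\n");
+//   client.Close("127.0.0.1", 9221);
+//   client.StopThread();
+// ---------------------------------------------------------------------------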
+ +#ifndef NET_INCLUDE_HTTP_CONN_H_ +#define NET_INCLUDE_HTTP_CONN_H_ +#include +#include +#include +#include + +#include "pstd/include/pstd_status.h" +#include "pstd/include/xdebug.h" +#include "pstd/include/noncopyable.h" + +#include "net/include/net_conn.h" +#include "net/include/net_define.h" +#include "net/src/net_util.h" + +namespace net { + +class HTTPConn; + +class HTTPRequest { + public: + std::string url() const; + std::string path() const; + std::string query_value(const std::string& field) const; + std::map query_params() const; + std::map postform_params() const; + std::map headers() const; + std::string postform_value(const std::string& field) const; + std::string method() const; + std::string content_type() const; + + std::string client_ip_port() const; + + void Reset(); + void Dump() const; + + private: + friend class HTTPConn; + explicit HTTPRequest(HTTPConn* conn); + ~HTTPRequest(); + + HTTPConn* conn_; + + std::string method_; + std::string url_; + std::string path_; + std::string version_; + std::string content_type_; + bool reply_100continue_{false}; + std::map postform_params_; + std::map query_params_; + std::map headers_; + + std::string client_ip_port_; + + enum RequestParserStatus { + kHeaderMethod, + kHeaderPath, + kHeaderVersion, + kHeaderParamKey, + kHeaderParamValue, + }; + + enum RequestStatus { + kNewRequest, + kHeaderReceiving, + kBodyReceiving, + kBodyReceived, + }; + + RequestStatus req_status_{kNewRequest}; + RequestParserStatus parse_status_{kHeaderMethod}; + + char* rbuf_; + uint64_t rbuf_pos_{0}; + uint64_t remain_recv_len_{0}; + + ReadStatus ReadData(); + int ParseHeader(); + + ReadStatus DoRead(); + bool ParseHeadFromArray(const char* data, int size); + bool ParseGetUrl(); + bool ParseHeadLine(const char* data, int line_start, int line_end); + bool ParseParameters(std::string& data, size_t line_start = 0); +}; + +class HTTPResponse { + public: + void SetStatusCode(int code); + void SetHeaders(const std::string& key, const std::string& value); + void SetHeaders(const std::string& key, size_t value); + void SetContentLength(uint64_t size); + + void Reset(); + bool Finished(); + + private: + friend class HTTPConn; + HTTPConn* conn_; + + explicit HTTPResponse(HTTPConn* conn); + ~HTTPResponse(); + + enum ResponseStatus { + kPrepareHeader, + kSendingHeader, + kSendingBody, + }; + + ResponseStatus resp_status_{kPrepareHeader}; + + char* wbuf_; + int64_t buf_len_{0}; + int64_t wbuf_pos_{0}; + + uint64_t remain_send_len_{0}; + bool finished_{true}; + + int status_code_{200}; + std::map headers_; + + bool Flush(); + bool SerializeHeader(); +}; + +class HTTPHandles : public pstd::noncopyable { + public: + // You need implement these handles. + /* + * We have parsed HTTP request for now, + * then HandleRequest(req, resp) will be called. + * Return true if reply needed, and then handle response header and body + * by functions below, otherwise false. + */ + virtual bool HandleRequest(const HTTPRequest* req) = 0; + /* + * ReadBodyData(...) will be called if there are data follow up, + * We deliver data just once. + */ + virtual void HandleBodyData(const char* data, size_t data_size) = 0; + + /* + * Fill response headers in this handle when body received. + * You MUST set Content-Length by means of calling resp->SetContentLength(num). + * Besides, resp->SetStatusCode(code) should be called either. + */ + virtual void PrepareResponse(HTTPResponse* resp) = 0; + /* + * Fill write buffer 'buf' in this handle, and should not exceed 'max_size'. 
+ * Return actual size filled. + * Return -2 if has written all + * Return Other as Error and close connection + */ + virtual int WriteResponseBody(char* buf, size_t max_size) = 0; + + // Close handle + virtual void HandleConnClosed() {} + + HTTPHandles() = default; + virtual ~HTTPHandles() = default; + + protected: + /* + * Assigned in ServerHandle's CreateWorkerSpecificData + * Used for handles above + */ + void* worker_specific_data_; + + private: + friend class HTTPConn; +}; + +class HTTPConn : public NetConn { + public: + HTTPConn(int fd, const std::string& ip_port, Thread* sthread, std::shared_ptr handles_, + void* worker_specific_data); + ~HTTPConn() override; + + ReadStatus GetRequest() override; + WriteStatus SendReply() override; + + private: + friend class HTTPRequest; + friend class HTTPResponse; + + HTTPRequest* request_; + HTTPResponse* response_; + +#ifdef __ENABLE_SSL + bool security_; +#endif + + std::shared_ptr handles_; +}; + +} // namespace net + +#endif // NET_INCLUDE_HTTP_CONN_H_ diff --git a/tools/pika_migrate/src/net/include/net_cli.h b/tools/pika_migrate/src/net/include/net_cli.h new file mode 100644 index 0000000000..dd5aab198c --- /dev/null +++ b/tools/pika_migrate/src/net/include/net_cli.h @@ -0,0 +1,62 @@ +// Copyright (c) 2015-present, Qihoo, Inc. All rights reserved. +// This source code is licensed under the BSD-style license found in the +// LICENSE file in the root directory of this source tree. An additional grant +// of patent rights can be found in the PATENTS file in the same directory. + +#ifndef NET_INCLUDE_NET_CLI_H_ +#define NET_INCLUDE_NET_CLI_H_ + +#include +#include + +#include "pstd/include/pstd_status.h" +#include "pstd/include/noncopyable.h" + +namespace net { + +class NetCli : public pstd::noncopyable { + public: + explicit NetCli(const std::string& ip = "", int port = 0); + virtual ~NetCli(); + + pstd::Status Connect(const std::string& bind_ip = ""); + pstd::Status Connect(const std::string& peer_ip, int peer_port, const std::string& bind_ip = ""); + // Check whether the connection got fin from peer or not + virtual int CheckAliveness(); + // Compress and write the message + virtual pstd::Status Send(void* msg) = 0; + + // Read, parse and store the reply + virtual pstd::Status Recv(void* result = nullptr) = 0; + + void Close(); + + // TODO(baotiao): delete after redis_cli use RecvRaw + int fd() const; + + bool Available() const; + + struct timeval last_interaction_; + + // default connect timeout is 1000ms + int set_send_timeout(int send_timeout); + int set_recv_timeout(int recv_timeout); + void set_connect_timeout(int connect_timeout); + + protected: + pstd::Status SendRaw(void* buf, size_t count); + pstd::Status RecvRaw(void* buf, size_t* count); + + private: + struct Rep; + std::unique_ptr rep_; + int set_tcp_nodelay(); + +}; + +extern NetCli* NewPbCli(const std::string& peer_ip = "", int peer_port = 0); + +extern NetCli* NewRedisCli(); + +} // namespace net +#endif // NET_INCLUDE_NET_CLI_H_ diff --git a/tools/pika_migrate/src/net/include/net_conn.h b/tools/pika_migrate/src/net/include/net_conn.h new file mode 100644 index 0000000000..fab23f71b2 --- /dev/null +++ b/tools/pika_migrate/src/net/include/net_conn.h @@ -0,0 +1,132 @@ +// Copyright (c) 2015-present, Qihoo, Inc. All rights reserved. +// This source code is licensed under the BSD-style license found in the +// LICENSE file in the root directory of this source tree. An additional grant +// of patent rights can be found in the PATENTS file in the same directory. 
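+// ---------------------------------------------------------------------------
+// Minimal sketch of an HTTPHandles implementation for the interface declared
+// in http_conn.h above (illustrative only; `EchoHandles` is hypothetical and
+// assumes the whole body fits into one write buffer):
+//
+//   class EchoHandles : public net::HTTPHandles {
+//    public:
+//     bool HandleRequest(const net::HTTPRequest* req) override {
+//       body_ = "echo: " + req->path();
+//       sent_ = false;
+//       return true;  // we want to reply
+//     }
+//     void HandleBodyData(const char* data, size_t size) override {}
+//     void PrepareResponse(net::HTTPResponse* resp) override {
+//       resp->SetStatusCode(200);
+//       resp->SetContentLength(body_.size());  // mandatory, per the comment above
+//     }
+//     int WriteResponseBody(char* buf, size_t max_size) override {
+//       if (sent_) return -2;  // -2 signals "all written"
+//       size_t n = body_.size() < max_size ? body_.size() : max_size;
+//       memcpy(buf, body_.data(), n);
+//       sent_ = true;
+//       return static_cast<int>(n);
+//     }
+//    private:
+//     std::string body_;
+//     bool sent_ = false;
+//   };
+// ---------------------------------------------------------------------------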
+ +#ifndef NET_INCLUDE_NET_CONN_H_ +#define NET_INCLUDE_NET_CONN_H_ + +#include +#include +#include + +#ifdef __ENABLE_SSL +# include +# include +#endif + +#include "net/include/net_define.h" +#include "net/include/server_thread.h" +#include "net/src/net_multiplexer.h" +#include "pstd/include/testutil.h" +#include "pstd/include/noncopyable.h" + +namespace net { + +class Thread; + +class NetConn : public std::enable_shared_from_this, public pstd::noncopyable { + public: + NetConn(int fd, std::string ip_port, Thread* thread, NetMultiplexer* mpx = nullptr); +#ifdef __ENABLE_SSL + virtual ~NetConn(); +#else + virtual ~NetConn() = default; +#endif + + /* + * Set the fd to nonblock && set the flag_ the fd flag + */ + bool SetNonblock(); + +#ifdef __ENABLE_SSL + bool CreateSSL(SSL_CTX* ssl_ctx); +#endif + + virtual ReadStatus GetRequest() = 0; + virtual WriteStatus SendReply() = 0; + virtual int WriteResp(const std::string& resp) { return 0; } + + virtual void TryResizeBuffer() {} + + int flags() const { return flags_; } + + void set_fd(const int fd) { fd_ = fd; } + + int fd() const { return fd_; } + + std::string ip_port() const { return ip_port_; } + + bool is_ready_to_reply() { return is_writable() && is_reply(); } + + virtual void set_is_writable(const bool is_writable) { is_writable_ = is_writable; } + + virtual bool is_writable() { return is_writable_; } + + virtual void set_is_reply(const bool is_reply) { is_reply_ = is_reply; } + + virtual bool is_reply() { return is_reply_; } + + std::string name() { return name_; } + void set_name(std::string name) { name_ = std::move(name); } + + bool IsClose() { return close_; } + void SetClose(bool close); + + void set_last_interaction(const struct timeval& now) { last_interaction_ = now; } + + struct timeval last_interaction() const { return last_interaction_; } + + Thread* thread() const { return thread_; } + + void set_net_multiplexer(NetMultiplexer* ep) { net_multiplexer_ = ep; } + + NetMultiplexer* net_multiplexer() const { return net_multiplexer_; } + + std::string String() const { + std::stringstream ss; + ss << "fd: " << fd_ << ", ip_port: " << ip_port_ << ", name: " << name_ << ", is_reply: " << is_reply_ << ", close: " << close_; + return ss.str(); + } + +#ifdef __ENABLE_SSL + SSL* ssl() { return ssl_; } + + bool security() { return ssl_ != nullptr; } +#endif + + private: + int fd_ = -1; + std::string ip_port_; + bool is_reply_ = false; + bool is_writable_ = false; + bool close_ = false; + struct timeval last_interaction_; + int flags_ = 0; + std::string name_; + +#ifdef __ENABLE_SSL + SSL* ssl_; +#endif + + // thread this conn belong to + Thread* thread_ = nullptr; + // the net epoll this conn belong to + NetMultiplexer* net_multiplexer_ = nullptr; + +}; + +/* + * for every conn, we need create a corresponding ConnFactory + */ +class ConnFactory { + public: + virtual ~ConnFactory() = default; + virtual std::shared_ptr NewNetConn(int connfd, const std::string& ip_port, Thread* thread, + void* worker_private_data, /* Has set in ThreadEnvHandle */ + NetMultiplexer* net_mpx = nullptr) const = 0; +}; + +} // namespace net + +#endif // NET_INCLUDE_NET_CONN_H_ diff --git a/tools/pika_migrate/src/net/include/net_define.h b/tools/pika_migrate/src/net/include/net_define.h new file mode 100644 index 0000000000..4ec16cc4e3 --- /dev/null +++ b/tools/pika_migrate/src/net/include/net_define.h @@ -0,0 +1,120 @@ +// Copyright (c) 2015-present, Qihoo, Inc. All rights reserved. 
+// This source code is licensed under the BSD-style license found in the +// LICENSE file in the root directory of this source tree. An additional grant +// of patent rights can be found in the PATENTS file in the same directory. + +#ifndef NET_INCLUDE_NET_DEFINE_H_ +#define NET_INCLUDE_NET_DEFINE_H_ + +#include +#include +#include + +namespace net { + +#define NET_MAX_CLIENTS 10240 +#define NET_MAX_MESSAGE 1024 +#define NET_NAME_LEN 1024 + +const int kProtoMaxMessage = 512 * 1024 * 1024; // 512MB +#define PB_IOBUF_LEN 67108864 // 64MB +/* + * The pb head and code length + */ +#define COMMAND_HEADER_LENGTH 4 +#define COMMAND_CODE_LENGTH 4 + +const int kCommandHeaderLength = 4; + +/* + * The socket block type + */ +enum BlockType { + kBlock = 0, + kNonBlock = 1, +}; + +enum NotifyType { + kNotiConnect = 0, + kNotiClose = 1, + kNotiEpollout = 2, + kNotiEpollin = 3, + kNotiEpolloutAndEpollin = 4, + kNotiWrite = 5, + kNotiWait = 6, +}; + +enum EventStatus { + kNone = 0, + kReadable = 0x1, + kWritable = 0x1 << 1, + kErrorEvent = 0x1 << 2, +}; + +enum ConnStatus { + kHeader = 0, + kPacket = 1, + kComplete = 2, + kBuildObuf = 3, + kWriteObuf = 4, +}; + +enum ReadStatus { + kReadHalf = 0, + kReadAll = 1, + kReadError = 2, + kReadClose = 3, + kFullError = 4, + kParseError = 5, + kDealError = 6, + kOk = 7, +}; + +enum WriteStatus { + kWriteHalf = 0, + kWriteAll = 1, + kWriteError = 2, +}; + +enum RetCode { + kSuccess = 0, + kBindError = 1, + kCreateThreadError = 2, + kListenError = 3, + kSetSockOptError = 4, + kCreateThreadPoolError = 5, +}; + +/* + * define the redis protocol + */ +#define REDIS_MAX_MESSAGE (1 << 28) // 256MB +#define REDIS_MBULK_BIG_ARG (1024 * 32) // 32KB +#define DEFAULT_WBUF_SIZE 262144 // 256KB +#define REDIS_INLINE_MAXLEN (1024 * 64) // 64KB +#define REDIS_IOBUF_LEN 16384 // 16KB +#define REDIS_REQ_INLINE 1 +#define REDIS_REQ_MULTIBULK 2 + +/* + * define the net cron interval (ms) + */ +#define NET_CRON_INTERVAL 1000 + +/* + * define the macro in NET_conf + */ + +#define NET_WORD_SIZE 1024 +#define NET_LINE_SIZE 1024 +#define NET_CONF_MAX_NUM 1024 + +/* + * define common character + */ +#define SPACE ' ' +#define COLON ':' +#define SHARP '#' + +} // namespace net +#endif // NET_INCLUDE_NET_DEFINE_H_ diff --git a/tools/pika_migrate/src/net/include/net_interfaces.h b/tools/pika_migrate/src/net/include/net_interfaces.h new file mode 100644 index 0000000000..e7c01715d6 --- /dev/null +++ b/tools/pika_migrate/src/net/include/net_interfaces.h @@ -0,0 +1,14 @@ +// Copyright (c) 2015-present, Qihoo, Inc. All rights reserved. +// This source code is licensed under the BSD-style license found in the +// LICENSE file in the root directory of this source tree. An additional grant +// of patent rights can be found in the PATENTS file in the same directory. + +#ifndef NET_INCLUDE_NET_INTERFACES_H_ +#define NET_INCLUDE_NET_INTERFACES_H_ + +#include + +std::string GetDefaultInterface(); +std::string GetIpByInterface(const std::string& network_interface); + +#endif diff --git a/tools/pika_migrate/src/net/include/net_pubsub.h b/tools/pika_migrate/src/net/include/net_pubsub.h new file mode 100644 index 0000000000..51b76268aa --- /dev/null +++ b/tools/pika_migrate/src/net/include/net_pubsub.h @@ -0,0 +1,129 @@ +// Copyright (c) 2015-present, Qihoo, Inc. All rights reserved. +// This source code is licensed under the BSD-style license found in the +// LICENSE file in the root directory of this source tree. 
An additional grant +// of patent rights can be found in the PATENTS file in the same directory. + +#ifndef NET_INCLUDE_PUBSUB_H_ +#define NET_INCLUDE_PUBSUB_H_ + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "pstd/include/pstd_mutex.h" +#include "pstd/include/pstd_string.h" +#include "pstd/include/xdebug.h" + +#include "net/include/net_define.h" +#include "net/include/net_thread.h" +#include "net/src/net_multiplexer.h" + +namespace net { + +class NetFiredEvent; +class NetConn; + +class PubSubThread : public Thread { + public: + PubSubThread(); + + ~PubSubThread() override; + + // PubSub + + int Publish(const std::string& channel, const std::string& msg); + + void Subscribe(const std::shared_ptr& conn, const std::vector& channels, bool pattern, + std::vector>* result); + + int UnSubscribe(const std::shared_ptr& conn, const std::vector& channels, bool pattern, + std::vector>* result); + + void PubSubChannels(const std::string& pattern, std::vector* result); + + void PubSubNumSub(const std::vector& channels, std::vector>* result); + + int PubSubNumPat(); + + // Move out from pubsub thread + void MoveConnOut(const std::shared_ptr& conn); + // Move into pubsub thread + void MoveConnIn(const std::shared_ptr& conn, const NotifyType& notify_type); + + void ConnCanSubscribe(const std::vector& allChannel, + const std::function&)>& func); + + enum ReadyState { + kNotReady, + kReady, + }; + + struct ConnHandle { + ConnHandle(std::shared_ptr pc, ReadyState state = kNotReady) : conn(std::move(pc)), ready_state(state) {} + void UpdateReadyState(const ReadyState& state); + bool IsReady(); + std::shared_ptr conn; + ReadyState ready_state; + }; + + void UpdateConnReadyState(int fd, const ReadyState& state); + + bool IsReady(int fd); + int ClientPubSubChannelSize(const std::shared_ptr& conn); + int ClientPubSubChannelPatternSize(const std::shared_ptr& conn); + void NotifyCloseAllConns(); + + private: + void RemoveConn(const std::shared_ptr& conn); + void CloseConn(const std::shared_ptr& conn); + void CloseAllConns(); + int ClientChannelSize(const std::shared_ptr& conn); + + int msg_pfd_[2]; + bool should_exit_; + + mutable pstd::RWMutex rwlock_; /* For external statistics */ + std::map> conns_; + std::atomic close_all_conn_sig_{false}; + + pstd::Mutex pub_mutex_; + pstd::CondVar receiver_rsignal_; + pstd::Mutex receiver_mutex_; + + /* + * receive fd from worker thread + */ + pstd::Mutex mutex_; + std::queue queue_; + + std::string channel_; + std::string message_; + int receivers_{-1}; + + /* + * The epoll handler + */ + std::unique_ptr net_multiplexer_; + + void* ThreadMain() override; + + // clean conns + void Cleanup(); + + // PubSub + pstd::Mutex channel_mutex_; + pstd::Mutex pattern_mutex_; + + std::map>> pubsub_channel_; // channel <---> conns + std::map>> pubsub_pattern_; // channel <---> conns + +}; // class PubSubThread + +} // namespace net +#endif // THIRD_NET_NET_INCLUDE_NET_PUBSUB_H_ diff --git a/tools/pika_migrate/src/net/include/net_stats.h b/tools/pika_migrate/src/net/include/net_stats.h new file mode 100644 index 0000000000..c93142ff2a --- /dev/null +++ b/tools/pika_migrate/src/net/include/net_stats.h @@ -0,0 +1,36 @@ +// Copyright (c) 2023-present, Qihoo, Inc. All rights reserved. +// This source code is licensed under the BSD-style license found in the +// LICENSE file in the root directory of this source tree. An additional grant +// of patent rights can be found in the PATENTS file in the same directory. 
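+// ---------------------------------------------------------------------------
+// Usage sketch for the PubSubThread API declared in net_pubsub.h above
+// (illustrative only; `conn` is assumed to be a std::shared_ptr<net::NetConn>
+// handed over from a worker thread, and the notify type chosen here is an
+// assumption of this sketch):
+//
+//   net::PubSubThread pubsub;
+//   pubsub.StartThread();
+//
+//   // Move a connection into the pubsub thread, then subscribe it:
+//   pubsub.MoveConnIn(conn, net::kNotiWait);
+//   std::vector<std::pair<std::string, int>> result;
+//   pubsub.Subscribe(conn, {"news"}, false /*not a pattern*/, &result);
+//
+//   // Publish returns the number of receivers the message was delivered to:
+//   int receivers = pubsub.Publish("news", "hello");
+// ---------------------------------------------------------------------------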
+// +#ifndef NET_INCLUDE_REDIS_STSTS_H_ +#define NET_INCLUDE_REDIS_STSTS_H_ + +#include + +namespace net { + +class NetworkStatistic { + public: + NetworkStatistic() = default; + ~NetworkStatistic() = default; + + size_t NetInputBytes(); + size_t NetOutputBytes(); + size_t NetReplInputBytes(); + size_t NetReplOutputBytes(); + void IncrRedisInputBytes(uint64_t bytes); + void IncrRedisOutputBytes(uint64_t bytes); + void IncrReplInputBytes(uint64_t bytes); + void IncrReplOutputBytes(uint64_t bytes); + + private: + std::atomic stat_net_input_bytes {0}; /* Bytes read from network. */ + std::atomic stat_net_output_bytes {0}; /* Bytes written to network. */ + std::atomic stat_net_repl_input_bytes {0}; /* Bytes read during replication, added to stat_net_input_bytes in 'info'. */ + std::atomic stat_net_repl_output_bytes {0}; /* Bytes written during replication, added to stat_net_output_bytes in 'info'. */ +}; + +} + +#endif // NET_INCLUDE_REDIS_STSTS_H_ diff --git a/tools/pika_migrate/src/net/include/net_thread.h b/tools/pika_migrate/src/net/include/net_thread.h new file mode 100644 index 0000000000..ff96811e91 --- /dev/null +++ b/tools/pika_migrate/src/net/include/net_thread.h @@ -0,0 +1,57 @@ +// Copyright (c) 2015-present, Qihoo, Inc. All rights reserved. +// This source code is licensed under the BSD-style license found in the +// LICENSE file in the root directory of this source tree. An additional grant +// of patent rights can be found in the PATENTS file in the same directory. + +#ifndef NET_INCLUDE_NET_THREAD_H_ +#define NET_INCLUDE_NET_THREAD_H_ + +#include +#include +#include + +#include "pstd/include/pstd_mutex.h" +#include "pstd/include/noncopyable.h" + +namespace net { + +class Thread : public pstd::noncopyable { + public: + Thread(); + virtual ~Thread(); + + virtual int StartThread(); + virtual int StopThread(); + int JoinThread(); + + bool should_stop() { return should_stop_.load(); } + + void set_should_stop() { should_stop_.store(true); } + + bool is_running() { return running_.load(); } + + pthread_t thread_id() const { return thread_id_; } + + std::string thread_name() const { return thread_name_; } + + virtual void set_thread_name(const std::string& name) { thread_name_ = name; } + + protected: + std::atomic_bool should_stop_; + void set_is_running(bool is_running) { + std::lock_guard l(running_mu_); + running_ = is_running; + } + + private: + static void* RunThread(void* arg); + virtual void* ThreadMain() = 0; + + pstd::Mutex running_mu_; + std::atomic_bool running_ = false; + pthread_t thread_id_{}; + std::string thread_name_; +}; + +} // namespace net +#endif // NET_INCLUDE_NET_THREAD_H_ diff --git a/tools/pika_migrate/src/net/include/pb_conn.h b/tools/pika_migrate/src/net/include/pb_conn.h new file mode 100644 index 0000000000..48459ba257 --- /dev/null +++ b/tools/pika_migrate/src/net/include/pb_conn.h @@ -0,0 +1,90 @@ +// Copyright (c) 2015-present, Qihoo, Inc. All rights reserved. +// This source code is licensed under the BSD-style license found in the +// LICENSE file in the root directory of this source tree. An additional grant +// of patent rights can be found in the PATENTS file in the same directory. 
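+// ---------------------------------------------------------------------------
+// Minimal sketch of a net::Thread subclass for the interface declared in
+// net_thread.h above (illustrative only; `HeartbeatThread` is hypothetical):
+//
+//   class HeartbeatThread : public net::Thread {
+//    private:
+//     void* ThreadMain() override {
+//       while (!should_stop()) {  // flag flipped by set_should_stop()
+//         // ... periodic work ...
+//         sleep(1);
+//       }
+//       return nullptr;
+//     }
+//   };
+//
+//   HeartbeatThread t;
+//   t.StartThread();  // returns pthread_create's error code, 0 on success
+//   t.StopThread();
+// ---------------------------------------------------------------------------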
+ +#ifndef NET_INCLUDE_PB_CONN_H_ +#define NET_INCLUDE_PB_CONN_H_ + +#include +#include +#include + +#include "google/protobuf/message.h" +#include "net/include/net_conn.h" +#include "net/include/net_define.h" +#include "pstd/include/pstd_status.h" + +namespace net { + +using pstd::Status; + +class PbConn : public NetConn { + public: + struct WriteBuf { + WriteBuf(const size_t item_pos = 0) : item_pos_(item_pos) {} + std::queue queue_; + size_t item_pos_; + }; + PbConn(int fd, const std::string& ip_port, Thread* thread, NetMultiplexer* net_mpx = nullptr); + ~PbConn() override; + + ReadStatus GetRequest() override; + WriteStatus SendReply() override; + void TryResizeBuffer() override; + int WriteResp(const std::string& resp) override; + void NotifyWrite(); + void NotifyClose(); + void set_is_reply(bool reply) override; + bool is_reply() override; + /* + * The Variable need by read the buf, + * We allocate the memory when we start the server + */ + uint32_t header_len_{static_cast(-1)}; + char* rbuf_; + uint32_t cur_pos_{0}; + uint32_t rbuf_len_{0}; + int32_t remain_packet_len_{0}; + + ConnStatus connStatus_{kHeader}; + + protected: + // NOTE: if this function return non 0, the the server will close this connection + // + // In the implementation of DealMessage, we should distinguish two types of error + // + // 1. protocol parsing error + // 2. service logic error + // + // protocol parsing error means that we receive a message that is not + // a protobuf message that we know, + // in this situation we should close this connection. + // why we should close connection? + // beacause if we parse protocol error, it means that the content in this + // connection can't not be parse, we can't recognize the next message. + // The only thing we can do is close this connection. + // in this condition the DealMessage should return -1; + // + // + // the logic error means that we have receive the message, and the + // message is protobuf message that we define in proto file. + // After receiving this message, we start execute our service logic. + // the service logic error we should put it in res_, and return 0 + // since this is the service logic error, not the network error. + // this connection we can use again. + + // If you want to send response back, build your pb version response yourself, + // serializeToString and invoke WriteResp and NotifyWrite if necessary. + virtual int DealMessage() = 0; + + private: + pstd::Mutex resp_mu_; + WriteBuf write_buf_; + pstd::Mutex is_reply_mu_; + int64_t is_reply_{0}; + virtual void BuildInternalTag(const std::string& resp, std::string* tag); +}; + +} // namespace net +#endif // NET_INCLUDE_PB_CONN_H_ diff --git a/tools/pika_migrate/src/net/include/period_thread.h b/tools/pika_migrate/src/net/include/period_thread.h new file mode 100644 index 0000000000..051140b2bf --- /dev/null +++ b/tools/pika_migrate/src/net/include/period_thread.h @@ -0,0 +1,26 @@ +// Copyright (c) 2015-present, Qihoo, Inc. All rights reserved. +// This source code is licensed under the BSD-style license found in the +// LICENSE file in the root directory of this source tree. An additional grant +// of patent rights can be found in the PATENTS file in the same directory. 
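+// ---------------------------------------------------------------------------
+// Minimal sketch of the DealMessage() contract described in pb_conn.h above
+// (illustrative only; `EchoConn` and the `Req`/`Resp` protobuf types are
+// hypothetical):
+//
+//   class EchoConn : public net::PbConn {
+//    public:
+//     using net::PbConn::PbConn;
+//     int DealMessage() override {
+//       Req req;
+//       // The current message body sits in rbuf_; a parse failure is a
+//       // protocol error, so return -1 to have the connection closed:
+//       if (!req.ParseFromArray(rbuf_ + cur_pos_ - header_len_, header_len_)) {
+//         return -1;
+//       }
+//       Resp resp;
+//       resp.set_ok(true);  // service-level errors also go into the response
+//       std::string body;
+//       resp.SerializeToString(&body);
+//       WriteResp(body);    // queue the serialized reply
+//       NotifyWrite();      // wake the event loop to send it
+//       return 0;
+//     }
+//   };
+// ---------------------------------------------------------------------------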
+ +#ifndef NET_INCLUDE_PERIOD_THREAD_H_ +#define NET_INCLUDE_PERIOD_THREAD_H_ + +#include + +#include "net/include/net_thread.h" + +namespace net { + +class PeriodThread : public Thread { + public: + explicit PeriodThread(struct timeval period = (struct timeval){1, 0}); + void* ThreadMain() override; + virtual void PeriodMain() = 0; + + private: + struct timeval period_; +}; + +} // namespace net +#endif // NET_INCLUDE_PERIOD_THREAD_H_ diff --git a/tools/pika_migrate/src/net/include/redis_cli.h b/tools/pika_migrate/src/net/include/redis_cli.h new file mode 100644 index 0000000000..d54d9ef523 --- /dev/null +++ b/tools/pika_migrate/src/net/include/redis_cli.h @@ -0,0 +1,27 @@ +// Copyright (c) 2015-present, Qihoo, Inc. All rights reserved. +// This source code is licensed under the BSD-style license found in the +// LICENSE file in the root directory of this source tree. An additional grant +// of patent rights can be found in the PATENTS file in the same directory. +// +#ifndef NET_INCLUDE_REDIS_CLI_H_ +#define NET_INCLUDE_REDIS_CLI_H_ + +#include +#include + +namespace net { + +using RedisCmdArgsType = std::vector; +// We can serialize redis command by 2 ways: +// 1. by variable argmuments; +// eg. RedisCli::Serialize(cmd, "set %s %d", "key", 5); +// cmd will be set as the result string; +// 2. by a string vector; +// eg. RedisCli::Serialize(argv, cmd); +// also cmd will be set as the result string. +extern int SerializeRedisCommand(std::string* cmd, const char* format, ...); +extern int SerializeRedisCommand(RedisCmdArgsType argv, std::string* cmd); + +} // namespace net + +#endif // NET_INCLUDE_REDIS_CLI_H_ diff --git a/tools/pika_migrate/src/net/include/redis_conn.h b/tools/pika_migrate/src/net/include/redis_conn.h new file mode 100644 index 0000000000..30e3a8a7b9 --- /dev/null +++ b/tools/pika_migrate/src/net/include/redis_conn.h @@ -0,0 +1,67 @@ +// Copyright (c) 2015-present, Qihoo, Inc. All rights reserved. +// This source code is licensed under the BSD-style license found in the +// LICENSE file in the root directory of this source tree. An additional grant +// of patent rights can be found in the PATENTS file in the same directory. 
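+// ---------------------------------------------------------------------------
+// Minimal sketch of a PeriodThread subclass for the interface declared in
+// period_thread.h above (illustrative only; `StatsDumper` is hypothetical):
+//
+//   class StatsDumper : public net::PeriodThread {
+//    public:
+//     StatsDumper() : net::PeriodThread({5, 0}) {}  // run every 5 seconds
+//     void PeriodMain() override {
+//       // ... dump counters ...
+//     }
+//   };
+// ---------------------------------------------------------------------------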
+ +#ifndef NET_INCLUDE_REDIS_CONN_H_ +#define NET_INCLUDE_REDIS_CONN_H_ + +#include +#include +#include + +#include "net/include/net_conn.h" +#include "net/include/net_define.h" +#include "net/include/redis_parser.h" +#include "pstd/include/pstd_status.h" + +namespace net { + +using RedisCmdArgsType = std::vector; + +enum HandleType { kSynchronous, kAsynchronous }; + +class RedisConn : public NetConn { + public: + RedisConn(int fd, const std::string& ip_port, Thread* thread, NetMultiplexer* net_mpx = nullptr, + const HandleType& handle_type = kSynchronous, int rbuf_max_len = REDIS_MAX_MESSAGE); + ~RedisConn() override; + + ReadStatus GetRequest() override; + WriteStatus SendReply() override; + int WriteResp(const std::string& resp) override; + + void TryResizeBuffer() override; + void SetHandleType(const HandleType& handle_type); + HandleType GetHandleType(); + + virtual void ProcessRedisCmds(const std::vector& argvs, bool async, std::string* response); + void NotifyEpoll(bool success); + + virtual int DealMessage(const RedisCmdArgsType& argv, std::string* response) = 0; + virtual const std::string& GetCurrentTable() = 0; + + private: + static int ParserDealMessageCb(RedisParser* parser, const RedisCmdArgsType& argv); + static int ParserCompleteCb(RedisParser* parser, const std::vector& argvs); + ReadStatus ParseRedisParserStatus(RedisParserStatus status); + + HandleType handle_type_ = kSynchronous; + + char* rbuf_ = nullptr; + int rbuf_len_ = 0; + int rbuf_max_len_ = 0; + int msg_peak_ = 0; + int command_len_ = 0; + + uint32_t wbuf_pos_ = 0; + std::string response_; + + // For Redis Protocol parser + int last_read_pos_ = -1; + RedisParser redis_parser_; + long bulk_len_ = -1; +}; + +} // namespace net +#endif // NET_INCLUDE_REDIS_CONN_H_ diff --git a/tools/pika_migrate/src/net/include/redis_parser.h b/tools/pika_migrate/src/net/include/redis_parser.h new file mode 100644 index 0000000000..de5cd77dd2 --- /dev/null +++ b/tools/pika_migrate/src/net/include/redis_parser.h @@ -0,0 +1,97 @@ +// Copyright (c) 2015-present, Qihoo, Inc. All rights reserved. +// This source code is licensed under the BSD-style license found in the +// LICENSE file in the root directory of this source tree. An additional grant +// of patent rights can be found in the PATENTS file in the same directory. 
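+// ---------------------------------------------------------------------------
+// Minimal sketch of a RedisConn subclass for the interface declared in
+// redis_conn.h above (illustrative only; `MyRedisConn` is hypothetical and
+// replies +OK to every command):
+//
+//   class MyRedisConn : public net::RedisConn {
+//    public:
+//     using net::RedisConn::RedisConn;
+//     int DealMessage(const net::RedisCmdArgsType& argv, std::string* response) override {
+//       // argv[0] is the command name; append a RESP simple-string reply.
+//       response->append("+OK\r\n");
+//       return 0;
+//     }
+//     const std::string& GetCurrentTable() override { return table_; }
+//    private:
+//     std::string table_ = "db0";  // table name is an assumption
+//   };
+// ---------------------------------------------------------------------------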
diff --git a/tools/pika_migrate/src/net/include/redis_parser.h b/tools/pika_migrate/src/net/include/redis_parser.h
new file mode 100644
index 0000000000..de5cd77dd2
--- /dev/null
+++ b/tools/pika_migrate/src/net/include/redis_parser.h
@@ -0,0 +1,97 @@
+// Copyright (c) 2015-present, Qihoo, Inc. All rights reserved.
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree. An additional grant
+// of patent rights can be found in the PATENTS file in the same directory.
+
+#ifndef NET_INCLUDE_REDIS_PARSER_H_
+#define NET_INCLUDE_REDIS_PARSER_H_
+
+#include "net/include/net_define.h"
+
+#include <vector>
+
+#define REDIS_PARSER_REQUEST 1
+#define REDIS_PARSER_RESPONSE 2
+
+namespace net {
+
+class RedisParser;
+
+using RedisCmdArgsType = std::vector<std::string>;
+using RedisParserDataCb = int (*)(RedisParser*, const RedisCmdArgsType&);
+using RedisParserMultiDataCb = int (*)(RedisParser*, const std::vector<RedisCmdArgsType>&);
+using RedisParserCb = int (*)(RedisParser*);
+using RedisParserType = int;
+
+enum RedisParserStatus {
+  kRedisParserNone = 0,
+  kRedisParserInitDone = 1,
+  kRedisParserHalf = 2,
+  kRedisParserDone = 3,
+  kRedisParserError = 4,
+};
+
+enum RedisParserError {
+  kRedisParserOk = 0,
+  kRedisParserInitError = 1,
+  kRedisParserFullError = 2,  // input overwhelms the internal buffer
+  kRedisParserProtoError = 3,
+  kRedisParserDealError = 4,
+  kRedisParserCompleteError = 5,
+};
+
+struct RedisParserSettings {
+  RedisParserDataCb DealMessage;
+  RedisParserMultiDataCb Complete;
+  RedisParserSettings() {
+    DealMessage = nullptr;
+    Complete = nullptr;
+  }
+};
+
+class RedisParser {
+ public:
+  RedisParser();
+  RedisParserStatus RedisParserInit(RedisParserType type, const RedisParserSettings& settings);
+  RedisParserStatus ProcessInputBuffer(const char* input_buf, int length, int* parsed_len);
+  long get_bulk_len() { return bulk_len_; }
+  RedisParserError get_error_code() { return error_code_; }
+  void* data = nullptr; /* A hook pointer back to the owning "connection" or "socket" object */
+
+ private:
+  // for DEBUG
+  void PrintCurrentStatus();
+
+  void CacheHalfArgv();
+  int FindNextSeparators();
+  int GetNextNum(int pos, long* value);
+  RedisParserStatus ProcessInlineBuffer();
+  RedisParserStatus ProcessMultibulkBuffer();
+  RedisParserStatus ProcessRequestBuffer();
+  RedisParserStatus ProcessResponseBuffer();
+  void SetParserStatus(RedisParserStatus status, RedisParserError error = kRedisParserOk);
+  void ResetRedisParser();
+  void ResetCommandStatus();
+
+  RedisParserSettings parser_settings_;
+  RedisParserStatus status_code_{kRedisParserNone};
+  RedisParserError error_code_{kRedisParserOk};
+
+  int redis_type_ = -1;  // REDIS_REQ_INLINE or REDIS_REQ_MULTIBULK
+
+  long multibulk_len_ = 0;
+  long bulk_len_ = 0;
+  std::string half_argv_;
+
+  int redis_parser_type_ = -1;  // REDIS_PARSER_REQUEST or REDIS_PARSER_RESPONSE
+
+  RedisCmdArgsType argv_;
+  std::vector<RedisCmdArgsType> argvs_;
+
+  int cur_pos_ = 0;
+  const char* input_buf_{nullptr};
+  std::string input_str_;
+  int length_ = 0;
+};
+
+}  // namespace net
+#endif  // NET_INCLUDE_REDIS_PARSER_H_
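A minimal sketch of driving the parser by hand (not part of the patch; the callback and input buffer are illustrative):

#include "net/include/redis_parser.h"

static int OnCommand(net::RedisParser* parser, const net::RedisCmdArgsType& argv) {
  // argv[0] is the command name, e.g. "SET"; return 0 to keep parsing.
  (void)parser;
  (void)argv;
  return 0;
}

void ParseExample() {
  net::RedisParserSettings settings;
  settings.DealMessage = OnCommand;

  net::RedisParser parser;
  parser.RedisParserInit(REDIS_PARSER_REQUEST, settings);

  const char buf[] = "*3\r\n$3\r\nSET\r\n$3\r\nkey\r\n$3\r\nval\r\n";
  int parsed_len = 0;
  net::RedisParserStatus st = parser.ProcessInputBuffer(buf, sizeof(buf) - 1, &parsed_len);
  // st should be kRedisParserDone once a complete command has been consumed;
  // kRedisParserHalf means more bytes are needed.
  (void)st;
}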
diff --git a/tools/pika_migrate/src/net/include/server_thread.h b/tools/pika_migrate/src/net/include/server_thread.h
new file mode 100644
index 0000000000..b8defbf2a6
--- /dev/null
+++ b/tools/pika_migrate/src/net/include/server_thread.h
@@ -0,0 +1,242 @@
+// Copyright (c) 2015-present, Qihoo, Inc. All rights reserved.
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree. An additional grant
+// of patent rights can be found in the PATENTS file in the same directory.
+
+#ifndef NET_INCLUDE_SERVER_THREAD_H_
+#define NET_INCLUDE_SERVER_THREAD_H_
+
+#include <memory>
+#include <set>
+#include <string>
+#include <vector>
+
+#ifdef __ENABLE_SSL
+#  include <openssl/conf.h>
+#  include <openssl/err.h>
+#  include <openssl/ssl.h>
+#endif
+
+#include "net/include/net_define.h"
+#include "net/include/net_thread.h"
+#include "net/src/net_multiplexer.h"
+#include "pstd/include/env.h"
+#include "pstd/include/pstd_mutex.h"
+#include "pstd/include/pstd_status.h"
+
+// remove 'unused parameter' warning
+#define UNUSED(expr) \
+  do {               \
+    (void)(expr);    \
+  } while (0)
+
+namespace net {
+
+class ServerSocket;
+
+class NetConn;
+struct NetFiredEvent;
+class ConnFactory;
+class WorkerThread;
+
+/*
+ * ServerHandle will be invoked at the appropriate occasions
+ * in the server thread's main loop.
+ */
+class ServerHandle {
+ public:
+  ServerHandle() = default;
+  virtual ~ServerHandle() = default;
+
+  /*
+   * CronHandle() will be invoked once every cron_interval.
+   */
+  virtual void CronHandle() const {}
+
+  /*
+   * FdTimeoutHandle(...) will be invoked after a connection times out.
+   */
+  virtual void FdTimeoutHandle(int fd, const std::string& ip_port) const {
+    UNUSED(fd);
+    UNUSED(ip_port);
+  }
+
+  /*
+   * FdClosedHandle(...) will be invoked before a connection is closed.
+   */
+  virtual void FdClosedHandle(int fd, const std::string& ip_port) const {
+    UNUSED(fd);
+    UNUSED(ip_port);
+  }
+
+  /*
+   * AccessHandle(...) will be invoked after the client fd is accept()ed
+   * but before it is handled.
+   */
+  virtual bool AccessHandle(std::string& ip) const {
+    UNUSED(ip);
+    return true;
+  }
+
+  virtual bool AccessHandle(int fd, std::string& ip) const {
+    UNUSED(fd);
+    UNUSED(ip);
+    return true;
+  }
+
+  /*
+   * CreateWorkerSpecificData(...) will be invoked in the StartThread() routine.
+   * The 'data' pointer should be assigned; it is passed as a parameter to
+   * every connection's factory create function.
+   */
+  virtual int CreateWorkerSpecificData(void** data) const {
+    UNUSED(data);
+    return 0;
+  }
+
+  /*
+   * DeleteWorkerSpecificData(...) is the counterpart of
+   * CreateWorkerSpecificData(...); it is invoked in the StopThread(...)
+   * routine, and resources allocated in CreateWorkerSpecificData(...)
+   * should be released in this handle.
+   */
+  virtual int DeleteWorkerSpecificData(void* data) const {
+    UNUSED(data);
+    return 0;
+  }
+};
+
+const char kKillAllConnsTask[] = "kill_all_conns";
+
+const int kDefaultKeepAliveTime = 60;  // (s)
+
+class ServerThread : public Thread {
+ public:
+  ServerThread(int port, int cron_interval, const ServerHandle* handle);
+  ServerThread(const std::string& bind_ip, int port, int cron_interval, const ServerHandle* handle);
+  ServerThread(const std::set<std::string>& bind_ips, int port, int cron_interval, const ServerHandle* handle);
+
+#ifdef __ENABLE_SSL
+  /*
+   * Enable TLS; set before StartThread. Default: false.
+   * Only HTTPConn supports it for now.
+   */
+  int EnableSecurity(const std::string& cert_file, const std::string& key_file);
+  SSL_CTX* ssl_ctx() { return ssl_ctx_; }
+  bool security() { return security_; }
+#endif
+
+  int SetTcpNoDelay(int connfd);
+
+  /*
+   * StartThread returns the error code of pthread_create,
+   * or 0 on success.
+   */
+  int StartThread() override;
+
+  virtual void set_keepalive_timeout(int timeout) = 0;
+
+  virtual int conn_num() const = 0;
+
+  struct ConnInfo {
+    int fd;
+    std::string ip_port;
+    struct timeval last_interaction;
+  };
+  virtual std::vector<ConnInfo> conns_info() const = 0;
+
+  // Move a connection out of the server thread
+  virtual std::shared_ptr<NetConn> MoveConnOut(int fd) = 0;
+  // Move a connection into the server thread
+  virtual void MoveConnIn(std::shared_ptr<NetConn> conn, const NotifyType& type) = 0;
+
+  void set_thread_name(const std::string& name) override { Thread::set_thread_name(name); }
+
+  virtual void KillAllConns() = 0;
+  virtual bool KillConn(const std::string& ip_port) = 0;
+
+  virtual void HandleNewConn(int connfd, const std::string& ip_port) = 0;
+
+  virtual void SetQueueLimit(int queue_limit) {}
+
+  ~ServerThread() override;
+
+ protected:
+  /*
+   * The event handler
+   */
+  std::unique_ptr<NetMultiplexer> net_multiplexer_;
+
+ private:
+  friend class HolyThread;
+  friend class DispatchThread;
+  friend class WorkerThread;
+
+  int cron_interval_ = 0;
+  virtual void DoCronTask();
+
+  // process the events in the notify_queue
+  virtual void ProcessNotifyEvents(const NetFiredEvent* pfe);
+
+  const ServerHandle* handle_;
+  bool own_handle_ = false;
+
+#ifdef __ENABLE_SSL
+  bool security_;
+  SSL_CTX* ssl_ctx_;
+#endif
+
+  /*
+   * The tcp server port and address
+   */
+  int port_ = -1;
+  std::set<std::string> ips_;
+  std::vector<std::shared_ptr<ServerSocket>> server_sockets_;
+  std::set<int> server_fds_;
+
+  virtual int InitHandle();
+  void* ThreadMain() override;
+  /*
+   * The server event handle
+   */
+  virtual void HandleConnEvent(NetFiredEvent* pfe) = 0;
+};
+
+// !!!Attention: If you use this constructor, keepalive_timeout_ will be
+// equal to kDefaultKeepAliveTime (60s). In master-slave mode, the slave
+// binlog receiver will close the binlog sync connection in HolyThread::DoCronTask
+// if the master has not sent data within kDefaultKeepAliveTime.
+extern ServerThread* NewHolyThread(int port, ConnFactory* conn_factory, int cron_interval = 0,
+                                   const ServerHandle* handle = nullptr);
+extern ServerThread* NewHolyThread(const std::string& bind_ip, int port, ConnFactory* conn_factory,
+                                   int cron_interval = 0, const ServerHandle* handle = nullptr);
+extern ServerThread* NewHolyThread(const std::set<std::string>& bind_ips, int port, ConnFactory* conn_factory,
+                                   int cron_interval = 0, const ServerHandle* handle = nullptr);
+extern ServerThread* NewHolyThread(const std::set<std::string>& bind_ips, int port, ConnFactory* conn_factory,
+                                   bool async, int cron_interval = 0, const ServerHandle* handle = nullptr);
+
+/**
+ * This type of dispatch thread just accepts connections and then dispatches
+ * the fd to a worker thread.
+ *
+ * @brief
+ *
+ * @param port          the port number
+ * @param conn_factory  connection factory object
+ * @param cron_interval the cron job interval
+ * @param queue_limit   the size limit of each worker's connection queue
+ * @param handle        the server's handle (e.g. CronHandle, AccessHandle...)
+ * @param ehandle       the worker's environment setting handle
+ */
+extern ServerThread* NewDispatchThread(int port, int work_num, ConnFactory* conn_factory, int cron_interval = 0,
+                                       int queue_limit = 1000, const ServerHandle* handle = nullptr);
+extern ServerThread* NewDispatchThread(const std::string& ip, int port, int work_num, ConnFactory* conn_factory,
+                                       int cron_interval = 0, int queue_limit = 1000,
+                                       const ServerHandle* handle = nullptr);
+extern ServerThread* NewDispatchThread(const std::set<std::string>& ips, int port, int work_num,
+                                       ConnFactory* conn_factory, int cron_interval = 0, int queue_limit = 1000,
+                                       const ServerHandle* handle = nullptr);
+
+}  // namespace net
+#endif  // NET_INCLUDE_SERVER_THREAD_H_
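As an illustration of the hook contract, a minimal ServerHandle subclass (a sketch, not part of the patch; the deny-listed address and the per-worker string are invented):

#include <string>
#include "net/include/server_thread.h"

class MyHandle : public net::ServerHandle {
 public:
  // Reject a (hypothetical) blocked peer before the connection is handled.
  bool AccessHandle(std::string& ip) const override {
    return ip != "10.0.0.66";
  }
  // Allocate per-worker state; the pointer is handed to every
  // ConnFactory create call for that worker.
  int CreateWorkerSpecificData(void** data) const override {
    *data = new std::string("worker-scratch");
    return 0;
  }
  // Release what CreateWorkerSpecificData allocated.
  int DeleteWorkerSpecificData(void* data) const override {
    delete static_cast<std::string*>(data);
    return 0;
  }
};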
diff --git a/tools/pika_migrate/src/net/include/simple_http_conn.h b/tools/pika_migrate/src/net/include/simple_http_conn.h
new file mode 100644
index 0000000000..415d509377
--- /dev/null
+++ b/tools/pika_migrate/src/net/include/simple_http_conn.h
@@ -0,0 +1,106 @@
+// Copyright (c) 2015-present, Qihoo, Inc. All rights reserved.
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree. An additional grant
+// of patent rights can be found in the PATENTS file in the same directory.
+
+#ifndef NET_INCLUDE_SIMPLE_HTTP_CONN_H_
+#define NET_INCLUDE_SIMPLE_HTTP_CONN_H_
+
+#include <map>
+#include <string>
+#include <vector>
+
+#include "pstd/include/pstd_status.h"
+#include "pstd/include/xdebug.h"
+
+#include "net/include/net_conn.h"
+#include "net/include/net_define.h"
+#include "net/src/net_util.h"
+
+namespace net {
+
+class Request {
+ public:
+  // carried in the header
+  std::string method;
+  std::string path;
+  std::string version;
+  std::map<std::string, std::string> headers;
+
+  // in the header for GET, in the content for POST/PUT/DELETE
+  std::map<std::string, std::string> query_params;
+
+  // POST: content-type: application/x-www-form-urlencoded
+  std::map<std::string, std::string> post_params;
+
+  // carried in the content
+  std::string content;
+
+  Request();
+  void Clear();
+  bool ParseHeadFromArray(const char* data, int size);
+  bool ParseBodyFromArray(const char* data, int size);
+
+ private:
+  enum ParseStatus { kHeaderMethod, kHeaderPath, kHeaderVersion, kHeaderParamKey, kHeaderParamValue, kBody };
+
+  bool ParseGetUrl();
+  bool ParseHeadLine(const char* data, int line_start, int line_end, ParseStatus* parseStatus);
+  bool ParseParameters(const std::string& data, size_t line_start = 0, bool from_url = true);
+};
+
+class Response {
+ public:
+  Response() = default;
+  void Clear();
+  int SerializeHeaderToArray(char* data, size_t size);
+  int SerializeBodyToArray(char* data, size_t size, int* pos);
+  bool HasMoreBody(size_t pos) { return pos < body_.size(); }
+
+  void SetStatusCode(int code);
+
+  void SetHeaders(const std::string& key, const std::string& value) { headers_[key] = value; }
+
+  void SetHeaders(const std::string& key, const int value) { headers_[key] = std::to_string(value); }
+
+  void SetBody(const std::string& body) { body_.assign(body); }
+
+ private:
+  int status_code_{0};
+  std::string reason_phrase_;
+  std::map<std::string, std::string> headers_;
+  std::string body_;
+};
+
+class SimpleHTTPConn : public NetConn {
+ public:
+  SimpleHTTPConn(int fd, const std::string& ip_port, Thread* thread);
+  ~SimpleHTTPConn() override;
+
+  ReadStatus GetRequest() override;
+  WriteStatus SendReply() override;
+
+ private:
+  virtual void DealMessage(const Request* req, Response* res) = 0;
+
+  bool BuildRequestHeader();
+  bool AppendRequestBody();
+  bool FillResponseBuf();
+  void HandleMessage();
+
+  ConnStatus conn_status_{kHeader};
+  char* rbuf_;
+  uint32_t rbuf_pos_{0};
+  char* wbuf_;
+  uint32_t wbuf_len_{0};  // length we want to write out
+  uint32_t wbuf_pos_{0};
+  uint32_t header_len_{0};
+  uint64_t remain_packet_len_{0};
+
+  Request* request_;
+  int response_pos_{-1};
+  Response* response_;
+};
+
+}  // namespace net
+#endif  // NET_INCLUDE_SIMPLE_HTTP_CONN_H_
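A minimal sketch of a concrete handler built on SimpleHTTPConn (not part of the patch; PingHTTPConn and its responses are invented for illustration):

#include <string>
#include "net/include/simple_http_conn.h"

class PingHTTPConn : public net::SimpleHTTPConn {
 public:
  using net::SimpleHTTPConn::SimpleHTTPConn;  // inherit (fd, ip_port, thread) ctor

 private:
  // Invoked once the request head and body have been fully parsed.
  void DealMessage(const net::Request* req, net::Response* res) override {
    res->SetStatusCode(200);
    res->SetHeaders("Content-Type", "text/plain");
    res->SetBody(req->path == "/ping" ? "pong" : "ok");
  }
};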
diff --git a/tools/pika_migrate/src/net/include/thread_pool.h b/tools/pika_migrate/src/net/include/thread_pool.h
new file mode 100644
index 0000000000..0ec3d1bcb1
--- /dev/null
+++ b/tools/pika_migrate/src/net/include/thread_pool.h
@@ -0,0 +1,89 @@
+// Copyright (c) 2018-present, Qihoo, Inc. All rights reserved.
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree. An additional grant
+// of patent rights can be found in the PATENTS file in the same directory.
+
+#ifndef NET_INCLUDE_THREAD_POOL_H_
+#define NET_INCLUDE_THREAD_POOL_H_
+
+#include <atomic>
+#include <queue>
+#include <string>
+#include <vector>
+
+#include "net/include/net_define.h"
+#include "pstd/include/pstd_mutex.h"
+
+namespace net {
+
+using TaskFunc = void (*)(void*);
+
+struct Task {
+  Task() = default;
+  TaskFunc func = nullptr;
+  void* arg = nullptr;
+  Task(TaskFunc _func, void* _arg) : func(_func), arg(_arg) {}
+};
+
+struct TimeTask {
+  uint64_t exec_time;
+  TaskFunc func;
+  void* arg;
+  TimeTask(uint64_t _exec_time, TaskFunc _func, void* _arg) : exec_time(_exec_time), func(_func), arg(_arg) {}
+  bool operator<(const TimeTask& task) const { return exec_time > task.exec_time; }
+};
+
+class ThreadPool : public pstd::noncopyable {
+ public:
+  class Worker {
+   public:
+    explicit Worker(ThreadPool* tp) : start_(false), thread_pool_(tp) {}
+    static void* WorkerMain(void* arg);
+
+    int start();
+    int stop();
+
+   private:
+    pthread_t thread_id_;
+    std::atomic<bool> start_;
+    ThreadPool* const thread_pool_;
+    std::string worker_name_;
+  };
+
+  explicit ThreadPool(size_t worker_num, size_t max_queue_size, std::string thread_pool_name = "ThreadPool");
+  virtual ~ThreadPool();
+
+  int start_thread_pool();
+  int stop_thread_pool();
+  bool should_stop();
+  void set_should_stop();
+
+  void Schedule(TaskFunc func, void* arg);
+  void DelaySchedule(uint64_t timeout, TaskFunc func, void* arg);
+  size_t max_queue_size();
+  size_t worker_size();
+  void cur_queue_size(size_t* qsize);
+  void cur_time_queue_size(size_t* qsize);
+  std::string thread_pool_name();
+
+ private:
+  void runInThread();
+
+  size_t worker_num_;
+  size_t max_queue_size_;
+  std::string thread_pool_name_;
+  std::queue<Task> queue_;
+  std::priority_queue<TimeTask> time_queue_;
+  std::vector<Worker*> workers_;
+  std::atomic<bool> running_;
+  std::atomic<bool> should_stop_;
+
+  pstd::Mutex mu_;
+  pstd::CondVar rsignal_;
+  pstd::CondVar wsignal_;
+};
+
+}  // namespace net
+
+#endif  // NET_INCLUDE_THREAD_POOL_H_
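A minimal usage sketch for the pool (not part of the patch; it assumes DelaySchedule's timeout is in milliseconds, as the bg_thread implementation later in this patch suggests):

#include <cstdio>
#include "net/include/thread_pool.h"

static void PrintTag(void* arg) { std::printf("%s\n", static_cast<const char*>(arg)); }

void PoolExample() {
  static char now_tag[] = "now";
  static char later_tag[] = "later";

  net::ThreadPool pool(4, 1000);  // 4 workers, queue capped at 1000 tasks
  pool.start_thread_pool();
  pool.Schedule(PrintTag, now_tag);            // run as soon as a worker is free
  pool.DelaySchedule(500, PrintTag, later_tag);  // run ~500 ms later (assumed unit)
  // ... let the tasks drain, then:
  pool.stop_thread_pool();
}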
+ +#include "net/include/backend_thread.h" + +#include +#include +#include +#include +#include +#include +#include + +#include + +#include "net/include/net_conn.h" +#include "net/src/server_socket.h" +#include "pstd/include/pstd_string.h" +#include "pstd/include/xdebug.h" + +namespace net { + +using pstd::Status; + +BackendThread::BackendThread(ConnFactory* conn_factory, int cron_interval, int keepalive_timeout, BackendHandle* handle, + void* private_data) + : keepalive_timeout_(keepalive_timeout), + cron_interval_(cron_interval), + handle_(handle), + + private_data_(private_data), + conn_factory_(conn_factory) { + net_multiplexer_.reset(CreateNetMultiplexer()); + net_multiplexer_->Initialize(); +} + +BackendThread::~BackendThread() = default; + +int BackendThread::StartThread() { + if (!handle_) { + handle_ = new BackendHandle(); + own_handle_ = true; + } + own_handle_ = false; + int res = handle_->CreateWorkerSpecificData(&private_data_); + if (res) { + return res; + } + set_thread_name("BackendThread"); + + return Thread::StartThread(); +} + +int BackendThread::StopThread() { + if (private_data_) { + int res = handle_->DeleteWorkerSpecificData(private_data_); + if (res) { + return res; + } + private_data_ = nullptr; + } + if (own_handle_) { + delete handle_; + } + return Thread::StopThread(); +} + +Status BackendThread::Write(const int fd, const std::string& msg) { + { + std::lock_guard l(mu_); + if (conns_.find(fd) == conns_.end()) { + return Status::Corruption(std::to_string(fd) + " cannot find !"); + } + auto addr = conns_.find(fd)->second->ip_port(); + if (!handle_->AccessHandle(addr)) { + return Status::Corruption(addr + " is baned by user!"); + } + size_t size = 0; + for (auto& str : to_send_[fd]) { + size += str.size(); + } + if (size > kConnWriteBuf) { + return Status::Corruption("Connection buffer over maximum size"); + } + to_send_[fd].push_back(msg); + } + NotifyWrite(fd); + return Status::OK(); +} + +Status BackendThread::Close(const int fd) { + { + std::lock_guard l(mu_); + if (conns_.find(fd) == conns_.end()) { + return Status::OK(); + } + } + NotifyClose(fd); + return Status::OK(); +} + +Status BackendThread::ProcessConnectStatus(NetFiredEvent* pfe, int* should_close) { + if (pfe->mask & kErrorEvent) { + *should_close = 1; + return Status::Corruption("POLLERR or POLLHUP"); + } + int val = 0; + socklen_t lon = sizeof(int); + + if (getsockopt(pfe->fd, SOL_SOCKET, SO_ERROR, &val, &lon) == -1) { + *should_close = 1; + return Status::Corruption("Get Socket opt failed"); + } + if (val) { + *should_close = 1; + return Status::Corruption("Get socket error " + std::to_string(val)); + } + return Status::OK(); +} + +void BackendThread::SetWaitConnectOnEpoll(int sockfd) { + net_multiplexer_->NetAddEvent(sockfd, kReadable | kWritable); + connecting_fds_.insert(sockfd); +} + +void BackendThread::AddConnection(const std::string& peer_ip, int peer_port, int sockfd) { + std::string ip_port = peer_ip + ":" + std::to_string(peer_port); + std::shared_ptr tc = conn_factory_->NewNetConn(sockfd, ip_port, this, nullptr, net_multiplexer_.get()); + tc->SetNonblock(); + // This flag specifies that the file descriptor should be closed when an exec function is invoked. 
+ fcntl(sockfd, F_SETFD, fcntl(sockfd, F_GETFD) | FD_CLOEXEC); + + { + std::lock_guard l(mu_); + conns_.insert(std::make_pair(sockfd, tc)); + } +} + +Status BackendThread::Connect(const std::string& dst_ip, const int dst_port, int* fd) { + Status s; + int sockfd = -1; + int rv; + char cport[6]; + struct addrinfo hints; + struct addrinfo *servinfo; + struct addrinfo *p; + snprintf(cport, sizeof(cport), "%d", dst_port); + memset(&hints, 0, sizeof(hints)); + hints.ai_family = AF_INET; + hints.ai_socktype = SOCK_STREAM; + + if (!fd) { + return Status::InvalidArgument("fd argument is nullptr"); + } + // We do not handle IPv6 + if (rv = getaddrinfo(dst_ip.c_str(), cport, &hints, &servinfo); rv) { + return Status::IOError("connect getaddrinfo error for ", dst_ip); + } + for (p = servinfo; p != nullptr; p = p->ai_next) { + if ((sockfd = socket(p->ai_family, p->ai_socktype, p->ai_protocol)) == -1) { + continue; + } + int flags = fcntl(sockfd, F_GETFL, 0); + fcntl(sockfd, F_SETFL, flags | O_NONBLOCK); + + if (connect(sockfd, p->ai_addr, p->ai_addrlen) == -1) { + if (errno == EHOSTUNREACH) { + CloseFd(sockfd); + continue; + } else if (errno == EINPROGRESS || errno == EAGAIN || errno == EWOULDBLOCK) { + AddConnection(dst_ip, dst_port, sockfd); + SetWaitConnectOnEpoll(sockfd); + freeaddrinfo(servinfo); + *fd = sockfd; + return Status::OK(); + } else { + CloseFd(sockfd); + freeaddrinfo(servinfo); + return Status::IOError("EHOSTUNREACH", "The target host cannot be reached"); + } + } + + AddConnection(dst_ip, dst_port, sockfd); + net_multiplexer_->NetAddEvent(sockfd, kReadable | kWritable); + struct sockaddr_in laddr; + socklen_t llen = sizeof(laddr); + getsockname(sockfd, reinterpret_cast(&laddr), &llen); + std::string lip(inet_ntoa(laddr.sin_addr)); + int lport = ntohs(laddr.sin_port); + if (dst_ip == lip && dst_port == lport) { + return Status::IOError("EHOSTUNREACH", "same ip port"); + } + + freeaddrinfo(servinfo); + return s; + } + if (!p) { + s = Status::IOError(strerror(errno), "Can't create socket "); + return s; + } + freeaddrinfo(servinfo); + freeaddrinfo(p); + int val = 1; + setsockopt(sockfd, IPPROTO_TCP, TCP_NODELAY, &val, sizeof(val)); + *fd = sockfd; + return s; +} + +std::shared_ptr BackendThread::GetConn(int fd) { + std::lock_guard l(mu_); + if (auto it = conns_.find(fd); it != conns_.end()) { + return it->second; + } + return nullptr; +} + +void BackendThread::CloseFd(const std::shared_ptr& conn) { + close(conn->fd()); + CleanUpConnRemaining(conn->fd()); + handle_->FdClosedHandle(conn->fd(), conn->ip_port()); +} + +void BackendThread::CloseFd(const int fd) { + close(fd); + CleanUpConnRemaining(fd); + // user don't use ip_port + handle_->FdClosedHandle(fd, ""); +} + +void BackendThread::CleanUpConnRemaining(const int fd) { + std::lock_guard l(mu_); + to_send_.erase(fd); +} + +void BackendThread::DoCronTask() { + struct timeval now; + gettimeofday(&now, nullptr); + std::lock_guard l(mu_); + auto iter = conns_.begin(); + while (iter != conns_.end()) { + std::shared_ptr conn = iter->second; + + // Check keepalive timeout connection + if (keepalive_timeout_ > 0 && (now.tv_sec - conn->last_interaction().tv_sec > keepalive_timeout_)) { + LOG(INFO) << "Do cron task del fd " << conn->fd(); + net_multiplexer_->NetDelEvent(conn->fd(), 0); + close(conn->fd()); + handle_->FdTimeoutHandle(conn->fd(), conn->ip_port()); + if (conns_.count(conn->fd())) { + conns_.erase(conn->fd()); + } + if (connecting_fds_.count(conn->fd())) { + connecting_fds_.erase(conn->fd()); + } + iter = conns_.erase(iter); + 
continue; + } + + // Maybe resize connection buffer + conn->TryResizeBuffer(); + + ++iter; + } +} + +void BackendThread::InternalDebugPrint() { + LOG(INFO) << "___________________________________"; + { + std::lock_guard l(mu_); + LOG(INFO) << "To send map: "; + for (const auto& to_send : to_send_) { + UNUSED(to_send); + const std::vector& tmp = to_send.second; + for (const auto& tmp_to_send : tmp) { + UNUSED(tmp_to_send); + LOG(INFO) << to_send.first << " " << tmp_to_send; + } + } + } + LOG(INFO) << "Connected fd map: "; + std::lock_guard l(mu_); + for (const auto& fd_conn : conns_) { + UNUSED(fd_conn); + LOG(INFO) << "fd " << fd_conn.first; + } + LOG(INFO) << "Connecting fd map: "; + for (const auto& connecting_fd : connecting_fds_) { + UNUSED(connecting_fd); + LOG(INFO) << "fd: " << connecting_fd; + } + LOG(INFO) << "___________________________________"; +} + +void BackendThread::NotifyWrite(std::string& ip_port) { + // put fd = 0, cause this lib user doesnt need to know which fd to write to + // we will check fd by checking ipport_conns_ + NetItem ti(0, ip_port, kNotiWrite); + net_multiplexer_->Register(ti, true); +} + +void BackendThread::NotifyWrite(const int fd) { + NetItem ti(fd, "", kNotiWrite); + net_multiplexer_->Register(ti, true); +} + +void BackendThread::NotifyClose(const int fd) { + NetItem ti(fd, "", kNotiClose); + net_multiplexer_->Register(ti, true); +} + +void BackendThread::ProcessNotifyEvents(const NetFiredEvent* pfe) { + if (pfe->mask & kReadable) { + char bb[2048]; + int64_t nread = read(net_multiplexer_->NotifyReceiveFd(), bb, 2048); + if (nread == 0) { + return; + } else { + for (int32_t idx = 0; idx < nread; ++idx) { + NetItem ti = net_multiplexer_->NotifyQueuePop(); + int fd = ti.fd(); + std::string ip_port = ti.ip_port(); + std::lock_guard l(mu_); + if (ti.notify_type() == kNotiWrite) { + if (conns_.find(fd) == conns_.end()) { + // TODO(): need clean and notify? 
+ continue; + } else { + // connection exist + net_multiplexer_->NetModEvent(fd, 0, kReadable | kWritable); + } + { + auto iter = to_send_.find(fd); + if (iter == to_send_.end()) { + continue; + } + // get msg from to_send_ + std::vector& msgs = iter->second; + for (auto& msg : msgs) { + conns_[fd]->WriteResp(msg); + } + to_send_.erase(iter); + } + } else if (ti.notify_type() == kNotiClose) { + LOG(INFO) << "received kNotiClose"; + net_multiplexer_->NetDelEvent(fd, 0); + CloseFd(fd); + conns_.erase(fd); + connecting_fds_.erase(fd); + } + } + } + } +} + +void* BackendThread::ThreadMain() { + int nfds = 0; + NetFiredEvent* pfe = nullptr; + + struct timeval when; + gettimeofday(&when, nullptr); + struct timeval now = when; + + when.tv_sec += (cron_interval_ / 1000); + when.tv_usec += ((cron_interval_ % 1000) * 1000); + int timeout = cron_interval_; + if (timeout <= 0) { + timeout = NET_CRON_INTERVAL; + } + + std::string ip_port; + + while (!should_stop()) { + if (cron_interval_ > 0) { + gettimeofday(&now, nullptr); + if (when.tv_sec > now.tv_sec || (when.tv_sec == now.tv_sec && when.tv_usec > now.tv_usec)) { + timeout = static_cast((when.tv_sec - now.tv_sec) * 1000 + (when.tv_usec - now.tv_usec) / 1000); + } else { + // do user defined cron + handle_->CronHandle(); + + DoCronTask(); + when.tv_sec = now.tv_sec + (cron_interval_ / 1000); + when.tv_usec = now.tv_usec + ((cron_interval_ % 1000) * 1000); + timeout = cron_interval_; + } + } + //{ + // InternalDebugPrint(); + //} + nfds = net_multiplexer_->NetPoll(timeout); + for (int i = 0; i < nfds; i++) { + pfe = (net_multiplexer_->FiredEvents()) + i; + if (!pfe) { + continue; + } + + if (pfe->fd == net_multiplexer_->NotifyReceiveFd()) { + ProcessNotifyEvents(pfe); + continue; + } + + int should_close = 0; + std::shared_ptr conn; + { + std::unique_lock lock(mu_); + if (auto it = conns_.find(pfe->fd); it == conns_.end()) { + lock.unlock(); + LOG(INFO) << "fd " << pfe->fd << " not found in fd_conns"; + net_multiplexer_->NetDelEvent(pfe->fd, 0); + continue; + } else { + conn = it->second; + } + } + + if (connecting_fds_.count(pfe->fd)) { + Status s = ProcessConnectStatus(pfe, &should_close); + if (!s.ok()) { + handle_->DestConnectFailedHandle(conn->ip_port(), s.ToString()); + } + connecting_fds_.erase(pfe->fd); + } + + if ((should_close == 0) && (pfe->mask & kWritable) && conn->is_reply()) { + WriteStatus write_status = conn->SendReply(); + conn->set_last_interaction(now); + if (write_status == kWriteAll) { + net_multiplexer_->NetModEvent(pfe->fd, 0, kReadable); + conn->set_is_reply(false); + } else if (write_status == kWriteHalf) { + continue; + } else { + LOG(INFO) << "send reply error " << write_status; + should_close = 1; + } + } + + if (!should_close && (pfe->mask & kReadable)) { + ReadStatus read_status = conn->GetRequest(); + conn->set_last_interaction(now); + if (read_status == kReadAll) { + } else if (read_status == kReadHalf) { + continue; + } else { + LOG(INFO) << "Get request error " << read_status; + should_close = 1; + } + } + + if ((pfe->mask & kErrorEvent) || should_close) { + { + LOG(INFO) << "close connection " << pfe->fd << " reason " << pfe->mask << " " << should_close; + net_multiplexer_->NetDelEvent(pfe->fd, 0); + CloseFd(conn); + mu_.lock(); + conns_.erase(pfe->fd); + mu_.unlock(); + if (connecting_fds_.count(conn->fd())) { + connecting_fds_.erase(conn->fd()); + } + } + } + } + } + return nullptr; +} + +} // namespace net diff --git a/tools/pika_migrate/src/net/src/bg_thread.cc b/tools/pika_migrate/src/net/src/bg_thread.cc new 
file mode 100644 index 0000000000..b0835330f9 --- /dev/null +++ b/tools/pika_migrate/src/net/src/bg_thread.cc @@ -0,0 +1,133 @@ +// Copyright (c) 2015-present, Qihoo, Inc. All rights reserved. +// This source code is licensed under the BSD-style license found in the +// LICENSE file in the root directory of this source tree. An additional grant +// of patent rights can be found in the PATENTS file in the same directory. + +#include "net/include/bg_thread.h" +#include +#include + +namespace net { + +void BGThread::Schedule(void (*function)(void*), void* arg) { + std::unique_lock lock(mu_); + + wsignal_.wait(lock, [this]() { return queue_.size() < full_ || should_stop(); }); + + if (!should_stop()) { + queue_.emplace(std::make_unique(function, arg)); + rsignal_.notify_one(); + } +} + +void BGThread::Schedule(void (*function)(void*), void* arg, std::function& call_back) { + std::unique_lock lock(mu_); + + wsignal_.wait(lock, [this]() { return queue_.size() < full_ || should_stop(); }); + + if (!should_stop()) { + queue_.emplace(std::make_unique(function, arg, call_back)); + rsignal_.notify_one(); + } +}; + +void BGThread::QueueSize(int* pri_size, int* qu_size) { + std::lock_guard lock(mu_); + *pri_size = static_cast(timer_queue_.size()); + *qu_size = static_cast(queue_.size()); +} + +void BGThread::QueueClear() { + std::lock_guard lock(mu_); + std::queue>().swap(queue_); + std::priority_queue().swap(timer_queue_); + wsignal_.notify_one(); +} + +void BGThread::SwallowReadyTasks() { + // it's safe to swallow all the remain tasks in ready and timer queue, + // while the schedule function would stop to add any tasks. + mu_.lock(); + while (!queue_.empty()) { + std::unique_ptr task_item = std::move(queue_.front()); + queue_.pop(); + mu_.unlock(); + task_item->function(task_item->arg); + mu_.lock(); + } + mu_.unlock(); + + auto now = std::chrono::system_clock::now(); + uint64_t unow = std::chrono::duration_cast(now.time_since_epoch()).count(); + mu_.lock(); + + while (!timer_queue_.empty()) { + auto [exec_time, function, arg] = timer_queue_.top(); + if (unow < exec_time) { + break; + } + timer_queue_.pop(); + // Don't lock while doing task + mu_.unlock(); + (*function)(arg); + mu_.lock(); + } + mu_.unlock(); +} + +void* BGThread::ThreadMain() { + while (!should_stop()) { + std::unique_lock lock(mu_); + + rsignal_.wait(lock, [this]() { return !queue_.empty() || !timer_queue_.empty() || should_stop(); }); + + if (should_stop()) { + break; + } + + if (!timer_queue_.empty()) { + auto now = std::chrono::system_clock::now(); + uint64_t unow = std::chrono::duration_cast(now.time_since_epoch()).count(); + auto [exec_time, function, arg] = timer_queue_.top(); + if (unow >= exec_time) { + timer_queue_.pop(); + lock.unlock(); + (*function)(arg); + continue; + } else if (queue_.empty() && !should_stop()) { + rsignal_.wait_for(lock, std::chrono::microseconds(exec_time - unow)); + + lock.unlock(); + continue; + } + } + + if (!queue_.empty()) { + std::unique_ptr task_item = std::move(queue_.front()); + queue_.pop(); + wsignal_.notify_one(); + lock.unlock(); + task_item->function(task_item->arg); + } + } + // swalloc all the remain tasks in ready and timer queue + SwallowReadyTasks(); + return nullptr; +} + +/* + * timeout is in millisecond + */ +void BGThread::DelaySchedule(uint64_t timeout, void (*function)(void*), void* arg) { + auto now = std::chrono::system_clock::now(); + uint64_t unow = std::chrono::duration_cast(now.time_since_epoch()).count(); + uint64_t exec_time = unow + timeout * 1000; + + 
std::lock_guard lock(mu_); + if (!should_stop()) { + timer_queue_.emplace(exec_time, function, arg); + rsignal_.notify_one(); + } +} + +} // namespace net diff --git a/tools/pika_migrate/src/net/src/build_version.cc.in b/tools/pika_migrate/src/net/src/build_version.cc.in new file mode 100644 index 0000000000..5087b21249 --- /dev/null +++ b/tools/pika_migrate/src/net/src/build_version.cc.in @@ -0,0 +1,9 @@ +// Copyright (c) 2015-present, Qihoo, Inc. All rights reserved. +// This source code is licensed under the BSD-style license found in the +// LICENSE file in the root directory of this source tree. An additional grant +// of patent rights can be found in the PATENTS file in the same directory. + +#include "net/include/build_version.h" +const char* net_build_git_sha = "net_build_git_sha:@@GIT_SHA@@"; +const char* net_build_git_date = "net_build_git_date:@@GIT_DATE_TIME@@"; +const char* net_build_compile_date = __DATE__; diff --git a/tools/pika_migrate/src/net/src/client_thread.cc b/tools/pika_migrate/src/net/src/client_thread.cc new file mode 100644 index 0000000000..5561d6d3c0 --- /dev/null +++ b/tools/pika_migrate/src/net/src/client_thread.cc @@ -0,0 +1,482 @@ +// Copyright (c) 2015-present, Qihoo, Inc. All rights reserved. +// This source code is licensed under the BSD-style license found in the +// LICENSE file in the root directory of this source tree. An additional grant +// of patent rights can be found in the PATENTS file in the same directory. + +#include "net/include/client_thread.h" + +#include +#include +#include +#include +#include +#include +#include + +#include + +#include "net/include/net_conn.h" +#include "net/src/server_socket.h" +#include "pstd/include/pstd_string.h" +#include "pstd/include/xdebug.h" + +namespace net { + +using pstd::Status; + +ClientThread::ClientThread(ConnFactory* conn_factory, int cron_interval, int keepalive_timeout, ClientHandle* handle, + void* private_data) + : keepalive_timeout_(keepalive_timeout), + cron_interval_(cron_interval), + handle_(handle), + private_data_(private_data), + conn_factory_(conn_factory) { + net_multiplexer_.reset(CreateNetMultiplexer()); + net_multiplexer_->Initialize(); +} + +ClientThread::~ClientThread() = default; + +int ClientThread::StartThread() { + if (!handle_) { + handle_ = new ClientHandle(); + own_handle_ = true; + } + own_handle_ = false; + int res = handle_->CreateWorkerSpecificData(&private_data_); + if (res) { + return res; + } + set_thread_name("ClientThread"); + + return Thread::StartThread(); +} + +int ClientThread::StopThread() { + if (private_data_) { + int res = handle_->DeleteWorkerSpecificData(private_data_); + if (res) { + return res; + } + private_data_ = nullptr; + } + if (own_handle_) { + delete handle_; + } + return Thread::StopThread(); +} + +Status ClientThread::Write(const std::string& ip, const int port, const std::string& msg) { + std::string ip_port = ip + ":" + std::to_string(port); + if (!handle_->AccessHandle(ip_port)) { + return Status::Corruption(ip_port + " is baned by user!"); + } + { + std::lock_guard l(mu_); + size_t size = 0; + for (auto& str : to_send_[ip_port]) { + size += str.size(); + } + if (size > kConnWriteBuf) { + return Status::Corruption("Connection buffer over maximum size"); + } + to_send_[ip_port].push_back(msg); + } + NotifyWrite(ip_port); + return Status::OK(); +} + +Status ClientThread::Close(const std::string& ip, const int port) { + { + std::lock_guard l(to_del_mu_); + to_del_.push_back(ip + ":" + std::to_string(port)); + } + return Status::OK(); +} + +Status 
ClientThread::ProcessConnectStatus(NetFiredEvent* pfe, int* should_close) { + if (pfe->mask & kErrorEvent) { + *should_close = 1; + return Status::Corruption("POLLERR or POLLHUP"); + } + int val = 0; + socklen_t lon = sizeof(int); + + if (getsockopt(pfe->fd, SOL_SOCKET, SO_ERROR, &val, &lon) == -1) { + *should_close = 1; + return Status::Corruption("Get Socket opt failed"); + } + if (val) { + *should_close = 1; + return Status::Corruption("Get socket error " + std::to_string(val)); + } + return Status::OK(); +} + +void ClientThread::SetWaitConnectOnEpoll(int sockfd) { + net_multiplexer_->NetAddEvent(sockfd, kReadable | kWritable); + connecting_fds_.insert(sockfd); +} + +void ClientThread::NewConnection(const std::string& peer_ip, int peer_port, int sockfd) { + std::string ip_port = peer_ip + ":" + std::to_string(peer_port); + std::shared_ptr tc = conn_factory_->NewNetConn(sockfd, ip_port, this, nullptr, net_multiplexer_.get()); + tc->SetNonblock(); + // This flag specifies that the file descriptor should be closed when an exec function is invoked. + fcntl(sockfd, F_SETFD, fcntl(sockfd, F_GETFD) | FD_CLOEXEC); + + fd_conns_.insert(std::make_pair(sockfd, tc)); + ipport_conns_.insert(std::make_pair(ip_port, tc)); +} + +Status ClientThread::ScheduleConnect(const std::string& dst_ip, int dst_port) { + Status s; + int sockfd = -1; + int rv; + char cport[6]; + struct addrinfo hints; + struct addrinfo *servinfo; + struct addrinfo *p; + snprintf(cport, sizeof(cport), "%d", dst_port); + memset(&hints, 0, sizeof(hints)); + hints.ai_family = AF_INET; + hints.ai_socktype = SOCK_STREAM; + + // We do not handle IPv6 + if (rv = getaddrinfo(dst_ip.c_str(), cport, &hints, &servinfo); rv) { + return Status::IOError("connect getaddrinfo error for ", dst_ip); + } + for (p = servinfo; p != nullptr; p = p->ai_next) { + if ((sockfd = socket(p->ai_family, p->ai_socktype, p->ai_protocol)) == -1) { + continue; + } + int flags = fcntl(sockfd, F_GETFL, 0); + fcntl(sockfd, F_SETFL, flags | O_NONBLOCK); + + if (connect(sockfd, p->ai_addr, p->ai_addrlen) == -1) { + if (errno == EHOSTUNREACH) { + CloseFd(sockfd, dst_ip + ":" + std::to_string(dst_port)); + continue; + } else if (errno == EINPROGRESS || errno == EAGAIN || errno == EWOULDBLOCK) { + NewConnection(dst_ip, dst_port, sockfd); + SetWaitConnectOnEpoll(sockfd); + freeaddrinfo(servinfo); + return Status::OK(); + } else { + CloseFd(sockfd, dst_ip + ":" + std::to_string(dst_port)); + freeaddrinfo(servinfo); + return Status::IOError("EHOSTUNREACH", "The target host cannot be reached"); + } + } + + NewConnection(dst_ip, dst_port, sockfd); + net_multiplexer_->NetAddEvent(sockfd, kReadable | kWritable); + struct sockaddr_in laddr; + socklen_t llen = sizeof(laddr); + getsockname(sockfd, reinterpret_cast(&laddr), &llen); + std::string lip(inet_ntoa(laddr.sin_addr)); + int lport = ntohs(laddr.sin_port); + if (dst_ip == lip && dst_port == lport) { + return Status::IOError("EHOSTUNREACH", "same ip port"); + } + + freeaddrinfo(servinfo); + + return s; + } + if (!p) { + s = Status::IOError(strerror(errno), "Can't create socket "); + return s; + } + freeaddrinfo(servinfo); + freeaddrinfo(p); + int val = 1; + setsockopt(sockfd, IPPROTO_TCP, TCP_NODELAY, &val, sizeof(val)); + return s; +} + +void ClientThread::CloseFd(const std::shared_ptr& conn) { + close(conn->fd()); + CleanUpConnRemaining(conn->ip_port()); + handle_->FdClosedHandle(conn->fd(), conn->ip_port()); +} + +void ClientThread::CloseFd(int fd, const std::string& ip_port) { + close(fd); + CleanUpConnRemaining(ip_port); + 
handle_->FdClosedHandle(fd, ip_port); +} + +void ClientThread::CleanUpConnRemaining(const std::string& ip_port) { + std::lock_guard l(mu_); + to_send_.erase(ip_port); +} + +void ClientThread::DoCronTask() { + struct timeval now; + gettimeofday(&now, nullptr); + auto iter = fd_conns_.begin(); + while (iter != fd_conns_.end()) { + std::shared_ptr conn = iter->second; + + // Check keepalive timeout connection + if (keepalive_timeout_ > 0 && (now.tv_sec - conn->last_interaction().tv_sec > keepalive_timeout_)) { + LOG(INFO) << "Do cron task del fd " << conn->fd(); + net_multiplexer_->NetDelEvent(conn->fd(), 0); + // did not clean up content in to_send queue + // will try to send remaining by reconnecting + close(conn->fd()); + handle_->FdTimeoutHandle(conn->fd(), conn->ip_port()); + if (ipport_conns_.count(conn->ip_port())) { + ipport_conns_.erase(conn->ip_port()); + } + if (connecting_fds_.count(conn->fd())) { + connecting_fds_.erase(conn->fd()); + } + iter = fd_conns_.erase(iter); + continue; + } + + // Maybe resize connection buffer + conn->TryResizeBuffer(); + + ++iter; + } + + std::vector to_del; + { + std::lock_guard l(to_del_mu_); + to_del = std::move(to_del_); + to_del_.clear(); + } + + for (auto& conn_name : to_del) { + auto iter = ipport_conns_.find(conn_name); + if (iter == ipport_conns_.end()) { + continue; + } + std::shared_ptr conn = iter->second; + net_multiplexer_->NetDelEvent(conn->fd(), 0); + CloseFd(conn); + fd_conns_.erase(conn->fd()); + ipport_conns_.erase(conn->ip_port()); + connecting_fds_.erase(conn->fd()); + } +} + +void ClientThread::InternalDebugPrint() { + LOG(INFO) << "___________________________________"; + { + std::lock_guard l(mu_); + LOG(INFO) << "To send map: "; + for (const auto& to_send : to_send_) { + UNUSED(to_send); + const std::vector& tmp = to_send.second; + for (const auto& tmp_to_send : tmp) { + UNUSED(tmp_to_send); + LOG(INFO) << to_send.first << " " << tmp_to_send; + } + } + } + LOG(INFO) << "Ipport conn map: "; + for (const auto& ipport_conn : ipport_conns_) { + UNUSED(ipport_conn); + LOG(INFO) << "ipport " << ipport_conn.first; + } + LOG(INFO) << "Connected fd map: "; + for (const auto& fd_conn : fd_conns_) { + UNUSED(fd_conn); + LOG(INFO) << "fd " << fd_conn.first; + } + LOG(INFO) << "Connecting fd map: "; + for (const auto& connecting_fd : connecting_fds_) { + UNUSED(connecting_fd); + LOG(INFO) << "fd: " << connecting_fd; + } + LOG(INFO) << "___________________________________"; +} + +void ClientThread::NotifyWrite(const std::string& ip_port) { + // put fd = 0, cause this lib user does not need to know which fd to write to + // we will check fd by checking ipport_conns_ + NetItem ti(0, ip_port, kNotiWrite); + net_multiplexer_->Register(ti, true); +} + +void ClientThread::ProcessNotifyEvents(const NetFiredEvent* pfe) { + if (pfe->mask & kReadable) { + char bb[2048]; + int64_t nread = read(net_multiplexer_->NotifyReceiveFd(), bb, 2048); + if (nread == 0) { + return; + } else { + for (int32_t idx = 0; idx < nread; ++idx) { + NetItem ti = net_multiplexer_->NotifyQueuePop(); + std::string ip_port = ti.ip_port(); + int fd = ti.fd(); + if (ti.notify_type() == kNotiWrite) { + if (ipport_conns_.find(ip_port) == ipport_conns_.end()) { + std::string ip; + int port = 0; + if (!pstd::ParseIpPortString(ip_port, ip, port)) { + continue; + } + Status s = ScheduleConnect(ip, port); + if (!s.ok()) { + std::string ip_port = ip + ":" + std::to_string(port); + handle_->DestConnectFailedHandle(ip_port, s.ToString()); + LOG(INFO) << "Ip " << ip << ", port " << port << 
" Connect err " << s.ToString(); + continue; + } + } else { + // connection exist + net_multiplexer_->NetModEvent(ipport_conns_[ip_port]->fd(), 0, kReadable | kWritable); + } + std::vector msgs; + { + std::lock_guard l(mu_); + auto iter = to_send_.find(ip_port); + if (iter == to_send_.end()) { + continue; + } + msgs.swap(iter->second); + } + // get msg from to_send_ + std::vector send_failed_msgs; + for (auto& msg : msgs) { + if (ipport_conns_[ip_port]->WriteResp(msg)) { + send_failed_msgs.push_back(msg); + } + } + std::lock_guard l(mu_); + if (!send_failed_msgs.empty()) { + send_failed_msgs.insert(send_failed_msgs.end(), to_send_[ip_port].begin(), + to_send_[ip_port].end()); + send_failed_msgs.swap(to_send_[ip_port]); + NotifyWrite(ip_port); + } + } else if (ti.notify_type() == kNotiClose) { + LOG(INFO) << "received kNotiClose"; + net_multiplexer_->NetDelEvent(fd, 0); + CloseFd(fd, ip_port); + fd_conns_.erase(fd); + ipport_conns_.erase(ip_port); + connecting_fds_.erase(fd); + } + } + } + } +} + +void* ClientThread::ThreadMain() { + int nfds = 0; + NetFiredEvent* pfe = nullptr; + + struct timeval when; + gettimeofday(&when, nullptr); + struct timeval now = when; + + when.tv_sec += (cron_interval_ / 1000); + when.tv_usec += ((cron_interval_ % 1000) * 1000); + int timeout = cron_interval_; + if (timeout <= 0) { + timeout = NET_CRON_INTERVAL; + } + + std::string ip_port; + + while (!should_stop()) { + if (cron_interval_ > 0) { + gettimeofday(&now, nullptr); + if (when.tv_sec > now.tv_sec || (when.tv_sec == now.tv_sec && when.tv_usec > now.tv_usec)) { + timeout = static_cast((when.tv_sec - now.tv_sec) * 1000 + (when.tv_usec - now.tv_usec) / 1000); + } else { + // do user defined cron + handle_->CronHandle(); + + DoCronTask(); + when.tv_sec = now.tv_sec + (cron_interval_ / 1000); + when.tv_usec = now.tv_usec + ((cron_interval_ % 1000) * 1000); + timeout = cron_interval_; + } + } + //{ + // InternalDebugPrint(); + //} + nfds = net_multiplexer_->NetPoll(timeout); + for (int i = 0; i < nfds; i++) { + pfe = (net_multiplexer_->FiredEvents()) + i; + if (!pfe) { + continue; + } + + if (pfe->fd == net_multiplexer_->NotifyReceiveFd()) { + ProcessNotifyEvents(pfe); + continue; + } + + int should_close = 0; + auto iter = fd_conns_.find(pfe->fd); + if (iter == fd_conns_.end()) { + LOG(INFO) << "fd " << pfe->fd << "not found in fd_conns"; + net_multiplexer_->NetDelEvent(pfe->fd, 0); + continue; + } + + std::shared_ptr conn = iter->second; + + if (connecting_fds_.count(pfe->fd)) { + Status s = ProcessConnectStatus(pfe, &should_close); + if (!s.ok()) { + handle_->DestConnectFailedHandle(conn->ip_port(), s.ToString()); + } + connecting_fds_.erase(pfe->fd); + } + + if ((should_close == 0) && (pfe->mask & kWritable) && conn->is_reply()) { + WriteStatus write_status = conn->SendReply(); + conn->set_last_interaction(now); + if (write_status == kWriteAll) { + net_multiplexer_->NetModEvent(pfe->fd, 0, kReadable); + conn->set_is_reply(false); + } else if (write_status == kWriteHalf) { + continue; + } else { + LOG(INFO) << "send reply error " << write_status; + should_close = 1; + } + } + + if ((should_close == 0) && (pfe->mask & kReadable)) { + ReadStatus read_status = conn->GetRequest(); + conn->set_last_interaction(now); + if (read_status == kReadAll) { + // net_multiplexer_->NetModEvent(pfe->fd, 0, EPOLLOUT); + } else if (read_status == kReadHalf) { + continue; + } else { + LOG(INFO) << "Get request error " << read_status; + should_close = 1; + } + } + + if ((pfe->mask & kErrorEvent) || should_close) { + { + 
LOG(INFO) << "close connection " << pfe->fd << " reason " << pfe->mask << " " << should_close; + net_multiplexer_->NetDelEvent(pfe->fd, 0); + CloseFd(conn); + fd_conns_.erase(pfe->fd); + if (ipport_conns_.count(conn->ip_port())) { + ipport_conns_.erase(conn->ip_port()); + } + if (connecting_fds_.count(conn->fd())) { + connecting_fds_.erase(conn->fd()); + } + } + } + } + } + return nullptr; +} + +} // namespace net diff --git a/tools/pika_migrate/src/net/src/dispatch_thread.cc b/tools/pika_migrate/src/net/src/dispatch_thread.cc new file mode 100644 index 0000000000..6fbe97373e --- /dev/null +++ b/tools/pika_migrate/src/net/src/dispatch_thread.cc @@ -0,0 +1,349 @@ +// Copyright (c) 2015-present, Qihoo, Inc. All rights reserved. +// This source code is licensed under the BSD-style license found in the +// LICENSE file in the root directory of this source tree. An additional grant +// of patent rights can be found in the PATENTS file in the same directory. + +#include + +#include + +#include "net/src/dispatch_thread.h" +#include "net/src/worker_thread.h" + +namespace net { + +DispatchThread::DispatchThread(int port, int work_num, ConnFactory* conn_factory, int cron_interval, int queue_limit, + const ServerHandle* handle) + : ServerThread::ServerThread(port, cron_interval, handle), + last_thread_(0), + work_num_(work_num), + queue_limit_(queue_limit) { + for (int i = 0; i < work_num_; i++) { + worker_thread_.emplace_back(std::make_unique(conn_factory, this, queue_limit, cron_interval)); + } +} + +DispatchThread::DispatchThread(const std::string& ip, int port, int work_num, ConnFactory* conn_factory, + int cron_interval, int queue_limit, const ServerHandle* handle) + : ServerThread::ServerThread(ip, port, cron_interval, handle), + last_thread_(0), + work_num_(work_num), + queue_limit_(queue_limit) { + for (int i = 0; i < work_num_; i++) { + worker_thread_.emplace_back(std::make_unique(conn_factory, this, queue_limit, cron_interval)); + } +} + +DispatchThread::DispatchThread(const std::set& ips, int port, int work_num, ConnFactory* conn_factory, + int cron_interval, int queue_limit, const ServerHandle* handle) + : ServerThread::ServerThread(ips, port, cron_interval, handle), + last_thread_(0), + work_num_(work_num), + queue_limit_(queue_limit) { + for (int i = 0; i < work_num_; i++) { + worker_thread_.emplace_back(std::make_unique(conn_factory, this, queue_limit, cron_interval)); + } +} + +DispatchThread::~DispatchThread() = default; + +int DispatchThread::StartThread() { + for (int i = 0; i < work_num_; i++) { + int ret = handle_->CreateWorkerSpecificData(&(worker_thread_[i]->private_data_)); + if (ret) { + return ret; + } + + if (!thread_name().empty()) { + worker_thread_[i]->set_thread_name("WorkerThread"); + } + ret = worker_thread_[i]->StartThread(); + if (ret) { + return ret; + } + } + + // Adding timer tasks and run timertaskThread + timer_task_thread_.AddTimerTask("blrpop_blocking_info_scan", 250, true, + [this] { this->ScanExpiredBlockedConnsOfBlrpop(); }); + timer_task_thread_.set_thread_name("DispacherTimerTaskThread"); + timer_task_thread_.StartThread(); + return ServerThread::StartThread(); +} + +int DispatchThread::StopThread() { + for (int i = 0; i < work_num_; i++) { + worker_thread_[i]->set_should_stop(); + } + for (int i = 0; i < work_num_; i++) { + int ret = worker_thread_[i]->StopThread(); + if (ret) { + return ret; + } + if (worker_thread_[i]->private_data_) { + ret = handle_->DeleteWorkerSpecificData(worker_thread_[i]->private_data_); + if (ret) { + return ret; + } + 
worker_thread_[i]->private_data_ = nullptr; + } + } + timer_task_thread_.StopThread(); + return ServerThread::StopThread(); +} + +void DispatchThread::set_keepalive_timeout(int timeout) { + for (int i = 0; i < work_num_; ++i) { + worker_thread_[i]->set_keepalive_timeout(timeout); + } +} + +int DispatchThread::conn_num() const { + int conn_num = 0; + for (int i = 0; i < work_num_; ++i) { + conn_num += worker_thread_[i]->conn_num(); + } + return conn_num; +} + +std::vector DispatchThread::conns_info() const { + std::vector result; + for (int i = 0; i < work_num_; ++i) { + const auto worker_conns_info = worker_thread_[i]->conns_info(); + result.insert(result.end(), worker_conns_info.begin(), worker_conns_info.end()); + } + return result; +} + +std::shared_ptr DispatchThread::MoveConnOut(int fd) { + for (int i = 0; i < work_num_; ++i) { + std::shared_ptr conn = worker_thread_[i]->MoveConnOut(fd); + if (conn) { + return conn; + } + } + return nullptr; +} + +void DispatchThread::MoveConnIn(std::shared_ptr conn, const NotifyType& type) { + std::unique_ptr& worker_thread = worker_thread_[last_thread_]; + bool success = worker_thread->MoveConnIn(conn, type, true); + if (success) { + last_thread_ = (last_thread_ + 1) % work_num_; + conn->set_net_multiplexer(worker_thread->net_multiplexer()); + } +} + +bool DispatchThread::KillConn(const std::string& ip_port) { + bool result = false; + for (int i = 0; i < work_num_; ++i) { + result = worker_thread_[i]->TryKillConn(ip_port) || result; + } + return result; +} + +void DispatchThread::KillAllConns() { KillConn(kKillAllConnsTask); } + +void DispatchThread::HandleNewConn(const int connfd, const std::string& ip_port) { + // Slow workers may consume many fds. + // We simply loop to find next legal worker. + NetItem ti(connfd, ip_port); + LOG(INFO) << "accept new conn " << ti.String(); + int next_thread = last_thread_; + bool find = false; + for (int cnt = 0; cnt < work_num_; cnt++) { + std::unique_ptr& worker_thread = worker_thread_[next_thread]; + find = worker_thread->MoveConnIn(ti, false); + if (find) { + last_thread_ = (next_thread + 1) % work_num_; + LOG(INFO) << "find worker(" << next_thread << "), refresh the last_thread_ to " << last_thread_; + break; + } + next_thread = (next_thread + 1) % work_num_; + } + + if (!find) { + LOG(INFO) << "all workers are full, queue limit is " << queue_limit_; + // every worker is full + // TODO(anan) maybe add log + close(connfd); + } +} + +bool BlockedConnNode::IsExpired() { + if (expire_time_ == 0) { + return false; + } + auto now = std::chrono::system_clock::now(); + int64_t now_in_ms = std::chrono::time_point_cast(now).time_since_epoch().count(); + if (expire_time_ <= now_in_ms) { + return true; + } + return false; +} + +std::shared_ptr& BlockedConnNode::GetConnBlocked() { return conn_blocked_; } +BlockKeyType BlockedConnNode::GetBlockType() const { return block_type_; } + +void DispatchThread::CleanWaitNodeOfUnBlockedBlrConn(std::shared_ptr conn_unblocked) { + // removed all the waiting info of this conn/ doing cleaning work + auto pair = blocked_conn_to_keys_.find(conn_unblocked->fd()); + if (pair == blocked_conn_to_keys_.end()) { + LOG(WARNING) << "blocking info of blpop/brpop went wrong, blpop/brpop can't working correctly"; + return; + } + auto& blpop_keys_list = pair->second; + for (auto& blpop_key : *blpop_keys_list) { + auto& wait_list_of_this_key = key_to_blocked_conns_.find(blpop_key)->second; + for (auto conn = wait_list_of_this_key->begin(); conn != wait_list_of_this_key->end();) { + if 
(conn->GetConnBlocked()->fd() == conn_unblocked->fd()) { + conn = wait_list_of_this_key->erase(conn); + break; + } + conn++; + } + } + blocked_conn_to_keys_.erase(conn_unblocked->fd()); +} + +void DispatchThread::CleanKeysAfterWaitNodeCleaned() { + // after wait info of a conn is cleaned, some wait list of keys might be empty, must erase them from the map + std::vector keys_to_erase; + for (auto& pair : key_to_blocked_conns_) { + if (pair.second->empty()) { + // wait list of this key is empty, just erase this key + keys_to_erase.emplace_back(pair.first); + } + } + for (auto& blrpop_key : keys_to_erase) { + key_to_blocked_conns_.erase(blrpop_key); + } +} + +void DispatchThread::ClosingConnCheckForBlrPop(std::shared_ptr conn_to_close) { + if (!conn_to_close) { + // dynamic pointer cast failed, it's not an instance of RedisConn, no need of the process below + return; + } + { + std::shared_lock l(block_mtx_); + if (blocked_conn_to_keys_.find(conn_to_close->fd()) == blocked_conn_to_keys_.end()) { + // this conn_to_close is not disconnected from blocking state cause by "blpop/brpop" + return; + } + } + std::lock_guard l(block_mtx_); + CleanWaitNodeOfUnBlockedBlrConn(conn_to_close); + CleanKeysAfterWaitNodeCleaned(); +} + +void DispatchThread::ScanExpiredBlockedConnsOfBlrpop() { + std::unique_lock latch(block_mtx_); + for (auto& pair : key_to_blocked_conns_) { + auto& conns_list = pair.second; + for (auto conn_node = conns_list->begin(); conn_node != conns_list->end();) { + if (conn_node->IsExpired()) { + std::shared_ptr conn_ptr = conn_node->GetConnBlocked(); + conn_ptr->WriteResp("$-1\r\n"); + conn_ptr->NotifyEpoll(true); + conn_node = conns_list->erase(conn_node); + CleanWaitNodeOfUnBlockedBlrConn(conn_ptr); + } else { + conn_node++; + } + } + } + CleanKeysAfterWaitNodeCleaned(); +} + +void DispatchThread::SetQueueLimit(int queue_limit) { queue_limit_ = queue_limit; } + +void DispatchThread::AllConn(const std::function&)>& func) { + std::unique_lock l(block_mtx_); + for (const auto& item : worker_thread_) { + std::unique_lock wl(item->rwlock_); + for (const auto& conn : item->conns_) { + func(conn.second); + } + } +} + +/** + * @param keys format: tablename + key,because can watch the key of different db + */ +void DispatchThread::AddWatchKeys(const std::unordered_set& keys, + const std::shared_ptr& client_conn) { + std::lock_guard lg(watch_keys_mu_); + for (const auto& key : keys) { + if (key_conns_map_.count(key) == 0) { + key_conns_map_.emplace(); + } + key_conns_map_[key].emplace(client_conn); + conn_keys_map_[client_conn].emplace(key); + } +} + +void DispatchThread::RemoveWatchKeys(const std::shared_ptr& client_conn) { + std::lock_guard lg(watch_keys_mu_); + auto& keys = conn_keys_map_[client_conn]; + for (const auto& key : keys) { + if (key_conns_map_.count(key) == 0 || key_conns_map_[key].count(client_conn) == 0) { + continue; + } + key_conns_map_[key].erase(client_conn); + if (key_conns_map_[key].empty()) { + key_conns_map_.erase(key); + } + } + conn_keys_map_.erase(client_conn); +} + +std::vector> DispatchThread::GetInvolvedTxn(const std::vector& keys) { + std::lock_guard lg(watch_keys_mu_); + auto involved_conns = std::vector>{}; + for (const auto& key : keys) { + if (key_conns_map_.count(key) == 0 || key_conns_map_[key].empty()) { + continue; + } + for (auto& client_conn : key_conns_map_[key]) { + involved_conns.emplace_back(client_conn); + } + } + return involved_conns; +} + +std::vector> DispatchThread::GetAllTxns() { + std::lock_guard lg(watch_keys_mu_); + auto involved_conns = 
std::vector>{}; + for (auto& [client_conn, _] : conn_keys_map_) { + involved_conns.emplace_back(client_conn); + } + return involved_conns; +} + +std::vector> DispatchThread::GetDBTxns(std::string db_name) { + std::lock_guard lg(watch_keys_mu_); + auto involved_conns = std::vector>{}; + for (auto& [db_key, client_conns] : key_conns_map_) { + if (db_key.find(db_name) == 0) { + involved_conns.insert(involved_conns.end(), client_conns.begin(), client_conns.end()); + } + } + return involved_conns; +} + +extern ServerThread* NewDispatchThread(int port, int work_num, ConnFactory* conn_factory, int cron_interval, + int queue_limit, const ServerHandle* handle) { + return new DispatchThread(port, work_num, conn_factory, cron_interval, queue_limit, handle); +} +extern ServerThread* NewDispatchThread(const std::string& ip, int port, int work_num, ConnFactory* conn_factory, + int cron_interval, int queue_limit, const ServerHandle* handle) { + return new DispatchThread(ip, port, work_num, conn_factory, cron_interval, queue_limit, handle); +} +extern ServerThread* NewDispatchThread(const std::set& ips, int port, int work_num, + ConnFactory* conn_factory, int cron_interval, int queue_limit, + const ServerHandle* handle) { + return new DispatchThread(ips, port, work_num, conn_factory, cron_interval, queue_limit, handle); +} + +}; // namespace net diff --git a/tools/pika_migrate/src/net/src/dispatch_thread.h b/tools/pika_migrate/src/net/src/dispatch_thread.h new file mode 100644 index 0000000000..6d6543d3a9 --- /dev/null +++ b/tools/pika_migrate/src/net/src/dispatch_thread.h @@ -0,0 +1,168 @@ +// Copyright (c) 2015-present, Qihoo, Inc. All rights reserved. +// This source code is licensed under the BSD-style license found in the +// LICENSE file in the root directory of this source tree. An additional grant +// of patent rights can be found in the PATENTS file in the same directory. + +#ifndef NET_SRC_DISPATCH_THREAD_H_ +#define NET_SRC_DISPATCH_THREAD_H_ + +#include +#include +#include +#include +#include +#include +#include +#include + +#include "net/include/net_conn.h" +#include "net/include/redis_conn.h" +#include "net/include/server_thread.h" +#include "net/src/net_util.h" +#include "pstd/include/env.h" +#include "pstd/include/xdebug.h" + +enum BlockKeyType { Blpop, Brpop }; +namespace net { + +class NetItem; +class NetFiredEvent; +class WorkerThread; + +struct BlockKey { // this data struct is made for the scenario of multi dbs in pika. 
+  std::string db_name;
+  std::string key;
+  bool operator==(const BlockKey& p) const { return p.db_name == db_name && p.key == key; }
+};
+struct BlockKeyHash {
+  std::size_t operator()(const BlockKey& k) const {
+    return std::hash<std::string>{}(k.db_name) ^ std::hash<std::string>{}(k.key);
+  }
+};
+
+class BlockedConnNode {
+ public:
+  virtual ~BlockedConnNode() {}
+  BlockedConnNode(int64_t expire_time, std::shared_ptr<net::RedisConn>& conn_blocked, BlockKeyType block_type)
+      : expire_time_(expire_time), conn_blocked_(conn_blocked), block_type_(block_type) {}
+  bool IsExpired();
+  std::shared_ptr<net::RedisConn>& GetConnBlocked();
+  BlockKeyType GetBlockType() const;
+
+ private:
+  int64_t expire_time_;
+  std::shared_ptr<net::RedisConn> conn_blocked_;
+  BlockKeyType block_type_;
+};
+
+
+class DispatchThread : public ServerThread {
+ public:
+  DispatchThread(int port, int work_num, ConnFactory* conn_factory, int cron_interval, int queue_limit,
+                 const ServerHandle* handle);
+  DispatchThread(const std::string& ip, int port, int work_num, ConnFactory* conn_factory, int cron_interval,
+                 int queue_limit, const ServerHandle* handle);
+  DispatchThread(const std::set<std::string>& ips, int port, int work_num, ConnFactory* conn_factory,
+                 int cron_interval, int queue_limit, const ServerHandle* handle);
+
+  ~DispatchThread() override;
+
+  int StartThread() override;
+
+  int StopThread() override;
+
+  void set_keepalive_timeout(int timeout) override;
+
+  int conn_num() const override;
+
+  std::vector<ServerThread::ConnInfo> conns_info() const override;
+
+  std::shared_ptr<NetConn> MoveConnOut(int fd) override;
+
+  void MoveConnIn(std::shared_ptr<NetConn> conn, const NotifyType& type) override;
+
+  void KillAllConns() override;
+
+  bool KillConn(const std::string& ip_port) override;
+
+  void HandleNewConn(int connfd, const std::string& ip_port) override;
+
+  void SetQueueLimit(int queue_limit) override;
+
+  void AllConn(const std::function<void(const std::shared_ptr<NetConn>&)>& func);
+
+  /**
+   * BlPop/BrPop used start
+   */
+  void CleanWaitNodeOfUnBlockedBlrConn(std::shared_ptr<net::RedisConn> conn_unblocked);
+
+  void CleanKeysAfterWaitNodeCleaned();
+
+  // if a client closed the conn while waiting for the response of "blpop/brpop", some cleaning work must be done.
+  void ClosingConnCheckForBlrPop(std::shared_ptr<net::RedisConn> conn_to_close);
+
+  void ScanExpiredBlockedConnsOfBlrpop();
+
+  std::unordered_map<BlockKey, std::unique_ptr<std::list<BlockedConnNode>>, BlockKeyHash>& GetMapFromKeyToConns() {
+    return key_to_blocked_conns_;
+  }
+  std::unordered_map<int, std::unique_ptr<std::list<BlockKey>>>& GetMapFromConnToKeys() {
+    return blocked_conn_to_keys_;
+  }
+  std::shared_mutex& GetBlockMtx() { return block_mtx_; };
+  // BlPop/BrPop used end
+
+  void AddWatchKeys(const std::unordered_set<std::string>& keys, const std::shared_ptr<NetConn>& client_conn);
+
+  void RemoveWatchKeys(const std::shared_ptr<NetConn>& client_conn);
+
+  std::vector<std::shared_ptr<NetConn>> GetInvolvedTxn(const std::vector<std::string>& keys);
+  std::vector<std::shared_ptr<NetConn>> GetAllTxns();
+  std::vector<std::shared_ptr<NetConn>> GetDBTxns(std::string db_name);
+
+ private:
+  /*
+   * Here we use round-robin polling to pick the next worker thread;
+   * last_thread_ is the last worker thread used.
+   */
+  int last_thread_;
+  int work_num_;
+  /*
+   * These are the worker threads
+   */
+  std::vector<std::unique_ptr<WorkerThread>> worker_thread_;
+  int queue_limit_;
+  std::map<WorkerThread*, void*> localdata_;
+
+  std::unordered_map<std::string, std::unordered_set<std::shared_ptr<NetConn>>> key_conns_map_;
+  std::unordered_map<std::shared_ptr<NetConn>, std::unordered_set<std::string>> conn_keys_map_;
+  std::mutex watch_keys_mu_;
+
+  void HandleConnEvent(NetFiredEvent* pfe) override { UNUSED(pfe); }
+
+  /*
+   * Blpop/BRpop used
+   */
+  /* key_to_blocked_conns_:
+   * mapping from "BlockKey" (eg. "<db0, mylist>") to a list that stores the nodes of client connections
+   * that were blocked by command blpop/brpop with that key.
+   */
+  std::unordered_map<BlockKey, std::unique_ptr<std::list<BlockedConnNode>>, BlockKeyHash> key_to_blocked_conns_;
+
+  /*
+   * blocked_conn_to_keys_:
+   * mapping from conn(fd) to a list of keys that the client is waiting for.
+   */
+  std::unordered_map<int, std::unique_ptr<std::list<BlockKey>>> blocked_conn_to_keys_;
+
+  /*
+   * latch of the two maps above.
+   */
+  std::shared_mutex block_mtx_;
+
+  TimerTaskThread timer_task_thread_;
+};  // class DispatchThread
+
+}  // namespace net
+#endif  // NET_SRC_DISPATCH_THREAD_H_
diff --git a/tools/pika_migrate/src/net/src/holy_thread.cc b/tools/pika_migrate/src/net/src/holy_thread.cc
new file mode 100644
index 0000000000..cb12906880
--- /dev/null
+++ b/tools/pika_migrate/src/net/src/holy_thread.cc
@@ -0,0 +1,325 @@
+// Copyright (c) 2015-present, Qihoo, Inc. All rights reserved.
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree. An additional grant
+// of patent rights can be found in the PATENTS file in the same directory.
+
+#include <vector>
+
+#include "net/src/holy_thread.h"
+
+#include <glog/logging.h>
+
+#include "net/include/net_conn.h"
+#include "net/src/net_item.h"
+#include "net/src/net_multiplexer.h"
+#include "pstd/include/xdebug.h"
+
+namespace net {
+
+HolyThread::HolyThread(int port, ConnFactory* conn_factory, int cron_interval, const ServerHandle* handle, bool async)
+    : ServerThread::ServerThread(port, cron_interval, handle),
+      conn_factory_(conn_factory),
+      keepalive_timeout_(kDefaultKeepAliveTime),
+      async_(async) {}
+
+HolyThread::HolyThread(const std::string& bind_ip, int port, ConnFactory* conn_factory, int cron_interval,
+                       const ServerHandle* handle, bool async)
+    : ServerThread::ServerThread(bind_ip, port, cron_interval, handle), conn_factory_(conn_factory), async_(async) {}
+
+HolyThread::HolyThread(const std::set<std::string>& bind_ips, int port, ConnFactory* conn_factory, int cron_interval,
+                       const ServerHandle* handle, bool async)
+    : ServerThread::ServerThread(bind_ips, port, cron_interval, handle), conn_factory_(conn_factory), async_(async) {}
+
+HolyThread::~HolyThread() { Cleanup(); }
+
+int HolyThread::conn_num() const {
+  std::shared_lock l(rwlock_);
+  return static_cast<int>(conns_.size());
+}
+
+std::vector<ServerThread::ConnInfo> HolyThread::conns_info() const {
+  std::vector<ServerThread::ConnInfo> result;
+  std::shared_lock l(rwlock_);
+  for (auto& conn : conns_) {
+    result.push_back({conn.first, conn.second->ip_port(), conn.second->last_interaction()});
+  }
+  return result;
+}
+
+std::shared_ptr<NetConn> HolyThread::MoveConnOut(int fd) {
+  std::lock_guard l(rwlock_);
+  std::shared_ptr<NetConn> conn = nullptr;
+  auto iter = conns_.find(fd);
+  if (iter != conns_.end()) {
+    int fd = iter->first;
+    conn = iter->second;
+    net_multiplexer_->NetDelEvent(fd, 0);
+    conns_.erase(iter);
+  }
+  return conn;
+}
+
+std::shared_ptr<NetConn> HolyThread::get_conn(int fd) {
+  std::shared_lock l(rwlock_);
+  auto iter = conns_.find(fd);
+  if (iter != conns_.end()) {
+    return iter->second;
+  } else {
+    return nullptr;
+  }
+}
+
+int HolyThread::StartThread() {
+  int ret = handle_->CreateWorkerSpecificData(&private_data_);
+  if (ret) {
+    return ret;
+  }
+  return ServerThread::StartThread();
+}
+
+int HolyThread::StopThread() {
+  if (private_data_) {
+    int ret = handle_->DeleteWorkerSpecificData(private_data_);
+    if (ret) {
+      return ret;
+    }
+    private_data_ = nullptr;
+  }
+  return ServerThread::StopThread();
+}
+
+void HolyThread::HandleNewConn(const int connfd, const std::string& ip_port) {
+  std::shared_ptr<NetConn> tc = conn_factory_->NewNetConn(connfd, ip_port, this, private_data_, net_multiplexer_.get());
+  tc->SetNonblock();
+  {
+    std::lock_guard l(rwlock_);
conns_[connfd] = tc; + } + + net_multiplexer_->NetAddEvent(connfd, kReadable); +} + +void HolyThread::HandleConnEvent(NetFiredEvent* pfe) { + if (!pfe) { + return; + } + std::shared_ptr in_conn = nullptr; + int should_close = 0; + + { + std::shared_lock l(rwlock_); + if (auto iter = conns_.find(pfe->fd); iter == conns_.end()) { + net_multiplexer_->NetDelEvent(pfe->fd, 0); + return; + } else { + in_conn = iter->second; + } + } + + if (async_) { + if (pfe->mask & kReadable) { + ReadStatus read_status = in_conn->GetRequest(); + struct timeval now; + gettimeofday(&now, nullptr); + in_conn->set_last_interaction(now); + if (read_status == kReadAll) { + // do nothing still watch EPOLLIN + } else if (read_status == kReadHalf) { + return; + } else { + // kReadError kReadClose kFullError kParseError kDealError + should_close = 1; + } + } + if ((pfe->mask & kWritable) && in_conn->is_reply()) { + WriteStatus write_status = in_conn->SendReply(); + if (write_status == kWriteAll) { + in_conn->set_is_reply(false); + net_multiplexer_->NetModEvent(pfe->fd, 0, kReadable); + } else if (write_status == kWriteHalf) { + return; + } else if (write_status == kWriteError) { + should_close = 1; + } + } + } else { + if (pfe->mask & kReadable) { + ReadStatus getRes = in_conn->GetRequest(); + struct timeval now; + gettimeofday(&now, nullptr); + in_conn->set_last_interaction(now); + if (getRes != kReadAll && getRes != kReadHalf) { + // kReadError kReadClose kFullError kParseError kDealError + should_close = 1; + } else if (in_conn->is_reply()) { + net_multiplexer_->NetModEvent(pfe->fd, 0, kWritable); + } else { + return; + } + } + if (pfe->mask & kWritable) { + WriteStatus write_status = in_conn->SendReply(); + if (write_status == kWriteAll) { + in_conn->set_is_reply(false); + net_multiplexer_->NetModEvent(pfe->fd, 0, kReadable); + } else if (write_status == kWriteHalf) { + return; + } else if (write_status == kWriteError) { + should_close = 1; + } + } + } + if ((pfe->mask & kErrorEvent) || should_close) { + net_multiplexer_->NetDelEvent(pfe->fd, 0); + CloseFd(in_conn); + in_conn = nullptr; + + { + std::lock_guard l(rwlock_); + conns_.erase(pfe->fd); + } + } +} + +void HolyThread::DoCronTask() { + struct timeval now; + gettimeofday(&now, nullptr); + std::vector> to_close; + std::vector> to_timeout; + { + std::lock_guard l(rwlock_); + + // Check whether close all connection + std::lock_guard kl(killer_mutex_); + if (deleting_conn_ipport_.count(kKillAllConnsTask)) { + for (auto& conn : conns_) { + to_close.push_back(conn.second); + } + conns_.clear(); + deleting_conn_ipport_.clear(); + for (const auto& conn : to_close) { + CloseFd(conn); + } + return; + } + + auto iter = conns_.begin(); + while (iter != conns_.end()) { + std::shared_ptr conn = iter->second; + // Check connection should be closed + if (deleting_conn_ipport_.count(conn->ip_port())) { + to_close.push_back(conn); + deleting_conn_ipport_.erase(conn->ip_port()); + iter = conns_.erase(iter); + continue; + } + + // Check keepalive timeout connection + if (keepalive_timeout_ > 0 && (now.tv_sec - conn->last_interaction().tv_sec > keepalive_timeout_)) { + to_timeout.push_back(conn); + iter = conns_.erase(iter); + continue; + } + + // Maybe resize connection buffer + conn->TryResizeBuffer(); + + ++iter; + } + } + for (const auto& conn : to_close) { + CloseFd(conn); + } + for (const auto& conn : to_timeout) { + CloseFd(conn); + handle_->FdTimeoutHandle(conn->fd(), conn->ip_port()); + } +} + +void HolyThread::CloseFd(const std::shared_ptr& conn) { + close(conn->fd()); 
+ handle_->FdClosedHandle(conn->fd(), conn->ip_port()); +} + +// clean all conns +void HolyThread::Cleanup() { + std::map> to_close; + { + std::lock_guard l(rwlock_); + to_close = std::move(conns_); + conns_.clear(); + } + for (auto& iter : to_close) { + CloseFd(iter.second); + } +} + +void HolyThread::KillAllConns() { KillConn(kKillAllConnsTask); } + +bool HolyThread::KillConn(const std::string& ip_port) { + bool find = false; + if (ip_port != kKillAllConnsTask) { + std::shared_lock lock(rwlock_); + for (auto& [_, conn] : conns_) { + if (conn->ip_port() == ip_port) { + find = true; + break; + } + } + } + if (find || ip_port == kKillAllConnsTask) { + std::lock_guard l(killer_mutex_); + deleting_conn_ipport_.insert(ip_port); + return true; + } + return false; +} + +void HolyThread::ProcessNotifyEvents(const net::NetFiredEvent* pfe) { + if (pfe->mask & kReadable) { + char bb[2048]; + int64_t nread = read(net_multiplexer_->NotifyReceiveFd(), bb, 2048); + if (nread == 0) { + return; + } else { + for (int32_t idx = 0; idx < nread; ++idx) { + net::NetItem ti = net_multiplexer_->NotifyQueuePop(); + std::string ip_port = ti.ip_port(); + int fd = ti.fd(); + if (ti.notify_type() == net::kNotiWrite) { + net_multiplexer_->NetModEvent(ti.fd(), 0, kReadable | kWritable); + } else if (ti.notify_type() == net::kNotiClose) { + LOG(INFO) << "receive noti close"; + std::shared_ptr conn = get_conn(fd); + if (!conn) { + continue; + } + CloseFd(conn); + conn = nullptr; + { + std::lock_guard l(rwlock_); + conns_.erase(fd); + } + } + } + } + } +} + +extern ServerThread* NewHolyThread(int port, ConnFactory* conn_factory, int cron_interval, const ServerHandle* handle) { + return new HolyThread(port, conn_factory, cron_interval, handle); +} +extern ServerThread* NewHolyThread(const std::string& bind_ip, int port, ConnFactory* conn_factory, int cron_interval, + const ServerHandle* handle) { + return new HolyThread(bind_ip, port, conn_factory, cron_interval, handle); +} +extern ServerThread* NewHolyThread(const std::set& bind_ips, int port, ConnFactory* conn_factory, + int cron_interval, const ServerHandle* handle) { + return new HolyThread(bind_ips, port, conn_factory, cron_interval, handle); +} +extern ServerThread* NewHolyThread(const std::set& bind_ips, int port, ConnFactory* conn_factory, + bool async, int cron_interval, const ServerHandle* handle) { + return new HolyThread(bind_ips, port, conn_factory, cron_interval, handle, async); +} +}; // namespace net diff --git a/tools/pika_migrate/src/net/src/holy_thread.h b/tools/pika_migrate/src/net/src/holy_thread.h new file mode 100644 index 0000000000..312de4c84f --- /dev/null +++ b/tools/pika_migrate/src/net/src/holy_thread.h @@ -0,0 +1,81 @@ +// Copyright (c) 2015-present, Qihoo, Inc. All rights reserved. +// This source code is licensed under the BSD-style license found in the +// LICENSE file in the root directory of this source tree. An additional grant +// of patent rights can be found in the PATENTS file in the same directory. 
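+// HolyThread accepts connections and serves them on this same thread, in
+// contrast to DispatchThread, which hands accepted fds off to worker threads.
+// A minimal usage sketch (MyConnFactory is a hypothetical ConnFactory
+// subclass, not part of this patch; port and cron interval are illustrative):
+//
+//   net::ConnFactory* factory = new MyConnFactory();
+//   net::ServerThread* st = net::NewHolyThread(9221, factory, 1000 /* cron ms */, nullptr);
+//   st->StartThread();
+//   // ... serve traffic ...
+//   st->StopThread();
+//   delete st;
+//   delete factory;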
+
+#ifndef NET_SRC_HOLY_THREAD_H_
+#define NET_SRC_HOLY_THREAD_H_
+
+#include <atomic>
+#include <map>
+#include <memory>
+#include <set>
+#include <string>
+
+#include "net/include/net_conn.h"
+#include "net/include/server_thread.h"
+#include "pstd/include/pstd_mutex.h"
+#include "pstd/include/xdebug.h"
+
+namespace net {
+class NetConn;
+
+class HolyThread : public ServerThread {
+ public:
+  // This type of thread listens and works by itself, like the redis server thread
+  HolyThread(int port, ConnFactory* conn_factory, int cron_interval = 0, const ServerHandle* handle = nullptr,
+             bool async = true);
+  HolyThread(const std::string& bind_ip, int port, ConnFactory* conn_factory, int cron_interval = 0,
+             const ServerHandle* handle = nullptr, bool async = true);
+  HolyThread(const std::set<std::string>& bind_ips, int port, ConnFactory* conn_factory, int cron_interval = 0,
+             const ServerHandle* handle = nullptr, bool async = true);
+  ~HolyThread() override;
+
+  int StartThread() override;
+
+  int StopThread() override;
+
+  void set_thread_name(const std::string& name) override { Thread::set_thread_name(name); }
+
+  void set_keepalive_timeout(int timeout) override { keepalive_timeout_ = timeout; }
+
+  int conn_num() const override;
+
+  std::vector<ServerThread::ConnInfo> conns_info() const override;
+
+  std::shared_ptr<NetConn> MoveConnOut(int fd) override;
+
+  void MoveConnIn(std::shared_ptr<NetConn> conn, const NotifyType& type) override {}
+
+  void KillAllConns() override;
+
+  bool KillConn(const std::string& ip_port) override;
+
+  virtual std::shared_ptr<NetConn> get_conn(int fd);
+
+  void ProcessNotifyEvents(const net::NetFiredEvent* pfe) override;
+  void Cleanup();
+
+ private:
+  mutable pstd::RWMutex rwlock_; /* For external statistics */
+  std::map<int, std::shared_ptr<NetConn>> conns_;
+
+  ConnFactory* conn_factory_ = nullptr;
+  void* private_data_ = nullptr;
+
+  std::atomic<int> keepalive_timeout_;  // keepalive seconds
+  bool async_;
+
+  void DoCronTask() override;
+
+  pstd::Mutex killer_mutex_;
+  std::set<std::string> deleting_conn_ipport_;
+
+  void HandleNewConn(int connfd, const std::string& ip_port) override;
+  void HandleConnEvent(NetFiredEvent* pfe) override;
+
+  void CloseFd(const std::shared_ptr<NetConn>& conn);
+};  // class HolyThread
+
+}  // namespace net
+#endif  // NET_SRC_HOLY_THREAD_H_
diff --git a/tools/pika_migrate/src/net/src/http_conn.cc b/tools/pika_migrate/src/net/src/http_conn.cc
new file mode 100644
index 0000000000..bde5f46177
--- /dev/null
+++ b/tools/pika_migrate/src/net/src/http_conn.cc
@@ -0,0 +1,620 @@
+// Copyright (c) 2015-present, Qihoo, Inc. All rights reserved.
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree. An additional grant
+// of patent rights can be found in the PATENTS file in the same directory.
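+// The request parser below is a hand-rolled state machine: ParseHeadLine()
+// walks each header line through the states kHeaderMethod -> kHeaderPath ->
+// kHeaderVersion -> kHeaderParamKey/kHeaderParamValue, so for example
+//
+//   GET /stat?format=json HTTP/1.1\r\n
+//   Host: 127.0.0.1:8080\r\n
+//   \r\n
+//
+// yields method_ = "GET", path_ = "/stat", query_params_["format"] = "json",
+// and headers_["host"] = "127.0.0.1:8080" (header names are lower-cased).
+// Headers are capped at kHTTPMaxHeader (1 MiB) and bodies are buffered in
+// kHTTPMaxMessage (8 MiB) chunks; the URL and sizes above are illustrative,
+// not taken from this patch.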
+ +#include "net/include/http_conn.h" +#include +#include +#include + +#include +#include +#include + +#include "net/include/net_define.h" +#include "pstd/include/pstd_string.h" +#include "pstd/include/xdebug.h" + +namespace net { + +static const uint32_t kHTTPMaxMessage = 1024 * 1024 * 8; +static const uint32_t kHTTPMaxHeader = 1024 * 1024; + +static const std::map http_status_map = { + {100, "Continue"}, + {101, "Switching Protocols"}, + {102, "Processing"}, + + {200, "OK"}, + {201, "Created"}, + {202, "Accepted"}, + {203, "Non-Authoritative Information"}, + {204, "No Content"}, + {205, "Reset Content"}, + {206, "Partial Content"}, + {207, "Multi-Status"}, + + {400, "Bad Request"}, + {401, "Unauthorized"}, + {402, ""}, // reserve + {403, "Forbidden"}, + {404, "Not Found"}, + {405, "Method Not Allowed"}, + {406, "Not Acceptable"}, + {407, "Proxy Authentication Required"}, + {408, "Request Timeout"}, + {409, "Conflict"}, + {416, "Requested Range not satisfiable"}, + + {500, "Internal Server Error"}, + {501, "Not Implemented"}, + {502, "Bad Gateway"}, + {503, "Service Unavailable"}, + {504, "Gateway Timeout"}, + {505, "HTTP Version Not Supported"}, + {506, "Variant Also Negotiates"}, + {507, "Insufficient Storage"}, + {508, "Bandwidth Limit Exceeded"}, + {509, "Not Extended"}, +}; + +inline int find_lf(const char* data, int size) { + const char* c = data; + int count = 0; + while (count < size) { + if (*c == '\n') { + break; + } + c++; + count++; + } + return count; +} + +bool HTTPRequest::ParseHeadLine(const char* data, int line_start, int line_end) { + std::string param_key; + std::string param_value; + for (int i = line_start; i <= line_end; i++) { + switch (parse_status_) { + case kHeaderMethod: + if (data[i] != ' ') { + method_.push_back(data[i]); + } else { + parse_status_ = kHeaderPath; + } + break; + case kHeaderPath: + if (data[i] != ' ') { + url_.push_back(data[i]); + } else { + parse_status_ = kHeaderVersion; + } + break; + case kHeaderVersion: + if (data[i] != '\r' && data[i] != '\n') { + version_.push_back(data[i]); + } else if (data[i] == '\n') { + parse_status_ = kHeaderParamKey; + } + break; + case kHeaderParamKey: + if (data[i] != ':' && data[i] != ' ') { + param_key.push_back(data[i]); + } else if (data[i] == ' ') { + parse_status_ = kHeaderParamValue; + } + break; + case kHeaderParamValue: + if (data[i] != '\r' && data[i] != '\n') { + param_value.push_back(data[i]); + } else if (data[i] == '\r') { + headers_[pstd::StringToLower(param_key)] = param_value; + parse_status_ = kHeaderParamKey; + } + break; + + default: + return false; + } + } + return true; +} + +bool HTTPRequest::ParseGetUrl() { + path_ = url_; + // Format path + if (headers_.count("host") && path_.find(headers_["host"]) != std::string::npos && + path_.size() > (7 + headers_["host"].size())) { + // http://www.xxx.xxx/path_/to + path_.assign(path_.substr(7 + headers_["host"].size())); + } + size_t n = path_.find('?'); + if (n == std::string::npos) { + return true; // no parameter + } + if (!ParseParameters(path_, n + 1)) { + return false; + } + path_.resize(n); + return true; +} + +// Parse query parameter from GET url or POST application/x-www-form-urlencoded +// format: key1=value1&key2=value2&key3=value3 +bool HTTPRequest::ParseParameters(std::string& data, size_t line_start) { + size_t pre = line_start; + size_t mid; + size_t end; + while (pre < data.size()) { + mid = data.find('=', pre); + if (mid == std::string::npos) { + mid = data.size(); + } + end = data.find('&', pre); + if (end == std::string::npos) 
{ + end = data.size(); + } + if (end <= mid) { + // empty value + query_params_[data.substr(pre, end - pre)] = std::string(); + pre = end + 1; + } else { + query_params_[data.substr(pre, mid - pre)] = data.substr(mid + 1, end - mid - 1); + pre = end + 1; + } + } + return true; +} + +int HTTPRequest::ParseHeader() { + rbuf_[rbuf_pos_] = '\0'; // Avoid strstr() parsing expire char + char* sep_pos = strstr(rbuf_, "\r\n\r\n"); + if (!sep_pos) { + // Haven't find header + return 0; + } + auto header_len = static_cast(sep_pos - rbuf_ + 4); + int remain_size = header_len; + if (remain_size <= 5) { + // Header error + return -1; + } + + // Parse header line + int line_start = 0; + int line_end = 0; + while (remain_size > 4) { + line_end += find_lf(rbuf_ + line_start, remain_size); + if (line_end < line_start) { + return -1; + } + if (!ParseHeadLine(rbuf_, line_start, line_end)) { + return -1; + } + remain_size -= (line_end - line_start + 1); + line_start = ++line_end; + } + + // Parse query parameter from url + if (!ParseGetUrl()) { + return -1; + } + + remain_recv_len_ = headers_.count("content-length") ? std::stoul(headers_.at("content-length")) : 0; + + if (headers_.count("content-type")) { + content_type_.assign(headers_.at("content-type")); + } + + if (headers_.count("expect") && + (headers_.at("expect") == "100-Continue" || headers_.at("expect") == "100-continue")) { + reply_100continue_ = true; + } + + return header_len; +} + +void HTTPRequest::Dump() const { + std::cout << "Method: " << method_ << std::endl; + std::cout << "Url: " << url_ << std::endl; + std::cout << "Path: " << path_ << std::endl; + std::cout << "Version: " << version_ << std::endl; + std::cout << "Headers: " << std::endl; + for (auto& header : headers_) { + std::cout << " ----- " << header.first << ": " << header.second << std::endl; + } + std::cout << "Query params: " << std::endl; + for (auto& item : query_params_) { + std::cout << " ----- " << item.first << ": " << item.second << std::endl; + } +} + +// Return bytes actual be writen, should be less than size +bool HTTPResponse::SerializeHeader() { + int serial_size = 0; + int ret; + + const std::string& reason_phrase = http_status_map.at(status_code_); + + // Serialize statues line + ret = snprintf(wbuf_, kHTTPMaxHeader, "HTTP/1.1 %d %s\r\n", status_code_, reason_phrase.c_str()); + serial_size += ret; + if (ret < 0 || ret == static_cast(kHTTPMaxHeader)) { + return false; + } + + for (auto& line : headers_) { + ret = snprintf(wbuf_ + serial_size, kHTTPMaxHeader - serial_size, "%s: %s\r\n", line.first.c_str(), + line.second.c_str()); + serial_size += ret; + if (ret < 0 || serial_size == static_cast(kHTTPMaxHeader)) { + return false; + } + } + + ret = snprintf(wbuf_ + serial_size, kHTTPMaxHeader - serial_size, "\r\n"); + serial_size += ret; + if (ret < 0 || serial_size == static_cast(kHTTPMaxHeader)) { + return false; + } + + buf_len_ = serial_size; + return true; +} + +HTTPConn::HTTPConn(const int fd, const std::string& ip_port, Thread* thread, std::shared_ptr handles, + void* worker_specific_data) + : NetConn(fd, ip_port, thread), +#ifdef __ENABLE_SSL + // security_(thread->security()), +#endif + handles_(std::move(handles)) { + handles_->worker_specific_data_ = worker_specific_data; + // this pointer is safe here + request_ = new HTTPRequest(this); + response_ = new HTTPResponse(this); +} + +HTTPConn::~HTTPConn() { + delete request_; + delete response_; +} + +HTTPRequest::HTTPRequest(HTTPConn* conn) + : conn_(conn) + { + rbuf_ = new char[kHTTPMaxMessage]; +} + 
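+// rbuf_ above is a fixed kHTTPMaxMessage (8 MiB) heap buffer owned by the
+// request and released in the destructor below. Bodies larger than the
+// buffer are not accumulated: ReadData() hands each filled chunk to the
+// registered handles via HandleBodyData(rbuf_, rbuf_pos_) and then resets
+// rbuf_pos_ to reuse the buffer.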
+HTTPRequest::~HTTPRequest() { delete[] rbuf_; } + +std::string HTTPRequest::url() const { return url_; } + +std::string HTTPRequest::path() const { return path_; } + +std::string HTTPRequest::query_value(const std::string& field) const { + if (query_params_.count(field)) { + return query_params_.at(field); + } + return ""; +} + +std::string HTTPRequest::postform_value(const std::string& field) const { + if (postform_params_.count(field)) { + return postform_params_.at(field); + } + return ""; +} + +std::string HTTPRequest::method() const { return method_; } + +std::string HTTPRequest::content_type() const { return content_type_; } + +std::map HTTPRequest::query_params() const { return query_params_; } + +std::map HTTPRequest::postform_params() const { return postform_params_; } + +std::map HTTPRequest::headers() const { return headers_; } + +std::string HTTPRequest::client_ip_port() const { return client_ip_port_; } + +void HTTPRequest::Reset() { + rbuf_pos_ = 0; + method_.clear(); + path_.clear(); + version_.clear(); + url_.clear(); + content_type_.clear(); + remain_recv_len_ = 0; + reply_100continue_ = false; + postform_params_.clear(); + query_params_.clear(); + headers_.clear(); + parse_status_ = kHeaderMethod; + client_ip_port_ = conn_->ip_port(); +} + +ReadStatus HTTPRequest::DoRead() { + ssize_t nread; +#ifdef __ENABLE_SSL + if (conn_->security_) { + nread = SSL_read(conn_->ssl(), rbuf_ + rbuf_pos_, static_cast(kHTTPMaxMessage)); + if (nread <= 0) { + int sslerr = SSL_get_error(conn_->ssl(), static_cast(nread)); + switch (sslerr) { + case SSL_ERROR_WANT_READ: + case SSL_ERROR_WANT_WRITE: + return kReadHalf; + case SSL_ERROR_SYSCALL: + break; + case SSL_ERROR_SSL: + default: + return kReadClose; + } + } + } else +#endif + { + nread = read(conn_->fd(), rbuf_ + rbuf_pos_, kHTTPMaxMessage - rbuf_pos_); + } + if (nread > 0) { + rbuf_pos_ += nread; + if (req_status_ == kBodyReceiving) { + remain_recv_len_ -= nread; + } + } else if (nread == -1 && errno == EAGAIN) { + return kReadHalf; + } else if (nread <= 0) { + return kReadClose; + } + + return kOk; +} + +ReadStatus HTTPRequest::ReadData() { + if (req_status_ == kNewRequest) { + Reset(); + if (conn_->response_->Finished()) { + conn_->response_->Reset(); + } else { + return kReadHalf; + } + req_status_ = kHeaderReceiving; + } + + ReadStatus s; + while (true) { + int header_len = 0; + switch (req_status_) { + case kHeaderReceiving: + if ((s = DoRead()) != kOk) { + conn_->handles_->HandleConnClosed(); + return s; + } + header_len = ParseHeader(); + if (header_len < 0 || rbuf_pos_ > kHTTPMaxHeader) { + // Parse header error + conn_->handles_->HandleConnClosed(); + return kReadError; + } else if (header_len > 0) { + // Parse header success + req_status_ = kBodyReceiving; + bool need_reply = conn_->handles_->HandleRequest(this); + if (need_reply) { + req_status_ = kBodyReceived; + break; + } + + // Move remain body part to begin + memmove(rbuf_, rbuf_ + header_len, rbuf_pos_ - header_len); + remain_recv_len_ -= rbuf_pos_ - header_len; + rbuf_pos_ -= header_len; + + if (reply_100continue_ && remain_recv_len_ != 0) { + conn_->response_->SetStatusCode(100); + reply_100continue_ = false; + return kReadAll; + } + + if (remain_recv_len_ == 0) { + conn_->handles_->HandleBodyData(rbuf_, rbuf_pos_); + req_status_ = kBodyReceived; + } + } else { + // Haven't find header + } + break; + case kBodyReceiving: + if ((s = DoRead()) != kOk) { + conn_->handles_->HandleConnClosed(); + return s; + } + if (rbuf_pos_ == kHTTPMaxMessage || remain_recv_len_ == 0) { 
+ conn_->handles_->HandleBodyData(rbuf_, rbuf_pos_); + rbuf_pos_ = 0; + } + if (remain_recv_len_ == 0) { + req_status_ = kBodyReceived; + } + break; + case kBodyReceived: + req_status_ = kNewRequest; + conn_->handles_->PrepareResponse(conn_->response_); + return kReadAll; + default: + break; + } + } + + assert(true); +} + +ReadStatus HTTPConn::GetRequest() { + ReadStatus status = request_->ReadData(); + if (status == kReadAll) { + set_is_reply(true); + } + return status; +} + +HTTPResponse::HTTPResponse(HTTPConn* conn) + : conn_(conn) + { + wbuf_ = new char[kHTTPMaxMessage]; +} + +HTTPResponse::~HTTPResponse() { delete[] wbuf_; } + +void HTTPResponse::Reset() { + headers_.clear(); + status_code_ = 200; + finished_ = false; + remain_send_len_ = 0; + wbuf_pos_ = 0; + buf_len_ = 0; + resp_status_ = kPrepareHeader; +} + +bool HTTPResponse::Finished() { return finished_; } + +void HTTPResponse::SetStatusCode(int code) { + assert((code >= 100 && code <= 102) || (code >= 200 && code <= 207) || (code >= 400 && code <= 409) || + (code == 416) || (code >= 500 && code <= 509)); + status_code_ = code; +} + +void HTTPResponse::SetHeaders(const std::string& key, const std::string& value) { headers_[key] = value; } + +void HTTPResponse::SetHeaders(const std::string& key, const size_t value) { headers_[key] = std::to_string(value); } + +void HTTPResponse::SetContentLength(uint64_t size) { + remain_send_len_ = size; + if (headers_.count("Content-Length") || (headers_.count("content-length"))) { + return; + } + SetHeaders("Content-Length", size); +} + +bool HTTPResponse::Flush() { + if (resp_status_ == kPrepareHeader) { + if (!SerializeHeader() || buf_len_ > kHTTPMaxHeader) { + return false; + } + resp_status_ = kSendingHeader; + } + if (resp_status_ == kSendingHeader) { + ssize_t nwritten; +#ifdef __ENABLE_SSL + if (conn_->security_) { + nwritten = SSL_write(conn_->ssl(), wbuf_ + wbuf_pos_, static_cast(buf_len_)); + if (nwritten <= 0) { + // FIXME (gaodq) + int sslerr = SSL_get_error(conn_->ssl(), static_cast(nwritten)); + switch (sslerr) { + case SSL_ERROR_WANT_READ: + case SSL_ERROR_WANT_WRITE: + return true; + case SSL_ERROR_SYSCALL: + break; + case SSL_ERROR_SSL: + default: + return false; + } + } + } else +#endif + { + nwritten = write(conn_->fd(), wbuf_ + wbuf_pos_, buf_len_); + } + if (nwritten == -1 && errno == EAGAIN) { + return true; + } else if (nwritten <= 0) { + // Connection close + return false; + } else if (nwritten == static_cast(buf_len_)) { + // Complete sending header + wbuf_pos_ = 0; + buf_len_ = 0; + if (status_code_ == 100) { + // Sending 100-continue, no body + resp_status_ = kPrepareHeader; + finished_ = true; + return true; + } + resp_status_ = kSendingBody; + } else { + wbuf_pos_ += nwritten; + buf_len_ -= nwritten; + } + } + if (resp_status_ == kSendingBody) { + if (remain_send_len_ == 0) { + // Complete response + finished_ = true; + resp_status_ = kPrepareHeader; + return true; + } + if (buf_len_ == 0) { + size_t remain_buf = static_cast(kHTTPMaxMessage) - wbuf_pos_; + size_t needed_size = std::min(remain_buf, remain_send_len_); + buf_len_ = conn_->handles_->WriteResponseBody(wbuf_ + wbuf_pos_, needed_size); + } + + if (buf_len_ == -1) { + return false; + } + + ssize_t nwritten; +#ifdef __ENABLE_SSL + if (conn_->security_) { + nwritten = SSL_write(conn_->ssl(), wbuf_ + wbuf_pos_, static_cast(buf_len_)); + if (nwritten <= 0) { + // FIXME (gaodq) + int sslerr = SSL_get_error(conn_->ssl(), static_cast(nwritten)); + switch (sslerr) { + case SSL_ERROR_WANT_READ: + case 
SSL_ERROR_WANT_WRITE: + return true; + case SSL_ERROR_SYSCALL: + break; + case SSL_ERROR_SSL: + default: + return false; + } + } + } else +#endif + { + nwritten = write(conn_->fd(), wbuf_ + wbuf_pos_, buf_len_); + } + if (nwritten == -1 && errno == EAGAIN) { + return true; + } else if (nwritten <= 0) { + // Connection close + return false; + } else { + wbuf_pos_ += nwritten; + if (wbuf_pos_ == kHTTPMaxMessage) { + wbuf_pos_ = 0; + } + buf_len_ -= nwritten; + remain_send_len_ -= nwritten; + } + } + + // Continue + return true; +} + +WriteStatus HTTPConn::SendReply() { + if (!response_->Flush()) { + return kWriteError; + } + if (response_->Finished()) { + return kWriteAll; + } + return kWriteHalf; +} + +} // namespace net diff --git a/tools/pika_migrate/src/net/src/net_cli.cc b/tools/pika_migrate/src/net/src/net_cli.cc new file mode 100644 index 0000000000..76196826b7 --- /dev/null +++ b/tools/pika_migrate/src/net/src/net_cli.cc @@ -0,0 +1,307 @@ +// Copyright (c) 2015-present, Qihoo, Inc. All rights reserved. +// This source code is licensed under the BSD-style license found in the +// LICENSE file in the root directory of this source tree. An additional grant +// of patent rights can be found in the PATENTS file in the same directory. + +#include "net/include/net_cli.h" + +#include +#include +#include +#include +#include +#include +#include +#include + +#include + +using pstd::Status; + +namespace net { + +struct NetCli::Rep { + std::string peer_ip; + int peer_port; + int send_timeout{0}; + int recv_timeout{0}; + int connect_timeout{1000}; + bool keep_alive{false}; + bool is_block{true}; + int sockfd{-1}; + bool available{false}; + + Rep() = default; + + Rep(std::string ip, int port) : peer_ip(std::move(ip)),peer_port(port) {} +}; + +NetCli::NetCli(const std::string& ip, const int port) : rep_(std::make_unique(ip, port)) {} + +NetCli::~NetCli() { Close(); } + +bool NetCli::Available() const { return rep_->available; } + +Status NetCli::Connect(const std::string& bind_ip) { return Connect(rep_->peer_ip, rep_->peer_port, bind_ip); } + +Status NetCli::Connect(const std::string& ip, const int port, const std::string& bind_ip) { + std::unique_ptr& r = rep_; + Status s; + int rv; + char cport[6]; + struct addrinfo hints; + struct addrinfo *servinfo; + struct addrinfo *p; + snprintf(cport, sizeof(cport), "%d", port); + memset(&hints, 0, sizeof(hints)); + hints.ai_family = AF_INET; + hints.ai_socktype = SOCK_STREAM; + + // We do not handle IPv6 + if ((rv = getaddrinfo(ip.c_str(), cport, &hints, &servinfo)) != 0) { + return Status::IOError("connect getaddrinfo error for ", ip); + } + for (p = servinfo; p != nullptr; p = p->ai_next) { + if ((r->sockfd = socket(p->ai_family, p->ai_socktype, p->ai_protocol)) == -1) { + continue; + } + + // bind if needed + if (!bind_ip.empty()) { + struct sockaddr_in localaddr; + localaddr.sin_family = AF_INET; + localaddr.sin_addr.s_addr = inet_addr(bind_ip.c_str()); + localaddr.sin_port = 0; // Any local port will do + if (bind(r->sockfd, reinterpret_cast(&localaddr), sizeof(localaddr)) < 0) { + close(r->sockfd); + continue; + } + } + + int flags = fcntl(r->sockfd, F_GETFL, 0); + fcntl(r->sockfd, F_SETFL, flags | O_NONBLOCK); + fcntl(r->sockfd, F_SETFD, fcntl(r->sockfd, F_GETFD) | FD_CLOEXEC); + + if (connect(r->sockfd, p->ai_addr, p->ai_addrlen) == -1) { + if (errno == EHOSTUNREACH) { + close(r->sockfd); + continue; + } else if (errno == EINPROGRESS || errno == EAGAIN || errno == EWOULDBLOCK) { + struct pollfd wfd[1]; + + wfd[0].fd = r->sockfd; + wfd[0].events = 
POLLOUT; + + int res; + if ((res = poll(wfd, 1, r->connect_timeout)) == -1) { + close(r->sockfd); + freeaddrinfo(servinfo); + return Status::IOError("EHOSTUNREACH", "connect poll error"); + } else if (res == 0) { + close(r->sockfd); + freeaddrinfo(servinfo); + return Status::Timeout(""); + } + int val = 0; + socklen_t lon = sizeof(int); + + if (getsockopt(r->sockfd, SOL_SOCKET, SO_ERROR, &val, &lon) == -1) { + close(r->sockfd); + freeaddrinfo(servinfo); + return Status::IOError("EHOSTUNREACH", "connect host getsockopt error"); + } + + if (val != 0) { + close(r->sockfd); + freeaddrinfo(servinfo); + return Status::IOError("EHOSTUNREACH", "connect host error"); + } + } else { + close(r->sockfd); + freeaddrinfo(servinfo); + return Status::IOError("EHOSTUNREACH", "The target host cannot be reached"); + } + } + + struct sockaddr_in laddr; + socklen_t llen = sizeof(laddr); + getsockname(r->sockfd, reinterpret_cast(&laddr), &llen); + std::string lip(inet_ntoa(laddr.sin_addr)); + int lport = ntohs(laddr.sin_port); + if (ip == lip && port == lport) { + return Status::IOError("EHOSTUNREACH", "same ip port"); + } + + flags = fcntl(r->sockfd, F_GETFL, 0); + fcntl(r->sockfd, F_SETFL, flags & ~O_NONBLOCK); + freeaddrinfo(servinfo); + + // connect ok + rep_->available = true; + return s; + } + if (!p) { + s = Status::IOError(strerror(errno), "Can't create socket "); + return s; + } + freeaddrinfo(servinfo); + freeaddrinfo(p); + set_tcp_nodelay(); + return s; +} + +static int PollFd(int fd, int events, int ms) { + pollfd fds[1]; + fds[0].fd = fd; + fds[0].events = static_cast(events); + fds[0].revents = 0; + + int ret = ::poll(fds, 1, ms); + if (ret > 0) { + return fds[0].revents; + } + + return ret; +} + +static int CheckSockAliveness(int fd) { + char buf[1]; + int ret; + + ret = PollFd(fd, POLLIN | POLLPRI, 0); + if (0 < ret) { + int64_t num = ::recv(fd, buf, 1, MSG_PEEK); + if (num == 0) { + return -1; + } + if (num == -1) { + int errnum = errno; + if (errnum != EINTR && errnum != EAGAIN && errnum != EWOULDBLOCK) { + return -1; + } + } + } + + return 0; +} + +int NetCli::CheckAliveness() { + int flag; + bool block; + int sock = fd(); + + if (sock < 0) { + return -1; + } + + flag = fcntl(sock, F_GETFL, 0); + block = ((flag & O_NONBLOCK) == 0); + if (block) { + fcntl(sock, F_SETFL, flag | O_NONBLOCK); + } + + int ret = CheckSockAliveness(sock); + + if (block) { + fcntl(sock, F_SETFL, flag); + } + + return ret; +} + +Status NetCli::SendRaw(void* buf, size_t count) { + char* wbuf = reinterpret_cast(buf); + size_t nleft = count; + ssize_t pos = 0; + ssize_t nwritten; + + while (nleft > 0) { + if ((nwritten = write(rep_->sockfd, wbuf + pos, nleft)) < 0) { + if (errno == EINTR) { + continue; + } else if (errno == EAGAIN || errno == EWOULDBLOCK) { + return Status::Timeout("Send timeout"); + } else { + return Status::IOError("write error " + std::string(strerror(errno))); + } + } else if (nwritten == 0) { + return Status::IOError("write nothing"); + } + + nleft -= nwritten; + pos += nwritten; + } + + return Status::OK(); +} + +Status NetCli::RecvRaw(void* buf, size_t* count) { + std::unique_ptr& r = rep_; + char* rbuf = reinterpret_cast(buf); + size_t nleft = *count; + size_t pos = 0; + ssize_t nread; + + while (nleft > 0) { + if ((nread = read(r->sockfd, rbuf + pos, nleft)) < 0) { + if (errno == EINTR) { + continue; + } else if (errno == EAGAIN || errno == EWOULDBLOCK) { + return Status::Timeout("Send timeout"); + } else { + return Status::IOError("read error " + std::string(strerror(errno))); + } + } else if 
(nread == 0) { + return Status::EndFile("socket closed"); + } + nleft -= nread; + pos += nread; + } + + *count = pos; + return Status::OK(); +} + +int NetCli::fd() const { return rep_->sockfd; } + +void NetCli::Close() { + if (rep_->available) { + close(rep_->sockfd); + rep_->available = false; + rep_->sockfd = -1; + } +} + +void NetCli::set_connect_timeout(int connect_timeout) { rep_->connect_timeout = connect_timeout; } + +int NetCli::set_send_timeout(int send_timeout) { + std::unique_ptr& r = rep_; + int ret = 0; + if (send_timeout > 0) { + r->send_timeout = send_timeout; + struct timeval timeout = {r->send_timeout / 1000, (r->send_timeout % 1000) * 1000}; + ret = setsockopt(r->sockfd, SOL_SOCKET, SO_SNDTIMEO, &timeout, sizeof(timeout)); + } + return ret; +} + +int NetCli::set_recv_timeout(int recv_timeout) { + std::unique_ptr& r = rep_; + int ret = 0; + if (recv_timeout > 0) { + r->recv_timeout = recv_timeout; + struct timeval timeout = {r->recv_timeout / 1000, (r->recv_timeout % 1000) * 1000}; + ret = setsockopt(r->sockfd, SOL_SOCKET, SO_RCVTIMEO, &timeout, sizeof(timeout)); + } + return ret; +} + +int NetCli::set_tcp_nodelay() { + std::unique_ptr& r = rep_; + int val = 1; + int ret = 0; + ret = setsockopt(r->sockfd, IPPROTO_TCP, TCP_NODELAY, &val, sizeof(val)); + return ret; +} + +} // namespace net diff --git a/tools/pika_migrate/src/net/src/net_conn.cc b/tools/pika_migrate/src/net/src/net_conn.cc new file mode 100644 index 0000000000..d392fc5a10 --- /dev/null +++ b/tools/pika_migrate/src/net/src/net_conn.cc @@ -0,0 +1,66 @@ +// Copyright (c) 2015-present, Qihoo, Inc. All rights reserved. +// This source code is licensed under the BSD-style license found in the +// LICENSE file in the root directory of this source tree. An additional grant +// of patent rights can be found in the PATENTS file in the same directory. + +#include +#include + +#include + +#include + +#include "net/include/net_conn.h" +#include "net/include/net_thread.h" +#include "net/src/net_util.h" +#include "pstd/include/xdebug.h" + +namespace net { + +NetConn::NetConn(const int fd, std::string ip_port, Thread* thread, NetMultiplexer* net_mpx) + : fd_(fd), + ip_port_(std::move(ip_port)), +#ifdef __ENABLE_SSL + ssl_(nullptr), +#endif + thread_(thread), + net_multiplexer_(net_mpx) { + gettimeofday(&last_interaction_, nullptr); +} + +#ifdef __ENABLE_SSL +NetConn::~NetConn() { + SSL_free(ssl_); + ssl_ = nullptr; +} +#endif + +void NetConn::SetClose(bool close) { + close_ = close; +} + +bool NetConn::SetNonblock() { + flags_ = Setnonblocking(fd()); + return flags_ != -1; +} + +#ifdef __ENABLE_SSL +bool NetConn::CreateSSL(SSL_CTX* ssl_ctx) { + ssl_ = SSL_new(ssl_ctx); + if (!ssl_) { + LOG(WARNING) << "SSL_new() failed"; + return false; + } + + if (SSL_set_fd(ssl_, fd_) == 0) { + LOG(WARNING) << "SSL_set_fd() failed"; + return false; + } + + SSL_set_accept_state(ssl_); + + return true; +} +#endif + +} // namespace net diff --git a/tools/pika_migrate/src/net/src/net_epoll.cc b/tools/pika_migrate/src/net/src/net_epoll.cc new file mode 100644 index 0000000000..2215a62764 --- /dev/null +++ b/tools/pika_migrate/src/net/src/net_epoll.cc @@ -0,0 +1,104 @@ +// Copyright (c) 2015-present, Qihoo, Inc. All rights reserved. +// This source code is licensed under the BSD-style license found in the +// LICENSE file in the root directory of this source tree. An additional grant +// of patent rights can be found in the PATENTS file in the same directory. 
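+// NetEpoll translates the library's portable masks into epoll's: kReadable ->
+// EPOLLIN, kWritable -> EPOLLOUT, and EPOLLERR/EPOLLHUP back to kErrorEvent.
+// A sketch of the calling pattern used by the server threads (conn_fd and the
+// timeout are illustrative):
+//
+//   mpx->NetAddEvent(conn_fd, kReadable);                 // watch for reads
+//   mpx->NetModEvent(conn_fd, 0, kReadable | kWritable);  // also watch writes
+//   int n = mpx->NetPoll(1000);                           // wait up to 1s
+//   NetFiredEvent* fired = mpx->FiredEvents();            // then scan fired[0..n)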
+ +#include "net/src/net_epoll.h" + +#include +#include +#include + +#include + +#include "net/include/net_define.h" +#include "pstd/include/xdebug.h" + +namespace net { + +NetMultiplexer* CreateNetMultiplexer(int limit) { return new NetEpoll(limit); } + +NetEpoll::NetEpoll(int queue_limit) : NetMultiplexer(queue_limit) { +#if defined(EPOLL_CLOEXEC) + multiplexer_ = epoll_create1(EPOLL_CLOEXEC); +#else + multiplexer_ = epoll_create(1024); +#endif + + fcntl(multiplexer_, F_SETFD, fcntl(multiplexer_, F_GETFD) | FD_CLOEXEC); + + if (multiplexer_ < 0) { + LOG(ERROR) << "epoll create fail"; + exit(1); + } + + events_.resize(NET_MAX_CLIENTS); +} + +int NetEpoll::NetAddEvent(int fd, int mask) { + struct epoll_event ee; + ee.data.fd = fd; + ee.events = 0; + + if (mask & kReadable) { + ee.events |= EPOLLIN; + } + if (mask & kWritable) { + ee.events |= EPOLLOUT; + } + + return epoll_ctl(multiplexer_, EPOLL_CTL_ADD, fd, &ee); +} + +int NetEpoll::NetModEvent(int fd, int old_mask, int mask) { + struct epoll_event ee; + ee.data.fd = fd; + ee.events = (old_mask | mask); + ee.events = 0; + + if ((old_mask | mask) & kReadable) { + ee.events |= EPOLLIN; + } + if ((old_mask | mask) & kWritable) { + ee.events |= EPOLLOUT; + } + return epoll_ctl(multiplexer_, EPOLL_CTL_MOD, fd, &ee); +} + +int NetEpoll::NetDelEvent(int fd, [[maybe_unused]] int mask) { + /* + * Kernel < 2.6.9 need a non null event point to EPOLL_CTL_DEL + */ + struct epoll_event ee; + ee.data.fd = fd; + return epoll_ctl(multiplexer_, EPOLL_CTL_DEL, fd, &ee); +} + +int NetEpoll::NetPoll(int timeout) { + int num_events = epoll_wait(multiplexer_, &events_[0], NET_MAX_CLIENTS, timeout); + if (num_events <= 0) { + return 0; + } + + for (int i = 0; i < num_events; i++) { + NetFiredEvent& ev = fired_events_[i]; + ev.fd = events_[i].data.fd; + ev.mask = 0; + + if (events_[i].events & EPOLLIN) { + ev.mask |= kReadable; + } + + if (events_[i].events & EPOLLOUT) { + ev.mask |= kWritable; + } + + if (events_[i].events & (EPOLLERR | EPOLLHUP)) { + ev.mask |= kErrorEvent; + } + } + + return num_events; +} + +} // namespace net diff --git a/tools/pika_migrate/src/net/src/net_epoll.h b/tools/pika_migrate/src/net/src/net_epoll.h new file mode 100644 index 0000000000..4ab14e7443 --- /dev/null +++ b/tools/pika_migrate/src/net/src/net_epoll.h @@ -0,0 +1,32 @@ +// Copyright (c) 2015-present, Qihoo, Inc. All rights reserved. +// This source code is licensed under the BSD-style license found in the +// LICENSE file in the root directory of this source tree. An additional grant +// of patent rights can be found in the PATENTS file in the same directory. + +#ifndef NET_SRC_NET_EPOLL_H_ +#define NET_SRC_NET_EPOLL_H_ +#include + +#include + +#include "net/src/net_multiplexer.h" + +namespace net { + +class NetEpoll final : public NetMultiplexer { + public: + NetEpoll(int queue_limit = kUnlimitedQueue); + ~NetEpoll() override = default; + + int NetAddEvent(int fd, int mask) override; + int NetDelEvent(int fd, [[maybe_unused]] int mask) override; + int NetModEvent(int fd, int old_mask, int mask) override; + + int NetPoll(int timeout) override; + + private: + std::vector events_; +}; + +} // namespace net +#endif // NET_SRC_NET_EPOLL_H_ diff --git a/tools/pika_migrate/src/net/src/net_interfaces.cc b/tools/pika_migrate/src/net/src/net_interfaces.cc new file mode 100644 index 0000000000..89061dd5b1 --- /dev/null +++ b/tools/pika_migrate/src/net/src/net_interfaces.cc @@ -0,0 +1,154 @@ +// Copyright (c) 2015-present, Qihoo, Inc. All rights reserved. 
+// This source code is licensed under the BSD-style license found in the +// LICENSE file in the root directory of this source tree. An additional grant +// of patent rights can be found in the PATENTS file in the same directory. + +#include "net/include/net_interfaces.h" + +#include + +#include + +#include +#include + +#if defined(__APPLE__) || defined(__FreeBSD__) +# include +# include +# include +# include +# include +# include +# include +# include + +# include "pstd/include/pstd_defer.h" + +#else +# include +# include +# include +# include + +#endif + +#include "pstd/include/xdebug.h" + +std::string GetDefaultInterface() { +#if defined(__APPLE__) || defined(__FreeBSD__) + std::string name("lo0"); + + int fd = socket(AF_INET, SOCK_DGRAM, 0); + if (fd < 0) { + return name; + } + + DEFER { close(fd); }; + + struct ifreq* ifreq; + struct ifconf ifconf; + char buf[16384]; + + ifconf.ifc_len = sizeof buf; + ifconf.ifc_buf = buf; + if (ioctl(fd, SIOCGIFCONF, &ifconf) != 0) { + LOG(ERROR) << "ioctl(SIOCGIFCONF) failed"; + return name; + } + + ifreq = ifconf.ifc_req; + for (unsigned int i = 0; i < ifconf.ifc_len;) { + /* some systems have ifr_addr.sa_len and adjust the length that + * way, but not mine. weird */ + size_t len = IFNAMSIZ + ifreq->ifr_addr.sa_len; + name = ifreq->ifr_name; + if (!name.empty()) { + LOG(INFO) << "got interface " << name; + break; + } + + ifreq = reinterpret_cast(reinterpret_cast(ifreq) + len); + i += len; + } + + return name; +#else + std::string name("eth0"); + std::ifstream routeFile("/proc/net/route", std::ios_base::in); + if (!routeFile.good()) { + return name; + } + + std::string line; + std::vector tokens; + while (std::getline(routeFile, line)) { + std::istringstream stream(line); + std::copy(std::istream_iterator(stream), std::istream_iterator(), + std::back_inserter >(tokens)); + + // the default interface is the one having the second + // field, Destination, set to "00000000" + if ((tokens.size() >= 2) && (tokens[1] == std::string("00000000"))) { + name = tokens[0]; + break; + } + + tokens.clear(); + } + + return name; +#endif +} + +std::string GetIpByInterface(const std::string& network_interface) { + if (network_interface.empty()) { + return ""; + } + + LOG(INFO) << "Using Networker Interface: " << network_interface; + + struct ifaddrs* ifAddrStruct = nullptr; + struct ifaddrs* ifa = nullptr; + void* tmpAddrPtr = nullptr; + + if (getifaddrs(&ifAddrStruct) == -1) { + LOG(ERROR) << "getifaddrs failed"; + return ""; + } + + std::string host; + for (ifa = ifAddrStruct; ifa != nullptr; ifa = ifa->ifa_next) { + if (!(ifa->ifa_addr)) { + continue; + } + + if (ifa->ifa_addr->sa_family == AF_INET) { // Check it is a valid IPv4 address + tmpAddrPtr = &(reinterpret_cast(ifa->ifa_addr))->sin_addr; + char addressBuffer[INET_ADDRSTRLEN]; + inet_ntop(AF_INET, tmpAddrPtr, addressBuffer, INET_ADDRSTRLEN); + if (std::string(ifa->ifa_name) == network_interface) { + host = addressBuffer; + break; + } + } else if (ifa->ifa_addr->sa_family == AF_INET6) { // Check it is a valid IPv6 address + tmpAddrPtr = &(reinterpret_cast(ifa->ifa_addr))->sin6_addr; + char addressBuffer[INET6_ADDRSTRLEN]; + inet_ntop(AF_INET6, tmpAddrPtr, addressBuffer, INET6_ADDRSTRLEN); + if (std::string(ifa->ifa_name) == network_interface) { + host = addressBuffer; + break; + } + } + } + + if (ifAddrStruct) { + freeifaddrs(ifAddrStruct); + } + + if (!ifa) { + LOG(ERROR) << "error network interface: " << network_interface; + } + + LOG(INFO) << "got ip " << host; + return host; +} diff --git 
a/tools/pika_migrate/src/net/src/net_item.h b/tools/pika_migrate/src/net/src/net_item.h new file mode 100644 index 0000000000..a6863e376b --- /dev/null +++ b/tools/pika_migrate/src/net/src/net_item.h @@ -0,0 +1,37 @@ +// Copyright (c) 2015-present, Qihoo, Inc. All rights reserved. +// This source code is licensed under the BSD-style license found in the +// LICENSE file in the root directory of this source tree. An additional grant +// of patent rights can be found in the PATENTS file in the same directory. + +#ifndef NET_SRC_NET_ITEM_H_ +#define NET_SRC_NET_ITEM_H_ + +#include +#include + +#include "net/include/net_define.h" + +namespace net { + +class NetItem { + public: + NetItem() = default; + NetItem(const int fd, std::string ip_port, const NotifyType& type = kNotiConnect) + : fd_(fd), ip_port_(std::move(ip_port)), notify_type_(type) {} + + int fd() const { return fd_; } + std::string ip_port() const { return ip_port_; } + std::string String() const { + return std::to_string(fd_) + ":" + ip_port_ + ":" + std::to_string(notify_type_); + } + + NotifyType notify_type() const { return notify_type_; } + + private: + int fd_ = -1; + std::string ip_port_; + NotifyType notify_type_ = kNotiConnect; +}; + +} // namespace net +#endif // NET_SRC_NET_ITEM_H_ diff --git a/tools/pika_migrate/src/net/src/net_kqueue.cc b/tools/pika_migrate/src/net/src/net_kqueue.cc new file mode 100644 index 0000000000..16c831ff37 --- /dev/null +++ b/tools/pika_migrate/src/net/src/net_kqueue.cc @@ -0,0 +1,117 @@ +// Copyright (c) 2015-present, Qihoo, Inc. All rights reserved. +// This source code is licensed under the BSD-style license found in the +// LICENSE file in the root directory of this source tree. An additional grant +// of patent rights can be found in the PATENTS file in the same directory. 
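+// NetKqueue is the BSD/macOS counterpart of NetEpoll. Two differences worth
+// noting: NetModEvent() is emulated as delete-then-add (there is no single
+// "modify" call here), and kevent() reports read and write readiness as
+// separate filter records, so one fd can occupy two slots of fired_events_
+// in a single NetPoll() round, where epoll ORs both into one event.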
+ +#include "net/src/net_kqueue.h" + +#include +#include +#include + +#include + +#include "net/include/net_define.h" +#include "pstd/include/xdebug.h" + +namespace net { + +NetMultiplexer* CreateNetMultiplexer(int limit) { return new NetKqueue(limit); } + +NetKqueue::NetKqueue(int queue_limit) : NetMultiplexer(queue_limit) { + multiplexer_ = ::kqueue(); + LOG(INFO) << "create kqueue"; + + if (multiplexer_ < 0) { + LOG(ERROR) << "kqueue create fail"; + exit(1); + } + + fcntl(multiplexer_, F_SETFD, fcntl(multiplexer_, F_GETFD) | FD_CLOEXEC); + + events_.resize(NET_MAX_CLIENTS); +} + +int NetKqueue::NetAddEvent(int fd, int mask) { + int cnt = 0; + struct kevent change[2]; + + if (mask & kReadable) { + EV_SET(change + cnt, fd, EVFILT_READ, EV_ADD, 0, 0, nullptr); + ++cnt; + } + + if (mask & kWritable) { + EV_SET(change + cnt, fd, EVFILT_WRITE, EV_ADD, 0, 0, nullptr); + ++cnt; + } + + return kevent(multiplexer_, change, cnt, nullptr, 0, nullptr); +} + +int NetKqueue::NetModEvent(int fd, int /*old_mask*/, int mask) { + int ret = NetDelEvent(fd, kReadable | kWritable); + if (mask == 0) { + return ret; + } + + return NetAddEvent(fd, mask); +} + +int NetKqueue::NetDelEvent(int fd, int mask) { + int cnt = 0; + struct kevent change[2]; + + if (mask & kReadable) { + EV_SET(change + cnt, fd, EVFILT_READ, EV_DELETE, 0, 0, nullptr); + ++cnt; + } + + if (mask & kWritable) { + EV_SET(change + cnt, fd, EVFILT_WRITE, EV_DELETE, 0, 0, nullptr); + ++cnt; + } + + if (cnt == 0) { + return -1; + } + + return kevent(multiplexer_, change, cnt, nullptr, 0, nullptr); +} + +int NetKqueue::NetPoll(int timeout) { + struct timespec* p_timeout = nullptr; + struct timespec s_timeout; + if (timeout >= 0) { + p_timeout = &s_timeout; + s_timeout.tv_sec = timeout / 1000; + s_timeout.tv_nsec = timeout % 1000 * 1000000; + } + + int num_events = ::kevent(multiplexer_, nullptr, 0, &events_[0], NET_MAX_CLIENTS, p_timeout); + if (num_events <= 0) { + return 0; + } + + for (int i = 0; i < num_events; i++) { + NetFiredEvent& ev = fired_events_[i]; + ev.fd = events_[i].ident; + ev.mask = 0; + + if (events_[i].filter == EVFILT_READ) { + ev.mask |= kReadable; + } + + if (events_[i].filter == EVFILT_WRITE) { + ev.mask |= kWritable; + } + + if (events_[i].flags & EV_ERROR) { + ev.mask |= kErrorEvent; + } + } + + return num_events; +} + +} // namespace net diff --git a/tools/pika_migrate/src/net/src/net_kqueue.h b/tools/pika_migrate/src/net/src/net_kqueue.h new file mode 100644 index 0000000000..402a8b2a22 --- /dev/null +++ b/tools/pika_migrate/src/net/src/net_kqueue.h @@ -0,0 +1,32 @@ +// Copyright (c) 2015-present, Qihoo, Inc. All rights reserved. +// This source code is licensed under the BSD-style license found in the +// LICENSE file in the root directory of this source tree. An additional grant +// of patent rights can be found in the PATENTS file in the same directory. 
+ +#ifndef NET_SRC_NET_KQUEUE_H_ +#define NET_SRC_NET_KQUEUE_H_ +#include + +#include + +#include "net/src/net_multiplexer.h" + +namespace net { + +class NetKqueue final : public NetMultiplexer { + public: + NetKqueue(int queue_limit = kUnlimitedQueue); + ~NetKqueue() override = default; + + int NetAddEvent(int fd, int mask) override; + int NetDelEvent(int fd, int mask) override; + int NetModEvent(int fd, int old_mask, int mask) override; + + int NetPoll(int timeout) override; + + private: + std::vector events_; +}; + +} // namespace net +#endif // NET_SRC_NET_EPOLL_H_ diff --git a/tools/pika_migrate/src/net/src/net_multiplexer.cc b/tools/pika_migrate/src/net/src/net_multiplexer.cc new file mode 100644 index 0000000000..09eb50a09b --- /dev/null +++ b/tools/pika_migrate/src/net/src/net_multiplexer.cc @@ -0,0 +1,75 @@ +// Copyright (c) 2015-present, Qihoo, Inc. All rights reserved. +// This source code is licensed under the BSD-style license found in the +// LICENSE file in the root directory of this source tree. An additional grant +// of patent rights can be found in the PATENTS file in the same directory. + +#include "net/src/net_multiplexer.h" + +#include +#include +#include + +#include + +#include "pstd/include/xdebug.h" + +namespace net { + +NetMultiplexer::NetMultiplexer(int queue_limit) : queue_limit_(queue_limit), fired_events_(NET_MAX_CLIENTS) { + int fds[2]; + if (pipe(fds) != 0) { + exit(-1); + } + notify_receive_fd_ = fds[0]; + notify_send_fd_ = fds[1]; + + fcntl(notify_receive_fd_, F_SETFD, fcntl(notify_receive_fd_, F_GETFD) | FD_CLOEXEC); + fcntl(notify_send_fd_, F_SETFD, fcntl(notify_send_fd_, F_GETFD) | FD_CLOEXEC); +} + +NetMultiplexer::~NetMultiplexer() { + if (multiplexer_ != -1) { + ::close(multiplexer_); + } +} + +void NetMultiplexer::Initialize() { + NetAddEvent(notify_receive_fd_, kReadable); + init_ = true; +} + +NetItem NetMultiplexer::NotifyQueuePop() { + if (!init_) { + LOG(ERROR) << "please call NetMultiplexer::Initialize()"; + std::abort(); + } + + NetItem it; + notify_queue_protector_.lock(); + it = notify_queue_.front(); + notify_queue_.pop(); + notify_queue_protector_.unlock(); + return it; +} + +bool NetMultiplexer::Register(const NetItem& it, bool force) { + if (!init_) { + LOG(ERROR) << "please call NetMultiplexer::Initialize()"; + return false; + } + + bool success = false; + notify_queue_protector_.lock(); + if (force || queue_limit_ == kUnlimitedQueue || notify_queue_.size() < static_cast(queue_limit_)) { + notify_queue_.push(it); + success = true; + } + notify_queue_protector_.unlock(); + if (success) { + ssize_t n = write(notify_send_fd_, "", 1); + (void)(n); + } + return success; +} + +} // namespace net diff --git a/tools/pika_migrate/src/net/src/net_multiplexer.h b/tools/pika_migrate/src/net/src/net_multiplexer.h new file mode 100644 index 0000000000..7e042a1c3b --- /dev/null +++ b/tools/pika_migrate/src/net/src/net_multiplexer.h @@ -0,0 +1,68 @@ +// Copyright (c) 2015-present, Qihoo, Inc. All rights reserved. +// This source code is licensed under the BSD-style license found in the +// LICENSE file in the root directory of this source tree. An additional grant +// of patent rights can be found in the PATENTS file in the same directory. 
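+// The multiplexer doubles as a cross-thread mailbox: a producer queues a
+// NetItem with Register(), which also writes one byte to notify_send_fd_;
+// the poll loop wakes on notify_receive_fd_ becoming readable, pops the item
+// with NotifyQueuePop(), and applies the notification. A sketch of the
+// producer side (conn_fd / ip_port are illustrative):
+//
+//   net::NetItem item(conn_fd, ip_port, net::kNotiWrite);
+//   if (!mpx->Register(item, /*force=*/false)) {
+//     // queue_limit_ reached and not forced; caller must retry or drop
+//   }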
+ +#ifndef NET_SRC_NET_MULTIPLEXER_H_ +#define NET_SRC_NET_MULTIPLEXER_H_ +#include +#include + +#include "net/src/net_item.h" +#include "pstd/include/pstd_mutex.h" + +namespace net { + +struct NetFiredEvent { + int fd = -1; + int mask = 0; // EventStatus +}; + +class NetMultiplexer { + public: + explicit NetMultiplexer(int queue_limit); + virtual ~NetMultiplexer(); + + virtual int NetAddEvent(int fd, int mask) = 0; + virtual int NetDelEvent(int fd, int mask) = 0; + virtual int NetModEvent(int fd, int old_mask, int mask) = 0; + virtual int NetPoll(int timeout) = 0; + + void Initialize(); + + NetFiredEvent* FiredEvents() { return &fired_events_[0]; } + + int NotifyReceiveFd() const { return notify_receive_fd_; } + int NotifySendFd() const { return notify_send_fd_; } + NetItem NotifyQueuePop(); + + bool Register(const NetItem& it, bool force); + + static const int kUnlimitedQueue = -1; + + int GetMultiplexer(){ + return multiplexer_; + } + protected: + int multiplexer_ = -1; + /* + * The PbItem queue is the fd queue, receive from dispatch thread + */ + int queue_limit_ = kUnlimitedQueue; + pstd::Mutex notify_queue_protector_; + std::queue notify_queue_; + std::vector fired_events_; + + /* + * These two fd receive the notify from dispatch thread + */ + int notify_receive_fd_ = -1; + int notify_send_fd_ = -1; + + bool init_ = false; +}; + +NetMultiplexer* CreateNetMultiplexer(int queue_limit = NetMultiplexer::kUnlimitedQueue); + +} // namespace net +#endif // NET_SRC_NET_EPOLL_H_ diff --git a/tools/pika_migrate/src/net/src/net_pubsub.cc b/tools/pika_migrate/src/net/src/net_pubsub.cc new file mode 100644 index 0000000000..110144ba14 --- /dev/null +++ b/tools/pika_migrate/src/net/src/net_pubsub.cc @@ -0,0 +1,617 @@ +// Copyright (c) 2015-present, Qihoo, Inc. All rights reserved. +// This source code is licensed under the BSD-style license found in the +// LICENSE file in the root directory of this source tree. An additional grant +// of patent rights can be found in the PATENTS file in the same directory. 
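+// ConstructPublishResp() below hand-builds the RESP arrays Redis clients
+// expect. Publishing "hi" on channel "news" produces, for a plain SUBSCRIBE:
+//
+//   *3\r\n$7\r\nmessage\r\n$4\r\nnews\r\n$2\r\nhi\r\n
+//
+// and, for a PSUBSCRIBE whose pattern matched, the *4 "pmessage" form that
+// additionally carries the subscribed pattern before the channel name.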
+ +#include +#include +#include + +#include "net/src/worker_thread.h" + +#include "net/include/net_conn.h" +#include "net/include/net_pubsub.h" + +namespace net { + +static std::string ConstructPublishResp(const std::string& subscribe_channel, const std::string& publish_channel, + const std::string& msg, const bool pattern) { + std::stringstream resp; + std::string common_msg = "message"; + std::string pattern_msg = "pmessage"; + if (pattern) { + resp << "*4\r\n" + << "$" << pattern_msg.length() << "\r\n" + << pattern_msg << "\r\n" + << "$" << subscribe_channel.length() << "\r\n" + << subscribe_channel << "\r\n" + << "$" << publish_channel.length() << "\r\n" + << publish_channel << "\r\n" + << "$" << msg.length() << "\r\n" + << msg << "\r\n"; + } else { + resp << "*3\r\n" + << "$" << common_msg.length() << "\r\n" + << common_msg << "\r\n" + << "$" << publish_channel.length() << "\r\n" + << publish_channel << "\r\n" + << "$" << msg.length() << "\r\n" + << msg << "\r\n"; + } + return resp.str(); +} + +void CloseFd(const std::shared_ptr& conn) { close(conn->fd()); } + +void PubSubThread::ConnHandle::UpdateReadyState(const ReadyState& state) { ready_state = state; } + +bool PubSubThread::ConnHandle::IsReady() { return ready_state == PubSubThread::ReadyState::kReady; } + +PubSubThread::PubSubThread() { + set_thread_name("PubSubThread"); + net_multiplexer_.reset(CreateNetMultiplexer()); + net_multiplexer_->Initialize(); + if (pipe(msg_pfd_)) { + exit(-1); + } + fcntl(msg_pfd_[0], F_SETFD, fcntl(msg_pfd_[0], F_GETFD) | FD_CLOEXEC); + fcntl(msg_pfd_[1], F_SETFD, fcntl(msg_pfd_[1], F_GETFD) | FD_CLOEXEC); + + net_multiplexer_->NetAddEvent(msg_pfd_[0], kReadable); +} + +PubSubThread::~PubSubThread() { StopThread(); } + +void PubSubThread::MoveConnOut(const std::shared_ptr& conn) { + RemoveConn(conn); + + net_multiplexer_->NetDelEvent(conn->fd(), 0); + { + std::lock_guard l(rwlock_); + conns_.erase(conn->fd()); + } +} + +void PubSubThread::MoveConnIn(const std::shared_ptr& conn, const NotifyType& notify_type) { + NetItem it(conn->fd(), conn->ip_port(), notify_type); + net_multiplexer_->Register(it, true); + { + std::lock_guard l(rwlock_); + conns_[conn->fd()] = std::make_shared(conn); + } + conn->set_net_multiplexer(net_multiplexer_.get()); +} + +void PubSubThread::UpdateConnReadyState(int fd, const ReadyState& state) { + std::lock_guard l(rwlock_); + const auto& it = conns_.find(fd); + if (it == conns_.end()) { + return; + } + it->second->UpdateReadyState(state); +} + +bool PubSubThread::IsReady(int fd) { + std::shared_lock l(rwlock_); + const auto& it = conns_.find(fd); + if (it != conns_.end()) { + return it->second->IsReady(); + } + return false; +} + +int PubSubThread::ClientPubSubChannelSize(const std::shared_ptr& conn) { + int subscribed = 0; + std::lock_guard l(channel_mutex_); + for (auto& channel : pubsub_channel_) { + auto conn_ptr = std::find(channel.second.begin(), channel.second.end(), conn); + if (conn_ptr != channel.second.end()) { + subscribed++; + } + } + return subscribed; +} + +int PubSubThread::ClientPubSubChannelPatternSize(const std::shared_ptr& conn) { + int subscribed = 0; + std::lock_guard l(pattern_mutex_); + for (auto& channel : pubsub_pattern_) { + auto conn_ptr = std::find(channel.second.begin(), channel.second.end(), conn); + if (conn_ptr != channel.second.end()) { + subscribed++; + } + } + return subscribed; +} + +void PubSubThread::RemoveConn(const std::shared_ptr& conn) { + { + std::lock_guard lock(pattern_mutex_); + for (auto& it : pubsub_pattern_) { + for (auto 
conn_ptr = it.second.begin(); conn_ptr != it.second.end(); conn_ptr++) { + if ((*conn_ptr) == conn) { + conn_ptr = it.second.erase(conn_ptr); + break; + } + } + } + } + + { + std::lock_guard lock(channel_mutex_); + for (auto& it : pubsub_channel_) { + for (auto conn_ptr = it.second.begin(); conn_ptr != it.second.end(); conn_ptr++) { + if ((*conn_ptr) == conn) { + conn_ptr = it.second.erase(conn_ptr); + break; + } + } + } + } +} + +void PubSubThread::CloseConn(const std::shared_ptr& conn) { + net_multiplexer_->NetDelEvent(conn->fd(), 0); + CloseFd(conn); + { + std::lock_guard l(rwlock_); + conns_.erase(conn->fd()); + } +} + +void PubSubThread::CloseAllConns() { + { + std::lock_guard l(channel_mutex_); + pubsub_channel_.clear(); + } + { + std::lock_guard l(pattern_mutex_); + pubsub_pattern_.clear(); + } + { + std::lock_guard l(rwlock_); + for (auto& pair : conns_) { + net_multiplexer_->NetDelEvent(pair.second->conn->fd(), 0); + CloseFd(pair.second->conn); + } + std::map> tmp; + conns_.swap(tmp); + } +} + +int PubSubThread::Publish(const std::string& channel, const std::string& msg) { + // TODO(LIBA-S): change the Publish Mode to Asynchronous + std::lock_guard lk(pub_mutex_); + channel_ = channel; + message_ = msg; + // Send signal to ThreadMain() + ssize_t n = write(msg_pfd_[1], "", 1); + (void)(n); + std::unique_lock lock(receiver_mutex_); + receiver_rsignal_.wait(lock, [this]() { return receivers_ != -1; }); + + int receivers = receivers_; + receivers_ = -1; + + return receivers; +} + +/* + * return the number of channels that the specific connection currently subscribed + */ +int PubSubThread::ClientChannelSize(const std::shared_ptr& conn) { + int subscribed = 0; + + channel_mutex_.lock(); + for (auto& channel : pubsub_channel_) { + auto conn_ptr = std::find(channel.second.begin(), channel.second.end(), conn); + if (conn_ptr != channel.second.end()) { + subscribed++; + } + } + channel_mutex_.unlock(); + + pattern_mutex_.lock(); + for (auto& channel : pubsub_pattern_) { + auto conn_ptr = std::find(channel.second.begin(), channel.second.end(), conn); + if (conn_ptr != channel.second.end()) { + subscribed++; + } + } + pattern_mutex_.unlock(); + + return subscribed; +} + +void PubSubThread::Subscribe(const std::shared_ptr& conn, const std::vector& channels, + const bool pattern, std::vector>* result) { + int subscribed = ClientChannelSize(conn); + + if (subscribed == 0) { + MoveConnIn(conn, net::NotifyType::kNotiWait); + } + + for (const auto& channel : channels) { + if (pattern) { // if pattern mode, register channel to map + std::lock_guard channel_lock(pattern_mutex_); + if (pubsub_pattern_.find(channel) != pubsub_pattern_.end()) { + auto conn_ptr = std::find(pubsub_pattern_[channel].begin(), pubsub_pattern_[channel].end(), conn); + if (conn_ptr == pubsub_pattern_[channel].end()) { // the connection first subscrbied + pubsub_pattern_[channel].push_back(conn); + ++subscribed; + } + } else { // the channel first subscribed + std::vector> conns = {conn}; + pubsub_pattern_[channel] = conns; + ++subscribed; + } + result->emplace_back(channel, subscribed); + } else { // if general mode, reigster channel to map + std::lock_guard channel_lock(channel_mutex_); + if (pubsub_channel_.find(channel) != pubsub_channel_.end()) { + auto conn_ptr = std::find(pubsub_channel_[channel].begin(), pubsub_channel_[channel].end(), conn); + if (conn_ptr == pubsub_channel_[channel].end()) { // the connection first subscribed + pubsub_channel_[channel].push_back(conn); + ++subscribed; + } + } else { // the channel 
first subscribed + std::vector> conns = {conn}; + pubsub_channel_[channel] = conns; + ++subscribed; + } + result->emplace_back(channel, subscribed); + } + } +} + +/* + * Unsubscribes the client from the given channels, or from all of them if none + * is given. + */ +int PubSubThread::UnSubscribe(const std::shared_ptr& conn, const std::vector& channels, + const bool pattern, std::vector>* result) { + int subscribed = ClientChannelSize(conn); + bool exist = true; + if (subscribed == 0) { + exist = false; + } + if (channels.empty()) { // if client want to unsubscribe all of channels + if (pattern) { // all of pattern channels + std::lock_guard l(pattern_mutex_); + for (auto& channel : pubsub_pattern_) { + auto conn_ptr = std::find(channel.second.begin(), channel.second.end(), conn); + if (conn_ptr != channel.second.end()) { + result->emplace_back(channel.first, --subscribed); + } + } + } else { + std::lock_guard l(channel_mutex_); + for (auto& channel : pubsub_channel_) { + auto conn_ptr = std::find(channel.second.begin(), channel.second.end(), conn); + if (conn_ptr != channel.second.end()) { + result->emplace_back(channel.first, --subscribed); + } + } + } + if (exist) { + MoveConnOut(conn); + } + return 0; + } + + for (const auto& channel : channels) { + if (pattern) { // if pattern mode, unsubscribe the channels of specified + std::lock_guard l(pattern_mutex_); + auto channel_ptr = pubsub_pattern_.find(channel); + if (channel_ptr != pubsub_pattern_.end()) { + auto it = std::find(channel_ptr->second.begin(), channel_ptr->second.end(), conn); + if (it != channel_ptr->second.end()) { + channel_ptr->second.erase(std::remove(channel_ptr->second.begin(), channel_ptr->second.end(), conn), + channel_ptr->second.end()); + result->emplace_back(channel, --subscribed); + } else { + result->emplace_back(channel, subscribed); + } + } else { + result->emplace_back(channel, 0); + } + } else { // if general mode, unsubscribe the channels of specified + std::lock_guard l(channel_mutex_); + auto channel_ptr = pubsub_channel_.find(channel); + if (channel_ptr != pubsub_channel_.end()) { + auto it = std::find(channel_ptr->second.begin(), channel_ptr->second.end(), conn); + if (it != channel_ptr->second.end()) { + channel_ptr->second.erase(std::remove(channel_ptr->second.begin(), channel_ptr->second.end(), conn), + channel_ptr->second.end()); + result->emplace_back(channel, --subscribed); + } else { + result->emplace_back(channel, subscribed); + } + } else { + result->emplace_back(channel, 0); + } + } + } + // The number of channels this client currently subscibred + // include general mode and pattern mode + subscribed = ClientChannelSize(conn); + if (subscribed == 0 && exist) { + MoveConnOut(conn); + } + return subscribed; +} + +void PubSubThread::PubSubChannels(const std::string& pattern, std::vector* result) { + if (pattern.empty()) { + std::lock_guard l(channel_mutex_); + for (auto& channel : pubsub_channel_) { + if (!channel.second.empty()) { + result->push_back(channel.first); + } + } + } else { + std::lock_guard l(channel_mutex_); + for (auto& channel : pubsub_channel_) { + if (pstd::stringmatchlen(channel.first.c_str(), static_cast(channel.first.size()), pattern.c_str(), + static_cast(pattern.size()), 0)) { + if (!channel.second.empty()) { + result->push_back(channel.first); + } + } + } + } +} + +void PubSubThread::PubSubNumSub(const std::vector& channels, + std::vector>* result) { + int subscribed; + std::lock_guard l(channel_mutex_); + for (const auto& i : channels) { + subscribed = 0; + for (auto& 
channel : pubsub_channel_) { + if (channel.first == i) { + subscribed = static_cast(channel.second.size()); + } + } + result->emplace_back(i, subscribed); + } +} + +int PubSubThread::PubSubNumPat() { + int subscribed = 0; + std::lock_guard l(pattern_mutex_); + for (auto& channel : pubsub_pattern_) { + subscribed += static_cast(channel.second.size()); + } + return subscribed; +} + +void PubSubThread::ConnCanSubscribe(const std::vector& allChannel, + const std::function&)>& func) { + { + std::lock_guard l(channel_mutex_); + for (auto& item : pubsub_channel_) { + for (auto it = item.second.rbegin(); it != item.second.rend(); it++) { + if (func(*it) && (allChannel.empty() || !std::count(allChannel.begin(), allChannel.end(), item.first))) { + item.second.erase(std::next(it).base()); + CloseConn(*it); + } + } // for end + } + } + + { + std::lock_guard l(pattern_mutex_); + for (auto& item : pubsub_pattern_) { + for (auto it = item.second.rbegin(); it != item.second.rend(); it++) { + bool kill = false; + if (func(*it)) { + if (allChannel.empty()) { + kill = true; + } + for (const auto& channelName : allChannel) { + if (kill || !pstd::stringmatchlen(channelName.c_str(), static_cast(channelName.size()), + item.first.c_str(), static_cast(item.first.size()), 0)) { + kill = true; + break; + } + } + } + if (kill) { + item.second.erase(std::next(it).base()); + CloseConn(*it); + } + } + } + } +} + +void* PubSubThread::ThreadMain() { + int nfds; + NetFiredEvent* pfe; + pstd::Status s; + std::shared_ptr in_conn = nullptr; + char triger[1]; + + while (!should_stop()) { + + if (close_all_conn_sig_.load()) { + close_all_conn_sig_.store(false); + CloseAllConns(); + } + + nfds = net_multiplexer_->NetPoll(NET_CRON_INTERVAL); + for (int i = 0; i < nfds; i++) { + pfe = (net_multiplexer_->FiredEvents()) + i; + if (pfe->fd == net_multiplexer_->NotifyReceiveFd()) { // New connection comming + if (pfe->mask & kReadable) { + ssize_t n = read(net_multiplexer_->NotifyReceiveFd(), triger, 1); + (void)(n); + { + NetItem ti = net_multiplexer_->NotifyQueuePop(); + if (ti.notify_type() == kNotiClose) { + } else if (ti.notify_type() == kNotiEpollout) { + net_multiplexer_->NetModEvent(ti.fd(), 0, kWritable); + } else if (ti.notify_type() == kNotiEpollin) { + net_multiplexer_->NetModEvent(ti.fd(), 0, kReadable); + } else if (ti.notify_type() == kNotiEpolloutAndEpollin) { + net_multiplexer_->NetModEvent(ti.fd(), 0, kWritable | kReadable); + } else if (ti.notify_type() == kNotiWait) { + // do not register events + net_multiplexer_->NetAddEvent(ti.fd(), 0); + } + } + continue; + } + } + if (pfe->fd == msg_pfd_[0]) { // Publish message + if (pfe->mask & kReadable) { + ssize_t n = read(msg_pfd_[0], triger, 1); + (void)(n); + std::string channel; + std::string msg; + int32_t receivers = 0; + channel = channel_; + msg = message_; + channel_.clear(); + message_.clear(); + + // Send message to a channel's clients + channel_mutex_.lock(); + auto it = pubsub_channel_.find(channel); + if (it != pubsub_channel_.end()) { + for (size_t i = 0; i < it->second.size(); i++) { + auto& conn = it->second[i]; + if (!IsReady(conn->fd())) { + continue; + } + std::string resp = ConstructPublishResp(it->first, channel, msg, false); + conn->WriteResp(resp); + WriteStatus write_status = conn->SendReply(); + if (write_status == kWriteHalf) { + net_multiplexer_->NetModEvent(conn->fd(), kReadable, kWritable); + } else if (write_status == kWriteError) { + channel_mutex_.unlock(); + + MoveConnOut(conn); + + channel_mutex_.lock(); + CloseFd(conn); + } else if 
(write_status == kWriteAll) { + receivers++; + } + } + } + channel_mutex_.unlock(); + + // Send message to a channel pattern's clients + pattern_mutex_.lock(); + for (auto& it : pubsub_pattern_) { + if (pstd::stringmatchlen(it.first.c_str(), static_cast(it.first.size()), channel.c_str(), + static_cast(channel.size()), 0)) { + for (size_t i = 0; i < it.second.size(); i++) { + auto& conn = it.second[i]; + if (!IsReady(conn->fd())) { + continue; + } + std::string resp = ConstructPublishResp(it.first, channel, msg, true); + conn->WriteResp(resp); + WriteStatus write_status = conn->SendReply(); + if (write_status == kWriteHalf) { + net_multiplexer_->NetModEvent(conn->fd(), kReadable, kWritable); + } else if (write_status == kWriteError) { + pattern_mutex_.unlock(); + + MoveConnOut(conn); + + pattern_mutex_.lock(); + CloseFd(conn); + } else if (write_status == kWriteAll) { + receivers++; + } + } + } + } + pattern_mutex_.unlock(); + + receiver_mutex_.lock(); + receivers_ = receivers; + receiver_rsignal_.notify_one(); + receiver_mutex_.unlock(); + } else { + continue; + } + } else { + in_conn = nullptr; + bool should_close = false; + + { + std::shared_lock l(rwlock_); + if (auto iter = conns_.find(pfe->fd); iter == conns_.end()) { + net_multiplexer_->NetDelEvent(pfe->fd, 0); + continue; + } else { + + in_conn = iter->second->conn; + } + } + + // Send reply + if ((pfe->mask & kWritable) && in_conn->is_ready_to_reply()) { + WriteStatus write_status = in_conn->SendReply(); + if (write_status == kWriteAll) { + in_conn->set_is_reply(false); + net_multiplexer_->NetModEvent(pfe->fd, 0, kReadable); // Remove kWritable + } else if (write_status == kWriteHalf) { + continue; // send all write buffer, + // in case of next GetRequest() + // pollute the write buffer + } else if (write_status == kWriteError) { + should_close = true; + } + } + + // Client request again + if (!should_close && (pfe->mask & kReadable)) { + ReadStatus getRes = in_conn->GetRequest(); + // Do not response to client when we leave the pub/sub status here + if (getRes != kReadAll && getRes != kReadHalf) { + // kReadError kReadClose kFullError kParseError kDealError + should_close = true; + } else if (in_conn->is_ready_to_reply()) { + WriteStatus write_status = in_conn->SendReply(); + if (write_status == kWriteAll) { + in_conn->set_is_reply(false); + } else if (write_status == kWriteHalf) { + net_multiplexer_->NetModEvent(pfe->fd, kReadable, kWritable); + } else if (write_status == kWriteError) { + should_close = true; + } + } else { + continue; + } + } + // Error + if ((pfe->mask & kErrorEvent) || should_close) { + MoveConnOut(in_conn); + CloseFd(in_conn); + in_conn = nullptr; + } + } + } + } + Cleanup(); + return nullptr; +} + +void PubSubThread::Cleanup() { + std::lock_guard l(rwlock_); + for (auto& iter : conns_) { + CloseFd(iter.second->conn); + } + conns_.clear(); +} +void PubSubThread::NotifyCloseAllConns() { + close_all_conn_sig_.store(true); +} +}; // namespace net diff --git a/tools/pika_migrate/src/net/src/net_stats.cc b/tools/pika_migrate/src/net/src/net_stats.cc new file mode 100644 index 0000000000..80f64a0be0 --- /dev/null +++ b/tools/pika_migrate/src/net/src/net_stats.cc @@ -0,0 +1,46 @@ +// Copyright (c) 2023-present, Qihoo, Inc. All rights reserved. +// This source code is licensed under the BSD-style license found in the +// LICENSE file in the root directory of this source tree. An additional grant +// of patent rights can be found in the PATENTS file in the same directory. 
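Publish() above is deliberately synchronous: the publisher stores channel_/message_, writes one byte into msg_pfd_[1] to wake ThreadMain(), then sleeps on receiver_rsignal_ until the worker publishes the receiver count through receivers_. Reduced to its core, the rendezvous looks like this (a standalone sketch, not the Pika classes):

    #include <condition_variable>
    #include <iostream>
    #include <mutex>
    #include <thread>

    int main() {
      std::mutex mu;
      std::condition_variable done;
      int receivers = -1;  // -1 means "no result yet", as in PubSubThread

      std::thread worker([&] {
        // ... fan the message out to every ready subscriber ...
        std::lock_guard<std::mutex> l(mu);
        receivers = 3;  // pretend three clients received it
        done.notify_one();
      });

      // Publisher: block until the worker reports the delivery count.
      std::unique_lock<std::mutex> lock(mu);
      done.wait(lock, [&] { return receivers != -1; });
      std::cout << "delivered to " << receivers << " subscribers\n";
      lock.unlock();
      worker.join();
      return 0;
    }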
+
+#include <atomic>
+#include <memory>
+#include "net/include/net_stats.h"
+
+std::unique_ptr<net::NetworkStatistic> g_network_statistic;
+
+namespace net {
+
+size_t NetworkStatistic::NetInputBytes() {
+  return stat_net_input_bytes.load(std::memory_order_relaxed);
+}
+
+size_t NetworkStatistic::NetOutputBytes() {
+  return stat_net_output_bytes.load(std::memory_order_relaxed);
+}
+
+size_t NetworkStatistic::NetReplInputBytes() {
+  return stat_net_repl_input_bytes.load(std::memory_order_relaxed);
+}
+
+size_t NetworkStatistic::NetReplOutputBytes() {
+  return stat_net_repl_output_bytes.load(std::memory_order_relaxed);
+}
+
+void NetworkStatistic::IncrRedisInputBytes(uint64_t bytes) {
+  stat_net_input_bytes.fetch_add(bytes, std::memory_order_relaxed);
+}
+
+void NetworkStatistic::IncrRedisOutputBytes(uint64_t bytes) {
+  stat_net_output_bytes.fetch_add(bytes, std::memory_order_relaxed);
+}
+
+void NetworkStatistic::IncrReplInputBytes(uint64_t bytes) {
+  stat_net_repl_input_bytes.fetch_add(bytes, std::memory_order_relaxed);
+}
+
+void NetworkStatistic::IncrReplOutputBytes(uint64_t bytes) {
+  stat_net_repl_output_bytes.fetch_add(bytes, std::memory_order_relaxed);
+}
+
+}
\ No newline at end of file
diff --git a/tools/pika_migrate/src/net/src/net_thread.cc b/tools/pika_migrate/src/net/src/net_thread.cc
new file mode 100644
index 0000000000..a6a7b08994
--- /dev/null
+++ b/tools/pika_migrate/src/net/src/net_thread.cc
@@ -0,0 +1,54 @@
+// Copyright (c) 2015-present, Qihoo, Inc. All rights reserved.
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree. An additional grant
+// of patent rights can be found in the PATENTS file in the same directory.
+
+#include "net/include/net_thread.h"
+#include "net/include/net_define.h"
+#include "net/src/net_thread_name.h"
+#include "pstd/include/xdebug.h"
+
+namespace net {
+
+Thread::Thread() : should_stop_(false) {}
+
+Thread::~Thread() = default;
+
+void* Thread::RunThread(void* arg) {
+  auto thread = reinterpret_cast<Thread*>(arg);
+  if (!(thread->thread_name().empty())) {
+    SetThreadName(pthread_self(), thread->thread_name());
+  }
+  thread->ThreadMain();
+  return nullptr;
+}
+
+int Thread::StartThread() {
+  if (!should_stop() && is_running()) {
+    return 0;
+  }
+  std::lock_guard l(running_mu_);
+  should_stop_ = false;
+  if (!running_) {
+    running_ = true;
+    return pthread_create(&thread_id_, nullptr, RunThread, this);
+  }
+  return 0;
+}
+
+int Thread::StopThread() {
+  if (should_stop() && !is_running()) {
+    return 0;
+  }
+  std::lock_guard l(running_mu_);
+  should_stop_ = true;
+  if (running_) {
+    running_ = false;
+    return pthread_join(thread_id_, nullptr);
+  }
+  return 0;
+}
+
+int Thread::JoinThread() { return pthread_join(thread_id_, nullptr); }
+
+}  // namespace net
diff --git a/tools/pika_migrate/src/net/src/net_thread_name.h b/tools/pika_migrate/src/net/src/net_thread_name.h
new file mode 100644
index 0000000000..5d8dc78db8
--- /dev/null
+++ b/tools/pika_migrate/src/net/src/net_thread_name.h
@@ -0,0 +1,34 @@
+// Copyright (c) 2015-present, Qihoo, Inc. All rights reserved.
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree. An additional grant
+// of patent rights can be found in the PATENTS file in the same directory.
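The counters above only ever need atomicity, not ordering: each one is incremented and read independently, so std::memory_order_relaxed is sufficient and avoids the full fences a default seq_cst operation would add on every read()/write() path. A minimal equivalent (generic C++, not the Pika header):

    #include <atomic>
    #include <cstddef>
    #include <cstdint>

    struct ByteCounter {
      std::atomic<size_t> bytes{0};

      // Hot path, called on every read()/write(); no ordering required.
      void Incr(uint64_t n) { bytes.fetch_add(n, std::memory_order_relaxed); }

      // Monitoring path; a slightly stale value is acceptable.
      size_t Get() const { return bytes.load(std::memory_order_relaxed); }
    };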
+
+#ifndef NET_THREAD_NAME_H
+#define NET_THREAD_NAME_H
+
+#include <pthread.h>
+#include <string>
+
+namespace net {
+
+#if defined(__GLIBC__) && !defined(__APPLE__) && !defined(__ANDROID__)
+# if __GLIBC_PREREQ(2, 12)
+// has pthread_setname_np(pthread_t, const char*) (2 params)
+# define HAS_PTHREAD_SETNAME_NP 1
+# endif
+#endif
+
+#ifdef HAS_PTHREAD_SETNAME_NP
+inline bool SetThreadName(pthread_t id, const std::string& name) {
+  // printf ("use pthread_setname_np(%s)\n", name.substr(0, 15).c_str());
+  return 0 == pthread_setname_np(id, name.substr(0, 15).c_str());
+}
+#else
+inline bool SetThreadName(pthread_t id, const std::string& name) {
+  // printf ("no pthread_setname\n");
+  // single-parameter variant names the calling thread; id is unused here
+  return pthread_setname_np(name.c_str()) == 0;
+}
+#endif
+}  // namespace net
+
+#endif
diff --git a/tools/pika_migrate/src/net/src/net_util.cc b/tools/pika_migrate/src/net/src/net_util.cc
new file mode 100644
index 0000000000..c52c07f80d
--- /dev/null
+++ b/tools/pika_migrate/src/net/src/net_util.cc
@@ -0,0 +1,141 @@
+// Copyright (c) 2015-present, Qihoo, Inc. All rights reserved.
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree. An additional grant
+// of patent rights can be found in the PATENTS file in the same directory.
+
+#include "net/src/net_util.h"
+#include <fcntl.h>
+#include <unistd.h>
+#include <cassert>
+#include <chrono>
+#include <glog/logging.h>
+
+#include "net/include/net_define.h"
+
+namespace net {
+
+int Setnonblocking(int sockfd) {
+  int flags;
+  if ((flags = fcntl(sockfd, F_GETFL, 0)) < 0) {
+    close(sockfd);
+    return -1;
+  }
+  flags |= O_NONBLOCK;
+  if (fcntl(sockfd, F_SETFL, flags) < 0) {
+    close(sockfd);
+    return -1;
+  }
+  return flags;
+}
+
+TimerTaskID TimerTaskManager::AddTimerTask(const std::string& task_name, int interval_ms, bool repeat_exec,
+                                           const std::function<void()>& task) {
+  TimedTask new_task = {last_task_id_++, task_name, interval_ms, repeat_exec, task};
+  id_to_task_[new_task.task_id] = new_task;
+
+  int64_t next_expired_time = NowInMs() + interval_ms;
+  exec_queue_.insert({next_expired_time, new_task.task_id});
+
+  // return the id of this task
+  return new_task.task_id;
+}
+
+int64_t TimerTaskManager::NowInMs() {
+  auto now = std::chrono::system_clock::now();
+  return std::chrono::time_point_cast<std::chrono::milliseconds>(now).time_since_epoch().count();
+}
+
+int64_t TimerTaskManager::ExecTimerTask() {
+  std::vector<ExecTsWithId> fired_tasks_;
+  int64_t now_in_ms = NowInMs();
+  // traverse in ascending order, and exec expired tasks
+  for (const auto& task : exec_queue_) {
+    if (task.exec_ts <= now_in_ms) {
+      auto it = id_to_task_.find(task.id);
+      assert(it != id_to_task_.end());
+      it->second.fun();
+      fired_tasks_.push_back({task.exec_ts, task.id});
+      now_in_ms = NowInMs();
+    } else {
+      break;
+    }
+  }
+
+  for (auto task : fired_tasks_) {
+    exec_queue_.erase(task);
+    auto it = id_to_task_.find(task.id);
+    assert(it != id_to_task_.end());
+    if (it->second.repeat_exec) {
+      // this task needs to run repeatedly, so register it again
+      exec_queue_.insert({now_in_ms + it->second.interval_ms, task.id});
+    } else {
+      // this task runs only once, so remove it completely
+      id_to_task_.erase(task.id);
+    }
+  }
+
+  if (exec_queue_.empty()) {
+    // to avoid wasting CPU, epoll uses a 5000 ms timeout when there is no task to exec
+    return 5000;
+  }
+
+  int64_t gap_between_now_and_next_task = exec_queue_.begin()->exec_ts - NowInMs();
+  gap_between_now_and_next_task = gap_between_now_and_next_task < 0 ?
0 : gap_between_now_and_next_task; + return gap_between_now_and_next_task; +} + +bool TimerTaskManager::DelTimerTaskByTaskId(TimerTaskID task_id) { + // remove the task + auto task_to_del = id_to_task_.find(task_id); + if (task_to_del == id_to_task_.end()) { + return false; + } + int interval_del = task_to_del->second.interval_ms; + id_to_task_.erase(task_to_del); + + // remove from exec queue + ExecTsWithId target_key = {-1, 0}; + for (auto pair : exec_queue_) { + if (pair.id == task_id) { + target_key = {pair.exec_ts, pair.id}; + break; + } + } + if (target_key.exec_ts != -1) { + exec_queue_.erase(target_key); + } + return true; +} + +TimerTaskThread::~TimerTaskThread() { + if (!timer_task_manager_.Empty()) { + LOG(INFO) << "TimerTaskThread exit !!!"; + } +} +int TimerTaskThread::StartThread() { + if (timer_task_manager_.Empty()) { + LOG(INFO) << "No Timer task registered, TimerTaskThread won't be created."; + // if there is no timer task registered, no need of start the thread + return -1; + } + set_thread_name("TimerTask"); + LOG(INFO) << "TimerTaskThread Starting..."; + return Thread::StartThread(); +} +int TimerTaskThread::StopThread() { + if (timer_task_manager_.Empty()) { + LOG(INFO) << "TimerTaskThread::StopThread : TimerTaskThread didn't create, no need to stop it."; + // if there is no timer task registered, the thread didn't even start + return -1; + } + return Thread::StopThread(); +} + +void* TimerTaskThread::ThreadMain() { + int32_t timeout; + while (!should_stop()) { + timeout = static_cast(timer_task_manager_.ExecTimerTask()); + net_multiplexer_->NetPoll(timeout); + } + return nullptr; +} +} // namespace net diff --git a/tools/pika_migrate/src/net/src/net_util.h b/tools/pika_migrate/src/net/src/net_util.h new file mode 100644 index 0000000000..b30806c3b0 --- /dev/null +++ b/tools/pika_migrate/src/net/src/net_util.h @@ -0,0 +1,100 @@ +// Copyright (c) 2015-present, Qihoo, Inc. All rights reserved. +// This source code is licensed under the BSD-style license found in the +// LICENSE file in the root directory of this source tree. An additional grant +// of patent rights can be found in the PATENTS file in the same directory. 
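Because std::set keeps ExecTsWithId ordered by (exec_ts, id), the earliest deadline is always *exec_queue_.begin(), and the value ExecTimerTask() returns doubles as the epoll/kqueue timeout: the event loop sleeps exactly until the next task is due, or 5000 ms when idle. A condensed sketch of that timeout computation (illustrative types, same ordering trick):

    #include <chrono>
    #include <cstdint>
    #include <set>

    struct Deadline {
      int64_t exec_ts;  // absolute due time, in ms
      int64_t id;       // tie-breaker so equal deadlines stay distinct
      bool operator<(const Deadline& o) const {
        return exec_ts != o.exec_ts ? exec_ts < o.exec_ts : id < o.id;
      }
    };

    int64_t NowMs() {
      using namespace std::chrono;
      return duration_cast<milliseconds>(steady_clock::now().time_since_epoch()).count();
    }

    // How long the event loop may block before the next task is due.
    int64_t NextTimeoutMs(const std::set<Deadline>& queue) {
      if (queue.empty()) {
        return 5000;  // idle fallback, mirroring ExecTimerTask()
      }
      int64_t gap = queue.begin()->exec_ts - NowMs();
      return gap < 0 ? 0 : gap;  // overdue tasks run immediately
    }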
+ +#ifndef NET_SRC_NET_UTIL_H_ +#define NET_SRC_NET_UTIL_H_ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "net/src/net_multiplexer.h" +#include "net/include/net_thread.h" + +namespace net { + +int Setnonblocking(int sockfd); +using TimerTaskID = int64_t; +struct TimedTask{ + TimerTaskID task_id; + std::string task_name; + int interval_ms; + bool repeat_exec; + std::function fun; +}; + +struct ExecTsWithId { + //the next exec time of the task, unit in ms + int64_t exec_ts; + //id of the task to be exec + TimerTaskID id; + + bool operator<(const ExecTsWithId& other) const{ + if(exec_ts == other.exec_ts){ + return id < other.id; + } + return exec_ts < other.exec_ts; + } + bool operator==(const ExecTsWithId& other) const { + return exec_ts == other.exec_ts && id == other.id; + } +}; + +class TimerTaskManager { + public: + TimerTaskManager() = default; + ~TimerTaskManager() = default; + TimerTaskID AddTimerTask(const std::string& task_name, int interval_ms, bool repeat_exec, const std::function &task); + //return the time gap between now and next task-expired time, which can be used as the timeout value of epoll + int64_t ExecTimerTask(); + bool DelTimerTaskByTaskId(TimerTaskID task_id); + int64_t NowInMs(); + bool Empty() const { return exec_queue_.empty(); } + private: + //items stored in std::set are ascending ordered, we regard it as an auto sorted queue + std::set exec_queue_; + std::unordered_map id_to_task_; + TimerTaskID last_task_id_{0}; +}; + + +/* + * For simplicity, current version of TimerTaskThread has no lock inside and all task should be registered before TimerTaskThread started, + * but if you have the needs of dynamically add/remove timer task after TimerTaskThread started, you can simply add a mutex to protect the timer_task_manager_ + */ +class TimerTaskThread : public Thread { + public: + TimerTaskThread(){ + net_multiplexer_.reset(CreateNetMultiplexer()); + net_multiplexer_->Initialize(); + } + ~TimerTaskThread() override; + int StartThread() override; + int StopThread() override; + void set_thread_name(const std::string& name) override { Thread::set_thread_name(name); } + + TimerTaskID AddTimerTask(const std::string& task_name, int interval_ms, bool repeat_exec, const std::function &task){ + return timer_task_manager_.AddTimerTask(task_name, interval_ms, repeat_exec, task); + }; + + bool DelTimerTaskByTaskId(TimerTaskID task_id){ + return timer_task_manager_.DelTimerTaskByTaskId(task_id); +}; + + private: + void* ThreadMain() override; + + TimerTaskManager timer_task_manager_; + std::unique_ptr net_multiplexer_; +}; + +} // namespace net + +#endif // NET_SRC_NET_UTIL_H_ diff --git a/tools/pika_migrate/src/net/src/pb_cli.cc b/tools/pika_migrate/src/net/src/pb_cli.cc new file mode 100644 index 0000000000..fbea5d1267 --- /dev/null +++ b/tools/pika_migrate/src/net/src/pb_cli.cc @@ -0,0 +1,91 @@ +// Copyright (c) 2015-present, Qihoo, Inc. All rights reserved. +// This source code is licensed under the BSD-style license found in the +// LICENSE file in the root directory of this source tree. An additional grant +// of patent rights can be found in the PATENTS file in the same directory. 
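As the comment in the header stresses, TimerTaskManager has no internal locking, so every task must be registered before StartThread() is called. A hedged usage sketch (the task names and bodies are made up):

    #include <glog/logging.h>

    #include "net/src/net_util.h"

    void RunTimers() {
      net::TimerTaskThread timer_thread;

      // Register everything up front: a repeating liveness log plus a
      // one-shot warm-up task.
      timer_thread.AddTimerTask("ping", 1000, true /* repeat_exec */,
                                [] { LOG(INFO) << "still alive"; });
      timer_thread.AddTimerTask("warmup", 100, false /* repeat_exec */,
                                [] { /* load caches; illustrative */ });

      timer_thread.StartThread();  // returns -1 if no task was registered
      // ... run the rest of the program ...
      timer_thread.StopThread();
    }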
+ +#include +#include +#include +#include + +#include "net/include/net_cli.h" +#include "net/include/net_define.h" +#include "pstd/include/pstd_status.h" +#include "pstd/include/xdebug.h" +#include "pstd/include/noncopyable.h" + + +using pstd::Status; + +namespace net { + +// Default PBCli is block IO; +class PbCli : public NetCli { + public: + PbCli(const std::string& ip, int port); + ~PbCli() override; + + // msg should have been parsed + Status Send(void* msg_req) override; + + // Read, parse and store the reply + Status Recv(void* msg_res) override; + + private: + // BuildWbuf need to access rbuf_, wbuf_; + char* rbuf_; + char* wbuf_; + +}; + +PbCli::PbCli(const std::string& ip, const int port) : NetCli(ip, port) { + rbuf_ = reinterpret_cast(malloc(sizeof(char) * kProtoMaxMessage)); + wbuf_ = reinterpret_cast(malloc(sizeof(char) * kProtoMaxMessage)); +} + +PbCli::~PbCli() { + free(wbuf_); + free(rbuf_); +} + +Status PbCli::Send(void* msg) { + auto req = reinterpret_cast(msg); + + size_t wbuf_len = req->ByteSizeLong(); + req->SerializeToArray(wbuf_ + kCommandHeaderLength, static_cast(wbuf_len)); + uint32_t len = htonl(static_cast(wbuf_len)); + memcpy(wbuf_, &len, sizeof(len)); + wbuf_len += kCommandHeaderLength; + + return NetCli::SendRaw(wbuf_, wbuf_len); +} + +Status PbCli::Recv(void* msg_res) { + auto res = reinterpret_cast(msg_res); + + // Read Header + size_t read_len = kCommandHeaderLength; + Status s = RecvRaw(reinterpret_cast(rbuf_), &read_len); + if (!s.ok()) { + return s; + } + + uint32_t integer; + memcpy(reinterpret_cast(&integer), rbuf_, sizeof(uint32_t)); + size_t packet_len = ntohl(integer); + + // Read Packet + s = RecvRaw(reinterpret_cast(rbuf_), &packet_len); + if (!s.ok()) { + return s; + } + + if (!res->ParseFromArray(rbuf_, static_cast(packet_len))) { + return Status::Corruption("PbCli::Recv Protobuf ParseFromArray error"); + } + return Status::OK(); +} + +NetCli* NewPbCli(const std::string& peer_ip, const int peer_port) { return new PbCli(peer_ip, peer_port); } + +} // namespace net diff --git a/tools/pika_migrate/src/net/src/pb_conn.cc b/tools/pika_migrate/src/net/src/pb_conn.cc new file mode 100644 index 0000000000..4a9b7176ba --- /dev/null +++ b/tools/pika_migrate/src/net/src/pb_conn.cc @@ -0,0 +1,208 @@ +// Copyright (c) 2015-present, Qihoo, Inc. All rights reserved. +// This source code is licensed under the BSD-style license found in the +// LICENSE file in the root directory of this source tree. An additional grant +// of patent rights can be found in the PATENTS file in the same directory. + +#include "net/include/pb_conn.h" + +#include +#include + +#include + +#include "net/include/net_define.h" +#include "net/include/net_stats.h" +#include "pstd/include/xdebug.h" + +extern std::unique_ptr g_network_statistic; + +namespace net { + +PbConn::PbConn(const int fd, const std::string& ip_port, Thread* thread, NetMultiplexer* mpx) + : NetConn(fd, ip_port, thread, mpx), + + write_buf_(0) + { + rbuf_ = reinterpret_cast(malloc(sizeof(char) * PB_IOBUF_LEN)); + rbuf_len_ = PB_IOBUF_LEN; +} + +PbConn::~PbConn() { free(rbuf_); } + +// Msg is [ length(COMMAND_HEADER_LENGTH) | body(length bytes) ] +// step 1. kHeader, we read COMMAND_HEADER_LENGTH bytes; +// step 2. 
kPacket, we read header_len bytes; +ReadStatus PbConn::GetRequest() { + while (true) { + switch (connStatus_) { + case kHeader: { + ssize_t nread = read(fd(), rbuf_ + cur_pos_, COMMAND_HEADER_LENGTH - cur_pos_); + if (nread == -1) { + if (errno == EAGAIN) { + return kReadHalf; + } else { + return kReadError; + } + } else if (nread == 0) { + return kReadClose; + } else { + g_network_statistic->IncrReplInputBytes(nread); + cur_pos_ += nread; + if (cur_pos_ == COMMAND_HEADER_LENGTH) { + uint32_t integer = 0; + memcpy(reinterpret_cast(&integer), rbuf_, sizeof(uint32_t)); + header_len_ = ntohl(integer); + remain_packet_len_ = static_cast(header_len_); + connStatus_ = kPacket; + continue; + } + return kReadHalf; + } + } + case kPacket: { + if (header_len_ > rbuf_len_ - COMMAND_HEADER_LENGTH) { + uint32_t new_size = header_len_ + COMMAND_HEADER_LENGTH; + if (new_size < kProtoMaxMessage) { + rbuf_ = reinterpret_cast(realloc(rbuf_, sizeof(char) * new_size)); + if (!rbuf_) { + return kFullError; + } + rbuf_len_ = new_size; + LOG(INFO) << "Thread_id " << pthread_self() << " Expand rbuf to " << new_size << ", cur_pos_ " << cur_pos_; + } else { + return kFullError; + } + } + // read msg body + ssize_t nread = read(fd(), rbuf_ + cur_pos_, remain_packet_len_); + if (nread == -1) { + if (errno == EAGAIN) { + return kReadHalf; + } else { + return kReadError; + } + } else if (nread == 0) { + return kReadClose; + } + g_network_statistic->IncrReplInputBytes(nread); + cur_pos_ += static_cast(nread); + remain_packet_len_ -= static_cast(nread); + if (remain_packet_len_ == 0) { + connStatus_ = kComplete; + continue; + } + return kReadHalf; + } + case kComplete: { + if (DealMessage() != 0) { + return kDealError; + } + connStatus_ = kHeader; + cur_pos_ = 0; + return kReadAll; + } + // Add this switch case just for delete compile warning + case kBuildObuf: + break; + + case kWriteObuf: + break; + } + } + + return kReadHalf; +} + +WriteStatus PbConn::SendReply() { + ssize_t nwritten = 0; + size_t item_len; + std::lock_guard l(resp_mu_); + while (!write_buf_.queue_.empty()) { + std::string item = write_buf_.queue_.front(); + item_len = item.size(); + while (item_len - write_buf_.item_pos_ > 0) { + nwritten = write(fd(), item.data() + write_buf_.item_pos_, item_len - write_buf_.item_pos_); + if (nwritten <= 0) { + break; + } + g_network_statistic->IncrReplOutputBytes(nwritten); + write_buf_.item_pos_ += nwritten; + if (write_buf_.item_pos_ == item_len) { + write_buf_.queue_.pop(); + write_buf_.item_pos_ = 0; + item_len = 0; + } + } + if (nwritten == -1) { + if (errno == EAGAIN) { + return kWriteHalf; + } else { + // Here we should close the connection + return kWriteError; + } + } + if (item_len - write_buf_.item_pos_ != 0) { + return kWriteHalf; + } + } + return kWriteAll; +} + +void PbConn::set_is_reply(const bool is_reply) { + std::lock_guard l(is_reply_mu_); + if (is_reply) { + is_reply_++; + } else { + is_reply_--; + } + if (is_reply_ < 0) { + is_reply_ = 0; + } +} + +bool PbConn::is_reply() { + std::lock_guard l(is_reply_mu_); + return is_reply_ > 0; +} + +int PbConn::WriteResp(const std::string& resp) { + std::string tag; + BuildInternalTag(resp, &tag); + std::lock_guard l(resp_mu_); + write_buf_.queue_.push(tag); + write_buf_.queue_.push(resp); + set_is_reply(true); + return 0; +} + +void PbConn::BuildInternalTag(const std::string& resp, std::string* tag) { + uint32_t resp_size = resp.size(); + resp_size = htonl(resp_size); + *tag = std::string(reinterpret_cast(&resp_size), 4); +} + +void 
PbConn::TryResizeBuffer() {
+  struct timeval now;
+  gettimeofday(&now, nullptr);
+  time_t idletime = now.tv_sec - last_interaction().tv_sec;
+  if (rbuf_len_ > PB_IOBUF_LEN && ((rbuf_len_ / (cur_pos_ + 1)) > 2 || idletime > 2)) {
+    uint32_t new_size = ((cur_pos_ + PB_IOBUF_LEN) / PB_IOBUF_LEN) * PB_IOBUF_LEN;
+    if (new_size < rbuf_len_) {
+      rbuf_ = static_cast<char*>(realloc(rbuf_, new_size));
+      rbuf_len_ = new_size;
+      LOG(INFO) << "Thread_id " << pthread_self() << " Shrink rbuf to " << rbuf_len_ << ", cur_pos_: " << cur_pos_;
+    }
+  }
+}
+
+void PbConn::NotifyWrite() {
+  net::NetItem ti(fd(), ip_port(), net::kNotiWrite);
+  net_multiplexer()->Register(ti, true);
+}
+
+void PbConn::NotifyClose() {
+  net::NetItem ti(fd(), ip_port(), net::kNotiClose);
+  net_multiplexer()->Register(ti, true);
+}
+
+}  // namespace net
diff --git a/tools/pika_migrate/src/net/src/period_thread.cc b/tools/pika_migrate/src/net/src/period_thread.cc
new file mode 100644
index 0000000000..24af85b630
--- /dev/null
+++ b/tools/pika_migrate/src/net/src/period_thread.cc
@@ -0,0 +1,20 @@
+// Copyright (c) 2015-present, Qihoo, Inc. All rights reserved.
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree. An additional grant
+// of patent rights can be found in the PATENTS file in the same directory.
+
+#include "net/include/period_thread.h"
+
+#include <sys/select.h>
+
+namespace net {
+
+PeriodThread::PeriodThread(struct timeval period) : period_(period) {}
+
+void* PeriodThread::ThreadMain() {
+  PeriodMain();
+  select(0, nullptr, nullptr, nullptr, &period_);
+  return nullptr;
+}
+
+}  // namespace net
diff --git a/tools/pika_migrate/src/net/src/redis_cli.cc b/tools/pika_migrate/src/net/src/redis_cli.cc
new file mode 100644
index 0000000000..fe8fb51b75
--- /dev/null
+++ b/tools/pika_migrate/src/net/src/redis_cli.cc
@@ -0,0 +1,641 @@
+// Copyright (c) 2015-present, Qihoo, Inc. All rights reserved.
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree. An additional grant
+// of patent rights can be found in the PATENTS file in the same directory.
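PbCli::Send() and PbConn::GetRequest() above agree on one frame layout: a 4-byte big-endian body length followed by the serialized protobuf. A self-contained sketch of packing and peeking that header (plain C++, no protobuf dependency; the string payload stands in for a serialized message):

    #include <arpa/inet.h>

    #include <cassert>
    #include <cstdint>
    #include <cstring>
    #include <string>

    // Build [htonl(len)][body], mirroring BuildInternalTag()/PbCli::Send().
    std::string PackFrame(const std::string& body) {
      uint32_t len = htonl(static_cast<uint32_t>(body.size()));
      std::string frame(reinterpret_cast<const char*>(&len), sizeof(len));
      frame += body;
      return frame;
    }

    // Read the 4-byte header back into a host-order body length.
    uint32_t PeekBodyLen(const std::string& frame) {
      uint32_t len = 0;
      std::memcpy(&len, frame.data(), sizeof(len));
      return ntohl(len);
    }

    int main() {
      std::string frame = PackFrame("hello");
      assert(PeekBodyLen(frame) == 5);
      return frame.size() == 9 ? 0 : 1;
    }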
+ +#include "net/include/redis_cli.h" + +#include +#include +#include +#include + +#include +#include +#include +#include + +#include "pstd/include/noncopyable.h" +#include "net/include/net_cli.h" +#include "net/include/net_define.h" + +using pstd::Status; + +namespace net { + +class RedisCli : public NetCli { + public: + RedisCli(); + ~RedisCli() override; + + // msg should have been parsed + Status Send(void* msg) override; + + // Read, parse and store the reply + Status Recv(void* trival = nullptr) override; + + private: + RedisCmdArgsType argv_; // The parsed result + + char* rbuf_; + int32_t rbuf_size_{REDIS_IOBUF_LEN}; + int32_t rbuf_pos_{0}; + int32_t rbuf_offset_{0}; + int elements_; // the elements number of this current reply + int err_; + + int GetReply(); + int GetReplyFromReader(); + + int ProcessLineItem(); + int ProcessBulkItem(); + int ProcessMultiBulkItem(); + + ssize_t BufferRead(); + char* ReadBytes(unsigned int bytes); + char* ReadLine(int* _len); + +}; + +enum REDIS_STATUS { + REDIS_ETIMEOUT = -5, + REDIS_EREAD_NULL = -4, + REDIS_EREAD = -3, // errno is set + REDIS_EPARSE_TYPE = -2, + REDIS_ERR = -1, + REDIS_OK = 0, + REDIS_HALF, + REDIS_REPLY_STRING, + REDIS_REPLY_ARRAY, + REDIS_REPLY_INTEGER, + REDIS_REPLY_NIL, + REDIS_REPLY_STATUS, + REDIS_REPLY_ERROR +}; + +RedisCli::RedisCli() { + rbuf_ = reinterpret_cast(malloc(sizeof(char) * rbuf_size_)); +} + +RedisCli::~RedisCli() { free(rbuf_); } + +// We use passed-in send buffer here +Status RedisCli::Send(void* msg) { + Status s; + + // TODO(anan) use socket_->SendRaw instead + auto storage = reinterpret_cast(msg); + const char* wbuf = storage->data(); + size_t nleft = storage->size(); + + ssize_t wbuf_pos = 0; + + ssize_t nwritten; + while (nleft > 0) { + if ((nwritten = write(fd(), wbuf + wbuf_pos, nleft)) <= 0) { + if (errno == EINTR) { + nwritten = 0; + continue; + // blocking fd after setting setsockopt(sock, SOL_SOCKET, SO_SNDTIMEO,...) + // will return EAGAIN | EWOULDBLOCK for timeout + } else if (errno == EAGAIN || errno == EWOULDBLOCK) { + s = Status::Timeout("Send timeout"); + } else { + s = Status::IOError("write error " + std::string(strerror(errno))); + } + return s; + } + + nleft -= nwritten; + wbuf_pos += nwritten; + } + + return s; +} + +// The result is useless +Status RedisCli::Recv(void* trival) { + argv_.clear(); + int result = GetReply(); + switch (result) { + case REDIS_OK: + if (trival) { + *static_cast(trival) = argv_; + } + return Status::OK(); + case REDIS_ETIMEOUT: + return Status::Timeout(""); + case REDIS_EREAD_NULL: + return Status::IOError("Read null"); + case REDIS_EREAD: + return Status::IOError("read failed caz " + std::string(strerror(errno))); + case REDIS_EPARSE_TYPE: + return Status::IOError("invalid type"); + default: // other error + return Status::IOError("other error, maybe " + std::string(strerror(errno))); + } +} + +ssize_t RedisCli::BufferRead() { + // memmove the remain chars to rbuf begin + if (rbuf_pos_ > 0) { + if (rbuf_offset_ > 0) { + memmove(rbuf_, rbuf_ + rbuf_pos_, rbuf_offset_); + } + rbuf_pos_ = 0; + } + + ssize_t nread; + + while (true) { + nread = read(fd(), rbuf_ + rbuf_offset_, rbuf_size_ - rbuf_offset_); + + if (nread == -1) { + if (errno == EINTR) { + continue; + // blocking fd after setting setsockopt(sock, SOL_SOCKET, SO_RCVTIMEO,...) 
+ // will return EAGAIN for timeout + } else if (errno == EAGAIN || errno == EWOULDBLOCK) { + return REDIS_ETIMEOUT; + } else { + return REDIS_EREAD; + } + } else if (nread == 0) { // we consider read null an error + return REDIS_EREAD_NULL; + } + + rbuf_offset_ += static_cast(nread); + return nread; + } +} + +/* Find pointer to \r\n. */ +static char* seekNewline(char* s, size_t len) { + int pos = 0; + auto _len = static_cast(len - 1); + + /* Position should be < len-1 because the character at "pos" should be + * followed by a \n. Note that strchr cannot be used because it doesn't + * allow to search a limited length and the buffer that is being searched + * might not have a trailing nullptr character. */ + while (pos < _len) { + while (pos < _len && s[pos] != '\r') { + pos++; + } + if (s[pos] != '\r' || pos >= _len) { + /* Not found. */ + return nullptr; + } else { + if (s[pos + 1] == '\n') { + /* Found. */ + return s + pos; + } else { + /* Continue searching. */ + pos++; + } + } + } + return nullptr; +} + +/* Read a long long value starting at *s, under the assumption that it will be + * terminated by \r\n. Ambiguously returns -1 for unexpected input. */ +static long long readLongLong(char* s) { + long long v = 0; + int dec; + int mult = 1; + char c; + + if (*s == '-') { + mult = -1; + s++; + } else if (*s == '+') { + mult = 1; + s++; + } + + while ((c = *(s++)) != '\r') { + dec = c - '0'; + if (dec >= 0 && dec < 10) { + v *= 10; + v += dec; + } else { + /* Should not happen... */ + return -1; + } + } + + return mult * v; +} + +int RedisCli::ProcessLineItem() { + char* p; + int len; + + if (!(p = ReadLine(&len))) { + return REDIS_HALF; + } + + std::string arg(p, len); + argv_.push_back(arg); + elements_--; + + return REDIS_OK; +} + +int RedisCli::ProcessBulkItem() { + char *p; + char *s; + int len; + int bytelen; + + p = rbuf_ + rbuf_pos_; + s = seekNewline(p, rbuf_offset_); + if (s) { + bytelen = static_cast(s - p + 2); /* include \r\n */ + len = static_cast(readLongLong(p)); + + if (len == -1) { + elements_--; + + rbuf_pos_ += bytelen; /* case '$-1\r\n' */ + rbuf_offset_ -= bytelen; + return REDIS_OK; + } else if (len + 2 <= rbuf_offset_) { + argv_.push_back(std::string(p + bytelen, len)); + elements_--; + + bytelen += len + 2; /* include \r\n */ + rbuf_pos_ += bytelen; + rbuf_offset_ -= bytelen; + return REDIS_OK; + } + } + + return REDIS_HALF; +} + +int RedisCli::ProcessMultiBulkItem() { + char* p; + int len; + + if (p = ReadLine(&len); p) { + elements_ = static_cast(readLongLong(p)); + return REDIS_OK; + } + + return REDIS_HALF; +} + +int RedisCli::GetReply() { + int result = REDIS_OK; + + elements_ = 1; + while (elements_ > 0) { + // Should read again + if (rbuf_offset_ == 0 || result == REDIS_HALF) { + if ((result = static_cast(BufferRead())) < 0) { + return result; + } + } + + // stop if error occured. 
+ if ((result = GetReplyFromReader()) < REDIS_OK) { + break; + } + } + + return result; +} + +char* RedisCli::ReadBytes(unsigned int bytes) { + char* p = nullptr; + if (static_cast(rbuf_offset_) >= bytes) { + p = rbuf_ + rbuf_pos_; + rbuf_pos_ += static_cast(bytes); + rbuf_offset_ -= static_cast(bytes); + } + return p; +} + +char* RedisCli::ReadLine(int* _len) { + char *p; + char *s; + int len; + + p = rbuf_ + rbuf_pos_; + s = seekNewline(p, rbuf_offset_); + if (s) { + len = static_cast(s - (rbuf_ + rbuf_pos_)); + rbuf_pos_ += len + 2; /* skip \r\n */ + rbuf_offset_ -= len + 2; + if (_len) { + *_len = len; + } + return p; + } + return nullptr; +} + +int RedisCli::GetReplyFromReader() { + // if (err_) { + // return REDIS_ERR; + // } + + if (rbuf_offset_ == 0) { + return REDIS_HALF; + } + + char* p; + if (!(p = ReadBytes(1))) { + return REDIS_HALF; + } + + int type; + // Check reply type + switch (*p) { + case '-': + type = REDIS_REPLY_ERROR; + break; + case '+': + type = REDIS_REPLY_STATUS; + break; + case ':': + type = REDIS_REPLY_INTEGER; + break; + case '$': + type = REDIS_REPLY_STRING; + break; + case '*': + type = REDIS_REPLY_ARRAY; + break; + default: + return REDIS_EPARSE_TYPE; + } + + switch (type) { + case REDIS_REPLY_ERROR: + case REDIS_REPLY_STATUS: + case REDIS_REPLY_INTEGER: + // elements_ = 1; + return ProcessLineItem(); + case REDIS_REPLY_STRING: + // need processBulkItem(); + // elements_ = 1; + return ProcessBulkItem(); + case REDIS_REPLY_ARRAY: + // need processMultiBulkItem(); + return ProcessMultiBulkItem(); + default: + return REDIS_EPARSE_TYPE; // Avoid warning. + } +} + +NetCli* NewRedisCli() { return new RedisCli(); } +// +// Redis protocol related funcitons +// + +// Calculate the number of bytes needed to represent an integer as string. +static int intlen(int i) { + int len = 0; + if (i < 0) { + len++; + i = -i; + } + do { + len++; + i /= 10; + } while (i != 0); + return len; +} + +// Helper that calculates the bulk length given a certain string length. +static size_t bulklen(size_t len) { return 1 + intlen(static_cast(len)) + 2 + len + 2; } + +int redisvFormatCommand(std::string* cmd, const char* format, va_list ap) { + const char* c = format; + std::string curarg; + char buf[1048576]; + std::vector args; + int touched = 0; /* was the current argument touched? 
*/ + size_t totlen = 0; + + while (*c != '\0') { + if (*c != '%' || c[1] == '\0') { + if (*c == ' ') { + if (touched != 0) { + args.push_back(curarg); + totlen += bulklen(curarg.size()); + curarg.clear(); + touched = 0; + } + } else { + curarg.append(c, 1); + touched = 1; + } + } else { + char* arg = nullptr; + size_t size = 0; + + switch (c[1]) { + case 's': + arg = va_arg(ap, char*); + size = strlen(arg); + if (size > 0) { + curarg.append(arg, size); + } + break; + case 'b': + arg = va_arg(ap, char*); + size = va_arg(ap, size_t); + if (size > 0) { + curarg.append(arg, size); + } + break; + case '%': + curarg.append(arg, size); + break; + default: + /* Try to detect printf format */ + { + static const char intfmts[] = "diouxX"; + char _format[16]; + const char* _p = c + 1; + size_t _l = 0; + va_list _cpy; + bool fmt_valid = false; + + /* Flags */ + if (*_p != '\0' && *_p == '#') { + _p++; + } + if (*_p != '\0' && *_p == '0') { + _p++; + } + if (*_p != '\0' && *_p == '-') { + _p++; + } + if (*_p != '\0' && *_p == ' ') { + _p++; + } + if (*_p != '\0' && *_p == '+') { + _p++; + } + + /* Field width */ + while (*_p != '\0' && (isdigit(*_p) != 0)) { + _p++; + } + + /* Precision */ + if (*_p == '.') { + _p++; + while (*_p != '\0' && (isdigit(*_p) != 0)) { + _p++; + } + } + + /* Copy va_list before consuming with va_arg */ + va_copy(_cpy, ap); + + if (strchr(intfmts, *_p)) { + /* Integer conversion (without modifiers) */ + va_arg(ap, int); + fmt_valid = true; + } else if (strchr("eEfFgGaA", *_p)) { + /* Double conversion (without modifiers) */ + va_arg(ap, double); + fmt_valid = true; + } else if (_p[0] == 'h' && _p[1] == 'h') { /* Size: char */ + _p += 2; + if (*_p != '\0' && strchr(intfmts, *_p)) { + va_arg(ap, int); /* char gets promoted to int */ + fmt_valid = true; + } + } else if (_p[0] == 'h') { /* Size: short */ + _p += 1; + if (*_p != '\0' && strchr(intfmts, *_p)) { + va_arg(ap, int); /* short gets promoted to int */ + fmt_valid = true; + } + } else if (_p[0] == 'l' && _p[1] == 'l') { /* Size: long long */ + _p += 2; + if (*_p != '\0' && strchr(intfmts, *_p)) { + va_arg(ap, long long); + fmt_valid = true; + } + } else if (_p[0] == 'l') { /* Size: long */ + _p += 1; + if (*_p != '\0' && strchr(intfmts, *_p)) { + va_arg(ap, long); + fmt_valid = true; + } + } + + if (!fmt_valid) { + va_end(_cpy); + return REDIS_ERR; + } + + _l = (_p + 1) - c; + if (_l < sizeof(_format) - 2) { + memcpy(_format, c, _l); + _format[_l] = '\0'; + + int n = vsnprintf(buf, sizeof(buf), _format, _cpy); + curarg.append(buf, n); + + /* Update current position (note: outer blocks + * increment c twice so compensate here) */ + c = _p - 1; + } + + va_end(_cpy); + break; + } + } + + if (curarg.empty()) { + return REDIS_ERR; + } + + touched = 1; + c++; + } + c++; + } + + /* Add the last argument if needed */ + if (touched != 0) { + args.push_back(curarg); + totlen += bulklen(curarg.size()); + } + + /* Add bytes needed to hold multi bulk count */ + totlen += 1 + intlen(static_cast(args.size())) + 2; + + /* Build the command at protocol level */ + cmd->clear(); + cmd->reserve(totlen); + + cmd->append(1, '*'); + cmd->append(std::to_string(args.size())); + cmd->append("\r\n"); + for (auto & arg : args) { + cmd->append(1, '$'); + cmd->append(std::to_string(arg.size())); + cmd->append("\r\n"); + cmd->append(arg); + cmd->append("\r\n"); + } + assert(cmd->size() == totlen); + + return static_cast(totlen); +} + +int redisvAppendCommand(std::string* cmd, const char* format, va_list ap) { + int len = redisvFormatCommand(cmd, 
format, ap); + if (len == -1) { + return REDIS_ERR; + } + + return REDIS_OK; +} + +int redisFormatCommandArgv(RedisCmdArgsType argv, std::string* cmd) { + size_t argc = argv.size(); + + size_t totlen = 1 + intlen(static_cast(argc)) + 2; + for (size_t i = 0; i < argc; i++) { + totlen += bulklen(argv[i].size()); + } + + cmd->clear(); + cmd->reserve(totlen); + + cmd->append(1, '*'); + cmd->append(std::to_string(argc)); + cmd->append("\r\n"); + for (size_t i = 0; i < argc; i++) { + cmd->append(1, '$'); + cmd->append(std::to_string(argv[i].size())); + cmd->append("\r\n"); + cmd->append(argv[i]); + cmd->append("\r\n"); + } + + return REDIS_OK; +} + +int SerializeRedisCommand(std::string* cmd, const char* format, ...) { + va_list ap; + va_start(ap, format); + int result = redisvAppendCommand(cmd, format, ap); + va_end(ap); + return result; +} + +int SerializeRedisCommand(RedisCmdArgsType argv, std::string* cmd) { return redisFormatCommandArgv(std::move(argv), cmd); } + +}; // namespace net diff --git a/tools/pika_migrate/src/net/src/redis_conn.cc b/tools/pika_migrate/src/net/src/redis_conn.cc new file mode 100644 index 0000000000..e70089f323 --- /dev/null +++ b/tools/pika_migrate/src/net/src/redis_conn.cc @@ -0,0 +1,214 @@ +// Copyright (c) 2015-present, Qihoo, Inc. All rights reserved. +// This source code is licensed under the BSD-style license found in the +// LICENSE file in the root directory of this source tree. An additional grant +// of patent rights can be found in the PATENTS file in the same directory. + +#include "net/include/redis_conn.h" + +#include +#include + +#include + +#include "net/include/net_stats.h" +#include "pstd/include/pstd_string.h" +#include "pstd/include/xdebug.h" + +extern std::unique_ptr g_network_statistic; + +namespace net { + +RedisConn::RedisConn(const int fd, const std::string& ip_port, Thread* thread, NetMultiplexer* net_mpx, + const HandleType& handle_type, const int rbuf_max_len) + : NetConn(fd, ip_port, thread, net_mpx), + handle_type_(handle_type), + + rbuf_max_len_(rbuf_max_len) + { + RedisParserSettings settings; + settings.DealMessage = ParserDealMessageCb; + settings.Complete = ParserCompleteCb; + redis_parser_.RedisParserInit(REDIS_PARSER_REQUEST, settings); + redis_parser_.data = this; +} + +RedisConn::~RedisConn() { free(rbuf_); } + +ReadStatus RedisConn::ParseRedisParserStatus(RedisParserStatus status) { + if (status == kRedisParserInitDone) { + return kOk; + } else if (status == kRedisParserHalf) { + return kReadHalf; + } else if (status == kRedisParserDone) { + return kReadAll; + } else if (status == kRedisParserError) { + RedisParserError error_code = redis_parser_.get_error_code(); + switch (error_code) { + case kRedisParserOk: + return kReadError; + case kRedisParserInitError: + return kReadError; + case kRedisParserFullError: + return kFullError; + case kRedisParserProtoError: + return kParseError; + case kRedisParserDealError: + return kDealError; + default: + return kReadError; + } + } else { + return kReadError; + } +} + +ReadStatus RedisConn::GetRequest() { + ssize_t nread = 0; + int next_read_pos = last_read_pos_ + 1; + + int64_t remain = rbuf_len_ - next_read_pos; // Remain buffer size + int64_t new_size = 0; + if (remain == 0) { + new_size = rbuf_len_ + REDIS_IOBUF_LEN; + remain += REDIS_IOBUF_LEN; + } else if (remain < bulk_len_) { + new_size = next_read_pos + bulk_len_; + remain = bulk_len_; + } + if (new_size > rbuf_len_) { + if (new_size > rbuf_max_len_) { + return kFullError; + } + rbuf_ = static_cast(realloc(rbuf_, new_size)); 
// NOLINT + if (!rbuf_) { + return kFullError; + } + rbuf_len_ = static_cast(new_size); + } + + nread = read(fd(), rbuf_ + next_read_pos, remain); + if (nread == -1) { + if (errno == EAGAIN || errno == EWOULDBLOCK) { + nread = 0; + return kReadHalf; // HALF + } else { + // error happened, close client + return kReadError; + } + } else if (nread == 0) { + // client closed, close client + return kReadClose; + } + g_network_statistic->IncrRedisInputBytes(nread); + // assert(nread > 0); + last_read_pos_ += static_cast(nread); + msg_peak_ = last_read_pos_; + command_len_ += static_cast (nread); + if (command_len_ >= rbuf_max_len_) { + LOG(INFO) << "close conn command_len " << command_len_ << ", rbuf_max_len " << rbuf_max_len_; + return kFullError; + } + + int processed_len = 0; + RedisParserStatus ret = redis_parser_.ProcessInputBuffer(rbuf_ + next_read_pos, static_cast(nread), &processed_len); + ReadStatus read_status = ParseRedisParserStatus(ret); + if (read_status == kReadAll || read_status == kReadHalf) { + if (read_status == kReadAll) { + command_len_ = 0; + } + last_read_pos_ = -1; + bulk_len_ = redis_parser_.get_bulk_len(); + } + if (!response_.empty()) { + set_is_reply(true); + } + return read_status; // OK || HALF || FULL_ERROR || PARSE_ERROR +} + +WriteStatus RedisConn::SendReply() { + ssize_t nwritten = 0; + size_t wbuf_len = response_.size(); + while (wbuf_len > 0) { + nwritten = write(fd(), response_.data() + wbuf_pos_, wbuf_len - wbuf_pos_); + if (nwritten <= 0) { + break; + } + g_network_statistic->IncrRedisOutputBytes(nwritten); + wbuf_pos_ += nwritten; + if (wbuf_pos_ == wbuf_len) { + // Have sended all response data + if (wbuf_len > DEFAULT_WBUF_SIZE) { + std::string buf; + buf.reserve(DEFAULT_WBUF_SIZE); + response_.swap(buf); + } + response_.clear(); + + wbuf_len = 0; + wbuf_pos_ = 0; + } + } + if (nwritten == -1) { + if (errno == EAGAIN || errno == EWOULDBLOCK) { + return kWriteHalf; + } else { + // Here we should close the connection + return kWriteError; + } + } + if (wbuf_len == 0) { + return kWriteAll; + } else { + return kWriteHalf; + } +} + +int RedisConn::WriteResp(const std::string& resp) { + response_.append(resp); + set_is_reply(true); + return 0; +} + +void RedisConn::TryResizeBuffer() { + struct timeval now; + gettimeofday(&now, nullptr); + time_t idletime = now.tv_sec - last_interaction().tv_sec; + if (rbuf_len_ > REDIS_MBULK_BIG_ARG && ((rbuf_len_ / (msg_peak_ + 1)) > 2 || idletime > 2)) { + int new_size = ((last_read_pos_ + REDIS_IOBUF_LEN) / REDIS_IOBUF_LEN) * REDIS_IOBUF_LEN; + if (new_size < rbuf_len_) { + rbuf_ = static_cast(realloc(rbuf_, new_size)); + rbuf_len_ = new_size; + LOG(INFO) << "Resize buffer to " << rbuf_len_ << ", last_read_pos_: " << last_read_pos_; + } + msg_peak_ = 0; + } +} + +void RedisConn::SetHandleType(const HandleType& handle_type) { handle_type_ = handle_type; } + +HandleType RedisConn::GetHandleType() { return handle_type_; } + +void RedisConn::ProcessRedisCmds(const std::vector& argvs, bool async, std::string* response) {} + +void RedisConn::NotifyEpoll(bool success) { + NetItem ti(fd(), ip_port(), success ? 
kNotiEpolloutAndEpollin : kNotiClose);
+  net_multiplexer()->Register(ti, true);
+}
+
+int RedisConn::ParserDealMessageCb(RedisParser* parser, const RedisCmdArgsType& argv) {
+  auto conn = reinterpret_cast<RedisConn*>(parser->data);
+  if (conn->GetHandleType() == HandleType::kSynchronous) {
+    return conn->DealMessage(argv, &(conn->response_));
+  } else {
+    return 0;
+  }
+}
+
+int RedisConn::ParserCompleteCb(RedisParser* parser, const std::vector<RedisCmdArgsType>& argvs) {
+  auto conn = reinterpret_cast<RedisConn*>(parser->data);
+  bool async = conn->GetHandleType() == HandleType::kAsynchronous;
+  conn->ProcessRedisCmds(argvs, async, &(conn->response_));
+  return 0;
+}
+
+}  // namespace net
diff --git a/tools/pika_migrate/src/net/src/redis_parser.cc b/tools/pika_migrate/src/net/src/redis_parser.cc
new file mode 100644
index 0000000000..93a017118b
--- /dev/null
+++ b/tools/pika_migrate/src/net/src/redis_parser.cc
@@ -0,0 +1,407 @@
+// Copyright (c) 2015-present, Qihoo, Inc. All rights reserved.
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree. An additional grant
+// of patent rights can be found in the PATENTS file in the same directory.
+
+#include "net/include/redis_parser.h"
+
+#include <cassert> /* assert */
+
+#include <glog/logging.h>
+
+#include "pstd/include/pstd_string.h"
+#include "pstd/include/xdebug.h"
+
+namespace net {
+
+static bool IsHexDigit(char ch) {
+  return (ch >= '0' && ch <= '9') || (ch >= 'a' && ch <= 'f') || (ch >= 'A' && ch <= 'F');
+}
+
+static int HexDigitToInt32(char ch) {
+  if (ch <= '9' && ch >= '0') {
+    return ch - '0';
+  } else if (ch <= 'F' && ch >= 'A') {
+    return ch - 'A' + 10;  // 'A'..'F' map to 10..15
+  } else if (ch <= 'f' && ch >= 'a') {
+    return ch - 'a' + 10;  // 'a'..'f' map to 10..15
+  } else {
+    return 0;
+  }
+}
+
+static int split2args(const std::string& req_buf, RedisCmdArgsType& argv) {
+  const char* p = req_buf.data();
+  std::string arg;
+
+  while (true) {
+    // skip blanks
+    while ((*p != 0) && (isspace(*p) != 0)) {
+      p++;
+    }
+    if (*p != 0) {
+      // get a token
+      int inq = 0;   // set to 1 if we are in "quotes"
+      int insq = 0;  // set to 1 if we are in 'single quotes'
+      int done = 0;
+
+      arg.clear();
+      while (done == 0) {
+        if (inq != 0) {
+          if (*p == '\\' && *(p + 1) == 'x' && IsHexDigit(*(p + 2)) && IsHexDigit(*(p + 3))) {
+            char byte = static_cast<char>(HexDigitToInt32(*(p + 2)) * 16 + HexDigitToInt32(*(p + 3)));
+            arg.append(1, byte);
+            p += 3;
+          } else if (*p == '\\' && (*(p + 1) != 0)) {
+            char c;
+
+            p++;
+            switch (*p) {
+              case 'n':
+                c = '\n';
+                break;
+              case 'r':
+                c = '\r';
+                break;
+              case 't':
+                c = '\t';
+                break;
+              case 'b':
+                c = '\b';
+                break;
+              case 'a':
+                c = '\a';
+                break;
+              default:
+                c = *p;
+                break;
+            }
+            arg.append(1, c);
+          } else if (*p == '"') {
+            /* closing quote must be followed by a space or
+             * nothing at all. */
+            if ((*(p + 1) != 0) && (isspace(*(p + 1)) == 0)) {
+              argv.clear();
+              return -1;
+            }
+            done = 1;
+          } else if (*p == 0) {
+            // unterminated quotes
+            argv.clear();
+            return -1;
+          } else {
+            arg.append(1, *p);
+          }
+        } else if (insq != 0) {
+          if (*p == '\\' && *(p + 1) == '\'') {
+            p++;
+            arg.append(1, '\'');
+          } else if (*p == '\'') {
+            /* closing quote must be followed by a space or
+             * nothing at all.
*/ + if ((*(p + 1) != 0) && (isspace(*(p + 1)) == 0)) { + argv.clear(); + return -1; + } + done = 1; + } else if (*p == 0) { + // unterminated quotes + argv.clear(); + return -1; + } else { + arg.append(1, *p); + } + } else { + switch (*p) { + case ' ': + case '\n': + case '\r': + case '\t': + case '\0': + done = 1; + break; + case '"': + inq = 1; + break; + case '\'': + insq = 1; + break; + default: + // current = sdscatlen(current,p,1); + arg.append(1, *p); + break; + } + } + if (*p != 0) { + p++; + } + } + argv.push_back(arg); + } else { + return 0; + } + } +} + +int RedisParser::FindNextSeparators() { + if (cur_pos_ > length_ - 1) { + return -1; + } + int pos = cur_pos_; + while (pos <= length_ - 1) { + if (input_buf_[pos] == '\n') { + return pos; + } + pos++; + } + return -1; +} + +int RedisParser::GetNextNum(int pos, long* value) { + assert(pos > cur_pos_); + // cur_pos_ pos + // | ----------| + // | | + // *3\r\n + // [cur_pos_ + 1, pos - cur_pos_ - 2] + if (pstd::string2int(input_buf_ + cur_pos_ + 1, pos - cur_pos_ - 2, value) != 0) { + return 0; // Success + } + return -1; // Failed +} + +RedisParser::RedisParser() + : redis_type_(0), bulk_len_(-1), redis_parser_type_(REDIS_PARSER_REQUEST) {} + +void RedisParser::SetParserStatus(RedisParserStatus status, RedisParserError error) { + if (status == kRedisParserHalf) { + CacheHalfArgv(); + } + status_code_ = status; + error_code_ = error; +} + +void RedisParser::CacheHalfArgv() { + std::string tmp(input_buf_ + cur_pos_, length_ - cur_pos_); + half_argv_ = tmp; + cur_pos_ = length_; +} + +RedisParserStatus RedisParser::RedisParserInit(RedisParserType type, const RedisParserSettings& settings) { + if (status_code_ != kRedisParserNone) { + SetParserStatus(kRedisParserError, kRedisParserInitError); + return status_code_; + } + if (type != REDIS_PARSER_REQUEST && type != REDIS_PARSER_RESPONSE) { + SetParserStatus(kRedisParserError, kRedisParserInitError); + return status_code_; + } + redis_parser_type_ = type; + parser_settings_ = settings; + SetParserStatus(kRedisParserInitDone); + return status_code_; +} + +RedisParserStatus RedisParser::ProcessInlineBuffer() { + int pos; + int ret; + pos = FindNextSeparators(); + if (pos == -1) { + // change rbuf_len_ to length_ + if (length_ > REDIS_INLINE_MAXLEN) { + SetParserStatus(kRedisParserError, kRedisParserFullError); + return status_code_; + } else { + SetParserStatus(kRedisParserHalf); + return status_code_; + } + } + // args \r\n + std::string req_buf(input_buf_ + cur_pos_, pos + 1 - cur_pos_); + + argv_.clear(); + ret = split2args(req_buf, argv_); + cur_pos_ = pos + 1; + + if (ret == -1) { + SetParserStatus(kRedisParserError, kRedisParserProtoError); + return status_code_; + } + SetParserStatus(kRedisParserDone); + return status_code_; +} + +RedisParserStatus RedisParser::ProcessMultibulkBuffer() { + int pos = 0; + if (multibulk_len_ == 0) { + /* The client should have been reset */ + pos = FindNextSeparators(); + if (pos != -1) { + if (GetNextNum(pos, &multibulk_len_) != 0) { + // Protocol error: invalid multibulk length + SetParserStatus(kRedisParserError, kRedisParserProtoError); + return status_code_; + } + cur_pos_ = pos + 1; + argv_.clear(); + if (cur_pos_ > length_ - 1) { + SetParserStatus(kRedisParserHalf); + return status_code_; + } + } else { + SetParserStatus(kRedisParserHalf); + return status_code_; // HALF + } + } + while (multibulk_len_ != 0) { + if (bulk_len_ == -1) { + pos = FindNextSeparators(); + if (pos != -1) { + if (input_buf_[cur_pos_] != '$') { + 
SetParserStatus(kRedisParserError, kRedisParserProtoError); + return status_code_; // PARSE_ERROR + } + + if (GetNextNum(pos, &bulk_len_) != 0) { + // Protocol error: invalid bulk length + SetParserStatus(kRedisParserError, kRedisParserProtoError); + return status_code_; + } + cur_pos_ = pos + 1; + } + if (pos == -1 || cur_pos_ > length_ - 1) { + SetParserStatus(kRedisParserHalf); + return status_code_; + } + } + if ((length_ - 1) - cur_pos_ + 1 < bulk_len_ + 2) { + // Data not enough + break; + } else { + argv_.emplace_back(input_buf_ + cur_pos_, bulk_len_); + cur_pos_ = static_cast(cur_pos_ + bulk_len_ + 2); + bulk_len_ = -1; + multibulk_len_--; + } + } + + if (multibulk_len_ == 0) { + SetParserStatus(kRedisParserDone); + return status_code_; // OK + } else { + SetParserStatus(kRedisParserHalf); + return status_code_; // HALF + } +} + +void RedisParser::PrintCurrentStatus() { + LOG(INFO) << "status_code " << status_code_ << " error_code " << error_code_; + LOG(INFO) << "multibulk_len_ " << multibulk_len_ << "bulk_len " << bulk_len_ << " redis_type " << redis_type_ << " redis_parser_type " << redis_parser_type_; + // for (auto& i : argv_) { + // UNUSED(i); + // log_info("parsed arguments: %s", i.c_str()); + // } + LOG(INFO) << "cur_pos : " << cur_pos_; + LOG(INFO) << "input_buf_ is clean ? " << (input_buf_ == nullptr); + if (input_buf_) { + LOG(INFO) << " input_buf " << input_buf_; + } + LOG(INFO) << "half_argv_ : " << half_argv_; + LOG(INFO) << "input_buf len " << length_; +} + +RedisParserStatus RedisParser::ProcessInputBuffer(const char* input_buf, int length, int* parsed_len) { + if (status_code_ == kRedisParserInitDone || status_code_ == kRedisParserHalf || status_code_ == kRedisParserDone) { + // TODO(): AZ: avoid copy + std::string tmp_str(input_buf, length); + input_str_ = half_argv_ + tmp_str; + input_buf_ = input_str_.c_str(); + length_ = static_cast(length + half_argv_.size()); + if (redis_parser_type_ == REDIS_PARSER_REQUEST) { + ProcessRequestBuffer(); + } else if (redis_parser_type_ == REDIS_PARSER_RESPONSE) { + ProcessResponseBuffer(); + } else { + SetParserStatus(kRedisParserError, kRedisParserInitError); + return status_code_; + } + // cur_pos_ starts from 0, val of cur_pos_ is the parsed_len + *parsed_len = cur_pos_; + ResetRedisParser(); + // PrintCurrentStatus(); + return status_code_; + } + SetParserStatus(kRedisParserError, kRedisParserInitError); + return status_code_; +} + +// TODO(): AZ +RedisParserStatus RedisParser::ProcessResponseBuffer() { + SetParserStatus(kRedisParserDone); + return status_code_; +} + +RedisParserStatus RedisParser::ProcessRequestBuffer() { + RedisParserStatus ret; + while (cur_pos_ <= length_ - 1) { + if (redis_type_ == 0) { + if (input_buf_[cur_pos_] == '*') { + redis_type_ = REDIS_REQ_MULTIBULK; + } else { + redis_type_ = REDIS_REQ_INLINE; + } + } + + if (redis_type_ == REDIS_REQ_INLINE) { + ret = ProcessInlineBuffer(); + if (ret != kRedisParserDone) { + return ret; + } + } else if (redis_type_ == REDIS_REQ_MULTIBULK) { + ret = ProcessMultibulkBuffer(); + if (ret != kRedisParserDone) { // FULL_ERROR || HALF || PARSE_ERROR + return ret; + } + } else { + // Unknown requeset type; + return kRedisParserError; + } + if (!argv_.empty()) { + argvs_.push_back(argv_); + if (parser_settings_.DealMessage) { + if (parser_settings_.DealMessage(this, argv_) != 0) { + SetParserStatus(kRedisParserError, kRedisParserDealError); + return status_code_; + } + } + } + argv_.clear(); + // Reset + ResetCommandStatus(); + } + if (parser_settings_.Complete) { + 
if (parser_settings_.Complete(this, argvs_) != 0) { + SetParserStatus(kRedisParserError, kRedisParserCompleteError); + return status_code_; + } + } + argvs_.clear(); + SetParserStatus(kRedisParserDone); + return status_code_; // OK +} + +void RedisParser::ResetCommandStatus() { + redis_type_ = 0; + multibulk_len_ = 0; + bulk_len_ = -1; + half_argv_.clear(); +} + +void RedisParser::ResetRedisParser() { + cur_pos_ = 0; + input_buf_ = nullptr; + input_str_.clear(); + length_ = 0; +} + +} // namespace net diff --git a/tools/pika_migrate/src/net/src/server_socket.cc b/tools/pika_migrate/src/net/src/server_socket.cc new file mode 100644 index 0000000000..3724e1902f --- /dev/null +++ b/tools/pika_migrate/src/net/src/server_socket.cc @@ -0,0 +1,79 @@ +// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. +// This source code is licensed under the BSD-style license found in the +// LICENSE file in the root directory of this source tree. An additional grant +// of patent rights can be found in the PATENTS file in the same directory. + +#include +#include +#include +#include +#include +#include +#include +#include + +#include "net/include/net_define.h" +#include "net/src/net_util.h" +#include "net/src/server_socket.h" + +namespace net { + +ServerSocket::ServerSocket(int port, bool is_block) + : port_(port), + + is_block_(is_block) {} + +ServerSocket::~ServerSocket() { Close(); } + +/* + * Listen to a specific ip addr on a multi eth machine + * Return 0 if Listen success, other wise + */ +int ServerSocket::Listen(const std::string& bind_ip) { + int ret = 0; + sockfd_ = socket(AF_INET, SOCK_STREAM, 0); + memset(&servaddr_, 0, sizeof(servaddr_)); + + int yes = 1; + ret = setsockopt(sockfd_, SOL_SOCKET, SO_REUSEADDR, &yes, sizeof(yes)); + if (ret < 0) { + return kSetSockOptError; + } + + servaddr_.sin_family = AF_INET; + if (bind_ip.empty()) { + servaddr_.sin_addr.s_addr = htonl(INADDR_ANY); + } else { + servaddr_.sin_addr.s_addr = inet_addr(bind_ip.c_str()); + } + servaddr_.sin_port = htons(port_); + + fcntl(sockfd_, F_SETFD, fcntl(sockfd_, F_GETFD) | FD_CLOEXEC); + + ret = bind(sockfd_, reinterpret_cast(&servaddr_), sizeof(servaddr_)); + if (ret < 0) { + return kBindError; + } + ret = listen(sockfd_, accept_backlog_); + if (ret < 0) { + return kListenError; + } + listening_ = true; + + if (!is_block_) { + SetNonBlock(); + } + return kSuccess; +} + +int ServerSocket::SetNonBlock() { + flags_ = Setnonblocking(sockfd()); + if (flags_ == -1) { + return -1; + } + return 0; +} + +void ServerSocket::Close() { close(sockfd_); } + +} // namespace net diff --git a/tools/pika_migrate/src/net/src/server_socket.h b/tools/pika_migrate/src/net/src/server_socket.h new file mode 100644 index 0000000000..5e256e3f86 --- /dev/null +++ b/tools/pika_migrate/src/net/src/server_socket.h @@ -0,0 +1,78 @@ +// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. +// This source code is licensed under the BSD-style license found in the +// LICENSE file in the root directory of this source tree. An additional grant +// of patent rights can be found in the PATENTS file in the same directory. 
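// ServerSocket::Listen() above follows the canonical socket/bind/listen
// sequence: SO_REUSEADDR for fast restart, FD_CLOEXEC so the fd is not leaked
// across exec, and an optional non-blocking switch. A minimal standalone
// sketch of the same sequence; ListenSketch is a hypothetical helper, with
// ServerSocket's distinct error codes collapsed to -1:
#include <arpa/inet.h>
#include <fcntl.h>
#include <netinet/in.h>
#include <sys/socket.h>
#include <unistd.h>

static int ListenSketch(const char* bind_ip, int port, int backlog) {
  int fd = socket(AF_INET, SOCK_STREAM, 0);
  if (fd < 0) {
    return -1;
  }
  int yes = 1;
  setsockopt(fd, SOL_SOCKET, SO_REUSEADDR, &yes, sizeof(yes));  // allow fast restart
  fcntl(fd, F_SETFD, fcntl(fd, F_GETFD) | FD_CLOEXEC);          // don't leak across exec
  sockaddr_in addr{};
  addr.sin_family = AF_INET;
  addr.sin_addr.s_addr = (bind_ip != nullptr) ? inet_addr(bind_ip) : htonl(INADDR_ANY);
  addr.sin_port = htons(static_cast<uint16_t>(port));
  if (bind(fd, reinterpret_cast<sockaddr*>(&addr), sizeof(addr)) < 0 || listen(fd, backlog) < 0) {
    close(fd);
    return -1;
  }
  return fd;  // caller owns the listening fd
}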
+ +#ifndef NET_SRC_SERVER_SOCKET_H_ +#define NET_SRC_SERVER_SOCKET_H_ + +#include +#include + +#include +#include + +#include "pstd/include/noncopyable.h" + +namespace net { + +class ServerSocket : public pstd::noncopyable { + public: + explicit ServerSocket(int port, bool is_block = false); + + virtual ~ServerSocket(); + + /* + * Listen to a specific ip addr on a multi eth machine + * Return 0 if Listen success, <0 other wise + */ + int Listen(const std::string& bind_ip = std::string()); + + void Close(); + + /* + * The get and set functions + */ + void set_port(int port) { port_ = port; } + + int port() { return port_; } + + void set_keep_alive(bool keep_alive) { keep_alive_ = keep_alive; } + bool keep_alive() const { return keep_alive_; } + + void set_send_timeout(int send_timeout) { send_timeout_ = send_timeout; } + int send_timeout() const { return send_timeout_; } + + void set_recv_timeout(int recv_timeout) { recv_timeout_ = recv_timeout; } + + int recv_timeout() const { return recv_timeout_; } + + int sockfd() const { return sockfd_; } + + void set_sockfd(int sockfd) { sockfd_ = sockfd; } + + private: + int SetNonBlock(); + /* + * The tcp server port and address + */ + int port_; + int flags_; + int send_timeout_{0}; + int recv_timeout_{0}; + int accept_timeout_{0}; + int accept_backlog_{1024}; + int tcp_send_buffer_{0}; + int tcp_recv_buffer_{0}; + bool keep_alive_{false}; + bool listening_{false}; + bool is_block_; + + struct sockaddr_in servaddr_; + int sockfd_; + +}; + +} // namespace net + +#endif // NET_SRC_SERVER_SOCKET_H_ diff --git a/tools/pika_migrate/src/net/src/server_thread.cc b/tools/pika_migrate/src/net/src/server_thread.cc new file mode 100644 index 0000000000..6c9e894cf3 --- /dev/null +++ b/tools/pika_migrate/src/net/src/server_thread.cc @@ -0,0 +1,352 @@ +// Copyright (c) 2015-present, Qihoo, Inc. All rights reserved. +// This source code is licensed under the BSD-style license found in the +// LICENSE file in the root directory of this source tree. An additional grant +// of patent rights can be found in the PATENTS file in the same directory. 
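// ServerThread's constructors below route a possibly-null handle through
// SanitizeHandle(): a caller passing nullptr gets a no-op DefaultServerHandle,
// and own_handle_ records whether the substitute is owned so the destructor
// can delete it. A minimal sketch of that null-object-plus-ownership idiom;
// the names Handle and HandleOwnerSketch are hypothetical:
struct Handle {
  virtual ~Handle() = default;
  virtual void OnCron() {}  // default handle: do nothing
};

class HandleOwnerSketch {
 public:
  explicit HandleOwnerSketch(Handle* h) : handle_(h ? h : new Handle()), owns_(h == nullptr) {}
  ~HandleOwnerSketch() {
    if (owns_) {
      delete handle_;  // only the internally created substitute is owned
    }
  }

 private:
  Handle* handle_;
  bool owns_;
};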
+ +#include "net/include/server_thread.h" + +#include +#include +#include +#include +#include +#include + +#include + +#include "dispatch_thread.h" +#include "net/src/server_socket.h" +#include "pstd/include/testutil.h" +#include "pstd/include/xdebug.h" + +namespace net { + +using pstd::Status; + +class DefaultServerHandle : public ServerHandle { + public: + void CronHandle() const override {} + void FdTimeoutHandle(int fd, const std::string& ip_port) const override { + UNUSED(fd); + UNUSED(ip_port); + } + void FdClosedHandle(int fd, const std::string& ip_port) const override { + UNUSED(fd); + UNUSED(ip_port); + } + bool AccessHandle(std::string& ip) const override { + UNUSED(ip); + return true; + } + bool AccessHandle(int fd, std::string& ip) const override { + UNUSED(fd); + UNUSED(ip); + return true; + } + int CreateWorkerSpecificData(void** data) const override { + UNUSED(data); + return 0; + } + int DeleteWorkerSpecificData(void* data) const override { + UNUSED(data); + return 0; + } +}; + +static const ServerHandle* SanitizeHandle(const ServerHandle* raw_handle) { + if (!raw_handle) { + return new DefaultServerHandle(); + } + return raw_handle; +} + +ServerThread::ServerThread(int port, int cron_interval, const ServerHandle* handle) + : cron_interval_(cron_interval), + handle_(SanitizeHandle(handle)), + own_handle_(handle_ != handle), +#ifdef __ENABLE_SSL + security_(false), +#endif + port_(port) { + net_multiplexer_.reset(CreateNetMultiplexer()); + net_multiplexer_->Initialize(); + ips_.insert("0.0.0.0"); +} + +ServerThread::ServerThread(const std::string& bind_ip, int port, int cron_interval, const ServerHandle* handle) + : cron_interval_(cron_interval), + handle_(SanitizeHandle(handle)), + own_handle_(handle_ != handle), +#ifdef __ENABLE_SSL + security_(false), +#endif + port_(port) { + net_multiplexer_.reset(CreateNetMultiplexer()); + net_multiplexer_->Initialize(); + ips_.insert(bind_ip); +} + +ServerThread::ServerThread(const std::set& bind_ips, int port, int cron_interval, + const ServerHandle* handle) + : cron_interval_(cron_interval), + handle_(SanitizeHandle(handle)), + own_handle_(handle_ != handle), +#ifdef __ENABLE_SSL + security_(false), +#endif + port_(port) { + net_multiplexer_.reset(CreateNetMultiplexer()); + net_multiplexer_->Initialize(); + ips_ = bind_ips; +} + +ServerThread::~ServerThread() { +#ifdef __ENABLE_SSL + if (security_) { + SSL_CTX_free(ssl_ctx_); + EVP_cleanup(); + } +#endif + if (own_handle_) { + delete handle_; + } +} + +int ServerThread::SetTcpNoDelay(int connfd) { + int val = 1; + return setsockopt(connfd, IPPROTO_TCP, TCP_NODELAY, &val, sizeof(val)); +} + +int ServerThread::StartThread() { + int ret = 0; + ret = InitHandle(); + if (ret != kSuccess) { + return ret; + } + return Thread::StartThread(); +} + +int ServerThread::InitHandle() { + int ret = 0; + std::shared_ptr socket_p; + if (ips_.find("0.0.0.0") != ips_.end()) { + ips_.clear(); + ips_.insert("0.0.0.0"); + } + + for (const auto& ip : ips_) { + socket_p = std::make_shared(port_); + server_sockets_.emplace_back(socket_p); + ret = socket_p->Listen(ip); + if (ret != kSuccess) { + return ret; + } + + // init pool + net_multiplexer_->NetAddEvent(socket_p->sockfd(), kReadable | kWritable); + server_fds_.insert(socket_p->sockfd()); + } + return kSuccess; +} + +void ServerThread::DoCronTask() {} + +void ServerThread::ProcessNotifyEvents(const NetFiredEvent* pfe) { UNUSED(pfe); } + +void* ServerThread::ThreadMain() { + int nfds; + NetFiredEvent* pfe; + Status s; + struct sockaddr_in cliaddr; + 
socklen_t clilen = sizeof(struct sockaddr); + int fd; + int connfd; + + struct timeval when; + gettimeofday(&when, nullptr); + struct timeval now = when; + + when.tv_sec += (cron_interval_ / 1000); + when.tv_usec += ((cron_interval_ % 1000) * 1000); + int timeout = cron_interval_; + if (timeout <= 0) { + timeout = NET_CRON_INTERVAL; + } + + std::string ip_port; + char port_buf[32]; + char ip_addr[INET_ADDRSTRLEN] = ""; + + while (!should_stop()) { + if (cron_interval_ > 0) { + gettimeofday(&now, nullptr); + if (when.tv_sec > now.tv_sec || (when.tv_sec == now.tv_sec && when.tv_usec > now.tv_usec)) { + timeout = static_cast((when.tv_sec - now.tv_sec) * 1000 + (when.tv_usec - now.tv_usec) / 1000); + } else { + // Do own cron task as well as user's + DoCronTask(); + handle_->CronHandle(); + + when.tv_sec = now.tv_sec + (cron_interval_ / 1000); + when.tv_usec = now.tv_usec + ((cron_interval_ % 1000) * 1000); + timeout = cron_interval_; + } + } + + nfds = net_multiplexer_->NetPoll(timeout); + for (int i = 0; i < nfds; i++) { + pfe = (net_multiplexer_->FiredEvents()) + i; + fd = pfe->fd; + + + if (pfe->fd == net_multiplexer_->NotifyReceiveFd()) { + ProcessNotifyEvents(pfe); + continue; + } + + /* + * Handle server event + */ + if (server_fds_.find(fd) != server_fds_.end()) { + if ((pfe->mask & kReadable) != 0) { + connfd = accept(fd, reinterpret_cast(&cliaddr), &clilen); + if (connfd == -1) { + LOG(WARNING) << "accept error, errno numberis " << errno << ", error reason " << strerror(errno); + continue; + } + fcntl(connfd, F_SETFD, fcntl(connfd, F_GETFD) | FD_CLOEXEC); + + // not use nagel to avoid tcp 40ms delay + if (SetTcpNoDelay(connfd) == -1) { + LOG(WARNING) << "setsockopt error, errno numberis " << errno << ", error reason " << strerror(errno); + close(connfd); + continue; + } + + // Just ip + ip_port = inet_ntop(AF_INET, &cliaddr.sin_addr, ip_addr, sizeof(ip_addr)); + + if (!handle_->AccessHandle(ip_port) || !handle_->AccessHandle(connfd, ip_port)) { + close(connfd); + continue; + } + + ip_port.append(":"); + snprintf(port_buf, sizeof(port_buf), "%d", ntohs(cliaddr.sin_port)); + ip_port.append(port_buf); + + /* + * Handle new connection, + * implemented in derived class + */ + HandleNewConn(connfd, ip_port); + + } else if ((pfe->mask & kErrorEvent) != 0) { + /* + * this branch means there is error on the listen fd + */ + close(pfe->fd); + continue; + } + } else { + /* + * Handle connection's event + * implemented in derived class + */ + HandleConnEvent(pfe); + } + } + } + + server_sockets_.clear(); + server_fds_.clear(); + + return nullptr; +} + +#ifdef __ENABLE_SSL +static std::vector> ssl_mutex_; + +static void SSLLockingCallback(int mode, int type, const char* file, int line) { + if (mode & CRYPTO_LOCK) { + ssl_mutex_[type]->Lock(); + } else { + ssl_mutex_[type]->Unlock(); + } +} + +static unsigned long SSLIdCallback() { return (unsigned long)pthread_self(); } + +int ServerThread::EnableSecurity(const std::string& cert_file, const std::string& key_file) { + if (cert_file.empty() || key_file.empty()) { + LOG(WARNING) << "cert_file and key_file can not be empty!"; + } + // Init Security Env + // 1. Create multithread mutex used by openssl + ssl_mutex_.resize(CRYPTO_num_locks()); + for (auto& sm : ssl_mutex_) { + sm.reset(new pstd::Mutex()); + } + CRYPTO_set_locking_callback(SSLLockingCallback); + CRYPTO_set_id_callback(SSLIdCallback); + + // 2. Use default configuration + OPENSSL_config(nullptr); + + // 3. 
Init library, load all algorithms + SSL_library_init(); + SSL_load_error_strings(); + OpenSSL_add_all_algorithms(); + + // 4. Create ssl context + ssl_ctx_ = SSL_CTX_new(SSLv23_server_method()); + if (!ssl_ctx_) { + LOG(WARNING) << "Unable to create SSL context"; + return -1; + } + + // 5. Set cert file and key file, then check key file + if (SSL_CTX_use_certificate_file(ssl_ctx_, cert_file.c_str(), SSL_FILETYPE_PEM) != 1) { + LOG(WARNING) << "SSL_CTX_use_certificate_file(" << cert_file << ") failed"; + return -1; + } + + if (SSL_CTX_use_PrivateKey_file(ssl_ctx_, key_file.c_str(), SSL_FILETYPE_PEM) != 1) { + LOG(WARNING) << "SSL_CTX_use_PrivateKey_file(" << key_file << ")"; + return -1; + } + + if (SSL_CTX_check_private_key(ssl_ctx_) != 1) { + LOG(WARNING) << "SSL_CTX_check_private_key(" << key_file << ")"; + return -1; + } + + // https://wiki.openssl.org/index.php/Manual:SSL_CTX_set_read_ahead(3) + // read data as more as possible + SSL_CTX_set_read_ahead(ssl_ctx_, true); + + // Force using TLS 1.2 + SSL_CTX_set_options(ssl_ctx_, SSL_OP_NO_SSLv2); + SSL_CTX_set_options(ssl_ctx_, SSL_OP_NO_SSLv3); + SSL_CTX_set_options(ssl_ctx_, SSL_OP_NO_TLSv1); + + // Enable ECDH + // https://en.wikipedia.org/wiki/Elliptic_curve_Diffie%E2%80%93Hellman + // https://wiki.openssl.org/index.php/Diffie_Hellman + // https://wiki.openssl.org/index.php/Diffie-Hellman_parameters + EC_KEY* ecdh = EC_KEY_new_by_curve_name(NID_X9_62_prime256v1); + if (!ecdh) { + LOG(WARNING) << "EC_KEY_new_by_curve_name(" << NID_X9_62_prime256v1 << ")"; + return -1; + } + + SSL_CTX_set_options(ssl_ctx_, SSL_OP_SINGLE_ECDH_USE); + SSL_CTX_set_tmp_ecdh(ssl_ctx_, ecdh); + EC_KEY_free(ecdh); + + security_ = true; + return 0; +} +#endif + +} // namespace net diff --git a/tools/pika_migrate/src/net/src/simple_http_conn.cc b/tools/pika_migrate/src/net/src/simple_http_conn.cc new file mode 100644 index 0000000000..8310f7e3d2 --- /dev/null +++ b/tools/pika_migrate/src/net/src/simple_http_conn.cc @@ -0,0 +1,454 @@ +// Copyright (c) 2015-present, Qihoo, Inc. All rights reserved. +// This source code is licensed under the BSD-style license found in the +// LICENSE file in the root directory of this source tree. An additional grant +// of patent rights can be found in the PATENTS file in the same directory. 
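// EnableSecurity() above pins the protocol floor by OR-ing
// SSL_OP_NO_SSLv2/SSLv3/TLSv1 into the context, the OpenSSL 1.0-era idiom;
// note that as written TLS 1.1 remains negotiable despite the
// "Force using TLS 1.2" comment. On OpenSSL 1.1.0+ the same intent is usually
// expressed with the min-version API; a sketch, assuming ctx was created as above:
#include <openssl/ssl.h>

static bool ForceTls12Floor(SSL_CTX* ctx) {
  // Replaces the three SSL_OP_NO_* options; returns true on success.
  return SSL_CTX_set_min_proto_version(ctx, TLS1_2_VERSION) == 1;
}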
+ +#include "net/include/simple_http_conn.h" +#include +#include +#include + +#include +#include + +#include "net/include/net_define.h" +#include "pstd/include/pstd_string.h" +#include "pstd/include/xdebug.h" + +namespace net { + +static const uint32_t kHTTPMaxMessage = 1024 * 1024 * 8; +static const uint32_t kHTTPMaxHeader = 1024 * 64; + +static const std::map http_status_map = { + {100, "Continue"}, + {101, "Switching Protocols"}, + {102, "Processing"}, + + {200, "OK"}, + {201, "Created"}, + {202, "Accepted"}, + {203, "Non-Authoritative Information"}, + {204, "No Content"}, + {205, "Reset Content"}, + {206, "Partial Content"}, + {207, "Multi-Status"}, + + {400, "Bad Request"}, + {401, "Unauthorized"}, + {402, ""}, // reserve + {403, "Forbidden"}, + {404, "Not Found"}, + {405, "Method Not Allowed"}, + {406, "Not Acceptable"}, + {407, "Proxy Authentication Required"}, + {408, "Request Timeout"}, + {409, "Conflict"}, + {416, "Requested Range not satisfiable"}, + + {500, "Internal Server Error"}, + {501, "Not Implemented"}, + {502, "Bad Gateway"}, + {503, "Service Unavailable"}, + {504, "Gateway Timeout"}, + {505, "HTTP Version Not Supported"}, + {506, "Variant Also Negotiates"}, + {507, "Insufficient Storage"}, + {508, "Bandwidth Limit Exceeded"}, + {509, "Not Extended"}, +}; + +Request::Request() : method("GET"), path("/index") {} + +inline int find_lf(const char* data, int size) { + const char* c = data; + int count = 0; + while (count < size) { + if (*c == '\n') { + break; + } + c++; + count++; + } + return count; +} + +bool Request::ParseHeadLine(const char* data, int line_start, int line_end, ParseStatus* parseStatus) { + std::string param_key; + std::string param_value; + for (int i = line_start; i <= line_end; i++) { + switch (*parseStatus) { + case kHeaderMethod: + if (data[i] != ' ') { + method.push_back(data[i]); + } else { + *parseStatus = kHeaderPath; + } + break; + case kHeaderPath: + if (data[i] != ' ') { + path.push_back(data[i]); + } else { + *parseStatus = kHeaderVersion; + } + break; + case kHeaderVersion: + if (data[i] != '\r' && data[i] != '\n') { + version.push_back(data[i]); + } else if (data[i] == '\n') { + *parseStatus = kHeaderParamKey; + } + break; + case kHeaderParamKey: + if (data[i] != ':' && data[i] != ' ') { + param_key.push_back(data[i]); + } else if (data[i] == ' ') { + *parseStatus = kHeaderParamValue; + } + break; + case kHeaderParamValue: + if (data[i] != '\r' && data[i] != '\n') { + param_value.push_back(data[i]); + } else if (data[i] == '\r') { + headers[pstd::StringToLower(param_key)] = param_value; + *parseStatus = kHeaderParamKey; + } + break; + + default: + return false; + } + } + return true; +} + +bool Request::ParseGetUrl() { + // Format path + if (path.find(headers["host"]) != std::string::npos && path.size() > (7 + headers["host"].size())) { + // http://www.xxx.xxx/path/to + path.assign(path.substr(7 + headers["host"].size())); + } + size_t n = path.find('?'); + if (n == std::string::npos) { + return true; // no parameter + } + if (!ParseParameters(path, n + 1)) { + return false; + } + path.resize(n); + return true; +} + +// Parse query parameter from GET url or POST application/x-www-form-urlencoded +// format: key1=value1&key2=value2&key3=value3 +bool Request::ParseParameters(const std::string& data, size_t line_start, bool from_url) { + size_t pre = line_start; + size_t mid; + size_t end; + while (pre < data.size()) { + mid = data.find('=', pre); + if (mid == std::string::npos) { + mid = data.size(); + } + end = data.find('&', pre); + if 
(end == std::string::npos) { + end = data.size(); + } + if (end <= mid) { + // empty value + if (from_url) { + query_params[data.substr(pre, end - pre)] = std::string(); + } else { + post_params[data.substr(pre, end - pre)] = std::string(); + } + pre = end + 1; + } else { + if (from_url) { + query_params[data.substr(pre, mid - pre)] = data.substr(mid + 1, end - mid - 1); + } else { + post_params[data.substr(pre, mid - pre)] = data.substr(mid + 1, end - mid - 1); + } + pre = end + 1; + } + } + return true; +} + +bool Request::ParseHeadFromArray(const char* data, const int size) { + int remain_size = size; + if (remain_size <= 5) { + return false; + } + + // Parse header line + int line_start = 0; + int line_end = 0; + ParseStatus parseStatus = kHeaderMethod; + while (remain_size > 4) { + line_end += find_lf(data + line_start, remain_size); + if (line_end < line_start) { + return false; + } + if (!ParseHeadLine(data, line_start, line_end, &parseStatus)) { + return false; + } + remain_size -= (line_end - line_start + 1); + line_start = ++line_end; + } + + // Parse query parameter from url + return ParseGetUrl(); +} + +bool Request::ParseBodyFromArray(const char* data, const int size) { + content.append(data, size); + if (method == "POST" && headers["content-type"] == "application/x-www-form-urlencoded") { + return ParseParameters(content, 0, false); + } + return true; +} + +void Request::Clear() { + version.clear(); + path.clear(); + method.clear(); + query_params.clear(); + post_params.clear(); + headers.clear(); + content.clear(); +} + +void Response::Clear() { + status_code_ = 0; + reason_phrase_.clear(); + headers_.clear(); + body_.clear(); +} + +// Return bytes actual be writen, should be less than size +int Response::SerializeHeaderToArray(char* data, size_t size) { + int serial_size = 0; + int ret; + + // Serialize statues line + ret = snprintf(data, size, "HTTP/1.1 %d %s\r\n", status_code_, reason_phrase_.c_str()); + if (ret < 0 || ret == static_cast(size)) { + return ret; + } + serial_size += ret; + + // Serialize header + if (headers_.find("Content-Length") == headers_.end()) { + SetHeaders("Content-Length", static_cast(body_.size())); + } + for (auto& line : headers_) { + ret = snprintf(data + serial_size, size - serial_size, "%s: %s\r\n", line.first.c_str(), line.second.c_str()); + if (ret < 0) { + return ret; + } + serial_size += ret; + if (serial_size == static_cast(size)) { + return serial_size; + } + } + + ret = snprintf(data + serial_size, size - serial_size, "\r\n"); + serial_size += ret; + return serial_size; +} + +// Serialize body begin from 'pos', return the new pos +int Response::SerializeBodyToArray(char* data, size_t size, int* pos) { + // Serialize body + size_t actual = size; + if (body_.size() - *pos < size) { + actual = body_.size() - *pos; + } + memcpy(data, body_.data() + *pos, actual); + *pos += static_cast(actual); + return static_cast(actual); +} + +void Response::SetStatusCode(int code) { + assert((code >= 100 && code <= 102) || (code >= 200 && code <= 207) || (code >= 400 && code <= 409) || + (code == 416) || (code >= 500 && code <= 509)); + status_code_ = code; + reason_phrase_.assign(http_status_map.at(code)); +} + +SimpleHTTPConn::SimpleHTTPConn(const int fd, const std::string& ip_port, Thread* thread) + : NetConn(fd, ip_port, thread) + { + rbuf_ = reinterpret_cast(malloc(sizeof(char) * kHTTPMaxMessage)); + wbuf_ = reinterpret_cast(malloc(sizeof(char) * kHTTPMaxMessage)); + request_ = new Request(); + response_ = new Response(); +} + 
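// Request::ParseParameters() above splits "k1=v1&k2=v2" by scanning for '='
// and '&', treating a segment whose next '&' precedes its '=' as a key with
// an empty value. A compact standalone equivalent (C++17, std::string_view);
// SplitQuery is a hypothetical helper, behavior-matched to the loop above:
#include <map>
#include <string>
#include <string_view>

static std::map<std::string, std::string> SplitQuery(std::string_view s) {
  std::map<std::string, std::string> out;
  size_t pre = 0;
  while (pre < s.size()) {
    size_t end = s.find('&', pre);
    if (end == std::string_view::npos) end = s.size();
    size_t mid = s.find('=', pre);
    if (mid == std::string_view::npos || mid >= end) {
      out[std::string(s.substr(pre, end - pre))] = "";  // key with empty value
    } else {
      out[std::string(s.substr(pre, mid - pre))] = std::string(s.substr(mid + 1, end - mid - 1));
    }
    pre = end + 1;  // step past the '&'
  }
  return out;
}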
+SimpleHTTPConn::~SimpleHTTPConn() { + free(rbuf_); + free(wbuf_); + delete request_; + delete response_; +} + +/* + * Build request_ + */ +bool SimpleHTTPConn::BuildRequestHeader() { + request_->Clear(); + if (!request_->ParseHeadFromArray(rbuf_, static_cast(header_len_))) { + return false; + } + auto iter = request_->headers.find("content-length"); + if (iter == request_->headers.end()) { + remain_packet_len_ = 0; + } else { + long tmp = 0; + if (pstd::string2int(iter->second.data(), iter->second.size(), &tmp) != 0) { + remain_packet_len_ = tmp; + } else { + remain_packet_len_ = 0; + } + } + + if (rbuf_pos_ > header_len_) { + remain_packet_len_ -= rbuf_pos_ - header_len_; + } + return true; +} + +bool SimpleHTTPConn::AppendRequestBody() { + return request_->ParseBodyFromArray(rbuf_ + header_len_, static_cast(rbuf_pos_ - header_len_)); +} + +void SimpleHTTPConn::HandleMessage() { + response_->Clear(); + DealMessage(request_, response_); + set_is_reply(true); +} + +ReadStatus SimpleHTTPConn::GetRequest() { + ssize_t nread = 0; + while (true) { + switch (conn_status_) { + case kHeader: { + nread = read(fd(), rbuf_ + rbuf_pos_, kHTTPMaxHeader - rbuf_pos_); + if (nread == -1 && errno == EAGAIN) { + return kReadHalf; + } else if (nread <= 0) { + return kReadClose; + } else { + rbuf_pos_ += nread; + // So that strstr will not parse the expire char + rbuf_[rbuf_pos_] = '\0'; + char* sep_pos = strstr(rbuf_, "\r\n\r\n"); + if (!sep_pos) { + break; + } + header_len_ = sep_pos - rbuf_ + 4; + if (!BuildRequestHeader()) { + return kReadError; + } + + std::string sign = request_->headers.count("expect") != 0U ? request_->headers.at("expect") : ""; + if (sign == "100-continue" || sign == "100-Continue") { + // Reply 100 Continue, then receive body + response_->Clear(); + response_->SetStatusCode(100); + set_is_reply(true); + conn_status_ = kPacket; + if (remain_packet_len_ > 0) { + return kReadHalf; + } + } + conn_status_ = kPacket; + } + break; + } + case kPacket: { + if (remain_packet_len_ > 0) { + nread = read( + fd(), rbuf_ + rbuf_pos_, + (kHTTPMaxMessage - rbuf_pos_ > remain_packet_len_) ? 
remain_packet_len_ : kHTTPMaxMessage - rbuf_pos_); + if (nread == -1 && errno == EAGAIN) { + return kReadHalf; + } else if (nread <= 0) { + return kReadClose; + } else { + rbuf_pos_ += nread; + remain_packet_len_ -= nread; + } + } + if (remain_packet_len_ == 0 || // no more content + rbuf_pos_ == kHTTPMaxMessage) { // buffer full + AppendRequestBody(); + if (remain_packet_len_ == 0) { + conn_status_ = kComplete; + } else { + rbuf_pos_ = header_len_ = 0; // read more packet content from begin + } + } + break; + } + case kComplete: { + HandleMessage(); + conn_status_ = kHeader; + rbuf_pos_ = 0; + return kReadAll; + } + default: { + return kReadError; + } + } + // else continue + } +} + +bool SimpleHTTPConn::FillResponseBuf() { + if (response_pos_ < 0) { + // Not ever serialize response header + int actual = response_->SerializeHeaderToArray(wbuf_ + wbuf_len_, kHTTPMaxMessage - wbuf_len_); + if (actual < 0) { + return false; + } + wbuf_len_ += actual; + response_pos_ = 0; // Serialize body next time + } + while (response_->HasMoreBody(response_pos_) && wbuf_len_ < kHTTPMaxMessage) { + // Has more body and more space in wbuf_ + wbuf_len_ += response_->SerializeBodyToArray(wbuf_ + wbuf_len_, kHTTPMaxMessage - wbuf_len_, &response_pos_); + } + return true; +} + +WriteStatus SimpleHTTPConn::SendReply() { + // Fill as more as content into the buf + if (!FillResponseBuf()) { + return kWriteError; + } + + ssize_t nwritten = 0; + while (wbuf_len_ > 0) { + nwritten = write(fd(), wbuf_ + wbuf_pos_, wbuf_len_ - wbuf_pos_); + if (nwritten == -1 && errno == EAGAIN) { + return kWriteHalf; + } else if (nwritten <= 0) { + return kWriteError; + } + wbuf_pos_ += nwritten; + if (wbuf_pos_ == wbuf_len_) { + // Send all in wbuf_ and Try to fill more + wbuf_len_ = 0; + wbuf_pos_ = 0; + if (!FillResponseBuf()) { + return kWriteError; + } + } + } + response_pos_ = -1; // fill header first next time + + return kWriteAll; +} + +} // namespace net diff --git a/tools/pika_migrate/src/net/src/thread_pool.cc b/tools/pika_migrate/src/net/src/thread_pool.cc new file mode 100644 index 0000000000..8e20694244 --- /dev/null +++ b/tools/pika_migrate/src/net/src/thread_pool.cc @@ -0,0 +1,167 @@ +// Copyright (c) 2018-present, Qihoo, Inc. All rights reserved. +// This source code is licensed under the BSD-style license found in the +// LICENSE file in the root directory of this source tree. An additional grant +// of patent rights can be found in the PATENTS file in the same directory. 
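// ThreadPool below bounds its task queue: Schedule() blocks on wsignal_ while
// queue_.size() has reached max_queue_size_, a worker notifies wsignal_ after
// popping, and rsignal_ wakes idle workers after each push. The same
// producer/consumer handshake in isolation; BoundedQueueSketch is a
// hypothetical name, not the class below:
#include <cstddef>
#include <condition_variable>
#include <mutex>
#include <queue>

template <typename T>
class BoundedQueueSketch {
 public:
  explicit BoundedQueueSketch(size_t cap) : cap_(cap) {}
  void Push(T v) {
    std::unique_lock<std::mutex> lk(mu_);
    not_full_.wait(lk, [&] { return q_.size() < cap_; });  // producer gate (wsignal_ role)
    q_.push(std::move(v));
    not_empty_.notify_one();  // wake one idle consumer (rsignal_ role)
  }
  T Pop() {
    std::unique_lock<std::mutex> lk(mu_);
    not_empty_.wait(lk, [&] { return !q_.empty(); });
    T v = std::move(q_.front());
    q_.pop();
    not_full_.notify_one();  // unblock one waiting producer
    return v;
  }

 private:
  size_t cap_;
  std::mutex mu_;
  std::queue<T> q_;
  std::condition_variable not_full_;
  std::condition_variable not_empty_;
};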
+ +#include "net/include/thread_pool.h" +#include "net/src/net_thread_name.h" + +#include + +#include +#include + +namespace net { + +void* ThreadPool::Worker::WorkerMain(void* arg) { + auto tp = static_cast(arg); + tp->runInThread(); + return nullptr; +} + +int ThreadPool::Worker::start() { + if (!start_.load()) { + if (pthread_create(&thread_id_, nullptr, &WorkerMain, thread_pool_) != 0) { + return -1; + } else { + start_.store(true); + std::string thread_id_str = std::to_string(reinterpret_cast(thread_id_)); + SetThreadName(thread_id_, thread_pool_->thread_pool_name() + "_Worker_" + thread_id_str); + } + } + return 0; +} + +int ThreadPool::Worker::stop() { + if (start_.load()) { + if (pthread_join(thread_id_, nullptr) != 0) { + return -1; + } else { + start_.store(false); + } + } + return 0; +} + +ThreadPool::ThreadPool(size_t worker_num, size_t max_queue_size, std::string thread_pool_name) + : worker_num_(worker_num), + max_queue_size_(max_queue_size), + thread_pool_name_(std::move(thread_pool_name)), + running_(false), + should_stop_(false) {} + +ThreadPool::~ThreadPool() { stop_thread_pool(); } + +int ThreadPool::start_thread_pool() { + if (!running_.load()) { + should_stop_.store(false); + for (size_t i = 0; i < worker_num_; ++i) { + workers_.push_back(new Worker(this)); + int res = workers_[i]->start(); + if (res != 0) { + return kCreateThreadError; + } + } + running_.store(true); + } + return kSuccess; +} + +int ThreadPool::stop_thread_pool() { + int res = 0; + if (running_.load()) { + should_stop_.store(true); + rsignal_.notify_all(); + wsignal_.notify_all(); + for (const auto worker : workers_) { + res = worker->stop(); + if (res != 0) { + break; + } else { + delete worker; + } + } + workers_.clear(); + running_.store(false); + } + return res; +} + +bool ThreadPool::should_stop() { return should_stop_.load(); } + +void ThreadPool::set_should_stop() { should_stop_.store(true); } + +void ThreadPool::Schedule(TaskFunc func, void* arg) { + std::unique_lock lock(mu_); + wsignal_.wait(lock, [this]() { return queue_.size() < max_queue_size_ || should_stop(); }); + + if (!should_stop()) { + queue_.emplace(func, arg); + rsignal_.notify_one(); + } +} + +/* + * timeout is in millisecond + */ +void ThreadPool::DelaySchedule(uint64_t timeout, TaskFunc func, void* arg) { + auto now = std::chrono::system_clock::now(); + uint64_t unow = std::chrono::duration_cast(now.time_since_epoch()).count(); + uint64_t exec_time = unow + timeout * 1000; + + std::lock_guard lock(mu_); + if (!should_stop()) { + time_queue_.emplace(exec_time, func, arg); + rsignal_.notify_all(); + } +} + +size_t ThreadPool::max_queue_size() { return max_queue_size_; } + +void ThreadPool::cur_queue_size(size_t* qsize) { + std::lock_guard lock(mu_); + *qsize = queue_.size(); +} + +void ThreadPool::cur_time_queue_size(size_t* qsize) { + std::lock_guard lock(mu_); + *qsize = time_queue_.size(); +} + +std::string ThreadPool::thread_pool_name() { return thread_pool_name_; } + +void ThreadPool::runInThread() { + while (!should_stop()) { + std::unique_lock lock(mu_); + rsignal_.wait(lock, [this]() { return !queue_.empty() || !time_queue_.empty() || should_stop(); }); + + if (should_stop()) { + break; + } + if (!time_queue_.empty()) { + auto now = std::chrono::system_clock::now(); + uint64_t unow = std::chrono::duration_cast(now.time_since_epoch()).count(); + + auto [exec_time, func, arg] = time_queue_.top(); + if (unow >= exec_time) { + time_queue_.pop(); + lock.unlock(); + (*func)(arg); + continue; + } else if (queue_.empty() && 
!should_stop()) { + rsignal_.wait_for(lock, std::chrono::microseconds(exec_time - unow)); + lock.unlock(); + continue; + } + } + + if (!queue_.empty()) { + auto [func, arg] = queue_.front(); + queue_.pop(); + wsignal_.notify_one(); + lock.unlock(); + (*func)(arg); + } + } +} +} // namespace net diff --git a/tools/pika_migrate/src/net/src/worker_thread.cc b/tools/pika_migrate/src/net/src/worker_thread.cc new file mode 100644 index 0000000000..c4735f46b4 --- /dev/null +++ b/tools/pika_migrate/src/net/src/worker_thread.cc @@ -0,0 +1,359 @@ +// Copyright (c) 2015-present, Qihoo, Inc. All rights reserved. +// This source code is licensed under the BSD-style license found in the +// LICENSE file in the root directory of this source tree. An additional grant +// of patent rights can be found in the PATENTS file in the same directory. + +#include + +#include + +#include "net/src/worker_thread.h" +#include "pstd/include/testutil.h" + +#include "dispatch_thread.h" +#include "net/include/net_conn.h" +#include "net/src/net_item.h" + +namespace net { + +WorkerThread::WorkerThread(ConnFactory* conn_factory, ServerThread* server_thread, int queue_limit, int cron_interval) + : + server_thread_(server_thread), + conn_factory_(conn_factory), + cron_interval_(cron_interval), + keepalive_timeout_(kDefaultKeepAliveTime) { + /* + * install the protobuf handler here + */ + net_multiplexer_.reset(CreateNetMultiplexer(queue_limit)); + net_multiplexer_->Initialize(); +} + +WorkerThread::~WorkerThread() = default; + +int WorkerThread::conn_num() const { + std::shared_lock lock(rwlock_); + return static_cast(conns_.size()); +} + +std::vector WorkerThread::conns_info() const { + std::vector result; + std::shared_lock lock(rwlock_); + for (auto& conn : conns_) { + result.push_back({conn.first, conn.second->ip_port(), conn.second->last_interaction()}); + } + return result; +} + +std::shared_ptr WorkerThread::MoveConnOut(int fd) { + std::lock_guard lock(rwlock_); + if (auto iter = conns_.find(fd); iter != conns_.end()) { + int fd = iter->first; + auto conn = iter->second; + net_multiplexer_->NetDelEvent(fd, 0); + DLOG(INFO) << "move out connection " << conn->String(); + conns_.erase(iter); + return conn; + } else { + return nullptr; + } +} + +bool WorkerThread::MoveConnIn(const std::shared_ptr& conn, const NotifyType& notify_type, bool force) { + NetItem it(conn->fd(), conn->ip_port(), notify_type); + bool success = MoveConnIn(it, force); + if (success) { + std::lock_guard lock(rwlock_); + conns_[conn->fd()] = conn; + } + return success; +} + +bool WorkerThread::MoveConnIn(const NetItem& it, bool force) { return net_multiplexer_->Register(it, force); } + +void* WorkerThread::ThreadMain() { + int nfds; + NetFiredEvent* pfe = nullptr; + char bb[2048]; + NetItem ti; + + + struct timeval when; + gettimeofday(&when, nullptr); + struct timeval now = when; + + when.tv_sec += (cron_interval_ / 1000); + when.tv_usec += ((cron_interval_ % 1000) * 1000); + int timeout = cron_interval_; + if (timeout <= 0) { + timeout = NET_CRON_INTERVAL; + } + + while (!should_stop()) { + if (cron_interval_ > 0) { + gettimeofday(&now, nullptr); + if (when.tv_sec > now.tv_sec || (when.tv_sec == now.tv_sec && when.tv_usec > now.tv_usec)) { + timeout = static_cast((when.tv_sec - now.tv_sec) * 1000 + (when.tv_usec - now.tv_usec) / 1000); + } else { + DoCronTask(); + when.tv_sec = now.tv_sec + (cron_interval_ / 1000); + when.tv_usec = now.tv_usec + ((cron_interval_ % 1000) * 1000); + timeout = cron_interval_; + } + } + + nfds = 
net_multiplexer_->NetPoll(timeout); + + for (int i = 0; i < nfds; i++) { + pfe = (net_multiplexer_->FiredEvents()) + i; + if (!pfe) { + continue; + } + if (pfe->fd == net_multiplexer_->NotifyReceiveFd()) { + if ((pfe->mask & kReadable) != 0) { + auto nread = static_cast(read(net_multiplexer_->NotifyReceiveFd(), bb, 2048)); + if (nread == 0) { + continue; + } else { + for (int32_t idx = 0; idx < nread; ++idx) { + NetItem ti = net_multiplexer_->NotifyQueuePop(); + if (ti.notify_type() == kNotiConnect) { + std::shared_ptr tc = conn_factory_->NewNetConn(ti.fd(), ti.ip_port(), server_thread_, + private_data_, net_multiplexer_.get()); + if (!tc || !tc->SetNonblock()) { + continue; + } + +#ifdef __ENABLE_SSL + // Create SSL failed + if (server_thread_->security() && !tc->CreateSSL(server_thread_->ssl_ctx())) { + CloseFd(tc); + continue; + } +#endif + + { + std::lock_guard lock(rwlock_); + conns_[ti.fd()] = tc; + } + net_multiplexer_->NetAddEvent(ti.fd(), kReadable); + } else if (ti.notify_type() == kNotiClose) { + // should close? + } else if (ti.notify_type() == kNotiEpollout) { + net_multiplexer_->NetModEvent(ti.fd(), 0, kWritable); + } else if (ti.notify_type() == kNotiEpollin) { + net_multiplexer_->NetModEvent(ti.fd(), 0, kReadable); + } else if (ti.notify_type() == kNotiEpolloutAndEpollin) { + net_multiplexer_->NetModEvent(ti.fd(), 0, kReadable | kWritable); + } else if (ti.notify_type() == kNotiWait) { + // do not register events + net_multiplexer_->NetAddEvent(ti.fd(), 0); + } + } + } + } else { + continue; + } + } else { + std::shared_ptr in_conn = nullptr; + int should_close = 0; + + { + std::shared_lock lock(rwlock_); + if (auto iter = conns_.find(pfe->fd); iter == conns_.end()) { + net_multiplexer_->NetDelEvent(pfe->fd, 0); + continue; + } else { + in_conn = iter->second; + } + } + + if (((pfe->mask & kWritable) != 0) && in_conn->is_reply()) { + WriteStatus write_status = in_conn->SendReply(); + in_conn->set_last_interaction(now); + if (write_status == kWriteAll) { + net_multiplexer_->NetModEvent(pfe->fd, 0, kReadable); + in_conn->set_is_reply(false); + if (in_conn->IsClose()) { + should_close = 1; + LOG(INFO) << "will close client connection " << in_conn->String(); + } + } else if (write_status == kWriteHalf) { + continue; + } else { + should_close = 1; + } + } + + if ((should_close == 0) && ((pfe->mask & kReadable) != 0)) { + ReadStatus read_status = in_conn->GetRequest(); + in_conn->set_last_interaction(now); + if (read_status == kReadAll) { + net_multiplexer_->NetModEvent(pfe->fd, 0, 0); + // Wait for the conn complete asynchronous task and + // Mod Event to kWritable + } else if (read_status == kReadHalf) { + continue; + } else { + should_close = 1; + } + } + + if (((pfe->mask & kErrorEvent) != 0) || (should_close != 0)) { + net_multiplexer_->NetDelEvent(pfe->fd, 0); + CloseFd(in_conn); + in_conn = nullptr; + { + std::lock_guard lock(rwlock_); + conns_.erase(pfe->fd); + } + should_close = 0; + } + } // connection event + } // for (int i = 0; i < nfds; i++) + } // while (!should_stop()) + + Cleanup(); + return nullptr; +} + +void WorkerThread::DoCronTask() { + struct timeval now; + gettimeofday(&now, nullptr); + std::vector> to_close; + std::vector> to_timeout; + { + std::lock_guard lock(rwlock_); + + // Check whether close all connection + std::lock_guard kl(killer_mutex_); + if (deleting_conn_ipport_.count(kKillAllConnsTask) != 0U) { + for (auto& conn : conns_) { + to_close.push_back(conn.second); + } + conns_.clear(); + deleting_conn_ipport_.clear(); + } + + auto iter = 
conns_.begin(); + while (iter != conns_.end()) { + std::shared_ptr conn = iter->second; + // Check connection should be closed + if (deleting_conn_ipport_.count(conn->ip_port()) != 0U) { + to_close.push_back(conn); + deleting_conn_ipport_.erase(conn->ip_port()); + iter = conns_.erase(iter); + LOG(INFO) << "will close client connection " << conn->String(); + continue; + } + + // Check keepalive timeout connection + if (keepalive_timeout_ > 0 && (now.tv_sec - conn->last_interaction().tv_sec > keepalive_timeout_)) { + auto dispatchThread = dynamic_cast(server_thread_); + std::shared_lock blrpop_map_latch(dispatchThread->GetBlockMtx()); + // check if this conn is blocked by blpop/brpop + if (dispatchThread->GetMapFromConnToKeys().find(conn->fd()) != + dispatchThread->GetMapFromConnToKeys().end()) { + //this conn is blocked, prolong it's life time. + conn->set_last_interaction(now); + } else { + to_timeout.push_back(conn); + iter = conns_.erase(iter); + LOG(INFO) << "connection " << conn->String() << " keepalive timeout, the keepalive_timeout_ is " + << keepalive_timeout_.load(); + continue; + } + } + + // Maybe resize connection buffer + conn->TryResizeBuffer(); + ++iter; + } + } + /* + * How Do we kill a conn correct: + * stage 1: stop accept new request(also give up the write back of shooting request's response) + * 1.1 remove the fd from epoll and erase it from conns_ to ensure no more request will submit to threadpool + * 1.2 add to-close-conn to wait_to_close_conns_ + * stage 2: ensure there's no other shared_ptr of this conn in pika + * 2.1 in async task that exec by TheadPool, a shared_ptr of conn will hold and my case a pipe event to tell the epoll + * to back the response, we must ensure this notification is done before we really close fd(linux will reuse the fd to accept new conn) + * 2.2 we must clear all other shared_ptr of this to-close-conn, like the map of blpop/brpop and the map of watchkeys + * 2.3 for those to-close-conns that ref count drop to 1, we add them to ready-to-close-conns_ + * stage 3: after an epoll cycle(let it handle the already-invalid-writeback-notification ), we can safely close the fds of ready_to_close_conns_ + */ + + for (auto& conn : ready_to_close_conns_) { + close(conn->fd()); + server_thread_->handle_->FdClosedHandle(conn->fd(), conn->ip_port()); + } + ready_to_close_conns_.clear(); + + for (auto conn = wait_to_close_conns_.begin(); conn != wait_to_close_conns_.end();) { + if (conn->use_count() == 1) { + ready_to_close_conns_.push_back(*conn); + conn = wait_to_close_conns_.erase(conn); + } else { + ++conn; + } + } + + for (const auto& conn : to_close) { + net_multiplexer_->NetDelEvent(conn->fd(), 0); + ClearConnsRefAndOtherInfo(conn); + wait_to_close_conns_.push_back(conn); + } + for (const auto& conn : to_timeout) { + net_multiplexer_->NetDelEvent(conn->fd(), 0); + ClearConnsRefAndOtherInfo(conn); + wait_to_close_conns_.push_back(conn); + server_thread_->handle_->FdTimeoutHandle(conn->fd(), conn->ip_port()); + } +} + +void WorkerThread::ClearConnsRefAndOtherInfo(const std::shared_ptr& conn) { + if (auto dispatcher = dynamic_cast(server_thread_); dispatcher != nullptr ) { + //check if this conn disconnected from being blocked by blpop/brpop + dispatcher->ClosingConnCheckForBlrPop(std::dynamic_pointer_cast(conn)); + dispatcher->RemoveWatchKeys(conn); + } +} + +bool WorkerThread::TryKillConn(const std::string& ip_port) { + bool find = false; + if (ip_port != kKillAllConnsTask) { + std::shared_lock l(rwlock_); + for (auto& [_, conn] : conns_) { + if 
(conn->ip_port() == ip_port) { + find = true; + break; + } + } + } + if (find || ip_port == kKillAllConnsTask) { + std::lock_guard l(killer_mutex_); + deleting_conn_ipport_.insert(ip_port); + return true; + } + return false; +} + +void WorkerThread::CloseFd(const std::shared_ptr& conn) { + ClearConnsRefAndOtherInfo(conn); + close(conn->fd()); + server_thread_->handle_->FdClosedHandle(conn->fd(), conn->ip_port()); +} + +void WorkerThread::Cleanup() { + std::map> to_close; + { + std::lock_guard l(rwlock_); + to_close = std::move(conns_); + conns_.clear(); + } + for (const auto& iter : to_close) { + CloseFd(iter.second); + } +} + +}; // namespace net diff --git a/tools/pika_migrate/src/net/src/worker_thread.h b/tools/pika_migrate/src/net/src/worker_thread.h new file mode 100644 index 0000000000..47bab0091a --- /dev/null +++ b/tools/pika_migrate/src/net/src/worker_thread.h @@ -0,0 +1,87 @@ +// Copyright (c) 2015-present, Qihoo, Inc. All rights reserved. +// This source code is licensed under the BSD-style license found in the +// LICENSE file in the root directory of this source tree. An additional grant +// of patent rights can be found in the PATENTS file in the same directory. + +#ifndef NET_SRC_WORKER_THREAD_H_ +#define NET_SRC_WORKER_THREAD_H_ + +#include +#include +#include +#include +#include +#include + +#include "pstd/include/pstd_mutex.h" +#include "pstd/include/xdebug.h" +#include "net/include/net_define.h" +#include "net/include/net_thread.h" +#include "net/include/server_thread.h" +#include "net/src/net_multiplexer.h" +#include "net/src/dispatch_thread.h" +namespace net { + +class NetItem; +class NetFiredEvent; +class NetConn; +class ConnFactory; + +class WorkerThread : public Thread { + public: + explicit WorkerThread(ConnFactory* conn_factory, ServerThread* server_thread, int queue_limit, int cron_interval = 0); + + ~WorkerThread() override; + + void set_keepalive_timeout(int timeout) { keepalive_timeout_ = timeout; } + + int conn_num() const; + + std::vector conns_info() const; + + std::shared_ptr MoveConnOut(int fd); + + bool MoveConnIn(const std::shared_ptr& conn, const NotifyType& notify_type, bool force); + + bool MoveConnIn(const NetItem& it, bool force); + + NetMultiplexer* net_multiplexer() { return net_multiplexer_.get(); } + bool TryKillConn(const std::string& ip_port); + + void ClearConnsRefAndOtherInfo(const std::shared_ptr& conn); + + ServerThread* GetServerThread() { return server_thread_; } + + mutable pstd::RWMutex rwlock_; /* For external statistics */ + std::map> conns_; + std::vector> wait_to_close_conns_; + std::vector> ready_to_close_conns_; + + + void* private_data_ = nullptr; + + private: + ServerThread* server_thread_ = nullptr; + ConnFactory* conn_factory_ = nullptr; + int cron_interval_ = 0; + + /* + * The epoll handler + */ + std::unique_ptr net_multiplexer_; + + std::atomic keepalive_timeout_; // keepalive second + + void* ThreadMain() override; + void DoCronTask(); + + pstd::Mutex killer_mutex_; + std::set deleting_conn_ipport_; + + // clean conns + void CloseFd(const std::shared_ptr& conn); + void Cleanup(); +}; // class WorkerThread + +} // namespace net +#endif // NET_SRC_WORKER_THREAD_H_ diff --git a/tools/pika_migrate/src/net/test/CMakeLists.txt b/tools/pika_migrate/src/net/test/CMakeLists.txt new file mode 100644 index 0000000000..32a528095d --- /dev/null +++ b/tools/pika_migrate/src/net/test/CMakeLists.txt @@ -0,0 +1,36 @@ +cmake_minimum_required(VERSION 3.18) + +include(GoogleTest) +aux_source_directory(../src DIR_SRCS) 
+set(CMAKE_CXX_STANDARD 17) + +file(GLOB_RECURSE NET_TEST_SOURCE "${PROJECT_SOURCE_DIR}/test/*.cc") + + +foreach(net_test_source ${NET_TEST_SOURCE}) + get_filename_component(net_test_filename ${net_test_source} NAME) + string(REPLACE ".cc" "" net_test_name ${net_test_filename}) + + + add_executable(${net_test_name} ${net_test_source}) + target_include_directories(${net_test_name} + PUBLIC ${PROJECT_SOURCE_DIR}/include + PUBLIC ${PROJECT_SOURCE_DIR}/.. + ${ROCKSDB_INCLUDE_DIR} + ${ROCKSDB_SOURCE_DIR} + ) + add_dependencies(${net_test_name} net gtest glog gflags ${LIBUNWIND_NAME} pstd) + target_link_libraries(${net_test_name} + PUBLIC net + PUBLIC ${GTEST_LIBRARY} + PUBLIC ${GLOG_LIBRARY} + PUBLIC ${GFLAGS_LIBRARY} + PUBLIC ${LIBUNWIND_LIBRARY} + PUBLIC ${GMOCK_LIBRARY} + PUBLIC ${GTEST_MAIN_LIBRARY} + PUBLIC pstd + ) + add_test(NAME ${net_test_name} + COMMAND ${net_test_name} + WORKING_DIRECTORY .) +endforeach() \ No newline at end of file diff --git a/tools/pika_migrate/src/net/test/net_thread_test.cc b/tools/pika_migrate/src/net/test/net_thread_test.cc new file mode 100644 index 0000000000..0859dbc085 --- /dev/null +++ b/tools/pika_migrate/src/net/test/net_thread_test.cc @@ -0,0 +1,45 @@ +// Copyright (c) 2015-present, Qihoo, Inc. All rights reserved. +// This source code is licensed under the BSD-style license found in the +// LICENSE file in the root directory of this source tree. An additional grant +// of patent rights can be found in the PATENTS file in the same directory. + +#include "net/include/net_thread.h" + +#include +#include + +#include "gmock/gmock.h" + +using ::testing::AtLeast; +using ::testing::Invoke; + +class MockThread : public net::Thread { + public: + MOCK_METHOD0(ThreadMain, void*()); + + void* thread_loop() { + while (!should_stop()) { + usleep(500); + } + return nullptr; + } +}; + +TEST(NetThreadTest, ThreadOps) { + MockThread t; + EXPECT_CALL(t, ThreadMain()).Times(AtLeast(1)); + + ON_CALL(t, ThreadMain()).WillByDefault(Invoke(&t, &MockThread::thread_loop)); + + EXPECT_EQ(0, t.StartThread()); + + EXPECT_EQ(true, t.is_running()); + + EXPECT_EQ(false, t.should_stop()); + + EXPECT_EQ(0, t.StopThread()); + + EXPECT_EQ(true, t.should_stop()); + + EXPECT_EQ(false, t.is_running()); +} diff --git a/tools/pika_migrate/src/pika.cc b/tools/pika_migrate/src/pika.cc new file mode 100644 index 0000000000..a530e3fbda --- /dev/null +++ b/tools/pika_migrate/src/pika.cc @@ -0,0 +1,258 @@ +// Copyright (c) 2015-present, Qihoo, Inc. All rights reserved. +// This source code is licensed under the BSD-style license found in the +// LICENSE file in the root directory of this source tree. An additional grant +// of patent rights can be found in the PATENTS file in the same directory. 
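// main() below raises RLIMIT_NOFILE to maxclients() + PIKA_MIN_RESERVED_FDS
// before the server starts, since every client connection costs a descriptor.
// The adjustment in isolation; EnsureFdLimit is a hypothetical helper, and the
// real code logs through glog rather than returning a bool:
#include <sys/resource.h>

static bool EnsureFdLimit(rlim_t wanted) {
  rlimit limit{};
  if (getrlimit(RLIMIT_NOFILE, &limit) == -1) {
    return false;
  }
  if (limit.rlim_cur >= wanted) {
    return true;  // current soft limit already suffices
  }
  limit.rlim_cur = wanted;
  if (limit.rlim_max < wanted) {
    limit.rlim_max = wanted;  // raising the hard limit may require privilege
  }
  return setrlimit(RLIMIT_NOFILE, &limit) == 0;
}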
+ +#include +#include +#include +#include + +#include "net/include/net_stats.h" +#include "pstd/include/pika_codis_slot.h" +#include "include/pika_define.h" +#include "pstd/include/pstd_defer.h" +#include "include/pika_conf.h" +#include "pstd/include/env.h" +#include "include/pika_cmd_table_manager.h" +#include "include/pika_slot_command.h" +#include "include/build_version.h" +#include "include/pika_command.h" +#include "include/pika_server.h" +#include "include/pika_version.h" +#include "include/pika_rm.h" + +std::unique_ptr g_pika_conf; +// todo : change to unique_ptr will coredump +PikaServer* g_pika_server = nullptr; +std::unique_ptr g_pika_rm; + +std::unique_ptr g_pika_cmd_table_manager; + +extern std::unique_ptr g_network_statistic; + +static void version() { + char version[32]; + snprintf(version, sizeof(version), "%d.%d.%d", PIKA_MAJOR, PIKA_MINOR, PIKA_PATCH); + std::cout << "-----------Pika server----------" << std::endl; + std::cout << "pika_version: " << version << std::endl; + std::cout << pika_build_git_sha << std::endl; + std::cout << "pika_build_compile_date: " << pika_build_compile_date << std::endl; + // fake version for client SDK + std::cout << "redis_version: " << version << std::endl; +} + +static void PrintPikaLogo() { + printf(" ............. .... ..... ..... ..... \n" + " ################# #### ##### ##### ####### \n" + " #### ##### #### ##### ##### ######### \n" + " #### ##### #### ##### ##### #### ##### \n" + " #### ##### #### ##### ##### #### ##### \n" + " ################ #### ##### ##### #### ##### \n" + " #### #### ##### ##### ################# \n" + " #### #### ##### ###### ##### ##### \n" + " #### #### ##### ###### ##### ##### \n"); +} + +static void PikaConfInit(const std::string& path) { + printf("path : %s\n", path.c_str()); + g_pika_conf = std::make_unique(path); + if (g_pika_conf->Load() != 0) { + LOG(FATAL) << "pika load conf error"; + } + version(); + printf("-----------Pika config list----------\n"); + g_pika_conf->DumpConf(); + PrintPikaLogo(); + printf("-----------Pika config end----------\n"); +} + +static void PikaGlogInit() { + if (!pstd::FileExists(g_pika_conf->log_path())) { + pstd::CreatePath(g_pika_conf->log_path()); + } + + if (!g_pika_conf->daemonize()) { + FLAGS_alsologtostderr = true; + } + FLAGS_log_dir = g_pika_conf->log_path(); + FLAGS_minloglevel = 0; + FLAGS_max_log_size = 1800; + FLAGS_logbufsecs = 0; + ::google::InitGoogleLogging("pika"); +} + +static void daemonize() { + if (fork()) { + exit(0); /* parent exits */ + } + setsid(); /* create a new session */ +} + +static void close_std() { + int fd; + if ((fd = open("/dev/null", O_RDWR, 0)) != -1) { + dup2(fd, STDIN_FILENO); + dup2(fd, STDOUT_FILENO); + dup2(fd, STDERR_FILENO); + close(fd); + } +} + +static void create_pid_file() { + /* Try to write the pid file in a best-effort way. 
*/ + std::string path(g_pika_conf->pidfile()); + + size_t pos = path.find_last_of('/'); + if (pos != std::string::npos) { + pstd::CreateDir(path.substr(0, pos)); + } else { + path = kPikaPidFile; + } + + FILE* fp = fopen(path.c_str(), "w"); + if (fp) { + fprintf(fp, "%d\n", static_cast(getpid())); + fclose(fp); + } +} + +static void IntSigHandle(const int sig) { + LOG(INFO) << "Catch Signal " << sig << ", cleanup..."; + g_pika_server->Exit(); +} + +static void PikaSignalSetup() { + signal(SIGHUP, SIG_IGN); + signal(SIGPIPE, SIG_IGN); + signal(SIGINT, &IntSigHandle); + signal(SIGQUIT, &IntSigHandle); + signal(SIGTERM, &IntSigHandle); +} + +static void usage() { + char version[32]; + snprintf(version, sizeof(version), "%d.%d.%d", PIKA_MAJOR, PIKA_MINOR, PIKA_PATCH); + fprintf(stderr, + "Pika module %s\n" + "usage: pika [-hv] [-c conf/file]\n" + "\t-h -- show this help\n" + "\t-c conf/file -- config file \n" + "\t-v -- show version\n" + " example: ./output/bin/pika -c ./conf/pika.conf\n", + version); +} + +int main(int argc, char* argv[]) { + if (argc != 2 && argc != 3) { + usage(); + exit(-1); + } + + bool path_opt = false; + signed char c; + char path[1024]; + while (-1 != (c = static_cast(getopt(argc, argv, "c:hv")))) { + switch (c) { + case 'c': + snprintf(path, 1024, "%s", optarg); + path_opt = true; + break; + case 'h': + usage(); + return 0; + case 'v': + version(); + return 0; + default: + usage(); + return 0; + } + } + + if (!path_opt) { + fprintf(stderr, "Please specify the conf file path\n"); + usage(); + exit(-1); + } + g_pika_cmd_table_manager = std::make_unique(); + g_pika_cmd_table_manager->InitCmdTable(); + PikaConfInit(path); + + rlimit limit; + rlim_t maxfiles = g_pika_conf->maxclients() + PIKA_MIN_RESERVED_FDS; + if (getrlimit(RLIMIT_NOFILE, &limit) == -1) { + LOG(WARNING) << "getrlimit error: " << strerror(errno); + } else if (limit.rlim_cur < maxfiles) { + rlim_t old_limit = limit.rlim_cur; + limit.rlim_cur = maxfiles; + limit.rlim_max = maxfiles; + if (setrlimit(RLIMIT_NOFILE, &limit) != -1) { + LOG(WARNING) << "your 'limit -n ' of " << old_limit + << " is not enough for Redis to start. pika have successfully reconfig it to " << limit.rlim_cur; + } else { + LOG(FATAL) << "your 'limit -n ' of " << old_limit + << " is not enough for Redis to start. 
pika can not reconfig it(" << strerror(errno) + << "), do it by yourself"; + } + } + + // daemonize if needed + if (g_pika_conf->daemonize()) { + daemonize(); + create_pid_file(); + } + + PikaGlogInit(); + PikaSignalSetup(); + + LOG(INFO) << "Server at: " << path; + g_pika_server = new PikaServer(); + g_pika_rm = std::make_unique(); + g_network_statistic = std::make_unique(); + g_pika_server->InitDBStruct(); + //the cmd table of g_pika_cmd_table_manager must be inited before calling PikaServer::InitStatistic(CmdTable* ) + g_pika_server->InitStatistic(g_pika_cmd_table_manager->GetCmdTable()); + auto status = g_pika_server->InitAcl(); + if (!status.ok()) { + LOG(FATAL) << status.ToString(); + } + + if (g_pika_conf->daemonize()) { + close_std(); + } + + DEFER { + delete g_pika_server; + g_pika_server = nullptr; + g_pika_rm.reset(); + g_pika_cmd_table_manager.reset(); + g_network_statistic.reset(); + ::google::ShutdownGoogleLogging(); + g_pika_conf.reset(); + }; + + // wash data if necessary + if (g_pika_conf->wash_data()) { + auto dbs = g_pika_server->GetDB(); + for (auto& kv : dbs) { + if (!kv.second->WashData()) { + LOG(FATAL) << "write batch error in WashData"; + return 1; + } + } + } + + g_pika_rm->Start(); + g_pika_server->Start(); + + if (g_pika_conf->daemonize()) { + unlink(g_pika_conf->pidfile().c_str()); + } + + // stop PikaReplicaManager first,avoid internal threads + // may reference to dead PikaServer + g_pika_rm->Stop(); + + return 0; +} diff --git a/tools/pika_migrate/src/pika_acl.cc b/tools/pika_migrate/src/pika_acl.cc new file mode 100644 index 0000000000..b6fe3375b7 --- /dev/null +++ b/tools/pika_migrate/src/pika_acl.cc @@ -0,0 +1,328 @@ +// Copyright (c) 2015-present, Qihoo, Inc. All rights reserved. +// This source code is licensed under the BSD-style license found in the +// LICENSE file in the root directory of this source tree. An additional grant +// of patent rights can be found in the PATENTS file in the same directory. 
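Before moving on: `main()` above raises `RLIMIT_NOFILE` to `maxclients + PIKA_MIN_RESERVED_FDS` so the server cannot run out of descriptors under load. A stripped-down sketch of that check-then-raise pattern (illustrative constants and helper name, not pika's configuration values):

```cpp
#include <sys/resource.h>

#include <cstdio>

// Try to raise the open-file soft limit to at least `needed` descriptors.
// Returns true if the limit already suffices or was raised successfully.
static bool EnsureFdLimit(rlim_t needed) {
  rlimit lim{};
  if (getrlimit(RLIMIT_NOFILE, &lim) == -1) return false;
  if (lim.rlim_cur >= needed) return true;           // nothing to do
  lim.rlim_cur = needed;
  if (lim.rlim_max < needed) lim.rlim_max = needed;  // may require privileges
  return setrlimit(RLIMIT_NOFILE, &lim) == 0;
}

int main() {
  const rlim_t kMaxClients = 10000, kReservedFds = 32;  // illustrative numbers
  if (!EnsureFdLimit(kMaxClients + kReservedFds)) {
    std::fprintf(stderr, "could not raise RLIMIT_NOFILE; adjust `ulimit -n`\n");
  }
  return 0;
}
```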
+ +#include + +#include "include/pika_acl.h" +#include "include/pika_client_conn.h" +#include "include/pika_cmd_table_manager.h" +#include "include/pika_command.h" + +const static int AclGenPassMaxBit = 4096; + +extern std::unique_ptr g_pika_cmd_table_manager; + +void PikaAclCmd::Do() { + if (subCmd_ == "cat") { + Cat(); + } else if (subCmd_ == "deluser") { + DelUser(); + } else if (subCmd_ == "dryrun") { + DryRun(); + } else if (subCmd_ == "genpass") { + GenPass(); + } else if (subCmd_ == "getuser") { + GetUser(); + } else if (subCmd_ == "list") { + List(); + } else if (subCmd_ == "load") { + Load(); + } else if (subCmd_ == "log") { + Log(); + } else if (subCmd_ == "save") { + Save(); + } else if (subCmd_ == "setuser") { + SetUser(); + } else if (subCmd_ == "users") { + Users(); + } else if (subCmd_ == "whoami") { + WhoAmI(); + } else if (subCmd_ == "help") { + Help(); + } else { + res_.SetRes(CmdRes::kSyntaxErr, KCmdNameAcl); + return; + } +} + +void PikaAclCmd::DoInitial() { + if (!CheckArg(argv_.size())) { + res_.SetRes(CmdRes::kWrongNum, KCmdNameAcl); + return; + } + + subCmd_ = argv_[1]; + pstd::StringToLower(subCmd_); + + if (argv_.size() < 3) { + if (subCmd_ == "setuser" || subCmd_ == "deluser" || subCmd_ == "getuser") { + res_.SetRes(CmdRes::kWrongNum, fmt::format("'acl|{}'", subCmd_)); + return; + } + } + + if (subCmd_ == "dryrun" && argv_.size() < 4) { + res_.SetRes(CmdRes::kWrongNum, "'acl|dryrun'"); + return; + } + if (subCmd_ == "log" && argv_.size() != 2 && argv_.size() != 3) { + res_.SetRes(CmdRes::kWrongNum, "'acl|log'"); + return; + } + + if (subCmd_ == "save" || subCmd_ == "load") { + if (g_pika_conf->acl_file().empty()) { + res().SetRes(CmdRes::kErrOther, + "This Pika is not configured to use an ACL file. You may want to specify users via the " + "ACL SETUSER command and then issue a CONFIG REWRITE (assuming you have a Redis configuration file " + "set) in order to store users in the Pika configuration."); + return; + } + } +} + +void PikaAclCmd::Cat() { + if (argv_.size() > 3) { + res().SetRes(CmdRes::kErrOther, "unknown subcommand or wrong number of arguments for 'CAT'"); + return; + } + if (argv_.size() == 2) { + res().AppendStringVector(Acl::GetAllCategoryName()); + return; + } + auto category = Acl::GetCommandCategoryFlagByName(argv_[2]); + if (category == 0) { + res().SetRes(CmdRes::kErrOther, fmt::format("Unknown category '{}'", argv_[2])); + return; + } + res().AppendStringVector(g_pika_cmd_table_manager->GetAclCategoryCmdNames(category)); +} + +void PikaAclCmd::DelUser() { + for (auto it = argv_.begin() + 2; it != argv_.end(); ++it) { + if (it->data() == Acl::DefaultUser) { + res().SetRes(CmdRes::kErrOther, "The 'default' user cannot be removed"); + return; + } + if (it->data() == Acl::DefaultLimitUser) { + res().SetRes(CmdRes::kErrOther, "The 'limit' user cannot be removed"); + return; + } + } + + std::vector userNames(argv_.begin() + 2, argv_.end()); + auto delUserNames = g_pika_server->Acl()->DeleteUser(userNames); + res().AppendInteger(static_cast(delUserNames.size())); + + g_pika_server->AllClientUnAuth(delUserNames); +} + +void PikaAclCmd::DryRun() { + auto user = g_pika_server->Acl()->GetUserLock(argv_[2]); + + if (!user) { + res().SetRes(CmdRes::kErrOther, fmt::format("User '{}' not found", argv_[2])); + return; + } + auto cmd = g_pika_cmd_table_manager->GetCmd(argv_[3]); + + if (!cmd) { + res().SetRes(CmdRes::kErrOther, fmt::format("Command '{}' not found", argv_[3])); + return; + } + + PikaCmdArgsType args; + if (argv_.size() > 4) { + args = 
PikaCmdArgsType(argv_.begin() + 3, argv_.end()); + } + if (!cmd->CheckArg(args.size())) { + res().SetRes(CmdRes::kWrongNum, cmd->name()); + return; + } + + int8_t subCmdIndex = -1; + AclDeniedCmd checkRes = user->CheckUserPermission(cmd, args, subCmdIndex, nullptr); + + switch (checkRes) { + case AclDeniedCmd::OK: + res().SetRes(CmdRes::kOk); + break; + case AclDeniedCmd::CMD: + res().SetRes(CmdRes::kErrOther, + cmd->HasSubCommand() + ? fmt::format("This user has no permissions to run the '{}|{}' command", argv_[3], argv_[4]) + : fmt::format("This user has no permissions to run the '{}' command", argv_[3])); + break; + case AclDeniedCmd::KEY: + res().SetRes(CmdRes::kErrOther, + cmd->HasSubCommand() + ? fmt::format("This user has no permissions to run the '{}|{}' key", argv_[3], argv_[4]) + : fmt::format("This user has no permissions to run the '{}' key", argv_[3])); + break; + case AclDeniedCmd::CHANNEL: + res().SetRes(CmdRes::kErrOther, + cmd->HasSubCommand() + ? fmt::format("This user has no permissions to run the '{}|{}' channel", argv_[3], argv_[4]) + : fmt::format("This user has no permissions to run the '{}' channel", argv_[3])); + break; + case AclDeniedCmd::NUMBER: + res().SetRes(CmdRes::kErrOther, fmt::format("wrong number of arguments for '{}' command", argv_[3])); + break; + default: + break; + } +} + +void PikaAclCmd::GenPass() { + int bits = 256; + if (argv_.size() > 2) { + try { + bits = std::stoi(argv_[2]); + } catch (std::exception& e) { + res().SetRes(CmdRes::kErrOther, fmt::format("Invalid bits value: {}", argv_[2])); + return; + } + } + + if (bits <= 0 || bits > AclGenPassMaxBit) { + res().SetRes( + CmdRes::kErrOther, + fmt::format( + "ACL GENPASS argument must be the number of bits for the output password, a positive number up to 4096 {}", + bits)); + return; + } + + std::string pass = pstd::getRandomHexChars((bits + 3) / 4); + res().AppendString(pass); +} + +void PikaAclCmd::GetUser() { + auto user = g_pika_server->Acl()->GetUserLock(argv_[2]); + + if (!user) { + res().AppendStringLen(-1); + return; + } + + user->GetUserDescribe(&res_); +} + +void PikaAclCmd::List() { + std::vector result; + g_pika_server->Acl()->DescribeAllUser(&result); + + res().AppendStringVector(result); +} + +void PikaAclCmd::Load() { + std::set toUnAuthUsers; + auto status = g_pika_server->Acl()->LoadUserFromFile(&toUnAuthUsers); + if (status.ok()) { + res().SetRes(CmdRes::kOk); + g_pika_server->AllClientUnAuth(toUnAuthUsers); + return; + } + + res().SetRes(CmdRes::kErrOther, status.ToString()); +} + +void PikaAclCmd::Log() { + if (argv_.size() == 2) { + g_pika_server->Acl()->GetLog(-1, &res_); + return; + } + + long count = 0; + if (!strcasecmp(argv_[2].data(), "reset")) { + g_pika_server->Acl()->ResetLog(); + res().SetRes(CmdRes::kOk); + return; + } + if (!pstd::string2int(argv_[2].data(), argv_[2].size(), &count)) { + res().SetRes(CmdRes::kErrOther, fmt::format("Invalid count value: {}", argv_[2])); + return; + } + + g_pika_server->Acl()->GetLog(count, &res_); +} + +void PikaAclCmd::Save() { + auto status = g_pika_server->Acl()->SaveToFile(); + + if (status.ok()) { + res().SetRes(CmdRes::kOk); + } else { + res().SetRes(CmdRes::kErrOther, status.ToString()); + } +} + +void PikaAclCmd::SetUser() { + std::vector rule; + if (argv_.size() > 3) { + rule = std::vector(argv_.begin() + 3, argv_.end()); + } + + if (pstd::isspace(argv_[2])) { + res().SetRes(CmdRes::kErrOther, "Usernames can't contain spaces or null characters"); + return; + } + auto status = g_pika_server->Acl()->SetUser(argv_[2], 
rule); + if (status.ok()) { + res().SetRes(CmdRes::kOk); + return; + } + LOG(ERROR) << "ACL SETUSER modifier " + status.ToString(); + res().SetRes(CmdRes::kErrOther, "ACL SETUSER modifier " + status.ToString()); +} + +void PikaAclCmd::Users() { res().AppendStringVector(g_pika_server->Acl()->Users()); } + +void PikaAclCmd::WhoAmI() { + std::shared_ptr conn = std::dynamic_pointer_cast(GetConn()); + auto name = conn->UserName(); + + if (name.empty()) { + res().AppendString(Acl::DefaultUser); + } else { + res().AppendString(name); + } +} + +void PikaAclCmd::Help() { + if (argv_.size() > 2) { + res().SetRes(CmdRes::kWrongNum, "acl|help"); + return; + } + const std::vector info = { + "CAT []", + " List all commands that belong to , or all command categories", + " when no category is specified.", + "DELUSER [ ...]", + " Delete a list of users.", + "DRYRUN [ ...]", + " Returns whether the user can execute the given command without executing the command.", + "GETUSER ", + " Get the user's details.", + "GENPASS []", + " Generate a secure 256-bit user password. The optional `bits` argument can", + " be used to specify a different size.", + "LIST", + " Show users details in config file format.", + "LOAD", + " Reload users from the ACL file.", + "LOG [ | RESET]", + " Show the ACL log entries.", + "SAVE", + " Save the current config to the ACL file.", + "SETUSER [ ...]", + " Create or modify a user with the specified attributes.", + "USERS", + " List all the registered usernames.", + "WHOAMI", + " Return the current connection username."}; + + res().AppendStringVector(info); +} diff --git a/tools/pika_migrate/src/pika_admin.cc b/tools/pika_migrate/src/pika_admin.cc new file mode 100644 index 0000000000..3c0cf13b11 --- /dev/null +++ b/tools/pika_migrate/src/pika_admin.cc @@ -0,0 +1,3766 @@ +// Copyright (c) 2015-present, Qihoo, Inc. All rights reserved. +// This source code is licensed under the BSD-style license found in the +// LICENSE file in the root directory of this source tree. An additional grant +// of patent rights can be found in the PATENTS file in the same directory. 
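One observation on `PikaAclCmd::Do()` above: it dispatches `subCmd_` through a long if/else chain of string compares. When a subcommand set keeps growing, the same dispatch is often written as a lookup table of member-function pointers; a sketch with hypothetical handler names (not a drop-in replacement for the pika class):

```cpp
#include <string>
#include <unordered_map>

class AclCmd {
 public:
  void Do(const std::string& sub) {
    // One hash lookup instead of N strcasecmp-style comparisons.
    static const std::unordered_map<std::string, void (AclCmd::*)()> kTable = {
        {"cat", &AclCmd::Cat},
        {"list", &AclCmd::List},
        {"whoami", &AclCmd::WhoAmI},
    };
    auto it = kTable.find(sub);
    if (it == kTable.end()) {
      SyntaxError();
      return;
    }
    (this->*it->second)();
  }

 private:
  void Cat() {}
  void List() {}
  void WhoAmI() {}
  void SyntaxError() {}
};
```

The table keeps registration and dispatch in one place; the trade-off is that per-subcommand argument-count checks still live in `DoInitial()`, as in the pika code above.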
+ +#include "include/pika_admin.h" + +#include +#include +#include + +#include +#include + +#include + +#include "include/build_version.h" +#include "include/pika_cmd_table_manager.h" +#include "include/pika_rm.h" +#include "include/pika_server.h" +#include "include/pika_version.h" +#include "include/pika_conf.h" +#include "pstd/include/rsync.h" +#include "include/throttle.h" +using pstd::Status; + +extern PikaServer* g_pika_server; +extern std::unique_ptr g_pika_rm; +extern std::unique_ptr g_pika_cmd_table_manager; + +static std::string ConstructPinginPubSubResp(const PikaCmdArgsType& argv) { + if (argv.size() > 2) { + return "-ERR wrong number of arguments for " + kCmdNamePing + " command\r\n"; + } + std::stringstream resp; + + resp << "*2\r\n" + << "$4\r\n" + << "pong\r\n"; + if (argv.size() == 2) { + resp << "$" << argv[1].size() << "\r\n" << argv[1] << "\r\n"; + } else { + resp << "$0\r\n\r\n"; + } + return resp.str(); +} + +static double MethodofCommandStatistics(const uint64_t time_consuming, const uint64_t frequency) { + return (static_cast(time_consuming) / 1000.0) / static_cast(frequency); +} + +static double MethodofTotalTimeCalculation(const uint64_t time_consuming) { + return static_cast(time_consuming) / 1000.0; +} + +enum AuthResult { + OK, + INVALID_PASSWORD, + NO_REQUIRE_PASS, + INVALID_CONN, +}; + +static AuthResult AuthenticateUser(const std::string& cmdName, const std::string& userName, const std::string& pwd, + const std::shared_ptr& conn, bool defaultAuth) { + if (defaultAuth) { + auto defaultUser = g_pika_server->Acl()->GetUserLock(Acl::DefaultUser); + if (defaultUser->HasFlags(static_cast(AclUserFlag::NO_PASS))) { + return AuthResult::NO_REQUIRE_PASS; + } + } + + auto user = g_pika_server->Acl()->Auth(userName, pwd); + + if (!user) { + std::string cInfo; + if (auto ptr = std::dynamic_pointer_cast(conn); ptr) { + ptr->ClientInfoToString(&cInfo, cmdName); + } + g_pika_server->Acl()->AddLogEntry(static_cast(AclDeniedCmd::NO_AUTH), + static_cast(AclLogCtx::TOPLEVEL), userName, "AUTH", cInfo); + return AuthResult::INVALID_PASSWORD; + } + + if (!conn) { + LOG(WARNING) << " weak ptr is empty"; + return AuthResult::INVALID_CONN; + } + std::shared_ptr cli_conn = std::dynamic_pointer_cast(conn); + + cli_conn->DoAuth(user); + + return AuthResult::OK; +} + +/* + * slaveof no one + * slaveof ip port + * slaveof ip port force + */ +void SlaveofCmd::DoInitial() { + if (!CheckArg(argv_.size())) { + res_.SetRes(CmdRes::kWrongNum, kCmdNameSlaveof); + return; + } + + if (argv_.size() > 4) { + res_.SetRes(CmdRes::kWrongNum, kCmdNameSlaveof); + return; + } + + if (argv_.size() == 3 && (strcasecmp(argv_[1].data(), "no") == 0) && (strcasecmp(argv_[2].data(), "one") == 0)) { + is_none_ = true; + return; + } + // self is master of A , want to slaveof B + if ((g_pika_server->role() & PIKA_ROLE_MASTER) != 0) { + res_.SetRes(CmdRes::kErrOther, "already master of others, invalid usage"); + return; + } + + master_ip_ = argv_[1]; + std::string str_master_port = argv_[2]; + if ((pstd::string2int(str_master_port.data(), str_master_port.size(), &master_port_) == 0) || master_port_ <= 0) { + res_.SetRes(CmdRes::kInvalidInt); + return; + } + + if ((pstd::StringToLower(master_ip_) == "localhost" || master_ip_ == "127.0.0.1" || master_ip_ == g_pika_server->host()) && master_port_ == g_pika_server->port()) { + res_.SetRes(CmdRes::kErrOther, "The master ip:port and the slave ip:port are the same"); + return; + } + + if (argv_.size() == 4) { + if (strcasecmp(argv_[3].data(), "force") == 0) { + 
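// "force" asks the master for a full DBSync on the next connection instead
+      // of an incremental binlog catch-up (compare the
+      // force_sync_ ? kTryDBSync : kTryConnect choice in DbSlaveofCmd::Do).
+      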
g_pika_server->SetForceFullSync(true);
+    } else {
+      res_.SetRes(CmdRes::kWrongNum, kCmdNameSlaveof);
+    }
+  }
+}
+
+void SlaveofCmd::Do() {
+  // Check if we are already connected to the specified master
+  if ((master_ip_ == "127.0.0.1" || g_pika_server->master_ip() == master_ip_) &&
+      g_pika_server->master_port() == master_port_) {
+    res_.SetRes(CmdRes::kOk);
+    return;
+  }
+
+  g_pika_server->RemoveMaster();
+
+  if (is_none_) {
+    res_.SetRes(CmdRes::kOk);
+    g_pika_conf->SetSlaveof(std::string());
+    return;
+  }
+
+  /* An OK reply from slaveof does not mean the data synchronization
+   * succeeded; it only means this server's role was switched to slave */
+
+  bool sm_ret = g_pika_server->SetMaster(master_ip_, static_cast<int32_t>(master_port_));
+
+  if (sm_ret) {
+    res_.SetRes(CmdRes::kOk);
+    g_pika_server->ClearCacheDbAsync(db_);
+    g_pika_conf->SetSlaveof(master_ip_ + ":" + std::to_string(master_port_));
+    g_pika_server->SetFirstMetaSync(true);
+  } else {
+    res_.SetRes(CmdRes::kErrOther, "Server is not in correct state for slaveof");
+  }
+}
+
+/*
+ * dbslaveof db[0 ~ 7]
+ * dbslaveof db[0 ~ 7] force
+ * dbslaveof db[0 ~ 7] no one
+ * dbslaveof db[0 ~ 7] filenum offset
+ */
+void DbSlaveofCmd::DoInitial() {
+  if (!CheckArg(argv_.size())) {
+    res_.SetRes(CmdRes::kWrongNum, kCmdNameDbSlaveof);
+    return;
+  }
+  if (((g_pika_server->role() ^ PIKA_ROLE_SLAVE) != 0) || !g_pika_server->MetaSyncDone()) {
+    res_.SetRes(CmdRes::kErrOther, "Not currently a slave");
+    return;
+  }
+
+  if (argv_.size() > 4) {
+    res_.SetRes(CmdRes::kWrongNum, kCmdNameDbSlaveof);
+    return;
+  }
+
+  db_name_ = argv_[1];
+  if (!g_pika_server->IsDBExist(db_name_)) {
+    res_.SetRes(CmdRes::kErrOther, "Invalid db name");
+    return;
+  }
+
+  if (argv_.size() == 3 && (strcasecmp(argv_[2].data(), "force") == 0)) {
+    force_sync_ = true;
+    return;
+  }
+
+  if (argv_.size() == 4) {
+    if ((strcasecmp(argv_[2].data(), "no") == 0) && (strcasecmp(argv_[3].data(), "one") == 0)) {
+      is_none_ = true;
+      return;
+    }
+
+    if ((pstd::string2int(argv_[2].data(), argv_[2].size(), &filenum_) == 0) || filenum_ < 0) {
+      res_.SetRes(CmdRes::kInvalidInt);
+      return;
+    }
+    if ((pstd::string2int(argv_[3].data(), argv_[3].size(), &offset_) == 0) || offset_ < 0) {
+      res_.SetRes(CmdRes::kInvalidInt);
+      return;
+    }
+    have_offset_ = true;
+  }
+}
+
+void DbSlaveofCmd::Do() {
+  std::shared_ptr<SyncSlaveDB> slave_db = g_pika_rm->GetSyncSlaveDBByName(DBInfo(db_name_));
+  if (!slave_db) {
+    res_.SetRes(CmdRes::kErrOther, "DB not found");
+    return;
+  }
+
+  Status s;
+  if (is_none_) {
+    s = g_pika_rm->SendRemoveSlaveNodeRequest(db_name_);
+  } else {
+    if (slave_db->State() == ReplState::kNoConnect || slave_db->State() == ReplState::kError ||
+        slave_db->State() == ReplState::kDBNoConnect) {
+      if (have_offset_) {
+        std::shared_ptr<SyncMasterDB> db = g_pika_rm->GetSyncMasterDBByName(DBInfo(db_name_));
+        db->Logger()->SetProducerStatus(filenum_, offset_);
+      }
+      ReplState state = force_sync_ ?
ReplState::kTryDBSync : ReplState::kTryConnect; + s = g_pika_rm->ActivateSyncSlaveDB( + RmNode(g_pika_server->master_ip(), g_pika_server->master_port(), db_name_, 0), state); + } + } + + if (s.ok()) { + res_.SetRes(CmdRes::kOk); + } else { + res_.SetRes(CmdRes::kErrOther, s.ToString()); + } +} + +void AuthCmd::DoInitial() { + if (!CheckArg(argv_.size())) { + res_.SetRes(CmdRes::kWrongNum, kCmdNameAuth); + return; + } +} + +void AuthCmd::Do() { + std::shared_ptr conn = GetConn(); + if (!conn) { + res_.SetRes(CmdRes::kErrOther, kCmdNamePing); + LOG(WARNING) << name_ << " weak ptr is empty"; + return; + } + + std::string userName = ""; + std::string pwd = ""; + bool defaultAuth = false; + if (argv_.size() == 2) { + pwd = argv_[1]; +// defaultAuth = true; + } else { + userName = argv_[1]; + pwd = argv_[2]; + } + + AuthResult authResult; + if (userName == "") { + // default + authResult = AuthenticateUser(name(), Acl::DefaultUser, pwd, conn, true); + if (authResult != AuthResult::OK && authResult != AuthResult::NO_REQUIRE_PASS) { + // Limit + authResult = AuthenticateUser(name(), Acl::DefaultLimitUser, pwd, conn, defaultAuth); + } + } else { + authResult = AuthenticateUser(name(), userName, pwd, conn, defaultAuth); + } + + switch (authResult) { + case AuthResult::INVALID_CONN: + res_.SetRes(CmdRes::kErrOther, kCmdNamePing); + return; + case AuthResult::INVALID_PASSWORD: + res_.AppendContent("-WRONGPASS invalid username-password pair or user is disabled."); + return; + case AuthResult::NO_REQUIRE_PASS: + res_.SetRes(CmdRes::kErrOther, "Client sent AUTH, but no password is set"); + return; + case AuthResult::OK: + break; + } + res_.SetRes(CmdRes::kOk); +} + +void BgsaveCmd::DoInitial() { + if (!CheckArg(argv_.size())) { + res_.SetRes(CmdRes::kWrongNum, kCmdNameBgsave); + return; + } + if (argv_.size() == 2) { + std::vector dbs; + pstd::StringSplit(argv_[1], COMMA, dbs); + for (const auto& db : dbs) { + if (!g_pika_server->IsDBExist(db)) { + res_.SetRes(CmdRes::kInvalidDB, db); + return; + } else { + bgsave_dbs_.insert(db); + } + } + } else { + bgsave_dbs_ = g_pika_server->GetAllDBName(); + } +} + +void BgsaveCmd::Do() { + g_pika_server->DoSameThingSpecificDB(bgsave_dbs_, {TaskType::kBgSave}); + LogCommand(); + res_.AppendContent("+Background saving started"); +} + +void CompactCmd::DoInitial() { + if (!CheckArg(argv_.size()) || argv_.size() > 3) { + res_.SetRes(CmdRes::kWrongNum, kCmdNameCompact); + return; + } + + if (g_pika_server->IsKeyScaning()) { + res_.SetRes(CmdRes::kErrOther, "The info keyspace operation is executing, Try again later"); + return; + } + + if (argv_.size() == 1) { + compact_dbs_ = g_pika_server->GetAllDBName(); + } else if (argv_.size() == 2) { + std::vector dbs; + pstd::StringSplit(argv_[1], COMMA, dbs); + for (const auto& db : dbs) { + if (!g_pika_server->IsDBExist(db)) { + res_.SetRes(CmdRes::kInvalidDB, db); + return; + } else { + compact_dbs_.insert(db); + } + } + } +} + +/* + * Because meta-CF stores the meta information of all data structures, + * the compact operation can only operate on all data types without + * specifying data types + */ +void CompactCmd::Do() { + g_pika_server->DoSameThingSpecificDB(compact_dbs_, {TaskType::kCompactAll}); + LogCommand(); + res_.SetRes(CmdRes::kOk); +} + +void CompactRangeCmd::DoInitial() { + if (!CheckArg(argv_.size())) { + res_.SetRes(CmdRes::kWrongNum, kCmdNameCompactRange); + return; + } + + if (g_pika_server->IsKeyScaning()) { + res_.SetRes(CmdRes::kErrOther, "The info keyspace operation is executing, Try again later"); + 
return; + } + + std::vector dbs; + pstd::StringSplit(argv_[1], COMMA, dbs); + for (const auto& db : dbs) { + if (!g_pika_server->IsDBExist(db)) { + res_.SetRes(CmdRes::kInvalidDB, db); + return; + } else { + compact_dbs_.insert(db); + } + } + start_key_ = argv_[2]; + end_key_ = argv_[3]; +} + +void CompactRangeCmd::Do() { + g_pika_server->DoSameThingSpecificDB(compact_dbs_, {TaskType::kCompactRangeAll, {start_key_, end_key_}}); + LogCommand(); + res_.SetRes(CmdRes::kOk); +} + +void PurgelogstoCmd::DoInitial() { + if (!CheckArg(argv_.size()) || argv_.size() > 3) { + res_.SetRes(CmdRes::kWrongNum, kCmdNamePurgelogsto); + return; + } + std::string filename = argv_[1]; + if (filename.size() <= kBinlogPrefixLen || kBinlogPrefix != filename.substr(0, kBinlogPrefixLen)) { + res_.SetRes(CmdRes::kInvalidParameter); + return; + } + std::string str_num = filename.substr(kBinlogPrefixLen); + int64_t num = 0; + if ((pstd::string2int(str_num.data(), str_num.size(), &num) == 0) || num < 0) { + res_.SetRes(CmdRes::kInvalidParameter); + return; + } + num_ = num; + + db_ = (argv_.size() == 3) ? argv_[2] : g_pika_conf->default_db(); + if (!g_pika_server->IsDBExist(db_)) { + res_.SetRes(CmdRes::kInvalidDB, db_); + return; + } +} + +void PurgelogstoCmd::Do() { + std::shared_ptr sync_db = g_pika_rm->GetSyncMasterDBByName(DBInfo(db_)); + if (!sync_db) { + res_.SetRes(CmdRes::kErrOther, "DB not found"); + } else { + sync_db->StableLogger()->PurgeStableLogs(num_, true); + res_.SetRes(CmdRes::kOk); + } +} + +void PingCmd::DoInitial() { + if (!CheckArg(argv_.size())) { + res_.SetRes(CmdRes::kWrongNum, kCmdNamePing); + return; + } +} + +void PingCmd::Do() { + std::shared_ptr conn = GetConn(); + if (!conn) { + res_.SetRes(CmdRes::kErrOther, kCmdNamePing); + LOG(WARNING) << name_ << " weak ptr is empty"; + return; + } + std::shared_ptr cli_conn = std::dynamic_pointer_cast(conn); + + if (cli_conn->IsPubSub()) { + return res_.SetRes(CmdRes::kNone, ConstructPinginPubSubResp(argv_)); + } + res_.SetRes(CmdRes::kPong); +} +void SelectCmd::DoInitial() { + if (!CheckArg(argv_.size())) { + res_.SetRes(CmdRes::kWrongNum, kCmdNameSelect); + return; + } + db_name_ = "db" + argv_[1]; + db_ = g_pika_server->GetDB(db_name_); + sync_db_ = g_pika_rm->GetSyncMasterDBByName(DBInfo(db_name_)); + int index = atoi(argv_[1].data()); + if (std::to_string(index) != argv_[1]) { + res_.SetRes(CmdRes::kInvalidIndex, kCmdNameSelect); + return; + } + if (index < 0 || index >= g_pika_conf->databases()) { + res_.SetRes(CmdRes::kInvalidIndex, kCmdNameSelect + " DB index is out of range"); + return; + } + if (db_ == nullptr || sync_db_ == nullptr) { + res_.SetRes(CmdRes::kInvalidDB, kCmdNameSelect); + return; + } +} + +void SelectCmd::Do() { + std::shared_ptr conn = std::dynamic_pointer_cast(GetConn()); + if (!conn) { + res_.SetRes(CmdRes::kErrOther, kCmdNameSelect); + LOG(WARNING) << name_ << " weak ptr is empty"; + return; + } + conn->SetCurrentDb(db_name_); + res_.SetRes(CmdRes::kOk); +} + +void FlushallCmd::DoInitial() { + flushall_succeed_ = false; + if (!CheckArg(argv_.size())) { + res_.SetRes(CmdRes::kWrongNum, kCmdNameFlushall); + return; + } +} + +void FlushallCmd::Do() { + std::lock_guard l_trw(g_pika_server->GetDBLock()); + for (const auto& db_item : g_pika_server->GetDB()) { + if (db_item.second->IsKeyScaning()) { + res_.SetRes(CmdRes::kErrOther, "The keyscan operation is executing, Try again later"); + return; + } + } + g_pika_rm->DBLock(); + for (const auto& db_item : g_pika_server->GetDB()) { + db_item.second->DBLock(); + } + 
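// At this point the locks are held in a fixed order: the server's DB-map
+  // lock, then the replication manager's lock, then every per-DB lock, so
+  // FlushAllWithoutLock below cannot race with writers or sync threads.
+  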
flushall_succeed_ = FlushAllWithoutLock(); + for (const auto& db_item : g_pika_server->GetDB()) { + db_item.second->DBUnlock(); + } + g_pika_rm->DBUnlock(); + if (flushall_succeed_) { + res_.SetRes(CmdRes::kOk); + } else if (res_.ret() == CmdRes::kErrOther){ + //flushdb failed and the res_ was set + } else { + //flushall failed, but res_ was not set + res_.SetRes(CmdRes::kErrOther, + "Flushall failed, maybe only some of the dbs successfully flushed while some not, check WARNING/ERROR log to know " + "more, you can try again moment later"); + } +} + +void FlushallCmd::DoThroughDB() { + Do(); +} + +void FlushallCmd::DoFlushCache(std::shared_ptr db) { + // clear cache + if (PIKA_CACHE_NONE != g_pika_conf->cache_mode()) { + g_pika_server->ClearCacheDbAsync(std::move(db)); + } +} + +bool FlushallCmd::FlushAllWithoutLock() { + for (const auto& db_item : g_pika_server->GetDB()) { + std::shared_ptr db = db_item.second; + DBInfo p_info(db->GetDBName()); + if (g_pika_rm->GetSyncMasterDBs().find(p_info) == g_pika_rm->GetSyncMasterDBs().end()) { + LOG(ERROR) << p_info.db_name_ + " not found when flushall db"; + res_.SetRes(CmdRes::kErrOther,p_info.db_name_ + " not found when flushall db"); + return false; + } + bool success = DoWithoutLock(db); + if (!success) { return false; } + } + return true; +} + +bool FlushallCmd::DoWithoutLock(std::shared_ptr db) { + if (!db) { + LOG(ERROR) << "Flushall, but DB not found"; + res_.SetRes(CmdRes::kErrOther,db->GetDBName() + " not found when flushall db"); + return false; + } + bool success = db->FlushDBWithoutLock(); + if (!success) { + // if the db is not flushed, return before clear the cache + res_.SetRes(CmdRes::kErrOther,db->GetDBName() + " flushall failed due to other Errors, please check Error/Warning log to know more"); + return false; + } + DoFlushCache(db); + return true; +} + + +void FlushallCmd::DoBinlogByDB(const std::shared_ptr& sync_db) { + if (res().ok() && is_write() && g_pika_conf->write_binlog()) { + std::shared_ptr conn_ptr = GetConn(); + std::shared_ptr resp_ptr = GetResp(); + // Consider that dummy cmd appended by system, both conn and resp are null. 
+ if ((!conn_ptr || !resp_ptr) && (name_ != kCmdDummy)) { + if (!conn_ptr) { + LOG(WARNING) << sync_db->SyncDBInfo().ToString() << " conn empty."; + } + if (!resp_ptr) { + LOG(WARNING) << sync_db->SyncDBInfo().ToString() << " resp empty."; + } + res().SetRes(CmdRes::kErrOther); + return; + } + + Status s = sync_db->ConsensusProposeLog(shared_from_this()); + if (!s.ok()) { + LOG(WARNING) << sync_db->SyncDBInfo().ToString() << " Writing binlog failed, maybe no space left on device " + << s.ToString(); + res().SetRes(CmdRes::kErrOther, s.ToString()); + return; + } + } +} + + +void FlushallCmd::DoBinlog() { + if (flushall_succeed_) { + for (auto& db : g_pika_server->GetDB()) { + DBInfo info(db.second->GetDBName()); + DoBinlogByDB(g_pika_rm->GetSyncMasterDBByName(info)); + } + } +} + +//let flushall use +std::string FlushallCmd::ToRedisProtocol() { + std::string content; + content.reserve(RAW_ARGS_LEN); + RedisAppendLen(content, 1, "*"); + + // to flushdb cmd + std::string flushdb_cmd("flushdb"); + RedisAppendLenUint64(content, flushdb_cmd.size(), "$"); + RedisAppendContent(content, flushdb_cmd); + return content; +} + +void FlushdbCmd::DoInitial() { + flush_succeed_ = false; + if (!CheckArg(argv_.size())) { + res_.SetRes(CmdRes::kWrongNum, kCmdNameFlushdb); + return; + } + if (argv_.size() == 1) { + db_name_ = "all"; + } else { + LOG(WARNING) << "not supported to flushdb with specific type in Floyd"; + res_.SetRes(CmdRes::kInvalidParameter, "not supported to flushdb with specific type in Floyd"); + } +} + +void FlushdbCmd::Do() { + if (!db_) { + res_.SetRes(CmdRes::kInvalidDB, "DB not found while flushdb"); + return; + } + if (db_->IsKeyScaning()) { + res_.SetRes(CmdRes::kErrOther, "The keyscan operation is executing, Try again later"); + return; + } + std::lock_guard s_prw(g_pika_rm->GetDBLock()); + std::lock_guard l_prw(db_->GetDBLock()); + flush_succeed_ = DoWithoutLock(); + if (flush_succeed_) { + res_.SetRes(CmdRes::kOk); + } else if (res_.ret() == CmdRes::kErrOther || res_.ret() == CmdRes::kInvalidParameter) { + //flushdb failed and res_ was set + } else { + res_.SetRes(CmdRes::kErrOther, "flushdb failed, maybe you cna try again later(check WARNING/ERROR log to know more)"); + } +} + +void FlushdbCmd::DoThroughDB() { + Do(); +} + +void FlushdbCmd::DoUpdateCache() { + if (!flush_succeed_) { + //if flushdb failed, also do not clear the cache + return; + } + // clear cache + if (g_pika_conf->cache_mode() != PIKA_CACHE_NONE) { + g_pika_server->ClearCacheDbAsync(db_); + } +} + +bool FlushdbCmd::DoWithoutLock() { + if (!db_) { + LOG(ERROR) << db_name_ << " Flushdb, but DB not found"; + res_.SetRes(CmdRes::kErrOther, db_name_ + " Flushdb, but DB not found"); + return false; + } + DBInfo p_info(db_->GetDBName()); + if (g_pika_rm->GetSyncMasterDBs().find(p_info) == g_pika_rm->GetSyncMasterDBs().end()) { + LOG(ERROR) << "DB not found when flushing " << db_->GetDBName(); + res_.SetRes(CmdRes::kErrOther, db_->GetDBName() + " Flushdb, but DB not found"); + return false; + } + return db_->FlushDBWithoutLock(); +} + +void FlushdbCmd::DoBinlog() { + if (flush_succeed_) { + Cmd::DoBinlog(); + } +} + +void ClientCmd::DoInitial() { + if (!CheckArg(argv_.size())) { + res_.SetRes(CmdRes::kWrongNum, kCmdNameClient); + return; + } + + if ((strcasecmp(argv_[1].data(), "getname") == 0) && argv_.size() == 2) { + operation_ = argv_[1]; + return; + } + + if ((strcasecmp(argv_[1].data(), "setname") == 0) && argv_.size() != 3) { + res_.SetRes(CmdRes::kErrOther, + "Unknown subcommand or wrong number of arguments for " 
+ "'SETNAME'., try CLIENT SETNAME "); + return; + } + if ((strcasecmp(argv_[1].data(), "setname") == 0) && argv_.size() == 3) { + operation_ = argv_[1]; + return; + } + + if ((strcasecmp(argv_[1].data(), "list") == 0) && argv_.size() == 2) { + // nothing + } else if ((strcasecmp(argv_[1].data(), "list") == 0) && argv_.size() == 5) { + if ((strcasecmp(argv_[2].data(), "order") == 0) && (strcasecmp(argv_[3].data(), "by") == 0)) { + info_ = argv_[4]; + } else { + res_.SetRes(CmdRes::kErrOther, "Syntax error, try CLIENT (LIST [order by [addr|idle])"); + return; + } + } else if (argv_.size() == 3 && (strcasecmp(argv_[1].data(), "kill") == 0)) { + info_ = argv_[2]; + } else if (argv_.size() == 4 && + (strcasecmp(argv_[1].data(), "kill") == 0) && + (strcasecmp(argv_[2].data(), "type") == 0) && + ((strcasecmp(argv_[3].data(), KILLTYPE_NORMAL.data()) == 0) || (strcasecmp(argv_[3].data(), KILLTYPE_PUBSUB.data()) == 0))) { + //kill all if user wanna kill a type + info_ = "type"; + kill_type_ = argv_[3]; + } else { + res_.SetRes(CmdRes::kErrOther, "Syntax error, try CLIENT (LIST [order by [addr|idle]| KILL ip:port)"); + return; + } + operation_ = argv_[1]; +} + +void ClientCmd::Do() { + std::shared_ptr conn = GetConn(); + if (!conn) { + res_.SetRes(CmdRes::kErrOther, kCmdNameClient); + return; + } + + if ((strcasecmp(operation_.data(), "getname") == 0) && argv_.size() == 2) { + res_.AppendString(conn->name()); + return; + } + + if ((strcasecmp(operation_.data(), "setname") == 0) && argv_.size() == 3) { + std::string name = argv_[2]; + conn->set_name(name); + res_.SetRes(CmdRes::kOk); + return; + } + + if (strcasecmp(operation_.data(), "list") == 0) { + struct timeval now; + gettimeofday(&now, nullptr); + std::vector clients; + g_pika_server->ClientList(&clients); + auto iter = clients.begin(); + std::string reply; + char buf[128]; + if (strcasecmp(info_.data(), "addr") == 0) { + std::sort(clients.begin(), clients.end(), AddrCompare); + } else if (strcasecmp(info_.data(), "idle") == 0) { + std::sort(clients.begin(), clients.end(), IdleCompare); + } + while (iter != clients.end()) { + snprintf(buf, sizeof(buf), "addr=%s fd=%d idle=%ld\n", iter->ip_port.c_str(), iter->fd, + iter->last_interaction == 0 ? 
0 : now.tv_sec - iter->last_interaction); // NOLINT + reply.append(buf); + iter++; + } + res_.AppendString(reply); + } else if ((strcasecmp(operation_.data(), "kill") == 0) && (strcasecmp(info_.data(), "all") == 0)) { + g_pika_server->ClientKillAll(); + res_.SetRes(CmdRes::kOk); + } else if ((strcasecmp(operation_.data(), "kill") == 0) && (strcasecmp(info_.data(), "type") == 0)) { + if (kill_type_ == KILLTYPE_NORMAL) { + g_pika_server->ClientKillAllNormal(); + res_.SetRes(CmdRes::kOk); + } else if (kill_type_ == KILLTYPE_PUBSUB) { + g_pika_server->ClientKillPubSub(); + res_.SetRes(CmdRes::kOk); + } else { + res_.SetRes(CmdRes::kErrOther, "kill type is unknown"); + } + } else if (g_pika_server->ClientKill(info_) == 1) { + res_.SetRes(CmdRes::kOk); + } else { + res_.SetRes(CmdRes::kErrOther, "No such client"); + } +} + +void ShutdownCmd::DoInitial() { + if (!CheckArg(argv_.size())) { + res_.SetRes(CmdRes::kWrongNum, kCmdNameShutdown); + return; + } + + // For now, only shutdown need check local + if (IsLocal()) { + std::shared_ptr conn = GetConn(); + if (conn) { + if (conn->ip_port().find("127.0.0.1") == std::string::npos && + conn->ip_port().find(g_pika_server->host()) == std::string::npos) { + LOG(WARNING) << "\'shutdown\' should be localhost" + << " command from " << conn->ip_port(); + res_.SetRes(CmdRes::kErrOther, kCmdNameShutdown + " should be localhost"); + } + } else { + LOG(WARNING) << name_ << " weak ptr is empty"; + res_.SetRes(CmdRes::kErrOther, kCmdNameShutdown); + return; + } + } +} +// no return +void ShutdownCmd::Do() { + DLOG(WARNING) << "handle \'shutdown\'"; + db_->DBUnlockShared(); + g_pika_server->Exit(); + db_->DBLockShared(); + res_.SetRes(CmdRes::kNone); +} + +const std::string InfoCmd::kInfoSection = "info"; +const std::string InfoCmd::kAllSection = "all"; +const std::string InfoCmd::kServerSection = "server"; +const std::string InfoCmd::kClientsSection = "clients"; +const std::string InfoCmd::kStatsSection = "stats"; +const std::string InfoCmd::kExecCountSection = "command_exec_count"; +const std::string InfoCmd::kCPUSection = "cpu"; +const std::string InfoCmd::kReplicationSection = "replication"; +const std::string InfoCmd::kKeyspaceSection = "keyspace"; +const std::string InfoCmd::kDataSection = "data"; +const std::string InfoCmd::kRocksDBSection = "rocksdb"; +const std::string InfoCmd::kDebugSection = "debug"; +const std::string InfoCmd::kCommandStatsSection = "commandstats"; +const std::string InfoCmd::kCacheSection = "cache"; + + +const std::string ClientCmd::KILLTYPE_NORMAL = "normal"; +const std::string ClientCmd::KILLTYPE_PUBSUB = "pubsub"; + +void InfoCmd::Execute() { + std::shared_ptr db = g_pika_server->GetDB(db_name_); + Do(); +} + +void InfoCmd::DoInitial() { + size_t argc = argv_.size(); + if (argc > 4) { + res_.SetRes(CmdRes::kSyntaxErr); + return; + } + if (argc == 1) { + info_section_ = kInfo; + return; + } // then the agc is 2 or 3 + + if (strcasecmp(argv_[1].data(), kAllSection.data()) == 0) { + info_section_ = kInfoAll; + keyspace_scan_dbs_ = g_pika_server->GetAllDBName(); + } else if (strcasecmp(argv_[1].data(), kServerSection.data()) == 0) { + info_section_ = kInfoServer; + } else if (strcasecmp(argv_[1].data(), kClientsSection.data()) == 0) { + info_section_ = kInfoClients; + } else if (strcasecmp(argv_[1].data(), kStatsSection.data()) == 0) { + info_section_ = kInfoStats; + } else if (strcasecmp(argv_[1].data(), kExecCountSection.data()) == 0) { + info_section_ = kInfoExecCount; + } else if (strcasecmp(argv_[1].data(), kCPUSection.data()) == 
0) { + info_section_ = kInfoCPU; + } else if (strcasecmp(argv_[1].data(), kReplicationSection.data()) == 0) { + info_section_ = kInfoReplication; + } else if (strcasecmp(argv_[1].data(), kKeyspaceSection.data()) == 0) { + info_section_ = kInfoKeyspace; + if (argc == 2) { + LogCommand(); + + return; + } + // info keyspace [ 0 | 1 | off ] + // info keyspace 1 db0,db1 + // info keyspace 0 db0,db1 + // info keyspace off db0,db1 + if (argv_[2] == "1") { + if (g_pika_server->IsCompacting()) { + res_.SetRes(CmdRes::kErrOther, "The compact operation is executing, Try again later"); + } else { + rescan_ = true; + } + } else if (argv_[2] == "off") { + off_ = true; + } else if (argv_[2] != "0") { + res_.SetRes(CmdRes::kSyntaxErr); + } + + if (argc == 4) { + std::vector dbs; + pstd::StringSplit(argv_[3], COMMA, dbs); + for (const auto& db : dbs) { + if (!g_pika_server->IsDBExist(db)) { + res_.SetRes(CmdRes::kInvalidDB, db); + return; + } else { + keyspace_scan_dbs_.insert(db); + } + } + } else { + keyspace_scan_dbs_ = g_pika_server->GetAllDBName(); + } + LogCommand(); + return; + } else if (strcasecmp(argv_[1].data(), kDataSection.data()) == 0) { + info_section_ = kInfoData; + } else if (strcasecmp(argv_[1].data(), kRocksDBSection.data()) == 0) { + info_section_ = kInfoRocksDB; + } else if (strcasecmp(argv_[1].data(), kDebugSection.data()) == 0) { + info_section_ = kInfoDebug; + } else if (strcasecmp(argv_[1].data(), kCommandStatsSection.data()) == 0) { + info_section_ = kInfoCommandStats; + } else if (strcasecmp(argv_[1].data(), kCacheSection.data()) == 0) { + info_section_ = kInfoCache; + } else { + info_section_ = kInfoErr; + } + if (argc != 2) { + res_.SetRes(CmdRes::kSyntaxErr); + } +} + +void InfoCmd::Do() { + std::string info; + switch (info_section_) { + case kInfo: + InfoServer(info); + info.append("\r\n"); + InfoData(info); + info.append("\r\n"); + InfoClients(info); + info.append("\r\n"); + InfoStats(info); + info.append("\r\n"); + InfoCPU(info); + info.append("\r\n"); + InfoReplication(info); + info.append("\r\n"); + InfoKeyspace(info); + break; + case kInfoAll: + InfoServer(info); + info.append("\r\n"); + InfoData(info); + info.append("\r\n"); + InfoClients(info); + info.append("\r\n"); + InfoStats(info); + info.append("\r\n"); + InfoExecCount(info); + info.append("\r\n"); + InfoCommandStats(info); + info.append("\r\n"); + InfoCache(info, db_); + info.append("\r\n"); + InfoCPU(info); + info.append("\r\n"); + InfoReplication(info); + info.append("\r\n"); + InfoKeyspace(info); + info.append("\r\n"); + InfoRocksDB(info); + break; + case kInfoServer: + InfoServer(info); + break; + case kInfoClients: + InfoClients(info); + break; + case kInfoStats: + InfoStats(info); + break; + case kInfoExecCount: + InfoExecCount(info); + break; + case kInfoCPU: + InfoCPU(info); + break; + case kInfoReplication: + InfoReplication(info); + break; + case kInfoKeyspace: + InfoKeyspace(info); + break; + case kInfoData: + InfoData(info); + break; + case kInfoRocksDB: + InfoRocksDB(info); + break; + case kInfoDebug: + InfoDebug(info); + break; + case kInfoCommandStats: + InfoCommandStats(info); + break; + case kInfoCache: + InfoCache(info, db_); + break; + default: + // kInfoErr is nothing + break; + } + + res_.AppendString(info); +} + +void InfoCmd::InfoServer(std::string& info) { + static struct utsname host_info; + static bool host_info_valid = false; + if (!host_info_valid) { + uname(&host_info); + host_info_valid = true; + } + + time_t current_time_s = time(nullptr); + std::stringstream tmp_stream; + char 
version[32]; + snprintf(version, sizeof(version), "%d.%d.%d", PIKA_MAJOR, PIKA_MINOR, PIKA_PATCH); + tmp_stream << "# Server\r\n"; + tmp_stream << "pika_version:" << version << "\r\n"; + tmp_stream << pika_build_git_sha << "\r\n"; + tmp_stream << "pika_build_compile_date: " << pika_build_compile_date << "\r\n"; + tmp_stream << "os:" << host_info.sysname << " " << host_info.release << " " << host_info.machine << "\r\n"; + tmp_stream << "arch_bits:" << (reinterpret_cast(&host_info.machine) + strlen(host_info.machine) - 2) << "\r\n"; + tmp_stream << "process_id:" << getpid() << "\r\n"; + tmp_stream << "tcp_port:" << g_pika_conf->port() << "\r\n"; + tmp_stream << "thread_num:" << g_pika_conf->thread_num() << "\r\n"; + tmp_stream << "sync_thread_num:" << g_pika_conf->sync_thread_num() << "\r\n"; + tmp_stream << "sync_binlog_thread_num:" << g_pika_conf->sync_binlog_thread_num() << "\r\n"; + tmp_stream << "uptime_in_seconds:" << (current_time_s - g_pika_server->start_time_s()) << "\r\n"; + tmp_stream << "uptime_in_days:" << (current_time_s / (24 * 3600) - g_pika_server->start_time_s() / (24 * 3600) + 1) + << "\r\n"; + tmp_stream << "config_file:" << g_pika_conf->conf_path() << "\r\n"; + tmp_stream << "server_id:" << g_pika_conf->server_id() << "\r\n"; + tmp_stream << "run_id:" << g_pika_conf->run_id() << "\r\n"; + + info.append(tmp_stream.str()); +} + +void InfoCmd::InfoClients(std::string& info) { + std::stringstream tmp_stream; + tmp_stream << "# Clients" + << "\r\n"; + tmp_stream << "connected_clients:" << g_pika_server->ClientList() << "\r\n"; + + info.append(tmp_stream.str()); +} + +void InfoCmd::InfoStats(std::string& info) { + std::stringstream tmp_stream; + tmp_stream << "# Stats" + << "\r\n"; + tmp_stream << "total_connections_received:" << g_pika_server->accumulative_connections() << "\r\n"; + tmp_stream << "instantaneous_ops_per_sec:" << g_pika_server->ServerCurrentQps() << "\r\n"; + tmp_stream << "total_commands_processed:" << g_pika_server->ServerQueryNum() << "\r\n"; + tmp_stream << "keyspace_hits:" << g_pika_server->ServerKeyspaceHits() << "\r\n"; + tmp_stream << "keyspace_misses:" << g_pika_server->ServerKeyspaceMisses() << "\r\n"; + + // Network stats + tmp_stream << "total_net_input_bytes:" << g_pika_server->NetInputBytes() + g_pika_server->NetReplInputBytes() + << "\r\n"; + tmp_stream << "total_net_output_bytes:" << g_pika_server->NetOutputBytes() + g_pika_server->NetReplOutputBytes() + << "\r\n"; + tmp_stream << "total_net_repl_input_bytes:" << g_pika_server->NetReplInputBytes() << "\r\n"; + tmp_stream << "total_net_repl_output_bytes:" << g_pika_server->NetReplOutputBytes() << "\r\n"; + tmp_stream << "instantaneous_input_kbps:" << g_pika_server->InstantaneousInputKbps() << "\r\n"; + tmp_stream << "instantaneous_output_kbps:" << g_pika_server->InstantaneousOutputKbps() << "\r\n"; + tmp_stream << "instantaneous_input_repl_kbps:" << g_pika_server->InstantaneousInputReplKbps() << "\r\n"; + tmp_stream << "instantaneous_output_repl_kbps:" << g_pika_server->InstantaneousOutputReplKbps() << "\r\n"; + + tmp_stream << "is_bgsaving:" << (g_pika_server->IsBgSaving() ? "Yes" : "No") << "\r\n"; + tmp_stream << "is_scaning_keyspace:" << (g_pika_server->IsKeyScaning() ? "Yes" : "No") << "\r\n"; + tmp_stream << "is_compact:" << (g_pika_server->IsCompacting() ? 
"Yes" : "No") << "\r\n"; + tmp_stream << "compact_cron:" << g_pika_conf->compact_cron() << "\r\n"; + tmp_stream << "compact_interval:" << g_pika_conf->compact_interval() << "\r\n"; + time_t current_time_s = time(nullptr); + PikaServer::BGSlotsReload bgslotsreload_info = g_pika_server->bgslots_reload(); + bool is_reloading = g_pika_server->GetSlotsreloading(); + tmp_stream << "is_slots_reloading:" << (is_reloading ? "Yes, " : "No, ") << bgslotsreload_info.s_start_time << ", " + << (is_reloading ? (current_time_s - bgslotsreload_info.start_time) + : (bgslotsreload_info.end_time - bgslotsreload_info.start_time)) + << "\r\n"; + PikaServer::BGSlotsCleanup bgslotscleanup_info = g_pika_server->bgslots_cleanup(); + bool is_cleaningup = g_pika_server->GetSlotscleaningup(); + tmp_stream << "is_slots_cleaningup:" << (is_cleaningup ? "Yes, " : "No, ") << bgslotscleanup_info.s_start_time << ", " + << (is_cleaningup ? (current_time_s - bgslotscleanup_info.start_time) + : (bgslotscleanup_info.end_time - bgslotscleanup_info.start_time)) + << "\r\n"; + bool is_migrating = g_pika_server->pika_migrate_thread_->IsMigrating(); + time_t start_migration_time = g_pika_server->pika_migrate_thread_->GetStartTime(); + time_t end_migration_time = g_pika_server->pika_migrate_thread_->GetEndTime(); + std::string start_migration_time_str = g_pika_server->pika_migrate_thread_->GetStartTimeStr(); + tmp_stream << "is_slots_migrating:" << (is_migrating ? "Yes, " : "No, ") << start_migration_time_str << ", " + << (is_migrating ? (current_time_s - start_migration_time) : (end_migration_time - start_migration_time)) + << "\r\n"; + tmp_stream << "slow_logs_count:" << g_pika_server->SlowlogCount() << "\r\n"; + info.append(tmp_stream.str()); +} + +void InfoCmd::InfoExecCount(std::string& info) { + std::stringstream tmp_stream; + tmp_stream << "# Command_Exec_Count\r\n"; + + std::unordered_map command_exec_count_db = g_pika_server->ServerExecCountDB(); + for (const auto& item : command_exec_count_db) { + if (item.second == 0) { + continue; + } + tmp_stream << item.first << ":" << item.second << "\r\n"; + } + info.append(tmp_stream.str()); +} + +void InfoCmd::InfoCPU(std::string& info) { + struct rusage self_ru; + struct rusage c_ru; + getrusage(RUSAGE_SELF, &self_ru); + getrusage(RUSAGE_CHILDREN, &c_ru); + std::stringstream tmp_stream; + tmp_stream << "# CPU" + << "\r\n"; + tmp_stream << "used_cpu_sys:" << std::setiosflags(std::ios::fixed) << std::setprecision(2) + << static_cast(self_ru.ru_stime.tv_sec) + static_cast(self_ru.ru_stime.tv_usec) / 1000000 + << "\r\n"; + tmp_stream << "used_cpu_user:" << std::setiosflags(std::ios::fixed) << std::setprecision(2) + << static_cast(self_ru.ru_utime.tv_sec) + static_cast(self_ru.ru_utime.tv_usec) / 1000000 + << "\r\n"; + tmp_stream << "used_cpu_sys_children:" << std::setiosflags(std::ios::fixed) << std::setprecision(2) + << static_cast(c_ru.ru_stime.tv_sec) + static_cast(c_ru.ru_stime.tv_usec) / 1000000 + << "\r\n"; + tmp_stream << "used_cpu_user_children:" << std::setiosflags(std::ios::fixed) << std::setprecision(2) + << static_cast(c_ru.ru_utime.tv_sec) + static_cast(c_ru.ru_utime.tv_usec) / 1000000 + << "\r\n"; + info.append(tmp_stream.str()); +} + +void InfoCmd::InfoReplication(std::string& info) { + int host_role = g_pika_server->role(); + std::stringstream tmp_stream; + std::stringstream out_of_sync; + std::stringstream repl_connect_status; + int32_t syncing_full_count = 0; + bool all_db_sync = true; + std::shared_lock db_rwl(g_pika_server->dbs_rw_); + for (const auto& db_item : 
g_pika_server->GetDB()) { + std::shared_ptr slave_db = + g_pika_rm->GetSyncSlaveDBByName(DBInfo(db_item.second->GetDBName())); + if (!slave_db) { + out_of_sync << "(" << db_item.first << ": InternalError)"; + continue; + } + repl_connect_status << db_item.first << ":"; + if (slave_db->State() != ReplState::kConnected) { + all_db_sync = false; + out_of_sync << "(" << db_item.first << ":"; + if (slave_db->State() == ReplState::kNoConnect) { + out_of_sync << "NoConnect)"; + repl_connect_status << "no_connect"; + } else if (slave_db->State() == ReplState::kWaitDBSync) { + out_of_sync << "WaitDBSync)"; + repl_connect_status << "syncing_full"; + ++syncing_full_count; + } else if (slave_db->State() == ReplState::kError) { + out_of_sync << "Error)"; + repl_connect_status << "error"; + } else if (slave_db->State() == ReplState::kWaitReply) { + out_of_sync << "kWaitReply)"; + repl_connect_status << "connecting"; + } else if (slave_db->State() == ReplState::kTryConnect) { + out_of_sync << "kTryConnect)"; + repl_connect_status << "try_to_incr_sync"; + } else if (slave_db->State() == ReplState::kTryDBSync) { + out_of_sync << "kTryDBSync)"; + repl_connect_status << "try_to_full_sync"; + } else if (slave_db->State() == ReplState::kDBNoConnect) { + out_of_sync << "kDBNoConnect)"; + repl_connect_status << "no_connect"; + } else { + out_of_sync << "Other)"; + repl_connect_status << "error"; + } + } else { //slave_db->State() equal to kConnected + repl_connect_status << "connected"; + } + repl_connect_status << "\r\n"; + } + + tmp_stream << "# Replication("; + switch (host_role) { + case PIKA_ROLE_SINGLE: + case PIKA_ROLE_MASTER: + tmp_stream << "MASTER)\r\nrole:master\r\n"; + break; + case PIKA_ROLE_SLAVE: + tmp_stream << "SLAVE)\r\nrole:slave\r\n"; + break; + case PIKA_ROLE_MASTER | PIKA_ROLE_SLAVE: + tmp_stream << "Master && SLAVE)\r\nrole:master&&slave\r\n"; + break; + default: + info.append("ERR: server role is error\r\n"); + return; + } + tmp_stream << "ReplicationID:" << g_pika_conf->replication_id() << "\r\n"; + std::string slaves_list_str; + switch (host_role) { + case PIKA_ROLE_SLAVE: + tmp_stream << "master_host:" << g_pika_server->master_ip() << "\r\n"; + tmp_stream << "master_port:" << g_pika_server->master_port() << "\r\n"; + tmp_stream << "master_link_status:" + << (((g_pika_server->repl_state() == PIKA_REPL_META_SYNC_DONE) && all_db_sync) ? "up" : "down") + << "\r\n"; + tmp_stream << "repl_connect_status:\r\n" << repl_connect_status.str(); + tmp_stream << "slave_priority:" << g_pika_conf->slave_priority() << "\r\n"; + tmp_stream << "slave_read_only:" << g_pika_conf->slave_read_only() << "\r\n"; + if (!all_db_sync) { + tmp_stream << "db_repl_state:" << out_of_sync.str() << "\r\n"; + } + break; + case PIKA_ROLE_MASTER | PIKA_ROLE_SLAVE: + tmp_stream << "master_host:" << g_pika_server->master_ip() << "\r\n"; + tmp_stream << "master_port:" << g_pika_server->master_port() << "\r\n"; + tmp_stream << "master_link_status:" + << (((g_pika_server->repl_state() == PIKA_REPL_META_SYNC_DONE) && all_db_sync) ? 
"up" : "down") + << "\r\n"; + tmp_stream << "repl_connect_status:\r\n" << repl_connect_status.str(); + tmp_stream << "slave_read_only:" << g_pika_conf->slave_read_only() << "\r\n"; + if (!all_db_sync) { + tmp_stream << "db_repl_state:" << out_of_sync.str() << "\r\n"; + } + case PIKA_ROLE_SINGLE: + case PIKA_ROLE_MASTER: + tmp_stream << "connected_slaves:" << g_pika_server->GetSlaveListString(slaves_list_str) << "\r\n" + << slaves_list_str; + } + + //if current instance is syncing full or has full sync corrupted, it's not qualified to be a new master + if (syncing_full_count == 0 && g_pika_conf->GetUnfinishedFullSyncCount() == 0) { + tmp_stream << "is_eligible_for_master_election:true" << "\r\n"; + } else { + tmp_stream << "is_eligible_for_master_election:false" << "\r\n"; + } + + Status s; + uint32_t filenum = 0; + uint64_t offset = 0; + uint64_t slave_repl_offset = 0; + std::string safety_purge; + std::shared_ptr master_db = nullptr; + for (const auto& t_item : g_pika_server->dbs_) { + std::shared_lock db_rwl(t_item.second->dbs_rw_); + std::string db_name = t_item.first; + master_db = g_pika_rm->GetSyncMasterDBByName(DBInfo(db_name)); + if (!master_db) { + LOG(WARNING) << "Sync Master DB: " << db_name << " NotFound"; + continue; + } + master_db->Logger()->GetProducerStatus(&filenum, &offset); + slave_repl_offset += static_cast(filenum) * static_cast(g_pika_conf->binlog_file_size()); + slave_repl_offset += offset; + tmp_stream << db_name << ":binlog_offset=" << filenum << " " << offset; + s = master_db->GetSafetyPurgeBinlog(&safety_purge); + tmp_stream << ",safety_purge=" << (s.ok() ? safety_purge : "error") << "\r\n"; + } + tmp_stream << "slave_repl_offset:" << slave_repl_offset << "\r\n"; + info.append(tmp_stream.str()); +} + +void InfoCmd::InfoKeyspace(std::string& info) { + if (off_) { + g_pika_server->DoSameThingSpecificDB(keyspace_scan_dbs_, {TaskType::kStopKeyScan}); + info.append("OK\r\n"); + return; + } + + std::string db_name; + KeyScanInfo key_scan_info; + int32_t duration = 0; + std::vector key_infos; + std::stringstream tmp_stream; + tmp_stream << "# Keyspace" + << "\r\n"; + if (argv_.size() > 1 && strcasecmp(argv_[1].data(), kAllSection.data()) == 0) { + tmp_stream << "# Start async statistics\r\n"; + } else if (argv_.size() == 3 && strcasecmp(argv_[1].data(), kKeyspaceSection.data()) == 0) { + tmp_stream << "# Start async statistics\r\n"; + } else { + tmp_stream << "# Use \"info keyspace 1\" to do async statistics\r\n"; + } + std::shared_lock rwl(g_pika_server->dbs_rw_); + for (const auto& db_item : g_pika_server->dbs_) { + if (keyspace_scan_dbs_.find(db_item.first) != keyspace_scan_dbs_.end()) { + db_name = db_item.second->GetDBName(); + key_scan_info = db_item.second->GetKeyScanInfo(); + key_infos = key_scan_info.key_infos; + duration = key_scan_info.duration; + if (key_infos.size() != (size_t)(storage::DataTypeNum)) { + LOG(ERROR) << "key_infos size is not equal with expected, potential data inconsistency"; + info.append("info keyspace error\r\n"); + return; + } + tmp_stream << "# Time:" << key_scan_info.s_start_time << "\r\n"; + if (duration == -2) { + tmp_stream << "# Duration: " + << "In Waiting\r\n"; + } else if (duration == -1) { + tmp_stream << "# Duration: " + << "In Processing\r\n"; + } else if (duration >= 0) { + tmp_stream << "# Duration: " << std::to_string(duration) + "s" + << "\r\n"; + } + + tmp_stream << db_name << " Strings_keys=" << key_infos[0].keys << ", expires=" << key_infos[0].expires + << ", invalid_keys=" << key_infos[0].invaild_keys << "\r\n"; + 
tmp_stream << db_name << " Hashes_keys=" << key_infos[1].keys << ", expires=" << key_infos[1].expires + << ", invalid_keys=" << key_infos[1].invaild_keys << "\r\n"; + tmp_stream << db_name << " Lists_keys=" << key_infos[2].keys << ", expires=" << key_infos[2].expires + << ", invalid_keys=" << key_infos[2].invaild_keys << "\r\n"; + tmp_stream << db_name << " Zsets_keys=" << key_infos[3].keys << ", expires=" << key_infos[3].expires + << ", invalid_keys=" << key_infos[3].invaild_keys << "\r\n"; + tmp_stream << db_name << " Sets_keys=" << key_infos[4].keys << ", expires=" << key_infos[4].expires + << ", invalid_keys=" << key_infos[4].invaild_keys << "\r\n\r\n"; + tmp_stream << db_name << " Streams_keys=" << key_infos[5].keys << ", expires=" << key_infos[5].expires + << ", invalid_keys=" << key_infos[5].invaild_keys << "\r\n\r\n"; + } + } + info.append(tmp_stream.str()); + if (rescan_) { + g_pika_server->DoSameThingSpecificDB(keyspace_scan_dbs_, {TaskType::kStartKeyScan}); + } +} + +void InfoCmd::InfoData(std::string& info) { + std::stringstream tmp_stream; + std::stringstream db_fatal_msg_stream; + + uint64_t db_size = g_pika_server->GetDBSize(); + uint64_t log_size = g_pika_server->GetLogSize(); + + tmp_stream << "# Data" + << "\r\n"; + tmp_stream << "db_size:" << db_size << "\r\n"; + tmp_stream << "db_size_human:" << (db_size >> 20) << "M\r\n"; + tmp_stream << "log_size:" << log_size << "\r\n"; + tmp_stream << "log_size_human:" << (log_size >> 20) << "M\r\n"; + tmp_stream << "compression:" << g_pika_conf->compression() << "\r\n"; + + // rocksdb related memory usage + std::map background_errors; + uint64_t total_background_errors = 0; + uint64_t total_memtable_usage = 0; + uint64_t total_table_reader_usage = 0; + uint64_t memtable_usage = 0; + uint64_t table_reader_usage = 0; + std::shared_lock db_rwl(g_pika_server->dbs_rw_); + for (const auto& db_item : g_pika_server->dbs_) { + if (!db_item.second) { + continue; + } + background_errors.clear(); + memtable_usage = table_reader_usage = 0; + db_item.second->DBLockShared(); + db_item.second->storage()->GetUsage(storage::PROPERTY_TYPE_ROCKSDB_CUR_SIZE_ALL_MEM_TABLES, &memtable_usage); + db_item.second->storage()->GetUsage(storage::PROPERTY_TYPE_ROCKSDB_ESTIMATE_TABLE_READER_MEM, &table_reader_usage); + db_item.second->storage()->GetUsage(storage::PROPERTY_TYPE_ROCKSDB_BACKGROUND_ERRORS, &background_errors); + db_item.second->DBUnlockShared(); + total_memtable_usage += memtable_usage; + total_table_reader_usage += table_reader_usage; + for (const auto& item : background_errors) { + if (item.second != 0) { + db_fatal_msg_stream << (total_background_errors != 0 ? "," : ""); + db_fatal_msg_stream << db_item.first << "/" << item.first; + total_background_errors += item.second; + } + } + } + + tmp_stream << "used_memory:" << (total_memtable_usage + total_table_reader_usage) << "\r\n"; + tmp_stream << "used_memory_human:" << ((total_memtable_usage + total_table_reader_usage) >> 20) << "M\r\n"; + + tmp_stream << "db_memtable_usage:" << total_memtable_usage << "\r\n"; + tmp_stream << "db_tablereader_usage:" << total_table_reader_usage << "\r\n"; + tmp_stream << "db_fatal:" << (total_background_errors != 0 ? "1" : "0") << "\r\n"; + tmp_stream << "db_fatal_msg:" << (total_background_errors != 0 ? 
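+  // Note: used_memory above sums memtable usage and table-reader memory only;
+  // block-cache usage is not included in this figure.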
db_fatal_msg_stream.str() : "nullptr") << "\r\n"; + + info.append(tmp_stream.str()); +} + +void InfoCmd::InfoRocksDB(std::string& info) { + std::stringstream tmp_stream; + + tmp_stream << "# RocksDB" + << "\r\n"; + + std::shared_lock db_rwl(g_pika_server->dbs_rw_); + for (const auto& db_item : g_pika_server->dbs_) { + if (!db_item.second) { + continue; + } + std::string rocksdb_info; + db_item.second->DBLockShared(); + db_item.second->storage()->GetRocksDBInfo(rocksdb_info); + db_item.second->DBUnlockShared(); + tmp_stream << rocksdb_info; + } + info.append(tmp_stream.str()); +} + +void InfoCmd::InfoDebug(std::string& info) { + std::stringstream tmp_stream; + tmp_stream << "# Synchronization Status" + << "\r\n"; + + info.append(tmp_stream.str()); + g_pika_rm->RmStatus(&info); + + tmp_stream.str(std::string()); + tmp_stream << "# Running Status " + << "\r\n"; + + info.append(tmp_stream.str()); + g_pika_server->ServerStatus(&info); +} + +void InfoCmd::InfoCommandStats(std::string& info) { + std::stringstream tmp_stream; + tmp_stream.precision(2); + tmp_stream.setf(std::ios::fixed); + tmp_stream << "# Commandstats" << "\r\n"; + auto cmdstat_map = g_pika_cmd_table_manager->GetCommandStatMap(); + for (auto iter : *cmdstat_map) { + if (iter.second.cmd_count != 0) { + tmp_stream << iter.first << ":" + << "calls=" << iter.second.cmd_count << ", usec=" + << MethodofTotalTimeCalculation(iter.second.cmd_time_consuming) + << ", usec_per_call="; + if (!iter.second.cmd_time_consuming) { + tmp_stream << 0 << "\r\n"; + } else { + tmp_stream << MethodofCommandStatistics(iter.second.cmd_time_consuming, iter.second.cmd_count) + << "\r\n"; + } + } + } + info.append(tmp_stream.str()); +} + +void InfoCmd::InfoCache(std::string& info, std::shared_ptr db) { + std::stringstream tmp_stream; + tmp_stream << "# Cache" << "\r\n"; + if (PIKA_CACHE_NONE == g_pika_conf->cache_mode()) { + tmp_stream << "cache_status:Disable" << "\r\n"; + } else { + auto cache_info = db->GetCacheInfo(); + tmp_stream << "cache_status:" << CacheStatusToString(cache_info.status) << "\r\n"; + tmp_stream << "cache_db_num:" << cache_info.cache_num << "\r\n"; + tmp_stream << "cache_keys:" << cache_info.keys_num << "\r\n"; + tmp_stream << "cache_memory:" << cache_info.used_memory << "\r\n"; + tmp_stream << "cache_memory_human:" << (cache_info.used_memory >> 20) << "M\r\n"; + tmp_stream << "hits:" << cache_info.hits << "\r\n"; + tmp_stream << "all_cmds:" << cache_info.hits + cache_info.misses << "\r\n"; + tmp_stream << "hits_per_sec:" << cache_info.hits_per_sec << "\r\n"; + tmp_stream << "read_cmd_per_sec:" << cache_info.read_cmd_per_sec << "\r\n"; + tmp_stream << "hitratio_per_sec:" << std::setprecision(4) << cache_info.hitratio_per_sec << "%" << "\r\n"; + tmp_stream << "hitratio_all:" << std::setprecision(4) << cache_info.hitratio_all << "%" << "\r\n"; + tmp_stream << "load_keys_per_sec:" << cache_info.load_keys_per_sec << "\r\n"; + tmp_stream << "waitting_load_keys_num:" << cache_info.waitting_load_keys_num << "\r\n"; + } + info.append(tmp_stream.str()); +} + +std::string InfoCmd::CacheStatusToString(int status) { + switch (status) { + case PIKA_CACHE_STATUS_NONE: + return std::string("None"); + case PIKA_CACHE_STATUS_OK: + return std::string("Ok"); + case PIKA_CACHE_STATUS_INIT: + return std::string("Init"); + case PIKA_CACHE_STATUS_RESET: + return std::string("Reset"); + case PIKA_CACHE_STATUS_DESTROY: + return std::string("Destroy"); + case PIKA_CACHE_STATUS_CLEAR: + return std::string("Clear"); + default: + return std::string("Unknown"); + } 
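+
+// A minimal sketch of the section InfoCache emits when the cache is enabled;
+// all values are illustrative only (a disabled cache prints just
+// "cache_status:Disable"):
+//
+//   # Cache
+//   cache_status:Ok
+//   cache_db_num:16
+//   cache_keys:10000
+//   cache_memory:1048576
+//   cache_memory_human:1M
+//   hits:800
+//   all_cmds:1000
+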
+void ConfigCmd::Execute() {
+  Do();
+}
+
+void ConfigCmd::DoInitial() {
+  if (!CheckArg(argv_.size())) {
+    res_.SetRes(CmdRes::kWrongNum, kCmdNameConfig);
+    return;
+  }
+  size_t argc = argv_.size();
+  if (strcasecmp(argv_[1].data(), "get") == 0) {
+    if (argc != 3) {
+      res_.SetRes(CmdRes::kErrOther, "Wrong number of arguments for CONFIG get");
+      return;
+    }
+  } else if (strcasecmp(argv_[1].data(), "set") == 0) {
+    if (argc == 3 && argv_[2] != "*") {
+      res_.SetRes(CmdRes::kErrOther, "Wrong number of arguments for CONFIG set");
+      return;
+    } else if (argc != 4 && argc != 3) {
+      res_.SetRes(CmdRes::kErrOther, "Wrong number of arguments for CONFIG set");
+      return;
+    }
+  } else if (strcasecmp(argv_[1].data(), "rewrite") == 0) {
+    if (argc != 2) {
+      res_.SetRes(CmdRes::kErrOther, "Wrong number of arguments for CONFIG rewrite");
+      return;
+    }
+  } else if (strcasecmp(argv_[1].data(), "resetstat") == 0) {
+    if (argc != 2) {
+      res_.SetRes(CmdRes::kErrOther, "Wrong number of arguments for CONFIG resetstat");
+      return;
+    }
+  } else {
+    res_.SetRes(CmdRes::kErrOther, "CONFIG subcommand must be one of GET, SET, RESETSTAT, REWRITE");
+    return;
+  }
+  config_args_v_.assign(argv_.begin() + 1, argv_.end());
+}
+
+void ConfigCmd::Do() {
+  std::string config_ret;
+  if (strcasecmp(config_args_v_[0].data(), "get") == 0) {
+    ConfigGet(config_ret);
+  } else if (strcasecmp(config_args_v_[0].data(), "set") == 0) {
+    ConfigSet(db_);
+  } else if (strcasecmp(config_args_v_[0].data(), "rewrite") == 0) {
+    ConfigRewrite(config_ret);
+  } else if (strcasecmp(config_args_v_[0].data(), "resetstat") == 0) {
+    ConfigResetstat(config_ret);
+  } else if (strcasecmp(config_args_v_[0].data(), "rewritereplicationid") == 0) {
+    // NOTE: DoInitial() rejects this subcommand, so this branch appears unreachable.
+    ConfigRewriteReplicationID(config_ret);
+  }
+  res_.AppendStringRaw(config_ret);
+}
+
+// Writes one RESP bulk string: "$<len>\r\n<value>\r\n".
+static void EncodeString(std::string* dst, const std::string& value) {
+  dst->append("$");
+  dst->append(std::to_string(value.size()));
+  dst->append(kNewLine);
+  dst->append(value.data(), value.size());
+  dst->append(kNewLine);
+}
+
+template <typename T>
+static void EncodeNumber(std::string* dst, const T v) {
+  std::string vstr = std::to_string(v);
+  dst->append("$");
+  dst->append(std::to_string(vstr.length()));
+  dst->append(kNewLine);
+  dst->append(vstr);
+  dst->append(kNewLine);
+}
+
+void ConfigCmd::ConfigGet(std::string& ret) {
+  size_t elements = 0;
+  std::string config_body;
+  std::string pattern = config_args_v_[1];
+
+  if (pstd::stringmatch(pattern.data(), "port", 1) != 0) {
+    elements += 2;
+    EncodeString(&config_body, "port");
+    EncodeNumber(&config_body, g_pika_conf->port());
+  }
+
+  if (pstd::stringmatch(pattern.data(), "thread-num", 1) != 0) {
+    elements += 2;
+    EncodeString(&config_body, "thread-num");
+    EncodeNumber(&config_body, g_pika_conf->thread_num());
+  }
+
+  if (pstd::stringmatch(pattern.data(), "thread-pool-size", 1) != 0) {
+    elements += 2;
+    EncodeString(&config_body, "thread-pool-size");
+    EncodeNumber(&config_body, g_pika_conf->thread_pool_size());
+  }
+
+  if (pstd::stringmatch(pattern.data(), "slow-cmd-thread-pool-size", 1) != 0) {
+    elements += 2;
+    EncodeString(&config_body, "slow-cmd-thread-pool-size");
+    EncodeNumber(&config_body, g_pika_conf->slow_cmd_thread_pool_size());
+  }
+
+  if (pstd::stringmatch(pattern.data(), "admin-thread-pool-size", 1) != 0) {
+    elements += 2;
+    EncodeString(&config_body, "admin-thread-pool-size");
+    EncodeNumber(&config_body, g_pika_conf->admin_thread_pool_size());
+  }
+
+  if (pstd::stringmatch(pattern.data(), "userblacklist", 1) != 0) {
+    elements += 2;
+
EncodeString(&config_body, "userblacklist"); + EncodeString(&config_body, g_pika_conf->user_blacklist_string()); + } + if (pstd::stringmatch(pattern.data(), "slow-cmd-list", 1) != 0) { + elements += 2; + EncodeString(&config_body, "slow-cmd-list"); + EncodeString(&config_body, g_pika_conf->GetSlowCmd()); + } + if (pstd::stringmatch(pattern.data(), "admin-cmd-list", 1) != 0) { + elements += 2; + EncodeString(&config_body, "admin-cmd-list"); + EncodeString(&config_body, g_pika_conf->GetAdminCmd()); + } + if (pstd::stringmatch(pattern.data(), "sync-thread-num", 1) != 0) { + elements += 2; + EncodeString(&config_body, "sync-thread-num"); + EncodeNumber(&config_body, g_pika_conf->sync_thread_num()); + } + + if (pstd::stringmatch(pattern.data(), "sync-binlog-thread-num", 1) != 0) { + elements += 2; + EncodeString(&config_body, "sync-binlog-thread-num"); + EncodeNumber(&config_body, g_pika_conf->sync_binlog_thread_num()); + } + + if (pstd::stringmatch(pattern.data(), "log-path", 1) != 0) { + elements += 2; + EncodeString(&config_body, "log-path"); + EncodeString(&config_body, g_pika_conf->log_path()); + } + + if (pstd::stringmatch(pattern.data(), "db-path", 1) != 0) { + elements += 2; + EncodeString(&config_body, "db-path"); + EncodeString(&config_body, g_pika_conf->db_path()); + } + + if (pstd::stringmatch(pattern.data(), "maxmemory", 1) != 0) { + elements += 2; + EncodeString(&config_body, "maxmemory"); + EncodeNumber(&config_body, g_pika_conf->write_buffer_size()); + } + + if (pstd::stringmatch(pattern.data(), "write-buffer-size", 1) != 0) { + elements += 2; + EncodeString(&config_body, "write-buffer-size"); + EncodeNumber(&config_body, g_pika_conf->write_buffer_size()); + } + + if (pstd::stringmatch(pattern.data(), "arena-block-size", 1) != 0) { + elements += 2; + EncodeString(&config_body, "arena-block-size"); + EncodeNumber(&config_body, g_pika_conf->arena_block_size()); + } + + if (pstd::stringmatch(pattern.data(), "max-write-buffer-num", 1) != 0) { + elements += 2; + EncodeString(&config_body, "max-write-buffer-num"); + EncodeNumber(&config_body, g_pika_conf->max_write_buffer_number()); + } + + if (pstd::stringmatch(pattern.data(), "timeout", 1) != 0) { + elements += 2; + EncodeString(&config_body, "timeout"); + EncodeNumber(&config_body, g_pika_conf->timeout()); + } + + if (pstd::stringmatch(pattern.data(), "requirepass", 1) != 0) { + elements += 2; + EncodeString(&config_body, "requirepass"); + EncodeString(&config_body, g_pika_conf->requirepass()); + } + + if (pstd::stringmatch(pattern.data(), "masterauth", 1) != 0) { + elements += 2; + EncodeString(&config_body, "masterauth"); + EncodeString(&config_body, g_pika_conf->masterauth()); + } + + if (pstd::stringmatch(pattern.data(), "userpass", 1) != 0) { + elements += 2; + EncodeString(&config_body, "userpass"); + EncodeString(&config_body, g_pika_conf->userpass()); + } + + if (pstd::stringmatch(pattern.data(), "instance-mode", 1) != 0) { + elements += 2; + EncodeString(&config_body, "instance-mode"); + EncodeString(&config_body, "classic"); + } + + if (pstd::stringmatch(pattern.data(), "databases", 1) != 0) { + elements += 2; + EncodeString(&config_body, "databases"); + EncodeNumber(&config_body, g_pika_conf->databases()); + } + + if (pstd::stringmatch(pattern.data(), "daemonize", 1)) { + elements += 2; + EncodeString(&config_body, "daemonize"); + EncodeString(&config_body, g_pika_conf->daemonize() ? 
"yes" : "no"); + } + + if (pstd::stringmatch(pattern.data(), "slotmigrate", 1)) { + elements += 2; + EncodeString(&config_body, "slotmigrate"); + EncodeString(&config_body, g_pika_conf->slotmigrate() ? "yes" : "no"); + } + + if (pstd::stringmatch(pattern.data(), "slow-cmd-pool", 1)) { + elements += 2; + EncodeString(&config_body, "slow-cmd-pool"); + EncodeString(&config_body, g_pika_conf->slow_cmd_pool() ? "yes" : "no"); + } + + if (pstd::stringmatch(pattern.data(), "slotmigrate-thread-num", 1)!= 0) { + elements += 2; + EncodeString(&config_body, "slotmigrate-thread-num"); + EncodeNumber(&config_body, g_pika_conf->slotmigrate_thread_num()); + } + + if (pstd::stringmatch(pattern.data(), "thread-migrate-keys-num", 1)!= 0) { + elements += 2; + EncodeString(&config_body, "thread-migrate-keys-num"); + EncodeNumber(&config_body, g_pika_conf->thread_migrate_keys_num()); + } + + if (pstd::stringmatch(pattern.data(), "dump-path", 1) != 0) { + elements += 2; + EncodeString(&config_body, "dump-path"); + EncodeString(&config_body, g_pika_conf->bgsave_path()); + } + + if (pstd::stringmatch(pattern.data(), "dump-expire", 1) != 0) { + elements += 2; + EncodeString(&config_body, "dump-expire"); + EncodeNumber(&config_body, g_pika_conf->expire_dump_days()); + } + + if (pstd::stringmatch(pattern.data(), "dump-prefix", 1) != 0) { + elements += 2; + EncodeString(&config_body, "dump-prefix"); + EncodeString(&config_body, g_pika_conf->bgsave_prefix()); + } + + if (pstd::stringmatch(pattern.data(), "pidfile", 1) != 0) { + elements += 2; + EncodeString(&config_body, "pidfile"); + EncodeString(&config_body, g_pika_conf->pidfile()); + } + + if (pstd::stringmatch(pattern.data(), "maxclients", 1) != 0) { + elements += 2; + EncodeString(&config_body, "maxclients"); + EncodeNumber(&config_body, g_pika_conf->maxclients()); + } + + if (pstd::stringmatch(pattern.data(), "target-file-size-base", 1) != 0) { + elements += 2; + EncodeString(&config_body, "target-file-size-base"); + EncodeNumber(&config_body, g_pika_conf->target_file_size_base()); + } + + if (pstd::stringmatch(pattern.data(), "max-cache-statistic-keys", 1) != 0) { + elements += 2; + EncodeString(&config_body, "max-cache-statistic-keys"); + EncodeNumber(&config_body, g_pika_conf->max_cache_statistic_keys()); + } + + if (pstd::stringmatch(pattern.data(), "small-compaction-threshold", 1) != 0) { + elements += 2; + EncodeString(&config_body, "small-compaction-threshold"); + EncodeNumber(&config_body, g_pika_conf->small_compaction_threshold()); + } + + if (pstd::stringmatch(pattern.data(), "small-compaction-duration-threshold", 1) != 0) { + elements += 2; + EncodeString(&config_body, "small-compaction-duration-threshold"); + EncodeNumber(&config_body, g_pika_conf->small_compaction_duration_threshold()); + } + + if (pstd::stringmatch(pattern.data(), "max-background-flushes", 1) != 0) { + elements += 2; + EncodeString(&config_body, "max-background-flushes"); + EncodeNumber(&config_body, g_pika_conf->max_background_flushes()); + } + + if (pstd::stringmatch(pattern.data(), "max-background-compactions", 1) != 0) { + elements += 2; + EncodeString(&config_body, "max-background-compactions"); + EncodeNumber(&config_body, g_pika_conf->max_background_compactions()); + } + + if (pstd::stringmatch(pattern.data(), "max-background-jobs", 1) != 0) { + elements += 2; + EncodeString(&config_body, "max-background-jobs"); + EncodeNumber(&config_body, g_pika_conf->max_background_jobs()); + } + + if (pstd::stringmatch(pattern.data(), "max-cache-files", 1) != 0) { + elements += 2; + 
EncodeString(&config_body, "max-cache-files"); + EncodeNumber(&config_body, g_pika_conf->max_cache_files()); + } + + if (pstd::stringmatch(pattern.data(), "max-bytes-for-level-multiplier", 1) != 0) { + elements += 2; + EncodeString(&config_body, "max-bytes-for-level-multiplier"); + EncodeNumber(&config_body, g_pika_conf->max_bytes_for_level_multiplier()); + } + + if (pstd::stringmatch(pattern.data(), "block-size", 1) != 0) { + elements += 2; + EncodeString(&config_body, "block-size"); + EncodeNumber(&config_body, g_pika_conf->block_size()); + } + + if (pstd::stringmatch(pattern.data(), "block-cache", 1) != 0) { + elements += 2; + EncodeString(&config_body, "block-cache"); + EncodeNumber(&config_body, g_pika_conf->block_cache()); + } + + if (pstd::stringmatch(pattern.data(), "share-block-cache", 1) != 0) { + elements += 2; + EncodeString(&config_body, "share-block-cache"); + EncodeString(&config_body, g_pika_conf->share_block_cache() ? "yes" : "no"); + } + + if (pstd::stringmatch(pattern.data(), "enable-partitioned-index-filters", 1) != 0) { + elements += 2; + EncodeString(&config_body, "enable-partitioned-index-filters"); + EncodeString(&config_body, g_pika_conf->enable_partitioned_index_filters() ? "yes" : "no"); + } + + if (pstd::stringmatch(pattern.data(), "cache-index-and-filter-blocks", 1) != 0) { + elements += 2; + EncodeString(&config_body, "cache-index-and-filter-blocks"); + EncodeString(&config_body, g_pika_conf->cache_index_and_filter_blocks() ? "yes" : "no"); + } + + if (pstd::stringmatch(pattern.data(), "optimize-filters-for-hits", 1) != 0) { + elements += 2; + EncodeString(&config_body, "optimize-filters-for-hits"); + EncodeString(&config_body, g_pika_conf->optimize_filters_for_hits() ? "yes" : "no"); + } + + if (pstd::stringmatch(pattern.data(), "level-compaction-dynamic-level-bytes", 1) != 0) { + elements += 2; + EncodeString(&config_body, "level-compaction-dynamic-level-bytes"); + EncodeString(&config_body, g_pika_conf->level_compaction_dynamic_level_bytes() ? "yes" : "no"); + } + + if (pstd::stringmatch(pattern.data(), "expire-logs-days", 1) != 0) { + elements += 2; + EncodeString(&config_body, "expire-logs-days"); + EncodeNumber(&config_body, g_pika_conf->expire_logs_days()); + } + + if (pstd::stringmatch(pattern.data(), "expire-logs-nums", 1) != 0) { + elements += 2; + EncodeString(&config_body, "expire-logs-nums"); + EncodeNumber(&config_body, g_pika_conf->expire_logs_nums()); + } + + if (pstd::stringmatch(pattern.data(), "root-connection-num", 1) != 0) { + elements += 2; + EncodeString(&config_body, "root-connection-num"); + EncodeNumber(&config_body, g_pika_conf->root_connection_num()); + } + + if (pstd::stringmatch(pattern.data(), "slowlog-write-errorlog", 1) != 0) { + elements += 2; + EncodeString(&config_body, "slowlog-write-errorlog"); + EncodeString(&config_body, g_pika_conf->slowlog_write_errorlog() ? "yes" : "no"); + } + + if (pstd::stringmatch(pattern.data(), "slowlog-log-slower-than", 1) != 0) { + elements += 2; + EncodeString(&config_body, "slowlog-log-slower-than"); + EncodeNumber(&config_body, g_pika_conf->slowlog_slower_than()); + } + + if (pstd::stringmatch(pattern.data(), "slowlog-max-len", 1) != 0) { + elements += 2; + EncodeString(&config_body, "slowlog-max-len"); + EncodeNumber(&config_body, g_pika_conf->slowlog_max_len()); + } + + if (pstd::stringmatch(pattern.data(), "write-binlog", 1) != 0) { + elements += 2; + EncodeString(&config_body, "write-binlog"); + EncodeString(&config_body, g_pika_conf->write_binlog() ? 
"yes" : "no"); + } + if (pstd::stringmatch(pattern.data(), "binlog-file-size", 1) != 0) { + elements += 2; + EncodeString(&config_body, "binlog-file-size"); + EncodeNumber(&config_body, g_pika_conf->binlog_file_size()); + } + + if (pstd::stringmatch(pattern.data(), "max-write-buffer-size", 1) != 0) { + elements += 2; + EncodeString(&config_body, "max-write-buffer-size"); + EncodeNumber(&config_body, g_pika_conf->max_write_buffer_size()); + } + + if (pstd::stringmatch(pattern.data(), "max-total-wal-size", 1) != 0) { + elements += 2; + EncodeString(&config_body, "max-total-wal-size"); + EncodeNumber(&config_body, g_pika_conf->MaxTotalWalSize()); + } + + if (pstd::stringmatch(pattern.data(), "min-write-buffer-number-to-merge", 1) != 0) { + elements += 2; + EncodeString(&config_body, "min-write-buffer-number-to-merge"); + EncodeNumber(&config_body, g_pika_conf->min_write_buffer_number_to_merge()); + } + + if (pstd::stringmatch(pattern.data(), "level0-stop-writes-trigger", 1) != 0) { + elements += 2; + EncodeString(&config_body, "level0-stop-writes-trigger"); + EncodeNumber(&config_body, g_pika_conf->level0_stop_writes_trigger()); + } + + if (pstd::stringmatch(pattern.data(), "level0-slowdown-writes-trigger", 1) != 0) { + elements += 2; + EncodeString(&config_body, "level0-slowdown-writes-trigger"); + EncodeNumber(&config_body, g_pika_conf->level0_slowdown_writes_trigger()); + } + + if (pstd::stringmatch(pattern.data(), "level0-file-num-compaction-trigger", 1) != 0) { + elements += 2; + EncodeString(&config_body, "level0-file-num-compaction-trigger"); + EncodeNumber(&config_body, g_pika_conf->level0_file_num_compaction_trigger()); + } + + if (pstd::stringmatch(pattern.data(), "max-client-response-size", 1) != 0) { + elements += 2; + EncodeString(&config_body, "max-client-response-size"); + EncodeNumber(&config_body, g_pika_conf->max_client_response_size()); + } + + if (pstd::stringmatch(pattern.data(), "compression", 1) != 0) { + elements += 2; + EncodeString(&config_body, "compression"); + EncodeString(&config_body, g_pika_conf->compression()); + } + + if (pstd::stringmatch(pattern.data(), "db-sync-path", 1) != 0) { + elements += 2; + EncodeString(&config_body, "db-sync-path"); + EncodeString(&config_body, g_pika_conf->db_sync_path()); + } + + if (pstd::stringmatch(pattern.data(), "db-sync-speed", 1) != 0) { + elements += 2; + EncodeString(&config_body, "db-sync-speed"); + EncodeNumber(&config_body, g_pika_conf->db_sync_speed()); + } + + if (pstd::stringmatch(pattern.data(), "compact-cron", 1) != 0) { + elements += 2; + EncodeString(&config_body, "compact-cron"); + EncodeString(&config_body, g_pika_conf->compact_cron()); + } + + if (pstd::stringmatch(pattern.data(), "compact-interval", 1) != 0) { + elements += 2; + EncodeString(&config_body, "compact-interval"); + EncodeString(&config_body, g_pika_conf->compact_interval()); + } + if (pstd::stringmatch(pattern.data(), "disable_auto_compactions", 1) != 0) { + elements += 2; + EncodeString(&config_body, "disable_auto_compactions"); + EncodeString(&config_body, g_pika_conf->disable_auto_compactions() ? 
"true" : "false"); + } + if (pstd::stringmatch(pattern.data(), "network-interface", 1) != 0) { + elements += 2; + EncodeString(&config_body, "network-interface"); + EncodeString(&config_body, g_pika_conf->network_interface()); + } + + if (pstd::stringmatch(pattern.data(), "slaveof", 1) != 0) { + elements += 2; + EncodeString(&config_body, "slaveof"); + EncodeString(&config_body, g_pika_conf->slaveof()); + } + + if (pstd::stringmatch(pattern.data(), "slave-priority", 1) != 0) { + elements += 2; + EncodeString(&config_body, "slave-priority"); + EncodeNumber(&config_body, g_pika_conf->slave_priority()); + } + + // fake string for redis-benchmark + if (pstd::stringmatch(pattern.data(), "save", 1) != 0) { + elements += 2; + EncodeString(&config_body, "save"); + EncodeString(&config_body, ""); + } + + if (pstd::stringmatch(pattern.data(), "appendonly", 1) != 0) { + elements += 2; + EncodeString(&config_body, "appendonly"); + EncodeString(&config_body, "no"); + } + + if (pstd::stringmatch(pattern.data(), "sync-window-size", 1) != 0) { + elements += 2; + EncodeString(&config_body, "sync-window-size"); + EncodeNumber(&config_body, g_pika_conf->sync_window_size()); + } + + if (pstd::stringmatch(pattern.data(), "max-conn-rbuf-size", 1) != 0) { + elements += 2; + EncodeString(&config_body, "max-conn-rbuf-size"); + EncodeNumber(&config_body, g_pika_conf->max_conn_rbuf_size()); + } + + if (pstd::stringmatch(pattern.data(), "replication-num", 1) != 0) { + elements += 2; + EncodeString(&config_body, "replication-num"); + EncodeNumber(&config_body, g_pika_conf->replication_num()); + } + if (pstd::stringmatch(pattern.data(), "consensus-level", 1) != 0) { + elements += 2; + EncodeString(&config_body, "consensus-level"); + EncodeNumber(&config_body, g_pika_conf->consensus_level()); + } + + if (pstd::stringmatch(pattern.data(), "rate-limiter-mode", 1) != 0) { + elements += 2; + EncodeString(&config_body, "rate-limiter-mode"); + EncodeNumber(&config_body, g_pika_conf->rate_limiter_mode()); + } + + if (pstd::stringmatch(pattern.data(), "rate-limiter-bandwidth", 1) != 0) { + elements += 2; + EncodeString(&config_body, "rate-limiter-bandwidth"); + EncodeNumber(&config_body, g_pika_conf->rate_limiter_bandwidth()); + } + + if (pstd::stringmatch(pattern.data(), "delayed-write-rate", 1) != 0) { + elements += 2; + EncodeString(&config_body, "delayed-write-rate"); + EncodeNumber(&config_body, g_pika_conf->delayed_write_rate()); + } + + if (pstd::stringmatch(pattern.data(), "max-compaction-bytes", 1) != 0) { + elements += 2; + EncodeString(&config_body, "max-compaction-bytes"); + EncodeNumber(&config_body, g_pika_conf->max_compaction_bytes()); + } + + if (pstd::stringmatch(pattern.data(), "rate-limiter-refill-period-us", 1) != 0) { + elements += 2; + EncodeString(&config_body, "rate-limiter-refill-period-us"); + EncodeNumber(&config_body, g_pika_conf->rate_limiter_refill_period_us()); + } + + if (pstd::stringmatch(pattern.data(), "rate-limiter-fairness", 1) != 0) { + elements += 2; + EncodeString(&config_body, "rate-limiter-fairness"); + EncodeNumber(&config_body, g_pika_conf->rate_limiter_fairness()); + } + + if (pstd::stringmatch(pattern.data(), "rate-limiter-auto-tuned", 1) != 0) { + elements += 2; + EncodeString(&config_body, "rate-limiter-auto-tuned"); + EncodeString(&config_body, g_pika_conf->rate_limiter_auto_tuned() ? 
"yes" : "no"); + } + + if (pstd::stringmatch(pattern.data(), "run-id", 1) != 0) { + elements += 2; + EncodeString(&config_body, "run-id"); + EncodeString(&config_body, g_pika_conf->run_id()); + } + + if (pstd::stringmatch(pattern.data(), "blob-cache", 1) != 0) { + elements += 2; + EncodeString(&config_body, "blob-cache"); + EncodeNumber(&config_body, g_pika_conf->blob_cache()); + } + + if (pstd::stringmatch(pattern.data(), "blob-compression-type", 1) != 0) { + elements += 2; + EncodeString(&config_body, "blob-compression-type"); + EncodeString(&config_body, g_pika_conf->blob_compression_type()); + } + + if (pstd::stringmatch(pattern.data(), "blob-file-size", 1) != 0) { + elements += 2; + EncodeString(&config_body, "blob-file-size"); + EncodeNumber(&config_body, g_pika_conf->blob_file_size()); + } + + if (pstd::stringmatch(pattern.data(), "blob-garbage-collection-age-cutoff", 1) != 0) { + elements += 2; + EncodeString(&config_body, "blob-garbage-collection-age-cutoff"); + EncodeNumber(&config_body, g_pika_conf->blob_garbage_collection_age_cutoff()); + } + + if (pstd::stringmatch(pattern.data(), "blob-garbage-collection-force-threshold", 1) != 0) { + elements += 2; + EncodeString(&config_body, "blob-garbage-collection-force-threshold"); + EncodeNumber(&config_body, g_pika_conf->blob_garbage_collection_force_threshold()); + } + + if (pstd::stringmatch(pattern.data(), "blob-num-shard-bits", 1) != 0) { + elements += 2; + EncodeString(&config_body, "blob-num-shard-bits"); + EncodeNumber(&config_body, g_pika_conf->blob_num_shard_bits()); + } + + if (pstd::stringmatch(pattern.data(), "compression-per-level", 1) != 0) { + elements += 2; + EncodeString(&config_body, "compression-per-level"); + EncodeString(&config_body, g_pika_conf->compression_all_levels()); + } + + if (pstd::stringmatch(pattern.data(), "default-slot-num", 1) != 0) { + elements += 2; + EncodeString(&config_body, "default-slot-num"); + EncodeNumber(&config_body, g_pika_conf->default_slot_num()); + } + + if (pstd::stringmatch(pattern.data(), "enable-blob-files", 1) != 0) { + elements += 2; + EncodeString(&config_body, "enable-blob-files"); + EncodeString(&config_body, g_pika_conf->enable_blob_files() ? "yes" : "no"); + } + + if (pstd::stringmatch(pattern.data(), "enable-blob-garbage-collection", 1) != 0) { + elements += 2; + EncodeString(&config_body, "enable-blob-garbage-collection"); + EncodeString(&config_body, g_pika_conf->enable_blob_garbage_collection() ? "yes" : "no"); + } + + if (pstd::stringmatch(pattern.data(), "loglevel", 1) != 0) { + elements += 2; + EncodeString(&config_body, "loglevel"); + EncodeString(&config_body, g_pika_conf->log_level()); + } + + if (pstd::stringmatch(pattern.data(), "min-blob-size", 1) != 0) { + elements += 2; + EncodeString(&config_body, "min-blob-size"); + EncodeNumber(&config_body, g_pika_conf->min_blob_size()); + } + + if (pstd::stringmatch(pattern.data(), "pin_l0_filter_and_index_blocks_in_cache", 1) != 0) { + elements += 2; + EncodeString(&config_body, "pin_l0_filter_and_index_blocks_in_cache"); + EncodeString(&config_body, g_pika_conf->pin_l0_filter_and_index_blocks_in_cache() ? "yes" : "no"); + } + + if (pstd::stringmatch(pattern.data(), "slave-read-only", 1) != 0) { + elements += 2; + EncodeString(&config_body, "slave-read-only"); + EncodeString(&config_body, g_pika_conf->slave_read_only() ? 
"yes" : "no"); + } + + if (pstd::stringmatch(pattern.data(), "throttle-bytes-per-second", 1) != 0) { + elements += 2; + EncodeString(&config_body, "throttle-bytes-per-second"); + EncodeNumber(&config_body, g_pika_conf->throttle_bytes_per_second()); + } + + if (pstd::stringmatch(pattern.data(), "max-rsync-parallel-num", 1) != 0) { + elements += 2; + EncodeString(&config_body, "max-rsync-parallel-num"); + EncodeNumber(&config_body, g_pika_conf->max_rsync_parallel_num()); + } + + if (pstd::stringmatch(pattern.data(), "replication-id", 1) != 0) { + elements += 2; + EncodeString(&config_body, "replication-id"); + EncodeString(&config_body, g_pika_conf->replication_id()); + } + + + if (pstd::stringmatch(pattern.data(), "cache-num", 1)) { + elements += 2; + EncodeString(&config_body, "cache-num"); + EncodeNumber(&config_body, g_pika_conf->GetCacheNum()); + } + + if (pstd::stringmatch(pattern.data(), "cache-model", 1)) { + elements += 2; + EncodeString(&config_body, "cache-model"); + EncodeNumber(&config_body, g_pika_conf->cache_mode()); + } + + if (pstd::stringmatch(pattern.data(), "cache-type", 1)) { + elements += 2; + EncodeString(&config_body, "cache-type"); + EncodeString(&config_body, g_pika_conf->scache_type()); + } + + if (pstd::stringmatch(pattern.data(), "zset-cache-start-direction", 1)) { + elements += 2; + EncodeString(&config_body, "zset-cache-start-direction"); + EncodeNumber(&config_body, g_pika_conf->zset_cache_start_direction()); + } + + if (pstd::stringmatch(pattern.data(), "zset-cache-field-num-per-key", 1)) { + elements += 2; + EncodeString(&config_body, "zset-cache-field-num-per-key"); + EncodeNumber(&config_body, g_pika_conf->zset_cache_field_num_per_key()); + } + + if (pstd::stringmatch(pattern.data(), "cache-maxmemory", 1)) { + elements += 2; + EncodeString(&config_body, "cache-maxmemory"); + EncodeNumber(&config_body, g_pika_conf->cache_maxmemory()); + } + + if (pstd::stringmatch(pattern.data(), "cache-maxmemory-policy", 1)) { + elements += 2; + EncodeString(&config_body, "cache-maxmemory-policy"); + EncodeNumber(&config_body, g_pika_conf->cache_maxmemory_policy()); + } + + if (pstd::stringmatch(pattern.data(), "cache-maxmemory-samples", 1)) { + elements += 2; + EncodeString(&config_body, "cache-maxmemory-samples"); + EncodeNumber(&config_body, g_pika_conf->cache_maxmemory_samples()); + } + + if (pstd::stringmatch(pattern.data(), "cache-lfu-decay-time", 1)) { + elements += 2; + EncodeString(&config_body, "cache-lfu-decay-time"); + EncodeNumber(&config_body, g_pika_conf->cache_lfu_decay_time()); + } + + if (pstd::stringmatch(pattern.data(), "acl-pubsub-default", 1) != 0) { + elements += 2; + EncodeString(&config_body, "acl-pubsub-default"); + g_pika_conf->acl_pubsub_default() ? EncodeString(&config_body, "allchannels") + : EncodeString(&config_body, "resetchannels"); + } + + if (pstd::stringmatch(pattern.data(), "enable-db-statistics", 1)) { + elements += 2; + EncodeString(&config_body, "enable-db-statistics"); + EncodeString(&config_body, g_pika_conf->enable_db_statistics() ? 
"yes" : "no"); + } + + if (pstd::stringmatch(pattern.data(), "db-statistics-level", 1)) { + elements += 2; + EncodeString(&config_body, "db-statistics-level"); + EncodeNumber(&config_body, g_pika_conf->db_statistics_level()); + } + + std::stringstream resp; + resp << "*" << std::to_string(elements) << "\r\n" << config_body; + ret = resp.str(); +} + +// Remember to sync change PikaConf::ConfigRewrite(); +void ConfigCmd::ConfigSet(std::shared_ptr db) { + std::string set_item = config_args_v_[1]; + if (set_item == "*") { + std::vector replyVt({ + "timeout", + "requirepass", + "masterauth", + "slotmigrate", + "slow-cmd-pool", + "slotmigrate-thread-num", + "thread-migrate-keys-num", + "userpass", + "userblacklist", + "dump-prefix", + "maxclients", + "dump-expire", + "expire-logs-days", + "expire-logs-nums", + "root-connection-num", + "slowlog-write-errorlog", + "slowlog-log-slower-than", + "slowlog-max-len", + "write-binlog", + "max-cache-statistic-keys", + "small-compaction-threshold", + "small-compaction-duration-threshold", + "max-client-response-size", + "db-sync-speed", + "compact-cron", + "compact-interval", + "disable_auto_compactions", + "slave-priority", + "sync-window-size", + "slow-cmd-list", + // Options for storage engine + // MutableDBOptions + "max-cache-files", + "max-background-compactions", + "max-background-jobs", + // MutableColumnFamilyOptions + "write-buffer-size", + "max-write-buffer-num", + "min-write-buffer-number-to-merge", + "max-total-wal-size", + "level0-slowdown-writes-trigger", + "level0-stop-writes-trigger", + "level0-file-num-compaction-trigger", + "arena-block-size", + "throttle-bytes-per-second", + "max-rsync-parallel-num", + "cache-model", + "cache-type", + "zset-cache-start-direction", + "zset-cache-field-num-per-key", + "cache-lfu-decay-time", + "max-conn-rbuf-size", + }); + res_.AppendStringVector(replyVt); + return; + } + long int ival = 0; + std::string value = config_args_v_[2]; + if (set_item == "timeout") { + if (pstd::string2int(value.data(), value.size(), &ival) == 0) { + res_.AppendStringRaw("-ERR Invalid argument " + value + " for CONFIG SET 'timeout'\r\n"); + return; + } + g_pika_conf->SetTimeout(static_cast(ival)); + res_.AppendStringRaw("+OK\r\n"); + } else if (set_item == "requirepass") { + g_pika_conf->SetRequirePass(value); + g_pika_server->Acl()->UpdateDefaultUserPassword(value); + res_.AppendStringRaw("+OK\r\n"); + } else if (set_item == "masterauth") { + g_pika_conf->SetMasterAuth(value); + res_.AppendStringRaw("+OK\r\n"); + } else if (set_item == "userpass") { + g_pika_conf->SetUserPass(value); + res_.AppendStringRaw("+OK\r\n"); + } else if (set_item == "userblacklist") { + g_pika_conf->SetUserBlackList(value); + res_.AppendStringRaw("+OK\r\n"); + } else if (set_item == "dump-prefix") { + g_pika_conf->SetBgsavePrefix(value); + res_.AppendStringRaw("+OK\r\n"); + } else if (set_item == "maxclients") { + if ((pstd::string2int(value.data(), value.size(), &ival) == 0) || ival <= 0) { + res_.AppendStringRaw("-ERR Invalid argument \'" + value + "\' for CONFIG SET 'maxclients'\r\n"); + return; + } + g_pika_conf->SetMaxConnection(static_cast(ival)); + g_pika_server->SetDispatchQueueLimit(static_cast(ival)); + res_.AppendStringRaw("+OK\r\n"); + } else if (set_item == "dump-expire") { + if (pstd::string2int(value.data(), value.size(), &ival) == 0) { + res_.AppendStringRaw("-ERR Invalid argument \'" + value + "\' for CONFIG SET 'dump-expire'\r\n"); + return; + } + g_pika_conf->SetExpireDumpDays(static_cast(ival)); + res_.AppendStringRaw("+OK\r\n"); 
+ } else if (set_item == "slave-priority") { + if (pstd::string2int(value.data(), value.size(), &ival) == 0) { + res_.AppendStringRaw("-ERR Invalid argument \'" + value + "\' for CONFIG SET 'slave-priority'\r\n"); + return; + } + g_pika_conf->SetSlavePriority(static_cast(ival)); + res_.AppendStringRaw("+OK\r\n"); + } else if (set_item == "expire-logs-days") { + if ((pstd::string2int(value.data(), value.size(), &ival) == 0) || ival <= 0) { + res_.AppendStringRaw( "-ERR Invalid argument \'" + value + "\' for CONFIG SET 'expire-logs-days'\r\n"); + return; + } + g_pika_conf->SetExpireLogsDays(static_cast(ival)); + res_.AppendStringRaw("+OK\r\n"); + } else if (set_item == "expire-logs-nums") { + if ((pstd::string2int(value.data(), value.size(), &ival) == 0) || ival <= 0) { + res_.AppendStringRaw("-ERR Invalid argument \'" + value + "\' for CONFIG SET 'expire-logs-nums'\r\n"); + return; + } + g_pika_conf->SetExpireLogsNums(static_cast(ival)); + res_.AppendStringRaw("+OK\r\n"); + } else if (set_item == "root-connection-num") { + if ((pstd::string2int(value.data(), value.size(), &ival) == 0) || ival <= 0) { + res_.AppendStringRaw("-ERR Invalid argument \'" + value + "\' for CONFIG SET 'root-connection-num'\r\n"); + return; + } + g_pika_conf->SetRootConnectionNum(static_cast(ival)); + res_.AppendStringRaw("+OK\r\n"); + } else if (set_item == "slotmigrate-thread-num") { + if ((pstd::string2int(value.data(), value.size(), &ival) == 0) || ival <= 0) { + res_.AppendStringRaw("-ERR Invalid argument \'" + value + "\' for CONFIG SET 'slotmigrate-thread-num'\r\n"); + return; + } + long int migrate_thread_num = (1 > ival || 24 < ival) ? 8 : ival; + g_pika_conf->SetSlotMigrateThreadNum(migrate_thread_num); + res_.AppendStringRaw("+OK\r\n"); + } else if (set_item == "thread-migrate-keys-num") { + if ((pstd::string2int(value.data(), value.size(), &ival) == 0) || ival <= 0) { + res_.AppendStringRaw("-ERR Invalid argument \'" + value + "\' for CONFIG SET 'thread-migrate-keys-num'\r\n"); + return; + } + long int thread_migrate_keys_num = (8 > ival || 128 < ival) ? 
64 : ival; + g_pika_conf->SetThreadMigrateKeysNum(thread_migrate_keys_num); + res_.AppendStringRaw("+OK\r\n"); + } else if (set_item == "slowlog-write-errorlog") { + bool is_write_errorlog; + if (value == "yes") { + is_write_errorlog = true; + } else if (value == "no") { + is_write_errorlog = false; + } else { + res_.AppendStringRaw( "-ERR Invalid argument \'" + value + "\' for CONFIG SET 'slowlog-write-errorlog'\r\n"); + return; + } + g_pika_conf->SetSlowlogWriteErrorlog(is_write_errorlog); + res_.AppendStringRaw("+OK\r\n"); + } else if (set_item == "slotmigrate") { + bool slotmigrate; + if (value == "yes") { + slotmigrate = true; + } else if (value == "no") { + slotmigrate = false; + } else { + res_.AppendStringRaw( "-ERR Invalid argument \'" + value + "\' for CONFIG SET 'slotmigrate'\r\n"); + return; + } + g_pika_conf->SetSlotMigrate(slotmigrate); + res_.AppendStringRaw("+OK\r\n"); + } else if (set_item == "slow_cmd_pool") { + bool SlowCmdPool; + if (value == "yes") { + SlowCmdPool = true; + } else if (value == "no") { + SlowCmdPool = false; + } else { + res_.AppendStringRaw( "-ERR Invalid argument \'" + value + "\' for CONFIG SET 'slow-cmd-pool'\r\n"); + return; + } + g_pika_conf->SetSlowCmdPool(SlowCmdPool); + g_pika_server->SetSlowCmdThreadPoolFlag(SlowCmdPool); + res_.AppendStringRaw("+OK\r\n"); + } else if (set_item == "slowlog-log-slower-than") { + if ((pstd::string2int(value.data(), value.size(), &ival) == 0) || ival < 0) { + res_.AppendStringRaw("-ERR Invalid argument \'" + value + "\' for CONFIG SET 'slowlog-log-slower-than'\r\n"); + return; + } + g_pika_conf->SetSlowlogSlowerThan(static_cast(ival)); + res_.AppendStringRaw("+OK\r\n"); + } else if (set_item == "slowlog-max-len") { + if ((pstd::string2int(value.data(), value.size(), &ival) == 0) || ival < 0) { + res_.AppendStringRaw("-ERR Invalid argument \'" + value + "\' for CONFIG SET 'slowlog-max-len'\r\n"); + return; + } + g_pika_conf->SetSlowlogMaxLen(static_cast(ival)); + g_pika_server->SlowlogTrim(); + res_.AppendStringRaw("+OK\r\n"); + } else if (set_item == "max-cache-statistic-keys") { + if ((pstd::string2int(value.data(), value.size(), &ival) == 0) || ival < 0) { + res_.AppendStringRaw("-ERR Invalid argument \'" + value + "\' for CONFIG SET 'max-cache-statistic-keys'\r\n"); + return; + } + g_pika_conf->SetMaxCacheStatisticKeys(static_cast(ival)); + g_pika_server->DBSetMaxCacheStatisticKeys(static_cast(ival)); + res_.AppendStringRaw("+OK\r\n"); + } else if (set_item == "small-compaction-threshold") { + if ((pstd::string2int(value.data(), value.size(), &ival) == 0) || ival < 0) { + res_.AppendStringRaw("-ERR Invalid argument \'" + value + "\' for CONFIG SET 'small-compaction-threshold'\r\n"); + return; + } + g_pika_conf->SetSmallCompactionThreshold(static_cast(ival)); + g_pika_server->DBSetSmallCompactionThreshold(static_cast(ival)); + res_.AppendStringRaw( "+OK\r\n"); + } else if (set_item == "small-compaction-duration-threshold") { + if ((pstd::string2int(value.data(), value.size(), &ival) == 0) || ival < 0) { + res_.AppendStringRaw("-ERR Invalid argument \'" + value + "\' for CONFIG SET 'small-compaction-duration-threshold'\r\n"); + return; + } + g_pika_conf->SetSmallCompactionDurationThreshold(static_cast(ival)); + g_pika_server->DBSetSmallCompactionDurationThreshold(static_cast(ival)); + res_.AppendStringRaw("+OK\r\n"); + } else if (set_item == "disable_auto_compactions") { + if (value != "true" && value != "false") { + res_.AppendStringRaw("-ERR invalid disable_auto_compactions (true or false)\r\n"); + return; + } 
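+    // Storage-engine options take effect live: build a {rocksdb_option, value}
+    // map, push it through RewriteStorageOptions(), and persist the new value
+    // into g_pika_conf only after RocksDB has accepted the change.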
+ std::unordered_map options_map{{"disable_auto_compactions", value}}; + storage::Status s = g_pika_server->RewriteStorageOptions(storage::OptionType::kColumnFamily, options_map); + if (!s.ok()) { + res_.AppendStringRaw("-ERR Set storage::OptionType::kColumnFamily disable_auto_compactions wrong: " + s.ToString() + "\r\n"); + return; + } + g_pika_conf->SetDisableAutoCompaction(value); + res_.AppendStringRaw("+OK\r\n"); + } else if (set_item == "rate-limiter-bandwidth") { + int64_t new_bandwidth = 0; + if (pstd::string2int(value.data(), value.size(), &new_bandwidth) == 0 || new_bandwidth <= 0) { + res_.AppendStringRaw("-ERR Invalid argument \'" + value + "\' for CONFIG SET 'rate-limiter-bandwidth'\r\n"); + return; + } + g_pika_server->storage_options().options.rate_limiter->SetBytesPerSecond(new_bandwidth); + g_pika_conf->SetRateLmiterBandwidth(new_bandwidth); + res_.AppendStringRaw("+OK\r\n"); + } else if (set_item == "delayed-write-rate") { + int64_t new_delayed_write_rate = 0; + if (pstd::string2int(value.data(), value.size(), &new_delayed_write_rate) == 0 || new_delayed_write_rate <= 0) { + res_.AppendStringRaw("-ERR Invalid argument \'" + value + "\' for CONFIG SET 'delayed-write-rate'\r\n"); + return; + } + std::unordered_map options_map{{"delayed_write_rate", value}}; + storage::Status s = g_pika_server->RewriteStorageOptions(storage::OptionType::kDB, options_map); + if (!s.ok()) { + res_.AppendStringRaw("-ERR Set delayed-write-rate wrong: " + s.ToString() + "\r\n"); + return; + } + g_pika_conf->SetDelayedWriteRate(new_delayed_write_rate); + res_.AppendStringRaw("+OK\r\n"); + } else if (set_item == "max-compaction-bytes") { + int64_t new_max_compaction_bytes = 0; + if (pstd::string2int(value.data(), value.size(), &new_max_compaction_bytes) == 0 || new_max_compaction_bytes <= 0) { + res_.AppendStringRaw("-ERR Invalid argument \'" + value + "\' for CONFIG SET 'max-compaction-bytes'\r\n"); + return; + } + std::unordered_map options_map{{"max_compaction_bytes", value}}; + storage::Status s = g_pika_server->RewriteStorageOptions(storage::OptionType::kColumnFamily, options_map); + if (!s.ok()) { + res_.AppendStringRaw("-ERR Set max-compaction-bytes wrong: " + s.ToString() + "\r\n"); + return; + } + g_pika_conf->SetMaxCompactionBytes(new_max_compaction_bytes); + res_.AppendStringRaw("+OK\r\n"); + } else if (set_item == "max-client-response-size") { + if ((pstd::string2int(value.data(), value.size(), &ival) == 0) || ival < 0) { + res_.AppendStringRaw("-ERR Invalid argument \'" + value + "\' for CONFIG SET 'max-client-response-size'\r\n"); + return; + } + g_pika_conf->SetMaxClientResponseSize(static_cast(ival)); + res_.AppendStringRaw("+OK\r\n"); + } else if (set_item == "write-binlog") { + int role = g_pika_server->role(); + if (role == PIKA_ROLE_SLAVE) { + res_.AppendStringRaw("-ERR need to close master-slave mode first\r\n"); + return; + } else if (value != "yes" && value != "no") { + res_.AppendStringRaw("-ERR invalid write-binlog (yes or no)\r\n"); + return; + } else { + g_pika_conf->SetWriteBinlog(value); + res_.AppendStringRaw("+OK\r\n"); + } + } else if (set_item == "db-sync-speed") { + if (pstd::string2int(value.data(), value.size(), &ival) == 0) { + res_.AppendStringRaw("-ERR Invalid argument \'" + value + "\' for CONFIG SET 'db-sync-speed(MB)'\r\n"); + return; + } + if (ival < 0 || ival > 1024) { + ival = 1024; + } + g_pika_conf->SetDbSyncSpeed(static_cast(ival)); + res_.AppendStringRaw("+OK\r\n"); + } else if (set_item == "compact-cron") { + bool invalid = false; + if 
(!value.empty()) { + bool have_week = false; + std::string compact_cron; + std::string week_str; + int64_t slash_num = count(value.begin(), value.end(), '/'); + if (slash_num == 2) { + have_week = true; + std::string::size_type first_slash = value.find('/'); + week_str = value.substr(0, first_slash); + compact_cron = value.substr(first_slash + 1); + } else { + compact_cron = value; + } + + std::string::size_type len = compact_cron.length(); + std::string::size_type colon = compact_cron.find('-'); + std::string::size_type underline = compact_cron.find('/'); + if (colon == std::string::npos || underline == std::string::npos || colon >= underline || colon + 1 >= len || + colon + 1 == underline || underline + 1 >= len) { + invalid = true; + } else { + int week = std::atoi(week_str.c_str()); + int start = std::atoi(compact_cron.substr(0, colon).c_str()); + int end = std::atoi(compact_cron.substr(colon + 1, underline).c_str()); + int usage = std::atoi(compact_cron.substr(underline + 1).c_str()); + if ((have_week && (week < 1 || week > 7)) || start < 0 || start > 23 || end < 0 || end > 23 || usage < 0 || + usage > 100) { + invalid = true; + } + } + } + if (invalid) { + res_.AppendStringRaw("-ERR invalid compact-cron\r\n"); + return; + } else { + g_pika_conf->SetCompactCron(value); + res_.AppendStringRaw("+OK\r\n"); + } + } else if (set_item == "compact-interval") { + bool invalid = false; + if (!value.empty()) { + std::string::size_type len = value.length(); + std::string::size_type slash = value.find('/'); + if (slash == std::string::npos || slash + 1 >= len) { + invalid = true; + } else { + int interval = std::atoi(value.substr(0, slash).c_str()); + int usage = std::atoi(value.substr(slash + 1).c_str()); + if (interval <= 0 || usage < 0 || usage > 100) { + invalid = true; + } + } + } + if (invalid) { + res_.AppendStringRaw("-ERR invalid compact-interval\r\n"); + return; + } else { + g_pika_conf->SetCompactInterval(value); + res_.AppendStringRaw("+OK\r\n"); + } + } else if (set_item == "sync-window-size") { + if (pstd::string2int(value.data(), value.size(), &ival) == 0) { + res_.AppendStringRaw("-ERR Invalid argument \'" + value + "\' for CONFIG SET 'sync-window-size'\r\n"); + return; + } + if (ival <= 0 || ival > kBinlogReadWinMaxSize) { + res_.AppendStringRaw("-ERR Argument exceed range \'" + value + "\' for CONFIG SET 'sync-window-size'\r\n"); + return; + } + g_pika_conf->SetSyncWindowSize(static_cast(ival)); + res_.AppendStringRaw("+OK\r\n"); + } else if (set_item == "slow-cmd-list") { + g_pika_conf->SetSlowCmd(value); + res_.AppendStringRaw("+OK\r\n"); + } else if (set_item == "max-cache-files") { + if (pstd::string2int(value.data(), value.size(), &ival) == 0) { + res_.AppendStringRaw("-ERR Invalid argument \'" + value + "\' for CONFIG SET 'max-cache-files'\r\n"); + return; + } + std::unordered_map options_map{{"max_open_files", value}}; + storage::Status s = g_pika_server->RewriteStorageOptions(storage::OptionType::kDB, options_map); + if (!s.ok()) { + res_.AppendStringRaw("-ERR Set max-cache-files wrong: " + s.ToString() + "\r\n"); + return; + } + g_pika_conf->SetMaxCacheFiles(static_cast(ival)); + res_.AppendStringRaw("+OK\r\n"); + } else if (set_item == "max-background-compactions") { + if (pstd::string2int(value.data(), value.size(), &ival) == 0 || ival <= 0) { + res_.AppendStringRaw( "-ERR Invalid argument \'" + value + "\' for CONFIG SET 'max-background-compactions'\r\n"); + return; + } + std::unordered_map options_map{{"max_background_compactions", value}}; + storage::Status s = 
g_pika_server->RewriteStorageOptions(storage::OptionType::kDB, options_map);
+    if (!s.ok()) {
+      res_.AppendStringRaw("-ERR Set max-background-compactions wrong: " + s.ToString() + "\r\n");
+      return;
+    }
+    g_pika_conf->SetMaxBackgroudCompactions(static_cast<int>(ival));
+    res_.AppendStringRaw("+OK\r\n");
+  } else if (set_item == "rocksdb-periodic-second") {
+    if (pstd::string2int(value.data(), value.size(), &ival) == 0) {
+      res_.AppendStringRaw("-ERR Invalid argument \'" + value + "\' for CONFIG SET 'rocksdb-periodic-second'\r\n");
+      return;
+    }
+    std::unordered_map<std::string, std::string> options_map{{"periodic_compaction_seconds", value}};
+    storage::Status s = g_pika_server->RewriteStorageOptions(storage::OptionType::kColumnFamily, options_map);
+    if (!s.ok()) {
+      res_.AppendStringRaw("-ERR Set rocksdb-periodic-second wrong: " + s.ToString() + "\r\n");
+      return;
+    }
+    g_pika_conf->SetRocksdbPeriodicSecond(static_cast<uint64_t>(ival));
+    res_.AppendStringRaw("+OK\r\n");
+  } else if (set_item == "rocksdb-ttl-second") {
+    if (pstd::string2int(value.data(), value.size(), &ival) == 0) {
+      res_.AppendStringRaw("-ERR Invalid argument \'" + value + "\' for CONFIG SET 'rocksdb-ttl-second'\r\n");
+      return;
+    }
+    std::unordered_map<std::string, std::string> options_map{{"ttl", value}};
+    storage::Status s = g_pika_server->RewriteStorageOptions(storage::OptionType::kColumnFamily, options_map);
+    if (!s.ok()) {
+      res_.AppendStringRaw("-ERR Set rocksdb-ttl-second wrong: " + s.ToString() + "\r\n");
+      return;
+    }
+    g_pika_conf->SetRocksdbTTLSecond(static_cast<uint64_t>(ival));
+    res_.AppendStringRaw("+OK\r\n");
+  } else if (set_item == "max-background-jobs") {
+    if (pstd::string2int(value.data(), value.size(), &ival) == 0) {
+      res_.AppendStringRaw("-ERR Invalid argument \'" + value + "\' for CONFIG SET 'max-background-jobs'\r\n");
+      return;
+    }
+    std::unordered_map<std::string, std::string> options_map{{"max_background_jobs", value}};
+    storage::Status s = g_pika_server->RewriteStorageOptions(storage::OptionType::kDB, options_map);
+    if (!s.ok()) {
+      res_.AppendStringRaw("-ERR Set max-background-jobs wrong: " + s.ToString() + "\r\n");
+      return;
+    }
+    g_pika_conf->SetMaxBackgroudJobs(static_cast<int>(ival));
+    res_.AppendStringRaw("+OK\r\n");
+  } else if (set_item == "write-buffer-size") {
+    if (pstd::string2int(value.data(), value.size(), &ival) == 0) {
+      res_.AppendStringRaw("-ERR Invalid argument \'" + value + "\' for CONFIG SET 'write-buffer-size'\r\n");
+      return;
+    }
+    std::unordered_map<std::string, std::string> options_map{{"write_buffer_size", value}};
+    storage::Status s = g_pika_server->RewriteStorageOptions(storage::OptionType::kColumnFamily, options_map);
+    if (!s.ok()) {
+      res_.AppendStringRaw("-ERR Set write-buffer-size wrong: " + s.ToString() + "\r\n");
+      return;
+    }
+    g_pika_conf->SetWriteBufferSize(static_cast<int64_t>(ival));
+    res_.AppendStringRaw("+OK\r\n");
+  } else if (set_item == "max-write-buffer-num") {
+    if (pstd::string2int(value.data(), value.size(), &ival) == 0) {
+      res_.AppendStringRaw("-ERR Invalid argument \'" + value + "\' for CONFIG SET 'max-write-buffer-number'\r\n");
+      return;
+    }
+    std::unordered_map<std::string, std::string> options_map{{"max_write_buffer_number", value}};
+    storage::Status s = g_pika_server->RewriteStorageOptions(storage::OptionType::kColumnFamily, options_map);
+    if (!s.ok()) {
+      res_.AppendStringRaw("-ERR Set max-write-buffer-number wrong: " + s.ToString() + "\r\n");
+      return;
+    }
+    g_pika_conf->SetMaxWriteBufferNumber(static_cast<int>(ival));
+    res_.AppendStringRaw("+OK\r\n");
+  } else if (set_item == "min-write-buffer-number-to-merge") {
+    if (pstd::string2int(value.data(), value.size(), &ival) == 0) {
+      res_.AppendStringRaw("-ERR Invalid argument \'" + value + "\' for CONFIG SET 'min-write-buffer-number-to-merge'\r\n");
+      return;
+    }
+    std::unordered_map<std::string, std::string> options_map{{"min_write_buffer_number_to_merge", value}};
+    storage::Status s = g_pika_server->RewriteStorageOptions(storage::OptionType::kColumnFamily, options_map);
+    if (!s.ok()) {
+      res_.AppendStringRaw("-ERR Set min-write-buffer-number-to-merge wrong: " + s.ToString() + "\r\n");
+      return;
+    }
+    g_pika_conf->SetMinWriteBufferNumberToMerge(static_cast<int>(ival));
+    res_.AppendStringRaw("+OK\r\n");
+  } else if (set_item == "level0-stop-writes-trigger") {
+    if (pstd::string2int(value.data(), value.size(), &ival) == 0) {
+      res_.AppendStringRaw("-ERR Invalid argument \'" + value + "\' for CONFIG SET 'level0-stop-writes-trigger'\r\n");
+      return;
+    }
+    std::unordered_map<std::string, std::string> options_map{{"level0_stop_writes_trigger", value}};
+    storage::Status s = g_pika_server->RewriteStorageOptions(storage::OptionType::kColumnFamily, options_map);
+    if (!s.ok()) {
+      res_.AppendStringRaw("-ERR Set level0-stop-writes-trigger wrong: " + s.ToString() + "\r\n");
+      return;
+    }
+    g_pika_conf->SetLevel0StopWritesTrigger(static_cast<int>(ival));
+    res_.AppendStringRaw("+OK\r\n");
+  } else if (set_item == "level0-slowdown-writes-trigger") {
+    if (pstd::string2int(value.data(), value.size(), &ival) == 0) {
+      res_.AppendStringRaw("-ERR Invalid argument \'" + value + "\' for CONFIG SET 'level0-slowdown-writes-trigger'\r\n");
+      return;
+    }
+    std::unordered_map<std::string, std::string> options_map{{"level0_slowdown_writes_trigger", value}};
+    storage::Status s = g_pika_server->RewriteStorageOptions(storage::OptionType::kColumnFamily, options_map);
+    if (!s.ok()) {
+      res_.AppendStringRaw("-ERR Set level0-slowdown-writes-trigger wrong: " + s.ToString() + "\r\n");
+      return;
+    }
+    g_pika_conf->SetLevel0SlowdownWritesTrigger(static_cast<int>(ival));
+    res_.AppendStringRaw("+OK\r\n");
+
+  } else if (set_item == "max-total-wal-size") {
+    if (pstd::string2int(value.data(), value.size(), &ival) == 0) {
+      res_.AppendStringRaw("-ERR Invalid argument \'" + value + "\' for CONFIG SET 'max-total-wal-size'\r\n");
+      return;
+    }
+    std::unordered_map<std::string, std::string> options_map{{"max_total_wal_size", value}};
+    storage::Status s = g_pika_server->RewriteStorageOptions(storage::OptionType::kDB, options_map);
+    if (!s.ok()) {
+      res_.AppendStringRaw("-ERR Set max-total-wal-size: " + s.ToString() + "\r\n");
+      return;
+    }
+    g_pika_conf->SetMaxTotalWalSize(static_cast<uint64_t>(ival));
+    res_.AppendStringRaw("+OK\r\n");
+  } else if (set_item == "level0-file-num-compaction-trigger") {
+    if (pstd::string2int(value.data(), value.size(), &ival) == 0) {
+      res_.AppendStringRaw("-ERR Invalid argument \'" + value + "\' for CONFIG SET 'level0-file-num-compaction-trigger'\r\n");
+      return;
+    }
+    std::unordered_map<std::string, std::string> options_map{{"level0_file_num_compaction_trigger", value}};
+    storage::Status s = g_pika_server->RewriteStorageOptions(storage::OptionType::kColumnFamily, options_map);
+    if (!s.ok()) {
+      res_.AppendStringRaw("-ERR Set level0-file-num-compaction-trigger wrong: " + s.ToString() + "\r\n");
+      return;
+    }
+    g_pika_conf->SetLevel0FileNumCompactionTrigger(static_cast<int>(ival));
+    res_.AppendStringRaw("+OK\r\n");
+  } else if (set_item == "arena-block-size") {
+    if (pstd::string2int(value.data(), value.size(), &ival) == 0) {
+      res_.AppendStringRaw("-ERR Invalid argument \'" + value + "\' for CONFIG SET 'arena-block-size'\r\n");
+      return;
+    }
+    std::unordered_map<std::string, std::string> options_map{{"arena_block_size", value}};
+    storage::Status s = g_pika_server->RewriteStorageOptions(storage::OptionType::kColumnFamily, options_map);
+    if (!s.ok()) {
+      res_.AppendStringRaw("-ERR Set arena-block-size wrong: " + s.ToString() + "\r\n");
+      return;
+    }
+    g_pika_conf->SetArenaBlockSize(static_cast<int64_t>(ival));
+    res_.AppendStringRaw("+OK\r\n");
+  } else if (set_item == "throttle-bytes-per-second") {
+    if ((pstd::string2int(value.data(), value.size(), &ival) == 0) || ival <= 0) {
+      res_.AppendStringRaw("-ERR Invalid argument \'" + value + "\' for CONFIG SET 'throttle-bytes-per-second'\r\n");
+      return;
+    }
+    int32_t new_throughput_limit = static_cast<int32_t>(ival);
+    g_pika_conf->SetThrottleBytesPerSecond(new_throughput_limit);
+    // The rate limiter of rsync (Throttle) is used in singleton mode; all DBs share the same rate limiter
+    rsync::Throttle::GetInstance().ResetThrottleThroughputBytes(new_throughput_limit);
+    LOG(INFO) << "The conf item [throttle-bytes-per-second] is changed by Config Set command. "
+                 "The rsync rate limit now is "
+              << new_throughput_limit << " (which is around " << (new_throughput_limit >> 20) << " MB/s)";
+    res_.AppendStringRaw("+OK\r\n");
+  } else if (set_item == "rsync-timeout-ms") {
+    if (pstd::string2int(value.data(), value.size(), &ival) == 0 || ival <= 0) {
+      res_.AppendStringRaw("-ERR Invalid argument \'" + value + "\' for CONFIG SET 'rsync-timeout-ms'\r\n");
+      return;
+    }
+    g_pika_conf->SetRsyncTimeoutMs(ival);
+    LOG(INFO) << "The conf item [rsync-timeout-ms] is changed by Config Set command. "
+                 "The rsync-timeout-ms now is " << ival << " ms";
+    res_.AppendStringRaw("+OK\r\n");
+  } else if (set_item == "max-rsync-parallel-num") {
+    if ((pstd::string2int(value.data(), value.size(), &ival) == 0) || ival > kMaxRsyncParallelNum || ival <= 0) {
+      res_.AppendStringRaw("-ERR Invalid argument \'" + value + "\' for CONFIG SET 'max-rsync-parallel-num'\r\n");
+      return;
+    }
+    g_pika_conf->SetMaxRsyncParallelNum(static_cast<int32_t>(ival));
+    res_.AppendStringRaw("+OK\r\n");
+  } else if (set_item == "cache-num") {
+    if (!pstd::string2int(value.data(), value.size(), &ival) || ival < 0) {
+      res_.AppendStringRaw("-ERR Invalid argument " + value + " for CONFIG SET 'cache-num'\r\n");
+      return;
+    }
+
+    int cache_num = (ival <= 0 || ival > 48) ? 16 : static_cast<int>(ival);
+    if (cache_num != g_pika_conf->GetCacheNum()) {
+      g_pika_conf->SetCacheNum(cache_num);
+      g_pika_server->ResetCacheAsync(cache_num, db);
+    }
+    res_.AppendStringRaw("+OK\r\n");
+  } else if (set_item == "cache-model") {
+    if (!pstd::string2int(value.data(), value.size(), &ival) || ival < 0) {
+      res_.AppendStringRaw("-ERR Invalid argument " + value + " for CONFIG SET 'cache-model'\r\n");
+      return;
+    }
+    if (PIKA_CACHE_NONE > ival || PIKA_CACHE_READ < ival) {
+      res_.AppendStringRaw("-ERR Invalid cache model\r\n");
+    } else {
+      g_pika_conf->SetCacheMode(ival);
+      if (PIKA_CACHE_NONE == ival) {
+        g_pika_server->ClearCacheDbAsync(db);
+      }
+      res_.AppendStringRaw("+OK\r\n");
+    }
+  } else if (set_item == "cache-type") {
+    pstd::StringToLower(value);
+    std::set<std::string> available_types = {"string", "set", "zset", "list", "hash", "bit"};
+    std::string type_str = value;
+    std::vector<std::string> types;
+    type_str.erase(remove_if(type_str.begin(), type_str.end(), ::isspace), type_str.end());
+    pstd::StringSplit(type_str, COMMA, types);
+    for (auto& type : types) {
+      if (available_types.find(type) == available_types.end()) {
+        res_.AppendStringRaw("-ERR Invalid cache type: " + type + "\r\n");
+        return;
+      }
+    }
+    g_pika_conf->SetCacheType(value);
+    res_.AppendStringRaw("+OK\r\n");
+  } else if (set_item == "zset-cache-start-direction") {
+    if (!pstd::string2int(value.data(), value.size(), &ival)) {
+      res_.AppendStringRaw("-ERR Invalid argument " + value + " for CONFIG SET 'zset-cache-start-direction'\r\n");
+      return;
+    }
+    if (ival != CACHE_START_FROM_BEGIN && ival != CACHE_START_FROM_END) {
+      res_.AppendStringRaw("-ERR Invalid zset-cache-start-direction\r\n");
+      return;
+    }
+    auto origin_start_pos = g_pika_conf->zset_cache_start_direction();
+    if (origin_start_pos != ival) {
+      g_pika_conf->SetCacheStartDirection(ival);
+      g_pika_server->OnCacheStartPosChanged(ival, db);
+    }
+    res_.AppendStringRaw("+OK\r\n");
+  } else if (set_item == "zset-cache-field-num-per-key") {
+    if (!pstd::string2int(value.data(), value.size(), &ival) || ival < 0) {
+      res_.AppendStringRaw("-ERR Invalid argument " + value + " for CONFIG SET 'zset-cache-field-num-per-key'\r\n");
+      return;
+    }
+    g_pika_conf->SetCacheItemsPerKey(ival);
+    g_pika_server->ResetCacheConfig(db);
+    res_.AppendStringRaw("+OK\r\n");
+  } else if (set_item == "cache-maxmemory") {
+    if (!pstd::string2int(value.data(), value.size(), &ival) || ival < 0) {
+      res_.AppendStringRaw("-ERR Invalid argument " + value + " for CONFIG SET 'cache-maxmemory'\r\n");
+      return;
+    }
+    int64_t cache_maxmemory = (PIKA_CACHE_SIZE_MIN > ival) ? PIKA_CACHE_SIZE_DEFAULT : ival;
+    g_pika_conf->SetCacheMaxmemory(cache_maxmemory);
+    g_pika_server->ResetCacheConfig(db);
+    res_.AppendStringRaw("+OK\r\n");
+  } else if (set_item == "cache-maxmemory-policy") {
+    if (!pstd::string2int(value.data(), value.size(), &ival) || ival < 0) {
+      res_.AppendStringRaw("-ERR Invalid argument " + value + " for CONFIG SET 'cache-maxmemory-policy'\r\n");
+      return;
+    }
+    int cache_maxmemory_policy_ = (ival < 0 || ival > 5) ? 3 : static_cast<int>(ival);  // default allkeys-lru
+    g_pika_conf->SetCacheMaxmemoryPolicy(cache_maxmemory_policy_);
+    g_pika_server->ResetCacheConfig(db);
+    res_.AppendStringRaw("+OK\r\n");
+  } else if (set_item == "cache-maxmemory-samples") {
+    if (!pstd::string2int(value.data(), value.size(), &ival) || ival < 0) {
+      res_.AppendStringRaw("-ERR Invalid argument " + value + " for CONFIG SET 'cache-maxmemory-samples'\r\n");
+      return;
+    }
+    int cache_maxmemory_samples = (ival > 1) ? static_cast<int>(ival) : 5;
+    g_pika_conf->SetCacheMaxmemorySamples(cache_maxmemory_samples);
+    g_pika_server->ResetCacheConfig(db);
+    res_.AppendStringRaw("+OK\r\n");
+  } else if (set_item == "cache-lfu-decay-time") {
+    if (!pstd::string2int(value.data(), value.size(), &ival) || ival < 0) {
+      res_.AppendStringRaw("-ERR Invalid argument " + value + " for CONFIG SET 'cache-lfu-decay-time'\r\n");
+      return;
+    }
+    int cache_lfu_decay_time = (ival < 0) ? 1 : static_cast<int>(ival);
+    g_pika_conf->SetCacheLFUDecayTime(cache_lfu_decay_time);
+    g_pika_server->ResetCacheConfig(db);
+    res_.AppendStringRaw("+OK\r\n");
+  } else if (set_item == "acl-pubsub-default") {
+    std::string v(value);
+    pstd::StringToLower(v);
+    if (v != "allchannels" && v != "resetchannels") {
+      res_.AppendStringRaw("-ERR Invalid argument \'" + value + "\' for CONFIG SET 'acl-pubsub-default'\r\n");
+      return;
+    }
+    g_pika_conf->SetAclPubsubDefault(v);
+    res_.AppendStringRaw("+OK\r\n");
+  } else if (set_item == "acllog-max-len") {
+    if (pstd::string2int(value.data(), value.size(), &ival) == 0 || ival < 0) {
+      res_.AppendStringRaw("-ERR Invalid argument \'" + value + "\' for CONFIG SET 'acllog-max-len'\r\n");
+      return;
+    }
+    g_pika_conf->SetAclLogMaxLen(static_cast<int32_t>(ival));
+    res_.AppendStringRaw("+OK\r\n");
+  } else if (set_item == "max-conn-rbuf-size") {
+    if (pstd::string2int(value.data(), value.size(), &ival) == 0 || ival < PIKA_MAX_CONN_RBUF_LB || ival > PIKA_MAX_CONN_RBUF_HB * 2) {
+      res_.AppendStringRaw("-ERR Invalid argument \'" + value + "\' for CONFIG SET 'max-conn-rbuf-size'\r\n");
+      return;
+    }
+    g_pika_conf->SetMaxConnRbufSize(static_cast<int>(ival));
+    res_.AppendStringRaw("+OK\r\n");
+  } else {
+    res_.AppendStringRaw("-ERR Unsupported CONFIG parameter: " + set_item + "\r\n");
+  }
+}
+
+void ConfigCmd::ConfigRewrite(std::string& ret) {
+  if (g_pika_conf->ConfigRewrite() != 0) {
+    ret = "+OK\r\n";
+  } else {
+    ret = "-ERR Rewrite CONFIG fail\r\n";
+  }
+}
+
+void ConfigCmd::ConfigRewriteReplicationID(std::string& ret) {
+  if (g_pika_conf->ConfigRewriteReplicationID() != 0) {
+    ret = "+OK\r\n";
+  } else {
+    ret = "-ERR Rewrite ReplicationID CONFIG fail\r\n";
+  }
+}
+
+void ConfigCmd::ConfigResetstat(std::string& ret) {
+  g_pika_server->ResetStat();
+  ret = "+OK\r\n";
+}
+
+void MonitorCmd::DoInitial() {
+  if (argv_.size() != 1) {
+    res_.SetRes(CmdRes::kWrongNum, kCmdNameMonitor);
+    return;
+  }
+}
+
+void MonitorCmd::Do() {
+  std::shared_ptr<net::NetConn> conn_repl = GetConn();
+  if (!conn_repl) {
+    res_.SetRes(CmdRes::kErrOther, kCmdNameMonitor);
+    LOG(WARNING) << name_ << " weak ptr is empty";
+    return;
+  }
+
+  g_pika_server->AddMonitorClient(std::dynamic_pointer_cast<PikaClientConn>(conn_repl));
+  res_.SetRes(CmdRes::kOk);
+}
+
+void DbsizeCmd::DoInitial() {
+  if (argv_.size() != 1) {
+    res_.SetRes(CmdRes::kWrongNum, kCmdNameDbsize);
+    return;
+  }
+}
+
+void DbsizeCmd::Do() {
+  std::shared_ptr<DB> dbs = g_pika_server->GetDB(db_name_);
+  if (!dbs) {
+    res_.SetRes(CmdRes::kInvalidDB);
+  } else {
+    if (g_pika_conf->slotmigrate()) {
+      int64_t dbsize = 0;
+      for (int i = 0; i < g_pika_conf->default_slot_num(); ++i) {
+        int32_t card = 0;
+        rocksdb::Status s = dbs->storage()->SCard(SlotKeyPrefix + std::to_string(i), &card);
+        if (s.ok() && card >= 0) {
+          dbsize += card;
+        } else {
+          res_.SetRes(CmdRes::kErrOther, "Get dbsize error");
+          return;
+        }
+      }
+      res_.AppendInteger(dbsize);
+      return;
+    }
+    KeyScanInfo key_scan_info = dbs->GetKeyScanInfo();
+    std::vector<storage::KeyInfo> key_infos = key_scan_info.key_infos;
+    if (key_infos.size() != (size_t)(storage::DataTypeNum)) {
+      res_.SetRes(CmdRes::kErrOther, "Mismatch in expected data types and actual key info count");
+      return;
+    }
+    uint64_t dbsize = 0;
+    for (const auto& info : key_infos) {
+      dbsize += info.keys;
+    }
+    res_.AppendInteger(static_cast<int64_t>(dbsize));
+  }
+}
+void TimeCmd::DoInitial() {
+  if (argv_.size() != 1) {
+    res_.SetRes(CmdRes::kWrongNum, kCmdNameTime);
+    return;
+  }
+}
+
+void TimeCmd::Do() {
+  struct timeval tv;
+  if (gettimeofday(&tv, nullptr) == 0) {
+    res_.AppendArrayLen(2);
+    char buf[32];
+    int32_t len = pstd::ll2string(buf, sizeof(buf), tv.tv_sec);
+    res_.AppendStringLen(len);
+    res_.AppendContent(buf);
+
+    len = pstd::ll2string(buf, sizeof(buf), tv.tv_usec);
+    res_.AppendStringLen(len);
+    res_.AppendContent(buf);
+  } else {
+    res_.SetRes(CmdRes::kErrOther, strerror(errno));
+  }
+}
+
+void LastsaveCmd::DoInitial() {
+  if (argv_.size() != 1) {
+    res_.SetRes(CmdRes::kWrongNum, kCmdNameLastSave);
+    return;
+  }
+}
+
+void LastsaveCmd::Do() {
+  res_.AppendInteger(g_pika_server->GetLastSave());
+}
+
+void DelbackupCmd::DoInitial() {
+  if (argv_.size() != 1) {
+    res_.SetRes(CmdRes::kWrongNum, kCmdNameDelbackup);
+    return;
+  }
+}
+
+void DelbackupCmd::Do() {
+  std::string db_sync_prefix = g_pika_conf->bgsave_prefix();
+  std::string db_sync_path = g_pika_conf->bgsave_path();
+  std::vector<std::string> dump_dir;
+
+  // Dump path does not exist
+  if (!pstd::FileExists(db_sync_path)) {
+    res_.SetRes(CmdRes::kOk);
+    return;
+  }
+  // Directory traversal
+  if (pstd::GetChildren(db_sync_path, dump_dir) != 0) {
+    res_.SetRes(CmdRes::kOk);
+    return;
+  }
+
+  int len = static_cast<int>(dump_dir.size());
+  for (auto& i : dump_dir) {
+    if (i.substr(0, db_sync_prefix.size()) != db_sync_prefix || i.size() != (db_sync_prefix.size() + 8)) {
+      continue;
+    }
+
+    std::string str_date = i.substr(db_sync_prefix.size(), (i.size() - db_sync_prefix.size()));
+    char* end = nullptr;
+    std::strtol(str_date.c_str(), &end, 10);
+    if (*end != 0) {
+      continue;
+    }
+
+    std::string dump_dir_name = db_sync_path + i + "/" + db_name_;
+    if (g_pika_server->CountSyncSlaves() == 0) {
+      LOG(INFO) << "Not syncing, delete dump file: " << dump_dir_name;
+      pstd::DeleteDirIfExist(dump_dir_name);
+      len--;
+    } else {
+      LOG(INFO) << "Syncing, cannot delete " << dump_dir_name << " dump file";
+    }
+  }
+  res_.SetRes(CmdRes::kOk);
+}
+
+void EchoCmd::DoInitial() {
+  if (!CheckArg(argv_.size())) {
+    res_.SetRes(CmdRes::kWrongNum, kCmdNameEcho);
+    return;
+  }
+  body_ = argv_[1];
+}
+
+void EchoCmd::Do() { res_.AppendString(body_); }
+
+void ScandbCmd::DoInitial() {
+  if (!CheckArg(argv_.size())) {
+    res_.SetRes(CmdRes::kWrongNum, kCmdNameScandb);
+    return;
+  }
+  if (argv_.size() == 1) {
+    type_ = storage::DataType::kAll;
+  } else {
+    if (strcasecmp(argv_[1].data(), "string") == 0) {
+      type_ = storage::DataType::kStrings;
+    } else if (strcasecmp(argv_[1].data(), "hash") == 0) {
+      type_ = storage::DataType::kHashes;
+    } else if (strcasecmp(argv_[1].data(), "set") == 0) {
+      type_ = storage::DataType::kSets;
+    } else if (strcasecmp(argv_[1].data(), "zset") == 0) {
+      type_ = storage::DataType::kZSets;
+    } else if (strcasecmp(argv_[1].data(), "list") == 0) {
+      type_ = storage::DataType::kLists;
+    } else {
+      res_.SetRes(CmdRes::kInvalidDbType);
+    }
+  }
+}
+
+void ScandbCmd::Do() {
+  std::shared_ptr<DB> dbs = g_pika_server->GetDB(db_name_);
+  if (!dbs) {
+    res_.SetRes(CmdRes::kInvalidDB);
+  } else {
+    dbs->ScanDatabase(type_);
+    res_.SetRes(CmdRes::kOk);
+  }
+}
+
+void SlowlogCmd::DoInitial() {
+  if (!CheckArg(argv_.size())) {
+    res_.SetRes(CmdRes::kWrongNum, kCmdNameSlowlog);
+    return;
+  }
+  if (argv_.size() == 2 && (strcasecmp(argv_[1].data(), "reset") == 0)) {
+    condition_ = SlowlogCmd::kRESET;
+  } else if (argv_.size() == 2 && (strcasecmp(argv_[1].data(), "len") == 0)) {
+    condition_ = SlowlogCmd::kLEN;
+  } else if ((argv_.size() == 2 || argv_.size() == 3) && (strcasecmp(argv_[1].data(), "get") == 0)) {
+    condition_ = SlowlogCmd::kGET;
+    if (argv_.size() == 3 && (pstd::string2int(argv_[2].data(), argv_[2].size(), &number_) == 0)) {
+      res_.SetRes(CmdRes::kInvalidInt);
+      return;
+    }
+  } else {
+    res_.SetRes(CmdRes::kErrOther, "Unknown SLOWLOG subcommand or wrong # of args. Try GET, RESET, LEN.");
+    return;
+  }
+}
+
+void SlowlogCmd::Do() {
+  if (condition_ == SlowlogCmd::kRESET) {
+    g_pika_server->SlowlogReset();
+    res_.SetRes(CmdRes::kOk);
+  } else if (condition_ == SlowlogCmd::kLEN) {
+    res_.AppendInteger(g_pika_server->SlowlogLen());
+  } else {
+    std::vector<SlowlogEntry> slowlogs;
+    g_pika_server->SlowlogObtain(number_, &slowlogs);
+    res_.AppendArrayLenUint64(slowlogs.size());
+    for (const auto& slowlog : slowlogs) {
+      res_.AppendArrayLen(4);
+      res_.AppendInteger(slowlog.id);
+      res_.AppendInteger(slowlog.start_time);
+      res_.AppendInteger(slowlog.duration);
+      res_.AppendArrayLenUint64(slowlog.argv.size());
+      for (const auto& arg : slowlog.argv) {
+        res_.AppendString(arg);
+      }
+    }
+  }
+}
+
+void PaddingCmd::DoInitial() {
+  if (!CheckArg(argv_.size())) {
+    res_.SetRes(CmdRes::kWrongNum, kCmdNamePadding);
+    return;
+  }
+}
+
+void PaddingCmd::Do() { res_.SetRes(CmdRes::kOk); }
+
+std::string PaddingCmd::ToRedisProtocol() {
+  return PikaBinlogTransverter::ConstructPaddingBinlog(
+      BinlogType::TypeFirst,
+      argv_[1].size() + BINLOG_ITEM_HEADER_SIZE + PADDING_BINLOG_PROTOCOL_SIZE + SPACE_STROE_PARAMETER_LENGTH);
+}
+
+void PKPatternMatchDelCmd::DoInitial() {
+  if (!CheckArg(argv_.size())) {
+    res_.SetRes(CmdRes::kWrongNum, kCmdNamePKPatternMatchDel);
+    return;
+  }
+  pattern_ = argv_[1];
+  max_count_ = storage::BATCH_DELETE_LIMIT;
+  if (argv_.size() > 2) {
+    if (pstd::string2int(argv_[2].data(), argv_[2].size(), &max_count_) == 0 || max_count_ < 1 || max_count_ > storage::BATCH_DELETE_LIMIT) {
+      res_.SetRes(CmdRes::kInvalidInt);
+      return;
+    }
+  }
+}
+
+void PKPatternMatchDelCmd::Do() {
+  int64_t count = 0;
+  rocksdb::Status s = db_->storage()->PKPatternMatchDelWithRemoveKeys(pattern_, &count, &remove_keys_, max_count_);
+
+  if (s.ok()) {
+    res_.AppendInteger(count);
+    s_ = rocksdb::Status::OK();
+    for (const auto& key : remove_keys_) {
+      RemSlotKey(key, db_);
+    }
+  } else {
+    res_.SetRes(CmdRes::kErrOther, s.ToString());
+    if (count >= 0) {
+      s_ = rocksdb::Status::OK();
+      for (const auto& key : remove_keys_) {
+        RemSlotKey(key, db_);
+      }
+    }
+  }
+}
+
+void PKPatternMatchDelCmd::DoThroughDB() {
+  Do();
+}
+
+void PKPatternMatchDelCmd::DoUpdateCache() {
+  if (s_.ok()) {
+    db_->cache()->Del(remove_keys_);
+  }
+}
+
+void PKPatternMatchDelCmd::DoBinlog() {
+  std::string opt = "del";
+  for (auto& key : remove_keys_) {
+    argv_.clear();
+    argv_.emplace_back(opt);
+    argv_.emplace_back(key);
+    Cmd::DoBinlog();
+  }
+}
+
+void DummyCmd::DoInitial() {}
+
+void DummyCmd::Do() {}
+
+void QuitCmd::DoInitial() {
+  if (!CheckArg(argv_.size())) {
+    res_.SetRes(CmdRes::kWrongNum, kCmdNameQuit);
+  }
+}
+
+void QuitCmd::Do() {
+  res_.SetRes(CmdRes::kOk);
+  LOG(INFO) << "QuitCmd will close connection " << GetConn()->String();
+  GetConn()->SetClose(true);
+}
+
+/*
+ * HELLO [<protover> [AUTH <username> <password>] [SETNAME <clientname>]]
+ */
+void HelloCmd::DoInitial() {
+  if (!CheckArg(argv_.size())) {
+    res_.SetRes(CmdRes::kWrongNum, kCmdNameHello);
+    return;
+  }
+}
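DoInitial above only checks arity; Do() below answers with a flat RESP2 array of field/value pairs. As a rough illustration only, a HELLO 2 sent to a single-node master would come back shaped like this (field order follows the fvs vector in the code; exact values depend on the deployment):

*8
$6
server
$5
redis
$5
proto
:2
$4
mode
$7
classic
$4
role
$6
master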
+void HelloCmd::Do() {
+  size_t next_arg = 1;
+  long ver = 0;
+  if (argv_.size() >= 2) {
+    if (pstd::string2int(argv_[next_arg].data(), argv_[next_arg].size(), &ver) == 0) {
+      res_.SetRes(CmdRes::kErrOther, "Protocol version is not an integer or out of range");
+      return;
+    }
+    next_arg++;
+
+    if (ver < 2 || ver > 3) {
+      res_.AppendContent("-NOPROTO unsupported protocol version");
+      return;
+    }
+  }
+
+  std::shared_ptr<net::NetConn> conn = GetConn();
+  if (!conn) {
+    res_.SetRes(CmdRes::kErrOther, kCmdNameHello);
+    return;
+  }
+
+  for (; next_arg < argv_.size(); next_arg++) {
+    size_t more_args = argv_.size() - next_arg - 1;
+    const std::string opt = argv_[next_arg];
+    if ((strcasecmp(opt.data(), "AUTH") == 0) && (more_args >= 2)) {
+      const std::string userName = argv_[next_arg + 1];
+      const std::string pwd = argv_[next_arg + 2];
+      bool defaultAuth = false;
+      if (userName == Acl::DefaultUser) {
+        defaultAuth = true;
+      }
+      auto authResult = AuthenticateUser(name(), userName, pwd, conn, defaultAuth);
+      switch (authResult) {
+        case AuthResult::INVALID_CONN:
+          res_.SetRes(CmdRes::kErrOther, kCmdNameHello);
+          return;
+        case AuthResult::INVALID_PASSWORD:
+          res_.AppendContent("-WRONGPASS invalid username-password pair or user is disabled.");
+          return;
+        case AuthResult::NO_REQUIRE_PASS:
+          res_.SetRes(CmdRes::kErrOther, "Client sent AUTH, but no password is set");
+          return;
+        default:
+          break;
+      }
+      next_arg += 2;
+    } else if ((strcasecmp(opt.data(), "SETNAME") == 0) && (more_args != 0U)) {
+      const std::string name = argv_[next_arg + 1];
+      if (pstd::isspace(name)) {
+        res_.SetRes(CmdRes::kErrOther, "Client names cannot contain spaces, newlines or special characters.");
+        return;
+      }
+      conn->set_name(name);
+      next_arg++;
+    } else {
+      res_.SetRes(CmdRes::kErrOther, "Syntax error in HELLO option " + opt);
+      return;
+    }
+  }
+
+  std::string raw;
+  std::vector<storage::FieldValue> fvs{
+      {"server", "redis"},
+  };
+  // just for redis resp2 protocol
+  fvs.push_back({"proto", "2"});
+  fvs.push_back({"mode", "classic"});
+  int host_role = g_pika_server->role();
+  switch (host_role) {
+    case PIKA_ROLE_SINGLE:
+    case PIKA_ROLE_MASTER:
+      fvs.push_back({"role", "master"});
+      break;
+    case PIKA_ROLE_SLAVE:
+      fvs.push_back({"role", "slave"});
+      break;
+    case PIKA_ROLE_MASTER | PIKA_ROLE_SLAVE:
+      fvs.push_back({"role", "master&&slave"});
+      break;
+    default:
+      LOG(INFO) << "unknown role " << host_role << " client ip:port " << conn->ip_port();
+      return;
+  }
+
+  for (const auto& fv : fvs) {
+    RedisAppendLenUint64(raw, fv.field.size(), "$");
+    RedisAppendContent(raw, fv.field);
+    if (fv.field == "proto") {
+      pstd::string2int(fv.value.data(), fv.value.size(), &ver);
+      RedisAppendLen(raw, static_cast<int64_t>(ver), ":");
+      continue;
+    }
+    RedisAppendLenUint64(raw, fv.value.size(), "$");
+    RedisAppendContent(raw, fv.value);
+  }
+  res_.AppendArrayLenUint64(fvs.size() * 2);
+  res_.AppendStringRaw(raw);
+}
+
+void DiskRecoveryCmd::DoInitial() {
+  if (!CheckArg(argv_.size())) {
+    res_.SetRes(CmdRes::kWrongNum, kCmdNameDiskRecovery);
+    return;
+  }
+}
+
+void DiskRecoveryCmd::Do() {
+  struct statvfs disk_info;
+  int ret = statvfs(g_pika_conf->db_path().c_str(), &disk_info);
+  if (ret == -1) {
+    std::stringstream tmp_stream;
+    tmp_stream << "statvfs error:" << strerror(errno);
+    const std::string res = tmp_stream.str();
+    res_.SetRes(CmdRes::kErrOther, res);
+    return;
+  }
+  int64_t least_free_size = g_pika_conf->least_resume_free_disk_size();
+  uint64_t free_size = disk_info.f_bsize * disk_info.f_bfree;
+  if (free_size < least_free_size) {
+    res_.SetRes(CmdRes::kErrOther, "The available
disk capacity is insufficient"); + return; + } + std::shared_mutex dbs_rw; + std::shared_lock db_rwl(dbs_rw); + // loop every db + for (const auto& db_item : g_pika_server->GetDB()) { + if (!db_item.second) { + continue; + } + db_item.second->SetBinlogIoErrorrelieve(); + background_errors_.clear(); + db_item.second->DBLockShared(); + db_item.second->storage()->GetUsage(storage::PROPERTY_TYPE_ROCKSDB_BACKGROUND_ERRORS, &background_errors_); + db_item.second->DBUnlockShared(); + for (const auto &item: background_errors_) { + if (item.second != 0) { + rocksdb::Status s = db_item.second->storage()->GetDBByIndex(item.first)->Resume(); + if (!s.ok()) { + res_.SetRes(CmdRes::kErrOther, "The restore operation failed."); + } + } + } + } + res_.SetRes(CmdRes::kOk, "The disk error has been recovered"); +} + +void ClearReplicationIDCmd::DoInitial() { + if (!CheckArg(argv_.size())) { + res_.SetRes(CmdRes::kWrongNum, kCmdNameClearReplicationID); + return; + } +} + +void ClearReplicationIDCmd::Do() { + g_pika_conf->SetReplicationID(""); + g_pika_conf->SetInternalUsedUnFinishedFullSync(""); + g_pika_conf->ConfigRewriteReplicationID(); + res_.SetRes(CmdRes::kOk, "ReplicationID is cleared"); +} + +void DisableWalCmd::DoInitial() { + if (!CheckArg(argv_.size())) { + res_.SetRes(CmdRes::kWrongNum, kCmdNameDisableWal); + return; + } +} + +void DisableWalCmd::Do() { + std::string option = argv_[1].data(); + bool is_wal_disable = false; + if (option.compare("true") == 0) { + is_wal_disable = true; + } else if (option.compare("false") == 0) { + is_wal_disable = false; + } else { + res_.SetRes(CmdRes::kErrOther, "Invalid parameter"); + return; + } + db_->storage()->DisableWal(is_wal_disable); + res_.SetRes(CmdRes::kOk, "Wal options is changed"); +} + +void CacheCmd::DoInitial() { + if (!CheckArg(argv_.size())) { + res_.SetRes(CmdRes::kWrongNum, kCmdNameCache); + return; + } + if (!strcasecmp(argv_[1].data(), "clear")) { + if (argv_.size() == 3 && !strcasecmp(argv_[2].data(), "db")) { + condition_ = kCLEAR_DB; + } else if (argv_.size() == 3 && !strcasecmp(argv_[2].data(), "hitratio")) { + condition_ = kCLEAR_HITRATIO; + } else { + res_.SetRes(CmdRes::kErrOther, "Unknown cache subcommand or wrong # of args."); + } + } else if (argv_.size() >= 3 && !strcasecmp(argv_[1].data(), "del")) { + condition_ = kDEL_KEYS; + keys_.assign(argv_.begin() + 2, argv_.end()); + } else if (argv_.size() == 2 && !strcasecmp(argv_[1].data(), "randomkey")) { + condition_ = kRANDOM_KEY; + } else { + res_.SetRes(CmdRes::kErrOther, "Unknown cache subcommand or wrong # of args."); + } + return; +} + +void CacheCmd::Do() { + std::string key; + switch (condition_) { + case kCLEAR_DB: + g_pika_server->ClearCacheDbAsync(db_); + res_.SetRes(CmdRes::kOk); + break; + case kCLEAR_HITRATIO: + g_pika_server->ClearHitRatio(db_); + res_.SetRes(CmdRes::kOk); + break; + case kDEL_KEYS: + db_->cache()->Del(keys_); + res_.SetRes(CmdRes::kOk); + break; + case kRANDOM_KEY: + s_ = db_->cache()->RandomKey(&key); + if (!s_.ok()) { + res_.AppendStringLen(-1); + } else { + res_.AppendStringLen(key.size()); + res_.AppendContent(key); + } + break; + default: + res_.SetRes(CmdRes::kErrOther, "Unknown cmd"); + break; + } + return; +} + +void ClearCacheCmd::DoInitial() { + if (!CheckArg(argv_.size())) { + res_.SetRes(CmdRes::kWrongNum, kCmdNameClearCache); + return; + } +} + +void ClearCacheCmd::Do() { + // clean cache + if (PIKA_CACHE_NONE != g_pika_conf->cache_mode()) { + g_pika_server->ClearCacheDbAsync(db_); + } + res_.SetRes(CmdRes::kOk, "Cache is cleared"); +} + 
+#ifdef WITH_COMMAND_DOCS
+
+bool CommandCmd::CommandFieldCompare::operator()(const std::string& a, const std::string& b) const {
+  int av{0};
+  int bv{0};
+  if (auto avi = kFieldNameOrder.find(a); avi != kFieldNameOrder.end()) {
+    av = avi->second;
+  }
+  if (auto bvi = kFieldNameOrder.find(b); bvi != kFieldNameOrder.end()) {
+    bv = bvi->second;
+  }
+  return av < bv;
+}
+
+CmdRes& CommandCmd::EncodableInt::EncodeTo(CmdRes& res) const {
+  res.AppendInteger(value_);
+  return res;
+}
+
+CommandCmd::EncodablePtr CommandCmd::EncodableInt::MergeFrom(const CommandCmd::EncodablePtr& other) const {
+  if (auto pe = std::dynamic_pointer_cast<EncodableInt>(other)) {
+    return std::make_shared<EncodableInt>(value_ + pe->value_);
+  }
+  return std::make_shared<EncodableInt>(value_);
+}
+
+CmdRes& CommandCmd::EncodableString::EncodeTo(CmdRes& res) const {
+  res.AppendString(value_);
+  return res;
+}
+
+CommandCmd::EncodablePtr CommandCmd::EncodableString::MergeFrom(const CommandCmd::EncodablePtr& other) const {
+  if (auto pe = std::dynamic_pointer_cast<EncodableString>(other)) {
+    return std::make_shared<EncodableString>(value_ + pe->value_);
+  }
+  return std::make_shared<EncodableString>(value_);
+}
+
+template <typename Map>
+CmdRes& CommandCmd::EncodableMap::EncodeTo(CmdRes& res, const Map& map, const Map& specialization) {
+  std::string raw_string;
+  RedisAppendLen(raw_string, map.size() * 2, kPrefix);
+  res.AppendStringRaw(raw_string);
+  for (const auto& kv : map) {
+    res.AppendString(kv.first);
+    if (auto iter = specialization.find(kv.first); iter != specialization.end()) {
+      res << *(*kv.second + iter->second);
+    } else {
+      res << *kv.second;
+    }
+  }
+  return res;
+}
+
+CmdRes& CommandCmd::EncodableMap::EncodeTo(CmdRes& res) const { return EncodeTo(res, values_); }
+
+CommandCmd::EncodablePtr CommandCmd::EncodableMap::MergeFrom(const CommandCmd::EncodablePtr& other) const {
+  if (auto pe = std::dynamic_pointer_cast<EncodableMap>(other)) {
+    auto values = CommandCmd::EncodableMap::RedisMap(values_.cbegin(), values_.cend());
+    for (const auto& pair : pe->values_) {
+      auto iter = values.find(pair.first);
+      if (iter == values.end()) {
+        values[pair.first] = pair.second;
+      } else {
+        iter->second = (*iter->second + pair.second);
+      }
+    }
+    return std::make_shared<EncodableMap>(values);
+  }
+  return std::make_shared<EncodableMap>(
+      CommandCmd::EncodableMap::RedisMap(values_.cbegin(), values_.cend()));
+}
+
+CmdRes& CommandCmd::EncodableSet::EncodeTo(CmdRes& res) const {
+  std::string raw_string;
+  RedisAppendLen(raw_string, values_.size(), kPrefix);
+  res.AppendStringRaw(raw_string);
+  for (const auto& item : values_) {
+    res << *item;
+  }
+  return res;
+}
+
+CommandCmd::EncodablePtr CommandCmd::EncodableSet::MergeFrom(const CommandCmd::EncodablePtr& other) const {
+  if (auto pe = std::dynamic_pointer_cast<EncodableSet>(other)) {
+    auto values = std::vector<EncodablePtr>(values_.cbegin(), values_.cend());
+    values.insert(values.end(), pe->values_.cbegin(), pe->values_.cend());
+    return std::make_shared<EncodableSet>(values);
+  }
+  return std::make_shared<EncodableSet>(
+      std::vector<EncodablePtr>(values_.cbegin(), values_.cend()));
+}
+
+CmdRes& CommandCmd::EncodableArray::EncodeTo(CmdRes& res) const {
+  res.AppendArrayLen(values_.size());
+  for (const auto& item : values_) {
+    res << *item;
+  }
+  return res;
+}
+
+CommandCmd::EncodablePtr CommandCmd::EncodableArray::MergeFrom(const CommandCmd::EncodablePtr& other) const {
+  if (auto pe = std::dynamic_pointer_cast<EncodableArray>(other)) {
+    auto values = std::vector<EncodablePtr>(values_.cbegin(), values_.cend());
+    values.insert(values.end(), pe->values_.cbegin(), pe->values_.cend());
+    return std::make_shared<EncodableArray>(values);
+  }
+  return std::make_shared<EncodableArray>(
+      std::vector<EncodablePtr>(values_.cbegin(), values_.cend()));
+}
+
+CmdRes& CommandCmd::EncodableStatus::EncodeTo(CmdRes& res) const {
+  res.AppendStringRaw(kPrefix + value_ + kNewLine);
+  return res;
+}
+
+CommandCmd::EncodablePtr CommandCmd::EncodableStatus::MergeFrom(const CommandCmd::EncodablePtr& other) const {
+  if (auto pe = std::dynamic_pointer_cast<EncodableStatus>(other)) {
+    return std::make_shared<EncodableStatus>(value_ + pe->value_);
+  }
+  return std::make_shared<EncodableStatus>(value_);
+}
+
+const std::unordered_map<std::string, int> CommandCmd::CommandFieldCompare::kFieldNameOrder{
+    {kPikaField, 0},         {"name", 100},      {"type", 101},
+    {"spec", 102},           {"index", 103},     {"display_text", 104},
+    {"key_spec_index", 105}, {"token", 106},     {"summary", 107},
+    {"since", 108},          {"group", 109},     {"complexity", 110},
+    {"module", 111},         {"doc_flags", 112}, {"deprecated_since", 113},
+    {"notes", 114},          {"flags", 115},     {"begin_search", 116},
+    {"replaced_by", 117},    {"history", 118},   {"arguments", 119},
+    {"subcommands", 120},    {"keyword", 121},   {"startfrom", 122},
+    {"find_keys", 123},      {"lastkey", 124},   {"keynum", 125},
+    {"keynumidx", 126},      {"firstkey", 127},  {"keystep", 128},
+    {"limit", 129},
+};
+const std::string CommandCmd::EncodableMap::kPrefix = "*";
+const std::string CommandCmd::EncodableSet::kPrefix = "*";
+const std::string CommandCmd::EncodableStatus::kPrefix = "+";
+
+void CommandCmd::DoInitial() {
+  if (!CheckArg(argv_.size())) {  // The original redis command's arity is -1
+    res_.SetRes(CmdRes::kWrongNum, kCmdNameEcho);
+    return;
+  }
+  if (argv_.size() < 2) {  // But currently only the docs subcommand is implemented
+    res_.SetRes(CmdRes::kErrOther, "only docs subcommand supported");
+    return;
+  }
+  if (command_ = argv_[1]; strcasecmp(command_.data(), "docs") != 0) {
+    res_.SetRes(CmdRes::kErrOther, "unknown command '" + command_ + "'");
+    return;
+  }
+  cmds_begin_ = argv_.cbegin() + 2;
+  cmds_end_ = argv_.cend();
+}
+
+extern std::unique_ptr<PikaCmdTableManager> g_pika_cmd_table_manager;
+
+void CommandCmd::Do(std::shared_ptr<DB> dbs) {
+  std::unordered_map<std::string, EncodablePtr> cmds;
+  std::unordered_map<std::string, EncodablePtr> specializations;
+  if (cmds_begin_ == cmds_end_) {
+    cmds = kCommandDocs;
+    specializations.insert(kPikaSpecialization.cbegin(), kPikaSpecialization.cend());
+  } else {
+    for (auto iter = cmds_begin_; iter != cmds_end_; ++iter) {
+      if (auto cmd = kCommandDocs.find(*iter); cmd != kCommandDocs.end()) {
+        cmds.insert(*cmd);
+      }
+      if (auto specialization = kPikaSpecialization.find(*iter); specialization != kPikaSpecialization.end()) {
+        specializations.insert(*specialization);
+      }
+    }
+  }
+  for (const auto& cmd : cmds) {
+    if (!g_pika_cmd_table_manager->CmdExist(cmd.first)) {
+      specializations[cmd.first] = kNotSupportedSpecialization;
+    } else if (auto iter = specializations.find(cmd.first); iter == specializations.end()) {
+      specializations[cmd.first] = kCompatibleSpecialization;
+    }
+  }
+  EncodableMap::EncodeTo(res_, cmds, specializations);
+}
+
+#endif  // WITH_COMMAND_DOCS
diff --git a/tools/pika_migrate/src/pika_auxiliary_thread.cc b/tools/pika_migrate/src/pika_auxiliary_thread.cc
new file mode 100644
index 0000000000..003a43c93b
--- /dev/null
+++ b/tools/pika_migrate/src/pika_auxiliary_thread.cc
@@ -0,0 +1,52 @@
+// Copyright (c) 2019-present, Qihoo, Inc. All rights reserved.
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree. An additional grant
+// of patent rights can be found in the PATENTS file in the same directory.
+ +#include "include/pika_define.h" +#include "include/pika_auxiliary_thread.h" +#include "include/pika_rm.h" +#include "include/pika_server.h" + +extern PikaServer* g_pika_server; +extern std::unique_ptr g_pika_rm; + +using namespace std::chrono_literals; + +PikaAuxiliaryThread::~PikaAuxiliaryThread() { + StopThread(); + LOG(INFO) << "PikaAuxiliary thread " << thread_id() << " exit!!!"; +} + +void* PikaAuxiliaryThread::ThreadMain() { + while (!should_stop()) { + if (g_pika_server->ShouldMetaSync()) { + g_pika_rm->SendMetaSyncRequest(); + } else if (g_pika_server->MetaSyncDone()) { + g_pika_rm->RunSyncSlaveDBStateMachine(); + } + + pstd::Status s = g_pika_rm->CheckSyncTimeout(pstd::NowMicros()); + if (!s.ok()) { + LOG(WARNING) << s.ToString(); + } + + g_pika_server->CheckLeaderProtectedMode(); + + // TODO(whoiami) timeout + s = g_pika_server->TriggerSendBinlogSync(); + if (!s.ok()) { + LOG(WARNING) << s.ToString(); + } + // send to peer + int res = g_pika_server->SendToPeer(); + if (res == 0) { + // sleep 100 ms + std::unique_lock lock(mu_); + cv_.wait_for(lock, 100ms); + } else { + // LOG_EVERY_N(INFO, 1000) << "Consume binlog number " << res; + } + } + return nullptr; +} diff --git a/tools/pika_migrate/src/pika_binlog.cc b/tools/pika_migrate/src/pika_binlog.cc new file mode 100644 index 0000000000..6f4ed2861d --- /dev/null +++ b/tools/pika_migrate/src/pika_binlog.cc @@ -0,0 +1,437 @@ +// Copyright (c) 2015-present, Qihoo, Inc. All rights reserved. +// This source code is licensed under the BSD-style license found in the +// LICENSE file in the root directory of this source tree. An additional grant +// of patent rights can be found in the PATENTS file in the same directory. + +#include "include/pika_binlog.h" + +#include +#include +#include + +#include + +#include "include/pika_binlog_transverter.h" +#include "pstd/include/pstd_defer.h" +#include "pstd_status.h" + +using pstd::Status; + +std::string NewFileName(const std::string& name, const uint32_t current) { + char buf[256]; + snprintf(buf, sizeof(buf), "%s%u", name.c_str(), current); + return {buf}; +} + +/* + * Version + */ +Version::Version(const std::shared_ptr& save) : save_(save) { + assert(save_ != nullptr); +} + +Version::~Version() { StableSave(); } + +Status Version::StableSave() { + char* p = save_->GetData(); + memcpy(p, &pro_num_, sizeof(uint32_t)); + p += 4; + memcpy(p, &pro_offset_, sizeof(uint64_t)); + p += 8; + memcpy(p, &logic_id_, sizeof(uint64_t)); + p += 8; + memcpy(p, &term_, sizeof(uint32_t)); + return Status::OK(); +} + +Status Version::Init() { + Status s; + if (save_->GetData()) { + memcpy(reinterpret_cast(&pro_num_), save_->GetData(), sizeof(uint32_t)); + memcpy(reinterpret_cast(&pro_offset_), save_->GetData() + 4, sizeof(uint64_t)); + memcpy(reinterpret_cast(&logic_id_), save_->GetData() + 12, sizeof(uint64_t)); + memcpy(reinterpret_cast(&term_), save_->GetData() + 20, sizeof(uint32_t)); + return Status::OK(); + } else { + return Status::Corruption("version init error"); + } +} + +/* + * Binlog + */ +Binlog::Binlog(std::string binlog_path, const int file_size) + : opened_(false), + binlog_path_(std::move(binlog_path)), + file_size_(file_size), + binlog_io_error_(false) { + // To intergrate with old version, we don't set mmap file size to 100M; + // pstd::SetMmapBoundSize(file_size); + // pstd::kMmapBoundSize = 1024 * 1024 * 100; + + Status s; + + pstd::CreateDir(binlog_path_); + + filename_ = binlog_path_ + kBinlogPrefix; + const std::string manifest = binlog_path_ + kManifest; + std::string profile; + + if 
(!pstd::FileExists(manifest)) { + LOG(INFO) << "Binlog: Manifest file not exist, we create a new one."; + + profile = NewFileName(filename_, pro_num_); + s = pstd::NewWritableFile(profile, queue_); + if (!s.ok()) { + LOG(FATAL) << "Binlog: new " << filename_ << " " << s.ToString(); + } + std::unique_ptr tmp_file; + s = pstd::NewRWFile(manifest, tmp_file); + versionfile_.reset(tmp_file.release()); + if (!s.ok()) { + LOG(FATAL) << "Binlog: new versionfile error " << s.ToString(); + } + + version_ = std::make_unique(versionfile_); + version_->StableSave(); + } else { + LOG(INFO) << "Binlog: Find the exist file."; + std::unique_ptr tmp_file; + s = pstd::NewRWFile(manifest, tmp_file); + versionfile_.reset(tmp_file.release()); + if (s.ok()) { + version_ = std::make_unique(versionfile_); + version_->Init(); + pro_num_ = version_->pro_num_; + + // Debug + // version_->debug(); + } else { + LOG(FATAL) << "Binlog: open versionfile error"; + } + + profile = NewFileName(filename_, pro_num_); + DLOG(INFO) << "Binlog: open profile " << profile; + s = pstd::AppendWritableFile(profile, queue_, version_->pro_offset_); + if (!s.ok()) { + LOG(FATAL) << "Binlog: Open file " << profile << " error " << s.ToString(); + } + + uint64_t filesize = queue_->Filesize(); + DLOG(INFO) << "Binlog: filesize is " << filesize; + } + + InitLogFile(); +} + +Binlog::~Binlog() { + std::lock_guard l(mutex_); + Close(); +} + +void Binlog::Close() { + if (!opened_.load()) { + return; + } + opened_.store(false); +} + +void Binlog::InitLogFile() { + assert(queue_ != nullptr); + + uint64_t filesize = queue_->Filesize(); + block_offset_ = static_cast(filesize % kBlockSize); + + opened_.store(true); +} + +Status Binlog::IsOpened() { + if (!opened_.load()) { + return Status::Busy("Binlog is not open yet"); + } + return Status::OK(); +} + +Status Binlog::GetProducerStatus(uint32_t* filenum, uint64_t* pro_offset, uint32_t* term, uint64_t* logic_id) { + if (!opened_.load()) { + return Status::Busy("Binlog is not open yet"); + } + + std::shared_lock l(version_->rwlock_); + + *filenum = version_->pro_num_; + *pro_offset = version_->pro_offset_; + if (logic_id) { + *logic_id = version_->logic_id_; + } + if (term) { + *term = version_->term_; + } + + return Status::OK(); +} + +// Note: mutex lock should be held +Status Binlog::Put(const std::string& item) { + if (!opened_.load()) { + return Status::Busy("Binlog is not open yet"); + } + uint32_t filenum = 0; + uint32_t term = 0; + uint64_t offset = 0; + uint64_t logic_id = 0; + + Lock(); + DEFER { + Unlock(); + }; + + Status s = GetProducerStatus(&filenum, &offset, &term, &logic_id); + if (!s.ok()) { + return s; + } + logic_id++; + std::string data = PikaBinlogTransverter::BinlogEncode(BinlogType::TypeFirst, + time(nullptr), term, logic_id, filenum, offset, item, {}); + + s = Put(data.c_str(), static_cast(data.size())); + if (!s.ok()) { + binlog_io_error_.store(true); + } + return s; +} + +// Note: mutex lock should be held +Status Binlog::Put(const char* item, int len) { + Status s; + + /* Check to roll log file */ + uint64_t filesize = queue_->Filesize(); + if (filesize > file_size_) { + std::unique_ptr queue; + std::string profile = NewFileName(filename_, pro_num_ + 1); + s = pstd::NewWritableFile(profile, queue); + if (!s.ok()) { + LOG(ERROR) << "Binlog: new " << filename_ << " " << s.ToString(); + return s; + } + queue_.reset(); + queue_ = std::move(queue); + pro_num_++; + + { + std::lock_guard l(version_->rwlock_); + version_->pro_offset_ = 0; + version_->pro_num_ = pro_num_; + 
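Put(const std::string&) above frames the payload with BinlogEncode and then defers to the byte-level Put below. A caller-side sketch, assuming glog is already initialized and the directory is writable (illustrative only; error handling elided, and the trailing nullptr arguments assume the term/logic_id out-parameters are optional as the null checks in GetProducerStatus suggest):

// Append one RESP-encoded command and read back the producer position.
Binlog log("./binlog/", 100 * 1024 * 1024);        // log dir, roll files at ~100MB
pstd::Status s = log.Put("*1\r\n$4\r\nPING\r\n");  // payload gets the 34-byte item header
uint32_t filenum = 0;
uint64_t offset = 0;
if (s.ok()) {
  log.GetProducerStatus(&filenum, &offset, nullptr, nullptr);  // position after the write
}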
+
+// Note: mutex lock should be held
+Status Binlog::Put(const char* item, int len) {
+  Status s;
+
+  /* Check to roll log file */
+  uint64_t filesize = queue_->Filesize();
+  if (filesize > file_size_) {
+    std::unique_ptr<pstd::WritableFile> queue;
+    std::string profile = NewFileName(filename_, pro_num_ + 1);
+    s = pstd::NewWritableFile(profile, queue);
+    if (!s.ok()) {
+      LOG(ERROR) << "Binlog: new " << filename_ << " " << s.ToString();
+      return s;
+    }
+    queue_.reset();
+    queue_ = std::move(queue);
+    pro_num_++;
+
+    {
+      std::lock_guard l(version_->rwlock_);
+      version_->pro_offset_ = 0;
+      version_->pro_num_ = pro_num_;
+      version_->StableSave();
+    }
+    InitLogFile();
+  }
+
+  int pro_offset;
+  s = Produce(pstd::Slice(item, len), &pro_offset);
+  if (s.ok()) {
+    std::lock_guard l(version_->rwlock_);
+    version_->pro_offset_ = pro_offset;
+    version_->logic_id_++;
+    version_->StableSave();
+  }
+
+  return s;
+}
+
+Status Binlog::EmitPhysicalRecord(RecordType t, const char* ptr, size_t n, int* temp_pro_offset) {
+  Status s;
+  assert(n <= 0xffffff);
+  assert(block_offset_ + kHeaderSize + n <= kBlockSize);
+
+  char buf[kHeaderSize];
+
+  uint64_t now;
+  struct timeval tv;
+  gettimeofday(&tv, nullptr);
+  now = tv.tv_sec;
+  buf[0] = static_cast<char>(n & 0xff);
+  buf[1] = static_cast<char>((n & 0xff00) >> 8);
+  buf[2] = static_cast<char>(n >> 16);
+  buf[3] = static_cast<char>(now & 0xff);
+  buf[4] = static_cast<char>((now & 0xff00) >> 8);
+  buf[5] = static_cast<char>((now & 0xff0000) >> 16);
+  buf[6] = static_cast<char>((now & 0xff000000) >> 24);
+  buf[7] = static_cast<char>(t);
+
+  s = queue_->Append(pstd::Slice(buf, kHeaderSize));
+  if (s.ok()) {
+    s = queue_->Append(pstd::Slice(ptr, n));
+    if (s.ok()) {
+      s = queue_->Flush();
+    }
+  }
+  block_offset_ += static_cast<int>(kHeaderSize + n);
+
+  *temp_pro_offset += static_cast<int>(kHeaderSize + n);
+  return s;
+}
+
+Status Binlog::Produce(const pstd::Slice& item, int* temp_pro_offset) {
+  Status s;
+  const char* ptr = item.data();
+  size_t left = item.size();
+  bool begin = true;
+
+  *temp_pro_offset = static_cast<int>(version_->pro_offset_);
+  do {
+    const int leftover = static_cast<int>(kBlockSize) - block_offset_;
+    assert(leftover >= 0);
+    if (static_cast<size_t>(leftover) < kHeaderSize) {
+      if (leftover > 0) {
+        s = queue_->Append(pstd::Slice("\x00\x00\x00\x00\x00\x00\x00", leftover));
+        if (!s.ok()) {
+          return s;
+        }
+        *temp_pro_offset += leftover;
+      }
+      block_offset_ = 0;
+    }
+
+    const size_t avail = kBlockSize - block_offset_ - kHeaderSize;
+    const size_t fragment_length = (left < avail) ? left : avail;
+    RecordType type;
+    const bool end = (left == fragment_length);
+    if (begin && end) {
+      type = kFullType;
+    } else if (begin) {
+      type = kFirstType;
+    } else if (end) {
+      type = kLastType;
+    } else {
+      type = kMiddleType;
+    }
+
+    s = EmitPhysicalRecord(type, ptr, fragment_length, temp_pro_offset);
+    ptr += fragment_length;
+    left -= fragment_length;
+    begin = false;
+  } while (s.ok() && left > 0);
+
+  return s;
+}
+
+Status Binlog::AppendPadding(pstd::WritableFile* file, uint64_t* len) {
+  if (*len < kHeaderSize) {
+    return Status::OK();
+  }
+
+  Status s;
+  char buf[kBlockSize];
+  uint64_t now;
+  struct timeval tv;
+  gettimeofday(&tv, nullptr);
+  now = tv.tv_sec;
+
+  uint64_t left = *len;
+  while (left > 0 && s.ok()) {
+    uint32_t size = (left >= kBlockSize) ? kBlockSize : static_cast<uint32_t>(left);
+    if (size < kHeaderSize) {
+      break;
+    } else {
+      uint32_t bsize = size - kHeaderSize;
+      std::string binlog(bsize, '*');
+      buf[0] = static_cast<char>(bsize & 0xff);
+      buf[1] = static_cast<char>((bsize & 0xff00) >> 8);
+      buf[2] = static_cast<char>(bsize >> 16);
+      buf[3] = static_cast<char>(now & 0xff);
+      buf[4] = static_cast<char>((now & 0xff00) >> 8);
+      buf[5] = static_cast<char>((now & 0xff0000) >> 16);
+      buf[6] = static_cast<char>((now & 0xff000000) >> 24);
+      // kBadRecord here
+      buf[7] = static_cast<char>(kBadRecord);
+      s = file->Append(pstd::Slice(buf, kHeaderSize));
+      if (s.ok()) {
+        s = file->Append(pstd::Slice(binlog.data(), binlog.size()));
+        if (s.ok()) {
+          s = file->Flush();
+          left -= size;
+        }
+      }
+    }
+  }
+  *len -= left;
+  if (left != 0) {
+    LOG(WARNING) << "AppendPadding left bytes: " << left << " is less than kHeaderSize";
+  }
+  return s;
+}
+
+Status Binlog::SetProducerStatus(uint32_t pro_num, uint64_t pro_offset, uint32_t term, uint64_t index) {
+  if (!opened_.load()) {
+    return Status::Busy("Binlog is not open yet");
+  }
+
+  std::lock_guard l(mutex_);
+
+  // offset smaller than the first header
+  if (pro_offset < 4) {
+    pro_offset = 0;
+  }
+
+  queue_.reset();
+
+  std::string init_profile = NewFileName(filename_, 0);
+  if (pstd::FileExists(init_profile)) {
+    pstd::DeleteFile(init_profile);
+  }
+
+  std::string profile = NewFileName(filename_, pro_num);
+  if (pstd::FileExists(profile)) {
+    pstd::DeleteFile(profile);
+  }
+
+  pstd::NewWritableFile(profile, queue_);
+  Binlog::AppendPadding(queue_.get(), &pro_offset);
+
+  pro_num_ = pro_num;
+
+  {
+    std::lock_guard l(version_->rwlock_);
+    version_->pro_num_ = pro_num;
+    version_->pro_offset_ = pro_offset;
+    version_->term_ = term;
+    version_->logic_id_ = index;
+    version_->StableSave();
+  }
+
+  InitLogFile();
+  return Status::OK();
+}
+
+Status Binlog::Truncate(uint32_t pro_num, uint64_t pro_offset, uint64_t index) {
+  queue_.reset();
+  std::string profile = NewFileName(filename_, pro_num);
+  const int fd = open(profile.c_str(), O_RDWR | O_CLOEXEC, 0644);
+  if (fd < 0) {
+    return Status::IOError("fd open failed");
+  }
+  if (ftruncate(fd, static_cast<off_t>(pro_offset)) != 0) {
+    return Status::IOError("ftruncate failed");
+  }
+  close(fd);
+
+  pro_num_ = pro_num;
+  {
+    std::lock_guard l(version_->rwlock_);
+    version_->pro_num_ = pro_num;
+    version_->pro_offset_ = pro_offset;
+    version_->logic_id_ = index;
+    version_->StableSave();
+  }
+
+  Status s = pstd::AppendWritableFile(profile, queue_, version_->pro_offset_);
+  if (!s.ok()) {
+    return s;
+  }
+
+  InitLogFile();
+
+  return Status::OK();
+}
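To make the fragmentation in Produce() concrete, here is a worked example; it assumes the conventional 32KB log block (kBlockSize = 32768) together with the 8-byte header laid out by EmitPhysicalRecord above (the actual constants live elsewhere in the tree):

A 2000-byte record arriving at block_offset_ = 32000:
  leftover = 32768 - 32000 = 768          // >= 8, so no zero-fill is needed
  avail    = 768 - 8 = 760                // fragment 1: kFirstType, 760 bytes
  next block: avail = 32768 - 8 = 32760   // fragment 2: kLastType, remaining 1240 bytes
Bytes emitted: (8 + 760) + (8 + 1240) = 2016, which is exactly what *temp_pro_offset advances by.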
+ +#include "include/pika_binlog_reader.h" + +#include + +using pstd::Status; + +PikaBinlogReader::PikaBinlogReader(uint32_t cur_filenum, uint64_t cur_offset) + : cur_filenum_(cur_filenum), + cur_offset_(cur_offset), + backing_store_(std::make_unique(kBlockSize)), + buffer_() { + last_record_offset_ = cur_offset % kBlockSize; +} + +PikaBinlogReader::PikaBinlogReader() : backing_store_(std::make_unique(kBlockSize)), buffer_() { + last_record_offset_ = 0 % kBlockSize; +} + +void PikaBinlogReader::GetReaderStatus(uint32_t* cur_filenum, uint64_t* cur_offset) { + std::shared_lock l(rwlock_); + *cur_filenum = cur_filenum_; + *cur_offset = cur_offset_; +} + +bool PikaBinlogReader::ReadToTheEnd() { + uint32_t pro_num; + uint64_t pro_offset; + logger_->GetProducerStatus(&pro_num, &pro_offset); + std::shared_lock l(rwlock_); + return (pro_num == cur_filenum_ && pro_offset == cur_offset_); +} + +int PikaBinlogReader::Seek(const std::shared_ptr& logger, uint32_t filenum, uint64_t offset) { + std::string confile = NewFileName(logger->filename(), filenum); + if (!pstd::FileExists(confile)) { + LOG(WARNING) << confile << " not exits"; + return -1; + } + std::unique_ptr readfile; + if (!pstd::NewSequentialFile(confile, readfile).ok()) { + LOG(WARNING) << "New swquential " << confile << " failed"; + return -1; + } + if (queue_) { + queue_.reset(); + } + queue_ = std::move(readfile); + logger_ = logger; + + std::lock_guard l(rwlock_); + cur_filenum_ = filenum; + cur_offset_ = offset; + last_record_offset_ = cur_filenum_ % kBlockSize; + + pstd::Status s; + uint64_t start_block = (cur_offset_ / kBlockSize) * kBlockSize; + s = queue_->Skip((cur_offset_ / kBlockSize) * kBlockSize); + uint64_t block_offset = cur_offset_ % kBlockSize; + uint64_t ret = 0; + uint64_t res = 0; + bool is_error = false; + + while (true) { + if (res >= block_offset) { + cur_offset_ = start_block + res; + break; + } + ret = 0; + is_error = GetNext(&ret); + if (is_error) { + return -1; + } + res += ret; + } + last_record_offset_ = cur_offset_ % kBlockSize; + return 0; +} + +bool PikaBinlogReader::GetNext(uint64_t* size) { + uint64_t offset = 0; + pstd::Status s; + bool is_error = false; + + while (true) { + buffer_.clear(); + s = queue_->Read(kHeaderSize, &buffer_, backing_store_.get()); + if (!s.ok()) { + is_error = true; + return is_error; + } + + const char* header = buffer_.data(); + const uint32_t a = static_cast(header[0]) & 0xff; + const uint32_t b = static_cast(header[1]) & 0xff; + const uint32_t c = static_cast(header[2]) & 0xff; + const unsigned int type = header[7]; + const uint32_t length = a | (b << 8) | (c << 16); + + if (length > (kBlockSize - kHeaderSize)) { + return true; + } + + if (type == kFullType) { + s = queue_->Read(length, &buffer_, backing_store_.get()); + offset += kHeaderSize + length; + break; + } else if (type == kFirstType) { + s = queue_->Read(length, &buffer_, backing_store_.get()); + offset += kHeaderSize + length; + } else if (type == kMiddleType) { + s = queue_->Read(length, &buffer_, backing_store_.get()); + offset += kHeaderSize + length; + } else if (type == kLastType) { + s = queue_->Read(length, &buffer_, backing_store_.get()); + offset += kHeaderSize + length; + break; + } else if (type == kBadRecord) { + s = queue_->Read(length, &buffer_, backing_store_.get()); + offset += kHeaderSize + length; + break; + } else { + is_error = true; + break; + } + } + *size = offset; + return is_error; +} + +unsigned int PikaBinlogReader::ReadPhysicalRecord(pstd::Slice* result, uint32_t* filenum, uint64_t* 
offset) { + pstd::Status s; + if (kBlockSize - last_record_offset_ <= kHeaderSize) { + queue_->Skip(kBlockSize - last_record_offset_); + std::lock_guard l(rwlock_); + cur_offset_ += (kBlockSize - last_record_offset_); + last_record_offset_ = 0; + } + buffer_.clear(); + s = queue_->Read(kHeaderSize, &buffer_, backing_store_.get()); + if (s.IsEndFile()) { + return kEof; + } else if (!s.ok()) { + return kBadRecord; + } + + const char* header = buffer_.data(); + const uint32_t a = static_cast(header[0]) & 0xff; + const uint32_t b = static_cast(header[1]) & 0xff; + const uint32_t c = static_cast(header[2]) & 0xff; + const unsigned int type = header[7]; + const uint32_t length = a | (b << 8) | (c << 16); + + if (length > (kBlockSize - kHeaderSize)) { + return kBadRecord; + } + + if (type == kZeroType || length == 0) { + buffer_.clear(); + return kOldRecord; + } + + buffer_.clear(); + s = queue_->Read(length, &buffer_, backing_store_.get()); + *result = pstd::Slice(buffer_.data(), buffer_.size()); + last_record_offset_ += kHeaderSize + length; + if (s.ok()) { + std::lock_guard l(rwlock_); + *filenum = cur_filenum_; + cur_offset_ += (kHeaderSize + length); + *offset = cur_offset_; + } + return type; +} + +Status PikaBinlogReader::Consume(std::string* scratch, uint32_t* filenum, uint64_t* offset) { + Status s; + + pstd::Slice fragment; + while (true) { + const unsigned int record_type = ReadPhysicalRecord(&fragment, filenum, offset); + + switch (record_type) { + case kFullType: + *scratch = std::string(fragment.data(), fragment.size()); + s = Status::OK(); + break; + case kFirstType: + scratch->assign(fragment.data(), fragment.size()); + s = Status::NotFound("Middle Status"); + break; + case kMiddleType: + scratch->append(fragment.data(), fragment.size()); + s = Status::NotFound("Middle Status"); + break; + case kLastType: + scratch->append(fragment.data(), fragment.size()); + s = Status::OK(); + break; + case kEof: + return Status::EndFile("Eof"); + case kBadRecord: + LOG(WARNING) + << "Read BadRecord record, will decode failed, this record may dbsync padded record, not processed here"; + return Status::IOError("Data Corruption"); + case kOldRecord: + return Status::EndFile("Eof"); + default: + return Status::IOError("Unknow reason"); + } + if (s.ok()) { + break; + } + } + // DLOG(INFO) << "Binlog Sender consumer a msg: " << scratch; + return Status::OK(); +} + +// Get a whole message; +// Append to scratch; +// the status will be OK, IOError or Corruption, EndFile; +Status PikaBinlogReader::Get(std::string* scratch, uint32_t* filenum, uint64_t* offset) { + if (!logger_ || !queue_) { + return Status::Corruption("Not seek"); + } + scratch->clear(); + Status s = Status::OK(); + + do { + if (ReadToTheEnd()) { + return Status::EndFile("End of cur log file"); + } + s = Consume(scratch, filenum, offset); + if (s.IsEndFile()) { + std::string confile = NewFileName(logger_->filename(), cur_filenum_ + 1); + + // sleep 10ms wait produce thread generate the new binlog + usleep(10000); + + // Roll to next file need retry; + if (pstd::FileExists(confile)) { + DLOG(INFO) << "BinlogSender roll to new binlog" << confile; + queue_.reset(); + queue_ = nullptr; + + pstd::NewSequentialFile(confile, queue_); + { + std::lock_guard l(rwlock_); + cur_filenum_++; + cur_offset_ = 0; + } + last_record_offset_ = 0; + } else { + return Status::IOError("File Does Not Exists"); + } + } else { + break; + } + } while (s.IsEndFile()); + + return Status::OK(); +} diff --git a/tools/pika_migrate/src/pika_binlog_transverter.cc 
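A consumer-side sketch of the reader above (illustrative; logger stands for the std::shared_ptr<Binlog> being followed, and real callers also distinguish the IOError/Corruption statuses Get() can return):

PikaBinlogReader reader;
if (reader.Seek(logger, start_filenum, start_offset) != 0) {
  return;  // file missing or unreadable
}
std::string record;
uint32_t filenum = 0;
uint64_t offset = 0;
while (reader.Get(&record, &filenum, &offset).ok()) {
  // record is one reassembled item (kFirst/kMiddle/kLast fragments joined);
  // filenum/offset now point just past it.
}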
diff --git a/tools/pika_migrate/src/pika_binlog_transverter.cc b/tools/pika_migrate/src/pika_binlog_transverter.cc
new file mode 100644
index 0000000000..a6f3d2b271
--- /dev/null
+++ b/tools/pika_migrate/src/pika_binlog_transverter.cc
@@ -0,0 +1,176 @@
+// Copyright (c) 2018-present, Qihoo, Inc. All rights reserved.
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree. An additional grant
+// of patent rights can be found in the PATENTS file in the same directory.
+
+#include "include/pika_binlog_transverter.h"
+
+#include <cassert>
+#include <sstream>
+#include <glog/logging.h>
+
+#include "pstd/include/pstd_coding.h"
+
+#include "include/pika_command.h"
+#include "include/pika_define.h"
+#include "storage/storage.h"
+
+
+uint32_t BinlogItem::exec_time() const { return exec_time_; }
+
+uint32_t BinlogItem::term_id() const { return term_id_; }
+
+uint64_t BinlogItem::logic_id() const { return logic_id_; }
+
+uint32_t BinlogItem::filenum() const { return filenum_; }
+
+uint64_t BinlogItem::offset() const { return offset_; }
+
+std::string BinlogItem::content() const { return content_; }
+
+void BinlogItem::set_exec_time(uint32_t exec_time) { exec_time_ = exec_time; }
+
+void BinlogItem::set_term_id(uint32_t term_id) { term_id_ = term_id; }
+
+void BinlogItem::set_logic_id(uint64_t logic_id) { logic_id_ = logic_id; }
+
+void BinlogItem::set_filenum(uint32_t filenum) { filenum_ = filenum; }
+
+void BinlogItem::set_offset(uint64_t offset) { offset_ = offset; }
+
+std::string BinlogItem::ToString() const {
+  std::string str;
+  str.append("exec_time: " + std::to_string(exec_time_));
+  str.append(",term_id: " + std::to_string(term_id_));
+  str.append(",logic_id: " + std::to_string(logic_id_));
+  str.append(",filenum: " + std::to_string(filenum_));
+  str.append(",offset: " + std::to_string(offset_));
+  str.append("\ncontent: ");
+  for (char idx : content_) {
+    if (idx == '\n') {
+      str.append("\\n");
+    } else if (idx == '\r') {
+      str.append("\\r");
+    } else {
+      str.append(1, idx);
+    }
+  }
+  str.append("\n");
+  return str;
+}
+
+std::string PikaBinlogTransverter::BinlogEncode(BinlogType type, uint32_t exec_time, uint32_t term_id,
+                                                uint64_t logic_id, uint32_t filenum, uint64_t offset,
+                                                const std::string& content, const std::vector<std::string>& extends) {
+  std::string binlog;
+  pstd::PutFixed16(&binlog, type);
+  pstd::PutFixed32(&binlog, exec_time);
+  pstd::PutFixed32(&binlog, term_id);
+  pstd::PutFixed64(&binlog, logic_id);
+  pstd::PutFixed32(&binlog, filenum);
+  pstd::PutFixed64(&binlog, offset);
+  uint32_t content_length = content.size();
+  pstd::PutFixed32(&binlog, content_length);
+  binlog.append(content);
+  return binlog;
+}
+
+bool PikaBinlogTransverter::BinlogDecode(BinlogType type, const std::string& binlog, BinlogItem* binlog_item) {
+  uint16_t binlog_type = 0;
+  uint32_t content_length = 0;
+  pstd::Slice binlog_str = binlog;
+  pstd::GetFixed16(&binlog_str, &binlog_type);
+  if (binlog_type != type) {
+    LOG(ERROR) << "Binlog Item type error, expect type:" << type << " actually type: " << binlog_type;
+    return false;
+  }
+  pstd::GetFixed32(&binlog_str, &binlog_item->exec_time_);
+  pstd::GetFixed32(&binlog_str, &binlog_item->term_id_);
+  pstd::GetFixed64(&binlog_str, &binlog_item->logic_id_);
+  pstd::GetFixed32(&binlog_str, &binlog_item->filenum_);
+  pstd::GetFixed64(&binlog_str, &binlog_item->offset_);
+  pstd::GetFixed32(&binlog_str, &content_length);
+  if (binlog_str.size() == content_length) {
+    binlog_item->content_.assign(binlog_str.data(), content_length);
+  } else {
+    LOG(ERROR) << "Binlog Item get content error, expect length:" << content_length
+               << " left length:" << binlog_str.size();
+    return false;
+  }
+  return true;
+}
+
+/*
+ ******************* Type First Binlog Item Format ******************
+ * +-----------------------------------------------------------------+
+ * | Type (2 bytes)     | Create Time (4 bytes) | Term Id (4 bytes)  |
+ * |-----------------------------------------------------------------|
+ * | Logic Id (8 bytes) | File Num (4 bytes)    | Offset (8 bytes)   |
+ * |-----------------------------------------------------------------|
+ * | Content Length (4 bytes) | Content (content length bytes)       |
+ * +-----------------------------------------------------------------+
+ * |------------------------ 34 Bytes -------------------------------|
+ *
+ * content: *2\r\n$7\r\npadding\r\n$00001\r\n***\r\n
+ * length of *** -> total_len - PADDING_BINLOG_PROTOCOL_SIZE - SPACE_STROE_PARAMETER_LENGTH;
+ *
+ * We allocate five bytes to store the length of the parameter
+ */
+std::string PikaBinlogTransverter::ConstructPaddingBinlog(BinlogType type, uint32_t size) {
+  assert(size <= kBlockSize - kHeaderSize);
+  assert(BINLOG_ITEM_HEADER_SIZE + PADDING_BINLOG_PROTOCOL_SIZE + SPACE_STROE_PARAMETER_LENGTH <= size);
+
+  std::string binlog;
+  pstd::PutFixed16(&binlog, type);
+  pstd::PutFixed32(&binlog, 0);
+  pstd::PutFixed32(&binlog, 0);
+  pstd::PutFixed64(&binlog, 0);
+  pstd::PutFixed32(&binlog, 0);
+  pstd::PutFixed64(&binlog, 0);
+  auto content_len = static_cast<int32_t>(size - BINLOG_ITEM_HEADER_SIZE);
+  int32_t parameter_len = content_len - PADDING_BINLOG_PROTOCOL_SIZE - SPACE_STROE_PARAMETER_LENGTH;
+  if (parameter_len < 0) {
+    return {};
+  }
+
+  std::string content;
+  RedisAppendLen(content, 2, "*");
+  RedisAppendLen(content, 7, "$");
+  RedisAppendContent(content, "padding");
+
+  std::string parameter_len_str;
+  std::ostringstream os;
+  os << parameter_len;
+  std::istringstream is(os.str());
+  is >> parameter_len_str;
+  if (parameter_len_str.size() > SPACE_STROE_PARAMETER_LENGTH) {
+    return {};
+  }
+
+  content.append("$");
+  content.append(SPACE_STROE_PARAMETER_LENGTH - parameter_len_str.size(), '0');
+  content.append(parameter_len_str);
+  content.append(kNewLine);
+  RedisAppendContent(content, std::string(parameter_len, '*'));
+
+  pstd::PutFixed32(&binlog, content_len);
+  binlog.append(content);
+  return binlog;
+}
+
+bool PikaBinlogTransverter::BinlogItemWithoutContentDecode(BinlogType type, const std::string& binlog,
+                                                           BinlogItem* binlog_item) {
+  uint16_t binlog_type = 0;
+  pstd::Slice binlog_str = binlog;
+  pstd::GetFixed16(&binlog_str, &binlog_type);
+  if (binlog_type != type) {
+    LOG(ERROR) << "Binlog Item type error, expect type:" << type << " actually type: " << binlog_type;
+    return false;
+  }
+  pstd::GetFixed32(&binlog_str, &binlog_item->exec_time_);
+  pstd::GetFixed32(&binlog_str, &binlog_item->term_id_);
+  pstd::GetFixed64(&binlog_str, &binlog_item->logic_id_);
+  pstd::GetFixed32(&binlog_str, &binlog_item->filenum_);
+  pstd::GetFixed64(&binlog_str, &binlog_item->offset_);
+  return true;
+}
+ +#include "include/pika_bit.h" + +#include "pstd/include/pstd_string.h" +#include "include/pika_db.h" + + +#include "include/pika_define.h" +#include "include/pika_slot_command.h" +#include "include/pika_cache.h" +#include "pstd/include/pstd_string.h" +#include "include/pika_define.h" + +void BitSetCmd::DoInitial() { + if (!CheckArg(argv_.size())) { + res_.SetRes(CmdRes::kWrongNum, kCmdNameBitSet); + return; + } + key_ = argv_[1]; + if (pstd::string2int(argv_[2].data(), argv_[2].size(), &bit_offset_) == 0) { + res_.SetRes(CmdRes::kInvalidBitOffsetInt); + return; + } + if (pstd::string2int(argv_[3].data(), argv_[3].size(), &on_) == 0) { + res_.SetRes(CmdRes::kInvalidBitInt); + return; + } + if (bit_offset_ < 0) { + res_.SetRes(CmdRes::kInvalidBitOffsetInt); + return; + } + // value no bigger than 2^18 + if ((bit_offset_ >> kMaxBitOpInputBit) > 0) { + res_.SetRes(CmdRes::kInvalidBitOffsetInt); + return; + } + if ((on_ & ~1) != 0) { + res_.SetRes(CmdRes::kInvalidBitInt); + return; + } +} + +void BitSetCmd::Do() { + std::string value; + int32_t bit_val = 0; + s_ = db_->storage()->SetBit(key_, bit_offset_, static_cast(on_), &bit_val); + if (s_.ok()) { + res_.AppendInteger(static_cast(bit_val)); + AddSlotKey("k", key_, db_); + } else if (s_.IsInvalidArgument()) { + res_.SetRes(CmdRes::kMultiKey); + } else { + res_.SetRes(CmdRes::kErrOther, s_.ToString()); + } +} + +void BitSetCmd::DoThroughDB() { + Do(); +} + +void BitSetCmd::DoUpdateCache() { + if (s_.ok()) { + db_->cache()->SetBitIfKeyExist(key_, bit_offset_, on_); + } +} + + +void BitGetCmd::DoInitial() { + if (!CheckArg(argv_.size())) { + res_.SetRes(CmdRes::kWrongNum, kCmdNameBitGet); + return; + } + key_ = argv_[1]; + if (pstd::string2int(argv_[2].data(), argv_[2].size(), &bit_offset_) == 0) { + res_.SetRes(CmdRes::kInvalidBitOffsetInt); + return; + } + if (bit_offset_ < 0) { + res_.SetRes(CmdRes::kInvalidBitOffsetInt); + return; + } +} + +void BitGetCmd::Do() { + int32_t bit_val = 0; + s_ = db_->storage()->GetBit(key_, bit_offset_, &bit_val); + if (s_.ok()) { + res_.AppendInteger(static_cast(bit_val)); + } else if (s_.IsInvalidArgument()) { + res_.SetRes(CmdRes::kMultiKey); + } else { + res_.SetRes(CmdRes::kErrOther, s_.ToString()); + } +} + +void BitGetCmd::ReadCache() { + int64_t bit_val = 0; + auto s = db_->cache()->GetBit(key_, bit_offset_, &bit_val); + if (s.ok()) { + res_.AppendInteger(bit_val); + } else if (s.IsNotFound()) { + res_.SetRes(CmdRes::kCacheMiss); + } else { + res_.SetRes(CmdRes::kErrOther, s.ToString()); + } +} + +void BitGetCmd::DoThroughDB() { + res_.clear(); + Do(); +} + +void BitGetCmd::DoUpdateCache() { + if (s_.ok()) { + db_->cache()->PushKeyToAsyncLoadQueue(PIKA_KEY_TYPE_KV, key_, db_); + } +} + +void BitCountCmd::DoInitial() { + if (!CheckArg(argv_.size())) { + res_.SetRes(CmdRes::kWrongNum, kCmdNameBitCount); + return; + } + key_ = argv_[1]; + if (argv_.size() == 4) { + count_all_ = false; + if (pstd::string2int(argv_[2].data(), argv_[2].size(), &start_offset_) == 0) { + res_.SetRes(CmdRes::kInvalidInt); + return; + } + if (pstd::string2int(argv_[3].data(), argv_[3].size(), &end_offset_) == 0) { + res_.SetRes(CmdRes::kInvalidInt); + return; + } + } else if (argv_.size() == 2) { + count_all_ = true; + } else { + res_.SetRes(CmdRes::kSyntaxErr, kCmdNameBitCount); + } +} + +void BitCountCmd::Do() { + int32_t count = 0; + if (count_all_) { + s_ = db_->storage()->BitCount(key_, start_offset_, end_offset_, &count, false); + } else { + s_ = db_->storage()->BitCount(key_, start_offset_, end_offset_, &count, true); + 
} + + if (s_.ok() || s_.IsNotFound()) { + res_.AppendInteger(count); + } else if (s_.IsInvalidArgument()) { + res_.SetRes(CmdRes::kMultiKey); + } else { + res_.SetRes(CmdRes::kErrOther, s_.ToString()); + } +} + +void BitCountCmd::ReadCache() { + int64_t count = 0; + int64_t start = static_cast(start_offset_); + int64_t end = static_cast(end_offset_); + bool flag = true; + if (count_all_) { + flag = false; + } + rocksdb::Status s = db_->cache()->BitCount(key_, start, end, &count, flag); + + if (s.ok()) { + res_.AppendInteger(count); + } else if (s.IsNotFound()) { + res_.SetRes(CmdRes::kCacheMiss); + } else { + res_.SetRes(CmdRes::kErrOther, s.ToString()); + } +} + +void BitCountCmd::DoThroughDB() { + res_.clear(); + Do(); +} + +void BitCountCmd::DoUpdateCache() { + if (s_.ok()) { + db_->cache()->PushKeyToAsyncLoadQueue(PIKA_KEY_TYPE_KV, key_, db_); + } +} + +void BitPosCmd::DoInitial() { + if (!CheckArg(argv_.size())) { + res_.SetRes(CmdRes::kWrongNum, kCmdNameBitPos); + return; + } + key_ = argv_[1]; + if (pstd::string2int(argv_[2].data(), argv_[2].size(), &bit_val_) == 0) { + res_.SetRes(CmdRes::kInvalidInt); + return; + } + if ((bit_val_ & ~1) != 0) { + res_.SetRes(CmdRes::kInvalidBitPosArgument); + return; + } + if (argv_.size() == 3) { + pos_all_ = true; + endoffset_set_ = false; + } else if (argv_.size() == 4) { + pos_all_ = false; + endoffset_set_ = false; + if (pstd::string2int(argv_[3].data(), argv_[3].size(), &start_offset_) == 0) { + res_.SetRes(CmdRes::kInvalidInt); + return; + } + } else if (argv_.size() == 5) { + pos_all_ = false; + endoffset_set_ = true; + if (pstd::string2int(argv_[3].data(), argv_[3].size(), &start_offset_) == 0) { + res_.SetRes(CmdRes::kInvalidInt); + return; + } + if (pstd::string2int(argv_[4].data(), argv_[4].size(), &end_offset_) == 0) { + res_.SetRes(CmdRes::kInvalidInt); + return; + } + } else { + res_.SetRes(CmdRes::kSyntaxErr, kCmdNameBitPos); + } +} + +void BitPosCmd::Do() { + int64_t pos = 0; + rocksdb::Status s; + if (pos_all_) { + s_ = db_->storage()->BitPos(key_, static_cast(bit_val_), &pos); + } else if (!pos_all_ && !endoffset_set_) { + s_ = db_->storage()->BitPos(key_, static_cast(bit_val_), start_offset_, &pos); + } else if (!pos_all_ && endoffset_set_) { + s_ = db_->storage()->BitPos(key_, static_cast(bit_val_), start_offset_, end_offset_, &pos); + } + if (s_.ok()) { + res_.AppendInteger(static_cast(pos)); + } else if (s_.IsInvalidArgument()) { + res_.SetRes(CmdRes::kMultiKey); + } else { + res_.SetRes(CmdRes::kErrOther, s_.ToString()); + } +} + +void BitPosCmd::ReadCache() { + int64_t pos = 0; + rocksdb::Status s; + int64_t bit = static_cast(bit_val_); + int64_t start = static_cast(start_offset_); + int64_t end = static_cast(end_offset_);\ + if (pos_all_) { + s = db_->cache()->BitPos(key_, bit, &pos); + } else if (!pos_all_ && !endoffset_set_) { + s = db_->cache()->BitPos(key_, bit, start, &pos); + } else if (!pos_all_ && endoffset_set_) { + s = db_->cache()->BitPos(key_, bit, start, end, &pos); + } + if (s.ok()) { + res_.AppendInteger(pos); + } else if (s.IsNotFound()) { + res_.SetRes(CmdRes::kCacheMiss); + } else { + res_.SetRes(CmdRes::kErrOther, s.ToString()); + } +} + +void BitPosCmd::DoThroughDB() { + res_.clear(); + Do(); +} + +void BitPosCmd::DoUpdateCache() { + if (s_.ok()) { + db_->cache()->PushKeyToAsyncLoadQueue(PIKA_KEY_TYPE_KV, key_, db_); + } +} + +void BitOpCmd::DoInitial() { + if (!CheckArg(argv_.size())) { + res_.SetRes(CmdRes::kWrongNum, kCmdNameBitOp); + return; + } + std::string op_str = argv_[1]; + if 
(strcasecmp(op_str.data(), "not") == 0) {
+    op_ = storage::kBitOpNot;
+  } else if (strcasecmp(op_str.data(), "and") == 0) {
+    op_ = storage::kBitOpAnd;
+  } else if (strcasecmp(op_str.data(), "or") == 0) {
+    op_ = storage::kBitOpOr;
+  } else if (strcasecmp(op_str.data(), "xor") == 0) {
+    op_ = storage::kBitOpXor;
+  } else {
+    res_.SetRes(CmdRes::kSyntaxErr, kCmdNameBitOp);
+    return;
+  }
+  if (op_ == storage::kBitOpNot && argv_.size() != 4) {
+    res_.SetRes(CmdRes::kWrongBitOpNotNum, kCmdNameBitOp);
+    return;
+  } else if (op_ != storage::kBitOpNot && argv_.size() < 4) {
+    res_.SetRes(CmdRes::kWrongNum, kCmdNameBitOp);
+    return;
+  } else if (argv_.size() >= kMaxBitOpInputKey) {
+    res_.SetRes(CmdRes::kWrongNum, kCmdNameBitOp);
+    return;
+  }
+
+  dest_key_ = argv_[2];
+  for (size_t i = 3; i <= argv_.size() - 1; i++) {
+    src_keys_.emplace_back(argv_[i].data());
+  }
+}
+
+void BitOpCmd::Do() {
+  int64_t result_length = 0;
+  s_ = db_->storage()->BitOp(op_, dest_key_, src_keys_, value_to_dest_, &result_length);
+  if (s_.ok()) {
+    res_.AppendInteger(result_length);
+  } else if (s_.IsInvalidArgument()) {
+    res_.SetRes(CmdRes::kMultiKey);
+  } else {
+    res_.SetRes(CmdRes::kErrOther, s_.ToString());
+  }
+}
+
+void BitOpCmd::DoThroughDB() {
+  Do();
+}
+
+void BitOpCmd::DoUpdateCache() {
+  if (s_.ok()) {
+    std::vector<std::string> v;
+    v.emplace_back(dest_key_);
+    db_->cache()->Del(v);
+  }
+}
+
+void BitOpCmd::DoBinlog() {
+  PikaCmdArgsType set_args;
+  //used "set" instead of "SET" to distinguish the binlog of SetCmd
+  set_args.emplace_back("set");
+  set_args.emplace_back(dest_key_);
+  set_args.emplace_back(value_to_dest_);
+  set_cmd_->Initial(set_args, db_name_);
+  set_cmd_->SetConn(GetConn());
+  set_cmd_->SetResp(resp_.lock());
+  //value of this binlog might look strange if you print it out, since
+  //value_to_dest_ can contain arbitrary unprintable bytes, but it is
+  //parsed as a normal SET when the binlog is applied
+  set_cmd_->DoBinlog();
+}
diff --git a/tools/pika_migrate/src/pika_cache.cc b/tools/pika_migrate/src/pika_cache.cc
new file mode 100644
index 0000000000..b7d1f45eb1
--- /dev/null
+++ b/tools/pika_migrate/src/pika_cache.cc
@@ -0,0 +1,1628 @@
+// Copyright (c) 2023-present, Qihoo, Inc. All rights reserved.
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree. An additional grant
+// of patent rights can be found in the PATENTS file in the same directory.
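BitOpCmd::DoBinlog in pika_bit.cc above does not replicate the BITOP command itself; it writes the already-computed destination bytes as a lowercase "set", so replicas apply a deterministic value instead of re-running the bit math against source keys that may lag. A minimal sketch of that translation, illustrative only (BitOpToBinlogArgs is a made-up helper, not patch code):

#include <string>
#include <vector>

// Build the argv that would be written to the binlog for "BITOP op dest src...":
// the op and source keys disappear, only the computed result is replicated.
static std::vector<std::string> BitOpToBinlogArgs(const std::string& dest_key,
                                                  const std::string& value_to_dest) {
  // lowercase "set" distinguishes this synthesized entry from a client SET
  return {"set", dest_key, value_to_dest};
}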
+
+#include <cstdlib>
+#include <ctime>
+#include <memory>
+#include <glog/logging.h>
+#include <zlib.h>
+
+#include "include/pika_cache.h"
+#include "include/pika_cache_load_thread.h"
+#include "include/pika_server.h"
+#include "include/pika_slot_command.h"
+#include "pstd/include/pika_codis_slot.h"
+#include "cache/include/cache.h"
+#include "cache/include/config.h"
+
+extern PikaServer* g_pika_server;
+#define EXTEND_CACHE_SIZE(N) ((N) * 12 / 10)
+using rocksdb::Status;
+
+PikaCache::PikaCache(int zset_cache_start_direction, int zset_cache_field_num_per_key)
+    : cache_status_(PIKA_CACHE_STATUS_NONE),
+      cache_num_(0),
+      zset_cache_start_direction_(zset_cache_start_direction),
+      zset_cache_field_num_per_key_(EXTEND_CACHE_SIZE(zset_cache_field_num_per_key)) {
+  cache_load_thread_ = std::make_unique<PikaCacheLoadThread>(zset_cache_start_direction_, zset_cache_field_num_per_key_);
+  cache_load_thread_->StartThread();
+}
+
+PikaCache::~PikaCache() {
+  {
+    std::lock_guard l(rwlock_);
+    DestroyWithoutLock();
+  }
+}
+
+Status PikaCache::Init(uint32_t cache_num, cache::CacheConfig *cache_cfg) {
+  std::lock_guard l(rwlock_);
+
+  if (nullptr == cache_cfg) {
+    return Status::Corruption("invalid arguments !!!");
+  }
+  return InitWithoutLock(cache_num, cache_cfg);
+}
+
+void PikaCache::ProcessCronTask(void) {
+  std::lock_guard l(rwlock_);
+  for (uint32_t i = 0; i < caches_.size(); ++i) {
+    std::unique_lock lm(*cache_mutexs_[i]);
+    caches_[i]->ActiveExpireCycle();
+  }
+}
+
+Status PikaCache::Reset(uint32_t cache_num, cache::CacheConfig *cache_cfg) {
+  std::lock_guard l(rwlock_);
+
+  DestroyWithoutLock();
+  return InitWithoutLock(cache_num, cache_cfg);
+}
+
+void PikaCache::ResetConfig(cache::CacheConfig *cache_cfg) {
+  std::lock_guard l(rwlock_);
+  zset_cache_start_direction_ = cache_cfg->zset_cache_start_direction;
+  zset_cache_field_num_per_key_ = EXTEND_CACHE_SIZE(cache_cfg->zset_cache_field_num_per_key);
+  LOG(WARNING) << "zset-cache-start-direction: " << zset_cache_start_direction_ << ", zset_cache_field_num_per_key: " << zset_cache_field_num_per_key_;
+  cache::RedisCache::SetConfig(cache_cfg);
+}
+
+void PikaCache::Destroy(void) {
+  std::lock_guard l(rwlock_);
+  DestroyWithoutLock();
+}
+
+void PikaCache::SetCacheStatus(int status) { cache_status_ = status; }
+
+int PikaCache::CacheStatus(void) { return cache_status_; }
+
+/*-----------------------------------------------------------------------------
+ * Normal Commands
+ *----------------------------------------------------------------------------*/
+void PikaCache::Info(CacheInfo &info) {
+  info.clear();
+  std::unique_lock l(rwlock_);
+  info.status = cache_status_;
+  info.cache_num = cache_num_;
+  info.used_memory = cache::RedisCache::GetUsedMemory();
+  info.async_load_keys_num = cache_load_thread_->AsyncLoadKeysNum();
+  info.waitting_load_keys_num = cache_load_thread_->WaittingLoadKeysNum();
+  cache::RedisCache::GetHitAndMissNum(&info.hits, &info.misses);
+  for (uint32_t i = 0; i < caches_.size(); ++i) {
+    std::lock_guard lm(*cache_mutexs_[i]);
+    info.keys_num += caches_[i]->DbSize();
+  }
+}
+
+bool PikaCache::Exists(std::string& key) {
+  int cache_index = CacheIndex(key);
+  std::lock_guard lm(*cache_mutexs_[cache_index]);
+  return caches_[cache_index]->Exists(key);
+}
+
+void PikaCache::FlushCache(void) {
+  std::lock_guard l(rwlock_);
+  for (uint32_t i = 0; i < caches_.size(); ++i) {
+    std::lock_guard lm(*cache_mutexs_[i]);
+    caches_[i]->FlushCache();
+  }
+}
+
+Status PikaCache::Del(const std::vector<std::string> &keys) {
+  rocksdb::Status s;
+  for (const auto &key : keys) {
+    int cache_index = CacheIndex(key);
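+    // Note: `s` is overwritten on every iteration, so Del() reports only the
+    // status of the last key; earlier failures are silently dropped. That
+    // matches the original source, but callers should not treat an OK return
+    // as "all keys deleted".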
std::lock_guard lm(*cache_mutexs_[cache_index]); + s = caches_[cache_index]->Del(key); + } + return s; +} + +Status PikaCache::Expire(std::string& key, int64_t ttl) { + int cache_index = CacheIndex(key); + std::lock_guard lm(*cache_mutexs_[cache_index]); + return caches_[cache_index]->Expire(key, ttl); +} + +Status PikaCache::Expireat(std::string& key, int64_t ttl_sec) { + int cache_index = CacheIndex(key); + std::lock_guard lm(*cache_mutexs_[cache_index]); + return caches_[cache_index]->Expireat(key, ttl_sec); +} + +Status PikaCache::TTL(std::string& key, int64_t *ttl) { + int cache_index = CacheIndex(key); + std::lock_guard lm(*cache_mutexs_[cache_index]); + return caches_[cache_index]->TTL(key, ttl); +} + +int64_t PikaCache::TTL(std::string &key) { + int64_t ret = 0; + int64_t timestamp = 0; + + int cache_index = CacheIndex(key); + Status s = caches_[cache_index]->TTL(key, ×tamp); + if (s.ok() || s.IsNotFound()) { + ret = timestamp; + } else if (!s.IsNotFound()) { + ret = -3; + } + return ret; +} + +Status PikaCache::Persist(std::string &key) { + int cache_index = CacheIndex(key); + std::lock_guard lm(*cache_mutexs_[cache_index]); + return caches_[cache_index]->Persist(key); +} + +Status PikaCache::Type(std::string& key, std::string *value) { + int cache_index = CacheIndex(key); + std::lock_guard lm(*cache_mutexs_[cache_index]); + return caches_[cache_index]->Type(key, value); +} + +Status PikaCache::RandomKey(std::string *key) { + Status s; + srand((unsigned)time(nullptr)); + int cache_index = rand() % caches_.size(); + for (unsigned int i = 0; i < caches_.size(); ++i) { + cache_index = (cache_index + i) % caches_.size(); + + std::lock_guard lm(*cache_mutexs_[cache_index]); + s = caches_[cache_index]->RandomKey(key); + if (s.ok()) { + break; + } + } + return s; +} + +Status PikaCache::GetType(const std::string& key, bool single, std::vector& types) { + types.clear(); + + Status s; + std::string value; + int cache_indexk = CacheIndex(key); + s = caches_[cache_indexk]->Get(key, &value); + if (s.ok()) { + types.emplace_back("string"); + } else if (!s.IsNotFound()) { + return s; + } + if (single && !types.empty()) { + return s; + } + + uint64_t hashes_len = 0; + int cache_indexh = CacheIndex(key); + s = caches_[cache_indexh]->HLen(key, &hashes_len); + if (s.ok() && hashes_len != 0) { + types.emplace_back("hash"); + } else if (!s.IsNotFound()) { + return s; + } + if (single && !types.empty()) { + return s; + } + + uint64_t lists_len = 0; + int cache_indexl = CacheIndex(key); + s = caches_[cache_indexl]->LLen(key, &lists_len); + if (s.ok() && lists_len != 0) { + types.emplace_back("list"); + } else if (!s.IsNotFound()) { + return s; + } + if (single && !types.empty()) { + return s; + } + + uint64_t zsets_size = 0; + int cache_indexz = CacheIndex(key); + s = caches_[cache_indexz]->ZCard(key, &zsets_size); + if (s.ok() && zsets_size != 0) { + types.emplace_back("zset"); + } else if (!s.IsNotFound()) { + return s; + } + if (single && !types.empty()) { + return s; + } + + uint64_t sets_size = 0; + int cache_indexs = CacheIndex(key); + s = caches_[cache_indexs]->SCard(key, &sets_size); + if (s.ok() && sets_size != 0) { + types.emplace_back("set"); + } else if (!s.IsNotFound()) { + return s; + } + if (single && types.empty()) { + types.emplace_back("none"); + } + return Status::OK(); +} + +/*----------------------------------------------------------------------------- + * String Commands + *----------------------------------------------------------------------------*/ +Status 
PikaCache::Set(std::string& key, std::string &value, int64_t ttl) { + int cache_index = CacheIndex(key); + std::lock_guard lm(*cache_mutexs_[cache_index]); + return caches_[cache_index]->Set(key, value, ttl); +} + +Status PikaCache::Setnx(std::string& key, std::string &value, int64_t ttl) { + int cache_index = CacheIndex(key); + std::lock_guard lm(*cache_mutexs_[cache_index]); + return caches_[cache_index]->Setnx(key, value, ttl); +} + +Status PikaCache::SetnxWithoutTTL(std::string& key, std::string &value) { + int cache_index = CacheIndex(key); + std::lock_guard lm(*cache_mutexs_[cache_index]); + return caches_[cache_index]->SetnxWithoutTTL(key, value); +} + +Status PikaCache::Setxx(std::string& key, std::string &value, int64_t ttl) { + int cache_index = CacheIndex(key); + std::lock_guard lm(*cache_mutexs_[cache_index]); + return caches_[cache_index]->Setxx(key, value, ttl); +} + +Status PikaCache::SetxxWithoutTTL(std::string& key, std::string &value) { + int cache_index = CacheIndex(key); + std::lock_guard lm(*cache_mutexs_[cache_index]); + return caches_[cache_index]->SetxxWithoutTTL(key, value); +} + +Status PikaCache::Get(std::string& key, std::string *value) { + int cache_index = CacheIndex(key); + std::lock_guard lm(*cache_mutexs_[cache_index]); + return caches_[cache_index]->Get(key, value); +} + +Status PikaCache::MSet(const std::vector &kvs) { + for (const auto &item : kvs) { + auto [key, value] = item; + int cache_index = CacheIndex(key); + std::lock_guard lm(*cache_mutexs_[cache_index]); + return caches_[cache_index]->SetxxWithoutTTL(key, value); + } + return Status::OK(); +} + +Status PikaCache::MGet(const std::vector &keys, std::vector *vss) { + vss->resize(keys.size()); + rocksdb::Status ret; + for (int i = 0; i < keys.size(); ++i) { + int cache_index = CacheIndex(keys[i]); + std::lock_guard lm(*cache_mutexs_[cache_index]); + auto s = caches_[cache_index]->Get(keys[i], &(*vss)[i].value); + (*vss)[i].status = s; + if (!s.ok()) { + ret = s; + } + } + return ret; +} + +Status PikaCache::Incrxx(std::string& key) { + int cache_index = CacheIndex(key); + std::lock_guard lm(*cache_mutexs_[cache_index]); + if (caches_[cache_index]->Exists(key)) { + return caches_[cache_index]->Incr(key); + } + return Status::NotFound("key not exist"); +} + +Status PikaCache::Decrxx(std::string& key) { + int cache_index = CacheIndex(key); + std::lock_guard lm(*cache_mutexs_[cache_index]); + if (caches_[cache_index]->Exists(key)) { + return caches_[cache_index]->Decr(key); + } + return Status::NotFound("key not exist"); +} + +Status PikaCache::IncrByxx(std::string& key, uint64_t incr) { + int cache_index = CacheIndex(key); + std::lock_guard lm(*cache_mutexs_[cache_index]); + if (caches_[cache_index]->Exists(key)) { + return caches_[cache_index]->IncrBy(key, incr); + } + return Status::NotFound("key not exist"); +} + +Status PikaCache::DecrByxx(std::string& key, uint64_t incr) { + int cache_index = CacheIndex(key); + std::lock_guard lm(*cache_mutexs_[cache_index]); + if (caches_[cache_index]->Exists(key)) { + return caches_[cache_index]->DecrBy(key, incr); + } + return Status::NotFound("key not exist"); +} + +Status PikaCache::Incrbyfloatxx(std::string& key, long double incr) { + int cache_index = CacheIndex(key); + std::lock_guard lm(*cache_mutexs_[cache_index]); + if (caches_[cache_index]->Exists(key)) { + return caches_[cache_index]->Incrbyfloat(key, incr); + } + return Status::NotFound("key not exist"); +} + +Status PikaCache::Appendxx(std::string& key, std::string &value) { + int cache_index = 
CacheIndex(key); + std::lock_guard lm(*cache_mutexs_[cache_index]); + if (caches_[cache_index]->Exists(key)) { + return caches_[cache_index]->Append(key, value); + } + return Status::NotFound("key not exist"); +} + +Status PikaCache::GetRange(std::string& key, int64_t start, int64_t end, std::string *value) { + int cache_index = CacheIndex(key); + std::lock_guard lm(*cache_mutexs_[cache_index]); + return caches_[cache_index]->GetRange(key, start, end, value); +} + +Status PikaCache::SetRangexx(std::string& key, int64_t start, std::string &value) { + int cache_index = CacheIndex(key); + std::lock_guard lm(*cache_mutexs_[cache_index]); + if (caches_[cache_index]->Exists(key)) { + return caches_[cache_index]->SetRange(key, start, value); + } + return Status::NotFound("key not exist"); +} + +Status PikaCache::Strlen(std::string& key, int32_t *len) { + int cache_index = CacheIndex(key); + std::lock_guard lm(*cache_mutexs_[cache_index]); + return caches_[cache_index]->Strlen(key, len); +} + +/*----------------------------------------------------------------------------- + * Hash Commands + *----------------------------------------------------------------------------*/ +Status PikaCache::HDel(std::string& key, std::vector &fields) { + int cache_index = CacheIndex(key); + std::lock_guard lm(*cache_mutexs_[cache_index]); + return caches_[cache_index]->HDel(key, fields); +} + +Status PikaCache::HSet(std::string& key, std::string &field, std::string &value) { + int cache_index = CacheIndex(key); + std::lock_guard lm(*cache_mutexs_[cache_index]); + return caches_[cache_index]->HSet(key, field, value); +} + +Status PikaCache::HSetIfKeyExist(std::string& key, std::string &field, std::string &value) { + int cache_index = CacheIndex(key); + std::lock_guard lm(*cache_mutexs_[cache_index]); + if (caches_[cache_index]->Exists(key)) { + return caches_[cache_index]->HSet(key, field, value); + } + return Status::NotFound("key not exist"); +} + +Status PikaCache::HSetIfKeyExistAndFieldNotExist(std::string& key, std::string &field, std::string &value) { + int cache_index = CacheIndex(key); + std::lock_guard lm(*cache_mutexs_[cache_index]); + if (caches_[cache_index]->Exists(key)) { + return caches_[cache_index]->HSetnx(key, field, value); + } + return Status::NotFound("key not exist"); +} + +Status PikaCache::HMSet(std::string& key, std::vector &fvs) { + int cache_index = CacheIndex(key); + std::lock_guard lm(*cache_mutexs_[cache_index]); + return caches_[cache_index]->HMSet(key, fvs); +} + +Status PikaCache::HMSetnx(std::string& key, std::vector &fvs, int64_t ttl) { + int cache_index = CacheIndex(key); + std::lock_guard lm(*cache_mutexs_[cache_index]); + if (!caches_[cache_index]->Exists(key)) { + caches_[cache_index]->HMSet(key, fvs); + caches_[cache_index]->Expire(key, ttl); + return Status::OK(); + } else { + return Status::NotFound("key exist"); + } +} + +Status PikaCache::HMSetnxWithoutTTL(std::string& key, std::vector &fvs) { + int cache_index = CacheIndex(key); + std::lock_guard lm(*cache_mutexs_[cache_index]); + if (!caches_[cache_index]->Exists(key)) { + caches_[cache_index]->HMSet(key, fvs); + return Status::OK(); + } else { + return Status::NotFound("key exist"); + } +} + +Status PikaCache::HMSetxx(std::string& key, std::vector &fvs) { + int cache_index = CacheIndex(key); + std::lock_guard lm(*cache_mutexs_[cache_index]); + if (caches_[cache_index]->Exists(key)) { + return caches_[cache_index]->HMSet(key, fvs); + } else { + return Status::NotFound("key not exist"); + } +} + +Status 
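+// Note on naming, which mirrors the Redis NX/XX option flags: the "xx"
+// variants (Setxx, Appendxx, Incrxx, HIncrbyxx, ...) write the cache only if
+// the key is already cached, so a miss is never promoted into a possibly
+// stale entry; the "nx" variants (Setnx, HMSetnx, ...) populate the cache
+// only when the key is absent, and the "WithoutTTL" forms are the same
+// writes minus the expire.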
PikaCache::HGet(std::string& key, std::string &field, std::string *value) { + + int cache_index = CacheIndex(key); + std::lock_guard lm(*cache_mutexs_[cache_index]); + return caches_[cache_index]->HGet(key, field, value); +} + +Status PikaCache::HMGet(std::string& key, std::vector &fields, std::vector *vss) { + int cache_index = CacheIndex(key); + std::lock_guard lm(*cache_mutexs_[cache_index]); + return caches_[cache_index]->HMGet(key, fields, vss); +} + +Status PikaCache::HGetall(std::string& key, std::vector *fvs) { + int cache_index = CacheIndex(key); + std::lock_guard lm(*cache_mutexs_[cache_index]); + return caches_[cache_index]->HGetall(key, fvs); +} + +Status PikaCache::HKeys(std::string& key, std::vector *fields) { + int cache_index = CacheIndex(key); + std::lock_guard lm(*cache_mutexs_[cache_index]); + return caches_[cache_index]->HKeys(key, fields); +} + +Status PikaCache::HVals(std::string& key, std::vector *values) { + int cache_index = CacheIndex(key); + std::lock_guard lm(*cache_mutexs_[cache_index]); + return caches_[cache_index]->HVals(key, values); +} + +Status PikaCache::HExists(std::string& key, std::string &field) { + int cache_index = CacheIndex(key); + std::lock_guard lm(*cache_mutexs_[cache_index]); + return caches_[cache_index]->HExists(key, field); +} + +Status PikaCache::HIncrbyxx(std::string& key, std::string &field, int64_t value) { + int cache_index = CacheIndex(key); + std::lock_guard lm(*cache_mutexs_[cache_index]); + if (caches_[cache_index]->Exists(key)) { + return caches_[cache_index]->HIncrby(key, field, value); + } + return Status::NotFound("key not exist"); +} + +Status PikaCache::HIncrbyfloatxx(std::string& key, std::string &field, long double value) { + int cache_index = CacheIndex(key); + std::lock_guard lm(*cache_mutexs_[cache_index]); + if (caches_[cache_index]->Exists(key)) { + return caches_[cache_index]->HIncrbyfloat(key, field, value); + } + return Status::NotFound("key not exist"); +} + +Status PikaCache::HLen(std::string& key, uint64_t *len) { + int cache_index = CacheIndex(key); + std::lock_guard lm(*cache_mutexs_[cache_index]); + return caches_[cache_index]->HLen(key, len); +} + +Status PikaCache::HStrlen(std::string& key, std::string &field, uint64_t *len) { + int cache_index = CacheIndex(key); + std::lock_guard lm(*cache_mutexs_[cache_index]); + return caches_[cache_index]->HStrlen(key, field, len); +} + +/*----------------------------------------------------------------------------- + * List Commands + *----------------------------------------------------------------------------*/ +Status PikaCache::LIndex(std::string& key, int64_t index, std::string *element) { + int cache_index = CacheIndex(key); + std::lock_guard lm(*cache_mutexs_[cache_index]); + return caches_[cache_index]->LIndex(key, index, element); +} + +Status PikaCache::LInsert(std::string& key, storage::BeforeOrAfter &before_or_after, std::string &pivot, + std::string &value) { + int cache_index = CacheIndex(key); + std::lock_guard lm(*cache_mutexs_[cache_index]); + return caches_[cache_index]->LInsert(key, before_or_after, pivot, value); +} + +Status PikaCache::LLen(std::string& key, uint64_t *len) { + int cache_index = CacheIndex(key); + std::lock_guard lm(*cache_mutexs_[cache_index]); + return caches_[cache_index]->LLen(key, len); +} + +Status PikaCache::LPop(std::string& key, std::string *element) { + int cache_index = CacheIndex(key); + std::lock_guard lm(*cache_mutexs_[cache_index]); + return caches_[cache_index]->LPop(key, element); +} + +Status 
PikaCache::LPush(std::string& key, std::vector &values) { + int cache_index = CacheIndex(key); + std::lock_guard lm(*cache_mutexs_[cache_index]); + return caches_[cache_index]->LPush(key, values); +} + +Status PikaCache::LPushx(std::string& key, std::vector &values) { + int cache_index = CacheIndex(key); + std::lock_guard lm(*cache_mutexs_[cache_index]); + return caches_[cache_index]->LPushx(key, values); +} + +Status PikaCache::LRange(std::string& key, int64_t start, int64_t stop, std::vector *values) { + int cache_index = CacheIndex(key); + std::lock_guard lm(*cache_mutexs_[cache_index]); + return caches_[cache_index]->LRange(key, start, stop, values); +} + +Status PikaCache::LRem(std::string& key, int64_t count, std::string &value) { + int cache_index = CacheIndex(key); + std::lock_guard lm(*cache_mutexs_[cache_index]); + return caches_[cache_index]->LRem(key, count, value); +} + +Status PikaCache::LSet(std::string& key, int64_t index, std::string &value) { + int cache_index = CacheIndex(key); + std::lock_guard lm(*cache_mutexs_[cache_index]); + return caches_[cache_index]->LSet(key, index, value); +} + +Status PikaCache::LTrim(std::string& key, int64_t start, int64_t stop) { + int cache_index = CacheIndex(key); + std::lock_guard lm(*cache_mutexs_[cache_index]); + return caches_[cache_index]->LTrim(key, start, stop); +} + +Status PikaCache::RPop(std::string& key, std::string *element) { + int cache_index = CacheIndex(key); + std::lock_guard lm(*cache_mutexs_[cache_index]); + return caches_[cache_index]->RPop(key, element); +} + +Status PikaCache::RPush(std::string& key, std::vector &values) { + int cache_index = CacheIndex(key); + std::lock_guard lm(*cache_mutexs_[cache_index]); + return caches_[cache_index]->RPush(key, values); +} + +Status PikaCache::RPushx(std::string& key, std::vector &values) { + int cache_index = CacheIndex(key); + std::lock_guard lm(*cache_mutexs_[cache_index]); + return caches_[cache_index]->RPushx(key, values); +} + +Status PikaCache::RPushnx(std::string& key, std::vector &values, int64_t ttl) { + int cache_index = CacheIndex(key); + std::lock_guard lm(*cache_mutexs_[cache_index]); + if (!caches_[cache_index]->Exists(key)) { + caches_[cache_index]->RPush(key, values); + caches_[cache_index]->Expire(key, ttl); + return Status::OK(); + } else { + return Status::NotFound("key exist"); + } +} + +Status PikaCache::RPushnxWithoutTTL(std::string& key, std::vector &values) { + int cache_index = CacheIndex(key); + std::lock_guard lm(*cache_mutexs_[cache_index]); + if (!caches_[cache_index]->Exists(key)) { + caches_[cache_index]->RPush(key, values); + return Status::OK(); + } else { + return Status::NotFound("key exist"); + } +} + +/*----------------------------------------------------------------------------- + * Set Commands + *----------------------------------------------------------------------------*/ +Status PikaCache::SAdd(std::string& key, std::vector &members) { + int cache_index = CacheIndex(key); + std::lock_guard lm(*cache_mutexs_[cache_index]); + return caches_[cache_index]->SAdd(key, members); +} + +Status PikaCache::SAddIfKeyExist(std::string& key, std::vector &members) { + int cache_index = CacheIndex(key); + std::lock_guard lm(*cache_mutexs_[cache_index]); + if (caches_[cache_index]->Exists(key)) { + return caches_[cache_index]->SAdd(key, members); + } + return Status::NotFound("key not exist"); +} + +Status PikaCache::SAddnx(std::string& key, std::vector &members, int64_t ttl) { + int cache_index = CacheIndex(key); + std::lock_guard 
lm(*cache_mutexs_[cache_index]); + if (!caches_[cache_index]->Exists(key)) { + caches_[cache_index]->SAdd(key, members); + caches_[cache_index]->Expire(key, ttl); + return Status::OK(); + } else { + return Status::NotFound("key exist"); + } +} + +Status PikaCache::SAddnxWithoutTTL(std::string& key, std::vector &members) { + int cache_index = CacheIndex(key); + std::lock_guard lm(*cache_mutexs_[cache_index]); + if (!caches_[cache_index]->Exists(key)) { + caches_[cache_index]->SAdd(key, members); + return Status::OK(); + } else { + return Status::NotFound("key exist"); + } +} + +Status PikaCache::SCard(std::string& key, uint64_t *len) { + int cache_index = CacheIndex(key); + std::lock_guard lm(*cache_mutexs_[cache_index]); + return caches_[cache_index]->SCard(key, len); +} + +Status PikaCache::SIsmember(std::string& key, std::string& member) { + int cache_index = CacheIndex(key); + std::lock_guard lm(*cache_mutexs_[cache_index]); + return caches_[cache_index]->SIsmember(key, member); +} + +Status PikaCache::SMembers(std::string& key, std::vector *members) { + int cache_index = CacheIndex(key); + std::lock_guard lm(*cache_mutexs_[cache_index]); + return caches_[cache_index]->SMembers(key, members); +} + +Status PikaCache::SRem(std::string& key, std::vector &members) { + int cache_index = CacheIndex(key); + std::lock_guard lm(*cache_mutexs_[cache_index]); + return caches_[cache_index]->SRem(key, members); +} + +Status PikaCache::SRandmember(std::string& key, int64_t count, std::vector *members) { + int cache_index = CacheIndex(key); + std::lock_guard lm(*cache_mutexs_[cache_index]); + return caches_[cache_index]->SRandmember(key, count, members); +} + +/*----------------------------------------------------------------------------- + * ZSet Commands + *----------------------------------------------------------------------------*/ +Status PikaCache::ZAdd(std::string& key, std::vector &score_members) { + int cache_index = CacheIndex(key); + std::lock_guard lm(*cache_mutexs_[cache_index]); + return caches_[cache_index]->ZAdd(key, score_members); +} + +void PikaCache::GetMinMaxScore(std::vector &score_members, double &min, double &max) { + if (score_members.empty()) { + return; + } + min = max = score_members.front().score; + for (auto &item : score_members) { + if (item.score < min) { + min = item.score; + } + if (item.score > max) { + max = item.score; + } + } +} + +bool PikaCache::GetCacheMinMaxSM(cache::RedisCache *cache_obj, std::string& key, storage::ScoreMember &min_m, + storage::ScoreMember &max_m) { + if (cache_obj) { + std::vector score_members; + auto s = cache_obj->ZRange(key, 0, 0, &score_members); + if (!s.ok() || score_members.empty()) { + return false; + } + min_m = score_members.front(); + score_members.clear(); + + s = cache_obj->ZRange(key, -1, -1, &score_members); + if (!s.ok() || score_members.empty()) { + return false; + } + max_m = score_members.front(); + return true; + } + return false; +} + +Status PikaCache::ZAddIfKeyExist(std::string& key, std::vector &score_members) { + int cache_index = CacheIndex(key); + std::lock_guard lm(*cache_mutexs_[cache_index]); + auto cache_obj = caches_[cache_index]; + Status s; + if (cache_obj->Exists(key)) { + std::unordered_set unique; + std::list filtered_score_members; + for (auto it = score_members.rbegin(); it != score_members.rend(); ++it) { + if (unique.find(it->member) == unique.end()) { + unique.insert(it->member); + filtered_score_members.push_front(*it); + } + } + std::vector new_score_members; + for (auto &item : 
filtered_score_members) { + new_score_members.push_back(std::move(item)); + } + + double min_score = storage::ZSET_SCORE_MIN; + double max_score = storage::ZSET_SCORE_MAX; + GetMinMaxScore(new_score_members, min_score, max_score); + + storage::ScoreMember cache_min_sm; + storage::ScoreMember cache_max_sm; + if (!GetCacheMinMaxSM(cache_obj, key, cache_min_sm, cache_max_sm)) { + return Status::NotFound("key not exist"); + } + auto cache_min_score = cache_min_sm.score; + auto cache_max_score = cache_max_sm.score; + if (zset_cache_start_direction_ == cache::CACHE_START_FROM_BEGIN) { + if (max_score < cache_max_score) { + cache_obj->ZAdd(key, new_score_members); + } else { + std::vector score_members_can_add; + std::vector members_need_remove; + bool left_close = false; + for (auto &item : new_score_members) { + if (item.score == cache_max_score) { + left_close = true; + score_members_can_add.push_back(item); + continue; + } + if (item.score < cache_max_score) { + score_members_can_add.push_back(item); + } else { + members_need_remove.push_back(item.member); + } + } + if (!score_members_can_add.empty()) { + cache_obj->ZAdd(key, score_members_can_add); + std::string cache_max_score_str = left_close ? "" : "(" + std::to_string(cache_max_score); + std::string max_str = "+inf"; + cache_obj->ZRemrangebyscore(key, cache_max_score_str, max_str); + } + if (!members_need_remove.empty()) { + cache_obj->ZRem(key, members_need_remove); + } + } + } else if (zset_cache_start_direction_ == cache::CACHE_START_FROM_END) { + if (min_score > cache_min_score) { + cache_obj->ZAdd(key, new_score_members); + } else { + std::vector score_members_can_add; + std::vector members_need_remove; + bool right_close = false; + for (auto &item : new_score_members) { + if (item.score == cache_min_score) { + right_close = true; + score_members_can_add.push_back(item); + continue; + } + if (item.score > cache_min_score) { + score_members_can_add.push_back(item); + } else { + members_need_remove.push_back(item.member); + } + } + if (!score_members_can_add.empty()) { + cache_obj->ZAdd(key, score_members_can_add); + std::string cache_min_score_str = right_close ? 
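+          // In the Redis interval syntax accepted by ZREMRANGEBYSCORE, a "("
+          // prefix marks an exclusive bound: "(3" matches scores strictly
+          // beyond 3, while a bare "3" is inclusive. So the "(" built here
+          // preserves members sitting exactly on the cache boundary score.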
"" : "(" + std::to_string(cache_min_score); + std::string min_str = "-inf"; + cache_obj->ZRemrangebyscore(key, min_str, cache_min_score_str); + } + if (!members_need_remove.empty()) { + cache_obj->ZRem(key, members_need_remove); + } + } + } + + return CleanCacheKeyIfNeeded(cache_obj, key); + } else { + return Status::NotFound("key not exist"); + } +} + +Status PikaCache::CleanCacheKeyIfNeeded(cache::RedisCache *cache_obj, std::string& key) { + uint64_t cache_len = 0; + cache_obj->ZCard(key, &cache_len); + if (cache_len > (unsigned long)zset_cache_field_num_per_key_) { + long start = 0; + long stop = 0; + if (zset_cache_start_direction_ == cache::CACHE_START_FROM_BEGIN) { + start = -cache_len + zset_cache_field_num_per_key_; + stop = -1; + } else if (zset_cache_start_direction_ == cache::CACHE_START_FROM_END) { + start = 0; + stop = cache_len - zset_cache_field_num_per_key_ - 1; + } + auto min = std::to_string(start); + auto max = std::to_string(stop); + cache_obj->ZRemrangebyrank(key, min, max); + } + return Status::OK(); +} + +Status PikaCache::ZAddnx(std::string& key, std::vector &score_members, int64_t ttl) { + int cache_index = CacheIndex(key); + std::lock_guard lm(*cache_mutexs_[cache_index]); + if (!caches_[cache_index]->Exists(key)) { + caches_[cache_index]->ZAdd(key, score_members); + caches_[cache_index]->Expire(key, ttl); + return Status::OK(); + } else { + return Status::NotFound("key exist"); + } +} + +Status PikaCache::ZAddnxWithoutTTL(std::string& key, std::vector &score_members) { + int cache_index = CacheIndex(key); + std::lock_guard lm(*cache_mutexs_[cache_index]); + if (!caches_[cache_index]->Exists(key)) { + caches_[cache_index]->ZAdd(key, score_members); + return Status::OK(); + } else { + return Status::NotFound("key exist"); + } +} + +Status PikaCache::ZCard(std::string& key, uint32_t *len, const std::shared_ptr& db) { + int32_t db_len = 0; + db->storage()->ZCard(key, &db_len); + *len = db_len; + return Status::OK(); +} + +Status PikaCache::CacheZCard(std::string& key, uint64_t *len) { + int cache_index = CacheIndex(key); + std::lock_guard lm(*cache_mutexs_[cache_index]); + + return caches_[cache_index]->ZCard(key, len); +} + +RangeStatus PikaCache::CheckCacheRangeByScore(uint64_t cache_len, double cache_min, double cache_max, double min, + double max, bool left_close, bool right_close) { + bool cache_full = (cache_len == (unsigned long)zset_cache_field_num_per_key_); + + if (cache_full) { + if (zset_cache_start_direction_ == cache::CACHE_START_FROM_BEGIN) { + bool ret = (max < cache_max); + if (ret) { + if (max < cache_min) { + return RangeStatus::RangeError; + } else { + return RangeStatus::RangeHit; + } + } else { + return RangeStatus::RangeMiss; + } + } else if (zset_cache_start_direction_ == cache::CACHE_START_FROM_END) { + bool ret = min > cache_min; + if (ret) { + if (min > cache_max) { + return RangeStatus::RangeError; + } else { + return RangeStatus::RangeHit; + } + } else { + return RangeStatus::RangeMiss; + } + } else { + return RangeStatus::RangeError; + } + } else { + if (zset_cache_start_direction_ == cache::CACHE_START_FROM_BEGIN) { + bool ret = right_close ? max < cache_max : max <= cache_max; + if (ret) { + if (max < cache_min) { + return RangeStatus::RangeError; + } else { + return RangeStatus::RangeHit; + } + } else { + return RangeStatus::RangeMiss; + } + } else if (zset_cache_start_direction_ == cache::CACHE_START_FROM_END) { + bool ret = left_close ? 
min > cache_min : min >= cache_min; + if (ret) { + if (min > cache_max) { + return RangeStatus::RangeError; + } else { + return RangeStatus::RangeHit; + } + } else { + return RangeStatus::RangeMiss; + } + } else { + return RangeStatus::RangeError; + } + } +} + +Status PikaCache::ZCount(std::string& key, std::string &min, std::string &max, uint64_t *len, ZCountCmd *cmd) { + int cache_index = CacheIndex(key); + std::lock_guard lm(*cache_mutexs_[cache_index]); + auto cache_obj = caches_[cache_index]; + uint64_t cache_len = 0; + cache_obj->ZCard(key, &cache_len); + if (cache_len <= 0) { + return Status::NotFound("key not in cache"); + } else { + storage::ScoreMember cache_min_sm; + storage::ScoreMember cache_max_sm; + if (!GetCacheMinMaxSM(cache_obj, key, cache_min_sm, cache_max_sm)) { + return Status::NotFound("key not exist"); + } + auto cache_min_score = cache_min_sm.score; + auto cache_max_score = cache_max_sm.score; + + if (RangeStatus::RangeHit == CheckCacheRangeByScore(cache_len, cache_min_score, cache_max_score, cmd->MinScore(), + cmd->MaxScore(), cmd->LeftClose(), cmd->RightClose())) { + auto s = cache_obj->ZCount(key, min, max, len); + return s; + } else { + return Status::NotFound("key not in cache"); + } + } +} + +Status PikaCache::ZIncrby(std::string& key, std::string& member, double increment) { + int cache_index = CacheIndex(key); + std::lock_guard lm(*cache_mutexs_[cache_index]); + return caches_[cache_index]->ZIncrby(key, member, increment); +} + +bool PikaCache::ReloadCacheKeyIfNeeded(cache::RedisCache *cache_obj, std::string& key, int mem_len, int db_len, + const std::shared_ptr& db) { + if (mem_len == -1) { + uint64_t cache_len = 0; + cache_obj->ZCard(key, &cache_len); + mem_len = cache_len; + } + if (db_len == -1) { + db_len = 0; + db->storage()->ZCard(key, &db_len); + if (!db_len) { + return false; + } + } + if (db_len < zset_cache_field_num_per_key_) { + if (mem_len * 2 < db_len) { + cache_obj->Del(key); + PushKeyToAsyncLoadQueue(PIKA_KEY_TYPE_ZSET, key, db); + return true; + } else { + return false; + } + } else { + if (zset_cache_field_num_per_key_ && mem_len * 2 < zset_cache_field_num_per_key_) { + cache_obj->Del(key); + PushKeyToAsyncLoadQueue(PIKA_KEY_TYPE_ZSET, key, db); + return true; + } else { + return false; + } + } +} + +Status PikaCache::ZIncrbyIfKeyExist(std::string& key, std::string& member, double increment, ZIncrbyCmd *cmd, const std::shared_ptr& db) { + auto eps = std::numeric_limits::epsilon(); + if (-eps < increment && increment < eps) { + return Status::NotFound("icrement is 0, nothing to be done"); + } + if (!cmd->res().ok()) { + return Status::NotFound("key not exist"); + } + std::lock_guard l(rwlock_); + int cache_index = CacheIndex(key); + std::lock_guard lm(*cache_mutexs_[cache_index]); + auto cache_obj = caches_[cache_index]; + uint64_t cache_len = 0; + cache_obj->ZCard(key, &cache_len); + + storage::ScoreMember cache_min_sm; + storage::ScoreMember cache_max_sm; + if (!GetCacheMinMaxSM(cache_obj, key, cache_min_sm, cache_max_sm)) { + return Status::NotFound("key not exist"); + } + auto cache_min_score = cache_min_sm.score; + auto cache_max_score = cache_max_sm.score; + auto RemCacheRangebyscoreAndCheck = [this, cache_obj, &key, cache_len, db](double score) { + auto score_rm = std::to_string(score); + auto s = cache_obj->ZRemrangebyscore(key, score_rm, score_rm); + ReloadCacheKeyIfNeeded(cache_obj, key, cache_len, -1, db); + return s; + }; + auto RemCacheKeyMember = [this, cache_obj, &key, cache_len, db](const std::string& member, bool check = 
true) { + std::vector member_rm = {member}; + auto s = cache_obj->ZRem(key, member_rm); + if (check) { + ReloadCacheKeyIfNeeded(cache_obj, key, cache_len, -1, db); + } + return s; + }; + + if (zset_cache_start_direction_ == cache::CACHE_START_FROM_BEGIN) { + if (cmd->Score() > cache_max_score) { + return RemCacheKeyMember(member); + } else if (cmd->Score() == cache_max_score) { + RemCacheKeyMember(member, false); + return RemCacheRangebyscoreAndCheck(cache_max_score); + } else { + std::vector score_member = {{cmd->Score(), member}}; + auto s = cache_obj->ZAdd(key, score_member); + CleanCacheKeyIfNeeded(cache_obj, key); + return s; + } + } else if (zset_cache_start_direction_ == cache::CACHE_START_FROM_END) { + if (cmd->Score() > cache_min_score) { + std::vector score_member = {{cmd->Score(), member}}; + auto s = cache_obj->ZAdd(key, score_member); + CleanCacheKeyIfNeeded(cache_obj, key); + return s; + } else if (cmd->Score() == cache_min_score) { + RemCacheKeyMember(member, false); + return RemCacheRangebyscoreAndCheck(cache_min_score); + } else { + std::vector member_rm = {member}; + return RemCacheKeyMember(member); + } + } + + return Status::NotFound("key not exist"); +} + +RangeStatus PikaCache::CheckCacheRange(int32_t cache_len, int32_t db_len, int64_t start, int64_t stop, int64_t &out_start, + int64_t &out_stop) { + out_start = start >= 0 ? start : db_len + start; + out_stop = stop >= 0 ? stop : db_len + stop; + out_start = out_start <= 0 ? 0 : out_start; + out_stop = out_stop >= db_len ? db_len - 1 : out_stop; + if (out_start > out_stop || out_start >= db_len || out_stop < 0) { + return RangeStatus::RangeError; + } else { + if (zset_cache_start_direction_ == cache::CACHE_START_FROM_BEGIN) { + if (out_start < cache_len && out_stop < cache_len) { + return RangeStatus::RangeHit; + } else { + return RangeStatus::RangeMiss; + } + } else if (zset_cache_start_direction_ == cache::CACHE_START_FROM_END) { + if (out_start >= db_len - cache_len && out_stop >= db_len - cache_len) { + out_start = out_start - (db_len - cache_len); + out_stop = out_stop - (db_len - cache_len); + return RangeStatus::RangeHit; + } else { + return RangeStatus::RangeMiss; + } + } else { + return RangeStatus::RangeError; + } + } +} + +RangeStatus PikaCache::CheckCacheRevRange(int32_t cache_len, int32_t db_len, int64_t start, int64_t stop, int64_t &out_start, + int64_t &out_stop) { + int64_t start_index = stop >= 0 ? db_len - stop - 1 : -stop - 1; + int64_t stop_index = start >= 0 ? db_len - start - 1 : -start - 1; + start_index = start_index <= 0 ? 0 : start_index; + stop_index = stop_index >= db_len ? 
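+  // Worked example (illustrative values): with db_len = 100, cache_len = 20,
+  // and a request for ZREVRANGE 0 4, start_index = 100-4-1 = 95 and
+  // stop_index = 100-0-1 = 99, i.e. the five highest-ranked members. Under
+  // CACHE_START_FROM_END those fall inside [db_len - cache_len, db_len) =
+  // [80, 100), so the request is a cache hit and is remapped below to cache
+  // ranks [0, 4].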
db_len - 1 : stop_index; + if (start_index > stop_index || start_index >= db_len || stop_index < 0) { + return RangeStatus::RangeError; + } else { + if (zset_cache_start_direction_ == cache::CACHE_START_FROM_BEGIN) { + if (start_index < cache_len && stop_index < cache_len) { + // cache reverse index + out_start = cache_len - stop_index - 1; + out_stop = cache_len - start_index - 1; + + return RangeStatus::RangeHit; + } else { + return RangeStatus::RangeMiss; + } + } else if (zset_cache_start_direction_ == cache::CACHE_START_FROM_END) { + if (start_index >= db_len - cache_len && stop_index >= db_len - cache_len) { + int cache_start = start_index - (db_len - cache_len); + int cache_stop = stop_index - (db_len - cache_len); + out_start = cache_len - cache_stop - 1; + out_stop = cache_len - cache_start - 1; + return RangeStatus::RangeHit; + } else { + return RangeStatus::RangeMiss; + } + } else { + return RangeStatus::RangeError; + } + } +} + +Status PikaCache::ZRange(std::string& key, int64_t start, int64_t stop, std::vector *score_members, + const std::shared_ptr& db) { + int cache_index = CacheIndex(key); + std::lock_guard lm(*cache_mutexs_[cache_index]); + + auto cache_obj = caches_[cache_index]; + auto db_obj = db->storage(); + Status s; + if (cache_obj->Exists(key)) { + uint64_t cache_len = 0; + cache_obj->ZCard(key, &cache_len); + int32_t db_len = 0; + db_obj->ZCard(key, &db_len); + int64_t out_start = 0; + int64_t out_stop = 0; + RangeStatus rs = CheckCacheRange(cache_len, db_len, start, stop, out_start, out_stop); + if (rs == RangeStatus::RangeHit) { + return cache_obj->ZRange(key, out_start, out_stop, score_members); + } else if (rs == RangeStatus::RangeMiss) { + ReloadCacheKeyIfNeeded(cache_obj, key, cache_len, db_len, db); + return Status::NotFound("key not in cache"); + } else if (rs == RangeStatus::RangeError) { + return Status::NotFound("error range"); + } else { + return Status::Corruption("unknown error"); + } + } else { + return Status::NotFound("key not in cache"); + } +} + +Status PikaCache::ZRangebyscore(std::string& key, std::string &min, std::string &max, + std::vector *score_members, ZRangebyscoreCmd *cmd) { + int cache_index = CacheIndex(key); + std::lock_guard lm(*cache_mutexs_[cache_index]); + + auto cache_obj = caches_[cache_index]; + uint64_t cache_len = 0; + cache_obj->ZCard(key, &cache_len); + if (cache_len <= 0) { + return Status::NotFound("key not in cache"); + } else { + storage::ScoreMember cache_min_sm; + storage::ScoreMember cache_max_sm; + if (!GetCacheMinMaxSM(cache_obj, key, cache_min_sm, cache_max_sm)) { + return Status::NotFound("key not exist"); + } + + if (RangeStatus::RangeHit == CheckCacheRangeByScore(cache_len, cache_min_sm.score, cache_max_sm.score, + cmd->MinScore(), cmd->MaxScore(), cmd->LeftClose(), + cmd->RightClose())) { + return cache_obj->ZRangebyscore(key, min, max, score_members, cmd->Offset(), cmd->Count()); + } else { + return Status::NotFound("key not in cache"); + } + } +} + +Status PikaCache::ZRank(std::string& key, std::string& member, int64_t *rank, const std::shared_ptr& db) { + int cache_index = CacheIndex(key); + std::lock_guard lm(*cache_mutexs_[cache_index]); + + auto cache_obj = caches_[cache_index]; + uint64_t cache_len = 0; + cache_obj->ZCard(key, &cache_len); + if (cache_len <= 0) { + return Status::NotFound("key not in cache"); + } else { + auto s = cache_obj->ZRank(key, member, rank); + if (s.ok()) { + if (zset_cache_start_direction_ == cache::CACHE_START_FROM_END) { + int32_t db_len = 0; + db->storage()->ZCard(key, 
&db_len); + *rank = db_len - cache_len + *rank; + } + return s; + } else { + return Status::NotFound("key not in cache"); + } + } +} + +Status PikaCache::ZRem(std::string& key, std::vector &members, std::shared_ptr db) { + int cache_index = CacheIndex(key); + std::lock_guard lm(*cache_mutexs_[cache_index]); + + auto s = caches_[cache_index]->ZRem(key, members); + ReloadCacheKeyIfNeeded(caches_[cache_index], key, -1, -1, db); + return s; +} + +Status PikaCache::ZRemrangebyrank(std::string& key, std::string &min, std::string &max, int32_t ele_deleted, + const std::shared_ptr& db) { + int cache_index = CacheIndex(key); + std::lock_guard lm(*cache_mutexs_[cache_index]); + auto cache_obj = caches_[cache_index]; + uint64_t cache_len = 0; + cache_obj->ZCard(key, &cache_len); + if (cache_len <= 0) { + return Status::NotFound("key not in cache"); + } else { + auto db_obj = db->storage(); + int32_t db_len = 0; + db_obj->ZCard(key, &db_len); + db_len += ele_deleted; + auto start = std::stol(min); + auto stop = std::stol(max); + + int32_t start_index = start >= 0 ? start : db_len + start; + int32_t stop_index = stop >= 0 ? stop : db_len + stop; + start_index = start_index <= 0 ? 0 : start_index; + stop_index = stop_index >= db_len ? db_len - 1 : stop_index; + if (start_index > stop_index) { + return Status::NotFound("error range"); + } + + if (zset_cache_start_direction_ == cache::CACHE_START_FROM_BEGIN) { + if ((uint32_t)start_index <= cache_len) { + auto cache_min_str = std::to_string(start_index); + auto cache_max_str = std::to_string(stop_index); + auto s = cache_obj->ZRemrangebyrank(key, cache_min_str, cache_max_str); + ReloadCacheKeyIfNeeded(cache_obj, key, cache_len, db_len - ele_deleted, db); + return s; + } else { + return Status::NotFound("error range"); + } + } else if (zset_cache_start_direction_ == cache::CACHE_START_FROM_END) { + if ((uint32_t)stop_index >= db_len - cache_len) { + int32_t cache_min = start_index - (db_len - cache_len); + int32_t cache_max = stop_index - (db_len - cache_len); + cache_min = cache_min <= 0 ? 0 : cache_min; + cache_max = cache_max >= (int32_t)cache_len ? 
cache_len - 1 : cache_max; + + auto cache_min_str = std::to_string(cache_min); + auto cache_max_str = std::to_string(cache_max); + auto s = cache_obj->ZRemrangebyrank(key, cache_min_str, cache_max_str); + + ReloadCacheKeyIfNeeded(cache_obj, key, cache_len, db_len - ele_deleted, db); + return s; + } else { + return Status::NotFound("error range"); + } + } else { + return Status::NotFound("error range"); + } + } +} + +Status PikaCache::ZRemrangebyscore(std::string& key, std::string &min, std::string &max, + const std::shared_ptr& db) { + int cache_index = CacheIndex(key); + std::lock_guard lm(*cache_mutexs_[cache_index]); + auto s = caches_[cache_index]->ZRemrangebyscore(key, min, max); + ReloadCacheKeyIfNeeded(caches_[cache_index], key, -1, -1, db); + return s; +} + +Status PikaCache::ZRevrange(std::string& key, int64_t start, int64_t stop, std::vector *score_members, + const std::shared_ptr& db) { + int cache_index = CacheIndex(key); + std::lock_guard lm(*cache_mutexs_[cache_index]); + + auto cache_obj = caches_[cache_index]; + auto db_obj = db->storage(); + Status s; + if (cache_obj->Exists(key)) { + uint64_t cache_len = 0; + cache_obj->ZCard(key, &cache_len); + int32_t db_len = 0; + db_obj->ZCard(key, &db_len); + int64_t out_start = 0; + int64_t out_stop = 0; + RangeStatus rs = CheckCacheRevRange(cache_len, db_len, start, stop, out_start, out_stop); + if (rs == RangeStatus::RangeHit) { + return cache_obj->ZRevrange(key, out_start, out_stop, score_members); + } else if (rs == RangeStatus::RangeMiss) { + ReloadCacheKeyIfNeeded(cache_obj, key, cache_len, db_len, db); + return Status::NotFound("key not in cache"); + } else if (rs == RangeStatus::RangeError) { + return Status::NotFound("error revrange"); + } else { + return Status::Corruption("unknown error"); + } + } else { + return Status::NotFound("key not in cache"); + } +} + +Status PikaCache::ZRevrangebyscore(std::string& key, std::string &min, std::string &max, + std::vector *score_members, ZRevrangebyscoreCmd *cmd, + const std::shared_ptr& db) { + int cache_index = CacheIndex(key); + std::lock_guard lm(*cache_mutexs_[cache_index]); + + auto cache_obj = caches_[cache_index]; + uint64_t cache_len = 0; + cache_obj->ZCard(key, &cache_len); + if (cache_len <= 0) { + return Status::NotFound("key not in cache"); + } else { + storage::ScoreMember cache_min_sm; + storage::ScoreMember cache_max_sm; + if (!GetCacheMinMaxSM(cache_obj, key, cache_min_sm, cache_max_sm)) { + return Status::NotFound("key not exist"); + } + auto cache_min_score = cache_min_sm.score; + auto cache_max_score = cache_max_sm.score; + + auto rs = CheckCacheRangeByScore(cache_len, cache_min_score, cache_max_score, cmd->MinScore(), cmd->MaxScore(), + cmd->LeftClose(), cmd->RightClose()); + if (RangeStatus::RangeHit == rs) { + return cache_obj->ZRevrangebyscore(key, min, max, score_members, cmd->Offset(), cmd->Count()); + } else if (RangeStatus::RangeMiss == rs) { + ReloadCacheKeyIfNeeded(cache_obj, key, cache_len, -1, db); + return Status::NotFound("score range miss"); + } else { + return Status::NotFound("score range error"); + } + } +} + +bool PikaCache::CacheSizeEqsDB(std::string& key, const std::shared_ptr& db) { + int32_t db_len = 0; + db->storage()->ZCard(key, &db_len); + + std::lock_guard l(rwlock_); + int cache_index = CacheIndex(key); + std::lock_guard lm(*cache_mutexs_[cache_index]); + uint64_t cache_len = 0; + caches_[cache_index]->ZCard(key, &cache_len); + return (db_len == (int32_t)cache_len) && cache_len; +} + +Status PikaCache::ZRevrangebylex(std::string& key, 
std::string &min, std::string &max, + std::vector *members, const std::shared_ptr& db) { + if (CacheSizeEqsDB(key, db)) { + int cache_index = CacheIndex(key); + std::lock_guard lm(*cache_mutexs_[cache_index]); + return caches_[cache_index]->ZRevrangebylex(key, min, max, members); + } else { + return Status::NotFound("key not in cache"); + } +} + +Status PikaCache::ZRevrank(std::string& key, std::string& member, int64_t *rank, const std::shared_ptr& db) { + int cache_index = CacheIndex(key); + std::lock_guard lm(*cache_mutexs_[cache_index]); + auto cache_obj = caches_[cache_index]; + uint64_t cache_len = 0; + cache_obj->ZCard(key, &cache_len); + if (cache_len <= 0) { + return Status::NotFound("key not in cache"); + } else { + auto s = cache_obj->ZRevrank(key, member, rank); + if (s.ok()) { + if (zset_cache_start_direction_ == cache::CACHE_START_FROM_BEGIN) { + int32_t db_len = 0; + db->storage()->ZCard(key, &db_len); + *rank = db_len - cache_len + *rank; + } + return s; + } else { + return Status::NotFound("member not in cache"); + } + } +} +Status PikaCache::ZScore(std::string& key, std::string& member, double *score, const std::shared_ptr& db) { + int cache_index = CacheIndex(key); + std::lock_guard lm(*cache_mutexs_[cache_index]); + auto s = caches_[cache_index]->ZScore(key, member, score); + if (!s.ok()) { + return Status::NotFound("key or member not in cache"); + } + return s; +} + +Status PikaCache::ZRangebylex(std::string& key, std::string &min, std::string &max, std::vector *members, + const std::shared_ptr& db) { + if (CacheSizeEqsDB(key, db)) { + int cache_index = CacheIndex(key); + std::lock_guard lm(*cache_mutexs_[cache_index]); + return caches_[cache_index]->ZRangebylex(key, min, max, members); + } else { + return Status::NotFound("key not in cache"); + } +} + +Status PikaCache::ZLexcount(std::string& key, std::string &min, std::string &max, uint64_t *len, + const std::shared_ptr& db) { + if (CacheSizeEqsDB(key, db)) { + int cache_index = CacheIndex(key); + std::lock_guard lm(*cache_mutexs_[cache_index]); + + return caches_[cache_index]->ZLexcount(key, min, max, len); + } else { + return Status::NotFound("key not in cache"); + } +} + +Status PikaCache::ZRemrangebylex(std::string& key, std::string &min, std::string &max, + const std::shared_ptr& db) { + if (CacheSizeEqsDB(key, db)) { + int cache_index = CacheIndex(key); + std::lock_guard lm(*cache_mutexs_[cache_index]); + + return caches_[cache_index]->ZRemrangebylex(key, min, max); + } else { + return Status::NotFound("key not in cache"); + } +} + +/*----------------------------------------------------------------------------- + * Bit Commands + *----------------------------------------------------------------------------*/ +Status PikaCache::SetBit(std::string& key, size_t offset, int64_t value) { + int cache_index = CacheIndex(key); + std::lock_guard lm(*cache_mutexs_[cache_index]); + return caches_[cache_index]->SetBit(key, offset, value); +} + +Status PikaCache::SetBitIfKeyExist(std::string& key, size_t offset, int64_t value) { + int cache_index = CacheIndex(key); + std::lock_guard lm(*cache_mutexs_[cache_index]); + if (caches_[cache_index]->Exists(key)) { + return caches_[cache_index]->SetBit(key, offset, value); + } + return Status::NotFound("key not exist"); +} + +Status PikaCache::GetBit(std::string& key, size_t offset, int64_t *value) { + int cache_index = CacheIndex(key); + std::lock_guard lm(*cache_mutexs_[cache_index]); + return caches_[cache_index]->GetBit(key, offset, value); +} + +Status 
PikaCache::BitCount(std::string& key, int64_t start, int64_t end, int64_t *value, bool have_offset) { + int cache_index = CacheIndex(key); + std::lock_guard lm(*cache_mutexs_[cache_index]); + return caches_[cache_index]->BitCount(key, start, end, value, have_offset); +} + +Status PikaCache::BitPos(std::string& key, int64_t bit, int64_t *value) { + int cache_index = CacheIndex(key); + std::lock_guard lm(*cache_mutexs_[cache_index]); + return caches_[cache_index]->BitPos(key, bit, value); +} + +Status PikaCache::BitPos(std::string& key, int64_t bit, int64_t start, int64_t *value) { + int cache_index = CacheIndex(key); + std::lock_guard lm(*cache_mutexs_[cache_index]); + return caches_[cache_index]->BitPos(key, bit, start, value); +} + +Status PikaCache::BitPos(std::string& key, int64_t bit, int64_t start, int64_t end, int64_t *value) { + int cache_index = CacheIndex(key); + std::lock_guard lm(*cache_mutexs_[cache_index]); + return caches_[cache_index]->BitPos(key, bit, start, end, value); +} + +Status PikaCache::InitWithoutLock(uint32_t cache_num, cache::CacheConfig *cache_cfg) { + cache_status_ = PIKA_CACHE_STATUS_INIT; + + cache_num_ = cache_num; + if (cache_cfg != nullptr) { + cache::RedisCache::SetConfig(cache_cfg); + } + + for (uint32_t i = 0; i < cache_num; ++i) { + auto *cache = new cache::RedisCache(); + rocksdb::Status s = cache->Open(); + if (!s.ok()) { + LOG(ERROR) << "PikaCache::InitWithoutLock Open cache failed"; + DestroyWithoutLock(); + cache_status_ = PIKA_CACHE_STATUS_NONE; + return Status::Corruption("create redis cache failed"); + } + caches_.push_back(cache); + cache_mutexs_.push_back(std::make_shared()); + } + cache_status_ = PIKA_CACHE_STATUS_OK; + return Status::OK(); +} + +void PikaCache::DestroyWithoutLock(void) +{ + cache_status_ = PIKA_CACHE_STATUS_DESTROY; + + for (auto iter = caches_.begin(); iter != caches_.end(); ++iter) { + delete *iter; + } + caches_.clear(); + cache_mutexs_.clear(); +} + +int PikaCache::CacheIndex(const std::string& key) { + auto crc = crc32(0L, (const Bytef*)key.data(), (int)key.size()); + return (int)(crc % caches_.size()); +} + +Status PikaCache::WriteKVToCache(std::string& key, std::string &value, int64_t ttl) { + if (0 >= ttl) { + if (PIKA_TTL_NONE == ttl) { + return SetnxWithoutTTL(key, value); + } else { + return Del({key}); + } + } else { + return Setnx(key, value, ttl); + } + return Status::OK(); +} + +Status PikaCache::WriteHashToCache(std::string& key, std::vector &fvs, int64_t ttl) { + if (0 >= ttl) { + if (PIKA_TTL_NONE == ttl) { + return HMSetnxWithoutTTL(key, fvs); + } else { + return Del({key}); + } + } else { + return HMSetnx(key, fvs, ttl); + } + return Status::OK(); +} + +Status PikaCache::WriteListToCache(std::string& key, std::vector &values, int64_t ttl) { + if (0 >= ttl) { + if (PIKA_TTL_NONE == ttl) { + return RPushnxWithoutTTL(key, values); + } else { + return Del({key}); + } + } else { + return RPushnx(key, values, ttl); + } + return Status::OK(); +} + +Status PikaCache::WriteSetToCache(std::string& key, std::vector &members, int64_t ttl) { + if (0 >= ttl) { + if (PIKA_TTL_NONE == ttl) { + return SAddnxWithoutTTL(key, members); + } else { + return Del({key}); + } + } else { + return SAddnx(key, members, ttl); + } + return Status::OK(); +} + +Status PikaCache::WriteZSetToCache(std::string& key, std::vector &score_members, int64_t ttl) { + if (0 >= ttl) { + if (PIKA_TTL_NONE == ttl) { + return ZAddnxWithoutTTL(key, score_members); + } else { + return Del({key}); + } + } else { + return ZAddnx(key, score_members, ttl); 
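+    // NOTE: WriteKVToCache / WriteHashToCache / WriteListToCache /
+    // WriteSetToCache / WriteZSetToCache all share this three-way TTL
+    // convention: ttl > 0 caches the value with that TTL (the ZAddnx branch
+    // here); ttl == PIKA_TTL_NONE means the DB key is persistent, so it is
+    // cached without an expiry; any other ttl <= 0 means the key is expired
+    // or about to expire, so it is removed from the cache instead of being
+    // cached stale.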
+  }
+  return Status::OK();
+}
+
+void PikaCache::PushKeyToAsyncLoadQueue(const char key_type, std::string& key, const std::shared_ptr<DB>& db) {
+  cache_load_thread_->Push(key_type, key, db);
+}
+
+void PikaCache::ClearHitRatio(void) {
+  std::unique_lock<std::shared_mutex> l(rwlock_);
+  cache::RedisCache::ResetHitAndMissNum();
+}
diff --git a/tools/pika_migrate/src/pika_cache_load_thread.cc b/tools/pika_migrate/src/pika_cache_load_thread.cc
new file mode 100644
index 0000000000..f9bb040a40
--- /dev/null
+++ b/tools/pika_migrate/src/pika_cache_load_thread.cc
@@ -0,0 +1,214 @@
+// Copyright (c) 2023-present, Qihoo, Inc. All rights reserved.
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree. An additional grant
+// of patent rights can be found in the PATENTS file in the same directory.
+#include <glog/logging.h>
+
+#include "include/pika_cache_load_thread.h"
+#include "include/pika_server.h"
+#include "include/pika_cache.h"
+#include "pstd/include/scope_record_lock.h"
+
+extern PikaServer* g_pika_server;
+
+PikaCacheLoadThread::PikaCacheLoadThread(int zset_cache_start_direction, int zset_cache_field_num_per_key)
+    : should_exit_(false)
+    , loadkeys_cond_()
+    , async_load_keys_num_(0)
+    , waitting_load_keys_num_(0)
+    , zset_cache_start_direction_(zset_cache_start_direction)
+    , zset_cache_field_num_per_key_(zset_cache_field_num_per_key)
+{
+  set_thread_name("PikaCacheLoadThread");
+}
+
+PikaCacheLoadThread::~PikaCacheLoadThread() {
+  {
+    std::lock_guard<std::mutex> lq(loadkeys_mutex_);
+    should_exit_ = true;
+    loadkeys_cond_.notify_all();
+  }
+
+  StopThread();
+}
+
+void PikaCacheLoadThread::Push(const char key_type, std::string& key, const std::shared_ptr<DB>& db) {
+  std::unique_lock<std::mutex> lq(loadkeys_mutex_);
+  std::unique_lock<std::mutex> lm(loadkeys_map_mutex_);
+  if (CACHE_LOAD_QUEUE_MAX_SIZE < loadkeys_queue_.size()) {
+    // print this warning at most once every 5s
+    static uint64_t last_log_time_us = 0;
+    if (pstd::NowMicros() - last_log_time_us > 5000000) {
+      LOG(WARNING) << "PikaCacheLoadThread::Push waiting...";
+      last_log_time_us = pstd::NowMicros();
+    }
+    return;
+  }
+
+  if (loadkeys_map_.find(key) == loadkeys_map_.end()) {
+    std::tuple<const char, std::string, std::shared_ptr<DB>> ktuple = std::make_tuple(key_type, key, db);
+    loadkeys_queue_.push_back(ktuple);
+    loadkeys_map_[key] = std::string("");
+    loadkeys_cond_.notify_all();
+  }
+}
+
+bool PikaCacheLoadThread::LoadKV(std::string& key, const std::shared_ptr<DB>& db) {
+  std::string value;
+  int64_t ttl = -1;
+  rocksdb::Status s = db->storage()->GetWithTTL(key, &value, &ttl);
+  if (!s.ok()) {
+    LOG(WARNING) << "load kv failed, key=" << key;
+    return false;
+  }
+  db->cache()->WriteKVToCache(key, value, ttl);
+  return true;
+}
+
+bool PikaCacheLoadThread::LoadHash(std::string& key, const std::shared_ptr<DB>& db) {
+  int32_t len = 0;
+  db->storage()->HLen(key, &len);
+  if (0 >= len || CACHE_VALUE_ITEM_MAX_SIZE < len) {
+    return false;
+  }
+
+  std::vector<storage::FieldValue> fvs;
+  int64_t ttl = -1;
+  rocksdb::Status s = db->storage()->HGetallWithTTL(key, &fvs, &ttl);
+  if (!s.ok()) {
+    LOG(WARNING) << "load hash failed, key=" << key;
+    return false;
+  }
+  db->cache()->WriteHashToCache(key, fvs, ttl);
+  return true;
+}
+
+bool PikaCacheLoadThread::LoadList(std::string& key, const std::shared_ptr<DB>& db) {
+  uint64_t len = 0;
+  db->storage()->LLen(key, &len);
+  if (len <= 0 || CACHE_VALUE_ITEM_MAX_SIZE < len) {
+    LOG(WARNING) << "can not load key, because item size:" << len
+                 << " beyond max item size:" << CACHE_VALUE_ITEM_MAX_SIZE;
+    return false;
+  }
+
+  std::vector<std::string> values;
+  int64_t ttl = -1;
+  rocksdb::Status s
= db->storage()->LRangeWithTTL(key, 0, -1, &values, &ttl); + if (!s.ok()) { + LOG(WARNING) << "load list failed, key=" << key; + return false; + } + db->cache()->WriteListToCache(key, values, ttl); + return true; +} + +bool PikaCacheLoadThread::LoadSet(std::string& key, const std::shared_ptr& db) { + int32_t len = 0; + db->storage()->SCard(key, &len); + if (0 >= len || CACHE_VALUE_ITEM_MAX_SIZE < len) { + LOG(WARNING) << "can not load key, because item size:" << len + << " beyond max item size:" << CACHE_VALUE_ITEM_MAX_SIZE; + return false; + } + + std::vector values; + int64_t ttl_millsec = -1; + rocksdb::Status s = db->storage()->SMembersWithTTL(key, &values, &ttl_millsec); + if (!s.ok()) { + LOG(WARNING) << "load set failed, key=" << key; + return false; + } + db->cache()->WriteSetToCache(key, values, ttl_millsec > 0 ? ttl_millsec / 1000 : ttl_millsec); + return true; +} + +bool PikaCacheLoadThread::LoadZset(std::string& key, const std::shared_ptr& db) { + int32_t len = 0; + int start_index = 0; + int stop_index = -1; + db->storage()->ZCard(key, &len); + if (0 >= len) { + return false; + } + + uint64_t cache_len = 0; + db->cache()->CacheZCard(key, &cache_len); + if (cache_len != 0) { + return true; + } + if (zset_cache_start_direction_ == cache::CACHE_START_FROM_BEGIN) { + if (zset_cache_field_num_per_key_ <= len) { + stop_index = zset_cache_field_num_per_key_ - 1; + } + } else if (zset_cache_start_direction_ == cache::CACHE_START_FROM_END) { + if (zset_cache_field_num_per_key_ <= len) { + start_index = len - zset_cache_field_num_per_key_; + } + } + + std::vector score_members; + int64_t ttl = -1; + rocksdb::Status s = db->storage()->ZRangeWithTTL(key, start_index, stop_index, &score_members, &ttl); + if (!s.ok()) { + LOG(WARNING) << "load zset failed, key=" << key; + return false; + } + db->cache()->WriteZSetToCache(key, score_members, ttl); + return true; +} + +bool PikaCacheLoadThread::LoadKey(const char key_type, std::string& key, const std::shared_ptr& db) { + pstd::lock::ScopeRecordLock record_lock(db->LockMgr(), key); + switch (key_type) { + case 'k': + return LoadKV(key, db); + case 'h': + return LoadHash(key, db); + case 'l': + return LoadList(key, db); + case 's': + return LoadSet(key, db); + case 'z': + return LoadZset(key, db); + default: + LOG(WARNING) << "PikaCacheLoadThread::LoadKey invalid key type : " << key_type; + return false; + } +} + +void *PikaCacheLoadThread::ThreadMain() { + LOG(INFO) << "PikaCacheLoadThread::ThreadMain Start"; + + while (!should_exit_) { + std::deque>> load_keys; + { + std::unique_lock lq(loadkeys_mutex_); + waitting_load_keys_num_ = loadkeys_queue_.size(); + while (!should_exit_ && loadkeys_queue_.size() <= 0) { + loadkeys_cond_.wait(lq); + } + + if (should_exit_) { + return nullptr; + } + + for (int i = 0; i < CACHE_LOAD_NUM_ONE_TIME; ++i) { + if (!loadkeys_queue_.empty()) { + load_keys.push_back(loadkeys_queue_.front()); + loadkeys_queue_.pop_front(); + } + } + } + for (auto & load_key : load_keys) { + if (LoadKey(std::get<0>(load_key), std::get<1>(load_key), std::get<2>(load_key))) { + ++async_load_keys_num_; + } + + std::unique_lock lm(loadkeys_map_mutex_); + loadkeys_map_.erase(std::get<1>(load_key)); + } + } + + return nullptr; +} diff --git a/tools/pika_migrate/src/pika_client_conn.cc b/tools/pika_migrate/src/pika_client_conn.cc new file mode 100644 index 0000000000..768cb6d5ad --- /dev/null +++ b/tools/pika_migrate/src/pika_client_conn.cc @@ -0,0 +1,589 @@ +// Copyright (c) 2015-present, Qihoo, Inc. All rights reserved. 
+// This source code is licensed under the BSD-style license found in the +// LICENSE file in the root directory of this source tree. An additional grant +// of patent rights can be found in the PATENTS file in the same directory. + +#include +#include +#include +#include + +#include "include/pika_admin.h" +#include "include/pika_client_conn.h" +#include "include/pika_cmd_table_manager.h" +#include "include/pika_command.h" +#include "include/pika_conf.h" +#include "include/pika_define.h" +#include "include/pika_rm.h" +#include "include/pika_server.h" +#include "net/src/dispatch_thread.h" +#include "net/src/worker_thread.h" +#include "src/pstd/include/scope_record_lock.h" + +extern std::unique_ptr g_pika_conf; +extern PikaServer* g_pika_server; +extern std::unique_ptr g_pika_rm; +extern std::unique_ptr g_pika_cmd_table_manager; + +PikaClientConn::PikaClientConn(int fd, const std::string& ip_port, net::Thread* thread, net::NetMultiplexer* mpx, + const net::HandleType& handle_type, int max_conn_rbuf_size) + : RedisConn(fd, ip_port, thread, mpx, handle_type, max_conn_rbuf_size), + server_thread_(reinterpret_cast(thread)), + current_db_(g_pika_conf->default_db()) { + InitUser(); + time_stat_.reset(new TimeStat()); +} + +std::shared_ptr PikaClientConn::DoCmd(const PikaCmdArgsType& argv, const std::string& opt, + const std::shared_ptr& resp_ptr, bool cache_miss_in_rtc) { + // Get command info + std::shared_ptr c_ptr = g_pika_cmd_table_manager->GetCmd(opt); + if (!c_ptr) { + std::shared_ptr tmp_ptr = std::make_shared(DummyCmd()); + tmp_ptr->res().SetRes(CmdRes::kErrOther, "unknown command \"" + opt + "\""); + if (IsInTxn()) { + SetTxnInitFailState(true); + } + return tmp_ptr; + } + c_ptr->SetCacheMissedInRtc(cache_miss_in_rtc); + c_ptr->SetConn(shared_from_this()); + c_ptr->SetResp(resp_ptr); + + // Check authed + if (AuthRequired()) { // the user is not authed, need to do auth + if (!(c_ptr->flag() & kCmdFlagsNoAuth)) { + c_ptr->res().SetRes(CmdRes::kErrOther, "NOAUTH Authentication required."); + return c_ptr; + } + } + // Initial + c_ptr->Initial(argv, current_db_); + if (!c_ptr->res().ok()) { + if (IsInTxn()) { + SetTxnInitFailState(true); + } + return c_ptr; + } + + int8_t subCmdIndex = -1; + std::string errKey; + auto checkRes = user_->CheckUserPermission(c_ptr, argv, subCmdIndex, &errKey); + std::string cmdName = c_ptr->name(); + if (subCmdIndex >= 0 && checkRes == AclDeniedCmd::CMD) { + cmdName += "|" + argv[1]; + } + + std::string object; + switch (checkRes) { + case AclDeniedCmd::CMD: + c_ptr->res().SetRes(CmdRes::kNone, fmt::format("-NOPERM this user has no permissions to run the '{}' command\r\n", + pstd::StringToLower(cmdName))); + object = cmdName; + break; + case AclDeniedCmd::KEY: + c_ptr->res().SetRes(CmdRes::kNone, + "-NOPERM this user has no permissions to access one of the keys used as arguments\r\n"); + object = errKey; + break; + case AclDeniedCmd::CHANNEL: + c_ptr->res().SetRes(CmdRes::kNone, + "-NOPERM this user has no permissions to access one of the channel used as arguments\r\n"); + object = errKey; + break; + case AclDeniedCmd::NO_SUB_CMD: + c_ptr->res().SetRes(CmdRes::kErrOther, fmt::format("unknown subcommand '{}' subcommand", argv[1])); + break; + case AclDeniedCmd::NO_AUTH: + c_ptr->res().AppendContent("-NOAUTH Authentication required."); + break; + default: + break; + } + + if (checkRes == AclDeniedCmd::CMD || checkRes == AclDeniedCmd::KEY || checkRes == AclDeniedCmd::CHANNEL) { + std::string cInfo; + ClientInfoToString(&cInfo, cmdName); + int32_t context = 
IsInTxn() ? static_cast(AclLogCtx::MULTI) : static_cast(AclLogCtx::TOPLEVEL); + + if (checkRes == AclDeniedCmd::CMD && IsInTxn() && cmdName == kCmdNameExec) { + object = kCmdNameMulti; + } + g_pika_server->Acl()->AddLogEntry(static_cast(checkRes), context, user_->Name(), object, cInfo); + + return c_ptr; + } + + if (IsInTxn() && opt != kCmdNameExec && opt != kCmdNameWatch && opt != kCmdNameDiscard && opt != kCmdNameMulti) { + if (c_ptr->is_write() && g_pika_server->readonly(current_db_)) { + SetTxnInitFailState(true); + c_ptr->res().SetRes(CmdRes::kErrOther, "READONLY You can't write against a read only replica."); + return c_ptr; + } + PushCmdToQue(c_ptr); + c_ptr->res().SetRes(CmdRes::kTxnQueued); + return c_ptr; + } + + bool is_monitoring = g_pika_server->HasMonitorClients(); + if (is_monitoring) { + ProcessMonitor(argv); + } + + g_pika_server->UpdateQueryNumAndExecCountDB(current_db_, opt, c_ptr->is_write()); + + // PubSub connection + // (P)SubscribeCmd will set is_pubsub_ + if (this->IsPubSub()) { + if (opt != kCmdNameSubscribe && opt != kCmdNameUnSubscribe && opt != kCmdNamePing && opt != kCmdNamePSubscribe && + opt != kCmdNamePUnSubscribe) { + c_ptr->res().SetRes(CmdRes::kErrOther, + "only (P)SUBSCRIBE / (P)UNSUBSCRIBE / PING / QUIT allowed in this context"); + return c_ptr; + } + } + + // reject all the request before new master sync finished + if (g_pika_server->leader_protected_mode()) { + c_ptr->res().SetRes(CmdRes::kErrOther, "Cannot process command before new leader sync finished"); + return c_ptr; + } + + if (!g_pika_server->IsDBExist(current_db_)) { + c_ptr->res().SetRes(CmdRes::kErrOther, "DB not found"); + return c_ptr; + } + + if (c_ptr->is_write()) { + if (g_pika_server->IsDBBinlogIoError(current_db_)) { + c_ptr->res().SetRes(CmdRes::kErrOther, "Writing binlog failed, maybe no space left on device"); + return c_ptr; + } + std::vector cur_key = c_ptr->current_key(); + if (cur_key.empty() && opt != kCmdNameExec) { + c_ptr->res().SetRes(CmdRes::kErrOther, "Internal ERROR"); + return c_ptr; + } + if (g_pika_server->readonly(current_db_) && opt != kCmdNameExec) { + c_ptr->res().SetRes(CmdRes::kErrOther, "READONLY You can't write against a read only replica."); + return c_ptr; + } + } else if (c_ptr->is_read() && c_ptr->flag_ == 0) { + const auto& server_guard = std::lock_guard(g_pika_server->GetDBLock()); + int role = 0; + auto status = g_pika_rm->CheckDBRole(current_db_, &role); + if (!status.ok()) { + c_ptr->res().SetRes(CmdRes::kErrOther, "Internal ERROR"); + return c_ptr; + } else if ((role & PIKA_ROLE_SLAVE) == PIKA_ROLE_SLAVE) { + const auto& slave_db = g_pika_rm->GetSyncSlaveDBByName(DBInfo(current_db_)); + if (!slave_db) { + c_ptr->res().SetRes(CmdRes::kErrOther, "Internal ERROR"); + return c_ptr; + } else if (slave_db->State() != ReplState::kConnected) { + c_ptr->res().SetRes(CmdRes::kErrOther, "Full sync not completed"); + return c_ptr; + } + } + } + + if (c_ptr->res().ok() && c_ptr->is_write() && name() != kCmdNameExec) { + if (c_ptr->name() == kCmdNameFlushdb) { + auto flushdb = std::dynamic_pointer_cast(c_ptr); + SetTxnFailedIfKeyExists(flushdb->GetFlushDBname()); + } else if (c_ptr->name() == kCmdNameFlushall) { + SetTxnFailedIfKeyExists(); + } else { + auto table_keys = c_ptr->current_key(); + for (auto& key : table_keys) { + key = c_ptr->db_name().append("_").append(key); + } + SetTxnFailedFromKeys(table_keys); + } + } + + // Process Command + c_ptr->Execute(); + time_stat_->process_done_ts_ = pstd::NowMicros(); + auto cmdstat_map = 
g_pika_cmd_table_manager->GetCommandStatMap(); + (*cmdstat_map)[opt].cmd_count.fetch_add(1); + (*cmdstat_map)[opt].cmd_time_consuming.fetch_add(time_stat_->total_time()); + + if (g_pika_conf->slowlog_slower_than() >= 0) { + ProcessSlowlog(argv, c_ptr->GetDoDuration()); + } + + return c_ptr; +} + +void PikaClientConn::ProcessSlowlog(const PikaCmdArgsType& argv, uint64_t do_duration) { + if (time_stat_->total_time() > g_pika_conf->slowlog_slower_than()) { + g_pika_server->SlowlogPushEntry(argv, time_stat_->start_ts() / 1000000, time_stat_->total_time()); + if (g_pika_conf->slowlog_write_errorlog()) { + bool trim = false; + std::string slow_log; + uint32_t cmd_size = 0; + for (const auto& i : argv) { + cmd_size += 1 + i.size(); // blank space and argument length + if (!trim) { + slow_log.append(" "); + slow_log.append(pstd::ToRead(i)); + if (slow_log.size() >= 1000) { + trim = true; + slow_log.resize(1000); + slow_log.append("...\""); + } + } + } + LOG(ERROR) << "ip_port: " << ip_port() << ", db: " << current_db_ << ", command:" << slow_log + << ", command_size: " << cmd_size - 1 << ", arguments: " << argv.size() + << ", total_time(ms): " << time_stat_->total_time() / 1000 + << ", before_queue_time(ms): " << time_stat_->before_queue_time() / 1000 + << ", queue_time(ms): " << time_stat_->queue_time() / 1000 + << ", process_time(ms): " << time_stat_->process_time() / 1000 + << ", cmd_time(ms): " << do_duration / 1000; + } + } +} + +void PikaClientConn::ProcessMonitor(const PikaCmdArgsType& argv) { + std::string monitor_message; + std::string db_name = current_db_.substr(2); + monitor_message = std::to_string(1.0 * static_cast(pstd::NowMicros()) / 1000000) + " [" + db_name + " " + + this->ip_port() + "]"; + for (const auto& iter : argv) { + monitor_message += " " + pstd::ToRead(iter); + } + g_pika_server->AddMonitorMessage(monitor_message); +} + +bool PikaClientConn::IsInterceptedByRTC(std::string& opt) { + // currently we only Intercept: Get, HGet + if (opt == kCmdNameGet && g_pika_conf->GetCacheString()) { + return true; + } + if (opt == kCmdNameHGet && g_pika_conf->GetCacheHash()) { + return true; + } + return false; +} + +void PikaClientConn::ProcessRedisCmds(const std::vector& argvs, bool async, + std::string* response) { + time_stat_->Reset(); + if (async) { + auto arg = new BgTaskArg(); + arg->cache_miss_in_rtc_ = false; + arg->redis_cmds = argvs; + time_stat_->enqueue_ts_ = time_stat_->before_queue_ts_ = pstd::NowMicros(); + arg->conn_ptr = std::dynamic_pointer_cast(shared_from_this()); + /** + * If using the pipeline method to transmit batch commands to Pika, it is unable to + * correctly distinguish between fast and slow commands. + * However, if using the pipeline method for Codis, it can correctly distinguish between + * fast and slow commands, but it cannot guarantee sequential execution. 
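+     *
+     * For example, a single pipelined packet containing [SLOWCMD key1; GET key2]
+     * (SLOWCMD standing in for any command named in the configured slow-cmd list)
+     * is classified by its first command only (argvs[0]), so the whole batch is
+     * queued as "slow" even though GET key2 alone would have taken the fast path.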
+ */ + std::string opt = argvs[0][0]; + pstd::StringToLower(opt); + bool is_slow_cmd = g_pika_conf->is_slow_cmd(opt); + bool is_admin_cmd = g_pika_conf->is_admin_cmd(opt); + + // we don't intercept pipeline batch (argvs.size() > 1) + if (g_pika_conf->rtc_cache_read_enabled() && argvs.size() == 1 && IsInterceptedByRTC(opt) && + PIKA_CACHE_NONE != g_pika_conf->cache_mode() && !IsInTxn()) { + // read in cache + if (ReadCmdInCache(argvs[0], opt)) { + delete arg; + return; + } + arg->cache_miss_in_rtc_ = true; + time_stat_->before_queue_ts_ = pstd::NowMicros(); + } + + g_pika_server->ScheduleClientPool(&DoBackgroundTask, arg, is_slow_cmd, is_admin_cmd); + return; + } + BatchExecRedisCmd(argvs, false); +} + +void PikaClientConn::DoBackgroundTask(void* arg) { + std::unique_ptr bg_arg(static_cast(arg)); + std::shared_ptr conn_ptr = bg_arg->conn_ptr; + conn_ptr->time_stat_->dequeue_ts_ = pstd::NowMicros(); + if (bg_arg->redis_cmds.empty()) { + conn_ptr->NotifyEpoll(false); + return; + } + for (const auto& argv : bg_arg->redis_cmds) { + if (argv.empty()) { + conn_ptr->NotifyEpoll(false); + return; + } + } + + conn_ptr->BatchExecRedisCmd(bg_arg->redis_cmds, bg_arg->cache_miss_in_rtc_); +} + +void PikaClientConn::BatchExecRedisCmd(const std::vector& argvs, bool cache_miss_in_rtc) { + resp_num.store(static_cast(argvs.size())); + for (const auto& argv : argvs) { + std::shared_ptr resp_ptr = std::make_shared(); + resp_array.push_back(resp_ptr); + ExecRedisCmd(argv, resp_ptr, cache_miss_in_rtc); + } + time_stat_->process_done_ts_ = pstd::NowMicros(); + TryWriteResp(); +} + +bool PikaClientConn::ReadCmdInCache(const net::RedisCmdArgsType& argv, const std::string& opt) { + resp_num.store(1); + std::shared_ptr c_ptr = g_pika_cmd_table_manager->GetCmd(opt); + if (!c_ptr) { + return false; + } + // Check authed + if (AuthRequired()) { // the user is not authed, need to do auth + if (!(c_ptr->flag() & kCmdFlagsNoAuth)) { + return false; + } + } + // Initial + c_ptr->Initial(argv, current_db_); + // dont store cmd with too large key(only Get/HGet cmd can reach here) + // the cmd with large key should be non-exist in cache, except for pre-stored + if (c_ptr->IsTooLargeKey(g_pika_conf->max_key_size_in_cache())) { + resp_num--; + return false; + } + // acl check + int8_t subCmdIndex = -1; + std::string errKey; + auto checkRes = user_->CheckUserPermission(c_ptr, argv, subCmdIndex, &errKey); + std::string object; + if (checkRes == AclDeniedCmd::CMD || checkRes == AclDeniedCmd::KEY || checkRes == AclDeniedCmd::CHANNEL || + checkRes == AclDeniedCmd::NO_SUB_CMD || checkRes == AclDeniedCmd::NO_AUTH) { + // acl check failed + return false; + } + // only read command(Get, HGet) will reach here, no need of record lock + bool read_status = c_ptr->DoReadCommandInCache(); + auto cmdstat_map = g_pika_cmd_table_manager->GetCommandStatMap(); + resp_num--; + if (read_status) { + time_stat_->process_done_ts_ = pstd::NowMicros(); + (*cmdstat_map)[argv[0]].cmd_count.fetch_add(1); + (*cmdstat_map)[argv[0]].cmd_time_consuming.fetch_add(time_stat_->total_time()); + resp_array.emplace_back(std::make_shared(std::move(c_ptr->res().message()))); + TryWriteResp(); + } + return read_status; +} + +void PikaClientConn::TryWriteResp() { + int expected = 0; + if (resp_num.compare_exchange_strong(expected, -1)) { + for (auto& resp : resp_array) { + WriteResp(*resp); + } + if (write_completed_cb_) { + write_completed_cb_(); + write_completed_cb_ = nullptr; + } + resp_array.clear(); + NotifyEpoll(true); + } +} + +void 
PikaClientConn::PushCmdToQue(std::shared_ptr cmd) { txn_cmd_que_.push(cmd); } + +bool PikaClientConn::IsInTxn() { + std::lock_guard lg(txn_state_mu_); + return txn_state_[TxnStateBitMask::Start]; +} + +bool PikaClientConn::IsTxnInitFailed() { + std::lock_guard lg(txn_state_mu_); + return txn_state_[TxnStateBitMask::InitCmdFailed]; +} + +bool PikaClientConn::IsTxnWatchFailed() { + std::lock_guard lg(txn_state_mu_); + return txn_state_[TxnStateBitMask::WatchFailed]; +} + +bool PikaClientConn::IsTxnExecing() { + std::lock_guard lg(txn_state_mu_); + return txn_state_[TxnStateBitMask::Execing] && txn_state_[TxnStateBitMask::Start]; +} + +void PikaClientConn::SetTxnWatchFailState(bool is_failed) { + std::lock_guard lg(txn_state_mu_); + txn_state_[TxnStateBitMask::WatchFailed] = is_failed; +} + +void PikaClientConn::SetTxnInitFailState(bool is_failed) { + std::lock_guard lg(txn_state_mu_); + txn_state_[TxnStateBitMask::InitCmdFailed] = is_failed; +} + +void PikaClientConn::SetTxnStartState(bool is_start) { + std::lock_guard lg(txn_state_mu_); + txn_state_[TxnStateBitMask::Start] = is_start; +} + +void PikaClientConn::ClearTxnCmdQue() { txn_cmd_que_ = std::queue>{}; } + +void PikaClientConn::AddKeysToWatch(const std::vector& db_keys) { + for (const auto& it : db_keys) { + watched_db_keys_.emplace(it); + } + + auto dispatcher = dynamic_cast(server_thread()); + if (dispatcher != nullptr) { + dispatcher->AddWatchKeys(watched_db_keys_, shared_from_this()); + } +} + +void PikaClientConn::RemoveWatchedKeys() { + auto dispatcher = dynamic_cast(server_thread()); + if (dispatcher != nullptr) { + watched_db_keys_.clear(); + dispatcher->RemoveWatchKeys(shared_from_this()); + } +} + +void PikaClientConn::SetTxnFailedFromKeys(const std::vector& db_keys) { + auto dispatcher = dynamic_cast(server_thread()); + if (dispatcher != nullptr) { + auto involved_conns = std::vector>{}; + involved_conns = dispatcher->GetInvolvedTxn(db_keys); + for (auto& conn : involved_conns) { + if (auto c = std::dynamic_pointer_cast(conn); c != nullptr) { + c->SetTxnWatchFailState(true); + } + } + } +} + +// if key in target_db exists, then the key been watched multi will be failed +void PikaClientConn::SetTxnFailedIfKeyExists(std::string target_db_name) { + auto dispatcher = dynamic_cast(server_thread()); + if (dispatcher == nullptr) { + return; + } + auto involved_conns = dispatcher->GetAllTxns(); + for (auto& conn : involved_conns) { + std::shared_ptr c; + if (c = std::dynamic_pointer_cast(conn); c == nullptr) { + continue; + } + + for (const auto& db_key : c->watched_db_keys_) { + size_t pos = db_key.find('_'); + if (pos == std::string::npos) { + continue; + } + + auto db_name = db_key.substr(0, pos); + auto key = db_key.substr(pos + 1); + + if (target_db_name == "" || target_db_name == "all" || target_db_name == db_name) { + auto db = g_pika_server->GetDB(db_name); + // if watched key exists, set watch state to failed + if (db->storage()->Exists({key}) > 0) { + c->SetTxnWatchFailState(true); + break; + } + } + } + } +} + +void PikaClientConn::ExitTxn() { + if (IsInTxn()) { + RemoveWatchedKeys(); + ClearTxnCmdQue(); + std::lock_guard lg(txn_state_mu_); + txn_state_.reset(); + } +} + +void PikaClientConn::ExecRedisCmd(const PikaCmdArgsType& argv, std::shared_ptr& resp_ptr, + bool cache_miss_in_rtc) { + // get opt + std::string opt = argv[0]; + pstd::StringToLower(opt); + if (opt == kClusterPrefix) { + if (argv.size() >= 2) { + opt += argv[1]; + pstd::StringToLower(opt); + } + } + + std::shared_ptr cmd_ptr = DoCmd(argv, opt, 
resp_ptr, cache_miss_in_rtc); + *resp_ptr = std::move(cmd_ptr->res().message()); + resp_num--; +} + +std::queue> PikaClientConn::GetTxnCmdQue() { return txn_cmd_que_; } + +void PikaClientConn::DoAuth(const std::shared_ptr& user) { + user_ = user; + authenticated_ = true; +} + +void PikaClientConn::UnAuth(const std::shared_ptr& user) { + user_ = user; + // If the user does not have a password, and the user is valid, then the user does not need authentication + authenticated_ = user_->HasFlags(static_cast(AclUserFlag::NO_PASS)) && + !user_->HasFlags(static_cast(AclUserFlag::DISABLED)); +} + +bool PikaClientConn::IsAuthed() const { return authenticated_; } +void PikaClientConn::InitUser() { + if (!g_pika_conf->GetUserBlackList().empty()) { + user_ = g_pika_server->Acl()->GetUserLock(Acl::DefaultLimitUser); + } else { + user_ = g_pika_server->Acl()->GetUserLock(Acl::DefaultUser); + } + authenticated_ = user_->HasFlags(static_cast(AclUserFlag::NO_PASS)) && + !user_->HasFlags(static_cast(AclUserFlag::DISABLED)); +} +bool PikaClientConn::AuthRequired() const { + // If the user does not have a password, and the user is valid, then the user does not need authentication + // Otherwise, you need to determine whether go has been authenticated + if (IsAuthed()) { + return false; + } + if (user_->HasFlags(static_cast(AclUserFlag::DISABLED))) { + return true; + } + if (user_->HasFlags(static_cast(AclUserFlag::NO_PASS))) { + return false; + } + return true; +} +std::string PikaClientConn::UserName() const { return user_->Name(); } + +void PikaClientConn::ClientInfoToString(std::string* info, const std::string& cmdName) { + uint64_t age = pstd::NowMicros() - last_interaction().tv_usec; + + std::string flags; + g_pika_server->ClientIsMonitor(std::dynamic_pointer_cast(shared_from_this())) ? flags.append("O") + : flags.append("S"); + if (IsPubSub()) { + flags.append("P"); + } + + info->append(fmt::format( + "id={} addr={} name={} age={} idle={} flags={} db={} sub={} psub={} multi={} " + "cmd={} user={} resp=2", + fd(), ip_port(), name(), age, age / 1000000, flags, GetCurrentTable(), + IsPubSub() ? g_pika_server->ClientPubSubChannelSize(shared_from_this()) : 0, + IsPubSub() ? g_pika_server->ClientPubSubChannelPatternSize(shared_from_this()) : 0, -1, cmdName, user_->Name())); +} + +// compare addr in ClientInfo +bool AddrCompare(const ClientInfo& lhs, const ClientInfo& rhs) { return rhs.ip_port < lhs.ip_port; } + +bool IdleCompare(const ClientInfo& lhs, const ClientInfo& rhs) { return lhs.last_interaction < rhs.last_interaction; } diff --git a/tools/pika_migrate/src/pika_client_processor.cc b/tools/pika_migrate/src/pika_client_processor.cc new file mode 100644 index 0000000000..5a1c60cee0 --- /dev/null +++ b/tools/pika_migrate/src/pika_client_processor.cc @@ -0,0 +1,46 @@ +// Copyright (c) 2015-present, Qihoo, Inc. All rights reserved. +// This source code is licensed under the BSD-style license found in the +// LICENSE file in the root directory of this source tree. An additional grant +// of patent rights can be found in the PATENTS file in the same directory. 
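+
+// PikaClientProcessor is a thin wrapper around a net::ThreadPool: client
+// commands are scheduled onto the pool via SchedulePool(), and the
+// ThreadPool*QueueSize() accessors expose queue depth for monitoring.
+// A minimal usage sketch (the argument values and SomeTaskFunc are
+// illustrative placeholders, not part of these sources):
+//
+//   PikaClientProcessor processor(4, 1000, "Client");  // 4 workers, queue cap 1000
+//   processor.Start();                                 // returns net::kSuccess on success
+//   processor.SchedulePool(&SomeTaskFunc, arg);        // SomeTaskFunc is a net::TaskFunc
+//   processor.Stop();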
+ +#include "include/pika_client_processor.h" + +#include + +PikaClientProcessor::PikaClientProcessor(size_t worker_num, size_t max_queue_size, const std::string& name_prefix) { + pool_ = std::make_unique(worker_num, max_queue_size, name_prefix + "Pool"); +} + +PikaClientProcessor::~PikaClientProcessor() { + LOG(INFO) << "PikaClientProcessor exit!!!"; +} + +int PikaClientProcessor::Start() { + int res = pool_->start_thread_pool(); + if (res != net::kSuccess) { + return res; + } + return res; +} + +void PikaClientProcessor::Stop() { + pool_->stop_thread_pool(); +} + +void PikaClientProcessor::SchedulePool(net::TaskFunc func, void* arg) { pool_->Schedule(func, arg); } + +size_t PikaClientProcessor::ThreadPoolCurQueueSize() { + size_t cur_size = 0; + if (pool_) { + pool_->cur_queue_size(&cur_size); + } + return cur_size; +} + +size_t PikaClientProcessor::ThreadPoolMaxQueueSize() { + size_t cur_size = 0; + if (pool_) { + cur_size = pool_->max_queue_size(); + } + return cur_size; +} diff --git a/tools/pika_migrate/src/pika_cmd_table_manager.cc b/tools/pika_migrate/src/pika_cmd_table_manager.cc new file mode 100644 index 0000000000..974fceb0ee --- /dev/null +++ b/tools/pika_migrate/src/pika_cmd_table_manager.cc @@ -0,0 +1,110 @@ +// Copyright (c) 2018-present, Qihoo, Inc. All rights reserved. +// This source code is licensed under the BSD-style license found in the +// LICENSE file in the root directory of this source tree. An additional grant +// of patent rights can be found in the PATENTS file in the same directory. + +#include "include/pika_cmd_table_manager.h" + +#include +#include + +#include "include/acl.h" +#include "include/pika_conf.h" +#include "pstd/include/pstd_mutex.h" + +extern std::unique_ptr g_pika_conf; + +PikaCmdTableManager::PikaCmdTableManager() { + cmds_ = std::make_unique(); + cmds_->reserve(300); +} + +void PikaCmdTableManager::InitCmdTable(void) { + ::InitCmdTable(cmds_.get()); + for (const auto& cmd : *cmds_) { + if (cmd.second->flag() & kCmdFlagsWrite) { + cmd.second->AddAclCategory(static_cast(AclCategory::WRITE)); + } + if (cmd.second->flag() & kCmdFlagsRead && + !(cmd.second->AclCategory() & static_cast(AclCategory::SCRIPTING))) { + cmd.second->AddAclCategory(static_cast(AclCategory::READ)); + } + if (cmd.second->flag() & kCmdFlagsAdmin) { + cmd.second->AddAclCategory(static_cast(AclCategory::ADMIN) | + static_cast(AclCategory::DANGEROUS)); + } + if (cmd.second->flag() & kCmdFlagsPubSub) { + cmd.second->AddAclCategory(static_cast(AclCategory::PUBSUB)); + } + if (cmd.second->flag() & kCmdFlagsFast) { + cmd.second->AddAclCategory(static_cast(AclCategory::FAST)); + } + if (cmd.second->flag() & kCmdFlagsSlow) { + cmd.second->AddAclCategory(static_cast(AclCategory::SLOW)); + } + } + + CommandStatistics statistics; + for (auto& iter : *cmds_) { + cmdstat_map_.emplace(iter.first, statistics); + iter.second->SetCmdId(cmdId_++); + } +} + +void PikaCmdTableManager::RenameCommand(const std::string before, const std::string after) { + auto it = cmds_->find(before); + if (it != cmds_->end()) { + if (after.length() > 0) { + cmds_->insert(std::pair>(after, std::move(it->second))); + } else { + LOG(ERROR) << "The value of rename-command is null"; + } + cmds_->erase(it); + } +} + +std::unordered_map* PikaCmdTableManager::GetCommandStatMap() { + return &cmdstat_map_; +} + +std::shared_ptr PikaCmdTableManager::GetCmd(const std::string& opt) { + const std::string& internal_opt = opt; + return NewCommand(internal_opt); +} + +std::shared_ptr PikaCmdTableManager::NewCommand(const 
std::string& opt) { + Cmd* cmd = GetCmdFromDB(opt, *cmds_); + if (cmd) { + return std::shared_ptr(cmd->Clone()); + } + return nullptr; +} + +CmdTable* PikaCmdTableManager::GetCmdTable() { return cmds_.get(); } + +uint32_t PikaCmdTableManager::GetMaxCmdId() { return cmdId_; } + +bool PikaCmdTableManager::CheckCurrentThreadDistributionMapExist(const std::thread::id& tid) { + std::shared_lock l(map_protector_); + return thread_distribution_map_.find(tid) != thread_distribution_map_.end(); +} + +void PikaCmdTableManager::InsertCurrentThreadDistributionMap() { + auto tid = std::this_thread::get_id(); + std::unique_ptr distribution = std::make_unique(); + distribution->Init(); + std::lock_guard l(map_protector_); + thread_distribution_map_.emplace(tid, std::move(distribution)); +} + +bool PikaCmdTableManager::CmdExist(const std::string& cmd) const { return cmds_->find(cmd) != cmds_->end(); } + +std::vector PikaCmdTableManager::GetAclCategoryCmdNames(uint32_t flag) { + std::vector result; + for (const auto& item : (*cmds_)) { + if (item.second->AclCategory() & flag) { + result.emplace_back(item.first); + } + } + return result; +} diff --git a/tools/pika_migrate/src/pika_command.cc b/tools/pika_migrate/src/pika_command.cc new file mode 100644 index 0000000000..63199c3481 --- /dev/null +++ b/tools/pika_migrate/src/pika_command.cc @@ -0,0 +1,1076 @@ +// Copyright (c) 2015-present, Qihoo, Inc. All rights reserved. +// This source code is licensed under the BSD-style license found in the +// LICENSE file in the root directory of this source tree. An additional grant +// of patent rights can be found in the PATENTS file in the same directory. + +#include +#include + +#include +#include "include/pika_acl.h" +#include "include/pika_admin.h" +#include "include/pika_bit.h" +#include "include/pika_command.h" +#include "include/pika_geo.h" +#include "include/pika_hash.h" +#include "include/pika_hyperloglog.h" +#include "include/pika_kv.h" +#include "include/pika_list.h" +#include "include/pika_pubsub.h" +#include "include/pika_rm.h" +#include "include/pika_server.h" +#include "include/pika_set.h" +#include "include/pika_slot_command.h" +#include "include/pika_stream.h" +#include "include/pika_transaction.h" +#include "include/pika_zset.h" +#include "pstd_defer.h" +#include "src/pstd/include/scope_record_lock.h" + +using pstd::Status; + +extern PikaServer* g_pika_server; +extern std::unique_ptr g_pika_rm; +extern std::unique_ptr g_pika_cmd_table_manager; + +void InitCmdTable(CmdTable* cmd_table) { + // Admin + ////Slaveof + std::unique_ptr slaveofptr = + std::make_unique(kCmdNameSlaveof, -3, kCmdFlagsRead | kCmdFlagsAdmin | kCmdFlagsSlow); + cmd_table->insert(std::pair>(kCmdNameSlaveof, std::move(slaveofptr))); + + std::unique_ptr dbslaveofptr = + std::make_unique(kCmdNameDbSlaveof, -2, kCmdFlagsRead | kCmdFlagsAdmin | kCmdFlagsSlow); + cmd_table->insert(std::pair>(kCmdNameDbSlaveof, std::move(dbslaveofptr))); + + std::unique_ptr authptr = + std::make_unique(kCmdNameAuth, -2, kCmdFlagsRead | kCmdFlagsAdmin | kCmdFlagsNoAuth | kCmdFlagsFast); + cmd_table->insert(std::pair>(kCmdNameAuth, std::move(authptr))); + + std::unique_ptr bgsaveptr = std::make_unique( + kCmdNameBgsave, -1, kCmdFlagsRead | kCmdFlagsAdmin | kCmdFlagsSuspend | kCmdFlagsSlow); + cmd_table->insert(std::pair>(kCmdNameBgsave, std::move(bgsaveptr))); + + std::unique_ptr compactptr = + std::make_unique(kCmdNameCompact, -1, kCmdFlagsRead | kCmdFlagsAdmin | kCmdFlagsSlow | kCmdFlagsSuspend); + cmd_table->insert(std::pair>(kCmdNameCompact, 
std::move(compactptr))); + + std::unique_ptr compactrangeptr = std::make_unique(kCmdNameCompactRange, 4, kCmdFlagsRead | kCmdFlagsAdmin | kCmdFlagsSuspend); + cmd_table->insert(std::pair>(kCmdNameCompactRange, std::move(compactrangeptr))); + std::unique_ptr purgelogsto = + std::make_unique(kCmdNamePurgelogsto, -2, kCmdFlagsRead | kCmdFlagsAdmin); + cmd_table->insert(std::pair>(kCmdNamePurgelogsto, std::move(purgelogsto))); + + std::unique_ptr pingptr = + std::make_unique(kCmdNamePing, 1, kCmdFlagsRead | kCmdFlagsAdmin | kCmdFlagsFast); + cmd_table->insert(std::pair>(kCmdNamePing, std::move(pingptr))); + + std::unique_ptr helloptr = + std::make_unique(kCmdNameHello, -1, kCmdFlagsRead | kCmdFlagsAdmin | kCmdFlagsNoAuth | kCmdFlagsFast); + cmd_table->insert(std::pair>(kCmdNameHello, std::move(helloptr))); + + std::unique_ptr selectptr = + std::make_unique(kCmdNameSelect, 2, kCmdFlagsRead | kCmdFlagsAdmin | kCmdFlagsFast); + cmd_table->insert(std::pair>(kCmdNameSelect, std::move(selectptr))); + + std::unique_ptr flushallptr = std::make_unique( + kCmdNameFlushall, 1, kCmdFlagsWrite | kCmdFlagsSuspend | kCmdFlagsAdmin | kCmdFlagsUpdateCache | kCmdFlagsDoThroughDB | kCmdFlagsSlow); + cmd_table->insert(std::pair>(kCmdNameFlushall, std::move(flushallptr))); + + std::unique_ptr flushdbptr = std::make_unique( + kCmdNameFlushdb, -1, kCmdFlagsWrite | kCmdFlagsSuspend | kCmdFlagsAdmin | kCmdFlagsUpdateCache | kCmdFlagsDoThroughDB | kCmdFlagsSlow); + cmd_table->insert(std::pair>(kCmdNameFlushdb, std::move(flushdbptr))); + + std::unique_ptr clientptr = + std::make_unique(kCmdNameClient, -2, kCmdFlagsRead | kCmdFlagsAdmin | kCmdFlagsSlow); + cmd_table->insert(std::pair>(kCmdNameClient, std::move(clientptr))); + + std::unique_ptr shutdownptr = std::make_unique( + kCmdNameShutdown, 1, kCmdFlagsRead | kCmdFlagsLocal | kCmdFlagsAdmin | kCmdFlagsSlow); + cmd_table->insert(std::pair>(kCmdNameShutdown, std::move(shutdownptr))); + + std::unique_ptr infoptr = + std::make_unique(kCmdNameInfo, -1, kCmdFlagsRead | kCmdFlagsAdmin | kCmdFlagsSlow); + cmd_table->insert(std::pair>(kCmdNameInfo, std::move(infoptr))); + + std::unique_ptr configptr = + std::make_unique(kCmdNameConfig, -2, kCmdFlagsRead | kCmdFlagsAdmin | kCmdFlagsSlow); + cmd_table->insert(std::pair>(kCmdNameConfig, std::move(configptr))); + + std::unique_ptr monitorptr = + std::make_unique(kCmdNameMonitor, -1, kCmdFlagsRead | kCmdFlagsAdmin | kCmdFlagsSlow); + cmd_table->insert(std::pair>(kCmdNameMonitor, std::move(monitorptr))); + + std::unique_ptr dbsizeptr = + std::make_unique(kCmdNameDbsize, 1, kCmdFlagsRead | kCmdFlagsAdmin | kCmdFlagsFast); + cmd_table->insert(std::pair>(kCmdNameDbsize, std::move(dbsizeptr))); + + std::unique_ptr timeptr = + std::make_unique(kCmdNameTime, 1, kCmdFlagsRead | kCmdFlagsAdmin | kCmdFlagsFast); + cmd_table->insert(std::pair>(kCmdNameTime, std::move(timeptr))); + + std::unique_ptr delbackupptr = + std::make_unique(kCmdNameDelbackup, 1, kCmdFlagsRead | kCmdFlagsAdmin); + cmd_table->insert(std::pair>(kCmdNameDelbackup, std::move(delbackupptr))); + + std::unique_ptr echoptr = + std::make_unique(kCmdNameEcho, 2, kCmdFlagsRead | kCmdFlagsAdmin | kCmdFlagsFast); + cmd_table->insert(std::pair>(kCmdNameEcho, std::move(echoptr))); + + std::unique_ptr scandbptr = + std::make_unique(kCmdNameScandb, -1, kCmdFlagsRead | kCmdFlagsAdmin | kCmdFlagsSlow); + cmd_table->insert(std::pair>(kCmdNameScandb, std::move(scandbptr))); + + std::unique_ptr slowlogptr = + std::make_unique(kCmdNameSlowlog, -2, kCmdFlagsRead | kCmdFlagsAdmin | 
kCmdFlagsSlow); + cmd_table->insert(std::pair>(kCmdNameSlowlog, std::move(slowlogptr))); + + std::unique_ptr paddingptr = std::make_unique(kCmdNamePadding, 2, kCmdFlagsWrite | kCmdFlagsAdmin); + cmd_table->insert(std::pair>(kCmdNamePadding, std::move(paddingptr))); + + std::unique_ptr pkpatternmatchdelptr = + std::make_unique(kCmdNamePKPatternMatchDel, -2, kCmdFlagsWrite | kCmdFlagsAdmin); + cmd_table->insert( + std::pair>(kCmdNamePKPatternMatchDel, std::move(pkpatternmatchdelptr))); + std::unique_ptr dummyptr = std::make_unique(kCmdDummy, 0, kCmdFlagsWrite); + cmd_table->insert(std::pair>(kCmdDummy, std::move(dummyptr))); + + std::unique_ptr quitptr = + std::make_unique(kCmdNameQuit, 1, kCmdFlagsRead | kCmdFlagsNoAuth | kCmdFlagsFast); + cmd_table->insert(std::pair>(kCmdNameQuit, std::move(quitptr))); + + std::unique_ptr diskrecoveryptr = + std::make_unique(kCmdNameDiskRecovery, 1, kCmdFlagsRead | kCmdFlagsAdmin | kCmdFlagsSlow); + cmd_table->insert(std::pair>(kCmdNameDiskRecovery, std::move(diskrecoveryptr))); + + std::unique_ptr clearreplicationidptr = std::make_unique( + kCmdNameClearReplicationID, 1, kCmdFlagsWrite | kCmdFlagsAdmin | kCmdFlagsSlow); + cmd_table->insert( + std::pair>(kCmdNameClearReplicationID, std::move(clearreplicationidptr))); + std::unique_ptr disablewalptr = std::make_unique(kCmdNameDisableWal, 2, kCmdFlagsAdmin); + cmd_table->insert(std::pair>(kCmdNameDisableWal, std::move(disablewalptr))); + std::unique_ptr cacheptr = std::make_unique(kCmdNameCache, -2, kCmdFlagsAdmin | kCmdFlagsRead); + cmd_table->insert(std::pair>(kCmdNameCache, std::move(cacheptr))); + std::unique_ptr clearcacheptr = std::make_unique(kCmdNameClearCache, 1, kCmdFlagsAdmin | kCmdFlagsWrite); + cmd_table->insert(std::pair>(kCmdNameClearCache, std::move(clearcacheptr))); + std::unique_ptr lastsaveptr = std::make_unique(kCmdNameLastSave, 1, kCmdFlagsAdmin | kCmdFlagsRead | kCmdFlagsFast); + cmd_table->insert(std::pair>(kCmdNameLastSave, std::move(lastsaveptr))); + +#ifdef WITH_COMMAND_DOCS + std::unique_ptr commandptr = + std::make_unique(kCmdNameCommand, -1, kCmdFlagsRead | kCmdFlagsAdmin | kCmdFlagsSlow); + cmd_table->insert(std::pair>(kCmdNameCommand, std::move(commandptr))); +#endif + + // Slots related + std::unique_ptr slotsinfoptr = + std::make_unique(kCmdNameSlotsInfo, -1, kCmdFlagsRead | kCmdFlagsAdmin | kCmdFlagsSlow); + cmd_table->insert(std::pair>(kCmdNameSlotsInfo, std::move(slotsinfoptr))); + + std::unique_ptr slotmgrttagslotasyncptr = std::make_unique( + kCmdNameSlotsMgrtTagSlotAsync, 8, kCmdFlagsRead | kCmdFlagsAdmin | kCmdFlagsSlow); + cmd_table->insert( + std::pair>(kCmdNameSlotsMgrtTagSlotAsync, std::move(slotmgrttagslotasyncptr))); + + std::unique_ptr slotmgrtasyncstatus = std::make_unique( + kCmdNameSlotsMgrtAsyncStatus, 1, kCmdFlagsRead | kCmdFlagsAdmin | kCmdFlagsSlow); + cmd_table->insert( + std::pair>(kCmdNameSlotsMgrtAsyncStatus, std::move(slotmgrtasyncstatus))); + + std::unique_ptr slotmgrtasynccancel = std::make_unique( + kCmdNameSlotsMgrtAsyncCancel, 1, kCmdFlagsRead | kCmdFlagsAdmin | kCmdFlagsSlow); + cmd_table->insert( + std::pair>(kCmdNameSlotsMgrtAsyncCancel, std::move(slotmgrtasynccancel))); + + std::unique_ptr slotmgrttagoneptr = + std::make_unique(kCmdNameSlotsMgrtTagOne, 5, kCmdFlagsRead | kCmdFlagsAdmin | kCmdFlagsSlow); + cmd_table->insert( + std::pair>(kCmdNameSlotsMgrtTagOne, std::move(slotmgrttagoneptr))); + + std::unique_ptr slotmgrtoneptr = + std::make_unique(kCmdNameSlotsMgrtOne, 5, kCmdFlagsRead | kCmdFlagsAdmin | kCmdFlagsSlow); + 
cmd_table->insert(std::pair>(kCmdNameSlotsMgrtOne, std::move(slotmgrtoneptr))); + + std::unique_ptr slotmgrttagslotptr = std::make_unique( + kCmdNameSlotsMgrtTagSlot, 5, kCmdFlagsRead | kCmdFlagsAdmin | kCmdFlagsSlow); + cmd_table->insert( + std::pair>(kCmdNameSlotsMgrtTagSlot, std::move(slotmgrttagslotptr))); + + std::unique_ptr slotmgrttagslottagptr = + std::make_unique(kCmdNameSlotsMgrtSlot, 5, kCmdFlagsRead | kCmdFlagsAdmin | kCmdFlagsSlow); + cmd_table->insert( + std::pair>(kCmdNameSlotsMgrtSlot, std::move(slotmgrttagslottagptr))); + + std::unique_ptr slotsdelptr = + std::make_unique(kCmdNameSlotsDel, -2, kCmdFlagsRead | kCmdFlagsAdmin | kCmdFlagsSlow); + cmd_table->insert(std::pair>(kCmdNameSlotsDel, std::move(slotsdelptr))); + + std::unique_ptr slotshashkeyptr = + std::make_unique(kCmdNameSlotsHashKey, -2, kCmdFlagsRead | kCmdFlagsAdmin | kCmdFlagsSlow); + cmd_table->insert(std::pair>(kCmdNameSlotsHashKey, std::move(slotshashkeyptr))); + + std::unique_ptr slotsscanptr = + std::make_unique(kCmdNameSlotsScan, -3, kCmdFlagsRead | kCmdFlagsAdmin | kCmdFlagsSlow); + cmd_table->insert(std::pair>(kCmdNameSlotsScan, std::move(slotsscanptr))); + + std::unique_ptr slotsmgrtexecwrapper = std::make_unique( + kCmdNameSlotsMgrtExecWrapper, -3, kCmdFlagsWrite | kCmdFlagsAdmin | kCmdFlagsSlow); + cmd_table->insert( + std::pair>(kCmdNameSlotsMgrtExecWrapper, std::move(slotsmgrtexecwrapper))); + + std::unique_ptr slotsreloadptr = + std::make_unique(kCmdNameSlotsReload, 1, kCmdFlagsRead | kCmdFlagsAdmin | kCmdFlagsSlow); + cmd_table->insert(std::pair>(kCmdNameSlotsReload, std::move(slotsreloadptr))); + + std::unique_ptr slotsreloadoffptr = + std::make_unique(kCmdNameSlotsReloadOff, -1, kCmdFlagsRead | kCmdFlagsAdmin | kCmdFlagsSlow); + cmd_table->insert(std::pair>(kCmdNameSlotsReloadOff, std::move(slotsreloadoffptr))); + + std::unique_ptr slotscleanupptr = + std::make_unique(kCmdNameSlotsCleanup, -2, kCmdFlagsRead | kCmdFlagsAdmin | kCmdFlagsSlow); + cmd_table->insert(std::pair>(kCmdNameSlotsCleanup, std::move(slotscleanupptr))); + + std::unique_ptr slotscleanupoffptr = + std::make_unique(kCmdNameSlotsCleanupOff, -1, kCmdFlagsRead | kCmdFlagsAdmin | kCmdFlagsSlow); + cmd_table->insert( + std::pair>(kCmdNameSlotsCleanupOff, std::move(slotscleanupoffptr))); + + // Kv + ////SetCmd + std::unique_ptr setptr = + std::make_unique(kCmdNameSet, -3, kCmdFlagsWrite | kCmdFlagsKv | kCmdFlagsDoThroughDB | kCmdFlagsUpdateCache | kCmdFlagsFast); + cmd_table->insert(std::pair>(kCmdNameSet, std::move(setptr))); + ////GetCmd + std::unique_ptr getptr = + std::make_unique(kCmdNameGet, 2, kCmdFlagsRead | kCmdFlagsKv | kCmdFlagsDoThroughDB | kCmdFlagsUpdateCache | kCmdFlagsReadCache | kCmdFlagsSlow); + cmd_table->insert(std::pair>(kCmdNameGet, std::move(getptr))); + ////DelCmd + std::unique_ptr delptr = + std::make_unique(kCmdNameDel, -2, kCmdFlagsWrite | kCmdFlagsOperateKey | kCmdFlagsDoThroughDB | kCmdFlagsUpdateCache | kCmdFlagsFast); + cmd_table->insert(std::pair>(kCmdNameDel, std::move(delptr))); + std::unique_ptr Unlinkptr = + std::make_unique(kCmdNameUnlink, -2, kCmdFlagsWrite | kCmdFlagsOperateKey | kCmdFlagsFast); + cmd_table->insert(std::pair>(kCmdNameUnlink, std::move(Unlinkptr))); + ////IncrCmd + std::unique_ptr incrptr = + std::make_unique(kCmdNameIncr, 2, kCmdFlagsWrite | kCmdFlagsKv | kCmdFlagsDoThroughDB | kCmdFlagsUpdateCache | kCmdFlagsFast); + cmd_table->insert(std::pair>(kCmdNameIncr, std::move(incrptr))); + ////IncrbyCmd + std::unique_ptr incrbyptr = std::make_unique( + kCmdNameIncrby, 3, 
kCmdFlagsWrite | kCmdFlagsKv | kCmdFlagsDoThroughDB | kCmdFlagsUpdateCache | kCmdFlagsFast); + cmd_table->insert(std::pair>(kCmdNameIncrby, std::move(incrbyptr))); + ////IncrbyfloatCmd + std::unique_ptr incrbyfloatptr = std::make_unique( + kCmdNameIncrbyfloat, 3, kCmdFlagsWrite | kCmdFlagsKv | kCmdFlagsDoThroughDB | kCmdFlagsUpdateCache | kCmdFlagsFast); + cmd_table->insert(std::pair>(kCmdNameIncrbyfloat, std::move(incrbyfloatptr))); + ////DecrCmd + std::unique_ptr decrptr = + std::make_unique(kCmdNameDecr, 2, kCmdFlagsWrite | kCmdFlagsKv | kCmdFlagsDoThroughDB | kCmdFlagsUpdateCache | kCmdFlagsFast); + cmd_table->insert(std::pair>(kCmdNameDecr, std::move(decrptr))); + ////DecrbyCmd + std::unique_ptr decrbyptr = std::make_unique( + kCmdNameDecrby, 3, kCmdFlagsWrite | kCmdFlagsKv | kCmdFlagsDoThroughDB | kCmdFlagsUpdateCache | kCmdFlagsFast); + cmd_table->insert(std::pair>(kCmdNameDecrby, std::move(decrbyptr))); + ////GetsetCmd + std::unique_ptr getsetptr = std::make_unique( + kCmdNameGetset, 3, kCmdFlagsWrite | kCmdFlagsKv | kCmdFlagsDoThroughDB | kCmdFlagsUpdateCache | kCmdFlagsFast); + cmd_table->insert(std::pair>(kCmdNameGetset, std::move(getsetptr))); + ////AppendCmd + std::unique_ptr appendptr = std::make_unique( + kCmdNameAppend, 3, kCmdFlagsWrite | kCmdFlagsKv | kCmdFlagsDoThroughDB | kCmdFlagsUpdateCache | kCmdFlagsFast); + cmd_table->insert(std::pair>(kCmdNameAppend, std::move(appendptr))); + ////MgetCmd + std::unique_ptr mgetptr = + std::make_unique(kCmdNameMget, -2, kCmdFlagsRead | kCmdFlagsKv | kCmdFlagsDoThroughDB | kCmdFlagsUpdateCache | kCmdFlagsReadCache | kCmdFlagsFast); + cmd_table->insert(std::pair>(kCmdNameMget, std::move(mgetptr))); + ////KeysCmd + std::unique_ptr keysptr = + std::make_unique(kCmdNameKeys, -2, kCmdFlagsRead | kCmdFlagsOperateKey | kCmdFlagsSlow); + cmd_table->insert(std::pair>(kCmdNameKeys, std::move(keysptr))); + ////SetnxCmd + std::unique_ptr setnxptr = + std::make_unique(kCmdNameSetnx, 3, kCmdFlagsWrite | kCmdFlagsKv | kCmdFlagsFast); + cmd_table->insert(std::pair>(kCmdNameSetnx, std::move(setnxptr))); + ////SetexCmd + std::unique_ptr setexptr = + std::make_unique(kCmdNameSetex, 4, kCmdFlagsWrite | kCmdFlagsKv | kCmdFlagsDoThroughDB | kCmdFlagsUpdateCache | kCmdFlagsSlow); + cmd_table->insert(std::pair>(kCmdNameSetex, std::move(setexptr))); + ////PsetexCmd + std::unique_ptr psetexptr = + std::make_unique(kCmdNamePsetex, 4, kCmdFlagsWrite | kCmdFlagsKv | kCmdFlagsDoThroughDB | kCmdFlagsUpdateCache | kCmdFlagsSlow); + cmd_table->insert(std::pair>(kCmdNamePsetex, std::move(psetexptr))); + ////DelvxCmd + std::unique_ptr delvxptr = + std::make_unique(kCmdNameDelvx, 3, kCmdFlagsWrite | kCmdFlagsKv | kCmdFlagsSlow); + cmd_table->insert(std::pair>(kCmdNameDelvx, std::move(delvxptr))); + ////MSetCmd + std::unique_ptr msetptr = + std::make_unique(kCmdNameMset, -3, kCmdFlagsWrite | kCmdFlagsKv | kCmdFlagsDoThroughDB | kCmdFlagsUpdateCache | kCmdFlagsSlow); + cmd_table->insert(std::pair>(kCmdNameMset, std::move(msetptr))); + ////MSetnxCmd + std::unique_ptr msetnxptr = std::make_unique( + kCmdNameMsetnx, -3, kCmdFlagsWrite | kCmdFlagsKv | kCmdFlagsSlow); + cmd_table->insert(std::pair>(kCmdNameMsetnx, std::move(msetnxptr))); + ////GetrangeCmd + std::unique_ptr getrangeptr = std::make_unique( + kCmdNameGetrange, 4, kCmdFlagsRead | kCmdFlagsKv | kCmdFlagsDoThroughDB | kCmdFlagsUpdateCache | kCmdFlagsReadCache | kCmdFlagsSlow); + cmd_table->insert(std::pair>(kCmdNameGetrange, std::move(getrangeptr))); + ////SetrangeCmd + std::unique_ptr setrangeptr = 
std::make_unique( + kCmdNameSetrange, 4, kCmdFlagsWrite | kCmdFlagsKv | kCmdFlagsDoThroughDB | kCmdFlagsUpdateCache | kCmdFlagsSlow); + cmd_table->insert(std::pair>(kCmdNameSetrange, std::move(setrangeptr))); + ////StrlenCmd + std::unique_ptr strlenptr = + std::make_unique(kCmdNameStrlen, 2, kCmdFlagsRead | kCmdFlagsKv | kCmdFlagsDoThroughDB | kCmdFlagsUpdateCache | kCmdFlagsReadCache | kCmdFlagsFast); + cmd_table->insert(std::pair>(kCmdNameStrlen, std::move(strlenptr))); + ////ExistsCmd + std::unique_ptr existsptr = + std::make_unique(kCmdNameExists, -2, kCmdFlagsRead | kCmdFlagsOperateKey | kCmdFlagsDoThroughDB | kCmdFlagsReadCache | kCmdFlagsFast); + cmd_table->insert(std::pair>(kCmdNameExists, std::move(existsptr))); + ////ExpireCmd + std::unique_ptr expireptr = std::make_unique( + kCmdNameExpire, 3, kCmdFlagsWrite | kCmdFlagsOperateKey | kCmdFlagsDoThroughDB | kCmdFlagsUpdateCache | kCmdFlagsFast); + cmd_table->insert(std::pair>(kCmdNameExpire, std::move(expireptr))); + ////PexpireCmd + std::unique_ptr pexpireptr = std::make_unique( + kCmdNamePexpire, 3, kCmdFlagsWrite | kCmdFlagsOperateKey | kCmdFlagsDoThroughDB | kCmdFlagsUpdateCache | kCmdFlagsFast); + cmd_table->insert(std::pair>(kCmdNamePexpire, std::move(pexpireptr))); + ////ExpireatCmd + std::unique_ptr expireatptr = + std::make_unique(kCmdNameExpireat, 3, kCmdFlagsWrite | kCmdFlagsOperateKey | kCmdFlagsDoThroughDB | kCmdFlagsUpdateCache | kCmdFlagsFast); + cmd_table->insert(std::pair>(kCmdNameExpireat, std::move(expireatptr))); + ////PexpireatCmd + std::unique_ptr pexpireatptr = + std::make_unique(kCmdNamePexpireat, 3, kCmdFlagsWrite | kCmdFlagsOperateKey | kCmdFlagsDoThroughDB | kCmdFlagsUpdateCache | kCmdFlagsFast); + cmd_table->insert(std::pair>(kCmdNamePexpireat, std::move(pexpireatptr))); + ////TtlCmd + std::unique_ptr ttlptr = + std::make_unique(kCmdNameTtl, 2, kCmdFlagsRead | kCmdFlagsOperateKey | kCmdFlagsDoThroughDB | kCmdFlagsReadCache | kCmdFlagsFast); + cmd_table->insert(std::pair>(kCmdNameTtl, std::move(ttlptr))); + ////PttlCmd + std::unique_ptr pttlptr = + std::make_unique(kCmdNamePttl, 2, kCmdFlagsRead | kCmdFlagsOperateKey | kCmdFlagsDoThroughDB | kCmdFlagsReadCache | kCmdFlagsFast); + cmd_table->insert(std::pair>(kCmdNamePttl, std::move(pttlptr))); + ////PersistCmd + std::unique_ptr persistptr = + std::make_unique(kCmdNamePersist, 2, kCmdFlagsWrite | kCmdFlagsOperateKey | kCmdFlagsDoThroughDB | kCmdFlagsUpdateCache | kCmdFlagsFast); + cmd_table->insert(std::pair>(kCmdNamePersist, std::move(persistptr))); + ////TypeCmd + std::unique_ptr typeptr = + std::make_unique(kCmdNameType, 2, kCmdFlagsRead | kCmdFlagsOperateKey | kCmdFlagsDoThroughDB | kCmdFlagsReadCache | kCmdFlagsFast); + cmd_table->insert(std::pair>(kCmdNameType, std::move(typeptr))); + ////ScanCmd + std::unique_ptr scanptr = + std::make_unique(kCmdNameScan, -2, kCmdFlagsRead | kCmdFlagsOperateKey | kCmdFlagsSlow); + cmd_table->insert(std::pair>(kCmdNameScan, std::move(scanptr))); + ////ScanxCmd + std::unique_ptr scanxptr = + std::make_unique(kCmdNameScanx, -3, kCmdFlagsRead | kCmdFlagsOperateKey | kCmdFlagsSlow); + cmd_table->insert(std::pair>(kCmdNameScanx, std::move(scanxptr))); + ////PKSetexAtCmd + std::unique_ptr pksetexatptr = std::make_unique( + kCmdNamePKSetexAt, 4, kCmdFlagsWrite | kCmdFlagsKv | kCmdFlagsDoThroughDB | kCmdFlagsUpdateCache | kCmdFlagsSlow); + cmd_table->insert(std::pair>(kCmdNamePKSetexAt, std::move(pksetexatptr))); + ////PKScanRange + std::unique_ptr pkscanrangeptr = std::make_unique( + kCmdNamePKScanRange, -4, kCmdFlagsRead | 
kCmdFlagsOperateKey | kCmdFlagsSlow); + cmd_table->insert(std::pair>(kCmdNamePKScanRange, std::move(pkscanrangeptr))); + ////PKRScanRange + std::unique_ptr pkrscanrangeptr = std::make_unique( + kCmdNamePKRScanRange, -4, kCmdFlagsRead | kCmdFlagsOperateKey | kCmdFlagsSlow); + cmd_table->insert(std::pair>(kCmdNamePKRScanRange, std::move(pkrscanrangeptr))); + + // Hash + ////HDelCmd + std::unique_ptr hdelptr = + std::make_unique(kCmdNameHDel, -3, kCmdFlagsWrite | kCmdFlagsHash | kCmdFlagsUpdateCache | kCmdFlagsDoThroughDB | kCmdFlagsFast); + cmd_table->insert(std::pair>(kCmdNameHDel, std::move(hdelptr))); + ////HSetCmd + std::unique_ptr hsetptr = + std::make_unique(kCmdNameHSet, 4, kCmdFlagsWrite | kCmdFlagsHash | kCmdFlagsUpdateCache | kCmdFlagsDoThroughDB | kCmdFlagsFast); + cmd_table->insert(std::pair>(kCmdNameHSet, std::move(hsetptr))); + ////HGetCmd + std::unique_ptr hgetptr = + std::make_unique(kCmdNameHGet, 3, kCmdFlagsRead | kCmdFlagsHash | kCmdFlagsUpdateCache | kCmdFlagsDoThroughDB | kCmdFlagsReadCache |kCmdFlagsFast); + cmd_table->insert(std::pair>(kCmdNameHGet, std::move(hgetptr))); + ////HGetallCmd + std::unique_ptr hgetallptr = + std::make_unique(kCmdNameHGetall, 2, kCmdFlagsRead | kCmdFlagsHash | kCmdFlagsSlow | kCmdFlagsUpdateCache | kCmdFlagsDoThroughDB | kCmdFlagsReadCache); + cmd_table->insert(std::pair>(kCmdNameHGetall, std::move(hgetallptr))); + ////HExistsCmd + std::unique_ptr hexistsptr = + std::make_unique(kCmdNameHExists, 3, kCmdFlagsRead | kCmdFlagsHash | kCmdFlagsUpdateCache | kCmdFlagsDoThroughDB | kCmdFlagsReadCache | kCmdFlagsFast ); + cmd_table->insert(std::pair>(kCmdNameHExists, std::move(hexistsptr))); + ////HIncrbyCmd + std::unique_ptr hincrbyptr = + std::make_unique(kCmdNameHIncrby, 4, kCmdFlagsWrite | kCmdFlagsHash | kCmdFlagsUpdateCache | kCmdFlagsDoThroughDB | kCmdFlagsFast); + cmd_table->insert(std::pair>(kCmdNameHIncrby, std::move(hincrbyptr))); + ////HIncrbyfloatCmd + std::unique_ptr hincrbyfloatptr = + std::make_unique(kCmdNameHIncrbyfloat, 4, kCmdFlagsWrite | kCmdFlagsHash | kCmdFlagsUpdateCache | kCmdFlagsDoThroughDB | kCmdFlagsFast); + cmd_table->insert(std::pair>(kCmdNameHIncrbyfloat, std::move(hincrbyfloatptr))); + ////HKeysCmd + std::unique_ptr hkeysptr = + std::make_unique(kCmdNameHKeys, 2, kCmdFlagsRead | kCmdFlagsHash | kCmdFlagsUpdateCache | kCmdFlagsDoThroughDB | kCmdFlagsReadCache | kCmdFlagsFast); + cmd_table->insert(std::pair>(kCmdNameHKeys, std::move(hkeysptr))); + ////HLenCmd + std::unique_ptr hlenptr = + std::make_unique(kCmdNameHLen, 2, kCmdFlagsRead | kCmdFlagsHash | kCmdFlagsUpdateCache | kCmdFlagsDoThroughDB | kCmdFlagsReadCache | kCmdFlagsFast); + cmd_table->insert(std::pair>(kCmdNameHLen, std::move(hlenptr))); + ////HMgetCmd + std::unique_ptr hmgetptr = + std::make_unique(kCmdNameHMget, -3, kCmdFlagsRead | kCmdFlagsHash | kCmdFlagsUpdateCache | kCmdFlagsDoThroughDB | kCmdFlagsReadCache |kCmdFlagsFast); + cmd_table->insert(std::pair>(kCmdNameHMget, std::move(hmgetptr))); + ////HMsetCmd + std::unique_ptr hmsetptr = + std::make_unique(kCmdNameHMset, -4, kCmdFlagsWrite | kCmdFlagsHash | kCmdFlagsUpdateCache | kCmdFlagsDoThroughDB | kCmdFlagsFast); + cmd_table->insert(std::pair>(kCmdNameHMset, std::move(hmsetptr))); + ////HSetnxCmd + std::unique_ptr hsetnxptr = + std::make_unique(kCmdNameHSetnx, 4, kCmdFlagsWrite | kCmdFlagsHash | kCmdFlagsUpdateCache | kCmdFlagsDoThroughDB | kCmdFlagsFast); + cmd_table->insert(std::pair>(kCmdNameHSetnx, std::move(hsetnxptr))); + ////HStrlenCmd + std::unique_ptr hstrlenptr = + 
+  ////HStrlenCmd
+  std::unique_ptr<Cmd> hstrlenptr =
+      std::make_unique<HStrlenCmd>(kCmdNameHStrlen, 3, kCmdFlagsRead | kCmdFlagsHash | kCmdFlagsUpdateCache | kCmdFlagsDoThroughDB | kCmdFlagsReadCache | kCmdFlagsFast);
+  cmd_table->insert(std::pair<std::string, std::unique_ptr<Cmd>>(kCmdNameHStrlen, std::move(hstrlenptr)));
+  ////HValsCmd
+  std::unique_ptr<Cmd> hvalsptr =
+      std::make_unique<HValsCmd>(kCmdNameHVals, 2, kCmdFlagsRead | kCmdFlagsHash | kCmdFlagsSlow | kCmdFlagsUpdateCache | kCmdFlagsDoThroughDB | kCmdFlagsReadCache);
+  cmd_table->insert(std::pair<std::string, std::unique_ptr<Cmd>>(kCmdNameHVals, std::move(hvalsptr)));
+  ////HScanCmd
+  std::unique_ptr<Cmd> hscanptr = std::make_unique<HScanCmd>(
+      kCmdNameHScan, -3, kCmdFlagsRead | kCmdFlagsHash | kCmdFlagsSlow);
+  cmd_table->insert(std::pair<std::string, std::unique_ptr<Cmd>>(kCmdNameHScan, std::move(hscanptr)));
+  ////HScanxCmd
+  std::unique_ptr<Cmd> hscanxptr = std::make_unique<HScanxCmd>(
+      kCmdNameHScanx, -3, kCmdFlagsRead | kCmdFlagsHash | kCmdFlagsSlow);
+  cmd_table->insert(std::pair<std::string, std::unique_ptr<Cmd>>(kCmdNameHScanx, std::move(hscanxptr)));
+  ////PKHScanRange
+  std::unique_ptr<Cmd> pkhscanrangeptr = std::make_unique<PKHScanRangeCmd>(
+      kCmdNamePKHScanRange, -4, kCmdFlagsRead | kCmdFlagsHash | kCmdFlagsSlow);
+  cmd_table->insert(std::pair<std::string, std::unique_ptr<Cmd>>(kCmdNamePKHScanRange, std::move(pkhscanrangeptr)));
+  ////PKHRScanRange
+  std::unique_ptr<Cmd> pkhrscanrangeptr = std::make_unique<PKHRScanRangeCmd>(
+      kCmdNamePKHRScanRange, -4, kCmdFlagsRead | kCmdFlagsHash | kCmdFlagsSlow);
+  cmd_table->insert(std::pair<std::string, std::unique_ptr<Cmd>>(kCmdNamePKHRScanRange, std::move(pkhrscanrangeptr)));
+
+  // List
+  std::unique_ptr<Cmd> lindexptr =
+      std::make_unique<LIndexCmd>(kCmdNameLIndex, 3, kCmdFlagsRead | kCmdFlagsList | kCmdFlagsDoThroughDB | kCmdFlagsReadCache | kCmdFlagsUpdateCache | kCmdFlagsSlow);
+  cmd_table->insert(std::pair<std::string, std::unique_ptr<Cmd>>(kCmdNameLIndex, std::move(lindexptr)));
+  std::unique_ptr<Cmd> linsertptr =
+      std::make_unique<LInsertCmd>(kCmdNameLInsert, 5, kCmdFlagsWrite | kCmdFlagsList | kCmdFlagsDoThroughDB | kCmdFlagsUpdateCache | kCmdFlagsSlow);
+  cmd_table->insert(std::pair<std::string, std::unique_ptr<Cmd>>(kCmdNameLInsert, std::move(linsertptr)));
+
+  std::unique_ptr<Cmd> llenptr =
+      std::make_unique<LLenCmd>(kCmdNameLLen, 2, kCmdFlagsRead | kCmdFlagsList | kCmdFlagsDoThroughDB | kCmdFlagsReadCache | kCmdFlagsUpdateCache | kCmdFlagsFast);
+  cmd_table->insert(std::pair<std::string, std::unique_ptr<Cmd>>(kCmdNameLLen, std::move(llenptr)));
+  std::unique_ptr<Cmd> blpopptr = std::make_unique<BLPopCmd>(
+      kCmdNameBLPop, -3, kCmdFlagsWrite | kCmdFlagsList | kCmdFlagsSlow);
+  cmd_table->insert(std::pair<std::string, std::unique_ptr<Cmd>>(kCmdNameBLPop, std::move(blpopptr)));
+
+  std::unique_ptr<Cmd> lpopptr =
+      std::make_unique<LPopCmd>(kCmdNameLPop, -2, kCmdFlagsWrite | kCmdFlagsList | kCmdFlagsDoThroughDB | kCmdFlagsUpdateCache | kCmdFlagsFast);
+  cmd_table->insert(std::pair<std::string, std::unique_ptr<Cmd>>(kCmdNameLPop, std::move(lpopptr)));
+
+  std::unique_ptr<Cmd> lpushptr = std::make_unique<LPushCmd>(
+      kCmdNameLPush, -3, kCmdFlagsWrite | kCmdFlagsList | kCmdFlagsDoThroughDB | kCmdFlagsUpdateCache | kCmdFlagsFast);
+  cmd_table->insert(std::pair<std::string, std::unique_ptr<Cmd>>(kCmdNameLPush, std::move(lpushptr)));
+
+  std::unique_ptr<Cmd> lpushxptr = std::make_unique<LPushxCmd>(kCmdNameLPushx, -3, kCmdFlagsWrite | kCmdFlagsList | kCmdFlagsDoThroughDB | kCmdFlagsUpdateCache | kCmdFlagsFast);
+  cmd_table->insert(std::pair<std::string, std::unique_ptr<Cmd>>(kCmdNameLPushx, std::move(lpushxptr)));
+
+  std::unique_ptr<Cmd> lrangeptr = std::make_unique<LRangeCmd>(
+      kCmdNameLRange, 4, kCmdFlagsRead | kCmdFlagsList | kCmdFlagsDoThroughDB | kCmdFlagsReadCache | kCmdFlagsUpdateCache | kCmdFlagsSlow);
+  cmd_table->insert(std::pair<std::string, std::unique_ptr<Cmd>>(kCmdNameLRange, std::move(lrangeptr)));
+  std::unique_ptr<Cmd> lremptr =
+      std::make_unique<LRemCmd>(kCmdNameLRem, 4, kCmdFlagsWrite | kCmdFlagsList | kCmdFlagsDoThroughDB | kCmdFlagsUpdateCache | kCmdFlagsSlow);
+  cmd_table->insert(std::pair<std::string, std::unique_ptr<Cmd>>(kCmdNameLRem, std::move(lremptr)));
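// Editor's note: the three cache flags recurring above drive a read-through /
// write-through policy (see Cmd::DoCommand() later in this file):
// kCmdFlagsReadCache tries the in-memory cache first, kCmdFlagsDoThroughDB
// falls back to RocksDB on a miss (and is the path writes take), and
// kCmdFlagsUpdateCache refreshes the cache after the DB was touched.
// A self-contained sketch with stand-in containers, not part of this patch:
#include <string>
#include <unordered_map>

static std::unordered_map<std::string, std::string> cache_sketch;  // stand-in cache
static std::unordered_map<std::string, std::string> db_sketch;     // stand-in RocksDB

static std::string ReadThroughSketch(const std::string& key) {
  auto hit = cache_sketch.find(key);
  if (hit != cache_sketch.end()) {  // kCmdFlagsReadCache path
    return hit->second;
  }
  std::string value = db_sketch[key];  // kCmdFlagsDoThroughDB on a miss
  cache_sketch[key] = value;           // kCmdFlagsUpdateCache refill
  return value;
}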
+  std::unique_ptr<Cmd> lsetptr =
+      std::make_unique<LSetCmd>(kCmdNameLSet, 4, kCmdFlagsWrite | kCmdFlagsList | kCmdFlagsDoThroughDB | kCmdFlagsUpdateCache | kCmdFlagsSlow);
+  cmd_table->insert(std::pair<std::string, std::unique_ptr<Cmd>>(kCmdNameLSet, std::move(lsetptr)));
+  std::unique_ptr<Cmd> ltrimptr =
+      std::make_unique<LTrimCmd>(kCmdNameLTrim, 4, kCmdFlagsWrite | kCmdFlagsList | kCmdFlagsDoThroughDB | kCmdFlagsUpdateCache | kCmdFlagsSlow);
+  cmd_table->insert(std::pair<std::string, std::unique_ptr<Cmd>>(kCmdNameLTrim, std::move(ltrimptr)));
+
+  std::unique_ptr<Cmd> brpopptr = std::make_unique<BRpopCmd>(
+      kCmdNameBRpop, -3, kCmdFlagsWrite | kCmdFlagsList | kCmdFlagsSlow);
+  cmd_table->insert(std::pair<std::string, std::unique_ptr<Cmd>>(kCmdNameBRpop, std::move(brpopptr)));
+  std::unique_ptr<Cmd> rpopptr =
+      std::make_unique<RPopCmd>(kCmdNameRPop, -2, kCmdFlagsWrite | kCmdFlagsList | kCmdFlagsDoThroughDB | kCmdFlagsUpdateCache | kCmdFlagsFast);
+  cmd_table->insert(std::pair<std::string, std::unique_ptr<Cmd>>(kCmdNameRPop, std::move(rpopptr)));
+  std::unique_ptr<Cmd> rpoplpushptr = std::make_unique<RPopLPushCmd>(
+      kCmdNameRPopLPush, 3, kCmdFlagsWrite | kCmdFlagsList | kCmdFlagsSlow);
+  cmd_table->insert(std::pair<std::string, std::unique_ptr<Cmd>>(kCmdNameRPopLPush, std::move(rpoplpushptr)));
+  std::unique_ptr<Cmd> rpushptr =
+      std::make_unique<RPushCmd>(kCmdNameRPush, -3, kCmdFlagsWrite | kCmdFlagsList | kCmdFlagsDoThroughDB | kCmdFlagsUpdateCache | kCmdFlagsFast);
+  cmd_table->insert(std::pair<std::string, std::unique_ptr<Cmd>>(kCmdNameRPush, std::move(rpushptr)));
+  std::unique_ptr<Cmd> rpushxptr =
+      std::make_unique<RPushxCmd>(kCmdNameRPushx, -3, kCmdFlagsWrite | kCmdFlagsList | kCmdFlagsDoThroughDB | kCmdFlagsUpdateCache | kCmdFlagsFast);
+  cmd_table->insert(std::pair<std::string, std::unique_ptr<Cmd>>(kCmdNameRPushx, std::move(rpushxptr)));
+
+  // Zset
+  ////ZAddCmd
+  std::unique_ptr<Cmd> zaddptr =
+      std::make_unique<ZAddCmd>(kCmdNameZAdd, -4, kCmdFlagsWrite | kCmdFlagsZset | kCmdFlagsDoThroughDB | kCmdFlagsUpdateCache | kCmdFlagsFast);
+  cmd_table->insert(std::pair<std::string, std::unique_ptr<Cmd>>(kCmdNameZAdd, std::move(zaddptr)));
+  ////ZCardCmd
+  std::unique_ptr<Cmd> zcardptr =
+      std::make_unique<ZCardCmd>(kCmdNameZCard, 2, kCmdFlagsRead | kCmdFlagsZset | kCmdFlagsDoThroughDB | kCmdFlagsReadCache | kCmdFlagsFast);
+  cmd_table->insert(std::pair<std::string, std::unique_ptr<Cmd>>(kCmdNameZCard, std::move(zcardptr)));
+  ////ZScanCmd
+  std::unique_ptr<Cmd> zscanptr = std::make_unique<ZScanCmd>(
+      kCmdNameZScan, -3, kCmdFlagsRead | kCmdFlagsZset | kCmdFlagsSlow);
+  cmd_table->insert(std::pair<std::string, std::unique_ptr<Cmd>>(kCmdNameZScan, std::move(zscanptr)));
+  ////ZIncrbyCmd
+  std::unique_ptr<Cmd> zincrbyptr =
+      std::make_unique<ZIncrbyCmd>(kCmdNameZIncrby, 4, kCmdFlagsWrite | kCmdFlagsZset | kCmdFlagsDoThroughDB | kCmdFlagsUpdateCache | kCmdFlagsFast);
+  cmd_table->insert(std::pair<std::string, std::unique_ptr<Cmd>>(kCmdNameZIncrby, std::move(zincrbyptr)));
+  ////ZRangeCmd
+  std::unique_ptr<Cmd> zrangeptr =
+      std::make_unique<ZRangeCmd>(kCmdNameZRange, -4, kCmdFlagsRead | kCmdFlagsZset | kCmdFlagsDoThroughDB | kCmdFlagsReadCache | kCmdFlagsUpdateCache | kCmdFlagsSlow);
+  cmd_table->insert(std::pair<std::string, std::unique_ptr<Cmd>>(kCmdNameZRange, std::move(zrangeptr)));
+  ////ZRevrangeCmd
+  std::unique_ptr<Cmd> zrevrangeptr =
+      std::make_unique<ZRevrangeCmd>(kCmdNameZRevrange, -4, kCmdFlagsRead | kCmdFlagsZset | kCmdFlagsDoThroughDB | kCmdFlagsReadCache | kCmdFlagsUpdateCache | kCmdFlagsSlow);
+  cmd_table->insert(std::pair<std::string, std::unique_ptr<Cmd>>(kCmdNameZRevrange, std::move(zrevrangeptr)));
+  ////ZRangebyscoreCmd
+  std::unique_ptr<Cmd> zrangebyscoreptr = std::make_unique<ZRangebyscoreCmd>(
+      kCmdNameZRangebyscore, -4, kCmdFlagsRead | kCmdFlagsZset | kCmdFlagsSlow);
+  cmd_table->insert(std::pair<std::string, std::unique_ptr<Cmd>>(kCmdNameZRangebyscore, std::move(zrangebyscoreptr)));
+  ////ZRevrangebyscoreCmd
+  std::unique_ptr<Cmd> zrevrangebyscoreptr = std::make_unique<ZRevrangebyscoreCmd>(
+      kCmdNameZRevrangebyscore, -4, kCmdFlagsRead | kCmdFlagsZset | kCmdFlagsSlow);
+  cmd_table->insert(
+      std::pair<std::string, std::unique_ptr<Cmd>>(kCmdNameZRevrangebyscore, std::move(zrevrangebyscoreptr)));
+  ////ZCountCmd
+  std::unique_ptr<Cmd> zcountptr =
+      std::make_unique<ZCountCmd>(kCmdNameZCount, 4, kCmdFlagsRead | kCmdFlagsZset | kCmdFlagsDoThroughDB | kCmdFlagsReadCache | kCmdFlagsUpdateCache | kCmdFlagsFast);
+  cmd_table->insert(std::pair<std::string, std::unique_ptr<Cmd>>(kCmdNameZCount, std::move(zcountptr)));
+  ////ZRemCmd
+  std::unique_ptr<Cmd> zremptr =
+      std::make_unique<ZRemCmd>(kCmdNameZRem, -3, kCmdFlagsWrite | kCmdFlagsZset | kCmdFlagsDoThroughDB | kCmdFlagsUpdateCache | kCmdFlagsFast);
+  cmd_table->insert(std::pair<std::string, std::unique_ptr<Cmd>>(kCmdNameZRem, std::move(zremptr)));
+  ////ZUnionstoreCmd
+  std::unique_ptr<Cmd> zunionstoreptr =
+      std::make_unique<ZUnionstoreCmd>(kCmdNameZUnionstore, -4, kCmdFlagsWrite | kCmdFlagsZset | kCmdFlagsDoThroughDB | kCmdFlagsUpdateCache | kCmdFlagsSlow);
+  cmd_table->insert(std::pair<std::string, std::unique_ptr<Cmd>>(kCmdNameZUnionstore, std::move(zunionstoreptr)));
+  ////ZInterstoreCmd
+  std::unique_ptr<Cmd> zinterstoreptr =
+      std::make_unique<ZInterstoreCmd>(kCmdNameZInterstore, -4, kCmdFlagsWrite | kCmdFlagsZset | kCmdFlagsDoThroughDB | kCmdFlagsUpdateCache | kCmdFlagsSlow);
+  cmd_table->insert(std::pair<std::string, std::unique_ptr<Cmd>>(kCmdNameZInterstore, std::move(zinterstoreptr)));
+  ////ZRankCmd
+  std::unique_ptr<Cmd> zrankptr =
+      std::make_unique<ZRankCmd>(kCmdNameZRank, 3, kCmdFlagsRead | kCmdFlagsZset | kCmdFlagsDoThroughDB | kCmdFlagsReadCache | kCmdFlagsUpdateCache | kCmdFlagsFast);
+  cmd_table->insert(std::pair<std::string, std::unique_ptr<Cmd>>(kCmdNameZRank, std::move(zrankptr)));
+  ////ZRevrankCmd
+  std::unique_ptr<Cmd> zrevrankptr =
+      std::make_unique<ZRevrankCmd>(kCmdNameZRevrank, 3, kCmdFlagsRead | kCmdFlagsZset | kCmdFlagsDoThroughDB | kCmdFlagsReadCache | kCmdFlagsUpdateCache | kCmdFlagsFast);
+  cmd_table->insert(std::pair<std::string, std::unique_ptr<Cmd>>(kCmdNameZRevrank, std::move(zrevrankptr)));
+  ////ZScoreCmd
+  std::unique_ptr<Cmd> zscoreptr =
+      std::make_unique<ZScoreCmd>(kCmdNameZScore, 3, kCmdFlagsRead | kCmdFlagsZset | kCmdFlagsDoThroughDB | kCmdFlagsReadCache | kCmdFlagsFast);
+  cmd_table->insert(std::pair<std::string, std::unique_ptr<Cmd>>(kCmdNameZScore, std::move(zscoreptr)));
+  ////ZRangebylexCmd
+  std::unique_ptr<Cmd> zrangebylexptr =
+      std::make_unique<ZRangebylexCmd>(kCmdNameZRangebylex, -4, kCmdFlagsRead | kCmdFlagsZset | kCmdFlagsSlow);
+  cmd_table->insert(std::pair<std::string, std::unique_ptr<Cmd>>(kCmdNameZRangebylex, std::move(zrangebylexptr)));
+  ////ZRevrangebylexCmd
+  std::unique_ptr<Cmd> zrevrangebylexptr = std::make_unique<ZRevrangebylexCmd>(
+      kCmdNameZRevrangebylex, -4, kCmdFlagsRead | kCmdFlagsZset | kCmdFlagsSlow);
+  cmd_table->insert(std::pair<std::string, std::unique_ptr<Cmd>>(kCmdNameZRevrangebylex, std::move(zrevrangebylexptr)));
+  ////ZLexcountCmd
+  std::unique_ptr<Cmd> zlexcountptr =
+      std::make_unique<ZLexcountCmd>(kCmdNameZLexcount, 4, kCmdFlagsRead | kCmdFlagsZset | kCmdFlagsFast);
+  cmd_table->insert(std::pair<std::string, std::unique_ptr<Cmd>>(kCmdNameZLexcount, std::move(zlexcountptr)));
+  ////ZRemrangebyrankCmd
+  std::unique_ptr<Cmd> zremrangebyrankptr = std::make_unique<ZRemrangebyrankCmd>(
+      kCmdNameZRemrangebyrank, 4, kCmdFlagsWrite | kCmdFlagsZset | kCmdFlagsDoThroughDB | kCmdFlagsUpdateCache | kCmdFlagsSlow);
+  cmd_table->insert(
+      std::pair<std::string, std::unique_ptr<Cmd>>(kCmdNameZRemrangebyrank, std::move(zremrangebyrankptr)));
+  ////ZRemrangebyscoreCmd
+  std::unique_ptr<Cmd> zremrangebyscoreptr = std::make_unique<ZRemrangebyscoreCmd>(
+      kCmdNameZRemrangebyscore, 4, kCmdFlagsWrite | kCmdFlagsZset | kCmdFlagsDoThroughDB | kCmdFlagsUpdateCache | kCmdFlagsSlow);
+  cmd_table->insert(
+      std::pair<std::string, std::unique_ptr<Cmd>>(kCmdNameZRemrangebyscore, std::move(zremrangebyscoreptr)));
+  ////ZRemrangebylexCmd
+  std::unique_ptr<Cmd> zremrangebylexptr = std::make_unique<ZRemrangebylexCmd>(
+      kCmdNameZRemrangebylex, 4, kCmdFlagsWrite | kCmdFlagsZset | kCmdFlagsDoThroughDB | kCmdFlagsUpdateCache | kCmdFlagsSlow);
+  cmd_table->insert(std::pair<std::string, std::unique_ptr<Cmd>>(kCmdNameZRemrangebylex, std::move(zremrangebylexptr)));
+  ////ZPopmax
+  std::unique_ptr<Cmd> zpopmaxptr = std::make_unique<ZPopmaxCmd>(
+      kCmdNameZPopmax, -2, kCmdFlagsWrite | kCmdFlagsZset | kCmdFlagsFast);
+  cmd_table->insert(std::pair<std::string, std::unique_ptr<Cmd>>(kCmdNameZPopmax, std::move(zpopmaxptr)));
+  ////ZPopmin
+  std::unique_ptr<Cmd> zpopminptr = std::make_unique<ZPopminCmd>(
+      kCmdNameZPopmin, -2, kCmdFlagsWrite | kCmdFlagsZset | kCmdFlagsFast);
+  cmd_table->insert(std::pair<std::string, std::unique_ptr<Cmd>>(kCmdNameZPopmin, std::move(zpopminptr)));
+
+  // Set
+  ////SAddCmd
+  std::unique_ptr<Cmd> saddptr =
+      std::make_unique<SAddCmd>(kCmdNameSAdd, -3, kCmdFlagsWrite | kCmdFlagsSet | kCmdFlagsDoThroughDB | kCmdFlagsUpdateCache | kCmdFlagsFast);
+  cmd_table->insert(std::pair<std::string, std::unique_ptr<Cmd>>(kCmdNameSAdd, std::move(saddptr)));
+  ////SPopCmd
+  std::unique_ptr<Cmd> spopptr =
+      std::make_unique<SPopCmd>(kCmdNameSPop, -2, kCmdFlagsWrite | kCmdFlagsSet | kCmdFlagsDoThroughDB | kCmdFlagsUpdateCache | kCmdFlagsFast);
+  cmd_table->insert(std::pair<std::string, std::unique_ptr<Cmd>>(kCmdNameSPop, std::move(spopptr)));
+  ////SCardCmd
+  std::unique_ptr<Cmd> scardptr =
+      std::make_unique<SCardCmd>(kCmdNameSCard, 2, kCmdFlagsRead | kCmdFlagsSet | kCmdFlagsDoThroughDB | kCmdFlagsReadCache | kCmdFlagsUpdateCache | kCmdFlagsFast);
+  cmd_table->insert(std::pair<std::string, std::unique_ptr<Cmd>>(kCmdNameSCard, std::move(scardptr)));
+  ////SMembersCmd
+  std::unique_ptr<Cmd> smembersptr =
+      std::make_unique<SMembersCmd>(kCmdNameSMembers, 2, kCmdFlagsRead | kCmdFlagsSet | kCmdFlagsDoThroughDB | kCmdFlagsReadCache | kCmdFlagsUpdateCache | kCmdFlagsSlow);
+  cmd_table->insert(std::pair<std::string, std::unique_ptr<Cmd>>(kCmdNameSMembers, std::move(smembersptr)));
+  ////SScanCmd
+  std::unique_ptr<Cmd> sscanptr =
+      std::make_unique<SScanCmd>(kCmdNameSScan, -3, kCmdFlagsRead | kCmdFlagsSet | kCmdFlagsSlow);
+  cmd_table->insert(std::pair<std::string, std::unique_ptr<Cmd>>(kCmdNameSScan, std::move(sscanptr)));
+  ////SRemCmd
+  std::unique_ptr<Cmd> sremptr =
+      std::make_unique<SRemCmd>(kCmdNameSRem, -3, kCmdFlagsWrite | kCmdFlagsSet | kCmdFlagsDoThroughDB | kCmdFlagsUpdateCache | kCmdFlagsFast);
+  cmd_table->insert(std::pair<std::string, std::unique_ptr<Cmd>>(kCmdNameSRem, std::move(sremptr)));
+  ////SUnionCmd
+  std::unique_ptr<Cmd> sunionptr = std::make_unique<SUnionCmd>(
+      kCmdNameSUnion, -2, kCmdFlagsWrite | kCmdFlagsSet | kCmdFlagsSlow);
+  cmd_table->insert(std::pair<std::string, std::unique_ptr<Cmd>>(kCmdNameSUnion, std::move(sunionptr)));
+  ////SUnionstoreCmd
+  std::unique_ptr<Cmd> sunionstoreptr =
+      std::make_unique<SUnionstoreCmd>(kCmdNameSUnionstore, -3, kCmdFlagsWrite | kCmdFlagsSet | kCmdFlagsDoThroughDB | kCmdFlagsUpdateCache | kCmdFlagsSlow);
+  cmd_table->insert(std::pair<std::string, std::unique_ptr<Cmd>>(kCmdNameSUnionstore, std::move(sunionstoreptr)));
+  ////SInterCmd
+  std::unique_ptr<Cmd> sinterptr = std::make_unique<SInterCmd>(
+      kCmdNameSInter, -2, kCmdFlagsWrite | kCmdFlagsSet | kCmdFlagsSlow);
+  cmd_table->insert(std::pair<std::string, std::unique_ptr<Cmd>>(kCmdNameSInter, std::move(sinterptr)));
+  ////SInterstoreCmd
+  std::unique_ptr<Cmd> sinterstoreptr =
+      std::make_unique<SInterstoreCmd>(kCmdNameSInterstore, -3, kCmdFlagsWrite | kCmdFlagsSet | kCmdFlagsDoThroughDB | kCmdFlagsUpdateCache | kCmdFlagsSlow);
+  cmd_table->insert(std::pair<std::string, std::unique_ptr<Cmd>>(kCmdNameSInterstore, std::move(sinterstoreptr)));
+  ////SIsmemberCmd
+  std::unique_ptr<Cmd> sismemberptr =
+      std::make_unique<SIsmemberCmd>(kCmdNameSIsmember, 3, kCmdFlagsRead | kCmdFlagsSet | kCmdFlagsDoThroughDB | kCmdFlagsReadCache | kCmdFlagsUpdateCache | kCmdFlagsFast);
+  cmd_table->insert(std::pair<std::string, std::unique_ptr<Cmd>>(kCmdNameSIsmember, std::move(sismemberptr)));
+  ////SDiffCmd
+  std::unique_ptr<Cmd> sdiffptr =
+      std::make_unique<SDiffCmd>(kCmdNameSDiff, -2, kCmdFlagsWrite | kCmdFlagsSet | kCmdFlagsSlow);
+  cmd_table->insert(std::pair<std::string, std::unique_ptr<Cmd>>(kCmdNameSDiff, std::move(sdiffptr)));
+  ////SDiffstoreCmd
+  std::unique_ptr<Cmd> sdiffstoreptr =
+      std::make_unique<SDiffstoreCmd>(kCmdNameSDiffstore, -3, kCmdFlagsWrite | kCmdFlagsSet | kCmdFlagsDoThroughDB | kCmdFlagsUpdateCache | kCmdFlagsSlow);
+  cmd_table->insert(std::pair<std::string, std::unique_ptr<Cmd>>(kCmdNameSDiffstore, std::move(sdiffstoreptr)));
+  ////SMoveCmd
+  std::unique_ptr<Cmd> smoveptr =
+      std::make_unique<SMoveCmd>(kCmdNameSMove, 4, kCmdFlagsWrite | kCmdFlagsSet | kCmdFlagsDoThroughDB | kCmdFlagsUpdateCache | kCmdFlagsFast);
+  cmd_table->insert(std::pair<std::string, std::unique_ptr<Cmd>>(kCmdNameSMove, std::move(smoveptr)));
+  ////SRandmemberCmd
+  std::unique_ptr<Cmd> srandmemberptr =
+      std::make_unique<SRandmemberCmd>(kCmdNameSRandmember, -2, kCmdFlagsRead | kCmdFlagsSet | kCmdFlagsDoThroughDB | kCmdFlagsReadCache | kCmdFlagsUpdateCache | kCmdFlagsSlow);
+  cmd_table->insert(std::pair<std::string, std::unique_ptr<Cmd>>(kCmdNameSRandmember, std::move(srandmemberptr)));
+
+  // BitMap
+  ////bitsetCmd
+  std::unique_ptr<Cmd> bitsetptr =
+      std::make_unique<BitSetCmd>(kCmdNameBitSet, 4, kCmdFlagsWrite | kCmdFlagsBit | kCmdFlagsSlow | kCmdFlagsDoThroughDB | kCmdFlagsUpdateCache);
+  cmd_table->insert(std::pair<std::string, std::unique_ptr<Cmd>>(kCmdNameBitSet, std::move(bitsetptr)));
+  ////bitgetCmd
+  std::unique_ptr<Cmd> bitgetptr =
+      std::make_unique<BitGetCmd>(kCmdNameBitGet, 3, kCmdFlagsRead | kCmdFlagsBit | kCmdFlagsSlow);
+  cmd_table->insert(std::pair<std::string, std::unique_ptr<Cmd>>(kCmdNameBitGet, std::move(bitgetptr)));
+  ////bitcountCmd
+  std::unique_ptr<Cmd> bitcountptr =
+      std::make_unique<BitCountCmd>(kCmdNameBitCount, -2, kCmdFlagsRead | kCmdFlagsBit | kCmdFlagsSlow | kCmdFlagsDoThroughDB | kCmdFlagsReadCache | kCmdFlagsUpdateCache);
+  cmd_table->insert(std::pair<std::string, std::unique_ptr<Cmd>>(kCmdNameBitCount, std::move(bitcountptr)));
+  ////bitposCmd
+  std::unique_ptr<Cmd> bitposptr =
+      std::make_unique<BitPosCmd>(kCmdNameBitPos, -3, kCmdFlagsRead | kCmdFlagsBit | kCmdFlagsSlow);
+  cmd_table->insert(std::pair<std::string, std::unique_ptr<Cmd>>(kCmdNameBitPos, std::move(bitposptr)));
+  ////bitopCmd
+  std::unique_ptr<Cmd> bitopptr =
+      std::make_unique<BitOpCmd>(kCmdNameBitOp, -3, kCmdFlagsWrite | kCmdFlagsBit | kCmdFlagsSlow | kCmdFlagsDoThroughDB | kCmdFlagsUpdateCache);
+  cmd_table->insert(std::pair<std::string, std::unique_ptr<Cmd>>(kCmdNameBitOp, std::move(bitopptr)));
+
+  // HyperLogLog
+  ////pfaddCmd
+  std::unique_ptr<Cmd> pfaddptr = std::make_unique<PfAddCmd>(
+      kCmdNamePfAdd, -2, kCmdFlagsWrite | kCmdFlagsHyperLogLog | kCmdFlagsFast);
+  cmd_table->insert(std::pair<std::string, std::unique_ptr<Cmd>>(kCmdNamePfAdd, std::move(pfaddptr)));
+  ////pfcountCmd
+  std::unique_ptr<Cmd> pfcountptr = std::make_unique<PfCountCmd>(
+      kCmdNamePfCount, -2, kCmdFlagsRead | kCmdFlagsHyperLogLog | kCmdFlagsSlow);
+  cmd_table->insert(std::pair<std::string, std::unique_ptr<Cmd>>(kCmdNamePfCount, std::move(pfcountptr)));
+  ////pfmergeCmd
+  std::unique_ptr<Cmd> pfmergeptr = std::make_unique<PfMergeCmd>(
+      kCmdNamePfMerge, -2, kCmdFlagsWrite | kCmdFlagsHyperLogLog | kCmdFlagsSlow);
+  cmd_table->insert(std::pair<std::string, std::unique_ptr<Cmd>>(kCmdNamePfMerge, std::move(pfmergeptr)));
+
+  // GEO
+  ////GeoAdd
+  std::unique_ptr<Cmd> geoaddptr = std::make_unique<GeoAddCmd>(
+      kCmdNameGeoAdd, -5, kCmdFlagsWrite | kCmdFlagsGeo | kCmdFlagsSlow);
+  cmd_table->insert(std::pair<std::string, std::unique_ptr<Cmd>>(kCmdNameGeoAdd, std::move(geoaddptr)));
+  ////GeoPos
+  std::unique_ptr<Cmd> geoposptr = std::make_unique<GeoPosCmd>(
+      kCmdNameGeoPos, -2, kCmdFlagsRead | kCmdFlagsGeo | kCmdFlagsSlow);
+  cmd_table->insert(std::pair<std::string, std::unique_ptr<Cmd>>(kCmdNameGeoPos, std::move(geoposptr)));
+  ////GeoDist
+  std::unique_ptr<Cmd> geodistptr = std::make_unique<GeoDistCmd>(
+      kCmdNameGeoDist, -4, kCmdFlagsRead | kCmdFlagsGeo | kCmdFlagsSlow);
+  cmd_table->insert(std::pair<std::string, std::unique_ptr<Cmd>>(kCmdNameGeoDist, std::move(geodistptr)));
+  ////GeoHash
+  std::unique_ptr<Cmd> geohashptr = std::make_unique<GeoHashCmd>(
+      kCmdNameGeoHash, -2, kCmdFlagsRead | kCmdFlagsGeo | kCmdFlagsSlow);
+  cmd_table->insert(std::pair<std::string, std::unique_ptr<Cmd>>(kCmdNameGeoHash, std::move(geohashptr)));
+  ////GeoRadius
+  std::unique_ptr<Cmd> georadiusptr = std::make_unique<GeoRadiusCmd>(
+      kCmdNameGeoRadius, -6, kCmdFlagsRead | kCmdFlagsGeo | kCmdFlagsSlow);
+  cmd_table->insert(std::pair<std::string, std::unique_ptr<Cmd>>(kCmdNameGeoRadius, std::move(georadiusptr)));
+  ////GeoRadiusByMember
+  std::unique_ptr<Cmd> georadiusbymemberptr = std::make_unique<GeoRadiusByMemberCmd>(
+      kCmdNameGeoRadiusByMember, -5, kCmdFlagsRead | kCmdFlagsGeo | kCmdFlagsSlow);
+  cmd_table->insert(
+      std::pair<std::string, std::unique_ptr<Cmd>>(kCmdNameGeoRadiusByMember, std::move(georadiusbymemberptr)));
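// Editor's note (not part of this patch): GEORADIUS is registered with arity
// -6 above because its shortest legal form already has six tokens:
//
//   GEORADIUS key longitude latitude radius m|km|ft|mi
//   e.g. GEORADIUS geo:cities 15.0 37.0 200 km WITHCOORD WITHDIST
//
// Optional modifiers such as WITHCOORD/WITHDIST only grow the token count,
// which is exactly what a negative ("at least") arity permits.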
+
+  // PubSub
+  ////Publish
+  std::unique_ptr<Cmd> publishptr =
+      std::make_unique<PublishCmd>(kCmdNamePublish, 3, kCmdFlagsRead | kCmdFlagsPubSub | kCmdFlagsFast);
+  cmd_table->insert(std::pair<std::string, std::unique_ptr<Cmd>>(kCmdNamePublish, std::move(publishptr)));
+  ////Subscribe
+  std::unique_ptr<Cmd> subscribeptr =
+      std::make_unique<SubscribeCmd>(kCmdNameSubscribe, -2, kCmdFlagsRead | kCmdFlagsPubSub | kCmdFlagsSlow);
+  cmd_table->insert(std::pair<std::string, std::unique_ptr<Cmd>>(kCmdNameSubscribe, std::move(subscribeptr)));
+  ////UnSubscribe
+  std::unique_ptr<Cmd> unsubscribeptr =
+      std::make_unique<UnSubscribeCmd>(kCmdNameUnSubscribe, -1, kCmdFlagsRead | kCmdFlagsPubSub | kCmdFlagsSlow);
+  cmd_table->insert(std::pair<std::string, std::unique_ptr<Cmd>>(kCmdNameUnSubscribe, std::move(unsubscribeptr)));
+  ////PSubscribe
+  std::unique_ptr<Cmd> psubscribeptr =
+      std::make_unique<PSubscribeCmd>(kCmdNamePSubscribe, -2, kCmdFlagsRead | kCmdFlagsPubSub | kCmdFlagsSlow);
+  cmd_table->insert(std::pair<std::string, std::unique_ptr<Cmd>>(kCmdNamePSubscribe, std::move(psubscribeptr)));
+  ////PUnSubscribe
+  std::unique_ptr<Cmd> punsubscribeptr =
+      std::make_unique<PUnSubscribeCmd>(kCmdNamePUnSubscribe, -1, kCmdFlagsRead | kCmdFlagsPubSub | kCmdFlagsSlow);
+  cmd_table->insert(std::pair<std::string, std::unique_ptr<Cmd>>(kCmdNamePUnSubscribe, std::move(punsubscribeptr)));
+  ////PubSub
+  std::unique_ptr<Cmd> pubsubptr =
+      std::make_unique<PubSubCmd>(kCmdNamePubSub, -2, kCmdFlagsRead | kCmdFlagsPubSub | kCmdFlagsSlow);
+  cmd_table->insert(std::pair<std::string, std::unique_ptr<Cmd>>(kCmdNamePubSub, std::move(pubsubptr)));
+
+  ////ACL
+  std::unique_ptr<Cmd> aclptr = std::make_unique<PikaAclCmd>(KCmdNameAcl, -2, kCmdFlagsAdmin | kCmdFlagsSlow);
+  cmd_table->insert(std::pair<std::string, std::unique_ptr<Cmd>>(KCmdNameAcl, std::move(aclptr)));
+
+  // Transaction
+  ////Multi
+  std::unique_ptr<Cmd> multiptr =
+      std::make_unique<MultiCmd>(kCmdNameMulti, 1, kCmdFlagsRead | kCmdFlagsFast);
+  cmd_table->insert(std::pair<std::string, std::unique_ptr<Cmd>>(kCmdNameMulti, std::move(multiptr)));
+  ////Exec
+  std::unique_ptr<Cmd> execptr = std::make_unique<ExecCmd>(
+      kCmdNameExec, 1, kCmdFlagsRead | kCmdFlagsWrite | kCmdFlagsSuspend | kCmdFlagsSlow);
+  cmd_table->insert(std::pair<std::string, std::unique_ptr<Cmd>>(kCmdNameExec, std::move(execptr)));
+  ////Discard
+  std::unique_ptr<Cmd> discardptr = std::make_unique<DiscardCmd>(kCmdNameDiscard, 1, kCmdFlagsRead | kCmdFlagsFast);
+  cmd_table->insert(std::pair<std::string, std::unique_ptr<Cmd>>(kCmdNameDiscard, std::move(discardptr)));
+  ////Watch
+  std::unique_ptr<Cmd> watchptr = std::make_unique<WatchCmd>(kCmdNameWatch, -2, kCmdFlagsRead | kCmdFlagsFast);
+  cmd_table->insert(std::pair<std::string, std::unique_ptr<Cmd>>(kCmdNameWatch, std::move(watchptr)));
+  ////Unwatch
+  std::unique_ptr<Cmd> unwatchptr = std::make_unique<UnwatchCmd>(kCmdNameUnWatch, 1, kCmdFlagsRead | kCmdFlagsFast);
+  cmd_table->insert(std::pair<std::string, std::unique_ptr<Cmd>>(kCmdNameUnWatch, std::move(unwatchptr)));
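// Editor's note (not part of this patch): the five commands just registered
// implement the standard Redis transaction flow; a typical optimistic-locking
// session looks like:
//
//   WATCH account:1            // abort EXEC if this key changes
//   MULTI                      // start queueing
//   INCRBY account:1 100       // queued, server replies QUEUED
//   EXEC                       // runs the queue atomically; replies nil if
//                              // a watched key was modified in the meantime
//
// DISCARD drops the queue, and UNWATCH clears the watch list without running
// anything.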
+
+  // Stream
+  ////XAdd
+  std::unique_ptr<Cmd> xaddptr =
+      std::make_unique<XAddCmd>(kCmdNameXAdd, -4, kCmdFlagsWrite | kCmdFlagsStream | kCmdFlagsFast);
+  cmd_table->insert(std::pair<std::string, std::unique_ptr<Cmd>>(kCmdNameXAdd, std::move(xaddptr)));
+  ////XLen
+  std::unique_ptr<Cmd> xlenptr =
+      std::make_unique<XLenCmd>(kCmdNameXLen, 2, kCmdFlagsRead | kCmdFlagsStream | kCmdFlagsFast);
+  cmd_table->insert(std::pair<std::string, std::unique_ptr<Cmd>>(kCmdNameXLen, std::move(xlenptr)));
+  ////XRead
+  std::unique_ptr<Cmd> xreadptr =
+      std::make_unique<XReadCmd>(kCmdNameXRead, -3, kCmdFlagsRead | kCmdFlagsStream | kCmdFlagsSlow);
+  cmd_table->insert(std::pair<std::string, std::unique_ptr<Cmd>>(kCmdNameXRead, std::move(xreadptr)));
+  ////XRange
+  std::unique_ptr<Cmd> xrangeptr =
+      std::make_unique<XRangeCmd>(kCmdNameXRange, -4, kCmdFlagsRead | kCmdFlagsStream | kCmdFlagsSlow);
+  cmd_table->insert(std::pair<std::string, std::unique_ptr<Cmd>>(kCmdNameXRange, std::move(xrangeptr)));
+  ////XRevrange
+  std::unique_ptr<Cmd> xrevrangeptr =
+      std::make_unique<XRevrangeCmd>(kCmdNameXRevrange, -4, kCmdFlagsRead | kCmdFlagsStream | kCmdFlagsSlow);
+  cmd_table->insert(std::pair<std::string, std::unique_ptr<Cmd>>(kCmdNameXRevrange, std::move(xrevrangeptr)));
+  ////XTrim
+  std::unique_ptr<Cmd> xtrimptr =
+      std::make_unique<XTrimCmd>(kCmdNameXTrim, -2, kCmdFlagsWrite | kCmdFlagsStream | kCmdFlagsSlow);
+  cmd_table->insert(std::pair<std::string, std::unique_ptr<Cmd>>(kCmdNameXTrim, std::move(xtrimptr)));
+  ////XDel
+  std::unique_ptr<Cmd> xdelptr =
+      std::make_unique<XDelCmd>(kCmdNameXDel, -3, kCmdFlagsWrite | kCmdFlagsStream | kCmdFlagsFast);
+  cmd_table->insert(std::pair<std::string, std::unique_ptr<Cmd>>(kCmdNameXDel, std::move(xdelptr)));
+  ////XINFO
+  std::unique_ptr<Cmd> xinfoptr =
+      std::make_unique<XInfoCmd>(kCmdNameXInfo, -2, kCmdFlagsRead | kCmdFlagsStream | kCmdFlagsSlow);
+  cmd_table->insert(std::pair<std::string, std::unique_ptr<Cmd>>(kCmdNameXInfo, std::move(xinfoptr)));
+}
+
+Cmd* GetCmdFromDB(const std::string& opt, const CmdTable& cmd_table) {
+  auto it = cmd_table.find(opt);
+  if (it != cmd_table.end()) {
+    return it->second.get();
+  }
+  return nullptr;
+}
+
+bool Cmd::CheckArg(uint64_t num) const { return !((arity_ > 0 && num != arity_) || (arity_ < 0 && num < -arity_)); }
+
+Cmd::Cmd(std::string name, int arity, uint32_t flag, uint32_t aclCategory)
+    : name_(std::move(name)), arity_(arity), flag_(flag), aclCategory_(aclCategory), cache_missed_in_rtc_(false) {}
+
+void Cmd::Initial(const PikaCmdArgsType& argv, const std::string& db_name) {
+  argv_ = argv;
+  db_name_ = db_name;
+  res_.clear();  // Clear res content
+  db_ = g_pika_server->GetDB(db_name_);
+  sync_db_ = g_pika_rm->GetSyncMasterDBByName(DBInfo(db_name_));
+  Clear();  // Reset command state; derived classes may provide their own implementation
+  DoInitial();
+}
+
+std::vector<std::string> Cmd::current_key() const { return {""}; }
+
+void Cmd::Execute() {
+  ProcessCommand();
+}
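// Editor's note: a minimal, hypothetical sketch (not part of this patch) of
// how a dispatcher can use the pieces defined above: resolve the verb via
// GetCmdFromDB(), validate the token count via CheckArg(), then run the
// Initial()/Execute() sequence. The real server does this inside
// PikaClientConn and additionally handles error replies, ACL checks and
// per-request command clones.
static void DispatchSketch(const CmdTable& cmd_table, const PikaCmdArgsType& argv,
                           const std::string& db_name) {
  if (argv.empty()) {
    return;
  }
  Cmd* cmd = GetCmdFromDB(argv[0], cmd_table);  // nullptr for unknown commands
  if (cmd == nullptr || !cmd->CheckArg(argv.size())) {
    return;  // the real code sets an error on CmdRes instead
  }
  cmd->Initial(argv, db_name);  // binds argv/db_name and runs DoInitial()
  if (cmd->res().ok()) {
    cmd->Execute();  // ProcessCommand(): locking, cache, DB and binlog stages
  }
}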
+
+void Cmd::ProcessCommand(const HintKeys& hint_keys) {
+  if (stage_ == kNone) {
+    InternalProcessCommand(hint_keys);
+  } else {
+    if (stage_ == kBinlogStage) {
+      DoBinlog();
+    } else if (stage_ == kExecuteStage) {
+      DoCommand(hint_keys);
+    }
+  }
+}
+
+void Cmd::InternalProcessCommand(const HintKeys& hint_keys) {
+  pstd::lock::MultiRecordLock record_lock(db_->LockMgr());
+  if (is_write()) {
+    record_lock.Lock(current_key());
+  }
+  uint64_t start_us = 0;
+  if (g_pika_conf->slowlog_slower_than() >= 0) {
+    start_us = pstd::NowMicros();
+  }
+
+  if (!IsSuspend()) {
+    db_->DBLockShared();
+  }
+
+  DoCommand(hint_keys);
+  if (g_pika_conf->slowlog_slower_than() >= 0) {
+    do_duration_ += pstd::NowMicros() - start_us;
+  }
+  DoBinlog();
+
+  if (!IsSuspend()) {
+    db_->DBUnlockShared();
+  }
+  if (is_write()) {
+    record_lock.Unlock(current_key());
+  }
+}
+
+void Cmd::DoCommand(const HintKeys& hint_keys) {
+  if (IsNeedCacheDo() && PIKA_CACHE_NONE != g_pika_conf->cache_mode() &&
+      db_->cache()->CacheStatus() == PIKA_CACHE_STATUS_OK) {
+    if (!cache_missed_in_rtc_ && IsNeedReadCache()) {
+      ReadCache();
+    }
+    if (is_read() && (res().CacheMiss() || cache_missed_in_rtc_)) {
+      pstd::lock::MultiScopeRecordLock record_lock(db_->LockMgr(), current_key());
+      DoThroughDB();
+      if (IsNeedUpdateCache()) {
+        DoUpdateCache();
+      }
+    } else if (is_write()) {
+      DoThroughDB();
+      if (IsNeedUpdateCache()) {
+        DoUpdateCache();
+      }
+    }
+  } else {
+    Do();
+  }
+  if (!IsAdmin() && res().ok()) {
+    if (res().noexist()) {
+      g_pika_server->incr_server_keyspace_misses();
+    } else {
+      g_pika_server->incr_server_keyspace_hits();
+    }
+  }
+}
+
+bool Cmd::DoReadCommandInCache() {
+  if (!IsSuspend()) {
+    db_->DBLockShared();
+  }
+  DEFER {
+    if (!IsSuspend()) {
+      db_->DBUnlockShared();
+    }
+  };
+
+  if (db_->cache()->CacheStatus() == PIKA_CACHE_STATUS_OK) {
+    if (IsNeedReadCache()) {
+      ReadCache();
+    }
+    // Return true only if the read command hit the cache.
+    if (is_read() && !res().CacheMiss()) {
+      return true;
+    }
+  }
+  return false;
+}
+
+void Cmd::DoBinlog() {
+  if (res().ok() && is_write() && g_pika_conf->write_binlog()) {
+    std::shared_ptr<net::NetConn> conn_ptr = GetConn();
+    std::shared_ptr<std::string> resp_ptr = GetResp();
+    // For dummy commands appended by the system, both conn and resp are null.
+    if ((!conn_ptr || !resp_ptr) && (name_ != kCmdDummy)) {
+      if (!conn_ptr) {
+        LOG(WARNING) << sync_db_->SyncDBInfo().ToString() << " conn empty.";
+      }
+      if (!resp_ptr) {
+        LOG(WARNING) << sync_db_->SyncDBInfo().ToString() << " resp empty.";
+      }
+      res().SetRes(CmdRes::kErrOther);
+      return;
+    }
+
+    Status s = sync_db_->ConsensusProposeLog(shared_from_this());
+    if (!s.ok()) {
+      LOG(WARNING) << sync_db_->SyncDBInfo().ToString() << " Writing binlog failed, maybe no space left on device "
+                   << s.ToString();
+      res().SetRes(CmdRes::kErrOther, s.ToString());
+      return;
+    }
+  }
+}
+
+bool Cmd::hasFlag(uint32_t flag) const { return (flag_ & flag); }
+bool Cmd::is_read() const { return (flag_ & kCmdFlagsRead); }
+bool Cmd::is_write() const { return (flag_ & kCmdFlagsWrite); }
+bool Cmd::IsLocal() const { return (flag_ & kCmdFlagsLocal); }
+
+int8_t Cmd::SubCmdIndex(const std::string& cmdName) {
+  if (subCmdName_.empty()) {
+    return -1;
+  }
+  for (size_t i = 0; i < subCmdName_.size(); ++i) {
+    if (!strcasecmp(subCmdName_[i].data(), cmdName.data())) {
+      return i;
+    }
+  }
+  return -1;
+}
+
+// Other commands must be suspended while a suspend command runs.
+bool Cmd::IsSuspend() const { return (flag_ & kCmdFlagsSuspend); }
+// std::string Cmd::CurrentSubCommand() const { return ""; };
+bool Cmd::HasSubCommand() const { return subCmdName_.size() > 0; }
+std::vector<std::string> Cmd::SubCommand() const { return subCmdName_; }
+bool Cmd::IsAdmin() const { return (flag_ & kCmdFlagsAdmin); }
+bool Cmd::IsNeedUpdateCache() const { return (flag_ & kCmdFlagsUpdateCache); }
+bool Cmd::IsNeedCacheDo() const {
+  if (g_pika_conf->IsCacheDisabledTemporarily()) {
+    return false;
+  }
+
+  if (hasFlag(kCmdFlagsKv)) {
+    if (!g_pika_conf->GetCacheString()) {
+      return false;
+    }
+  } else if (hasFlag(kCmdFlagsSet)) {
+    if (!g_pika_conf->GetCacheSet()) {
+      return false;
+    }
+  } else if (hasFlag(kCmdFlagsZset)) {
+    if (!g_pika_conf->GetCacheZset()) {
+      return false;
+    }
+  } else if (hasFlag(kCmdFlagsHash)) {
+    if (!g_pika_conf->GetCacheHash()) {
+      return false;
+    }
+  } else if (hasFlag(kCmdFlagsList)) {
+    if (!g_pika_conf->GetCacheList()) {
+      return false;
+    }
+  } else if (hasFlag(kCmdFlagsBit)) {
+    if (!g_pika_conf->GetCacheBit()) {
+      return false;
+    }
+  }
+  return (hasFlag(kCmdFlagsDoThroughDB));
+}
+
+bool Cmd::IsNeedReadCache() const { return hasFlag(kCmdFlagsReadCache); }
+
+bool Cmd::HashtagIsConsistent(const std::string& lhs, const std::string& rhs) const { return true; }
+
+std::string Cmd::name() const { return name_; }
+CmdRes& Cmd::res() { return res_; }
+
+std::string Cmd::db_name() const { return db_name_; }
+
+PikaCmdArgsType& Cmd::argv() { return argv_; }
+
+uint32_t Cmd::AclCategory() const { return aclCategory_; }
+
+void Cmd::AddAclCategory(uint32_t aclCategory) { aclCategory_ |= aclCategory; }
+uint32_t Cmd::flag() const { return flag_; }
+
+std::string Cmd::ToRedisProtocol() {
+  std::string content;
+  content.reserve(RAW_ARGS_LEN);
+  RedisAppendLenUint64(content, argv_.size(), "*");
+
+  for (const auto& v : argv_) {
+    RedisAppendLenUint64(content, v.size(), "$");
+    RedisAppendContent(content, v);
+  }
+
+  return content;
+}
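// Editor's note (not part of this patch): ToRedisProtocol() above emits the
// standard RESP wire format: "*<argc>" followed by a "$<len>" header plus
// payload per argument, every line terminated by \r\n. For
// argv = {"SET", "key", "value"} the function produces:
//
//   *3\r\n
//   $3\r\nSET\r\n
//   $3\r\nkey\r\n
//   $5\r\nvalue\r\n
//
// i.e. the exact byte sequence a client would send, which is what makes the
// result suitable for replaying a write to a Redis-compatible peer.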
+
+void Cmd::LogCommand() const {
+  std::string command;
+  for (const auto& item : argv_) {
+    command.append(" ");
+    command.append(item);
+  }
+  LOG(INFO) << "command:" << command;
+}
+
+void Cmd::SetConn(const std::shared_ptr<net::NetConn>& conn) { conn_ = conn; }
+
+std::shared_ptr<net::NetConn> Cmd::GetConn() { return conn_.lock(); }
+
+void Cmd::SetResp(const std::shared_ptr<std::string>& resp) { resp_ = resp; }
+
+std::shared_ptr<std::string> Cmd::GetResp() { return resp_.lock(); }
+
+void Cmd::SetStage(CmdStage stage) { stage_ = stage; }
+bool Cmd::IsCacheMissedInRtc() const { return cache_missed_in_rtc_; }
+void Cmd::SetCacheMissedInRtc(bool value) { cache_missed_in_rtc_ = value; }
diff --git a/tools/pika_migrate/src/pika_command_docs.cc b/tools/pika_migrate/src/pika_command_docs.cc
new file mode 100644
index 0000000000..50087d17d3
--- /dev/null
+++ b/tools/pika_migrate/src/pika_command_docs.cc
@@ -0,0 +1,10845 @@
+// Copyright (c) 2023-present, Qihoo, Inc. All rights reserved.
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree. An additional grant
+// of patent rights can be found in the PATENTS file in the same directory.
+
+#ifdef WITH_COMMAND_DOCS
+
+#  include "include/pika_admin.h"
+
+#  include <memory>
+#  include <string>
+#  include <unordered_map>
+#  include <vector>
+
+static CommandCmd::EncodablePtr operator""_RedisInt(unsigned long long value) {
+  return std::make_shared<CommandCmd::EncodableInt>(value);
+}
+
+static CommandCmd::EncodablePtr operator""_RedisString(const char* value, std::size_t length) {
+  return std::make_shared<CommandCmd::EncodableString>(std::string(value, length));
+}
+
+static CommandCmd::EncodablePtr operator""_RedisStatus(const char* value, std::size_t length) {
+  return std::make_shared<CommandCmd::EncodableStatus>(std::string(value, length));
+}
+
+static CommandCmd::EncodablePtr RedisMap(CommandCmd::EncodableMap::RedisMap values) {
+  return std::make_shared<CommandCmd::EncodableMap>(std::move(values));
+}
+
+static CommandCmd::EncodablePtr RedisSet(std::vector<CommandCmd::EncodablePtr> values) {
+  return std::make_shared<CommandCmd::EncodableSet>(std::move(values));
+}
+
+static CommandCmd::EncodablePtr RedisArray(std::vector<CommandCmd::EncodablePtr> values) {
+  return std::make_shared<CommandCmd::EncodableArray>(std::move(values));
+}
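// Editor's note: a hypothetical illustration (not part of this patch) of the
// builder helpers defined above. The entry content is invented; only the
// literal operators and the RedisMap/RedisArray calls match the definitions:
//
//   CommandCmd::EncodablePtr entry = RedisMap({
//       {"summary", "An invented entry, for illustration only."_RedisString},
//       {"since", "0.0.0"_RedisString},
//       {"arguments", RedisArray({
//           RedisMap({{"name", "key"_RedisString},
//                     {"key_spec_index", 0_RedisInt}}),
//       })},
//   });
//
// Each helper wraps its input in a shared Encodable node, so whole command
// documents compose as nested shared_ptr trees that are cheap to copy into
// the lookup tables below.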
"无法精确到毫秒,底层会自动截断按秒级别进行处理"_RedisString}})}, + {"scan", + RedisMap( + {{kPikaField, + "会顺序迭代当前db的快照,由于pika允许重名五次,所以scan有优先输出顺序,依次为:string -> hash -> list -> zset -> set"_RedisString}})}, + {"type", + RedisMap( + {{kPikaField, + "另外由于pika允许重名五次,所以type有优先输出顺序,依次为:string -> hash -> list -> zset -> set,如果这个key在string中存在,那么只输出sting,如果不存在,那么则输出hash的,依次类推"_RedisString}})}, + {"keys", + RedisMap( + {{kPikaField, + "KEYS命令支持参数支持扫描指定类型的数据,用法如 \"keys * [string, hash, list, zset, set]\""_RedisString}})}, + {"bitop", kBitSpecialization}, + {"getbit", kBitSpecialization}, + {"setbit", kBitSpecialization}, + {"hset", RedisMap({{kPikaField, "暂不支持单条命令设置多个field value,如有需求请用HMSET"_RedisString}})}, + {"srandmember", RedisMap({{kPikaField, "时间复杂度O( n ),耗时较多"_RedisString}})}, + {"zadd", RedisMap({{kPikaField, "的选项 [NX|XX] [CH] [INCR] 暂不支持"_RedisString}})}, + {"pfadd", kHyperLogSpecialization}, + {"pfcount", kHyperLogSpecialization}, + {"pfmerge", kHyperLogSpecialization}, + {"psubscribe", kPubSubSpecialization}, + {"pubsub", kPubSubSpecialization}, + {"publish", kPubSubSpecialization}, + {"punsubscribe", kPubSubSpecialization}, + {"subscribe", kPubSubSpecialization}, + {"unsubscribe", kPubSubSpecialization}, + {"info", + RedisMap( + {{kPikaField, + "info支持全部输出,也支持匹配形式的输出,例如可以通过info stats查看状态信息,需要注意的是key space与redis不同,pika对于key space的展示选择了分类型展示而非redis的分库展示(因为pika没有库),pika对于key space的统计是被动的,需要手动触发,然后pika会在后台进行统计,pika的key space统计是精确的。触发方式为执行:keyspace命令即可,然后pika会在后台统计,此时可以使用:keyspace readonly命令来进行查看,readonly参数可以避免反复进行统计,如果当前数据为0,则证明还在统计中"_RedisString}})}, + {"client", RedisMap({{kPikaField, + "当前client命令支持client list及client kill,client list显示的内容少于redis"_RedisString}})}, + {"select", RedisMap({{kPikaField, "该命令在3.1.0版前无任何效果,自3.1.0版开始与Redis一致"_RedisString}})}, + {"ping", RedisMap({{kPikaField, "该命令仅支持无参数使用,即使用PING,客户端返回PONG"_RedisString}})}, + {"type", + RedisMap( + {{kPikaField, + "pika不同类型的key name 是允许重复的,例如:string 类型里有 key1,hash list set zset类型可以同时存在 key1,在使用 type命令查询时,只能得到一个,如果要查询同一个 name 所有的类型,需要使用 ptype 命令查询"_RedisString}})}, +}; + +const std::unordered_map CommandCmd::kCommandDocs{ + {"zremrangebyscore", + RedisMap({ + {"summary", + "Removes members in a sorted set within a range of scores. 
Deletes the sorted set if all members were removed."_RedisString}, + {"since", "1.2.0"_RedisString}, + {"group", "sorted-set"_RedisString}, + {"complexity", + "O(log(N)+M) with N being the number of elements in the sorted set and M the number of elements removed by the operation."_RedisString}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "key"_RedisString}, + {"type", "key"_RedisString}, + {"display_text", "key"_RedisString}, + {"key_spec_index", 0_RedisInt}, + }), + RedisMap({ + {"name", "min"_RedisString}, + {"type", "double"_RedisString}, + {"display_text", "min"_RedisString}, + }), + RedisMap({ + {"name", "max"_RedisString}, + {"type", "double"_RedisString}, + {"display_text", "max"_RedisString}, + }), + })}, + })}, + {"sunion", RedisMap({ + {"summary", "Returns the union of multiple sets."_RedisString}, + {"since", "1.0.0"_RedisString}, + {"group", "set"_RedisString}, + {"complexity", "O(N) where N is the total number of elements in all given sets."_RedisString}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "key"_RedisString}, + {"type", "key"_RedisString}, + {"display_text", "key"_RedisString}, + {"key_spec_index", 0_RedisInt}, + {"flags", RedisArray({ + "multiple"_RedisStatus, + })}, + }), + })}, + })}, + {"debug", RedisMap({ + {"summary", "A container for debugging commands."_RedisString}, + {"since", "1.0.0"_RedisString}, + {"group", "server"_RedisString}, + {"complexity", "Depends on subcommand."_RedisString}, + {"doc_flags", RedisSet({ + "syscmd"_RedisStatus, + })}, + })}, + {"readonly", + RedisMap({ + {"summary", "Enables read-only queries for a connection to a Redis Cluster replica node."_RedisString}, + {"since", "3.0.0"_RedisString}, + {"group", "cluster"_RedisString}, + {"complexity", "O(1)"_RedisString}, + })}, + {"latency", + RedisMap({ + {"summary", "A container for latency diagnostics commands."_RedisString}, + {"since", "2.8.13"_RedisString}, + {"group", "server"_RedisString}, + {"complexity", "Depends on subcommand."_RedisString}, + {"subcommands", + RedisMap({ + {"latency|doctor", RedisMap({ + {"summary", "Returns a human-readable latency analysis report."_RedisString}, + {"since", "2.8.13"_RedisString}, + {"group", "server"_RedisString}, + {"complexity", "O(1)"_RedisString}, + })}, + {"latency|help", RedisMap({ + {"summary", "Returns helpful text about the different subcommands."_RedisString}, + {"since", "2.8.13"_RedisString}, + {"group", "server"_RedisString}, + {"complexity", "O(1)"_RedisString}, + })}, + {"latency|histogram", + RedisMap({ + {"summary", + "Returns the cumulative distribution of latencies of a subset or all commands."_RedisString}, + {"since", "7.0.0"_RedisString}, + {"group", "server"_RedisString}, + {"complexity", + "O(N) where N is the number of commands with latency information being retrieved."_RedisString}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "command"_RedisString}, + {"type", "string"_RedisString}, + {"display_text", "command"_RedisString}, + {"flags", RedisArray({ + "optional"_RedisStatus, + "multiple"_RedisStatus, + })}, + }), + })}, + })}, + {"latency|history", RedisMap({ + {"summary", "Returns timestamp-latency samples for an event."_RedisString}, + {"since", "2.8.13"_RedisString}, + {"group", "server"_RedisString}, + {"complexity", "O(1)"_RedisString}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "event"_RedisString}, + {"type", "string"_RedisString}, + {"display_text", "event"_RedisString}, + }), + })}, + })}, + {"latency|graph", RedisMap({ + {"summary", "Returns a latency graph for an 
event."_RedisString}, + {"since", "2.8.13"_RedisString}, + {"group", "server"_RedisString}, + {"complexity", "O(1)"_RedisString}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "event"_RedisString}, + {"type", "string"_RedisString}, + {"display_text", "event"_RedisString}, + }), + })}, + })}, + {"latency|latest", RedisMap({ + {"summary", "Returns the latest latency samples for all events."_RedisString}, + {"since", "2.8.13"_RedisString}, + {"group", "server"_RedisString}, + {"complexity", "O(1)"_RedisString}, + })}, + {"latency|reset", RedisMap({ + {"summary", "Resets the latency data for one or more events."_RedisString}, + {"since", "2.8.13"_RedisString}, + {"group", "server"_RedisString}, + {"complexity", "O(1)"_RedisString}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "event"_RedisString}, + {"type", "string"_RedisString}, + {"display_text", "event"_RedisString}, + {"flags", RedisArray({ + "optional"_RedisStatus, + "multiple"_RedisStatus, + })}, + }), + })}, + })}, + })}, + })}, + {"setbit", + RedisMap({ + {"summary", + "Sets or clears the bit at offset of the string value. Creates the key if it doesn't exist."_RedisString}, + {"since", "2.2.0"_RedisString}, + {"group", "bitmap"_RedisString}, + {"complexity", "O(1)"_RedisString}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "key"_RedisString}, + {"type", "key"_RedisString}, + {"display_text", "key"_RedisString}, + {"key_spec_index", 0_RedisInt}, + }), + RedisMap({ + {"name", "offset"_RedisString}, + {"type", "integer"_RedisString}, + {"display_text", "offset"_RedisString}, + }), + RedisMap({ + {"name", "value"_RedisString}, + {"type", "integer"_RedisString}, + {"display_text", "value"_RedisString}, + }), + })}, + })}, + {"lpush", + RedisMap({ + {"summary", "Prepends one or more elements to a list. Creates the key if it doesn't exist."_RedisString}, + {"since", "1.0.0"_RedisString}, + {"group", "list"_RedisString}, + {"complexity", + "O(1) for each element added, so O(N) to add N elements when the command is called with multiple arguments."_RedisString}, + {"history", RedisSet({ + RedisArray({"2.4.0"_RedisString, "Accepts multiple `element` arguments."_RedisString}), + })}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "key"_RedisString}, + {"type", "key"_RedisString}, + {"display_text", "key"_RedisString}, + {"key_spec_index", 0_RedisInt}, + }), + RedisMap({ + {"name", "element"_RedisString}, + {"type", "string"_RedisString}, + {"display_text", "element"_RedisString}, + {"flags", RedisArray({ + "multiple"_RedisStatus, + })}, + }), + })}, + })}, + {"punsubscribe", + RedisMap({ + {"summary", "Stops listening to messages published to channels that match one or more patterns."_RedisString}, + {"since", "2.0.0"_RedisString}, + {"group", "pubsub"_RedisString}, + {"complexity", + "O(N+M) where N is the number of patterns the client is already subscribed and M is the number of total patterns subscribed in the system (by any client)."_RedisString}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "pattern"_RedisString}, + {"type", "pattern"_RedisString}, + {"display_text", "pattern"_RedisString}, + {"flags", RedisArray({ + "optional"_RedisStatus, + "multiple"_RedisStatus, + })}, + }), + })}, + })}, + {"role", RedisMap({ + {"summary", "Returns the replication role."_RedisString}, + {"since", "2.8.12"_RedisString}, + {"group", "server"_RedisString}, + {"complexity", "O(1)"_RedisString}, + })}, + {"lmove", + RedisMap({ + {"summary", + "Returns an element after popping it from one list and pushing it to another. 
Deletes the list if the last element was moved."_RedisString}, + {"since", "6.2.0"_RedisString}, + {"group", "list"_RedisString}, + {"complexity", "O(1)"_RedisString}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "source"_RedisString}, + {"type", "key"_RedisString}, + {"display_text", "source"_RedisString}, + {"key_spec_index", 0_RedisInt}, + }), + RedisMap({ + {"name", "destination"_RedisString}, + {"type", "key"_RedisString}, + {"display_text", "destination"_RedisString}, + {"key_spec_index", 1_RedisInt}, + }), + RedisMap({ + {"name", "wherefrom"_RedisString}, + {"type", "oneof"_RedisString}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "left"_RedisString}, + {"type", "pure-token"_RedisString}, + {"display_text", "left"_RedisString}, + {"token", "LEFT"_RedisString}, + }), + RedisMap({ + {"name", "right"_RedisString}, + {"type", "pure-token"_RedisString}, + {"display_text", "right"_RedisString}, + {"token", "RIGHT"_RedisString}, + }), + })}, + }), + RedisMap({ + {"name", "whereto"_RedisString}, + {"type", "oneof"_RedisString}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "left"_RedisString}, + {"type", "pure-token"_RedisString}, + {"display_text", "left"_RedisString}, + {"token", "LEFT"_RedisString}, + }), + RedisMap({ + {"name", "right"_RedisString}, + {"type", "pure-token"_RedisString}, + {"display_text", "right"_RedisString}, + {"token", "RIGHT"_RedisString}, + }), + })}, + }), + })}, + })}, + {"memory", + RedisMap({ + {"summary", "A container for memory diagnostics commands."_RedisString}, + {"since", "4.0.0"_RedisString}, + {"group", "server"_RedisString}, + {"complexity", "Depends on subcommand."_RedisString}, + {"subcommands", + RedisMap({ + {"memory|doctor", RedisMap({ + {"summary", "Outputs a memory problems report."_RedisString}, + {"since", "4.0.0"_RedisString}, + {"group", "server"_RedisString}, + {"complexity", "O(1)"_RedisString}, + })}, + {"memory|malloc-stats", + RedisMap({ + {"summary", "Returns the allocator statistics."_RedisString}, + {"since", "4.0.0"_RedisString}, + {"group", "server"_RedisString}, + {"complexity", "Depends on how much memory is allocated, could be slow"_RedisString}, + })}, + {"memory|help", RedisMap({ + {"summary", "Returns helpful text about the different subcommands."_RedisString}, + {"since", "4.0.0"_RedisString}, + {"group", "server"_RedisString}, + {"complexity", "O(1)"_RedisString}, + })}, + {"memory|purge", RedisMap({ + {"summary", "Asks the allocator to release memory."_RedisString}, + {"since", "4.0.0"_RedisString}, + {"group", "server"_RedisString}, + {"complexity", "Depends on how much memory is allocated, could be slow"_RedisString}, + })}, + {"memory|stats", RedisMap({ + {"summary", "Returns details about memory usage."_RedisString}, + {"since", "4.0.0"_RedisString}, + {"group", "server"_RedisString}, + {"complexity", "O(1)"_RedisString}, + })}, + {"memory|usage", RedisMap({ + {"summary", "Estimates the memory usage of a key."_RedisString}, + {"since", "4.0.0"_RedisString}, + {"group", "server"_RedisString}, + {"complexity", "O(N) where N is the number of samples."_RedisString}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "key"_RedisString}, + {"type", "key"_RedisString}, + {"display_text", "key"_RedisString}, + {"key_spec_index", 0_RedisInt}, + }), + RedisMap({ + {"name", "count"_RedisString}, + {"type", "integer"_RedisString}, + {"display_text", "count"_RedisString}, + {"token", "SAMPLES"_RedisString}, + {"flags", RedisArray({ + "optional"_RedisStatus, + })}, + }), + })}, + })}, + })}, + })}, + 
{"time", RedisMap({ + {"summary", "Returns the server time."_RedisString}, + {"since", "2.6.0"_RedisString}, + {"group", "server"_RedisString}, + {"complexity", "O(1)"_RedisString}, + })}, + {"sunsubscribe", + RedisMap({ + {"summary", "Stops listening to messages posted to shard channels."_RedisString}, + {"since", "7.0.0"_RedisString}, + {"group", "pubsub"_RedisString}, + {"complexity", "O(N) where N is the number of clients already subscribed to a shard channel."_RedisString}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "shardchannel"_RedisString}, + {"type", "string"_RedisString}, + {"display_text", "shardchannel"_RedisString}, + {"flags", RedisArray({ + "optional"_RedisStatus, + "multiple"_RedisStatus, + })}, + }), + })}, + })}, + {"module", + RedisMap({ + {"summary", "A container for module commands."_RedisString}, + {"since", "4.0.0"_RedisString}, + {"group", "server"_RedisString}, + {"complexity", "Depends on subcommand."_RedisString}, + {"subcommands", + RedisMap({ + {"module|load", RedisMap({ + {"summary", "Loads a module."_RedisString}, + {"since", "4.0.0"_RedisString}, + {"group", "server"_RedisString}, + {"complexity", "O(1)"_RedisString}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "path"_RedisString}, + {"type", "string"_RedisString}, + {"display_text", "path"_RedisString}, + }), + RedisMap({ + {"name", "arg"_RedisString}, + {"type", "string"_RedisString}, + {"display_text", "arg"_RedisString}, + {"flags", RedisArray({ + "optional"_RedisStatus, + "multiple"_RedisStatus, + })}, + }), + })}, + })}, + {"module|loadex", RedisMap({ + {"summary", "Loads a module using extended parameters."_RedisString}, + {"since", "7.0.0"_RedisString}, + {"group", "server"_RedisString}, + {"complexity", "O(1)"_RedisString}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "path"_RedisString}, + {"type", "string"_RedisString}, + {"display_text", "path"_RedisString}, + }), + RedisMap({ + {"name", "configs"_RedisString}, + {"type", "block"_RedisString}, + {"token", "CONFIG"_RedisString}, + {"flags", RedisArray({ + "optional"_RedisStatus, + "multiple"_RedisStatus, + "multiple_token"_RedisStatus, + })}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "name"_RedisString}, + {"type", "string"_RedisString}, + {"display_text", "name"_RedisString}, + }), + RedisMap({ + {"name", "value"_RedisString}, + {"type", "string"_RedisString}, + {"display_text", "value"_RedisString}, + }), + })}, + }), + RedisMap({ + {"name", "args"_RedisString}, + {"type", "string"_RedisString}, + {"display_text", "args"_RedisString}, + {"token", "ARGS"_RedisString}, + {"flags", RedisArray({ + "optional"_RedisStatus, + "multiple"_RedisStatus, + })}, + }), + })}, + })}, + {"module|list", RedisMap({ + {"summary", "Returns all loaded modules."_RedisString}, + {"since", "4.0.0"_RedisString}, + {"group", "server"_RedisString}, + {"complexity", "O(N) where N is the number of loaded modules."_RedisString}, + })}, + {"module|help", RedisMap({ + {"summary", "Returns helpful text about the different subcommands."_RedisString}, + {"since", "5.0.0"_RedisString}, + {"group", "server"_RedisString}, + {"complexity", "O(1)"_RedisString}, + })}, + {"module|unload", RedisMap({ + {"summary", "Unloads a module."_RedisString}, + {"since", "4.0.0"_RedisString}, + {"group", "server"_RedisString}, + {"complexity", "O(1)"_RedisString}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "name"_RedisString}, + {"type", "string"_RedisString}, + {"display_text", "name"_RedisString}, + }), + })}, + })}, + })}, + })}, + {"bzmpop", + 
RedisMap({ + {"summary", + "Removes and returns a member by score from one or more sorted sets. Blocks until a member is available otherwise. Deletes the sorted set if the last element was popped."_RedisString}, + {"since", "7.0.0"_RedisString}, + {"group", "sorted-set"_RedisString}, + {"complexity", + "O(K) + O(M*log(N)) where K is the number of provided keys, N being the number of elements in the sorted set, and M being the number of elements popped."_RedisString}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "timeout"_RedisString}, + {"type", "double"_RedisString}, + {"display_text", "timeout"_RedisString}, + }), + RedisMap({ + {"name", "numkeys"_RedisString}, + {"type", "integer"_RedisString}, + {"display_text", "numkeys"_RedisString}, + }), + RedisMap({ + {"name", "key"_RedisString}, + {"type", "key"_RedisString}, + {"display_text", "key"_RedisString}, + {"key_spec_index", 0_RedisInt}, + {"flags", RedisArray({ + "multiple"_RedisStatus, + })}, + }), + RedisMap({ + {"name", "where"_RedisString}, + {"type", "oneof"_RedisString}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "min"_RedisString}, + {"type", "pure-token"_RedisString}, + {"display_text", "min"_RedisString}, + {"token", "MIN"_RedisString}, + }), + RedisMap({ + {"name", "max"_RedisString}, + {"type", "pure-token"_RedisString}, + {"display_text", "max"_RedisString}, + {"token", "MAX"_RedisString}, + }), + })}, + }), + RedisMap({ + {"name", "count"_RedisString}, + {"type", "integer"_RedisString}, + {"display_text", "count"_RedisString}, + {"token", "COUNT"_RedisString}, + {"flags", RedisArray({ + "optional"_RedisStatus, + })}, + }), + })}, + })}, + {"readwrite", + RedisMap({ + {"summary", "Enables read-write queries for a connection to a Redis Cluster replica node."_RedisString}, + {"since", "3.0.0"_RedisString}, + {"group", "cluster"_RedisString}, + {"complexity", "O(1)"_RedisString}, + })}, + {"zadd", + RedisMap({ + {"summary", + "Adds one or more members to a sorted set, or updates their scores. 
Creates the key if it doesn't exist."_RedisString}, + {"since", "1.2.0"_RedisString}, + {"group", "sorted-set"_RedisString}, + {"complexity", + "O(log(N)) for each item added, where N is the number of elements in the sorted set."_RedisString}, + {"history", + RedisSet({ + RedisArray({"2.4.0"_RedisString, "Accepts multiple elements."_RedisString}), + RedisArray({"3.0.2"_RedisString, "Added the `XX`, `NX`, `CH` and `INCR` options."_RedisString}), + RedisArray({"6.2.0"_RedisString, "Added the `GT` and `LT` options."_RedisString}), + })}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "key"_RedisString}, + {"type", "key"_RedisString}, + {"display_text", "key"_RedisString}, + {"key_spec_index", 0_RedisInt}, + }), + RedisMap({ + {"name", "condition"_RedisString}, + {"type", "oneof"_RedisString}, + {"since", "3.0.2"_RedisString}, + {"flags", RedisArray({ + "optional"_RedisStatus, + })}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "nx"_RedisString}, + {"type", "pure-token"_RedisString}, + {"display_text", "nx"_RedisString}, + {"token", "NX"_RedisString}, + }), + RedisMap({ + {"name", "xx"_RedisString}, + {"type", "pure-token"_RedisString}, + {"display_text", "xx"_RedisString}, + {"token", "XX"_RedisString}, + }), + })}, + }), + RedisMap({ + {"name", "comparison"_RedisString}, + {"type", "oneof"_RedisString}, + {"since", "6.2.0"_RedisString}, + {"flags", RedisArray({ + "optional"_RedisStatus, + })}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "gt"_RedisString}, + {"type", "pure-token"_RedisString}, + {"display_text", "gt"_RedisString}, + {"token", "GT"_RedisString}, + }), + RedisMap({ + {"name", "lt"_RedisString}, + {"type", "pure-token"_RedisString}, + {"display_text", "lt"_RedisString}, + {"token", "LT"_RedisString}, + }), + })}, + }), + RedisMap({ + {"name", "change"_RedisString}, + {"type", "pure-token"_RedisString}, + {"display_text", "change"_RedisString}, + {"token", "CH"_RedisString}, + {"since", "3.0.2"_RedisString}, + {"flags", RedisArray({ + "optional"_RedisStatus, + })}, + }), + RedisMap({ + {"name", "increment"_RedisString}, + {"type", "pure-token"_RedisString}, + {"display_text", "increment"_RedisString}, + {"token", "INCR"_RedisString}, + {"since", "3.0.2"_RedisString}, + {"flags", RedisArray({ + "optional"_RedisStatus, + })}, + }), + RedisMap({ + {"name", "data"_RedisString}, + {"type", "block"_RedisString}, + {"flags", RedisArray({ + "multiple"_RedisStatus, + })}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "score"_RedisString}, + {"type", "double"_RedisString}, + {"display_text", "score"_RedisString}, + }), + RedisMap({ + {"name", "member"_RedisString}, + {"type", "string"_RedisString}, + {"display_text", "member"_RedisString}, + }), + })}, + }), + })}, + })}, + {"swapdb", + RedisMap({ + {"summary", "Swaps two Redis databases."_RedisString}, + {"since", "4.0.0"_RedisString}, + {"group", "server"_RedisString}, + {"complexity", + "O(N) where N is the count of clients watching or blocking on keys from both databases."_RedisString}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "index1"_RedisString}, + {"type", "integer"_RedisString}, + {"display_text", "index1"_RedisString}, + }), + RedisMap({ + {"name", "index2"_RedisString}, + {"type", "integer"_RedisString}, + {"display_text", "index2"_RedisString}, + }), + })}, + })}, + {"incrby", + RedisMap({ + {"summary", + "Increments the integer value of a key by a number. 
Uses 0 as initial value if the key doesn't exist."_RedisString}, + {"since", "1.0.0"_RedisString}, + {"group", "string"_RedisString}, + {"complexity", "O(1)"_RedisString}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "key"_RedisString}, + {"type", "key"_RedisString}, + {"display_text", "key"_RedisString}, + {"key_spec_index", 0_RedisInt}, + }), + RedisMap({ + {"name", "increment"_RedisString}, + {"type", "integer"_RedisString}, + {"display_text", "increment"_RedisString}, + }), + })}, + })}, + {"zscore", RedisMap({ + {"summary", "Returns the score of a member in a sorted set."_RedisString}, + {"since", "1.2.0"_RedisString}, + {"group", "sorted-set"_RedisString}, + {"complexity", "O(1)"_RedisString}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "key"_RedisString}, + {"type", "key"_RedisString}, + {"display_text", "key"_RedisString}, + {"key_spec_index", 0_RedisInt}, + }), + RedisMap({ + {"name", "member"_RedisString}, + {"type", "string"_RedisString}, + {"display_text", "member"_RedisString}, + }), + })}, + })}, + {"spop", + RedisMap({ + {"summary", + "Returns one or more random members from a set after removing them. Deletes the set if the last member was popped."_RedisString}, + {"since", "1.0.0"_RedisString}, + {"group", "set"_RedisString}, + {"complexity", + "Without the count argument O(1), otherwise O(N) where N is the value of the passed count."_RedisString}, + {"history", RedisSet({ + RedisArray({"3.2.0"_RedisString, "Added the `count` argument."_RedisString}), + })}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "key"_RedisString}, + {"type", "key"_RedisString}, + {"display_text", "key"_RedisString}, + {"key_spec_index", 0_RedisInt}, + }), + RedisMap({ + {"name", "count"_RedisString}, + {"type", "integer"_RedisString}, + {"display_text", "count"_RedisString}, + {"since", "3.2.0"_RedisString}, + {"flags", RedisArray({ + "optional"_RedisStatus, + })}, + }), + })}, + })}, + {"mset", RedisMap({ + {"summary", "Atomically creates or modifies the string values of one or more keys."_RedisString}, + {"since", "1.0.1"_RedisString}, + {"group", "string"_RedisString}, + {"complexity", "O(N) where N is the number of keys to set."_RedisString}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "data"_RedisString}, + {"type", "block"_RedisString}, + {"flags", RedisArray({ + "multiple"_RedisStatus, + })}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "key"_RedisString}, + {"type", "key"_RedisString}, + {"display_text", "key"_RedisString}, + {"key_spec_index", 0_RedisInt}, + }), + RedisMap({ + {"name", "value"_RedisString}, + {"type", "string"_RedisString}, + {"display_text", "value"_RedisString}, + }), + })}, + }), + })}, + })}, + {"geosearch", + RedisMap({ + {"summary", "Queries a geospatial index for members inside an area of a box or a circle."_RedisString}, + {"since", "6.2.0"_RedisString}, + {"group", "geo"_RedisString}, + {"complexity", + "O(N+log(M)) where N is the number of elements in the grid-aligned bounding box area around the shape provided as the filter and M is the number of items inside the shape"_RedisString}, + {"history", RedisSet({ + RedisArray({"7.0.0"_RedisString, "Added support for uppercase unit names."_RedisString}), + })}, + {"arguments", + RedisArray({ + RedisMap({ + {"name", "key"_RedisString}, + {"type", "key"_RedisString}, + {"display_text", "key"_RedisString}, + {"key_spec_index", 0_RedisInt}, + }), + RedisMap({ + {"name", "from"_RedisString}, + {"type", "oneof"_RedisString}, + {"arguments", RedisArray({ + RedisMap({ + {"name", 
"member"_RedisString}, + {"type", "string"_RedisString}, + {"display_text", "member"_RedisString}, + {"token", "FROMMEMBER"_RedisString}, + }), + RedisMap({ + {"name", "fromlonlat"_RedisString}, + {"type", "block"_RedisString}, + {"token", "FROMLONLAT"_RedisString}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "longitude"_RedisString}, + {"type", "double"_RedisString}, + {"display_text", "longitude"_RedisString}, + }), + RedisMap({ + {"name", "latitude"_RedisString}, + {"type", "double"_RedisString}, + {"display_text", "latitude"_RedisString}, + }), + })}, + }), + })}, + }), + RedisMap({ + {"name", "by"_RedisString}, + {"type", "oneof"_RedisString}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "circle"_RedisString}, + {"type", "block"_RedisString}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "radius"_RedisString}, + {"type", "double"_RedisString}, + {"display_text", "radius"_RedisString}, + {"token", "BYRADIUS"_RedisString}, + }), + RedisMap({ + {"name", "unit"_RedisString}, + {"type", "oneof"_RedisString}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "m"_RedisString}, + {"type", "pure-token"_RedisString}, + {"display_text", "m"_RedisString}, + {"token", "M"_RedisString}, + }), + RedisMap({ + {"name", "km"_RedisString}, + {"type", "pure-token"_RedisString}, + {"display_text", "km"_RedisString}, + {"token", "KM"_RedisString}, + }), + RedisMap({ + {"name", "ft"_RedisString}, + {"type", "pure-token"_RedisString}, + {"display_text", "ft"_RedisString}, + {"token", "FT"_RedisString}, + }), + RedisMap({ + {"name", "mi"_RedisString}, + {"type", "pure-token"_RedisString}, + {"display_text", "mi"_RedisString}, + {"token", "MI"_RedisString}, + }), + })}, + }), + })}, + }), + RedisMap({ + {"name", "box"_RedisString}, + {"type", "block"_RedisString}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "width"_RedisString}, + {"type", "double"_RedisString}, + {"display_text", "width"_RedisString}, + {"token", "BYBOX"_RedisString}, + }), + RedisMap({ + {"name", "height"_RedisString}, + {"type", "double"_RedisString}, + {"display_text", "height"_RedisString}, + }), + RedisMap({ + {"name", "unit"_RedisString}, + {"type", "oneof"_RedisString}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "m"_RedisString}, + {"type", "pure-token"_RedisString}, + {"display_text", "m"_RedisString}, + {"token", "M"_RedisString}, + }), + RedisMap({ + {"name", "km"_RedisString}, + {"type", "pure-token"_RedisString}, + {"display_text", "km"_RedisString}, + {"token", "KM"_RedisString}, + }), + RedisMap({ + {"name", "ft"_RedisString}, + {"type", "pure-token"_RedisString}, + {"display_text", "ft"_RedisString}, + {"token", "FT"_RedisString}, + }), + RedisMap({ + {"name", "mi"_RedisString}, + {"type", "pure-token"_RedisString}, + {"display_text", "mi"_RedisString}, + {"token", "MI"_RedisString}, + }), + })}, + }), + })}, + }), + })}, + }), + RedisMap({ + {"name", "order"_RedisString}, + {"type", "oneof"_RedisString}, + {"flags", RedisArray({ + "optional"_RedisStatus, + })}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "asc"_RedisString}, + {"type", "pure-token"_RedisString}, + {"display_text", "asc"_RedisString}, + {"token", "ASC"_RedisString}, + }), + RedisMap({ + {"name", "desc"_RedisString}, + {"type", "pure-token"_RedisString}, + {"display_text", "desc"_RedisString}, + {"token", "DESC"_RedisString}, + }), + })}, + }), + RedisMap({ + {"name", "count-block"_RedisString}, + {"type", "block"_RedisString}, + {"flags", RedisArray({ + "optional"_RedisStatus, + })}, + {"arguments", 
RedisArray({ + RedisMap({ + {"name", "count"_RedisString}, + {"type", "integer"_RedisString}, + {"display_text", "count"_RedisString}, + {"token", "COUNT"_RedisString}, + }), + RedisMap({ + {"name", "any"_RedisString}, + {"type", "pure-token"_RedisString}, + {"display_text", "any"_RedisString}, + {"token", "ANY"_RedisString}, + {"flags", RedisArray({ + "optional"_RedisStatus, + })}, + }), + })}, + }), + RedisMap({ + {"name", "withcoord"_RedisString}, + {"type", "pure-token"_RedisString}, + {"display_text", "withcoord"_RedisString}, + {"token", "WITHCOORD"_RedisString}, + {"flags", RedisArray({ + "optional"_RedisStatus, + })}, + }), + RedisMap({ + {"name", "withdist"_RedisString}, + {"type", "pure-token"_RedisString}, + {"display_text", "withdist"_RedisString}, + {"token", "WITHDIST"_RedisString}, + {"flags", RedisArray({ + "optional"_RedisStatus, + })}, + }), + RedisMap({ + {"name", "withhash"_RedisString}, + {"type", "pure-token"_RedisString}, + {"display_text", "withhash"_RedisString}, + {"token", "WITHHASH"_RedisString}, + {"flags", RedisArray({ + "optional"_RedisStatus, + })}, + }), + })}, + })}, + {"hget", RedisMap({ + {"summary", "Returns the value of a field in a hash."_RedisString}, + {"since", "2.0.0"_RedisString}, + {"group", "hash"_RedisString}, + {"complexity", "O(1)"_RedisString}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "key"_RedisString}, + {"type", "key"_RedisString}, + {"display_text", "key"_RedisString}, + {"key_spec_index", 0_RedisInt}, + }), + RedisMap({ + {"name", "field"_RedisString}, + {"type", "string"_RedisString}, + {"display_text", "field"_RedisString}, + }), + })}, + })}, + {"zscan", + RedisMap({ + {"summary", "Iterates over members and scores of a sorted set."_RedisString}, + {"since", "2.8.0"_RedisString}, + {"group", "sorted-set"_RedisString}, + {"complexity", + "O(1) for every call. O(N) for a complete iteration, including enough command calls for the cursor to return back to 0. N is the number of elements inside the collection."_RedisString}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "key"_RedisString}, + {"type", "key"_RedisString}, + {"display_text", "key"_RedisString}, + {"key_spec_index", 0_RedisInt}, + }), + RedisMap({ + {"name", "cursor"_RedisString}, + {"type", "integer"_RedisString}, + {"display_text", "cursor"_RedisString}, + }), + RedisMap({ + {"name", "pattern"_RedisString}, + {"type", "pattern"_RedisString}, + {"display_text", "pattern"_RedisString}, + {"token", "MATCH"_RedisString}, + {"flags", RedisArray({ + "optional"_RedisStatus, + })}, + }), + RedisMap({ + {"name", "count"_RedisString}, + {"type", "integer"_RedisString}, + {"display_text", "count"_RedisString}, + {"token", "COUNT"_RedisString}, + {"flags", RedisArray({ + "optional"_RedisStatus, + })}, + }), + })}, + })}, + {"xreadgroup", + RedisMap({ + {"summary", + "Returns new or historical messages from a stream for a consumer in a group. Blocks until a message is available otherwise."_RedisString}, + {"since", "5.0.0"_RedisString}, + {"group", "stream"_RedisString}, + {"complexity", + "For each stream mentioned: O(M) with M being the number of elements returned. If M is constant (e.g. always asking for the first 10 elements with COUNT), you can consider it O(1). 
On the other side when XREADGROUP blocks, XADD will pay the O(N) time in order to serve the N clients blocked on the stream getting new data."_RedisString}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "group-block"_RedisString}, + {"type", "block"_RedisString}, + {"token", "GROUP"_RedisString}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "group"_RedisString}, + {"type", "string"_RedisString}, + {"display_text", "group"_RedisString}, + }), + RedisMap({ + {"name", "consumer"_RedisString}, + {"type", "string"_RedisString}, + {"display_text", "consumer"_RedisString}, + }), + })}, + }), + RedisMap({ + {"name", "count"_RedisString}, + {"type", "integer"_RedisString}, + {"display_text", "count"_RedisString}, + {"token", "COUNT"_RedisString}, + {"flags", RedisArray({ + "optional"_RedisStatus, + })}, + }), + RedisMap({ + {"name", "milliseconds"_RedisString}, + {"type", "integer"_RedisString}, + {"display_text", "milliseconds"_RedisString}, + {"token", "BLOCK"_RedisString}, + {"flags", RedisArray({ + "optional"_RedisStatus, + })}, + }), + RedisMap({ + {"name", "noack"_RedisString}, + {"type", "pure-token"_RedisString}, + {"display_text", "noack"_RedisString}, + {"token", "NOACK"_RedisString}, + {"flags", RedisArray({ + "optional"_RedisStatus, + })}, + }), + RedisMap({ + {"name", "streams"_RedisString}, + {"type", "block"_RedisString}, + {"token", "STREAMS"_RedisString}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "key"_RedisString}, + {"type", "key"_RedisString}, + {"display_text", "key"_RedisString}, + {"key_spec_index", 0_RedisInt}, + {"flags", RedisArray({ + "multiple"_RedisStatus, + })}, + }), + RedisMap({ + {"name", "id"_RedisString}, + {"type", "string"_RedisString}, + {"display_text", "id"_RedisString}, + {"flags", RedisArray({ + "multiple"_RedisStatus, + })}, + }), + })}, + }), + })}, + })}, + {"copy", + RedisMap({ + {"summary", "Copies the value of a key to a new key."_RedisString}, + {"since", "6.2.0"_RedisString}, + {"group", "generic"_RedisString}, + {"complexity", + "O(N) worst case for collections, where N is the number of nested items. 
O(1) for string values."_RedisString}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "source"_RedisString}, + {"type", "key"_RedisString}, + {"display_text", "source"_RedisString}, + {"key_spec_index", 0_RedisInt}, + }), + RedisMap({ + {"name", "destination"_RedisString}, + {"type", "key"_RedisString}, + {"display_text", "destination"_RedisString}, + {"key_spec_index", 1_RedisInt}, + }), + RedisMap({ + {"name", "destination-db"_RedisString}, + {"type", "integer"_RedisString}, + {"display_text", "destination-db"_RedisString}, + {"token", "DB"_RedisString}, + {"flags", RedisArray({ + "optional"_RedisStatus, + })}, + }), + RedisMap({ + {"name", "replace"_RedisString}, + {"type", "pure-token"_RedisString}, + {"display_text", "replace"_RedisString}, + {"token", "REPLACE"_RedisString}, + {"flags", RedisArray({ + "optional"_RedisStatus, + })}, + }), + })}, + })}, + {"getbit", RedisMap({ + {"summary", "Returns a bit value by offset."_RedisString}, + {"since", "2.2.0"_RedisString}, + {"group", "bitmap"_RedisString}, + {"complexity", "O(1)"_RedisString}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "key"_RedisString}, + {"type", "key"_RedisString}, + {"display_text", "key"_RedisString}, + {"key_spec_index", 0_RedisInt}, + }), + RedisMap({ + {"name", "offset"_RedisString}, + {"type", "integer"_RedisString}, + {"display_text", "offset"_RedisString}, + }), + })}, + })}, + {"xautoclaim", + RedisMap({ + {"summary", + "Changes, or acquires, ownership of messages in a consumer group, as if the messages were delivered to a consumer group member."_RedisString}, + {"since", "6.2.0"_RedisString}, + {"group", "stream"_RedisString}, + {"complexity", "O(1) if COUNT is small."_RedisString}, + {"history", + RedisSet({ + RedisArray( + {"7.0.0"_RedisString, + "Added an element to the reply array, containing deleted entries the command cleared from the PEL"_RedisString}), + })}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "key"_RedisString}, + {"type", "key"_RedisString}, + {"display_text", "key"_RedisString}, + {"key_spec_index", 0_RedisInt}, + }), + RedisMap({ + {"name", "group"_RedisString}, + {"type", "string"_RedisString}, + {"display_text", "group"_RedisString}, + }), + RedisMap({ + {"name", "consumer"_RedisString}, + {"type", "string"_RedisString}, + {"display_text", "consumer"_RedisString}, + }), + RedisMap({ + {"name", "min-idle-time"_RedisString}, + {"type", "string"_RedisString}, + {"display_text", "min-idle-time"_RedisString}, + }), + RedisMap({ + {"name", "start"_RedisString}, + {"type", "string"_RedisString}, + {"display_text", "start"_RedisString}, + }), + RedisMap({ + {"name", "count"_RedisString}, + {"type", "integer"_RedisString}, + {"display_text", "count"_RedisString}, + {"token", "COUNT"_RedisString}, + {"flags", RedisArray({ + "optional"_RedisStatus, + })}, + }), + RedisMap({ + {"name", "justid"_RedisString}, + {"type", "pure-token"_RedisString}, + {"display_text", "justid"_RedisString}, + {"token", "JUSTID"_RedisString}, + {"flags", RedisArray({ + "optional"_RedisStatus, + })}, + }), + })}, + })}, + {"lpushx", + RedisMap({ + {"summary", "Prepends one or more elements to a list only when the list exists."_RedisString}, + {"since", "2.2.0"_RedisString}, + {"group", "list"_RedisString}, + {"complexity", + "O(1) for each element added, so O(N) to add N elements when the command is called with multiple arguments."_RedisString}, + {"history", RedisSet({ + RedisArray({"4.0.0"_RedisString, "Accepts multiple `element` arguments."_RedisString}), + })}, + {"arguments", RedisArray({ 
+ RedisMap({ + {"name", "key"_RedisString}, + {"type", "key"_RedisString}, + {"display_text", "key"_RedisString}, + {"key_spec_index", 0_RedisInt}, + }), + RedisMap({ + {"name", "element"_RedisString}, + {"type", "string"_RedisString}, + {"display_text", "element"_RedisString}, + {"flags", RedisArray({ + "multiple"_RedisStatus, + })}, + }), + })}, + })}, + {"sdiffstore", RedisMap({ + {"summary", "Stores the difference of multiple sets in a key."_RedisString}, + {"since", "1.0.0"_RedisString}, + {"group", "set"_RedisString}, + {"complexity", "O(N) where N is the total number of elements in all given sets."_RedisString}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "destination"_RedisString}, + {"type", "key"_RedisString}, + {"display_text", "destination"_RedisString}, + {"key_spec_index", 0_RedisInt}, + }), + RedisMap({ + {"name", "key"_RedisString}, + {"type", "key"_RedisString}, + {"display_text", "key"_RedisString}, + {"key_spec_index", 1_RedisInt}, + {"flags", RedisArray({ + "multiple"_RedisStatus, + })}, + }), + })}, + })}, + {"setrange", + RedisMap({ + {"summary", + "Overwrites a part of a string value with another by an offset. Creates the key if it doesn't exist."_RedisString}, + {"since", "2.2.0"_RedisString}, + {"group", "string"_RedisString}, + {"complexity", + "O(1), not counting the time taken to copy the new string in place. Usually, this string is very small so the amortized complexity is O(1). Otherwise, complexity is O(M) with M being the length of the value argument."_RedisString}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "key"_RedisString}, + {"type", "key"_RedisString}, + {"display_text", "key"_RedisString}, + {"key_spec_index", 0_RedisInt}, + }), + RedisMap({ + {"name", "offset"_RedisString}, + {"type", "integer"_RedisString}, + {"display_text", "offset"_RedisString}, + }), + RedisMap({ + {"name", "value"_RedisString}, + {"type", "string"_RedisString}, + {"display_text", "value"_RedisString}, + }), + })}, + })}, + {"eval_ro", RedisMap({ + {"summary", "Executes a read-only server-side Lua script."_RedisString}, + {"since", "7.0.0"_RedisString}, + {"group", "scripting"_RedisString}, + {"complexity", "Depends on the script that is executed."_RedisString}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "script"_RedisString}, + {"type", "string"_RedisString}, + {"display_text", "script"_RedisString}, + }), + RedisMap({ + {"name", "numkeys"_RedisString}, + {"type", "integer"_RedisString}, + {"display_text", "numkeys"_RedisString}, + }), + RedisMap({ + {"name", "key"_RedisString}, + {"type", "key"_RedisString}, + {"display_text", "key"_RedisString}, + {"key_spec_index", 0_RedisInt}, + {"flags", RedisArray({ + "optional"_RedisStatus, + "multiple"_RedisStatus, + })}, + }), + RedisMap({ + {"name", "arg"_RedisString}, + {"type", "string"_RedisString}, + {"display_text", "arg"_RedisString}, + {"flags", RedisArray({ + "optional"_RedisStatus, + "multiple"_RedisStatus, + })}, + }), + })}, + })}, + {"bgsave", RedisMap({ + {"summary", "Asynchronously saves the database(s) to disk."_RedisString}, + {"since", "1.0.0"_RedisString}, + {"group", "server"_RedisString}, + {"complexity", "O(1)"_RedisString}, + {"history", RedisSet({ + RedisArray({"3.2.2"_RedisString, "Added the `SCHEDULE` option."_RedisString}), + })}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "schedule"_RedisString}, + {"type", "pure-token"_RedisString}, + {"display_text", "schedule"_RedisString}, + {"token", "SCHEDULE"_RedisString}, + {"since", "3.2.2"_RedisString}, + {"flags", 
RedisArray({ + "optional"_RedisStatus, + })}, + }), + })}, + })}, + {"discard", RedisMap({ + {"summary", "Discards a transaction."_RedisString}, + {"since", "2.0.0"_RedisString}, + {"group", "transactions"_RedisString}, + {"complexity", "O(N), when N is the number of queued commands"_RedisString}, + })}, + {"psync", RedisMap({ + {"summary", "An internal command used in replication."_RedisString}, + {"since", "2.8.0"_RedisString}, + {"group", "server"_RedisString}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "replicationid"_RedisString}, + {"type", "string"_RedisString}, + {"display_text", "replicationid"_RedisString}, + }), + RedisMap({ + {"name", "offset"_RedisString}, + {"type", "integer"_RedisString}, + {"display_text", "offset"_RedisString}, + }), + })}, + })}, + {"keys", + RedisMap({ + {"summary", "Returns all key names that match a pattern."_RedisString}, + {"since", "1.0.0"_RedisString}, + {"group", "generic"_RedisString}, + {"complexity", + "O(N) with N being the number of keys in the database, under the assumption that the key names in the database and the given pattern have limited length."_RedisString}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "pattern"_RedisString}, + {"type", "pattern"_RedisString}, + {"display_text", "pattern"_RedisString}, + }), + })}, + })}, + {"flushall", + RedisMap({ + {"summary", "Removes all keys from all databases."_RedisString}, + {"since", "1.0.0"_RedisString}, + {"group", "server"_RedisString}, + {"complexity", "O(N) where N is the total number of keys in all databases"_RedisString}, + {"history", RedisSet({ + RedisArray({"4.0.0"_RedisString, "Added the `ASYNC` flushing mode modifier."_RedisString}), + RedisArray({"6.2.0"_RedisString, "Added the `SYNC` flushing mode modifier."_RedisString}), + })}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "flush-type"_RedisString}, + {"type", "oneof"_RedisString}, + {"flags", RedisArray({ + "optional"_RedisStatus, + })}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "async"_RedisString}, + {"type", "pure-token"_RedisString}, + {"display_text", "async"_RedisString}, + {"token", "ASYNC"_RedisString}, + {"since", "4.0.0"_RedisString}, + }), + RedisMap({ + {"name", "sync"_RedisString}, + {"type", "pure-token"_RedisString}, + {"display_text", "sync"_RedisString}, + {"token", "SYNC"_RedisString}, + {"since", "6.2.0"_RedisString}, + }), + })}, + }), + })}, + })}, + {"incrbyfloat", + RedisMap({ + {"summary", + "Increments the floating point value of a key by a number. 
Uses 0 as initial value if the key doesn't exist."_RedisString}, + {"since", "2.6.0"_RedisString}, + {"group", "string"_RedisString}, + {"complexity", "O(1)"_RedisString}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "key"_RedisString}, + {"type", "key"_RedisString}, + {"display_text", "key"_RedisString}, + {"key_spec_index", 0_RedisInt}, + }), + RedisMap({ + {"name", "increment"_RedisString}, + {"type", "double"_RedisString}, + {"display_text", "increment"_RedisString}, + }), + })}, + })}, + {"expireat", + RedisMap({ + {"summary", "Sets the expiration time of a key to a Unix timestamp."_RedisString}, + {"since", "1.2.0"_RedisString}, + {"group", "generic"_RedisString}, + {"complexity", "O(1)"_RedisString}, + {"history", RedisSet({ + RedisArray({"7.0.0"_RedisString, "Added options: `NX`, `XX`, `GT` and `LT`."_RedisString}), + })}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "key"_RedisString}, + {"type", "key"_RedisString}, + {"display_text", "key"_RedisString}, + {"key_spec_index", 0_RedisInt}, + }), + RedisMap({ + {"name", "unix-time-seconds"_RedisString}, + {"type", "unix-time"_RedisString}, + {"display_text", "unix-time-seconds"_RedisString}, + }), + RedisMap({ + {"name", "condition"_RedisString}, + {"type", "oneof"_RedisString}, + {"since", "7.0.0"_RedisString}, + {"flags", RedisArray({ + "optional"_RedisStatus, + })}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "nx"_RedisString}, + {"type", "pure-token"_RedisString}, + {"display_text", "nx"_RedisString}, + {"token", "NX"_RedisString}, + }), + RedisMap({ + {"name", "xx"_RedisString}, + {"type", "pure-token"_RedisString}, + {"display_text", "xx"_RedisString}, + {"token", "XX"_RedisString}, + }), + RedisMap({ + {"name", "gt"_RedisString}, + {"type", "pure-token"_RedisString}, + {"display_text", "gt"_RedisString}, + {"token", "GT"_RedisString}, + }), + RedisMap({ + {"name", "lt"_RedisString}, + {"type", "pure-token"_RedisString}, + {"display_text", "lt"_RedisString}, + {"token", "LT"_RedisString}, + }), + })}, + }), + })}, + })}, + {"zunion", + RedisMap({ + {"summary", "Returns the union of multiple sorted sets."_RedisString}, + {"since", "6.2.0"_RedisString}, + {"group", "sorted-set"_RedisString}, + {"complexity", + "O(N)+O(M*log(M)) with N being the sum of the sizes of the input sorted sets, and M being the number of elements in the resulting sorted set."_RedisString}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "numkeys"_RedisString}, + {"type", "integer"_RedisString}, + {"display_text", "numkeys"_RedisString}, + }), + RedisMap({ + {"name", "key"_RedisString}, + {"type", "key"_RedisString}, + {"display_text", "key"_RedisString}, + {"key_spec_index", 0_RedisInt}, + {"flags", RedisArray({ + "multiple"_RedisStatus, + })}, + }), + RedisMap({ + {"name", "weight"_RedisString}, + {"type", "integer"_RedisString}, + {"display_text", "weight"_RedisString}, + {"token", "WEIGHTS"_RedisString}, + {"flags", RedisArray({ + "optional"_RedisStatus, + "multiple"_RedisStatus, + })}, + }), + RedisMap({ + {"name", "aggregate"_RedisString}, + {"type", "oneof"_RedisString}, + {"token", "AGGREGATE"_RedisString}, + {"flags", RedisArray({ + "optional"_RedisStatus, + })}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "sum"_RedisString}, + {"type", "pure-token"_RedisString}, + {"display_text", "sum"_RedisString}, + {"token", "SUM"_RedisString}, + }), + RedisMap({ + {"name", "min"_RedisString}, + {"type", "pure-token"_RedisString}, + {"display_text", "min"_RedisString}, + {"token", "MIN"_RedisString}, + }), + RedisMap({ + 
{"name", "max"_RedisString}, + {"type", "pure-token"_RedisString}, + {"display_text", "max"_RedisString}, + {"token", "MAX"_RedisString}, + }), + })}, + }), + RedisMap({ + {"name", "withscores"_RedisString}, + {"type", "pure-token"_RedisString}, + {"display_text", "withscores"_RedisString}, + {"token", "WITHSCORES"_RedisString}, + {"flags", RedisArray({ + "optional"_RedisStatus, + })}, + }), + })}, + })}, + {"monitor", RedisMap({ + {"summary", "Listens for all requests received by the server in real-time."_RedisString}, + {"since", "1.0.0"_RedisString}, + {"group", "server"_RedisString}, + })}, + {"substr", + RedisMap({ + {"summary", "Returns a substring from a string value."_RedisString}, + {"since", "1.0.0"_RedisString}, + {"group", "string"_RedisString}, + {"complexity", + "O(N) where N is the length of the returned string. The complexity is ultimately determined by the returned length, but because creating a substring from an existing string is very cheap, it can be considered O(1) for small strings."_RedisString}, + {"doc_flags", RedisSet({ + "deprecated"_RedisStatus, + })}, + {"deprecated_since", "2.0.0"_RedisString}, + {"replaced_by", "`GETRANGE`"_RedisString}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "key"_RedisString}, + {"type", "key"_RedisString}, + {"display_text", "key"_RedisString}, + {"key_spec_index", 0_RedisInt}, + }), + RedisMap({ + {"name", "start"_RedisString}, + {"type", "integer"_RedisString}, + {"display_text", "start"_RedisString}, + }), + RedisMap({ + {"name", "end"_RedisString}, + {"type", "integer"_RedisString}, + {"display_text", "end"_RedisString}, + }), + })}, + })}, + {"setex", + RedisMap({ + {"summary", + "Sets the string value and expiration time of a key. Creates the key if it doesn't exist."_RedisString}, + {"since", "2.0.0"_RedisString}, + {"group", "string"_RedisString}, + {"complexity", "O(1)"_RedisString}, + {"doc_flags", RedisSet({ + "deprecated"_RedisStatus, + })}, + {"deprecated_since", "2.6.12"_RedisString}, + {"replaced_by", "`SET` with the `EX` argument"_RedisString}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "key"_RedisString}, + {"type", "key"_RedisString}, + {"display_text", "key"_RedisString}, + {"key_spec_index", 0_RedisInt}, + }), + RedisMap({ + {"name", "seconds"_RedisString}, + {"type", "integer"_RedisString}, + {"display_text", "seconds"_RedisString}, + }), + RedisMap({ + {"name", "value"_RedisString}, + {"type", "string"_RedisString}, + {"display_text", "value"_RedisString}, + }), + })}, + })}, + {"pfselftest", RedisMap({ + {"summary", "An internal command for testing HyperLogLog values."_RedisString}, + {"since", "2.8.9"_RedisString}, + {"group", "hyperloglog"_RedisString}, + {"complexity", "N/A"_RedisString}, + {"doc_flags", RedisSet({ + "syscmd"_RedisStatus, + })}, + })}, + {"blpop", + RedisMap({ + {"summary", + "Removes and returns the first element in a list. Blocks until an element is available otherwise. 
Deletes the list if the last element was popped."_RedisString}, + {"since", "2.0.0"_RedisString}, + {"group", "list"_RedisString}, + {"complexity", "O(N) where N is the number of provided keys."_RedisString}, + {"history", RedisSet({ + RedisArray({"6.0.0"_RedisString, + "`timeout` is interpreted as a double instead of an integer."_RedisString}), + })}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "key"_RedisString}, + {"type", "key"_RedisString}, + {"display_text", "key"_RedisString}, + {"key_spec_index", 0_RedisInt}, + {"flags", RedisArray({ + "multiple"_RedisStatus, + })}, + }), + RedisMap({ + {"name", "timeout"_RedisString}, + {"type", "double"_RedisString}, + {"display_text", "timeout"_RedisString}, + }), + })}, + })}, + {"ssubscribe", RedisMap({ + {"summary", "Listens for messages published to shard channels."_RedisString}, + {"since", "7.0.0"_RedisString}, + {"group", "pubsub"_RedisString}, + {"complexity", "O(N) where N is the number of shard channels to subscribe to."_RedisString}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "shardchannel"_RedisString}, + {"type", "string"_RedisString}, + {"display_text", "shardchannel"_RedisString}, + {"flags", RedisArray({ + "multiple"_RedisStatus, + })}, + }), + })}, + })}, + {"rpush", + RedisMap({ + {"summary", "Appends one or more elements to a list. Creates the key if it doesn't exist."_RedisString}, + {"since", "1.0.0"_RedisString}, + {"group", "list"_RedisString}, + {"complexity", + "O(1) for each element added, so O(N) to add N elements when the command is called with multiple arguments."_RedisString}, + {"history", RedisSet({ + RedisArray({"2.4.0"_RedisString, "Accepts multiple `element` arguments."_RedisString}), + })}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "key"_RedisString}, + {"type", "key"_RedisString}, + {"display_text", "key"_RedisString}, + {"key_spec_index", 0_RedisInt}, + }), + RedisMap({ + {"name", "element"_RedisString}, + {"type", "string"_RedisString}, + {"display_text", "element"_RedisString}, + {"flags", RedisArray({ + "multiple"_RedisStatus, + })}, + }), + })}, + })}, + {"sdiff", RedisMap({ + {"summary", "Returns the difference of multiple sets."_RedisString}, + {"since", "1.0.0"_RedisString}, + {"group", "set"_RedisString}, + {"complexity", "O(N) where N is the total number of elements in all given sets."_RedisString}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "key"_RedisString}, + {"type", "key"_RedisString}, + {"display_text", "key"_RedisString}, + {"key_spec_index", 0_RedisInt}, + {"flags", RedisArray({ + "multiple"_RedisStatus, + })}, + }), + })}, + })}, + {"geosearchstore", + RedisMap({ + {"summary", + "Queries a geospatial index for members inside an area of a box or a circle, optionally stores the result."_RedisString}, + {"since", "6.2.0"_RedisString}, + {"group", "geo"_RedisString}, + {"complexity", + "O(N+log(M)) where N is the number of elements in the grid-aligned bounding box area around the shape provided as the filter and M is the number of items inside the shape"_RedisString}, + {"history", RedisSet({ + RedisArray({"7.0.0"_RedisString, "Added support for uppercase unit names."_RedisString}), + })}, + {"arguments", + RedisArray({ + RedisMap({ + {"name", "destination"_RedisString}, + {"type", "key"_RedisString}, + {"display_text", "destination"_RedisString}, + {"key_spec_index", 0_RedisInt}, + }), + RedisMap({ + {"name", "source"_RedisString}, + {"type", "key"_RedisString}, + {"display_text", "source"_RedisString}, + {"key_spec_index", 1_RedisInt}, + }), + 
RedisMap({ + {"name", "from"_RedisString}, + {"type", "oneof"_RedisString}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "member"_RedisString}, + {"type", "string"_RedisString}, + {"display_text", "member"_RedisString}, + {"token", "FROMMEMBER"_RedisString}, + }), + RedisMap({ + {"name", "fromlonlat"_RedisString}, + {"type", "block"_RedisString}, + {"token", "FROMLONLAT"_RedisString}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "longitude"_RedisString}, + {"type", "double"_RedisString}, + {"display_text", "longitude"_RedisString}, + }), + RedisMap({ + {"name", "latitude"_RedisString}, + {"type", "double"_RedisString}, + {"display_text", "latitude"_RedisString}, + }), + })}, + }), + })}, + }), + RedisMap({ + {"name", "by"_RedisString}, + {"type", "oneof"_RedisString}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "circle"_RedisString}, + {"type", "block"_RedisString}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "radius"_RedisString}, + {"type", "double"_RedisString}, + {"display_text", "radius"_RedisString}, + {"token", "BYRADIUS"_RedisString}, + }), + RedisMap({ + {"name", "unit"_RedisString}, + {"type", "oneof"_RedisString}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "m"_RedisString}, + {"type", "pure-token"_RedisString}, + {"display_text", "m"_RedisString}, + {"token", "M"_RedisString}, + }), + RedisMap({ + {"name", "km"_RedisString}, + {"type", "pure-token"_RedisString}, + {"display_text", "km"_RedisString}, + {"token", "KM"_RedisString}, + }), + RedisMap({ + {"name", "ft"_RedisString}, + {"type", "pure-token"_RedisString}, + {"display_text", "ft"_RedisString}, + {"token", "FT"_RedisString}, + }), + RedisMap({ + {"name", "mi"_RedisString}, + {"type", "pure-token"_RedisString}, + {"display_text", "mi"_RedisString}, + {"token", "MI"_RedisString}, + }), + })}, + }), + })}, + }), + RedisMap({ + {"name", "box"_RedisString}, + {"type", "block"_RedisString}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "width"_RedisString}, + {"type", "double"_RedisString}, + {"display_text", "width"_RedisString}, + {"token", "BYBOX"_RedisString}, + }), + RedisMap({ + {"name", "height"_RedisString}, + {"type", "double"_RedisString}, + {"display_text", "height"_RedisString}, + }), + RedisMap({ + {"name", "unit"_RedisString}, + {"type", "oneof"_RedisString}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "m"_RedisString}, + {"type", "pure-token"_RedisString}, + {"display_text", "m"_RedisString}, + {"token", "M"_RedisString}, + }), + RedisMap({ + {"name", "km"_RedisString}, + {"type", "pure-token"_RedisString}, + {"display_text", "km"_RedisString}, + {"token", "KM"_RedisString}, + }), + RedisMap({ + {"name", "ft"_RedisString}, + {"type", "pure-token"_RedisString}, + {"display_text", "ft"_RedisString}, + {"token", "FT"_RedisString}, + }), + RedisMap({ + {"name", "mi"_RedisString}, + {"type", "pure-token"_RedisString}, + {"display_text", "mi"_RedisString}, + {"token", "MI"_RedisString}, + }), + })}, + }), + })}, + }), + })}, + }), + RedisMap({ + {"name", "order"_RedisString}, + {"type", "oneof"_RedisString}, + {"flags", RedisArray({ + "optional"_RedisStatus, + })}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "asc"_RedisString}, + {"type", "pure-token"_RedisString}, + {"display_text", "asc"_RedisString}, + {"token", "ASC"_RedisString}, + }), + RedisMap({ + {"name", "desc"_RedisString}, + {"type", "pure-token"_RedisString}, + {"display_text", "desc"_RedisString}, + {"token", "DESC"_RedisString}, + }), + })}, + }), + RedisMap({ + {"name", 
"count-block"_RedisString}, + {"type", "block"_RedisString}, + {"flags", RedisArray({ + "optional"_RedisStatus, + })}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "count"_RedisString}, + {"type", "integer"_RedisString}, + {"display_text", "count"_RedisString}, + {"token", "COUNT"_RedisString}, + }), + RedisMap({ + {"name", "any"_RedisString}, + {"type", "pure-token"_RedisString}, + {"display_text", "any"_RedisString}, + {"token", "ANY"_RedisString}, + {"flags", RedisArray({ + "optional"_RedisStatus, + })}, + }), + })}, + }), + RedisMap({ + {"name", "storedist"_RedisString}, + {"type", "pure-token"_RedisString}, + {"display_text", "storedist"_RedisString}, + {"token", "STOREDIST"_RedisString}, + {"flags", RedisArray({ + "optional"_RedisStatus, + })}, + }), + })}, + })}, + {"zmscore", RedisMap({ + {"summary", "Returns the score of one or more members in a sorted set."_RedisString}, + {"since", "6.2.0"_RedisString}, + {"group", "sorted-set"_RedisString}, + {"complexity", "O(N) where N is the number of members being requested."_RedisString}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "key"_RedisString}, + {"type", "key"_RedisString}, + {"display_text", "key"_RedisString}, + {"key_spec_index", 0_RedisInt}, + }), + RedisMap({ + {"name", "member"_RedisString}, + {"type", "string"_RedisString}, + {"display_text", "member"_RedisString}, + {"flags", RedisArray({ + "multiple"_RedisStatus, + })}, + }), + })}, + })}, + {"smismember", + RedisMap({ + {"summary", "Determines whether multiple members belong to a set."_RedisString}, + {"since", "6.2.0"_RedisString}, + {"group", "set"_RedisString}, + {"complexity", "O(N) where N is the number of elements being checked for membership"_RedisString}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "key"_RedisString}, + {"type", "key"_RedisString}, + {"display_text", "key"_RedisString}, + {"key_spec_index", 0_RedisInt}, + }), + RedisMap({ + {"name", "member"_RedisString}, + {"type", "string"_RedisString}, + {"display_text", "member"_RedisString}, + {"flags", RedisArray({ + "multiple"_RedisStatus, + })}, + }), + })}, + })}, + {"append", + RedisMap({ + {"summary", "Appends a string to the value of a key. Creates the key if it doesn't exist."_RedisString}, + {"since", "2.0.0"_RedisString}, + {"group", "string"_RedisString}, + {"complexity", + "O(1). The amortized time complexity is O(1) assuming the appended value is small and the already present value is of any size, since the dynamic string library used by Redis will double the free space available on every reallocation."_RedisString}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "key"_RedisString}, + {"type", "key"_RedisString}, + {"display_text", "key"_RedisString}, + {"key_spec_index", 0_RedisInt}, + }), + RedisMap({ + {"name", "value"_RedisString}, + {"type", "string"_RedisString}, + {"display_text", "value"_RedisString}, + }), + })}, + })}, + {"zrangebylex", RedisMap({ + {"summary", "Returns members in a sorted set within a lexicographical range."_RedisString}, + {"since", "2.8.9"_RedisString}, + {"group", "sorted-set"_RedisString}, + {"complexity", + "O(log(N)+M) with N being the number of elements in the sorted set and M the number of elements being returned. If M is constant (e.g. 
always asking for the first 10 elements with LIMIT), you can consider it O(log(N))."_RedisString}, + {"doc_flags", RedisSet({ + "deprecated"_RedisStatus, + })}, + {"deprecated_since", "6.2.0"_RedisString}, + {"replaced_by", "`ZRANGE` with the `BYLEX` argument"_RedisString}, + {"arguments", RedisArray( + { + RedisMap({ + {"name", "key"_RedisString}, + {"type", "key"_RedisString}, + {"display_text", "key"_RedisString}, + {"key_spec_index", 0_RedisInt}, + }), + RedisMap({ + {"name", "min"_RedisString}, + {"type", "string"_RedisString}, + {"display_text", "min"_RedisString}, + }), + RedisMap({ + {"name", "max"_RedisString}, + {"type", "string"_RedisString}, + {"display_text", "max"_RedisString}, + }), + RedisMap({ + {"name", "limit"_RedisString}, + {"type", "block"_RedisString}, + {"token", "LIMIT"_RedisString}, + {"flags", RedisArray({ + "optional"_RedisStatus, + })}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "offset"_RedisString}, + {"type", "integer"_RedisString}, + {"display_text", "offset"_RedisString}, + }), + RedisMap({ + {"name", "count"_RedisString}, + {"type", "integer"_RedisString}, + {"display_text", "count"_RedisString}, + }), + })}, + }), + })}, + })}, + {"eval", + RedisMap({ + {"summary", "Executes a server-side Lua script."_RedisString}, + {"since", "2.6.0"_RedisString}, + {"group", "scripting"_RedisString}, + {"complexity", "Depends on the script that is executed."_RedisString}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "script"_RedisString}, + {"type", "string"_RedisString}, + {"display_text", "script"_RedisString}, + }), + RedisMap({ + {"name", "numkeys"_RedisString}, + {"type", "integer"_RedisString}, + {"display_text", "numkeys"_RedisString}, + }), + RedisMap({ + {"name", "key"_RedisString}, + {"type", "key"_RedisString}, + {"display_text", "key"_RedisString}, + {"key_spec_index", 0_RedisInt}, + {"flags", RedisArray({ + "optional"_RedisStatus, + "multiple"_RedisStatus, + })}, + }), + RedisMap({ + {"name", "arg"_RedisString}, + {"type", "string"_RedisString}, + {"display_text", "arg"_RedisString}, + {"flags", RedisArray({ + "optional"_RedisStatus, + "multiple"_RedisStatus, + })}, + }), + })}, + })}, + {"slaveof", + RedisMap({ + {"summary", "Sets a Redis server as a replica of another, or promotes it to being a master."_RedisString}, + {"since", "1.0.0"_RedisString}, + {"group", "server"_RedisString}, + {"complexity", "O(1)"_RedisString}, + {"doc_flags", RedisSet({ + "deprecated"_RedisStatus, + })}, + {"deprecated_since", "5.0.0"_RedisString}, + {"replaced_by", "`REPLICAOF`"_RedisString}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "host"_RedisString}, + {"type", "string"_RedisString}, + {"display_text", "host"_RedisString}, + }), + RedisMap({ + {"name", "port"_RedisString}, + {"type", "integer"_RedisString}, + {"display_text", "port"_RedisString}, + }), + })}, + })}, + {"reset", RedisMap({ + {"summary", "Resets the connection."_RedisString}, + {"since", "6.2.0"_RedisString}, + {"group", "connection"_RedisString}, + {"complexity", "O(1)"_RedisString}, + })}, + {"zinter", + RedisMap({ + {"summary", "Returns the intersect of multiple sorted sets."_RedisString}, + {"since", "6.2.0"_RedisString}, + {"group", "sorted-set"_RedisString}, + {"complexity", + "O(N*K)+O(M*log(M)) worst case with N being the smallest input sorted set, K being the number of input sorted sets and M being the number of elements in the resulting sorted set."_RedisString}, + {"arguments", + RedisArray({ + RedisMap({ + {"name", "numkeys"_RedisString}, + {"type", 
"integer"_RedisString}, + {"display_text", "numkeys"_RedisString}, + }), + RedisMap({ + {"name", "key"_RedisString}, + {"type", "key"_RedisString}, + {"display_text", "key"_RedisString}, + {"key_spec_index", 0_RedisInt}, + {"flags", RedisArray({ + "multiple"_RedisStatus, + })}, + }), + RedisMap({ + {"name", "weight"_RedisString}, + {"type", "integer"_RedisString}, + {"display_text", "weight"_RedisString}, + {"token", "WEIGHTS"_RedisString}, + {"flags", RedisArray({ + "optional"_RedisStatus, + "multiple"_RedisStatus, + })}, + }), + RedisMap({ + {"name", "aggregate"_RedisString}, + {"type", "oneof"_RedisString}, + {"token", "AGGREGATE"_RedisString}, + {"flags", RedisArray({ + "optional"_RedisStatus, + })}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "sum"_RedisString}, + {"type", "pure-token"_RedisString}, + {"display_text", "sum"_RedisString}, + {"token", "SUM"_RedisString}, + }), + RedisMap({ + {"name", "min"_RedisString}, + {"type", "pure-token"_RedisString}, + {"display_text", "min"_RedisString}, + {"token", "MIN"_RedisString}, + }), + RedisMap({ + {"name", "max"_RedisString}, + {"type", "pure-token"_RedisString}, + {"display_text", "max"_RedisString}, + {"token", "MAX"_RedisString}, + }), + })}, + }), + RedisMap({ + {"name", "withscores"_RedisString}, + {"type", "pure-token"_RedisString}, + {"display_text", "withscores"_RedisString}, + {"token", "WITHSCORES"_RedisString}, + {"flags", RedisArray({ + "optional"_RedisStatus, + })}, + }), + })}, + })}, + {"pexpire", + RedisMap({ + {"summary", "Sets the expiration time of a key in milliseconds."_RedisString}, + {"since", "2.6.0"_RedisString}, + {"group", "generic"_RedisString}, + {"complexity", "O(1)"_RedisString}, + {"history", RedisSet({ + RedisArray({"7.0.0"_RedisString, "Added options: `NX`, `XX`, `GT` and `LT`."_RedisString}), + })}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "key"_RedisString}, + {"type", "key"_RedisString}, + {"display_text", "key"_RedisString}, + {"key_spec_index", 0_RedisInt}, + }), + RedisMap({ + {"name", "milliseconds"_RedisString}, + {"type", "integer"_RedisString}, + {"display_text", "milliseconds"_RedisString}, + }), + RedisMap({ + {"name", "condition"_RedisString}, + {"type", "oneof"_RedisString}, + {"since", "7.0.0"_RedisString}, + {"flags", RedisArray({ + "optional"_RedisStatus, + })}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "nx"_RedisString}, + {"type", "pure-token"_RedisString}, + {"display_text", "nx"_RedisString}, + {"token", "NX"_RedisString}, + }), + RedisMap({ + {"name", "xx"_RedisString}, + {"type", "pure-token"_RedisString}, + {"display_text", "xx"_RedisString}, + {"token", "XX"_RedisString}, + }), + RedisMap({ + {"name", "gt"_RedisString}, + {"type", "pure-token"_RedisString}, + {"display_text", "gt"_RedisString}, + {"token", "GT"_RedisString}, + }), + RedisMap({ + {"name", "lt"_RedisString}, + {"type", "pure-token"_RedisString}, + {"display_text", "lt"_RedisString}, + {"token", "LT"_RedisString}, + }), + })}, + }), + })}, + })}, + {"command", + RedisMap({ + {"summary", "Returns detailed information about all commands."_RedisString}, + {"since", "2.8.13"_RedisString}, + {"group", "server"_RedisString}, + {"complexity", "O(N) where N is the total number of Redis commands"_RedisString}, + {"subcommands", + RedisMap({ + {"command|getkeys", + RedisMap({ + {"summary", "Extracts the key names from an arbitrary command."_RedisString}, + {"since", "2.8.13"_RedisString}, + {"group", "server"_RedisString}, + {"complexity", "O(N) where N is the number of arguments to the 
command"_RedisString}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "command"_RedisString}, + {"type", "string"_RedisString}, + {"display_text", "command"_RedisString}, + }), + RedisMap({ + {"name", "arg"_RedisString}, + {"type", "string"_RedisString}, + {"display_text", "arg"_RedisString}, + {"flags", RedisArray({ + "optional"_RedisStatus, + "multiple"_RedisStatus, + })}, + }), + })}, + })}, + {"command|docs", + RedisMap({ + {"summary", "Returns documentary information about one, multiple or all commands."_RedisString}, + {"since", "7.0.0"_RedisString}, + {"group", "server"_RedisString}, + {"complexity", "O(N) where N is the number of commands to look up"_RedisString}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "command-name"_RedisString}, + {"type", "string"_RedisString}, + {"display_text", "command-name"_RedisString}, + {"flags", RedisArray({ + "optional"_RedisStatus, + "multiple"_RedisStatus, + })}, + }), + })}, + })}, + {"command|count", RedisMap({ + {"summary", "Returns a count of commands."_RedisString}, + {"since", "2.8.13"_RedisString}, + {"group", "server"_RedisString}, + {"complexity", "O(1)"_RedisString}, + })}, + {"command|getkeysandflags", + RedisMap({ + {"summary", "Extracts the key names and access flags for an arbitrary command."_RedisString}, + {"since", "7.0.0"_RedisString}, + {"group", "server"_RedisString}, + {"complexity", "O(N) where N is the number of arguments to the command"_RedisString}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "command"_RedisString}, + {"type", "string"_RedisString}, + {"display_text", "command"_RedisString}, + }), + RedisMap({ + {"name", "arg"_RedisString}, + {"type", "string"_RedisString}, + {"display_text", "arg"_RedisString}, + {"flags", RedisArray({ + "optional"_RedisStatus, + "multiple"_RedisStatus, + })}, + }), + })}, + })}, + {"command|info", + RedisMap({ + {"summary", "Returns information about one, multiple or all commands."_RedisString}, + {"since", "2.8.13"_RedisString}, + {"group", "server"_RedisString}, + {"complexity", "O(N) where N is the number of commands to look up"_RedisString}, + {"history", + RedisSet({ + RedisArray({"7.0.0"_RedisString, + "Allowed to be called with no argument to get info on all commands."_RedisString}), + })}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "command-name"_RedisString}, + {"type", "string"_RedisString}, + {"display_text", "command-name"_RedisString}, + {"flags", RedisArray({ + "optional"_RedisStatus, + "multiple"_RedisStatus, + })}, + }), + })}, + })}, + {"command|list", + RedisMap({ + {"summary", "Returns a list of command names."_RedisString}, + {"since", "7.0.0"_RedisString}, + {"group", "server"_RedisString}, + {"complexity", "O(N) where N is the total number of Redis commands"_RedisString}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "filterby"_RedisString}, + {"type", "oneof"_RedisString}, + {"token", "FILTERBY"_RedisString}, + {"flags", RedisArray({ + "optional"_RedisStatus, + })}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "module-name"_RedisString}, + {"type", "string"_RedisString}, + {"display_text", "module-name"_RedisString}, + {"token", "MODULE"_RedisString}, + }), + RedisMap({ + {"name", "category"_RedisString}, + {"type", "string"_RedisString}, + {"display_text", "category"_RedisString}, + {"token", "ACLCAT"_RedisString}, + }), + RedisMap({ + {"name", "pattern"_RedisString}, + {"type", "pattern"_RedisString}, + {"display_text", "pattern"_RedisString}, + {"token", "PATTERN"_RedisString}, + }), + })}, + }), + })}, + })}, + 
{"command|help", + RedisMap({ + {"summary", "Returns helpful text about the different subcommands."_RedisString}, + {"since", "5.0.0"_RedisString}, + {"group", "server"_RedisString}, + {"complexity", "O(1)"_RedisString}, + })}, + })}, + })}, + {"lrange", + RedisMap({ + {"summary", "Returns a range of elements from a list."_RedisString}, + {"since", "1.0.0"_RedisString}, + {"group", "list"_RedisString}, + {"complexity", + "O(S+N) where S is the distance of start offset from HEAD for small lists, from nearest end (HEAD or TAIL) for large lists; and N is the number of elements in the specified range."_RedisString}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "key"_RedisString}, + {"type", "key"_RedisString}, + {"display_text", "key"_RedisString}, + {"key_spec_index", 0_RedisInt}, + }), + RedisMap({ + {"name", "start"_RedisString}, + {"type", "integer"_RedisString}, + {"display_text", "start"_RedisString}, + }), + RedisMap({ + {"name", "stop"_RedisString}, + {"type", "integer"_RedisString}, + {"display_text", "stop"_RedisString}, + }), + })}, + })}, + {"lindex", + RedisMap({ + {"summary", "Returns an element from a list by its index."_RedisString}, + {"since", "1.0.0"_RedisString}, + {"group", "list"_RedisString}, + {"complexity", + "O(N) where N is the number of elements to traverse to get to the element at index. This makes asking for the first or the last element of the list O(1)."_RedisString}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "key"_RedisString}, + {"type", "key"_RedisString}, + {"display_text", "key"_RedisString}, + {"key_spec_index", 0_RedisInt}, + }), + RedisMap({ + {"name", "index"_RedisString}, + {"type", "integer"_RedisString}, + {"display_text", "index"_RedisString}, + }), + })}, + })}, + {"blmove", + RedisMap({ + {"summary", + "Pops an element from a list, pushes it to another list and returns it. Blocks until an element is available otherwise. 
Deletes the list if the last element was moved."_RedisString}, + {"since", "6.2.0"_RedisString}, + {"group", "list"_RedisString}, + {"complexity", "O(1)"_RedisString}, + {"arguments", + RedisArray({ + RedisMap({ + {"name", "source"_RedisString}, + {"type", "key"_RedisString}, + {"display_text", "source"_RedisString}, + {"key_spec_index", 0_RedisInt}, + }), + RedisMap({ + {"name", "destination"_RedisString}, + {"type", "key"_RedisString}, + {"display_text", "destination"_RedisString}, + {"key_spec_index", 1_RedisInt}, + }), + RedisMap({ + {"name", "wherefrom"_RedisString}, + {"type", "oneof"_RedisString}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "left"_RedisString}, + {"type", "pure-token"_RedisString}, + {"display_text", "left"_RedisString}, + {"token", "LEFT"_RedisString}, + }), + RedisMap({ + {"name", "right"_RedisString}, + {"type", "pure-token"_RedisString}, + {"display_text", "right"_RedisString}, + {"token", "RIGHT"_RedisString}, + }), + })}, + }), + RedisMap({ + {"name", "whereto"_RedisString}, + {"type", "oneof"_RedisString}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "left"_RedisString}, + {"type", "pure-token"_RedisString}, + {"display_text", "left"_RedisString}, + {"token", "LEFT"_RedisString}, + }), + RedisMap({ + {"name", "right"_RedisString}, + {"type", "pure-token"_RedisString}, + {"display_text", "right"_RedisString}, + {"token", "RIGHT"_RedisString}, + }), + })}, + }), + RedisMap({ + {"name", "timeout"_RedisString}, + {"type", "double"_RedisString}, + {"display_text", "timeout"_RedisString}, + }), + })}, + })}, + {"ttl", + RedisMap({ + {"summary", "Returns the expiration time in seconds of a key."_RedisString}, + {"since", "1.0.0"_RedisString}, + {"group", "generic"_RedisString}, + {"complexity", "O(1)"_RedisString}, + {"history", RedisSet({ + RedisArray({"2.8.0"_RedisString, "Added the -2 reply."_RedisString}), + })}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "key"_RedisString}, + {"type", "key"_RedisString}, + {"display_text", "key"_RedisString}, + {"key_spec_index", 0_RedisInt}, + }), + })}, + })}, + {"xread", + RedisMap({ + {"summary", + "Returns messages from multiple streams with IDs greater than the ones requested. 
Blocks until a message is available otherwise."_RedisString}, + {"since", "5.0.0"_RedisString}, + {"group", "stream"_RedisString}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "count"_RedisString}, + {"type", "integer"_RedisString}, + {"display_text", "count"_RedisString}, + {"token", "COUNT"_RedisString}, + {"flags", RedisArray({ + "optional"_RedisStatus, + })}, + }), + RedisMap({ + {"name", "milliseconds"_RedisString}, + {"type", "integer"_RedisString}, + {"display_text", "milliseconds"_RedisString}, + {"token", "BLOCK"_RedisString}, + {"flags", RedisArray({ + "optional"_RedisStatus, + })}, + }), + RedisMap({ + {"name", "streams"_RedisString}, + {"type", "block"_RedisString}, + {"token", "STREAMS"_RedisString}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "key"_RedisString}, + {"type", "key"_RedisString}, + {"display_text", "key"_RedisString}, + {"key_spec_index", 0_RedisInt}, + {"flags", RedisArray({ + "multiple"_RedisStatus, + })}, + }), + RedisMap({ + {"name", "id"_RedisString}, + {"type", "string"_RedisString}, + {"display_text", "id"_RedisString}, + {"flags", RedisArray({ + "multiple"_RedisStatus, + })}, + }), + })}, + }), + })}, + })}, + {"xgroup", + RedisMap({ + {"summary", "A container for consumer groups commands."_RedisString}, + {"since", "5.0.0"_RedisString}, + {"group", "stream"_RedisString}, + {"complexity", "Depends on subcommand."_RedisString}, + {"subcommands", + RedisMap({ + {"xgroup|delconsumer", RedisMap({ + {"summary", "Deletes a consumer from a consumer group."_RedisString}, + {"since", "5.0.0"_RedisString}, + {"group", "stream"_RedisString}, + {"complexity", "O(1)"_RedisString}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "key"_RedisString}, + {"type", "key"_RedisString}, + {"display_text", "key"_RedisString}, + {"key_spec_index", 0_RedisInt}, + }), + RedisMap({ + {"name", "group"_RedisString}, + {"type", "string"_RedisString}, + {"display_text", "group"_RedisString}, + }), + RedisMap({ + {"name", "consumer"_RedisString}, + {"type", "string"_RedisString}, + {"display_text", "consumer"_RedisString}, + }), + })}, + })}, + {"xgroup|create", + RedisMap({ + {"summary", "Creates a consumer group."_RedisString}, + {"since", "5.0.0"_RedisString}, + {"group", "stream"_RedisString}, + {"complexity", "O(1)"_RedisString}, + {"history", + RedisSet({ + RedisArray({"7.0.0"_RedisString, "Added the `entries_read` named argument."_RedisString}), + })}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "key"_RedisString}, + {"type", "key"_RedisString}, + {"display_text", "key"_RedisString}, + {"key_spec_index", 0_RedisInt}, + }), + RedisMap({ + {"name", "group"_RedisString}, + {"type", "string"_RedisString}, + {"display_text", "group"_RedisString}, + }), + RedisMap({ + {"name", "id-selector"_RedisString}, + {"type", "oneof"_RedisString}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "id"_RedisString}, + {"type", "string"_RedisString}, + {"display_text", "id"_RedisString}, + }), + RedisMap({ + {"name", "new-id"_RedisString}, + {"type", "pure-token"_RedisString}, + {"display_text", "new-id"_RedisString}, + {"token", "$"_RedisString}, + }), + })}, + }), + RedisMap({ + {"name", "mkstream"_RedisString}, + {"type", "pure-token"_RedisString}, + {"display_text", "mkstream"_RedisString}, + {"token", "MKSTREAM"_RedisString}, + {"flags", RedisArray({ + "optional"_RedisStatus, + })}, + }), + RedisMap({ + {"name", "entries-read"_RedisString}, + {"type", "integer"_RedisString}, + {"display_text", "entries-read"_RedisString}, + {"token", 
"ENTRIESREAD"_RedisString}, + {"flags", RedisArray({ + "optional"_RedisStatus, + })}, + }), + })}, + })}, + {"xgroup|destroy", + RedisMap({ + {"summary", "Destroys a consumer group."_RedisString}, + {"since", "5.0.0"_RedisString}, + {"group", "stream"_RedisString}, + {"complexity", + "O(N) where N is the number of entries in the group's pending entries list (PEL)."_RedisString}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "key"_RedisString}, + {"type", "key"_RedisString}, + {"display_text", "key"_RedisString}, + {"key_spec_index", 0_RedisInt}, + }), + RedisMap({ + {"name", "group"_RedisString}, + {"type", "string"_RedisString}, + {"display_text", "group"_RedisString}, + }), + })}, + })}, + {"xgroup|createconsumer", RedisMap({ + {"summary", "Creates a consumer in a consumer group."_RedisString}, + {"since", "6.2.0"_RedisString}, + {"group", "stream"_RedisString}, + {"complexity", "O(1)"_RedisString}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "key"_RedisString}, + {"type", "key"_RedisString}, + {"display_text", "key"_RedisString}, + {"key_spec_index", 0_RedisInt}, + }), + RedisMap({ + {"name", "group"_RedisString}, + {"type", "string"_RedisString}, + {"display_text", "group"_RedisString}, + }), + RedisMap({ + {"name", "consumer"_RedisString}, + {"type", "string"_RedisString}, + {"display_text", "consumer"_RedisString}, + }), + })}, + })}, + {"xgroup|setid", + RedisMap({ + {"summary", "Sets the last-delivered ID of a consumer group."_RedisString}, + {"since", "5.0.0"_RedisString}, + {"group", "stream"_RedisString}, + {"complexity", "O(1)"_RedisString}, + {"history", + RedisSet({ + RedisArray({"7.0.0"_RedisString, "Added the optional `entries_read` argument."_RedisString}), + })}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "key"_RedisString}, + {"type", "key"_RedisString}, + {"display_text", "key"_RedisString}, + {"key_spec_index", 0_RedisInt}, + }), + RedisMap({ + {"name", "group"_RedisString}, + {"type", "string"_RedisString}, + {"display_text", "group"_RedisString}, + }), + RedisMap({ + {"name", "id-selector"_RedisString}, + {"type", "oneof"_RedisString}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "id"_RedisString}, + {"type", "string"_RedisString}, + {"display_text", "id"_RedisString}, + }), + RedisMap({ + {"name", "new-id"_RedisString}, + {"type", "pure-token"_RedisString}, + {"display_text", "new-id"_RedisString}, + {"token", "$"_RedisString}, + }), + })}, + }), + RedisMap({ + {"name", "entriesread"_RedisString}, + {"type", "integer"_RedisString}, + {"display_text", "entries-read"_RedisString}, + {"token", "ENTRIESREAD"_RedisString}, + {"flags", RedisArray({ + "optional"_RedisStatus, + })}, + }), + })}, + })}, + {"xgroup|help", RedisMap({ + {"summary", "Returns helpful text about the different subcommands."_RedisString}, + {"since", "5.0.0"_RedisString}, + {"group", "stream"_RedisString}, + {"complexity", "O(1)"_RedisString}, + })}, + })}, + })}, + {"hmget", RedisMap({ + {"summary", "Returns the values of all fields in a hash."_RedisString}, + {"since", "2.0.0"_RedisString}, + {"group", "hash"_RedisString}, + {"complexity", "O(N) where N is the number of fields being requested."_RedisString}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "key"_RedisString}, + {"type", "key"_RedisString}, + {"display_text", "key"_RedisString}, + {"key_spec_index", 0_RedisInt}, + }), + RedisMap({ + {"name", "field"_RedisString}, + {"type", "string"_RedisString}, + {"display_text", "field"_RedisString}, + {"flags", RedisArray({ + "multiple"_RedisStatus, + })}, 
+ }), + })}, + })}, + {"quit", RedisMap({ + {"summary", "Closes the connection."_RedisString}, + {"since", "1.0.0"_RedisString}, + {"group", "connection"_RedisString}, + {"complexity", "O(1)"_RedisString}, + {"doc_flags", RedisSet({ + "deprecated"_RedisStatus, + })}, + {"deprecated_since", "7.2.0"_RedisString}, + {"replaced_by", "just closing the connection"_RedisString}, + })}, + {"unlink", + RedisMap({ + {"summary", "Asynchronously deletes one or more keys."_RedisString}, + {"since", "4.0.0"_RedisString}, + {"group", "generic"_RedisString}, + {"complexity", + "O(1) for each key removed regardless of its size. Then the command does O(N) work in a different thread in order to reclaim memory, where N is the number of allocations the deleted objects were composed of."_RedisString}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "key"_RedisString}, + {"type", "key"_RedisString}, + {"display_text", "key"_RedisString}, + {"key_spec_index", 0_RedisInt}, + {"flags", RedisArray({ + "multiple"_RedisStatus, + })}, + }), + })}, + })}, + {"mget", RedisMap({ + {"summary", "Atomically returns the string values of one or more keys."_RedisString}, + {"since", "1.0.0"_RedisString}, + {"group", "string"_RedisString}, + {"complexity", "O(N) where N is the number of keys to retrieve."_RedisString}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "key"_RedisString}, + {"type", "key"_RedisString}, + {"display_text", "key"_RedisString}, + {"key_spec_index", 0_RedisInt}, + {"flags", RedisArray({ + "multiple"_RedisStatus, + })}, + }), + })}, + })}, + {"unwatch", RedisMap({ + {"summary", "Forgets about watched keys of a transaction."_RedisString}, + {"since", "2.2.0"_RedisString}, + {"group", "transactions"_RedisString}, + {"complexity", "O(1)"_RedisString}, + })}, + {"zpopmax", + RedisMap({ + {"summary", + "Returns the highest-scoring members from a sorted set after removing them. Deletes the sorted set if the last member was popped."_RedisString}, + {"since", "5.0.0"_RedisString}, + {"group", "sorted-set"_RedisString}, + {"complexity", + "O(log(N)*M) with N being the number of elements in the sorted set, and M being the number of elements popped."_RedisString}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "key"_RedisString}, + {"type", "key"_RedisString}, + {"display_text", "key"_RedisString}, + {"key_spec_index", 0_RedisInt}, + }), + RedisMap({ + {"name", "count"_RedisString}, + {"type", "integer"_RedisString}, + {"display_text", "count"_RedisString}, + {"flags", RedisArray({ + "optional"_RedisStatus, + })}, + }), + })}, + })}, + {"lpos", + RedisMap({ + {"summary", "Returns the index of matching elements in a list."_RedisString}, + {"since", "6.0.6"_RedisString}, + {"group", "list"_RedisString}, + {"complexity", + "O(N) where N is the number of elements in the list, for the average case. 
When searching for elements near the head or the tail of the list, or when the MAXLEN option is provided, the command may run in constant time."_RedisString}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "key"_RedisString}, + {"type", "key"_RedisString}, + {"display_text", "key"_RedisString}, + {"key_spec_index", 0_RedisInt}, + }), + RedisMap({ + {"name", "element"_RedisString}, + {"type", "string"_RedisString}, + {"display_text", "element"_RedisString}, + }), + RedisMap({ + {"name", "rank"_RedisString}, + {"type", "integer"_RedisString}, + {"display_text", "rank"_RedisString}, + {"token", "RANK"_RedisString}, + {"flags", RedisArray({ + "optional"_RedisStatus, + })}, + }), + RedisMap({ + {"name", "num-matches"_RedisString}, + {"type", "integer"_RedisString}, + {"display_text", "num-matches"_RedisString}, + {"token", "COUNT"_RedisString}, + {"flags", RedisArray({ + "optional"_RedisStatus, + })}, + }), + RedisMap({ + {"name", "len"_RedisString}, + {"type", "integer"_RedisString}, + {"display_text", "len"_RedisString}, + {"token", "MAXLEN"_RedisString}, + {"flags", RedisArray({ + "optional"_RedisStatus, + })}, + }), + })}, + })}, + {"bitcount", + RedisMap({ + {"summary", "Counts the number of set bits (population counting) in a string."_RedisString}, + {"since", "2.6.0"_RedisString}, + {"group", "bitmap"_RedisString}, + {"complexity", "O(N)"_RedisString}, + {"history", RedisSet({ + RedisArray({"7.0.0"_RedisString, "Added the `BYTE|BIT` option."_RedisString}), + })}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "key"_RedisString}, + {"type", "key"_RedisString}, + {"display_text", "key"_RedisString}, + {"key_spec_index", 0_RedisInt}, + }), + RedisMap({ + {"name", "range"_RedisString}, + {"type", "block"_RedisString}, + {"flags", RedisArray({ + "optional"_RedisStatus, + })}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "start"_RedisString}, + {"type", "integer"_RedisString}, + {"display_text", "start"_RedisString}, + }), + RedisMap({ + {"name", "end"_RedisString}, + {"type", "integer"_RedisString}, + {"display_text", "end"_RedisString}, + }), + RedisMap({ + {"name", "unit"_RedisString}, + {"type", "oneof"_RedisString}, + {"since", "7.0.0"_RedisString}, + {"flags", RedisArray({ + "optional"_RedisStatus, + })}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "byte"_RedisString}, + {"type", "pure-token"_RedisString}, + {"display_text", "byte"_RedisString}, + {"token", "BYTE"_RedisString}, + }), + RedisMap({ + {"name", "bit"_RedisString}, + {"type", "pure-token"_RedisString}, + {"display_text", "bit"_RedisString}, + {"token", "BIT"_RedisString}, + }), + })}, + }), + })}, + }), + })}, + })}, + {"xdel", RedisMap({ + {"summary", "Returns the number of messages after removing them from a stream."_RedisString}, + {"since", "5.0.0"_RedisString}, + {"group", "stream"_RedisString}, + {"complexity", + "O(1) for each single item to delete in the stream, regardless of the stream size."_RedisString}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "key"_RedisString}, + {"type", "key"_RedisString}, + {"display_text", "key"_RedisString}, + {"key_spec_index", 0_RedisInt}, + }), + RedisMap({ + {"name", "id"_RedisString}, + {"type", "string"_RedisString}, + {"display_text", "id"_RedisString}, + {"flags", RedisArray({ + "multiple"_RedisStatus, + })}, + }), + })}, + })}, + {"xpending", + RedisMap({ + {"summary", + "Returns the information and entries from a stream consumer group's pending entries list."_RedisString}, + {"since", "5.0.0"_RedisString}, + {"group", "stream"_RedisString}, 
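+          // Illustrative invocations (comment only; stream, group and consumer names are
+          // hypothetical), covering both forms encoded by the argument spec below:
+          //   XPENDING mystream mygroup
+          //   XPENDING mystream mygroup IDLE 9000 - + 10 consumer-1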
+ {"complexity", + "O(N) with N being the number of elements returned, so asking for a small fixed number of entries per call is O(1). O(M), where M is the total number of entries scanned when used with the IDLE filter. When the command returns just the summary and the list of consumers is small, it runs in O(1) time; otherwise, an additional O(N) time for iterating every consumer."_RedisString}, + {"history", + RedisSet({ + RedisArray({"6.2.0"_RedisString, "Added the `IDLE` option and exclusive range intervals."_RedisString}), + })}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "key"_RedisString}, + {"type", "key"_RedisString}, + {"display_text", "key"_RedisString}, + {"key_spec_index", 0_RedisInt}, + }), + RedisMap({ + {"name", "group"_RedisString}, + {"type", "string"_RedisString}, + {"display_text", "group"_RedisString}, + }), + RedisMap({ + {"name", "filters"_RedisString}, + {"type", "block"_RedisString}, + {"flags", RedisArray({ + "optional"_RedisStatus, + })}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "min-idle-time"_RedisString}, + {"type", "integer"_RedisString}, + {"display_text", "min-idle-time"_RedisString}, + {"token", "IDLE"_RedisString}, + {"since", "6.2.0"_RedisString}, + {"flags", RedisArray({ + "optional"_RedisStatus, + })}, + }), + RedisMap({ + {"name", "start"_RedisString}, + {"type", "string"_RedisString}, + {"display_text", "start"_RedisString}, + }), + RedisMap({ + {"name", "end"_RedisString}, + {"type", "string"_RedisString}, + {"display_text", "end"_RedisString}, + }), + RedisMap({ + {"name", "count"_RedisString}, + {"type", "integer"_RedisString}, + {"display_text", "count"_RedisString}, + }), + RedisMap({ + {"name", "consumer"_RedisString}, + {"type", "string"_RedisString}, + {"display_text", "consumer"_RedisString}, + {"flags", RedisArray({ + "optional"_RedisStatus, + })}, + }), + })}, + }), + })}, + })}, + {"auth", + RedisMap({ + {"summary", "Authenticates the connection."_RedisString}, + {"since", "1.0.0"_RedisString}, + {"group", "connection"_RedisString}, + {"complexity", "O(N) where N is the number of passwords defined for the user"_RedisString}, + {"history", RedisSet({ + RedisArray({"6.0.0"_RedisString, "Added ACL style (username and password)."_RedisString}), + })}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "username"_RedisString}, + {"type", "string"_RedisString}, + {"display_text", "username"_RedisString}, + {"since", "6.0.0"_RedisString}, + {"flags", RedisArray({ + "optional"_RedisStatus, + })}, + }), + RedisMap({ + {"name", "password"_RedisString}, + {"type", "string"_RedisString}, + {"display_text", "password"_RedisString}, + }), + })}, + })}, + {"select", RedisMap({ + {"summary", "Changes the selected database."_RedisString}, + {"since", "1.0.0"_RedisString}, + {"group", "connection"_RedisString}, + {"complexity", "O(1)"_RedisString}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "index"_RedisString}, + {"type", "integer"_RedisString}, + {"display_text", "index"_RedisString}, + }), + })}, + })}, + {"hmset", RedisMap({ + {"summary", "Sets the values of multiple fields."_RedisString}, + {"since", "2.0.0"_RedisString}, + {"group", "hash"_RedisString}, + {"complexity", "O(N) where N is the number of fields being set."_RedisString}, + {"doc_flags", RedisSet({ + "deprecated"_RedisStatus, + })}, + {"deprecated_since", "4.0.0"_RedisString}, + {"replaced_by", "`HSET` with multiple field-value pairs"_RedisString}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "key"_RedisString}, + {"type", "key"_RedisString}, + 
{"display_text", "key"_RedisString}, + {"key_spec_index", 0_RedisInt}, + }), + RedisMap({ + {"name", "data"_RedisString}, + {"type", "block"_RedisString}, + {"flags", RedisArray({ + "multiple"_RedisStatus, + })}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "field"_RedisString}, + {"type", "string"_RedisString}, + {"display_text", "field"_RedisString}, + }), + RedisMap({ + {"name", "value"_RedisString}, + {"type", "string"_RedisString}, + {"display_text", "value"_RedisString}, + }), + })}, + }), + })}, + })}, + {"hstrlen", RedisMap({ + {"summary", "Returns the length of the value of a field."_RedisString}, + {"since", "3.2.0"_RedisString}, + {"group", "hash"_RedisString}, + {"complexity", "O(1)"_RedisString}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "key"_RedisString}, + {"type", "key"_RedisString}, + {"display_text", "key"_RedisString}, + {"key_spec_index", 0_RedisInt}, + }), + RedisMap({ + {"name", "field"_RedisString}, + {"type", "string"_RedisString}, + {"display_text", "field"_RedisString}, + }), + })}, + })}, + {"decr", + RedisMap({ + {"summary", + "Decrements the integer value of a key by one. Uses 0 as initial value if the key doesn't exist."_RedisString}, + {"since", "1.0.0"_RedisString}, + {"group", "string"_RedisString}, + {"complexity", "O(1)"_RedisString}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "key"_RedisString}, + {"type", "key"_RedisString}, + {"display_text", "key"_RedisString}, + {"key_spec_index", 0_RedisInt}, + }), + })}, + })}, + {"hdel", + RedisMap({ + {"summary", + "Deletes one or more fields and their values from a hash. Deletes the hash if no fields remain."_RedisString}, + {"since", "2.0.0"_RedisString}, + {"group", "hash"_RedisString}, + {"complexity", "O(N) where N is the number of fields to be removed."_RedisString}, + {"history", RedisSet({ + RedisArray({"2.4.0"_RedisString, "Accepts multiple `field` arguments."_RedisString}), + })}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "key"_RedisString}, + {"type", "key"_RedisString}, + {"display_text", "key"_RedisString}, + {"key_spec_index", 0_RedisInt}, + }), + RedisMap({ + {"name", "field"_RedisString}, + {"type", "string"_RedisString}, + {"display_text", "field"_RedisString}, + {"flags", RedisArray({ + "multiple"_RedisStatus, + })}, + }), + })}, + })}, + {"replicaof", RedisMap({ + {"summary", "Configures a server as replica of another, or promotes it to a master."_RedisString}, + {"since", "5.0.0"_RedisString}, + {"group", "server"_RedisString}, + {"complexity", "O(1)"_RedisString}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "host"_RedisString}, + {"type", "string"_RedisString}, + {"display_text", "host"_RedisString}, + }), + RedisMap({ + {"name", "port"_RedisString}, + {"type", "integer"_RedisString}, + {"display_text", "port"_RedisString}, + }), + })}, + })}, + {"psubscribe", + RedisMap({ + {"summary", "Listens for messages published to channels that match one or more patterns."_RedisString}, + {"since", "2.0.0"_RedisString}, + {"group", "pubsub"_RedisString}, + {"complexity", "O(N) where N is the number of patterns the client is already subscribed to."_RedisString}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "pattern"_RedisString}, + {"type", "pattern"_RedisString}, + {"display_text", "pattern"_RedisString}, + {"flags", RedisArray({ + "multiple"_RedisStatus, + })}, + }), + })}, + })}, + {"hset", + RedisMap({ + {"summary", "Creates or modifies the value of a field in a hash."_RedisString}, + {"since", "2.0.0"_RedisString}, + {"group", 
"hash"_RedisString}, + {"complexity", + "O(1) for each field/value pair added, so O(N) to add N field/value pairs when the command is called with multiple field/value pairs."_RedisString}, + {"history", + RedisSet({ + RedisArray({"4.0.0"_RedisString, "Accepts multiple `field` and `value` arguments."_RedisString}), + })}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "key"_RedisString}, + {"type", "key"_RedisString}, + {"display_text", "key"_RedisString}, + {"key_spec_index", 0_RedisInt}, + }), + RedisMap({ + {"name", "data"_RedisString}, + {"type", "block"_RedisString}, + {"flags", RedisArray({ + "multiple"_RedisStatus, + })}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "field"_RedisString}, + {"type", "string"_RedisString}, + {"display_text", "field"_RedisString}, + }), + RedisMap({ + {"name", "value"_RedisString}, + {"type", "string"_RedisString}, + {"display_text", "value"_RedisString}, + }), + })}, + }), + })}, + })}, + {"brpop", + RedisMap({ + {"summary", + "Removes and returns the last element in a list. Blocks until an element is available otherwise. Deletes the list if the last element was popped."_RedisString}, + {"since", "2.0.0"_RedisString}, + {"group", "list"_RedisString}, + {"complexity", "O(N) where N is the number of provided keys."_RedisString}, + {"history", RedisSet({ + RedisArray({"6.0.0"_RedisString, + "`timeout` is interpreted as a double instead of an integer."_RedisString}), + })}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "key"_RedisString}, + {"type", "key"_RedisString}, + {"display_text", "key"_RedisString}, + {"key_spec_index", 0_RedisInt}, + {"flags", RedisArray({ + "multiple"_RedisStatus, + })}, + }), + RedisMap({ + {"name", "timeout"_RedisString}, + {"type", "double"_RedisString}, + {"display_text", "timeout"_RedisString}, + }), + })}, + })}, + {"exists", RedisMap({ + {"summary", "Determines whether one or more keys exist."_RedisString}, + {"since", "1.0.0"_RedisString}, + {"group", "generic"_RedisString}, + {"complexity", "O(N) where N is the number of keys to check."_RedisString}, + {"history", RedisSet({ + RedisArray({"3.0.3"_RedisString, "Accepts multiple `key` arguments."_RedisString}), + })}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "key"_RedisString}, + {"type", "key"_RedisString}, + {"display_text", "key"_RedisString}, + {"key_spec_index", 0_RedisInt}, + {"flags", RedisArray({ + "multiple"_RedisStatus, + })}, + }), + })}, + })}, + {"getrange", + RedisMap({ + {"summary", "Returns a substring of the string stored at a key."_RedisString}, + {"since", "2.4.0"_RedisString}, + {"group", "string"_RedisString}, + {"complexity", + "O(N) where N is the length of the returned string. 
The complexity is ultimately determined by the returned length, but because creating a substring from an existing string is very cheap, it can be considered O(1) for small strings."_RedisString}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "key"_RedisString}, + {"type", "key"_RedisString}, + {"display_text", "key"_RedisString}, + {"key_spec_index", 0_RedisInt}, + }), + RedisMap({ + {"name", "start"_RedisString}, + {"type", "integer"_RedisString}, + {"display_text", "start"_RedisString}, + }), + RedisMap({ + {"name", "end"_RedisString}, + {"type", "integer"_RedisString}, + {"display_text", "end"_RedisString}, + }), + })}, + })}, + {"llen", RedisMap({ + {"summary", "Returns the length of a list."_RedisString}, + {"since", "1.0.0"_RedisString}, + {"group", "list"_RedisString}, + {"complexity", "O(1)"_RedisString}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "key"_RedisString}, + {"type", "key"_RedisString}, + {"display_text", "key"_RedisString}, + {"key_spec_index", 0_RedisInt}, + }), + })}, + })}, + {"xclaim", + RedisMap({ + {"summary", + "Changes, or acquires, ownership of a message in a consumer group, as if the message was delivered to a consumer group member."_RedisString}, + {"since", "5.0.0"_RedisString}, + {"group", "stream"_RedisString}, + {"complexity", "O(log N) with N being the number of messages in the PEL of the consumer group."_RedisString}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "key"_RedisString}, + {"type", "key"_RedisString}, + {"display_text", "key"_RedisString}, + {"key_spec_index", 0_RedisInt}, + }), + RedisMap({ + {"name", "group"_RedisString}, + {"type", "string"_RedisString}, + {"display_text", "group"_RedisString}, + }), + RedisMap({ + {"name", "consumer"_RedisString}, + {"type", "string"_RedisString}, + {"display_text", "consumer"_RedisString}, + }), + RedisMap({ + {"name", "min-idle-time"_RedisString}, + {"type", "string"_RedisString}, + {"display_text", "min-idle-time"_RedisString}, + }), + RedisMap({ + {"name", "id"_RedisString}, + {"type", "string"_RedisString}, + {"display_text", "id"_RedisString}, + {"flags", RedisArray({ + "multiple"_RedisStatus, + })}, + }), + RedisMap({ + {"name", "ms"_RedisString}, + {"type", "integer"_RedisString}, + {"display_text", "ms"_RedisString}, + {"token", "IDLE"_RedisString}, + {"flags", RedisArray({ + "optional"_RedisStatus, + })}, + }), + RedisMap({ + {"name", "unix-time-milliseconds"_RedisString}, + {"type", "unix-time"_RedisString}, + {"display_text", "unix-time-milliseconds"_RedisString}, + {"token", "TIME"_RedisString}, + {"flags", RedisArray({ + "optional"_RedisStatus, + })}, + }), + RedisMap({ + {"name", "count"_RedisString}, + {"type", "integer"_RedisString}, + {"display_text", "count"_RedisString}, + {"token", "RETRYCOUNT"_RedisString}, + {"flags", RedisArray({ + "optional"_RedisStatus, + })}, + }), + RedisMap({ + {"name", "force"_RedisString}, + {"type", "pure-token"_RedisString}, + {"display_text", "force"_RedisString}, + {"token", "FORCE"_RedisString}, + {"flags", RedisArray({ + "optional"_RedisStatus, + })}, + }), + RedisMap({ + {"name", "justid"_RedisString}, + {"type", "pure-token"_RedisString}, + {"display_text", "justid"_RedisString}, + {"token", "JUSTID"_RedisString}, + {"flags", RedisArray({ + "optional"_RedisStatus, + })}, + }), + RedisMap({ + {"name", "lastid"_RedisString}, + {"type", "string"_RedisString}, + {"display_text", "lastid"_RedisString}, + {"token", "LASTID"_RedisString}, + {"flags", RedisArray({ + "optional"_RedisStatus, + })}, + }), + })}, + })}, + {"zrevrange", + 
RedisMap({ + {"summary", "Returns members in a sorted set within a range of indexes in reverse order."_RedisString}, + {"since", "1.2.0"_RedisString}, + {"group", "sorted-set"_RedisString}, + {"complexity", + "O(log(N)+M) with N being the number of elements in the sorted set and M the number of elements returned."_RedisString}, + {"doc_flags", RedisSet({ + "deprecated"_RedisStatus, + })}, + {"deprecated_since", "6.2.0"_RedisString}, + {"replaced_by", "`ZRANGE` with the `REV` argument"_RedisString}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "key"_RedisString}, + {"type", "key"_RedisString}, + {"display_text", "key"_RedisString}, + {"key_spec_index", 0_RedisInt}, + }), + RedisMap({ + {"name", "start"_RedisString}, + {"type", "integer"_RedisString}, + {"display_text", "start"_RedisString}, + }), + RedisMap({ + {"name", "stop"_RedisString}, + {"type", "integer"_RedisString}, + {"display_text", "stop"_RedisString}, + }), + RedisMap({ + {"name", "withscores"_RedisString}, + {"type", "pure-token"_RedisString}, + {"display_text", "withscores"_RedisString}, + {"token", "WITHSCORES"_RedisString}, + {"flags", RedisArray({ + "optional"_RedisStatus, + })}, + }), + })}, + })}, + {"xtrim", + RedisMap({ + {"summary", "Deletes messages from the beginning of a stream."_RedisString}, + {"since", "5.0.0"_RedisString}, + {"group", "stream"_RedisString}, + {"complexity", + "O(N), with N being the number of evicted entries. Constant times are very small however, since entries are organized in macro nodes containing multiple entries that can be released with a single deallocation."_RedisString}, + {"history", RedisSet({ + RedisArray({"6.2.0"_RedisString, + "Added the `MINID` trimming strategy and the `LIMIT` option."_RedisString}), + })}, + {"arguments", + RedisArray({ + RedisMap({ + {"name", "key"_RedisString}, + {"type", "key"_RedisString}, + {"display_text", "key"_RedisString}, + {"key_spec_index", 0_RedisInt}, + }), + RedisMap({ + {"name", "trim"_RedisString}, + {"type", "block"_RedisString}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "strategy"_RedisString}, + {"type", "oneof"_RedisString}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "maxlen"_RedisString}, + {"type", "pure-token"_RedisString}, + {"display_text", "maxlen"_RedisString}, + {"token", "MAXLEN"_RedisString}, + }), + RedisMap({ + {"name", "minid"_RedisString}, + {"type", "pure-token"_RedisString}, + {"display_text", "minid"_RedisString}, + {"token", "MINID"_RedisString}, + {"since", "6.2.0"_RedisString}, + }), + })}, + }), + RedisMap({ + {"name", "operator"_RedisString}, + {"type", "oneof"_RedisString}, + {"flags", RedisArray({ + "optional"_RedisStatus, + })}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "equal"_RedisString}, + {"type", "pure-token"_RedisString}, + {"display_text", "equal"_RedisString}, + {"token", "="_RedisString}, + }), + RedisMap({ + {"name", "approximately"_RedisString}, + {"type", "pure-token"_RedisString}, + {"display_text", "approximately"_RedisString}, + {"token", "~"_RedisString}, + }), + })}, + }), + RedisMap({ + {"name", "threshold"_RedisString}, + {"type", "string"_RedisString}, + {"display_text", "threshold"_RedisString}, + }), + RedisMap({ + {"name", "count"_RedisString}, + {"type", "integer"_RedisString}, + {"display_text", "count"_RedisString}, + {"token", "LIMIT"_RedisString}, + {"since", "6.2.0"_RedisString}, + {"flags", RedisArray({ + "optional"_RedisStatus, + })}, + }), + })}, + }), + })}, + })}, + {"acl", RedisMap({ + {"summary", "A container for Access List Control 
commands."_RedisString}, + {"since", "6.0.0"_RedisString}, + {"group", "server"_RedisString}, + {"complexity", "Depends on subcommand."_RedisString}, + {"subcommands", + RedisMap({ + {"acl|list", RedisMap({ + {"summary", "Dumps the effective rules in ACL file format."_RedisString}, + {"since", "6.0.0"_RedisString}, + {"group", "server"_RedisString}, + {"complexity", "O(N). Where N is the number of configured users."_RedisString}, + })}, + {"acl|help", RedisMap({ + {"summary", "Returns helpful text about the different subcommands."_RedisString}, + {"since", "6.0.0"_RedisString}, + {"group", "server"_RedisString}, + {"complexity", "O(1)"_RedisString}, + })}, + {"acl|users", RedisMap({ + {"summary", "Lists all ACL users."_RedisString}, + {"since", "6.0.0"_RedisString}, + {"group", "server"_RedisString}, + {"complexity", "O(N). Where N is the number of configured users."_RedisString}, + })}, + {"acl|setuser", + RedisMap({ + {"summary", "Creates and modifies an ACL user and its rules."_RedisString}, + {"since", "6.0.0"_RedisString}, + {"group", "server"_RedisString}, + {"complexity", "O(N). Where N is the number of rules provided."_RedisString}, + {"history", + RedisSet({ + RedisArray({"6.2.0"_RedisString, "Added Pub/Sub channel patterns."_RedisString}), + RedisArray( + {"7.0.0"_RedisString, "Added selectors and key based permissions."_RedisString}), + })}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "username"_RedisString}, + {"type", "string"_RedisString}, + {"display_text", "username"_RedisString}, + }), + RedisMap({ + {"name", "rule"_RedisString}, + {"type", "string"_RedisString}, + {"display_text", "rule"_RedisString}, + {"flags", RedisArray({ + "optional"_RedisStatus, + "multiple"_RedisStatus, + })}, + }), + })}, + })}, + {"acl|log", + RedisMap({ + {"summary", "Lists recent security events generated due to ACL rules."_RedisString}, + {"since", "6.0.0"_RedisString}, + {"group", "server"_RedisString}, + {"complexity", "O(N) with N being the number of entries shown."_RedisString}, + {"history", + RedisSet({ + RedisArray({"7.2.0"_RedisString, + "Added entry ID, timestamp created, and timestamp last updated."_RedisString}), + })}, + {"arguments", RedisArray( + { + RedisMap( + { + {"name", "operation"_RedisString}, + {"type", "oneof"_RedisString}, + {"flags", RedisArray({ + "optional"_RedisStatus, + })}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "count"_RedisString}, + {"type", "integer"_RedisString}, + {"display_text", "count"_RedisString}, + }), + RedisMap({ + {"name", "reset"_RedisString}, + {"type", "pure-token"_RedisString}, + {"display_text", "reset"_RedisString}, + {"token", "RESET"_RedisString}, + }), + })}, + }), + })}, + })}, + {"acl|dryrun", + RedisMap({ + {"summary", + "Simulates the execution of a command by a user, without executing the command."_RedisString}, + {"since", "7.0.0"_RedisString}, + {"group", "server"_RedisString}, + {"complexity", "O(1)."_RedisString}, + {"arguments", + RedisArray({ + RedisMap({ + {"name", "username"_RedisString}, + {"type", "string"_RedisString}, + {"display_text", "username"_RedisString}, + }), + RedisMap({ + {"name", "command"_RedisString}, + {"type", "string"_RedisString}, + {"display_text", "command"_RedisString}, + }), + RedisMap({ + {"name", "arg"_RedisString}, + {"type", "string"_RedisString}, + {"display_text", "arg"_RedisString}, + {"flags", + RedisArray({ + "optional"_RedisStatus, + "multiple"_RedisStatus, + })}, + }), + })}, + })}, + {"acl|cat", + RedisMap({ + {"summary", "Lists the ACL categories, or the commands 
inside a category."_RedisString}, + {"since", "6.0.0"_RedisString}, + {"group", "server"_RedisString}, + {"complexity", "O(1) since the categories and commands are a fixed set."_RedisString}, + {"arguments", + RedisArray({ + RedisMap({ + {"name", "category"_RedisString}, + {"type", "string"_RedisString}, + {"display_text", "category"_RedisString}, + {"flags", + RedisArray({ + "optional"_RedisStatus, + })}, + }), + })}, + })}, + {"acl|deluser", + RedisMap({ + {"summary", "Deletes ACL users, and terminates their connections."_RedisString}, + {"since", "6.0.0"_RedisString}, + {"group", "server"_RedisString}, + {"complexity", "O(1) amortized time considering the typical user."_RedisString}, + {"arguments", + RedisArray({ + RedisMap({ + {"name", "username"_RedisString}, + {"type", "string"_RedisString}, + {"display_text", "username"_RedisString}, + {"flags", + RedisArray({ + "multiple"_RedisStatus, + })}, + }), + })}, + })}, + {"acl|save", + RedisMap({ + {"summary", "Saves the effective ACL rules in the configured ACL file."_RedisString}, + {"since", "6.0.0"_RedisString}, + {"group", "server"_RedisString}, + {"complexity", "O(N). Where N is the number of configured users."_RedisString}, + })}, + {"acl|genpass", + RedisMap({ + {"summary", + "Generates a pseudorandom, secure password that can be used to identify ACL users."_RedisString}, + {"since", "6.0.0"_RedisString}, + {"group", "server"_RedisString}, + {"complexity", "O(1)"_RedisString}, + {"arguments", RedisArray( + { + RedisMap({ + {"name", "bits"_RedisString}, + {"type", "integer"_RedisString}, + {"display_text", "bits"_RedisString}, + {"flags", RedisArray({ + "optional"_RedisStatus, + })}, + }), + })}, + })}, + {"acl|getuser", RedisMap( + { + {"summary", "Lists the ACL rules of a user."_RedisString}, + {"since", "6.0.0"_RedisString}, + {"group", "server"_RedisString}, + {"complexity", + "O(N). Where N is the number of password, command and pattern rules that the user has."_RedisString}, + {"history", RedisSet( + { + RedisArray({"6.2.0"_RedisString, + "Added Pub/Sub channel patterns."_RedisString}), + RedisArray({"7.0.0"_RedisString, + "Added selectors and changed the format of key and channel patterns from a list to their rule representation."_RedisString}), + })}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "username"_RedisString}, + {"type", "string"_RedisString}, + {"display_text", "username"_RedisString}, + }), + })}, + })}, + {"acl|load", RedisMap({ + {"summary", "Reloads the rules from the configured ACL file."_RedisString}, + {"since", "6.0.0"_RedisString}, + {"group", "server"_RedisString}, + {"complexity", "O(N). Where N is the number of configured users."_RedisString}, + })}, + {"acl|whoami", + RedisMap({ + {"summary", "Returns the authenticated username of the current connection."_RedisString}, + {"since", "6.0.0"_RedisString}, + {"group", "server"_RedisString}, + {"complexity", "O(1)"_RedisString}, + })}, + })}, + })}, + {"sadd", + RedisMap({ + {"summary", "Adds one or more members to a set. 
Creates the key if it doesn't exist."_RedisString}, + {"since", "1.0.0"_RedisString}, + {"group", "set"_RedisString}, + {"complexity", + "O(1) for each element added, so O(N) to add N elements when the command is called with multiple arguments."_RedisString}, + {"history", RedisSet({ + RedisArray({"2.4.0"_RedisString, "Accepts multiple `member` arguments."_RedisString}), + })}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "key"_RedisString}, + {"type", "key"_RedisString}, + {"display_text", "key"_RedisString}, + {"key_spec_index", 0_RedisInt}, + }), + RedisMap({ + {"name", "member"_RedisString}, + {"type", "string"_RedisString}, + {"display_text", "member"_RedisString}, + {"flags", RedisArray({ + "multiple"_RedisStatus, + })}, + }), + })}, + })}, + {"zlexcount", + RedisMap({ + {"summary", "Returns the number of members in a sorted set within a lexicographical range."_RedisString}, + {"since", "2.8.9"_RedisString}, + {"group", "sorted-set"_RedisString}, + {"complexity", "O(log(N)) with N being the number of elements in the sorted set."_RedisString}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "key"_RedisString}, + {"type", "key"_RedisString}, + {"display_text", "key"_RedisString}, + {"key_spec_index", 0_RedisInt}, + }), + RedisMap({ + {"name", "min"_RedisString}, + {"type", "string"_RedisString}, + {"display_text", "min"_RedisString}, + }), + RedisMap({ + {"name", "max"_RedisString}, + {"type", "string"_RedisString}, + {"display_text", "max"_RedisString}, + }), + })}, + })}, + {"sinter", + RedisMap({ + {"summary", "Returns the intersect of multiple sets."_RedisString}, + {"since", "1.0.0"_RedisString}, + {"group", "set"_RedisString}, + {"complexity", + "O(N*M) worst case where N is the cardinality of the smallest set and M is the number of sets."_RedisString}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "key"_RedisString}, + {"type", "key"_RedisString}, + {"display_text", "key"_RedisString}, + {"key_spec_index", 0_RedisInt}, + {"flags", RedisArray({ + "multiple"_RedisStatus, + })}, + }), + })}, + })}, + {"georadiusbymember_ro", + RedisMap({ + {"summary", "Returns members from a geospatial index that are within a distance from a member."_RedisString}, + {"since", "3.2.10"_RedisString}, + {"group", "geo"_RedisString}, + {"complexity", + "O(N+log(M)) where N is the number of elements inside the bounding box of the circular area delimited by center and radius and M is the number of items inside the index."_RedisString}, + {"doc_flags", RedisSet({ + "deprecated"_RedisStatus, + })}, + {"deprecated_since", "6.2.0"_RedisString}, + {"replaced_by", "`GEOSEARCH` with the `BYRADIUS` and `FROMMEMBER` arguments"_RedisString}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "key"_RedisString}, + {"type", "key"_RedisString}, + {"display_text", "key"_RedisString}, + {"key_spec_index", 0_RedisInt}, + }), + RedisMap({ + {"name", "member"_RedisString}, + {"type", "string"_RedisString}, + {"display_text", "member"_RedisString}, + }), + RedisMap({ + {"name", "radius"_RedisString}, + {"type", "double"_RedisString}, + {"display_text", "radius"_RedisString}, + }), + RedisMap({ + {"name", "unit"_RedisString}, + {"type", "oneof"_RedisString}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "m"_RedisString}, + {"type", "pure-token"_RedisString}, + {"display_text", "m"_RedisString}, + {"token", "M"_RedisString}, + }), + RedisMap({ + {"name", "km"_RedisString}, + {"type", "pure-token"_RedisString}, + {"display_text", "km"_RedisString}, + {"token", "KM"_RedisString}, + }), + RedisMap({ 
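+          // Illustrative invocation (comment only; key and member are hypothetical):
+          //   GEORADIUSBYMEMBER_RO geo:sicily "Palermo" 200 KM WITHDIST COUNT 10 ASC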
+ {"name", "ft"_RedisString}, + {"type", "pure-token"_RedisString}, + {"display_text", "ft"_RedisString}, + {"token", "FT"_RedisString}, + }), + RedisMap({ + {"name", "mi"_RedisString}, + {"type", "pure-token"_RedisString}, + {"display_text", "mi"_RedisString}, + {"token", "MI"_RedisString}, + }), + })}, + }), + RedisMap({ + {"name", "withcoord"_RedisString}, + {"type", "pure-token"_RedisString}, + {"display_text", "withcoord"_RedisString}, + {"token", "WITHCOORD"_RedisString}, + {"flags", RedisArray({ + "optional"_RedisStatus, + })}, + }), + RedisMap({ + {"name", "withdist"_RedisString}, + {"type", "pure-token"_RedisString}, + {"display_text", "withdist"_RedisString}, + {"token", "WITHDIST"_RedisString}, + {"flags", RedisArray({ + "optional"_RedisStatus, + })}, + }), + RedisMap({ + {"name", "withhash"_RedisString}, + {"type", "pure-token"_RedisString}, + {"display_text", "withhash"_RedisString}, + {"token", "WITHHASH"_RedisString}, + {"flags", RedisArray({ + "optional"_RedisStatus, + })}, + }), + RedisMap({ + {"name", "count-block"_RedisString}, + {"type", "block"_RedisString}, + {"flags", RedisArray({ + "optional"_RedisStatus, + })}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "count"_RedisString}, + {"type", "integer"_RedisString}, + {"display_text", "count"_RedisString}, + {"token", "COUNT"_RedisString}, + }), + RedisMap({ + {"name", "any"_RedisString}, + {"type", "pure-token"_RedisString}, + {"display_text", "any"_RedisString}, + {"token", "ANY"_RedisString}, + {"flags", RedisArray({ + "optional"_RedisStatus, + })}, + }), + })}, + }), + RedisMap({ + {"name", "order"_RedisString}, + {"type", "oneof"_RedisString}, + {"flags", RedisArray({ + "optional"_RedisStatus, + })}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "asc"_RedisString}, + {"type", "pure-token"_RedisString}, + {"display_text", "asc"_RedisString}, + {"token", "ASC"_RedisString}, + }), + RedisMap({ + {"name", "desc"_RedisString}, + {"type", "pure-token"_RedisString}, + {"display_text", "desc"_RedisString}, + {"token", "DESC"_RedisString}, + }), + })}, + }), + })}, + })}, + {"smove", RedisMap({ + {"summary", "Moves a member from one set to another."_RedisString}, + {"since", "1.0.0"_RedisString}, + {"group", "set"_RedisString}, + {"complexity", "O(1)"_RedisString}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "source"_RedisString}, + {"type", "key"_RedisString}, + {"display_text", "source"_RedisString}, + {"key_spec_index", 0_RedisInt}, + }), + RedisMap({ + {"name", "destination"_RedisString}, + {"type", "key"_RedisString}, + {"display_text", "destination"_RedisString}, + {"key_spec_index", 1_RedisInt}, + }), + RedisMap({ + {"name", "member"_RedisString}, + {"type", "string"_RedisString}, + {"display_text", "member"_RedisString}, + }), + })}, + })}, + {"del", + RedisMap({ + {"summary", "Deletes one or more keys."_RedisString}, + {"since", "1.0.0"_RedisString}, + {"group", "generic"_RedisString}, + {"complexity", + "O(N) where N is the number of keys that will be removed. When a key to remove holds a value other than a string, the individual complexity for this key is O(M) where M is the number of elements in the list, set, sorted set or hash. 
Removing a single key that holds a string value is O(1)."_RedisString}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "key"_RedisString}, + {"type", "key"_RedisString}, + {"display_text", "key"_RedisString}, + {"key_spec_index", 0_RedisInt}, + {"flags", RedisArray({ + "multiple"_RedisStatus, + })}, + }), + })}, + })}, + {"zrem", + RedisMap({ + {"summary", + "Removes one or more members from a sorted set. Deletes the sorted set if all members were removed."_RedisString}, + {"since", "1.2.0"_RedisString}, + {"group", "sorted-set"_RedisString}, + {"complexity", + "O(M*log(N)) with N being the number of elements in the sorted set and M the number of elements to be removed."_RedisString}, + {"history", RedisSet({ + RedisArray({"2.4.0"_RedisString, "Accepts multiple elements."_RedisString}), + })}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "key"_RedisString}, + {"type", "key"_RedisString}, + {"display_text", "key"_RedisString}, + {"key_spec_index", 0_RedisInt}, + }), + RedisMap({ + {"name", "member"_RedisString}, + {"type", "string"_RedisString}, + {"display_text", "member"_RedisString}, + {"flags", RedisArray({ + "multiple"_RedisStatus, + })}, + }), + })}, + })}, + {"bzpopmin", + RedisMap({ + {"summary", + "Removes and returns the member with the lowest score from one or more sorted sets. Blocks until a member is available otherwise. Deletes the sorted set if the last element was popped."_RedisString}, + {"since", "5.0.0"_RedisString}, + {"group", "sorted-set"_RedisString}, + {"complexity", "O(log(N)) with N being the number of elements in the sorted set."_RedisString}, + {"history", RedisSet({ + RedisArray({"6.0.0"_RedisString, + "`timeout` is interpreted as a double instead of an integer."_RedisString}), + })}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "key"_RedisString}, + {"type", "key"_RedisString}, + {"display_text", "key"_RedisString}, + {"key_spec_index", 0_RedisInt}, + {"flags", RedisArray({ + "multiple"_RedisStatus, + })}, + }), + RedisMap({ + {"name", "timeout"_RedisString}, + {"type", "double"_RedisString}, + {"display_text", "timeout"_RedisString}, + }), + })}, + })}, + {"xsetid", + RedisMap({ + {"summary", "An internal command for replicating stream values."_RedisString}, + {"since", "5.0.0"_RedisString}, + {"group", "stream"_RedisString}, + {"complexity", "O(1)"_RedisString}, + {"history", RedisSet({ + RedisArray({"7.0.0"_RedisString, + "Added the `entries_added` and `max_deleted_entry_id` arguments."_RedisString}), + })}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "key"_RedisString}, + {"type", "key"_RedisString}, + {"display_text", "key"_RedisString}, + {"key_spec_index", 0_RedisInt}, + }), + RedisMap({ + {"name", "last-id"_RedisString}, + {"type", "string"_RedisString}, + {"display_text", "last-id"_RedisString}, + }), + RedisMap({ + {"name", "entries-added"_RedisString}, + {"type", "integer"_RedisString}, + {"display_text", "entries-added"_RedisString}, + {"token", "ENTRIESADDED"_RedisString}, + {"since", "7.0.0"_RedisString}, + {"flags", RedisArray({ + "optional"_RedisStatus, + })}, + }), + RedisMap({ + {"name", "max-deleted-id"_RedisString}, + {"type", "string"_RedisString}, + {"display_text", "max-deleted-id"_RedisString}, + {"token", "MAXDELETEDID"_RedisString}, + {"since", "7.0.0"_RedisString}, + {"flags", RedisArray({ + "optional"_RedisStatus, + })}, + }), + })}, + })}, + {"zdiffstore", + RedisMap({ + {"summary", "Stores the difference of multiple sorted sets in a key."_RedisString}, + {"since", "6.2.0"_RedisString}, + {"group", 
"sorted-set"_RedisString}, + {"complexity", + "O(L + (N-K)log(N)) worst case where L is the total number of elements in all the sets, N is the size of the first set, and K is the size of the result set."_RedisString}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "destination"_RedisString}, + {"type", "key"_RedisString}, + {"display_text", "destination"_RedisString}, + {"key_spec_index", 0_RedisInt}, + }), + RedisMap({ + {"name", "numkeys"_RedisString}, + {"type", "integer"_RedisString}, + {"display_text", "numkeys"_RedisString}, + }), + RedisMap({ + {"name", "key"_RedisString}, + {"type", "key"_RedisString}, + {"display_text", "key"_RedisString}, + {"key_spec_index", 1_RedisInt}, + {"flags", RedisArray({ + "multiple"_RedisStatus, + })}, + }), + })}, + })}, + {"setnx", RedisMap({ + {"summary", "Set the string value of a key only when the key doesn't exist."_RedisString}, + {"since", "1.0.0"_RedisString}, + {"group", "string"_RedisString}, + {"complexity", "O(1)"_RedisString}, + {"doc_flags", RedisSet({ + "deprecated"_RedisStatus, + })}, + {"deprecated_since", "2.6.12"_RedisString}, + {"replaced_by", "`SET` with the `NX` argument"_RedisString}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "key"_RedisString}, + {"type", "key"_RedisString}, + {"display_text", "key"_RedisString}, + {"key_spec_index", 0_RedisInt}, + }), + RedisMap({ + {"name", "value"_RedisString}, + {"type", "string"_RedisString}, + {"display_text", "value"_RedisString}, + }), + })}, + })}, + {"getset", + RedisMap({ + {"summary", "Returns the previous string value of a key after setting it to a new value."_RedisString}, + {"since", "1.0.0"_RedisString}, + {"group", "string"_RedisString}, + {"complexity", "O(1)"_RedisString}, + {"doc_flags", RedisSet({ + "deprecated"_RedisStatus, + })}, + {"deprecated_since", "6.2.0"_RedisString}, + {"replaced_by", "`SET` with the `!GET` argument"_RedisString}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "key"_RedisString}, + {"type", "key"_RedisString}, + {"display_text", "key"_RedisString}, + {"key_spec_index", 0_RedisInt}, + }), + RedisMap({ + {"name", "value"_RedisString}, + {"type", "string"_RedisString}, + {"display_text", "value"_RedisString}, + }), + })}, + })}, + {"unsubscribe", + RedisMap({ + {"summary", "Stops listening to messages posted to channels."_RedisString}, + {"since", "2.0.0"_RedisString}, + {"group", "pubsub"_RedisString}, + {"complexity", "O(N) where N is the number of clients already subscribed to a channel."_RedisString}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "channel"_RedisString}, + {"type", "string"_RedisString}, + {"display_text", "channel"_RedisString}, + {"flags", RedisArray({ + "optional"_RedisStatus, + "multiple"_RedisStatus, + })}, + }), + })}, + })}, + {"lcs", RedisMap({ + {"summary", "Finds the longest common substring."_RedisString}, + {"since", "7.0.0"_RedisString}, + {"group", "string"_RedisString}, + {"complexity", "O(N*M) where N and M are the lengths of s1 and s2, respectively"_RedisString}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "key1"_RedisString}, + {"type", "key"_RedisString}, + {"display_text", "key1"_RedisString}, + {"key_spec_index", 0_RedisInt}, + }), + RedisMap({ + {"name", "key2"_RedisString}, + {"type", "key"_RedisString}, + {"display_text", "key2"_RedisString}, + {"key_spec_index", 0_RedisInt}, + }), + RedisMap({ + {"name", "len"_RedisString}, + {"type", "pure-token"_RedisString}, + {"display_text", "len"_RedisString}, + {"token", "LEN"_RedisString}, + {"flags", RedisArray({ + 
"optional"_RedisStatus, + })}, + }), + RedisMap({ + {"name", "idx"_RedisString}, + {"type", "pure-token"_RedisString}, + {"display_text", "idx"_RedisString}, + {"token", "IDX"_RedisString}, + {"flags", RedisArray({ + "optional"_RedisStatus, + })}, + }), + RedisMap({ + {"name", "min-match-len"_RedisString}, + {"type", "integer"_RedisString}, + {"display_text", "min-match-len"_RedisString}, + {"token", "MINMATCHLEN"_RedisString}, + {"flags", RedisArray({ + "optional"_RedisStatus, + })}, + }), + RedisMap({ + {"name", "withmatchlen"_RedisString}, + {"type", "pure-token"_RedisString}, + {"display_text", "withmatchlen"_RedisString}, + {"token", "WITHMATCHLEN"_RedisString}, + {"flags", RedisArray({ + "optional"_RedisStatus, + })}, + }), + })}, + })}, + {"lastsave", RedisMap({ + {"summary", "Returns the Unix timestamp of the last successful save to disk."_RedisString}, + {"since", "1.0.0"_RedisString}, + {"group", "server"_RedisString}, + {"complexity", "O(1)"_RedisString}, + })}, + {"xrange", + RedisMap({ + {"summary", "Returns the messages from a stream within a range of IDs."_RedisString}, + {"since", "5.0.0"_RedisString}, + {"group", "stream"_RedisString}, + {"complexity", + "O(N) with N being the number of elements being returned. If N is constant (e.g. always asking for the first 10 elements with COUNT), you can consider it O(1)."_RedisString}, + {"history", RedisSet({ + RedisArray({"6.2.0"_RedisString, "Added exclusive ranges."_RedisString}), + })}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "key"_RedisString}, + {"type", "key"_RedisString}, + {"display_text", "key"_RedisString}, + {"key_spec_index", 0_RedisInt}, + }), + RedisMap({ + {"name", "start"_RedisString}, + {"type", "string"_RedisString}, + {"display_text", "start"_RedisString}, + }), + RedisMap({ + {"name", "end"_RedisString}, + {"type", "string"_RedisString}, + {"display_text", "end"_RedisString}, + }), + RedisMap({ + {"name", "count"_RedisString}, + {"type", "integer"_RedisString}, + {"display_text", "count"_RedisString}, + {"token", "COUNT"_RedisString}, + {"flags", RedisArray({ + "optional"_RedisStatus, + })}, + }), + })}, + })}, + {"set", + RedisMap({ + {"summary", + "Sets the string value of a key, ignoring its type. 
The key is created if it doesn't exist."_RedisString}, + {"since", "1.0.0"_RedisString}, + {"group", "string"_RedisString}, + {"complexity", "O(1)"_RedisString}, + {"history", + RedisSet({ + RedisArray({"2.6.12"_RedisString, "Added the `EX`, `PX`, `NX` and `XX` options."_RedisString}), + RedisArray({"6.0.0"_RedisString, "Added the `KEEPTTL` option."_RedisString}), + RedisArray({"6.2.0"_RedisString, "Added the `GET`, `EXAT` and `PXAT` option."_RedisString}), + RedisArray({"7.0.0"_RedisString, "Allowed the `NX` and `GET` options to be used together."_RedisString}), + })}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "key"_RedisString}, + {"type", "key"_RedisString}, + {"display_text", "key"_RedisString}, + {"key_spec_index", 0_RedisInt}, + }), + RedisMap({ + {"name", "value"_RedisString}, + {"type", "string"_RedisString}, + {"display_text", "value"_RedisString}, + }), + RedisMap({ + {"name", "condition"_RedisString}, + {"type", "oneof"_RedisString}, + {"since", "2.6.12"_RedisString}, + {"flags", RedisArray({ + "optional"_RedisStatus, + })}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "nx"_RedisString}, + {"type", "pure-token"_RedisString}, + {"display_text", "nx"_RedisString}, + {"token", "NX"_RedisString}, + }), + RedisMap({ + {"name", "xx"_RedisString}, + {"type", "pure-token"_RedisString}, + {"display_text", "xx"_RedisString}, + {"token", "XX"_RedisString}, + }), + })}, + }), + RedisMap({ + {"name", "get"_RedisString}, + {"type", "pure-token"_RedisString}, + {"display_text", "get"_RedisString}, + {"token", "GET"_RedisString}, + {"since", "6.2.0"_RedisString}, + {"flags", RedisArray({ + "optional"_RedisStatus, + })}, + }), + RedisMap({ + {"name", "expiration"_RedisString}, + {"type", "oneof"_RedisString}, + {"flags", RedisArray({ + "optional"_RedisStatus, + })}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "seconds"_RedisString}, + {"type", "integer"_RedisString}, + {"display_text", "seconds"_RedisString}, + {"token", "EX"_RedisString}, + {"since", "2.6.12"_RedisString}, + }), + RedisMap({ + {"name", "milliseconds"_RedisString}, + {"type", "integer"_RedisString}, + {"display_text", "milliseconds"_RedisString}, + {"token", "PX"_RedisString}, + {"since", "2.6.12"_RedisString}, + }), + RedisMap({ + {"name", "unix-time-seconds"_RedisString}, + {"type", "unix-time"_RedisString}, + {"display_text", "unix-time-seconds"_RedisString}, + {"token", "EXAT"_RedisString}, + {"since", "6.2.0"_RedisString}, + }), + RedisMap({ + {"name", "unix-time-milliseconds"_RedisString}, + {"type", "unix-time"_RedisString}, + {"display_text", "unix-time-milliseconds"_RedisString}, + {"token", "PXAT"_RedisString}, + {"since", "6.2.0"_RedisString}, + }), + RedisMap({ + {"name", "keepttl"_RedisString}, + {"type", "pure-token"_RedisString}, + {"display_text", "keepttl"_RedisString}, + {"token", "KEEPTTL"_RedisString}, + {"since", "6.0.0"_RedisString}, + }), + })}, + }), + })}, + })}, + {"geopos", RedisMap({ + {"summary", "Returns the longitude and latitude of members from a geospatial index."_RedisString}, + {"since", "3.2.0"_RedisString}, + {"group", "geo"_RedisString}, + {"complexity", "O(N) where N is the number of members requested."_RedisString}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "key"_RedisString}, + {"type", "key"_RedisString}, + {"display_text", "key"_RedisString}, + {"key_spec_index", 0_RedisInt}, + }), + RedisMap({ + {"name", "member"_RedisString}, + {"type", "string"_RedisString}, + {"display_text", "member"_RedisString}, + {"flags", RedisArray({ + 
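+          // Illustrative invocation (comment only; key and members are hypothetical):
+          //   GEOPOS geo:sicily "Palermo" "Catania"
+          // Returns a nil position for any member not present in the index.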
"optional"_RedisStatus, + "multiple"_RedisStatus, + })}, + }), + })}, + })}, + {"bgrewriteaof", RedisMap({ + {"summary", "Asynchronously rewrites the append-only file to disk."_RedisString}, + {"since", "1.0.0"_RedisString}, + {"group", "server"_RedisString}, + {"complexity", "O(1)"_RedisString}, + })}, + {"hincrby", + RedisMap({ + {"summary", + "Increments the integer value of a field in a hash by a number. Uses 0 as initial value if the field doesn't exist."_RedisString}, + {"since", "2.0.0"_RedisString}, + {"group", "hash"_RedisString}, + {"complexity", "O(1)"_RedisString}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "key"_RedisString}, + {"type", "key"_RedisString}, + {"display_text", "key"_RedisString}, + {"key_spec_index", 0_RedisInt}, + }), + RedisMap({ + {"name", "field"_RedisString}, + {"type", "string"_RedisString}, + {"display_text", "field"_RedisString}, + }), + RedisMap({ + {"name", "increment"_RedisString}, + {"type", "integer"_RedisString}, + {"display_text", "increment"_RedisString}, + }), + })}, + })}, + {"lolwut", RedisMap({ + {"summary", "Displays computer art and the Redis version"_RedisString}, + {"since", "5.0.0"_RedisString}, + {"group", "server"_RedisString}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "version"_RedisString}, + {"type", "integer"_RedisString}, + {"display_text", "version"_RedisString}, + {"token", "VERSION"_RedisString}, + {"flags", RedisArray({ + "optional"_RedisStatus, + })}, + }), + })}, + })}, + {"get", RedisMap({ + {"summary", "Returns the string value of a key."_RedisString}, + {"since", "1.0.0"_RedisString}, + {"group", "string"_RedisString}, + {"complexity", "O(1)"_RedisString}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "key"_RedisString}, + {"type", "key"_RedisString}, + {"display_text", "key"_RedisString}, + {"key_spec_index", 0_RedisInt}, + }), + })}, + })}, + {"persist", RedisMap({ + {"summary", "Removes the expiration time of a key."_RedisString}, + {"since", "2.2.0"_RedisString}, + {"group", "generic"_RedisString}, + {"complexity", "O(1)"_RedisString}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "key"_RedisString}, + {"type", "key"_RedisString}, + {"display_text", "key"_RedisString}, + {"key_spec_index", 0_RedisInt}, + }), + })}, + })}, + {"pexpireat", + RedisMap({ + {"summary", "Sets the expiration time of a key to a Unix milliseconds timestamp."_RedisString}, + {"since", "2.6.0"_RedisString}, + {"group", "generic"_RedisString}, + {"complexity", "O(1)"_RedisString}, + {"history", RedisSet({ + RedisArray({"7.0.0"_RedisString, "Added options: `NX`, `XX`, `GT` and `LT`."_RedisString}), + })}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "key"_RedisString}, + {"type", "key"_RedisString}, + {"display_text", "key"_RedisString}, + {"key_spec_index", 0_RedisInt}, + }), + RedisMap({ + {"name", "unix-time-milliseconds"_RedisString}, + {"type", "unix-time"_RedisString}, + {"display_text", "unix-time-milliseconds"_RedisString}, + }), + RedisMap({ + {"name", "condition"_RedisString}, + {"type", "oneof"_RedisString}, + {"since", "7.0.0"_RedisString}, + {"flags", RedisArray({ + "optional"_RedisStatus, + })}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "nx"_RedisString}, + {"type", "pure-token"_RedisString}, + {"display_text", "nx"_RedisString}, + {"token", "NX"_RedisString}, + }), + RedisMap({ + {"name", "xx"_RedisString}, + {"type", "pure-token"_RedisString}, + {"display_text", "xx"_RedisString}, + {"token", "XX"_RedisString}, + }), + RedisMap({ + {"name", "gt"_RedisString}, + {"type", 
"pure-token"_RedisString}, + {"display_text", "gt"_RedisString}, + {"token", "GT"_RedisString}, + }), + RedisMap({ + {"name", "lt"_RedisString}, + {"type", "pure-token"_RedisString}, + {"display_text", "lt"_RedisString}, + {"token", "LT"_RedisString}, + }), + })}, + }), + })}, + })}, + {"sunionstore", RedisMap({ + {"summary", "Stores the union of multiple sets in a key."_RedisString}, + {"since", "1.0.0"_RedisString}, + {"group", "set"_RedisString}, + {"complexity", "O(N) where N is the total number of elements in all given sets."_RedisString}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "destination"_RedisString}, + {"type", "key"_RedisString}, + {"display_text", "destination"_RedisString}, + {"key_spec_index", 0_RedisInt}, + }), + RedisMap({ + {"name", "key"_RedisString}, + {"type", "key"_RedisString}, + {"display_text", "key"_RedisString}, + {"key_spec_index", 1_RedisInt}, + {"flags", RedisArray({ + "multiple"_RedisStatus, + })}, + }), + })}, + })}, + {"migrate", + RedisMap({ + {"summary", "Atomically transfers a key from one Redis instance to another."_RedisString}, + {"since", "2.6.0"_RedisString}, + {"group", "generic"_RedisString}, + {"complexity", + "This command actually executes a DUMP+DEL in the source instance, and a RESTORE in the target instance. See the pages of these commands for time complexity. Also an O(N) data transfer between the two instances is performed."_RedisString}, + {"history", RedisSet({ + RedisArray({"3.0.0"_RedisString, "Added the `COPY` and `REPLACE` options."_RedisString}), + RedisArray({"3.0.6"_RedisString, "Added the `KEYS` option."_RedisString}), + RedisArray({"4.0.7"_RedisString, "Added the `AUTH` option."_RedisString}), + RedisArray({"6.0.0"_RedisString, "Added the `AUTH2` option."_RedisString}), + })}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "host"_RedisString}, + {"type", "string"_RedisString}, + {"display_text", "host"_RedisString}, + }), + RedisMap({ + {"name", "port"_RedisString}, + {"type", "integer"_RedisString}, + {"display_text", "port"_RedisString}, + }), + RedisMap({ + {"name", "key-selector"_RedisString}, + {"type", "oneof"_RedisString}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "key"_RedisString}, + {"type", "key"_RedisString}, + {"display_text", "key"_RedisString}, + {"key_spec_index", 0_RedisInt}, + }), + RedisMap({ + {"name", "empty-string"_RedisString}, + {"type", "pure-token"_RedisString}, + {"display_text", "empty-string"_RedisString}, + {"token", ""_RedisString}, + }), + })}, + }), + RedisMap({ + {"name", "destination-db"_RedisString}, + {"type", "integer"_RedisString}, + {"display_text", "destination-db"_RedisString}, + }), + RedisMap({ + {"name", "timeout"_RedisString}, + {"type", "integer"_RedisString}, + {"display_text", "timeout"_RedisString}, + }), + RedisMap({ + {"name", "copy"_RedisString}, + {"type", "pure-token"_RedisString}, + {"display_text", "copy"_RedisString}, + {"token", "COPY"_RedisString}, + {"since", "3.0.0"_RedisString}, + {"flags", RedisArray({ + "optional"_RedisStatus, + })}, + }), + RedisMap({ + {"name", "replace"_RedisString}, + {"type", "pure-token"_RedisString}, + {"display_text", "replace"_RedisString}, + {"token", "REPLACE"_RedisString}, + {"since", "3.0.0"_RedisString}, + {"flags", RedisArray({ + "optional"_RedisStatus, + })}, + }), + RedisMap({ + {"name", "authentication"_RedisString}, + {"type", "oneof"_RedisString}, + {"flags", RedisArray({ + "optional"_RedisStatus, + })}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "auth"_RedisString}, + {"type", 
"string"_RedisString}, + {"display_text", "password"_RedisString}, + {"token", "AUTH"_RedisString}, + {"since", "4.0.7"_RedisString}, + }), + RedisMap({ + {"name", "auth2"_RedisString}, + {"type", "block"_RedisString}, + {"token", "AUTH2"_RedisString}, + {"since", "6.0.0"_RedisString}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "username"_RedisString}, + {"type", "string"_RedisString}, + {"display_text", "username"_RedisString}, + }), + RedisMap({ + {"name", "password"_RedisString}, + {"type", "string"_RedisString}, + {"display_text", "password"_RedisString}, + }), + })}, + }), + })}, + }), + RedisMap({ + {"name", "keys"_RedisString}, + {"type", "key"_RedisString}, + {"display_text", "key"_RedisString}, + {"key_spec_index", 1_RedisInt}, + {"token", "KEYS"_RedisString}, + {"since", "3.0.6"_RedisString}, + {"flags", RedisArray({ + "optional"_RedisStatus, + "multiple"_RedisStatus, + })}, + }), + })}, + })}, + {"xadd", + RedisMap({ + {"summary", "Appends a new message to a stream. Creates the key if it doesn't exist."_RedisString}, + {"since", "5.0.0"_RedisString}, + {"group", "stream"_RedisString}, + {"complexity", + "O(1) when adding a new entry, O(N) when trimming where N being the number of entries evicted."_RedisString}, + {"history", + RedisSet({ + RedisArray( + {"6.2.0"_RedisString, + "Added the `NOMKSTREAM` option, `MINID` trimming strategy and the `LIMIT` option."_RedisString}), + RedisArray({"7.0.0"_RedisString, "Added support for the `-*` explicit ID form."_RedisString}), + })}, + {"arguments", + RedisArray({ + RedisMap({ + {"name", "key"_RedisString}, + {"type", "key"_RedisString}, + {"display_text", "key"_RedisString}, + {"key_spec_index", 0_RedisInt}, + }), + RedisMap({ + {"name", "nomkstream"_RedisString}, + {"type", "pure-token"_RedisString}, + {"display_text", "nomkstream"_RedisString}, + {"token", "NOMKSTREAM"_RedisString}, + {"since", "6.2.0"_RedisString}, + {"flags", RedisArray({ + "optional"_RedisStatus, + })}, + }), + RedisMap({ + {"name", "trim"_RedisString}, + {"type", "block"_RedisString}, + {"flags", RedisArray({ + "optional"_RedisStatus, + })}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "strategy"_RedisString}, + {"type", "oneof"_RedisString}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "maxlen"_RedisString}, + {"type", "pure-token"_RedisString}, + {"display_text", "maxlen"_RedisString}, + {"token", "MAXLEN"_RedisString}, + }), + RedisMap({ + {"name", "minid"_RedisString}, + {"type", "pure-token"_RedisString}, + {"display_text", "minid"_RedisString}, + {"token", "MINID"_RedisString}, + {"since", "6.2.0"_RedisString}, + }), + })}, + }), + RedisMap({ + {"name", "operator"_RedisString}, + {"type", "oneof"_RedisString}, + {"flags", RedisArray({ + "optional"_RedisStatus, + })}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "equal"_RedisString}, + {"type", "pure-token"_RedisString}, + {"display_text", "equal"_RedisString}, + {"token", "="_RedisString}, + }), + RedisMap({ + {"name", "approximately"_RedisString}, + {"type", "pure-token"_RedisString}, + {"display_text", "approximately"_RedisString}, + {"token", "~"_RedisString}, + }), + })}, + }), + RedisMap({ + {"name", "threshold"_RedisString}, + {"type", "string"_RedisString}, + {"display_text", "threshold"_RedisString}, + }), + RedisMap({ + {"name", "count"_RedisString}, + {"type", "integer"_RedisString}, + {"display_text", "count"_RedisString}, + {"token", "LIMIT"_RedisString}, + {"since", "6.2.0"_RedisString}, + {"flags", RedisArray({ + "optional"_RedisStatus, + })}, + }), + })}, + 
}), + RedisMap({ + {"name", "id-selector"_RedisString}, + {"type", "oneof"_RedisString}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "auto-id"_RedisString}, + {"type", "pure-token"_RedisString}, + {"display_text", "auto-id"_RedisString}, + {"token", "*"_RedisString}, + }), + RedisMap({ + {"name", "id"_RedisString}, + {"type", "string"_RedisString}, + {"display_text", "id"_RedisString}, + }), + })}, + }), + RedisMap({ + {"name", "data"_RedisString}, + {"type", "block"_RedisString}, + {"flags", RedisArray({ + "multiple"_RedisStatus, + })}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "field"_RedisString}, + {"type", "string"_RedisString}, + {"display_text", "field"_RedisString}, + }), + RedisMap({ + {"name", "value"_RedisString}, + {"type", "string"_RedisString}, + {"display_text", "value"_RedisString}, + }), + })}, + }), + })}, + })}, + {"sinterstore", + RedisMap({ + {"summary", "Stores the intersect of multiple sets in a key."_RedisString}, + {"since", "1.0.0"_RedisString}, + {"group", "set"_RedisString}, + {"complexity", + "O(N*M) worst case where N is the cardinality of the smallest set and M is the number of sets."_RedisString}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "destination"_RedisString}, + {"type", "key"_RedisString}, + {"display_text", "destination"_RedisString}, + {"key_spec_index", 0_RedisInt}, + }), + RedisMap({ + {"name", "key"_RedisString}, + {"type", "key"_RedisString}, + {"display_text", "key"_RedisString}, + {"key_spec_index", 1_RedisInt}, + {"flags", RedisArray({ + "multiple"_RedisStatus, + })}, + }), + })}, + })}, + {"zrank", + RedisMap({ + {"summary", "Returns the index of a member in a sorted set ordered by ascending scores."_RedisString}, + {"since", "2.0.0"_RedisString}, + {"group", "sorted-set"_RedisString}, + {"complexity", "O(log(N))"_RedisString}, + {"history", RedisSet({ + RedisArray({"7.2.0"_RedisString, "Added the optional `WITHSCORE` argument."_RedisString}), + })}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "key"_RedisString}, + {"type", "key"_RedisString}, + {"display_text", "key"_RedisString}, + {"key_spec_index", 0_RedisInt}, + }), + RedisMap({ + {"name", "member"_RedisString}, + {"type", "string"_RedisString}, + {"display_text", "member"_RedisString}, + }), + RedisMap({ + {"name", "withscore"_RedisString}, + {"type", "pure-token"_RedisString}, + {"display_text", "withscore"_RedisString}, + {"token", "WITHSCORE"_RedisString}, + {"flags", RedisArray({ + "optional"_RedisStatus, + })}, + }), + })}, + })}, + {"pexpiretime", + RedisMap({ + {"summary", "Returns the expiration time of a key as a Unix milliseconds timestamp."_RedisString}, + {"since", "7.0.0"_RedisString}, + {"group", "generic"_RedisString}, + {"complexity", "O(1)"_RedisString}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "key"_RedisString}, + {"type", "key"_RedisString}, + {"display_text", "key"_RedisString}, + {"key_spec_index", 0_RedisInt}, + }), + })}, + })}, + {"bitop", RedisMap({ + {"summary", "Performs bitwise operations on multiple strings, and stores the result."_RedisString}, + {"since", "2.6.0"_RedisString}, + {"group", "bitmap"_RedisString}, + {"complexity", "O(N)"_RedisString}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "operation"_RedisString}, + {"type", "oneof"_RedisString}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "and"_RedisString}, + {"type", "pure-token"_RedisString}, + {"display_text", "and"_RedisString}, + {"token", "AND"_RedisString}, + }), + RedisMap({ + {"name", "or"_RedisString}, + {"type", 
"pure-token"_RedisString}, + {"display_text", "or"_RedisString}, + {"token", "OR"_RedisString}, + }), + RedisMap({ + {"name", "xor"_RedisString}, + {"type", "pure-token"_RedisString}, + {"display_text", "xor"_RedisString}, + {"token", "XOR"_RedisString}, + }), + RedisMap({ + {"name", "not"_RedisString}, + {"type", "pure-token"_RedisString}, + {"display_text", "not"_RedisString}, + {"token", "NOT"_RedisString}, + }), + })}, + }), + RedisMap({ + {"name", "destkey"_RedisString}, + {"type", "key"_RedisString}, + {"display_text", "destkey"_RedisString}, + {"key_spec_index", 0_RedisInt}, + }), + RedisMap({ + {"name", "key"_RedisString}, + {"type", "key"_RedisString}, + {"display_text", "key"_RedisString}, + {"key_spec_index", 1_RedisInt}, + {"flags", RedisArray({ + "multiple"_RedisStatus, + })}, + }), + })}, + })}, + {"wait", + RedisMap({ + {"summary", + "Blocks until the asynchronous replication of all preceding write commands sent by the connection is completed."_RedisString}, + {"since", "3.0.0"_RedisString}, + {"group", "generic"_RedisString}, + {"complexity", "O(1)"_RedisString}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "numreplicas"_RedisString}, + {"type", "integer"_RedisString}, + {"display_text", "numreplicas"_RedisString}, + }), + RedisMap({ + {"name", "timeout"_RedisString}, + {"type", "integer"_RedisString}, + {"display_text", "timeout"_RedisString}, + }), + })}, + })}, + {"hexists", RedisMap({ + {"summary", "Determines whether a field exists in a hash."_RedisString}, + {"since", "2.0.0"_RedisString}, + {"group", "hash"_RedisString}, + {"complexity", "O(1)"_RedisString}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "key"_RedisString}, + {"type", "key"_RedisString}, + {"display_text", "key"_RedisString}, + {"key_spec_index", 0_RedisInt}, + }), + RedisMap({ + {"name", "field"_RedisString}, + {"type", "string"_RedisString}, + {"display_text", "field"_RedisString}, + }), + })}, + })}, + {"strlen", RedisMap({ + {"summary", "Returns the length of a string value."_RedisString}, + {"since", "2.2.0"_RedisString}, + {"group", "string"_RedisString}, + {"complexity", "O(1)"_RedisString}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "key"_RedisString}, + {"type", "key"_RedisString}, + {"display_text", "key"_RedisString}, + {"key_spec_index", 0_RedisInt}, + }), + })}, + })}, + {"sort_ro", + RedisMap({ + {"summary", "Returns the sorted elements of a list, a set, or a sorted set."_RedisString}, + {"since", "7.0.0"_RedisString}, + {"group", "generic"_RedisString}, + {"complexity", + "O(N+M*log(M)) where N is the number of elements in the list or set to sort, and M the number of returned elements. 
When the elements are not sorted, complexity is O(N)."_RedisString}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "key"_RedisString}, + {"type", "key"_RedisString}, + {"display_text", "key"_RedisString}, + {"key_spec_index", 0_RedisInt}, + }), + RedisMap({ + {"name", "by-pattern"_RedisString}, + {"type", "pattern"_RedisString}, + {"display_text", "pattern"_RedisString}, + {"key_spec_index", 1_RedisInt}, + {"token", "BY"_RedisString}, + {"flags", RedisArray({ + "optional"_RedisStatus, + })}, + }), + RedisMap({ + {"name", "limit"_RedisString}, + {"type", "block"_RedisString}, + {"token", "LIMIT"_RedisString}, + {"flags", RedisArray({ + "optional"_RedisStatus, + })}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "offset"_RedisString}, + {"type", "integer"_RedisString}, + {"display_text", "offset"_RedisString}, + }), + RedisMap({ + {"name", "count"_RedisString}, + {"type", "integer"_RedisString}, + {"display_text", "count"_RedisString}, + }), + })}, + }), + RedisMap({ + {"name", "get-pattern"_RedisString}, + {"type", "pattern"_RedisString}, + {"display_text", "pattern"_RedisString}, + {"key_spec_index", 1_RedisInt}, + {"token", "GET"_RedisString}, + {"flags", RedisArray({ + "optional"_RedisStatus, + "multiple"_RedisStatus, + "multiple_token"_RedisStatus, + })}, + }), + RedisMap({ + {"name", "order"_RedisString}, + {"type", "oneof"_RedisString}, + {"flags", RedisArray({ + "optional"_RedisStatus, + })}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "asc"_RedisString}, + {"type", "pure-token"_RedisString}, + {"display_text", "asc"_RedisString}, + {"token", "ASC"_RedisString}, + }), + RedisMap({ + {"name", "desc"_RedisString}, + {"type", "pure-token"_RedisString}, + {"display_text", "desc"_RedisString}, + {"token", "DESC"_RedisString}, + }), + })}, + }), + RedisMap({ + {"name", "sorting"_RedisString}, + {"type", "pure-token"_RedisString}, + {"display_text", "sorting"_RedisString}, + {"token", "ALPHA"_RedisString}, + {"flags", RedisArray({ + "optional"_RedisStatus, + })}, + }), + })}, + })}, + {"subscribe", RedisMap({ + {"summary", "Listens for messages published to channels."_RedisString}, + {"since", "2.0.0"_RedisString}, + {"group", "pubsub"_RedisString}, + {"complexity", "O(N) where N is the number of channels to subscribe to."_RedisString}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "channel"_RedisString}, + {"type", "string"_RedisString}, + {"display_text", "channel"_RedisString}, + {"flags", RedisArray({ + "multiple"_RedisStatus, + })}, + }), + })}, + })}, + {"touch", + RedisMap({ + {"summary", + "Returns the number of existing keys out of those specified after updating the time they were last accessed."_RedisString}, + {"since", "3.2.1"_RedisString}, + {"group", "generic"_RedisString}, + {"complexity", "O(N) where N is the number of keys that will be touched."_RedisString}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "key"_RedisString}, + {"type", "key"_RedisString}, + {"display_text", "key"_RedisString}, + {"key_spec_index", 0_RedisInt}, + {"flags", RedisArray({ + "multiple"_RedisStatus, + })}, + }), + })}, + })}, + {"hvals", RedisMap({ + {"summary", "Returns all values in a hash."_RedisString}, + {"since", "2.0.0"_RedisString}, + {"group", "hash"_RedisString}, + {"complexity", "O(N) where N is the size of the hash."_RedisString}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "key"_RedisString}, + {"type", "key"_RedisString}, + {"display_text", "key"_RedisString}, + {"key_spec_index", 0_RedisInt}, + }), + })}, + })}, + {"zmpop", + RedisMap({ 
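+ // Schema note (sketch; field meanings inferred from this table, and the
+ // `_RedisString`/`_RedisInt`/`_RedisStatus` suffixes are assumed to be
+ // user-defined literals that wrap raw values into typed reply nodes):
+ // each entry maps a command name to a RedisMap shaped like
+ //
+ //   {"<command>", RedisMap({
+ //       {"summary", "..."_RedisString},      // one-line description
+ //       {"since", "<version>"_RedisString},  // first Redis version with the command
+ //       {"group", "<family>"_RedisString},   // e.g. "sorted-set", "cluster"
+ //       {"complexity", "..."_RedisString},   // big-O description
+ //       {"arguments", RedisArray({ /* name/type/token/flags specs */ })},
+ //   })},
+ //
+ // mirroring one element of the `COMMAND DOCS` reply.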
+ {"summary", + "Returns the highest- or lowest-scoring members from one or more sorted sets after removing them. Deletes the sorted set if the last member was popped."_RedisString}, + {"since", "7.0.0"_RedisString}, + {"group", "sorted-set"_RedisString}, + {"complexity", + "O(K) + O(M*log(N)) where K is the number of provided keys, N being the number of elements in the sorted set, and M being the number of elements popped."_RedisString}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "numkeys"_RedisString}, + {"type", "integer"_RedisString}, + {"display_text", "numkeys"_RedisString}, + }), + RedisMap({ + {"name", "key"_RedisString}, + {"type", "key"_RedisString}, + {"display_text", "key"_RedisString}, + {"key_spec_index", 0_RedisInt}, + {"flags", RedisArray({ + "multiple"_RedisStatus, + })}, + }), + RedisMap({ + {"name", "where"_RedisString}, + {"type", "oneof"_RedisString}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "min"_RedisString}, + {"type", "pure-token"_RedisString}, + {"display_text", "min"_RedisString}, + {"token", "MIN"_RedisString}, + }), + RedisMap({ + {"name", "max"_RedisString}, + {"type", "pure-token"_RedisString}, + {"display_text", "max"_RedisString}, + {"token", "MAX"_RedisString}, + }), + })}, + }), + RedisMap({ + {"name", "count"_RedisString}, + {"type", "integer"_RedisString}, + {"display_text", "count"_RedisString}, + {"token", "COUNT"_RedisString}, + {"flags", RedisArray({ + "optional"_RedisStatus, + })}, + }), + })}, + })}, + {"object", + RedisMap({ + {"summary", "A container for object introspection commands."_RedisString}, + {"since", "2.2.3"_RedisString}, + {"group", "generic"_RedisString}, + {"complexity", "Depends on subcommand."_RedisString}, + {"subcommands", + RedisMap({ + {"object|freq", + RedisMap({ + {"summary", "Returns the logarithmic access frequency counter of a Redis object."_RedisString}, + {"since", "4.0.0"_RedisString}, + {"group", "generic"_RedisString}, + {"complexity", "O(1)"_RedisString}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "key"_RedisString}, + {"type", "key"_RedisString}, + {"display_text", "key"_RedisString}, + {"key_spec_index", 0_RedisInt}, + }), + })}, + })}, + {"object|encoding", RedisMap({ + {"summary", "Returns the internal encoding of a Redis object."_RedisString}, + {"since", "2.2.3"_RedisString}, + {"group", "generic"_RedisString}, + {"complexity", "O(1)"_RedisString}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "key"_RedisString}, + {"type", "key"_RedisString}, + {"display_text", "key"_RedisString}, + {"key_spec_index", 0_RedisInt}, + }), + })}, + })}, + {"object|idletime", + RedisMap({ + {"summary", "Returns the time since the last access to a Redis object."_RedisString}, + {"since", "2.2.3"_RedisString}, + {"group", "generic"_RedisString}, + {"complexity", "O(1)"_RedisString}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "key"_RedisString}, + {"type", "key"_RedisString}, + {"display_text", "key"_RedisString}, + {"key_spec_index", 0_RedisInt}, + }), + })}, + })}, + {"object|help", RedisMap({ + {"summary", "Returns helpful text about the different subcommands."_RedisString}, + {"since", "6.2.0"_RedisString}, + {"group", "generic"_RedisString}, + {"complexity", "O(1)"_RedisString}, + })}, + {"object|refcount", RedisMap({ + {"summary", "Returns the reference count of a value of a key."_RedisString}, + {"since", "2.2.3"_RedisString}, + {"group", "generic"_RedisString}, + {"complexity", "O(1)"_RedisString}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "key"_RedisString}, + 
{"type", "key"_RedisString}, + {"display_text", "key"_RedisString}, + {"key_spec_index", 0_RedisInt}, + }), + })}, + })}, + })}, + })}, + {"smembers", RedisMap({ + {"summary", "Returns all members of a set."_RedisString}, + {"since", "1.0.0"_RedisString}, + {"group", "set"_RedisString}, + {"complexity", "O(N) where N is the set cardinality."_RedisString}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "key"_RedisString}, + {"type", "key"_RedisString}, + {"display_text", "key"_RedisString}, + {"key_spec_index", 0_RedisInt}, + }), + })}, + })}, + {"save", RedisMap({ + {"summary", "Synchronously saves the database(s) to disk."_RedisString}, + {"since", "1.0.0"_RedisString}, + {"group", "server"_RedisString}, + {"complexity", "O(N) where N is the total number of keys in all databases"_RedisString}, + })}, + {"script", + RedisMap({ + {"summary", "A container for Lua scripts management commands."_RedisString}, + {"since", "2.6.0"_RedisString}, + {"group", "scripting"_RedisString}, + {"complexity", "Depends on subcommand."_RedisString}, + {"subcommands", + RedisMap({ + {"script|exists", + RedisMap({ + {"summary", "Determines whether server-side Lua scripts exist in the script cache."_RedisString}, + {"since", "2.6.0"_RedisString}, + {"group", "scripting"_RedisString}, + {"complexity", + "O(N) with N being the number of scripts to check (so checking a single script is an O(1) operation)."_RedisString}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "sha1"_RedisString}, + {"type", "string"_RedisString}, + {"display_text", "sha1"_RedisString}, + {"flags", RedisArray({ + "multiple"_RedisStatus, + })}, + }), + })}, + })}, + {"script|help", RedisMap({ + {"summary", "Returns helpful text about the different subcommands."_RedisString}, + {"since", "5.0.0"_RedisString}, + {"group", "scripting"_RedisString}, + {"complexity", "O(1)"_RedisString}, + })}, + {"script|debug", RedisMap({ + {"summary", "Sets the debug mode of server-side Lua scripts."_RedisString}, + {"since", "3.2.0"_RedisString}, + {"group", "scripting"_RedisString}, + {"complexity", "O(1)"_RedisString}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "mode"_RedisString}, + {"type", "oneof"_RedisString}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "yes"_RedisString}, + {"type", "pure-token"_RedisString}, + {"display_text", "yes"_RedisString}, + {"token", "YES"_RedisString}, + }), + RedisMap({ + {"name", "sync"_RedisString}, + {"type", "pure-token"_RedisString}, + {"display_text", "sync"_RedisString}, + {"token", "SYNC"_RedisString}, + }), + RedisMap({ + {"name", "no"_RedisString}, + {"type", "pure-token"_RedisString}, + {"display_text", "no"_RedisString}, + {"token", "NO"_RedisString}, + }), + })}, + }), + })}, + })}, + {"script|kill", RedisMap({ + {"summary", "Terminates a server-side Lua script during execution."_RedisString}, + {"since", "2.6.0"_RedisString}, + {"group", "scripting"_RedisString}, + {"complexity", "O(1)"_RedisString}, + })}, + {"script|flush", + RedisMap({ + {"summary", "Removes all server-side Lua scripts from the script cache."_RedisString}, + {"since", "2.6.0"_RedisString}, + {"group", "scripting"_RedisString}, + {"complexity", "O(N) with N being the number of scripts in cache"_RedisString}, + {"history", RedisSet({ + RedisArray({"6.2.0"_RedisString, + "Added the `ASYNC` and `SYNC` flushing mode modifiers."_RedisString}), + })}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "flush-type"_RedisString}, + {"type", "oneof"_RedisString}, + {"since", "6.2.0"_RedisString}, + {"flags", RedisArray({ 
+ "optional"_RedisStatus, + })}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "async"_RedisString}, + {"type", "pure-token"_RedisString}, + {"display_text", "async"_RedisString}, + {"token", "ASYNC"_RedisString}, + }), + RedisMap({ + {"name", "sync"_RedisString}, + {"type", "pure-token"_RedisString}, + {"display_text", "sync"_RedisString}, + {"token", "SYNC"_RedisString}, + }), + })}, + }), + })}, + })}, + {"script|load", + RedisMap({ + {"summary", "Loads a server-side Lua script to the script cache."_RedisString}, + {"since", "2.6.0"_RedisString}, + {"group", "scripting"_RedisString}, + {"complexity", "O(N) with N being the length in bytes of the script body."_RedisString}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "script"_RedisString}, + {"type", "string"_RedisString}, + {"display_text", "script"_RedisString}, + }), + })}, + })}, + })}, + })}, + {"zrevrangebylex", + RedisMap({ + {"summary", "Returns members in a sorted set within a lexicographical range in reverse order."_RedisString}, + {"since", "2.8.9"_RedisString}, + {"group", "sorted-set"_RedisString}, + {"complexity", + "O(log(N)+M) with N being the number of elements in the sorted set and M the number of elements being returned. If M is constant (e.g. always asking for the first 10 elements with LIMIT), you can consider it O(log(N))."_RedisString}, + {"doc_flags", RedisSet({ + "deprecated"_RedisStatus, + })}, + {"deprecated_since", "6.2.0"_RedisString}, + {"replaced_by", "`ZRANGE` with the `REV` and `BYLEX` arguments"_RedisString}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "key"_RedisString}, + {"type", "key"_RedisString}, + {"display_text", "key"_RedisString}, + {"key_spec_index", 0_RedisInt}, + }), + RedisMap({ + {"name", "max"_RedisString}, + {"type", "string"_RedisString}, + {"display_text", "max"_RedisString}, + }), + RedisMap({ + {"name", "min"_RedisString}, + {"type", "string"_RedisString}, + {"display_text", "min"_RedisString}, + }), + RedisMap({ + {"name", "limit"_RedisString}, + {"type", "block"_RedisString}, + {"token", "LIMIT"_RedisString}, + {"flags", RedisArray({ + "optional"_RedisStatus, + })}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "offset"_RedisString}, + {"type", "integer"_RedisString}, + {"display_text", "offset"_RedisString}, + }), + RedisMap({ + {"name", "count"_RedisString}, + {"type", "integer"_RedisString}, + {"display_text", "count"_RedisString}, + }), + })}, + }), + })}, + })}, + {"asking", RedisMap({ + {"summary", "Signals that a cluster client is following an -ASK redirect."_RedisString}, + {"since", "3.0.0"_RedisString}, + {"group", "cluster"_RedisString}, + {"complexity", "O(1)"_RedisString}, + })}, + {"hscan", + RedisMap({ + {"summary", "Iterates over fields and values of a hash."_RedisString}, + {"since", "2.8.0"_RedisString}, + {"group", "hash"_RedisString}, + {"complexity", + "O(1) for every call. O(N) for a complete iteration, including enough command calls for the cursor to return back to 0. 
N is the number of elements inside the collection."_RedisString}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "key"_RedisString}, + {"type", "key"_RedisString}, + {"display_text", "key"_RedisString}, + {"key_spec_index", 0_RedisInt}, + }), + RedisMap({ + {"name", "cursor"_RedisString}, + {"type", "integer"_RedisString}, + {"display_text", "cursor"_RedisString}, + }), + RedisMap({ + {"name", "pattern"_RedisString}, + {"type", "pattern"_RedisString}, + {"display_text", "pattern"_RedisString}, + {"token", "MATCH"_RedisString}, + {"flags", RedisArray({ + "optional"_RedisStatus, + })}, + }), + RedisMap({ + {"name", "count"_RedisString}, + {"type", "integer"_RedisString}, + {"display_text", "count"_RedisString}, + {"token", "COUNT"_RedisString}, + {"flags", RedisArray({ + "optional"_RedisStatus, + })}, + }), + })}, + })}, + {"expiretime", RedisMap({ + {"summary", "Returns the expiration time of a key as a Unix timestamp."_RedisString}, + {"since", "7.0.0"_RedisString}, + {"group", "generic"_RedisString}, + {"complexity", "O(1)"_RedisString}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "key"_RedisString}, + {"type", "key"_RedisString}, + {"display_text", "key"_RedisString}, + {"key_spec_index", 0_RedisInt}, + }), + })}, + })}, + {"scard", RedisMap({ + {"summary", "Returns the number of members in a set."_RedisString}, + {"since", "1.0.0"_RedisString}, + {"group", "set"_RedisString}, + {"complexity", "O(1)"_RedisString}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "key"_RedisString}, + {"type", "key"_RedisString}, + {"display_text", "key"_RedisString}, + {"key_spec_index", 0_RedisInt}, + }), + })}, + })}, + {"function", + RedisMap({ + {"summary", "A container for function commands."_RedisString}, + {"since", "7.0.0"_RedisString}, + {"group", "scripting"_RedisString}, + {"complexity", "Depends on subcommand."_RedisString}, + {"subcommands", + RedisMap({ + {"function|delete", RedisMap({ + {"summary", "Deletes a library and its functions."_RedisString}, + {"since", "7.0.0"_RedisString}, + {"group", "scripting"_RedisString}, + {"complexity", "O(1)"_RedisString}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "library-name"_RedisString}, + {"type", "string"_RedisString}, + {"display_text", "library-name"_RedisString}, + }), + })}, + })}, + {"function|kill", RedisMap({ + {"summary", "Terminates a function during execution."_RedisString}, + {"since", "7.0.0"_RedisString}, + {"group", "scripting"_RedisString}, + {"complexity", "O(1)"_RedisString}, + })}, + {"function|flush", RedisMap({ + {"summary", "Deletes all libraries and functions."_RedisString}, + {"since", "7.0.0"_RedisString}, + {"group", "scripting"_RedisString}, + {"complexity", "O(N) where N is the number of functions deleted"_RedisString}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "flush-type"_RedisString}, + {"type", "oneof"_RedisString}, + {"flags", RedisArray({ + "optional"_RedisStatus, + })}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "async"_RedisString}, + {"type", "pure-token"_RedisString}, + {"display_text", "async"_RedisString}, + {"token", "ASYNC"_RedisString}, + }), + RedisMap({ + {"name", "sync"_RedisString}, + {"type", "pure-token"_RedisString}, + {"display_text", "sync"_RedisString}, + {"token", "SYNC"_RedisString}, + }), + })}, + }), + })}, + })}, + {"function|load", RedisMap({ + {"summary", "Creates a library."_RedisString}, + {"since", "7.0.0"_RedisString}, + {"group", "scripting"_RedisString}, + {"complexity", "O(1) (considering compilation time is 
redundant)"_RedisString}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "replace"_RedisString}, + {"type", "pure-token"_RedisString}, + {"display_text", "replace"_RedisString}, + {"token", "REPLACE"_RedisString}, + {"flags", RedisArray({ + "optional"_RedisStatus, + })}, + }), + RedisMap({ + {"name", "function-code"_RedisString}, + {"type", "string"_RedisString}, + {"display_text", "function-code"_RedisString}, + }), + })}, + })}, + {"function|restore", + RedisMap({ + {"summary", "Restores all libraries from a payload."_RedisString}, + {"since", "7.0.0"_RedisString}, + {"group", "scripting"_RedisString}, + {"complexity", "O(N) where N is the number of functions on the payload"_RedisString}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "serialized-value"_RedisString}, + {"type", "string"_RedisString}, + {"display_text", "serialized-value"_RedisString}, + }), + RedisMap({ + {"name", "policy"_RedisString}, + {"type", "oneof"_RedisString}, + {"flags", RedisArray({ + "optional"_RedisStatus, + })}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "flush"_RedisString}, + {"type", "pure-token"_RedisString}, + {"display_text", "flush"_RedisString}, + {"token", "FLUSH"_RedisString}, + }), + RedisMap({ + {"name", "append"_RedisString}, + {"type", "pure-token"_RedisString}, + {"display_text", "append"_RedisString}, + {"token", "APPEND"_RedisString}, + }), + RedisMap({ + {"name", "replace"_RedisString}, + {"type", "pure-token"_RedisString}, + {"display_text", "replace"_RedisString}, + {"token", "REPLACE"_RedisString}, + }), + })}, + }), + })}, + })}, + {"function|dump", RedisMap({ + {"summary", "Dumps all libraries into a serialized binary payload."_RedisString}, + {"since", "7.0.0"_RedisString}, + {"group", "scripting"_RedisString}, + {"complexity", "O(N) where N is the number of functions"_RedisString}, + })}, + {"function|help", RedisMap({ + {"summary", "Returns helpful text about the different subcommands."_RedisString}, + {"since", "7.0.0"_RedisString}, + {"group", "scripting"_RedisString}, + {"complexity", "O(1)"_RedisString}, + })}, + {"function|list", RedisMap({ + {"summary", "Returns information about all libraries."_RedisString}, + {"since", "7.0.0"_RedisString}, + {"group", "scripting"_RedisString}, + {"complexity", "O(N) where N is the number of functions"_RedisString}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "library-name-pattern"_RedisString}, + {"type", "string"_RedisString}, + {"display_text", "library-name-pattern"_RedisString}, + {"token", "LIBRARYNAME"_RedisString}, + {"flags", RedisArray({ + "optional"_RedisStatus, + })}, + }), + RedisMap({ + {"name", "withcode"_RedisString}, + {"type", "pure-token"_RedisString}, + {"display_text", "withcode"_RedisString}, + {"token", "WITHCODE"_RedisString}, + {"flags", RedisArray({ + "optional"_RedisStatus, + })}, + }), + })}, + })}, + {"function|stats", RedisMap({ + {"summary", "Returns information about a function during execution."_RedisString}, + {"since", "7.0.0"_RedisString}, + {"group", "scripting"_RedisString}, + {"complexity", "O(1)"_RedisString}, + })}, + })}, + })}, + {"georadiusbymember", + RedisMap({ + {"summary", + "Queries a geospatial index for members within a distance from a member, optionally stores the result."_RedisString}, + {"since", "3.2.0"_RedisString}, + {"group", "geo"_RedisString}, + {"complexity", + "O(N+log(M)) where N is the number of elements inside the bounding box of the circular area delimited by center and radius and M is the number of items inside the index."_RedisString}, + 
{"doc_flags", RedisSet({ + "deprecated"_RedisStatus, + })}, + {"deprecated_since", "6.2.0"_RedisString}, + {"replaced_by", "`GEOSEARCH` and `GEOSEARCHSTORE` with the `BYRADIUS` and `FROMMEMBER` arguments"_RedisString}, + {"history", RedisSet({ + RedisArray({"7.0.0"_RedisString, "Added support for uppercase unit names."_RedisString}), + })}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "key"_RedisString}, + {"type", "key"_RedisString}, + {"display_text", "key"_RedisString}, + {"key_spec_index", 0_RedisInt}, + }), + RedisMap({ + {"name", "member"_RedisString}, + {"type", "string"_RedisString}, + {"display_text", "member"_RedisString}, + }), + RedisMap({ + {"name", "radius"_RedisString}, + {"type", "double"_RedisString}, + {"display_text", "radius"_RedisString}, + }), + RedisMap({ + {"name", "unit"_RedisString}, + {"type", "oneof"_RedisString}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "m"_RedisString}, + {"type", "pure-token"_RedisString}, + {"display_text", "m"_RedisString}, + {"token", "M"_RedisString}, + }), + RedisMap({ + {"name", "km"_RedisString}, + {"type", "pure-token"_RedisString}, + {"display_text", "km"_RedisString}, + {"token", "KM"_RedisString}, + }), + RedisMap({ + {"name", "ft"_RedisString}, + {"type", "pure-token"_RedisString}, + {"display_text", "ft"_RedisString}, + {"token", "FT"_RedisString}, + }), + RedisMap({ + {"name", "mi"_RedisString}, + {"type", "pure-token"_RedisString}, + {"display_text", "mi"_RedisString}, + {"token", "MI"_RedisString}, + }), + })}, + }), + RedisMap({ + {"name", "withcoord"_RedisString}, + {"type", "pure-token"_RedisString}, + {"display_text", "withcoord"_RedisString}, + {"token", "WITHCOORD"_RedisString}, + {"flags", RedisArray({ + "optional"_RedisStatus, + })}, + }), + RedisMap({ + {"name", "withdist"_RedisString}, + {"type", "pure-token"_RedisString}, + {"display_text", "withdist"_RedisString}, + {"token", "WITHDIST"_RedisString}, + {"flags", RedisArray({ + "optional"_RedisStatus, + })}, + }), + RedisMap({ + {"name", "withhash"_RedisString}, + {"type", "pure-token"_RedisString}, + {"display_text", "withhash"_RedisString}, + {"token", "WITHHASH"_RedisString}, + {"flags", RedisArray({ + "optional"_RedisStatus, + })}, + }), + RedisMap({ + {"name", "count-block"_RedisString}, + {"type", "block"_RedisString}, + {"flags", RedisArray({ + "optional"_RedisStatus, + })}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "count"_RedisString}, + {"type", "integer"_RedisString}, + {"display_text", "count"_RedisString}, + {"token", "COUNT"_RedisString}, + }), + RedisMap({ + {"name", "any"_RedisString}, + {"type", "pure-token"_RedisString}, + {"display_text", "any"_RedisString}, + {"token", "ANY"_RedisString}, + {"flags", RedisArray({ + "optional"_RedisStatus, + })}, + }), + })}, + }), + RedisMap({ + {"name", "order"_RedisString}, + {"type", "oneof"_RedisString}, + {"flags", RedisArray({ + "optional"_RedisStatus, + })}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "asc"_RedisString}, + {"type", "pure-token"_RedisString}, + {"display_text", "asc"_RedisString}, + {"token", "ASC"_RedisString}, + }), + RedisMap({ + {"name", "desc"_RedisString}, + {"type", "pure-token"_RedisString}, + {"display_text", "desc"_RedisString}, + {"token", "DESC"_RedisString}, + }), + })}, + }), + RedisMap({ + {"name", "store"_RedisString}, + {"type", "oneof"_RedisString}, + {"flags", RedisArray({ + "optional"_RedisStatus, + })}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "storekey"_RedisString}, + {"type", "key"_RedisString}, + {"display_text", 
"key"_RedisString}, + {"key_spec_index", 1_RedisInt}, + {"token", "STORE"_RedisString}, + }), + RedisMap({ + {"name", "storedistkey"_RedisString}, + {"type", "key"_RedisString}, + {"display_text", "key"_RedisString}, + {"key_spec_index", 2_RedisInt}, + {"token", "STOREDIST"_RedisString}, + }), + })}, + }), + })}, + })}, + {"zdiff", + RedisMap({ + {"summary", "Returns the difference between multiple sorted sets."_RedisString}, + {"since", "6.2.0"_RedisString}, + {"group", "sorted-set"_RedisString}, + {"complexity", + "O(L + (N-K)log(N)) worst case where L is the total number of elements in all the sets, N is the size of the first set, and K is the size of the result set."_RedisString}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "numkeys"_RedisString}, + {"type", "integer"_RedisString}, + {"display_text", "numkeys"_RedisString}, + }), + RedisMap({ + {"name", "key"_RedisString}, + {"type", "key"_RedisString}, + {"display_text", "key"_RedisString}, + {"key_spec_index", 0_RedisInt}, + {"flags", RedisArray({ + "multiple"_RedisStatus, + })}, + }), + RedisMap({ + {"name", "withscores"_RedisString}, + {"type", "pure-token"_RedisString}, + {"display_text", "withscores"_RedisString}, + {"token", "WITHSCORES"_RedisString}, + {"flags", RedisArray({ + "optional"_RedisStatus, + })}, + }), + })}, + })}, + {"georadius_ro", + RedisMap({ + {"summary", + "Returns members from a geospatial index that are within a distance from a coordinate."_RedisString}, + {"since", "3.2.10"_RedisString}, + {"group", "geo"_RedisString}, + {"complexity", + "O(N+log(M)) where N is the number of elements inside the bounding box of the circular area delimited by center and radius and M is the number of items inside the index."_RedisString}, + {"doc_flags", RedisSet({ + "deprecated"_RedisStatus, + })}, + {"deprecated_since", "6.2.0"_RedisString}, + {"replaced_by", "`GEOSEARCH` with the `BYRADIUS` argument"_RedisString}, + {"history", RedisSet({ + RedisArray({"6.2.0"_RedisString, "Added the `ANY` option for `COUNT`."_RedisString}), + })}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "key"_RedisString}, + {"type", "key"_RedisString}, + {"display_text", "key"_RedisString}, + {"key_spec_index", 0_RedisInt}, + }), + RedisMap({ + {"name", "longitude"_RedisString}, + {"type", "double"_RedisString}, + {"display_text", "longitude"_RedisString}, + }), + RedisMap({ + {"name", "latitude"_RedisString}, + {"type", "double"_RedisString}, + {"display_text", "latitude"_RedisString}, + }), + RedisMap({ + {"name", "radius"_RedisString}, + {"type", "double"_RedisString}, + {"display_text", "radius"_RedisString}, + }), + RedisMap({ + {"name", "unit"_RedisString}, + {"type", "oneof"_RedisString}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "m"_RedisString}, + {"type", "pure-token"_RedisString}, + {"display_text", "m"_RedisString}, + {"token", "M"_RedisString}, + }), + RedisMap({ + {"name", "km"_RedisString}, + {"type", "pure-token"_RedisString}, + {"display_text", "km"_RedisString}, + {"token", "KM"_RedisString}, + }), + RedisMap({ + {"name", "ft"_RedisString}, + {"type", "pure-token"_RedisString}, + {"display_text", "ft"_RedisString}, + {"token", "FT"_RedisString}, + }), + RedisMap({ + {"name", "mi"_RedisString}, + {"type", "pure-token"_RedisString}, + {"display_text", "mi"_RedisString}, + {"token", "MI"_RedisString}, + }), + })}, + }), + RedisMap({ + {"name", "withcoord"_RedisString}, + {"type", "pure-token"_RedisString}, + {"display_text", "withcoord"_RedisString}, + {"token", "WITHCOORD"_RedisString}, + {"flags", 
RedisArray({ + "optional"_RedisStatus, + })}, + }), + RedisMap({ + {"name", "withdist"_RedisString}, + {"type", "pure-token"_RedisString}, + {"display_text", "withdist"_RedisString}, + {"token", "WITHDIST"_RedisString}, + {"flags", RedisArray({ + "optional"_RedisStatus, + })}, + }), + RedisMap({ + {"name", "withhash"_RedisString}, + {"type", "pure-token"_RedisString}, + {"display_text", "withhash"_RedisString}, + {"token", "WITHHASH"_RedisString}, + {"flags", RedisArray({ + "optional"_RedisStatus, + })}, + }), + RedisMap({ + {"name", "count-block"_RedisString}, + {"type", "block"_RedisString}, + {"flags", RedisArray({ + "optional"_RedisStatus, + })}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "count"_RedisString}, + {"type", "integer"_RedisString}, + {"display_text", "count"_RedisString}, + {"token", "COUNT"_RedisString}, + }), + RedisMap({ + {"name", "any"_RedisString}, + {"type", "pure-token"_RedisString}, + {"display_text", "any"_RedisString}, + {"token", "ANY"_RedisString}, + {"since", "6.2.0"_RedisString}, + {"flags", RedisArray({ + "optional"_RedisStatus, + })}, + }), + })}, + }), + RedisMap({ + {"name", "order"_RedisString}, + {"type", "oneof"_RedisString}, + {"flags", RedisArray({ + "optional"_RedisStatus, + })}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "asc"_RedisString}, + {"type", "pure-token"_RedisString}, + {"display_text", "asc"_RedisString}, + {"token", "ASC"_RedisString}, + }), + RedisMap({ + {"name", "desc"_RedisString}, + {"type", "pure-token"_RedisString}, + {"display_text", "desc"_RedisString}, + {"token", "DESC"_RedisString}, + }), + })}, + }), + })}, + })}, + {"pubsub", + RedisMap( + { + {"summary", "A container for Pub/Sub commands."_RedisString}, + {"since", "2.8.0"_RedisString}, + {"group", "pubsub"_RedisString}, + {"complexity", "Depends on subcommand."_RedisString}, + {"subcommands", + RedisMap({ + {"pubsub|numsub", + RedisMap({ + {"summary", "Returns a count of subscribers to channels."_RedisString}, + {"since", "2.8.0"_RedisString}, + {"group", "pubsub"_RedisString}, + {"complexity", + "O(N) for the NUMSUB subcommand, where N is the number of requested channels"_RedisString}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "channel"_RedisString}, + {"type", "string"_RedisString}, + {"display_text", "channel"_RedisString}, + {"flags", RedisArray({ + "optional"_RedisStatus, + "multiple"_RedisStatus, + })}, + }), + })}, + })}, + {"pubsub|numpat", RedisMap({ + {"summary", "Returns a count of unique pattern subscriptions."_RedisString}, + {"since", "2.8.0"_RedisString}, + {"group", "pubsub"_RedisString}, + {"complexity", "O(1)"_RedisString}, + })}, + {"pubsub|help", RedisMap({ + {"summary", "Returns helpful text about the different subcommands."_RedisString}, + {"since", "6.2.0"_RedisString}, + {"group", "pubsub"_RedisString}, + {"complexity", "O(1)"_RedisString}, + })}, + {"pubsub|shardnumsub", + RedisMap({ + {"summary", "Returns the count of subscribers of shard channels."_RedisString}, + {"since", "7.0.0"_RedisString}, + {"group", "pubsub"_RedisString}, + {"complexity", + "O(N) for the SHARDNUMSUB subcommand, where N is the number of requested shard channels"_RedisString}, + {"arguments", RedisArray( + { + RedisMap( + { + {"name", "shardchannel"_RedisString}, + {"type", "string"_RedisString}, + {"display_text", "shardchannel"_RedisString}, + {"flags", RedisArray( + { + "optional"_RedisStatus, + "multiple"_RedisStatus, + })}, + }), + })}, + })}, + {"pubsub|shardchannels", RedisMap( + { + {"summary", "Returns the active shard 
channels."_RedisString}, + {"since", "7.0.0"_RedisString}, + {"group", "pubsub"_RedisString}, + {"complexity", + "O(N) where N is the number of active shard channels, and assuming constant time pattern matching (relatively short shard channels)."_RedisString}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "pattern"_RedisString}, + {"type", "pattern"_RedisString}, + {"display_text", "pattern"_RedisString}, + {"flags", RedisArray({ + "optional"_RedisStatus, + })}, + }), + })}, + })}, + {"pubsub|channels", + RedisMap({ + {"summary", "Returns the active channels."_RedisString}, + {"since", "2.8.0"_RedisString}, + {"group", "pubsub"_RedisString}, + {"complexity", + "O(N) where N is the number of active channels, and assuming constant time pattern matching (relatively short channels and patterns)"_RedisString}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "pattern"_RedisString}, + {"type", "pattern"_RedisString}, + {"display_text", "pattern"_RedisString}, + {"flags", RedisArray({ + "optional"_RedisStatus, + })}, + }), + })}, + })}, + })}, + })}, + {"zrandmember", RedisMap({ + {"summary", "Returns one or more random members from a sorted set."_RedisString}, + {"since", "6.2.0"_RedisString}, + {"group", "sorted-set"_RedisString}, + {"complexity", "O(N) where N is the number of members returned"_RedisString}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "key"_RedisString}, + {"type", "key"_RedisString}, + {"display_text", "key"_RedisString}, + {"key_spec_index", 0_RedisInt}, + }), + RedisMap({ + {"name", "options"_RedisString}, + {"type", "block"_RedisString}, + {"flags", RedisArray({ + "optional"_RedisStatus, + })}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "count"_RedisString}, + {"type", "integer"_RedisString}, + {"display_text", "count"_RedisString}, + }), + RedisMap({ + {"name", "withscores"_RedisString}, + {"type", "pure-token"_RedisString}, + {"display_text", "withscores"_RedisString}, + {"token", "WITHSCORES"_RedisString}, + {"flags", RedisArray({ + "optional"_RedisStatus, + })}, + }), + })}, + }), + })}, + })}, + {"pfcount", + RedisMap({ + {"summary", + "Returns the approximated cardinality of the set(s) observed by the HyperLogLog key(s)."_RedisString}, + {"since", "2.8.9"_RedisString}, + {"group", "hyperloglog"_RedisString}, + {"complexity", + "O(1) with a very small average constant time when called with a single key. O(N) with N being the number of keys, and much bigger constant times, when called with multiple keys."_RedisString}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "key"_RedisString}, + {"type", "key"_RedisString}, + {"display_text", "key"_RedisString}, + {"key_spec_index", 0_RedisInt}, + {"flags", RedisArray({ + "multiple"_RedisStatus, + })}, + }), + })}, + })}, + {"move", RedisMap({ + {"summary", "Moves a key to another database."_RedisString}, + {"since", "1.0.0"_RedisString}, + {"group", "generic"_RedisString}, + {"complexity", "O(1)"_RedisString}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "key"_RedisString}, + {"type", "key"_RedisString}, + {"display_text", "key"_RedisString}, + {"key_spec_index", 0_RedisInt}, + }), + RedisMap({ + {"name", "db"_RedisString}, + {"type", "integer"_RedisString}, + {"display_text", "db"_RedisString}, + }), + })}, + })}, + {"blmpop", + RedisMap({ + {"summary", + "Pops the first element from one of multiple lists. Blocks until an element is available otherwise. 
Deletes the list if the last element was popped."_RedisString}, + {"since", "7.0.0"_RedisString}, + {"group", "list"_RedisString}, + {"complexity", + "O(N+M) where N is the number of provided keys and M is the number of elements returned."_RedisString}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "timeout"_RedisString}, + {"type", "double"_RedisString}, + {"display_text", "timeout"_RedisString}, + }), + RedisMap({ + {"name", "numkeys"_RedisString}, + {"type", "integer"_RedisString}, + {"display_text", "numkeys"_RedisString}, + }), + RedisMap({ + {"name", "key"_RedisString}, + {"type", "key"_RedisString}, + {"display_text", "key"_RedisString}, + {"key_spec_index", 0_RedisInt}, + {"flags", RedisArray({ + "multiple"_RedisStatus, + })}, + }), + RedisMap({ + {"name", "where"_RedisString}, + {"type", "oneof"_RedisString}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "left"_RedisString}, + {"type", "pure-token"_RedisString}, + {"display_text", "left"_RedisString}, + {"token", "LEFT"_RedisString}, + }), + RedisMap({ + {"name", "right"_RedisString}, + {"type", "pure-token"_RedisString}, + {"display_text", "right"_RedisString}, + {"token", "RIGHT"_RedisString}, + }), + })}, + }), + RedisMap({ + {"name", "count"_RedisString}, + {"type", "integer"_RedisString}, + {"display_text", "count"_RedisString}, + {"token", "COUNT"_RedisString}, + {"flags", RedisArray({ + "optional"_RedisStatus, + })}, + }), + })}, + })}, + {"publish", + RedisMap({ + {"summary", "Posts a message to a channel."_RedisString}, + {"since", "2.0.0"_RedisString}, + {"group", "pubsub"_RedisString}, + {"complexity", + "O(N+M) where N is the number of clients subscribed to the receiving channel and M is the total number of subscribed patterns (by any client)."_RedisString}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "channel"_RedisString}, + {"type", "string"_RedisString}, + {"display_text", "channel"_RedisString}, + }), + RedisMap({ + {"name", "message"_RedisString}, + {"type", "string"_RedisString}, + {"display_text", "message"_RedisString}, + }), + })}, + })}, + {"xlen", RedisMap({ + {"summary", "Return the number of messages in a stream."_RedisString}, + {"since", "5.0.0"_RedisString}, + {"group", "stream"_RedisString}, + {"complexity", "O(1)"_RedisString}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "key"_RedisString}, + {"type", "key"_RedisString}, + {"display_text", "key"_RedisString}, + {"key_spec_index", 0_RedisInt}, + }), + })}, + })}, + {"info", + RedisMap({ + {"summary", "Returns information and statistics about the server."_RedisString}, + {"since", "1.0.0"_RedisString}, + {"group", "server"_RedisString}, + {"complexity", "O(1)"_RedisString}, + {"history", + RedisSet({ + RedisArray({"7.0.0"_RedisString, "Added support for taking multiple section arguments."_RedisString}), + })}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "section"_RedisString}, + {"type", "string"_RedisString}, + {"display_text", "section"_RedisString}, + {"flags", RedisArray({ + "optional"_RedisStatus, + "multiple"_RedisStatus, + })}, + }), + })}, + })}, + {"sismember", RedisMap({ + {"summary", "Determines whether a member belongs to a set."_RedisString}, + {"since", "1.0.0"_RedisString}, + {"group", "set"_RedisString}, + {"complexity", "O(1)"_RedisString}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "key"_RedisString}, + {"type", "key"_RedisString}, + {"display_text", "key"_RedisString}, + {"key_spec_index", 0_RedisInt}, + }), + RedisMap({ + {"name", "member"_RedisString}, + {"type", 
"string"_RedisString}, + {"display_text", "member"_RedisString}, + }), + })}, + })}, + {"cluster", + RedisMap({ + {"summary", "A container for Redis Cluster commands."_RedisString}, + {"since", "3.0.0"_RedisString}, + {"group", "cluster"_RedisString}, + {"complexity", "Depends on subcommand."_RedisString}, + {"subcommands", + RedisMap({ + {"cluster|links", RedisMap({ + {"summary", "Returns a list of all TCP links to and from peer nodes."_RedisString}, + {"since", "7.0.0"_RedisString}, + {"group", "cluster"_RedisString}, + {"complexity", "O(N) where N is the total number of Cluster nodes"_RedisString}, + })}, + {"cluster|flushslots", RedisMap({ + {"summary", "Deletes all slots information from a node."_RedisString}, + {"since", "3.0.0"_RedisString}, + {"group", "cluster"_RedisString}, + {"complexity", "O(1)"_RedisString}, + })}, + {"cluster|setslot", + RedisMap({ + {"summary", "Binds a hash slot to a node."_RedisString}, + {"since", "3.0.0"_RedisString}, + {"group", "cluster"_RedisString}, + {"complexity", "O(1)"_RedisString}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "slot"_RedisString}, + {"type", "integer"_RedisString}, + {"display_text", "slot"_RedisString}, + }), + RedisMap({ + {"name", "subcommand"_RedisString}, + {"type", "oneof"_RedisString}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "importing"_RedisString}, + {"type", "string"_RedisString}, + {"display_text", "node-id"_RedisString}, + {"token", "IMPORTING"_RedisString}, + }), + RedisMap({ + {"name", "migrating"_RedisString}, + {"type", "string"_RedisString}, + {"display_text", "node-id"_RedisString}, + {"token", "MIGRATING"_RedisString}, + }), + RedisMap({ + {"name", "node"_RedisString}, + {"type", "string"_RedisString}, + {"display_text", "node-id"_RedisString}, + {"token", "NODE"_RedisString}, + }), + RedisMap({ + {"name", "stable"_RedisString}, + {"type", "pure-token"_RedisString}, + {"display_text", "stable"_RedisString}, + {"token", "STABLE"_RedisString}, + }), + })}, + }), + })}, + })}, + {"cluster|keyslot", RedisMap({ + {"summary", "Returns the hash slot for a key."_RedisString}, + {"since", "3.0.0"_RedisString}, + {"group", "cluster"_RedisString}, + {"complexity", "O(N) where N is the number of bytes in the key"_RedisString}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "key"_RedisString}, + {"type", "string"_RedisString}, + {"display_text", "key"_RedisString}, + }), + })}, + })}, + {"cluster|addslotsrange", + RedisMap({ + {"summary", "Assigns new hash slot ranges to a node."_RedisString}, + {"since", "7.0.0"_RedisString}, + {"group", "cluster"_RedisString}, + {"complexity", + "O(N) where N is the total number of the slots between the start slot and end slot arguments."_RedisString}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "range"_RedisString}, + {"type", "block"_RedisString}, + {"flags", RedisArray({ + "multiple"_RedisStatus, + })}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "start-slot"_RedisString}, + {"type", "integer"_RedisString}, + {"display_text", "start-slot"_RedisString}, + }), + RedisMap({ + {"name", "end-slot"_RedisString}, + {"type", "integer"_RedisString}, + {"display_text", "end-slot"_RedisString}, + }), + })}, + }), + })}, + })}, + {"cluster|saveconfig", + RedisMap({ + {"summary", "Forces a node to save the cluster configuration to disk."_RedisString}, + {"since", "3.0.0"_RedisString}, + {"group", "cluster"_RedisString}, + {"complexity", "O(1)"_RedisString}, + })}, + {"cluster|failover", + RedisMap({ + {"summary", "Forces a replica to perform a manual 
failover of its master."_RedisString}, + {"since", "3.0.0"_RedisString}, + {"group", "cluster"_RedisString}, + {"complexity", "O(1)"_RedisString}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "options"_RedisString}, + {"type", "oneof"_RedisString}, + {"flags", RedisArray({ + "optional"_RedisStatus, + })}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "force"_RedisString}, + {"type", "pure-token"_RedisString}, + {"display_text", "force"_RedisString}, + {"token", "FORCE"_RedisString}, + }), + RedisMap({ + {"name", "takeover"_RedisString}, + {"type", "pure-token"_RedisString}, + {"display_text", "takeover"_RedisString}, + {"token", "TAKEOVER"_RedisString}, + }), + })}, + }), + })}, + })}, + {"cluster|replicate", RedisMap({ + {"summary", "Configure a node as replica of a master node."_RedisString}, + {"since", "3.0.0"_RedisString}, + {"group", "cluster"_RedisString}, + {"complexity", "O(1)"_RedisString}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "node-id"_RedisString}, + {"type", "string"_RedisString}, + {"display_text", "node-id"_RedisString}, + }), + })}, + })}, + {"cluster|shards", RedisMap({ + {"summary", "Returns the mapping of cluster slots to shards."_RedisString}, + {"since", "7.0.0"_RedisString}, + {"group", "cluster"_RedisString}, + {"complexity", "O(N) where N is the total number of cluster nodes"_RedisString}, + })}, + {"cluster|meet", + RedisMap({ + {"summary", "Forces a node to handshake with another node."_RedisString}, + {"since", "3.0.0"_RedisString}, + {"group", "cluster"_RedisString}, + {"complexity", "O(1)"_RedisString}, + {"history", RedisSet({ + RedisArray({"4.0.0"_RedisString, + "Added the optional `cluster_bus_port` argument."_RedisString}), + })}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "ip"_RedisString}, + {"type", "string"_RedisString}, + {"display_text", "ip"_RedisString}, + }), + RedisMap({ + {"name", "port"_RedisString}, + {"type", "integer"_RedisString}, + {"display_text", "port"_RedisString}, + }), + RedisMap({ + {"name", "cluster-bus-port"_RedisString}, + {"type", "integer"_RedisString}, + {"display_text", "cluster-bus-port"_RedisString}, + {"since", "4.0.0"_RedisString}, + {"flags", RedisArray({ + "optional"_RedisStatus, + })}, + }), + })}, + })}, + {"cluster|nodes", RedisMap({ + {"summary", "Returns the cluster configuration for a node."_RedisString}, + {"since", "3.0.0"_RedisString}, + {"group", "cluster"_RedisString}, + {"complexity", "O(N) where N is the total number of Cluster nodes"_RedisString}, + })}, + {"cluster|countkeysinslot", RedisMap({ + {"summary", "Returns the number of keys in a hash slot."_RedisString}, + {"since", "3.0.0"_RedisString}, + {"group", "cluster"_RedisString}, + {"complexity", "O(1)"_RedisString}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "slot"_RedisString}, + {"type", "integer"_RedisString}, + {"display_text", "slot"_RedisString}, + }), + })}, + })}, + {"cluster|myshardid", RedisMap({ + {"summary", "Returns the shard ID of a node."_RedisString}, + {"since", "7.2.0"_RedisString}, + {"group", "cluster"_RedisString}, + {"complexity", "O(1)"_RedisString}, + })}, + {"cluster|slaves", RedisMap({ + {"summary", "Lists the replica nodes of a master node."_RedisString}, + {"since", "3.0.0"_RedisString}, + {"group", "cluster"_RedisString}, + {"complexity", "O(1)"_RedisString}, + {"doc_flags", RedisSet({ + "deprecated"_RedisStatus, + })}, + {"deprecated_since", "5.0.0"_RedisString}, + {"replaced_by", "`CLUSTER REPLICAS`"_RedisString}, + {"arguments", RedisArray({ + RedisMap({ + {"name", 
"node-id"_RedisString}, + {"type", "string"_RedisString}, + {"display_text", "node-id"_RedisString}, + }), + })}, + })}, + {"cluster|delslots", + RedisMap({ + {"summary", "Sets hash slots as unbound for a node."_RedisString}, + {"since", "3.0.0"_RedisString}, + {"group", "cluster"_RedisString}, + {"complexity", "O(N) where N is the total number of hash slot arguments"_RedisString}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "slot"_RedisString}, + {"type", "integer"_RedisString}, + {"display_text", "slot"_RedisString}, + {"flags", RedisArray({ + "multiple"_RedisStatus, + })}, + }), + })}, + })}, + {"cluster|myid", RedisMap({ + {"summary", "Returns the ID of a node."_RedisString}, + {"since", "3.0.0"_RedisString}, + {"group", "cluster"_RedisString}, + {"complexity", "O(1)"_RedisString}, + })}, + {"cluster|replicas", RedisMap({ + {"summary", "Lists the replica nodes of a master node."_RedisString}, + {"since", "5.0.0"_RedisString}, + {"group", "cluster"_RedisString}, + {"complexity", "O(1)"_RedisString}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "node-id"_RedisString}, + {"type", "string"_RedisString}, + {"display_text", "node-id"_RedisString}, + }), + })}, + })}, + {"cluster|slots", + RedisMap({ + {"summary", "Returns the mapping of cluster slots to nodes."_RedisString}, + {"since", "3.0.0"_RedisString}, + {"group", "cluster"_RedisString}, + {"complexity", "O(N) where N is the total number of Cluster nodes"_RedisString}, + {"doc_flags", RedisSet({ + "deprecated"_RedisStatus, + })}, + {"deprecated_since", "7.0.0"_RedisString}, + {"replaced_by", "`CLUSTER SHARDS`"_RedisString}, + {"history", + RedisSet({ + RedisArray({"4.0.0"_RedisString, "Added node IDs."_RedisString}), + RedisArray({"7.0.0"_RedisString, "Added additional networking metadata field."_RedisString}), + })}, + })}, + {"cluster|info", RedisMap({ + {"summary", "Returns information about the state of a node."_RedisString}, + {"since", "3.0.0"_RedisString}, + {"group", "cluster"_RedisString}, + {"complexity", "O(1)"_RedisString}, + })}, + {"cluster|forget", RedisMap({ + {"summary", "Removes a node from the nodes table."_RedisString}, + {"since", "3.0.0"_RedisString}, + {"group", "cluster"_RedisString}, + {"complexity", "O(1)"_RedisString}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "node-id"_RedisString}, + {"type", "string"_RedisString}, + {"display_text", "node-id"_RedisString}, + }), + })}, + })}, + {"cluster|help", RedisMap({ + {"summary", "Returns helpful text about the different subcommands."_RedisString}, + {"since", "5.0.0"_RedisString}, + {"group", "cluster"_RedisString}, + {"complexity", "O(1)"_RedisString}, + })}, + {"cluster|count-failure-reports", + RedisMap({ + {"summary", "Returns the number of active failure reports active for a node."_RedisString}, + {"since", "3.0.0"_RedisString}, + {"group", "cluster"_RedisString}, + {"complexity", "O(N) where N is the number of failure reports"_RedisString}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "node-id"_RedisString}, + {"type", "string"_RedisString}, + {"display_text", "node-id"_RedisString}, + }), + })}, + })}, + {"cluster|addslots", + RedisMap({ + {"summary", "Assigns new hash slots to a node."_RedisString}, + {"since", "3.0.0"_RedisString}, + {"group", "cluster"_RedisString}, + {"complexity", "O(N) where N is the total number of hash slot arguments"_RedisString}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "slot"_RedisString}, + {"type", "integer"_RedisString}, + {"display_text", "slot"_RedisString}, + {"flags", 
RedisArray({ + "multiple"_RedisStatus, + })}, + }), + })}, + })}, + {"cluster|getkeysinslot", RedisMap({ + {"summary", "Returns the key names in a hash slot."_RedisString}, + {"since", "3.0.0"_RedisString}, + {"group", "cluster"_RedisString}, + {"complexity", "O(N) where N is the number of requested keys"_RedisString}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "slot"_RedisString}, + {"type", "integer"_RedisString}, + {"display_text", "slot"_RedisString}, + }), + RedisMap({ + {"name", "count"_RedisString}, + {"type", "integer"_RedisString}, + {"display_text", "count"_RedisString}, + }), + })}, + })}, + {"cluster|delslotsrange", + RedisMap({ + {"summary", "Sets hash slot ranges as unbound for a node."_RedisString}, + {"since", "7.0.0"_RedisString}, + {"group", "cluster"_RedisString}, + {"complexity", + "O(N) where N is the total number of the slots between the start slot and end slot arguments."_RedisString}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "range"_RedisString}, + {"type", "block"_RedisString}, + {"flags", RedisArray({ + "multiple"_RedisStatus, + })}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "start-slot"_RedisString}, + {"type", "integer"_RedisString}, + {"display_text", "start-slot"_RedisString}, + }), + RedisMap({ + {"name", "end-slot"_RedisString}, + {"type", "integer"_RedisString}, + {"display_text", "end-slot"_RedisString}, + }), + })}, + }), + })}, + })}, + {"cluster|set-config-epoch", RedisMap({ + {"summary", "Sets the configuration epoch for a new node."_RedisString}, + {"since", "3.0.0"_RedisString}, + {"group", "cluster"_RedisString}, + {"complexity", "O(1)"_RedisString}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "config-epoch"_RedisString}, + {"type", "integer"_RedisString}, + {"display_text", "config-epoch"_RedisString}, + }), + })}, + })}, + {"cluster|reset", + RedisMap({ + {"summary", "Resets a node."_RedisString}, + {"since", "3.0.0"_RedisString}, + {"group", "cluster"_RedisString}, + {"complexity", + "O(N) where N is the number of known nodes. 
The command may execute a FLUSHALL as a side effect."_RedisString}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "reset-type"_RedisString}, + {"type", "oneof"_RedisString}, + {"flags", RedisArray({ + "optional"_RedisStatus, + })}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "hard"_RedisString}, + {"type", "pure-token"_RedisString}, + {"display_text", "hard"_RedisString}, + {"token", "HARD"_RedisString}, + }), + RedisMap({ + {"name", "soft"_RedisString}, + {"type", "pure-token"_RedisString}, + {"display_text", "soft"_RedisString}, + {"token", "SOFT"_RedisString}, + }), + })}, + }), + })}, + })}, + {"cluster|bumpepoch", RedisMap({ + {"summary", "Advances the cluster config epoch."_RedisString}, + {"since", "3.0.0"_RedisString}, + {"group", "cluster"_RedisString}, + {"complexity", "O(1)"_RedisString}, + })}, + })}, + })}, + {"pttl", RedisMap({ + {"summary", "Returns the expiration time in milliseconds of a key."_RedisString}, + {"since", "2.6.0"_RedisString}, + {"group", "generic"_RedisString}, + {"complexity", "O(1)"_RedisString}, + {"history", RedisSet({ + RedisArray({"2.8.0"_RedisString, "Added the -2 reply."_RedisString}), + })}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "key"_RedisString}, + {"type", "key"_RedisString}, + {"display_text", "key"_RedisString}, + {"key_spec_index", 0_RedisInt}, + }), + })}, + })}, + {"zcount", + RedisMap({ + {"summary", "Returns the count of members in a sorted set that have scores within a range."_RedisString}, + {"since", "2.0.0"_RedisString}, + {"group", "sorted-set"_RedisString}, + {"complexity", "O(log(N)) with N being the number of elements in the sorted set."_RedisString}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "key"_RedisString}, + {"type", "key"_RedisString}, + {"display_text", "key"_RedisString}, + {"key_spec_index", 0_RedisInt}, + }), + RedisMap({ + {"name", "min"_RedisString}, + {"type", "double"_RedisString}, + {"display_text", "min"_RedisString}, + }), + RedisMap({ + {"name", "max"_RedisString}, + {"type", "double"_RedisString}, + {"display_text", "max"_RedisString}, + }), + })}, + })}, + {"replconf", RedisMap({ + {"summary", "An internal command for configuring the replication stream."_RedisString}, + {"since", "3.0.0"_RedisString}, + {"group", "server"_RedisString}, + {"complexity", "O(1)"_RedisString}, + {"doc_flags", RedisSet({ + "syscmd"_RedisStatus, + })}, + })}, + {"zintercard", + RedisMap({ + {"summary", "Returns the number of members of the intersect of multiple sorted sets."_RedisString}, + {"since", "7.0.0"_RedisString}, + {"group", "sorted-set"_RedisString}, + {"complexity", + "O(N*K) worst case with N being the smallest input sorted set, K being the number of input sorted sets."_RedisString}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "numkeys"_RedisString}, + {"type", "integer"_RedisString}, + {"display_text", "numkeys"_RedisString}, + }), + RedisMap({ + {"name", "key"_RedisString}, + {"type", "key"_RedisString}, + {"display_text", "key"_RedisString}, + {"key_spec_index", 0_RedisInt}, + {"flags", RedisArray({ + "multiple"_RedisStatus, + })}, + }), + RedisMap({ + {"name", "limit"_RedisString}, + {"type", "integer"_RedisString}, + {"display_text", "limit"_RedisString}, + {"token", "LIMIT"_RedisString}, + {"flags", RedisArray({ + "optional"_RedisStatus, + })}, + }), + })}, + })}, + {"zremrangebylex", + RedisMap({ + {"summary", + "Removes members in a sorted set within a lexicographical range. 
Deletes the sorted set if all members were removed."_RedisString}, + {"since", "2.8.9"_RedisString}, + {"group", "sorted-set"_RedisString}, + {"complexity", + "O(log(N)+M) with N being the number of elements in the sorted set and M the number of elements removed by the operation."_RedisString}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "key"_RedisString}, + {"type", "key"_RedisString}, + {"display_text", "key"_RedisString}, + {"key_spec_index", 0_RedisInt}, + }), + RedisMap({ + {"name", "min"_RedisString}, + {"type", "string"_RedisString}, + {"display_text", "min"_RedisString}, + }), + RedisMap({ + {"name", "max"_RedisString}, + {"type", "string"_RedisString}, + {"display_text", "max"_RedisString}, + }), + })}, + })}, + {"pfdebug", RedisMap({ + {"summary", "Internal commands for debugging HyperLogLog values."_RedisString}, + {"since", "2.8.9"_RedisString}, + {"group", "hyperloglog"_RedisString}, + {"complexity", "N/A"_RedisString}, + {"doc_flags", RedisSet({ + "syscmd"_RedisStatus, + })}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "subcommand"_RedisString}, + {"type", "string"_RedisString}, + {"display_text", "subcommand"_RedisString}, + }), + RedisMap({ + {"name", "key"_RedisString}, + {"type", "key"_RedisString}, + {"display_text", "key"_RedisString}, + {"key_spec_index", 0_RedisInt}, + }), + })}, + })}, + {"hgetall", RedisMap({ + {"summary", "Returns all fields and values in a hash."_RedisString}, + {"since", "2.0.0"_RedisString}, + {"group", "hash"_RedisString}, + {"complexity", "O(N) where N is the size of the hash."_RedisString}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "key"_RedisString}, + {"type", "key"_RedisString}, + {"display_text", "key"_RedisString}, + {"key_spec_index", 0_RedisInt}, + }), + })}, + })}, + {"dump", + RedisMap({ + {"summary", "Returns a serialized representation of the value stored at a key."_RedisString}, + {"since", "2.6.0"_RedisString}, + {"group", "generic"_RedisString}, + {"complexity", + "O(1) to access the key and additional O(N*M) to serialize it, where N is the number of Redis objects composing the value and M their average size. For small string values the time complexity is thus O(1)+O(1*M) where M is small, so simply O(1)."_RedisString}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "key"_RedisString}, + {"type", "key"_RedisString}, + {"display_text", "key"_RedisString}, + {"key_spec_index", 0_RedisInt}, + }), + })}, + })}, + {"geohash", + RedisMap({ + {"summary", "Returns members from a geospatial index as geohash strings."_RedisString}, + {"since", "3.2.0"_RedisString}, + {"group", "geo"_RedisString}, + {"complexity", + "O(log(N)) for each member requested, where N is the number of elements in the sorted set."_RedisString}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "key"_RedisString}, + {"type", "key"_RedisString}, + {"display_text", "key"_RedisString}, + {"key_spec_index", 0_RedisInt}, + }), + RedisMap({ + {"name", "member"_RedisString}, + {"type", "string"_RedisString}, + {"display_text", "member"_RedisString}, + {"flags", RedisArray({ + "optional"_RedisStatus, + "multiple"_RedisStatus, + })}, + }), + })}, + })}, + {"pfadd", RedisMap({ + {"summary", "Adds elements to a HyperLogLog key. 
Creates the key if it doesn't exist."_RedisString}, + {"since", "2.8.9"_RedisString}, + {"group", "hyperloglog"_RedisString}, + {"complexity", "O(1) to add every element."_RedisString}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "key"_RedisString}, + {"type", "key"_RedisString}, + {"display_text", "key"_RedisString}, + {"key_spec_index", 0_RedisInt}, + }), + RedisMap({ + {"name", "element"_RedisString}, + {"type", "string"_RedisString}, + {"display_text", "element"_RedisString}, + {"flags", RedisArray({ + "optional"_RedisStatus, + "multiple"_RedisStatus, + })}, + }), + })}, + })}, + {"scan", + RedisMap({ + {"summary", "Iterates over the key names in the database."_RedisString}, + {"since", "2.8.0"_RedisString}, + {"group", "generic"_RedisString}, + {"complexity", + "O(1) for every call. O(N) for a complete iteration, including enough command calls for the cursor to return back to 0. N is the number of elements inside the collection."_RedisString}, + {"history", RedisSet({ + RedisArray({"6.0.0"_RedisString, "Added the `TYPE` subcommand."_RedisString}), + })}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "cursor"_RedisString}, + {"type", "integer"_RedisString}, + {"display_text", "cursor"_RedisString}, + }), + RedisMap({ + {"name", "pattern"_RedisString}, + {"type", "pattern"_RedisString}, + {"display_text", "pattern"_RedisString}, + {"token", "MATCH"_RedisString}, + {"flags", RedisArray({ + "optional"_RedisStatus, + })}, + }), + RedisMap({ + {"name", "count"_RedisString}, + {"type", "integer"_RedisString}, + {"display_text", "count"_RedisString}, + {"token", "COUNT"_RedisString}, + {"flags", RedisArray({ + "optional"_RedisStatus, + })}, + }), + RedisMap({ + {"name", "type"_RedisString}, + {"type", "string"_RedisString}, + {"display_text", "type"_RedisString}, + {"token", "TYPE"_RedisString}, + {"since", "6.0.0"_RedisString}, + {"flags", RedisArray({ + "optional"_RedisStatus, + })}, + }), + })}, + })}, + {"client", + RedisMap({ + {"summary", "A container for client connection commands."_RedisString}, + {"since", "2.4.0"_RedisString}, + {"group", "connection"_RedisString}, + {"complexity", "Depends on subcommand."_RedisString}, + {"subcommands", + RedisMap({ + {"client|caching", + RedisMap({ + {"summary", "Instructs the server whether to track the keys in the next request."_RedisString}, + {"since", "6.0.0"_RedisString}, + {"group", "connection"_RedisString}, + {"complexity", "O(1)"_RedisString}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "mode"_RedisString}, + {"type", "oneof"_RedisString}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "yes"_RedisString}, + {"type", "pure-token"_RedisString}, + {"display_text", "yes"_RedisString}, + {"token", "YES"_RedisString}, + }), + RedisMap({ + {"name", "no"_RedisString}, + {"type", "pure-token"_RedisString}, + {"display_text", "no"_RedisString}, + {"token", "NO"_RedisString}, + }), + })}, + }), + })}, + })}, + {"client|trackinginfo", + RedisMap({ + {"summary", + "Returns information about server-assisted client-side caching for the connection."_RedisString}, + {"since", "6.2.0"_RedisString}, + {"group", "connection"_RedisString}, + {"complexity", "O(1)"_RedisString}, + })}, + {"client|getredir", + RedisMap({ + {"summary", + "Returns the client ID to which the connection's tracking notifications are redirected."_RedisString}, + {"since", "6.0.0"_RedisString}, + {"group", "connection"_RedisString}, + {"complexity", "O(1)"_RedisString}, + })}, + {"client|info", RedisMap({ + {"summary", "Returns information about the 
connection."_RedisString}, + {"since", "6.2.0"_RedisString}, + {"group", "connection"_RedisString}, + {"complexity", "O(1)"_RedisString}, + })}, + {"client|pause", + RedisMap({ + {"summary", "Suspends commands processing."_RedisString}, + {"since", "3.0.0"_RedisString}, + {"group", "connection"_RedisString}, + {"complexity", "O(1)"_RedisString}, + {"history", + RedisSet({ + RedisArray({"6.2.0"_RedisString, + "`CLIENT PAUSE WRITE` mode added along with the `mode` option."_RedisString}), + })}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "timeout"_RedisString}, + {"type", "integer"_RedisString}, + {"display_text", "timeout"_RedisString}, + }), + RedisMap({ + {"name", "mode"_RedisString}, + {"type", "oneof"_RedisString}, + {"since", "6.2.0"_RedisString}, + {"flags", RedisArray({ + "optional"_RedisStatus, + })}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "write"_RedisString}, + {"type", "pure-token"_RedisString}, + {"display_text", "write"_RedisString}, + {"token", "WRITE"_RedisString}, + }), + RedisMap({ + {"name", "all"_RedisString}, + {"type", "pure-token"_RedisString}, + {"display_text", "all"_RedisString}, + {"token", "ALL"_RedisString}, + }), + })}, + }), + })}, + })}, + {"client|help", RedisMap({ + {"summary", "Returns helpful text about the different subcommands."_RedisString}, + {"since", "5.0.0"_RedisString}, + {"group", "connection"_RedisString}, + {"complexity", "O(1)"_RedisString}, + })}, + {"client|no-evict", RedisMap({ + {"summary", "Sets the client eviction mode of the connection."_RedisString}, + {"since", "7.0.0"_RedisString}, + {"group", "connection"_RedisString}, + {"complexity", "O(1)"_RedisString}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "enabled"_RedisString}, + {"type", "oneof"_RedisString}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "on"_RedisString}, + {"type", "pure-token"_RedisString}, + {"display_text", "on"_RedisString}, + {"token", "ON"_RedisString}, + }), + RedisMap({ + {"name", "off"_RedisString}, + {"type", "pure-token"_RedisString}, + {"display_text", "off"_RedisString}, + {"token", "OFF"_RedisString}, + }), + })}, + }), + })}, + })}, + {"client|no-touch", + RedisMap({ + {"summary", + "Controls whether commands sent by the client affect the LRU/LFU of accessed keys."_RedisString}, + {"since", "7.2.0"_RedisString}, + {"group", "connection"_RedisString}, + {"complexity", "O(1)"_RedisString}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "enabled"_RedisString}, + {"type", "oneof"_RedisString}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "on"_RedisString}, + {"type", "pure-token"_RedisString}, + {"display_text", "on"_RedisString}, + {"token", "ON"_RedisString}, + }), + RedisMap({ + {"name", "off"_RedisString}, + {"type", "pure-token"_RedisString}, + {"display_text", "off"_RedisString}, + {"token", "OFF"_RedisString}, + }), + })}, + }), + })}, + })}, + {"client|kill", + RedisMap({ + {"summary", "Terminates open connections."_RedisString}, + {"since", "2.4.0"_RedisString}, + {"group", "connection"_RedisString}, + {"complexity", "O(N) where N is the number of client connections"_RedisString}, + {"history", + RedisSet({ + RedisArray({"2.8.12"_RedisString, "Added new filter format."_RedisString}), + RedisArray({"2.8.12"_RedisString, "`ID` option."_RedisString}), + RedisArray({"3.2.0"_RedisString, "Added `master` type in for `TYPE` option."_RedisString}), + RedisArray( + {"5.0.0"_RedisString, + "Replaced `slave` `TYPE` with `replica`. 
`slave` still supported for backward compatibility."_RedisString}), + RedisArray({"6.2.0"_RedisString, "`LADDR` option."_RedisString}), + })}, + {"arguments", + RedisArray({ + RedisMap({ + {"name", "filter"_RedisString}, + {"type", "oneof"_RedisString}, + {"arguments", + RedisArray({ + RedisMap({ + {"name", "old-format"_RedisString}, + {"type", "string"_RedisString}, + {"display_text", "ip:port"_RedisString}, + {"deprecated_since", "2.8.12"_RedisString}, + }), + RedisMap({ + {"name", "new-format"_RedisString}, + {"type", "oneof"_RedisString}, + {"flags", RedisArray({ + "multiple"_RedisStatus, + })}, + {"arguments", + RedisArray({ + RedisMap({ + {"name", "client-id"_RedisString}, + {"type", "integer"_RedisString}, + {"display_text", "client-id"_RedisString}, + {"token", "ID"_RedisString}, + {"since", "2.8.12"_RedisString}, + {"flags", RedisArray({ + "optional"_RedisStatus, + })}, + }), + RedisMap({ + {"name", "client-type"_RedisString}, + {"type", "oneof"_RedisString}, + {"token", "TYPE"_RedisString}, + {"since", "2.8.12"_RedisString}, + {"flags", RedisArray({ + "optional"_RedisStatus, + })}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "normal"_RedisString}, + {"type", "pure-token"_RedisString}, + {"display_text", "normal"_RedisString}, + {"token", "NORMAL"_RedisString}, + }), + RedisMap({ + {"name", "master"_RedisString}, + {"type", "pure-token"_RedisString}, + {"display_text", "master"_RedisString}, + {"token", "MASTER"_RedisString}, + {"since", "3.2.0"_RedisString}, + }), + RedisMap({ + {"name", "slave"_RedisString}, + {"type", "pure-token"_RedisString}, + {"display_text", "slave"_RedisString}, + {"token", "SLAVE"_RedisString}, + }), + RedisMap({ + {"name", "replica"_RedisString}, + {"type", "pure-token"_RedisString}, + {"display_text", "replica"_RedisString}, + {"token", "REPLICA"_RedisString}, + {"since", "5.0.0"_RedisString}, + }), + RedisMap({ + {"name", "pubsub"_RedisString}, + {"type", "pure-token"_RedisString}, + {"display_text", "pubsub"_RedisString}, + {"token", "PUBSUB"_RedisString}, + }), + })}, + }), + RedisMap({ + {"name", "username"_RedisString}, + {"type", "string"_RedisString}, + {"display_text", "username"_RedisString}, + {"token", "USER"_RedisString}, + {"flags", RedisArray({ + "optional"_RedisStatus, + })}, + }), + RedisMap({ + {"name", "addr"_RedisString}, + {"type", "string"_RedisString}, + {"display_text", "ip:port"_RedisString}, + {"token", "ADDR"_RedisString}, + {"flags", RedisArray({ + "optional"_RedisStatus, + })}, + }), + RedisMap({ + {"name", "laddr"_RedisString}, + {"type", "string"_RedisString}, + {"display_text", "ip:port"_RedisString}, + {"token", "LADDR"_RedisString}, + {"since", "6.2.0"_RedisString}, + {"flags", RedisArray({ + "optional"_RedisStatus, + })}, + }), + RedisMap({ + {"name", "skipme"_RedisString}, + {"type", "oneof"_RedisString}, + {"token", "SKIPME"_RedisString}, + {"flags", RedisArray({ + "optional"_RedisStatus, + })}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "yes"_RedisString}, + {"type", "pure-token"_RedisString}, + {"display_text", "yes"_RedisString}, + {"token", "YES"_RedisString}, + }), + RedisMap({ + {"name", "no"_RedisString}, + {"type", "pure-token"_RedisString}, + {"display_text", "no"_RedisString}, + {"token", "NO"_RedisString}, + }), + })}, + }), + })}, + }), + })}, + }), + })}, + })}, + {"client|setinfo", + RedisMap({ + {"summary", "Sets information specific to the client or connection."_RedisString}, + {"since", "7.2.0"_RedisString}, + {"group", "connection"_RedisString}, + {"complexity", 
"O(1)"_RedisString}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "attr"_RedisString}, + {"type", "oneof"_RedisString}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "libname"_RedisString}, + {"type", "string"_RedisString}, + {"display_text", "libname"_RedisString}, + {"token", "LIB-NAME"_RedisString}, + }), + RedisMap({ + {"name", "libver"_RedisString}, + {"type", "string"_RedisString}, + {"display_text", "libver"_RedisString}, + {"token", "LIB-VER"_RedisString}, + }), + })}, + }), + })}, + })}, + {"client|id", RedisMap({ + {"summary", "Returns the unique client ID of the connection."_RedisString}, + {"since", "5.0.0"_RedisString}, + {"group", "connection"_RedisString}, + {"complexity", "O(1)"_RedisString}, + })}, + {"client|getname", RedisMap({ + {"summary", "Returns the name of the connection."_RedisString}, + {"since", "2.6.9"_RedisString}, + {"group", "connection"_RedisString}, + {"complexity", "O(1)"_RedisString}, + })}, + {"client|tracking", + RedisMap({ + {"summary", "Controls server-assisted client-side caching for the connection."_RedisString}, + {"since", "6.0.0"_RedisString}, + {"group", "connection"_RedisString}, + {"complexity", "O(1). Some options may introduce additional complexity."_RedisString}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "status"_RedisString}, + {"type", "oneof"_RedisString}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "on"_RedisString}, + {"type", "pure-token"_RedisString}, + {"display_text", "on"_RedisString}, + {"token", "ON"_RedisString}, + }), + RedisMap({ + {"name", "off"_RedisString}, + {"type", "pure-token"_RedisString}, + {"display_text", "off"_RedisString}, + {"token", "OFF"_RedisString}, + }), + })}, + }), + RedisMap({ + {"name", "client-id"_RedisString}, + {"type", "integer"_RedisString}, + {"display_text", "client-id"_RedisString}, + {"token", "REDIRECT"_RedisString}, + {"flags", RedisArray({ + "optional"_RedisStatus, + })}, + }), + RedisMap({ + {"name", "prefix"_RedisString}, + {"type", "string"_RedisString}, + {"display_text", "prefix"_RedisString}, + {"token", "PREFIX"_RedisString}, + {"flags", RedisArray({ + "optional"_RedisStatus, + "multiple"_RedisStatus, + "multiple_token"_RedisStatus, + })}, + }), + RedisMap({ + {"name", "bcast"_RedisString}, + {"type", "pure-token"_RedisString}, + {"display_text", "bcast"_RedisString}, + {"token", "BCAST"_RedisString}, + {"flags", RedisArray({ + "optional"_RedisStatus, + })}, + }), + RedisMap({ + {"name", "optin"_RedisString}, + {"type", "pure-token"_RedisString}, + {"display_text", "optin"_RedisString}, + {"token", "OPTIN"_RedisString}, + {"flags", RedisArray({ + "optional"_RedisStatus, + })}, + }), + RedisMap({ + {"name", "optout"_RedisString}, + {"type", "pure-token"_RedisString}, + {"display_text", "optout"_RedisString}, + {"token", "OPTOUT"_RedisString}, + {"flags", RedisArray({ + "optional"_RedisStatus, + })}, + }), + RedisMap({ + {"name", "noloop"_RedisString}, + {"type", "pure-token"_RedisString}, + {"display_text", "noloop"_RedisString}, + {"token", "NOLOOP"_RedisString}, + {"flags", RedisArray({ + "optional"_RedisStatus, + })}, + }), + })}, + })}, + {"client|setname", RedisMap({ + {"summary", "Sets the connection name."_RedisString}, + {"since", "2.6.9"_RedisString}, + {"group", "connection"_RedisString}, + {"complexity", "O(1)"_RedisString}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "connection-name"_RedisString}, + {"type", "string"_RedisString}, + {"display_text", "connection-name"_RedisString}, + }), + })}, + })}, + {"client|list", + 
RedisMap({ + {"summary", "Lists open connections."_RedisString}, + {"since", "2.4.0"_RedisString}, + {"group", "connection"_RedisString}, + {"complexity", "O(N) where N is the number of client connections"_RedisString}, + {"history", + RedisSet({ + RedisArray({"2.8.12"_RedisString, "Added unique client `id` field."_RedisString}), + RedisArray({"5.0.0"_RedisString, "Added optional `TYPE` filter."_RedisString}), + RedisArray({"6.0.0"_RedisString, "Added `user` field."_RedisString}), + RedisArray( + {"6.2.0"_RedisString, + "Added `argv-mem`, `tot-mem`, `laddr` and `redir` fields and the optional `ID` filter."_RedisString}), + RedisArray( + {"7.0.0"_RedisString, "Added `resp`, `multi-mem`, `rbs` and `rbp` fields."_RedisString}), + RedisArray({"7.0.3"_RedisString, "Added `ssub` field."_RedisString}), + })}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "client-type"_RedisString}, + {"type", "oneof"_RedisString}, + {"token", "TYPE"_RedisString}, + {"since", "5.0.0"_RedisString}, + {"flags", RedisArray({ + "optional"_RedisStatus, + })}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "normal"_RedisString}, + {"type", "pure-token"_RedisString}, + {"display_text", "normal"_RedisString}, + {"token", "NORMAL"_RedisString}, + }), + RedisMap({ + {"name", "master"_RedisString}, + {"type", "pure-token"_RedisString}, + {"display_text", "master"_RedisString}, + {"token", "MASTER"_RedisString}, + }), + RedisMap({ + {"name", "replica"_RedisString}, + {"type", "pure-token"_RedisString}, + {"display_text", "replica"_RedisString}, + {"token", "REPLICA"_RedisString}, + }), + RedisMap({ + {"name", "pubsub"_RedisString}, + {"type", "pure-token"_RedisString}, + {"display_text", "pubsub"_RedisString}, + {"token", "PUBSUB"_RedisString}, + }), + })}, + }), + RedisMap({ + {"name", "client-id"_RedisString}, + {"type", "integer"_RedisString}, + {"display_text", "client-id"_RedisString}, + {"token", "ID"_RedisString}, + {"since", "6.2.0"_RedisString}, + {"flags", RedisArray({ + "optional"_RedisStatus, + "multiple"_RedisStatus, + })}, + }), + })}, + })}, + {"client|reply", RedisMap({ + {"summary", "Instructs the server whether to reply to commands."_RedisString}, + {"since", "3.2.0"_RedisString}, + {"group", "connection"_RedisString}, + {"complexity", "O(1)"_RedisString}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "action"_RedisString}, + {"type", "oneof"_RedisString}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "on"_RedisString}, + {"type", "pure-token"_RedisString}, + {"display_text", "on"_RedisString}, + {"token", "ON"_RedisString}, + }), + RedisMap({ + {"name", "off"_RedisString}, + {"type", "pure-token"_RedisString}, + {"display_text", "off"_RedisString}, + {"token", "OFF"_RedisString}, + }), + RedisMap({ + {"name", "skip"_RedisString}, + {"type", "pure-token"_RedisString}, + {"display_text", "skip"_RedisString}, + {"token", "SKIP"_RedisString}, + }), + })}, + }), + })}, + })}, + {"client|unblock", + RedisMap({ + {"summary", + "Unblocks a client blocked by a blocking command from a different connection."_RedisString}, + {"since", "5.0.0"_RedisString}, + {"group", "connection"_RedisString}, + {"complexity", "O(log N) where N is the number of client connections"_RedisString}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "client-id"_RedisString}, + {"type", "integer"_RedisString}, + {"display_text", "client-id"_RedisString}, + }), + RedisMap({ + {"name", "unblock-type"_RedisString}, + {"type", "oneof"_RedisString}, + {"flags", RedisArray({ + "optional"_RedisStatus, + })}, + 
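+ // Editorial note: when this one-of is omitted, CLIENT UNBLOCK behaves as
+ // TIMEOUT (the blocked command returns its timeout reply); ERROR instead
+ // fails the blocked command with an -UNBLOCKED error.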
{"arguments", RedisArray({ + RedisMap({ + {"name", "timeout"_RedisString}, + {"type", "pure-token"_RedisString}, + {"display_text", "timeout"_RedisString}, + {"token", "TIMEOUT"_RedisString}, + }), + RedisMap({ + {"name", "error"_RedisString}, + {"type", "pure-token"_RedisString}, + {"display_text", "error"_RedisString}, + {"token", "ERROR"_RedisString}, + }), + })}, + }), + })}, + })}, + {"client|unpause", RedisMap({ + {"summary", "Resumes processing commands from paused clients."_RedisString}, + {"since", "6.2.0"_RedisString}, + {"group", "connection"_RedisString}, + {"complexity", "O(N) Where N is the number of paused clients"_RedisString}, + })}, + })}, + })}, + {"shutdown", + RedisMap({ + {"summary", "Synchronously saves the database(s) to disk and shuts down the Redis server."_RedisString}, + {"since", "1.0.0"_RedisString}, + {"group", "server"_RedisString}, + {"complexity", + "O(N) when saving, where N is the total number of keys in all databases when saving data, otherwise O(1)"_RedisString}, + {"history", + RedisSet({ + RedisArray({"7.0.0"_RedisString, "Added the `NOW`, `FORCE` and `ABORT` modifiers."_RedisString}), + })}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "save-selector"_RedisString}, + {"type", "oneof"_RedisString}, + {"flags", RedisArray({ + "optional"_RedisStatus, + })}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "nosave"_RedisString}, + {"type", "pure-token"_RedisString}, + {"display_text", "nosave"_RedisString}, + {"token", "NOSAVE"_RedisString}, + }), + RedisMap({ + {"name", "save"_RedisString}, + {"type", "pure-token"_RedisString}, + {"display_text", "save"_RedisString}, + {"token", "SAVE"_RedisString}, + }), + })}, + }), + RedisMap({ + {"name", "now"_RedisString}, + {"type", "pure-token"_RedisString}, + {"display_text", "now"_RedisString}, + {"token", "NOW"_RedisString}, + {"since", "7.0.0"_RedisString}, + {"flags", RedisArray({ + "optional"_RedisStatus, + })}, + }), + RedisMap({ + {"name", "force"_RedisString}, + {"type", "pure-token"_RedisString}, + {"display_text", "force"_RedisString}, + {"token", "FORCE"_RedisString}, + {"since", "7.0.0"_RedisString}, + {"flags", RedisArray({ + "optional"_RedisStatus, + })}, + }), + RedisMap({ + {"name", "abort"_RedisString}, + {"type", "pure-token"_RedisString}, + {"display_text", "abort"_RedisString}, + {"token", "ABORT"_RedisString}, + {"since", "7.0.0"_RedisString}, + {"flags", RedisArray({ + "optional"_RedisStatus, + })}, + }), + })}, + })}, + {"lmpop", + RedisMap({ + {"summary", + "Returns multiple elements from a list after removing them. 
Deletes the list if the last element was popped."_RedisString}, + {"since", "7.0.0"_RedisString}, + {"group", "list"_RedisString}, + {"complexity", + "O(N+M) where N is the number of provided keys and M is the number of elements returned."_RedisString}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "numkeys"_RedisString}, + {"type", "integer"_RedisString}, + {"display_text", "numkeys"_RedisString}, + }), + RedisMap({ + {"name", "key"_RedisString}, + {"type", "key"_RedisString}, + {"display_text", "key"_RedisString}, + {"key_spec_index", 0_RedisInt}, + {"flags", RedisArray({ + "multiple"_RedisStatus, + })}, + }), + RedisMap({ + {"name", "where"_RedisString}, + {"type", "oneof"_RedisString}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "left"_RedisString}, + {"type", "pure-token"_RedisString}, + {"display_text", "left"_RedisString}, + {"token", "LEFT"_RedisString}, + }), + RedisMap({ + {"name", "right"_RedisString}, + {"type", "pure-token"_RedisString}, + {"display_text", "right"_RedisString}, + {"token", "RIGHT"_RedisString}, + }), + })}, + }), + RedisMap({ + {"name", "count"_RedisString}, + {"type", "integer"_RedisString}, + {"display_text", "count"_RedisString}, + {"token", "COUNT"_RedisString}, + {"flags", RedisArray({ + "optional"_RedisStatus, + })}, + }), + })}, + })}, + {"watch", RedisMap({ + {"summary", "Monitors changes to keys to determine the execution of a transaction."_RedisString}, + {"since", "2.2.0"_RedisString}, + {"group", "transactions"_RedisString}, + {"complexity", "O(1) for every key."_RedisString}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "key"_RedisString}, + {"type", "key"_RedisString}, + {"display_text", "key"_RedisString}, + {"key_spec_index", 0_RedisInt}, + {"flags", RedisArray({ + "multiple"_RedisStatus, + })}, + }), + })}, + })}, + {"hkeys", RedisMap({ + {"summary", "Returns all fields in a hash."_RedisString}, + {"since", "2.0.0"_RedisString}, + {"group", "hash"_RedisString}, + {"complexity", "O(N) where N is the size of the hash."_RedisString}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "key"_RedisString}, + {"type", "key"_RedisString}, + {"display_text", "key"_RedisString}, + {"key_spec_index", 0_RedisInt}, + }), + })}, + })}, + {"zpopmin", + RedisMap({ + {"summary", + "Returns the lowest-scoring members from a sorted set after removing them. Deletes the sorted set if the last member was popped."_RedisString}, + {"since", "5.0.0"_RedisString}, + {"group", "sorted-set"_RedisString}, + {"complexity", + "O(log(N)*M) with N being the number of elements in the sorted set, and M being the number of elements popped."_RedisString}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "key"_RedisString}, + {"type", "key"_RedisString}, + {"display_text", "key"_RedisString}, + {"key_spec_index", 0_RedisInt}, + }), + RedisMap({ + {"name", "count"_RedisString}, + {"type", "integer"_RedisString}, + {"display_text", "count"_RedisString}, + {"flags", RedisArray({ + "optional"_RedisStatus, + })}, + }), + })}, + })}, + {"ltrim", + RedisMap({ + {"summary", + "Removes elements from both ends a list. 
Deletes the list if all elements were trimmed."_RedisString}, + {"since", "1.0.0"_RedisString}, + {"group", "list"_RedisString}, + {"complexity", "O(N) where N is the number of elements to be removed by the operation."_RedisString}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "key"_RedisString}, + {"type", "key"_RedisString}, + {"display_text", "key"_RedisString}, + {"key_spec_index", 0_RedisInt}, + }), + RedisMap({ + {"name", "start"_RedisString}, + {"type", "integer"_RedisString}, + {"display_text", "start"_RedisString}, + }), + RedisMap({ + {"name", "stop"_RedisString}, + {"type", "integer"_RedisString}, + {"display_text", "stop"_RedisString}, + }), + })}, + })}, + {"evalsha_ro", RedisMap({ + {"summary", "Executes a read-only server-side Lua script by SHA1 digest."_RedisString}, + {"since", "7.0.0"_RedisString}, + {"group", "scripting"_RedisString}, + {"complexity", "Depends on the script that is executed."_RedisString}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "sha1"_RedisString}, + {"type", "string"_RedisString}, + {"display_text", "sha1"_RedisString}, + }), + RedisMap({ + {"name", "numkeys"_RedisString}, + {"type", "integer"_RedisString}, + {"display_text", "numkeys"_RedisString}, + }), + RedisMap({ + {"name", "key"_RedisString}, + {"type", "key"_RedisString}, + {"display_text", "key"_RedisString}, + {"key_spec_index", 0_RedisInt}, + {"flags", RedisArray({ + "optional"_RedisStatus, + "multiple"_RedisStatus, + })}, + }), + RedisMap({ + {"name", "arg"_RedisString}, + {"type", "string"_RedisString}, + {"display_text", "arg"_RedisString}, + {"flags", RedisArray({ + "optional"_RedisStatus, + "multiple"_RedisStatus, + })}, + }), + })}, + })}, + {"fcall", RedisMap({ + {"summary", "Invokes a function."_RedisString}, + {"since", "7.0.0"_RedisString}, + {"group", "scripting"_RedisString}, + {"complexity", "Depends on the function that is executed."_RedisString}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "function"_RedisString}, + {"type", "string"_RedisString}, + {"display_text", "function"_RedisString}, + }), + RedisMap({ + {"name", "numkeys"_RedisString}, + {"type", "integer"_RedisString}, + {"display_text", "numkeys"_RedisString}, + }), + RedisMap({ + {"name", "key"_RedisString}, + {"type", "key"_RedisString}, + {"display_text", "key"_RedisString}, + {"key_spec_index", 0_RedisInt}, + {"flags", RedisArray({ + "optional"_RedisStatus, + "multiple"_RedisStatus, + })}, + }), + RedisMap({ + {"name", "arg"_RedisString}, + {"type", "string"_RedisString}, + {"display_text", "arg"_RedisString}, + {"flags", RedisArray({ + "optional"_RedisStatus, + "multiple"_RedisStatus, + })}, + }), + })}, + })}, + {"sort", + RedisMap({ + {"summary", + "Sorts the elements in a list, a set, or a sorted set, optionally storing the result."_RedisString}, + {"since", "1.0.0"_RedisString}, + {"group", "generic"_RedisString}, + {"complexity", + "O(N+M*log(M)) where N is the number of elements in the list or set to sort, and M the number of returned elements. 
When the elements are not sorted, complexity is O(N)."_RedisString}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "key"_RedisString}, + {"type", "key"_RedisString}, + {"display_text", "key"_RedisString}, + {"key_spec_index", 0_RedisInt}, + }), + RedisMap({ + {"name", "by-pattern"_RedisString}, + {"type", "pattern"_RedisString}, + {"display_text", "pattern"_RedisString}, + {"key_spec_index", 1_RedisInt}, + {"token", "BY"_RedisString}, + {"flags", RedisArray({ + "optional"_RedisStatus, + })}, + }), + RedisMap({ + {"name", "limit"_RedisString}, + {"type", "block"_RedisString}, + {"token", "LIMIT"_RedisString}, + {"flags", RedisArray({ + "optional"_RedisStatus, + })}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "offset"_RedisString}, + {"type", "integer"_RedisString}, + {"display_text", "offset"_RedisString}, + }), + RedisMap({ + {"name", "count"_RedisString}, + {"type", "integer"_RedisString}, + {"display_text", "count"_RedisString}, + }), + })}, + }), + RedisMap({ + {"name", "get-pattern"_RedisString}, + {"type", "pattern"_RedisString}, + {"display_text", "pattern"_RedisString}, + {"key_spec_index", 1_RedisInt}, + {"token", "GET"_RedisString}, + {"flags", RedisArray({ + "optional"_RedisStatus, + "multiple"_RedisStatus, + "multiple_token"_RedisStatus, + })}, + }), + RedisMap({ + {"name", "order"_RedisString}, + {"type", "oneof"_RedisString}, + {"flags", RedisArray({ + "optional"_RedisStatus, + })}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "asc"_RedisString}, + {"type", "pure-token"_RedisString}, + {"display_text", "asc"_RedisString}, + {"token", "ASC"_RedisString}, + }), + RedisMap({ + {"name", "desc"_RedisString}, + {"type", "pure-token"_RedisString}, + {"display_text", "desc"_RedisString}, + {"token", "DESC"_RedisString}, + }), + })}, + }), + RedisMap({ + {"name", "sorting"_RedisString}, + {"type", "pure-token"_RedisString}, + {"display_text", "sorting"_RedisString}, + {"token", "ALPHA"_RedisString}, + {"flags", RedisArray({ + "optional"_RedisStatus, + })}, + }), + RedisMap({ + {"name", "destination"_RedisString}, + {"type", "key"_RedisString}, + {"display_text", "destination"_RedisString}, + {"key_spec_index", 2_RedisInt}, + {"token", "STORE"_RedisString}, + {"flags", RedisArray({ + "optional"_RedisStatus, + })}, + }), + })}, + })}, + {"pfmerge", RedisMap({ + {"summary", "Merges one or more HyperLogLog values into a single key."_RedisString}, + {"since", "2.8.9"_RedisString}, + {"group", "hyperloglog"_RedisString}, + {"complexity", "O(N) to merge N HyperLogLogs, but with high constant times."_RedisString}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "destkey"_RedisString}, + {"type", "key"_RedisString}, + {"display_text", "destkey"_RedisString}, + {"key_spec_index", 0_RedisInt}, + }), + RedisMap({ + {"name", "sourcekey"_RedisString}, + {"type", "key"_RedisString}, + {"display_text", "sourcekey"_RedisString}, + {"key_spec_index", 1_RedisInt}, + {"flags", RedisArray({ + "optional"_RedisStatus, + "multiple"_RedisStatus, + })}, + }), + })}, + })}, + {"georadius", + RedisMap({ + {"summary", + "Queries a geospatial index for members within a distance from a coordinate, optionally stores the result."_RedisString}, + {"since", "3.2.0"_RedisString}, + {"group", "geo"_RedisString}, + {"complexity", + "O(N+log(M)) where N is the number of elements inside the bounding box of the circular area delimited by center and radius and M is the number of items inside the index."_RedisString}, + {"doc_flags", RedisSet({ + "deprecated"_RedisStatus, + })}, + 
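+ // Illustrative invocation (editorial; mirrors the upstream GEORADIUS docs,
+ // the key name "Sicily" is the docs' example and not part of this table):
+ //   GEORADIUS Sicily 15 37 200 km WITHCOORD WITHDIST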
{"deprecated_since", "6.2.0"_RedisString}, + {"replaced_by", "`GEOSEARCH` and `GEOSEARCHSTORE` with the `BYRADIUS` argument"_RedisString}, + {"history", RedisSet({ + RedisArray({"6.2.0"_RedisString, "Added the `ANY` option for `COUNT`."_RedisString}), + RedisArray({"7.0.0"_RedisString, "Added support for uppercase unit names."_RedisString}), + })}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "key"_RedisString}, + {"type", "key"_RedisString}, + {"display_text", "key"_RedisString}, + {"key_spec_index", 0_RedisInt}, + }), + RedisMap({ + {"name", "longitude"_RedisString}, + {"type", "double"_RedisString}, + {"display_text", "longitude"_RedisString}, + }), + RedisMap({ + {"name", "latitude"_RedisString}, + {"type", "double"_RedisString}, + {"display_text", "latitude"_RedisString}, + }), + RedisMap({ + {"name", "radius"_RedisString}, + {"type", "double"_RedisString}, + {"display_text", "radius"_RedisString}, + }), + RedisMap({ + {"name", "unit"_RedisString}, + {"type", "oneof"_RedisString}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "m"_RedisString}, + {"type", "pure-token"_RedisString}, + {"display_text", "m"_RedisString}, + {"token", "M"_RedisString}, + }), + RedisMap({ + {"name", "km"_RedisString}, + {"type", "pure-token"_RedisString}, + {"display_text", "km"_RedisString}, + {"token", "KM"_RedisString}, + }), + RedisMap({ + {"name", "ft"_RedisString}, + {"type", "pure-token"_RedisString}, + {"display_text", "ft"_RedisString}, + {"token", "FT"_RedisString}, + }), + RedisMap({ + {"name", "mi"_RedisString}, + {"type", "pure-token"_RedisString}, + {"display_text", "mi"_RedisString}, + {"token", "MI"_RedisString}, + }), + })}, + }), + RedisMap({ + {"name", "withcoord"_RedisString}, + {"type", "pure-token"_RedisString}, + {"display_text", "withcoord"_RedisString}, + {"token", "WITHCOORD"_RedisString}, + {"flags", RedisArray({ + "optional"_RedisStatus, + })}, + }), + RedisMap({ + {"name", "withdist"_RedisString}, + {"type", "pure-token"_RedisString}, + {"display_text", "withdist"_RedisString}, + {"token", "WITHDIST"_RedisString}, + {"flags", RedisArray({ + "optional"_RedisStatus, + })}, + }), + RedisMap({ + {"name", "withhash"_RedisString}, + {"type", "pure-token"_RedisString}, + {"display_text", "withhash"_RedisString}, + {"token", "WITHHASH"_RedisString}, + {"flags", RedisArray({ + "optional"_RedisStatus, + })}, + }), + RedisMap({ + {"name", "count-block"_RedisString}, + {"type", "block"_RedisString}, + {"flags", RedisArray({ + "optional"_RedisStatus, + })}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "count"_RedisString}, + {"type", "integer"_RedisString}, + {"display_text", "count"_RedisString}, + {"token", "COUNT"_RedisString}, + }), + RedisMap({ + {"name", "any"_RedisString}, + {"type", "pure-token"_RedisString}, + {"display_text", "any"_RedisString}, + {"token", "ANY"_RedisString}, + {"since", "6.2.0"_RedisString}, + {"flags", RedisArray({ + "optional"_RedisStatus, + })}, + }), + })}, + }), + RedisMap({ + {"name", "order"_RedisString}, + {"type", "oneof"_RedisString}, + {"flags", RedisArray({ + "optional"_RedisStatus, + })}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "asc"_RedisString}, + {"type", "pure-token"_RedisString}, + {"display_text", "asc"_RedisString}, + {"token", "ASC"_RedisString}, + }), + RedisMap({ + {"name", "desc"_RedisString}, + {"type", "pure-token"_RedisString}, + {"display_text", "desc"_RedisString}, + {"token", "DESC"_RedisString}, + }), + })}, + }), + RedisMap({ + {"name", "store"_RedisString}, + {"type", "oneof"_RedisString}, + 
{"flags", RedisArray({ + "optional"_RedisStatus, + })}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "storekey"_RedisString}, + {"type", "key"_RedisString}, + {"display_text", "key"_RedisString}, + {"key_spec_index", 1_RedisInt}, + {"token", "STORE"_RedisString}, + }), + RedisMap({ + {"name", "storedistkey"_RedisString}, + {"type", "key"_RedisString}, + {"display_text", "key"_RedisString}, + {"key_spec_index", 2_RedisInt}, + {"token", "STOREDIST"_RedisString}, + }), + })}, + }), + })}, + })}, + {"zrevrangebyscore", + RedisMap({ + {"summary", "Returns members in a sorted set within a range of scores in reverse order."_RedisString}, + {"since", "2.2.0"_RedisString}, + {"group", "sorted-set"_RedisString}, + {"complexity", + "O(log(N)+M) with N being the number of elements in the sorted set and M the number of elements being returned. If M is constant (e.g. always asking for the first 10 elements with LIMIT), you can consider it O(log(N))."_RedisString}, + {"doc_flags", RedisSet({ + "deprecated"_RedisStatus, + })}, + {"deprecated_since", "6.2.0"_RedisString}, + {"replaced_by", "`ZRANGE` with the `REV` and `BYSCORE` arguments"_RedisString}, + {"history", RedisSet({ + RedisArray({"2.1.6"_RedisString, "`min` and `max` can be exclusive."_RedisString}), + })}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "key"_RedisString}, + {"type", "key"_RedisString}, + {"display_text", "key"_RedisString}, + {"key_spec_index", 0_RedisInt}, + }), + RedisMap({ + {"name", "max"_RedisString}, + {"type", "double"_RedisString}, + {"display_text", "max"_RedisString}, + }), + RedisMap({ + {"name", "min"_RedisString}, + {"type", "double"_RedisString}, + {"display_text", "min"_RedisString}, + }), + RedisMap({ + {"name", "withscores"_RedisString}, + {"type", "pure-token"_RedisString}, + {"display_text", "withscores"_RedisString}, + {"token", "WITHSCORES"_RedisString}, + {"flags", RedisArray({ + "optional"_RedisStatus, + })}, + }), + RedisMap({ + {"name", "limit"_RedisString}, + {"type", "block"_RedisString}, + {"token", "LIMIT"_RedisString}, + {"flags", RedisArray({ + "optional"_RedisStatus, + })}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "offset"_RedisString}, + {"type", "integer"_RedisString}, + {"display_text", "offset"_RedisString}, + }), + RedisMap({ + {"name", "count"_RedisString}, + {"type", "integer"_RedisString}, + {"display_text", "count"_RedisString}, + }), + })}, + }), + })}, + })}, + {"lset", + RedisMap({ + {"summary", "Sets the value of an element in a list by its index."_RedisString}, + {"since", "1.0.0"_RedisString}, + {"group", "list"_RedisString}, + {"complexity", + "O(N) where N is the length of the list. Setting either the first or the last element of the list is O(1)."_RedisString}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "key"_RedisString}, + {"type", "key"_RedisString}, + {"display_text", "key"_RedisString}, + {"key_spec_index", 0_RedisInt}, + }), + RedisMap({ + {"name", "index"_RedisString}, + {"type", "integer"_RedisString}, + {"display_text", "index"_RedisString}, + }), + RedisMap({ + {"name", "element"_RedisString}, + {"type", "string"_RedisString}, + {"display_text", "element"_RedisString}, + }), + })}, + })}, + {"xrevrange", + RedisMap({ + {"summary", "Returns the messages from a stream within a range of IDs in reverse order."_RedisString}, + {"since", "5.0.0"_RedisString}, + {"group", "stream"_RedisString}, + {"complexity", + "O(N) with N being the number of elements returned. If N is constant (e.g. 
always asking for the first 10 elements with COUNT), you can consider it O(1)."_RedisString}, + {"history", RedisSet({ + RedisArray({"6.2.0"_RedisString, "Added exclusive ranges."_RedisString}), + })}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "key"_RedisString}, + {"type", "key"_RedisString}, + {"display_text", "key"_RedisString}, + {"key_spec_index", 0_RedisInt}, + }), + RedisMap({ + {"name", "end"_RedisString}, + {"type", "string"_RedisString}, + {"display_text", "end"_RedisString}, + }), + RedisMap({ + {"name", "start"_RedisString}, + {"type", "string"_RedisString}, + {"display_text", "start"_RedisString}, + }), + RedisMap({ + {"name", "count"_RedisString}, + {"type", "integer"_RedisString}, + {"display_text", "count"_RedisString}, + {"token", "COUNT"_RedisString}, + {"flags", RedisArray({ + "optional"_RedisStatus, + })}, + }), + })}, + })}, + {"linsert", + RedisMap({ + {"summary", "Inserts an element before or after another element in a list."_RedisString}, + {"since", "2.2.0"_RedisString}, + {"group", "list"_RedisString}, + {"complexity", + "O(N) where N is the number of elements to traverse before seeing the value pivot. This means that inserting somewhere on the left end on the list (head) can be considered O(1) and inserting somewhere on the right end (tail) is O(N)."_RedisString}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "key"_RedisString}, + {"type", "key"_RedisString}, + {"display_text", "key"_RedisString}, + {"key_spec_index", 0_RedisInt}, + }), + RedisMap({ + {"name", "where"_RedisString}, + {"type", "oneof"_RedisString}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "before"_RedisString}, + {"type", "pure-token"_RedisString}, + {"display_text", "before"_RedisString}, + {"token", "BEFORE"_RedisString}, + }), + RedisMap({ + {"name", "after"_RedisString}, + {"type", "pure-token"_RedisString}, + {"display_text", "after"_RedisString}, + {"token", "AFTER"_RedisString}, + }), + })}, + }), + RedisMap({ + {"name", "pivot"_RedisString}, + {"type", "string"_RedisString}, + {"display_text", "pivot"_RedisString}, + }), + RedisMap({ + {"name", "element"_RedisString}, + {"type", "string"_RedisString}, + {"display_text", "element"_RedisString}, + }), + })}, + })}, + {"incr", + RedisMap({ + {"summary", + "Increments the integer value of a key by one. 
Uses 0 as initial value if the key doesn't exist."_RedisString}, + {"since", "1.0.0"_RedisString}, + {"group", "string"_RedisString}, + {"complexity", "O(1)"_RedisString}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "key"_RedisString}, + {"type", "key"_RedisString}, + {"display_text", "key"_RedisString}, + {"key_spec_index", 0_RedisInt}, + }), + })}, + })}, + {"hrandfield", RedisMap({ + {"summary", "Returns one or more random fields from a hash."_RedisString}, + {"since", "6.2.0"_RedisString}, + {"group", "hash"_RedisString}, + {"complexity", "O(N) where N is the number of fields returned"_RedisString}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "key"_RedisString}, + {"type", "key"_RedisString}, + {"display_text", "key"_RedisString}, + {"key_spec_index", 0_RedisInt}, + }), + RedisMap({ + {"name", "options"_RedisString}, + {"type", "block"_RedisString}, + {"flags", RedisArray({ + "optional"_RedisStatus, + })}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "count"_RedisString}, + {"type", "integer"_RedisString}, + {"display_text", "count"_RedisString}, + }), + RedisMap({ + {"name", "withvalues"_RedisString}, + {"type", "pure-token"_RedisString}, + {"display_text", "withvalues"_RedisString}, + {"token", "WITHVALUES"_RedisString}, + {"flags", RedisArray({ + "optional"_RedisStatus, + })}, + }), + })}, + }), + })}, + })}, + {"rpushx", + RedisMap({ + {"summary", "Appends an element to a list only when the list exists."_RedisString}, + {"since", "2.2.0"_RedisString}, + {"group", "list"_RedisString}, + {"complexity", + "O(1) for each element added, so O(N) to add N elements when the command is called with multiple arguments."_RedisString}, + {"history", RedisSet({ + RedisArray({"4.0.0"_RedisString, "Accepts multiple `element` arguments."_RedisString}), + })}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "key"_RedisString}, + {"type", "key"_RedisString}, + {"display_text", "key"_RedisString}, + {"key_spec_index", 0_RedisInt}, + }), + RedisMap({ + {"name", "element"_RedisString}, + {"type", "string"_RedisString}, + {"display_text", "element"_RedisString}, + {"flags", RedisArray({ + "multiple"_RedisStatus, + })}, + }), + })}, + })}, + {"lrem", + RedisMap({ + {"summary", "Removes elements from a list. 
Deletes the list if the last element was removed."_RedisString}, + {"since", "1.0.0"_RedisString}, + {"group", "list"_RedisString}, + {"complexity", + "O(N+M) where N is the length of the list and M is the number of elements removed."_RedisString}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "key"_RedisString}, + {"type", "key"_RedisString}, + {"display_text", "key"_RedisString}, + {"key_spec_index", 0_RedisInt}, + }), + RedisMap({ + {"name", "count"_RedisString}, + {"type", "integer"_RedisString}, + {"display_text", "count"_RedisString}, + }), + RedisMap({ + {"name", "element"_RedisString}, + {"type", "string"_RedisString}, + {"display_text", "element"_RedisString}, + }), + })}, + })}, + {"hello", + RedisMap({ + {"summary", "Handshakes with the Redis server."_RedisString}, + {"since", "6.0.0"_RedisString}, + {"group", "connection"_RedisString}, + {"complexity", "O(1)"_RedisString}, + {"history", + RedisSet({ + RedisArray( + {"6.2.0"_RedisString, + "`protover` made optional; when called without arguments the command reports the current connection's context."_RedisString}), + })}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "arguments"_RedisString}, + {"type", "block"_RedisString}, + {"flags", RedisArray({ + "optional"_RedisStatus, + })}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "protover"_RedisString}, + {"type", "integer"_RedisString}, + {"display_text", "protover"_RedisString}, + }), + RedisMap({ + {"name", "auth"_RedisString}, + {"type", "block"_RedisString}, + {"token", "AUTH"_RedisString}, + {"flags", RedisArray({ + "optional"_RedisStatus, + })}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "username"_RedisString}, + {"type", "string"_RedisString}, + {"display_text", "username"_RedisString}, + }), + RedisMap({ + {"name", "password"_RedisString}, + {"type", "string"_RedisString}, + {"display_text", "password"_RedisString}, + }), + })}, + }), + RedisMap({ + {"name", "clientname"_RedisString}, + {"type", "string"_RedisString}, + {"display_text", "clientname"_RedisString}, + {"token", "SETNAME"_RedisString}, + {"flags", RedisArray({ + "optional"_RedisStatus, + })}, + }), + })}, + }), + })}, + })}, + {"config", + RedisMap({ + {"summary", "A container for server configuration commands."_RedisString}, + {"since", "2.0.0"_RedisString}, + {"group", "server"_RedisString}, + {"complexity", "Depends on subcommand."_RedisString}, + {"subcommands", + RedisMap({ + {"config|resetstat", RedisMap({ + {"summary", "Resets the server's statistics."_RedisString}, + {"since", "2.0.0"_RedisString}, + {"group", "server"_RedisString}, + {"complexity", "O(1)"_RedisString}, + })}, + {"config|get", + RedisMap({ + {"summary", "Returns the effective values of configuration parameters."_RedisString}, + {"since", "2.0.0"_RedisString}, + {"group", "server"_RedisString}, + {"complexity", "O(N) when N is the number of configuration parameters provided"_RedisString}, + {"history", + RedisSet({ + RedisArray({"7.0.0"_RedisString, + "Added the ability to pass multiple pattern parameters in one call"_RedisString}), + })}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "parameter"_RedisString}, + {"type", "string"_RedisString}, + {"display_text", "parameter"_RedisString}, + {"flags", RedisArray({ + "multiple"_RedisStatus, + })}, + }), + })}, + })}, + {"config|rewrite", RedisMap({ + {"summary", "Persists the effective configuration to file."_RedisString}, + {"since", "2.8.0"_RedisString}, + {"group", "server"_RedisString}, + {"complexity", "O(1)"_RedisString}, + })}, + 
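+ // Editorial example (not part of the generated table): the subcommands in
+ // this map compose as, e.g.,
+ //   CONFIG SET maxmemory 100mb   // change a parameter in-flight
+ //   CONFIG REWRITE               // persist the running config to the file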
{"config|help", RedisMap({ + {"summary", "Returns helpful text about the different subcommands."_RedisString}, + {"since", "5.0.0"_RedisString}, + {"group", "server"_RedisString}, + {"complexity", "O(1)"_RedisString}, + })}, + {"config|set", + RedisMap({ + {"summary", "Sets configuration parameters in-flight."_RedisString}, + {"since", "2.0.0"_RedisString}, + {"group", "server"_RedisString}, + {"complexity", "O(N) when N is the number of configuration parameters provided"_RedisString}, + {"history", + RedisSet({ + RedisArray({"7.0.0"_RedisString, + "Added the ability to set multiple parameters in one call."_RedisString}), + })}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "data"_RedisString}, + {"type", "block"_RedisString}, + {"flags", RedisArray({ + "multiple"_RedisStatus, + })}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "parameter"_RedisString}, + {"type", "string"_RedisString}, + {"display_text", "parameter"_RedisString}, + }), + RedisMap({ + {"name", "value"_RedisString}, + {"type", "string"_RedisString}, + {"display_text", "value"_RedisString}, + }), + })}, + }), + })}, + })}, + })}, + })}, + {"zincrby", RedisMap({ + {"summary", "Increments the score of a member in a sorted set."_RedisString}, + {"since", "1.2.0"_RedisString}, + {"group", "sorted-set"_RedisString}, + {"complexity", "O(log(N)) where N is the number of elements in the sorted set."_RedisString}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "key"_RedisString}, + {"type", "key"_RedisString}, + {"display_text", "key"_RedisString}, + {"key_spec_index", 0_RedisInt}, + }), + RedisMap({ + {"name", "increment"_RedisString}, + {"type", "integer"_RedisString}, + {"display_text", "increment"_RedisString}, + }), + RedisMap({ + {"name", "member"_RedisString}, + {"type", "string"_RedisString}, + {"display_text", "member"_RedisString}, + }), + })}, + })}, + {"bitfield_ro", RedisMap({ + {"summary", "Performs arbitrary read-only bitfield integer operations on strings."_RedisString}, + {"since", "6.0.0"_RedisString}, + {"group", "bitmap"_RedisString}, + {"complexity", "O(1) for each subcommand specified"_RedisString}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "key"_RedisString}, + {"type", "key"_RedisString}, + {"display_text", "key"_RedisString}, + {"key_spec_index", 0_RedisInt}, + }), + RedisMap({ + {"name", "get-block"_RedisString}, + {"type", "block"_RedisString}, + {"token", "GET"_RedisString}, + {"flags", RedisArray({ + "optional"_RedisStatus, + "multiple"_RedisStatus, + "multiple_token"_RedisStatus, + })}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "encoding"_RedisString}, + {"type", "string"_RedisString}, + {"display_text", "encoding"_RedisString}, + }), + RedisMap({ + {"name", "offset"_RedisString}, + {"type", "integer"_RedisString}, + {"display_text", "offset"_RedisString}, + }), + })}, + }), + })}, + })}, + {"expire", + RedisMap({ + {"summary", "Sets the expiration time of a key in seconds."_RedisString}, + {"since", "1.0.0"_RedisString}, + {"group", "generic"_RedisString}, + {"complexity", "O(1)"_RedisString}, + {"history", RedisSet({ + RedisArray({"7.0.0"_RedisString, "Added options: `NX`, `XX`, `GT` and `LT`."_RedisString}), + })}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "key"_RedisString}, + {"type", "key"_RedisString}, + {"display_text", "key"_RedisString}, + {"key_spec_index", 0_RedisInt}, + }), + RedisMap({ + {"name", "seconds"_RedisString}, + {"type", "integer"_RedisString}, + {"display_text", "seconds"_RedisString}, + }), + RedisMap({ + {"name", 
"condition"_RedisString}, + {"type", "oneof"_RedisString}, + {"since", "7.0.0"_RedisString}, + {"flags", RedisArray({ + "optional"_RedisStatus, + })}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "nx"_RedisString}, + {"type", "pure-token"_RedisString}, + {"display_text", "nx"_RedisString}, + {"token", "NX"_RedisString}, + }), + RedisMap({ + {"name", "xx"_RedisString}, + {"type", "pure-token"_RedisString}, + {"display_text", "xx"_RedisString}, + {"token", "XX"_RedisString}, + }), + RedisMap({ + {"name", "gt"_RedisString}, + {"type", "pure-token"_RedisString}, + {"display_text", "gt"_RedisString}, + {"token", "GT"_RedisString}, + }), + RedisMap({ + {"name", "lt"_RedisString}, + {"type", "pure-token"_RedisString}, + {"display_text", "lt"_RedisString}, + {"token", "LT"_RedisString}, + }), + })}, + }), + })}, + })}, + {"hincrbyfloat", + RedisMap({ + {"summary", + "Increments the floating point value of a field by a number. Uses 0 as initial value if the field doesn't exist."_RedisString}, + {"since", "2.6.0"_RedisString}, + {"group", "hash"_RedisString}, + {"complexity", "O(1)"_RedisString}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "key"_RedisString}, + {"type", "key"_RedisString}, + {"display_text", "key"_RedisString}, + {"key_spec_index", 0_RedisInt}, + }), + RedisMap({ + {"name", "field"_RedisString}, + {"type", "string"_RedisString}, + {"display_text", "field"_RedisString}, + }), + RedisMap({ + {"name", "increment"_RedisString}, + {"type", "double"_RedisString}, + {"display_text", "increment"_RedisString}, + }), + })}, + })}, + {"srandmember", + RedisMap({ + {"summary", "Get one or multiple random members from a set"_RedisString}, + {"since", "1.0.0"_RedisString}, + {"group", "set"_RedisString}, + {"complexity", + "Without the count argument O(1), otherwise O(N) where N is the absolute value of the passed count."_RedisString}, + {"history", RedisSet({ + RedisArray({"2.6.0"_RedisString, "Added the optional `count` argument."_RedisString}), + })}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "key"_RedisString}, + {"type", "key"_RedisString}, + {"display_text", "key"_RedisString}, + {"key_spec_index", 0_RedisInt}, + }), + RedisMap({ + {"name", "count"_RedisString}, + {"type", "integer"_RedisString}, + {"display_text", "count"_RedisString}, + {"since", "2.6.0"_RedisString}, + {"flags", RedisArray({ + "optional"_RedisStatus, + })}, + }), + })}, + })}, + {"multi", RedisMap({ + {"summary", "Starts a transaction."_RedisString}, + {"since", "1.2.0"_RedisString}, + {"group", "transactions"_RedisString}, + {"complexity", "O(1)"_RedisString}, + })}, + {"evalsha", RedisMap({ + {"summary", "Executes a server-side Lua script by SHA1 digest."_RedisString}, + {"since", "2.6.0"_RedisString}, + {"group", "scripting"_RedisString}, + {"complexity", "Depends on the script that is executed."_RedisString}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "sha1"_RedisString}, + {"type", "string"_RedisString}, + {"display_text", "sha1"_RedisString}, + }), + RedisMap({ + {"name", "numkeys"_RedisString}, + {"type", "integer"_RedisString}, + {"display_text", "numkeys"_RedisString}, + }), + RedisMap({ + {"name", "key"_RedisString}, + {"type", "key"_RedisString}, + {"display_text", "key"_RedisString}, + {"key_spec_index", 0_RedisInt}, + {"flags", RedisArray({ + "optional"_RedisStatus, + "multiple"_RedisStatus, + })}, + }), + RedisMap({ + {"name", "arg"_RedisString}, + {"type", "string"_RedisString}, + {"display_text", "arg"_RedisString}, + {"flags", RedisArray({ + "optional"_RedisStatus, + 
"multiple"_RedisStatus, + })}, + }), + })}, + })}, + {"sscan", + RedisMap({ + {"summary", "Iterates over members of a set."_RedisString}, + {"since", "2.8.0"_RedisString}, + {"group", "set"_RedisString}, + {"complexity", + "O(1) for every call. O(N) for a complete iteration, including enough command calls for the cursor to return back to 0. N is the number of elements inside the collection."_RedisString}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "key"_RedisString}, + {"type", "key"_RedisString}, + {"display_text", "key"_RedisString}, + {"key_spec_index", 0_RedisInt}, + }), + RedisMap({ + {"name", "cursor"_RedisString}, + {"type", "integer"_RedisString}, + {"display_text", "cursor"_RedisString}, + }), + RedisMap({ + {"name", "pattern"_RedisString}, + {"type", "pattern"_RedisString}, + {"display_text", "pattern"_RedisString}, + {"token", "MATCH"_RedisString}, + {"flags", RedisArray({ + "optional"_RedisStatus, + })}, + }), + RedisMap({ + {"name", "count"_RedisString}, + {"type", "integer"_RedisString}, + {"display_text", "count"_RedisString}, + {"token", "COUNT"_RedisString}, + {"flags", RedisArray({ + "optional"_RedisStatus, + })}, + }), + })}, + })}, + {"exec", RedisMap({ + {"summary", "Executes all commands in a transaction."_RedisString}, + {"since", "1.2.0"_RedisString}, + {"group", "transactions"_RedisString}, + {"complexity", "Depends on commands in the transaction"_RedisString}, + })}, + {"geoadd", + RedisMap({ + {"summary", + "Adds one or more members to a geospatial index. The key is created if it doesn't exist."_RedisString}, + {"since", "3.2.0"_RedisString}, + {"group", "geo"_RedisString}, + {"complexity", + "O(log(N)) for each item added, where N is the number of elements in the sorted set."_RedisString}, + {"history", RedisSet({ + RedisArray({"6.2.0"_RedisString, "Added the `CH`, `NX` and `XX` options."_RedisString}), + })}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "key"_RedisString}, + {"type", "key"_RedisString}, + {"display_text", "key"_RedisString}, + {"key_spec_index", 0_RedisInt}, + }), + RedisMap({ + {"name", "condition"_RedisString}, + {"type", "oneof"_RedisString}, + {"since", "6.2.0"_RedisString}, + {"flags", RedisArray({ + "optional"_RedisStatus, + })}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "nx"_RedisString}, + {"type", "pure-token"_RedisString}, + {"display_text", "nx"_RedisString}, + {"token", "NX"_RedisString}, + }), + RedisMap({ + {"name", "xx"_RedisString}, + {"type", "pure-token"_RedisString}, + {"display_text", "xx"_RedisString}, + {"token", "XX"_RedisString}, + }), + })}, + }), + RedisMap({ + {"name", "change"_RedisString}, + {"type", "pure-token"_RedisString}, + {"display_text", "change"_RedisString}, + {"token", "CH"_RedisString}, + {"since", "6.2.0"_RedisString}, + {"flags", RedisArray({ + "optional"_RedisStatus, + })}, + }), + RedisMap({ + {"name", "data"_RedisString}, + {"type", "block"_RedisString}, + {"flags", RedisArray({ + "multiple"_RedisStatus, + })}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "longitude"_RedisString}, + {"type", "double"_RedisString}, + {"display_text", "longitude"_RedisString}, + }), + RedisMap({ + {"name", "latitude"_RedisString}, + {"type", "double"_RedisString}, + {"display_text", "latitude"_RedisString}, + }), + RedisMap({ + {"name", "member"_RedisString}, + {"type", "string"_RedisString}, + {"display_text", "member"_RedisString}, + }), + })}, + }), + })}, + })}, + {"waitaof", + RedisMap({ + {"summary", + "Blocks until all of the preceding write commands sent by the connection 
are written to the append-only file of the master and/or replicas."_RedisString}, + {"since", "7.2.0"_RedisString}, + {"group", "generic"_RedisString}, + {"complexity", "O(1)"_RedisString}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "numlocal"_RedisString}, + {"type", "integer"_RedisString}, + {"display_text", "numlocal"_RedisString}, + }), + RedisMap({ + {"name", "numreplicas"_RedisString}, + {"type", "integer"_RedisString}, + {"display_text", "numreplicas"_RedisString}, + }), + RedisMap({ + {"name", "timeout"_RedisString}, + {"type", "integer"_RedisString}, + {"display_text", "timeout"_RedisString}, + }), + })}, + })}, + {"brpoplpush", + RedisMap({ + {"summary", + "Pops an element from a list, pushes it to another list and returns it. Blocks until an element is available otherwise. Deletes the list if the last element was popped."_RedisString}, + {"since", "2.2.0"_RedisString}, + {"group", "list"_RedisString}, + {"complexity", "O(1)"_RedisString}, + {"doc_flags", RedisSet({ + "deprecated"_RedisStatus, + })}, + {"deprecated_since", "6.2.0"_RedisString}, + {"replaced_by", "`BLMOVE` with the `RIGHT` and `LEFT` arguments"_RedisString}, + {"history", RedisSet({ + RedisArray({"6.0.0"_RedisString, + "`timeout` is interpreted as a double instead of an integer."_RedisString}), + })}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "source"_RedisString}, + {"type", "key"_RedisString}, + {"display_text", "source"_RedisString}, + {"key_spec_index", 0_RedisInt}, + }), + RedisMap({ + {"name", "destination"_RedisString}, + {"type", "key"_RedisString}, + {"display_text", "destination"_RedisString}, + {"key_spec_index", 1_RedisInt}, + }), + RedisMap({ + {"name", "timeout"_RedisString}, + {"type", "double"_RedisString}, + {"display_text", "timeout"_RedisString}, + }), + })}, + })}, + {"xinfo", + RedisMap( + { + {"summary", "A container for stream introspection commands."_RedisString}, + {"since", "5.0.0"_RedisString}, + {"group", "stream"_RedisString}, + {"complexity", "Depends on subcommand."_RedisString}, + {"subcommands", + RedisMap({ + {"xinfo|groups", + RedisMap({ + {"summary", "Returns a list of the consumer groups of a stream."_RedisString}, + {"since", "5.0.0"_RedisString}, + {"group", "stream"_RedisString}, + {"complexity", "O(1)"_RedisString}, + {"history", + RedisSet({ + RedisArray({"7.0.0"_RedisString, "Added the `entries-read` and `lag` fields"_RedisString}), + })}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "key"_RedisString}, + {"type", "key"_RedisString}, + {"display_text", "key"_RedisString}, + {"key_spec_index", 0_RedisInt}, + }), + })}, + })}, + {"xinfo|consumers", + RedisMap({ + {"summary", "Returns a list of the consumers in a consumer group."_RedisString}, + {"since", "5.0.0"_RedisString}, + {"group", "stream"_RedisString}, + {"complexity", "O(1)"_RedisString}, + {"history", RedisSet({ + RedisArray({"7.2.0"_RedisString, "Added the `inactive` field."_RedisString}), + })}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "key"_RedisString}, + {"type", "key"_RedisString}, + {"display_text", "key"_RedisString}, + {"key_spec_index", 0_RedisInt}, + }), + RedisMap({ + {"name", "group"_RedisString}, + {"type", "string"_RedisString}, + {"display_text", "group"_RedisString}, + }), + })}, + })}, + {"xinfo|stream", + RedisMap( + { + {"summary", "Returns information about a stream."_RedisString}, + {"since", "5.0.0"_RedisString}, + {"group", "stream"_RedisString}, + {"complexity", "O(1)"_RedisString}, + {"history", RedisSet({ + RedisArray({"6.0.0"_RedisString, "Added 
the `FULL` modifier."_RedisString}), + RedisArray( + {"7.0.0"_RedisString, + "Added the `max-deleted-entry-id`, `entries-added`, `recorded-first-entry-id`, `entries-read` and `lag` fields"_RedisString}), + RedisArray( + {"7.2.0"_RedisString, + "Added the `active-time` field, and changed the meaning of `seen-time`."_RedisString}), + })}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "key"_RedisString}, + {"type", "key"_RedisString}, + {"display_text", "key"_RedisString}, + {"key_spec_index", 0_RedisInt}, + }), + RedisMap({ + {"name", "full-block"_RedisString}, + {"type", "block"_RedisString}, + {"flags", RedisArray({ + "optional"_RedisStatus, + })}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "full"_RedisString}, + {"type", "pure-token"_RedisString}, + {"display_text", "full"_RedisString}, + {"token", "FULL"_RedisString}, + }), + RedisMap({ + {"name", "count"_RedisString}, + {"type", "integer"_RedisString}, + {"display_text", "count"_RedisString}, + {"token", "COUNT"_RedisString}, + {"flags", RedisArray({ + "optional"_RedisStatus, + })}, + }), + })}, + }), + })}, + })}, + {"xinfo|help", RedisMap({ + {"summary", "Returns helpful text about the different subcommands."_RedisString}, + {"since", "5.0.0"_RedisString}, + {"group", "stream"_RedisString}, + {"complexity", "O(1)"_RedisString}, + })}, + })}, + })}, + {"getdel", RedisMap({ + {"summary", "Returns the string value of a key after deleting the key."_RedisString}, + {"since", "6.2.0"_RedisString}, + {"group", "string"_RedisString}, + {"complexity", "O(1)"_RedisString}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "key"_RedisString}, + {"type", "key"_RedisString}, + {"display_text", "key"_RedisString}, + {"key_spec_index", 0_RedisInt}, + }), + })}, + })}, + {"restore", + RedisMap({ + {"summary", "Creates a key from the serialized representation of a value."_RedisString}, + {"since", "2.6.0"_RedisString}, + {"group", "generic"_RedisString}, + {"complexity", + "O(1) to create the new key and additional O(N*M) to reconstruct the serialized value, where N is the number of Redis objects composing the value and M their average size. For small string values the time complexity is thus O(1)+O(1*M) where M is small, so simply O(1). 
However for sorted set values the complexity is O(N*M*log(N)) because inserting values into sorted sets is O(log(N))."_RedisString}, + {"history", RedisSet({ + RedisArray({"3.0.0"_RedisString, "Added the `REPLACE` modifier."_RedisString}), + RedisArray({"5.0.0"_RedisString, "Added the `ABSTTL` modifier."_RedisString}), + RedisArray({"5.0.0"_RedisString, "Added the `IDLETIME` and `FREQ` options."_RedisString}), + })}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "key"_RedisString}, + {"type", "key"_RedisString}, + {"display_text", "key"_RedisString}, + {"key_spec_index", 0_RedisInt}, + }), + RedisMap({ + {"name", "ttl"_RedisString}, + {"type", "integer"_RedisString}, + {"display_text", "ttl"_RedisString}, + }), + RedisMap({ + {"name", "serialized-value"_RedisString}, + {"type", "string"_RedisString}, + {"display_text", "serialized-value"_RedisString}, + }), + RedisMap({ + {"name", "replace"_RedisString}, + {"type", "pure-token"_RedisString}, + {"display_text", "replace"_RedisString}, + {"token", "REPLACE"_RedisString}, + {"since", "3.0.0"_RedisString}, + {"flags", RedisArray({ + "optional"_RedisStatus, + })}, + }), + RedisMap({ + {"name", "absttl"_RedisString}, + {"type", "pure-token"_RedisString}, + {"display_text", "absttl"_RedisString}, + {"token", "ABSTTL"_RedisString}, + {"since", "5.0.0"_RedisString}, + {"flags", RedisArray({ + "optional"_RedisStatus, + })}, + }), + RedisMap({ + {"name", "seconds"_RedisString}, + {"type", "integer"_RedisString}, + {"display_text", "seconds"_RedisString}, + {"token", "IDLETIME"_RedisString}, + {"since", "5.0.0"_RedisString}, + {"flags", RedisArray({ + "optional"_RedisStatus, + })}, + }), + RedisMap({ + {"name", "frequency"_RedisString}, + {"type", "integer"_RedisString}, + {"display_text", "frequency"_RedisString}, + {"token", "FREQ"_RedisString}, + {"since", "5.0.0"_RedisString}, + {"flags", RedisArray({ + "optional"_RedisStatus, + })}, + }), + })}, + })}, + {"xack", + RedisMap({ + {"summary", + "Returns the number of messages that were successfully acknowledged by the consumer group member of a stream."_RedisString}, + {"since", "5.0.0"_RedisString}, + {"group", "stream"_RedisString}, + {"complexity", "O(1) for each message ID processed."_RedisString}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "key"_RedisString}, + {"type", "key"_RedisString}, + {"display_text", "key"_RedisString}, + {"key_spec_index", 0_RedisInt}, + }), + RedisMap({ + {"name", "group"_RedisString}, + {"type", "string"_RedisString}, + {"display_text", "group"_RedisString}, + }), + RedisMap({ + {"name", "id"_RedisString}, + {"type", "string"_RedisString}, + {"display_text", "id"_RedisString}, + {"flags", RedisArray({ + "multiple"_RedisStatus, + })}, + }), + })}, + })}, + {"bzpopmax", + RedisMap({ + {"summary", + "Removes and returns the member with the highest score from one or more sorted sets. Blocks until a member is available otherwise. 
Deletes the sorted set if the last element was popped."_RedisString}, + {"since", "5.0.0"_RedisString}, + {"group", "sorted-set"_RedisString}, + {"complexity", "O(log(N)) with N being the number of elements in the sorted set."_RedisString}, + {"history", RedisSet({ + RedisArray({"6.0.0"_RedisString, + "`timeout` is interpreted as a double instead of an integer."_RedisString}), + })}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "key"_RedisString}, + {"type", "key"_RedisString}, + {"display_text", "key"_RedisString}, + {"key_spec_index", 0_RedisInt}, + {"flags", RedisArray({ + "multiple"_RedisStatus, + })}, + }), + RedisMap({ + {"name", "timeout"_RedisString}, + {"type", "double"_RedisString}, + {"display_text", "timeout"_RedisString}, + }), + })}, + })}, + {"hsetnx", RedisMap({ + {"summary", "Sets the value of a field in a hash only when the field doesn't exist."_RedisString}, + {"since", "2.0.0"_RedisString}, + {"group", "hash"_RedisString}, + {"complexity", "O(1)"_RedisString}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "key"_RedisString}, + {"type", "key"_RedisString}, + {"display_text", "key"_RedisString}, + {"key_spec_index", 0_RedisInt}, + }), + RedisMap({ + {"name", "field"_RedisString}, + {"type", "string"_RedisString}, + {"display_text", "field"_RedisString}, + }), + RedisMap({ + {"name", "value"_RedisString}, + {"type", "string"_RedisString}, + {"display_text", "value"_RedisString}, + }), + })}, + })}, + {"zcard", RedisMap({ + {"summary", "Returns the number of members in a sorted set."_RedisString}, + {"since", "1.2.0"_RedisString}, + {"group", "sorted-set"_RedisString}, + {"complexity", "O(1)"_RedisString}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "key"_RedisString}, + {"type", "key"_RedisString}, + {"display_text", "key"_RedisString}, + {"key_spec_index", 0_RedisInt}, + }), + })}, + })}, + {"getex", RedisMap({ + {"summary", "Returns the string value of a key after setting its expiration time."_RedisString}, + {"since", "6.2.0"_RedisString}, + {"group", "string"_RedisString}, + {"complexity", "O(1)"_RedisString}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "key"_RedisString}, + {"type", "key"_RedisString}, + {"display_text", "key"_RedisString}, + {"key_spec_index", 0_RedisInt}, + }), + RedisMap({ + {"name", "expiration"_RedisString}, + {"type", "oneof"_RedisString}, + {"flags", RedisArray({ + "optional"_RedisStatus, + })}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "seconds"_RedisString}, + {"type", "integer"_RedisString}, + {"display_text", "seconds"_RedisString}, + {"token", "EX"_RedisString}, + }), + RedisMap({ + {"name", "milliseconds"_RedisString}, + {"type", "integer"_RedisString}, + {"display_text", "milliseconds"_RedisString}, + {"token", "PX"_RedisString}, + }), + RedisMap({ + {"name", "unix-time-seconds"_RedisString}, + {"type", "unix-time"_RedisString}, + {"display_text", "unix-time-seconds"_RedisString}, + {"token", "EXAT"_RedisString}, + }), + RedisMap({ + {"name", "unix-time-milliseconds"_RedisString}, + {"type", "unix-time"_RedisString}, + {"display_text", "unix-time-milliseconds"_RedisString}, + {"token", "PXAT"_RedisString}, + }), + RedisMap({ + {"name", "persist"_RedisString}, + {"type", "pure-token"_RedisString}, + {"display_text", "persist"_RedisString}, + {"token", "PERSIST"_RedisString}, + }), + })}, + }), + })}, + })}, + {"dbsize", RedisMap({ + {"summary", "Returns the number of keys in the database."_RedisString}, + {"since", "1.0.0"_RedisString}, + {"group", "server"_RedisString}, + {"complexity", 
"O(1)"_RedisString}, + })}, + {"sintercard", + RedisMap({ + {"summary", "Returns the number of members of the intersect of multiple sets."_RedisString}, + {"since", "7.0.0"_RedisString}, + {"group", "set"_RedisString}, + {"complexity", + "O(N*M) worst case where N is the cardinality of the smallest set and M is the number of sets."_RedisString}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "numkeys"_RedisString}, + {"type", "integer"_RedisString}, + {"display_text", "numkeys"_RedisString}, + }), + RedisMap({ + {"name", "key"_RedisString}, + {"type", "key"_RedisString}, + {"display_text", "key"_RedisString}, + {"key_spec_index", 0_RedisInt}, + {"flags", RedisArray({ + "multiple"_RedisStatus, + })}, + }), + RedisMap({ + {"name", "limit"_RedisString}, + {"type", "integer"_RedisString}, + {"display_text", "limit"_RedisString}, + {"token", "LIMIT"_RedisString}, + {"flags", RedisArray({ + "optional"_RedisStatus, + })}, + }), + })}, + })}, + {"geodist", RedisMap({ + {"summary", "Returns the distance between two members of a geospatial index."_RedisString}, + {"since", "3.2.0"_RedisString}, + {"group", "geo"_RedisString}, + {"complexity", "O(log(N))"_RedisString}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "key"_RedisString}, + {"type", "key"_RedisString}, + {"display_text", "key"_RedisString}, + {"key_spec_index", 0_RedisInt}, + }), + RedisMap({ + {"name", "member1"_RedisString}, + {"type", "string"_RedisString}, + {"display_text", "member1"_RedisString}, + }), + RedisMap({ + {"name", "member2"_RedisString}, + {"type", "string"_RedisString}, + {"display_text", "member2"_RedisString}, + }), + RedisMap({ + {"name", "unit"_RedisString}, + {"type", "oneof"_RedisString}, + {"flags", RedisArray({ + "optional"_RedisStatus, + })}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "m"_RedisString}, + {"type", "pure-token"_RedisString}, + {"display_text", "m"_RedisString}, + {"token", "M"_RedisString}, + }), + RedisMap({ + {"name", "km"_RedisString}, + {"type", "pure-token"_RedisString}, + {"display_text", "km"_RedisString}, + {"token", "KM"_RedisString}, + }), + RedisMap({ + {"name", "ft"_RedisString}, + {"type", "pure-token"_RedisString}, + {"display_text", "ft"_RedisString}, + {"token", "FT"_RedisString}, + }), + RedisMap({ + {"name", "mi"_RedisString}, + {"type", "pure-token"_RedisString}, + {"display_text", "mi"_RedisString}, + {"token", "MI"_RedisString}, + }), + })}, + }), + })}, + })}, + {"renamenx", + RedisMap({ + {"summary", "Renames a key only when the target key name doesn't exist."_RedisString}, + {"since", "1.0.0"_RedisString}, + {"group", "generic"_RedisString}, + {"complexity", "O(1)"_RedisString}, + {"history", + RedisSet({ + RedisArray( + {"3.2.0"_RedisString, + "The command no longer returns an error when source and destination names are the same."_RedisString}), + })}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "key"_RedisString}, + {"type", "key"_RedisString}, + {"display_text", "key"_RedisString}, + {"key_spec_index", 0_RedisInt}, + }), + RedisMap({ + {"name", "newkey"_RedisString}, + {"type", "key"_RedisString}, + {"display_text", "newkey"_RedisString}, + {"key_spec_index", 1_RedisInt}, + }), + })}, + })}, + {"flushdb", + RedisMap({ + {"summary", "Remove all keys from the current database."_RedisString}, + {"since", "1.0.0"_RedisString}, + {"group", "server"_RedisString}, + {"complexity", "O(N) where N is the number of keys in the selected database"_RedisString}, + {"history", RedisSet({ + RedisArray({"4.0.0"_RedisString, "Added the `ASYNC` flushing 
mode modifier."_RedisString}), + RedisArray({"6.2.0"_RedisString, "Added the `SYNC` flushing mode modifier."_RedisString}), + })}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "flush-type"_RedisString}, + {"type", "oneof"_RedisString}, + {"flags", RedisArray({ + "optional"_RedisStatus, + })}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "async"_RedisString}, + {"type", "pure-token"_RedisString}, + {"display_text", "async"_RedisString}, + {"token", "ASYNC"_RedisString}, + {"since", "4.0.0"_RedisString}, + }), + RedisMap({ + {"name", "sync"_RedisString}, + {"type", "pure-token"_RedisString}, + {"display_text", "sync"_RedisString}, + {"token", "SYNC"_RedisString}, + {"since", "6.2.0"_RedisString}, + }), + })}, + }), + })}, + })}, + {"zrange", + RedisMap({ + {"summary", "Returns members in a sorted set within a range of indexes."_RedisString}, + {"since", "1.2.0"_RedisString}, + {"group", "sorted-set"_RedisString}, + {"complexity", + "O(log(N)+M) with N being the number of elements in the sorted set and M the number of elements returned."_RedisString}, + {"history", + RedisSet({ + RedisArray({"6.2.0"_RedisString, "Added the `REV`, `BYSCORE`, `BYLEX` and `LIMIT` options."_RedisString}), + })}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "key"_RedisString}, + {"type", "key"_RedisString}, + {"display_text", "key"_RedisString}, + {"key_spec_index", 0_RedisInt}, + }), + RedisMap({ + {"name", "start"_RedisString}, + {"type", "string"_RedisString}, + {"display_text", "start"_RedisString}, + }), + RedisMap({ + {"name", "stop"_RedisString}, + {"type", "string"_RedisString}, + {"display_text", "stop"_RedisString}, + }), + RedisMap({ + {"name", "sortby"_RedisString}, + {"type", "oneof"_RedisString}, + {"since", "6.2.0"_RedisString}, + {"flags", RedisArray({ + "optional"_RedisStatus, + })}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "byscore"_RedisString}, + {"type", "pure-token"_RedisString}, + {"display_text", "byscore"_RedisString}, + {"token", "BYSCORE"_RedisString}, + }), + RedisMap({ + {"name", "bylex"_RedisString}, + {"type", "pure-token"_RedisString}, + {"display_text", "bylex"_RedisString}, + {"token", "BYLEX"_RedisString}, + }), + })}, + }), + RedisMap({ + {"name", "rev"_RedisString}, + {"type", "pure-token"_RedisString}, + {"display_text", "rev"_RedisString}, + {"token", "REV"_RedisString}, + {"since", "6.2.0"_RedisString}, + {"flags", RedisArray({ + "optional"_RedisStatus, + })}, + }), + RedisMap({ + {"name", "limit"_RedisString}, + {"type", "block"_RedisString}, + {"token", "LIMIT"_RedisString}, + {"since", "6.2.0"_RedisString}, + {"flags", RedisArray({ + "optional"_RedisStatus, + })}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "offset"_RedisString}, + {"type", "integer"_RedisString}, + {"display_text", "offset"_RedisString}, + }), + RedisMap({ + {"name", "count"_RedisString}, + {"type", "integer"_RedisString}, + {"display_text", "count"_RedisString}, + }), + })}, + }), + RedisMap({ + {"name", "withscores"_RedisString}, + {"type", "pure-token"_RedisString}, + {"display_text", "withscores"_RedisString}, + {"token", "WITHSCORES"_RedisString}, + {"flags", RedisArray({ + "optional"_RedisStatus, + })}, + }), + })}, + })}, + {"zrevrank", + RedisMap({ + {"summary", "Returns the index of a member in a sorted set ordered by descending scores."_RedisString}, + {"since", "2.0.0"_RedisString}, + {"group", "sorted-set"_RedisString}, + {"complexity", "O(log(N))"_RedisString}, + {"history", RedisSet({ + RedisArray({"7.2.0"_RedisString, "Added the optional 
`WITHSCORE` argument."_RedisString}), + })}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "key"_RedisString}, + {"type", "key"_RedisString}, + {"display_text", "key"_RedisString}, + {"key_spec_index", 0_RedisInt}, + }), + RedisMap({ + {"name", "member"_RedisString}, + {"type", "string"_RedisString}, + {"display_text", "member"_RedisString}, + }), + RedisMap({ + {"name", "withscore"_RedisString}, + {"type", "pure-token"_RedisString}, + {"display_text", "withscore"_RedisString}, + {"token", "WITHSCORE"_RedisString}, + {"flags", RedisArray({ + "optional"_RedisStatus, + })}, + }), + })}, + })}, + {"decrby", + RedisMap({ + {"summary", + "Decrements a number from the integer value of a key. Uses 0 as initial value if the key doesn't exist."_RedisString}, + {"since", "1.0.0"_RedisString}, + {"group", "string"_RedisString}, + {"complexity", "O(1)"_RedisString}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "key"_RedisString}, + {"type", "key"_RedisString}, + {"display_text", "key"_RedisString}, + {"key_spec_index", 0_RedisInt}, + }), + RedisMap({ + {"name", "decrement"_RedisString}, + {"type", "integer"_RedisString}, + {"display_text", "decrement"_RedisString}, + }), + })}, + })}, + {"rename", RedisMap({ + {"summary", "Renames a key and overwrites the destination."_RedisString}, + {"since", "1.0.0"_RedisString}, + {"group", "generic"_RedisString}, + {"complexity", "O(1)"_RedisString}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "key"_RedisString}, + {"type", "key"_RedisString}, + {"display_text", "key"_RedisString}, + {"key_spec_index", 0_RedisInt}, + }), + RedisMap({ + {"name", "newkey"_RedisString}, + {"type", "key"_RedisString}, + {"display_text", "newkey"_RedisString}, + {"key_spec_index", 1_RedisInt}, + }), + })}, + })}, + {"rpoplpush", + RedisMap({ + {"summary", + "Returns the last element of a list after removing and pushing it to another list. 
Deletes the list if the last element was popped."_RedisString}, + {"since", "1.2.0"_RedisString}, + {"group", "list"_RedisString}, + {"complexity", "O(1)"_RedisString}, + {"doc_flags", RedisSet({ + "deprecated"_RedisStatus, + })}, + {"deprecated_since", "6.2.0"_RedisString}, + {"replaced_by", "`LMOVE` with the `RIGHT` and `LEFT` arguments"_RedisString}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "source"_RedisString}, + {"type", "key"_RedisString}, + {"display_text", "source"_RedisString}, + {"key_spec_index", 0_RedisInt}, + }), + RedisMap({ + {"name", "destination"_RedisString}, + {"type", "key"_RedisString}, + {"display_text", "destination"_RedisString}, + {"key_spec_index", 1_RedisInt}, + }), + })}, + })}, + {"randomkey", RedisMap({ + {"summary", "Returns a random key name from the database."_RedisString}, + {"since", "1.0.0"_RedisString}, + {"group", "generic"_RedisString}, + {"complexity", "O(1)"_RedisString}, + })}, + {"fcall_ro", RedisMap({ + {"summary", "Invokes a read-only function."_RedisString}, + {"since", "7.0.0"_RedisString}, + {"group", "scripting"_RedisString}, + {"complexity", "Depends on the function that is executed."_RedisString}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "function"_RedisString}, + {"type", "string"_RedisString}, + {"display_text", "function"_RedisString}, + }), + RedisMap({ + {"name", "numkeys"_RedisString}, + {"type", "integer"_RedisString}, + {"display_text", "numkeys"_RedisString}, + }), + RedisMap({ + {"name", "key"_RedisString}, + {"type", "key"_RedisString}, + {"display_text", "key"_RedisString}, + {"key_spec_index", 0_RedisInt}, + {"flags", RedisArray({ + "optional"_RedisStatus, + "multiple"_RedisStatus, + })}, + }), + RedisMap({ + {"name", "arg"_RedisString}, + {"type", "string"_RedisString}, + {"display_text", "arg"_RedisString}, + {"flags", RedisArray({ + "optional"_RedisStatus, + "multiple"_RedisStatus, + })}, + }), + })}, + })}, + {"failover", RedisMap({ + {"summary", "Starts a coordinated failover from a server to one of its replicas."_RedisString}, + {"since", "6.2.0"_RedisString}, + {"group", "server"_RedisString}, + {"complexity", "O(1)"_RedisString}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "target"_RedisString}, + {"type", "block"_RedisString}, + {"token", "TO"_RedisString}, + {"flags", RedisArray({ + "optional"_RedisStatus, + })}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "host"_RedisString}, + {"type", "string"_RedisString}, + {"display_text", "host"_RedisString}, + }), + RedisMap({ + {"name", "port"_RedisString}, + {"type", "integer"_RedisString}, + {"display_text", "port"_RedisString}, + }), + RedisMap({ + {"name", "force"_RedisString}, + {"type", "pure-token"_RedisString}, + {"display_text", "force"_RedisString}, + {"token", "FORCE"_RedisString}, + {"flags", RedisArray({ + "optional"_RedisStatus, + })}, + }), + })}, + }), + RedisMap({ + {"name", "abort"_RedisString}, + {"type", "pure-token"_RedisString}, + {"display_text", "abort"_RedisString}, + {"token", "ABORT"_RedisString}, + {"flags", RedisArray({ + "optional"_RedisStatus, + })}, + }), + RedisMap({ + {"name", "milliseconds"_RedisString}, + {"type", "integer"_RedisString}, + {"display_text", "milliseconds"_RedisString}, + {"token", "TIMEOUT"_RedisString}, + {"flags", RedisArray({ + "optional"_RedisStatus, + })}, + }), + })}, + })}, + {"lpop", + RedisMap({ + {"summary", + "Returns the first elements in a list after removing it. 
Deletes the list if the last element was popped."_RedisString}, + {"since", "1.0.0"_RedisString}, + {"group", "list"_RedisString}, + {"complexity", "O(N) where N is the number of elements returned"_RedisString}, + {"history", RedisSet({ + RedisArray({"6.2.0"_RedisString, "Added the `count` argument."_RedisString}), + })}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "key"_RedisString}, + {"type", "key"_RedisString}, + {"display_text", "key"_RedisString}, + {"key_spec_index", 0_RedisInt}, + }), + RedisMap({ + {"name", "count"_RedisString}, + {"type", "integer"_RedisString}, + {"display_text", "count"_RedisString}, + {"since", "6.2.0"_RedisString}, + {"flags", RedisArray({ + "optional"_RedisStatus, + })}, + }), + })}, + })}, + {"echo", RedisMap({ + {"summary", "Returns the given string."_RedisString}, + {"since", "1.0.0"_RedisString}, + {"group", "connection"_RedisString}, + {"complexity", "O(1)"_RedisString}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "message"_RedisString}, + {"type", "string"_RedisString}, + {"display_text", "message"_RedisString}, + }), + })}, + })}, + {"rpop", + RedisMap({ + {"summary", + "Returns and removes the last elements of a list. Deletes the list if the last element was popped."_RedisString}, + {"since", "1.0.0"_RedisString}, + {"group", "list"_RedisString}, + {"complexity", "O(N) where N is the number of elements returned"_RedisString}, + {"history", RedisSet({ + RedisArray({"6.2.0"_RedisString, "Added the `count` argument."_RedisString}), + })}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "key"_RedisString}, + {"type", "key"_RedisString}, + {"display_text", "key"_RedisString}, + {"key_spec_index", 0_RedisInt}, + }), + RedisMap({ + {"name", "count"_RedisString}, + {"type", "integer"_RedisString}, + {"display_text", "count"_RedisString}, + {"since", "6.2.0"_RedisString}, + {"flags", RedisArray({ + "optional"_RedisStatus, + })}, + }), + })}, + })}, + {"zrangestore", + RedisMap({ + {"summary", "Stores a range of members from sorted set in a key."_RedisString}, + {"since", "6.2.0"_RedisString}, + {"group", "sorted-set"_RedisString}, + {"complexity", + "O(log(N)+M) with N being the number of elements in the sorted set and M the number of elements stored into the destination key."_RedisString}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "dst"_RedisString}, + {"type", "key"_RedisString}, + {"display_text", "dst"_RedisString}, + {"key_spec_index", 0_RedisInt}, + }), + RedisMap({ + {"name", "src"_RedisString}, + {"type", "key"_RedisString}, + {"display_text", "src"_RedisString}, + {"key_spec_index", 1_RedisInt}, + }), + RedisMap({ + {"name", "min"_RedisString}, + {"type", "string"_RedisString}, + {"display_text", "min"_RedisString}, + }), + RedisMap({ + {"name", "max"_RedisString}, + {"type", "string"_RedisString}, + {"display_text", "max"_RedisString}, + }), + RedisMap({ + {"name", "sortby"_RedisString}, + {"type", "oneof"_RedisString}, + {"flags", RedisArray({ + "optional"_RedisStatus, + })}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "byscore"_RedisString}, + {"type", "pure-token"_RedisString}, + {"display_text", "byscore"_RedisString}, + {"token", "BYSCORE"_RedisString}, + }), + RedisMap({ + {"name", "bylex"_RedisString}, + {"type", "pure-token"_RedisString}, + {"display_text", "bylex"_RedisString}, + {"token", "BYLEX"_RedisString}, + }), + })}, + }), + RedisMap({ + {"name", "rev"_RedisString}, + {"type", "pure-token"_RedisString}, + {"display_text", "rev"_RedisString}, + {"token", "REV"_RedisString}, + {"flags", 
RedisArray({ + "optional"_RedisStatus, + })}, + }), + RedisMap({ + {"name", "limit"_RedisString}, + {"type", "block"_RedisString}, + {"token", "LIMIT"_RedisString}, + {"flags", RedisArray({ + "optional"_RedisStatus, + })}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "offset"_RedisString}, + {"type", "integer"_RedisString}, + {"display_text", "offset"_RedisString}, + }), + RedisMap({ + {"name", "count"_RedisString}, + {"type", "integer"_RedisString}, + {"display_text", "count"_RedisString}, + }), + })}, + }), + })}, + })}, + {"srem", + RedisMap({ + {"summary", + "Removes one or more members from a set. Deletes the set if the last member was removed."_RedisString}, + {"since", "1.0.0"_RedisString}, + {"group", "set"_RedisString}, + {"complexity", "O(N) where N is the number of members to be removed."_RedisString}, + {"history", RedisSet({ + RedisArray({"2.4.0"_RedisString, "Accepts multiple `member` arguments."_RedisString}), + })}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "key"_RedisString}, + {"type", "key"_RedisString}, + {"display_text", "key"_RedisString}, + {"key_spec_index", 0_RedisInt}, + }), + RedisMap({ + {"name", "member"_RedisString}, + {"type", "string"_RedisString}, + {"display_text", "member"_RedisString}, + {"flags", RedisArray({ + "multiple"_RedisStatus, + })}, + }), + })}, + })}, + {"restore-asking", + RedisMap({ + {"summary", "An internal command for migrating keys in a cluster."_RedisString}, + {"since", "3.0.0"_RedisString}, + {"group", "server"_RedisString}, + {"complexity", + "O(1) to create the new key and additional O(N*M) to reconstruct the serialized value, where N is the number of Redis objects composing the value and M their average size. For small string values the time complexity is thus O(1)+O(1*M) where M is small, so simply O(1). 
However for sorted set values the complexity is O(N*M*log(N)) because inserting values into sorted sets is O(log(N))."_RedisString}, + {"doc_flags", RedisSet({ + "syscmd"_RedisStatus, + })}, + {"history", RedisSet({ + RedisArray({"3.0.0"_RedisString, "Added the `REPLACE` modifier."_RedisString}), + RedisArray({"5.0.0"_RedisString, "Added the `ABSTTL` modifier."_RedisString}), + RedisArray({"5.0.0"_RedisString, "Added the `IDLETIME` and `FREQ` options."_RedisString}), + })}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "key"_RedisString}, + {"type", "key"_RedisString}, + {"display_text", "key"_RedisString}, + {"key_spec_index", 0_RedisInt}, + }), + RedisMap({ + {"name", "ttl"_RedisString}, + {"type", "integer"_RedisString}, + {"display_text", "ttl"_RedisString}, + }), + RedisMap({ + {"name", "serialized-value"_RedisString}, + {"type", "string"_RedisString}, + {"display_text", "serialized-value"_RedisString}, + }), + RedisMap({ + {"name", "replace"_RedisString}, + {"type", "pure-token"_RedisString}, + {"display_text", "replace"_RedisString}, + {"token", "REPLACE"_RedisString}, + {"since", "3.0.0"_RedisString}, + {"flags", RedisArray({ + "optional"_RedisStatus, + })}, + }), + RedisMap({ + {"name", "absttl"_RedisString}, + {"type", "pure-token"_RedisString}, + {"display_text", "absttl"_RedisString}, + {"token", "ABSTTL"_RedisString}, + {"since", "5.0.0"_RedisString}, + {"flags", RedisArray({ + "optional"_RedisStatus, + })}, + }), + RedisMap({ + {"name", "seconds"_RedisString}, + {"type", "integer"_RedisString}, + {"display_text", "seconds"_RedisString}, + {"token", "IDLETIME"_RedisString}, + {"since", "5.0.0"_RedisString}, + {"flags", RedisArray({ + "optional"_RedisStatus, + })}, + }), + RedisMap({ + {"name", "frequency"_RedisString}, + {"type", "integer"_RedisString}, + {"display_text", "frequency"_RedisString}, + {"token", "FREQ"_RedisString}, + {"since", "5.0.0"_RedisString}, + {"flags", RedisArray({ + "optional"_RedisStatus, + })}, + }), + })}, + })}, + {"bitfield", + RedisMap({ + {"summary", "Performs arbitrary bitfield integer operations on strings."_RedisString}, + {"since", "3.2.0"_RedisString}, + {"group", "bitmap"_RedisString}, + {"complexity", "O(1) for each subcommand specified"_RedisString}, + {"arguments", + RedisArray({ + RedisMap({ + {"name", "key"_RedisString}, + {"type", "key"_RedisString}, + {"display_text", "key"_RedisString}, + {"key_spec_index", 0_RedisInt}, + }), + RedisMap({ + {"name", "operation"_RedisString}, + {"type", "oneof"_RedisString}, + {"flags", RedisArray({ + "optional"_RedisStatus, + "multiple"_RedisStatus, + })}, + {"arguments", + RedisArray({ + RedisMap({ + {"name", "get-block"_RedisString}, + {"type", "block"_RedisString}, + {"token", "GET"_RedisString}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "encoding"_RedisString}, + {"type", "string"_RedisString}, + {"display_text", "encoding"_RedisString}, + }), + RedisMap({ + {"name", "offset"_RedisString}, + {"type", "integer"_RedisString}, + {"display_text", "offset"_RedisString}, + }), + })}, + }), + RedisMap({ + {"name", "write"_RedisString}, + {"type", "block"_RedisString}, + {"arguments", + RedisArray({ + RedisMap({ + {"name", "overflow-block"_RedisString}, + {"type", "oneof"_RedisString}, + {"token", "OVERFLOW"_RedisString}, + {"flags", RedisArray({ + "optional"_RedisStatus, + })}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "wrap"_RedisString}, + {"type", "pure-token"_RedisString}, + {"display_text", "wrap"_RedisString}, + {"token", "WRAP"_RedisString}, + }), + RedisMap({ + 
{"name", "sat"_RedisString}, + {"type", "pure-token"_RedisString}, + {"display_text", "sat"_RedisString}, + {"token", "SAT"_RedisString}, + }), + RedisMap({ + {"name", "fail"_RedisString}, + {"type", "pure-token"_RedisString}, + {"display_text", "fail"_RedisString}, + {"token", "FAIL"_RedisString}, + }), + })}, + }), + RedisMap({ + {"name", "write-operation"_RedisString}, + {"type", "oneof"_RedisString}, + {"arguments", + RedisArray({ + RedisMap({ + {"name", "set-block"_RedisString}, + {"type", "block"_RedisString}, + {"token", "SET"_RedisString}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "encoding"_RedisString}, + {"type", "string"_RedisString}, + {"display_text", "encoding"_RedisString}, + }), + RedisMap({ + {"name", "offset"_RedisString}, + {"type", "integer"_RedisString}, + {"display_text", "offset"_RedisString}, + }), + RedisMap({ + {"name", "value"_RedisString}, + {"type", "integer"_RedisString}, + {"display_text", "value"_RedisString}, + }), + })}, + }), + RedisMap({ + {"name", "incrby-block"_RedisString}, + {"type", "block"_RedisString}, + {"token", "INCRBY"_RedisString}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "encoding"_RedisString}, + {"type", "string"_RedisString}, + {"display_text", "encoding"_RedisString}, + }), + RedisMap({ + {"name", "offset"_RedisString}, + {"type", "integer"_RedisString}, + {"display_text", "offset"_RedisString}, + }), + RedisMap({ + {"name", "increment"_RedisString}, + {"type", "integer"_RedisString}, + {"display_text", "increment"_RedisString}, + }), + })}, + }), + })}, + }), + })}, + }), + })}, + }), + })}, + })}, + {"psetex", + RedisMap({ + {"summary", + "Sets both string value and expiration time in milliseconds of a key. The key is created if it doesn't exist."_RedisString}, + {"since", "2.6.0"_RedisString}, + {"group", "string"_RedisString}, + {"complexity", "O(1)"_RedisString}, + {"doc_flags", RedisSet({ + "deprecated"_RedisStatus, + })}, + {"deprecated_since", "2.6.12"_RedisString}, + {"replaced_by", "`SET` with the `PX` argument"_RedisString}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "key"_RedisString}, + {"type", "key"_RedisString}, + {"display_text", "key"_RedisString}, + {"key_spec_index", 0_RedisInt}, + }), + RedisMap({ + {"name", "milliseconds"_RedisString}, + {"type", "integer"_RedisString}, + {"display_text", "milliseconds"_RedisString}, + }), + RedisMap({ + {"name", "value"_RedisString}, + {"type", "string"_RedisString}, + {"display_text", "value"_RedisString}, + }), + })}, + })}, + {"ping", RedisMap({ + {"summary", "Returns the server's liveliness response."_RedisString}, + {"since", "1.0.0"_RedisString}, + {"group", "connection"_RedisString}, + {"complexity", "O(1)"_RedisString}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "message"_RedisString}, + {"type", "string"_RedisString}, + {"display_text", "message"_RedisString}, + {"flags", RedisArray({ + "optional"_RedisStatus, + })}, + }), + })}, + })}, + {"hlen", RedisMap({ + {"summary", "Returns the number of fields in a hash."_RedisString}, + {"since", "2.0.0"_RedisString}, + {"group", "hash"_RedisString}, + {"complexity", "O(1)"_RedisString}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "key"_RedisString}, + {"type", "key"_RedisString}, + {"display_text", "key"_RedisString}, + {"key_spec_index", 0_RedisInt}, + }), + })}, + })}, + {"msetnx", + RedisMap({ + {"summary", + "Atomically modifies the string values of one or more keys only when all keys don't exist."_RedisString}, + {"since", "1.0.1"_RedisString}, + {"group", 
"string"_RedisString}, + {"complexity", "O(N) where N is the number of keys to set."_RedisString}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "data"_RedisString}, + {"type", "block"_RedisString}, + {"flags", RedisArray({ + "multiple"_RedisStatus, + })}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "key"_RedisString}, + {"type", "key"_RedisString}, + {"display_text", "key"_RedisString}, + {"key_spec_index", 0_RedisInt}, + }), + RedisMap({ + {"name", "value"_RedisString}, + {"type", "string"_RedisString}, + {"display_text", "value"_RedisString}, + }), + })}, + }), + })}, + })}, + {"slowlog", + RedisMap({ + {"summary", "A container for slow log commands."_RedisString}, + {"since", "2.2.12"_RedisString}, + {"group", "server"_RedisString}, + {"complexity", "Depends on subcommand."_RedisString}, + {"subcommands", + RedisMap({ + {"slowlog|get", + RedisMap({ + {"summary", "Returns the slow log's entries."_RedisString}, + {"since", "2.2.12"_RedisString}, + {"group", "server"_RedisString}, + {"complexity", "O(N) where N is the number of entries returned"_RedisString}, + {"history", RedisSet({ + RedisArray({"4.0.0"_RedisString, + "Added client IP address, port and name to the reply."_RedisString}), + })}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "count"_RedisString}, + {"type", "integer"_RedisString}, + {"display_text", "count"_RedisString}, + {"flags", RedisArray({ + "optional"_RedisStatus, + })}, + }), + })}, + })}, + {"slowlog|reset", RedisMap({ + {"summary", "Clears all entries from the slow log."_RedisString}, + {"since", "2.2.12"_RedisString}, + {"group", "server"_RedisString}, + {"complexity", "O(N) where N is the number of entries in the slowlog"_RedisString}, + })}, + {"slowlog|len", RedisMap({ + {"summary", "Returns the number of entries in the slow log."_RedisString}, + {"since", "2.2.12"_RedisString}, + {"group", "server"_RedisString}, + {"complexity", "O(1)"_RedisString}, + })}, + {"slowlog|help", RedisMap({ + {"summary", "Show helpful text about the different subcommands"_RedisString}, + {"since", "6.2.0"_RedisString}, + {"group", "server"_RedisString}, + {"complexity", "O(1)"_RedisString}, + })}, + })}, + })}, + {"zremrangebyrank", + RedisMap({ + {"summary", + "Removes members in a sorted set within a range of indexes. Deletes the sorted set if all members were removed."_RedisString}, + {"since", "2.0.0"_RedisString}, + {"group", "sorted-set"_RedisString}, + {"complexity", + "O(log(N)+M) with N being the number of elements in the sorted set and M the number of elements removed by the operation."_RedisString}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "key"_RedisString}, + {"type", "key"_RedisString}, + {"display_text", "key"_RedisString}, + {"key_spec_index", 0_RedisInt}, + }), + RedisMap({ + {"name", "start"_RedisString}, + {"type", "integer"_RedisString}, + {"display_text", "start"_RedisString}, + }), + RedisMap({ + {"name", "stop"_RedisString}, + {"type", "integer"_RedisString}, + {"display_text", "stop"_RedisString}, + }), + })}, + })}, + {"zrangebyscore", + RedisMap({ + {"summary", "Returns members in a sorted set within a range of scores."_RedisString}, + {"since", "1.0.5"_RedisString}, + {"group", "sorted-set"_RedisString}, + {"complexity", + "O(log(N)+M) with N being the number of elements in the sorted set and M the number of elements being returned. If M is constant (e.g. 
always asking for the first 10 elements with LIMIT), you can consider it O(log(N))."_RedisString}, + {"doc_flags", RedisSet({ + "deprecated"_RedisStatus, + })}, + {"deprecated_since", "6.2.0"_RedisString}, + {"replaced_by", "`ZRANGE` with the `BYSCORE` argument"_RedisString}, + {"history", RedisSet({ + RedisArray({"2.0.0"_RedisString, "Added the `WITHSCORES` modifier."_RedisString}), + })}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "key"_RedisString}, + {"type", "key"_RedisString}, + {"display_text", "key"_RedisString}, + {"key_spec_index", 0_RedisInt}, + }), + RedisMap({ + {"name", "min"_RedisString}, + {"type", "double"_RedisString}, + {"display_text", "min"_RedisString}, + }), + RedisMap({ + {"name", "max"_RedisString}, + {"type", "double"_RedisString}, + {"display_text", "max"_RedisString}, + }), + RedisMap({ + {"name", "withscores"_RedisString}, + {"type", "pure-token"_RedisString}, + {"display_text", "withscores"_RedisString}, + {"token", "WITHSCORES"_RedisString}, + {"since", "2.0.0"_RedisString}, + {"flags", RedisArray({ + "optional"_RedisStatus, + })}, + }), + RedisMap({ + {"name", "limit"_RedisString}, + {"type", "block"_RedisString}, + {"token", "LIMIT"_RedisString}, + {"flags", RedisArray({ + "optional"_RedisStatus, + })}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "offset"_RedisString}, + {"type", "integer"_RedisString}, + {"display_text", "offset"_RedisString}, + }), + RedisMap({ + {"name", "count"_RedisString}, + {"type", "integer"_RedisString}, + {"display_text", "count"_RedisString}, + }), + })}, + }), + })}, + })}, + {"sync", RedisMap({ + {"summary", "An internal command used in replication."_RedisString}, + {"since", "1.0.0"_RedisString}, + {"group", "server"_RedisString}, + })}, + {"zinterstore", + RedisMap({ + {"summary", "Stores the intersect of multiple sorted sets in a key."_RedisString}, + {"since", "2.0.0"_RedisString}, + {"group", "sorted-set"_RedisString}, + {"complexity", + "O(N*K)+O(M*log(M)) worst case with N being the smallest input sorted set, K being the number of input sorted sets and M being the number of elements in the resulting sorted set."_RedisString}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "destination"_RedisString}, + {"type", "key"_RedisString}, + {"display_text", "destination"_RedisString}, + {"key_spec_index", 0_RedisInt}, + }), + RedisMap({ + {"name", "numkeys"_RedisString}, + {"type", "integer"_RedisString}, + {"display_text", "numkeys"_RedisString}, + }), + RedisMap({ + {"name", "key"_RedisString}, + {"type", "key"_RedisString}, + {"display_text", "key"_RedisString}, + {"key_spec_index", 1_RedisInt}, + {"flags", RedisArray({ + "multiple"_RedisStatus, + })}, + }), + RedisMap({ + {"name", "weight"_RedisString}, + {"type", "integer"_RedisString}, + {"display_text", "weight"_RedisString}, + {"token", "WEIGHTS"_RedisString}, + {"flags", RedisArray({ + "optional"_RedisStatus, + "multiple"_RedisStatus, + })}, + }), + RedisMap({ + {"name", "aggregate"_RedisString}, + {"type", "oneof"_RedisString}, + {"token", "AGGREGATE"_RedisString}, + {"flags", RedisArray({ + "optional"_RedisStatus, + })}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "sum"_RedisString}, + {"type", "pure-token"_RedisString}, + {"display_text", "sum"_RedisString}, + {"token", "SUM"_RedisString}, + }), + RedisMap({ + {"name", "min"_RedisString}, + {"type", "pure-token"_RedisString}, + {"display_text", "min"_RedisString}, + {"token", "MIN"_RedisString}, + }), + RedisMap({ + {"name", "max"_RedisString}, + {"type", "pure-token"_RedisString}, + 
{"display_text", "max"_RedisString}, + {"token", "MAX"_RedisString}, + }), + })}, + }), + })}, + })}, + {"type", RedisMap({ + {"summary", "Determines the type of value stored at a key."_RedisString}, + {"since", "1.0.0"_RedisString}, + {"group", "generic"_RedisString}, + {"complexity", "O(1)"_RedisString}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "key"_RedisString}, + {"type", "key"_RedisString}, + {"display_text", "key"_RedisString}, + {"key_spec_index", 0_RedisInt}, + }), + })}, + })}, + {"spublish", + RedisMap({ + {"summary", "Post a message to a shard channel"_RedisString}, + {"since", "7.0.0"_RedisString}, + {"group", "pubsub"_RedisString}, + {"complexity", "O(N) where N is the number of clients subscribed to the receiving shard channel."_RedisString}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "shardchannel"_RedisString}, + {"type", "string"_RedisString}, + {"display_text", "shardchannel"_RedisString}, + }), + RedisMap({ + {"name", "message"_RedisString}, + {"type", "string"_RedisString}, + {"display_text", "message"_RedisString}, + }), + })}, + })}, + {"bitpos", RedisMap({ + {"summary", "Finds the first set (1) or clear (0) bit in a string."_RedisString}, + {"since", "2.8.7"_RedisString}, + {"group", "bitmap"_RedisString}, + {"complexity", "O(N)"_RedisString}, + {"history", RedisSet({ + RedisArray({"7.0.0"_RedisString, "Added the `BYTE|BIT` option."_RedisString}), + })}, + {"arguments", + RedisArray({ + RedisMap({ + {"name", "key"_RedisString}, + {"type", "key"_RedisString}, + {"display_text", "key"_RedisString}, + {"key_spec_index", 0_RedisInt}, + }), + RedisMap({ + {"name", "bit"_RedisString}, + {"type", "integer"_RedisString}, + {"display_text", "bit"_RedisString}, + }), + RedisMap({ + {"name", "range"_RedisString}, + {"type", "block"_RedisString}, + {"flags", RedisArray({ + "optional"_RedisStatus, + })}, + {"arguments", + RedisArray({ + RedisMap({ + {"name", "start"_RedisString}, + {"type", "integer"_RedisString}, + {"display_text", "start"_RedisString}, + }), + RedisMap({ + {"name", "end-unit-block"_RedisString}, + {"type", "block"_RedisString}, + {"flags", RedisArray({ + "optional"_RedisStatus, + })}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "end"_RedisString}, + {"type", "integer"_RedisString}, + {"display_text", "end"_RedisString}, + }), + RedisMap({ + {"name", "unit"_RedisString}, + {"type", "oneof"_RedisString}, + {"since", "7.0.0"_RedisString}, + {"flags", RedisArray({ + "optional"_RedisStatus, + })}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "byte"_RedisString}, + {"type", "pure-token"_RedisString}, + {"display_text", "byte"_RedisString}, + {"token", "BYTE"_RedisString}, + }), + RedisMap({ + {"name", "bit"_RedisString}, + {"type", "pure-token"_RedisString}, + {"display_text", "bit"_RedisString}, + {"token", "BIT"_RedisString}, + }), + })}, + }), + })}, + }), + })}, + }), + })}, + })}, + {"zunionstore", + RedisMap({ + {"summary", "Stores the union of multiple sorted sets in a key."_RedisString}, + {"since", "2.0.0"_RedisString}, + {"group", "sorted-set"_RedisString}, + {"complexity", + "O(N)+O(M log(M)) with N being the sum of the sizes of the input sorted sets, and M being the number of elements in the resulting sorted set."_RedisString}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "destination"_RedisString}, + {"type", "key"_RedisString}, + {"display_text", "destination"_RedisString}, + {"key_spec_index", 0_RedisInt}, + }), + RedisMap({ + {"name", "numkeys"_RedisString}, + {"type", "integer"_RedisString}, + 
{"display_text", "numkeys"_RedisString}, + }), + RedisMap({ + {"name", "key"_RedisString}, + {"type", "key"_RedisString}, + {"display_text", "key"_RedisString}, + {"key_spec_index", 1_RedisInt}, + {"flags", RedisArray({ + "multiple"_RedisStatus, + })}, + }), + RedisMap({ + {"name", "weight"_RedisString}, + {"type", "integer"_RedisString}, + {"display_text", "weight"_RedisString}, + {"token", "WEIGHTS"_RedisString}, + {"flags", RedisArray({ + "optional"_RedisStatus, + "multiple"_RedisStatus, + })}, + }), + RedisMap({ + {"name", "aggregate"_RedisString}, + {"type", "oneof"_RedisString}, + {"token", "AGGREGATE"_RedisString}, + {"flags", RedisArray({ + "optional"_RedisStatus, + })}, + {"arguments", RedisArray({ + RedisMap({ + {"name", "sum"_RedisString}, + {"type", "pure-token"_RedisString}, + {"display_text", "sum"_RedisString}, + {"token", "SUM"_RedisString}, + }), + RedisMap({ + {"name", "min"_RedisString}, + {"type", "pure-token"_RedisString}, + {"display_text", "min"_RedisString}, + {"token", "MIN"_RedisString}, + }), + RedisMap({ + {"name", "max"_RedisString}, + {"type", "pure-token"_RedisString}, + {"display_text", "max"_RedisString}, + {"token", "MAX"_RedisString}, + }), + })}, + }), + })}, + })}, +}; + +#endif // WITH_COMMAND_DOCS diff --git a/tools/pika_migrate/src/pika_conf.cc b/tools/pika_migrate/src/pika_conf.cc new file mode 100644 index 0000000000..60c68b3617 --- /dev/null +++ b/tools/pika_migrate/src/pika_conf.cc @@ -0,0 +1,991 @@ +// Copyright (c) 2015-present, Qihoo, Inc. All rights reserved. +// This source code is licensed under the BSD-style license found in the +// LICENSE file in the root directory of this source tree. An additional grant +// of patent rights can be found in the PATENTS file in the same directory. + +#include +#include + +#include + +#include "cache/include/config.h" +#include "include/acl.h" +#include "include/pika_cmd_table_manager.h" +#include "include/pika_conf.h" +#include "include/pika_define.h" + +using pstd::Status; +extern std::unique_ptr g_pika_cmd_table_manager; + +PikaConf::PikaConf(const std::string& path) + : pstd::BaseConf(path), conf_path_(path) {} + +int PikaConf::Load() { + int ret = LoadConf(); + if (ret) { + return ret; + } + + GetConfInt("timeout", &timeout_); + if (timeout_ < 0) { + timeout_ = 60; // 60s + } + GetConfStr("server-id", &server_id_); + if (server_id_.empty()) { + server_id_ = "1"; + } else if (PIKA_SERVER_ID_MAX < std::stoull(server_id_)) { + server_id_ = "PIKA_SERVER_ID_MAX"; + } + GetConfStr("run-id", &run_id_); + if (run_id_.empty()) { + run_id_ = pstd::getRandomHexChars(configRunIDSize); + // try rewrite run_id_ to diff_commands_ + SetRunID(run_id_); + } else if (run_id_.length() != configRunIDSize) { + LOG(FATAL) << "run-id " << run_id_ << " is invalid, its string length should be " << configRunIDSize; + } + GetConfStr("replication-id", &replication_id_); + GetConfStr("requirepass", &requirepass_); + GetConfStr("masterauth", &masterauth_); + GetConfStr("userpass", &userpass_); + GetConfInt("maxclients", &maxclients_); + if (maxclients_ <= 0) { + maxclients_ = 20000; + } + GetConfInt("root-connection-num", &root_connection_num_); + if (root_connection_num_ < 0) { + root_connection_num_ = 2; + } + + std::string swe; + GetConfStr("slowlog-write-errorlog", &swe); + slowlog_write_errorlog_.store(swe == "yes" ? true : false); + + // slot migrate + std::string smgrt; + GetConfStr("slotmigrate", &smgrt); + slotmigrate_.store(smgrt == "yes" ? 
true : false); + + // slow cmd thread pool + std::string slowcmdpool; + GetConfStr("slow-cmd-pool", &slowcmdpool); + slow_cmd_pool_.store(slowcmdpool == "yes" ? true : false); + + int binlog_writer_num = 1; + GetConfInt("binlog-writer-num", &binlog_writer_num); + if (binlog_writer_num <= 0 || binlog_writer_num > 24) { + binlog_writer_num_ = 1; + } else { + binlog_writer_num_ = binlog_writer_num; + } + + int tmp_slowlog_log_slower_than; + GetConfInt("slowlog-log-slower-than", &tmp_slowlog_log_slower_than); + slowlog_log_slower_than_.store(tmp_slowlog_log_slower_than); + + GetConfInt("slowlog-max-len", &slowlog_max_len_); + if (slowlog_max_len_ == 0) { + slowlog_max_len_ = 128; + } + std::string user_blacklist; + GetConfStr("userblacklist", &user_blacklist); + pstd::StringSplit(user_blacklist, COMMA, user_blacklist_); + for (auto& item : user_blacklist_) { + pstd::StringToLower(item); + } + GetConfInt("default-slot-num", &default_slot_num_); + GetConfStr("dump-path", &bgsave_path_); + bgsave_path_ = bgsave_path_.empty() ? "./dump/" : bgsave_path_; + if (bgsave_path_[bgsave_path_.length() - 1] != '/') { + bgsave_path_ += "/"; + } + GetConfInt("dump-expire", &expire_dump_days_); + if (expire_dump_days_ < 0) { + expire_dump_days_ = 0; + } + GetConfStr("dump-prefix", &bgsave_prefix_); + + GetConfInt("expire-logs-nums", &expire_logs_nums_); + if (expire_logs_nums_ <= 10) { + expire_logs_nums_ = 10; + } + GetConfInt("expire-logs-days", &expire_logs_days_); + if (expire_logs_days_ <= 0) { + expire_logs_days_ = 1; + } + GetConfStr("compression", &compression_); + GetConfStr("compression_per_level", &compression_per_level_); + // set slave read only true as default + slave_read_only_ = true; + GetConfInt("slave-priority", &slave_priority_); + + // + // Immutable Sections + // + GetConfInt("port", &port_); + GetConfStr("log-path", &log_path_); + log_path_ = log_path_.empty() ? "./log/" : log_path_; + if (log_path_[log_path_.length() - 1] != '/') { + log_path_ += "/"; + } + GetConfInt("log-retention-time",&log_retention_time_); + if(log_retention_time_ < 0){ + LOG(FATAL) << "log-retention-time invalid"; + } + GetConfStr("loglevel", &log_level_); + GetConfStr("db-path", &db_path_); + GetConfInt("db-instance-num", &db_instance_num_); + if (db_instance_num_ <= 0) { + LOG(FATAL) << "db-instance-num load error"; + } + int64_t t_val = 0; + GetConfInt64("rocksdb-ttl-second", &t_val); + rocksdb_ttl_second_.store(uint64_t(t_val)); + t_val = 0; + GetConfInt64("rocksdb-periodic-second", &t_val); + rocksdb_periodic_second_.store(uint64_t(t_val)); + db_path_ = db_path_.empty() ? 
"./db/" : db_path_; + if (db_path_[db_path_.length() - 1] != '/') { + db_path_ += "/"; + } + + GetConfInt("thread-num", &thread_num_); + if (thread_num_ <= 0) { + thread_num_ = 12; + } + + GetConfInt("thread-pool-size", &thread_pool_size_); + if (thread_pool_size_ <= 0) { + thread_pool_size_ = 12; + } + if (thread_pool_size_ > 100) { + thread_pool_size_ = 100; + } + + GetConfInt("slow-cmd-thread-pool-size", &slow_cmd_thread_pool_size_); + if (slow_cmd_thread_pool_size_ < 0) { + slow_cmd_thread_pool_size_ = 8; + } + if (slow_cmd_thread_pool_size_ > 50) { + slow_cmd_thread_pool_size_ = 50; + } + + GetConfInt("admin-thread-pool-size", &admin_thread_pool_size_); + if (admin_thread_pool_size_ <= 0) { + admin_thread_pool_size_ = 2; + } + if (admin_thread_pool_size_ > 4) { + admin_thread_pool_size_ = 4; + } + + std::string slow_cmd_list; + GetConfStr("slow-cmd-list", &slow_cmd_list); + SetSlowCmd(slow_cmd_list); + + std::string admin_cmd_list; + GetConfStr("admin-cmd-list", &admin_cmd_list); + if (admin_cmd_list == "") { + admin_cmd_list = "info, monitor, ping"; + SetAdminCmd(admin_cmd_list); + } + + std::string unfinished_full_sync; + GetConfStr("internal-used-unfinished-full-sync", &unfinished_full_sync); + if (replication_id_.empty()) { + unfinished_full_sync.clear(); + } + SetInternalUsedUnFinishedFullSync(unfinished_full_sync); + + + GetConfInt("sync-thread-num", &sync_thread_num_); + if (sync_thread_num_ <= 0) { + sync_thread_num_ = 3; + } + if (sync_thread_num_ > 24) { + sync_thread_num_ = 24; + } + + std::string instance_mode; + GetConfStr("instance-mode", &instance_mode); + classic_mode_.store(instance_mode.empty() || !strcasecmp(instance_mode.data(), "classic")); + + if (classic_mode_.load()) { + GetConfInt("databases", &databases_); + if (databases_ < 1 || databases_ > MAX_DB_NUM) { + LOG(FATAL) << "config databases error, limit [1 ~ 8], the actual is: " << databases_; + } + for (int idx = 0; idx < databases_; ++idx) { + db_structs_.push_back({"db" + std::to_string(idx), db_instance_num_}); + } + } + default_db_ = db_structs_[0].db_name; + + // sync_binlog_thread_num_ must be set after the setting of databases_ + GetConfInt("sync-binlog-thread-num", &sync_binlog_thread_num_); + if (sync_binlog_thread_num_ <= 0) { + sync_binlog_thread_num_ = databases_; + } else { + // final value is MIN(sync_binlog_thread_num, databases_) + sync_binlog_thread_num_ = sync_binlog_thread_num_ > databases_ ? databases_ : sync_binlog_thread_num_; + } + + int tmp_replication_num = 0; + GetConfInt("replication-num", &tmp_replication_num); + if (tmp_replication_num > 4 || tmp_replication_num < 0) { + LOG(FATAL) << "replication-num " << tmp_replication_num << "is invalid, please pick from [0...4]"; + } + replication_num_.store(tmp_replication_num); + + int tmp_consensus_level = 0; + GetConfInt("consensus-level", &tmp_consensus_level); + if (tmp_consensus_level < 0 || tmp_consensus_level > replication_num_.load()) { + LOG(FATAL) << "consensus-level " << tmp_consensus_level + << " is invalid, current replication-num: " << replication_num_.load() + << ", please pick from 0 to replication-num" + << " [0..." 
<< replication_num_.load() << "]"; + } + consensus_level_.store(tmp_consensus_level); + if (classic_mode_.load() && (consensus_level_.load() != 0 || replication_num_.load() != 0)) { + LOG(FATAL) << "consensus-level & replication-num only configurable under sharding mode," + << " set it to be 0 if you are using classic mode"; + } + + compact_cron_ = ""; + GetConfStr("compact-cron", &compact_cron_); + if (!compact_cron_.empty()) { + bool have_week = false; + std::string compact_cron; + std::string week_str; + int64_t slash_num = count(compact_cron_.begin(), compact_cron_.end(), '/'); + if (slash_num == 2) { + have_week = true; + std::string::size_type first_slash = compact_cron_.find('/'); + week_str = compact_cron_.substr(0, first_slash); + compact_cron = compact_cron_.substr(first_slash + 1); + } else { + compact_cron = compact_cron_; + } + + std::string::size_type len = compact_cron.length(); + std::string::size_type colon = compact_cron.find('-'); + std::string::size_type underline = compact_cron.find('/'); + if (colon == std::string::npos || underline == std::string::npos || colon >= underline || colon + 1 >= len || + colon + 1 == underline || underline + 1 >= len) { + compact_cron_ = ""; + } else { + int week = std::atoi(week_str.c_str()); + int start = std::atoi(compact_cron.substr(0, colon).c_str()); + int end = std::atoi(compact_cron.substr(colon + 1, underline).c_str()); + int usage = std::atoi(compact_cron.substr(underline + 1).c_str()); + if ((have_week && (week < 1 || week > 7)) || start < 0 || start > 23 || end < 0 || end > 23 || usage < 0 || + usage > 100) { + compact_cron_ = ""; + } + } + } + + compact_interval_ = ""; + GetConfStr("compact-interval", &compact_interval_); + if (!compact_interval_.empty()) { + std::string::size_type len = compact_interval_.length(); + std::string::size_type slash = compact_interval_.find('/'); + if (slash == std::string::npos || slash + 1 >= len) { + compact_interval_ = ""; + } else { + int interval = std::atoi(compact_interval_.substr(0, slash).c_str()); + int usage = std::atoi(compact_interval_.substr(slash + 1).c_str()); + if (interval <= 0 || usage < 0 || usage > 100) { + compact_interval_ = ""; + } + } + } + + GetConfInt("max-subcompactions", &max_subcompactions_); + if (max_subcompactions_ < 1) { + max_subcompactions_ = 1; + } + + GetConfInt("compact-every-num-of-files", &compact_every_num_of_files_); + if (compact_every_num_of_files_ < 10) { + compact_every_num_of_files_ = 10; + } + + GetConfInt("force-compact-file-age-seconds", &force_compact_file_age_seconds_); + if (force_compact_file_age_seconds_ < 300) { + force_compact_file_age_seconds_ = 300; + } + + GetConfInt("force-compact-min-delete-ratio", &force_compact_min_delete_ratio_); + if (force_compact_min_delete_ratio_ < 10) { + force_compact_min_delete_ratio_ = 10; + } + + GetConfInt("dont-compact-sst-created-in-seconds", &dont_compact_sst_created_in_seconds_); + if (dont_compact_sst_created_in_seconds_ < 600) { + dont_compact_sst_created_in_seconds_ = 600; + } + + GetConfInt("best-delete-min-ratio", &best_delete_min_ratio_); + if (best_delete_min_ratio_ < 10) { + best_delete_min_ratio_ = 10; + } + + std::string cs_; + GetConfStr("compaction-strategy", &cs_); + if (cs_ == "full-compact") { + compaction_strategy_ = FullCompact; + } else if (cs_ == "obd-compact") { + compaction_strategy_ = OldestOrBestDeleteRatioSstCompact; + } else { + compaction_strategy_ = NONE; + } + + // least-free-disk-resume-size + GetConfInt64Human("least-free-disk-resume-size", &least_free_disk_to_resume_); + 
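 // illustrative aside, not from the original patch: judging by the "*Human"
+  // suffix, this getter is assumed to accept human-readable sizes, so a conf
+  // line such as `least-free-disk-resume-size : 256M` would arrive here as
+  // 268435456, while a bare number is taken as raw bytes
+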
if (least_free_disk_to_resume_ <= 0) { + least_free_disk_to_resume_ = 268435456; // 256Mb + } + + GetConfInt64("manually-resume-interval", &resume_check_interval_); + if (resume_check_interval_ <= 0) { + resume_check_interval_ = 60; // seconds + } + + GetConfDouble("min-check-resume-ratio", &min_check_resume_ratio_); + if (min_check_resume_ratio_ < 0) { + min_check_resume_ratio_ = 0.7; + } + + // write_buffer_size + GetConfInt64Human("write-buffer-size", &write_buffer_size_); + if (write_buffer_size_ <= 0) { + write_buffer_size_ = 268435456; // 256Mb + } + + GetConfInt("level0-stop-writes-trigger", &level0_stop_writes_trigger_); + if (level0_stop_writes_trigger_ < 36) { + level0_stop_writes_trigger_ = 36; + } + + GetConfInt("level0-slowdown-writes-trigger", &level0_slowdown_writes_trigger_); + if (level0_slowdown_writes_trigger_ < 20) { + level0_slowdown_writes_trigger_ = 20; + } + + GetConfInt("level0-file-num-compaction-trigger", &level0_file_num_compaction_trigger_); + if (level0_file_num_compaction_trigger_ < 4) { + level0_file_num_compaction_trigger_ = 4; + } + + GetConfInt("min-write-buffer-number-to-merge", &min_write_buffer_number_to_merge_); + if (min_write_buffer_number_to_merge_ < 1) { + min_write_buffer_number_to_merge_ = 1; // 1 for immutable memtable to merge + } + + // arena_block_size + GetConfInt64Human("arena-block-size", &arena_block_size_); + if (arena_block_size_ <= 0) { + arena_block_size_ = write_buffer_size_ >> 3; // 1/8 of the write_buffer_size_ + } + + // arena_block_size + GetConfInt64Human("slotmigrate-thread-num", &slotmigrate_thread_num_); + if (slotmigrate_thread_num_ < 1 || slotmigrate_thread_num_ > 24) { + slotmigrate_thread_num_ = 8; // 1/8 of the write_buffer_size_ + } + + // arena_block_size + GetConfInt64Human("thread-migrate-keys-num", &thread_migrate_keys_num_); + if (thread_migrate_keys_num_ < 8 || thread_migrate_keys_num_ > 128) { + thread_migrate_keys_num_ = 64; // 1/8 of the write_buffer_size_ + } + + // max_write_buffer_size + GetConfInt64Human("max-write-buffer-size", &max_write_buffer_size_); + if (max_write_buffer_size_ <= 0) { + max_write_buffer_size_ = PIKA_CACHE_SIZE_DEFAULT; // 10Gb + } + + // max-total-wal-size + GetConfInt64("max-total-wal-size", &max_total_wal_size_); + if (max_total_wal_size_ < 0) { + max_total_wal_size_ = 0; + } + + // rate-limiter-mode + rate_limiter_mode_ = 1; + GetConfInt("rate-limiter-mode", &rate_limiter_mode_); + if (rate_limiter_mode_ < 0 or rate_limiter_mode_ > 2) { + rate_limiter_mode_ = 1; + } + + // rate-limiter-bandwidth + GetConfInt64("rate-limiter-bandwidth", &rate_limiter_bandwidth_); + if (rate_limiter_bandwidth_ <= 0) { + rate_limiter_bandwidth_ = 1024LL << 30; // 1024GB/s + } + + // rate-limiter-refill-period-us + GetConfInt64("rate-limiter-refill-period-us", &rate_limiter_refill_period_us_); + if (rate_limiter_refill_period_us_ <= 0) { + rate_limiter_refill_period_us_ = 100 * 1000; + } + + // rate-limiter-fairness + GetConfInt64("rate-limiter-fairness", &rate_limiter_fairness_); + if (rate_limiter_fairness_ <= 0) { + rate_limiter_fairness_ = 10; + } + + std::string at; + GetConfStr("rate-limiter-auto-tuned", &at); + // rate_limiter_auto_tuned_ will be true if user didn't config + rate_limiter_auto_tuned_ = at == "yes" || at.empty(); + + // max_write_buffer_num + max_write_buffer_num_ = 2; + GetConfInt("max-write-buffer-num", &max_write_buffer_num_); + if (max_write_buffer_num_ <= 0) { + max_write_buffer_num_ = 2; // 1 for immutable memtable, 1 for mutable memtable + } + + // max_client_response_size 
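+  // (illustrative aside, not from the original patch: with the same
+  // human-size parsing assumed above, `max-client-response-size : 1G`
+  // would be read back as the 1073741824 used as the default just below)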
+ GetConfInt64Human("max-client-response-size", &max_client_response_size_); + if (max_client_response_size_ <= 0) { + max_client_response_size_ = 1073741824; // 1Gb + } + + // target_file_size_base + GetConfInt64Human("target-file-size-base", &target_file_size_base_); + if (target_file_size_base_ <= 0) { + target_file_size_base_ = 1048576; // 10Mb + } + + GetConfInt64("max-compaction-bytes", &max_compaction_bytes_); + if (max_compaction_bytes_ <= 0) { + // RocksDB's default is 25 * target_file_size_base_ + max_compaction_bytes_ = target_file_size_base_ * 25; + } + + max_cache_statistic_keys_ = 0; + GetConfInt("max-cache-statistic-keys", &max_cache_statistic_keys_); + if (max_cache_statistic_keys_ <= 0) { + max_cache_statistic_keys_ = 0; + } + + // disable_auto_compactions + GetConfBool("disable_auto_compactions", &disable_auto_compactions_); + + small_compaction_threshold_ = 5000; + GetConfInt("small-compaction-threshold", &small_compaction_threshold_); + if (small_compaction_threshold_ < 0) { + small_compaction_threshold_ = 0; + } else if (small_compaction_threshold_ >= 100000) { + small_compaction_threshold_ = 100000; + } + + small_compaction_duration_threshold_ = 10000; + GetConfInt("small-compaction-duration-threshold", &small_compaction_duration_threshold_); + if (small_compaction_duration_threshold_ < 0) { + small_compaction_duration_threshold_ = 0; + } else if (small_compaction_duration_threshold_ >= 1000000) { + small_compaction_duration_threshold_ = 1000000; + } + + // max-background-flushes and max-background-compactions should both be -1 or both not + GetConfInt("max-background-flushes", &max_background_flushes_); + if (max_background_flushes_ <= 0 && max_background_flushes_ != -1) { + max_background_flushes_ = 1; + } + if (max_background_flushes_ >= 6) { + max_background_flushes_ = 6; + } + + GetConfInt("max-background-compactions", &max_background_compactions_); + if (max_background_compactions_ <= 0 && max_background_compactions_ != -1) { + max_background_compactions_ = 2; + } + if (max_background_compactions_ >= 8) { + max_background_compactions_ = 8; + } + + max_background_jobs_ = max_background_flushes_ + max_background_compactions_; + GetConfInt("max-background-jobs", &max_background_jobs_); + if (max_background_jobs_ <= 0) { + max_background_jobs_ = (1 + 2); + } + if (max_background_jobs_ >= (8 + 6)) { + max_background_jobs_ = (8 + 6); + } + + GetConfInt64("delayed-write-rate", &delayed_write_rate_); + if (delayed_write_rate_ <= 0) { + // set 0 means let rocksDB infer from rate-limiter(by default, rate-limiter is 1024GB, delayed_write_rate will be 512GB) + // if rate-limiter is nullptr, it would be set to 16MB by RocksDB + delayed_write_rate_ = 0; + } + + max_cache_files_ = 5000; + GetConfInt("max-cache-files", &max_cache_files_); + if (max_cache_files_ < -1) { + max_cache_files_ = 5000; + } + max_bytes_for_level_multiplier_ = 10; + GetConfInt("max-bytes-for-level-multiplier", &max_bytes_for_level_multiplier_); + if (max_bytes_for_level_multiplier_ < 10) { + max_bytes_for_level_multiplier_ = 5; + } + + block_size_ = 4 * 1024; + GetConfInt64Human("block-size", &block_size_); + if (block_size_ <= 0) { + block_size_ = 4 * 1024; + } + + block_cache_ = 8 * 1024 * 1024; + GetConfInt64Human("block-cache", &block_cache_); + if (block_cache_ < 0) { + block_cache_ = 8 * 1024 * 1024; + } + + num_shard_bits_ = -1; + GetConfInt64("num-shard-bits", &num_shard_bits_); + + std::string sbc; + GetConfStr("share-block-cache", &sbc); + share_block_cache_ = sbc == "yes"; + + std::string 
epif; + GetConfStr("enable-partitioned-index-filters", &epif); + enable_partitioned_index_filters_ = epif == "yes"; + + std::string ciafb; + GetConfStr("cache-index-and-filter-blocks", &ciafb); + cache_index_and_filter_blocks_ = ciafb == "yes"; + + std::string plfaibic; + GetConfStr("pin_l0_filter_and_index_blocks_in_cache", &plfaibic); + pin_l0_filter_and_index_blocks_in_cache_ = plfaibic == "yes"; + + std::string offh; + GetConfStr("optimize-filters-for-hits", &offh); + optimize_filters_for_hits_ = offh == "yes"; + + std::string lcdlb; + GetConfStr("level-compaction-dynamic-level-bytes", &lcdlb); + level_compaction_dynamic_level_bytes_ = lcdlb == "yes" || lcdlb.empty(); + + // daemonize + std::string dmz; + GetConfStr("daemonize", &dmz); + daemonize_ = dmz == "yes"; + + // read redis cache in Net worker threads + std::string rtc_enabled; + GetConfStr("rtc-cache-read", &rtc_enabled); + rtc_cache_read_enabled_ = rtc_enabled != "no"; + + // binlog + std::string wb; + GetConfStr("write-binlog", &wb); + write_binlog_ = wb != "no"; + GetConfIntHuman("binlog-file-size", &binlog_file_size_); + if (binlog_file_size_ < 1024 || static_cast(binlog_file_size_) > (1024LL * 1024 * 1024)) { + binlog_file_size_ = 100 * 1024 * 1024; // 100M + } + GetConfStr("pidfile", &pidfile_); + + // db sync + GetConfStr("db-sync-path", &db_sync_path_); + db_sync_path_ = db_sync_path_.empty() ? "./dbsync/" : db_sync_path_; + if (db_sync_path_[db_sync_path_.length() - 1] != '/') { + db_sync_path_ += "/"; + } + GetConfInt("db-sync-speed", &db_sync_speed_); + if (db_sync_speed_ < 0 || db_sync_speed_ > 1024) { + db_sync_speed_ = 1024; + } + // network interface + network_interface_ = ""; + GetConfStr("network-interface", &network_interface_); + + // userblacklist + GetConfStr("userblacklist", &userblacklist_); + // acl users + GetConfStrMulti("user", &users_); + + GetConfStr("aclfile", &aclFile_); + GetConfStrMulti("rename-command", &cmds_); + for (const auto & i : cmds_) { + std::string before, after; + std::istringstream iss(i); + iss >> before; + if (iss) { + iss >> after; + pstd::StringToLower(before); + pstd::StringToLower(after); + std::shared_ptr c_ptr = g_pika_cmd_table_manager->GetCmd(before); + if (!c_ptr) { + LOG(ERROR) << "No such " << before << " command in pika-command"; + return -1; + } + g_pika_cmd_table_manager->RenameCommand(before, after); + } + } + std::string acl_pubsub_default; + GetConfStr("acl-pubsub-default", &acl_pubsub_default); + if (acl_pubsub_default == "allchannels") { + acl_pubsub_default_ = static_cast(AclSelectorFlag::ALL_CHANNELS); + } + + int tmp_acllog_max_len = 128; + GetConfInt("acllog-max-len", &tmp_acllog_max_len); + if (tmp_acllog_max_len < 0) { + tmp_acllog_max_len = 128; + } + acl_Log_max_len_ = tmp_acllog_max_len; + + // slaveof + slaveof_ = ""; + GetConfStr("slaveof", &slaveof_); + + int cache_num = 16; + GetConfInt("cache-num", &cache_num); + cache_num_ = (0 >= cache_num || 48 < cache_num) ? 16 : cache_num; + + int cache_mode = 0; + GetConfInt("cache-model", &cache_mode); + cache_mode_ = (PIKA_CACHE_NONE > cache_mode || PIKA_CACHE_READ < cache_mode) ? 
PIKA_CACHE_NONE : cache_mode; + + std::string cache_type; + GetConfStr("cache-type", &cache_type); + SetCacheType(cache_type); + + int zset_cache_start_direction = 0; + GetConfInt("zset-cache-start-direction", &zset_cache_start_direction); + if (zset_cache_start_direction != cache::CACHE_START_FROM_BEGIN && zset_cache_start_direction != cache::CACHE_START_FROM_END) { + zset_cache_start_direction = cache::CACHE_START_FROM_BEGIN; + } + zset_cache_start_direction_ = zset_cache_start_direction; + + int zset_cache_field_num_per_key = DEFAULT_CACHE_ITEMS_PER_KEY; + GetConfInt("zset-cache-field-num-per-key", &zset_cache_field_num_per_key); + if (zset_cache_field_num_per_key <= 0) { + zset_cache_field_num_per_key = DEFAULT_CACHE_ITEMS_PER_KEY; + } + zset_cache_field_num_per_key_ = zset_cache_field_num_per_key; + + int max_key_size_in_cache = DEFAULT_CACHE_MAX_KEY_SIZE; + GetConfInt("max-key-size-in-cache", &max_key_size_in_cache); + if (max_key_size_in_cache <= 0) { + max_key_size_in_cache = DEFAULT_CACHE_MAX_KEY_SIZE; + } + max_key_size_in_cache_ = max_key_size_in_cache; + + int64_t cache_maxmemory = PIKA_CACHE_SIZE_DEFAULT; + GetConfInt64("cache-maxmemory", &cache_maxmemory); + cache_maxmemory_ = (PIKA_CACHE_SIZE_MIN > cache_maxmemory) ? PIKA_CACHE_SIZE_DEFAULT : cache_maxmemory; + + int cache_maxmemory_policy = 1; + GetConfInt("cache-maxmemory-policy", &cache_maxmemory_policy); + cache_maxmemory_policy_ = (0 > cache_maxmemory_policy || 7 < cache_maxmemory_policy) ? 1 : cache_maxmemory_policy; + + int cache_maxmemory_samples = 5; + GetConfInt("cache-maxmemory-samples", &cache_maxmemory_samples); + cache_maxmemory_samples_ = (1 > cache_maxmemory_samples) ? 5 : cache_maxmemory_samples; + + int cache_lfu_decay_time = 1; + GetConfInt("cache-lfu-decay-time", &cache_lfu_decay_time); + cache_lfu_decay_time_ = (0 > cache_lfu_decay_time) ? 
1 : cache_lfu_decay_time; + // sync window size + int tmp_sync_window_size = kBinlogReadWinDefaultSize; + GetConfInt("sync-window-size", &tmp_sync_window_size); + if (tmp_sync_window_size <= 0) { + sync_window_size_.store(kBinlogReadWinDefaultSize); + } else if (tmp_sync_window_size > kBinlogReadWinMaxSize) { + sync_window_size_.store(kBinlogReadWinMaxSize); + } else { + sync_window_size_.store(tmp_sync_window_size); + } + + // max conn rbuf size + int tmp_max_conn_rbuf_size = PIKA_MAX_CONN_RBUF; + GetConfIntHuman("max-conn-rbuf-size", &tmp_max_conn_rbuf_size); + if (tmp_max_conn_rbuf_size <= PIKA_MAX_CONN_RBUF_LB) { + max_conn_rbuf_size_.store(PIKA_MAX_CONN_RBUF_LB); + } else if (tmp_max_conn_rbuf_size >= PIKA_MAX_CONN_RBUF_HB * 2) { + max_conn_rbuf_size_.store(PIKA_MAX_CONN_RBUF_HB * 2); + } else { + max_conn_rbuf_size_.store(tmp_max_conn_rbuf_size); + } + + // rocksdb blob configure + GetConfBool("enable-blob-files", &enable_blob_files_); + GetConfInt64Human("min-blob-size", &min_blob_size_); + if (min_blob_size_ <= 0) { + min_blob_size_ = 4096; + } + GetConfInt64Human("blob-file-size", &blob_file_size_); + if (blob_file_size_ <= 0) { + blob_file_size_ = 256 * 1024 * 1024; + } + GetConfStr("blob-compression-type", &blob_compression_type_); + GetConfBool("enable-blob-garbage-collection", &enable_blob_garbage_collection_); + GetConfDouble("blob-garbage-collection-age-cutoff", &blob_garbage_collection_age_cutoff_); + if (blob_garbage_collection_age_cutoff_ <= 0) { + blob_garbage_collection_age_cutoff_ = 0.25; + } + GetConfDouble("blob-garbage-collection-force-threshold", &blob_garbage_collection_force_threshold_); + if (blob_garbage_collection_force_threshold_ <= 0) { + blob_garbage_collection_force_threshold_ = 1.0; + } + GetConfInt64("blob-cache", &block_cache_); + GetConfInt64("blob-num-shard-bits", &blob_num_shard_bits_); + + // throttle-bytes-per-second + GetConfInt("throttle-bytes-per-second", &throttle_bytes_per_second_); + if (throttle_bytes_per_second_ <= 0) { + throttle_bytes_per_second_ = 200LL << 20; //200 MB + } + + GetConfInt("max-rsync-parallel-num", &max_rsync_parallel_num_); + if (max_rsync_parallel_num_ <= 0 || max_rsync_parallel_num_ > kMaxRsyncParallelNum) { + max_rsync_parallel_num_ = kMaxRsyncParallelNum; + } + + // rocksdb_statistics_tickers + std::string open_tickers; + GetConfStr("enable-db-statistics", &open_tickers); + enable_db_statistics_ = open_tickers == "yes"; + + db_statistics_level_ = 0; + GetConfInt("db-statistics-level", &db_statistics_level_); + if (db_statistics_level_ < 0) { + db_statistics_level_ = 0; + } + + int64_t tmp_rsync_timeout_ms = -1; + GetConfInt64("rsync-timeout-ms", &tmp_rsync_timeout_ms); + if (tmp_rsync_timeout_ms <= 0) { + rsync_timeout_ms_.store(1000); + } else { + rsync_timeout_ms_.store(tmp_rsync_timeout_ms); + } + + GetConfBool("wash-data", &wash_data_); + + return ret; +} + +void PikaConf::TryPushDiffCommands(const std::string& command, const std::string& value) { + if (!CheckConfExist(command)) { + diff_commands_[command] = value; + } +} + +void PikaConf::SetCacheType(const std::string& value) { + cache_string_ = cache_set_ = cache_zset_ = cache_hash_ = cache_list_ = cache_bit_ = 0; + if (value == "") { + return; + } + std::lock_guard l(rwlock_); + + std::string lower_value = value; + pstd::StringToLower(lower_value); + lower_value.erase(remove_if(lower_value.begin(), lower_value.end(), isspace), lower_value.end()); + pstd::StringSplit(lower_value, COMMA, cache_type_); + for (auto& type : cache_type_) { + if (type == "string") { 
+ cache_string_ = 1; + } else if (type == "set") { + cache_set_ = 1; + } else if (type == "zset") { + cache_zset_ = 1; + } else if (type == "hash") { + cache_hash_ = 1; + } else if (type == "list") { + cache_list_ = 1; + } else if (type == "bit") { + cache_bit_ = 1; + } + } +} + +int PikaConf::ConfigRewrite() { + std::string userblacklist = user_blacklist_string(); + std::string scachetype = scache_type(); + std::lock_guard l(rwlock_); + // Only set value for config item that can be config set. + SetConfInt("timeout", timeout_); + SetConfStr("requirepass", requirepass_); + SetConfStr("masterauth", masterauth_); + SetConfStr("userpass", userpass_); + SetConfStr("userblacklist", userblacklist_); + SetConfStr("dump-prefix", bgsave_prefix_); + SetConfInt("maxclients", maxclients_); + SetConfInt("dump-expire", expire_dump_days_); + SetConfInt("expire-logs-days", expire_logs_days_); + SetConfInt("expire-logs-nums", expire_logs_nums_); + SetConfInt("root-connection-num", root_connection_num_); + SetConfStr("slowlog-write-errorlog", slowlog_write_errorlog_.load() ? "yes" : "no"); + SetConfInt("slowlog-log-slower-than", slowlog_log_slower_than_.load()); + SetConfInt("slowlog-max-len", slowlog_max_len_); + SetConfStr("write-binlog", write_binlog_ ? "yes" : "no"); + SetConfStr("run-id", run_id_); + SetConfStr("replication-id", replication_id_); + SetConfInt("max-cache-statistic-keys", max_cache_statistic_keys_); + SetConfInt("small-compaction-threshold", small_compaction_threshold_); + SetConfInt("small-compaction-duration-threshold", small_compaction_duration_threshold_); + SetConfInt("max-client-response-size", static_cast(max_client_response_size_)); + SetConfInt("db-sync-speed", db_sync_speed_); + SetConfStr("compact-cron", compact_cron_); + SetConfStr("compact-interval", compact_interval_); + SetConfInt("compact-every-num-of-files", compact_every_num_of_files_); + if (compact_every_num_of_files_ < 1) { + compact_every_num_of_files_ = 1; + } + SetConfInt("force-compact-file-age-seconds", force_compact_file_age_seconds_); + if (force_compact_file_age_seconds_ < 300) { + force_compact_file_age_seconds_ = 300; + } + SetConfInt("force-compact-min-delete-ratio", force_compact_min_delete_ratio_); + if (force_compact_min_delete_ratio_ < 5) { + force_compact_min_delete_ratio_ = 5; + } + SetConfInt("dont-compact-sst-created-in-seconds", dont_compact_sst_created_in_seconds_); + if (dont_compact_sst_created_in_seconds_ < 300) { + dont_compact_sst_created_in_seconds_ = 300; + } + SetConfInt("best-delete-min-ratio", best_delete_min_ratio_); + if (best_delete_min_ratio_ < 10) { + best_delete_min_ratio_ = 10; + } + + std::string cs_; + SetConfStr("compaction-strategy", cs_); + if (cs_ == "full-compact") { + compaction_strategy_ = FullCompact; + } else if (cs_ == "obd-compact") { + compaction_strategy_ = OldestOrBestDeleteRatioSstCompact; + } else { + compaction_strategy_ = NONE; + } + + SetConfStr("disable_auto_compactions", disable_auto_compactions_ ? 
"true" : "false"); + SetConfStr("cache-type", scachetype); + SetConfInt64("least-free-disk-resume-size", least_free_disk_to_resume_); + SetConfInt64("manually-resume-interval", resume_check_interval_); + SetConfDouble("min-check-resume-ratio", min_check_resume_ratio_); + SetConfInt("slave-priority", slave_priority_); + SetConfInt("throttle-bytes-per-second", throttle_bytes_per_second_); + SetConfStr("internal-used-unfinished-full-sync", pstd::Set2String(internal_used_unfinished_full_sync_, ',')); + SetConfInt("max-rsync-parallel-num", max_rsync_parallel_num_); + SetConfInt("sync-window-size", sync_window_size_.load()); + SetConfInt("consensus-level", consensus_level_.load()); + SetConfInt("replication-num", replication_num_.load()); + SetConfStr("slow-cmd-list", pstd::Set2String(slow_cmd_set_, ',')); + SetConfInt("max-conn-rbuf-size", max_conn_rbuf_size_.load()); + // options for storage engine + SetConfInt("max-cache-files", max_cache_files_); + SetConfInt("max-background-compactions", max_background_compactions_); + SetConfInt("max-background-jobs", max_background_jobs_); + SetConfInt64("rate-limiter-bandwidth", rate_limiter_bandwidth_); + SetConfInt64("delayed-write-rate", delayed_write_rate_); + SetConfInt64("max-compaction-bytes", max_compaction_bytes_); + SetConfInt("max-write-buffer-num", max_write_buffer_num_); + SetConfInt64("write-buffer-size", write_buffer_size_); + SetConfInt("min-write-buffer-number-to-merge", min_write_buffer_number_to_merge_); + SetConfInt("level0-stop-writes-trigger", level0_stop_writes_trigger_); + SetConfInt("level0-slowdown-writes-trigger", level0_slowdown_writes_trigger_); + SetConfInt("level0-file-num-compaction-trigger", level0_file_num_compaction_trigger_); + SetConfInt64("arena-block-size", arena_block_size_); + SetConfStr("slotmigrate", slotmigrate_.load() ? "yes" : "no"); + SetConfInt64("slotmigrate-thread-num", slotmigrate_thread_num_); + SetConfInt64("thread-migrate-keys-num", thread_migrate_keys_num_); + SetConfStr("enable-db-statistics", enable_db_statistics_ ? "yes" : "no"); + SetConfInt("db-statistics-level", db_statistics_level_); + // slaveof config item is special + SetConfStr("slaveof", slaveof_); + // cache config + SetConfStr("cache-index-and-filter-blocks", cache_index_and_filter_blocks_ ? 
"yes" : "no"); + SetConfInt("cache-model", cache_mode_); + SetConfInt("zset-cache-start-direction", zset_cache_start_direction_); + SetConfInt("zset_cache_field_num_per_key", zset_cache_field_num_per_key_); + + if (!diff_commands_.empty()) { + std::vector filtered_items; + for (const auto& diff_command : diff_commands_) { + if (!diff_command.second.empty()) { + pstd::BaseConf::Rep::ConfItem item(pstd::BaseConf::Rep::kConf, diff_command.first, diff_command.second); + filtered_items.push_back(item); + } + } + if (!filtered_items.empty()) { + pstd::BaseConf::Rep::ConfItem comment_item(pstd::BaseConf::Rep::kComment, "# Generated by CONFIG REWRITE\n"); + PushConfItem(comment_item); + for (const auto& item : filtered_items) { + PushConfItem(item); + } + } + diff_commands_.clear(); + } + return static_cast(WriteBack()); +} + +int PikaConf::ConfigRewriteReplicationID() { + std::lock_guard l(rwlock_); + SetConfStr("replication-id", replication_id_); + SetConfStr("internal-used-unfinished-full-sync", pstd::Set2String(internal_used_unfinished_full_sync_, ',')); + if (!diff_commands_.empty()) { + std::vector filtered_items; + for (const auto& diff_command : diff_commands_) { + if (!diff_command.second.empty()) { + pstd::BaseConf::Rep::ConfItem item(pstd::BaseConf::Rep::kConf, diff_command.first, diff_command.second); + filtered_items.push_back(item); + } + } + if (!filtered_items.empty()) { + pstd::BaseConf::Rep::ConfItem comment_item(pstd::BaseConf::Rep::kComment, + "# Generated by ReplicationID CONFIG REWRITE\n"); + PushConfItem(comment_item); + for (const auto& item : filtered_items) { + PushConfItem(item); + } + } + diff_commands_.clear(); + } + return static_cast(WriteBack()); +} + +rocksdb::CompressionType PikaConf::GetCompression(const std::string& value) { + if (value == "snappy") { + return rocksdb::CompressionType::kSnappyCompression; + } else if (value == "zlib") { + return rocksdb::CompressionType::kZlibCompression; + } else if (value == "lz4") { + return rocksdb::CompressionType::kLZ4Compression; + } else if (value == "zstd") { + return rocksdb::CompressionType::kZSTD; + } + return rocksdb::CompressionType::kNoCompression; +} + +std::vector PikaConf::compression_per_level() { + std::shared_lock l(rwlock_); + std::vector types; + if (compression_per_level_.empty()) { + return types; + } + auto left = compression_per_level_.find_first_of('['); + auto right = compression_per_level_.find_first_of(']'); + + if (left == std::string::npos || right == std::string::npos || right <= left + 1) { + return types; + } + std::vector strings; + pstd::StringSplit(compression_per_level_.substr(left + 1, right - left - 1), ':', strings); + for (const auto& item : strings) { + types.push_back(GetCompression(pstd::StringTrim(item))); + } + return types; +} diff --git a/tools/pika_migrate/src/pika_consensus.cc b/tools/pika_migrate/src/pika_consensus.cc new file mode 100644 index 0000000000..89f10e0317 --- /dev/null +++ b/tools/pika_migrate/src/pika_consensus.cc @@ -0,0 +1,783 @@ +// Copyright (c) 2015-present, Qihoo, Inc. All rights reserved. +// This source code is licensed under the BSD-style license found in the +// LICENSE file in the root directory of this source tree. An additional grant +// of patent rights can be found in the PATENTS file in the same directory. 
+
+#include <utility>
+
+#include "include/pika_consensus.h"
+
+#include "include/pika_client_conn.h"
+#include "include/pika_cmd_table_manager.h"
+#include "include/pika_conf.h"
+#include "include/pika_rm.h"
+#include "include/pika_server.h"
+
+using pstd::Status;
+
+extern PikaServer* g_pika_server;
+extern std::unique_ptr<PikaConf> g_pika_conf;
+extern std::unique_ptr<PikaReplicaManager> g_pika_rm;
+extern std::unique_ptr<PikaCmdTableManager> g_pika_cmd_table_manager;
+
+/* Context */
+
+Context::Context(std::string path) : path_(std::move(path)) {}
+
+Status Context::StableSave() {
+  char* p = save_->GetData();
+  memcpy(p, &(applied_index_.b_offset.filenum), sizeof(uint32_t));
+  p += 4;
+  memcpy(p, &(applied_index_.b_offset.offset), sizeof(uint64_t));
+  p += 8;
+  memcpy(p, &(applied_index_.l_offset.term), sizeof(uint32_t));
+  p += 4;
+  memcpy(p, &(applied_index_.l_offset.index), sizeof(uint64_t));
+  return Status::OK();
+}
+
+Status Context::Init() {
+  if (!pstd::FileExists(path_)) {
+    Status s = pstd::NewRWFile(path_, save_);
+    if (!s.ok()) {
+      LOG(FATAL) << "Context new file failed " << s.ToString();
+    }
+    StableSave();
+  } else {
+    std::unique_ptr<pstd::RWFile> tmp_file;
+    Status s = pstd::NewRWFile(path_, tmp_file);
+    save_.reset(tmp_file.release());
+    if (!s.ok()) {
+      LOG(FATAL) << "Context new file failed " << s.ToString();
+    }
+  }
+  if (save_->GetData()) {
+    memcpy(reinterpret_cast<char*>(&(applied_index_.b_offset.filenum)), save_->GetData(), sizeof(uint32_t));
+    memcpy(reinterpret_cast<char*>(&(applied_index_.b_offset.offset)), save_->GetData() + 4, sizeof(uint64_t));
+    memcpy(reinterpret_cast<char*>(&(applied_index_.l_offset.term)), save_->GetData() + 12, sizeof(uint32_t));
+    memcpy(reinterpret_cast<char*>(&(applied_index_.l_offset.index)), save_->GetData() + 16, sizeof(uint64_t));
+    return Status::OK();
+  } else {
+    return Status::Corruption("Context init error");
+  }
+}
+
+void Context::UpdateAppliedIndex(const LogOffset& offset) {
+  std::lock_guard l(rwlock_);
+  LogOffset cur_offset;
+  applied_win_.Update(SyncWinItem(offset), SyncWinItem(offset), &cur_offset);
+  if (cur_offset > applied_index_) {
+    applied_index_ = cur_offset;
+    StableSave();
+  }
+}
+
+void Context::Reset(const LogOffset& offset) {
+  std::lock_guard l(rwlock_);
+  applied_index_ = offset;
+  applied_win_.Reset();
+  StableSave();
+}
+
+/* SyncProgress */
+
+std::string MakeSlaveKey(const std::string& ip, int port) {
+  return ip + ":" + std::to_string(port);
+}
+
+std::shared_ptr<SlaveNode> SyncProgress::GetSlaveNode(const std::string& ip, int port) {
+  std::string slave_key = MakeSlaveKey(ip, port);
+  std::shared_lock l(rwlock_);
+  if (slaves_.find(slave_key) == slaves_.end()) {
+    return nullptr;
+  }
+  return slaves_[slave_key];
+}
+
+std::unordered_map<std::string, std::shared_ptr<SlaveNode>> SyncProgress::GetAllSlaveNodes() {
+  std::shared_lock l(rwlock_);
+  return slaves_;
+}
+
+Status SyncProgress::AddSlaveNode(const std::string& ip, int port, const std::string& db_name, int session_id) {
+  std::string slave_key = MakeSlaveKey(ip, port);
+  std::shared_ptr<SlaveNode> exist_ptr = GetSlaveNode(ip, port);
+  if (exist_ptr) {
+    LOG(WARNING) << "SlaveNode " << exist_ptr->ToString() << " already exists, set new session " << session_id;
+    exist_ptr->SetSessionId(session_id);
+    return Status::OK();
+  }
+  std::shared_ptr<SlaveNode> slave_ptr = std::make_shared<SlaveNode>(ip, port, db_name, session_id);
+  slave_ptr->SetLastSendTime(pstd::NowMicros());
+  slave_ptr->SetLastRecvTime(pstd::NowMicros());
+
+  {
+    std::lock_guard l(rwlock_);
+    slaves_[slave_key] = slave_ptr;
+    // add the slave to match_index as well
+    match_index_[slave_key] = LogOffset();
+  }
+  return Status::OK();
+}
+
+Status
SyncProgress::RemoveSlaveNode(const std::string& ip, int port) { + std::string slave_key = MakeSlaveKey(ip, port); + { + std::lock_guard l(rwlock_); + slaves_.erase(slave_key); + // remove slave to match_index + match_index_.erase(slave_key); + } + return Status::OK(); +} + +Status SyncProgress::Update(const std::string& ip, int port, const LogOffset& start, const LogOffset& end, + LogOffset* committed_index) { + std::shared_ptr slave_ptr = GetSlaveNode(ip, port); + if (!slave_ptr) { + return Status::NotFound("ip " + ip + " port " + std::to_string(port)); + } + + LogOffset acked_offset; + { + // update slave_ptr + std::lock_guard l(slave_ptr->slave_mu); + Status s = slave_ptr->Update(start, end, &acked_offset); + if (!s.ok()) { + return s; + } + // update match_index_ + // shared slave_ptr->slave_mu + match_index_[ip + std::to_string(port)] = acked_offset; + } + + return Status::OK(); +} + +int SyncProgress::SlaveSize() { + std::shared_lock l(rwlock_); + return static_cast(slaves_.size()); +} + +/* MemLog */ + +MemLog::MemLog() = default; + +int MemLog::Size() { return static_cast(logs_.size()); } + +// keep mem_log [mem_log.begin, offset] +Status MemLog::TruncateTo(const LogOffset& offset) { + std::lock_guard l_logs(logs_mu_); + int index = InternalFindLogByBinlogOffset(offset); + if (index < 0) { + return Status::Corruption("Cant find correct index"); + } + last_offset_ = logs_[index].offset; + logs_.erase(logs_.begin() + index + 1, logs_.end()); + return Status::OK(); +} + +void MemLog::Reset(const LogOffset& offset) { + std::lock_guard l_logs(logs_mu_); + logs_.erase(logs_.begin(), logs_.end()); + last_offset_ = offset; +} + +bool MemLog::FindLogItem(const LogOffset& offset, LogOffset* found_offset) { + std::lock_guard l_logs(logs_mu_); + int index = InternalFindLogByLogicIndex(offset); + if (index < 0) { + return false; + } + *found_offset = logs_[index].offset; + return true; +} + +int MemLog::InternalFindLogByLogicIndex(const LogOffset& offset) { + for (size_t i = 0; i < logs_.size(); ++i) { + if (logs_[i].offset.l_offset.index > offset.l_offset.index) { + return -1; + } + if (logs_[i].offset.l_offset.index == offset.l_offset.index) { + return static_cast(i); + } + } + return -1; +} + +int MemLog::InternalFindLogByBinlogOffset(const LogOffset& offset) { + for (size_t i = 0; i < logs_.size(); ++i) { + if (logs_[i].offset > offset) { + return -1; + } + if (logs_[i].offset == offset) { + return static_cast(i); + } + } + return -1; +} + +/* ConsensusCoordinator */ + +ConsensusCoordinator::ConsensusCoordinator(const std::string& db_name) + : db_name_(db_name) { + std::string db_log_path = g_pika_conf->log_path() + "log_" + db_name + "/"; + std::string log_path = db_log_path; + context_ = std::make_shared(log_path + kContext); + stable_logger_ = std::make_shared(db_name, log_path); + mem_logger_ = std::make_shared(); +} + +ConsensusCoordinator::~ConsensusCoordinator() = default; + +// since it is invoked in constructor all locks not hold +void ConsensusCoordinator::Init() { + // load committed_index_ & applied_index + context_->Init(); + committed_index_ = context_->applied_index_; + + // load term_ + term_ = stable_logger_->Logger()->term(); + + LOG(INFO) << DBInfo(db_name_).ToString() << "Restore applied index " + << context_->applied_index_.ToString() << " current term " << term_; + if (committed_index_ == LogOffset()) { + return; + } + // load mem_logger_ + mem_logger_->SetLastOffset(committed_index_); + net::RedisParserSettings settings; + settings.DealMessage = 
&(ConsensusCoordinator::InitCmd); + net::RedisParser redis_parser; + redis_parser.RedisParserInit(REDIS_PARSER_REQUEST, settings); + PikaBinlogReader binlog_reader; + int res = + binlog_reader.Seek(stable_logger_->Logger(), committed_index_.b_offset.filenum, committed_index_.b_offset.offset); + if (res != 0) { + LOG(FATAL) << DBInfo(db_name_).ToString() << "Binlog reader init failed"; + } + + while (true) { + LogOffset offset; + std::string binlog; + Status s = binlog_reader.Get(&binlog, &(offset.b_offset.filenum), &(offset.b_offset.offset)); + if (s.IsEndFile()) { + break; + } else if (s.IsCorruption() || s.IsIOError()) { + LOG(FATAL) << DBInfo(db_name_).ToString() << "Read Binlog error"; + } + BinlogItem item; + if (!PikaBinlogTransverter::BinlogItemWithoutContentDecode(TypeFirst, binlog, &item)) { + LOG(FATAL) << DBInfo(db_name_).ToString() << "Binlog item decode failed"; + } + offset.l_offset.term = item.term_id(); + offset.l_offset.index = item.logic_id(); + + redis_parser.data = static_cast(&db_name_); + const char* redis_parser_start = binlog.data() + BINLOG_ENCODE_LEN; + int redis_parser_len = static_cast(binlog.size()) - BINLOG_ENCODE_LEN; + int processed_len = 0; + net::RedisParserStatus ret = redis_parser.ProcessInputBuffer(redis_parser_start, redis_parser_len, &processed_len); + if (ret != net::kRedisParserDone) { + LOG(FATAL) << DBInfo(db_name_).ToString() << "Redis parser parse failed"; + return; + } + auto arg = static_cast(redis_parser.data); + std::shared_ptr cmd_ptr = arg->cmd_ptr; + delete arg; + redis_parser.data = nullptr; + + mem_logger_->AppendLog(MemLog::LogItem(offset, cmd_ptr, nullptr, nullptr)); + } +} + +Status ConsensusCoordinator::Reset(const LogOffset& offset) { + context_->Reset(offset); + { + std::lock_guard l(index_mu_); + committed_index_ = offset; + } + + UpdateTerm(offset.l_offset.term); + Status s = stable_logger_->Logger()->SetProducerStatus(offset.b_offset.filenum, offset.b_offset.offset, + offset.l_offset.term, offset.l_offset.index); + if (!s.ok()) { + LOG(WARNING) << DBInfo(db_name_).ToString() << "Consensus reset status failed " + << s.ToString(); + return s; + } + + stable_logger_->SetFirstOffset(offset); + + stable_logger_->Logger()->Lock(); + mem_logger_->Reset(offset); + stable_logger_->Logger()->Unlock(); + return Status::OK(); +} + +Status ConsensusCoordinator::ProposeLog(const std::shared_ptr& cmd_ptr) { + std::vector keys = cmd_ptr->current_key(); + // slotkey shouldn't add binlog + if (cmd_ptr->name() == kCmdNameSAdd && !keys.empty() && + (keys[0].compare(0, SlotKeyPrefix.length(), SlotKeyPrefix) == 0 || keys[0].compare(0, SlotTagPrefix.length(), SlotTagPrefix) == 0)) { + return Status::OK(); + } + + // make sure stable log and mem log consistent + Status s = InternalAppendLog(cmd_ptr); + if (!s.ok()) { + return s; + } + + g_pika_server->SignalAuxiliary(); + return Status::OK(); +} + +Status ConsensusCoordinator::InternalAppendLog(const std::shared_ptr& cmd_ptr) { + return InternalAppendBinlog(cmd_ptr); +} + +// precheck if prev_offset match && drop this log if this log exist +Status ConsensusCoordinator::ProcessLeaderLog(const std::shared_ptr& cmd_ptr, const BinlogItem& attribute) { + LogOffset last_index = mem_logger_->last_offset(); + if (attribute.logic_id() < last_index.l_offset.index) { + LOG(WARNING) << DBInfo(db_name_).ToString() << "Drop log from leader logic_id " + << attribute.logic_id() << " cur last index " << last_index.l_offset.index; + return Status::OK(); + } + + auto opt = cmd_ptr->argv()[0]; + if 
(pstd::StringToLower(opt) != kCmdNameFlushdb) { + // apply binlog in sync way + Status s = InternalAppendLog(cmd_ptr); + // apply db in async way + InternalApplyFollower(cmd_ptr); + } else { + // this is a flushdb-binlog, both apply binlog and apply db are in sync way + // ensure all writeDB task that submitted before has finished before we exec this flushdb + int32_t wait_ms = 250; + while (g_pika_rm->GetUnfinishedAsyncWriteDBTaskCount(db_name_) > 0) { + std::this_thread::sleep_for(std::chrono::milliseconds(wait_ms)); + wait_ms *= 2; + wait_ms = wait_ms < 3000 ? wait_ms : 3000; + } + // apply flushdb-binlog in sync way + Status s = InternalAppendLog(cmd_ptr); + // applyDB in sync way + PikaReplBgWorker::WriteDBInSyncWay(cmd_ptr); + } + return Status::OK(); +} + +Status ConsensusCoordinator::UpdateSlave(const std::string& ip, int port, const LogOffset& start, + const LogOffset& end) { + LogOffset committed_index; + Status s = sync_pros_.Update(ip, port, start, end, &committed_index); + if (!s.ok()) { + return s; + } + + return Status::OK(); +} + +Status ConsensusCoordinator::InternalAppendBinlog(const std::shared_ptr& cmd_ptr) { + std::string content = cmd_ptr->ToRedisProtocol(); + Status s = stable_logger_->Logger()->Put(content); + if (!s.ok()) { + std::string db_name = cmd_ptr->db_name().empty() ? g_pika_conf->default_db() : cmd_ptr->db_name(); + std::shared_ptr db = g_pika_server->GetDB(db_name); + if (db) { + db->SetBinlogIoError(); + } + return s; + } + return stable_logger_->Logger()->IsOpened(); +} + +Status ConsensusCoordinator::AddSlaveNode(const std::string& ip, int port, int session_id) { + Status s = sync_pros_.AddSlaveNode(ip, port, db_name_, session_id); + if (!s.ok()) { + return s; + } + return Status::OK(); +} + +Status ConsensusCoordinator::RemoveSlaveNode(const std::string& ip, int port) { + Status s = sync_pros_.RemoveSlaveNode(ip, port); + if (!s.ok()) { + return s; + } + return Status::OK(); +} + +void ConsensusCoordinator::UpdateTerm(uint32_t term) { + stable_logger_->Logger()->Lock(); + std::lock_guard l(term_rwlock_); + term_ = term; + stable_logger_->Logger()->SetTerm(term); + stable_logger_->Logger()->Unlock(); +} + +uint32_t ConsensusCoordinator::term() { + std::shared_lock l(term_rwlock_); + return term_; +} + +void ConsensusCoordinator::InternalApplyFollower(const std::shared_ptr& cmd_ptr) { + g_pika_rm->ScheduleWriteDBTask(cmd_ptr, db_name_); +} + +int ConsensusCoordinator::InitCmd(net::RedisParser* parser, const net::RedisCmdArgsType& argv) { + auto db_name = static_cast(parser->data); + std::string opt = argv[0]; + std::shared_ptr c_ptr = g_pika_cmd_table_manager->GetCmd(pstd::StringToLower(opt)); + if (!c_ptr) { + LOG(WARNING) << "Command " << opt << " not in the command table"; + return -1; + } + // Initial + c_ptr->Initial(argv, *db_name); + if (!c_ptr->res().ok()) { + LOG(WARNING) << "Fail to initial command from binlog: " << opt; + return -1; + } + parser->data = static_cast(new CmdPtrArg(c_ptr)); + return 0; +} + +Status ConsensusCoordinator::TruncateTo(const LogOffset& offset) { + LOG(INFO) << DBInfo(db_name_).ToString() << "Truncate to " << offset.ToString(); + LogOffset founded_offset; + Status s = FindLogicOffset(offset.b_offset, offset.l_offset.index, &founded_offset); + if (!s.ok()) { + return s; + } + LOG(INFO) << DBInfo(db_name_).ToString() << " Founded truncate pos " + << founded_offset.ToString(); + LogOffset committed = committed_index(); + stable_logger_->Logger()->Lock(); + if (founded_offset.l_offset.index == committed.l_offset.index) { 
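+    // truncating exactly at the committed offset leaves nothing uncommitted
+    // in memory, so the whole mem log can be reset rather than searched and cut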
+ mem_logger_->Reset(committed); + } else { + Status s = mem_logger_->TruncateTo(founded_offset); + if (!s.ok()) { + stable_logger_->Logger()->Unlock(); + return s; + } + } + s = stable_logger_->TruncateTo(founded_offset); + if (!s.ok()) { + stable_logger_->Logger()->Unlock(); + return s; + } + stable_logger_->Logger()->Unlock(); + return Status::OK(); +} + +Status ConsensusCoordinator::GetBinlogOffset(const BinlogOffset& start_offset, LogOffset* log_offset) { + PikaBinlogReader binlog_reader; + int res = binlog_reader.Seek(stable_logger_->Logger(), start_offset.filenum, start_offset.offset); + if (res != 0) { + return Status::Corruption("Binlog reader init failed"); + } + std::string binlog; + BinlogOffset offset; + Status s = binlog_reader.Get(&binlog, &(offset.filenum), &(offset.offset)); + if (!s.ok()) { + return Status::Corruption("Binlog reader get failed"); + } + BinlogItem item; + if (!PikaBinlogTransverter::BinlogItemWithoutContentDecode(TypeFirst, binlog, &item)) { + return Status::Corruption("Binlog item decode failed"); + } + log_offset->b_offset = offset; + log_offset->l_offset.term = item.term_id(); + log_offset->l_offset.index = item.logic_id(); + return Status::OK(); +} + +// get binlog offset range [start_offset, end_offset] +// start_offset 0,0 end_offset 1,129, result will include binlog (1,129) +// start_offset 0,0 end_offset 1,0, result will NOT include binlog (1,xxx) +// start_offset 0,0 end_offset 0,0, resulet will NOT include binlog(0,xxx) +Status ConsensusCoordinator::GetBinlogOffset(const BinlogOffset& start_offset, const BinlogOffset& end_offset, + std::vector* log_offset) { + PikaBinlogReader binlog_reader; + int res = binlog_reader.Seek(stable_logger_->Logger(), start_offset.filenum, start_offset.offset); + if (res != 0) { + return Status::Corruption("Binlog reader init failed"); + } + while (true) { + BinlogOffset b_offset; + std::string binlog; + Status s = binlog_reader.Get(&binlog, &(b_offset.filenum), &(b_offset.offset)); + if (s.IsEndFile()) { + return Status::OK(); + } else if (s.IsCorruption() || s.IsIOError()) { + return Status::Corruption("Read Binlog error"); + } + BinlogItem item; + if (!PikaBinlogTransverter::BinlogItemWithoutContentDecode(TypeFirst, binlog, &item)) { + return Status::Corruption("Binlog item decode failed"); + } + LogOffset offset; + offset.b_offset = b_offset; + offset.l_offset.term = item.term_id(); + offset.l_offset.index = item.logic_id(); + if (offset.b_offset > end_offset) { + return Status::OK(); + } + log_offset->push_back(offset); + } + return Status::OK(); +} + +Status ConsensusCoordinator::FindBinlogFileNum(const std::map& binlogs, uint64_t target_index, + uint32_t start_filenum, uint32_t* founded_filenum) { + // low boundary & high boundary + uint32_t lb_binlogs = binlogs.begin()->first; + uint32_t hb_binlogs = binlogs.rbegin()->first; + bool first_time_left = false; + bool first_time_right = false; + uint32_t filenum = start_filenum; + while (true) { + LogOffset first_offset; + Status s = GetBinlogOffset(BinlogOffset(filenum, 0), &first_offset); + if (!s.ok()) { + return s; + } + if (target_index < first_offset.l_offset.index) { + if (first_time_right) { + // last filenum + filenum = filenum - 1; + break; + } + // move left + first_time_left = true; + if (filenum == 0 || filenum - 1 < lb_binlogs) { + return Status::NotFound(std::to_string(target_index) + " hit low boundary"); + } + filenum = filenum - 1; + } else if (target_index > first_offset.l_offset.index) { + if (first_time_left) { + break; + } + // move right + 
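 // mirrors the "move left" branch above: step toward newer binlog files
+      // until a file's first logic index passes target_index or the newest
+      // file is reached
+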
first_time_right = true; + if (filenum + 1 > hb_binlogs) { + break; + } + filenum = filenum + 1; + } else { + break; + } + } + *founded_filenum = filenum; + return Status::OK(); +} + +Status ConsensusCoordinator::FindLogicOffsetBySearchingBinlog(const BinlogOffset& hint_offset, uint64_t target_index, + LogOffset* found_offset) { + LOG(INFO) << DBInfo(db_name_).ToString() << "FindLogicOffsetBySearchingBinlog hint offset " + << hint_offset.ToString() << " target_index " << target_index; + BinlogOffset start_offset; + std::map binlogs; + if (!stable_logger_->GetBinlogFiles(&binlogs)) { + return Status::Corruption("Get binlog files failed"); + } + if (binlogs.empty()) { + return Status::NotFound("Binlogs is empty"); + } + if (binlogs.find(hint_offset.filenum) == binlogs.end()) { + start_offset = BinlogOffset(binlogs.crbegin()->first, 0); + } else { + start_offset = hint_offset; + } + + uint32_t found_filenum; + Status s = FindBinlogFileNum(binlogs, target_index, start_offset.filenum, &found_filenum); + if (!s.ok()) { + return s; + } + + LOG(INFO) << DBInfo(db_name_).ToString() << "FindBinlogFilenum res " // NOLINT + << found_filenum; + BinlogOffset traversal_start(found_filenum, 0); + BinlogOffset traversal_end(found_filenum + 1, 0); + std::vector offsets; + s = GetBinlogOffset(traversal_start, traversal_end, &offsets); + if (!s.ok()) { + return s; + } + for (auto& offset : offsets) { + if (offset.l_offset.index == target_index) { + LOG(INFO) << DBInfo(db_name_).ToString() << "Founded " << target_index << " " + << offset.ToString(); + *found_offset = offset; + return Status::OK(); + } + } + return Status::NotFound("Logic index not found"); +} + +Status ConsensusCoordinator::FindLogicOffset(const BinlogOffset& start_offset, uint64_t target_index, + LogOffset* found_offset) { + LogOffset possible_offset; + Status s = GetBinlogOffset(start_offset, &possible_offset); + if (!s.ok() || possible_offset.l_offset.index != target_index) { + if (!s.ok()) { + LOG(INFO) << DBInfo(db_name_).ToString() << "GetBinlogOffset res: " << s.ToString(); + } else { + LOG(INFO) << DBInfo(db_name_).ToString() << "GetBInlogOffset res: " << s.ToString() + << " possible_offset " << possible_offset.ToString() << " target_index " << target_index; + } + return FindLogicOffsetBySearchingBinlog(start_offset, target_index, found_offset); + } + *found_offset = possible_offset; + return Status::OK(); +} + +Status ConsensusCoordinator::GetLogsBefore(const BinlogOffset& start_offset, std::vector* hints) { + BinlogOffset traversal_end = start_offset; + BinlogOffset traversal_start(traversal_end.filenum, 0); + traversal_start.filenum = traversal_start.filenum == 0 ? 
0 : traversal_start.filenum - 1; + std::map binlogs; + if (!stable_logger_->GetBinlogFiles(&binlogs)) { + return Status::Corruption("Get binlog files failed"); + } + if (binlogs.find(traversal_start.filenum) == binlogs.end()) { + traversal_start.filenum = traversal_end.filenum; + } + std::vector res; + Status s = GetBinlogOffset(traversal_start, traversal_end, &res); + if (!s.ok()) { + return s; + } + if (res.size() > 100) { + res.assign(res.end() - 100, res.end()); + } + *hints = res; + return Status::OK(); +} + +Status ConsensusCoordinator::LeaderNegotiate(const LogOffset& f_last_offset, bool* reject, + std::vector* hints) { + uint64_t f_index = f_last_offset.l_offset.index; + LOG(INFO) << DBInfo(db_name_).ToString() << "LeaderNeotiate follower last offset " + << f_last_offset.ToString() << " first_offsert " << stable_logger_->first_offset().ToString() + << " last_offset " << mem_logger_->last_offset().ToString(); + *reject = true; + if (f_index > mem_logger_->last_offset().l_offset.index) { + // hints starts from last_offset() - 100; + Status s = GetLogsBefore(mem_logger_->last_offset().b_offset, hints); + if (!s.ok()) { + LOG(WARNING) << f_index << " is larger than last index " << mem_logger_->last_offset().ToString() + << " get logs before last index failed " << s.ToString(); + return s; + } + LOG(INFO) << DBInfo(db_name_).ToString() + << "follower index larger then last_offset index, get logs before " + << mem_logger_->last_offset().ToString(); + return Status::OK(); + } + if (f_index < stable_logger_->first_offset().l_offset.index) { + // need full sync + LOG(INFO) << DBInfo(db_name_).ToString() << f_index << " not found current first index" + << stable_logger_->first_offset().ToString(); + return Status::NotFound("logic index"); + } + if (f_last_offset.l_offset.index == 0) { + *reject = false; + return Status::OK(); + } + + LogOffset found_offset; + Status s = FindLogicOffset(f_last_offset.b_offset, f_index, &found_offset); + if (!s.ok()) { + if (s.IsNotFound()) { + LOG(INFO) << DBInfo(db_name_).ToString() << f_last_offset.ToString() << " not found " + << s.ToString(); + return s; + } else { + LOG(WARNING) << DBInfo(db_name_).ToString() << "find logic offset failed" + << s.ToString(); + return s; + } + } + + if (found_offset.l_offset.term != f_last_offset.l_offset.term || !(f_last_offset.b_offset == found_offset.b_offset)) { + Status s = GetLogsBefore(found_offset.b_offset, hints); + if (!s.ok()) { + LOG(WARNING) << DBInfo(db_name_).ToString() << "Try to get logs before " + << found_offset.ToString() << " failed"; + return s; + } + return Status::OK(); + } + + LOG(INFO) << DBInfo(db_name_).ToString() << "Found equal offset " << found_offset.ToString(); + *reject = false; + return Status::OK(); +} + +// memlog order: committed_index , [committed_index + 1, memlogger.end()] +Status ConsensusCoordinator::FollowerNegotiate(const std::vector& hints, LogOffset* reply_offset) { + if (hints.empty()) { + return Status::Corruption("hints empty"); + } + LOG(INFO) << DBInfo(db_name_).ToString() << "FollowerNegotiate from " << hints[0].ToString() + << " to " << hints[hints.size() - 1].ToString(); + if (mem_logger_->last_offset().l_offset.index < hints[0].l_offset.index) { + *reply_offset = mem_logger_->last_offset(); + return Status::OK(); + } + if (committed_index().l_offset.index > hints[hints.size() - 1].l_offset.index) { + return Status::Corruption("invalid hints all smaller than committed_index"); + } + if (mem_logger_->last_offset().l_offset.index > hints[hints.size() - 1].l_offset.index) 
{
+    const auto& truncate_offset = hints[hints.size() - 1];
+    // truncate to the end of hints
+    Status s = TruncateTo(truncate_offset);
+    if (!s.ok()) {
+      return s;
+    }
+  }
+
+  LogOffset committed = committed_index();
+  // walk the hints from newest to oldest (reverse loop written this way to
+  // avoid size_t underflow when i reaches 0)
+  for (size_t i = hints.size(); i-- > 0;) {
+    if (hints[i].l_offset.index < committed.l_offset.index) {
+      return Status::Corruption("hints less than committed index");
+    }
+    if (hints[i].l_offset.index == committed.l_offset.index) {
+      if (hints[i].l_offset.term == committed.l_offset.term) {
+        Status s = TruncateTo(hints[i]);
+        if (!s.ok()) {
+          return s;
+        }
+        *reply_offset = mem_logger_->last_offset();
+        return Status::OK();
+      }
+    }
+    LogOffset found_offset;
+    bool res = mem_logger_->FindLogItem(hints[i], &found_offset);
+    if (!res) {
+      return Status::Corruption("hints not found " + hints[i].ToString());
+    }
+    if (found_offset.l_offset.term == hints[i].l_offset.term) {
+      // truncate to found_offset
+      Status s = TruncateTo(found_offset);
+      if (!s.ok()) {
+        return s;
+      }
+      *reply_offset = mem_logger_->last_offset();
+      return Status::OK();
+    }
+  }
+
+  Status s = TruncateTo(hints[0]);
+  if (!s.ok()) {
+    return s;
+  }
+  *reply_offset = mem_logger_->last_offset();
+  return Status::OK();
+}
diff --git a/tools/pika_migrate/src/pika_data_distribution.cc b/tools/pika_migrate/src/pika_data_distribution.cc
new file mode 100644
index 0000000000..49d6af125e
--- /dev/null
+++ b/tools/pika_migrate/src/pika_data_distribution.cc
@@ -0,0 +1,11 @@
+// Copyright (c) 2015-present, Qihoo, Inc. All rights reserved.
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree. An additional grant
+// of patent rights can be found in the PATENTS file in the same directory.
+
+#include "include/pika_data_distribution.h"
+
+void HashModulo::Init() {}
+
+
+
diff --git a/tools/pika_migrate/src/pika_db.cc b/tools/pika_migrate/src/pika_db.cc
new file mode 100644
index 0000000000..f3d52fdec3
--- /dev/null
+++ b/tools/pika_migrate/src/pika_db.cc
@@ -0,0 +1,640 @@
+// Copyright (c) 2018-present, Qihoo, Inc. All rights reserved.
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree. An additional grant
+// of patent rights can be found in the PATENTS file in the same directory.
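+
+// Path layout sketch (worked example; the concrete paths are illustrative):
+// with db_path "/data/pika/db/" and db_name "db0", DBPath() below yields
+// "/data/pika/db/db0/", the per-DB binlog directory becomes
+// "<log_path>/log_db0/", and DbSyncPath() gives "<db-sync-path>/db0/".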
+ +#include <fstream> +#include <utility> + +#include "include/pika_db.h" + +#include "include/pika_cmd_table_manager.h" +#include "include/pika_rm.h" +#include "include/pika_server.h" +#include "mutex_impl.h" + +using pstd::Status; +extern PikaServer* g_pika_server; +extern std::unique_ptr<PikaReplicaManager> g_pika_rm; +extern std::unique_ptr<PikaCmdTableManager> g_pika_cmd_table_manager; + +std::string DBPath(const std::string& path, const std::string& db_name) { + char buf[100]; + snprintf(buf, sizeof(buf), "%s/", db_name.data()); + return path + buf; +} + +std::string DbSyncPath(const std::string& sync_path, const std::string& db_name) { + char buf[256]; + snprintf(buf, sizeof(buf), "%s/", db_name.data()); + return sync_path + buf; +} + +DB::DB(std::string db_name, const std::string& db_path, + const std::string& log_path) + : db_name_(db_name), bgsave_engine_(nullptr) { + db_path_ = DBPath(db_path, db_name_); + bgsave_sub_path_ = db_name; + dbsync_path_ = DbSyncPath(g_pika_conf->db_sync_path(), db_name); + log_path_ = DBPath(log_path, "log_" + db_name_); + storage_ = std::make_shared<storage::Storage>(g_pika_conf->db_instance_num(), + g_pika_conf->default_slot_num(), g_pika_conf->classic_mode()); + rocksdb::Status s = storage_->Open(g_pika_server->storage_options(), db_path_); + pstd::CreatePath(db_path_); + pstd::CreatePath(log_path_); + lock_mgr_ = std::make_shared<pstd::lock::LockMgr>(1000, 0, std::make_shared<pstd::lock::MutexFactoryImpl>()); + binlog_io_error_.store(false); + opened_ = s.ok(); + assert(storage_); + assert(s.ok()); + LOG(INFO) << db_name_ << " DB Success"; +} + +DB::~DB() { + StopKeyScan(); +} + +bool DB::WashData() { + rocksdb::ReadOptions read_options; + rocksdb::Status s; + auto suffix_len = storage::ParsedBaseDataValue::GetkBaseDataValueSuffixLength(); + for (int i = 0; i < g_pika_conf->db_instance_num(); i++) { + rocksdb::WriteBatch batch; + auto handle = storage_->GetHashCFHandles(i)[1]; + auto db = storage_->GetDBByIndex(i); + auto it(db->NewIterator(read_options, handle)); + for (it->SeekToFirst(); it->Valid(); it->Next()) { + std::string key = it->key().ToString(); + std::string value = it->value().ToString(); + if (value.size() < suffix_len) { + // need to wash + storage::BaseDataValue internal_value(value); + batch.Put(handle, key, internal_value.Encode()); + } + } + delete it; + s = db->Write(storage_->GetDefaultWriteOptions(i), &batch); + if (!s.ok()) { + return false; + } + } + return true; +} + +std::string DB::GetDBName() { return db_name_; } + +void DB::BgSaveDB() { + std::shared_lock l(dbs_rw_); + std::lock_guard ml(bgsave_protector_); + if (bgsave_info_.bgsaving) { + return; + } + bgsave_info_.bgsaving = true; + auto bg_task_arg = new BgTaskArg(); + bg_task_arg->db = shared_from_this(); + g_pika_server->BGSaveTaskSchedule(&DoBgSave, static_cast<void*>(bg_task_arg)); +} + +void DB::SetBinlogIoError() { return binlog_io_error_.store(true); } +void DB::SetBinlogIoErrorrelieve() { return binlog_io_error_.store(false); } +bool DB::IsBinlogIoError() { return binlog_io_error_.load(); } +std::shared_ptr<pstd::lock::LockMgr> DB::LockMgr() { return lock_mgr_; } +std::shared_ptr<PikaCache> DB::cache() const { return cache_; } +std::shared_ptr<storage::Storage> DB::storage() const { return storage_; } + +void DB::KeyScan() { + std::lock_guard ml(key_scan_protector_); + if (key_scan_info_.key_scaning_) { + return; + } + + key_scan_info_.duration = -2; // duration == -2 means the task is waiting and + // has not yet been scheduled for execution + auto bg_task_arg = new BgTaskArg(); + bg_task_arg->db = shared_from_this(); + g_pika_server->KeyScanTaskSchedule(&DoKeyScan, reinterpret_cast<void*>(bg_task_arg)); +} + +bool DB::IsKeyScaning() { + std::lock_guard
ml(key_scan_protector_); + return key_scan_info_.key_scaning_; +} + +void DB::RunKeyScan() { + Status s; + std::vector<storage::KeyInfo> new_key_infos; + + InitKeyScan(); + std::shared_lock l(dbs_rw_); + s = GetKeyNum(&new_key_infos); + key_scan_info_.duration = static_cast<int32_t>(time(nullptr) - key_scan_info_.start_time); + + std::lock_guard lm(key_scan_protector_); + if (s.ok()) { + key_scan_info_.key_infos = new_key_infos; + } + key_scan_info_.key_scaning_ = false; +} + +Status DB::GetKeyNum(std::vector<storage::KeyInfo>* key_info) { + std::lock_guard l(key_info_protector_); + if (key_scan_info_.key_scaning_) { + *key_info = key_scan_info_.key_infos; + return Status::OK(); + } + InitKeyScan(); + key_scan_info_.key_scaning_ = true; + key_scan_info_.duration = -2; // duration == -2 means the task is waiting and + // has not yet been scheduled for execution + rocksdb::Status s = storage_->GetKeyNum(key_info); + key_scan_info_.key_scaning_ = false; + if (!s.ok()) { + return Status::Corruption(s.ToString()); + } + key_scan_info_.key_infos = *key_info; + key_scan_info_.duration = static_cast<int32_t>(time(nullptr) - key_scan_info_.start_time); + return Status::OK(); +} + +void DB::StopKeyScan() { + std::shared_lock rwl(dbs_rw_); + std::lock_guard ml(key_scan_protector_); + + if (!key_scan_info_.key_scaning_) { + return; + } + storage_->StopScanKeyNum(); + key_scan_info_.key_scaning_ = false; +} + +void DB::ScanDatabase(const storage::DataType& type) { + std::shared_lock l(dbs_rw_); + storage_->ScanDatabase(type); +} + +KeyScanInfo DB::GetKeyScanInfo() { + std::lock_guard lm(key_scan_protector_); + return key_scan_info_; +} + +void DB::Compact(const storage::DataType& type) { + std::lock_guard rwl(dbs_rw_); + if (!opened_) { + return; + } + storage_->Compact(type); +} + +void DB::CompactRange(const storage::DataType& type, const std::string& start, const std::string& end) { + std::lock_guard rwl(dbs_rw_); + if (!opened_) { + return; + } + storage_->CompactRange(type, start, end); +} + +void DB::LongestNotCompactionSstCompact(const storage::DataType& type) { + std::lock_guard rwl(dbs_rw_); + if (!opened_) { + return; + } + storage_->LongestNotCompactionSstCompact(type); +} + +void DB::DoKeyScan(void* arg) { + std::unique_ptr<BgTaskArg> bg_task_arg(static_cast<BgTaskArg*>(arg)); + bg_task_arg->db->RunKeyScan(); +} + +void DB::InitKeyScan() { + key_scan_info_.start_time = time(nullptr); + char s_time[32]; + size_t len = strftime(s_time, sizeof(s_time), "%Y-%m-%d %H:%M:%S", localtime(&key_scan_info_.start_time)); + key_scan_info_.s_start_time.assign(s_time, len); + key_scan_info_.duration = -1; // duration == -1 means the task is in progress +} + +void DB::SetCompactRangeOptions(const bool is_canceled) { + if (!opened_) { + return; + } + storage_->SetCompactRangeOptions(is_canceled); +} + +DisplayCacheInfo DB::GetCacheInfo() { + std::lock_guard l(cache_info_rwlock_); + return cache_info_; +} + +bool DB::FlushDBWithoutLock() { + std::lock_guard l(bgsave_protector_); + if (bgsave_info_.bgsaving) { + return false; + } + + LOG(INFO) << db_name_ << " Delete old db..."; + storage_.reset(); + + std::string dbpath = db_path_; + if (dbpath[dbpath.length() - 1] == '/') { + dbpath.erase(dbpath.length() - 1); + } + std::string delete_suffix("_deleting_"); + delete_suffix.append(std::to_string(NowMicros())); + delete_suffix.append("/"); + dbpath.append(delete_suffix); + auto rename_success = pstd::RenameFile(db_path_, dbpath); + storage_ = std::make_shared<storage::Storage>(g_pika_conf->db_instance_num(), + g_pika_conf->default_slot_num(), g_pika_conf->classic_mode()); + rocksdb::Status s =
storage_->Open(g_pika_server->storage_options(), db_path_); + assert(storage_); + assert(s.ok()); + if (rename_success == -1) { + //the storage_->Open actually opened old RocksDB instance, so flushdb failed + LOG(WARNING) << db_name_ << " FlushDB failed due to rename old db_path_ failed"; + return false; + } + LOG(INFO) << db_name_ << " Open new db success"; + + g_pika_server->PurgeDir(dbpath); + return true; +} + +void DB::DoBgSave(void* arg) { + std::unique_ptr bg_task_arg(static_cast(arg)); + + // Do BgSave + bool success = bg_task_arg->db->RunBgsaveEngine(); + + // Some output + BgSaveInfo info = bg_task_arg->db->bgsave_info(); + std::stringstream info_content; + std::ofstream out; + out.open(info.path + "/" + kBgsaveInfoFile, std::ios::in | std::ios::trunc); + if (out.is_open()) { + info_content << (time(nullptr) - info.start_time) << "s\n" + << g_pika_server->host() << "\n" + << g_pika_server->port() << "\n" + << info.offset.b_offset.filenum << "\n" + << info.offset.b_offset.offset << "\n"; + bg_task_arg->db->snapshot_uuid_ = md5(info_content.str()); + out << info_content.rdbuf(); + out.close(); + } + if (!success) { + std::string fail_path = info.path + "_FAILED"; + pstd::RenameFile(info.path, fail_path); + } + bg_task_arg->db->FinishBgsave(); +} + +bool DB::RunBgsaveEngine() { + // Prepare for Bgsaving + if (!InitBgsaveEnv() || !InitBgsaveEngine()) { + ClearBgsave(); + return false; + } + LOG(INFO) << db_name_ << " after prepare bgsave"; + + BgSaveInfo info = bgsave_info(); + LOG(INFO) << db_name_ << " bgsave_info: path=" << info.path << ", filenum=" << info.offset.b_offset.filenum + << ", offset=" << info.offset.b_offset.offset; + + // Backup to tmp dir + rocksdb::Status s = bgsave_engine_->CreateNewBackup(info.path); + + if (!s.ok()) { + LOG(WARNING) << db_name_ << " create new backup failed :" << s.ToString(); + return false; + } + LOG(INFO) << db_name_ << " create new backup finished."; + + return true; +} + +BgSaveInfo DB::bgsave_info() { + std::lock_guard l(bgsave_protector_); + return bgsave_info_; +} + +void DB::FinishBgsave() { + std::lock_guard l(bgsave_protector_); + bgsave_info_.bgsaving = false; + g_pika_server->UpdateLastSave(time(nullptr)); +} + +// Prepare engine, need bgsave_protector protect +bool DB::InitBgsaveEnv() { + std::lock_guard l(bgsave_protector_); + // Prepare for bgsave dir + bgsave_info_.start_time = time(nullptr); + char s_time[32]; + int len = static_cast(strftime(s_time, sizeof(s_time), "%Y%m%d%H%M%S", localtime(&bgsave_info_.start_time))); + bgsave_info_.s_start_time.assign(s_time, len); + std::string time_sub_path = g_pika_conf->bgsave_prefix() + std::string(s_time, 8); + bgsave_info_.path = g_pika_conf->bgsave_path() + time_sub_path + "/" + bgsave_sub_path_; + if (!pstd::DeleteDirIfExist(bgsave_info_.path)) { + LOG(WARNING) << db_name_ << " remove exist bgsave dir failed"; + return false; + } + pstd::CreatePath(bgsave_info_.path, 0755); + // Prepare for failed dir + if (!pstd::DeleteDirIfExist(bgsave_info_.path + "_FAILED")) { + LOG(WARNING) << db_name_ << " remove exist fail bgsave dir failed :"; + return false; + } + return true; +} + +// Prepare bgsave env, need bgsave_protector protect +bool DB::InitBgsaveEngine() { + bgsave_engine_.reset(); + rocksdb::Status s = storage::BackupEngine::Open(storage().get(), bgsave_engine_, g_pika_conf->db_instance_num()); + if (!s.ok()) { + LOG(WARNING) << db_name_ << " open backup engine failed " << s.ToString(); + return false; + } + + std::shared_ptr db = + 
g_pika_rm->GetSyncMasterDBByName(DBInfo(db_name_)); + if (!db) { + LOG(WARNING) << db_name_ << " not found"; + return false; + } + + { + std::lock_guard lock(dbs_rw_); + LogOffset bgsave_offset; + // term, index are 0 + db->Logger()->GetProducerStatus(&(bgsave_offset.b_offset.filenum), &(bgsave_offset.b_offset.offset)); + { + std::lock_guard l(bgsave_protector_); + bgsave_info_.offset = bgsave_offset; + } + s = bgsave_engine_->SetBackupContent(); + if (!s.ok()) { + LOG(WARNING) << db_name_ << " set backup content failed " << s.ToString(); + return false; + } + } + return true; +} + +void DB::Init() { + cache_ = std::make_shared(g_pika_conf->zset_cache_start_direction(), g_pika_conf->zset_cache_field_num_per_key()); + // Create cache + cache::CacheConfig cache_cfg; + g_pika_server->CacheConfigInit(cache_cfg); + cache_->Init(g_pika_conf->GetCacheNum(), &cache_cfg); +} + +void DB::GetBgSaveMetaData(std::vector* fileNames, std::string* snapshot_uuid) { + const std::string dbPath = bgsave_info().path; + + int db_instance_num = g_pika_conf->db_instance_num(); + for (int index = 0; index < db_instance_num; index++) { + std::string instPath = dbPath + ((dbPath.back() != '/') ? "/" : "") + std::to_string(index); + if (!pstd::FileExists(instPath)) { + continue ; + } + + std::vector tmpFileNames; + int ret = pstd::GetChildren(instPath, tmpFileNames); + if (ret) { + LOG(WARNING) << dbPath << " read dump meta files failed, path " << instPath; + return; + } + + for (const std::string fileName : tmpFileNames) { + fileNames -> push_back(std::to_string(index) + "/" + fileName); + } + } + fileNames->push_back(kBgsaveInfoFile); + pstd::Status s = GetBgSaveUUID(snapshot_uuid); + if (!s.ok()) { + LOG(WARNING) << "read dump meta info failed! error:" << s.ToString(); + return; + } +} + +Status DB::GetBgSaveUUID(std::string* snapshot_uuid) { + if (snapshot_uuid_.empty()) { + std::string info_data; + const std::string infoPath = bgsave_info().path + "/info"; + //TODO: using file read function to replace rocksdb::ReadFileToString + rocksdb::Status s = rocksdb::ReadFileToString(rocksdb::Env::Default(), infoPath, &info_data); + if (!s.ok()) { + LOG(WARNING) << "read dump meta info failed! 
error:" << s.ToString(); + return Status::IOError("read dump meta info failed", infoPath); + } + pstd::MD5 md5 = pstd::MD5(info_data); + snapshot_uuid_ = md5.hexdigest(); + } + *snapshot_uuid = snapshot_uuid_; + return Status::OK(); +} + +// Try to update master offset +// This may happend when dbsync from master finished +// Here we do: +// 1, Check dbsync finished, got the new binlog offset +// 2, Replace the old db +// 3, Update master offset, and the PikaAuxiliaryThread cron will connect and do slaveof task with master +bool DB::TryUpdateMasterOffset() { + std::shared_ptr slave_db = + g_pika_rm->GetSyncSlaveDBByName(DBInfo(db_name_)); + if (!slave_db) { + LOG(ERROR) << "Slave DB: " << db_name_ << " not exist"; + slave_db->SetReplState(ReplState::kError); + return false; + } + + std::string info_path = dbsync_path_ + kBgsaveInfoFile; + if (!pstd::FileExists(info_path)) { + LOG(WARNING) << "info path: " << info_path << " not exist, Slave DB:" << GetDBName() << " will restart the sync process..."; + // May failed in RsyncClient, thus the complete snapshot dir got deleted + slave_db->SetReplState(ReplState::kTryConnect); + return false; + } + + // Got new binlog offset + std::ifstream is(info_path); + if (!is) { + LOG(WARNING) << "DB: " << db_name_ << ", Failed to open info file after db sync"; + slave_db->SetReplState(ReplState::kError); + return false; + } + std::string line; + std::string master_ip; + int lineno = 0; + int64_t filenum = 0; + int64_t offset = 0; + int64_t term = 0; + int64_t index = 0; + int64_t tmp = 0; + int64_t master_port = 0; + while (std::getline(is, line)) { + lineno++; + if (lineno == 2) { + master_ip = line; + } else if (lineno > 2 && lineno < 8) { + if ((pstd::string2int(line.data(), line.size(), &tmp) == 0) || tmp < 0) { + LOG(WARNING) << "DB: " << db_name_ + << ", Format of info file after db sync error, line : " << line; + is.close(); + slave_db->SetReplState(ReplState::kError); + return false; + } + if (lineno == 3) { + master_port = tmp; + } else if (lineno == 4) { + filenum = tmp; + } else if (lineno == 5) { + offset = tmp; + } else if (lineno == 6) { + term = tmp; + } else if (lineno == 7) { + index = tmp; + } + } else if (lineno > 8) { + LOG(WARNING) << "DB: " << db_name_ << ", Format of info file after db sync error, line : " << line; + is.close(); + slave_db->SetReplState(ReplState::kError); + return false; + } + } + is.close(); + + LOG(INFO) << "DB: " << db_name_ << " Information from dbsync info" + << ", master_ip: " << master_ip << ", master_port: " << master_port << ", filenum: " << filenum + << ", offset: " << offset << ", term: " << term << ", index: " << index; + + pstd::DeleteFile(info_path); + if (!ChangeDb(dbsync_path_)) { + LOG(WARNING) << "DB: " << db_name_ << ", Failed to change db"; + slave_db->SetReplState(ReplState::kError); + return false; + } + + // Update master offset + std::shared_ptr master_db = + g_pika_rm->GetSyncMasterDBByName(DBInfo(db_name_)); + if (!master_db) { + LOG(WARNING) << "Master DB: " << db_name_ << " not exist"; + slave_db->SetReplState(ReplState::kError); + return false; + } + master_db->Logger()->SetProducerStatus(filenum, offset); + slave_db->SetReplState(ReplState::kTryConnect); + + //now full sync is finished, remove unfinished full sync count + g_pika_conf->RemoveInternalUsedUnfinishedFullSync(slave_db->DBName()); + + return true; +} + +void DB::PrepareRsync() { + pstd::DeleteDirIfExist(dbsync_path_); + int db_instance_num = g_pika_conf->db_instance_num(); + for (int index = 0; index < db_instance_num; 
index++) { + pstd::CreatePath(dbsync_path_ + std::to_string(index)); + } +} + +bool DB::IsBgSaving() { + std::lock_guard ml(bgsave_protector_); + return bgsave_info_.bgsaving; +} + +/* + * Change a new db locate in new_path + * return true when change success + * db remain the old one if return false + */ +bool DB::ChangeDb(const std::string& new_path) { + std::string tmp_path(db_path_); + if (tmp_path.back() == '/') { + tmp_path.resize(tmp_path.size() - 1); + } + tmp_path += "_bak"; + pstd::DeleteDirIfExist(tmp_path); + + std::lock_guard l(dbs_rw_); + LOG(INFO) << "DB: " << db_name_ << ", Prepare change db from: " << tmp_path; + storage_.reset(); + + if (0 != pstd::RenameFile(db_path_, tmp_path)) { + LOG(WARNING) << "DB: " << db_name_ + << ", Failed to rename db path when change db, error: " << strerror(errno); + return false; + } + + if (0 != pstd::RenameFile(new_path, db_path_)) { + LOG(WARNING) << "DB: " << db_name_ + << ", Failed to rename new db path when change db, error: " << strerror(errno); + return false; + } + + storage_ = std::make_shared(g_pika_conf->db_instance_num(), + g_pika_conf->default_slot_num(), g_pika_conf->classic_mode()); + rocksdb::Status s = storage_->Open(g_pika_server->storage_options(), db_path_); + assert(storage_); + assert(s.ok()); + pstd::DeleteDirIfExist(tmp_path); + LOG(INFO) << "DB: " << db_name_ << ", Change db success"; + return true; +} + +void DB::ClearBgsave() { + std::lock_guard l(bgsave_protector_); + bgsave_info_.Clear(); +} + +void DB::UpdateCacheInfo(CacheInfo& cache_info) { + std::unique_lock lock(cache_info_rwlock_); + + cache_info_.status = cache_info.status; + cache_info_.cache_num = cache_info.cache_num; + cache_info_.keys_num = cache_info.keys_num; + cache_info_.used_memory = cache_info.used_memory; + cache_info_.waitting_load_keys_num = cache_info.waitting_load_keys_num; + cache_usage_ = cache_info.used_memory; + + uint64_t all_cmds = cache_info.hits + cache_info.misses; + cache_info_.hitratio_all = (0 >= all_cmds) ? 0.0 : (cache_info.hits * 100.0) / all_cmds; + + uint64_t cur_time_us = pstd::NowMicros(); + uint64_t delta_time = cur_time_us - cache_info_.last_time_us + 1; + uint64_t delta_hits = cache_info.hits - cache_info_.hits; + cache_info_.hits_per_sec = delta_hits * 1000000 / delta_time; + + uint64_t delta_all_cmds = all_cmds - (cache_info_.hits + cache_info_.misses); + cache_info_.read_cmd_per_sec = delta_all_cmds * 1000000 / delta_time; + + cache_info_.hitratio_per_sec = (0 >= delta_all_cmds) ? 
0.0 : (delta_hits * 100.0) / delta_all_cmds; + + uint64_t delta_load_keys = cache_info.async_load_keys_num - cache_info_.last_load_keys_num; + cache_info_.load_keys_per_sec = delta_load_keys * 1000000 / delta_time; + + cache_info_.hits = cache_info.hits; + cache_info_.misses = cache_info.misses; + cache_info_.last_time_us = cur_time_us; + cache_info_.last_load_keys_num = cache_info.async_load_keys_num; +} + +void DB::ResetDisplayCacheInfo(int status) { + std::unique_lock lock(cache_info_rwlock_); + cache_info_.status = status; + cache_info_.cache_num = 0; + cache_info_.keys_num = 0; + cache_info_.used_memory = 0; + cache_info_.hits = 0; + cache_info_.misses = 0; + cache_info_.hits_per_sec = 0; + cache_info_.read_cmd_per_sec = 0; + cache_info_.hitratio_per_sec = 0.0; + cache_info_.hitratio_all = 0.0; + cache_info_.load_keys_per_sec = 0; + cache_info_.waitting_load_keys_num = 0; + cache_usage_ = 0; +} diff --git a/tools/pika_migrate/src/pika_dispatch_thread.cc b/tools/pika_migrate/src/pika_dispatch_thread.cc new file mode 100644 index 0000000000..bc892e23e4 --- /dev/null +++ b/tools/pika_migrate/src/pika_dispatch_thread.cc @@ -0,0 +1,85 @@ +// Copyright (c) 2015-present, Qihoo, Inc. All rights reserved. +// This source code is licensed under the BSD-style license found in the +// LICENSE file in the root directory of this source tree. An additional grant +// of patent rights can be found in the PATENTS file in the same directory. + +#include "include/pika_dispatch_thread.h" + +#include + +#include "include/pika_conf.h" +#include "include/pika_server.h" +#include "net/src/dispatch_thread.h" +#include "pstd/include/testutil.h" + +extern PikaServer* g_pika_server; + +PikaDispatchThread::PikaDispatchThread(std::set& ips, int port, int work_num, int cron_interval, + int queue_limit, int max_conn_rbuf_size) + : conn_factory_(max_conn_rbuf_size), handles_(this) { + thread_rep_ = net::NewDispatchThread(ips, port, work_num, &conn_factory_, cron_interval, queue_limit, &handles_); + thread_rep_->set_thread_name("Dispatcher"); +} + +PikaDispatchThread::~PikaDispatchThread() { + thread_rep_->StopThread(); + LOG(INFO) << "dispatch thread " << thread_rep_->thread_id() << " exit!!!"; + delete thread_rep_; +} + +int PikaDispatchThread::StartThread() { return thread_rep_->StartThread(); } + +uint64_t PikaDispatchThread::ThreadClientList(std::vector* clients) { + std::vector conns_info = thread_rep_->conns_info(); + if (clients) { + for (auto& info : conns_info) { + clients->push_back({ + info.fd, info.ip_port, info.last_interaction.tv_sec, nullptr /* NetConn pointer, doesn't need here */ + }); + } + } + return conns_info.size(); +} + +bool PikaDispatchThread::ClientKill(const std::string& ip_port) { return thread_rep_->KillConn(ip_port); } + +void PikaDispatchThread::ClientKillAll() { thread_rep_->KillAllConns(); } + +void PikaDispatchThread::UnAuthUserAndKillClient(const std::set& users, + const std::shared_ptr& defaultUser) { + auto dispatchThread = dynamic_cast(thread_rep_); + if (dispatchThread) { + dispatchThread->AllConn([&](const std::shared_ptr& conn) { + auto pikaClientConn = std::dynamic_pointer_cast(conn); + if (pikaClientConn && users.count(pikaClientConn->UserName())) { + pikaClientConn->UnAuth(defaultUser); + conn->SetClose(true); + } + }); + } +} + +void PikaDispatchThread::StopThread() { + thread_rep_->StopThread(); +} + +bool PikaDispatchThread::Handles::AccessHandle(std::string& ip) const { + if (ip == "127.0.0.1") { + ip = g_pika_server->host(); + } + + int client_num = 
pika_disptcher_->thread_rep_->conn_num(); + if ((client_num >= g_pika_conf->maxclients() + g_pika_conf->root_connection_num()) || + (client_num >= g_pika_conf->maxclients() && ip != g_pika_server->host())) { + LOG(WARNING) << "Max connections reached, denying new connection from: " << ip; + return false; + } + + DLOG(INFO) << "new client coming, ip: " << ip; + g_pika_server->incr_accumulative_connections(); + return true; +} + +void PikaDispatchThread::Handles::CronHandle() const { + pika_disptcher_->thread_rep_->set_keepalive_timeout(g_pika_conf->timeout()); +} diff --git a/tools/pika_migrate/src/pika_geo.cc b/tools/pika_migrate/src/pika_geo.cc new file mode 100644 index 0000000000..7e7575eca1 --- /dev/null +++ b/tools/pika_migrate/src/pika_geo.cc @@ -0,0 +1,589 @@ +// Copyright (c) 2015-present, Qihoo, Inc. All rights reserved. +// This source code is licensed under the BSD-style license found in the +// LICENSE file in the root directory of this source tree. An additional grant +// of patent rights can be found in the PATENTS file in the same directory. + +#include "include/pika_geo.h" + +#include <algorithm> + +#include "pstd/include/pstd_string.h" + +#include "include/pika_geohash_helper.h" +#include "rocksdb/status.h" + +void GeoAddCmd::DoInitial() { + if (!CheckArg(argv_.size())) { + res_.SetRes(CmdRes::kWrongNum, kCmdNameGeoAdd); + return; + } + size_t argc = argv_.size(); + if ((argc - 2) % 3 != 0) { + res_.SetRes(CmdRes::kWrongNum, kCmdNameGeoAdd); + return; + } + key_ = argv_[1]; + pos_.clear(); + struct GeoPoint point; + double longitude; + double latitude; + for (size_t index = 2; index < argc; index += 3) { + if (pstd::string2d(argv_[index].data(), argv_[index].size(), &longitude) == 0) { + res_.SetRes(CmdRes::kInvalidFloat); + return; + } + if (pstd::string2d(argv_[index + 1].data(), argv_[index + 1].size(), &latitude) == 0) { + res_.SetRes(CmdRes::kInvalidFloat); + return; + } + point.member = argv_[index + 2]; + point.longitude = longitude; + point.latitude = latitude; + pos_.push_back(point); + } +} + +void GeoAddCmd::Do() { + std::vector<storage::ScoreMember> score_members; + for (const auto& geo_point : pos_) { + // Convert coordinates to geohash + GeoHashBits hash; + geohashEncodeWGS84(geo_point.longitude, geo_point.latitude, GEO_STEP_MAX, &hash); + GeoHashFix52Bits bits = geohashAlign52Bits(hash); + // Convert uint64 to double + double score; + std::string str_bits = std::to_string(bits); + pstd::string2d(str_bits.data(), str_bits.size(), &score); + score_members.push_back({score, geo_point.member}); + } + int32_t count = 0; + rocksdb::Status s = db_->storage()->ZAdd(key_, score_members, &count); + if (s.ok()) { + res_.AppendInteger(count); + } else if (s.IsInvalidArgument()) { + res_.SetRes(CmdRes::kMultiKey); + } else { + res_.SetRes(CmdRes::kErrOther, s.ToString()); + } +} + +void GeoPosCmd::DoInitial() { + if (!CheckArg(argv_.size())) { + res_.SetRes(CmdRes::kWrongNum, kCmdNameGeoPos); + return; + } + key_ = argv_[1]; + members_.clear(); + size_t pos = 2; + while (pos < argv_.size()) { + members_.push_back(argv_[pos++]); + } +} + +void GeoPosCmd::Do() { + double score = 0.0; + res_.AppendArrayLenUint64(members_.size()); + for (const auto& member : members_) { + rocksdb::Status s = db_->storage()->ZScore(key_, member, &score); + if (s.ok()) { + double xy[2]; + GeoHashBits hash = {.bits = static_cast<uint64_t>(score), .step = GEO_STEP_MAX}; + geohashDecodeToLongLatWGS84(hash, xy); + + res_.AppendArrayLen(2); + char longitude[32]; + int64_t len = pstd::d2string(longitude, sizeof(longitude), xy[0]); +
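+      // xy[] above is recovered by reinterpreting the zset score as a 52-bit
+      // geohash. A round-trip sketch of the encoding GEOADD performs
+      // (illustrative only; step 26 keeps roughly sub-metre precision):
+      //   GeoHashBits h;
+      //   geohashEncodeWGS84(lon, lat, GEO_STEP_MAX, &h);
+      //   double score = static_cast<double>(geohashAlign52Bits(h));
+      //   GeoHashBits back = {static_cast<uint64_t>(score), GEO_STEP_MAX};
+      //   geohashDecodeToLongLatWGS84(back, xy);  // xy[0] = lon, xy[1] = lat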
res_.AppendStringLen(len); + res_.AppendContent(longitude); + + char latitude[32]; + len = pstd::d2string(latitude, sizeof(latitude), xy[1]); + res_.AppendStringLen(len); + res_.AppendContent(latitude); + + } else if (s.IsNotFound()) { + res_.AppendStringLen(-1); + continue; + } else if (s.IsInvalidArgument()) { + res_.SetRes(CmdRes::kMultiKey); + continue; + } else { + res_.SetRes(CmdRes::kErrOther, s.ToString()); + continue; + } + } +} + +static double length_converter(double meters, const std::string& unit) { + if (unit == "m") { + return meters; + } else if (unit == "km") { + return meters / 1000; + } else if (unit == "ft") { + return meters / 0.3048; + } else if (unit == "mi") { + return meters / 1609.34; + } else { + return -1; + } +} + +static bool check_unit(const std::string& unit) { + return unit == "m" || unit == "km" || unit == "ft" || unit == "mi"; +} + +void GeoDistCmd::DoInitial() { + if (!CheckArg(argv_.size())) { + res_.SetRes(CmdRes::kWrongNum, kCmdNameGeoDist); + return; + } + if (argv_.size() < 4) { + res_.SetRes(CmdRes::kWrongNum, kCmdNameGeoDist); + return; + } else if (argv_.size() > 5) { + res_.SetRes(CmdRes::kSyntaxErr); + return; + } + key_ = argv_[1]; + first_pos_ = argv_[2]; + second_pos_ = argv_[3]; + if (argv_.size() == 5) { + unit_ = argv_[4]; + } else { + unit_ = "m"; + } + if (!check_unit(unit_)) { + res_.SetRes(CmdRes::kErrOther, "unsupported unit provided. please use m, km, ft, mi"); + return; + } +} + +void GeoDistCmd::Do() { + double first_score = 0.0; + double second_score = 0.0; + double first_xy[2]; + double second_xy[2]; + rocksdb::Status s = db_->storage()->ZScore(key_, first_pos_, &first_score); + + if (s.ok()) { + GeoHashBits hash = {.bits = static_cast(first_score), .step = GEO_STEP_MAX}; + geohashDecodeToLongLatWGS84(hash, first_xy); + } else if (s.IsNotFound()) { + res_.AppendStringLen(-1); + return; + } else if (s.IsInvalidArgument()) { + res_.SetRes(CmdRes::kMultiKey); + return; + } else { + res_.SetRes(CmdRes::kErrOther, s.ToString()); + return; + } + + s = db_->storage()->ZScore(key_, second_pos_, &second_score); + if (s.ok()) { + GeoHashBits hash = {.bits = static_cast(second_score), .step = GEO_STEP_MAX}; + geohashDecodeToLongLatWGS84(hash, second_xy); + } else if (s.IsNotFound()) { + res_.AppendStringLen(-1); + return; + } else { + res_.SetRes(CmdRes::kErrOther, s.ToString()); + return; + } + + double distance = geohashGetDistance(first_xy[0], first_xy[1], second_xy[0], second_xy[1]); + distance = length_converter(distance, unit_); + char buf[32]; + snprintf(buf, sizeof(buf), "%.4f", distance); + res_.AppendStringLenUint64(strlen(buf)); + res_.AppendContent(buf); +} + +void GeoHashCmd::DoInitial() { + if (!CheckArg(argv_.size())) { + res_.SetRes(CmdRes::kWrongNum, kCmdNameGeoHash); + return; + } + key_ = argv_[1]; + members_.clear(); + size_t pos = 2; + while (pos < argv_.size()) { + members_.push_back(argv_[pos++]); + } +} + +void GeoHashCmd::Do() { + const char* geoalphabet = "0123456789bcdefghjkmnpqrstuvwxyz"; + res_.AppendArrayLenUint64(members_.size()); + for (const auto& member : members_) { + double score = 0.0; + rocksdb::Status s = db_->storage()->ZScore(key_, member, &score); + if (s.ok()) { + double xy[2]; + GeoHashBits hash = {.bits = static_cast(score), .step = GEO_STEP_MAX}; + geohashDecodeToLongLatWGS84(hash, xy); + GeoHashRange r[2]; + GeoHashBits encode_hash; + r[0].min = -180; + r[0].max = 180; + r[1].min = -90; + r[1].max = 90; + geohashEncode(&r[0], &r[1], xy[0], xy[1], 26, &encode_hash); + + char buf[12]; + int i; + 
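+      // The loop below renders the re-encoded value as the standard
+      // 11-character base32 geohash, taking five bits per digit from the most
+      // significant end. Worked example (not from the patch): if the top five
+      // bits are 0b01101 == 13, the first digit is geoalphabet[13] == 'e'.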
for (i = 0; i < 11; i++) { + uint64_t idx = (encode_hash.bits >> (52 - ((i + 1) * 5))) & 0x1f; + buf[i] = geoalphabet[idx]; + } + buf[11] = '\0'; + res_.AppendStringLen(11); + res_.AppendContent(buf); + continue; + } else if (s.IsNotFound()) { + res_.AppendStringLen(-1); + continue; + } else if (s.IsInvalidArgument()) { + res_.SetRes(CmdRes::kMultiKey); + continue; + } else { + res_.SetRes(CmdRes::kErrOther, s.ToString()); + continue; + } + } +} + +static bool sort_distance_asc(const NeighborPoint& pos1, const NeighborPoint& pos2) { + return pos1.distance < pos2.distance; +} + +static bool sort_distance_desc(const NeighborPoint& pos1, const NeighborPoint& pos2) { + return pos1.distance > pos2.distance; +} + +static void GetAllNeighbors(const std::shared_ptr& db, std::string& key, GeoRange& range, CmdRes& res) { + rocksdb::Status s; + double longitude = range.longitude; + double latitude = range.latitude; + double distance = range.distance; + int count_limit = 0; + // Convert other units to meters + if (range.unit == "m") { + distance = distance; + } else if (range.unit == "km") { + distance = distance * 1000; + } else if (range.unit == "ft") { + distance = distance * 0.3048; + } else if (range.unit == "mi") { + distance = distance * 1609.34; + } else { + distance = -1; + } + // Search the zset for all matching points + GeoHashRadius georadius = geohashGetAreasByRadiusWGS84(longitude, latitude, distance); + GeoHashBits neighbors[9]; + neighbors[0] = georadius.hash; + neighbors[1] = georadius.neighbors.north; + neighbors[2] = georadius.neighbors.south; + neighbors[3] = georadius.neighbors.east; + neighbors[4] = georadius.neighbors.west; + neighbors[5] = georadius.neighbors.north_east; + neighbors[6] = georadius.neighbors.north_west; + neighbors[7] = georadius.neighbors.south_east; + neighbors[8] = georadius.neighbors.south_west; + + // For each neighbor, get all the matching + // members and add them to the potential result list. + std::vector result; + size_t last_processed = 0; + for (size_t i = 0; i < sizeof(neighbors) / sizeof(*neighbors); i++) { + GeoHashFix52Bits min; + GeoHashFix52Bits max; + if (HASHISZERO(neighbors[i])) { + continue; + } + + min = geohashAlign52Bits(neighbors[i]); + neighbors[i].bits++; + max = geohashAlign52Bits(neighbors[i]); + // When a huge Radius (in the 5000 km range or more) is used, + // adjacent neighbors can be the same, so need to remove duplicated elements + if ((last_processed != 0) && neighbors[i].bits == neighbors[last_processed].bits && + neighbors[i].step == neighbors[last_processed].step) { + continue; + } + std::vector score_members; + s = db->storage()->ZRangebyscore(key, static_cast(min), static_cast(max), true, true, &score_members); + if (!s.ok() && !s.IsNotFound()) { + if (s.IsInvalidArgument()) { + res.SetRes(CmdRes::kMultiKey); + return; + } else { + res.SetRes(CmdRes::kErrOther, s.ToString()); + return; + } + } + // Insert into result only if the point is within the search area. 
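+    // (The [min, max] score window above spans exactly one geohash cell at the
+    //  chosen step: aligning the cell id to 52 bits yields the smallest score
+    //  in the cell, and aligning id + 1 yields the start of the next cell, so
+    //  the ZRangebyscore call returns every member stored inside that cell.
+    //  E.g. at step 5, cell id 0b10110 covers [0b10110 << 42, 0b10111 << 42].)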
+ for (auto & score_member : score_members) { + double xy[2]; + double real_distance = 0.0; + GeoHashBits hash = {.bits = static_cast(score_member.score), .step = GEO_STEP_MAX}; + geohashDecodeToLongLatWGS84(hash, xy); + if (geohashGetDistanceIfInRadiusWGS84(longitude, latitude, xy[0], xy[1], distance, &real_distance) != 0) { + NeighborPoint item; + item.member = score_member.member; + item.score = score_member.score; + item.distance = real_distance; + result.push_back(item); + } + } + last_processed = i; + } + + // If using the count opiton + if (range.count) { + count_limit = static_cast(result.size() < range.count_limit ? result.size() : range.count_limit); + } else { + count_limit = static_cast(result.size()); + } + // If using sort option + if (range.sort != Unsort) { + if (range.sort == Asc) { + std::sort(result.begin(), result.end(), sort_distance_asc); + } else if (range.sort == Desc) { + std::sort(result.begin(), result.end(), sort_distance_desc); + } + } + + if (range.store || range.storedist) { + // Target key, create a sorted set with the results. + std::vector score_members; + for (int i = 0; i < count_limit; ++i) { + double distance = length_converter(result[i].distance, range.unit); + double score = range.store ? result[i].score : distance; + score_members.push_back({score, result[i].member}); + } + int32_t count = 0; + int32_t card = db->storage()->Exists({range.storekey}); + if (card) { + if (db->storage()->Del({range.storekey}) > 0) { + db->cache()->Del({range.storekey}); + } + } + s = db->storage()->ZAdd(range.storekey, score_members, &count); + if (!s.ok()) { + res.SetRes(CmdRes::kErrOther, s.ToString()); + return; + } else { + s = db->cache()->ZAdd(range.storekey, score_members); + } + res.AppendInteger(count_limit); + return; + } else { + // No target key, return results to user. 
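+    // Reply shape produced below mirrors Redis GEORADIUS: a top-level array
+    // with one entry per matched point; with WITHDIST/WITHHASH/WITHCOORD each
+    // entry becomes a nested array of option_num + 1 items, ordered member,
+    // dist, hash, coord. E.g. GEORADIUS ... WITHDIST might reply
+    // [["m1", "12.3456"], ["m2", "4.0420"]] (values illustrative).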
+ + // For each the result + res.AppendArrayLen(count_limit); + for (int i = 0; i < count_limit; ++i) { + if (range.option_num != 0) { + res.AppendArrayLen(range.option_num + 1); + } + // Member + res.AppendStringLenUint64(result[i].member.size()); + res.AppendContent(result[i].member); + + // If using withdist option + if (range.withdist) { + double xy[2]; + GeoHashBits hash = {.bits = static_cast(result[i].score), .step = GEO_STEP_MAX}; + geohashDecodeToLongLatWGS84(hash, xy); + double distance = geohashGetDistance(longitude, latitude, xy[0], xy[1]); + distance = length_converter(distance, range.unit); + char buf[32]; + snprintf(buf, sizeof(buf), "%.4f", distance); + res.AppendStringLenUint64(strlen(buf)); + res.AppendContent(buf); + } + // If using withhash option + if (range.withhash) { + res.AppendInteger(static_cast(result[i].score)); + } + // If using withcoord option + if (range.withcoord) { + res.AppendArrayLen(2); + double xy[2]; + GeoHashBits hash = {.bits = static_cast(result[i].score), .step = GEO_STEP_MAX}; + geohashDecodeToLongLatWGS84(hash, xy); + + char longitude[32]; + int64_t len = pstd::d2string(longitude, sizeof(longitude), xy[0]); + res.AppendStringLen(len); + res.AppendContent(longitude); + + char latitude[32]; + len = pstd::d2string(latitude, sizeof(latitude), xy[1]); + res.AppendStringLen(len); + res.AppendContent(latitude); + } + } + } +} + +void GeoRadiusCmd::DoInitial() { + if (!CheckArg(argv_.size())) { + res_.SetRes(CmdRes::kWrongNum, kCmdNameGeoRadius); + return; + } + key_ = argv_[1]; + pstd::string2d(argv_[2].data(), argv_[2].size(), &range_.longitude); + pstd::string2d(argv_[3].data(), argv_[3].size(), &range_.latitude); + pstd::string2d(argv_[4].data(), argv_[4].size(), &range_.distance); + range_.unit = argv_[5]; + if (!check_unit(range_.unit)) { + res_.SetRes(CmdRes::kErrOther, "unsupported unit provided. 
please use m, km, ft, mi"); + return; + } + size_t pos = 6; + range_.sort = Asc; + while (pos < argv_.size()) { + if (strcasecmp(argv_[pos].c_str(), "withdist") == 0) { + range_.withdist = true; + range_.option_num++; + } else if (strcasecmp(argv_[pos].c_str(), "withhash") == 0) { + range_.withhash = true; + range_.option_num++; + } else if (strcasecmp(argv_[pos].c_str(), "withcoord") == 0) { + range_.withcoord = true; + range_.option_num++; + } else if (strcasecmp(argv_[pos].c_str(), "count") == 0) { + range_.count = true; + if (argv_.size() < (pos + 2)) { + res_.SetRes(CmdRes::kSyntaxErr); + return; + } + std::string str_count = argv_[++pos]; + for (auto s : str_count) { + if (isdigit(s) == 0) { + res_.SetRes(CmdRes::kErrOther, "value is not an integer or out of range"); + return; + } + } + range_.count_limit = std::stoi(str_count); + } else if (strcasecmp(argv_[pos].c_str(), "store") == 0) { + range_.store = true; + if (argv_.size() < (pos + 2)) { + res_.SetRes(CmdRes::kSyntaxErr); + return; + } + range_.storekey = argv_[++pos]; + } else if (strcasecmp(argv_[pos].c_str(), "storedist") == 0) { + range_.storedist = true; + if (argv_.size() < (pos + 2)) { + res_.SetRes(CmdRes::kSyntaxErr); + return; + } + range_.storekey = argv_[++pos]; + } else if (strcasecmp(argv_[pos].c_str(), "asc") == 0) { + range_.sort = Asc; + } else if (strcasecmp(argv_[pos].c_str(), "desc") == 0) { + range_.sort = Desc; + } else { + res_.SetRes(CmdRes::kSyntaxErr); + return; + } + pos++; + } + if (range_.store && (range_.withdist || range_.withcoord || range_.withhash)) { + res_.SetRes(CmdRes::kErrOther, + "STORE option in GEORADIUS is not compatible with WITHDIST, WITHHASH and WITHCOORDS options"); + return; + } +} + +void GeoRadiusCmd::Do() { GetAllNeighbors(db_, key_, range_, this->res_); } + +void GeoRadiusByMemberCmd::DoInitial() { + if (!CheckArg(argv_.size())) { + res_.SetRes(CmdRes::kWrongNum, kCmdNameGeoRadius); + return; + } + key_ = argv_[1]; + range_.member = argv_[2]; + pstd::string2d(argv_[3].data(), argv_[3].size(), &range_.distance); + range_.unit = argv_[4]; + if (!check_unit(range_.unit)) { + res_.SetRes(CmdRes::kErrOther, "unsupported unit provided. 
please use m, km, ft, mi"); + return; + } + size_t pos = 5; + while (pos < argv_.size()) { + if (strcasecmp(argv_[pos].c_str(), "withdist") == 0) { + range_.withdist = true; + range_.option_num++; + } else if (strcasecmp(argv_[pos].c_str(), "withhash") == 0) { + range_.withhash = true; + range_.option_num++; + } else if (strcasecmp(argv_[pos].c_str(), "withcoord") == 0) { + range_.withcoord = true; + range_.option_num++; + } else if (strcasecmp(argv_[pos].c_str(), "count") == 0) { + range_.count = true; + if (argv_.size() < (pos + 2)) { + res_.SetRes(CmdRes::kSyntaxErr); + return; + } + std::string str_count = argv_[++pos]; + for (auto s : str_count) { + if (isdigit(s) == 0) { + res_.SetRes(CmdRes::kErrOther, "value is not an integer or out of range"); + return; + } + } + range_.count_limit = std::stoi(str_count); + } else if (strcasecmp(argv_[pos].c_str(), "store") == 0) { + range_.store = true; + if (argv_.size() < (pos + 2)) { + res_.SetRes(CmdRes::kSyntaxErr); + return; + } + range_.storekey = argv_[++pos]; + } else if (strcasecmp(argv_[pos].c_str(), "storedist") == 0) { + range_.storedist = true; + if (argv_.size() < (pos + 2)) { + res_.SetRes(CmdRes::kSyntaxErr); + return; + } + range_.storekey = argv_[++pos]; + } else if (strcasecmp(argv_[pos].c_str(), "asc") == 0) { + range_.sort = Asc; + } else if (strcasecmp(argv_[pos].c_str(), "desc") == 0) { + range_.sort = Desc; + } else { + res_.SetRes(CmdRes::kSyntaxErr); + return; + } + pos++; + } + if (range_.store && (range_.withdist || range_.withcoord || range_.withhash)) { + res_.SetRes(CmdRes::kErrOther, + "STORE option in GEORADIUS is not compatible with WITHDIST, WITHHASH and WITHCOORDS options"); + return; + } +} + +void GeoRadiusByMemberCmd::Do() { + double score = 0.0; + rocksdb::Status s = db_->storage()->ZScore(key_, range_.member, &score); + if (s.IsNotFound() && !s.ToString().compare("NotFound: Invalid member")) { + res_.SetRes(CmdRes::kErrOther, "could not decode requested zset member"); + return; + } + if (s.ok()) { + double xy[2]; + GeoHashBits hash = {.bits = static_cast(score), .step = GEO_STEP_MAX}; + geohashDecodeToLongLatWGS84(hash, xy); + range_.longitude = xy[0]; + range_.latitude = xy[1]; + } + GetAllNeighbors(db_, key_, range_, this->res_); +} diff --git a/tools/pika_migrate/src/pika_geohash.cc b/tools/pika_migrate/src/pika_geohash.cc new file mode 100644 index 0000000000..a59d0cf1cb --- /dev/null +++ b/tools/pika_migrate/src/pika_geohash.cc @@ -0,0 +1,287 @@ +/* + * Copyright (c) 2013-2014, yinqiwen + * Copyright (c) 2014, Matt Stancliff . + * Copyright (c) 2015-2016, Salvatore Sanfilippo . + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * * Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of Redis nor the names of its contributors may be used + * to endorse or promote products derived from this software without + * specific prior written permission. 
+ * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS + * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF + * THE POSSIBILITY OF SUCH DAMAGE. + */ +#include "include/pika_geohash.h" + +/** + * Hashing works like this: + * Divide the world into 4 buckets. Label each one as such: + * ----------------- + * | | | + * | | | + * | 0,1 | 1,1 | + * ----------------- + * | | | + * | | | + * | 0,0 | 1,0 | + * ----------------- + */ + +/* Interleave lower bits of x and y, so the bits of x + * are in the even positions and bits from y in the odd; + * x and y must initially be less than 2**32 (65536). + * From: https://graphics.stanford.edu/~seander/bithacks.html#InterleaveBMN + */ +static inline uint64_t interleave64(uint32_t xlo, uint32_t ylo) { + static const uint64_t B[] = {0x5555555555555555ULL, 0x3333333333333333ULL, 0x0F0F0F0F0F0F0F0FULL, + 0x00FF00FF00FF00FFULL, 0x0000FFFF0000FFFFULL}; + static const unsigned int S[] = {1, 2, 4, 8, 16}; + + uint64_t x = xlo; + uint64_t y = ylo; + + x = (x | (x << S[4])) & B[4]; + y = (y | (y << S[4])) & B[4]; + + x = (x | (x << S[3])) & B[3]; + y = (y | (y << S[3])) & B[3]; + + x = (x | (x << S[2])) & B[2]; + y = (y | (y << S[2])) & B[2]; + + x = (x | (x << S[1])) & B[1]; + y = (y | (y << S[1])) & B[1]; + + x = (x | (x << S[0])) & B[0]; + y = (y | (y << S[0])) & B[0]; + + return x | (y << 1); +} + +/* reverse the interleave process + * derived from http://stackoverflow.com/questions/4909263 + */ +static inline uint64_t deinterleave64(uint64_t interleaved) { + static const uint64_t B[] = {0x5555555555555555ULL, 0x3333333333333333ULL, 0x0F0F0F0F0F0F0F0FULL, + 0x00FF00FF00FF00FFULL, 0x0000FFFF0000FFFFULL, 0x00000000FFFFFFFFULL}; + static const unsigned int S[] = {0, 1, 2, 4, 8, 16}; + + uint64_t x = interleaved; + uint64_t y = interleaved >> 1; + + x = (x | (x >> S[0])) & B[0]; + y = (y | (y >> S[0])) & B[0]; + + x = (x | (x >> S[1])) & B[1]; + y = (y | (y >> S[1])) & B[1]; + + x = (x | (x >> S[2])) & B[2]; + y = (y | (y >> S[2])) & B[2]; + + x = (x | (x >> S[3])) & B[3]; + y = (y | (y >> S[3])) & B[3]; + + x = (x | (x >> S[4])) & B[4]; + y = (y | (y >> S[4])) & B[4]; + + x = (x | (x >> S[5])) & B[5]; + y = (y | (y >> S[5])) & B[5]; + + return x | (y << 32); +} + +void geohashGetCoordRange(GeoHashRange* long_range, GeoHashRange* lat_range) { + /* These are constraints from EPSG:900913 / EPSG:3785 / OSGEO:41001 */ + /* We can't geocode at the north/south pole. */ + long_range->max = GEO_LONG_MAX; + long_range->min = GEO_LONG_MIN; + lat_range->max = GEO_LAT_MAX; + lat_range->min = GEO_LAT_MIN; +} + +int geohashEncode(const GeoHashRange* long_range, const GeoHashRange* lat_range, double longitude, double latitude, + uint8_t step, GeoHashBits* hash) { + /* Check basic arguments sanity. 
*/ + if (!hash || step > 32 || step == 0 || RANGEPISZERO(lat_range) || RANGEPISZERO(long_range)) { + return 0; + } + + /* Return an error when trying to index outside the supported + * constraints. */ + if (longitude > 180 || longitude < -180 || latitude > 85.05112878 || latitude < -85.05112878) { + return 0; + } + + hash->bits = 0; + hash->step = step; + + if (latitude < lat_range->min || latitude > lat_range->max || longitude < long_range->min || + longitude > long_range->max) { + return 0; + } + + double lat_offset = (latitude - lat_range->min) / (lat_range->max - lat_range->min); + double long_offset = (longitude - long_range->min) / (long_range->max - long_range->min); + + /* convert to fixed point based on the step size */ + auto lat_offset_step = static_cast(lat_offset * static_cast(1ULL << step)); + auto long_offset_step = static_cast(long_offset * static_cast(1ULL << step)); + hash->bits = interleave64(lat_offset_step, long_offset_step); + return 1; +} + +int geohashEncodeType(double longitude, double latitude, uint8_t step, GeoHashBits* hash) { + GeoHashRange r[2] = {{0}}; + geohashGetCoordRange(&r[0], &r[1]); + return geohashEncode(&r[0], &r[1], longitude, latitude, step, hash); +} + +int geohashEncodeWGS84(double longitude, double latitude, uint8_t step, GeoHashBits* hash) { + return geohashEncodeType(longitude, latitude, step, hash); +} + +int geohashDecode(const GeoHashRange long_range, const GeoHashRange lat_range, const GeoHashBits hash, + GeoHashArea* area) { + if (HASHISZERO(hash) || nullptr == area || RANGEISZERO(lat_range) || RANGEISZERO(long_range)) { + return 0; + } + + area->hash = hash; + uint8_t step = hash.step; + uint64_t hash_sep = deinterleave64(hash.bits); /* hash = [LAT][LONG] */ + + double lat_scale = lat_range.max - lat_range.min; + double long_scale = long_range.max - long_range.min; + + uint32_t ilato = hash_sep; /* get lat part of deinterleaved hash */ + uint32_t ilono = hash_sep >> 32; /* shift over to get long part of hash */ + + /* divide by 2**step. + * Then, for 0-1 coordinate, multiply times scale and add + to the min to get the absolute coordinate. 
*/ + area->latitude.min = lat_range.min + (ilato * 1.0 / static_cast(1ULL << step)) * lat_scale; + area->latitude.max = lat_range.min + ((ilato + 1) * 1.0 / static_cast(1ULL << step)) * lat_scale; + area->longitude.min = long_range.min + (ilono * 1.0 / static_cast(1ULL << step)) * long_scale; + area->longitude.max = long_range.min + ((ilono + 1) * 1.0 / static_cast(1ULL << step)) * long_scale; + + return 1; +} + +int geohashDecodeType(const GeoHashBits hash, GeoHashArea* area) { + GeoHashRange r[2] = {{0}}; + geohashGetCoordRange(&r[0], &r[1]); + return geohashDecode(r[0], r[1], hash, area); +} + +int geohashDecodeWGS84(const GeoHashBits hash, GeoHashArea* area) { return geohashDecodeType(hash, area); } + +int geohashDecodeAreaToLongLat(const GeoHashArea* area, double* xy) { + if (!xy) { + return 0; + } + xy[0] = (area->longitude.min + area->longitude.max) / 2; + xy[1] = (area->latitude.min + area->latitude.max) / 2; + return 1; +} + +int geohashDecodeToLongLatType(const GeoHashBits hash, double* xy) { + GeoHashArea area = {{0}}; + if (!xy || !(geohashDecodeType(hash, &area))) { + return 0; + } + return geohashDecodeAreaToLongLat(&area, xy); +} + +int geohashDecodeToLongLatWGS84(const GeoHashBits hash, double* xy) { return geohashDecodeToLongLatType(hash, xy); } + +static void geohash_move_x(GeoHashBits* hash, int8_t d) { + if (d == 0) { + return; + } + + uint64_t x = hash->bits & 0xaaaaaaaaaaaaaaaaULL; + uint64_t y = hash->bits & 0x5555555555555555ULL; + + uint64_t zz = 0x5555555555555555ULL >> (64 - hash->step * 2); + + if (d > 0) { + x = x + (zz + 1); + } else { + x = x | zz; + x = x - (zz + 1); + } + + x &= (0xaaaaaaaaaaaaaaaaULL >> (64 - hash->step * 2)); + hash->bits = (x | y); +} + +static void geohash_move_y(GeoHashBits* hash, int8_t d) { + if (d == 0) { + return; + } + + uint64_t x = hash->bits & 0xaaaaaaaaaaaaaaaaULL; + uint64_t y = hash->bits & 0x5555555555555555ULL; + + uint64_t zz = 0xaaaaaaaaaaaaaaaaULL >> (64 - hash->step * 2); + if (d > 0) { + y = y + (zz + 1); + } else { + y = y | zz; + y = y - (zz + 1); + } + y &= (0x5555555555555555ULL >> (64 - hash->step * 2)); + hash->bits = (x | y); +} + +void geohashNeighbors(const GeoHashBits* hash, GeoHashNeighbors* neighbors) { + neighbors->east = *hash; + neighbors->west = *hash; + neighbors->north = *hash; + neighbors->south = *hash; + neighbors->south_east = *hash; + neighbors->south_west = *hash; + neighbors->north_east = *hash; + neighbors->north_west = *hash; + + geohash_move_x(&neighbors->east, 1); + geohash_move_y(&neighbors->east, 0); + + geohash_move_x(&neighbors->west, -1); + geohash_move_y(&neighbors->west, 0); + + geohash_move_x(&neighbors->south, 0); + geohash_move_y(&neighbors->south, -1); + + geohash_move_x(&neighbors->north, 0); + geohash_move_y(&neighbors->north, 1); + + geohash_move_x(&neighbors->north_west, -1); + geohash_move_y(&neighbors->north_west, 1); + + geohash_move_x(&neighbors->north_east, 1); + geohash_move_y(&neighbors->north_east, 1); + + geohash_move_x(&neighbors->south_east, 1); + geohash_move_y(&neighbors->south_east, -1); + + geohash_move_x(&neighbors->south_west, -1); + geohash_move_y(&neighbors->south_west, -1); +} diff --git a/tools/pika_migrate/src/pika_geohash_helper.cc b/tools/pika_migrate/src/pika_geohash_helper.cc new file mode 100644 index 0000000000..bc671de7dc --- /dev/null +++ b/tools/pika_migrate/src/pika_geohash_helper.cc @@ -0,0 +1,265 @@ +/* + * Copyright (c) 2013-2014, yinqiwen + * Copyright (c) 2014, Matt Stancliff . + * Copyright (c) 2015-2016, Salvatore Sanfilippo . 
+ * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * * Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of Redis nor the names of its contributors may be used + * to endorse or promote products derived from this software without + * specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS + * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF + * THE POSSIBILITY OF SUCH DAMAGE. + */ + +/* This is a C++ to C conversion from the ardb project. + * This file started out as: + * https://github.com/yinqiwen/ardb/blob/d42503/src/geo/geohash_helper.cpp + */ + +// #include "fmacros.h" +#include "include/pika_geohash_helper.h" +// #include "debugmacro.h" +#include +#define D_R (M_PI / 180.0) +#define R_MAJOR 6378137.0 +#define R_MINOR 6356752.3142 +#define RATIO (R_MINOR / R_MAJOR) +#define ECCENT (sqrt(1.0 - (RATIO * RATIO))) +#define COM (0.5 * ECCENT) + +/// @brief The usual PI/180 constant +const double DEG_TO_RAD = 0.017453292519943295769236907684886; +/// @brief Earth's quatratic mean radius for WGS-84 +const double EARTH_RADIUS_IN_METERS = 6372797.560856; + +const double MERCATOR_MAX = 20037726.37; +const double MERCATOR_MIN = -20037726.37; + +static inline double deg_rad(double ang) { return ang * D_R; } +static inline double rad_deg(double ang) { return ang / D_R; } + +/* This function is used in order to estimate the step (bits precision) + * of the 9 search area boxes during radius queries. */ +uint8_t geohashEstimateStepsByRadius(double range_meters, double lat) { + if (range_meters == 0) { + return 26; + } + int step = 1; + while (range_meters < MERCATOR_MAX) { + range_meters *= 2; + step++; + } + step -= 2; /* Make sure range is included in most of the base cases. */ + + /* Wider range torwards the poles... Note: it is possible to do better + * than this approximation by computing the distance between meridians + * at this latitude, but this does the trick for now. */ + if (lat > 66 || lat < -66) { + step--; + if (lat > 80 || lat < -80) { + step--; + } + } + /* Frame to valid range. */ + if (step < 1) { + step = 1; + } + if (step > 26) { + step = 26; + } + return step; +} + +/* Return the bounding box of the search area centered at latitude,longitude + * having a radius of radius_meter. bounds[0] - bounds[2] is the minimum + * and maxium longitude, while bounds[1] - bounds[3] is the minimum and + * maximum latitude. 
+ * + * This function does not behave correctly with very large radius values, for + * instance for the coordinates 81.634948934258375 30.561509253718668 and a + * radius of 7083 kilometers, it reports as bounding boxes: + * + * min_lon 7.680495, min_lat -33.119473, max_lon 155.589402, max_lat 94.242491 + * + * However, for instance, a min_lon of 7.680495 is not correct, because the + * point -1.27579540014266968 61.33421815228281559 is at less than 7000 + * kilometers away. + * + * Since this function is currently only used as an optimization, the + * optimization is not used for very big radiuses, however the function + * should be fixed. */ +int geohashBoundingBox(double longitude, double latitude, double radius_meters, double* bounds) { + if (!bounds) { + return 0; + } + double height = radius_meters; + double width = radius_meters; + + const double lat_delta = rad_deg(height/EARTH_RADIUS_IN_METERS); + const double long_delta_top = rad_deg(width/EARTH_RADIUS_IN_METERS/cos(deg_rad(latitude+lat_delta))); + const double long_delta_bottom = rad_deg(width/EARTH_RADIUS_IN_METERS/cos(deg_rad(latitude-lat_delta))); + + int southern_hemisphere = latitude < 0 ? 1 : 0; + bounds[0] = southern_hemisphere ? longitude-long_delta_bottom : longitude-long_delta_top; + bounds[2] = southern_hemisphere ? longitude+long_delta_bottom : longitude+long_delta_top; + bounds[1] = latitude - lat_delta; + bounds[3] = latitude + lat_delta; + + return 1; +} + +/* Return a set of areas (center + 8) that are able to cover a range query + * for the specified position and radius. */ +GeoHashRadius geohashGetAreasByRadius(double longitude, double latitude, double radius_meters) { + GeoHashRange long_range; + GeoHashRange lat_range; + GeoHashRadius radius; + GeoHashBits hash; + GeoHashNeighbors neighbors; + GeoHashArea area; + double min_lon; + double max_lon; + double min_lat; + double max_lat; + double bounds[4]; + int steps; + + geohashBoundingBox(longitude, latitude, radius_meters, bounds); + min_lon = bounds[0]; + min_lat = bounds[1]; + max_lon = bounds[2]; + max_lat = bounds[3]; + steps = geohashEstimateStepsByRadius(radius_meters, latitude); + + geohashGetCoordRange(&long_range, &lat_range); + geohashEncode(&long_range, &lat_range, longitude, latitude, steps, &hash); + geohashNeighbors(&hash, &neighbors); + geohashDecode(long_range, lat_range, hash, &area); + /* Check if the step is enough at the limits of the covered area. + * Sometimes when the search area is near an edge of the + * area, the estimated step is not small enough, since one of the + * north / south / west / east square is too near to the search area + * to cover everything. 
+   */
+  int decrease_step = 0;
+  {
+    GeoHashArea north;
+    GeoHashArea south;
+    GeoHashArea east;
+    GeoHashArea west;
+
+    geohashDecode(long_range, lat_range, neighbors.north, &north);
+    geohashDecode(long_range, lat_range, neighbors.south, &south);
+    geohashDecode(long_range, lat_range, neighbors.east, &east);
+    geohashDecode(long_range, lat_range, neighbors.west, &west);
+
+    if (north.latitude.max < max_lat) {
+      decrease_step = 1;
+    }
+    if (south.latitude.min > min_lat) {
+      decrease_step = 1;
+    }
+    if (east.longitude.max < max_lon) {
+      decrease_step = 1;
+    }
+    if (west.longitude.min > min_lon) {
+      decrease_step = 1;
+    }
+  }
+  if (steps > 1 && (decrease_step != 0)) {
+    steps--;
+    geohashEncode(&long_range, &lat_range, longitude, latitude, steps, &hash);
+    geohashNeighbors(&hash, &neighbors);
+    geohashDecode(long_range, lat_range, hash, &area);
+  }
+
+  /* Exclude the search areas that are useless. */
+  if (steps >= 2) {
+    if (area.latitude.min < min_lat) {
+      GZERO(neighbors.south);
+      GZERO(neighbors.south_west);
+      GZERO(neighbors.south_east);
+    }
+    if (area.latitude.max > max_lat) {
+      GZERO(neighbors.north);
+      GZERO(neighbors.north_east);
+      GZERO(neighbors.north_west);
+    }
+    if (area.longitude.min < min_lon) {
+      GZERO(neighbors.west);
+      GZERO(neighbors.south_west);
+      GZERO(neighbors.north_west);
+    }
+    if (area.longitude.max > max_lon) {
+      GZERO(neighbors.east);
+      GZERO(neighbors.south_east);
+      GZERO(neighbors.north_east);
+    }
+  }
+  radius.hash = hash;
+  radius.neighbors = neighbors;
+  radius.area = area;
+  return radius;
+}
+
+GeoHashRadius geohashGetAreasByRadiusWGS84(double longitude, double latitude, double radius_meters) {
+  return geohashGetAreasByRadius(longitude, latitude, radius_meters);
+}
+
+GeoHashFix52Bits geohashAlign52Bits(const GeoHashBits& hash) {
+  uint64_t bits = hash.bits;
+  bits <<= (52 - hash.step * 2);
+  return bits;
+}
+/* Calculate distance using a simplified haversine great circle distance formula.
+ * When the longitude difference is 0, the asin(sqrt(a)) of the haversine is
+ * asin(sin(abs(u))). arcsin(sin(x)) equals x when x ∈ [−π/2, π/2]. Since the
+ * latitude is within [−π/2, π/2], we can simplify arcsin(sin(x)) to x.
+ */
+double geohashGetLatDistance(double lat1d, double lat2d) {
+  return EARTH_RADIUS_IN_METERS * fabs(deg_rad(lat2d) - deg_rad(lat1d));
+}
+/* Calculate distance using haversine great circle distance formula.
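+ * Restated for reference, with φ1/φ2 the latitudes and Δλ the longitude
+ * difference in radians (matching u and v in the code below):
+ *
+ *   a = sin²(Δφ/2) + cos(φ1) · cos(φ2) · sin²(Δλ/2)
+ *   d = 2 · EARTH_RADIUS_IN_METERS · asin(sqrt(a))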
+ */
+double geohashGetDistance(double lon1d, double lat1d, double lon2d, double lat2d) {
+  double lat1r, lon1r, lat2r, lon2r, u, v, a;
+  lon1r = deg_rad(lon1d);
+  lon2r = deg_rad(lon2d);
+  v = sin((lon2r - lon1r) / 2);
+  /* if v == 0 we can avoid doing expensive math when lons are practically the same */
+  if (v == 0.0)
+    return geohashGetLatDistance(lat1d, lat2d);
+  lat1r = deg_rad(lat1d);
+  lat2r = deg_rad(lat2d);
+  u = sin((lat2r - lat1r) / 2);
+  a = u * u + cos(lat1r) * cos(lat2r) * v * v;
+  return 2.0 * EARTH_RADIUS_IN_METERS * asin(sqrt(a));
+}
+
+int geohashGetDistanceIfInRadius(double x1, double y1, double x2, double y2, double radius, double* distance) {
+  *distance = geohashGetDistance(x1, y1, x2, y2);
+  if (*distance > radius) {
+    return 0;
+  }
+  return 1;
+}
+
+int geohashGetDistanceIfInRadiusWGS84(double x1, double y1, double x2, double y2, double radius, double* distance) {
+  return geohashGetDistanceIfInRadius(x1, y1, x2, y2, radius, distance);
+}
diff --git a/tools/pika_migrate/src/pika_hash.cc b/tools/pika_migrate/src/pika_hash.cc
new file mode 100644
index 0000000000..d1b7498bcb
--- /dev/null
+++ b/tools/pika_migrate/src/pika_hash.cc
@@ -0,0 +1,892 @@
+// Copyright (c) 2015-present, Qihoo, Inc. All rights reserved.
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree. An additional grant
+// of patent rights can be found in the PATENTS file in the same directory.
+
+#include "include/pika_hash.h"
+
+#include "pstd/include/pstd_string.h"
+
+#include "include/pika_conf.h"
+#include "include/pika_slot_command.h"
+#include "include/pika_cache.h"
+
+extern std::unique_ptr<PikaConf> g_pika_conf;
+
+void HDelCmd::DoInitial() {
+  if (!CheckArg(argv_.size())) {
+    res_.SetRes(CmdRes::kWrongNum, kCmdNameHDel);
+    return;
+  }
+  key_ = argv_[1];
+  auto iter = argv_.begin();
+  iter++;
+  iter++;
+  fields_.assign(iter, argv_.end());
+}
+
+void HDelCmd::Do() {
+  s_ = db_->storage()->HDel(key_, fields_, &deleted_);
+
+  if (s_.ok() || s_.IsNotFound()) {
+    res_.AppendInteger(deleted_);
+  } else if (s_.IsInvalidArgument()) {
+    res_.SetRes(CmdRes::kMultiKey);
+  } else {
+    res_.SetRes(CmdRes::kErrOther, s_.ToString());
+  }
+}
+
+void HDelCmd::DoThroughDB() {
+  Do();
+}
+
+void HDelCmd::DoUpdateCache() {
+  if (s_.ok() && deleted_ > 0) {
+    db_->cache()->HDel(key_, fields_);
+  }
+}
+
+void HSetCmd::DoInitial() {
+  if (!CheckArg(argv_.size())) {
+    res_.SetRes(CmdRes::kWrongNum, kCmdNameHSet);
+    return;
+  }
+  key_ = argv_[1];
+  field_ = argv_[2];
+  value_ = argv_[3];
+}
+
+void HSetCmd::Do() {
+  int32_t ret = 0;
+  s_ = db_->storage()->HSet(key_, field_, value_, &ret);
+  if (s_.ok()) {
+    res_.AppendContent(":" + std::to_string(ret));
+    AddSlotKey("h", key_, db_);
+  } else if (s_.IsInvalidArgument()) {
+    res_.SetRes(CmdRes::kMultiKey);
+  } else {
+    res_.SetRes(CmdRes::kErrOther, s_.ToString());
+  }
+}
+
+void HSetCmd::DoThroughDB() {
+  Do();
+}
+
+void HSetCmd::DoUpdateCache() {
+  // HSetIfKeyExist() avoids storing a large key; checking IsTooLargeKey() first speeds this up
+  if (IsTooLargeKey(g_pika_conf->max_key_size_in_cache())) {
+    return;
+  }
+  if (s_.ok()) {
+    db_->cache()->HSetIfKeyExist(key_, field_, value_);
+  }
+}
+
+void HGetCmd::DoInitial() {
+  if (!CheckArg(argv_.size())) {
+    res_.SetRes(CmdRes::kWrongNum, kCmdNameHGet);
+    return;
+  }
+  key_ = argv_[1];
+  field_ = argv_[2];
+}
+
+void HGetCmd::Do() {
+  std::string value;
+  s_ = db_->storage()->HGet(key_, field_, &value);
+  if (s_.ok()) {
+    res_.AppendStringLenUint64(value.size());
+    res_.AppendContent(value);
+  } else if (s_.IsInvalidArgument()) {
+    res_.SetRes(CmdRes::kMultiKey);
+  } else if (s_.IsNotFound()) {
+    res_.AppendContent("$-1");
+  } else {
+    res_.SetRes(CmdRes::kErrOther, s_.ToString());
+  }
+}
+
+void HGetCmd::ReadCache() {
+  std::string value;
+  auto s = db_->cache()->HGet(key_, field_, &value);
+  if (s.ok()) {
+    res_.AppendStringLen(value.size());
+    res_.AppendContent(value);
+  } else if (s.IsNotFound()) {
+    res_.SetRes(CmdRes::kCacheMiss);
+  } else {
+    res_.SetRes(CmdRes::kErrOther, s.ToString());
+  }
+}
+
+void HGetCmd::DoThroughDB() {
+  res_.clear();
+  Do();
+}
+
+void HGetCmd::DoUpdateCache() {
+  if (IsTooLargeKey(g_pika_conf->max_key_size_in_cache())) {
+    return;
+  }
+  if (s_.ok()) {
+    db_->cache()->PushKeyToAsyncLoadQueue(PIKA_KEY_TYPE_HASH, key_, db_);
+  }
+}
+
+void HGetallCmd::DoInitial() {
+  if (!CheckArg(argv_.size())) {
+    res_.SetRes(CmdRes::kWrongNum, kCmdNameHGetall);
+    return;
+  }
+  key_ = argv_[1];
+}
+
+void HGetallCmd::Do() {
+  int64_t total_fv = 0;
+  int64_t cursor = 0;
+  int64_t next_cursor = 0;
+  size_t raw_limit = g_pika_conf->max_client_response_size();
+  std::string raw;
+  std::vector<storage::FieldValue> fvs;
+
+  do {
+    fvs.clear();
+    s_ = db_->storage()->HScan(key_, cursor, "*", PIKA_SCAN_STEP_LENGTH, &fvs, &next_cursor);
+    if (!s_.ok()) {
+      raw.clear();
+      total_fv = 0;
+      break;
+    } else {
+      for (const auto& fv : fvs) {
+        RedisAppendLenUint64(raw, fv.field.size(), "$");
+        RedisAppendContent(raw, fv.field);
+        RedisAppendLenUint64(raw, fv.value.size(), "$");
+        RedisAppendContent(raw, fv.value);
+      }
+      if (raw.size() >= raw_limit) {
+        res_.SetRes(CmdRes::kErrOther, "Response exceeds the max-client-response-size limit");
+        return;
+      }
+      total_fv += static_cast<int64_t>(fvs.size());
+      cursor = next_cursor;
+    }
+  } while (cursor != 0);
+
+  if (s_.ok() || s_.IsNotFound()) {
+    res_.AppendArrayLen(total_fv * 2);
+    res_.AppendStringRaw(raw);
+  } else {
+    res_.SetRes(CmdRes::kErrOther, s_.ToString());
+  }
+}
+
+void HGetallCmd::ReadCache() {
+  std::vector<storage::FieldValue> fvs;
+  auto s = db_->cache()->HGetall(key_, &fvs);
+  if (s.ok()) {
+    res_.AppendArrayLen(fvs.size() * 2);
+    for (const auto& fv : fvs) {
+      res_.AppendStringLen(fv.field.size());
+      res_.AppendContent(fv.field);
+      res_.AppendStringLen(fv.value.size());
+      res_.AppendContent(fv.value);
+    }
+  } else if (s.IsNotFound()) {
+    res_.SetRes(CmdRes::kCacheMiss);
+  } else {
+    res_.SetRes(CmdRes::kErrOther, s.ToString());
+  }
+}
+
+void HGetallCmd::DoThroughDB() {
+  res_.clear();
+  Do();
+}
+
+void HGetallCmd::DoUpdateCache() {
+  if (s_.ok()) {
+    db_->cache()->PushKeyToAsyncLoadQueue(PIKA_KEY_TYPE_HASH, key_, db_);
+  }
+}
+
+void HExistsCmd::DoInitial() {
+  if (!CheckArg(argv_.size())) {
+    res_.SetRes(CmdRes::kWrongNum, kCmdNameHExists);
+    return;
+  }
+  key_ = argv_[1];
+  field_ = argv_[2];
+}
+
+void HExistsCmd::Do() {
+  s_ = db_->storage()->HExists(key_, field_);
+  if (s_.ok()) {
+    res_.AppendContent(":1");
+  } else if (s_.IsInvalidArgument()) {
+    res_.SetRes(CmdRes::kMultiKey);
+  } else if (s_.IsNotFound()) {
+    res_.AppendContent(":0");
+  } else {
+    res_.SetRes(CmdRes::kErrOther, s_.ToString());
+  }
+}
+
+void HExistsCmd::ReadCache() {
+  auto s = db_->cache()->HExists(key_, field_);
+  if (s.ok()) {
+    res_.AppendContent(":1");
+  } else if (s.IsNotFound()) {
+    res_.SetRes(CmdRes::kCacheMiss);
+  } else {
+    res_.SetRes(CmdRes::kErrOther, s.ToString());
+  }
+}
+
+void HExistsCmd::DoThroughDB() {
+  res_.clear();
+  Do();
+}
+
+void HExistsCmd::DoUpdateCache() {
+  if (s_.ok()) {
+    db_->cache()->PushKeyToAsyncLoadQueue(PIKA_KEY_TYPE_HASH, key_, db_);
+  }
+}
+
+void HIncrbyCmd::DoInitial() {
+  if (!CheckArg(argv_.size())) {
+    res_.SetRes(CmdRes::kWrongNum, kCmdNameHIncrby);
+    return;
+  }
+  key_ = argv_[1];
+  field_ = argv_[2];
+  if (argv_[3].find(' ') != std::string::npos || (pstd::string2int(argv_[3].data(), argv_[3].size(), &by_) == 0)) {
+    res_.SetRes(CmdRes::kInvalidInt);
+    return;
+  }
+}
+
+void HIncrbyCmd::Do() {
+  int64_t new_value = 0;
+  s_ = db_->storage()->HIncrby(key_, field_, by_, &new_value);
+  if (s_.ok() || s_.IsNotFound()) {
+    res_.AppendContent(":" + std::to_string(new_value));
+    AddSlotKey("h", key_, db_);
+  } else if (s_.IsInvalidArgument() && s_.ToString().substr(0, std::char_traits<char>::length(ErrTypeMessage)) == ErrTypeMessage) {
+    res_.SetRes(CmdRes::kMultiKey);
+  } else if (s_.IsCorruption() && s_.ToString() == "Corruption: hash value is not an integer") {
+    res_.SetRes(CmdRes::kInvalidInt);
+  } else if (s_.IsInvalidArgument()) {
+    res_.SetRes(CmdRes::kOverFlow);
+  } else {
+    res_.SetRes(CmdRes::kErrOther, s_.ToString());
+  }
+}
+
+void HIncrbyCmd::DoThroughDB() {
+  Do();
+}
+
+void HIncrbyCmd::DoUpdateCache() {
+  if (s_.ok()) {
+    db_->cache()->HIncrbyxx(key_, field_, by_);
+  }
+}
+
+void HIncrbyfloatCmd::DoInitial() {
+  if (!CheckArg(argv_.size())) {
+    res_.SetRes(CmdRes::kWrongNum, kCmdNameHIncrbyfloat);
+    return;
+  }
+  key_ = argv_[1];
+  field_ = argv_[2];
+  by_ = argv_[3];
+}
+
+void HIncrbyfloatCmd::Do() {
+  std::string new_value;
+  s_ = db_->storage()->HIncrbyfloat(key_, field_, by_, &new_value);
+  if (s_.ok()) {
+    res_.AppendStringLenUint64(new_value.size());
+    res_.AppendContent(new_value);
+    AddSlotKey("h", key_, db_);
+  } else if (s_.IsInvalidArgument() && s_.ToString().substr(0, std::char_traits<char>::length(ErrTypeMessage)) == ErrTypeMessage) {
+    res_.SetRes(CmdRes::kMultiKey);
+  } else if (s_.IsCorruption() && s_.ToString() == "Corruption: value is not a vaild float") {
+    res_.SetRes(CmdRes::kInvalidFloat);
+  } else if (s_.IsInvalidArgument()) {
+    res_.SetRes(CmdRes::kOverFlow);
+  } else {
+    res_.SetRes(CmdRes::kErrOther, s_.ToString());
+  }
+}
+
+void HIncrbyfloatCmd::DoThroughDB() {
+  Do();
+}
+
+void HIncrbyfloatCmd::DoUpdateCache() {
+  if (s_.ok()) {
+    long double long_double_by;
+    if (storage::StrToLongDouble(by_.data(), by_.size(), &long_double_by) != -1) {
+      db_->cache()->HIncrbyfloatxx(key_, field_, long_double_by);
+    }
+  }
+}
+
+void HKeysCmd::DoInitial() {
+  if (!CheckArg(argv_.size())) {
+    res_.SetRes(CmdRes::kWrongNum, kCmdNameHKeys);
+    return;
+  }
+  key_ = argv_[1];
+}
+
+void HKeysCmd::Do() {
+  std::vector<std::string> fields;
+  s_ = db_->storage()->HKeys(key_, &fields);
+  if (s_.ok() || s_.IsNotFound()) {
+    res_.AppendArrayLenUint64(fields.size());
+    for (const auto& field : fields) {
+      res_.AppendString(field);
+    }
+  } else if (s_.IsInvalidArgument()) {
+    res_.SetRes(CmdRes::kMultiKey);
+  } else {
+    res_.SetRes(CmdRes::kErrOther, s_.ToString());
+  }
+}
+
+void HKeysCmd::ReadCache() {
+  std::vector<std::string> fields;
+  auto s = db_->cache()->HKeys(key_, &fields);
+  if (s.ok()) {
+    res_.AppendArrayLen(fields.size());
+    for (const auto& field : fields) {
+      res_.AppendString(field);
+    }
+  } else if (s.IsNotFound()) {
+    res_.SetRes(CmdRes::kCacheMiss);
+  } else {
+    res_.SetRes(CmdRes::kErrOther, s.ToString());
+  }
+}
+
+void HKeysCmd::DoThroughDB() {
+  res_.clear();
+  Do();
+}
+
+void HKeysCmd::DoUpdateCache() {
+  if (s_.ok()) {
+    db_->cache()->PushKeyToAsyncLoadQueue(PIKA_KEY_TYPE_HASH, key_, db_);
+  }
+}
+
+void HLenCmd::DoInitial() {
+  if (!CheckArg(argv_.size())) {
+    res_.SetRes(CmdRes::kWrongNum, kCmdNameHLen);
+    return;
+  }
+  key_ = argv_[1];
+}
+
+void HLenCmd::Do() {
+  int32_t len = 0;
+  s_ = db_->storage()->HLen(key_, &len);
+  if (s_.ok() || s_.IsNotFound()) {
+    res_.AppendInteger(len);
+  } else if (s_.IsInvalidArgument()) {
+    res_.SetRes(CmdRes::kMultiKey);
+  } else {
+    res_.SetRes(CmdRes::kErrOther, "something wrong in hlen");
+  }
+}
+
+void HLenCmd::ReadCache() {
+  uint64_t len = 0;
+  auto s = db_->cache()->HLen(key_, &len);
+  if (s.ok()) {
+    res_.AppendInteger(len);
+  } else if (s.IsNotFound()) {
+    res_.SetRes(CmdRes::kCacheMiss);
+  } else {
+    res_.SetRes(CmdRes::kErrOther, "something wrong in hlen");
+  }
+}
+
+void HLenCmd::DoThroughDB() {
+  res_.clear();
+  Do();
+}
+
+void HLenCmd::DoUpdateCache() {
+  if (s_.ok()) {
+    db_->cache()->PushKeyToAsyncLoadQueue(PIKA_KEY_TYPE_HASH, key_, db_);
+  }
+}
+
+void HMgetCmd::DoInitial() {
+  if (!CheckArg(argv_.size())) {
+    res_.SetRes(CmdRes::kWrongNum, kCmdNameHMget);
+    return;
+  }
+  key_ = argv_[1];
+  auto iter = argv_.begin();
+  iter++;
+  iter++;
+  fields_.assign(iter, argv_.end());
+}
+
+void HMgetCmd::Do() {
+  std::vector<storage::ValueStatus> vss;
+  s_ = db_->storage()->HMGet(key_, fields_, &vss);
+  if (s_.ok() || s_.IsNotFound()) {
+    res_.AppendArrayLenUint64(vss.size());
+    for (const auto& vs : vss) {
+      if (vs.status.ok()) {
+        res_.AppendStringLenUint64(vs.value.size());
+        res_.AppendContent(vs.value);
+      } else {
+        res_.AppendContent("$-1");
+      }
+    }
+  } else if (s_.IsInvalidArgument()) {
+    res_.SetRes(CmdRes::kMultiKey);
+  } else {
+    res_.SetRes(CmdRes::kErrOther, s_.ToString());
+  }
+}
+
+void HMgetCmd::ReadCache() {
+  std::vector<storage::ValueStatus> vss;
+  auto s = db_->cache()->HMGet(key_, fields_, &vss);
+  if (s.ok()) {
+    res_.AppendArrayLen(vss.size());
+    for (const auto& vs : vss) {
+      if (vs.status.ok()) {
+        res_.AppendStringLen(vs.value.size());
+        res_.AppendContent(vs.value);
+      } else {
+        res_.AppendContent("$-1");
+      }
+    }
+  } else if (s.IsNotFound()) {
+    res_.SetRes(CmdRes::kCacheMiss);
+  } else {
+    res_.SetRes(CmdRes::kErrOther, s.ToString());
+  }
+}
+
+void HMgetCmd::DoThroughDB() {
+  res_.clear();
+  Do();
+}
+
+void HMgetCmd::DoUpdateCache() {
+  if (s_.ok()) {
+    db_->cache()->PushKeyToAsyncLoadQueue(PIKA_KEY_TYPE_HASH, key_, db_);
+  }
+}
+
+void HMsetCmd::DoInitial() {
+  if (!CheckArg(argv_.size())) {
+    res_.SetRes(CmdRes::kWrongNum, kCmdNameHMset);
+    return;
+  }
+  key_ = argv_[1];
+  size_t argc = argv_.size();
+  if (argc % 2 != 0) {
+    res_.SetRes(CmdRes::kWrongNum, kCmdNameHMset);
+    return;
+  }
+  size_t index = 2;
+  fvs_.clear();
+  for (; index < argc; index += 2) {
+    fvs_.push_back({argv_[index], argv_[index + 1]});
+  }
+}
+
+void HMsetCmd::Do() {
+  s_ = db_->storage()->HMSet(key_, fvs_);
+  if (s_.ok()) {
+    res_.SetRes(CmdRes::kOk);
+    AddSlotKey("h", key_, db_);
+  } else if (s_.IsInvalidArgument()) {
+    res_.SetRes(CmdRes::kMultiKey);
+  } else {
+    res_.SetRes(CmdRes::kErrOther, s_.ToString());
+  }
+}
+
+void HMsetCmd::DoThroughDB() {
+  Do();
+}
+
+void HMsetCmd::DoUpdateCache() {
+  if (s_.ok()) {
+    db_->cache()->HMSetxx(key_, fvs_);
+  }
+}
+
+void HSetnxCmd::DoInitial() {
+  if (!CheckArg(argv_.size())) {
+    res_.SetRes(CmdRes::kWrongNum, kCmdNameHSetnx);
+    return;
+  }
+  key_ = argv_[1];
+  field_ = argv_[2];
+  value_ = argv_[3];
+}
+
+void HSetnxCmd::Do() {
+  int32_t ret = 0;
+  s_ = db_->storage()->HSetnx(key_, field_, value_, &ret);
+  if (s_.ok()) {
+    res_.AppendContent(":" + std::to_string(ret));
+    AddSlotKey("h", key_, db_);
+  } else if (s_.IsInvalidArgument()) {
+    res_.SetRes(CmdRes::kMultiKey);
+  } else {
+    res_.SetRes(CmdRes::kErrOther, s_.ToString());
+  }
+}
+
+void HSetnxCmd::DoThroughDB() {
+  Do();
+}
+
+void HSetnxCmd::DoUpdateCache() {
+  if (s_.ok()) {
+    db_->cache()->HSetIfKeyExistAndFieldNotExist(key_, field_, value_);
+  }
+}
+
+void HStrlenCmd::DoInitial() {
+  if (!CheckArg(argv_.size())) {
+    res_.SetRes(CmdRes::kWrongNum, kCmdNameHStrlen);
+    return;
+  }
+  key_ = argv_[1];
+  field_ = argv_[2];
+}
+
+void HStrlenCmd::Do() {
+  int32_t len = 0;
+  s_ = db_->storage()->HStrlen(key_, field_, &len);
+  if (s_.ok() || s_.IsNotFound()) {
+    res_.AppendInteger(len);
+  } else if (s_.IsInvalidArgument()) {
+    res_.SetRes(CmdRes::kMultiKey);
+  } else {
+    res_.SetRes(CmdRes::kErrOther, "something wrong in hstrlen");
+  }
+}
+
+void HStrlenCmd::ReadCache() {
+  uint64_t len = 0;
+  auto s = db_->cache()->HStrlen(key_, field_, &len);
+  if (s.ok()) {
+    res_.AppendInteger(len);
+  } else if (s.IsNotFound()) {
+    res_.SetRes(CmdRes::kCacheMiss);
+  } else {
+    res_.SetRes(CmdRes::kErrOther, "something wrong in hstrlen");
+  }
+}
+
+void HStrlenCmd::DoThroughDB() {
+  res_.clear();
+  Do();
+}
+
+void HStrlenCmd::DoUpdateCache() {
+  if (s_.ok()) {
+    db_->cache()->PushKeyToAsyncLoadQueue(PIKA_KEY_TYPE_HASH, key_, db_);
+  }
+}
+
+void HValsCmd::DoInitial() {
+  if (!CheckArg(argv_.size())) {
+    res_.SetRes(CmdRes::kWrongNum, kCmdNameHVals);
+    return;
+  }
+  key_ = argv_[1];
+}
+
+void HValsCmd::Do() {
+  std::vector<std::string> values;
+  s_ = db_->storage()->HVals(key_, &values);
+  if (s_.ok() || s_.IsNotFound()) {
+    res_.AppendArrayLenUint64(values.size());
+    for (const auto& value : values) {
+      res_.AppendStringLenUint64(value.size());
+      res_.AppendContent(value);
+    }
+  } else if (s_.IsInvalidArgument()) {
+    res_.SetRes(CmdRes::kMultiKey);
+  } else {
+    res_.SetRes(CmdRes::kErrOther, s_.ToString());
+  }
+}
+
+void HValsCmd::ReadCache() {
+  std::vector<std::string> values;
+  auto s = db_->cache()->HVals(key_, &values);
+  if (s.ok()) {
+    res_.AppendArrayLen(values.size());
+    for (const auto& value : values) {
+      res_.AppendStringLen(value.size());
+      res_.AppendContent(value);
+    }
+  } else if (s.IsNotFound()) {
+    res_.SetRes(CmdRes::kCacheMiss);
+  } else {
+    res_.SetRes(CmdRes::kErrOther, s.ToString());
+  }
+}
+
+void HValsCmd::DoThroughDB() {
+  res_.clear();
+  Do();
+}
+
+void HValsCmd::DoUpdateCache() {
+  if (s_.ok()) {
+    db_->cache()->PushKeyToAsyncLoadQueue(PIKA_KEY_TYPE_HASH, key_, db_);
+  }
+}
+
+void HScanCmd::DoInitial() {
+  if (!CheckArg(argv_.size())) {
+    res_.SetRes(CmdRes::kWrongNum, kCmdNameHScan);
+    return;
+  }
+  key_ = argv_[1];
+  if (pstd::string2int(argv_[2].data(), argv_[2].size(), &cursor_) == 0) {
+    res_.SetRes(CmdRes::kInvalidInt);
+    return;
+  }
+  size_t index = 3;
+  size_t argc = argv_.size();
+
+  while (index < argc) {
+    std::string opt = argv_[index];
+    if ((strcasecmp(opt.data(), "match") == 0) || (strcasecmp(opt.data(), "count") == 0)) {
+      index++;
+      if (index >= argc) {
+        res_.SetRes(CmdRes::kSyntaxErr);
+        return;
+      }
+      if (strcasecmp(opt.data(), "match") == 0) {
+        pattern_ = argv_[index];
+      } else if (pstd::string2int(argv_[index].data(), argv_[index].size(), &count_) == 0) {
+        res_.SetRes(CmdRes::kInvalidInt);
+        return;
+      }
+    } else {
+      res_.SetRes(CmdRes::kSyntaxErr);
+      return;
+    }
+    index++;
+  }
+  if (count_ < 0) {
+    res_.SetRes(CmdRes::kSyntaxErr);
+    return;
+  }
+}
+
+void HScanCmd::Do() {
+  int64_t next_cursor = 0;
+  std::vector<storage::FieldValue> field_values;
+  auto s = db_->storage()->HScan(key_, cursor_, pattern_, count_, &field_values, &next_cursor);
+
+  if (s.ok() || s.IsNotFound()) {
+    res_.AppendContent("*2");
+    char buf[32];
+    int32_t len = pstd::ll2string(buf, sizeof(buf), next_cursor);
+    res_.AppendStringLen(len);
+    res_.AppendContent(buf);
+
+    res_.AppendArrayLenUint64(field_values.size() * 2);
+    for (const auto& field_value : field_values) {
+      res_.AppendString(field_value.field);
+      res_.AppendString(field_value.value);
+    }
+  } else if (s.IsInvalidArgument()) {
+    res_.SetRes(CmdRes::kMultiKey);
+  } else {
+    res_.SetRes(CmdRes::kErrOther, s.ToString());
+  }
+}
+
+void HScanxCmd::DoInitial() {
+  if (!CheckArg(argv_.size())) {
+    res_.SetRes(CmdRes::kWrongNum, kCmdNameHScan);
+    return;
+  }
+  key_ = argv_[1];
+  start_field_ = argv_[2];
+
+  size_t index = 3;
+  size_t argc = argv_.size();
+  while (index < argc) {
+    std::string opt = argv_[index];
+    if ((strcasecmp(opt.data(), "match") == 0) || (strcasecmp(opt.data(), "count") == 0)) {
+      index++;
+      if (index >= argc) {
+        res_.SetRes(CmdRes::kSyntaxErr);
+        return;
+      }
+      if (strcasecmp(opt.data(), "match") == 0) {
+        pattern_ = argv_[index];
+      } else if (pstd::string2int(argv_[index].data(), argv_[index].size(), &count_) == 0) {
+        res_.SetRes(CmdRes::kInvalidInt);
+        return;
+      }
+    } else {
+      res_.SetRes(CmdRes::kSyntaxErr);
+      return;
+    }
+    index++;
+  }
+  if (count_ < 0) {
+    res_.SetRes(CmdRes::kSyntaxErr);
+    return;
+  }
+}
+
+void HScanxCmd::Do() {
+  std::string next_field;
+  std::vector<storage::FieldValue> field_values;
+  rocksdb::Status s = db_->storage()->HScanx(key_, start_field_, pattern_, count_, &field_values, &next_field);
+
+  if (s.ok() || s.IsNotFound()) {
+    res_.AppendArrayLen(2);
+    res_.AppendStringLenUint64(next_field.size());
+    res_.AppendContent(next_field);
+
+    res_.AppendArrayLenUint64(2 * field_values.size());
+    for (const auto& field_value : field_values) {
+      res_.AppendString(field_value.field);
+      res_.AppendString(field_value.value);
+    }
+  } else if (s.IsInvalidArgument()) {
+    res_.SetRes(CmdRes::kMultiKey);
+  } else {
+    res_.SetRes(CmdRes::kErrOther, s.ToString());
+  }
+}
+
+void PKHScanRangeCmd::DoInitial() {
+  if (!CheckArg(argv_.size())) {
+    res_.SetRes(CmdRes::kWrongNum, kCmdNamePKHScanRange);
+    return;
+  }
+  key_ = argv_[1];
+  field_start_ = argv_[2];
+  field_end_ = argv_[3];
+
+  size_t index = 4;
+  size_t argc = argv_.size();
+  while (index < argc) {
+    std::string opt = argv_[index];
+    if ((strcasecmp(opt.data(), "match") == 0) || (strcasecmp(opt.data(), "limit") == 0)) {
+      index++;
+      if (index >= argc) {
+        res_.SetRes(CmdRes::kSyntaxErr);
+        return;
+      }
+      if (strcasecmp(opt.data(), "match") == 0) {
+        pattern_ = argv_[index];
+      } else if ((pstd::string2int(argv_[index].data(), argv_[index].size(), &limit_) == 0) || limit_ <= 0) {
+        res_.SetRes(CmdRes::kInvalidInt);
+        return;
+      }
+    } else {
+      res_.SetRes(CmdRes::kSyntaxErr);
+      return;
+    }
+    index++;
+  }
+}
+
+void PKHScanRangeCmd::Do() {
+  std::string next_field;
+  std::vector<storage::FieldValue> field_values;
+  rocksdb::Status s =
+      db_->storage()->PKHScanRange(key_, field_start_, field_end_, pattern_, static_cast<int32_t>(limit_), &field_values, &next_field);
+
+  if (s.ok() || s.IsNotFound()) {
+    res_.AppendArrayLen(2);
+    res_.AppendString(next_field);
+
+    res_.AppendArrayLenUint64(2 * field_values.size());
+    for (const auto& field_value : field_values) {
+      res_.AppendString(field_value.field);
+      res_.AppendString(field_value.value);
+    }
+  } else if (s.IsInvalidArgument()) {
+    res_.SetRes(CmdRes::kMultiKey);
+  } else {
+    res_.SetRes(CmdRes::kErrOther, s.ToString());
+  }
+}
+
+void PKHRScanRangeCmd::DoInitial() {
+  if (!CheckArg(argv_.size())) {
+    res_.SetRes(CmdRes::kWrongNum, kCmdNamePKHRScanRange);
+    return;
+  }
+  key_ = argv_[1];
+  field_start_ = argv_[2];
+  field_end_ = argv_[3];
+
+  size_t index = 4;
+  size_t argc = argv_.size();
+  while (index < argc) {
+    std::string opt = argv_[index];
+    if ((strcasecmp(opt.data(), "match") == 0) || (strcasecmp(opt.data(), "limit") == 0)) {
+      index++;
+      if (index >= argc) {
+        res_.SetRes(CmdRes::kSyntaxErr);
+        return;
+      }
+      if (strcasecmp(opt.data(), "match") == 0) {
+        pattern_ = argv_[index];
+      } else if ((pstd::string2int(argv_[index].data(), argv_[index].size(), &limit_) == 0) || limit_ <= 0) {
+        res_.SetRes(CmdRes::kInvalidInt);
+        return;
+      }
+    } else {
+      res_.SetRes(CmdRes::kSyntaxErr);
+      return;
+    }
+    index++;
+  }
+}
+
+void PKHRScanRangeCmd::Do() {
+  std::string next_field;
+  std::vector<storage::FieldValue> field_values;
+  rocksdb::Status s =
+      db_->storage()->PKHRScanRange(key_, field_start_, field_end_, pattern_, static_cast<int32_t>(limit_), &field_values, &next_field);
+
+  if (s.ok() || s.IsNotFound()) {
+    res_.AppendArrayLen(2);
+    res_.AppendString(next_field);
+
+    res_.AppendArrayLenUint64(2 * field_values.size());
+    for (const auto& field_value : field_values) {
+      res_.AppendString(field_value.field);
+      res_.AppendString(field_value.value);
+    }
+  } else if (s.IsInvalidArgument()) {
+    res_.SetRes(CmdRes::kMultiKey);
+  } else {
+    res_.SetRes(CmdRes::kErrOther, s.ToString());
+  }
+}
diff --git a/tools/pika_migrate/src/pika_hyperloglog.cc b/tools/pika_migrate/src/pika_hyperloglog.cc
new file mode 100644
index 0000000000..5b333934cc
--- /dev/null
+++ b/tools/pika_migrate/src/pika_hyperloglog.cc
@@ -0,0 +1,91 @@
+// Copyright (c) 2015-present, Qihoo, Inc. All rights reserved.
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree. An additional grant
+// of patent rights can be found in the PATENTS file in the same directory.
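+
+// Implements the Redis HyperLogLog commands: PFADD replies 1 when the
+// approximated cardinality changed, PFCOUNT returns the estimate over one
+// or more keys, and PFMERGE folds several HyperLogLogs into the first key.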
+
+#include "include/pika_hyperloglog.h"
+
+void PfAddCmd::DoInitial() {
+  if (!CheckArg(argv_.size())) {
+    res_.SetRes(CmdRes::kWrongNum, kCmdNamePfAdd);
+    return;
+  }
+  if (argv_.size() > 1) {
+    key_ = argv_[1];
+    size_t pos = 2;
+    while (pos < argv_.size()) {
+      values_.push_back(argv_[pos++]);
+    }
+  }
+}
+
+void PfAddCmd::Do() {
+  bool update = false;
+  rocksdb::Status s = db_->storage()->PfAdd(key_, values_, &update);
+  if (s.ok() && update) {
+    res_.AppendInteger(1);
+  } else if (s.ok() && !update) {
+    res_.AppendInteger(0);
+  } else if (s.IsInvalidArgument()) {
+    res_.SetRes(CmdRes::kMultiKey);
+  } else {
+    res_.SetRes(CmdRes::kErrOther, s.ToString());
+  }
+}
+
+void PfCountCmd::DoInitial() {
+  if (!CheckArg(argv_.size())) {
+    res_.SetRes(CmdRes::kWrongNum, kCmdNamePfCount);
+    return;
+  }
+  size_t pos = 1;
+  while (pos < argv_.size()) {
+    keys_.push_back(argv_[pos++]);
+  }
+}
+
+void PfCountCmd::Do() {
+  int64_t value_ = 0;
+  rocksdb::Status s = db_->storage()->PfCount(keys_, &value_);
+  if (s.ok()) {
+    res_.AppendInteger(value_);
+  } else if (s.IsInvalidArgument()) {
+    res_.SetRes(CmdRes::kMultiKey);
+  } else {
+    res_.SetRes(CmdRes::kErrOther, s.ToString());
+  }
+}
+
+void PfMergeCmd::DoInitial() {
+  if (!CheckArg(argv_.size())) {
+    res_.SetRes(CmdRes::kWrongNum, kCmdNamePfMerge);
+    return;
+  }
+  size_t pos = 1;
+  while (pos < argv_.size()) {
+    keys_.push_back(argv_[pos++]);
+  }
+}
+
+void PfMergeCmd::Do() {
+  rocksdb::Status s = db_->storage()->PfMerge(keys_, value_to_dest_);
+  if (s.ok()) {
+    res_.SetRes(CmdRes::kOk);
+  } else if (s.IsInvalidArgument()) {
+    res_.SetRes(CmdRes::kMultiKey);
+  } else {
+    res_.SetRes(CmdRes::kErrOther, s.ToString());
+  }
+}
+void PfMergeCmd::DoBinlog() {
+  PikaCmdArgsType set_args;
+  // used "set" instead of "SET" to distinguish the binlog of SetCmd
+  set_args.emplace_back("set");
+  set_args.emplace_back(keys_[0]);
+  set_args.emplace_back(value_to_dest_);
+  set_cmd_->Initial(set_args, db_name_);
+  set_cmd_->SetConn(GetConn());
+  set_cmd_->SetResp(resp_.lock());
+  // the value in this binlog entry may look odd: it is a string of up to 128KB
+  set_cmd_->DoBinlog();
+}
diff --git a/tools/pika_migrate/src/pika_inner_message.proto b/tools/pika_migrate/src/pika_inner_message.proto
new file mode 100644
index 0000000000..9e2a3ef04c
--- /dev/null
+++ b/tools/pika_migrate/src/pika_inner_message.proto
@@ -0,0 +1,166 @@
+syntax = "proto2";
+package InnerMessage;
+
+enum Type {
+  kMetaSync = 1;
+  kTrySync = 2;
+  kDBSync = 3;
+  kBinlogSync = 4;
+  kHeatBeat = 5;
+  kRemoveSlaveNode = 6;
+}
+
+enum StatusCode {
+  kOk = 1;
+  kError = 2;
+  kOther = 3;
+}
+
+message BinlogOffset {
+  required uint32 filenum = 1;
+  required uint64 offset = 2;
+  // consensus use
+  optional uint32 term = 3;
+  optional uint64 index = 4;
+}
+
+message Node {
+  required string ip = 1;
+  required int32 port = 2;
+}
+
+message Slot {
+  required string db_name = 1;
+  required uint32 slot_id = 2;
+}
+
+message DBInfo {
+  required string db_name = 1;
+  required uint32 slot_num = 2;
+  repeated uint32 slot_ids = 3;
+}
+
+message PikaMeta {
+  repeated DBInfo db_infos = 1;
+}
+
+message ConsensusMeta {
+  optional uint32 term = 1;
+  // Leader -> Follower prev_log_offset
+  // Follower -> Leader last_log_offset
+  optional BinlogOffset log_offset = 2;
+  optional BinlogOffset commit = 3;
+  optional bool reject = 4;
+  repeated BinlogOffset hint = 5;
+}
+
+// Request message
+message InnerRequest {
+  // slave to master
+  message MetaSync {
+    required Node node = 1;
+    optional string auth = 2;
+  }
+
+  // slave to master
+  message TrySync {
+    required Node node = 1;
+    required Slot slot = 2;
+    required BinlogOffset binlog_offset = 3;
+  }
+
+  // slave to master
+  message DBSync {
+    required Node node = 1;
+    required Slot slot = 2;
+    required BinlogOffset binlog_offset = 3;
+  }
+
+  message BinlogSync {
+    required Node node = 1;
+    required string db_name = 2;
+    required uint32 slot_id = 3;
+    required BinlogOffset ack_range_start = 4;
+    required BinlogOffset ack_range_end = 5;
+    required int32 session_id = 6;
+    required bool first_send = 7;
+  }
+
+  message RemoveSlaveNode {
+    required Node node = 1;
+    required Slot slot = 2;
+  }
+
+  required Type type = 1;
+  optional MetaSync meta_sync = 2;
+  optional TrySync try_sync = 3;
+  optional DBSync db_sync = 4;
+  optional BinlogSync binlog_sync = 5;
+  repeated RemoveSlaveNode remove_slave_node = 6;
+  optional ConsensusMeta consensus_meta = 7;
+}
+
+message SlotInfo {
+  required uint32 slot_id = 1;
+  required Node master = 2;
+  repeated Node slaves = 3;
+}
+
+// Response message
+message InnerResponse {
+  // master to slave
+  message MetaSync {
+    message DBInfo {
+      required string db_name = 1;
+      required int32 slot_num = 2;
+      required int32 db_instance_num = 3;
+    }
+    required bool classic_mode = 1;
+    repeated DBInfo dbs_info = 2;
+    required string run_id = 3;
+    optional string replication_id = 4;
+  }
+
+  // master to slave
+  message TrySync {
+    enum ReplyCode {
+      kOk = 1;
+      kSyncPointBePurged = 2;
+      kSyncPointLarger = 3;
+      kError = 4;
+    }
+    required ReplyCode reply_code = 1;
+    required Slot slot = 2;
+    optional BinlogOffset binlog_offset = 3;
+    optional int32 session_id = 4;
+  }
+
+  message DBSync {
+    required Slot slot = 1;
+    required int32 session_id = 2;
+  }
+
+  // master to slave
+  message BinlogSync {
+    required Slot slot = 1;
+    required BinlogOffset binlog_offset = 2;
+    required bytes binlog = 3;
+    required int32 session_id = 4;
+  }
+
+  message RemoveSlaveNode {
+    required Node node = 1;
+    required Slot slot = 2;
+  }
+
+  required Type type = 1;
+  required StatusCode code = 2;
+  optional string reply = 3;
+  optional MetaSync meta_sync = 4;
+  optional DBSync db_sync = 5;
+  optional TrySync try_sync = 6;
+  repeated BinlogSync binlog_sync = 7;
+  repeated RemoveSlaveNode remove_slave_node = 8;
+  // consensus use
+  optional ConsensusMeta consensus_meta = 9;
+}
diff --git a/tools/pika_migrate/src/pika_instant.cc b/tools/pika_migrate/src/pika_instant.cc
new file mode 100644
index 0000000000..b2e33287fb
--- /dev/null
+++ b/tools/pika_migrate/src/pika_instant.cc
@@ -0,0 +1,40 @@
+// Copyright (c) 2023-present, Qihoo, Inc. All rights reserved.
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree. An additional grant
+// of patent rights can be found in the PATENTS file in the same directory.
+
+#include <string>
+#include "../include/pika_instant.h"
+
+/* Return the mean of all the samples. */
+double Instant::getInstantaneousMetric(std::string metric) {
+  size_t j;
+  size_t sum = 0;
+
+  for (j = 0; j < STATS_METRIC_SAMPLES; j++)
+    sum += inst_metrics_[metric].samples[j];
+
+  return sum / STATS_METRIC_SAMPLES;
+}
+
+/* ======================= Cron: called every 5 s ======================== */
+
+/* Add a sample to the instantaneous metric. This function computes the quotient
+ * of the increment of value and base, which is useful to record operation count
+ * per second, or the average time consumption of an operation.
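+ *
+ * Worked example (illustrative numbers): with factor = 1000 and a base
+ * measured in milliseconds, if current_value grew by 500 operations while
+ * current_base grew by 1000 ms since the last sample, the recorded sample
+ * is 500 * 1000 / 1000 = 500 ops/sec.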
+ *
+ * current_value - The dividend
+ * current_base - The divisor
+ * */
+void Instant::trackInstantaneousMetric(std::string metric, size_t current_value, size_t current_base, size_t factor) {
+  if (inst_metrics_[metric].last_sample_base > 0) {
+    size_t base = current_base - inst_metrics_[metric].last_sample_base;
+    size_t value = current_value - inst_metrics_[metric].last_sample_value;
+    size_t avg = base > 0 ? (value * factor / base) : 0;
+    inst_metrics_[metric].samples[inst_metrics_[metric].idx] = avg;
+    inst_metrics_[metric].idx++;
+    inst_metrics_[metric].idx %= STATS_METRIC_SAMPLES;
+  }
+  inst_metrics_[metric].last_sample_base = current_base;
+  inst_metrics_[metric].last_sample_value = current_value;
+}
\ No newline at end of file
diff --git a/tools/pika_migrate/src/pika_kv.cc b/tools/pika_migrate/src/pika_kv.cc
new file mode 100644
index 0000000000..4c9c459184
--- /dev/null
+++ b/tools/pika_migrate/src/pika_kv.cc
@@ -0,0 +1,1968 @@
+// Copyright (c) 2015-present, Qihoo, Inc. All rights reserved.
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree. An additional grant
+// of patent rights can be found in the PATENTS file in the same directory.
+
+#include "include/pika_kv.h"
+#include <ctime>
+
+#include "include/pika_command.h"
+#include "include/pika_slot_command.h"
+#include "include/pika_cache.h"
+#include "include/pika_conf.h"
+#include "pstd/include/pstd_string.h"
+
+extern std::unique_ptr<PikaConf> g_pika_conf;
+/* SET key value [NX] [XX] [EX <seconds>] [PX <milliseconds>] */
+void SetCmd::DoInitial() {
+  if (!CheckArg(argv_.size())) {
+    res_.SetRes(CmdRes::kWrongNum, kCmdNameSet);
+    return;
+  }
+  key_ = argv_[1];
+  value_ = argv_[2];
+  condition_ = SetCmd::kNONE;
+  ttl_millsec = 0;
+  size_t index = 3;
+  while (index != argv_.size()) {
+    std::string opt = argv_[index];
+    if (strcasecmp(opt.data(), "xx") == 0) {
+      condition_ = SetCmd::kXX;
+    } else if (strcasecmp(opt.data(), "nx") == 0) {
+      condition_ = SetCmd::kNX;
+    } else if (strcasecmp(opt.data(), "vx") == 0) {
+      condition_ = SetCmd::kVX;
+      index++;
+      if (index == argv_.size()) {
+        res_.SetRes(CmdRes::kSyntaxErr);
+        return;
+      } else {
+        target_ = argv_[index];
+      }
+    } else if ((strcasecmp(opt.data(), "ex") == 0) || (strcasecmp(opt.data(), "px") == 0)) {
+      condition_ = (condition_ == SetCmd::kNONE) ? SetCmd::kEXORPX : condition_;
+      index++;
+      if (index == argv_.size()) {
+        res_.SetRes(CmdRes::kSyntaxErr);
+        return;
+      }
+      if (pstd::string2int(argv_[index].data(), argv_[index].size(), &ttl_millsec) == 0) {
+        res_.SetRes(CmdRes::kInvalidInt);
+        return;
+      }
+
+      if (strcasecmp(opt.data(), "ex") == 0) {
+        ttl_millsec *= 1000;
+      }
+      has_ttl_ = true;
+    } else {
+      res_.SetRes(CmdRes::kSyntaxErr);
+      return;
+    }
+    index++;
+  }
+}
+
+void SetCmd::Do() {
+  int32_t res = 1;
+  switch (condition_) {
+    case SetCmd::kXX:
+      s_ = db_->storage()->Setxx(key_, value_, &res, ttl_millsec);
+      break;
+    case SetCmd::kNX:
+      s_ = db_->storage()->Setnx(key_, value_, &res, ttl_millsec);
+      break;
+    case SetCmd::kVX:
+      s_ = db_->storage()->Setvx(key_, target_, value_, &success_, ttl_millsec);
+      break;
+    case SetCmd::kEXORPX:
+      s_ = db_->storage()->Setex(key_, value_, ttl_millsec);
+      break;
+    default:
+      s_ = db_->storage()->Set(key_, value_);
+      break;
+  }
+
+  if (s_.ok() || s_.IsNotFound()) {
+    if (condition_ == SetCmd::kVX) {
+      res_.AppendInteger(success_);
+    } else {
+      if (res == 1) {
+        res_.SetRes(CmdRes::kOk);
+        AddSlotKey("k", key_, db_);
+      } else {
+        res_.AppendStringLen(-1);
+      }
+    }
+  } else if (s_.IsInvalidArgument()) {
+    res_.SetRes(CmdRes::kMultiKey);
+  } else {
+    res_.SetRes(CmdRes::kErrOther, s_.ToString());
+  }
+}
+
+void SetCmd::DoThroughDB() {
+  Do();
+}
+
+void SetCmd::DoUpdateCache() {
+  if (SetCmd::kNX == condition_ || IsTooLargeKey(g_pika_conf->max_key_size_in_cache())) {
+    return;
+  }
+  if (s_.ok()) {
+    if (has_ttl_) {
+      db_->cache()->Setxx(key_, value_, ttl_millsec / 1000);
+    } else {
+      db_->cache()->SetxxWithoutTTL(key_, value_);
+    }
+  }
+}
+
+std::string SetCmd::ToRedisProtocol() {
+  if (condition_ == SetCmd::kEXORPX) {
+    std::string content;
+    content.reserve(RAW_ARGS_LEN);
+    RedisAppendLen(content, 4, "*");
+
+    // to pksetexat cmd
+    std::string pksetexat_cmd("pksetexat");
+    RedisAppendLenUint64(content, pksetexat_cmd.size(), "$");
+    RedisAppendContent(content, pksetexat_cmd);
+    // key
+    RedisAppendLenUint64(content, key_.size(), "$");
+    RedisAppendContent(content, key_);
+    // time_stamp
+    char buf[100];
+
+    // TODO: precision loss (milliseconds are truncated to seconds here)
+    auto time_stamp = time(nullptr) + ttl_millsec / 1000;
+    pstd::ll2string(buf, 100, time_stamp);
+    std::string at(buf);
+    RedisAppendLenUint64(content, at.size(), "$");
+    RedisAppendContent(content, at);
+    // value
+    RedisAppendLenUint64(content, value_.size(), "$");
+    RedisAppendContent(content, value_);
+    return content;
+  } else {
+    return Cmd::ToRedisProtocol();
+  }
+}
+
+void GetCmd::DoInitial() {
+  if (!CheckArg(argv_.size())) {
+    res_.SetRes(CmdRes::kWrongNum, kCmdNameGet);
+    return;
+  }
+  key_ = argv_[1];
+}
+
+void GetCmd::Do() {
+  s_ = db_->storage()->GetWithTTL(key_, &value_, &ttl_millsec_);
+  if (s_.ok()) {
+    res_.AppendStringLenUint64(value_.size());
+    res_.AppendContent(value_);
+  } else if (s_.IsNotFound()) {
+    res_.AppendStringLen(-1);
+  } else if (s_.IsInvalidArgument()) {
+    res_.SetRes(CmdRes::kMultiKey);
+  } else {
+    res_.SetRes(CmdRes::kErrOther, s_.ToString());
+  }
+}
+
+void GetCmd::ReadCache() {
+  auto s = db_->cache()->Get(key_, &value_);
+  if (s.ok()) {
+    res_.AppendStringLen(value_.size());
+    res_.AppendContent(value_);
+  } else {
+    res_.SetRes(CmdRes::kCacheMiss);
+  }
+}
+
+void GetCmd::DoThroughDB() {
+  res_.clear();
+  Do();
+}
+
+void GetCmd::DoUpdateCache() {
+  if (IsTooLargeKey(g_pika_conf->max_key_size_in_cache())) {
+    return;
+  }
+  if (s_.ok()) {
+    db_->cache()->WriteKVToCache(key_, value_, ttl_millsec_ / 1000);
+  }
+}
+
+void DelCmd::DoInitial() {
+  if (!CheckArg(argv_.size())) {
+    res_.SetRes(CmdRes::kWrongNum, name());
+    return;
+  }
+  auto iter = argv_.begin();
+  keys_.assign(++iter, argv_.end());
+}
+
+void DelCmd::Do() {
+  int64_t count = db_->storage()->Del(keys_);
+  if (count >= 0) {
+    res_.AppendInteger(count);
+    s_ = rocksdb::Status::OK();
+    std::vector<std::string>::const_iterator it;
+    for (it = keys_.begin(); it != keys_.end(); it++) {
+      RemSlotKey(*it, db_);
+    }
+  } else {
+    res_.SetRes(CmdRes::kErrOther, "delete error");
+    s_ = rocksdb::Status::Corruption("delete error");
+  }
+}
+
+void DelCmd::DoThroughDB() {
+  Do();
+}
+
+void DelCmd::DoUpdateCache() {
+  if (s_.ok()) {
+    db_->cache()->Del(keys_);
+  }
+}
+
+void DelCmd::Split(const HintKeys& hint_keys) {
+  std::map<storage::DataType, storage::Status> type_status;
+  int64_t count = db_->storage()->Del(hint_keys.keys);
+  if (count >= 0) {
+    split_res_ += count;
+  } else {
+    res_.SetRes(CmdRes::kErrOther, "delete error");
+  }
+}
+
+void DelCmd::Merge() { res_.AppendInteger(split_res_); }
+
+void DelCmd::DoBinlog() {
+  std::string opt = argv_.at(0);
+  for (auto& key : keys_) {
+    argv_.clear();
+    argv_.emplace_back(opt);
+    argv_.emplace_back(key);
+    Cmd::DoBinlog();
+  }
+}
+
+void IncrCmd::DoInitial() {
+  if (!CheckArg(argv_.size())) {
+    res_.SetRes(CmdRes::kWrongNum, kCmdNameIncr);
+    return;
+  }
+  key_ = argv_[1];
+}
+
+void IncrCmd::Do() {
+  s_ = db_->storage()->Incrby(key_, 1, &new_value_, &expired_timestamp_millsec_);
+  if (s_.ok()) {
+    res_.AppendContent(":" + std::to_string(new_value_));
+    AddSlotKey("k", key_, db_);
+  } else if (s_.IsCorruption() && s_.ToString() == "Corruption: Value is not a integer") {
+    res_.SetRes(CmdRes::kInvalidInt);
+  } else if (s_.IsInvalidArgument() && s_.ToString().substr(0, std::char_traits<char>::length(ErrTypeMessage)) == ErrTypeMessage) {
+    res_.SetRes(CmdRes::kMultiKey);
+  } else if (s_.IsInvalidArgument()) {
+    res_.SetRes(CmdRes::kOverFlow);
+  } else {
+    res_.SetRes(CmdRes::kErrOther, s_.ToString());
+  }
+}
+
+void IncrCmd::DoThroughDB() {
+  Do();
+}
+
+void IncrCmd::DoUpdateCache() {
+  if (s_.ok()) {
+    db_->cache()->Incrxx(key_);
+  }
+}
+
+std::string IncrCmd::ToRedisProtocol() {
+  std::string content;
+  content.reserve(RAW_ARGS_LEN);
+  RedisAppendLen(content, 4, "*");
+
+  // to pksetexat cmd
+  std::string pksetexat_cmd("pksetexat");
+  RedisAppendLenUint64(content, pksetexat_cmd.size(), "$");
+  RedisAppendContent(content, pksetexat_cmd);
+  // key
+  RedisAppendLenUint64(content, key_.size(), "$");
+  RedisAppendContent(content, key_);
+  // time_stamp
+  char buf[100];
+  auto time_stamp = expired_timestamp_millsec_ > 0 ? expired_timestamp_millsec_ / 1000 : expired_timestamp_millsec_;
+  pstd::ll2string(buf, sizeof(buf), time_stamp);
+  std::string at(buf);
+  RedisAppendLenUint64(content, at.size(), "$");
+  RedisAppendContent(content, at);
+  // value
+  std::string new_value_str = std::to_string(new_value_);
+  RedisAppendLenUint64(content, new_value_str.size(), "$");
+  RedisAppendContent(content, new_value_str);
+  return content;
+}
+
+void IncrbyCmd::DoInitial() {
+  if (!CheckArg(argv_.size())) {
+    res_.SetRes(CmdRes::kWrongNum, kCmdNameIncrby);
+    return;
+  }
+  key_ = argv_[1];
+  if (pstd::string2int(argv_[2].data(), argv_[2].size(), &by_) == 0) {
+    res_.SetRes(CmdRes::kInvalidInt, kCmdNameIncrby);
+    return;
+  }
+}
+
+void IncrbyCmd::Do() {
+  s_ = db_->storage()->Incrby(key_, by_, &new_value_, &expired_timestamp_millsec_);
+  if (s_.ok()) {
+    res_.AppendContent(":" + std::to_string(new_value_));
+    AddSlotKey("k", key_, db_);
+  } else if (s_.IsCorruption() && s_.ToString() == "Corruption: Value is not a integer") {
+    res_.SetRes(CmdRes::kInvalidInt);
+  } else if (s_.IsInvalidArgument() && s_.ToString().substr(0, std::char_traits<char>::length(ErrTypeMessage)) == ErrTypeMessage) {
+    res_.SetRes(CmdRes::kMultiKey);
+  } else if (s_.IsInvalidArgument()) {
+    res_.SetRes(CmdRes::kOverFlow);
+  } else {
+    res_.SetRes(CmdRes::kErrOther, s_.ToString());
+  }
+}
+
+void IncrbyCmd::DoThroughDB() {
+  Do();
+}
+
+void IncrbyCmd::DoUpdateCache() {
+  if (s_.ok()) {
+    db_->cache()->IncrByxx(key_, by_);
+  }
+}
+
+std::string IncrbyCmd::ToRedisProtocol() {
+  std::string content;
+  content.reserve(RAW_ARGS_LEN);
+  RedisAppendLen(content, 4, "*");
+
+  // to pksetexat cmd
+  std::string pksetexat_cmd("pksetexat");
+  RedisAppendLenUint64(content, pksetexat_cmd.size(), "$");
+  RedisAppendContent(content, pksetexat_cmd);
+  // key
+  RedisAppendLenUint64(content, key_.size(), "$");
+  RedisAppendContent(content, key_);
+  // time_stamp
+  char buf[100];
+  auto time_stamp = expired_timestamp_millsec_ > 0 ? expired_timestamp_millsec_ / 1000 : expired_timestamp_millsec_;
+  pstd::ll2string(buf, sizeof(buf), time_stamp);
+  std::string at(buf);
+  RedisAppendLenUint64(content, at.size(), "$");
+  RedisAppendContent(content, at);
+  // value
+  std::string new_value_str = std::to_string(new_value_);
+  RedisAppendLenUint64(content, new_value_str.size(), "$");
+  RedisAppendContent(content, new_value_str);
+  return content;
+}
+
+void IncrbyfloatCmd::DoInitial() {
+  if (!CheckArg(argv_.size())) {
+    res_.SetRes(CmdRes::kWrongNum, kCmdNameIncrbyfloat);
+    return;
+  }
+  key_ = argv_[1];
+  value_ = argv_[2];
+  if (pstd::string2d(argv_[2].data(), argv_[2].size(), &by_) == 0) {
+    res_.SetRes(CmdRes::kInvalidFloat);
+    return;
+  }
+}
+
+void IncrbyfloatCmd::Do() {
+  s_ = db_->storage()->Incrbyfloat(key_, value_, &new_value_, &expired_timestamp_millsec_);
+  if (s_.ok()) {
+    res_.AppendStringLenUint64(new_value_.size());
+    res_.AppendContent(new_value_);
+    AddSlotKey("k", key_, db_);
+  } else if (s_.IsCorruption() && s_.ToString() == "Corruption: Value is not a vaild float") {
+    res_.SetRes(CmdRes::kInvalidFloat);
+  } else if (s_.IsInvalidArgument() && s_.ToString().substr(0, std::char_traits<char>::length(ErrTypeMessage)) == ErrTypeMessage) {
+    res_.SetRes(CmdRes::kMultiKey);
+  } else if (s_.IsInvalidArgument()) {
+    res_.SetRes(CmdRes::KIncrByOverFlow);
+  } else {
+    res_.SetRes(CmdRes::kErrOther, s_.ToString());
+  }
+}
+
+void IncrbyfloatCmd::DoThroughDB() {
+  Do();
+}
+
+void IncrbyfloatCmd::DoUpdateCache() {
+  if (s_.ok()) {
+    long double long_double_by;
+    if (storage::StrToLongDouble(value_.data(), value_.size(), &long_double_by) != -1) {
+      db_->cache()->Incrbyfloatxx(key_, long_double_by);
+    }
+  }
+}
+
+std::string IncrbyfloatCmd::ToRedisProtocol() {
+  std::string content;
+  content.reserve(RAW_ARGS_LEN);
+  RedisAppendLen(content, 4, "*");
+
+  // to pksetexat cmd
+  std::string pksetexat_cmd("pksetexat");
+  RedisAppendLenUint64(content, pksetexat_cmd.size(), "$");
+  RedisAppendContent(content, pksetexat_cmd);
+  // key
+  RedisAppendLenUint64(content, key_.size(), "$");
+  RedisAppendContent(content, key_);
+  // time_stamp
+  char buf[100];
+  auto time_stamp = expired_timestamp_millsec_ > 0 ? expired_timestamp_millsec_ / 1000 : expired_timestamp_millsec_;
+  pstd::ll2string(buf, sizeof(buf), time_stamp);
+  std::string at(buf);
+  RedisAppendLenUint64(content, at.size(), "$");
+  RedisAppendContent(content, at);
+  // value
+  RedisAppendLenUint64(content, new_value_.size(), "$");
+  RedisAppendContent(content, new_value_);
+  return content;
+}
+
+
+void DecrCmd::DoInitial() {
+  if (!CheckArg(argv_.size())) {
+    res_.SetRes(CmdRes::kWrongNum, kCmdNameDecr);
+    return;
+  }
+  key_ = argv_[1];
+}
+
+void DecrCmd::Do() {
+  s_ = db_->storage()->Decrby(key_, 1, &new_value_);
+  if (s_.ok()) {
+    res_.AppendContent(":" + std::to_string(new_value_));
+  } else if (s_.IsCorruption() && s_.ToString() == "Corruption: Value is not a integer") {
+    res_.SetRes(CmdRes::kInvalidInt);
+  } else if (s_.IsInvalidArgument() && s_.ToString().substr(0, std::char_traits<char>::length(ErrTypeMessage)) == ErrTypeMessage) {
+    res_.SetRes(CmdRes::kMultiKey);
+  } else if (s_.IsInvalidArgument()) {
+    res_.SetRes(CmdRes::kOverFlow);
+  } else {
+    res_.SetRes(CmdRes::kErrOther, s_.ToString());
+  }
+}
+
+void DecrCmd::DoThroughDB() {
+  Do();
+}
+
+void DecrCmd::DoUpdateCache() {
+  if (s_.ok()) {
+    db_->cache()->Decrxx(key_);
+  }
+}
+
+void DecrbyCmd::DoInitial() {
+  if (!CheckArg(argv_.size())) {
+    res_.SetRes(CmdRes::kWrongNum, kCmdNameDecrby);
+    return;
+  }
+  key_ = argv_[1];
+  if (pstd::string2int(argv_[2].data(), argv_[2].size(), &by_) == 0) {
+    res_.SetRes(CmdRes::kInvalidInt);
+    return;
+  }
+}
+
+void DecrbyCmd::Do() {
+  s_ = db_->storage()->Decrby(key_, by_, &new_value_);
+  if (s_.ok()) {
+    AddSlotKey("k", key_, db_);
+    res_.AppendContent(":" + std::to_string(new_value_));
+  } else if (s_.IsCorruption() && s_.ToString() == "Corruption: Value is not a integer") {
+    res_.SetRes(CmdRes::kInvalidInt);
+  } else if (s_.IsInvalidArgument() && s_.ToString().substr(0, std::char_traits<char>::length(ErrTypeMessage)) == ErrTypeMessage) {
+    res_.SetRes(CmdRes::kMultiKey);
+  } else if (s_.IsInvalidArgument()) {
+    res_.SetRes(CmdRes::kOverFlow);
+  } else {
+    res_.SetRes(CmdRes::kErrOther, s_.ToString());
+  }
+}
+
+void DecrbyCmd::DoThroughDB() {
+  Do();
+}
+
+void DecrbyCmd::DoUpdateCache() {
+  if (s_.ok()) {
+    db_->cache()->DecrByxx(key_, by_);
+  }
+}
+
+void GetsetCmd::DoInitial() {
+  if (!CheckArg(argv_.size())) {
+    res_.SetRes(CmdRes::kWrongNum, kCmdNameGetset);
+    return;
+  }
+  key_ = argv_[1];
+  new_value_ = argv_[2];
+}
+
+void GetsetCmd::Do() {
+  std::string old_value;
+  s_ = db_->storage()->GetSet(key_, new_value_, &old_value);
+  if (s_.ok()) {
+    if (old_value.empty()) {
+      res_.AppendContent("$-1");
+    } else {
+      res_.AppendStringLenUint64(old_value.size());
+      res_.AppendContent(old_value);
+    }
+    AddSlotKey("k", key_, db_);
+  } else if (s_.IsInvalidArgument()) {
+    res_.SetRes(CmdRes::kMultiKey);
+  } else {
+    res_.SetRes(CmdRes::kErrOther, s_.ToString());
+  }
+}
+
+void GetsetCmd::DoThroughDB() {
+  Do();
+}
+
+void GetsetCmd::DoUpdateCache() {
+  if (s_.ok()) {
+    db_->cache()->SetxxWithoutTTL(key_, new_value_);
+  }
+}
+
+void AppendCmd::DoInitial() {
+  if (!CheckArg(argv_.size())) {
+    res_.SetRes(CmdRes::kWrongNum, kCmdNameAppend);
+    return;
+  }
+  key_ = argv_[1];
+  value_ = argv_[2];
+}
+
+void AppendCmd::Do() {
+  int32_t new_len = 0;
+  s_ = db_->storage()->Append(key_, value_, &new_len, &expired_timestamp_millsec_, new_value_);
+  if (s_.ok() || s_.IsNotFound()) {
+    res_.AppendInteger(new_len);
+    AddSlotKey("k", key_, db_);
+  } else if (s_.IsInvalidArgument()) {
+    res_.SetRes(CmdRes::kMultiKey);
+  } else {
+    res_.SetRes(CmdRes::kErrOther, s_.ToString());
+  }
+}
+
+void AppendCmd::DoThroughDB() {
+  Do();
+}
+
+void AppendCmd::DoUpdateCache() {
+  if (s_.ok()) {
+    db_->cache()->Appendxx(key_, value_);
+  }
+}
+
+std::string AppendCmd::ToRedisProtocol() {
+  std::string content;
+  content.reserve(RAW_ARGS_LEN);
+  RedisAppendLen(content, 4, "*");
+
+  // to pksetexat cmd
+  std::string pksetexat_cmd("pksetexat");
+  RedisAppendLenUint64(content, pksetexat_cmd.size(), "$");
+  RedisAppendContent(content, pksetexat_cmd);
+  // key
+  RedisAppendLenUint64(content, key_.size(), "$");
+  RedisAppendContent(content, key_);
+  // time_stamp
+  char buf[100];
+  auto time_stamp = expired_timestamp_millsec_ > 0 ? expired_timestamp_millsec_ / 1000 : expired_timestamp_millsec_;
+  pstd::ll2string(buf, sizeof(buf), time_stamp);
+  std::string at(buf);
+  RedisAppendLenUint64(content, at.size(), "$");
+  RedisAppendContent(content, at);
+  // value
+  RedisAppendLenUint64(content, new_value_.size(), "$");
+  RedisAppendContent(content, new_value_);
+  return content;
+}
+
+void MgetCmd::DoInitial() {
+  if (!CheckArg(argv_.size())) {
+    res_.SetRes(CmdRes::kWrongNum, kCmdNameMget);
+    return;
+  }
+  keys_ = argv_;
+  keys_.erase(keys_.begin());
+  split_res_.resize(keys_.size());
+  cache_miss_keys_.clear();
+}
+
+void MgetCmd::AssembleResponseFromCache() {
+  res_.AppendArrayLenUint64(keys_.size());
+  for (const auto& key : keys_) {
+    auto it = cache_hit_values_.find(key);
+    if (it != cache_hit_values_.end()) {
+      res_.AppendStringLen(it->second.size());
+      res_.AppendContent(it->second);
+    } else {
+      res_.SetRes(CmdRes::kErrOther, "Internal error during cache assembly");
+      return;
+    }
+  }
+}
+
+void MgetCmd::Do() {
+  // Without using the cache and querying only the DB, we need to use keys_.
+  // This line will only be assigned when querying the DB directly.
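+  // Flow note (descriptive): on the cache-read path, ReadCache() has already
+  // filled cache_miss_keys_ with the keys it could not serve, while on the
+  // direct-DB path that vector is still empty, so it falls back to all of keys_.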
+  if (cache_miss_keys_.size() == 0) {
+    cache_miss_keys_ = keys_;
+  }
+  db_value_status_array_.clear();
+  s_ = db_->storage()->MGetWithTTL(cache_miss_keys_, &db_value_status_array_);
+  if (!s_.ok()) {
+    if (s_.IsInvalidArgument()) {
+      res_.SetRes(CmdRes::kMultiKey);
+    } else {
+      res_.SetRes(CmdRes::kErrOther, s_.ToString());
+    }
+    return;
+  }
+
+  MergeCachedAndDbResults();
+}
+
+void MgetCmd::Split(const HintKeys& hint_keys) {
+  std::vector<storage::ValueStatus> vss;
+  const std::vector<std::string>& keys = hint_keys.keys;
+  rocksdb::Status s = db_->storage()->MGet(keys, &vss);
+  if (s.ok()) {
+    if (hint_keys.hints.size() != vss.size()) {
+      res_.SetRes(CmdRes::kErrOther, "internal Mget return size invalid");
+    }
+    const std::vector<int>& hints = hint_keys.hints;
+    for (size_t i = 0; i < vss.size(); ++i) {
+      split_res_[hints[i]] = vss[i];
+    }
+  } else {
+    res_.SetRes(CmdRes::kErrOther, s.ToString());
+  }
+}
+
+void MgetCmd::Merge() {
+  res_.AppendArrayLenUint64(split_res_.size());
+  for (const auto& vs : split_res_) {
+    if (vs.status.ok()) {
+      res_.AppendStringLenUint64(vs.value.size());
+      res_.AppendContent(vs.value);
+    } else {
+      res_.AppendContent("$-1");
+    }
+  }
+}
+
+void MgetCmd::DoThroughDB() {
+  res_.clear();
+  Do();
+}
+
+void MgetCmd::ReadCache() {
+  for (const auto& key : keys_) {
+    std::string value;
+    auto s = db_->cache()->Get(const_cast<std::string&>(key), &value);
+    if (s.ok()) {
+      cache_hit_values_[key] = value;
+    } else {
+      cache_miss_keys_.push_back(key);
+    }
+  }
+  if (cache_miss_keys_.empty()) {
+    AssembleResponseFromCache();
+  } else {
+    res_.SetRes(CmdRes::kCacheMiss);
+  }
+}
+
+void MgetCmd::DoUpdateCache() {
+  size_t db_index = 0;
+  for (const auto& key : cache_miss_keys_) {
+    if (db_index < db_value_status_array_.size() && db_value_status_array_[db_index].status.ok()) {
+      int64_t ttl_millsec = db_value_status_array_[db_index].ttl_millsec;
+      db_->cache()->WriteKVToCache(const_cast<std::string&>(key), db_value_status_array_[db_index].value, ttl_millsec > 0 ? ttl_millsec / 1000 : ttl_millsec);
+    }
+    db_index++;
+  }
+}
+
+void MgetCmd::MergeCachedAndDbResults() {
+  res_.AppendArrayLenUint64(keys_.size());
+
+  std::unordered_map<std::string, std::string> db_results_map;
+  for (size_t i = 0; i < cache_miss_keys_.size(); ++i) {
+    if (db_value_status_array_[i].status.ok()) {
+      db_results_map[cache_miss_keys_[i]] = db_value_status_array_[i].value;
+    }
+  }
+
+  for (const auto& key : keys_) {
+    auto cache_it = cache_hit_values_.find(key);
+
+    if (cache_it != cache_hit_values_.end()) {
+      res_.AppendStringLen(cache_it->second.size());
+      res_.AppendContent(cache_it->second);
+    } else {
+      auto db_it = db_results_map.find(key);
+      if (db_it != db_results_map.end()) {
+        res_.AppendStringLen(db_it->second.size());
+        res_.AppendContent(db_it->second);
+      } else {
+        res_.AppendContent("$-1");
+      }
+    }
+  }
+}
+
+
+void KeysCmd::DoInitial() {
+  if (!CheckArg(argv_.size())) {
+    res_.SetRes(CmdRes::kWrongNum, kCmdNameKeys);
+    return;
+  }
+  pattern_ = argv_[1];
+  if (argv_.size() == 3) {
+    std::string opt = argv_[2];
+    if (strcasecmp(opt.data(), "string") == 0) {
+      type_ = storage::DataType::kStrings;
+    } else if (strcasecmp(opt.data(), "zset") == 0) {
+      type_ = storage::DataType::kZSets;
+    } else if (strcasecmp(opt.data(), "set") == 0) {
+      type_ = storage::DataType::kSets;
+    } else if (strcasecmp(opt.data(), "list") == 0) {
+      type_ = storage::DataType::kLists;
+    } else if (strcasecmp(opt.data(), "hash") == 0) {
+      type_ = storage::DataType::kHashes;
+    } else if (strcasecmp(opt.data(), "stream") == 0) {
+      type_ = storage::DataType::kStreams;
+    } else {
+      res_.SetRes(CmdRes::kSyntaxErr);
+    }
+  } else if (argv_.size() > 3) {
+    res_.SetRes(CmdRes::kSyntaxErr);
+  }
+}
+
+void KeysCmd::Do() {
+  int64_t total_key = 0;
+  int64_t cursor = 0;
+  size_t raw_limit = g_pika_conf->max_client_response_size();
+  std::string raw;
+  std::vector<std::string> keys;
+  do {
+    keys.clear();
+    cursor = db_->storage()->Scan(type_, cursor, pattern_, PIKA_SCAN_STEP_LENGTH, &keys);
+    for (const auto& key : keys) {
+      RedisAppendLenUint64(raw, key.size(), "$");
+      RedisAppendContent(raw, key);
+    }
+    if (raw.size() >= raw_limit) {
+      res_.SetRes(CmdRes::kErrOther, "Response exceeds the max-client-response-size limit");
+      return;
+    }
+    total_key += static_cast<int64_t>(keys.size());
+  } while (cursor != 0);
+
+  res_.AppendArrayLen(total_key);
+  res_.AppendStringRaw(raw);
+}
+
+void SetnxCmd::DoInitial() {
+  if (!CheckArg(argv_.size())) {
+    res_.SetRes(CmdRes::kWrongNum, kCmdNameSetnx);
+    return;
+  }
+  key_ = argv_[1];
+  value_ = argv_[2];
+}
+
+void SetnxCmd::Do() {
+  success_ = 0;
+  s_ = db_->storage()->Setnx(key_, value_, &success_);
+  if (s_.ok()) {
+    res_.AppendInteger(success_);
+    AddSlotKey("k", key_, db_);
+  } else if (s_.IsInvalidArgument()) {
+    res_.SetRes(CmdRes::kMultiKey);
+  } else {
+    res_.SetRes(CmdRes::kErrOther, s_.ToString());
+  }
+}
+
+std::string SetnxCmd::ToRedisProtocol() {
+  std::string content;
+  content.reserve(RAW_ARGS_LEN);
+  RedisAppendLen(content, 3, "*");
+
+  // don't check the variable 'success_': if 'success_' were false, an empty binlog would be saved into the file.
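+  // For reference, the payload built below is plain RESP; with a hypothetical
+  // key "foo" and value "bar" it would read:
+  //   *3\r\n$5\r\nsetnx\r\n$3\r\nfoo\r\n$3\r\nbar\r\n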
+  // to setnx cmd
+  std::string set_cmd("setnx");
+  RedisAppendLenUint64(content, set_cmd.size(), "$");
+  RedisAppendContent(content, set_cmd);
+  // key
+  RedisAppendLenUint64(content, key_.size(), "$");
+  RedisAppendContent(content, key_);
+  // value
+  RedisAppendLenUint64(content, value_.size(), "$");
+  RedisAppendContent(content, value_);
+  return content;
+}
+
+void SetexCmd::DoInitial() {
+  if (!CheckArg(argv_.size())) {
+    res_.SetRes(CmdRes::kWrongNum, kCmdNameSetex);
+    return;
+  }
+  key_ = argv_[1];
+  if (pstd::string2int(argv_[2].data(), argv_[2].size(), &ttl_sec_) == 0) {
+    res_.SetRes(CmdRes::kInvalidInt);
+    return;
+  }
+  value_ = argv_[3];
+}
+
+void SetexCmd::Do() {
+  s_ = db_->storage()->Setex(key_, value_, ttl_sec_ * 1000);
+  if (s_.ok()) {
+    res_.SetRes(CmdRes::kOk);
+    AddSlotKey("k", key_, db_);
+  } else if (s_.IsInvalidArgument()) {
+    res_.SetRes(CmdRes::kMultiKey);
+  } else {
+    res_.SetRes(CmdRes::kErrOther, s_.ToString());
+  }
+}
+
+void SetexCmd::DoThroughDB() {
+  Do();
+}
+
+void SetexCmd::DoUpdateCache() {
+  if (s_.ok()) {
+    db_->cache()->Setxx(key_, value_, ttl_sec_);
+  }
+}
+
+std::string SetexCmd::ToRedisProtocol() {
+  std::string content;
+  content.reserve(RAW_ARGS_LEN);
+  RedisAppendLen(content, 4, "*");
+
+  // to pksetexat cmd
+  std::string pksetexat_cmd("pksetexat");
+  RedisAppendLenUint64(content, pksetexat_cmd.size(), "$");
+  RedisAppendContent(content, pksetexat_cmd);
+  // key
+  RedisAppendLenUint64(content, key_.size(), "$");
+  RedisAppendContent(content, key_);
+  // time_stamp
+  char buf[100];
+  auto time_stamp = time(nullptr) + ttl_sec_;
+  pstd::ll2string(buf, 100, time_stamp);
+  std::string at(buf);
+  RedisAppendLenUint64(content, at.size(), "$");
+  RedisAppendContent(content, at);
+  // value
+  RedisAppendLenUint64(content, value_.size(), "$");
+  RedisAppendContent(content, value_);
+  return content;
+}
+
+void PsetexCmd::DoInitial() {
+  if (!CheckArg(argv_.size())) {
+    res_.SetRes(CmdRes::kWrongNum, kCmdNamePsetex);
+    return;
+  }
+  key_ = argv_[1];
+  if (pstd::string2int(argv_[2].data(), argv_[2].size(), &ttl_millsec) == 0) {
+    res_.SetRes(CmdRes::kInvalidInt);
+    return;
+  }
+  value_ = argv_[3];
+}
+
+void PsetexCmd::Do() {
+  s_ = db_->storage()->Setex(key_, value_, ttl_millsec);
+  if (s_.ok()) {
+    res_.SetRes(CmdRes::kOk);
+  } else if (s_.IsInvalidArgument()) {
+    res_.SetRes(CmdRes::kMultiKey);
+  } else {
+    res_.SetRes(CmdRes::kErrOther, s_.ToString());
+  }
+}
+
+void PsetexCmd::DoThroughDB() {
+  Do();
+}
+
+void PsetexCmd::DoUpdateCache() {
+  if (s_.ok()) {
+    db_->cache()->Setxx(key_, value_, ttl_millsec / 1000);
+  }
+}
+
+std::string PsetexCmd::ToRedisProtocol() {
+  std::string content;
+  content.reserve(RAW_ARGS_LEN);
+  RedisAppendLen(content, 4, "*");
+
+  // to pksetexat cmd
+  std::string pksetexat_cmd("pksetexat");
+  RedisAppendLenUint64(content, pksetexat_cmd.size(), "$");
+  RedisAppendContent(content, pksetexat_cmd);
+  // key
+  RedisAppendLenUint64(content, key_.size(), "$");
+  RedisAppendContent(content, key_);
+  // time_stamp
+  char buf[100];
+  auto time_stamp = pstd::NowMillis() + ttl_millsec;
+  pstd::ll2string(buf, 100, time_stamp);
+  std::string at(buf);
+  RedisAppendLenUint64(content, at.size(), "$");
+  RedisAppendContent(content, at);
+  // value
+  RedisAppendLenUint64(content, value_.size(), "$");
+  RedisAppendContent(content, value_);
+  return content;
+}
+
+void DelvxCmd::DoInitial() {
+  if (!CheckArg(argv_.size())) {
+    res_.SetRes(CmdRes::kWrongNum, kCmdNameDelvx);
+    return;
+  }
+  key_ = argv_[1];
+  value_ = argv_[2];
+}
+
+void DelvxCmd::Do() {
DelvxCmd::Do() { + rocksdb::Status s = db_->storage()->Delvx(key_, value_, &success_); + if (s.ok() || s.IsNotFound()) { + res_.AppendInteger(success_); + } else if (s_.IsInvalidArgument()) { + res_.SetRes(CmdRes::kMultiKey); + } else { + res_.SetRes(CmdRes::kErrOther, s.ToString()); + } +} + +void MsetCmd::DoInitial() { + if (!CheckArg(argv_.size())) { + res_.SetRes(CmdRes::kWrongNum, kCmdNameMset); + return; + } + size_t argc = argv_.size(); + if (argc % 2 == 0) { + res_.SetRes(CmdRes::kWrongNum, kCmdNameMset); + return; + } + kvs_.clear(); + for (size_t index = 1; index != argc; index += 2) { + kvs_.push_back({argv_[index], argv_[index + 1]}); + } +} + +void MsetCmd::Do() { + s_ = db_->storage()->MSet(kvs_); + if (s_.ok()) { + res_.SetRes(CmdRes::kOk); + std::vector::const_iterator it; + for (it = kvs_.begin(); it != kvs_.end(); it++) { + AddSlotKey("k", it->key, db_); + } + } else if (s_.IsInvalidArgument()) { + res_.SetRes(CmdRes::kMultiKey); + } else { + res_.SetRes(CmdRes::kErrOther, s_.ToString()); + } +} + +void MsetCmd::DoThroughDB() { + Do(); +} + +void MsetCmd::DoUpdateCache() { + if (s_.ok()) { + for (auto key : kvs_) { + db_->cache()->SetxxWithoutTTL(key.key, key.value); + } + } +} + +void MsetCmd::Split(const HintKeys& hint_keys) { + std::vector kvs; + const std::vector& keys = hint_keys.keys; + const std::vector& hints = hint_keys.hints; + if (keys.size() != hints.size()) { + res_.SetRes(CmdRes::kErrOther, "SplitError hint_keys size not match"); + } + for (size_t i = 0; i < keys.size(); i++) { + if (kvs_[hints[i]].key == keys[i]) { + kvs.push_back(kvs_[hints[i]]); + } else { + res_.SetRes(CmdRes::kErrOther, "SplitError hint key: " + keys[i]); + return; + } + } + storage::Status s = db_->storage()->MSet(kvs); + if (s.ok()) { + res_.SetRes(CmdRes::kOk); + } else { + res_.SetRes(CmdRes::kErrOther, s.ToString()); + return; + } +} + +void MsetCmd::Merge() {} + +void MsetCmd::DoBinlog() { + PikaCmdArgsType set_argv; + set_argv.resize(3); + //used "set" instead of "SET" to distinguish the binlog of Set + set_argv[0] = "set"; + set_cmd_->SetConn(GetConn()); + set_cmd_->SetResp(resp_.lock()); + for(auto& kv: kvs_) { + set_argv[1] = kv.key; + set_argv[2] = kv.value; + set_cmd_->Initial(set_argv, db_name_); + set_cmd_->DoBinlog(); + } +} + +void MsetnxCmd::DoInitial() { + if (!CheckArg(argv_.size())) { + res_.SetRes(CmdRes::kWrongNum, kCmdNameMsetnx); + return; + } + size_t argc = argv_.size(); + if (argc % 2 == 0) { + res_.SetRes(CmdRes::kWrongNum, kCmdNameMsetnx); + return; + } + kvs_.clear(); + for (size_t index = 1; index != argc; index += 2) { + kvs_.push_back({argv_[index], argv_[index + 1]}); + } +} + +void MsetnxCmd::Do() { + success_ = 0; + rocksdb::Status s = db_->storage()->MSetnx(kvs_, &success_); + if (s.ok()) { + res_.AppendInteger(success_); + std::vector::const_iterator it; + for (it = kvs_.begin(); it != kvs_.end(); it++) { + AddSlotKey("k", it->key, db_); + } + } else if (s_.IsInvalidArgument()) { + res_.SetRes(CmdRes::kMultiKey); + } else { + res_.SetRes(CmdRes::kErrOther, s.ToString()); + } +} + +void MsetnxCmd::DoBinlog() { + if (!success_) { + //some keys already exist, set operations aborted, no need of binlog + return; + } + PikaCmdArgsType set_argv; + set_argv.resize(3); + //used "set" instead of "SET" to distinguish the binlog of SetCmd + set_argv[0] = "set"; + set_cmd_->SetConn(GetConn()); + set_cmd_->SetResp(resp_.lock()); + for (auto& kv: kvs_) { + set_argv[1] = kv.key; + set_argv[2] = kv.value; + set_cmd_->Initial(set_argv, db_name_); + 
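+    // each pair is re-initialized on the shared set_cmd_ and written as an independent "set" binlog entry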
set_cmd_->DoBinlog(); + } +} + +void GetrangeCmd::DoInitial() { + if (!CheckArg(argv_.size())) { + res_.SetRes(CmdRes::kWrongNum, kCmdNameGetrange); + return; + } + key_ = argv_[1]; + if (pstd::string2int(argv_[2].data(), argv_[2].size(), &start_) == 0) { + res_.SetRes(CmdRes::kInvalidInt); + return; + } + if (pstd::string2int(argv_[3].data(), argv_[3].size(), &end_) == 0) { + res_.SetRes(CmdRes::kInvalidInt); + return; + } +} + +void GetrangeCmd::Do() { + std::string substr; + s_= db_->storage()->Getrange(key_, start_, end_, &substr); + if (s_.ok() || s_.IsNotFound()) { + res_.AppendStringLenUint64(substr.size()); + res_.AppendContent(substr); + } else if (s_.IsInvalidArgument()) { + res_.SetRes(CmdRes::kMultiKey); + } else { + res_.SetRes(CmdRes::kErrOther, s_.ToString()); + } +} + +void GetrangeCmd::ReadCache() { + std::string substr; + auto s = db_->cache()->GetRange(key_, start_, end_, &substr); + if (s.ok()) { + res_.AppendStringLen(substr.size()); + res_.AppendContent(substr); + } else { + res_.SetRes(CmdRes::kCacheMiss); + } +} + +void GetrangeCmd::DoThroughDB() { + res_.clear(); + std::string substr; + s_ = db_->storage()->GetrangeWithValue(key_, start_, end_, &substr, &value_, &sec_); + if (s_.ok()) { + res_.AppendStringLen(substr.size()); + res_.AppendContent(substr); + } else if (s_.IsNotFound()) { + res_.AppendStringLen(substr.size()); + res_.AppendContent(substr); + } else { + res_.SetRes(CmdRes::kErrOther, s_.ToString()); + } +} + +void GetrangeCmd::DoUpdateCache() { + if (s_.ok()) { + db_->cache()->WriteKVToCache(key_, value_, sec_); + } +} + +void SetrangeCmd::DoInitial() { + if (!CheckArg(argv_.size())) { + res_.SetRes(CmdRes::kWrongNum, kCmdNameSetrange); + return; + } + key_ = argv_[1]; + if (pstd::string2int(argv_[2].data(), argv_[2].size(), &offset_) == 0) { + res_.SetRes(CmdRes::kInvalidInt); + return; + } + value_ = argv_[3]; +} + +void SetrangeCmd::Do() { + int32_t new_len = 0; + s_ = db_->storage()->Setrange(key_, offset_, value_, &new_len); + if (s_.ok()) { + res_.AppendInteger(new_len); + AddSlotKey("k", key_, db_); + } else if (s_.IsInvalidArgument()) { + res_.SetRes(CmdRes::kMultiKey); + } else { + res_.SetRes(CmdRes::kErrOther, s_.ToString()); + } +} + +void SetrangeCmd::DoThroughDB() { + Do(); +} + +void SetrangeCmd::DoUpdateCache() { + if (s_.ok()) { + db_->cache()->SetRangexx(key_, offset_, value_); + } +} + +void StrlenCmd::DoInitial() { + if (!CheckArg(argv_.size())) { + res_.SetRes(CmdRes::kWrongNum, kCmdNameStrlen); + return; + } + key_ = argv_[1]; +} + +void StrlenCmd::Do() { + int32_t len = 0; + s_ = db_->storage()->Strlen(key_, &len); + if (s_.ok() || s_.IsNotFound()) { + res_.AppendInteger(len); + } else if (s_.IsInvalidArgument()) { + res_.SetRes(CmdRes::kMultiKey); + } else { + res_.SetRes(CmdRes::kErrOther, s_.ToString()); + } +} + +void StrlenCmd::ReadCache() { + int32_t len = 0; + auto s= db_->cache()->Strlen(key_, &len); + if (s.ok()) { + res_.AppendInteger(len); + } else { + res_.SetRes(CmdRes::kCacheMiss); + } +} + +void StrlenCmd::DoThroughDB() { + res_.clear(); + s_ = db_->storage()->GetWithTTL(key_, &value_, &ttl_millsec); + if (s_.ok() || s_.IsNotFound()) { + res_.AppendInteger(value_.size()); + } else { + res_.SetRes(CmdRes::kErrOther, s_.ToString()); + } +} + +void StrlenCmd::DoUpdateCache() { + if (s_.ok()) { + db_->cache()->WriteKVToCache(key_, value_, ttl_millsec > 0 ? 
ttl_millsec : ttl_millsec / 1000); + } +} + +void ExistsCmd::DoInitial() { + if (!CheckArg(argv_.size())) { + res_.SetRes(CmdRes::kWrongNum, kCmdNameExists); + return; + } + keys_ = argv_; + keys_.erase(keys_.begin()); +} + +void ExistsCmd::Do() { + int64_t res = db_->storage()->Exists(keys_); + if (res != -1) { + res_.AppendInteger(res); + } else { + res_.SetRes(CmdRes::kErrOther, "exists internal error"); + } +} + +void ExistsCmd::Split(const HintKeys& hint_keys) { + int64_t res = db_->storage()->Exists(hint_keys.keys); + if (res != -1) { + split_res_ += res; + } else { + res_.SetRes(CmdRes::kErrOther, "exists internal error"); + } +} + +void ExistsCmd::Merge() { res_.AppendInteger(split_res_); } + +void ExistsCmd::ReadCache() { + if (keys_.size() > 1) { + res_.SetRes(CmdRes::kCacheMiss); + return; + } + bool exist = db_->cache()->Exists(keys_[0]); + if (exist) { + res_.AppendInteger(1); + } else { + res_.SetRes(CmdRes::kCacheMiss); + } +} + +void ExistsCmd::DoThroughDB() { + res_.clear(); + Do(); +} + +void ExpireCmd::DoInitial() { + if (!CheckArg(argv_.size())) { + res_.SetRes(CmdRes::kWrongNum, kCmdNameExpire); + return; + } + key_ = argv_[1]; + if (pstd::string2int(argv_[2].data(), argv_[2].size(), &ttl_sec_) == 0) { + res_.SetRes(CmdRes::kInvalidInt); + return; + } +} + +void ExpireCmd::Do() { + int32_t res = db_->storage()->Expire(key_, ttl_sec_ * 1000); + if (res != -1) { + res_.AppendInteger(res); + s_ = rocksdb::Status::OK(); + } else { + res_.SetRes(CmdRes::kErrOther, "expire internal error"); + s_ = rocksdb::Status::Corruption("expire internal error"); + } +} + +std::string ExpireCmd::ToRedisProtocol() { + std::string content; + content.reserve(RAW_ARGS_LEN); + RedisAppendLen(content, 3, "*"); + + // to expireat cmd + std::string expireat_cmd("expireat"); + RedisAppendLenUint64(content, expireat_cmd.size(), "$"); + RedisAppendContent(content, expireat_cmd); + // key + RedisAppendLenUint64(content, key_.size(), "$"); + RedisAppendContent(content, key_); + // sec + char buf[100]; + int64_t expireat = time(nullptr) + ttl_sec_; + pstd::ll2string(buf, 100, expireat); + std::string at(buf); + RedisAppendLenUint64(content, at.size(), "$"); + RedisAppendContent(content, at); + return content; +} + +void ExpireCmd::DoThroughDB() { + Do(); +} + +void ExpireCmd::DoUpdateCache() { + if (s_.ok()) { + db_->cache()->Expire(key_, ttl_sec_); + } +} + +void PexpireCmd::DoInitial() { + if (!CheckArg(argv_.size())) { + res_.SetRes(CmdRes::kWrongNum, kCmdNamePexpire); + return; + } + key_ = argv_[1]; + if (pstd::string2int(argv_[2].data(), argv_[2].size(), &ttl_millsec) == 0) { + res_.SetRes(CmdRes::kInvalidInt); + return; + } +} + +void PexpireCmd::Do() { + int64_t res = db_->storage()->Expire(key_, ttl_millsec); + if (res != -1) { + res_.AppendInteger(res); + s_ = rocksdb::Status::OK(); + } else { + res_.SetRes(CmdRes::kErrOther, "expire internal error"); + s_ = rocksdb::Status::Corruption("expire internal error"); + } +} + +std::string PexpireCmd::ToRedisProtocol() { + std::string content; + content.reserve(RAW_ARGS_LEN); + RedisAppendLenUint64(content, argv_.size(), "*"); + + // to pexpireat cmd + std::string expireat_cmd("pexpireat"); + RedisAppendLenUint64(content, expireat_cmd.size(), "$"); + RedisAppendContent(content, expireat_cmd); + // key + RedisAppendLenUint64(content, key_.size(), "$"); + RedisAppendContent(content, key_); + // sec + char buf[100]; + int64_t expireat = pstd::NowMillis() + ttl_millsec; + pstd::ll2string(buf, 100, expireat); + std::string at(buf); + 
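+  // e.g. PEXPIRE key 5000 received at t=1700000000000 ms is rewritten as "pexpireat key 1700000005000"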
RedisAppendLenUint64(content, at.size(), "$"); + RedisAppendContent(content, at); + return content; +} + +void PexpireCmd::DoThroughDB() { + Do(); +} + +void PexpireCmd::DoUpdateCache() { + if (s_.ok()) { + db_->cache()->Expire(key_, ttl_millsec); + } +} + +void ExpireatCmd::DoInitial() { + if (!CheckArg(argv_.size())) { + res_.SetRes(CmdRes::kWrongNum, kCmdNameExpireat); + return; + } + key_ = argv_[1]; + if (pstd::string2int(argv_[2].data(), argv_[2].size(), &time_stamp_sec_) == 0) { + res_.SetRes(CmdRes::kInvalidInt); + return; + } +} + +void ExpireatCmd::Do() { + int32_t res = db_->storage()->Expireat(key_, time_stamp_sec_ * 1000); + if (res != -1) { + res_.AppendInteger(res); + s_ = rocksdb::Status::OK(); + } else { + res_.SetRes(CmdRes::kErrOther, "expireat internal error"); + s_ = rocksdb::Status::Corruption("expireat internal error"); + } +} + +void ExpireatCmd::DoThroughDB() { + Do(); +} + +void ExpireatCmd::DoUpdateCache() { + if (s_.ok()) { + db_->cache()->Expireat(key_, time_stamp_sec_); + } +} + +void PexpireatCmd::DoInitial() { + if (!CheckArg(argv_.size())) { + res_.SetRes(CmdRes::kWrongNum, kCmdNamePexpireat); + return; + } + key_ = argv_[1]; + if (pstd::string2int(argv_[2].data(), argv_[2].size(), &time_stamp_millsec_) == 0) { + res_.SetRes(CmdRes::kInvalidInt); + return; + } +} + +void PexpireatCmd::Do() { + int32_t res = db_->storage()->Expireat(key_, static_cast(time_stamp_millsec_)); + if (res != -1) { + res_.AppendInteger(res); + s_ = rocksdb::Status::OK(); + } else { + res_.SetRes(CmdRes::kErrOther, "pexpireat internal error"); + s_ = rocksdb::Status::Corruption("pexpireat internal error"); + } +} + +void PexpireatCmd::DoThroughDB() { + Do(); +} + +void PexpireatCmd::DoUpdateCache() { + if (s_.ok()) { + db_->cache()->Expireat(key_, time_stamp_millsec_ / 1000); + } +} + +void TtlCmd::DoInitial() { + if (!CheckArg(argv_.size())) { + res_.SetRes(CmdRes::kWrongNum, kCmdNameTtl); + return; + } + key_ = argv_[1]; +} + +void TtlCmd::Do() { + int64_t ttl_sec_ = db_->storage()->TTL(key_); + if (ttl_sec_ == -3) { + res_.SetRes(CmdRes::kErrOther, "ttl internal error"); + } else { + res_.AppendInteger(ttl_sec_); + } +} + +void TtlCmd::ReadCache() { + int64_t timestamp = db_->cache()->TTL(key_); + if (timestamp == -3) { + res_.SetRes(CmdRes::kErrOther, "ttl internal error"); + } else if (timestamp != -2) { + res_.AppendInteger(timestamp); + } else { + res_.SetRes(CmdRes::kCacheMiss); + } +} + +void TtlCmd::DoThroughDB() { + res_.clear(); + Do(); +} + +void PttlCmd::DoInitial() { + if (!CheckArg(argv_.size())) { + res_.SetRes(CmdRes::kWrongNum, kCmdNamePttl); + return; + } + key_ = argv_[1]; +} + +void PttlCmd::Do() { + int64_t ttl_millsec = db_->storage()->PTTL(key_); + if (ttl_millsec == -3) { + res_.SetRes(CmdRes::kErrOther, "ttl internal error"); + } else { + res_.AppendInteger(ttl_millsec); + } +} + +void PttlCmd::ReadCache() { + // redis cache don't support pttl cache, so read directly from db + DoThroughDB(); +} + +void PttlCmd::DoThroughDB() { + res_.clear(); + Do(); +} + +void PersistCmd::DoInitial() { + if (!CheckArg(argv_.size())) { + res_.SetRes(CmdRes::kWrongNum, kCmdNamePersist); + return; + } + key_ = argv_[1]; +} + +void PersistCmd::Do() { + int32_t res = db_->storage()->Persist(key_); + if (res != -1) { + res_.AppendInteger(res); + s_ = rocksdb::Status::OK(); + } else { + res_.SetRes(CmdRes::kErrOther, "persist internal error"); + s_ = rocksdb::Status::Corruption("persist internal error"); + } +} + +void PersistCmd::DoThroughDB() { + Do(); +} + +void 
PersistCmd::DoUpdateCache() { + if (s_.ok()) { + db_->cache()->Persist(key_); + } +} + +void TypeCmd::DoInitial() { + if (!CheckArg(argv_.size())) { + res_.SetRes(CmdRes::kWrongNum, kCmdNameType); + return; + } + key_ = argv_[1]; +} + +void TypeCmd::Do() { + enum storage::DataType type = storage::DataType::kNones; + std::string key_type; + rocksdb::Status s = db_->storage()->GetType(key_, type); + if (s.ok()) { + res_.AppendContent("+" + std::string(DataTypeToString(type))); + } else if (s_.IsInvalidArgument()) { + res_.SetRes(CmdRes::kMultiKey); + } else { + res_.SetRes(CmdRes::kErrOther, s.ToString()); + } +} + +void TypeCmd::ReadCache() { + enum storage::DataType type = storage::DataType::kNones; + std::string key_type; + // TODO Cache GetType function + rocksdb::Status s = db_->storage()->GetType(key_, type); + if (s.ok()) { + res_.AppendContent("+" + std::string(DataTypeToString(type))); + } else { + res_.SetRes(CmdRes::kCacheMiss, s.ToString()); + } +} + +void TypeCmd::DoThroughDB() { + res_.clear(); + Do(); +} + +void ScanCmd::DoInitial() { + if (!CheckArg(argv_.size())) { + res_.SetRes(CmdRes::kWrongNum, kCmdNameScan); + return; + } + if (pstd::string2int(argv_[1].data(), argv_[1].size(), &cursor_) == 0) { + res_.SetRes(CmdRes::kInvalidInt); + return; + } + size_t index = 2; + size_t argc = argv_.size(); + + while (index < argc) { + std::string opt = argv_[index]; + if ((strcasecmp(opt.data(), "match") == 0) || (strcasecmp(opt.data(), "count") == 0) || + (strcasecmp(opt.data(), "type") == 0)) { + index++; + if (index >= argc) { + res_.SetRes(CmdRes::kSyntaxErr); + return; + } + if (strcasecmp(opt.data(), "match") == 0) { + pattern_ = argv_[index]; + } else if (strcasecmp(opt.data(), "type") == 0) { + std::string str_type = argv_[index]; + if (strcasecmp(str_type.data(), "string") == 0) { + type_ = storage::DataType::kStrings; + } else if (strcasecmp(str_type.data(), "zset") == 0) { + type_ = storage::DataType::kZSets; + } else if (strcasecmp(str_type.data(), "set") == 0) { + type_ = storage::DataType::kSets; + } else if (strcasecmp(str_type.data(), "list") == 0) { + type_ = storage::DataType::kLists; + } else if (strcasecmp(str_type.data(), "hash") == 0) { + type_ = storage::DataType::kHashes; + } else { + res_.SetRes(CmdRes::kSyntaxErr); + } + } else if ((pstd::string2int(argv_[index].data(), argv_[index].size(), &count_) == 0) || count_ <= 0) { + res_.SetRes(CmdRes::kInvalidInt); + return; + } + } else { + res_.SetRes(CmdRes::kSyntaxErr); + return; + } + index++; + } +} + +void ScanCmd::Do() { + int64_t total_key = 0; + int64_t batch_count = 0; + int64_t left = count_; + int64_t cursor_ret = cursor_; + size_t raw_limit = g_pika_conf->max_client_response_size(); + std::string raw; + std::vector keys; + // To avoid memory overflow, we call the Scan method in batches + do { + keys.clear(); + batch_count = left < PIKA_SCAN_STEP_LENGTH ? left : PIKA_SCAN_STEP_LENGTH; + left = left > PIKA_SCAN_STEP_LENGTH ? 
left - PIKA_SCAN_STEP_LENGTH : 0; + cursor_ret = db_->storage()->Scan(type_, cursor_ret, pattern_, batch_count, &keys); + for (const auto& key : keys) { + RedisAppendLenUint64(raw, key.size(), "$"); + RedisAppendContent(raw, key); + } + if (raw.size() >= raw_limit) { + res_.SetRes(CmdRes::kErrOther, "Response exceeds the max-client-response-size limit"); + return; + } + total_key += static_cast(keys.size()); + } while (cursor_ret != 0 && (left != 0)); + + res_.AppendArrayLen(2); + + char buf[32]; + int len = pstd::ll2string(buf, sizeof(buf), cursor_ret); + res_.AppendStringLen(len); + res_.AppendContent(buf); + + res_.AppendArrayLen(total_key); + res_.AppendStringRaw(raw); +} + +void ScanxCmd::DoInitial() { + if (!CheckArg(argv_.size())) { + res_.SetRes(CmdRes::kWrongNum, kCmdNameScanx); + return; + } + if (strcasecmp(argv_[1].data(), "string") == 0) { + type_ = storage::DataType::kStrings; + } else if (strcasecmp(argv_[1].data(), "hash") == 0) { + type_ = storage::DataType::kHashes; + } else if (strcasecmp(argv_[1].data(), "set") == 0) { + type_ = storage::DataType::kSets; + } else if (strcasecmp(argv_[1].data(), "zset") == 0) { + type_ = storage::DataType::kZSets; + } else if (strcasecmp(argv_[1].data(), "list") == 0) { + type_ = storage::DataType::kLists; + } else { + res_.SetRes(CmdRes::kInvalidDbType); + return; + } + + start_key_ = argv_[2]; + size_t index = 3; + size_t argc = argv_.size(); + while (index < argc) { + std::string opt = argv_[index]; + if ((strcasecmp(opt.data(), "match") == 0) || (strcasecmp(opt.data(), "count") == 0)) { + index++; + if (index >= argc) { + res_.SetRes(CmdRes::kSyntaxErr); + return; + } + if (strcasecmp(opt.data(), "match") == 0) { + pattern_ = argv_[index]; + } else if ((pstd::string2int(argv_[index].data(), argv_[index].size(), &count_) == 0) || count_ <= 0) { + res_.SetRes(CmdRes::kInvalidInt); + return; + } + } else { + res_.SetRes(CmdRes::kSyntaxErr); + return; + } + index++; + } +} + +void ScanxCmd::Do() { + std::string next_key; + std::vector keys; + rocksdb::Status s = db_->storage()->Scanx(type_, start_key_, pattern_, count_, &keys, &next_key); + + if (s.ok()) { + res_.AppendArrayLen(2); + res_.AppendStringLenUint64(next_key.size()); + res_.AppendContent(next_key); + + res_.AppendArrayLenUint64(keys.size()); + std::vector::iterator iter; + for (const auto& key : keys) { + res_.AppendString(key); + } + } else { + res_.SetRes(CmdRes::kErrOther, s.ToString()); + } +} + +void PKSetexAtCmd::DoInitial() { + if (!CheckArg(argv_.size())) { + res_.SetRes(CmdRes::kWrongNum, kCmdNamePKSetexAt); + return; + } + key_ = argv_[1]; + value_ = argv_[3]; + if ((pstd::string2int(argv_[2].data(), argv_[2].size(), &time_stamp_sec_) == 0) || time_stamp_sec_ >= INT32_MAX) { + res_.SetRes(CmdRes::kInvalidInt); + return; + } +} + +void PKSetexAtCmd::Do() { + s_ = db_->storage()->PKSetexAt(key_, value_, static_cast(time_stamp_sec_ * 1000)); + if (s_.ok()) { + res_.SetRes(CmdRes::kOk); + } else if (s_.IsInvalidArgument()) { + res_.SetRes(CmdRes::kMultiKey); + } else { + res_.SetRes(CmdRes::kErrOther, s_.ToString()); + } +} + +void PKSetexAtCmd::DoThroughDB() { + Do(); +} + +void PKSetexAtCmd::DoUpdateCache() { + if (s_.ok()) { + auto expire = time_stamp_sec_ - static_cast(std::time(nullptr)); + if (expire <= 0) [[unlikely]] { + db_->cache()->Del({key_}); + return; + } + db_->cache()->Setxx(key_, value_, expire); + } +} + +void PKScanRangeCmd::DoInitial() { + if (!CheckArg(argv_.size())) { + res_.SetRes(CmdRes::kWrongNum, kCmdNamePKScanRange); + return; + } + if 
(strcasecmp(argv_[1].data(), "string_with_value") == 0) { + type_ = storage::DataType::kStrings; + string_with_value = true; + } else if (strcasecmp(argv_[1].data(), "string") == 0) { + type_ = storage::DataType::kStrings; + } else if (strcasecmp(argv_[1].data(), "hash") == 0) { + type_ = storage::DataType::kHashes; + } else if (strcasecmp(argv_[1].data(), "set") == 0) { + type_ = storage::DataType::kSets; + } else if (strcasecmp(argv_[1].data(), "zset") == 0) { + type_ = storage::DataType::kZSets; + } else if (strcasecmp(argv_[1].data(), "list") == 0) { + type_ = storage::DataType::kLists; + } else { + res_.SetRes(CmdRes::kInvalidDbType); + return; + } + + key_start_ = argv_[2]; + key_end_ = argv_[3]; + // start key and end key hash tag have to be same in non classic mode + if (!HashtagIsConsistent(key_start_, key_start_)) { + res_.SetRes(CmdRes::kInconsistentHashTag); + return; + } + size_t index = 4; + size_t argc = argv_.size(); + while (index < argc) { + std::string opt = argv_[index]; + if ((strcasecmp(opt.data(), "match") == 0) || (strcasecmp(opt.data(), "limit") == 0)) { + index++; + if (index >= argc) { + res_.SetRes(CmdRes::kSyntaxErr); + return; + } + if (strcasecmp(opt.data(), "match") == 0) { + pattern_ = argv_[index]; + } else if ((pstd::string2int(argv_[index].data(), argv_[index].size(), &limit_) == 0) || limit_ <= 0) { + res_.SetRes(CmdRes::kInvalidInt); + return; + } + } else { + res_.SetRes(CmdRes::kSyntaxErr); + return; + } + index++; + } +} + +void PKScanRangeCmd::Do() { + std::string next_key; + std::vector keys; + std::vector kvs; + s_ = db_->storage()->PKScanRange(type_, key_start_, key_end_, pattern_, static_cast(limit_), &keys, &kvs, &next_key); + + if (s_.ok()) { + res_.AppendArrayLen(2); + res_.AppendStringLenUint64(next_key.size()); + res_.AppendContent(next_key); + if (type_ == storage::DataType::kStrings) { + res_.AppendArrayLenUint64(string_with_value ? 
2 * kvs.size() : kvs.size()); + for (const auto& kv : kvs) { + res_.AppendString(kv.key); + if (string_with_value) { + res_.AppendString(kv.value); + } + } + } else { + res_.AppendArrayLenUint64(keys.size()); + for (const auto& key : keys) { + res_.AppendString(key); + } + } + } else if (s_.IsInvalidArgument()) { + res_.SetRes(CmdRes::kMultiKey); + } else { + res_.SetRes(CmdRes::kErrOther, s_.ToString()); + } +} + +void PKRScanRangeCmd::DoInitial() { + if (!CheckArg(argv_.size())) { + res_.SetRes(CmdRes::kWrongNum, kCmdNamePKRScanRange); + return; + } + if (strcasecmp(argv_[1].data(), "string_with_value") == 0) { + type_ = storage::DataType::kStrings; + string_with_value = true; + } else if (strcasecmp(argv_[1].data(), "string") == 0) { + type_ = storage::DataType::kStrings; + } else if (strcasecmp(argv_[1].data(), "hash") == 0) { + type_ = storage::DataType::kHashes; + } else if (strcasecmp(argv_[1].data(), "set") == 0) { + type_ = storage::DataType::kSets; + } else if (strcasecmp(argv_[1].data(), "zset") == 0) { + type_ = storage::DataType::kZSets; + } else if (strcasecmp(argv_[1].data(), "list") == 0) { + type_ = storage::DataType::kLists; + } else { + res_.SetRes(CmdRes::kInvalidDbType); + return; + } + + key_start_ = argv_[2]; + key_end_ = argv_[3]; + // start key and end key hash tag have to be same in non classic mode + if (!HashtagIsConsistent(key_start_, key_start_)) { + res_.SetRes(CmdRes::kInconsistentHashTag); + return; + } + size_t index = 4; + size_t argc = argv_.size(); + while (index < argc) { + std::string opt = argv_[index]; + if ((strcasecmp(opt.data(), "match") == 0) || (strcasecmp(opt.data(), "limit") == 0)) { + index++; + if (index >= argc) { + res_.SetRes(CmdRes::kSyntaxErr); + return; + } + if (strcasecmp(opt.data(), "match") == 0) { + pattern_ = argv_[index]; + } else if ((pstd::string2int(argv_[index].data(), argv_[index].size(), &limit_) == 0) || limit_ <= 0) { + res_.SetRes(CmdRes::kInvalidInt); + return; + } + } else { + res_.SetRes(CmdRes::kSyntaxErr); + return; + } + index++; + } +} + +void PKRScanRangeCmd::Do() { + std::string next_key; + std::vector keys; + std::vector kvs; + s_ = db_->storage()->PKRScanRange(type_, key_start_, key_end_, pattern_, static_cast(limit_), + &keys, &kvs, &next_key); + + if (s_.ok()) { + res_.AppendArrayLen(2); + res_.AppendStringLenUint64(next_key.size()); + res_.AppendContent(next_key); + + if (type_ == storage::DataType::kStrings) { + res_.AppendArrayLenUint64(string_with_value ? 2 * kvs.size() : kvs.size()); + for (const auto& kv : kvs) { + res_.AppendString(kv.key); + if (string_with_value) { + res_.AppendString(kv.value); + } + } + } else { + res_.AppendArrayLenUint64(keys.size()); + for (const auto& key : keys) { + res_.AppendString(key); + } + } + } else if (s_.IsInvalidArgument()) { + res_.SetRes(CmdRes::kMultiKey); + } else { + res_.SetRes(CmdRes::kErrOther, s_.ToString()); + } +} diff --git a/tools/pika_migrate/src/pika_list.cc b/tools/pika_migrate/src/pika_list.cc new file mode 100644 index 0000000000..9cec350baa --- /dev/null +++ b/tools/pika_migrate/src/pika_list.cc @@ -0,0 +1,925 @@ +// Copyright (c) 2015-present, Qihoo, Inc. All rights reserved. +// This source code is licensed under the BSD-style license found in the +// LICENSE file in the root directory of this source tree. An additional grant +// of patent rights can be found in the PATENTS file in the same directory. 
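+// List command implementations (LINDEX, LINSERT, LPUSH/LPOP, BLPOP/BRPOP, RPOPLPUSH, ...) for the migrate tool.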
+ +#include "include/pika_list.h" +#include +#include "include/pika_cache.h" +#include "include/pika_data_distribution.h" +#include "include/pika_rm.h" +#include "include/pika_server.h" +#include "include/pika_slot_command.h" +#include "pstd/include/pstd_string.h" +#include "scope_record_lock.h" + +extern PikaServer* g_pika_server; +extern std::unique_ptr g_pika_rm; + +void LIndexCmd::DoInitial() { + if (!CheckArg(argv_.size())) { + res_.SetRes(CmdRes::kWrongNum, kCmdNameLIndex); + return; + } + key_ = argv_[1]; + std::string index = argv_[2]; + if (pstd::string2int(index.data(), index.size(), &index_) == 0) { + res_.SetRes(CmdRes::kInvalidInt); + } +} + +void LIndexCmd::Do() { + std::string value; + s_ = db_->storage()->LIndex(key_, index_, &value); + if (s_.ok()) { + res_.AppendString(value); + } else if (s_.IsInvalidArgument()) { + res_.SetRes(CmdRes::kMultiKey); + } else if (s_.IsNotFound()) { + res_.AppendStringLen(-1); + } else { + res_.SetRes(CmdRes::kErrOther, s_.ToString()); + } +} + +void LIndexCmd::ReadCache() { + std::string value; + auto s = db_->cache()->LIndex(key_, index_, &value); + if (s.ok()) { + res_.AppendString(value); + } else if (s.IsNotFound()) { + res_.SetRes(CmdRes::kCacheMiss); + } else { + res_.SetRes(CmdRes::kErrOther, s.ToString()); + } +} + +void LIndexCmd::DoThroughDB() { + res_.clear(); + Do(); +} + +void LIndexCmd::DoUpdateCache() { + if (s_.ok()) { + db_->cache()->PushKeyToAsyncLoadQueue(PIKA_KEY_TYPE_LIST, key_, db_); + } +} + +void LInsertCmd::DoInitial() { + if (!CheckArg(argv_.size())) { + res_.SetRes(CmdRes::kWrongNum, kCmdNameLInsert); + return; + } + key_ = argv_[1]; + std::string dir = argv_[2]; + if (strcasecmp(dir.data(), "before") == 0) { + dir_ = storage::Before; + } else if (strcasecmp(dir.data(), "after") == 0) { + dir_ = storage::After; + } else { + res_.SetRes(CmdRes::kSyntaxErr); + return; + } + pivot_ = argv_[3]; + value_ = argv_[4]; +} + +void LInsertCmd::Do() { + int64_t llen = 0; + s_ = db_->storage()->LInsert(key_, dir_, pivot_, value_, &llen); + if (s_.ok() || s_.IsNotFound()) { + res_.AppendInteger(llen); + AddSlotKey("l", key_, db_); + } else if (s_.IsInvalidArgument()) { + res_.SetRes(CmdRes::kMultiKey); + } else { + res_.SetRes(CmdRes::kErrOther, s_.ToString()); + } +} + +void LInsertCmd::DoThroughDB() { + Do(); +} + +void LInsertCmd::DoUpdateCache() { + if (s_.ok()) { + db_->cache()->LInsert(key_, dir_, pivot_, value_); + } +} + +void LLenCmd::DoInitial() { + if (!CheckArg(argv_.size())) { + res_.SetRes(CmdRes::kWrongNum, kCmdNameLLen); + return; + } + key_ = argv_[1]; +} + +void LLenCmd::Do() { + uint64_t llen = 0; + s_ = db_->storage()->LLen(key_, &llen); + if (s_.ok() || s_.IsNotFound()) { + res_.AppendInteger(static_cast(llen)); + } else if (s_.IsInvalidArgument()) { + res_.SetRes(CmdRes::kMultiKey); + } else { + res_.SetRes(CmdRes::kErrOther, s_.ToString()); + } +} + +void LLenCmd::ReadCache() { + uint64_t llen = 0; + auto s = db_->cache()->LLen(key_, &llen); + if (s.ok()) { + res_.AppendInteger(llen); + } else if (s.IsNotFound()) { + res_.SetRes(CmdRes::kCacheMiss); + } else { + res_.SetRes(CmdRes::kErrOther, s.ToString()); + } +} + +void LLenCmd::DoThroughDB() { + res_.clear(); + Do(); +} + +void LLenCmd::DoUpdateCache() { + if (s_.ok()) { + db_->cache()->PushKeyToAsyncLoadQueue(PIKA_KEY_TYPE_LIST, key_, db_); + } +} + +void BlockingBaseCmd::TryToServeBLrPopWithThisKey(const std::string& key, std::shared_ptr db) { + std::shared_ptr curr_conn = std::dynamic_pointer_cast(GetConn()); + if (!curr_conn) { + // current node 
is a slave and is applying a binlog of lpush/rpush/rpoplpush, just return + return; + } + auto dispatchThread = dynamic_cast(curr_conn->thread()); + + { + std::shared_lock read_latch(dispatchThread->GetBlockMtx()); + auto& key_to_conns = dispatchThread->GetMapFromKeyToConns(); + net::BlockKey blrPop_key{curr_conn->GetCurrentTable(), key}; + + if (auto it = key_to_conns.find(blrPop_key); it == key_to_conns.end()) { + // no client is waitting for this key + return; + } + } + + auto* args = new UnblockTaskArgs(key, std::move(db), dispatchThread); + bool is_slow_cmd = g_pika_conf->is_slow_cmd("LPOP") || g_pika_conf->is_slow_cmd("RPOP"); + bool is_admin_cmd = false; + g_pika_server->ScheduleClientPool(&ServeAndUnblockConns, args, is_slow_cmd, is_admin_cmd); +} + +void BlockingBaseCmd::ServeAndUnblockConns(void* args) { + auto bg_args = std::unique_ptr(static_cast(args)); + net::DispatchThread* dispatchThread = bg_args->dispatchThread; + std::shared_ptr db = bg_args->db; + std::string key = std::move(bg_args->key); + auto& key_to_conns_ = dispatchThread->GetMapFromKeyToConns(); + net::BlockKey blrPop_key{db->GetDBName(), key}; + + pstd::lock::ScopeRecordLock record_lock(db->LockMgr(), key);//It's a RAII Lock + std::unique_lock map_lock(dispatchThread->GetBlockMtx());// do not change the sequence of these 3 locks, or deadlock will happen + auto it = key_to_conns_.find(blrPop_key); + if (it == key_to_conns_.end()) { + return; + } + CmdRes res; + std::vector pop_binlog_args; + auto& waitting_list = it->second; + std::vector values; + rocksdb::Status s; + // traverse this list from head to tail(in the order of adding sequence) ,means "first blocked, first get served“ + for (auto conn_blocked = waitting_list->begin(); conn_blocked != waitting_list->end();) { + if (conn_blocked->GetBlockType() == BlockKeyType::Blpop) { + s = db->storage()->LPop(key, 1, &values); + } else { // BlockKeyType is Brpop + s = db->storage()->RPop(key, 1, &values); + } + if (s.ok()) { + res.AppendArrayLen(2); + res.AppendString(key); + res.AppendString(values[0]); + } else if (s.IsNotFound() || s.ToString().substr(0, std::char_traits::length(ErrTypeMessage)) == ErrTypeMessage) { + // this key has no more elements to serve more blocked conn. 
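+      // leave the rest of the waiting list blocked; a later push (or the timeout cleanup) will serve them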
+ break; + } else { + res.SetRes(CmdRes::kErrOther, s.ToString()); + } + auto conn_ptr = conn_blocked->GetConnBlocked(); + // send response to this client + conn_ptr->WriteResp(res.message()); + res.clear(); + conn_ptr->NotifyEpoll(true); + pop_binlog_args.emplace_back(conn_blocked->GetBlockType(), key, db, conn_ptr); + conn_blocked = waitting_list->erase(conn_blocked); // remove this conn from current waiting list + // erase all waiting info of this conn + dispatchThread->CleanWaitNodeOfUnBlockedBlrConn(conn_ptr); + } + dispatchThread->CleanKeysAfterWaitNodeCleaned(); + map_lock.unlock(); + WriteBinlogOfPopAndUpdateCache(pop_binlog_args); +} + +void BlockingBaseCmd::WriteBinlogOfPopAndUpdateCache(std::vector& pop_args) { + // write binlog of l/rpop + for (auto& pop_arg : pop_args) { + std::shared_ptr pop_cmd; + std::string pop_type; + if (pop_arg.block_type == BlockKeyType::Blpop) { + pop_type = kCmdNameLPop; + pop_cmd = std::make_shared(kCmdNameLPop, 2, kCmdFlagsWrite | kCmdFlagsList); + } else if (pop_arg.block_type == BlockKeyType::Brpop) { + pop_type = kCmdNameRPop; + pop_cmd = std::make_shared(kCmdNameRPop, 2, kCmdFlagsWrite | kCmdFlagsList); + } + + PikaCmdArgsType args; + args.push_back(std::move(pop_type)); + args.push_back(pop_arg.key); + pop_cmd->Initial(args, pop_arg.db->GetDBName()); + pop_cmd->SetConn(pop_arg.conn); + auto resp_ptr = std::make_shared("this resp won't be used for current code(consensus-level always be 0)"); + pop_cmd->SetResp(resp_ptr); + pop_cmd->DoUpdateCache(); + pop_cmd->DoBinlog(); + } +} + +void LPushCmd::DoInitial() { + if (!CheckArg(argv_.size())) { + res_.SetRes(CmdRes::kWrongNum, kCmdNameLPush); + return; + } + key_ = argv_[1]; + size_t pos = 2; + while (pos < argv_.size()) { + values_.push_back(argv_[pos++]); + } +} + +void LPushCmd::Do() { + uint64_t llen = 0; + s_ = db_->storage()->LPush(key_, values_, &llen); + if (s_.ok()) { + res_.AppendInteger(static_cast(llen)); + AddSlotKey("l", key_, db_); + } else if (s_.IsInvalidArgument()) { + res_.SetRes(CmdRes::kMultiKey); + } else { + res_.SetRes(CmdRes::kErrOther, s_.ToString()); + } + if (auto client_conn = std::dynamic_pointer_cast(GetConn()); client_conn != nullptr) { + if (client_conn->IsInTxn()) { + return; + } + } + TryToServeBLrPopWithThisKey(key_, db_); +} + +void LPushCmd::DoThroughDB() { + Do(); +} + +void LPushCmd::DoUpdateCache() { + if (s_.ok()) { + db_->cache()->LPushx(key_, values_); + } +} + +void BlockingBaseCmd::BlockThisClientToWaitLRPush(BlockKeyType block_pop_type, std::vector& keys, + int64_t expire_time) { + std::shared_ptr conn_to_block = std::dynamic_pointer_cast(GetConn()); + + auto dispatchThread = dynamic_cast(conn_to_block->thread()); + std::lock_guard latch(dispatchThread->GetBlockMtx()); + auto& key_to_conns = dispatchThread->GetMapFromKeyToConns(); + auto& conn_to_keys_ = dispatchThread->GetMapFromConnToKeys(); + + std::vector blrpop_keys; + for (auto& key : keys) { + net::BlockKey blrpop_key{conn_to_block->GetCurrentTable(), key}; + blrpop_keys.push_back(blrpop_key); + auto it = key_to_conns.find(blrpop_key); + if (it == key_to_conns.end()) { + // no waiting info found, means no other clients are waiting for the list related with this key right now + key_to_conns.emplace(blrpop_key, std::make_unique>()); + it = key_to_conns.find(blrpop_key); + } + auto& wait_list_of_this_key = it->second; + // add current client-connection to the tail of waiting list of this key + wait_list_of_this_key->emplace_back(expire_time, conn_to_block, block_pop_type); + } + + // construct a 
list of keys and insert into this map as value(while key of the map is conn_fd) + conn_to_keys_.emplace(conn_to_block->fd(), + std::make_unique>(blrpop_keys.begin(), blrpop_keys.end())); +} + +void BlockingBaseCmd::removeDuplicates(std::vector& keys_) { + std::unordered_set seen; + auto it = std::remove_if(keys_.begin(), keys_.end(), [&seen](const auto& key) { return !seen.insert(key).second; }); + keys_.erase(it, keys_.end()); +} + +void BLPopCmd::DoInitial() { + if (!CheckArg(argv_.size())) { + res_.SetRes(CmdRes::kWrongNum, kCmdNameBLPop); + return; + } + + // fetching all keys(*argv_.begin is the command itself and *argv_.end() is the timeout value) + keys_.assign(++argv_.begin(), --argv_.end()); + removeDuplicates(keys_); + int64_t timeout = 0; + if (!pstd::string2int(argv_.back().data(), argv_.back().size(), &timeout)) { + res_.SetRes(CmdRes::kInvalidInt); + return; + } + constexpr int64_t seconds_of_ten_years = 10 * 365 * 24 * 3600; + if (timeout < 0 || timeout > seconds_of_ten_years) { + res_.SetRes(CmdRes::kErrOther, + "timeout can't be a negative value and can't exceed the number of seconds in 10 years"); + return; + } + + if (timeout > 0) { + auto now = std::chrono::system_clock::now(); + expire_time_ = + std::chrono::time_point_cast(now).time_since_epoch().count() + timeout * 1000; + } // else(timeout is 0): expire_time_ default value is 0, means never expire; +} + +void BLPopCmd::Do() { + for (auto& this_key : keys_) { + std::vector values; + rocksdb::Status s = db_->storage()->LPop(this_key, 1, &values); + if (s.ok()) { + res_.AppendArrayLen(2); + res_.AppendString(this_key); + res_.AppendString(values[0]); + // write a binlog of lpop + binlog_args_.block_type = BlockKeyType::Blpop; + binlog_args_.key = this_key; + binlog_args_.db = db_; + binlog_args_.conn = GetConn(); + is_binlog_deferred_ = false; + return; + } else if (s.IsNotFound()) { + continue; + } else if (s_.IsInvalidArgument()) { + res_.SetRes(CmdRes::kMultiKey); + return; + } else { + res_.SetRes(CmdRes::kErrOther, s.ToString()); + return; + } + } + is_binlog_deferred_ = true; + if (auto client_conn = std::dynamic_pointer_cast(GetConn()); client_conn != nullptr) { + if (client_conn->IsInTxn()) { + res_.AppendArrayLen(-1); + return ; + } + } + BlockThisClientToWaitLRPush(BlockKeyType::Blpop, keys_, expire_time_); +} + +void BLPopCmd::DoBinlog() { + if (is_binlog_deferred_) { + return; + } + std::vector args; + args.push_back(std::move(binlog_args_)); + WriteBinlogOfPopAndUpdateCache(args); +} + +void LPopCmd::DoInitial() { + if (!CheckArg(argv_.size())) { + res_.SetRes(CmdRes::kWrongNum, kCmdNameLPop); + return; + } + key_ = argv_[1]; + size_t argc = argv_.size(); + if (argc > 3) { + res_.SetRes(CmdRes::kWrongNum, kCmdNameLPop); + } else if (argc == 3) { + if (pstd::string2int(argv_[2].data(), argv_[2].size(), &count_) == 0) { + res_.SetRes(CmdRes::kErrOther, kCmdNameLPop); + return; + } + if (count_ < 0) { + res_.SetRes(CmdRes::kSyntaxErr); + return; + } + } +} + +void LPopCmd::Do() { + std::vector elements; + s_ = db_->storage()->LPop(key_, count_, &elements); + + if (s_.ok()) { + if (elements.size() > 1) { + res_.AppendArrayLenUint64(elements.size()); + } + for (const auto& element : elements) { + res_.AppendString(element); + } + } else if (s_.IsInvalidArgument()) { + res_.SetRes(CmdRes::kMultiKey); + } else if (s_.IsNotFound()) { + res_.AppendStringLen(-1); + } else { + res_.SetRes(CmdRes::kErrOther, s_.ToString()); + } +} + +void LPopCmd::DoThroughDB() { + Do(); +} + +void LPopCmd::DoUpdateCache() { + if 
(s_.ok()) { + std::string value; + db_->cache()->LPop(key_, &value); + } +} + +void LPushxCmd::DoInitial() { + if (!CheckArg(argv_.size())) { + res_.SetRes(CmdRes::kWrongNum, kCmdNameLPushx); + return; + } + key_ = argv_[1]; + size_t pos = 2; + while (pos < argv_.size()) { + values_.push_back(argv_[pos++]); + } +} + +void LPushxCmd::Do() { + uint64_t llen = 0; + s_ = db_->storage()->LPushx(key_, values_, &llen); + if (s_.ok() || s_.IsNotFound()) { + res_.AppendInteger(static_cast(llen)); + AddSlotKey("l", key_, db_); + } else if (s_.IsInvalidArgument()) { + res_.SetRes(CmdRes::kMultiKey); + } else { + res_.SetRes(CmdRes::kErrOther, s_.ToString()); + } +} + +void LPushxCmd::DoThroughDB() { + Do(); +} + +void LPushxCmd::DoUpdateCache() { + if (s_.ok()) { + db_->cache()->LPushx(key_, values_); + } +} + +void LRangeCmd::DoInitial() { + if (!CheckArg(argv_.size())) { + res_.SetRes(CmdRes::kWrongNum, kCmdNameLRange); + return; + } + key_ = argv_[1]; + std::string left = argv_[2]; + if (pstd::string2int(left.data(), left.size(), &left_) == 0) { + res_.SetRes(CmdRes::kInvalidInt); + return; + } + std::string right = argv_[3]; + if (pstd::string2int(right.data(), right.size(), &right_) == 0) { + res_.SetRes(CmdRes::kInvalidInt); + } +} + +void LRangeCmd::Do() { + std::vector values; + s_ = db_->storage()->LRange(key_, left_, right_, &values); + if (s_.ok()) { + res_.AppendArrayLenUint64(values.size()); + for (const auto& value : values) { + res_.AppendString(value); + } + } else if (s_.IsInvalidArgument()) { + res_.SetRes(CmdRes::kMultiKey); + } else if (s_.IsNotFound()) { + res_.AppendArrayLen(0); + } else { + res_.SetRes(CmdRes::kErrOther, s_.ToString()); + } +} + +void LRangeCmd::ReadCache() { + std::vector values; + auto s = db_->cache()->LRange(key_, left_, right_, &values); + if (s.ok()) { + res_.AppendArrayLen(values.size()); + for (const auto& value : values) { + res_.AppendString(value); + } + } else if (s.IsNotFound()) { + res_.SetRes(CmdRes::kCacheMiss); + } else { + res_.SetRes(CmdRes::kErrOther, s.ToString()); + } +} + +void LRangeCmd::DoThroughDB() { + res_.clear(); + Do(); +} + +void LRangeCmd::DoUpdateCache() { + if (s_.ok()) { + db_->cache()->PushKeyToAsyncLoadQueue(PIKA_KEY_TYPE_LIST, key_, db_); + } +} + +void LRemCmd::DoInitial() { + if (!CheckArg(argv_.size())) { + res_.SetRes(CmdRes::kWrongNum, kCmdNameLRem); + return; + } + key_ = argv_[1]; + std::string count = argv_[2]; + if (pstd::string2int(count.data(), count.size(), &count_) == 0) { + res_.SetRes(CmdRes::kInvalidInt); + return; + } + value_ = argv_[3]; +} + +void LRemCmd::Do() { + uint64_t res = 0; + s_ = db_->storage()->LRem(key_, count_, value_, &res); + if (s_.ok() || s_.IsNotFound()) { + res_.AppendInteger(static_cast(res)); + } else if (s_.IsInvalidArgument()) { + res_.SetRes(CmdRes::kMultiKey); + } else { + res_.SetRes(CmdRes::kErrOther, s_.ToString()); + } +} + +void LRemCmd::DoThroughDB() { + Do(); +} + +void LRemCmd::DoUpdateCache() { + if (s_.ok()) { + db_->cache()->LRem(key_, count_, value_); + } +} + +void LSetCmd::DoInitial() { + if (!CheckArg(argv_.size())) { + res_.SetRes(CmdRes::kWrongNum, kCmdNameLSet); + return; + } + key_ = argv_[1]; + std::string index = argv_[2]; + if (pstd::string2int(index.data(), index.size(), &index_) == 0) { + res_.SetRes(CmdRes::kInvalidInt); + return; + } + value_ = argv_[3]; +} + +void LSetCmd::Do() { + s_ = db_->storage()->LSet(key_, index_, value_); + if (s_.ok()) { + res_.SetRes(CmdRes::kOk); + AddSlotKey("l", key_, db_); + } else if (s_.IsNotFound()) { + 
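+    // key does not exist: LSET replies with the standard no-such-key error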
res_.SetRes(CmdRes::kNotFound); + } else if (s_.IsCorruption() && s_.ToString() == "Corruption: index out of range") { + // TODO(): refine return value + res_.SetRes(CmdRes::kOutOfRange); + } else if (s_.IsInvalidArgument()) { + res_.SetRes(CmdRes::kMultiKey); + } else { + res_.SetRes(CmdRes::kErrOther, s_.ToString()); + } +} + +void LSetCmd::DoThroughDB() { + Do(); +} + +void LSetCmd::DoUpdateCache() { + if (s_.ok()) { + db_->cache()->LSet(key_, index_, value_); + } +} + +void LTrimCmd::DoInitial() { + if (!CheckArg(argv_.size())) { + res_.SetRes(CmdRes::kWrongNum, kCmdNameLSet); + return; + } + key_ = argv_[1]; + std::string start = argv_[2]; + if (pstd::string2int(start.data(), start.size(), &start_) == 0) { + res_.SetRes(CmdRes::kInvalidInt); + return; + } + std::string stop = argv_[3]; + if (pstd::string2int(stop.data(), stop.size(), &stop_) == 0) { + res_.SetRes(CmdRes::kInvalidInt); + } +} + +void LTrimCmd::Do() { + s_ = db_->storage()->LTrim(key_, start_, stop_); + if (s_.ok() || s_.IsNotFound()) { + res_.SetRes(CmdRes::kOk); + } else if (s_.IsInvalidArgument()) { + res_.SetRes(CmdRes::kMultiKey); + } else { + res_.SetRes(CmdRes::kErrOther, s_.ToString()); + } +} + +void LTrimCmd::DoThroughDB() { + Do(); +} + +void LTrimCmd::DoUpdateCache() { + if (s_.ok()) { + db_->cache()->LTrim(key_, start_, stop_); + } +} + +void BRPopCmd::Do() { + for (auto& this_key : keys_) { + std::vector values; + s_ = db_->storage()->RPop(this_key, 1, &values); + if (s_.ok()) { + res_.AppendArrayLen(2); + res_.AppendString(this_key); + res_.AppendString(values[0]); + // write an binlog of rpop + binlog_args_.block_type = BlockKeyType::Brpop; + binlog_args_.key = this_key; + binlog_args_.db = db_; + binlog_args_.conn = GetConn(); + is_binlog_deferred_ = false; + return; + } else if (s_.IsNotFound()) { + continue; + } else if (s_.IsInvalidArgument()) { + res_.SetRes(CmdRes::kMultiKey); + return; + } else { + res_.SetRes(CmdRes::kErrOther, s_.ToString()); + return; + } + } + is_binlog_deferred_ = true; + if (auto client_conn = std::dynamic_pointer_cast(GetConn()); client_conn != nullptr) { + if (client_conn->IsInTxn()) { + res_.AppendArrayLen(-1); + return ; + } + } + BlockThisClientToWaitLRPush(BlockKeyType::Brpop, keys_, expire_time_); +} + +void BRPopCmd::DoInitial() { + if (!CheckArg(argv_.size())) { + res_.SetRes(CmdRes::kWrongNum, kCmdNameBLPop); + return; + } + + // fetching all keys(*argv_.begin is the command itself and *argv_.end() is the timeout value) + keys_.assign(++argv_.begin(), --argv_.end()); + removeDuplicates(keys_); + int64_t timeout = 0; + if (!pstd::string2int(argv_.back().data(), argv_.back().size(), &timeout)) { + res_.SetRes(CmdRes::kInvalidInt); + return; + } + constexpr int64_t seconds_of_ten_years = 10 * 365 * 24 * 3600; + if (timeout < 0 || timeout > seconds_of_ten_years) { + res_.SetRes(CmdRes::kErrOther, + "timeout can't be a negative value and can't exceed the number of seconds in 10 years"); + return; + } + + if (timeout > 0) { + auto now = std::chrono::system_clock::now(); + expire_time_ = + std::chrono::time_point_cast(now).time_since_epoch().count() + timeout * 1000; + } // else(timeout is 0): expire_time_ default value is 0, means never expire; +} + +void BRPopCmd::DoBinlog() { + if (is_binlog_deferred_) { + return; + } + std::vector args; + args.push_back(std::move(binlog_args_)); + WriteBinlogOfPopAndUpdateCache(args); +} + + + +void RPopCmd::DoInitial() { + if (!CheckArg(argv_.size())) { + res_.SetRes(CmdRes::kWrongNum, kCmdNameRPop); + return; + } + key_ = argv_[1]; 
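+  // optional COUNT argument ("rpop key [count]"), so more than three args is a wrong-number error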
+ if (argv_.size() > 3) { + res_.SetRes(CmdRes::kWrongNum, kCmdNameRPop); + } else if (argv_.size() == 3) { + if (pstd::string2int(argv_[2].data(), argv_[2].size(), &count_) == 0) { + res_.SetRes(CmdRes::kErrOther, kCmdNameRPop); + return; + } + if (count_ < 0) { + res_.SetRes(CmdRes::kSyntaxErr); + return; + } + } +} + +void RPopCmd::Do() { + std::vector elements; + s_ = db_->storage()->RPop(key_, count_, &elements); + if (s_.ok()) { + if (elements.size() > 1) { + res_.AppendArrayLenUint64(elements.size()); + } + for (const auto &element: elements) { + res_.AppendString(element); + } + } else if (s_.IsNotFound()) { + res_.AppendStringLen(-1); + } else if (s_.IsInvalidArgument()) { + res_.SetRes(CmdRes::kMultiKey); + } else { + res_.SetRes(CmdRes::kErrOther, s_.ToString()); + } +} + +void RPopCmd::DoThroughDB() { + Do(); +} + +void RPopCmd::DoUpdateCache() { + if (s_.ok()) { + std::string value; + db_->cache()->RPop(key_, &value); + } +} + +void RPopLPushCmd::DoInitial() { + if (!CheckArg(argv_.size())) { + res_.SetRes(CmdRes::kWrongNum, kCmdNameRPopLPush); + return; + } + source_ = argv_[1]; + receiver_ = argv_[2]; + if (!HashtagIsConsistent(source_, receiver_)) { + res_.SetRes(CmdRes::kInconsistentHashTag); + } +} + +void RPopLPushCmd::Do() { + std::string value; + s_ = db_->storage()->RPoplpush(source_, receiver_, &value); + if (s_.ok()) { + AddSlotKey("k", receiver_, db_); + res_.AppendString(value); + value_poped_from_source_ = value; + is_write_binlog_ = true; + } else if (s_.IsNotFound()) { + // no actual write operation happened, will not write binlog + res_.AppendStringLen(-1); + is_write_binlog_ = false; + return; + } else if (s_.IsInvalidArgument()) { + res_.SetRes(CmdRes::kMultiKey); + return; + } else { + res_.SetRes(CmdRes::kErrOther, s_.ToString()); + return; + } + TryToServeBLrPopWithThisKey(receiver_, db_); +} + +void RPopLPushCmd::ReadCache() { + res_.SetRes(CmdRes::kErrOther, "the command is not support in cache mode"); +} + +void RPopLPushCmd::DoBinlog() { + if (!is_write_binlog_) { + return; + } + PikaCmdArgsType rpop_args; + rpop_args.push_back("RPOP"); + rpop_args.push_back(source_); + rpop_cmd_->Initial(rpop_args, db_name_); + + PikaCmdArgsType lpush_args; + lpush_args.push_back("LPUSH"); + lpush_args.push_back(receiver_); + lpush_args.push_back(value_poped_from_source_); + lpush_cmd_->Initial(lpush_args, db_name_); + + rpop_cmd_->SetConn(GetConn()); + rpop_cmd_->SetResp(resp_.lock()); + lpush_cmd_->SetConn(GetConn()); + lpush_cmd_->SetResp(resp_.lock()); + + rpop_cmd_->DoBinlog(); + lpush_cmd_->DoBinlog(); +} + +void RPushCmd::DoInitial() { + if (!CheckArg(argv_.size())) { + res_.SetRes(CmdRes::kWrongNum, kCmdNameRPush); + return; + } + key_ = argv_[1]; + size_t pos = 2; + while (pos < argv_.size()) { + values_.push_back(argv_[pos++]); + } +} + +void RPushCmd::Do() { + uint64_t llen = 0; + s_ = db_->storage()->RPush(key_, values_, &llen); + if (s_.ok()) { + res_.AppendInteger(static_cast(llen)); + AddSlotKey("l", key_, db_); + } else if (s_.IsInvalidArgument()) { + res_.SetRes(CmdRes::kMultiKey); + } else { + res_.SetRes(CmdRes::kErrOther, s_.ToString()); + } + if (auto client_conn = std::dynamic_pointer_cast(GetConn()); client_conn != nullptr) { + if (client_conn->IsInTxn()) { + return; + } + } + TryToServeBLrPopWithThisKey(key_, db_); +} + +void RPushCmd::DoThroughDB() { + Do(); +} + +void RPushCmd::DoUpdateCache() { + if (s_.ok()) { + db_->cache()->RPushx(key_, values_); + } +} + +void RPushxCmd::DoInitial() { + if (!CheckArg(argv_.size())) { + 
res_.SetRes(CmdRes::kWrongNum, kCmdNameRPushx); + return; + } + key_ = argv_[1]; + size_t pos = 2; + while (pos < argv_.size()) { + values_.push_back(argv_[pos++]); + } +} + +void RPushxCmd::Do() { + uint64_t llen = 0; + s_ = db_->storage()->RPushx(key_, values_, &llen); + if (s_.ok() || s_.IsNotFound()) { + res_.AppendInteger(static_cast(llen)); + AddSlotKey("l", key_, db_); + } else if (s_.IsInvalidArgument()) { + res_.SetRes(CmdRes::kMultiKey); + } else { + res_.SetRes(CmdRes::kErrOther, s_.ToString()); + } +} + +void RPushxCmd::DoThroughDB() { + Do(); +} + +void RPushxCmd::DoUpdateCache() { + if (s_.ok()) { + db_->cache()->RPushx(key_, values_); + } +} \ No newline at end of file diff --git a/tools/pika_migrate/src/pika_migrate_thread.cc b/tools/pika_migrate/src/pika_migrate_thread.cc new file mode 100644 index 0000000000..fd221f0b8e --- /dev/null +++ b/tools/pika_migrate/src/pika_migrate_thread.cc @@ -0,0 +1,979 @@ +#include + +#include + +#include "include/pika_admin.h" +#include "include/pika_cmd_table_manager.h" +#include "include/pika_command.h" +#include "include/pika_conf.h" +#include "include/pika_define.h" +#include "include/pika_migrate_thread.h" +#include "include/pika_rm.h" +#include "include/pika_server.h" +#include "include/pika_slot_command.h" +#include "pstd/include/pika_codis_slot.h" +#include "src/redis_streams.h" + +#define min(a, b) (((a) > (b)) ? (b) : (a)) + +const int32_t MAX_MEMBERS_NUM = 512; +const std::string INVALID_STR = "NL"; + +extern std::unique_ptr g_pika_server; +extern std::unique_ptr g_pika_conf; +extern std::unique_ptr g_pika_rm; +extern std::unique_ptr g_pika_cmd_table_manager; + +// do migrate key to dest pika server +static int doMigrate(net::NetCli *cli, std::string send_str) { + pstd::Status s; + s = cli->Send(&send_str); + if (!s.ok()) { + LOG(WARNING) << "DB Migrate Send error: " << s.ToString(); + return -1; + } + return 1; +} + +// do migrate cli auth +static int doAuth(net::NetCli *cli) { + net::RedisCmdArgsType argv; + std::string wbuf_str; + std::string requirepass = g_pika_conf->requirepass(); + if (requirepass != "") { + argv.emplace_back("auth"); + argv.emplace_back(requirepass); + } else { + argv.emplace_back("ping"); + } + net::SerializeRedisCommand(argv, &wbuf_str); + + pstd::Status s; + s = cli->Send(&wbuf_str); + if (!s.ok()) { + LOG(WARNING) << "DB Migrate auth Send error: " << s.ToString(); + return -1; + } + // Recv + s = cli->Recv(&argv); + if (!s.ok()) { + LOG(WARNING) << "DB Migrate auth Recv error: " << s.ToString(); + return -1; + } + pstd::StringToLower(argv[0]); + if (argv[0] != "ok" && argv[0] != "pong" && argv[0].find("no password") == std::string::npos) { + LOG(WARNING) << "DB Migrate auth error: " << argv[0]; + return -1; + } + return 0; +} + +static int migrateKeyTTl(net::NetCli *cli, const std::string& key, storage::DataType data_type, + const std::shared_ptr& db) { + net::RedisCmdArgsType argv; + std::string send_str; + int64_t type_timestamp = db->storage()->TTL(key); + if (PIKA_TTL_ZERO == type_timestamp || PIKA_TTL_STALE == type_timestamp) { + argv.emplace_back("del"); + argv.emplace_back(key); + net::SerializeRedisCommand(argv, &send_str); + } else if (0 < type_timestamp) { + argv.emplace_back("expire"); + argv.emplace_back(key); + argv.emplace_back(std::to_string(type_timestamp)); + net::SerializeRedisCommand(argv, &send_str); + } else { + // no expire + return 0; + } + + if (doMigrate(cli, send_str) < 0) { + return -1; + } + + return 1; +} + +// get set key all values +static int setGetall(const 
std::string& key, std::vector *members, const std::shared_ptr& db) { + rocksdb::Status s = db->storage()->SMembers(key, members); + if (!s.ok()) { + if (s.IsNotFound()) { + LOG(WARNING) << "Set get key: " << key << " value not found "; + return 0; + } else { + LOG(WARNING) << "Set get key: " << key << " value error: " << s.ToString(); + return -1; + } + } + return 1; +} + +static int MigrateKv(net::NetCli *cli, const std::string& key, const std::shared_ptr& db) { + std::string value; + rocksdb::Status s = db->storage()->Get(key, &value); + if (!s.ok()) { + if (s.IsNotFound()) { + LOG(WARNING) << "Get kv key: " << key << " not found "; + return 0; + } else { + LOG(WARNING) << "Get kv key: " << key << " error: " << strerror(errno); + return -1; + } + } + + net::RedisCmdArgsType argv; + std::string send_str; + argv.emplace_back("SET"); + argv.emplace_back(key); + argv.emplace_back(value); + net::SerializeRedisCommand(argv, &send_str); + + int send_num = 0; + if (doMigrate(cli, send_str) < 0) { + return -1; + } else { + ++send_num; + } + + int r; + if (0 > (r = migrateKeyTTl(cli, key, storage::DataType::kStrings, db))) { + return -1; + } else { + send_num += r; + } + + return send_num; +} + +static int MigrateHash(net::NetCli *cli, const std::string& key, const std::shared_ptr& db) { + int send_num = 0; + int64_t cursor = 0; + std::vector field_values; + rocksdb::Status s; + + do { + s = db->storage()->HScan(key, cursor, "*", MAX_MEMBERS_NUM, &field_values, &cursor); + if (s.ok() && field_values.size() > 0) { + net::RedisCmdArgsType argv; + std::string send_str; + argv.emplace_back("HMSET"); + argv.emplace_back(key); + for (const auto &field_value : field_values) { + argv.emplace_back(field_value.field); + argv.emplace_back(field_value.value); + } + net::SerializeRedisCommand(argv, &send_str); + if (doMigrate(cli, send_str) < 0) { + return -1; + } else { + ++send_num; + } + } + } while (cursor != 0 && s.ok()); + + if (send_num > 0) { + int r; + if ((r = migrateKeyTTl(cli, key, storage::DataType::kHashes, db)) < 0) { + return -1; + } else { + send_num += r; + } + } + + return send_num; +} + +static int MigrateList(net::NetCli *cli, const std::string& key, const std::shared_ptr& db) { + // del old key, before migrate list; prevent redo when failed + int send_num = 0; + net::RedisCmdArgsType argv; + std::string send_str; + argv.emplace_back("DEL"); + argv.emplace_back(key); + net::SerializeRedisCommand(argv, &send_str); + if (doMigrate(cli, send_str) < 0) { + return -1; + } else { + ++send_num; + } + + std::vector values; + rocksdb::Status s = db->storage()->LRange(key, 0, -1, &values); + if (s.ok()) { + auto iter = values.begin(); + while (iter != values.end()) { + net::RedisCmdArgsType argv; + std::string send_str; + argv.emplace_back("RPUSH"); + argv.emplace_back(key); + + for (int i = 0; iter != values.end() && i < MAX_MEMBERS_NUM; ++iter, ++i) { + argv.emplace_back(*iter); + } + + net::SerializeRedisCommand(argv, &send_str); + if (doMigrate(cli, send_str) < 0) { + return -1; + } else { + ++send_num; + } + } + } + + // has send del key command + if (send_num > 1) { + int r; + if (0 > (r = migrateKeyTTl(cli, key, storage::DataType::kLists, db))) { + return -1; + } else { + send_num += r; + } + } + + return send_num; +} + +static int MigrateStreams(net::NetCli *cli, const std::string& key, const std::shared_ptr& db) { + int send_num = 0; + int64_t cursor = 0; + std::vector members; + rocksdb::Status s; + + std::vector id_messages; + storage::StreamScanArgs arg; + 
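+  // "-" / "+" parse to the minimal / maximal stream IDs, so the XRange call below walks the whole stream (as XRANGE - + would)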
storage::StreamUtils::StreamParseIntervalId("-", arg.start_sid, &arg.start_ex, 0); + storage::StreamUtils::StreamParseIntervalId("+", arg.end_sid, &arg.end_ex, UINT64_MAX); + s = db->storage()->XRange(key, arg, id_messages); + if (s.ok()) { + net::RedisCmdArgsType argv; + std::string send_str; + argv.emplace_back("XADD"); + argv.emplace_back(key); + for (auto &fv : id_messages) { + std::vector message; + storage::StreamUtils::DeserializeMessage(fv.value, message); + storage::streamID sid; + sid.DeserializeFrom(fv.field); + argv.emplace_back(sid.ToString()); + for (auto &m : message) { + argv.emplace_back(m); + } + } + net::SerializeRedisCommand(argv, &send_str); + if (doMigrate(cli, send_str) < 0) { + return -1; + } else { + ++send_num; + } + } + return send_num; +} + +static int MigrateSet(net::NetCli *cli, const std::string& key, const std::shared_ptr& db) { + int send_num = 0; + int64_t cursor = 0; + std::vector members; + rocksdb::Status s; + + do { + s = db->storage()->SScan(key, cursor, "*", MAX_MEMBERS_NUM, &members, &cursor); + if (s.ok() && members.size() > 0) { + net::RedisCmdArgsType argv; + std::string send_str; + argv.emplace_back("SADD"); + argv.emplace_back(key); + + for (const auto &member : members) { + argv.emplace_back(member); + } + net::SerializeRedisCommand(argv, &send_str); + if (doMigrate(cli, send_str) < 0) { + return -1; + } else { + ++send_num; + } + } + } while (cursor != 0 && s.ok()); + + if (0 < send_num) { + int r; + if (0 > (r = migrateKeyTTl(cli, key, storage::DataType::kSets, db))) { + return -1; + } else { + send_num += r; + } + } + + return send_num; +} + +static int MigrateZset(net::NetCli *cli, const std::string& key, const std::shared_ptr& db) { + int send_num = 0; + int64_t cursor = 0; + std::vector score_members; + rocksdb::Status s; + + do { + s = db->storage()->ZScan(key, cursor, "*", MAX_MEMBERS_NUM, &score_members, &cursor); + if (s.ok() && score_members.size() > 0) { + net::RedisCmdArgsType argv; + std::string send_str; + argv.emplace_back("ZADD"); + argv.emplace_back(key); + + for (const auto &score_member : score_members) { + argv.emplace_back(std::to_string(score_member.score)); + argv.emplace_back(score_member.member); + } + net::SerializeRedisCommand(argv, &send_str); + if (doMigrate(cli, send_str) < 0) { + return -1; + } else { + ++send_num; + } + } + } while (cursor != 0 && s.ok()); + + if (send_num > 0) { + int r; + if ((r = migrateKeyTTl(cli, key, storage::DataType::kZSets, db)) < 0) { + return -1; + } else { + send_num += r; + } + } + + return send_num; +} + +// get list key all values +static int listGetall(const std::string& key, std::vector *values, const std::shared_ptr& db) { + rocksdb::Status s = db->storage()->LRange(key, 0, -1, values); + if (!s.ok()) { + if (s.IsNotFound()) { + LOG(WARNING) << "List get key: " << key << " value not found "; + return 0; + } else { + LOG(WARNING) << "List get key: " << key << " value error: " << s.ToString(); + return -1; + } + } + return 1; +} + +PikaParseSendThread::PikaParseSendThread(PikaMigrateThread *migrate_thread, const std::shared_ptr& db) + : dest_ip_("none"), + dest_port_(-1), + timeout_ms_(3000), + mgrtkeys_num_(64), + should_exit_(false), + migrate_thread_(migrate_thread), + db_(db) {} + +PikaParseSendThread::~PikaParseSendThread() { + if (is_running()) { + should_exit_ = true; + StopThread(); + } + + if (cli_) { + delete cli_; + cli_ = nullptr; + } +} + +bool PikaParseSendThread::Init(const std::string &ip, int64_t port, int64_t timeout_ms, int64_t mgrtkeys_num) { + dest_ip_ = ip; 
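+  // cache the destination endpoint and batch limits before building the client connection below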
+ dest_port_ = port; + timeout_ms_ = timeout_ms; + mgrtkeys_num_ = static_cast(mgrtkeys_num); + + cli_ = net::NewRedisCli(); + cli_->set_connect_timeout(static_cast(timeout_ms_)); + cli_->set_send_timeout(static_cast(timeout_ms_)); + cli_->set_recv_timeout(static_cast(timeout_ms_)); + LOG(INFO) << "PikaParseSendThread init cli_, dest_ip_: " << dest_ip_ << " ,dest_port_: " << dest_port_; + pstd::Status result = cli_->Connect(dest_ip_, static_cast(dest_port_), g_pika_server->host()); + if (!result.ok()) { + LOG(ERROR) << "PikaParseSendThread::Init failed. Connect server(" << dest_ip_ << ":" << dest_port_ << ") " + << result.ToString(); + return false; + } + + // do auth + if (doAuth(cli_) < 0) { + LOG(WARNING) << "PikaParseSendThread::Init do auth failed !!"; + cli_->Close(); + return false; + } + + return true; +} + +void PikaParseSendThread::ExitThread(void) { should_exit_ = true; } + +int PikaParseSendThread::MigrateOneKey(net::NetCli *cli, const std::string& key, const char key_type, bool async) { + int send_num; + switch (key_type) { + case 'k': + if (0 > (send_num = MigrateKv(cli_, key, db_))) { + return -1; + } + break; + case 'h': + if (0 > (send_num = MigrateHash(cli_, key, db_))) { + return -1; + } + break; + case 'l': + if (0 > (send_num = MigrateList(cli_, key, db_))) { + return -1; + } + break; + case 's': + if (0 > (send_num = MigrateSet(cli_, key, db_))) { + return -1; + } + break; + case 'z': + if (0 > (send_num = MigrateZset(cli_, key, db_))) { + return -1; + } + break; + case 'm': + if (0 > (send_num = MigrateStreams(cli_, key, db_))) { + return -1; + } + break; + default: + return -1; + break; + } + return send_num; +} + +void PikaParseSendThread::DelKeysAndWriteBinlog(std::deque> &send_keys, + const std::shared_ptr& db) { + for (const auto& send_key : send_keys) { + DeleteKey(send_key.second, send_key.first, db_); + WriteDelKeyToBinlog(send_key.second, db_); + } +} + +// write del key to binlog for slave +void WriteDelKeyToBinlog(const std::string& key, const std::shared_ptr& db) { + std::shared_ptr cmd_ptr = g_pika_cmd_table_manager->GetCmd("del"); + std::unique_ptr args = std::make_unique(); + args->emplace_back("DEL"); + args->emplace_back(key); + cmd_ptr->Initial(*args, db->GetDBName()); + + std::shared_ptr sync_db = + g_pika_rm->GetSyncMasterDBByName(DBInfo(db->GetDBName())); + pstd::Status s = sync_db->ConsensusProposeLog(cmd_ptr); + if (!s.ok()) { + LOG(ERROR) << "write delete key to binlog failed, key: " << key; + } +} + +bool PikaParseSendThread::CheckMigrateRecv(int64_t need_receive_num) { + net::RedisCmdArgsType argv; + for (int64_t i = 0; i < need_receive_num; ++i) { + pstd::Status s; + s = cli_->Recv(&argv); + if (!s.ok()) { + LOG(ERROR) << "PikaParseSendThread::CheckMigrateRecv Recv error: " << s.ToString(); + return false; + } + + // set return ok + // zadd return number + // hset return 0 or 1 + // hmset return ok + // sadd return number + // rpush return length + std::string reply = argv[0]; + int64_t ret; + if (1 == argv.size() && + (kInnerReplOk == pstd::StringToLower(reply) || pstd::string2int(reply.data(), reply.size(), &ret))) { + continue; + } else { + LOG(ERROR) << "PikaParseSendThread::CheckMigrateRecv reply error: " << reply; + return false; + } + } + return true; +} + +void *PikaParseSendThread::ThreadMain() { + while (!should_exit_) { + std::deque> send_keys; + { + std::unique_lock lq(migrate_thread_->mgrtkeys_queue_mutex_); + while (!should_exit_ && 0 >= migrate_thread_->mgrtkeys_queue_.size()) { + migrate_thread_->mgrtkeys_cond_.wait(lq); + } 
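+      // Classic condition-variable consumer: block until the producer
+      // (PikaMigrateThread) queues keys or requests exit, then drain at most
+      // mgrtkeys_num_ entries under the same lock so each worker sends a
+      // bounded batch per round.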
+ + if (should_exit_) { + LOG(INFO) << "PikaParseSendThread::ThreadMain :" << pthread_self() << " exit !!!"; + return nullptr; + } + + migrate_thread_->IncWorkingThreadNum(); + for (int32_t i = 0; i < mgrtkeys_num_; ++i) { + if (migrate_thread_->mgrtkeys_queue_.empty()) { + break; + } + send_keys.emplace_back(migrate_thread_->mgrtkeys_queue_.front()); + migrate_thread_->mgrtkeys_queue_.pop_front(); + } + } + + int64_t send_num = 0; + int64_t need_receive_num = 0; + int32_t migrate_keys_num = 0; + for (const auto& send_key : send_keys) { + if (0 > (send_num = MigrateOneKey(cli_, send_key.second, send_key.first, false))) { + LOG(WARNING) << "PikaParseSendThread::ThreadMain MigrateOneKey: " << send_key.second << " failed !!!"; + migrate_thread_->OnTaskFailed(); + migrate_thread_->DecWorkingThreadNum(); + return nullptr; + } else { + need_receive_num += send_num; + ++migrate_keys_num; + } + } + + // check response + if (!CheckMigrateRecv(need_receive_num)) { + LOG(INFO) << "PikaMigrateThread::ThreadMain CheckMigrateRecv failed !!!"; + migrate_thread_->OnTaskFailed(); + migrate_thread_->DecWorkingThreadNum(); + return nullptr; + } else { + DelKeysAndWriteBinlog(send_keys, db_); + } + + migrate_thread_->AddResponseNum(migrate_keys_num); + migrate_thread_->DecWorkingThreadNum(); + } + + return nullptr; +} + +PikaMigrateThread::PikaMigrateThread() + : net::Thread(), + dest_ip_("none"), + dest_port_(-1), + timeout_ms_(3000), + keys_num_(-1), + slot_id_(-1), + is_migrating_(false), + should_exit_(false), + is_task_success_(true), + send_num_(0), + response_num_(0), + moved_num_(0), + + workers_num_(8), + working_thread_num_(0) + {} + +PikaMigrateThread::~PikaMigrateThread() { + LOG(INFO) << "PikaMigrateThread::~PikaMigrateThread"; + + if (is_running()) { + should_exit_ = true; + NotifyRequestMigrate(); + workers_cond_.notify_all(); + StopThread(); + } +} + +bool PikaMigrateThread::ReqMigrateBatch(const std::string &ip, int64_t port, int64_t time_out, int64_t slot_id, + int64_t keys_num, const std::shared_ptr& db) { + if (migrator_mutex_.try_lock()) { + if (is_migrating_) { + if (dest_ip_ != ip || dest_port_ != port || slot_id != slot_id_) { + LOG(INFO) << "PikaMigrateThread::ReqMigrate current: " << dest_ip_ << ":" << dest_port_ << " slot[" << slot_id_ + << "] request: " << ip << ":" << port << "db[" << db << "]";; + migrator_mutex_.unlock(); + return false; + } + db_ = db; + timeout_ms_ = time_out; + keys_num_ = keys_num; + NotifyRequestMigrate(); + migrator_mutex_.unlock(); + return true; + } else { + dest_ip_ = ip; + dest_port_ = port; + timeout_ms_ = time_out; + keys_num_ = keys_num; + slot_id_ = slot_id; + should_exit_ = false; + db_ = db; + + ResetThread(); + int ret = StartThread(); + if (0 != ret) { + LOG(ERROR) << "PikaMigrateThread::ReqMigrateBatch StartThread failed. 
" + << " ret=" << ret; + is_migrating_ = false; + StopThread(); + } else { + LOG(INFO) << "PikaMigrateThread::ReqMigrateBatch DB" << db; + is_migrating_ = true; + NotifyRequestMigrate(); + } + migrator_mutex_.unlock(); + return true; + } + } + return false; +} + +int PikaMigrateThread::ReqMigrateOne(const std::string &key, const std::shared_ptr &db) { + std::unique_lock lm(migrator_mutex_); + + int slot_id = GetSlotID(g_pika_conf->default_slot_num(), key); + storage::DataType type; + char key_type; + rocksdb::Status s = db->storage()->GetType(key, type); + if (!s.ok()) { + if (s.IsNotFound()) { + LOG(INFO) << "PikaMigrateThread::ReqMigrateOne key: " << key << " not found"; + return 0; + } else { + LOG(WARNING) << "PikaMigrateThread::ReqMigrateOne key: " << key << " error: " << strerror(errno); + return -1; + } + } + key_type = storage::DataTypeToTag(type); + if (type == storage::DataType::kNones) { + LOG(WARNING) << "PikaMigrateThread::ReqMigrateOne key: " << key << " type: " << static_cast(type) + << " is illegal"; + return 0; + } + + if (slot_id != slot_id_) { + LOG(WARNING) << "PikaMigrateThread::ReqMigrateOne Slot : " << slot_id << " is not the migrating slot:" << slot_id_; + return -1; + } + + // if the migrate thread exit, start it + if (!is_migrating_) { + ResetThread(); + int ret = StartThread(); + if (0 != ret) { + LOG(ERROR) << "PikaMigrateThread::ReqMigrateOne StartThread failed. " + << " ret=" << ret; + is_migrating_ = false; + StopThread(); + } else { + LOG(INFO) << "PikaMigrateThread::ReqMigrateOne StartThread"; + is_migrating_ = true; + usleep(100); + } + } + // check the key is migrating + std::pair kpair = std::make_pair(key_type, key); + if (IsMigrating(kpair)) { + LOG(INFO) << "PikaMigrateThread::ReqMigrateOne key: " << key << " is migrating ! 
"; + return 1; + } else { + std::unique_lock lo(mgrtone_queue_mutex_); + mgrtone_queue_.emplace_back(kpair); + NotifyRequestMigrate(); + } + + return 1; +} + +void PikaMigrateThread::GetMigrateStatus(std::string *ip, int64_t* port, int64_t *slot, bool *migrating, int64_t *moved, + int64_t *remained) { + std::unique_lock lm(migrator_mutex_); + // todo for sure + if (!is_migrating_) { + *remained = -1; + return; + } + + *ip = dest_ip_; + *port = dest_port_; + *migrating = is_migrating_; + *moved = moved_num_; + *slot = slot_id_; + std::unique_lock lq(mgrtkeys_queue_mutex_); + int64_t migrating_keys_num = static_cast(mgrtkeys_queue_.size()); + std::string slotKey = GetSlotKey(static_cast(slot_id_)); + int32_t slot_size = 0; + rocksdb::Status s = db_->storage()->SCard(slotKey, &slot_size); + if (s.ok()) { + *remained = slot_size + migrating_keys_num; + } else { + *remained = migrating_keys_num; + } +} + +void PikaMigrateThread::CancelMigrate(void) { + LOG(INFO) << "PikaMigrateThread::CancelMigrate"; + + if (is_running()) { + should_exit_ = true; + NotifyRequestMigrate(); + workers_cond_.notify_one(); + StopThread(); + } +} + +void PikaMigrateThread::IncWorkingThreadNum(void) { ++working_thread_num_; } + +void PikaMigrateThread::DecWorkingThreadNum(void) { + std::unique_lock lw(workers_mutex_); + --working_thread_num_; + workers_cond_.notify_one(); +} + +void PikaMigrateThread::OnTaskFailed() { + LOG(ERROR) << "PikaMigrateThread::OnTaskFailed !!!"; + is_task_success_ = false; +} + +void PikaMigrateThread::AddResponseNum(int32_t response_num) { response_num_ += response_num; } + +void PikaMigrateThread::ResetThread(void) { + if (0 != thread_id()) { + JoinThread(); + } +} + +void PikaMigrateThread::DestroyThread(bool is_self_exit) { + std::unique_lock lm(migrator_mutex_); + LOG(INFO) << "PikaMigrateThread::DestroyThread"; + + // Destroy work threads + DestroyParseSendThreads(); + + if (is_self_exit) { + set_is_running(false); + } + + { + std::unique_lock lq(mgrtkeys_queue_mutex_); + std::unique_lock lm(mgrtkeys_map_mutex_); + std::deque>().swap(mgrtkeys_queue_); + std::map, std::string>().swap(mgrtkeys_map_); + } + + cursor_ = 0; + is_migrating_ = false; + is_task_success_ = true; + moved_num_ = 0; +} + +void PikaMigrateThread::NotifyRequestMigrate(void) { + std::unique_lock lr(request_migrate_mutex_); + request_migrate_ = true; + request_migrate_cond_.notify_one(); +} + +bool PikaMigrateThread::IsMigrating(std::pair &kpair) { + std::unique_lock lo(mgrtone_queue_mutex_); + std::unique_lock lm(mgrtkeys_map_mutex_); + + for (const auto& iter : mgrtone_queue_) { + if (iter.first == kpair.first && iter.second == kpair.second) { + return true; + } + } + + auto iter = mgrtkeys_map_.find(kpair); + if (iter != mgrtkeys_map_.end()) { + return true; + } + + return false; +} + +void PikaMigrateThread::ReadSlotKeys(const std::string &slotKey, int64_t need_read_num, int64_t &real_read_num, + int32_t *finish) { + real_read_num = 0; + std::string key; + char key_type; + int32_t is_member = 0; + std::vector members; + + rocksdb::Status s = db_->storage()->SScan(slotKey, cursor_, "*", need_read_num, &members, &cursor_); + if (s.ok() && 0 < members.size()) { + for (const auto &member : members) { + db_->storage()->SIsmember(slotKey, member, &is_member); + if (is_member) { + key = member; + key_type = key.at(0); + key.erase(key.begin()); + std::pair kpair = std::make_pair(key_type, key); + if (mgrtkeys_map_.find(kpair) == mgrtkeys_map_.end()) { + mgrtkeys_queue_.emplace_back(kpair); + mgrtkeys_map_[kpair] = 
INVALID_STR; + ++real_read_num; + } + } else { + LOG(INFO) << "PikaMigrateThread::ReadSlotKeys key " << member << " not found in" << slotKey; + } + } + } + + *finish = (0 == cursor_) ? 1 : 0; +} + +bool PikaMigrateThread::CreateParseSendThreads(int32_t dispatch_num) { + workers_num_ = static_cast(g_pika_conf->slotmigrate_thread_num()); + for (int32_t i = 0; i < workers_num_; ++i) { + auto worker = new PikaParseSendThread(this, db_); + if (!worker->Init(dest_ip_, dest_port_, timeout_ms_, dispatch_num)) { + delete worker; + DestroyParseSendThreads(); + return false; + } else { + int ret = worker->StartThread(); + if (0 != ret) { + LOG(INFO) << "PikaMigrateThread::CreateParseSendThreads start work thread failed ret=" << ret; + delete worker; + DestroyParseSendThreads(); + return false; + } else { + workers_.emplace_back(worker); + } + } + } + return true; +} + +void PikaMigrateThread::DestroyParseSendThreads(void) { + if (!workers_.empty()) { + for (auto worker : workers_) { + worker->ExitThread(); + } + + { + std::unique_lock lm(mgrtkeys_queue_mutex_); + mgrtkeys_cond_.notify_all(); + } + + for (auto worker : workers_) { + delete worker; + } + workers_.clear(); + } +} + +void *PikaMigrateThread::ThreadMain() { + LOG(INFO) << "PikaMigrateThread::ThreadMain Start"; + + // Create parse_send_threads + auto dispatch_num = static_cast(g_pika_conf->thread_migrate_keys_num()); + if (!CreateParseSendThreads(dispatch_num)) { + LOG(INFO) << "PikaMigrateThread::ThreadMain CreateParseSendThreads failed !!!"; + DestroyThread(true); + return nullptr; + } + + std::string slotKey = GetSlotKey(static_cast(slot_id_)); + int32_t slot_size = 0; + db_->storage()->SCard(slotKey, &slot_size); + + while (!should_exit_) { + // Waiting migrate task + { + std::unique_lock lm(request_migrate_mutex_); + while (!request_migrate_) { + request_migrate_cond_.wait(lm); + } + request_migrate_ = false; + + if (should_exit_) { + LOG(INFO) << "PikaMigrateThread::ThreadMain :" << pthread_self() << " exit1 !!!"; + DestroyThread(false); + return nullptr; + } + } + + // read keys form slot and push to mgrtkeys_queue_ + int64_t round_remained_keys = keys_num_; + int64_t real_read_num = 0; + int32_t is_finish = 0; + send_num_ = 0; + response_num_ = 0; + do { + std::unique_lock lq(mgrtkeys_queue_mutex_); + std::unique_lock lo(mgrtone_queue_mutex_); + std::unique_lock lm(mgrtkeys_map_mutex_); + + // first check whether need migrate one key + if (!mgrtone_queue_.empty()) { + while (!mgrtone_queue_.empty()) { + mgrtkeys_queue_.push_front(mgrtone_queue_.front()); + mgrtkeys_map_[mgrtone_queue_.front()] = INVALID_STR; + mgrtone_queue_.pop_front(); + ++send_num_; + } + } else { + int64_t need_read_num = (0 < round_remained_keys - dispatch_num) ? 
dispatch_num : round_remained_keys;
+        ReadSlotKeys(slotKey, need_read_num, real_read_num, &is_finish);
+        round_remained_keys -= need_read_num;
+        send_num_ += static_cast<int32_t>(real_read_num);
+      }
+      mgrtkeys_cond_.notify_all();
+
+    } while (0 < round_remained_keys && !is_finish);
+
+    LOG(INFO) << "PikaMigrateThread:: waiting for ParseSendThreads to finish";
+    // wait for all ParseSendThreads to finish
+    {
+      std::unique_lock<std::mutex> lw(workers_mutex_);
+      while (!should_exit_ && is_task_success_ && send_num_ != response_num_) {
+        if (workers_cond_.wait_for(lw, std::chrono::seconds(60)) == std::cv_status::timeout) {
+          break;
+        }
+      }
+    }
+    LOG(INFO) << "PikaMigrateThread::ThreadMain send_num:" << send_num_ << " response_num:" << response_num_;
+
+    if (should_exit_) {
+      LOG(INFO) << "PikaMigrateThread::ThreadMain :" << pthread_self() << " exit2 !!!";
+      DestroyThread(false);
+      return nullptr;
+    }
+
+    // check whether this round of the migrate task succeeded
+    if (!is_task_success_) {
+      LOG(ERROR) << "PikaMigrateThread::ThreadMain one round of the migrate task failed !!!";
+      DestroyThread(true);
+      return nullptr;
+    } else {
+      moved_num_ += response_num_;
+
+      std::unique_lock<std::mutex> lm(mgrtkeys_map_mutex_);
+      std::map<std::pair<char, std::string>, std::string>().swap(mgrtkeys_map_);
+    }
+
+    // check whether the slot migration has finished
+    int32_t slot_remained_keys = 0;
+    db_->storage()->SCard(slotKey, &slot_remained_keys);
+    if (0 == slot_remained_keys) {
+      LOG(INFO) << "PikaMigrateThread::ThreadMain slot_size:" << slot_size << " moved_num:" << moved_num_;
+      if (slot_size != moved_num_) {
+        LOG(ERROR) << "PikaMigrateThread::ThreadMain moved_num != slot_size !!!";
+      }
+      DestroyThread(true);
+      return nullptr;
+    }
+  }
+
+  return nullptr;
+}
+
+/* EOF */
diff --git a/tools/pika_migrate/src/pika_monotonic_time.cc b/tools/pika_migrate/src/pika_monotonic_time.cc
new file mode 100644
index 0000000000..1c3f6e820d
--- /dev/null
+++ b/tools/pika_migrate/src/pika_monotonic_time.cc
@@ -0,0 +1,63 @@
+// Copyright (c) 2023-present, Qihoo, Inc. All rights reserved.
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree. An additional grant
+// of patent rights can be found in the PATENTS file in the same directory.
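+// getMonotonicUs() returns a monotonically increasing timestamp in
+// microseconds that is immune to wall-clock adjustments. A minimal usage
+// sketch (DoWork() is a hypothetical caller-side function):
+//
+//   monotime start = getMonotonicUs();
+//   DoWork();
+//   monotime elapsed_us = getMonotonicUs() - start;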
+
+#if defined(__APPLE__)  // Mac
+#include <mach/mach_time.h>
+
+#include "include/pika_monotonic_time.h"
+
+monotime getMonotonicUs() {
+  static mach_timebase_info_data_t timebase;
+  if (timebase.denom == 0) {
+    mach_timebase_info(&timebase);
+  }
+  uint64_t nanos = mach_absolute_time() * timebase.numer / timebase.denom;
+  return nanos / 1000;
+}
+
+#elif defined(__FreeBSD__)  // FreeBSD
+#include <time.h>
+
+#include "include/pika_monotonic_time.h"
+
+monotime getMonotonicUs() {
+  struct timespec ts;
+  clock_gettime(CLOCK_MONOTONIC, &ts);
+  return (ts.tv_sec * 1000000) + (ts.tv_nsec / 1000);
+}
+
+#elif defined(__linux__)  // Linux
+
+#ifdef __x86_64__  // x86_64
+
+#include <time.h>
+
+#include "include/pika_monotonic_time.h"
+
+monotime getMonotonicUs() {
+  timespec ts;
+  clock_gettime(CLOCK_MONOTONIC, &ts);
+  return static_cast<uint64_t>(ts.tv_sec) * 1000000 + static_cast<uint64_t>(ts.tv_nsec) / 1000;
+}
+
+#elif __arm__ || __aarch64__  // ARM
+
+#include <sys/time.h>
+
+#include "include/pika_monotonic_time.h"
+
+monotime getMonotonicUs() {
+  timeval tv;
+  gettimeofday(&tv, nullptr);
+  return static_cast<uint64_t>(tv.tv_sec) * 1000000 + static_cast<uint64_t>(tv.tv_usec);
+}
+
+#else
+#error "Unsupported architecture for Linux"
+#endif  // __x86_64__, __arm__
+
+#else
+#error "Unsupported platform"
+#endif  // __APPLE__, __linux__
\ No newline at end of file
diff --git a/tools/pika_migrate/src/pika_pubsub.cc b/tools/pika_migrate/src/pika_pubsub.cc
new file mode 100644
index 0000000000..935015ae7c
--- /dev/null
+++ b/tools/pika_migrate/src/pika_pubsub.cc
@@ -0,0 +1,242 @@
+// Copyright (c) 2015-present, Qihoo, Inc. All rights reserved.
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree. An additional grant
+// of patent rights can be found in the PATENTS file in the same directory.
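+// ConstructPubSubResp() below hand-builds the RESP reply used by the
+// (P)SUBSCRIBE/(P)UNSUBSCRIBE family: one *3 multi-bulk per channel carrying
+// the command name, the channel, and the remaining subscription count. For
+// illustration, a SUBSCRIBE of channel "foo" leaving one active subscription
+// would serialize as:
+//
+//   *3\r\n$9\r\nsubscribe\r\n$3\r\nfoo\r\n:1\r\n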
+ +#include "include/pika_pubsub.h" + +#include "include/pika_server.h" + +extern PikaServer* g_pika_server; + +static std::string ConstructPubSubResp(const std::string& cmd, const std::vector>& result) { + std::stringstream resp; + if (result.empty()) { + resp << "*3\r\n" + << "$" << cmd.length() << "\r\n" + << cmd << "\r\n" + << "$" << -1 << "\r\n" + << ":" << 0 << "\r\n"; + } + for (const auto & it : result) { + resp << "*3\r\n" + << "$" << cmd.length() << "\r\n" + << cmd << "\r\n" + << "$" << it.first.length() << "\r\n" + << it.first << "\r\n" + << ":" << it.second << "\r\n"; + } + return resp.str(); +} + +void PublishCmd::DoInitial() { + if (!CheckArg(argv_.size())) { + res_.SetRes(CmdRes::kWrongNum, kCmdNamePublish); + return; + } + channel_ = argv_[1]; + msg_ = argv_[2]; +} + +void PublishCmd::Do() { + int receivers = g_pika_server->Publish(channel_, msg_); + res_.AppendInteger(receivers); +} + +void SubscribeCmd::DoInitial() { + if (!CheckArg(argv_.size())) { + res_.SetRes(CmdRes::kWrongNum, kCmdNameSubscribe); + return; + } + for (size_t i = 1; i < argv_.size(); i++) { + channels_.push_back(argv_[i]); + } +} + +void SubscribeCmd::Do() { + std::shared_ptr conn = GetConn(); + if (!conn) { + res_.SetRes(CmdRes::kErrOther, kCmdNameSubscribe); + LOG(WARNING) << name_ << " weak ptr is empty"; + return; + } + std::shared_ptr cli_conn = std::dynamic_pointer_cast(conn); + if (!cli_conn->IsPubSub()) { + cli_conn->server_thread()->MoveConnOut(conn->fd()); + cli_conn->SetIsPubSub(true); + cli_conn->SetHandleType(net::HandleType::kSynchronous); + cli_conn->SetWriteCompleteCallback([cli_conn]() { + if (!cli_conn->IsPubSub()) { + return; + } + cli_conn->set_is_writable(true); + g_pika_server->EnablePublish(cli_conn->fd()); + }); + } + std::vector> result; + g_pika_server->Subscribe(conn, channels_, name_ == kCmdNamePSubscribe, &result); + return res_.SetRes(CmdRes::kNone, ConstructPubSubResp(name_, result)); +} + +void UnSubscribeCmd::DoInitial() { + if (!CheckArg(argv_.size())) { + res_.SetRes(CmdRes::kWrongNum, kCmdNameUnSubscribe); + return; + } + for (size_t i = 1; i < argv_.size(); i++) { + channels_.push_back(argv_[i]); + } +} + +void UnSubscribeCmd::Do() { + std::shared_ptr conn = GetConn(); + if (!conn) { + res_.SetRes(CmdRes::kErrOther, kCmdNameUnSubscribe); + LOG(WARNING) << name_ << " weak ptr is empty"; + return; + } + std::shared_ptr cli_conn = std::dynamic_pointer_cast(conn); + + std::vector> result; + int subscribed = g_pika_server->UnSubscribe(conn, channels_, name_ == kCmdNamePUnSubscribe, &result); + if (subscribed == 0 && cli_conn->IsPubSub()) { + /* + * if the number of client subscribed is zero, + * the client will exit the Pub/Sub state + */ + cli_conn->SetIsPubSub(false); + cli_conn->SetWriteCompleteCallback([cli_conn, conn]() { + if (cli_conn->IsPubSub()) { + return; + } + cli_conn->set_is_writable(false); + cli_conn->SetHandleType(net::HandleType::kAsynchronous); + cli_conn->server_thread()->MoveConnIn(conn, net::NotifyType::kNotiWait); + }); + } + return res_.SetRes(CmdRes::kNone, ConstructPubSubResp(name_, result)); +} + +void PSubscribeCmd::DoInitial() { + if (!CheckArg(argv_.size())) { + res_.SetRes(CmdRes::kWrongNum, kCmdNamePSubscribe); + return; + } + for (size_t i = 1; i < argv_.size(); i++) { + channels_.push_back(argv_[i]); + } +} + +void PSubscribeCmd::Do() { + std::shared_ptr conn = GetConn(); + if (!conn) { + res_.SetRes(CmdRes::kErrOther, kCmdNamePSubscribe); + LOG(WARNING) << name_ << " weak ptr is empty"; + return; + } + std::shared_ptr cli_conn = 
std::dynamic_pointer_cast(conn); + if (!cli_conn->IsPubSub()) { + cli_conn->server_thread()->MoveConnOut(conn->fd()); + cli_conn->SetIsPubSub(true); + cli_conn->SetHandleType(net::HandleType::kSynchronous); + cli_conn->SetWriteCompleteCallback([cli_conn]() { + if (!cli_conn->IsPubSub()) { + return; + } + cli_conn->set_is_writable(true); + g_pika_server->EnablePublish(cli_conn->fd()); + }); + } + std::vector> result; + g_pika_server->Subscribe(conn, channels_, name_ == kCmdNamePSubscribe, &result); + return res_.SetRes(CmdRes::kNone, ConstructPubSubResp(name_, result)); +} + +void PUnSubscribeCmd::DoInitial() { + if (!CheckArg(argv_.size())) { + res_.SetRes(CmdRes::kWrongNum, kCmdNamePUnSubscribe); + return; + } + for (size_t i = 1; i < argv_.size(); i++) { + channels_.push_back(argv_[i]); + } + +} + +void PUnSubscribeCmd::Do() { + std::shared_ptr conn = GetConn(); + if (!conn) { + res_.SetRes(CmdRes::kErrOther, kCmdNamePUnSubscribe); + LOG(WARNING) << name_ << " weak ptr is empty"; + return; + } + std::shared_ptr cli_conn = std::dynamic_pointer_cast(conn); + + std::vector> result; + int subscribed = g_pika_server->UnSubscribe(conn, channels_, name_ == kCmdNamePUnSubscribe, &result); + if (subscribed == 0 && cli_conn->IsPubSub()) { + /* + * if the number of client subscribed is zero, + * the client will exit the Pub/Sub state + */ + cli_conn->SetIsPubSub(false); + cli_conn->SetWriteCompleteCallback([cli_conn, conn]() { + if (cli_conn->IsPubSub()) { + return; + } + cli_conn->set_is_writable(false); + cli_conn->SetHandleType(net::HandleType::kAsynchronous); + cli_conn->server_thread()->MoveConnIn(conn, net::NotifyType::kNotiWait); + }); + } + return res_.SetRes(CmdRes::kNone, ConstructPubSubResp(name_, result)); +} + +void PubSubCmd::DoInitial() { + if (!CheckArg(argv_.size())) { + res_.SetRes(CmdRes::kWrongNum, kCmdNamePubSub); + return; + } + subcommand_ = argv_[1]; + if (strcasecmp(subcommand_.data(), "channels") != 0 && strcasecmp(subcommand_.data(), "numsub") != 0 && + strcasecmp(subcommand_.data(), "numpat") != 0) { + res_.SetRes(CmdRes::kErrOther, "Unknown PUBSUB subcommand or wrong number of arguments for '" + subcommand_ + "'"); + } + for (size_t i = 2; i < argv_.size(); i++) { + arguments_.push_back(argv_[i]); + } +} + +void PubSubCmd::Do() { + if (strcasecmp(subcommand_.data(), "channels") == 0) { + std::string pattern; + std::vector result; + if (arguments_.size() == 1) { + pattern = arguments_[0]; + } else if (arguments_.size() > 1) { + res_.SetRes(CmdRes::kErrOther, + "Unknown PUBSUB subcommand or wrong number of arguments for '" + subcommand_ + "'"); + return; + } + g_pika_server->PubSubChannels(pattern, &result); + + res_.AppendArrayLenUint64(result.size()); + for (auto &it : result) { + res_.AppendStringLenUint64(it.length()); + res_.AppendContent(it); + } + } else if (strcasecmp(subcommand_.data(), "numsub") == 0) { + std::vector> result; + g_pika_server->PubSubNumSub(arguments_, &result); + res_.AppendArrayLenUint64(result.size() * 2); + for (auto &it : result) { + res_.AppendStringLenUint64(it.first.length()); + res_.AppendContent(it.first); + res_.AppendInteger(it.second); + } + return; + } else if (strcasecmp(subcommand_.data(), "numpat") == 0) { + int subscribed = g_pika_server->PubSubNumPat(); + res_.AppendInteger(subscribed); + } +} diff --git a/tools/pika_migrate/src/pika_repl_bgworker.cc b/tools/pika_migrate/src/pika_repl_bgworker.cc new file mode 100644 index 0000000000..1e12ffdf0a --- /dev/null +++ b/tools/pika_migrate/src/pika_repl_bgworker.cc @@ -0,0 +1,274 @@ 
+// Copyright (c) 2019-present, Qihoo, Inc. All rights reserved. +// This source code is licensed under the BSD-style license found in the +// LICENSE file in the root directory of this source tree. An additional grant +// of patent rights can be found in the PATENTS file in the same directory. + +#include + +#include "include/pika_repl_bgworker.h" +#include "include/pika_cmd_table_manager.h" +#include "include/pika_rm.h" +#include "include/pika_server.h" +#include "pstd/include/pstd_defer.h" +#include "src/pstd/include/scope_record_lock.h" +#include "include/pika_conf.h" + +extern PikaServer* g_pika_server; +extern std::unique_ptr g_pika_rm; +extern std::unique_ptr g_pika_cmd_table_manager; + +PikaReplBgWorker::PikaReplBgWorker(int queue_size) : bg_thread_(queue_size) { + bg_thread_.set_thread_name("ReplBgWorker"); + net::RedisParserSettings settings; + settings.DealMessage = &(PikaReplBgWorker::HandleWriteBinlog); + redis_parser_.RedisParserInit(REDIS_PARSER_REQUEST, settings); + redis_parser_.data = this; + db_name_ = g_pika_conf->default_db(); +} + +int PikaReplBgWorker::StartThread() { return bg_thread_.StartThread(); } + +int PikaReplBgWorker::StopThread() { return bg_thread_.StopThread(); } + +void PikaReplBgWorker::Schedule(net::TaskFunc func, void* arg) { bg_thread_.Schedule(func, arg); } + +void PikaReplBgWorker::Schedule(net::TaskFunc func, void* arg, std::function& call_back) { + bg_thread_.Schedule(func, arg, call_back); +} + +void PikaReplBgWorker::ParseBinlogOffset(const InnerMessage::BinlogOffset& pb_offset, LogOffset* offset) { + offset->b_offset.filenum = pb_offset.filenum(); + offset->b_offset.offset = pb_offset.offset(); + offset->l_offset.term = pb_offset.term(); + offset->l_offset.index = pb_offset.index(); +} + +void PikaReplBgWorker::HandleBGWorkerWriteBinlog(void* arg) { + auto task_arg = static_cast(arg); + const std::shared_ptr res = task_arg->res; + std::shared_ptr conn = task_arg->conn; + auto index = static_cast*>(task_arg->res_private_data); + PikaReplBgWorker* worker = task_arg->worker; + worker->ip_port_ = conn->ip_port(); + + DEFER { + delete index; + delete task_arg; + }; + + std::string db_name; + + LogOffset pb_begin; + LogOffset pb_end; + bool only_keepalive = false; + + // find the first not keepalive binlogsync + for (size_t i = 0; i < index->size(); ++i) { + const InnerMessage::InnerResponse::BinlogSync& binlog_res = res->binlog_sync((*index)[i]); + if (i == 0) { + db_name = binlog_res.slot().db_name(); + } + if (!binlog_res.binlog().empty()) { + ParseBinlogOffset(binlog_res.binlog_offset(), &pb_begin); + break; + } + } + + // find the last not keepalive binlogsync + for (int i = static_cast(index->size() - 1); i >= 0; i--) { + const InnerMessage::InnerResponse::BinlogSync& binlog_res = res->binlog_sync((*index)[i]); + if (!binlog_res.binlog().empty()) { + ParseBinlogOffset(binlog_res.binlog_offset(), &pb_end); + break; + } + } + + if (pb_begin == LogOffset()) { + only_keepalive = true; + } + + LogOffset ack_start; + if (only_keepalive) { + ack_start = LogOffset(); + } else { + ack_start = pb_begin; + } + + // because DispatchBinlogRes() have been order them. 
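+  // (i.e. DispatchBinlogRes() has already grouped the indices by DB, so every
+  // entry in *index shares one db_name and can be replayed in order here)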
+ worker->db_name_ = db_name; + + std::shared_ptr db = + g_pika_rm->GetSyncMasterDBByName(DBInfo(db_name)); + if (!db) { + LOG(WARNING) << "DB " << db_name << " Not Found"; + return; + } + + std::shared_ptr slave_db = + g_pika_rm->GetSyncSlaveDBByName(DBInfo(db_name)); + if (!slave_db) { + LOG(WARNING) << "Slave DB " << db_name << " Not Found"; + return; + } + + for (int i : *index) { + const InnerMessage::InnerResponse::BinlogSync& binlog_res = res->binlog_sync(i); + // if pika are not current a slave or DB not in + // BinlogSync state, we drop remain write binlog task + if (((g_pika_server->role() & PIKA_ROLE_SLAVE) == 0) || + ((slave_db->State() != ReplState::kConnected) && (slave_db->State() != ReplState::kWaitDBSync))) { + return; + } + + if (slave_db->MasterSessionId() != binlog_res.session_id()) { + LOG(WARNING) << "Check SessionId Mismatch: " << slave_db->MasterIp() << ":" + << slave_db->MasterPort() << ", " << slave_db->SyncDBInfo().ToString() + << " expected_session: " << binlog_res.session_id() + << ", actual_session:" << slave_db->MasterSessionId(); + LOG(WARNING) << "Check Session failed " << binlog_res.slot().db_name(); + slave_db->SetReplState(ReplState::kTryConnect); + return; + } + + // empty binlog treated as keepalive packet + if (binlog_res.binlog().empty()) { + continue; + } + if (!PikaBinlogTransverter::BinlogItemWithoutContentDecode(TypeFirst, binlog_res.binlog(), &worker->binlog_item_)) { + LOG(WARNING) << "Binlog item decode failed"; + slave_db->SetReplState(ReplState::kTryConnect); + return; + } + const char* redis_parser_start = binlog_res.binlog().data() + BINLOG_ENCODE_LEN; + int redis_parser_len = static_cast(binlog_res.binlog().size()) - BINLOG_ENCODE_LEN; + int processed_len = 0; + net::RedisParserStatus ret = + worker->redis_parser_.ProcessInputBuffer(redis_parser_start, redis_parser_len, &processed_len); + if (ret != net::kRedisParserDone) { + LOG(WARNING) << "Redis parser failed"; + slave_db->SetReplState(ReplState::kTryConnect); + return; + } + } + + LogOffset ack_end; + if (only_keepalive) { + ack_end = LogOffset(); + } else { + LogOffset productor_status; + // Reply Ack to master immediately + std::shared_ptr logger = db->Logger(); + logger->GetProducerStatus(&productor_status.b_offset.filenum, &productor_status.b_offset.offset, + &productor_status.l_offset.term, &productor_status.l_offset.index); + ack_end = productor_status; + ack_end.l_offset.term = pb_end.l_offset.term; + } + + g_pika_rm->SendBinlogSyncAckRequest(db_name, ack_start, ack_end); +} + +int PikaReplBgWorker::HandleWriteBinlog(net::RedisParser* parser, const net::RedisCmdArgsType& argv) { + std::string opt = argv[0]; + auto worker = static_cast(parser->data); + // Monitor related + std::string monitor_message; + if (g_pika_server->HasMonitorClients()) { + std::string db_name = worker->db_name_.substr(2); + std::string monitor_message = + std::to_string(static_cast(pstd::NowMicros()) / 1000000) + " [" + db_name + " " + worker->ip_port_ + "]"; + for (const auto& item : argv) { + monitor_message += " " + pstd::ToRead(item); + } + g_pika_server->AddMonitorMessage(monitor_message); + } + + std::shared_ptr c_ptr = g_pika_cmd_table_manager->GetCmd(pstd::StringToLower(opt)); + if (!c_ptr) { + LOG(WARNING) << "Command " << opt << " not in the command db"; + return -1; + } + // Initial + c_ptr->Initial(argv, worker->db_name_); + if (!c_ptr->res().ok()) { + LOG(WARNING) << "Fail to initial command from binlog: " << opt; + return -1; + } + + 
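+  // From here on the decoded command is fed through the sync master's
+  // consensus log (ConsensusProcessLeaderLog below) rather than applied to
+  // the DB directly, so the slave's own binlog stays consistent with the
+  // offsets acked back to the master.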
g_pika_server->UpdateQueryNumAndExecCountDB(worker->db_name_, opt, c_ptr->is_write()); + + std::shared_ptr db = + g_pika_rm->GetSyncMasterDBByName(DBInfo(worker->db_name_)); + if (!db) { + LOG(WARNING) << worker->db_name_ << "Not found."; + } + + db->ConsensusProcessLeaderLog(c_ptr, worker->binlog_item_); + return 0; +} + +void PikaReplBgWorker::HandleBGWorkerWriteDB(void* arg) { + std::unique_ptr task_arg(static_cast(arg)); + const std::shared_ptr c_ptr = task_arg->cmd_ptr; + WriteDBInSyncWay(c_ptr); +} + +void PikaReplBgWorker::WriteDBInSyncWay(const std::shared_ptr& c_ptr) { + const PikaCmdArgsType& argv = c_ptr->argv(); + + uint64_t start_us = 0; + if (g_pika_conf->slowlog_slower_than() >= 0) { + start_us = pstd::NowMicros(); + } + // Add read lock for no suspend command + pstd::lock::MultiRecordLock record_lock(c_ptr->GetDB()->LockMgr()); + record_lock.Lock(c_ptr->current_key()); + if (!c_ptr->IsSuspend()) { + c_ptr->GetDB()->DBLockShared(); + } + if (c_ptr->IsNeedCacheDo() + && PIKA_CACHE_NONE != g_pika_conf->cache_mode() + && c_ptr->GetDB()->cache()->CacheStatus() == PIKA_CACHE_STATUS_OK) { + if (c_ptr->is_write()) { + c_ptr->DoThroughDB(); + if (c_ptr->IsNeedUpdateCache()) { + c_ptr->DoUpdateCache(); + } + } else { + LOG(WARNING) << "It is impossbile to reach here"; + } + } else { + c_ptr->Do(); + } + if (!c_ptr->IsSuspend()) { + c_ptr->GetDB()->DBUnlockShared(); + } + + if (c_ptr->res().ok() + && c_ptr->is_write() + && c_ptr->name() != kCmdNameFlushdb + && c_ptr->name() != kCmdNameFlushall + && c_ptr->name() != kCmdNameExec) { + auto table_keys = c_ptr->current_key(); + for (auto& key : table_keys) { + key = c_ptr->db_name().append(key); + } + auto dispatcher = dynamic_cast(g_pika_server->pika_dispatch_thread()->server_thread()); + auto involved_conns = dispatcher->GetInvolvedTxn(table_keys); + for (auto& conn : involved_conns) { + auto c = std::dynamic_pointer_cast(conn); + c->SetTxnWatchFailState(true); + } + } + + record_lock.Unlock(c_ptr->current_key()); + if (g_pika_conf->slowlog_slower_than() >= 0) { + auto start_time = static_cast(start_us / 1000000); + auto duration = static_cast(pstd::NowMicros() - start_us); + if (duration > g_pika_conf->slowlog_slower_than()) { + g_pika_server->SlowlogPushEntry(argv, start_time, duration); + if (g_pika_conf->slowlog_write_errorlog()) { + LOG(INFO) << "command: " << argv[0] << ", start_time(s): " << start_time << ", duration(us): " << duration; + } + } + } +} diff --git a/tools/pika_migrate/src/pika_repl_client.cc b/tools/pika_migrate/src/pika_repl_client.cc new file mode 100644 index 0000000000..117b5adb8c --- /dev/null +++ b/tools/pika_migrate/src/pika_repl_client.cc @@ -0,0 +1,332 @@ +// Copyright (c) 2015-present, Qihoo, Inc. All rights reserved. +// This source code is licensed under the BSD-style license found in the +// LICENSE file in the root directory of this source tree. An additional grant +// of patent rights can be found in the PATENTS file in the same directory. 
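+// Worker selection in PikaReplClient (defined below): binlog application is
+// kept ordered per DB, the binlog worker index being derived from the
+// trailing digit of the db name, while DB writes are hashed by key so
+// unrelated keys can be applied in parallel. A sketch of the key-hash step,
+// assuming str_hash is a std::hash<std::string> instance (see
+// GetHashIndexByKey()):
+//
+//   size_t index = std::hash<std::string>{}(key) % write_db_workers_.size();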
+ +#include "include/pika_repl_client.h" + +#include +#include +#include + +#include + +#include "net/include/net_cli.h" +#include "net/include/redis_cli.h" +#include "pstd/include/env.h" +#include "pstd/include/pstd_coding.h" +#include "pstd/include/pstd_string.h" + +#include "include/pika_rm.h" +#include "include/pika_server.h" + +using pstd::Status; +extern PikaServer* g_pika_server; +extern std::unique_ptr g_pika_rm; + +PikaReplClient::PikaReplClient(int cron_interval, int keepalive_timeout) { + for (int i = 0; i < MAX_DB_NUM; i++) { + async_write_db_task_counts_[i].store(0, std::memory_order::memory_order_seq_cst); + } + client_thread_ = std::make_unique(cron_interval, keepalive_timeout); + client_thread_->set_thread_name("PikaReplClient"); + for (int i = 0; i < g_pika_conf->sync_binlog_thread_num(); i++) { + auto new_binlog_worker = std::make_unique(PIKA_SYNC_BUFFER_SIZE); + std::string binlog_worker_name = "ReplBinlogWorker" + std::to_string(i); + new_binlog_worker->SetThreadName(binlog_worker_name); + write_binlog_workers_.emplace_back(std::move(new_binlog_worker)); + } + for (int i = 0; i < g_pika_conf->sync_thread_num(); ++i) { + auto new_db_worker = std::make_unique(PIKA_SYNC_BUFFER_SIZE); + std::string db_worker_name = "ReplWriteDBWorker" + std::to_string(i); + new_db_worker->SetThreadName(db_worker_name); + write_db_workers_.emplace_back(std::move(new_db_worker)); + } +} + +PikaReplClient::~PikaReplClient() { + client_thread_->StopThread(); + LOG(INFO) << "PikaReplClient exit!!!"; +} + +int PikaReplClient::Start() { + int res = client_thread_->StartThread(); + if (res != net::kSuccess) { + LOG(FATAL) << "Start ReplClient ClientThread Error: " << res + << (res == net::kCreateThreadError ? ": create thread error " : ": other error"); + } + for (auto & binlog_worker : write_binlog_workers_) { + res = binlog_worker->StartThread(); + if (res != net::kSuccess) { + LOG(FATAL) << "Start Pika Repl Write Binlog Worker Thread Error: " << res + << (res == net::kCreateThreadError ? ": create thread error " : ": other error"); + } + } + for (auto & db_worker : write_db_workers_) { + res = db_worker->StartThread(); + if (res != net::kSuccess) { + LOG(FATAL) << "Start Pika Repl Write DB Worker Thread Error: " << res + << (res == net::kCreateThreadError ? 
": create thread error " : ": other error"); + } + } + return res; +} + +int PikaReplClient::Stop() { + client_thread_->StopThread(); + for (auto & binlog_worker : write_binlog_workers_) { + binlog_worker->StopThread(); + } + + // write DB task is async task, we must wait all writeDB task done and then to exit + // or some data will be loss + bool all_write_db_task_done = true; + do { + for (auto &db_worker: write_db_workers_) { + if (db_worker->TaskQueueSize() != 0) { + all_write_db_task_done = false; + std::this_thread::sleep_for(std::chrono::microseconds(300)); + break; + } else { + all_write_db_task_done = true; + } + } + //if there are unfinished async write db task, just continue to wait + } while (!all_write_db_task_done); + + for (auto &db_worker: write_db_workers_) { + db_worker->StopThread(); + } + return 0; +} + +void PikaReplClient::Schedule(net::TaskFunc func, void* arg) { + write_binlog_workers_[next_avail_]->Schedule(func, arg); + UpdateNextAvail(); +} + +void PikaReplClient::ScheduleByDBName(net::TaskFunc func, void* arg, const std::string& db_name) { + size_t index = GetBinlogWorkerIndexByDBName(db_name); + write_binlog_workers_[index]->Schedule(func, arg); +}; + +void PikaReplClient::ScheduleWriteBinlogTask(const std::string& db_name, + const std::shared_ptr& res, + const std::shared_ptr& conn, void* res_private_data) { + size_t index = GetBinlogWorkerIndexByDBName(db_name); + auto task_arg = new ReplClientWriteBinlogTaskArg(res, conn, res_private_data, write_binlog_workers_[index].get()); + write_binlog_workers_[index]->Schedule(&PikaReplBgWorker::HandleBGWorkerWriteBinlog, static_cast(task_arg)); +} + +void PikaReplClient::ScheduleWriteDBTask(const std::shared_ptr& cmd_ptr, const std::string& db_name) { + const PikaCmdArgsType& argv = cmd_ptr->argv(); + std::string dispatch_key = argv.size() >= 2 ? 
argv[1] : argv[0]; + size_t index = GetHashIndexByKey(dispatch_key); + auto task_arg = new ReplClientWriteDBTaskArg(cmd_ptr); + + IncrAsyncWriteDBTaskCount(db_name, 1); + std::function task_finish_call_back = [this, db_name]() { this->DecrAsyncWriteDBTaskCount(db_name, 1); }; + + write_db_workers_[index]->Schedule(&PikaReplBgWorker::HandleBGWorkerWriteDB, static_cast(task_arg), + task_finish_call_back); +} + +size_t PikaReplClient::GetBinlogWorkerIndexByDBName(const std::string &db_name) { + char db_num_c = db_name.back(); + int32_t db_num = db_num_c - '0'; + //Valid range of db_num is [0, MAX_DB_NUM) + if (db_num < 0 || db_num >= MAX_DB_NUM) { + LOG(ERROR) + << "Corruption in consuming binlog: the last char of the db_name(extracted from binlog) is not a valid db num, the extracted db_num is " + << db_num_c << " while write_binlog_workers.size() is " << write_binlog_workers_.size(); + if (db_num < 0) { assert(false && "db_num invalid, check if the db_name in the request is valid, also check the ERROR Log of Pika."); } + } + return db_num % write_binlog_workers_.size(); +} + +size_t PikaReplClient::GetHashIndexByKey(const std::string& key) { + size_t hash_base = write_db_workers_.size(); + return (str_hash(key) % hash_base); +} + +Status PikaReplClient::Write(const std::string& ip, const int port, const std::string& msg) { + return client_thread_->Write(ip, port, msg); +} + +Status PikaReplClient::Close(const std::string& ip, const int port) { return client_thread_->Close(ip, port); } + +Status PikaReplClient::SendMetaSync() { + std::string local_ip; + std::unique_ptr cli (net::NewRedisCli()); + cli->set_connect_timeout(1500); + if ((cli->Connect(g_pika_server->master_ip(), g_pika_server->master_port(), "")).ok()) { + struct sockaddr_in laddr; + socklen_t llen = sizeof(laddr); + getsockname(cli->fd(), reinterpret_cast(&laddr), &llen); + std::string tmp_local_ip(inet_ntoa(laddr.sin_addr)); + local_ip = tmp_local_ip; + cli->Close(); + } else { + LOG(WARNING) << "Failed to connect master, Master (" << g_pika_server->master_ip() << ":" + << g_pika_server->master_port() << "), try reconnect"; + // Sleep three seconds to avoid frequent try Meta Sync + // when the connection fails + sleep(3); + g_pika_server->ResetMetaSyncStatus(); + return Status::Corruption("Connect master error"); + } + + InnerMessage::InnerRequest request; + request.set_type(InnerMessage::kMetaSync); + InnerMessage::InnerRequest::MetaSync* meta_sync = request.mutable_meta_sync(); + InnerMessage::Node* node = meta_sync->mutable_node(); + node->set_ip(local_ip); + node->set_port(g_pika_server->port()); + + std::string masterauth = g_pika_conf->masterauth(); + if (!masterauth.empty()) { + meta_sync->set_auth(masterauth); + } + + std::string to_send; + std::string master_ip = g_pika_server->master_ip(); + int master_port = g_pika_server->master_port(); + if (!request.SerializeToString(&to_send)) { + LOG(WARNING) << "Serialize Meta Sync Request Failed, to Master (" << master_ip << ":" << master_port << ")"; + return Status::Corruption("Serialize Failed"); + } + + LOG(INFO) << "Try Send Meta Sync Request to Master (" << master_ip << ":" << master_port << ")"; + return client_thread_->Write(master_ip, master_port + kPortShiftReplServer, to_send); +} + +Status PikaReplClient::SendDBSync(const std::string& ip, uint32_t port, const std::string& db_name, + const BinlogOffset& boffset, const std::string& local_ip) { + InnerMessage::InnerRequest request; + request.set_type(InnerMessage::kDBSync); + InnerMessage::InnerRequest::DBSync* 
db_sync = request.mutable_db_sync(); + InnerMessage::Node* node = db_sync->mutable_node(); + node->set_ip(local_ip); + node->set_port(g_pika_server->port()); + InnerMessage::Slot* db = db_sync->mutable_slot(); + db->set_db_name(db_name); + /* + * Since the slot field is written in protobuffer, + * slot_id is set to the default value 0 for compatibility + * with older versions, but slot_id is not used + */ + db->set_slot_id(0); + + InnerMessage::BinlogOffset* binlog_offset = db_sync->mutable_binlog_offset(); + binlog_offset->set_filenum(boffset.filenum); + binlog_offset->set_offset(boffset.offset); + + std::string to_send; + if (!request.SerializeToString(&to_send)) { + LOG(WARNING) << "Serialize DB DBSync Request Failed, to Master (" << ip << ":" << port << ")"; + return Status::Corruption("Serialize Failed"); + } + return client_thread_->Write(ip, static_cast(port) + kPortShiftReplServer, to_send); +} + +Status PikaReplClient::SendTrySync(const std::string& ip, uint32_t port, const std::string& db_name, + const BinlogOffset& boffset, const std::string& local_ip) { + InnerMessage::InnerRequest request; + request.set_type(InnerMessage::kTrySync); + InnerMessage::InnerRequest::TrySync* try_sync = request.mutable_try_sync(); + InnerMessage::Node* node = try_sync->mutable_node(); + node->set_ip(local_ip); + node->set_port(g_pika_server->port()); + InnerMessage::Slot* db = try_sync->mutable_slot(); + db->set_db_name(db_name); + /* + * Since the slot field is written in protobuffer, + * slot_id is set to the default value 0 for compatibility + * with older versions, but slot_id is not used + */ + db->set_slot_id(0); + + InnerMessage::BinlogOffset* binlog_offset = try_sync->mutable_binlog_offset(); + binlog_offset->set_filenum(boffset.filenum); + binlog_offset->set_offset(boffset.offset); + + std::string to_send; + if (!request.SerializeToString(&to_send)) { + LOG(WARNING) << "Serialize DB TrySync Request Failed, to Master (" << ip << ":" << port << ")"; + return Status::Corruption("Serialize Failed"); + } + return client_thread_->Write(ip, static_cast(port + kPortShiftReplServer), to_send); +} + +Status PikaReplClient::SendBinlogSync(const std::string& ip, uint32_t port, const std::string& db_name, + const LogOffset& ack_start, const LogOffset& ack_end, + const std::string& local_ip, bool is_first_send) { + InnerMessage::InnerRequest request; + request.set_type(InnerMessage::kBinlogSync); + InnerMessage::InnerRequest::BinlogSync* binlog_sync = request.mutable_binlog_sync(); + InnerMessage::Node* node = binlog_sync->mutable_node(); + node->set_ip(local_ip); + node->set_port(g_pika_server->port()); + binlog_sync->set_db_name(db_name); + /* + * Since the slot field is written in protobuffer, + * slot_id is set to the default value 0 for compatibility + * with older versions, but slot_id is not used + */ + binlog_sync->set_slot_id(0); + binlog_sync->set_first_send(is_first_send); + + InnerMessage::BinlogOffset* ack_range_start = binlog_sync->mutable_ack_range_start(); + ack_range_start->set_filenum(ack_start.b_offset.filenum); + ack_range_start->set_offset(ack_start.b_offset.offset); + ack_range_start->set_term(ack_start.l_offset.term); + ack_range_start->set_index(ack_start.l_offset.index); + + InnerMessage::BinlogOffset* ack_range_end = binlog_sync->mutable_ack_range_end(); + ack_range_end->set_filenum(ack_end.b_offset.filenum); + ack_range_end->set_offset(ack_end.b_offset.offset); + ack_range_end->set_term(ack_end.l_offset.term); + ack_range_end->set_index(ack_end.l_offset.index); + + 
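+  // The [ack_range_start, ack_range_end] pair reports how much binlog this
+  // slave has consumed; callers pass default-constructed LogOffsets on
+  // keepalive-only rounds, which claims no new progress.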
std::shared_ptr slave_db = + g_pika_rm->GetSyncSlaveDBByName(DBInfo(db_name)); + if (!slave_db) { + LOG(WARNING) << "Slave DB: " << db_name << " not exist"; + return Status::NotFound("SyncSlaveDB NotFound"); + } + int32_t session_id = slave_db->MasterSessionId(); + binlog_sync->set_session_id(session_id); + + std::string to_send; + if (!request.SerializeToString(&to_send)) { + LOG(WARNING) << "Serialize DB BinlogSync Request Failed, to Master (" << ip << ":" << port << ")"; + return Status::Corruption("Serialize Failed"); + } + return client_thread_->Write(ip, static_cast(port + kPortShiftReplServer), to_send); +} + +Status PikaReplClient::SendRemoveSlaveNode(const std::string& ip, uint32_t port, const std::string& db_name, + const std::string& local_ip) { + InnerMessage::InnerRequest request; + request.set_type(InnerMessage::kRemoveSlaveNode); + InnerMessage::InnerRequest::RemoveSlaveNode* remove_slave_node = request.add_remove_slave_node(); + InnerMessage::Node* node = remove_slave_node->mutable_node(); + node->set_ip(local_ip); + node->set_port(g_pika_server->port()); + + InnerMessage::Slot* db = remove_slave_node->mutable_slot(); + db->set_db_name(db_name); + /* + * Since the slot field is written in protobuffer, + * slot_id is set to the default value 0 for compatibility + * with older versions, but slot_id is not used + */ + db->set_slot_id(0); + + std::string to_send; + if (!request.SerializeToString(&to_send)) { + LOG(WARNING) << "Serialize Remove Slave Node Failed, to Master (" << ip << ":" << port << "), " << db_name; + return Status::Corruption("Serialize Failed"); + } + return client_thread_->Write(ip, static_cast(port + kPortShiftReplServer), to_send); +} diff --git a/tools/pika_migrate/src/pika_repl_client_conn.cc b/tools/pika_migrate/src/pika_repl_client_conn.cc new file mode 100644 index 0000000000..8fb30d9306 --- /dev/null +++ b/tools/pika_migrate/src/pika_repl_client_conn.cc @@ -0,0 +1,282 @@ +// Copyright (c) 2015-present, Qihoo, Inc. All rights reserved. +// This source code is licensed under the BSD-style license found in the +// LICENSE file in the root directory of this source tree. An additional grant +// of patent rights can be found in the PATENTS file in the same directory. 
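+// DealMessage() below parses one length-prefixed InnerResponse out of the
+// read buffer and fans it out to background handlers keyed on the message
+// type. A minimal sketch of the decode step (buffer and size names are
+// illustrative):
+//
+//   ::google::protobuf::io::ArrayInputStream input(buf, len);
+//   ::google::protobuf::io::CodedInputStream decoder(&input);
+//   decoder.SetTotalBytesLimit(max_size);
+//   bool ok = response->ParseFromCodedStream(&decoder) &&
+//             decoder.ConsumedEntireMessage();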
+ +#include "include/pika_repl_client_conn.h" + +#include +#include +#include + +#include "include/pika_rm.h" +#include "include/pika_server.h" +#include "pstd/include/pstd_string.h" +#include "pika_inner_message.pb.h" + +using pstd::Status; + +extern PikaServer* g_pika_server; +extern std::unique_ptr g_pika_rm; + +PikaReplClientConn::PikaReplClientConn(int fd, const std::string& ip_port, net::Thread* thread, + void* worker_specific_data, net::NetMultiplexer* mpx) + : net::PbConn(fd, ip_port, thread, mpx) {} + +bool PikaReplClientConn::IsDBStructConsistent(const std::vector& current_dbs, + const std::vector& expect_dbs) { + if (current_dbs.size() != expect_dbs.size()) { + return false; + } + for (const auto& db_struct : current_dbs) { + if (find(expect_dbs.begin(), expect_dbs.end(), db_struct) == expect_dbs.end()) { + LOG(WARNING) << "DB struct mismatch"; + return false; + } + } + return true; +} + +int PikaReplClientConn::DealMessage() { + std::shared_ptr response = std::make_shared(); + ::google::protobuf::io::ArrayInputStream input(rbuf_ + cur_pos_ - header_len_, static_cast(header_len_)); + ::google::protobuf::io::CodedInputStream decoder(&input); + decoder.SetTotalBytesLimit(g_pika_conf->max_conn_rbuf_size()); + bool success = response->ParseFromCodedStream(&decoder) && decoder.ConsumedEntireMessage(); + if (!success) { + LOG(WARNING) << "ParseFromArray FAILED! " + << " msg_len: " << header_len_; + g_pika_server->SyncError(); + return -1; + } + switch (response->type()) { + case InnerMessage::kMetaSync: { + auto task_arg = + new ReplClientTaskArg(response, std::dynamic_pointer_cast(shared_from_this())); + g_pika_rm->ScheduleReplClientBGTask(&PikaReplClientConn::HandleMetaSyncResponse, static_cast(task_arg)); + break; + } + case InnerMessage::kDBSync: { + auto task_arg = + new ReplClientTaskArg(response, std::dynamic_pointer_cast(shared_from_this())); + g_pika_rm->ScheduleReplClientBGTask(&PikaReplClientConn::HandleDBSyncResponse, static_cast(task_arg)); + break; + } + case InnerMessage::kTrySync: { + const std::string& db_name = response->try_sync().slot().db_name(); + //TrySync resp must contain db_name + assert(!db_name.empty()); + auto task_arg = + new ReplClientTaskArg(response, std::dynamic_pointer_cast(shared_from_this())); + g_pika_rm->ScheduleReplClientBGTaskByDBName(&PikaReplClientConn::HandleTrySyncResponse, static_cast(task_arg), db_name); + break; + } + case InnerMessage::kBinlogSync: { + DispatchBinlogRes(response); + break; + } + case InnerMessage::kRemoveSlaveNode: { + auto task_arg = + new ReplClientTaskArg(response, std::dynamic_pointer_cast(shared_from_this())); + g_pika_rm->ScheduleReplClientBGTask(&PikaReplClientConn::HandleRemoveSlaveNodeResponse, + static_cast(task_arg)); + break; + } + default: + break; + } + return 0; +} + +void PikaReplClientConn::HandleMetaSyncResponse(void* arg) { + std::unique_ptr task_arg(static_cast(arg)); + std::shared_ptr conn = task_arg->conn; + std::shared_ptr response = task_arg->res; + + if (response->code() == InnerMessage::kOther) { + std::string reply = response->has_reply() ? response->reply() : ""; + // keep sending MetaSync + LOG(WARNING) << "Meta Sync Failed: " << reply << " will keep sending MetaSync msg"; + return; + } + + if (response->code() != InnerMessage::kOk) { + std::string reply = response->has_reply() ? 
response->reply() : ""; + LOG(WARNING) << "Meta Sync Failed: " << reply; + g_pika_server->SyncError(); + conn->NotifyClose(); + return; + } + + const InnerMessage::InnerResponse_MetaSync meta_sync = response->meta_sync(); + + std::vector master_db_structs; + for (int idx = 0; idx < meta_sync.dbs_info_size(); ++idx) { + const InnerMessage::InnerResponse_MetaSync_DBInfo& db_info = meta_sync.dbs_info(idx); + master_db_structs.push_back({db_info.db_name(), db_info.db_instance_num()}); + } + + std::vector self_db_structs = g_pika_conf->db_structs(); + if (!PikaReplClientConn::IsDBStructConsistent(self_db_structs, master_db_structs)) { + LOG(WARNING) << "Self db structs(number of databases: " << self_db_structs.size() + << ") inconsistent with master(number of databases: " << master_db_structs.size() + << "), failed to establish master-slave relationship"; + g_pika_server->SyncError(); + conn->NotifyClose(); + return; + } + + // The relicationid obtained from the server is null + if (meta_sync.replication_id() == "") { + LOG(WARNING) << "Meta Sync Failed: the relicationid obtained from the server is null, keep sending MetaSync msg"; + return; + } + + // The Replicationids of both the primary and secondary Replicationid are not empty and are not equal + if (g_pika_conf->replication_id() != meta_sync.replication_id() && g_pika_conf->replication_id() != "") { + LOG(WARNING) << "Meta Sync Failed: replicationid on both sides of the connection are inconsistent"; + g_pika_server->SyncError(); + conn->NotifyClose(); + return; + } + + // First synchronization between the master and slave + if (g_pika_conf->replication_id() != meta_sync.replication_id()) { + LOG(INFO) << "New node is added to the cluster and requires full replication, remote replication id: " << meta_sync.replication_id() + << ", local replication id: " << g_pika_conf->replication_id(); + g_pika_server->force_full_sync_ = true; + g_pika_conf->SetReplicationID(meta_sync.replication_id()); + g_pika_conf->ConfigRewriteReplicationID(); + } + + g_pika_conf->SetWriteBinlog("yes"); + g_pika_server->PrepareDBTrySync(); + g_pika_server->FinishMetaSync(); + LOG(INFO) << "Finish to handle meta sync response"; +} + +void PikaReplClientConn::HandleDBSyncResponse(void* arg) { + std::unique_ptr task_arg(static_cast(arg)); + std::shared_ptr conn = task_arg->conn; + std::shared_ptr response = task_arg->res; + + const InnerMessage::InnerResponse_DBSync db_sync_response = response->db_sync(); + int32_t session_id = db_sync_response.session_id(); + const InnerMessage::Slot& db_response = db_sync_response.slot(); + const std::string& db_name = db_response.db_name(); + + std::shared_ptr slave_db = + g_pika_rm->GetSyncSlaveDBByName(DBInfo(db_name)); + if (!slave_db) { + LOG(WARNING) << "Slave DB: " << db_name << " Not Found"; + return; + } + + if (response->code() != InnerMessage::kOk) { + slave_db->SetReplState(ReplState::kError); + std::string reply = response->has_reply() ? 
response->reply() : ""; + LOG(WARNING) << "DBSync Failed: " << reply; + return; + } + + slave_db->SetMasterSessionId(session_id); + + slave_db->StopRsync(); + slave_db->SetReplState(ReplState::kWaitDBSync); + LOG(INFO) << "DB: " << db_name << " Need Wait To Sync"; + + //now full sync is starting, add an unfinished full sync count + g_pika_conf->AddInternalUsedUnfinishedFullSync(slave_db->DBName()); +} + +void PikaReplClientConn::HandleTrySyncResponse(void* arg) { + std::unique_ptr task_arg(static_cast(arg)); + std::shared_ptr conn = task_arg->conn; + std::shared_ptr response = task_arg->res; + + if (response->code() != InnerMessage::kOk) { + std::string reply = response->has_reply() ? response->reply() : ""; + LOG(WARNING) << "TrySync Failed: " << reply; + return; + } + const InnerMessage::InnerResponse_TrySync& try_sync_response = response->try_sync(); + const InnerMessage::Slot& db_response = try_sync_response.slot(); + std::string db_name = db_response.db_name(); + std::shared_ptr db = + g_pika_rm->GetSyncMasterDBByName(DBInfo(db_name)); + if (!db) { + LOG(WARNING) << "DB: " << db_name << " Not Found"; + return; + } + + std::shared_ptr slave_db = + g_pika_rm->GetSyncSlaveDBByName(DBInfo(db_name)); + if (!slave_db) { + LOG(WARNING) << "DB: " << db_name << "Not Found"; + return; + } + + LogicOffset logic_last_offset; + if (try_sync_response.reply_code() == InnerMessage::InnerResponse::TrySync::kOk) { + BinlogOffset boffset; + int32_t session_id = try_sync_response.session_id(); + db->Logger()->GetProducerStatus(&boffset.filenum, &boffset.offset); + slave_db->SetMasterSessionId(session_id); + LogOffset offset(boffset, logic_last_offset); + g_pika_rm->SendBinlogSyncAckRequest(db_name, offset, offset, true); + slave_db->SetReplState(ReplState::kConnected); + // after connected, update receive time first to avoid connection timeout + slave_db->SetLastRecvTime(pstd::NowMicros()); + + LOG(INFO) << "DB: " << db_name << " TrySync Ok"; + } else if (try_sync_response.reply_code() == InnerMessage::InnerResponse::TrySync::kSyncPointBePurged) { + slave_db->SetReplState(ReplState::kTryDBSync); + LOG(INFO) << "DB: " << db_name << " Need To Try DBSync"; + } else if (try_sync_response.reply_code() == InnerMessage::InnerResponse::TrySync::kSyncPointLarger) { + slave_db->SetReplState(ReplState::kError); + LOG(WARNING) << "DB: " << db_name << " TrySync Error, Because the invalid filenum and offset"; + } else if (try_sync_response.reply_code() == InnerMessage::InnerResponse::TrySync::kError) { + slave_db->SetReplState(ReplState::kError); + LOG(WARNING) << "DB: " << db_name << " TrySync Error"; + } +} + +void PikaReplClientConn::DispatchBinlogRes(const std::shared_ptr& res) { + // db to a bunch of binlog chips + std::unordered_map*, hash_db_info> par_binlog; + for (int i = 0; i < res->binlog_sync_size(); ++i) { + const InnerMessage::InnerResponse::BinlogSync& binlog_res = res->binlog_sync(i); + // hash key: db + DBInfo p_info(binlog_res.slot().db_name()); + if (par_binlog.find(p_info) == par_binlog.end()) { + par_binlog[p_info] = new std::vector(); + } + par_binlog[p_info]->push_back(i); + } + + std::shared_ptr slave_db; + for (auto& binlog_nums : par_binlog) { + RmNode node(binlog_nums.first.db_name_); + slave_db = g_pika_rm->GetSyncSlaveDBByName( + DBInfo(binlog_nums.first.db_name_)); + if (!slave_db) { + LOG(WARNING) << "Slave DB: " << binlog_nums.first.db_name_ << " not exist"; + break; + } + slave_db->SetLastRecvTime(pstd::NowMicros()); + g_pika_rm->ScheduleWriteBinlogTask(binlog_nums.first.db_name_, res, 
+ std::dynamic_pointer_cast(shared_from_this()), + reinterpret_cast(binlog_nums.second)); + } +} + +void PikaReplClientConn::HandleRemoveSlaveNodeResponse(void* arg) { + std::unique_ptr task_arg(static_cast(arg)); + std::shared_ptr conn = task_arg->conn; + std::shared_ptr response = task_arg->res; + if (response->code() != InnerMessage::kOk) { + std::string reply = response->has_reply() ? response->reply() : ""; + LOG(WARNING) << "Remove slave node Failed: " << reply; + return; + } +} diff --git a/tools/pika_migrate/src/pika_repl_client_thread.cc b/tools/pika_migrate/src/pika_repl_client_thread.cc new file mode 100644 index 0000000000..2a7c666d81 --- /dev/null +++ b/tools/pika_migrate/src/pika_repl_client_thread.cc @@ -0,0 +1,51 @@ +// Copyright (c) 2019-present, Qihoo, Inc. All rights reserved. +// This source code is licensed under the BSD-style license found in the +// LICENSE file in the root directory of this source tree. An additional grant +// of patent rights can be found in the PATENTS file in the same directory. + +#include "include/pika_repl_client_thread.h" + +#include "include/pika_rm.h" +#include "include/pika_server.h" + +#include "pstd/include/pstd_string.h" + +extern PikaServer* g_pika_server; +extern std::unique_ptr g_pika_rm; + +PikaReplClientThread::PikaReplClientThread(int cron_interval, int keepalive_timeout) + : ClientThread(&conn_factory_, cron_interval, keepalive_timeout, &handle_, nullptr) {} + +void PikaReplClientThread::ReplClientHandle::FdClosedHandle(int fd, const std::string& ip_port) const { + LOG(INFO) << "ReplClient Close conn, fd=" << fd << ", ip_port=" << ip_port; + std::string ip; + int port = 0; + if (!pstd::ParseIpPortString(ip_port, ip, port)) { + LOG(WARNING) << "Parse ip_port error " << ip_port; + return; + } + if (ip == g_pika_server->master_ip() && port == g_pika_server->master_port() + kPortShiftReplServer && + PIKA_REPL_ERROR != g_pika_server->repl_state()) { // if state machine in error state, no retry + LOG(WARNING) << "Master conn disconnect : " << ip_port << " try reconnect"; + g_pika_server->ResetMetaSyncStatus(); + } + g_pika_server->UpdateMetaSyncTimestamp(); +}; + +void PikaReplClientThread::ReplClientHandle::FdTimeoutHandle(int fd, const std::string& ip_port) const { + LOG(INFO) << "ReplClient Timeout conn, fd=" << fd << ", ip_port=" << ip_port; + std::string ip; + int port = 0; + if (!pstd::ParseIpPortString(ip_port, ip, port)) { + LOG(WARNING) << "Parse ip_port error " << ip_port; + return; + } + if (ip == g_pika_server->master_ip() && port == g_pika_server->master_port() + kPortShiftReplServer && + PIKA_REPL_ERROR != g_pika_server->repl_state() && + PikaReplicaManager::CheckSlaveDBState(ip, port)) { + // if state machine equal to kDBNoConnect(execute cmd 'dbslaveof db no one'), no retry + LOG(WARNING) << "Master conn timeout : " << ip_port << " try reconnect"; + g_pika_server->ResetMetaSyncStatus(); + } + g_pika_server->UpdateMetaSyncTimestamp(); +}; diff --git a/tools/pika_migrate/src/pika_repl_server.cc b/tools/pika_migrate/src/pika_repl_server.cc new file mode 100644 index 0000000000..b92d239b18 --- /dev/null +++ b/tools/pika_migrate/src/pika_repl_server.cc @@ -0,0 +1,149 @@ +// Copyright (c) 2019-present, Qihoo, Inc. All rights reserved. +// This source code is licensed under the BSD-style license found in the +// LICENSE file in the root directory of this source tree. An additional grant +// of patent rights can be found in the PATENTS file in the same directory. 
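Both fd handles above share one reconnect predicate: a closed or timed-out connection only triggers a fresh meta sync when it belongs to the master's replication port and the state machine is not already in error. A condensed sketch of that check (illustration only; ReplState and MasterAddr here are stand-ins, not types from this patch):

    #include <string>

    // Stand-in types for illustration only.
    enum class ReplState { kOk, kError };
    struct MasterAddr { std::string ip; int repl_port; };

    // Mirrors FdClosedHandle/FdTimeoutHandle: retry meta sync only if the lost
    // connection is the master replication link and we are not in error state.
    bool ShouldRetryMetaSync(const std::string& ip, int port,
                             const MasterAddr& master, ReplState state) {
      return ip == master.ip && port == master.repl_port && state != ReplState::kError;
    }
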
+ +#include "include/pika_repl_server.h" + +#include + +#include "include/pika_conf.h" +#include "include/pika_rm.h" +#include "include/pika_server.h" + +using pstd::Status; + +extern PikaServer* g_pika_server; +extern std::unique_ptr g_pika_rm; + +PikaReplServer::PikaReplServer(const std::set& ips, int port, int cron_interval) { + server_tp_ = std::make_unique(PIKA_REPL_SERVER_TP_SIZE, 100000, "PikaReplServer"); + pika_repl_server_thread_ = std::make_unique(ips, port, cron_interval); + pika_repl_server_thread_->set_thread_name("PikaReplServer"); +} + +PikaReplServer::~PikaReplServer() { + LOG(INFO) << "PikaReplServer exit!!!"; +} + +int PikaReplServer::Start() { + pika_repl_server_thread_->set_thread_name("PikaReplServer"); + int res = pika_repl_server_thread_->StartThread(); + if (res != net::kSuccess) { + LOG(FATAL) << "Start Pika Repl Server Thread Error: " << res + << (res == net::kBindError + ? ": bind port " + std::to_string(pika_repl_server_thread_->ListenPort()) + " conflict" + : ": create thread error ") + << ", Listen on this port to handle the request sent by the Slave"; + } + res = server_tp_->start_thread_pool(); + if (res != net::kSuccess) { + LOG(FATAL) << "Start ThreadPool Error: " << res + << (res == net::kCreateThreadError ? ": create thread error " : ": other error"); + } + return res; +} + +int PikaReplServer::Stop() { + server_tp_->stop_thread_pool(); + pika_repl_server_thread_->StopThread(); + pika_repl_server_thread_->Cleanup(); + return 0; +} + +pstd::Status PikaReplServer::SendSlaveBinlogChips(const std::string& ip, int port, + const std::vector& tasks) { + InnerMessage::InnerResponse response; + BuildBinlogSyncResp(tasks, &response); + + std::string binlog_chip_pb; + if (!response.SerializeToString(&binlog_chip_pb)) { + return Status::Corruption("Serialized Failed"); + } + + if (binlog_chip_pb.size() > static_cast(g_pika_conf->max_conn_rbuf_size())) { + for (const auto& task : tasks) { + InnerMessage::InnerResponse response; + std::vector tmp_tasks; + tmp_tasks.push_back(task); + BuildBinlogSyncResp(tmp_tasks, &response); + if (!response.SerializeToString(&binlog_chip_pb)) { + return Status::Corruption("Serialized Failed"); + } + pstd::Status s = Write(ip, port, binlog_chip_pb); + if (!s.ok()) { + return s; + } + } + return pstd::Status::OK(); + } + return Write(ip, port, binlog_chip_pb); +} + +void PikaReplServer::BuildBinlogOffset(const LogOffset& offset, InnerMessage::BinlogOffset* boffset) { + boffset->set_filenum(offset.b_offset.filenum); + boffset->set_offset(offset.b_offset.offset); + boffset->set_term(offset.l_offset.term); + boffset->set_index(offset.l_offset.index); +} + +void PikaReplServer::BuildBinlogSyncResp(const std::vector& tasks, InnerMessage::InnerResponse* response) { + response->set_code(InnerMessage::kOk); + response->set_type(InnerMessage::Type::kBinlogSync); + for (const auto& task : tasks) { + InnerMessage::InnerResponse::BinlogSync* binlog_sync = response->add_binlog_sync(); + binlog_sync->set_session_id(task.rm_node_.SessionId()); + InnerMessage::Slot* db = binlog_sync->mutable_slot(); + db->set_db_name(task.rm_node_.DBName()); + /* + * Since the slot field is written in protobuffer, + * slot_id is set to the default value 0 for compatibility + * with older versions, but slot_id is not used + */ + db->set_slot_id(0); + InnerMessage::BinlogOffset* boffset = binlog_sync->mutable_binlog_offset(); + BuildBinlogOffset(task.binlog_chip_.offset_, boffset); + binlog_sync->set_binlog(task.binlog_chip_.binlog_); + } +} + +pstd::Status 
PikaReplServer::Write(const std::string& ip, const int port, const std::string& msg) { + std::shared_lock l(client_conn_rwlock_); + const std::string ip_port = pstd::IpPortString(ip, port); + if (client_conn_map_.find(ip_port) == client_conn_map_.end()) { + return Status::NotFound("The " + ip_port + " fd cannot be found"); + } + int fd = client_conn_map_[ip_port]; + std::shared_ptr conn = std::dynamic_pointer_cast(pika_repl_server_thread_->get_conn(fd)); + if (!conn) { + return Status::NotFound("The" + ip_port + " conn cannot be found"); + } + + if (conn->WriteResp(msg)) { + conn->NotifyClose(); + return Status::Corruption("The" + ip_port + " conn, Write Resp Failed"); + } + conn->NotifyWrite(); + return Status::OK(); +} + +void PikaReplServer::Schedule(net::TaskFunc func, void* arg) { server_tp_->Schedule(func, arg); } + +void PikaReplServer::UpdateClientConnMap(const std::string& ip_port, int fd) { + std::lock_guard l(client_conn_rwlock_); + client_conn_map_[ip_port] = fd; +} + +void PikaReplServer::RemoveClientConn(int fd) { + std::lock_guard l(client_conn_rwlock_); + auto iter = client_conn_map_.begin(); + while (iter != client_conn_map_.end()) { + if (iter->second == fd) { + iter = client_conn_map_.erase(iter); + break; + } + iter++; + } +} + +void PikaReplServer::KillAllConns() { return pika_repl_server_thread_->KillAllConns(); } diff --git a/tools/pika_migrate/src/pika_repl_server_conn.cc b/tools/pika_migrate/src/pika_repl_server_conn.cc new file mode 100644 index 0000000000..41cec0e02f --- /dev/null +++ b/tools/pika_migrate/src/pika_repl_server_conn.cc @@ -0,0 +1,464 @@ +// Copyright (c) 2019-present, Qihoo, Inc. All rights reserved. +// This source code is licensed under the BSD-style license found in the +// LICENSE file in the root directory of this source tree. An additional grant +// of patent rights can be found in the PATENTS file in the same directory. + +#include "include/pika_repl_server_conn.h" + +#include + +#include "include/pika_rm.h" +#include "include/pika_server.h" + +using pstd::Status; +extern PikaServer* g_pika_server; +extern std::unique_ptr g_pika_rm; + +PikaReplServerConn::PikaReplServerConn(int fd, const std::string& ip_port, net::Thread* thread, void* worker_specific_data, + net::NetMultiplexer* mpx) + : PbConn(fd, ip_port, thread, mpx) {} + +PikaReplServerConn::~PikaReplServerConn() = default; + +void PikaReplServerConn::HandleMetaSyncRequest(void* arg) { + std::unique_ptr task_arg(static_cast(arg)); + const std::shared_ptr req = task_arg->req; + std::shared_ptr conn = task_arg->conn; + + InnerMessage::InnerRequest::MetaSync meta_sync_request = req->meta_sync(); + const InnerMessage::Node& node = meta_sync_request.node(); + std::string masterauth = meta_sync_request.has_auth() ? 
meta_sync_request.auth() : ""; + + InnerMessage::InnerResponse response; + response.set_type(InnerMessage::kMetaSync); + if (!g_pika_conf->requirepass().empty() && g_pika_conf->requirepass() != masterauth) { + response.set_code(InnerMessage::kError); + response.set_reply("Auth with master error, Invalid masterauth"); + } else { + LOG(INFO) << "Receive MetaSync, Slave ip: " << node.ip() << ", Slave port:" << node.port(); + std::vector db_structs = g_pika_conf->db_structs(); + bool success = g_pika_server->TryAddSlave(node.ip(), node.port(), conn->fd(), db_structs); + const std::string ip_port = pstd::IpPortString(node.ip(), node.port()); + g_pika_rm->ReplServerUpdateClientConnMap(ip_port, conn->fd()); + if (!success) { + response.set_code(InnerMessage::kOther); + response.set_reply("Slave AlreadyExist"); + } else { + g_pika_server->BecomeMaster(); + response.set_code(InnerMessage::kOk); + InnerMessage::InnerResponse_MetaSync* meta_sync = response.mutable_meta_sync(); + if (g_pika_conf->replication_id() == "") { + std::string replication_id = pstd::getRandomHexChars(configReplicationIDSize); + g_pika_conf->SetReplicationID(replication_id); + g_pika_conf->ConfigRewriteReplicationID(); + } + meta_sync->set_classic_mode(g_pika_conf->classic_mode()); + meta_sync->set_run_id(g_pika_conf->run_id()); + meta_sync->set_replication_id(g_pika_conf->replication_id()); + for (const auto& db_struct : db_structs) { + InnerMessage::InnerResponse_MetaSync_DBInfo* db_info = meta_sync->add_dbs_info(); + db_info->set_db_name(db_struct.db_name); + /* + * Since the slot field is written in protobuffer, + * slot_num is set to the default value 1 for compatibility + * with older versions, but slot_num is not used + */ + db_info->set_slot_num(1); + db_info->set_db_instance_num(db_struct.db_instance_num); + } + } + } + + std::string reply_str; + if (!response.SerializeToString(&reply_str) || (conn->WriteResp(reply_str) != 0)) { + LOG(WARNING) << "Process MetaSync request serialization failed"; + conn->NotifyClose(); + return; + } + conn->NotifyWrite(); +} + +void PikaReplServerConn::HandleTrySyncRequest(void* arg) { + std::unique_ptr task_arg(static_cast(arg)); + const std::shared_ptr req = task_arg->req; + std::shared_ptr conn = task_arg->conn; + + InnerMessage::InnerRequest::TrySync try_sync_request = req->try_sync(); + const InnerMessage::Slot& db_request = try_sync_request.slot(); + const InnerMessage::BinlogOffset& slave_boffset = try_sync_request.binlog_offset(); + const InnerMessage::Node& node = try_sync_request.node(); + std::string db_name = db_request.db_name(); + + InnerMessage::InnerResponse response; + InnerMessage::InnerResponse::TrySync* try_sync_response = response.mutable_try_sync(); + try_sync_response->set_reply_code(InnerMessage::InnerResponse::TrySync::kError); + InnerMessage::Slot* db_response = try_sync_response->mutable_slot(); + db_response->set_db_name(db_name); + /* + * Since the slot field is written in protobuffer, + * slot_id is set to the default value 0 for compatibility + * with older versions, but slot_id is not used + */ + db_response->set_slot_id(0); + + bool pre_success = true; + response.set_type(InnerMessage::Type::kTrySync); + std::shared_ptr db = + g_pika_rm->GetSyncMasterDBByName(DBInfo(db_name)); + if (!db) { + response.set_code(InnerMessage::kError); + response.set_reply("DB not found"); + LOG(WARNING) << "DB Name: " << db_name << "Not Found, TrySync Error"; + pre_success = false; + } else { + LOG(INFO) << "Receive Trysync, Slave ip: " << node.ip() << ", Slave port:" << 
node.port() + << ", DB: " << db_name << ", filenum: " << slave_boffset.filenum() + << ", pro_offset: " << slave_boffset.offset(); + response.set_code(InnerMessage::kOk); + } + + if (pre_success && TrySyncOffsetCheck(db, try_sync_request, try_sync_response)) { + TrySyncUpdateSlaveNode(db, try_sync_request, conn, try_sync_response); + } + + std::string reply_str; + if (!response.SerializeToString(&reply_str) || (conn->WriteResp(reply_str) != 0)) { + LOG(WARNING) << "Handle Try Sync Failed"; + conn->NotifyClose(); + return; + } + conn->NotifyWrite(); +} + +bool PikaReplServerConn::TrySyncUpdateSlaveNode(const std::shared_ptr& db, + const InnerMessage::InnerRequest::TrySync& try_sync_request, + const std::shared_ptr& conn, + InnerMessage::InnerResponse::TrySync* try_sync_response) { + const InnerMessage::Node& node = try_sync_request.node(); + if (!db->CheckSlaveNodeExist(node.ip(), node.port())) { + int32_t session_id = db->GenSessionId(); + if (session_id == -1) { + try_sync_response->set_reply_code(InnerMessage::InnerResponse::TrySync::kError); + LOG(WARNING) << "DB: " << db->DBName() << ", Gen Session id Failed"; + return false; + } + try_sync_response->set_session_id(session_id); + // incremental sync + Status s = db->AddSlaveNode(node.ip(), node.port(), session_id); + if (!s.ok()) { + try_sync_response->set_reply_code(InnerMessage::InnerResponse::TrySync::kError); + LOG(WARNING) << "DB: " << db->DBName() << " TrySync Failed, " << s.ToString(); + return false; + } + const std::string ip_port = pstd::IpPortString(node.ip(), node.port()); + g_pika_rm->ReplServerUpdateClientConnMap(ip_port, conn->fd()); + try_sync_response->set_reply_code(InnerMessage::InnerResponse::TrySync::kOk); + LOG(INFO) << "DB: " << db->DBName() << " TrySync Success, Session: " << session_id; + } else { + int32_t session_id; + Status s = db->GetSlaveNodeSession(node.ip(), node.port(), &session_id); + if (!s.ok()) { + try_sync_response->set_reply_code(InnerMessage::InnerResponse::TrySync::kError); + LOG(WARNING) << "DB: " << db->DBName() << " Get Session id Failed" << s.ToString(); + return false; + } + try_sync_response->set_reply_code(InnerMessage::InnerResponse::TrySync::kOk); + try_sync_response->set_session_id(session_id); + LOG(INFO) << "DB: " << db->DBName() << " TrySync Success, Session: " << session_id; + } + return true; +} + +bool PikaReplServerConn::TrySyncOffsetCheck(const std::shared_ptr& db, + const InnerMessage::InnerRequest::TrySync& try_sync_request, + InnerMessage::InnerResponse::TrySync* try_sync_response) { + const InnerMessage::Node& node = try_sync_request.node(); + const InnerMessage::BinlogOffset& slave_boffset = try_sync_request.binlog_offset(); + std::string db_name = db->DBName(); + BinlogOffset boffset; + Status s = db->Logger()->GetProducerStatus(&(boffset.filenum), &(boffset.offset)); + if (!s.ok()) { + try_sync_response->set_reply_code(InnerMessage::InnerResponse::TrySync::kError); + LOG(WARNING) << "Handle TrySync, DB: " << db_name << " Get binlog offset error, TrySync failed"; + return false; + } + InnerMessage::BinlogOffset* master_db_boffset = try_sync_response->mutable_binlog_offset(); + master_db_boffset->set_filenum(boffset.filenum); + master_db_boffset->set_offset(boffset.offset); + + if (boffset.filenum < slave_boffset.filenum() || + (boffset.filenum == slave_boffset.filenum() && boffset.offset < slave_boffset.offset())) { + try_sync_response->set_reply_code(InnerMessage::InnerResponse::TrySync::kSyncPointLarger); + LOG(WARNING) << "Slave offset is larger than mine, Slave ip: " 
<< node.ip() << ", Slave port: " << node.port() + << ", DB: " << db_name << ", slave filenum: " << slave_boffset.filenum() + << ", slave pro_offset_: " << slave_boffset.offset() << ", local filenum: " << boffset.filenum << ", local pro_offset_: " << boffset.offset; + return false; + } + + std::string confile = NewFileName(db->Logger()->filename(), slave_boffset.filenum()); + if (!pstd::FileExists(confile)) { + LOG(INFO) << "DB: " << db_name << " binlog has been purged, may need full sync"; + try_sync_response->set_reply_code(InnerMessage::InnerResponse::TrySync::kSyncPointBePurged); + return false; + } + + PikaBinlogReader reader; + reader.Seek(db->Logger(), slave_boffset.filenum(), slave_boffset.offset()); + BinlogOffset seeked_offset; + reader.GetReaderStatus(&(seeked_offset.filenum), &(seeked_offset.offset)); + if (seeked_offset.filenum != slave_boffset.filenum() || seeked_offset.offset != slave_boffset.offset()) { + try_sync_response->set_reply_code(InnerMessage::InnerResponse::TrySync::kError); + LOG(WARNING) << "Slave offset is not a start point of cur log, Slave ip: " << node.ip() + << ", Slave port: " << node.port() << ", DB: " << db_name << " closest start point, filenum: " + << seeked_offset.filenum << ", offset: " << seeked_offset.offset; + return false; + } + return true; +} + +void PikaReplServerConn::HandleDBSyncRequest(void* arg) { + std::unique_ptr task_arg(static_cast(arg)); + const std::shared_ptr req = task_arg->req; + std::shared_ptr conn = task_arg->conn; + + InnerMessage::InnerRequest::DBSync db_sync_request = req->db_sync(); + const InnerMessage::Slot& db_request = db_sync_request.slot(); + const InnerMessage::Node& node = db_sync_request.node(); + const InnerMessage::BinlogOffset& slave_boffset = db_sync_request.binlog_offset(); + std::string db_name = db_request.db_name(); + + InnerMessage::InnerResponse response; + response.set_code(InnerMessage::kOk); + response.set_type(InnerMessage::Type::kDBSync); + InnerMessage::InnerResponse::DBSync* db_sync_response = response.mutable_db_sync(); + InnerMessage::Slot* db_response = db_sync_response->mutable_slot(); + db_response->set_db_name(db_name); + /* + * Since the slot field is written in protobuffer, + * slot_id is set to the default value 0 for compatibility + * with older versions, but slot_id is not used + */ + db_response->set_slot_id(0); + + LOG(INFO) << "Handle DBSync Request"; + bool prior_success = true; + std::shared_ptr master_db = + g_pika_rm->GetSyncMasterDBByName(DBInfo(db_name)); + if (!master_db) { + LOG(WARNING) << "Sync Master DB: " << db_name << ", NotFound"; + prior_success = false; + response.set_code(InnerMessage::kError); + } + if (prior_success) { + if (!master_db->CheckSlaveNodeExist(node.ip(), node.port())) { + int32_t session_id = master_db->GenSessionId(); + db_sync_response->set_session_id(session_id); + if (session_id == -1) { + response.set_code(InnerMessage::kError); + LOG(WARNING) << "DB: " << db_name << ", Gen Session id Failed"; + } else { + Status s = master_db->AddSlaveNode(node.ip(), node.port(), session_id); + if (s.ok()) { + const std::string ip_port = pstd::IpPortString(node.ip(), node.port()); + g_pika_rm->ReplServerUpdateClientConnMap(ip_port, conn->fd()); + LOG(INFO) << "DB: " << db_name << " Handle DBSync Request Success, Session: " << session_id; + } else { + response.set_code(InnerMessage::kError); + LOG(WARNING) << "DB: " << db_name << " Handle DBSync Request Failed, " << s.ToString(); + } + } + } else { + int32_t session_id = 0; + Status s = 
master_db->GetSlaveNodeSession(node.ip(), node.port(), &session_id);
+      if (!s.ok()) {
+        response.set_code(InnerMessage::kError);
+        db_sync_response->set_session_id(-1);
+        LOG(WARNING) << "DB: " << db_name << ", Get Session id Failed, " << s.ToString();
+      } else {
+        db_sync_response->set_session_id(session_id);
+        LOG(INFO) << "DB: " << db_name << " Handle DBSync Request Success, Session: " << session_id;
+      }
+    }
+  }
+
+  if (prior_success) {
+    // Change the slave node's state to kSlaveDbSync so that the binlog will be preserved.
+    // See details in SyncMasterDB::BinlogCloudPurge.
+    master_db->ActivateSlaveDbSync(node.ip(), node.port());
+
+    g_pika_server->TryDBSync(node.ip(), node.port() + kPortShiftRSync, db_name,
+                             static_cast<int32_t>(slave_boffset.filenum()));
+  }
+
+  std::string reply_str;
+  if (!response.SerializeToString(&reply_str) || (conn->WriteResp(reply_str) != 0)) {
+    LOG(WARNING) << "Handle DBSync Failed";
+    conn->NotifyClose();
+    return;
+  }
+  conn->NotifyWrite();
+}
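HandleBinlogSyncRequest below distinguishes three cases: the first request activates binlog sync at the slave's offset, a non-first request whose ack range is all zero is treated as a keepalive ping, and anything else advances the ack window. A condensed sketch of that dispatch (a simplified illustration, not the patch's own types):

    #include <cstdint>

    // Simplified stand-in for BinlogOffset.
    struct Offset {
      uint32_t filenum = 0;
      uint64_t offset = 0;
      bool operator==(const Offset& o) const { return filenum == o.filenum && offset == o.offset; }
    };

    enum class AckKind { kFirstSend, kPing, kAckRange };

    // Mirrors the three branches of HandleBinlogSyncRequest.
    AckKind ClassifyAck(bool is_first_send, const Offset& start, const Offset& end) {
      if (is_first_send) return AckKind::kFirstSend;                    // activate sync at `start`
      if (start == Offset{} && end == Offset{}) return AckKind::kPing;  // keepalive only
      return AckKind::kAckRange;                                        // advance the sync window
    }
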
LOG(WARNING) << "Update binlog ack failed " << db_name << " " << s.ToString(); + conn->NotifyClose(); + return; + } + + g_pika_server->SignalAuxiliary(); +} + +void PikaReplServerConn::HandleRemoveSlaveNodeRequest(void* arg) { + std::unique_ptr task_arg(static_cast(arg)); + const std::shared_ptr req = task_arg->req; + std::shared_ptr conn = task_arg->conn; + if (req->remove_slave_node_size() == 0) { + LOG(WARNING) << "Pb parse error"; + conn->NotifyClose(); + return; + } + const InnerMessage::InnerRequest::RemoveSlaveNode& remove_slave_node_req = req->remove_slave_node(0); + const InnerMessage::Node& node = remove_slave_node_req.node(); + const InnerMessage::Slot& slot = remove_slave_node_req.slot(); + + std::string db_name = slot.db_name(); + std::shared_ptr master_db = + g_pika_rm->GetSyncMasterDBByName(DBInfo(db_name)); + if (!master_db) { + LOG(WARNING) << "Sync Master DB: " << db_name << ", NotFound"; + } + Status s = master_db->RemoveSlaveNode(node.ip(), node.port()); + + InnerMessage::InnerResponse response; + response.set_code(InnerMessage::kOk); + response.set_type(InnerMessage::Type::kRemoveSlaveNode); + InnerMessage::InnerResponse::RemoveSlaveNode* remove_slave_node_response = response.add_remove_slave_node(); + InnerMessage::Slot* db_response = remove_slave_node_response->mutable_slot (); + db_response->set_db_name(db_name); + /* + * Since the slot field is written in protobuffer, + * slot_id is set to the default value 0 for compatibility + * with older versions, but slot_id is not used + */ + db_response->set_slot_id(0); + InnerMessage::Node* node_response = remove_slave_node_response->mutable_node(); + node_response->set_ip(g_pika_server->host()); + node_response->set_port(g_pika_server->port()); + + std::string reply_str; + if (!response.SerializeToString(&reply_str) || (conn->WriteResp(reply_str) != 0)) { + LOG(WARNING) << "Remove Slave Node Failed"; + conn->NotifyClose(); + return; + } + conn->NotifyWrite(); +} + +int PikaReplServerConn::DealMessage() { + std::shared_ptr req = std::make_shared(); + bool parse_res = req->ParseFromArray(rbuf_ + cur_pos_ - header_len_, static_cast(header_len_)); + if (!parse_res) { + LOG(WARNING) << "Pika repl server connection pb parse error."; + return -1; + } + switch (req->type()) { + case InnerMessage::kMetaSync: { + auto task_arg = + new ReplServerTaskArg(req, std::dynamic_pointer_cast(shared_from_this())); + g_pika_rm->ScheduleReplServerBGTask(&PikaReplServerConn::HandleMetaSyncRequest, task_arg); + break; + } + case InnerMessage::kTrySync: { + auto task_arg = + new ReplServerTaskArg(req, std::dynamic_pointer_cast(shared_from_this())); + g_pika_rm->ScheduleReplServerBGTask(&PikaReplServerConn::HandleTrySyncRequest, task_arg); + break; + } + case InnerMessage::kDBSync: { + auto task_arg = + new ReplServerTaskArg(req, std::dynamic_pointer_cast(shared_from_this())); + g_pika_rm->ScheduleReplServerBGTask(&PikaReplServerConn::HandleDBSyncRequest, task_arg); + break; + } + case InnerMessage::kBinlogSync: { + auto task_arg = + new ReplServerTaskArg(req, std::dynamic_pointer_cast(shared_from_this())); + g_pika_rm->ScheduleReplServerBGTask(&PikaReplServerConn::HandleBinlogSyncRequest, task_arg); + break; + } + case InnerMessage::kRemoveSlaveNode: { + auto task_arg = + new ReplServerTaskArg(req, std::dynamic_pointer_cast(shared_from_this())); + g_pika_rm->ScheduleReplServerBGTask(&PikaReplServerConn::HandleRemoveSlaveNodeRequest, task_arg); + break; + } + default: + break; + } + return 0; +} diff --git 
a/tools/pika_migrate/src/pika_repl_server_thread.cc b/tools/pika_migrate/src/pika_repl_server_thread.cc new file mode 100644 index 0000000000..590ba02f7f --- /dev/null +++ b/tools/pika_migrate/src/pika_repl_server_thread.cc @@ -0,0 +1,27 @@ +// Copyright (c) 2019-present, Qihoo, Inc. All rights reserved. +// This source code is licensed under the BSD-style license found in the +// LICENSE file in the root directory of this source tree. An additional grant +// of patent rights can be found in the PATENTS file in the same directory. + +#include "include/pika_repl_server_thread.h" + +#include "include/pika_rm.h" +#include "include/pika_server.h" + +extern PikaServer* g_pika_server; +extern std::unique_ptr g_pika_rm; + +PikaReplServerThread::PikaReplServerThread(const std::set& ips, int port, int cron_interval) + : HolyThread(ips, port, &conn_factory_, cron_interval, &handle_, true), + conn_factory_(this), + port_(port) { + set_keepalive_timeout(180); +} + +int PikaReplServerThread::ListenPort() { return port_; } + +void PikaReplServerThread::ReplServerHandle::FdClosedHandle(int fd, const std::string& ip_port) const { + LOG(INFO) << "ServerThread Close Slave Conn, fd: " << fd << ", ip_port: " << ip_port; + g_pika_server->DeleteSlave(fd); + g_pika_rm->ReplServerRemoveClientConn(fd); +} diff --git a/tools/pika_migrate/src/pika_rm.cc b/tools/pika_migrate/src/pika_rm.cc new file mode 100644 index 0000000000..9df7b82101 --- /dev/null +++ b/tools/pika_migrate/src/pika_rm.cc @@ -0,0 +1,1056 @@ +// Copyright (c) 2015-present, Qihoo, Inc. All rights reserved. +// This source code is licensed under the BSD-style license found in the +// LICENSE file in the root directory of this source tree. An additional grant +// of patent rights can be found in the PATENTS file in the same directory. 
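The server thread above leans on HolyThread's keepalive (180 seconds) to reap dead slave links, while pika_rm.cc below additionally tracks per-node LastRecvTime/LastSendTime against kRecvKeepAliveTimeout and kSendKeepAliveTimeout. A toy version of that two-sided bookkeeping (the constant values here are illustrative assumptions, not the ones from pika_define.h):

    #include <cstdint>

    // Hypothetical values; the real constants live in pika_define.h.
    constexpr uint64_t kRecvKeepAliveTimeoutUs = 20ULL * 1000 * 1000;
    constexpr uint64_t kSendKeepAliveTimeoutUs = 2ULL * 1000 * 1000;

    struct PeerTimes {
      uint64_t last_recv_us = 0;
      uint64_t last_send_us = 0;
    };

    // A peer is dropped when nothing was received within the recv timeout;
    // a ping is due when we have been silent for the send timeout.
    inline bool ShouldDrop(const PeerTimes& t, uint64_t now) { return t.last_recv_us + kRecvKeepAliveTimeoutUs < now; }
    inline bool ShouldPing(const PeerTimes& t, uint64_t now) { return t.last_send_us + kSendKeepAliveTimeoutUs < now; }
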
+ +#include "include/pika_rm.h" + +#include +#include +#include +#include + +#include + +#include "net/include/net_cli.h" + +#include "include/pika_conf.h" +#include "include/pika_server.h" + +#include "include/pika_admin.h" +#include "include/pika_command.h" + +using pstd::Status; + +extern std::unique_ptr g_pika_rm; +extern PikaServer* g_pika_server; + +/* SyncDB */ + +SyncDB::SyncDB(const std::string& db_name) + : db_info_(db_name) {} + +std::string SyncDB::DBName() { + return db_info_.db_name_; +} + +/* SyncMasterDB*/ + +SyncMasterDB::SyncMasterDB(const std::string& db_name) + : SyncDB(db_name), coordinator_(db_name) {} + +int SyncMasterDB::GetNumberOfSlaveNode() { return coordinator_.SyncPros().SlaveSize(); } + +bool SyncMasterDB::CheckSlaveNodeExist(const std::string& ip, int port) { + std::shared_ptr slave_ptr = GetSlaveNode(ip, port); + return static_cast(slave_ptr); +} + +Status SyncMasterDB::GetSlaveNodeSession(const std::string& ip, int port, int32_t* session) { + std::shared_ptr slave_ptr = GetSlaveNode(ip, port); + if (!slave_ptr) { + return Status::NotFound("slave " + ip + ":" + std::to_string(port) + " not found"); + } + + slave_ptr->Lock(); + *session = slave_ptr->SessionId(); + slave_ptr->Unlock(); + + return Status::OK(); +} + +Status SyncMasterDB::AddSlaveNode(const std::string& ip, int port, int session_id) { + Status s = coordinator_.AddSlaveNode(ip, port, session_id); + if (!s.ok()) { + LOG(WARNING) << "Add Slave Node Failed, db: " << SyncDBInfo().ToString() << ", ip_port: " << ip << ":" + << port; + return s; + } + LOG(INFO) << "Add Slave Node, db: " << SyncDBInfo().ToString() << ", ip_port: " << ip << ":" << port; + return Status::OK(); +} + +Status SyncMasterDB::RemoveSlaveNode(const std::string& ip, int port) { + Status s = coordinator_.RemoveSlaveNode(ip, port); + if (!s.ok()) { + LOG(WARNING) << "Remove Slave Node Failed, db: " << SyncDBInfo().ToString() << ", ip_port: " << ip + << ":" << port; + return s; + } + LOG(INFO) << "Remove Slave Node, DB: " << SyncDBInfo().ToString() << ", ip_port: " << ip << ":" << port; + return Status::OK(); +} + +Status SyncMasterDB::ActivateSlaveBinlogSync(const std::string& ip, int port, const LogOffset& offset) { + std::shared_ptr slave_ptr = GetSlaveNode(ip, port); + if (!slave_ptr) { + return Status::NotFound("ip " + ip + " port " + std::to_string(port)); + } + + { + std::lock_guard l(slave_ptr->slave_mu); + slave_ptr->slave_state = kSlaveBinlogSync; + slave_ptr->sent_offset = offset; + slave_ptr->acked_offset = offset; + // read binlog file from file + Status s = slave_ptr->InitBinlogFileReader(Logger(), offset.b_offset); + if (!s.ok()) { + return Status::Corruption("Init binlog file reader failed" + s.ToString()); + } + //Since we init a new reader, we should drop items in write queue and reset sync_window. 
+ //Or the sent_offset and acked_offset will not match + g_pika_rm->DropItemInOneWriteQueue(ip, port, slave_ptr->DBName()); + slave_ptr->sync_win.Reset(); + slave_ptr->b_state = kReadFromFile; + } + + Status s = SyncBinlogToWq(ip, port); + if (!s.ok()) { + return s; + } + return Status::OK(); +} + +Status SyncMasterDB::SyncBinlogToWq(const std::string& ip, int port) { + std::shared_ptr slave_ptr = GetSlaveNode(ip, port); + if (!slave_ptr) { + return Status::NotFound("ip " + ip + " port " + std::to_string(port)); + } + Status s; + slave_ptr->Lock(); + s = ReadBinlogFileToWq(slave_ptr); + slave_ptr->Unlock(); + if (!s.ok()) { + return s; + } + return Status::OK(); +} + +Status SyncMasterDB::ActivateSlaveDbSync(const std::string& ip, int port) { + std::shared_ptr slave_ptr = GetSlaveNode(ip, port); + if (!slave_ptr) { + return Status::NotFound("ip " + ip + " port " + std::to_string(port)); + } + + slave_ptr->Lock(); + slave_ptr->slave_state = kSlaveDbSync; + // invoke db sync + slave_ptr->Unlock(); + + return Status::OK(); +} + +Status SyncMasterDB::ReadBinlogFileToWq(const std::shared_ptr& slave_ptr) { + int cnt = slave_ptr->sync_win.Remaining(); + std::shared_ptr reader = slave_ptr->binlog_reader; + if (!reader) { + return Status::OK(); + } + std::vector tasks; + for (int i = 0; i < cnt; ++i) { + std::string msg; + uint32_t filenum; + uint64_t offset; + if (slave_ptr->sync_win.GetTotalBinlogSize() > PIKA_MAX_CONN_RBUF_HB * 2) { + LOG(INFO) << slave_ptr->ToString() + << " total binlog size in sync window is :" << slave_ptr->sync_win.GetTotalBinlogSize(); + break; + } + Status s = reader->Get(&msg, &filenum, &offset); + if (s.IsEndFile()) { + break; + } else if (s.IsCorruption() || s.IsIOError()) { + LOG(WARNING) << SyncDBInfo().ToString() << " Read Binlog error : " << s.ToString(); + return s; + } + BinlogItem item; + if (!PikaBinlogTransverter::BinlogItemWithoutContentDecode(TypeFirst, msg, &item)) { + LOG(WARNING) << "Binlog item decode failed"; + return Status::Corruption("Binlog item decode failed"); + } + BinlogOffset sent_b_offset = BinlogOffset(filenum, offset); + LogicOffset sent_l_offset = LogicOffset(item.term_id(), item.logic_id()); + LogOffset sent_offset(sent_b_offset, sent_l_offset); + + slave_ptr->sync_win.Push(SyncWinItem(sent_offset, msg.size())); + slave_ptr->SetLastSendTime(pstd::NowMicros()); + RmNode rm_node(slave_ptr->Ip(), slave_ptr->Port(), slave_ptr->DBName(), slave_ptr->SessionId()); + WriteTask task(rm_node, BinlogChip(sent_offset, msg), slave_ptr->sent_offset); + tasks.push_back(task); + slave_ptr->sent_offset = sent_offset; + } + + if (!tasks.empty()) { + g_pika_rm->ProduceWriteQueue(slave_ptr->Ip(), slave_ptr->Port(), db_info_.db_name_, tasks); + } + return Status::OK(); +} + +Status SyncMasterDB::ConsensusUpdateSlave(const std::string& ip, int port, const LogOffset& start, const LogOffset& end) { + Status s = coordinator_.UpdateSlave(ip, port, start, end); + if (!s.ok()) { + LOG(WARNING) << SyncDBInfo().ToString() << s.ToString(); + return s; + } + return Status::OK(); +} + +Status SyncMasterDB::GetSlaveSyncBinlogInfo(const std::string& ip, int port, BinlogOffset* sent_offset, + BinlogOffset* acked_offset) { + std::shared_ptr slave_ptr = GetSlaveNode(ip, port); + if (!slave_ptr) { + return Status::NotFound("ip " + ip + " port " + std::to_string(port)); + } + + slave_ptr->Lock(); + *sent_offset = slave_ptr->sent_offset.b_offset; + *acked_offset = slave_ptr->acked_offset.b_offset; + slave_ptr->Unlock(); + + return Status::OK(); +} + +Status 
SyncMasterDB::GetSlaveState(const std::string& ip, int port, SlaveState* const slave_state) {
+  std::shared_ptr<SlaveNode> slave_ptr = GetSlaveNode(ip, port);
+  if (!slave_ptr) {
+    return Status::NotFound("ip " + ip + " port " + std::to_string(port));
+  }
+
+  slave_ptr->Lock();
+  *slave_state = slave_ptr->slave_state;
+  slave_ptr->Unlock();
+
+  return Status::OK();
+}
+
+Status SyncMasterDB::WakeUpSlaveBinlogSync() {
+  std::unordered_map<std::string, std::shared_ptr<SlaveNode>> slaves = GetAllSlaveNodes();
+  std::vector<std::shared_ptr<SlaveNode>> to_del;
+  for (auto& slave_iter : slaves) {
+    std::shared_ptr<SlaveNode> slave_ptr = slave_iter.second;
+    std::lock_guard l(slave_ptr->slave_mu);
+    if (slave_ptr->sent_offset == slave_ptr->acked_offset) {
+      Status s = ReadBinlogFileToWq(slave_ptr);
+      if (!s.ok()) {
+        to_del.push_back(slave_ptr);
+        LOG(WARNING) << "WakeUpSlaveBinlogSync failed, Delete from RM, slave: " << slave_ptr->ToStringStatus() << " "
+                     << s.ToString();
+      }
+    }
+  }
+  for (auto& to_del_slave : to_del) {
+    RemoveSlaveNode(to_del_slave->Ip(), to_del_slave->Port());
+  }
+  return Status::OK();
+}
+
+Status SyncMasterDB::SetLastRecvTime(const std::string& ip, int port, uint64_t time) {
+  std::shared_ptr<SlaveNode> slave_ptr = GetSlaveNode(ip, port);
+  if (!slave_ptr) {
+    return Status::NotFound("ip " + ip + " port " + std::to_string(port));
+  }
+
+  slave_ptr->Lock();
+  slave_ptr->SetLastRecvTime(time);
+  slave_ptr->Unlock();
+
+  return Status::OK();
+}
+
+Status SyncMasterDB::GetSafetyPurgeBinlog(std::string* safety_purge) {
+  BinlogOffset boffset;
+  Status s = Logger()->GetProducerStatus(&(boffset.filenum), &(boffset.offset));
+  if (!s.ok()) {
+    return s;
+  }
+  bool success = false;
+  uint32_t purge_max = boffset.filenum;
+  if (purge_max >= 10) {
+    success = true;
+    purge_max -= 10;
+    std::unordered_map<std::string, std::shared_ptr<SlaveNode>> slaves = GetAllSlaveNodes();
+    for (const auto& slave_iter : slaves) {
+      std::shared_ptr<SlaveNode> slave_ptr = slave_iter.second;
+      std::lock_guard l(slave_ptr->slave_mu);
+      if (slave_ptr->slave_state == SlaveState::kSlaveBinlogSync && slave_ptr->acked_offset.b_offset.filenum > 0) {
+        purge_max = std::min(slave_ptr->acked_offset.b_offset.filenum - 1, purge_max);
+      } else {
+        success = false;
+        break;
+      }
+    }
+  }
+  *safety_purge = (success ? 
kBinlogPrefix + std::to_string(static_cast(purge_max)) : "none"); + return Status::OK(); +} + +bool SyncMasterDB::BinlogCloudPurge(uint32_t index) { + BinlogOffset boffset; + Status s = Logger()->GetProducerStatus(&(boffset.filenum), &(boffset.offset)); + if (!s.ok()) { + return false; + } + if (index > (boffset.filenum - 10)) { // remain some more + return false; + } else { + std::unordered_map> slaves = GetAllSlaveNodes(); + for (const auto& slave_iter : slaves) { + std::shared_ptr slave_ptr = slave_iter.second; + std::lock_guard l(slave_ptr->slave_mu); + if (slave_ptr->slave_state == SlaveState::kSlaveDbSync) { + return false; + } else if (slave_ptr->slave_state == SlaveState::kSlaveBinlogSync) { + if (index >= slave_ptr->acked_offset.b_offset.filenum) { + return false; + } + } + } + } + return true; +} + +Status SyncMasterDB::CheckSyncTimeout(uint64_t now) { + std::unordered_map> slaves = GetAllSlaveNodes(); + + std::vector to_del; + for (auto& slave_iter : slaves) { + std::shared_ptr slave_ptr = slave_iter.second; + std::lock_guard l(slave_ptr->slave_mu); + if (slave_ptr->LastRecvTime() + kRecvKeepAliveTimeout < now) { + to_del.emplace_back(slave_ptr->Ip(), slave_ptr->Port()); + } else if (slave_ptr->LastSendTime() + kSendKeepAliveTimeout < now && + slave_ptr->sent_offset == slave_ptr->acked_offset) { + std::vector task; + RmNode rm_node(slave_ptr->Ip(), slave_ptr->Port(), slave_ptr->DBName(), slave_ptr->SessionId()); + WriteTask empty_task(rm_node, BinlogChip(LogOffset(), ""), LogOffset()); + task.push_back(empty_task); + Status s = g_pika_rm->SendSlaveBinlogChipsRequest(slave_ptr->Ip(), slave_ptr->Port(), task); + slave_ptr->SetLastSendTime(now); + if (!s.ok()) { + LOG(INFO) << "Send ping failed: " << s.ToString(); + return Status::Corruption("Send ping failed: " + slave_ptr->Ip() + ":" + std::to_string(slave_ptr->Port())); + } + } + } + + for (auto& node : to_del) { + coordinator_.SyncPros().RemoveSlaveNode(node.Ip(), node.Port()); + g_pika_rm->DropItemInOneWriteQueue(node.Ip(), node.Port(), DBName()); + LOG(WARNING) << SyncDBInfo().ToString() << " Master del Recv Timeout slave success " << node.ToString(); + } + return Status::OK(); +} + +std::string SyncMasterDB::ToStringStatus() { + std::stringstream tmp_stream; + tmp_stream << " Current Master Session: " << session_id_ << "\r\n"; + tmp_stream << " Consensus: " + << "\r\n" + << coordinator_.ToStringStatus(); + std::unordered_map> slaves = GetAllSlaveNodes(); + int i = 0; + for (const auto& slave_iter : slaves) { + std::shared_ptr slave_ptr = slave_iter.second; + std::lock_guard l(slave_ptr->slave_mu); + tmp_stream << " slave[" << i << "]: " << slave_ptr->ToString() << "\r\n" << slave_ptr->ToStringStatus(); + i++; + } + return tmp_stream.str(); +} + +int32_t SyncMasterDB::GenSessionId() { + std::lock_guard ml(session_mu_); + return session_id_++; +} + +bool SyncMasterDB::CheckSessionId(const std::string& ip, int port, const std::string& db_name, + int session_id) { + std::shared_ptr slave_ptr = GetSlaveNode(ip, port); + if (!slave_ptr) { + LOG(WARNING) << "Check SessionId Get Slave Node Error: " << ip << ":" << port << "," << db_name; + return false; + } + + std::lock_guard l(slave_ptr->slave_mu); + if (session_id != slave_ptr->SessionId()) { + LOG(WARNING) << "Check SessionId Mismatch: " << ip << ":" << port << ", " << db_name << "_" + << " expected_session: " << session_id << ", actual_session:" << slave_ptr->SessionId(); + return false; + } + return true; +} + +Status SyncMasterDB::ConsensusProposeLog(const std::shared_ptr& 
cmd_ptr) { + return coordinator_.ProposeLog(cmd_ptr); +} + +Status SyncMasterDB::ConsensusProcessLeaderLog(const std::shared_ptr& cmd_ptr, const BinlogItem& attribute) { + return coordinator_.ProcessLeaderLog(cmd_ptr, attribute); +} + +LogOffset SyncMasterDB::ConsensusCommittedIndex() { return coordinator_.committed_index(); } + +LogOffset SyncMasterDB::ConsensusLastIndex() { return coordinator_.MemLogger()->last_offset(); } + +std::shared_ptr SyncMasterDB::GetSlaveNode(const std::string& ip, int port) { + return coordinator_.SyncPros().GetSlaveNode(ip, port); +} + +std::unordered_map> SyncMasterDB::GetAllSlaveNodes() { + return coordinator_.SyncPros().GetAllSlaveNodes(); +} + +/* SyncSlaveDB */ +SyncSlaveDB::SyncSlaveDB(const std::string& db_name) + : SyncDB(db_name) { + std::string dbsync_path = g_pika_conf->db_sync_path() + "/" + db_name; + rsync_cli_.reset(new rsync::RsyncClient(dbsync_path, db_name)); + m_info_.SetLastRecvTime(pstd::NowMicros()); +} + +void SyncSlaveDB::SetReplState(const ReplState& repl_state) { + if (repl_state == ReplState::kNoConnect) { + Deactivate(); + return; + } + std::lock_guard l(db_mu_); + repl_state_ = repl_state; +} + +ReplState SyncSlaveDB::State() { + std::lock_guard l(db_mu_); + return repl_state_; +} + +void SyncSlaveDB::SetLastRecvTime(uint64_t time) { + std::lock_guard l(db_mu_); + m_info_.SetLastRecvTime(time); +} + +Status SyncSlaveDB::CheckSyncTimeout(uint64_t now) { + std::lock_guard l(db_mu_); + // no need to do session keepalive return ok + if (repl_state_ != ReplState::kWaitDBSync && repl_state_ != ReplState::kConnected) { + return Status::OK(); + } + if (m_info_.LastRecvTime() + kRecvKeepAliveTimeout < now) { + // update slave state to kTryConnect, and try reconnect to master node + repl_state_ = ReplState::kTryConnect; + } + return Status::OK(); +} + +Status SyncSlaveDB::GetInfo(std::string* info) { + std::string tmp_str = " Role: Slave\r\n"; + tmp_str += " master: " + MasterIp() + ":" + std::to_string(MasterPort()) + "\r\n"; + tmp_str += " slave status: " + ReplStateMsg[repl_state_] + "\r\n"; + info->append(tmp_str); + return Status::OK(); +} + +void SyncSlaveDB::Activate(const RmNode& master, const ReplState& repl_state) { + std::lock_guard l(db_mu_); + m_info_ = master; + repl_state_ = repl_state; + m_info_.SetLastRecvTime(pstd::NowMicros()); +} + +void SyncSlaveDB::Deactivate() { + std::lock_guard l(db_mu_); + m_info_ = RmNode(); + repl_state_ = ReplState::kNoConnect; + rsync_cli_->Stop(); +} + +std::string SyncSlaveDB::ToStringStatus() { + return " Master: " + MasterIp() + ":" + std::to_string(MasterPort()) + "\r\n" + + " SessionId: " + std::to_string(MasterSessionId()) + "\r\n" + " SyncStatus " + ReplStateMsg[repl_state_] + + "\r\n"; +} + +const std::string& SyncSlaveDB::MasterIp() { + std::lock_guard l(db_mu_); + return m_info_.Ip(); +} + +int SyncSlaveDB::MasterPort() { + std::lock_guard l(db_mu_); + return m_info_.Port(); +} + +void SyncSlaveDB::SetMasterSessionId(int32_t session_id) { + std::lock_guard l(db_mu_); + m_info_.SetSessionId(session_id); +} + +int32_t SyncSlaveDB::MasterSessionId() { + std::lock_guard l(db_mu_); + return m_info_.SessionId(); +} + +void SyncSlaveDB::SetLocalIp(const std::string& local_ip) { + std::lock_guard l(db_mu_); + local_ip_ = local_ip; +} + +std::string SyncSlaveDB::LocalIp() { + std::lock_guard l(db_mu_); + return local_ip_; +} + +void SyncSlaveDB::StopRsync() { + rsync_cli_->Stop(); +} + +pstd::Status SyncSlaveDB::ActivateRsync() { + Status s = Status::OK(); + if (!rsync_cli_->IsIdle()) { + 
return s;
+  }
+  LOG(WARNING) << "Slave DB: " << DBName() << " Activating Rsync ... (retry count: " << rsync_init_retry_count_ << ")";
+  if (rsync_cli_->Init()) {
+    rsync_init_retry_count_ = 0;
+    rsync_cli_->Start();
+    return s;
+  } else {
+    rsync_init_retry_count_ += 1;
+    if (rsync_init_retry_count_ >= kMaxRsyncInitReTryTimes) {
+      SetReplState(ReplState::kError);
+      LOG(ERROR) << "Full Sync Stage - Rsync Init failed: the slave failed to pull the meta info (generated by the "
+                    "bgsave task on the master) after kMaxRsyncInitReTryTimes ("
+                 << kMaxRsyncInitReTryTimes << " times) was reached. This usually means the master's bgsave task has "
+                    "taken unexpectedly long.";
+    }
+    return Status::Error("rsync client init failed!");
+  }
+}
+
+/* PikaReplicaManager */
+
+PikaReplicaManager::PikaReplicaManager() {
+  std::set<std::string> ips;
+  ips.insert("0.0.0.0");
+  int port = g_pika_conf->port() + kPortShiftReplServer;
+  pika_repl_client_ = std::make_unique<PikaReplClient>(3000, 60);
+  pika_repl_server_ = std::make_unique<PikaReplServer>(ips, port, 3000);
+  InitDB();
+}
+
+void PikaReplicaManager::Start() {
+  int ret = 0;
+  ret = pika_repl_client_->Start();
+  if (ret != net::kSuccess) {
+    LOG(FATAL) << "Start Repl Client Error: " << ret
+               << (ret == net::kCreateThreadError ? ": create thread error " : ": other error");
+  }
+
+  ret = pika_repl_server_->Start();
+  if (ret != net::kSuccess) {
+    LOG(FATAL) << "Start Repl Server Error: " << ret
+               << (ret == net::kCreateThreadError ? ": create thread error " : ": other error");
+  }
+}
+
+void PikaReplicaManager::Stop() {
+  pika_repl_client_->Stop();
+  pika_repl_server_->Stop();
+}
+
+bool PikaReplicaManager::CheckMasterSyncFinished() {
+  for (auto& iter : sync_master_dbs_) {
+    std::shared_ptr<SyncMasterDB> db = iter.second;
+    LogOffset commit = db->ConsensusCommittedIndex();
+    BinlogOffset binlog;
+    Status s = db->StableLogger()->Logger()->GetProducerStatus(&binlog.filenum, &binlog.offset);
+    if (!s.ok()) {
+      return false;
+    }
+    if (commit.b_offset < binlog) {
+      return false;
+    }
+  }
+  return true;
+}
+
+void PikaReplicaManager::InitDB() {
+  std::vector<DBStruct> db_structs = g_pika_conf->db_structs();
+  for (const auto& db : db_structs) {
+    const std::string& db_name = db.db_name;
+    sync_master_dbs_[DBInfo(db_name)] = std::make_shared<SyncMasterDB>(db_name);
+    sync_slave_dbs_[DBInfo(db_name)] = std::make_shared<SyncSlaveDB>(db_name);
+  }
+}
+
+void PikaReplicaManager::ProduceWriteQueue(const std::string& ip, int port, std::string db_name,
+                                           const std::vector<WriteTask>& tasks) {
+  std::lock_guard l(write_queue_mu_);
+  std::string index = ip + ":" + std::to_string(port);
+  for (auto& task : tasks) {
+    write_queues_[index][db_name].push(task);
+  }
+}
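ConsumeWriteQueue below drains each per-peer queue under three limits: at most kBinlogSendPacketNum batches per round, at most kBinlogSendBatchNum tasks per batch, and a byte cap so a serialized packet stays well under the 2 GB protobuf limit. A minimal sketch of that batching loop (the limits here are hypothetical placeholders, and the queue is simplified to raw payload strings):

    #include <cstddef>
    #include <queue>
    #include <string>
    #include <utility>
    #include <vector>

    // Hypothetical limits; the real ones come from pika_define.h / pika_conf.
    constexpr size_t kMaxTasksPerBatch = 100;
    constexpr size_t kMaxBatchBytes = 1 << 20;

    // Pop at most kMaxTasksPerBatch payloads, stopping early once the byte cap
    // would be exceeded; the oversized task stays queued, as in ConsumeWriteQueue.
    std::vector<std::string> NextBatch(std::queue<std::string>& q) {
      std::vector<std::string> batch;
      size_t bytes = 0;
      while (!q.empty() && batch.size() < kMaxTasksPerBatch) {
        bytes += q.front().size();
        if (bytes > kMaxBatchBytes) break;
        batch.push_back(std::move(q.front()));
        q.pop();
      }
      return batch;
    }
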
+
+int PikaReplicaManager::ConsumeWriteQueue() {
+  std::unordered_map<std::string, std::vector<std::vector<WriteTask>>> to_send_map;
+  int counter = 0;
+  {
+    std::lock_guard l(write_queue_mu_);
+    for (auto& iter : write_queues_) {
+      const std::string& ip_port = iter.first;
+      std::unordered_map<std::string, std::queue<WriteTask>>& p_map = iter.second;
+      for (auto& db_queue : p_map) {
+        std::queue<WriteTask>& queue = db_queue.second;
+        for (int i = 0; i < kBinlogSendPacketNum; ++i) {
+          if (queue.empty()) {
+            break;
+          }
+          size_t batch_index = queue.size() > kBinlogSendBatchNum ? kBinlogSendBatchNum : queue.size();
+          std::vector<WriteTask> to_send;
+          size_t batch_size = 0;
+          for (size_t j = 0; j < batch_index; ++j) {
+            WriteTask& task = queue.front();
+            batch_size += task.binlog_chip_.binlog_.size();
+            // make sure SerializeToString will not go over 2G
+            if (batch_size > PIKA_MAX_CONN_RBUF_HB) {
+              break;
+            }
+            to_send.push_back(task);
+            queue.pop();
+            counter++;
+          }
+          if (!to_send.empty()) {
+            to_send_map[ip_port].push_back(std::move(to_send));
+          }
+        }
+      }
+    }
+  }
+
+  std::vector<std::string> to_delete;
+  for (auto& iter : to_send_map) {
+    std::string ip;
+    int port = 0;
+    if (!pstd::ParseIpPortString(iter.first, ip, port)) {
+      LOG(WARNING) << "Parse ip_port error " << iter.first;
+      continue;
+    }
+    for (auto& to_send : iter.second) {
+      Status s = pika_repl_server_->SendSlaveBinlogChips(ip, port, to_send);
+      if (!s.ok()) {
+        LOG(WARNING) << "send binlog to " << ip << ":" << port << " failed, " << s.ToString();
+        to_delete.push_back(iter.first);
+        continue;
+      }
+    }
+  }
+
+  if (!to_delete.empty()) {
+    std::lock_guard l(write_queue_mu_);
+    for (auto& del_queue : to_delete) {
+      write_queues_.erase(del_queue);
+    }
+  }
+  return counter;
+}
+
+void PikaReplicaManager::DropItemInOneWriteQueue(const std::string& ip, int port, const std::string& db_name) {
+  std::lock_guard l(write_queue_mu_);
+  std::string index = ip + ":" + std::to_string(port);
+  if (write_queues_.find(index) != write_queues_.end()) {
+    write_queues_[index].erase(db_name);
+  }
+}
+
+void PikaReplicaManager::DropItemInWriteQueue(const std::string& ip, int port) {
+  std::lock_guard l(write_queue_mu_);
+  std::string index = ip + ":" + std::to_string(port);
+  write_queues_.erase(index);
+}
+
+void PikaReplicaManager::ScheduleReplServerBGTask(net::TaskFunc func, void* arg) {
+  pika_repl_server_->Schedule(func, arg);
+}
+
+void PikaReplicaManager::ScheduleReplClientBGTask(net::TaskFunc func, void* arg) {
+  pika_repl_client_->Schedule(func, arg);
+}
+
+void PikaReplicaManager::ScheduleReplClientBGTaskByDBName(net::TaskFunc func, void* arg, const std::string& db_name) {
+  pika_repl_client_->ScheduleByDBName(func, arg, db_name);
+}
+
+void PikaReplicaManager::ScheduleWriteBinlogTask(const std::string& db,
+                                                 const std::shared_ptr<InnerMessage::InnerResponse>& res,
+                                                 const std::shared_ptr<net::PbConn>& conn, void* res_private_data) {
+  pika_repl_client_->ScheduleWriteBinlogTask(db, res, conn, res_private_data);
+}
+
+void PikaReplicaManager::ScheduleWriteDBTask(const std::shared_ptr<Cmd>& cmd_ptr, const std::string& db_name) {
+  pika_repl_client_->ScheduleWriteDBTask(cmd_ptr, db_name);
+}
+
+void PikaReplicaManager::ReplServerRemoveClientConn(int fd) { pika_repl_server_->RemoveClientConn(fd); }
+
+void PikaReplicaManager::ReplServerUpdateClientConnMap(const std::string& ip_port, int fd) {
+  pika_repl_server_->UpdateClientConnMap(ip_port, fd);
+}
+
+Status PikaReplicaManager::UpdateSyncBinlogStatus(const RmNode& slave, const LogOffset& offset_start,
+                                                  const LogOffset& offset_end) {
+  std::shared_lock l(dbs_rw_);
+  if (sync_master_dbs_.find(slave.NodeDBInfo()) == sync_master_dbs_.end()) {
+    return Status::NotFound(slave.ToString() + " not found");
+  }
+  std::shared_ptr<SyncMasterDB> db = sync_master_dbs_[slave.NodeDBInfo()];
+  Status s = db->ConsensusUpdateSlave(slave.Ip(), slave.Port(), offset_start, offset_end);
+  if (!s.ok()) {
+    return s;
+  }
+  s = db->SyncBinlogToWq(slave.Ip(), slave.Port());
+  if (!s.ok()) {
+    return s;
+  }
+  return Status::OK();
+}
+
+bool PikaReplicaManager::CheckSlaveDBState(const std::string& ip, const int port) {
+  std::shared_ptr<SyncSlaveDB> db = nullptr;
+  for (const auto& iter : 
g_pika_rm->sync_slave_dbs_) { + db = iter.second; + if (db->State() == ReplState::kDBNoConnect && db->MasterIp() == ip && + db->MasterPort() + kPortShiftReplServer == port) { + LOG(INFO) << "DB: " << db->SyncDBInfo().ToString() + << " has been dbslaveof no one, then will not try reconnect."; + return false; + } + } + return true; +} + +Status PikaReplicaManager::DeactivateSyncSlaveDB(const std::string& ip, int port) { + std::shared_lock l(dbs_rw_); + for (auto& iter : sync_slave_dbs_) { + std::shared_ptr db = iter.second; + if (db->MasterIp() == ip && db->MasterPort() == port) { + db->Deactivate(); + } + } + return Status::OK(); +} + +Status PikaReplicaManager::LostConnection(const std::string& ip, int port) { + std::shared_lock l(dbs_rw_); + for (auto& iter : sync_master_dbs_) { + std::shared_ptr db = iter.second; + Status s = db->RemoveSlaveNode(ip, port); + if (!s.ok() && !s.IsNotFound()) { + LOG(WARNING) << "Lost Connection failed " << s.ToString(); + } + } + + for (auto& iter : sync_slave_dbs_) { + std::shared_ptr db = iter.second; + if (db->MasterIp() == ip && db->MasterPort() == port) { + db->Deactivate(); + } + } + return Status::OK(); +} + +Status PikaReplicaManager::WakeUpBinlogSync() { + std::shared_lock l(dbs_rw_); + for (auto& iter : sync_master_dbs_) { + std::shared_ptr db = iter.second; + Status s = db->WakeUpSlaveBinlogSync(); + if (!s.ok()) { + return s; + } + } + return Status::OK(); +} + +Status PikaReplicaManager::CheckSyncTimeout(uint64_t now) { + std::shared_lock l(dbs_rw_); + + for (auto& iter : sync_master_dbs_) { + std::shared_ptr db = iter.second; + Status s = db->CheckSyncTimeout(now); + if (!s.ok()) { + LOG(WARNING) << "CheckSyncTimeout Failed " << s.ToString(); + } + } + for (auto& iter : sync_slave_dbs_) { + std::shared_ptr db = iter.second; + Status s = db->CheckSyncTimeout(now); + if (!s.ok()) { + LOG(WARNING) << "CheckSyncTimeout Failed " << s.ToString(); + } + } + return Status::OK(); +} + +Status PikaReplicaManager::CheckDBRole(const std::string& db, int* role) { + std::shared_lock l(dbs_rw_); + *role = 0; + DBInfo p_info(db); + if (sync_master_dbs_.find(p_info) == sync_master_dbs_.end()) { + return Status::NotFound(db + " not found"); + } + if (sync_slave_dbs_.find(p_info) == sync_slave_dbs_.end()) { + return Status::NotFound(db + " not found"); + } + if (sync_master_dbs_[p_info]->GetNumberOfSlaveNode() != 0 || + (sync_master_dbs_[p_info]->GetNumberOfSlaveNode() == 0 && + sync_slave_dbs_[p_info]->State() == kNoConnect)) { + *role |= PIKA_ROLE_MASTER; + } + if (sync_slave_dbs_[p_info]->State() != ReplState::kNoConnect) { + *role |= PIKA_ROLE_SLAVE; + } + // if role is not master or slave, the rest situations are all single + return Status::OK(); +} + +Status PikaReplicaManager::SelectLocalIp(const std::string& remote_ip, const int remote_port, + std::string* const local_ip) { + std::unique_ptr cli(net::NewRedisCli()); + cli->set_connect_timeout(1500); + if ((cli->Connect(remote_ip, remote_port, "")).ok()) { + struct sockaddr_in laddr; + socklen_t llen = sizeof(laddr); + getsockname(cli->fd(), reinterpret_cast(&laddr), &llen); + std::string tmp_ip(inet_ntoa(laddr.sin_addr)); + *local_ip = tmp_ip; + cli->Close(); + } else { + LOG(WARNING) << "Failed to connect remote node(" << remote_ip << ":" << remote_port << ")"; + return Status::Corruption("connect remote node error"); + } + return Status::OK(); +} + +Status PikaReplicaManager::ActivateSyncSlaveDB(const RmNode& node, const ReplState& repl_state) { + std::shared_lock l(dbs_rw_); + const DBInfo& p_info = 
node.NodeDBInfo();
+  if (sync_slave_dbs_.find(p_info) == sync_slave_dbs_.end()) {
+    return Status::NotFound("Sync Slave DB " + node.ToString() + " not found");
+  }
+  ReplState ssp_state = sync_slave_dbs_[p_info]->State();
+  if (ssp_state != ReplState::kNoConnect && ssp_state != ReplState::kDBNoConnect) {
+    return Status::Corruption("Sync Slave DB in " + ReplStateMsg[ssp_state]);
+  }
+  std::string local_ip;
+  Status s = SelectLocalIp(node.Ip(), node.Port(), &local_ip);
+  if (s.ok()) {
+    sync_slave_dbs_[p_info]->SetLocalIp(local_ip);
+    sync_slave_dbs_[p_info]->Activate(node, repl_state);
+  }
+  return s;
+}
+
+Status PikaReplicaManager::SendMetaSyncRequest() {
+  Status s;
+  if (time(nullptr) - g_pika_server->GetMetaSyncTimestamp() >= PIKA_META_SYNC_MAX_WAIT_TIME ||
+      g_pika_server->IsFirstMetaSync()) {
+    s = pika_repl_client_->SendMetaSync();
+    if (s.ok()) {
+      g_pika_server->UpdateMetaSyncTimestamp();
+      g_pika_server->SetFirstMetaSync(false);
+    }
+  }
+  return s;
+}
+
+Status PikaReplicaManager::SendRemoveSlaveNodeRequest(const std::string& db) {
+  pstd::Status s;
+  std::shared_lock l(dbs_rw_);
+  DBInfo p_info(db);
+  if (sync_slave_dbs_.find(p_info) == sync_slave_dbs_.end()) {
+    return Status::NotFound("Sync Slave DB " + p_info.ToString());
+  } else {
+    std::shared_ptr<SyncSlaveDB> s_db = sync_slave_dbs_[p_info];
+    s = pika_repl_client_->SendRemoveSlaveNode(s_db->MasterIp(), s_db->MasterPort(), db, s_db->LocalIp());
+    if (s.ok()) {
+      s_db->SetReplState(ReplState::kDBNoConnect);
+    }
+  }
+
+  if (s.ok()) {
+    LOG(INFO) << "SlaveNode (" << db << "), stop sync success";
+  } else {
+    LOG(WARNING) << "SlaveNode (" << db << "), stop sync failed, " << s.ToString();
+  }
+  return s;
+}
+
+Status PikaReplicaManager::SendTrySyncRequest(const std::string& db_name) {
+  BinlogOffset boffset;
+  if (!g_pika_server->GetDBBinlogOffset(db_name, &boffset)) {
+    LOG(WARNING) << "DB: " << db_name << ", Get DB binlog offset failed";
+    return Status::Corruption("DB get binlog offset error");
+  }
+
+  std::shared_ptr<SyncSlaveDB> slave_db = GetSyncSlaveDBByName(DBInfo(db_name));
+  if (!slave_db) {
+    LOG(WARNING) << "Slave DB: " << db_name << ", NotFound";
+    return Status::Corruption("Slave DB not found");
+  }
+
+  Status status =
+      pika_repl_client_->SendTrySync(slave_db->MasterIp(), slave_db->MasterPort(), db_name,
+                                     boffset, slave_db->LocalIp());
+
+  if (status.ok()) {
+    slave_db->SetReplState(ReplState::kWaitReply);
+  } else {
+    slave_db->SetReplState(ReplState::kError);
+    LOG(WARNING) << "SendDBTrySyncRequest failed " << status.ToString();
+  }
+  return status;
+}
+
+Status PikaReplicaManager::SendDBSyncRequest(const std::string& db_name) {
+  BinlogOffset boffset;
+  if (!g_pika_server->GetDBBinlogOffset(db_name, &boffset)) {
+    LOG(WARNING) << "DB: " << db_name << ", Get DB binlog offset failed";
+    return Status::Corruption("DB get binlog offset error");
+  }
+
+  std::shared_ptr<DB> db = g_pika_server->GetDB(db_name);
+  if (!db) {
+    LOG(WARNING) << "DB: " << db_name << " NotFound";
+    return Status::Corruption("DB not found");
+  }
+  db->PrepareRsync();
+
+  std::shared_ptr<SyncSlaveDB> slave_db = GetSyncSlaveDBByName(DBInfo(db_name));
+  if (!slave_db) {
+    LOG(WARNING) << "Slave DB: " << db_name << ", NotFound";
+    return Status::Corruption("Slave DB not found");
+  }
+
+  Status status = pika_repl_client_->SendDBSync(slave_db->MasterIp(), slave_db->MasterPort(),
+                                                db_name, boffset, slave_db->LocalIp());
+
+  Status s;
+  if (status.ok()) {
+    slave_db->SetReplState(ReplState::kWaitReply);
+  } else {
+    slave_db->SetReplState(ReplState::kError);
+    LOG(WARNING) << "SendDBSync 
failed " << status.ToString(); + } + if (!s.ok()) { + LOG(WARNING) << s.ToString(); + } + return status; +} + +Status PikaReplicaManager::SendBinlogSyncAckRequest(const std::string& db, const LogOffset& ack_start, + const LogOffset& ack_end, bool is_first_send) { + std::shared_ptr slave_db = GetSyncSlaveDBByName(DBInfo(db)); + if (!slave_db) { + LOG(WARNING) << "Slave DB: " << db << ":, NotFound"; + return Status::Corruption("Slave DB not found"); + } + return pika_repl_client_->SendBinlogSync(slave_db->MasterIp(), slave_db->MasterPort(), db, + ack_start, ack_end, slave_db->LocalIp(), is_first_send); +} + +Status PikaReplicaManager::CloseReplClientConn(const std::string& ip, int32_t port) { + return pika_repl_client_->Close(ip, port); +} + +Status PikaReplicaManager::SendSlaveBinlogChipsRequest(const std::string& ip, int port, + const std::vector& tasks) { + return pika_repl_server_->SendSlaveBinlogChips(ip, port, tasks); +} + +std::shared_ptr PikaReplicaManager::GetSyncMasterDBByName(const DBInfo& p_info) { + std::shared_lock l(dbs_rw_); + if (sync_master_dbs_.find(p_info) == sync_master_dbs_.end()) { + return nullptr; + } + return sync_master_dbs_[p_info]; +} + +std::shared_ptr PikaReplicaManager::GetSyncSlaveDBByName(const DBInfo& p_info) { + std::shared_lock l(dbs_rw_); + if (sync_slave_dbs_.find(p_info) == sync_slave_dbs_.end()) { + return nullptr; + } + return sync_slave_dbs_[p_info]; +} + +Status PikaReplicaManager::RunSyncSlaveDBStateMachine() { + std::shared_lock l(dbs_rw_); + for (const auto& item : sync_slave_dbs_) { + DBInfo p_info = item.first; + std::shared_ptr s_db = item.second; + if (s_db->State() == ReplState::kTryConnect) { + SendTrySyncRequest(p_info.db_name_); + } else if (s_db->State() == ReplState::kTryDBSync) { + SendDBSyncRequest(p_info.db_name_); + } else if (s_db->State() == ReplState::kWaitReply) { + continue; + } else if (s_db->State() == ReplState::kWaitDBSync) { + Status s = s_db->ActivateRsync(); + if (!s.ok()) { + LOG(WARNING) << "Slave DB: " << s_db->DBName() << " rsync failed! 
full synchronization will be retried later"; + continue; + } + + std::shared_ptr db = + g_pika_server->GetDB(p_info.db_name_); + if (db) { + if (s_db->IsRsyncExited()) { + db->TryUpdateMasterOffset(); + } + } else { + LOG(WARNING) << "DB not found, DB Name: " << p_info.db_name_; + } + } else if (s_db->State() == ReplState::kConnected || s_db->State() == ReplState::kNoConnect || + s_db->State() == ReplState::kDBNoConnect) { + continue; + } + } + return Status::OK(); +} + +void PikaReplicaManager::FindCommonMaster(std::string* master) { + std::shared_lock l(dbs_rw_); + std::string common_master_ip; + int common_master_port = 0; + for (auto& iter : sync_slave_dbs_) { + if (iter.second->State() != kConnected) { + return; + } + std::string tmp_ip = iter.second->MasterIp(); + int tmp_port = iter.second->MasterPort(); + if (common_master_ip.empty() && common_master_port == 0) { + common_master_ip = tmp_ip; + common_master_port = tmp_port; + } + if (tmp_ip != common_master_ip || tmp_port != common_master_port) { + return; + } + } + if (!common_master_ip.empty() && common_master_port != 0) { + *master = common_master_ip + ":" + std::to_string(common_master_port); + } +} + +void PikaReplicaManager::RmStatus(std::string* info) { + std::shared_lock l(dbs_rw_); + std::stringstream tmp_stream; + tmp_stream << "Master DB(" << sync_master_dbs_.size() << "):" + << "\r\n"; + for (auto& iter : sync_master_dbs_) { + tmp_stream << " DB " << iter.second->SyncDBInfo().ToString() << "\r\n" + << iter.second->ToStringStatus() << "\r\n"; + } + tmp_stream << "Slave DB(" << sync_slave_dbs_.size() << "):" + << "\r\n"; + for (auto& iter : sync_slave_dbs_) { + tmp_stream << " DB " << iter.second->SyncDBInfo().ToString() << "\r\n" + << iter.second->ToStringStatus() << "\r\n"; + } + info->append(tmp_stream.str()); +} diff --git a/tools/pika_migrate/src/pika_rsync_service.cc b/tools/pika_migrate/src/pika_rsync_service.cc new file mode 100644 index 0000000000..5071a1cfc1 --- /dev/null +++ b/tools/pika_migrate/src/pika_rsync_service.cc @@ -0,0 +1,105 @@ +// Copyright (c) 2019-present, Qihoo, Inc. All rights reserved. +// This source code is licensed under the BSD-style license found in the +// LICENSE file in the root directory of this source tree. An additional grant +// of patent rights can be found in the PATENTS file in the same directory. 
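+
+// PikaRsyncService wraps an external `rsync --daemon` process that serves the
+// dump directory during full sync. A rough lifecycle sketch, using only the
+// methods defined in this file (sync_port is a placeholder for the configured
+// rsync port):
+//
+//   PikaRsyncService svc(g_pika_conf->db_sync_path(), sync_port);
+//   svc.StartRsync();       // spawn rsyncd, write the secret file, then verify liveness
+//   svc.CheckRsyncAlive();  // liveness == existence of the rsyncd pid file
+//   // ~PikaRsyncService() stops rsyncd, or removes the leftover dir if it never started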
+
+#include "include/pika_rsync_service.h"
+
+#include <fstream>
+#include <glog/logging.h>
+#include <sys/wait.h>
+
+#include "pstd/include/env.h"
+#include "pstd/include/rsync.h"
+
+#include "include/pika_conf.h"
+#include "include/pika_define.h"
+
+#ifdef __FreeBSD__
+#  include <netinet/in.h>
+#endif
+
+extern std::unique_ptr<PikaConf> g_pika_conf;
+
+PikaRsyncService::PikaRsyncService(const std::string& raw_path, const int port) : raw_path_(raw_path), port_(port) {
+  if (raw_path_.back() != '/') {
+    raw_path_ += "/";
+  }
+  rsync_path_ = raw_path_ + pstd::kRsyncSubDir + "/";
+  pid_path_ = rsync_path_ + pstd::kRsyncPidFile;
+}
+
+PikaRsyncService::~PikaRsyncService() {
+  if (!CheckRsyncAlive()) {
+    pstd::DeleteDirIfExist(rsync_path_);
+  } else {
+    pstd::StopRsync(raw_path_);
+  }
+  LOG(INFO) << "PikaRsyncService exit!!!";
+}
+
+int PikaRsyncService::StartRsync() {
+  int ret = 0;
+  std::string auth;
+  if (g_pika_conf->masterauth().empty()) {
+    auth = kDefaultRsyncAuth;
+  } else {
+    auth = g_pika_conf->masterauth();
+  }
+  ret = pstd::StartRsync(raw_path_, kDBSyncModule, "0.0.0.0", port_, auth);
+  if (ret) {
+    LOG(WARNING) << "Failed to start rsync, path:" << raw_path_ << " error : " << ret;
+    return -1;
+  }
+  ret = CreateSecretFile();
+  if (ret) {
+    LOG(WARNING) << "Failed to create secret file";
+    return -1;
+  }
+  // Make sure the listening addr of rsyncd is accessible, to avoid the corner case
+  // where the rsync --daemon process has started but has not yet finished listening on the socket
+  sleep(1);
+
+  if (!CheckRsyncAlive()) {
+    LOG(WARNING) << "Rsync service is not alive, path:" << raw_path_;
+    return -1;
+  }
+  return 0;
+}
+
+int PikaRsyncService::CreateSecretFile() {
+  std::string secret_file_path = g_pika_conf->db_sync_path();
+  if (g_pika_conf->db_sync_path().back() != '/') {
+    secret_file_path += "/";
+  }
+  secret_file_path += pstd::kRsyncSubDir + "/";
+  pstd::CreatePath(secret_file_path);
+  secret_file_path += kPikaSecretFile;
+
+  std::string auth;
+  // unify rsync auth with masterauth
+  if (g_pika_conf->masterauth().empty()) {
+    auth = kDefaultRsyncAuth;
+  } else {
+    auth = g_pika_conf->masterauth();
+  }
+
+  std::ofstream secret_stream(secret_file_path.c_str());
+  if (!secret_stream) {
+    return -1;
+  }
+  secret_stream << auth;
+  secret_stream.close();
+
+  // the secret file can't be accessible by others
+  std::string cmd = "chmod 600 " + secret_file_path;
+  int ret = system(cmd.c_str());
+  if (ret == 0 || (WIFEXITED(ret) && !WEXITSTATUS(ret))) {
+    return 0;
+  }
+  return ret;
+}
+
+bool PikaRsyncService::CheckRsyncAlive() { return pstd::FileExists(pid_path_); }
+
+int PikaRsyncService::ListenPort() { return port_; }
diff --git a/tools/pika_migrate/src/pika_server.cc b/tools/pika_migrate/src/pika_server.cc
new file mode 100644
index 0000000000..72b16d82f7
--- /dev/null
+++ b/tools/pika_migrate/src/pika_server.cc
@@ -0,0 +1,1921 @@
+// Copyright (c) 2015-present, Qihoo, Inc. All rights reserved.
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree. An additional grant
+// of patent rights can be found in the PATENTS file in the same directory.
+
+#include <sys/resource.h>
+#include <sys/statfs.h>
+
+#include <algorithm>
+#include <ctime>
+#include <map>
+#include <memory>
+#include <set>
+#include <sstream>
+
+#include <glog/logging.h>
+
+#include "net/include/net_cli.h"
+#include "net/include/net_interfaces.h"
+#include "net/include/net_stats.h"
+#include "net/include/redis_cli.h"
+#include "pstd/include/env.h"
+#include "pstd/include/rsync.h"
+#include "pstd/include/pika_codis_slot.h"
+
+#include "include/pika_cmd_table_manager.h"
+#include "include/pika_dispatch_thread.h"
+#include "include/pika_instant.h"
+#include "include/pika_monotonic_time.h"
+#include "include/pika_rm.h"
+#include "include/pika_server.h"
+
+using pstd::Status;
+extern PikaServer* g_pika_server;
+extern std::unique_ptr<PikaReplicaManager> g_pika_rm;
+extern std::unique_ptr<PikaCmdTableManager> g_pika_cmd_table_manager;
+extern std::unique_ptr<net::NetworkStatistic> g_network_statistic;
+// QUEUE_SIZE_THRESHOLD_PERCENTAGE represents a percentage and must be within the range 0 to 100.
+const size_t QUEUE_SIZE_THRESHOLD_PERCENTAGE = 75;
+
+void DoPurgeDir(void* arg) {
+  std::unique_ptr<std::string> path(static_cast<std::string*>(arg));
+  LOG(INFO) << "Delete dir: " << *path << " start";
+  pstd::DeleteDir(*path);
+  LOG(INFO) << "Delete dir: " << *path << " done";
+}
+
+PikaServer::PikaServer()
+    : exit_(false),
+      slow_cmd_thread_pool_flag_(g_pika_conf->slow_cmd_pool()),
+      last_check_compact_time_({0, 0}),
+      last_check_resume_time_({0, 0}),
+      repl_state_(PIKA_REPL_NO_CONNECT),
+      role_(PIKA_ROLE_SINGLE) {
+  // Init server ip host
+  if (!ServerInit()) {
+    LOG(FATAL) << "ServerInit ioctl error";
+  }
+
+  InitStorageOptions();
+
+  // Create threads
+  worker_num_ = std::min(g_pika_conf->thread_num(), PIKA_MAX_WORKER_THREAD_NUM);
+
+  std::set<std::string> ips;
+  if (g_pika_conf->network_interface().empty()) {
+    ips.insert("0.0.0.0");
+  } else {
+    ips.insert("127.0.0.1");
+    ips.insert(host_);
+  }
+  // We estimate the queue size
+  int worker_queue_limit = g_pika_conf->maxclients() / worker_num_ + 100;
+  LOG(INFO) << "Worker queue limit is " << worker_queue_limit;
+  std::for_each(ips.begin(), ips.end(), [](auto& ip) { LOG(WARNING) << ip; });
+  pika_dispatch_thread_ = std::make_unique<PikaDispatchThread>(ips, port_, worker_num_, 3000, worker_queue_limit,
+                                                               g_pika_conf->max_conn_rbuf_size());
+  pika_rsync_service_ =
+      std::make_unique<PikaRsyncService>(g_pika_conf->db_sync_path(), g_pika_conf->port() + kPortShiftRSync);
+  // TODO: remove pika_rsync_service_ and reuse its port
+  rsync_server_ = std::make_unique<rsync::RsyncServer>(ips, port_ + kPortShiftRsync2);
+  pika_pubsub_thread_ = std::make_unique<net::PubSubThread>();
+  pika_auxiliary_thread_ = std::make_unique<PikaAuxiliaryThread>();
+  pika_migrate_ = std::make_unique<PikaMigrate>();
+  pika_migrate_thread_ = std::make_unique<PikaMigrateThread>();
+
+  pika_client_processor_ = std::make_unique<PikaClientProcessor>(g_pika_conf->thread_pool_size(), 100000);
+  pika_slow_cmd_thread_pool_ = std::make_unique<net::ThreadPool>(g_pika_conf->slow_cmd_thread_pool_size(), 100000);
+  pika_admin_cmd_thread_pool_ = std::make_unique<net::ThreadPool>(g_pika_conf->admin_thread_pool_size(), 100000);
+  instant_ = std::make_unique<Instant>();
+  exit_mutex_.lock();
+  int64_t lastsave = GetLastSaveTime(g_pika_conf->bgsave_path());
+  UpdateLastSave(lastsave);
+
+  // init role
+  std::string slaveof = g_pika_conf->slaveof();
+  if (!slaveof.empty()) {
+    auto sep = static_cast<int32_t>(slaveof.find(':'));
+    std::string master_ip = slaveof.substr(0, sep);
+    int32_t master_port = std::stoi(slaveof.substr(sep + 1));
+    if ((master_ip == "127.0.0.1" || master_ip == host_) && master_port == port_) {
+      LOG(FATAL) << "the slaveof config points to this instance itself, please check";
+    } else {
+      SetMaster(master_ip, master_port);
+    }
+  }
+
+  acl_ = std::make_unique<::Acl>();
+  SetSlowCmdThreadPoolFlag(g_pika_conf->slow_cmd_pool());
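+  // The background threads below are named so they can be told apart in
+  // thread listings such as `top -H` or a debugger; note that Linux truncates
+  // pthread names to 15 characters, so these prefixes may appear cut short.
+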
bgsave_thread_.set_thread_name("PikaServer::bgsave_thread_");
+  purge_thread_.set_thread_name("PikaServer::purge_thread_");
+  bgslots_cleanup_thread_.set_thread_name("PikaServer::bgslots_cleanup_thread_");
+  common_bg_thread_.set_thread_name("PikaServer::common_bg_thread_");
+  key_scan_thread_.set_thread_name("PikaServer::key_scan_thread_");
+}
+
+PikaServer::~PikaServer() {
+  rsync_server_->Stop();
+  // DispatchThread uses the queues of the worker threads,
+  // so we need to stop the dispatch thread before the workers.
+  pika_dispatch_thread_->StopThread();
+  pika_client_processor_->Stop();
+  pika_slow_cmd_thread_pool_->stop_thread_pool();
+  pika_admin_cmd_thread_pool_->stop_thread_pool();
+  {
+    std::lock_guard l(slave_mutex_);
+    auto iter = slaves_.begin();
+    while (iter != slaves_.end()) {
+      iter = slaves_.erase(iter);
+      LOG(INFO) << "Delete slave success";
+    }
+  }
+  bgsave_thread_.StopThread();
+  key_scan_thread_.StopThread();
+  pika_migrate_thread_->StopThread();
+
+  dbs_.clear();
+
+  LOG(INFO) << "PikaServer " << pthread_self() << " exit!!!";
+}
+
+bool PikaServer::ServerInit() {
+  std::string network_interface = g_pika_conf->network_interface();
+  if (network_interface.empty()) {
+    network_interface = GetDefaultInterface();
+  }
+
+  if (network_interface.empty()) {
+    LOG(FATAL) << "Can't get network interface";
+    return false;
+  }
+
+  host_ = GetIpByInterface(network_interface);
+  if (host_.empty()) {
+    LOG(FATAL) << "can't get host ip for " << network_interface;
+    return false;
+  }
+
+  port_ = g_pika_conf->port();
+  LOG(INFO) << "host: " << host_ << " port: " << port_;
+  return true;
+}
+
+void PikaServer::Start() {
+  int ret = 0;
+  // start rsync first, so the forked rsync daemon does not inherit fds opened by rocksdb
+  // TODO: temporarily disable rsync server
+  /*
+  ret = pika_rsync_service_->StartRsync();
+  if (0 != ret) {
+    dbs_.clear();
+    LOG(FATAL) << "Start Rsync Error: bind port " + std::to_string(pika_rsync_service_->ListenPort()) + " failed"
+               << ", Listen on this port to receive Master FullSync Data";
+  }
+  */
+
+  ret = pika_client_processor_->Start();
+  if (ret != net::kSuccess) {
+    dbs_.clear();
+    LOG(FATAL) << "Start PikaClientProcessor Error: " << ret
+               << (ret == net::kCreateThreadError ? ": create thread error " : ": other error");
+  }
+
+  ret = pika_slow_cmd_thread_pool_->start_thread_pool();
+  if (ret != net::kSuccess) {
+    dbs_.clear();
+    LOG(FATAL) << "Start PikaLowLevelThreadPool Error: " << ret
+               << (ret == net::kCreateThreadError ? ": create thread error " : ": other error");
+  }
+  ret = pika_admin_cmd_thread_pool_->start_thread_pool();
+  if (ret != net::kSuccess) {
+    dbs_.clear();
+    LOG(FATAL) << "Start PikaAdminThreadPool Error: " << ret
+               << (ret == net::kCreateThreadError ? ": create thread error " : ": other error");
+  }
+  ret = pika_dispatch_thread_->StartThread();
+  if (ret != net::kSuccess) {
+    dbs_.clear();
+    LOG(FATAL) << "Start Dispatch Error: " << ret
+               << (ret == net::kBindError ? ": bind port " + std::to_string(port_) + " conflict" : ": other error")
+               << ", Listen on this port to handle the connected redis client";
+  }
+  ret = pika_pubsub_thread_->StartThread();
+  if (ret != net::kSuccess) {
+    dbs_.clear();
+    LOG(FATAL) << "Start Pubsub Error: " << ret << (ret == net::kBindError ? ": bind port conflict" : ": other error");
+  }
+
+  ret = pika_auxiliary_thread_->StartThread();
+  if (ret != net::kSuccess) {
+    dbs_.clear();
+    LOG(FATAL) << "Start Auxiliary Thread Error: " << ret
+               << (ret == net::kCreateThreadError ?
": create thread error " : ": other error"); + } + + time(&start_time_s_); + LOG(INFO) << "Pika Server going to start"; + rsync_server_->Start(); + while (!exit_) { + DoTimingTask(); + // wake up every 5 seconds + if (!exit_ && exit_mutex_.try_lock_for(std::chrono::seconds(5))) { + exit_mutex_.unlock(); + } + } + LOG(INFO) << "Goodbye..."; +} + +void PikaServer::SetSlowCmdThreadPoolFlag(bool flag) { + slow_cmd_thread_pool_flag_ = flag; + int ret = 0; + if (flag) { + ret = pika_slow_cmd_thread_pool_->start_thread_pool(); + if (ret != net::kSuccess) { + dbs_.clear(); + LOG(ERROR) << "Start PikaLowLevelThreadPool Error: " << ret + << (ret == net::kCreateThreadError ? ": create thread error " : ": other error"); + } + } else { + while (SlowCmdThreadPoolCurQueueSize() != 0) { + std::this_thread::sleep_for(std::chrono::milliseconds(100)); + } + pika_slow_cmd_thread_pool_->stop_thread_pool(); + } +} + +void PikaServer::Exit() { + g_pika_server->DisableCompact(); + exit_mutex_.unlock(); + exit_ = true; +} + +std::string PikaServer::host() { return host_; } + +int PikaServer::port() { return port_; } + +time_t PikaServer::start_time_s() { return start_time_s_; } + +std::string PikaServer::master_ip() { + std::shared_lock l(state_protector_); + return master_ip_; +} + +int PikaServer::master_port() { + std::shared_lock l(state_protector_); + return master_port_; +} + +int PikaServer::role() { + std::shared_lock l(state_protector_); + return role_; +} + +bool PikaServer::leader_protected_mode() { + std::shared_lock l(state_protector_); + return leader_protected_mode_; +} + +void PikaServer::CheckLeaderProtectedMode() { + if (!leader_protected_mode()) { + return; + } + if (g_pika_rm->CheckMasterSyncFinished()) { + LOG(INFO) << "Master finish sync and commit binlog"; + + std::lock_guard l(state_protector_); + leader_protected_mode_ = false; + } +} + +bool PikaServer::readonly(const std::string& db_name) { + std::shared_lock l(state_protector_); + return ((role_ & PIKA_ROLE_SLAVE) != 0) && g_pika_conf->slave_read_only(); +} + +int PikaServer::repl_state() { + std::shared_lock l(state_protector_); + return repl_state_; +} + +std::string PikaServer::repl_state_str() { + std::shared_lock l(state_protector_); + switch (repl_state_) { + case PIKA_REPL_NO_CONNECT: + return "no connect"; + case PIKA_REPL_SHOULD_META_SYNC: + return "should meta sync"; + case PIKA_REPL_META_SYNC_DONE: + return "meta sync done"; + case PIKA_REPL_ERROR: + return "error"; + default: + return ""; + } +} + +bool PikaServer::force_full_sync() { return force_full_sync_; } + +void PikaServer::SetForceFullSync(bool v) { force_full_sync_ = v; } + +void PikaServer::SetDispatchQueueLimit(int queue_limit) { + rlimit limit; + rlim_t maxfiles = g_pika_conf->maxclients() + PIKA_MIN_RESERVED_FDS; + if (getrlimit(RLIMIT_NOFILE, &limit) == -1) { + LOG(WARNING) << "getrlimit error: " << strerror(errno); + } else if (limit.rlim_cur < maxfiles) { + rlim_t old_limit = limit.rlim_cur; + limit.rlim_cur = maxfiles; + limit.rlim_max = maxfiles; + if (setrlimit(RLIMIT_NOFILE, &limit) != -1) { + LOG(WARNING) << "your 'limit -n ' of " << old_limit + << " is not enough for Redis to start. pika have successfully reconfig it to " << limit.rlim_cur; + } else { + LOG(FATAL) << "your 'limit -n ' of " << old_limit + << " is not enough for Redis to start. 
pika can not reconfig it(" << strerror(errno) + << "), do it by yourself"; + } + } + pika_dispatch_thread_->SetQueueLimit(queue_limit); +} + +storage::StorageOptions PikaServer::storage_options() { + std::shared_lock rwl(storage_options_rw_); + return storage_options_; +} + +void PikaServer::InitDBStruct() { + std::string db_path = g_pika_conf->db_path(); + std::string log_path = g_pika_conf->log_path(); + std::vector db_structs = g_pika_conf->db_structs(); + std::lock_guard rwl(dbs_rw_); + for (const auto& db : db_structs) { + std::string name = db.db_name; + std::shared_ptr db_ptr = std::make_shared(name, db_path, log_path); + db_ptr->Init(); + dbs_.emplace(name, db_ptr); + } +} + +std::shared_ptr PikaServer::GetDB(const std::string& db_name) { + std::shared_lock l(dbs_rw_); + auto iter = dbs_.find(db_name); + return (iter == dbs_.end()) ? nullptr : iter->second; +} + +bool PikaServer::IsBgSaving() { + std::shared_lock l(dbs_rw_); + for (const auto& db_item : dbs_) { + if (db_item.second->IsBgSaving()) { + return true; + } + } + return false; +} + +bool PikaServer::IsKeyScaning() { + std::shared_lock l(dbs_rw_); + for (const auto& db_item : dbs_) { + if (db_item.second->IsKeyScaning()) { + return true; + } + } + return false; +} + +bool PikaServer::IsCompacting() { + std::shared_lock db_rwl(dbs_rw_); + for (const auto& db_item : dbs_) { + db_item.second->DBLockShared(); + std::string task_type = db_item.second->storage()->GetCurrentTaskType(); + db_item.second->DBUnlockShared(); + if (strcasecmp(task_type.data(), "no") != 0) { + return true; + } + } + return false; +} + +bool PikaServer::IsDBExist(const std::string& db_name) { return static_cast(GetDB(db_name)); } + +bool PikaServer::IsDBBinlogIoError(const std::string& db_name) { + std::shared_ptr db = GetDB(db_name); + return db ? db->IsBinlogIoError() : true; +} + +std::set PikaServer::GetAllDBName() { + std::set dbs; + std::shared_lock l(dbs_rw_); + for (const auto& db_item : dbs_) { + dbs.insert(db_item.first); + } + return dbs; +} + +Status PikaServer::DoSameThingSpecificDB(const std::set& dbs, const TaskArg& arg) { + std::shared_lock rwl(dbs_rw_); + for (const auto& db_item : dbs_) { + if (dbs.find(db_item.first) == dbs.end()) { + continue; + } + switch (arg.type) { + case TaskType::kCompactAll: + db_item.second->Compact(storage::DataType::kAll); + break; + case TaskType::kStartKeyScan: + db_item.second->KeyScan(); + break; + case TaskType::kStopKeyScan: + db_item.second->StopKeyScan(); + break; + case TaskType::kBgSave: + db_item.second->BgSaveDB(); + break; + case TaskType::kCompactRangeAll: + db_item.second->CompactRange(storage::DataType::kAll, arg.argv[0], arg.argv[1]); + break; + default: + break; + } + } + return Status::OK(); +} + +void PikaServer::PrepareDBTrySync() { + std::shared_lock rwl(dbs_rw_); + ReplState state = force_full_sync_ ? 
ReplState::kTryDBSync : ReplState::kTryConnect;
+  for (const auto& db_item : dbs_) {
+    Status s = g_pika_rm->ActivateSyncSlaveDB(
+        RmNode(g_pika_server->master_ip(), g_pika_server->master_port(), db_item.second->GetDBName()), state);
+    if (!s.ok()) {
+      LOG(WARNING) << s.ToString();
+    }
+  }
+  force_full_sync_ = false;
+  LOG(INFO) << "Mark try connect finish";
+}
+
+void PikaServer::DBSetMaxCacheStatisticKeys(uint32_t max_cache_statistic_keys) {
+  std::shared_lock rwl(dbs_rw_);
+  for (const auto& db_item : dbs_) {
+    db_item.second->DBLockShared();
+    db_item.second->storage()->SetMaxCacheStatisticKeys(max_cache_statistic_keys);
+    db_item.second->DBUnlockShared();
+  }
+}
+
+void PikaServer::DBSetSmallCompactionThreshold(uint32_t small_compaction_threshold) {
+  std::shared_lock rwl(dbs_rw_);
+  for (const auto& db_item : dbs_) {
+    db_item.second->DBLockShared();
+    db_item.second->storage()->SetSmallCompactionThreshold(small_compaction_threshold);
+    db_item.second->DBUnlockShared();
+  }
+}
+
+void PikaServer::DBSetSmallCompactionDurationThreshold(uint32_t small_compaction_duration_threshold) {
+  std::shared_lock rwl(dbs_rw_);
+  for (const auto& db_item : dbs_) {
+    db_item.second->DBLockShared();
+    db_item.second->storage()->SetSmallCompactionDurationThreshold(small_compaction_duration_threshold);
+    db_item.second->DBUnlockShared();
+  }
+}
+
+bool PikaServer::GetDBBinlogOffset(const std::string& db_name, BinlogOffset* const boffset) {
+  std::shared_ptr<SyncMasterDB> db = g_pika_rm->GetSyncMasterDBByName(DBInfo(db_name));
+  if (!db) {
+    return false;
+  }
+  Status s = db->Logger()->GetProducerStatus(&(boffset->filenum), &(boffset->offset));
+  return s.ok();
+}
+
+Status PikaServer::DoSameThingEveryDB(const TaskType& type) {
+  std::shared_lock rwl(dbs_rw_);
+  std::shared_ptr<SyncSlaveDB> slave_db = nullptr;
+  for (const auto& db_item : dbs_) {
+    switch (type) {
+      case TaskType::kResetReplState: {
+        slave_db = g_pika_rm->GetSyncSlaveDBByName(DBInfo(db_item.second->GetDBName()));
+        if (!slave_db) {
+          LOG(WARNING) << "Slave DB: " << db_item.second->GetDBName() << " Not Found";
+          break;  // avoid dereferencing a null pointer below
+        }
+        slave_db->SetReplState(ReplState::kNoConnect);
+        break;
+      }
+      case TaskType::kPurgeLog: {
+        std::shared_ptr<SyncMasterDB> db = g_pika_rm->GetSyncMasterDBByName(DBInfo(db_item.second->GetDBName()));
+        if (!db) {
+          LOG(WARNING) << "DB: " << db_item.second->GetDBName() << " Not Found.";
+          break;
+        }
+        db->StableLogger()->PurgeStableLogs();
+        break;
+      }
+      case TaskType::kCompactAll:
+        db_item.second->Compact(storage::DataType::kAll);
+        break;
+      case TaskType::kCompactOldestOrBestDeleteRatioSst:
+        db_item.second->LongestNotCompactionSstCompact(storage::DataType::kAll);
+        break;
+      default:
+        break;
+    }
+  }
+  return Status::OK();
+}
+
+void PikaServer::BecomeMaster() {
+  std::lock_guard l(state_protector_);
+  role_ |= PIKA_ROLE_MASTER;
+}
+
+void PikaServer::DeleteSlave(int fd) {
+  std::string ip;
+  int port = -1;
+  bool is_find = false;
+  int slave_num = -1;
+  {
+    std::lock_guard l(slave_mutex_);
+    auto iter = slaves_.begin();
+    while (iter != slaves_.end()) {
+      if (iter->conn_fd == fd) {
+        ip = iter->ip;
+        port = iter->port;
+        is_find = true;
+        LOG(INFO) << "Delete Slave Success, ip_port: " << iter->ip << ":" << iter->port;
+        slaves_.erase(iter);
+        break;
+      }
+      iter++;
+    }
+    slave_num = static_cast<int32_t>(slaves_.size());
+  }
+
+  if (is_find) {
+    g_pika_rm->LostConnection(ip, port);
+    g_pika_rm->DropItemInWriteQueue(ip, port);
+  }
+
+  if (slave_num == 0) {
+    std::lock_guard l(state_protector_);
+    role_ &= ~PIKA_ROLE_MASTER;
+    leader_protected_mode_ =
false; // explicitly cancel protected mode + } +} + +int32_t PikaServer::CountSyncSlaves() { + int32_t count = 0; + std::lock_guard l(slave_mutex_); + for (const auto& slave : slaves_) { + for (const auto& ts : slave.db_structs) { + SlaveState slave_state; + std::shared_ptr db = g_pika_rm->GetSyncMasterDBByName(DBInfo(ts.db_name)); + if (!db) { + continue; + } + Status s = db->GetSlaveState(slave.ip, slave.port, &slave_state); + if (s.ok() && slave_state == SlaveState::kSlaveDbSync) { + count++; + } + } + } + return count; +} + +int32_t PikaServer::GetSlaveListString(std::string& slave_list_str) { + size_t index = 0; + SlaveState slave_state; + BinlogOffset master_boffset; + BinlogOffset sent_slave_boffset; + BinlogOffset acked_slave_boffset; + std::stringstream tmp_stream; + std::lock_guard l(slave_mutex_); + std::shared_ptr master_db = nullptr; + for (const auto& slave : slaves_) { + tmp_stream << "slave" << index++ << ":ip=" << slave.ip << ",port=" << slave.port << ",conn_fd=" << slave.conn_fd + << ",lag="; + for (const auto& ts : slave.db_structs) { + std::shared_ptr db = g_pika_rm->GetSyncMasterDBByName(DBInfo(ts.db_name)); + if (!db) { + LOG(WARNING) << "Sync Master DB: " << ts.db_name << ", NotFound"; + continue; + } + Status s = db->GetSlaveState(slave.ip, slave.port, &slave_state); + if (s.ok() && slave_state == SlaveState::kSlaveBinlogSync && + db->GetSlaveSyncBinlogInfo(slave.ip, slave.port, &sent_slave_boffset, &acked_slave_boffset).ok()) { + Status s = db->Logger()->GetProducerStatus(&(master_boffset.filenum), &(master_boffset.offset)); + if (!s.ok()) { + continue; + } else { + uint64_t lag = + static_cast((master_boffset.filenum - sent_slave_boffset.filenum)) * g_pika_conf->binlog_file_size() + + master_boffset.offset - sent_slave_boffset.offset; + tmp_stream << "(" << db->DBName() << ":" << lag << ")"; + } + } else if (s.ok() && slave_state == SlaveState::kSlaveDbSync) { + tmp_stream << "(" << db->DBName() << ":full syncing)"; + } else { + tmp_stream << "(" << db->DBName() << ":not syncing)"; + } + } + tmp_stream << "\r\n"; + } + slave_list_str.assign(tmp_stream.str()); + return static_cast(index); +} + +// Try add Slave, return true if success, +// return false when slave already exist +bool PikaServer::TryAddSlave(const std::string& ip, int64_t port, int fd, const std::vector& db_structs) { + std::string ip_port = pstd::IpPortString(ip, static_cast(port)); + + std::lock_guard l(slave_mutex_); + auto iter = slaves_.begin(); + while (iter != slaves_.end()) { + if (iter->ip_port == ip_port) { + LOG(WARNING) << "Slave Already Exist, ip_port: " << ip << ":" << port; + return false; + } + iter++; + } + + // Not exist, so add new + LOG(INFO) << "Add New Slave, " << ip << ":" << port; + SlaveItem s; + s.ip_port = ip_port; + s.ip = ip; + s.port = static_cast(port); + s.conn_fd = fd; + s.stage = SLAVE_ITEM_STAGE_ONE; + s.db_structs = db_structs; + gettimeofday(&s.create_time, nullptr); + slaves_.push_back(s); + return true; +} + +void PikaServer::SyncError() { + std::lock_guard l(state_protector_); + repl_state_ = PIKA_REPL_ERROR; + LOG(WARNING) << "Sync error, set repl_state to PIKA_REPL_ERROR"; +} + +void PikaServer::RemoveMaster() { + { + std::lock_guard l(state_protector_); + repl_state_ = PIKA_REPL_NO_CONNECT; + role_ &= ~PIKA_ROLE_SLAVE; + + if (!master_ip_.empty() && master_port_ != -1) { + g_pika_rm->CloseReplClientConn(master_ip_, master_port_ + kPortShiftReplServer); + g_pika_rm->DeactivateSyncSlaveDB(master_ip_, master_port_); + UpdateMetaSyncTimestampWithoutLock(); + 
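// The connection closed above is the replication link (master_port_ +
+      // kPortShiftReplServer, the fixed offset defined in pika_define.h),
+      // not a client connection.
+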
LOG(INFO) << "Remove Master Success, ip_port: " << master_ip_ << ":" << master_port_; + } + + master_ip_ = ""; + master_port_ = -1; + DoSameThingEveryDB(TaskType::kResetReplState); + } +} + +bool PikaServer::SetMaster(std::string& master_ip, int master_port) { + if (master_ip == "127.0.0.1") { + master_ip = host_; + } + std::lock_guard l(state_protector_); + if (((role_ ^ PIKA_ROLE_SLAVE) != 0) && repl_state_ == PIKA_REPL_NO_CONNECT) { + master_ip_ = master_ip; + master_port_ = master_port; + role_ |= PIKA_ROLE_SLAVE; + repl_state_ = PIKA_REPL_SHOULD_META_SYNC; + return true; + } + return false; +} + +bool PikaServer::ShouldMetaSync() { + std::shared_lock l(state_protector_); + return repl_state_ == PIKA_REPL_SHOULD_META_SYNC; +} + +void PikaServer::FinishMetaSync() { + std::lock_guard l(state_protector_); + assert(repl_state_ == PIKA_REPL_SHOULD_META_SYNC); + repl_state_ = PIKA_REPL_META_SYNC_DONE; +} + +bool PikaServer::MetaSyncDone() { + std::shared_lock l(state_protector_); + return repl_state_ == PIKA_REPL_META_SYNC_DONE; +} + +void PikaServer::ResetMetaSyncStatus() { + std::lock_guard sp_l(state_protector_); + if ((role_ & PIKA_ROLE_SLAVE) != 0) { + // not change by slaveof no one, so set repl_state = PIKA_REPL_SHOULD_META_SYNC, + // continue to connect master + repl_state_ = PIKA_REPL_SHOULD_META_SYNC; + DoSameThingEveryDB(TaskType::kResetReplState); + } +} + +int PikaServer::GetMetaSyncTimestamp() { + std::shared_lock sp_l(state_protector_); + return last_meta_sync_timestamp_; +} + +void PikaServer::UpdateMetaSyncTimestamp() { + std::lock_guard sp_l(state_protector_); + last_meta_sync_timestamp_ = static_cast(time(nullptr)); +} + +void PikaServer::UpdateMetaSyncTimestampWithoutLock() { + last_meta_sync_timestamp_ = static_cast(time(nullptr)); +} + +bool PikaServer::IsFirstMetaSync() { + std::shared_lock sp_l(state_protector_); + return first_meta_sync_; +} + +void PikaServer::SetFirstMetaSync(bool v) { + std::lock_guard sp_l(state_protector_); + first_meta_sync_ = v; +} + +void PikaServer::ScheduleClientPool(net::TaskFunc func, void* arg, bool is_slow_cmd, bool is_admin_cmd) { + if (is_slow_cmd && g_pika_conf->slow_cmd_pool()) { + pika_slow_cmd_thread_pool_->Schedule(func, arg); + return; + } + if (is_admin_cmd) { + pika_admin_cmd_thread_pool_->Schedule(func, arg); + return; + } + pika_client_processor_->SchedulePool(func, arg); +} + +size_t PikaServer::ClientProcessorThreadPoolCurQueueSize() { + if (!pika_client_processor_) { + return 0; + } + return pika_client_processor_->ThreadPoolCurQueueSize(); +} + +size_t PikaServer::ClientProcessorThreadPoolMaxQueueSize() { + if (!pika_client_processor_) { + return 0; + } + return pika_client_processor_->ThreadPoolMaxQueueSize(); +} + +size_t PikaServer::SlowCmdThreadPoolCurQueueSize() { + if (!pika_slow_cmd_thread_pool_) { + return 0; + } + size_t cur_size = 0; + pika_slow_cmd_thread_pool_->cur_queue_size(&cur_size); + return cur_size; +} + +size_t PikaServer::SlowCmdThreadPoolMaxQueueSize() { + if (!pika_slow_cmd_thread_pool_) { + return 0; + } + return pika_slow_cmd_thread_pool_->max_queue_size(); +} + +void PikaServer::BGSaveTaskSchedule(net::TaskFunc func, void* arg) { + bgsave_thread_.StartThread(); + bgsave_thread_.Schedule(func, arg); +} + +void PikaServer::PurgelogsTaskSchedule(net::TaskFunc func, void* arg) { + purge_thread_.StartThread(); + purge_thread_.Schedule(func, arg); +} + +void PikaServer::PurgeDir(const std::string& path) { + auto dir_path = new std::string(path); + PurgeDirTaskSchedule(&DoPurgeDir, 
static_cast<void*>(dir_path));
+}
+
+void PikaServer::PurgeDirTaskSchedule(void (*function)(void*), void* arg) {
+  purge_thread_.StartThread();
+  purge_thread_.Schedule(function, arg);
+}
+
+pstd::Status PikaServer::GetDumpUUID(const std::string& db_name, std::string* snapshot_uuid) {
+  std::shared_ptr<DB> db = GetDB(db_name);
+  if (!db) {
+    LOG(WARNING) << "cannot find db for db_name " << db_name;
+    return pstd::Status::NotFound("db not found");
+  }
+  db->GetBgSaveUUID(snapshot_uuid);
+  return pstd::Status::OK();
+}
+
+pstd::Status PikaServer::GetDumpMeta(const std::string& db_name, std::vector<std::string>* fileNames,
+                                     std::string* snapshot_uuid) {
+  std::shared_ptr<DB> db = GetDB(db_name);
+  if (!db) {
+    LOG(WARNING) << "cannot find db for db_name " << db_name;
+    return pstd::Status::NotFound("db not found");
+  }
+  db->GetBgSaveMetaData(fileNames, snapshot_uuid);
+  return pstd::Status::OK();
+}
+
+void PikaServer::TryDBSync(const std::string& ip, int port, const std::string& db_name, int32_t top) {
+  std::shared_ptr<DB> db = GetDB(db_name);
+  if (!db) {
+    LOG(WARNING) << "can not find DB: " << db_name << ", TryDBSync Failed";
+    return;
+  }
+  std::shared_ptr<SyncMasterDB> sync_db = g_pika_rm->GetSyncMasterDBByName(DBInfo(db_name));
+  if (!sync_db) {
+    LOG(WARNING) << "can not find Sync Master DB: " << db_name << ", TryDBSync Failed";
+    return;
+  }
+  BgSaveInfo bgsave_info = db->bgsave_info();
+  std::string logger_filename = sync_db->Logger()->filename();
+  if (pstd::IsDir(bgsave_info.path) != 0 ||
+      !pstd::FileExists(NewFileName(logger_filename, bgsave_info.offset.b_offset.filenum)) ||
+      static_cast<int64_t>(top) - static_cast<int64_t>(bgsave_info.offset.b_offset.filenum) >
+          static_cast<int64_t>(kDBSyncMaxGap)) {
+    // Need to bgsave first
+    db->BgSaveDB();
+  }
+}
+
+void PikaServer::KeyScanTaskSchedule(net::TaskFunc func, void* arg) {
+  key_scan_thread_.StartThread();
+  key_scan_thread_.Schedule(func, arg);
+}
+
+void PikaServer::ClientKillAll() {
+  pika_dispatch_thread_->ClientKillAll();
+  pika_pubsub_thread_->NotifyCloseAllConns();
+}
+
+void PikaServer::ClientKillPubSub() { pika_pubsub_thread_->NotifyCloseAllConns(); }
+
+void PikaServer::ClientKillAllNormal() { pika_dispatch_thread_->ClientKillAll(); }
+
+int PikaServer::ClientKill(const std::string& ip_port) {
+  if (pika_dispatch_thread_->ClientKill(ip_port)) {
+    return 1;
+  }
+  return 0;
+}
+
+int64_t PikaServer::ClientList(std::vector<ClientInfo>* clients) {
+  int64_t clients_num = 0;
+  clients_num += static_cast<int64_t>(pika_dispatch_thread_->ThreadClientList(clients));
+  return clients_num;
+}
+
+bool PikaServer::HasMonitorClients() const {
+  std::unique_lock lock(monitor_mutex_protector_);
+  return !pika_monitor_clients_.empty();
+}
+
+bool PikaServer::ClientIsMonitor(const std::shared_ptr<PikaClientConn>& client_ptr) const {
+  std::unique_lock lock(monitor_mutex_protector_);
+  return pika_monitor_clients_.count(client_ptr) != 0;
+}
+
+void PikaServer::AddMonitorMessage(const std::string& monitor_message) {
+  const std::string msg = "+" + monitor_message + "\r\n";
+
+  std::vector<std::shared_ptr<PikaClientConn>> clients;
+
+  std::unique_lock lock(monitor_mutex_protector_);
+  clients.reserve(pika_monitor_clients_.size());
+  for (auto it = pika_monitor_clients_.begin(); it != pika_monitor_clients_.end();) {
+    auto cli = (*it).lock();
+    if (cli) {
+      clients.push_back(std::move(cli));
+      ++it;
+    } else {
+      it = pika_monitor_clients_.erase(it);
+    }
+  }
+  lock.unlock();  // SendReply without holding the lock
+  for (const auto& cli : clients) {
+    cli->WriteResp(msg);
+    cli->SendReply();
+  }
+}
+
+void PikaServer::AddMonitorClient(const std::shared_ptr<PikaClientConn>& client_ptr) {
+  if (client_ptr) {
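+    // Monitor clients are stored as weak_ptrs; expired entries are pruned
+    // lazily in AddMonitorMessage() above, so registering a client here only
+    // requires inserting it under the same mutex.
+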
std::unique_lock lock(monitor_mutex_protector_); + pika_monitor_clients_.insert(client_ptr); + } +} + +void PikaServer::SlowlogTrim() { + std::lock_guard l(slowlog_protector_); + while (slowlog_list_.size() > static_cast(g_pika_conf->slowlog_max_len())) { + slowlog_list_.pop_back(); + } +} + +void PikaServer::SlowlogReset() { + std::lock_guard l(slowlog_protector_); + slowlog_list_.clear(); +} + +uint32_t PikaServer::SlowlogLen() { + std::shared_lock l(slowlog_protector_); + return slowlog_list_.size(); +} + +void PikaServer::SlowlogObtain(int64_t number, std::vector* slowlogs) { + std::shared_lock l(slowlog_protector_); + slowlogs->clear(); + auto iter = slowlog_list_.begin(); + while (((number--) != 0) && iter != slowlog_list_.end()) { + slowlogs->push_back(*iter); + iter++; + } +} + +void PikaServer::SlowlogPushEntry(const PikaCmdArgsType& argv, int64_t time, int64_t duration) { + SlowlogEntry entry; + uint32_t slargc = (argv.size() < SLOWLOG_ENTRY_MAX_ARGC) ? argv.size() : SLOWLOG_ENTRY_MAX_ARGC; + + for (uint32_t idx = 0; idx < slargc; ++idx) { + if (slargc != argv.size() && idx == slargc - 1) { + char buffer[32]; + snprintf(buffer, sizeof(buffer), "... (%lu more arguments)", argv.size() - slargc + 1); + entry.argv.push_back(std::string(buffer)); + } else { + if (argv[idx].size() > SLOWLOG_ENTRY_MAX_STRING) { + char buffer[32]; + snprintf(buffer, sizeof(buffer), "... (%lu more bytes)", argv[idx].size() - SLOWLOG_ENTRY_MAX_STRING); + std::string suffix(buffer); + std::string brief = argv[idx].substr(0, SLOWLOG_ENTRY_MAX_STRING); + entry.argv.push_back(brief + suffix); + } else { + entry.argv.push_back(argv[idx]); + } + } + } + + { + std::lock_guard lock(slowlog_protector_); + entry.id = static_cast(slowlog_entry_id_++); + entry.start_time = time; + entry.duration = duration; + slowlog_list_.push_front(entry); + slowlog_counter_++; + } + + SlowlogTrim(); +} + +uint64_t PikaServer::SlowlogCount() { + std::shared_lock l(slowlog_protector_); + return slowlog_counter_; +} + +void PikaServer::ResetStat() { + statistic_.server_stat.accumulative_connections.store(0); + statistic_.server_stat.qps.querynum.store(0); + statistic_.server_stat.qps.last_querynum.store(0); +} + +uint64_t PikaServer::ServerQueryNum() { return statistic_.server_stat.qps.querynum.load(); } + +uint64_t PikaServer::ServerCurrentQps() { return statistic_.server_stat.qps.last_sec_querynum.load(); } + +uint64_t PikaServer::accumulative_connections() { return statistic_.server_stat.accumulative_connections.load(); } + +long long PikaServer::ServerKeyspaceHits() { return statistic_.server_stat.keyspace_hits.load(); } +long long PikaServer::ServerKeyspaceMisses() { return statistic_.server_stat.keyspace_misses.load(); } + +void PikaServer::incr_accumulative_connections() { ++(statistic_.server_stat.accumulative_connections); } +void PikaServer::incr_server_keyspace_hits() { ++(statistic_.server_stat.keyspace_hits); } +void PikaServer::incr_server_keyspace_misses() { ++(statistic_.server_stat.keyspace_misses); } + +// only one thread invoke this right now +void PikaServer::ResetLastSecQuerynum() { + statistic_.server_stat.qps.ResetLastSecQuerynum(); + statistic_.ResetDBLastSecQuerynum(); +} + +void PikaServer::UpdateQueryNumAndExecCountDB(const std::string& db_name, const std::string& command, bool is_write) { + std::string cmd(command); + statistic_.server_stat.qps.querynum++; + statistic_.server_stat.exec_count_db[pstd::StringToUpper(cmd)]++; + statistic_.UpdateDBQps(db_name, command, is_write); +} + +size_t 
PikaServer::NetInputBytes() { return g_network_statistic->NetInputBytes(); }
+
+size_t PikaServer::NetOutputBytes() { return g_network_statistic->NetOutputBytes(); }
+
+size_t PikaServer::NetReplInputBytes() { return g_network_statistic->NetReplInputBytes(); }
+
+size_t PikaServer::NetReplOutputBytes() { return g_network_statistic->NetReplOutputBytes(); }
+
+float PikaServer::InstantaneousInputKbps() {
+  return static_cast<float>(g_pika_server->instant_->getInstantaneousMetric(STATS_METRIC_NET_INPUT)) / 1024.0f;
+}
+
+float PikaServer::InstantaneousOutputKbps() {
+  return static_cast<float>(g_pika_server->instant_->getInstantaneousMetric(STATS_METRIC_NET_OUTPUT)) / 1024.0f;
+}
+
+float PikaServer::InstantaneousInputReplKbps() {
+  return static_cast<float>(g_pika_server->instant_->getInstantaneousMetric(STATS_METRIC_NET_INPUT_REPLICATION)) /
+         1024.0f;
+}
+
+float PikaServer::InstantaneousOutputReplKbps() {
+  return static_cast<float>(g_pika_server->instant_->getInstantaneousMetric(STATS_METRIC_NET_OUTPUT_REPLICATION)) /
+         1024.0f;
+}
+
+std::unordered_map<std::string, uint64_t> PikaServer::ServerExecCountDB() {
+  std::unordered_map<std::string, uint64_t> res;
+  for (auto& cmd : statistic_.server_stat.exec_count_db) {
+    res[cmd.first] = cmd.second.load();
+  }
+  return res;
+}
+
+std::unordered_map<std::string, QpsStatistic> PikaServer::ServerAllDBStat() { return statistic_.AllDBStat(); }
+
+int PikaServer::SendToPeer() { return g_pika_rm->ConsumeWriteQueue(); }
+
+void PikaServer::SignalAuxiliary() { pika_auxiliary_thread_->cv_.notify_one(); }
+
+Status PikaServer::TriggerSendBinlogSync() { return g_pika_rm->WakeUpBinlogSync(); }
+
+int PikaServer::PubSubNumPat() { return pika_pubsub_thread_->PubSubNumPat(); }
+
+int PikaServer::Publish(const std::string& channel, const std::string& msg) {
+  int receivers = pika_pubsub_thread_->Publish(channel, msg);
+  return receivers;
+}
+
+void PikaServer::EnablePublish(int fd) {
+  pika_pubsub_thread_->UpdateConnReadyState(fd, net::PubSubThread::ReadyState::kReady);
+}
+
+int PikaServer::UnSubscribe(const std::shared_ptr<net::NetConn>& conn, const std::vector<std::string>& channels,
+                            bool pattern, std::vector<std::pair<std::string, int>>* result) {
+  int subscribed = pika_pubsub_thread_->UnSubscribe(conn, channels, pattern, result);
+  return subscribed;
+}
+
+void PikaServer::Subscribe(const std::shared_ptr<net::NetConn>& conn, const std::vector<std::string>& channels,
+                           bool pattern, std::vector<std::pair<std::string, int>>* result) {
+  pika_pubsub_thread_->Subscribe(conn, channels, pattern, result);
+}
+
+void PikaServer::PubSubChannels(const std::string& pattern, std::vector<std::string>* result) {
+  pika_pubsub_thread_->PubSubChannels(pattern, result);
+}
+
+void PikaServer::PubSubNumSub(const std::vector<std::string>& channels,
+                              std::vector<std::pair<std::string, int>>* result) {
+  pika_pubsub_thread_->PubSubNumSub(channels, result);
+}
+
+int PikaServer::ClientPubSubChannelSize(const std::shared_ptr<net::NetConn>& conn) {
+  return pika_pubsub_thread_->ClientPubSubChannelSize(conn);
+}
+
+int PikaServer::ClientPubSubChannelPatternSize(const std::shared_ptr<net::NetConn>& conn) {
+  return pika_pubsub_thread_->ClientPubSubChannelPatternSize(conn);
+}
+
+/******************************* PRIVATE *******************************/
+
+void PikaServer::DoTimingTask() {
+  // Maybe schedule compactrange
+  AutoCompactRange();
+  // Purge server logs
+  AutoServerlogPurge();
+  // Purge binlog
+  AutoBinlogPurge();
+  // Delete expired dump
+  AutoDeleteExpiredDump();
+  // Check Rsync status
+  // TODO: temporarily disable rsync
+  // AutoKeepAliveRSync();
+  // Reset server qps
+  ResetLastSecQuerynum();
+  // Auto update network instantaneous metric
+  AutoUpdateNetworkMetric();
+  ProcessCronTask();
+  UpdateCacheInfo();
+  // Print the queue status
periodically + PrintThreadPoolQueueStatus(); + StatDiskUsage(); +} + +void PikaServer::StatDiskUsage() { + thread_local uint64_t last_update_time = 0; + auto current_time = pstd::NowMicros(); + if (current_time - last_update_time < 60 * 1000 * 1000) { + return; + } + last_update_time = current_time; + + disk_statistic_.db_size_.store(pstd::Du(g_pika_conf->db_path())); + disk_statistic_.log_size_.store(pstd::Du(g_pika_conf->log_path())); +} + +void PikaServer::AutoCompactRange() { + struct statfs disk_info; + int ret = statfs(g_pika_conf->db_path().c_str(), &disk_info); + if (ret == -1) { + LOG(WARNING) << "statfs error: " << strerror(errno); + return; + } + + uint64_t total_size = disk_info.f_bsize * disk_info.f_blocks; + uint64_t free_size = disk_info.f_bsize * disk_info.f_bfree; + std::string ci = g_pika_conf->compact_interval(); + std::string cc = g_pika_conf->compact_cron(); + + if (!ci.empty()) { + std::string::size_type slash = ci.find('/'); + int interval = std::atoi(ci.substr(0, slash).c_str()); + int usage = std::atoi(ci.substr(slash + 1).c_str()); + struct timeval now; + gettimeofday(&now, nullptr); + if (last_check_compact_time_.tv_sec == 0 || now.tv_sec - last_check_compact_time_.tv_sec >= interval * 3600) { + gettimeofday(&last_check_compact_time_, nullptr); + if ((static_cast(free_size) / static_cast(total_size)) * 100 >= usage) { + std::set dbs = g_pika_server->GetAllDBName(); + Status s = DoSameThingSpecificDB(dbs, {TaskType::kCompactAll}); + if (s.ok()) { + LOG(INFO) << "[Interval]schedule compactRange, freesize: " << free_size / 1048576 + << "MB, disksize: " << total_size / 1048576 << "MB"; + } else { + LOG(INFO) << "[Interval]schedule compactRange Failed, freesize: " << free_size / 1048576 + << "MB, disksize: " << total_size / 1048576 << "MB, error: " << s.ToString(); + } + } else { + LOG(WARNING) << "compact-interval failed, because there is not enough disk space left, freesize" + << free_size / 1048576 << "MB, disksize: " << total_size / 1048576 << "MB"; + } + } + return; + } + + if (!cc.empty()) { + bool have_week = false; + std::string compact_cron; + std::string week_str; + int64_t slash_num = count(cc.begin(), cc.end(), '/'); + if (slash_num == 2) { + have_week = true; + std::string::size_type first_slash = cc.find('/'); + week_str = cc.substr(0, first_slash); + compact_cron = cc.substr(first_slash + 1); + } else { + compact_cron = cc; + } + + std::string::size_type colon = compact_cron.find('-'); + std::string::size_type underline = compact_cron.find('/'); + int week = have_week ? (std::atoi(week_str.c_str()) % 7) : 0; + int start = std::atoi(compact_cron.substr(0, colon).c_str()); + int end = std::atoi(compact_cron.substr(colon + 1, underline).c_str()); + int usage = std::atoi(compact_cron.substr(underline + 1).c_str()); + std::time_t t = std::time(nullptr); + std::tm* t_m = std::localtime(&t); + + bool in_window = false; + if (start < end && (t_m->tm_hour >= start && t_m->tm_hour < end)) { + in_window = have_week ? 
(week == t_m->tm_wday) : true;
+    } else if (start > end &&
+               ((t_m->tm_hour >= start && t_m->tm_hour < 24) || (t_m->tm_hour >= 0 && t_m->tm_hour < end))) {
+      in_window = !have_week;
+    } else {
+      have_scheduled_crontask_ = false;
+    }
+
+    if (!have_scheduled_crontask_ && in_window) {
+      if ((static_cast<double>(free_size) / static_cast<double>(total_size)) * 100 >= usage) {
+        Status s = DoSameThingEveryDB(TaskType::kCompactAll);
+        if (s.ok()) {
+          LOG(INFO) << "[Cron]schedule compactRange, freesize: " << free_size / 1048576
+                    << "MB, disksize: " << total_size / 1048576 << "MB";
+        } else {
+          LOG(INFO) << "[Cron]schedule compactRange Failed, freesize: " << free_size / 1048576
+                    << "MB, disksize: " << total_size / 1048576 << "MB, error: " << s.ToString();
+        }
+        have_scheduled_crontask_ = true;
+      } else {
+        LOG(WARNING) << "compact-cron failed, because there is not enough disk space left, freesize: "
+                     << free_size / 1048576 << "MB, disksize: " << total_size / 1048576 << "MB";
+      }
+    }
+  }
+
+  if (g_pika_conf->compaction_strategy() == PikaConf::FullCompact) {
+    DoSameThingEveryDB(TaskType::kCompactAll);
+  } else if (g_pika_conf->compaction_strategy() == PikaConf::OldestOrBestDeleteRatioSstCompact) {
+    DoSameThingEveryDB(TaskType::kCompactOldestOrBestDeleteRatioSst);
+  }
+}
+
+void PikaServer::AutoBinlogPurge() { DoSameThingEveryDB(TaskType::kPurgeLog); }
+
+void PikaServer::AutoServerlogPurge() {
+  std::string log_path = g_pika_conf->log_path();
+  int retention_time = g_pika_conf->log_retention_time();
+  if (retention_time < 0) {
+    return;
+  }
+  std::vector<std::string> log_files;
+
+  if (!pstd::FileExists(log_path)) {
+    return;
+  }
+
+  if (pstd::GetChildren(log_path, log_files) != 0) {
+    return;
+  }
+  // Get the current system time, truncated to midnight
+  time_t t = time(nullptr);
+  struct tm* now_time = localtime(&t);
+  now_time->tm_hour = 0;
+  now_time->tm_min = 0;
+  now_time->tm_sec = 0;
+  time_t now_timestamp = mktime(now_time);
+
+  std::map<std::string, std::vector<std::pair<std::string, time_t>>> log_files_by_level;
+
+  // Server log format: pika.[hostname].[user name].log.[severity level].[date].[time].[pid]
+  for (const auto& file : log_files) {
+    std::vector<std::string> file_parts;
+    pstd::StringSplit(file, '.', file_parts);
+    if (file_parts.size() < 7) {
+      continue;
+    }
+
+    std::string severity_level = file_parts[4];
+    if (severity_level != "WARNING" && severity_level != "INFO" && severity_level != "ERROR") {
+      continue;
+    }
+
+    int log_year, log_month, log_day;
+    if (sscanf(file_parts[5].c_str(), "%4d%2d%2d", &log_year, &log_month, &log_day) != 3) {
+      continue;
+    }
+
+    // Get the time when the server log file was originally created
+    struct tm log_time;
+    log_time.tm_year = log_year - 1900;
+    log_time.tm_mon = log_month - 1;
+    log_time.tm_mday = log_day;
+    log_time.tm_hour = 0;
+    log_time.tm_min = 0;
+    log_time.tm_sec = 0;
+    log_time.tm_isdst = -1;
+    time_t log_timestamp = mktime(&log_time);
+    log_files_by_level[severity_level].push_back({file, log_timestamp});
+  }
+
+  // Process files for each log level
+  for (auto& [level, files] : log_files_by_level) {
+    // Sort by time in descending order
+    std::sort(files.begin(), files.end(),
+              [](const auto& a, const auto& b) { return a.second > b.second; });
+
+    bool has_recent_file = false;
+    for (const auto& [file, log_timestamp] : files) {
+      double diff_seconds = difftime(now_timestamp, log_timestamp);
+      int64_t interval_days = static_cast<int64_t>(diff_seconds / 86400);
+      if (interval_days <= retention_time) {
+        has_recent_file = true;
+        continue;
+      }
+      // always keep the newest file that has passed the retention window
+      if (!has_recent_file) {
+        has_recent_file = true;
+        continue;
+      }
+      std::string log_file = log_path + "/" + file;
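+      // At this point the file is older than the retention window and is not
+      // the newest expired file for its severity level (that one is kept via
+      // has_recent_file above), so it can be removed.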
LOG(INFO) << "Deleting out of date log file: " << log_file; + if(!pstd::DeleteFile(log_file)) LOG(ERROR) << "Failed to delete log file: " << log_file; + } + } +} + +void PikaServer::AutoDeleteExpiredDump() { + std::string db_sync_prefix = g_pika_conf->bgsave_prefix(); + std::string db_sync_path = g_pika_conf->bgsave_path(); + int expiry_days = g_pika_conf->expire_dump_days(); + std::vector dump_dir; + + // Never expire + if (expiry_days <= 0) { + return; + } + + // Dump is not exist + if (!pstd::FileExists(db_sync_path)) { + return; + } + + // Directory traversal + if (pstd::GetChildren(db_sync_path, dump_dir) != 0) { + return; + } + // Handle dump directory + for (auto& i : dump_dir) { + if (i.substr(0, db_sync_prefix.size()) != db_sync_prefix || i.size() != (db_sync_prefix.size() + 8)) { + continue; + } + + std::string str_date = i.substr(db_sync_prefix.size(), (i.size() - db_sync_prefix.size())); + char* end = nullptr; + std::strtol(str_date.c_str(), &end, 10); + if (*end != 0) { + continue; + } + + // Parse filename + int dump_year = std::atoi(str_date.substr(0, 4).c_str()); + int dump_month = std::atoi(str_date.substr(4, 2).c_str()); + int dump_day = std::atoi(str_date.substr(6, 2).c_str()); + + time_t t = time(nullptr); + struct tm* now = localtime(&t); + int now_year = now->tm_year + 1900; + int now_month = now->tm_mon + 1; + int now_day = now->tm_mday; + + struct tm dump_time; + struct tm now_time; + + dump_time.tm_year = dump_year; + dump_time.tm_mon = dump_month; + dump_time.tm_mday = dump_day; + dump_time.tm_hour = 0; + dump_time.tm_min = 0; + dump_time.tm_sec = 0; + + now_time.tm_year = now_year; + now_time.tm_mon = now_month; + now_time.tm_mday = now_day; + now_time.tm_hour = 0; + now_time.tm_min = 0; + now_time.tm_sec = 0; + + int64_t dump_timestamp = mktime(&dump_time); + int64_t now_timestamp = mktime(&now_time); + // How many days, 1 day = 86400s + int64_t interval_days = (now_timestamp - dump_timestamp) / 86400; + + if (interval_days >= expiry_days) { + std::string dump_file = db_sync_path + i; + if (CountSyncSlaves() == 0) { + LOG(INFO) << "Not syncing, delete dump file: " << dump_file; + pstd::DeleteDirIfExist(dump_file); + } else { + LOG(INFO) << "Syncing, can not delete " << dump_file << " dump file"; + } + } + } +} + +void PikaServer::AutoUpdateNetworkMetric() { + monotime current_time = getMonotonicUs(); + size_t factor = 5e6; // us, 5s + instant_->trackInstantaneousMetric(STATS_METRIC_NET_INPUT, + g_pika_server->NetInputBytes() + g_pika_server->NetReplInputBytes(), current_time, + factor); + instant_->trackInstantaneousMetric(STATS_METRIC_NET_OUTPUT, + g_pika_server->NetOutputBytes() + g_pika_server->NetReplOutputBytes(), + current_time, factor); + instant_->trackInstantaneousMetric(STATS_METRIC_NET_INPUT_REPLICATION, g_pika_server->NetReplInputBytes(), + current_time, factor); + instant_->trackInstantaneousMetric(STATS_METRIC_NET_OUTPUT_REPLICATION, g_pika_server->NetReplOutputBytes(), + current_time, factor); +} + +void PikaServer::PrintThreadPoolQueueStatus() { + // Print the current queue size if it exceeds QUEUE_SIZE_THRESHOLD_PERCENTAGE/100 of the maximum queue size. 
+ size_t cur_size = ClientProcessorThreadPoolCurQueueSize(); + size_t max_size = ClientProcessorThreadPoolMaxQueueSize(); + size_t thread_hold = (max_size / 100) * QUEUE_SIZE_THRESHOLD_PERCENTAGE; + if (cur_size > thread_hold) { + LOG(INFO) << "The current queue size of the Pika Server's client thread processor thread pool: " << cur_size; + } +} + +void PikaServer::InitStorageOptions() { + std::lock_guard rwl(storage_options_rw_); + + // For rocksdb::Options + storage_options_.options.create_if_missing = true; + storage_options_.options.keep_log_file_num = 10; + storage_options_.options.max_manifest_file_size = 64 * 1024 * 1024; + storage_options_.options.max_log_file_size = 512 * 1024 * 1024; + + storage_options_.options.write_buffer_size = g_pika_conf->write_buffer_size(); + storage_options_.options.arena_block_size = g_pika_conf->arena_block_size(); + storage_options_.options.write_buffer_manager = + std::make_shared(g_pika_conf->max_write_buffer_size()); + storage_options_.options.max_total_wal_size = g_pika_conf->MaxTotalWalSize(); + storage_options_.options.max_write_buffer_number = g_pika_conf->max_write_buffer_number(); + storage_options_.options.level0_file_num_compaction_trigger = g_pika_conf->level0_file_num_compaction_trigger(); + storage_options_.options.level0_stop_writes_trigger = g_pika_conf->level0_stop_writes_trigger(); + storage_options_.options.level0_slowdown_writes_trigger = g_pika_conf->level0_slowdown_writes_trigger(); + storage_options_.options.min_write_buffer_number_to_merge = g_pika_conf->min_write_buffer_number_to_merge(); + storage_options_.options.max_bytes_for_level_base = g_pika_conf->level0_file_num_compaction_trigger() * g_pika_conf->write_buffer_size(); + storage_options_.options.max_subcompactions = g_pika_conf->max_subcompactions(); + storage_options_.options.target_file_size_base = g_pika_conf->target_file_size_base(); + storage_options_.options.max_compaction_bytes = g_pika_conf->max_compaction_bytes(); + storage_options_.options.max_background_flushes = g_pika_conf->max_background_flushes(); + storage_options_.options.max_background_compactions = g_pika_conf->max_background_compactions(); + storage_options_.options.disable_auto_compactions = g_pika_conf->disable_auto_compactions(); + storage_options_.options.max_background_jobs = g_pika_conf->max_background_jobs(); + storage_options_.options.delayed_write_rate = g_pika_conf->delayed_write_rate(); + storage_options_.options.max_open_files = g_pika_conf->max_cache_files(); + storage_options_.options.max_bytes_for_level_multiplier = g_pika_conf->max_bytes_for_level_multiplier(); + storage_options_.options.optimize_filters_for_hits = g_pika_conf->optimize_filters_for_hits(); + storage_options_.options.level_compaction_dynamic_level_bytes = g_pika_conf->level_compaction_dynamic_level_bytes(); + + storage_options_.options.compression = PikaConf::GetCompression(g_pika_conf->compression()); + storage_options_.options.compression_per_level = g_pika_conf->compression_per_level(); + // avoid blocking io on scan + // see https://github.com/facebook/rocksdb/wiki/IO#avoid-blocking-io + storage_options_.options.avoid_unnecessary_blocking_io = true; + + // default l0 l1 noCompression l2 and more use `compression` option + if (storage_options_.options.compression_per_level.empty() && + storage_options_.options.compression != rocksdb::kNoCompression) { + storage_options_.options.compression_per_level.push_back(rocksdb::kNoCompression); + 
storage_options_.options.compression_per_level.push_back(rocksdb::kNoCompression); + storage_options_.options.compression_per_level.push_back(storage_options_.options.compression); + } + + // For rocksdb::BlockBasedDBOptions + storage_options_.table_options.block_size = g_pika_conf->block_size(); + storage_options_.table_options.cache_index_and_filter_blocks = g_pika_conf->cache_index_and_filter_blocks(); + storage_options_.block_cache_size = g_pika_conf->block_cache(); + storage_options_.share_block_cache = g_pika_conf->share_block_cache(); + + storage_options_.table_options.pin_l0_filter_and_index_blocks_in_cache = + g_pika_conf->pin_l0_filter_and_index_blocks_in_cache(); + + if (storage_options_.block_cache_size == 0) { + storage_options_.table_options.no_block_cache = true; + } else if (storage_options_.share_block_cache) { + storage_options_.table_options.block_cache = + rocksdb::NewLRUCache(storage_options_.block_cache_size, static_cast(g_pika_conf->num_shard_bits())); + } + storage_options_.options.rate_limiter = + std::shared_ptr( + rocksdb::NewGenericRateLimiter( + g_pika_conf->rate_limiter_bandwidth(), + g_pika_conf->rate_limiter_refill_period_us(), + static_cast(g_pika_conf->rate_limiter_fairness()), + static_cast(g_pika_conf->rate_limiter_mode()), + g_pika_conf->rate_limiter_auto_tuned() + )); + // For Storage small compaction + storage_options_.statistics_max_size = g_pika_conf->max_cache_statistic_keys(); + storage_options_.small_compaction_threshold = g_pika_conf->small_compaction_threshold(); + + // For Storage compaction + storage_options_.compact_param_.best_delete_min_ratio_ = g_pika_conf->best_delete_min_ratio(); + storage_options_.compact_param_.dont_compact_sst_created_in_seconds_ = g_pika_conf->dont_compact_sst_created_in_seconds(); + storage_options_.compact_param_.force_compact_file_age_seconds_ = g_pika_conf->force_compact_file_age_seconds(); + storage_options_.compact_param_.force_compact_min_delete_ratio_ = g_pika_conf->force_compact_min_delete_ratio(); + storage_options_.compact_param_.compact_every_num_of_files_ = g_pika_conf->compact_every_num_of_files(); + + // rocksdb blob + if (g_pika_conf->enable_blob_files()) { + storage_options_.options.enable_blob_files = g_pika_conf->enable_blob_files(); + storage_options_.options.min_blob_size = g_pika_conf->min_blob_size(); + storage_options_.options.blob_file_size = g_pika_conf->blob_file_size(); + storage_options_.options.blob_compression_type = PikaConf::GetCompression(g_pika_conf->blob_compression_type()); + storage_options_.options.enable_blob_garbage_collection = g_pika_conf->enable_blob_garbage_collection(); + storage_options_.options.blob_garbage_collection_age_cutoff = g_pika_conf->blob_garbage_collection_age_cutoff(); + storage_options_.options.blob_garbage_collection_force_threshold = + g_pika_conf->blob_garbage_collection_force_threshold(); + if (g_pika_conf->blob_cache() > 0) { // blob cache less than 0,not open cache + storage_options_.options.blob_cache = + rocksdb::NewLRUCache(g_pika_conf->blob_cache(), static_cast(g_pika_conf->blob_num_shard_bits())); + } + } + + // for column-family options + storage_options_.options.ttl = g_pika_conf->rocksdb_ttl_second(); + storage_options_.options.periodic_compaction_seconds = g_pika_conf->rocksdb_periodic_compaction_second(); + + // For Partitioned Index Filters + if (g_pika_conf->enable_partitioned_index_filters()) { + storage_options_.table_options.index_type = rocksdb::BlockBasedTableOptions::IndexType::kTwoLevelIndexSearch; + 
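// Partitioned index/filter configuration, following the RocksDB wiki page
+    // "Partitioned Index Filters"; the 10 bits-per-key Bloom filter set up on
+    // the next line corresponds to roughly a 1% false-positive rate.
+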
storage_options_.table_options.filter_policy.reset(rocksdb::NewBloomFilterPolicy(10, false)); + storage_options_.table_options.partition_filters = true; + storage_options_.table_options.metadata_block_size = 4096; + storage_options_.table_options.cache_index_and_filter_blocks_with_high_priority = true; + storage_options_.table_options.pin_top_level_index_and_filter = true; + storage_options_.table_options.optimize_filters_for_memory = true; + } + // For statistics + storage_options_.enable_db_statistics = g_pika_conf->enable_db_statistics(); + storage_options_.db_statistics_level = g_pika_conf->db_statistics_level(); +} + +storage::Status PikaServer::RewriteStorageOptions(const storage::OptionType& option_type, + const std::unordered_map& options_map) { + storage::Status s; + std::shared_lock db_rwl(dbs_rw_); + for (const auto& db_item : dbs_) { + s = db_item.second->storage()->SetOptions(option_type, storage::ALL_DB, options_map); + if (!s.ok()) { + return s; + } + } + std::lock_guard rwl(storage_options_rw_); + s = storage_options_.ResetOptions(option_type, options_map); + return s; +} + +Status PikaServer::GetCmdRouting(std::vector& redis_cmds, std::vector* dst, + bool* all_local) { + UNUSED(redis_cmds); + UNUSED(dst); + *all_local = true; + return Status::OK(); +} + +void PikaServer::ServerStatus(std::string* info) { + std::stringstream tmp_stream; + size_t q_size = ClientProcessorThreadPoolCurQueueSize(); + tmp_stream << "Client Processor thread-pool queue size: " << q_size << "\r\n"; + info->append(tmp_stream.str()); +} + +bool PikaServer::SlotsMigrateBatch(const std::string &ip, int64_t port, int64_t time_out, int64_t slot_num,int64_t keys_num, const std::shared_ptr& db) { + return pika_migrate_thread_->ReqMigrateBatch(ip, port, time_out, slot_num, keys_num, db); +} + +void PikaServer::GetSlotsMgrtSenderStatus(std::string *ip, int64_t* port, int64_t *slot, bool *migrating, int64_t *moved, int64_t *remained) { + return pika_migrate_thread_->GetMigrateStatus(ip, port, slot, migrating, moved, remained); +} + +int PikaServer::SlotsMigrateOne(const std::string& key, const std::shared_ptr& db) { + return pika_migrate_thread_->ReqMigrateOne(key, db); +} + +bool PikaServer::SlotsMigrateAsyncCancel() { + pika_migrate_thread_->CancelMigrate(); + return true; +} + +void PikaServer::Bgslotsreload(const std::shared_ptr& db) { + // Only one thread can go through + { + std::lock_guard ml(bgslots_protector_); + if (bgslots_reload_.reloading || db->IsBgSaving()) { + return; + } + bgslots_reload_.reloading = true; + } + + bgslots_reload_.start_time = time(nullptr); + char s_time[32]; + size_t len = strftime(s_time, sizeof(s_time), "%Y%m%d%H%M%S", localtime(&bgslots_reload_.start_time)); + bgslots_reload_.s_start_time.assign(s_time, len); + bgslots_reload_.cursor = 0; + bgslots_reload_.pattern = "*"; + bgslots_reload_.count = 100; + bgslots_reload_.db = db; + + LOG(INFO) << "Start slot reloading"; + + // Start new thread if needed + bgsave_thread_.StartThread(); + bgsave_thread_.Schedule(&DoBgslotsreload, static_cast(this)); +} + +void DoBgslotsreload(void* arg) { + auto p = static_cast(arg); + PikaServer::BGSlotsReload reload = p->bgslots_reload(); + + // Do slotsreload + rocksdb::Status s; + std::vector keys; + int64_t cursor_ret = -1; + while(cursor_ret != 0 && p->GetSlotsreloading()) { + cursor_ret = reload.db->storage()->Scan(storage::DataType::kAll, reload.cursor, reload.pattern, reload.count, &keys); + + std::vector::const_iterator iter; + for (iter = keys.begin(); iter != keys.end(); iter++) { 
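+        // classify each scanned key and, unless it is slot bookkeeping
+        // metadata, re-register it under its slot set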
+        std::string key_type;
+        int ret = GetKeyType(*iter, key_type, reload.db);
+        // a slot bookkeeping key (slot set / tag set) must not be added back as a data key
+        if (ret > 0) {
+          if (key_type == "s" && ((*iter).find(SlotKeyPrefix) != std::string::npos || (*iter).find(SlotTagPrefix) != std::string::npos)) {
+            continue;
+          }
+          AddSlotKey(key_type, *iter, reload.db);
+        }
+      }
+
+      reload.cursor = cursor_ret;
+      p->SetSlotsreloadingCursor(cursor_ret);
+      keys.clear();
+    }
+    p->SetSlotsreloading(false);
+
+    if (cursor_ret == 0) {
+      LOG(INFO) << "Finish slot reloading";
+    } else {
+      LOG(INFO) << "Stop slot reloading";
+    }
+}
+
+void PikaServer::Bgslotscleanup(std::vector<int> cleanupSlots, const std::shared_ptr<DB>& db) {
+  // Only one cleanup may run at a time, and never while a reload or bgsave is in progress
+  {
+    std::lock_guard ml(bgslots_protector_);
+    if (bgslots_cleanup_.cleaningup || bgslots_reload_.reloading || db->IsBgSaving()) {
+      return;
+    }
+    bgslots_cleanup_.cleaningup = true;
+  }
+
+  bgslots_cleanup_.start_time = time(nullptr);
+  char s_time[32];
+  size_t len = strftime(s_time, sizeof(s_time), "%Y%m%d%H%M%S", localtime(&bgslots_cleanup_.start_time));
+  bgslots_cleanup_.s_start_time.assign(s_time, len);
+  bgslots_cleanup_.cursor = 0;
+  bgslots_cleanup_.pattern = "*";
+  bgslots_cleanup_.count = 100;
+  bgslots_cleanup_.db = db;
+  bgslots_cleanup_.cleanup_slots.swap(cleanupSlots);
+
+  // cleanupSlots was just swapped out, so read the slot list from the member
+  std::string slotsStr;
+  for (int slot : bgslots_cleanup_.cleanup_slots) {
+    slotsStr.append(std::to_string(slot) + " ");
+  }
+  LOG(INFO) << "Start slot cleanup, slots: " << slotsStr;
+
+  // Start new thread if needed
+  bgslots_cleanup_thread_.StartThread();
+  bgslots_cleanup_thread_.Schedule(&DoBgslotscleanup, static_cast<void*>(this));
+}
+int64_t PikaServer::GetLastSaveTime(const std::string& dir_path) {
+  std::vector<std::string> dump_dir;
+  // Dump directory does not exist
+  if (!pstd::FileExists(dir_path)) {
+    LOG(INFO) << "Dump file does not exist, path: " << dir_path;
+    return 0;
+  }
+  // guard against an unreadable or empty dump directory
+  if (pstd::GetChildren(dir_path, dump_dir) != 0 || dump_dir.empty()) {
+    return 0;
+  }
+  std::string dump_file = dir_path + dump_dir[0];
+  struct stat fileStat;
+  if (stat(dump_file.c_str(), &fileStat) == 0) {
+    return static_cast<int64_t>(fileStat.st_mtime);
+  }
+  return 0;
+}
+
+void PikaServer::AllClientUnAuth(const std::set<std::string>& users) {
+  pika_dispatch_thread_->UnAuthUserAndKillClient(users, acl_->GetUserLock(Acl::DefaultUser));
+}
+
+void PikaServer::CheckPubsubClientKill(const std::string& userName, const std::vector<std::string>& allChannel) {
+  pika_pubsub_thread_->ConnCanSubscribe(allChannel, [&](const std::shared_ptr<net::NetConn>& conn) -> bool {
+    auto pikaConn = std::dynamic_pointer_cast<PikaClientConn>(conn);
+    if (pikaConn && pikaConn->UserName() == userName) {
+      return true;
+    }
+    return false;
+  });
+}
+
+void PikaServer::DisableCompact() {
+  /* disable auto compactions */
+  std::unordered_map<std::string, std::string> options_map{{"disable_auto_compactions", "true"}};
+  storage::Status s = g_pika_server->RewriteStorageOptions(storage::OptionType::kColumnFamily, options_map);
+  if (!s.ok()) {
+    LOG(ERROR) << "-ERR Set storage::OptionType::kColumnFamily disable_auto_compactions error: " + s.ToString() + "\r\n";
+    return;
+  }
+  g_pika_conf->SetDisableAutoCompaction("true");
+
+  /* cancel in-progress manual compactions */
+  std::shared_lock rwl(dbs_rw_);
+  for (const auto& db_item : dbs_) {
+    db_item.second->DBLock();
+    db_item.second->SetCompactRangeOptions(true);
+    db_item.second->DBUnlock();
+  }
+}
+
+void DoBgslotscleanup(void* arg) {
+  auto p = static_cast<PikaServer*>(arg);
+  PikaServer::BGSlotsCleanup cleanup = p->bgslots_cleanup();
+
+  // Do slotscleanup
+  std::vector<std::string> keys;
+  int64_t cursor_ret = -1;
+  std::vector<int> cleanupSlots(cleanup.cleanup_slots);
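+  // walk the whole keyspace with SCAN; any data key hashing into one of the
+  // slots being cleaned is deleted from db, cache and binlog below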
+  while (cursor_ret != 0 && p->GetSlotscleaningup()) {
+    cursor_ret = g_pika_server->bgslots_cleanup_.db->storage()->Scan(storage::DataType::kAll, cleanup.cursor, cleanup.pattern, cleanup.count, &keys);
+
+    std::string key_type;
+    std::vector<std::string>::const_iterator iter;
+    for (iter = keys.begin(); iter != keys.end(); iter++) {
+      if ((*iter).find(SlotKeyPrefix) != std::string::npos || (*iter).find(SlotTagPrefix) != std::string::npos) {
+        continue;
+      }
+      if (std::find(cleanupSlots.begin(), cleanupSlots.end(), GetSlotID(g_pika_conf->default_slot_num(), *iter)) != cleanupSlots.end()) {
+        if (GetKeyType(*iter, key_type, g_pika_server->bgslots_cleanup_.db) <= 0) {
+          LOG(WARNING) << "slots clean get key type for slot " << GetSlotID(g_pika_conf->default_slot_num(), *iter) << " key " << *iter << " error";
+          continue;
+        }
+        if (DeleteKey(*iter, key_type[0], g_pika_server->bgslots_cleanup_.db) <= 0) {
+          LOG(WARNING) << "slots clean del for slot " << GetSlotID(g_pika_conf->default_slot_num(), *iter) << " key " << *iter << " error";
+        }
+      }
+    }
+
+    cleanup.cursor = cursor_ret;
+    p->SetSlotscleaningupCursor(cursor_ret);
+    keys.clear();
+  }
+
+  for (int cleanupSlot : cleanupSlots) {
+    WriteDelKeyToBinlog(GetSlotKey(cleanupSlot), g_pika_server->bgslots_cleanup_.db);
+    WriteDelKeyToBinlog(GetSlotsTagKey(cleanupSlot), g_pika_server->bgslots_cleanup_.db);
+  }
+
+  p->SetSlotscleaningup(false);
+  std::vector<int> empty;
+  p->SetCleanupSlots(empty);
+
+  std::string slotsStr;
+  for (int slot : cleanup.cleanup_slots) {
+    slotsStr.append(std::to_string(slot) + " ");
+  }
+  LOG(INFO) << "Finish slots cleanup, slots " << slotsStr;
+}
+
+void PikaServer::ResetCacheAsync(uint32_t cache_num, std::shared_ptr<DB> db, cache::CacheConfig *cache_cfg) {
+  if (PIKA_CACHE_STATUS_OK == db->cache()->CacheStatus()
+    || PIKA_CACHE_STATUS_NONE == db->cache()->CacheStatus()) {
+    common_bg_thread_.StartThread();
+    BGCacheTaskArg *arg = new BGCacheTaskArg();
+    arg->db = db;
+    arg->cache_num = cache_num;
+    if (cache_cfg == nullptr) {
+      arg->task_type = CACHE_BGTASK_RESET_NUM;
+    } else {
+      arg->task_type = CACHE_BGTASK_RESET_CFG;
+      arg->cache_cfg = *cache_cfg;
+    }
+    common_bg_thread_.Schedule(&DoCacheBGTask, static_cast<void*>(arg));
+  } else {
+    LOG(WARNING) << "cannot reset cache in status: " << db->cache()->CacheStatus();
+  }
+}
+
+void PikaServer::ClearCacheDbAsync(std::shared_ptr<DB> db) {
+  // disable cache temporarily, and restore it after the cache is cleared
+  g_pika_conf->SetCacheDisableFlag();
+  if (PIKA_CACHE_STATUS_OK != db->cache()->CacheStatus()) {
+    LOG(WARNING) << "cannot clear cache in status: " << db->cache()->CacheStatus();
+    return;
+  }
+  common_bg_thread_.StartThread();
+  BGCacheTaskArg *arg = new BGCacheTaskArg();
+  arg->db = db;
+  arg->task_type = CACHE_BGTASK_CLEAR;
+  common_bg_thread_.Schedule(&DoCacheBGTask, static_cast<void*>(arg));
+}
+
+void PikaServer::DoCacheBGTask(void* arg) {
+  std::unique_ptr<BGCacheTaskArg> pCacheTaskArg(static_cast<BGCacheTaskArg*>(arg));
+  std::shared_ptr<DB> db = pCacheTaskArg->db;
+
+  switch (pCacheTaskArg->task_type) {
+    case CACHE_BGTASK_CLEAR:
+      LOG(INFO) << "clear cache start...";
+      db->cache()->SetCacheStatus(PIKA_CACHE_STATUS_CLEAR);
+      g_pika_server->ResetDisplayCacheInfo(PIKA_CACHE_STATUS_CLEAR, db);
+      db->cache()->FlushCache();
+      LOG(INFO) << "clear cache finish";
+      break;
+    case CACHE_BGTASK_RESET_NUM:
+      LOG(INFO) << "reset cache num start...";
+      db->cache()->SetCacheStatus(PIKA_CACHE_STATUS_RESET);
+      g_pika_server->ResetDisplayCacheInfo(PIKA_CACHE_STATUS_RESET, db);
+      db->cache()->Reset(pCacheTaskArg->cache_num);
+      LOG(INFO) << "reset cache num finish";
+      break;
+    case 
CACHE_BGTASK_RESET_CFG: + LOG(INFO) << "reset cache config start..."; + db->cache()->SetCacheStatus(PIKA_CACHE_STATUS_RESET); + g_pika_server->ResetDisplayCacheInfo(PIKA_CACHE_STATUS_RESET, db); + db->cache()->Reset(pCacheTaskArg->cache_num); + LOG(INFO) << "reset cache config finish"; + break; + default: + LOG(WARNING) << "invalid cache task type: " << pCacheTaskArg->task_type; + break; + } + + db->cache()->SetCacheStatus(PIKA_CACHE_STATUS_OK); + g_pika_conf->UnsetCacheDisableFlag(); +} + +void PikaServer::ResetCacheConfig(std::shared_ptr db) { + cache::CacheConfig cache_cfg; + cache_cfg.maxmemory = g_pika_conf->cache_maxmemory(); + cache_cfg.maxmemory_policy = g_pika_conf->cache_maxmemory_policy(); + cache_cfg.maxmemory_samples = g_pika_conf->cache_maxmemory_samples(); + cache_cfg.lfu_decay_time = g_pika_conf->cache_lfu_decay_time(); + cache_cfg.zset_cache_start_direction = g_pika_conf->zset_cache_start_direction(); + cache_cfg.zset_cache_field_num_per_key = g_pika_conf->zset_cache_field_num_per_key(); + db->cache()->ResetConfig(&cache_cfg); +} + +void PikaServer::ClearHitRatio(std::shared_ptr db) { + db->cache()->ClearHitRatio(); +} + +void PikaServer::OnCacheStartPosChanged(int zset_cache_start_direction, std::shared_ptr db) { + ResetCacheConfig(db); + ClearCacheDbAsyncV2(db); +} + +void PikaServer::ClearCacheDbAsyncV2(std::shared_ptr db) { + if (PIKA_CACHE_STATUS_OK != db->cache()->CacheStatus()) { + LOG(WARNING) << "can not clear cache in status: " << db->cache()->CacheStatus(); + return; + } + common_bg_thread_.StartThread(); + BGCacheTaskArg *arg = new BGCacheTaskArg(); + arg->db = db; + arg->task_type = CACHE_BGTASK_CLEAR; + arg->conf = std::move(g_pika_conf); + arg->reenable_cache = true; + common_bg_thread_.Schedule(&DoCacheBGTask, static_cast(arg)); +} + +void PikaServer::ProcessCronTask() { + for (auto& dbs : dbs_) { + auto cache = dbs.second->cache(); + cache->ProcessCronTask(); + } +} + +double PikaServer::HitRatio(void) { + std::unique_lock l(mu_); + int64_t hits = 0; + int64_t misses = 0; + cache::RedisCache::GetHitAndMissNum(&hits, &misses); + int64_t all_cmds = hits + misses; + if (0 >= all_cmds) { + return 0; + } + return hits / (all_cmds * 1.0); +} + +void PikaServer::UpdateCacheInfo(void) { + for (auto& dbs : dbs_) { + if (PIKA_CACHE_STATUS_OK != dbs.second->cache()->CacheStatus()) { + return; + } + // get cache info from redis cache + CacheInfo cache_info; + dbs.second->cache()->Info(cache_info); + dbs.second->UpdateCacheInfo(cache_info); + } +} + +void PikaServer::ResetDisplayCacheInfo(int status, std::shared_ptr db) { + db->ResetDisplayCacheInfo(status); +} + +void PikaServer::CacheConfigInit(cache::CacheConfig& cache_cfg) { + cache_cfg.maxmemory = g_pika_conf->cache_maxmemory(); + cache_cfg.maxmemory_policy = g_pika_conf->cache_maxmemory_policy(); + cache_cfg.maxmemory_samples = g_pika_conf->cache_maxmemory_samples(); + cache_cfg.lfu_decay_time = g_pika_conf->cache_lfu_decay_time(); +} diff --git a/tools/pika_migrate/src/pika_set.cc b/tools/pika_migrate/src/pika_set.cc new file mode 100644 index 0000000000..66ca7f168e --- /dev/null +++ b/tools/pika_migrate/src/pika_set.cc @@ -0,0 +1,729 @@ +// Copyright (c) 2015-present, Qihoo, Inc. All rights reserved. +// This source code is licensed under the BSD-style license found in the +// LICENSE file in the root directory of this source tree. An additional grant +// of patent rights can be found in the PATENTS file in the same directory. 
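+// pika_set.cc implements the Redis set command family (SADD, SPOP, SCARD,
+// SMEMBERS, SSCAN, SREM, the set-algebra commands and SRANDMEMBER), each
+// wired through the storage engine, the cache and the binlog.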
+ +#include "include/pika_set.h" +#include "include/pika_cache.h" +#include "include/pika_conf.h" +#include "pstd/include/pstd_string.h" +#include "include/pika_slot_command.h" + +void SAddCmd::DoInitial() { + if (!CheckArg(argv_.size())) { + res_.SetRes(CmdRes::kWrongNum, kCmdNameSAdd); + return; + } + key_ = argv_[1]; + auto iter = argv_.begin(); + iter++; + iter++; + members_.assign(iter, argv_.end()); +} + +void SAddCmd::Do() { + int32_t count = 0; + s_ = db_->storage()->SAdd(key_, members_, &count); + if (s_.IsInvalidArgument()) { + res_.SetRes(CmdRes::kMultiKey); + return; + } else if (!s_.ok()) { + res_.SetRes(CmdRes::kErrOther, s_.ToString()); + return; + } + AddSlotKey("s", key_, db_); + res_.AppendInteger(count); +} + +void SAddCmd::DoThroughDB() { + Do(); +} + +void SAddCmd::DoUpdateCache() { + if (s_.ok()) { + db_->cache()->SAddIfKeyExist(key_, members_); + } +} + +void SPopCmd::DoInitial() { + size_t argc = argv_.size(); + if (!CheckArg(argc)) { + res_.SetRes(CmdRes::kWrongNum, kCmdNameSPop); + return; + } + count_ = 1; + key_ = argv_[1]; + if (argc > 3) { + res_.SetRes(CmdRes::kWrongNum, kCmdNameSPop); + } else if (argc == 3) { + if (pstd::string2int(argv_[2].data(), argv_[2].size(), &count_) == 0) { + res_.SetRes(CmdRes::kErrOther, kCmdNameSPop); + return; + } + if (count_ <= 0) { + res_.SetRes(CmdRes::kErrOther, kCmdNameSPop); + return; + } + } +} + +void SPopCmd::Do() { + s_ = db_->storage()->SPop(key_, &members_, count_); + if (s_.ok()) { + res_.AppendArrayLenUint64(members_.size()); + for (const auto& member : members_) { + res_.AppendStringLenUint64(member.size()); + res_.AppendContent(member); + } + } else if (s_.IsNotFound()) { + res_.AppendContent("$-1"); + res_.SetRes(CmdRes::kNoExists); + } else if (s_.IsInvalidArgument()) { + res_.SetRes(CmdRes::kMultiKey); + } else { + res_.SetRes(CmdRes::kErrOther, s_.ToString()); + } +} + +void SPopCmd::DoThroughDB() { + Do(); +} + +void SPopCmd::DoUpdateCache() { + if (s_.ok()) { + db_->cache()->SRem(key_, members_); + } +} + +void SPopCmd::DoBinlog() { + if (!s_.ok()) { + return; + } + + PikaCmdArgsType srem_args; + srem_args.emplace_back("srem"); + srem_args.emplace_back(key_); + for (auto m = members_.begin(); m != members_.end(); ++m) { + srem_args.emplace_back(*m); + } + + srem_cmd_->Initial(srem_args, db_name_); + srem_cmd_->SetConn(GetConn()); + srem_cmd_->SetResp(resp_.lock()); + srem_cmd_->DoBinlog(); +} + +void SCardCmd::DoInitial() { + if (!CheckArg(argv_.size())) { + res_.SetRes(CmdRes::kWrongNum, kCmdNameSCard); + return; + } + key_ = argv_[1]; +} + +void SCardCmd::Do() { + int32_t card = 0; + s_ = db_->storage()->SCard(key_, &card); + if (s_.ok()) { + res_.AppendInteger(card); + } else if (s_.IsNotFound()) { + res_.AppendInteger(card); + res_.SetRes(CmdRes::kNoExists); + } else if (s_.IsInvalidArgument()) { + res_.SetRes(CmdRes::kMultiKey); + } else { + res_.SetRes(CmdRes::kErrOther, "scard error"); + } +} + +void SCardCmd::ReadCache() { + uint64_t card = 0; + auto s = db_->cache()->SCard(key_, &card); + if (s.ok()) { + res_.AppendInteger(card); + } else if (s.IsNotFound()) { + res_.SetRes(CmdRes::kCacheMiss); + } else { + res_.SetRes(CmdRes::kErrOther, "scard error"); + } +} + +void SCardCmd::DoThroughDB() { + res_.clear(); + Do(); +} + +void SCardCmd::DoUpdateCache() { + if (s_.ok()) { + db_->cache()->PushKeyToAsyncLoadQueue(PIKA_KEY_TYPE_SET, key_, db_); + } +} + +void SMembersCmd::DoInitial() { + if (!CheckArg(argv_.size())) { + res_.SetRes(CmdRes::kWrongNum, kCmdNameSMembers); + return; + } + key_ = 
argv_[1];
+}
+
+void SMembersCmd::Do() {
+  std::vector<std::string> members;
+  s_ = db_->storage()->SMembers(key_, &members);
+  if (s_.ok()) {
+    res_.AppendArrayLenUint64(members.size());
+    for (const auto& member : members) {
+      res_.AppendStringLenUint64(member.size());
+      res_.AppendContent(member);
+    }
+  } else if (s_.IsNotFound()) {
+    res_.SetRes(CmdRes::kNoExists);
+    res_.AppendArrayLenUint64(members.size());
+  } else if (s_.IsInvalidArgument()) {
+    res_.SetRes(CmdRes::kMultiKey);
+  } else {
+    res_.SetRes(CmdRes::kErrOther, s_.ToString());
+  }
+}
+
+void SMembersCmd::ReadCache() {
+  std::vector<std::string> members;
+  auto s = db_->cache()->SMembers(key_, &members);
+  if (s.ok()) {
+    res_.AppendArrayLen(members.size());
+    for (const auto& member : members) {
+      res_.AppendStringLen(member.size());
+      res_.AppendContent(member);
+    }
+  } else if (s.IsNotFound()) {
+    res_.SetRes(CmdRes::kCacheMiss);
+  } else {
+    res_.SetRes(CmdRes::kErrOther, s.ToString());
+  }
+}
+
+void SMembersCmd::DoThroughDB() {
+  res_.clear();
+  Do();
+}
+
+void SMembersCmd::DoUpdateCache() {
+  if (s_.ok()) {
+    db_->cache()->PushKeyToAsyncLoadQueue(PIKA_KEY_TYPE_SET, key_, db_);
+  }
+}
+
+void SScanCmd::DoInitial() {
+  if (!CheckArg(argv_.size())) {
+    res_.SetRes(CmdRes::kWrongNum, kCmdNameSScan);
+    return;
+  }
+  key_ = argv_[1];
+  if (pstd::string2int(argv_[2].data(), argv_[2].size(), &cursor_) == 0) {
+    res_.SetRes(CmdRes::kWrongNum, kCmdNameSScan);
+    return;
+  }
+  size_t argc = argv_.size();
+  size_t index = 3;
+  while (index < argc) {
+    std::string opt = argv_[index];
+    if ((strcasecmp(opt.data(), "match") == 0) || (strcasecmp(opt.data(), "count") == 0)) {
+      index++;
+      if (index >= argc) {
+        res_.SetRes(CmdRes::kSyntaxErr);
+        return;
+      }
+      if (strcasecmp(opt.data(), "match") == 0) {
+        pattern_ = argv_[index];
+      } else if (pstd::string2int(argv_[index].data(), argv_[index].size(), &count_) == 0) {
+        res_.SetRes(CmdRes::kInvalidInt);
+        return;
+      }
+    } else {
+      res_.SetRes(CmdRes::kSyntaxErr);
+      return;
+    }
+    index++;
+  }
+  if (count_ < 0) {
+    res_.SetRes(CmdRes::kSyntaxErr);
+    return;
+  }
+}
+
+void SScanCmd::Do() {
+  int64_t next_cursor = 0;
+  std::vector<std::string> members;
+  rocksdb::Status s = db_->storage()->SScan(key_, cursor_, pattern_, count_, &members, &next_cursor);
+
+  if (s.ok()) {
+    res_.AppendContent("*2");
+    char buf[32];
+    int64_t len = pstd::ll2string(buf, sizeof(buf), next_cursor);
+    res_.AppendStringLen(len);
+    res_.AppendContent(buf);
+
+    res_.AppendArrayLenUint64(members.size());
+    for (const auto& member : members) {
+      res_.AppendString(member);
+    }
+  } else if (s.IsNotFound()) {
+    res_.AppendContent("*2");
+    char buf[32];
+    int64_t len = pstd::ll2string(buf, sizeof(buf), next_cursor);
+    res_.AppendStringLen(len);
+    res_.AppendContent(buf);
+
+    res_.AppendArrayLenUint64(members.size());
+    res_.SetRes(CmdRes::kNoExists);
+  } else if (s.IsInvalidArgument()) {
+    res_.SetRes(CmdRes::kMultiKey);
+  } else {
+    res_.SetRes(CmdRes::kErrOther, s.ToString());
+  }
+}
+
+void SRemCmd::DoInitial() {
+  if (!CheckArg(argv_.size())) {
+    res_.SetRes(CmdRes::kWrongNum, kCmdNameSRem);
+    return;
+  }
+  key_ = argv_[1];
+  auto iter = argv_.begin();
+  iter++;
+  members_.assign(++iter, argv_.end());
+}
+
+void SRemCmd::Do() {
+  s_ = db_->storage()->SRem(key_, members_, &deleted_);
+  if (s_.ok()) {
+    res_.AppendInteger(deleted_);
+  } else if (s_.IsNotFound()) {
+    res_.SetRes(CmdRes::kNoExists);
+    res_.AppendInteger(deleted_);
+  } else if (s_.IsInvalidArgument()) {
+    res_.SetRes(CmdRes::kMultiKey);
+  } else {
+    res_.SetRes(CmdRes::kErrOther, s_.ToString());
+  }
+}
+
+void SRemCmd::DoThroughDB() {
+  Do();
+}
+
+void SRemCmd::DoUpdateCache() {
+  if (s_.ok() && deleted_ > 0) {
+    db_->cache()->SRem(key_, members_);
+  }
+}
+
+void SUnionCmd::DoInitial() {
+  if (!CheckArg(argv_.size())) {
+    res_.SetRes(CmdRes::kWrongNum, kCmdNameSUnion);
+    return;
+  }
+  auto iter = argv_.begin();
+  keys_.assign(++iter, argv_.end());
+}
+
+void SUnionCmd::Do() {
+  std::vector<std::string> members;
+  s_ = db_->storage()->SUnion(keys_, &members);
+  if (s_.ok()) {
+    res_.AppendArrayLenUint64(members.size());
+    for (const auto& member : members) {
+      res_.AppendStringLenUint64(member.size());
+      res_.AppendContent(member);
+    }
+  } else if (s_.IsInvalidArgument()) {
+    res_.SetRes(CmdRes::kMultiKey);
+  } else {
+    res_.SetRes(CmdRes::kErrOther, s_.ToString());
+  }
+}
+
+void SUnionstoreCmd::DoInitial() {
+  if (!CheckArg(argv_.size())) {
+    res_.SetRes(CmdRes::kWrongNum, kCmdNameSUnionstore);
+    return;
+  }
+  dest_key_ = argv_[1];
+  auto iter = argv_.begin();
+  iter++;
+  keys_.assign(++iter, argv_.end());
+}
+
+void SUnionstoreCmd::Do() {
+  int32_t count = 0;
+  s_ = db_->storage()->SUnionstore(dest_key_, keys_, value_to_dest_, &count);
+  if (s_.ok()) {
+    res_.AppendInteger(count);
+  } else if (s_.IsInvalidArgument()) {
+    res_.SetRes(CmdRes::kMultiKey);
+  } else {
+    res_.SetRes(CmdRes::kErrOther, s_.ToString());
+  }
+}
+
+void SUnionstoreCmd::DoThroughDB() {
+  Do();
+}
+
+void SUnionstoreCmd::DoUpdateCache() {
+  if (s_.ok()) {
+    std::vector<std::string> v;
+    v.emplace_back(dest_key_);
+    db_->cache()->Del(v);
+  }
+}
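+
+// Store-variant set operations replay into the binlog as a DEL of the
+// destination key followed by batched SADDs, so a replica rebuilds dest_key_
+// deterministically instead of re-running the union/diff/inter itself.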
+void SetOperationCmd::DoBinlog() {
+  PikaCmdArgsType del_args;
+  del_args.emplace_back("del");
+  del_args.emplace_back(dest_key_);
+  del_cmd_->Initial(del_args, db_name_);
+  del_cmd_->SetConn(GetConn());
+  del_cmd_->SetResp(resp_.lock());
+  del_cmd_->DoBinlog();
+
+  if (value_to_dest_.empty()) {
+    // the union/diff/inter produced an empty set; the DEL above already
+    // simulates overwriting dest_key with an empty set
+    return;
+  }
+
+  PikaCmdArgsType initial_args;
+  initial_args.emplace_back("sadd");  // lowercase "sadd" distinguishes this binlog entry from SAddCmd, which logs "SADD"
+  initial_args.emplace_back(dest_key_);
+  initial_args.emplace_back(value_to_dest_[0]);
+  sadd_cmd_->Initial(initial_args, db_name_);
+  sadd_cmd_->SetConn(GetConn());
+  sadd_cmd_->SetResp(resp_.lock());
+
+  auto& sadd_argv = sadd_cmd_->argv();
+  size_t data_size = value_to_dest_[0].size();
+
+  for (size_t i = 1; i < value_to_dest_.size(); i++) {
+    if (data_size >= 131072) {
+      // flush this chunk once the accumulated members reach 
(131,072 bytes = 128KB) + sadd_cmd_->DoBinlog(); + sadd_argv.clear(); + sadd_argv.emplace_back("sadd"); + sadd_argv.emplace_back(dest_key_); + data_size = 0; + } + sadd_argv.emplace_back(value_to_dest_[i]); + data_size += value_to_dest_[i].size(); + } + sadd_cmd_->DoBinlog(); +} + +void SInterCmd::DoInitial() { + if (!CheckArg(argv_.size())) { + res_.SetRes(CmdRes::kWrongNum, kCmdNameSInter); + return; + } + auto iter = argv_.begin(); + keys_.assign(++iter, argv_.end()); +} + +void SInterCmd::Do() { + std::vector members; + s_ = db_->storage()->SInter(keys_, &members); + if (s_.ok() || s_.IsNotFound()) { + res_.AppendArrayLenUint64(members.size()); + for (const auto& member : members) { + res_.AppendStringLenUint64(member.size()); + res_.AppendContent(member); + } + } else if (s_.IsInvalidArgument()) { + res_.SetRes(CmdRes::kMultiKey); + } else { + res_.SetRes(CmdRes::kErrOther, s_.ToString()); + } +} + +void SInterstoreCmd::DoInitial() { + if (!CheckArg(argv_.size())) { + res_.SetRes(CmdRes::kWrongNum, kCmdNameSInterstore); + return; + } + dest_key_ = argv_[1]; + auto iter = argv_.begin(); + iter++; + keys_.assign(++iter, argv_.end()); +} + +void SInterstoreCmd::Do() { + int32_t count = 0; + s_ = db_->storage()->SInterstore(dest_key_, keys_, value_to_dest_, &count); + if (s_.ok()) { + res_.AppendInteger(count); + } else if (s_.IsInvalidArgument()) { + res_.SetRes(CmdRes::kMultiKey); + } else { + res_.SetRes(CmdRes::kErrOther, s_.ToString()); + } +} + +void SInterstoreCmd::DoThroughDB() { + Do(); +} + +void SInterstoreCmd::DoUpdateCache() { + if (s_.ok()) { + std::vector v; + v.emplace_back(dest_key_); + db_->cache()->Del(v); + } +} + +void SIsmemberCmd::DoInitial() { + if (!CheckArg(argv_.size())) { + res_.SetRes(CmdRes::kWrongNum, kCmdNameSIsmember); + return; + } + key_ = argv_[1]; + member_ = argv_[2]; +} + +void SIsmemberCmd::Do() { + int32_t is_member = 0; + s_ = db_->storage()->SIsmember(key_, member_, &is_member); + if (is_member != 0) { + res_.AppendContent(":1"); + } else if (s_.IsInvalidArgument()) { + res_.SetRes(CmdRes::kMultiKey); + } else { + res_.AppendContent(":0"); + } + if (s_.IsNotFound()) { + res_.SetRes(CmdRes::kNoExists); + } +} + +void SIsmemberCmd::ReadCache() { + auto s = db_->cache()->SIsmember(key_, member_); + if (s.ok()) { + res_.AppendContent(":1"); + } else if (s.IsNotFound()) { + res_.SetRes(CmdRes::kCacheMiss); + } else { + res_.SetRes(CmdRes::kErrOther, s.ToString()); + } +} + + +void SIsmemberCmd::DoThroughDB() { + res_.clear(); + Do(); +} + +void SIsmemberCmd::DoUpdateCache() { + if (s_.ok()) { + db_->cache()->PushKeyToAsyncLoadQueue(PIKA_KEY_TYPE_SET, key_, db_); + } +} + +void SDiffCmd::DoInitial() { + if (!CheckArg(argv_.size())) { + res_.SetRes(CmdRes::kWrongNum, kCmdNameSDiff); + return; + } + auto iter = argv_.begin(); + keys_.assign(++iter, argv_.end()); +} + +void SDiffCmd::Do() { + std::vector members; + s_ = db_->storage()->SDiff(keys_, &members); + if (s_.ok() || s_.IsNotFound()) { + res_.AppendArrayLenUint64(members.size()); + for (const auto& member : members) { + res_.AppendStringLenUint64(member.size()); + res_.AppendContent(member); + } + } else if (s_.IsInvalidArgument()) { + res_.SetRes(CmdRes::kMultiKey); + } else { + res_.SetRes(CmdRes::kErrOther,s_.ToString()); + } +} + +void SDiffstoreCmd::DoInitial() { + if (!CheckArg(argv_.size())) { + res_.SetRes(CmdRes::kWrongNum, kCmdNameSDiffstore); + return; + } + dest_key_ = argv_[1]; + auto iter = argv_.begin(); + iter++; + keys_.assign(++iter, argv_.end()); +} + +void SDiffstoreCmd::Do() 
{ + int32_t count = 0; + s_ = db_->storage()->SDiffstore(dest_key_, keys_, value_to_dest_, &count); + if (s_.ok()) { + res_.AppendInteger(count); + } else if (s_.IsInvalidArgument()) { + res_.SetRes(CmdRes::kMultiKey); + } else { + res_.SetRes(CmdRes::kErrOther, s_.ToString()); + } +} + +void SDiffstoreCmd::DoThroughDB() { + Do(); +} + +void SDiffstoreCmd::DoUpdateCache() { + if (s_.ok()) { + std::vector v; + v.emplace_back(dest_key_); + db_->cache()->Del(v); + } +} + +void SMoveCmd::DoInitial() { + if (!CheckArg(argv_.size())) { + res_.SetRes(CmdRes::kWrongNum, kCmdNameSMove); + return; + } + src_key_ = argv_[1]; + dest_key_ = argv_[2]; + member_ = argv_[3]; +} + +void SMoveCmd::Do() { + int32_t res = 0; + s_ = db_->storage()->SMove(src_key_, dest_key_, member_, &res); + if (s_.ok()) { + res_.AppendInteger(res); + move_success_ = res; + } else if (s_.IsNotFound()) { + res_.AppendInteger(res); + move_success_ = res; + res_.SetRes(CmdRes::kNoExists); + } else if (s_.IsInvalidArgument()) { + res_.SetRes(CmdRes::kMultiKey); + } else { + res_.SetRes(CmdRes::kErrOther, s_.ToString()); + } +} + +void SMoveCmd::DoThroughDB() { + Do(); +} + +void SMoveCmd::DoUpdateCache() { + if (s_.ok()) { + std::vector members; + members.emplace_back(member_); + db_->cache()->SRem(src_key_, members); + db_->cache()->SAddIfKeyExist(dest_key_, members); + } +} + +void SMoveCmd::DoBinlog() { + if (!move_success_) { + //the member is not in the source set, nothing changed + return; + } + PikaCmdArgsType srem_args; + //SremCmd use "SREM", SMove use "srem" + srem_args.emplace_back("srem"); + srem_args.emplace_back(src_key_); + srem_args.emplace_back(member_); + srem_cmd_->Initial(srem_args, db_name_); + + PikaCmdArgsType sadd_args; + //Saddcmd use "SADD", Smovecmd use "sadd" + sadd_args.emplace_back("sadd"); + sadd_args.emplace_back(dest_key_); + sadd_args.emplace_back(member_); + sadd_cmd_->Initial(sadd_args, db_name_); + + srem_cmd_->SetConn(GetConn()); + srem_cmd_->SetResp(resp_.lock()); + sadd_cmd_->SetConn(GetConn()); + sadd_cmd_->SetResp(resp_.lock()); + + srem_cmd_->DoBinlog(); + sadd_cmd_->DoBinlog(); +} + +void SRandmemberCmd::DoInitial() { + if (!CheckArg(argv_.size())) { + res_.SetRes(CmdRes::kWrongNum, kCmdNameSRandmember); + return; + } + key_ = argv_[1]; + if (argv_.size() > 3) { + res_.SetRes(CmdRes::kWrongNum, kCmdNameSRandmember); + return; + } else if (argv_.size() == 3) { + if (pstd::string2int(argv_[2].data(), argv_[2].size(), &count_) == 0) { + res_.SetRes(CmdRes::kInvalidInt); + } else { + reply_arr = true; + } + } +} + +void SRandmemberCmd::Do() { + std::vector members; + s_ = db_->storage()->SRandmember(key_, static_cast(count_), &members); + if (s_.ok()) { + if (!reply_arr && (static_cast(!members.empty()) != 0U)) { + res_.AppendStringLenUint64(members[0].size()); + res_.AppendContent(members[0]); + } else { + res_.AppendArrayLenUint64(members.size()); + for (const auto& member : members) { + res_.AppendStringLenUint64(member.size()); + res_.AppendContent(member); + } + } + } else if (s_.IsNotFound()) { + res_.SetRes(CmdRes::kNoExists); + res_.AppendArrayLenUint64(members.size()); + } else if (s_.IsInvalidArgument()) { + res_.SetRes(CmdRes::kMultiKey); + } else { + res_.SetRes(CmdRes::kErrOther, s_.ToString()); + } +} + +void SRandmemberCmd::ReadCache() { + std::vector members; + auto s = db_->cache()->SRandmember(key_, count_, &members); + if (s.ok()) { + if (!reply_arr && members.size()) { + res_.AppendStringLen(members[0].size()); + res_.AppendContent(members[0]); + } else { + 
res_.AppendArrayLen(members.size()); + for (const auto& member : members) { + res_.AppendStringLen(member.size()); + res_.AppendContent(member); + } + } + } else if (s.IsNotFound()) { + res_.SetRes(CmdRes::kCacheMiss); + } else { + res_.SetRes(CmdRes::kErrOther, s.ToString()); + } +} + +void SRandmemberCmd::DoThroughDB() { + res_.clear(); + Do(); +} + +void SRandmemberCmd::DoUpdateCache() { + if (s_.ok()) { + db_->cache()->PushKeyToAsyncLoadQueue(PIKA_KEY_TYPE_SET, key_, db_); + } +} + diff --git a/tools/pika_migrate/src/pika_slave_node.cc b/tools/pika_migrate/src/pika_slave_node.cc new file mode 100644 index 0000000000..a9adbd89b8 --- /dev/null +++ b/tools/pika_migrate/src/pika_slave_node.cc @@ -0,0 +1,107 @@ +// Copyright (c) 2015-present, Qihoo, Inc. All rights reserved. +// This source code is licensed under the BSD-style license found in the +// LICENSE file in the root directory of this source tree. An additional grant +// of patent rights can be found in the PATENTS file in the same directory. + +#include "include/pika_slave_node.h" +#include "include/pika_conf.h" + +using pstd::Status; + +extern std::unique_ptr g_pika_conf; + +/* SyncWindow */ + +void SyncWindow::Push(const SyncWinItem& item) { + win_.push_back(item); + total_size_ += item.binlog_size_; +} + +bool SyncWindow::Update(const SyncWinItem& start_item, const SyncWinItem& end_item, LogOffset* acked_offset) { + size_t start_pos = win_.size(); + size_t end_pos = win_.size(); + for (size_t i = 0; i < win_.size(); ++i) { + if (win_[i] == start_item) { + start_pos = i; + } + if (win_[i] == end_item) { + end_pos = i; + break; + } + } + if (start_pos == win_.size() || end_pos == win_.size()) { + LOG(WARNING) << "Ack offset Start: " << start_item.ToString() << "End: " << end_item.ToString() + << " not found in binlog controller window." << std::endl + << "window status " << std::endl + << ToStringStatus(); + return false; + } + for (size_t i = start_pos; i <= end_pos; ++i) { + win_[i].acked_ = true; + total_size_ -= win_[i].binlog_size_; + } + while (!win_.empty()) { + if (win_[0].acked_) { + *acked_offset = win_[0].offset_; + win_.pop_front(); + } else { + break; + } + } + return true; +} + +int SyncWindow::Remaining() { + std::size_t remaining_size = g_pika_conf->sync_window_size() - win_.size(); + return static_cast(remaining_size > 0 ? 
remaining_size : 0); +} + +/* SlaveNode */ + +SlaveNode::SlaveNode(const std::string& ip, int port, const std::string& db_name, int session_id) + : RmNode(ip, port, db_name, session_id) + + {} + +SlaveNode::~SlaveNode() = default; + +Status SlaveNode::InitBinlogFileReader(const std::shared_ptr& binlog, const BinlogOffset& offset) { + binlog_reader = std::make_shared(); + int res = binlog_reader->Seek(binlog, offset.filenum, offset.offset); + if (res != 0) { + return Status::Corruption(ToString() + " binlog reader init failed"); + } + return Status::OK(); +} + +std::string SlaveNode::ToStringStatus() { + std::stringstream tmp_stream; + tmp_stream << " Slave_state: " << SlaveStateMsg[slave_state] << "\r\n"; + tmp_stream << " Binlog_sync_state: " << BinlogSyncStateMsg[b_state] << "\r\n"; + tmp_stream << " Sync_window: " + << "\r\n" + << sync_win.ToStringStatus(); + tmp_stream << " Sent_offset: " << sent_offset.ToString() << "\r\n"; + tmp_stream << " Acked_offset: " << acked_offset.ToString() << "\r\n"; + tmp_stream << " Binlog_reader activated: " << (binlog_reader != nullptr) << "\r\n"; + return tmp_stream.str(); +} + +Status SlaveNode::Update(const LogOffset& start, const LogOffset& end, LogOffset* updated_offset) { + if (slave_state != kSlaveBinlogSync) { + return Status::Corruption(ToString() + "state not BinlogSync"); + } + *updated_offset = LogOffset(); + bool res = sync_win.Update(SyncWinItem(start), SyncWinItem(end), updated_offset); + if (!res) { + return Status::Corruption("UpdateAckedInfo failed"); + } + if (*updated_offset == LogOffset()) { + // nothing to update return current acked_offset + *updated_offset = acked_offset; + return Status::OK(); + } + // update acked_offset + acked_offset = *updated_offset; + return Status::OK(); +} diff --git a/tools/pika_migrate/src/pika_slot_command.cc b/tools/pika_migrate/src/pika_slot_command.cc new file mode 100644 index 0000000000..9340a6ebb2 --- /dev/null +++ b/tools/pika_migrate/src/pika_slot_command.cc @@ -0,0 +1,1530 @@ +// Copyright (c) 2015-present, Qihoo, Inc. All rights reserved. +// This source code is licensed under the BSD-style license found in the +// LICENSE file in the root directory of this source tree. An additional grant +// of patent rights can be found in the PATENTS file in the same directory. + +#include +#include +#include +#include + +#include "include/pika_admin.h" +#include "include/pika_cmd_table_manager.h" +#include "include/pika_command.h" +#include "include/pika_conf.h" +#include "include/pika_data_distribution.h" +#include "include/pika_define.h" +#include "include/pika_migrate_thread.h" +#include "include/pika_rm.h" +#include "include/pika_server.h" +#include "include/pika_slot_command.h" +#include "pstd/include/pika_codis_slot.h" +#include "pstd/include/pstd_status.h" +#include "pstd/include/pstd_string.h" +#include "src/redis_streams.h" +#include "storage/include/storage/storage.h" + +#define min(a, b) (((a) > (b)) ? 
(b) : (a))
+#define MAX_MEMBERS_NUM 512
+
+extern std::unique_ptr<PikaServer> g_pika_server;
+extern std::unique_ptr<PikaConf> g_pika_conf;
+extern std::unique_ptr<PikaReplicaManager> g_pika_rm;
+extern std::unique_ptr<PikaCmdTableManager> g_pika_cmd_table_manager;
+
+PikaMigrate::PikaMigrate() { migrate_clients_.clear(); }
+
+PikaMigrate::~PikaMigrate() {
+  // close and release all clients under the mutex
+  std::lock_guard lm(mutex_);
+  KillAllMigrateClient();
+}
+
+net::NetCli *PikaMigrate::GetMigrateClient(const std::string &host, const int port, int timeout) {
+  std::string ip_port = host + ":" + std::to_string(port);
+  net::NetCli *migrate_cli;
+  pstd::Status s;
+
+  auto migrate_clients_iter = migrate_clients_.find(ip_port);
+  if (migrate_clients_iter == migrate_clients_.end()) {
+    migrate_cli = net::NewRedisCli();
+    s = migrate_cli->Connect(host, port, g_pika_server->host());
+    if (!s.ok()) {
+      LOG(ERROR) << "GetMigrateClient: new migrate_cli[" << ip_port.c_str() << "] failed";
+
+      delete migrate_cli;
+      return nullptr;
+    }
+
+    LOG(INFO) << "GetMigrateClient: new migrate_cli[" << ip_port.c_str() << "]";
+
+    // add the new migrate client to the map
+    migrate_clients_[ip_port] = migrate_cli;
+  } else {
+    migrate_cli = static_cast<net::NetCli*>(migrate_clients_iter->second);
+  }
+
+  // set the client send/recv timeouts
+  migrate_cli->set_send_timeout(timeout);
+  migrate_cli->set_recv_timeout(timeout);
+
+  // record the time of the last interaction with this client
+  gettimeofday(&migrate_cli->last_interaction_, nullptr);
+
+  return migrate_cli;
+}
+
+void PikaMigrate::KillMigrateClient(net::NetCli *migrate_cli) {
+  auto migrate_clients_iter = migrate_clients_.begin();
+  while (migrate_clients_iter != migrate_clients_.end()) {
+    if (migrate_cli == static_cast<net::NetCli*>(migrate_clients_iter->second)) {
+      LOG(INFO) << "KillMigrateClient: kill migrate_cli[" << migrate_clients_iter->first.c_str() << "]";
+
+      migrate_cli->Close();
+      delete migrate_cli;
+      migrate_cli = nullptr;
+
+      migrate_clients_.erase(migrate_clients_iter);
+      break;
+    }
+
+    ++migrate_clients_iter;
+  }
+}
+
+// clean up and release clients that have been idle too long
+void PikaMigrate::CleanMigrateClient() {
+  struct timeval now;
+
+  // nothing to clean when there are no cached clients
+  if (migrate_clients_.empty()) {
+    return;
+  }
+
+  gettimeofday(&now, nullptr);
+  auto migrate_clients_iter = migrate_clients_.begin();
+  while (migrate_clients_iter != migrate_clients_.end()) {
+    auto migrate_cli = static_cast<net::NetCli*>(migrate_clients_iter->second);
+    // pika_server runs DoTimingTask every 10s, so try to close the
+    // migrate_cli before the peer times it out, at least 20s in advance
+    int timeout = (g_pika_conf->timeout() > 0) ? g_pika_conf->timeout() : 60;
+    if (now.tv_sec - migrate_cli->last_interaction_.tv_sec > timeout - 20) {
+      LOG(INFO) << "CleanMigrateClient: clean migrate_cli[" << migrate_clients_iter->first.c_str() << "]";
+      migrate_cli->Close();
+      delete migrate_cli;
+
+      migrate_clients_iter = migrate_clients_.erase(migrate_clients_iter);
+    } else {
+      ++migrate_clients_iter;
+    }
+  }
+}
+
+// close and release every cached client
+void PikaMigrate::KillAllMigrateClient() {
+  auto migrate_clients_iter = migrate_clients_.begin();
+  while (migrate_clients_iter != migrate_clients_.end()) {
+    auto migrate_cli = static_cast<net::NetCli*>(migrate_clients_iter->second);
+
+    LOG(INFO) << "KillAllMigrateClient: kill migrate_cli[" << migrate_clients_iter->first.c_str() << "]";
+
+    migrate_cli->Close();
+    delete migrate_cli;
+
+    migrate_clients_iter = migrate_clients_.erase(migrate_clients_iter);
+  }
+}
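+
+// A migration round-trips through MigrateSend/MigrateRecv: every command
+// serialized by ParseKey is sent in one batch, then exactly one reply per
+// command sent is consumed and verified.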
+/* *
+ * do migrate a key-value for slotsmgrt/slotsmgrtone commands
+ * return value:
+ *    -1 - error happens
+ *   >=0 - # of success migration (0 or 1)
+ * */
+int PikaMigrate::MigrateKey(const std::string &host, const int port, int timeout, const std::string& key,
+                            const char type, std::string &detail, const std::shared_ptr<DB>& db) {
+  int send_command_num = -1;
+
+  net::NetCli *migrate_cli = GetMigrateClient(host, port, timeout);
+  if (!migrate_cli) {
+    detail = "IOERR error or timeout connecting to the client";
+    return -1;
+  }
+
+  send_command_num = MigrateSend(migrate_cli, key, type, detail, db);
+  if (send_command_num <= 0) {
+    return send_command_num;
+  }
+
+  if (MigrateRecv(migrate_cli, send_command_num, detail)) {
+    return send_command_num;
+  }
+
+  return -1;
+}
+
+int PikaMigrate::MigrateSend(net::NetCli* migrate_cli, const std::string& key, const char type, std::string& detail,
+                             const std::shared_ptr<DB>& db) {
+  std::string wbuf_str;
+  pstd::Status s;
+  int command_num = -1;
+
+  // check that the client is alive
+  if (!migrate_cli) {
+    return -1;
+  }
+
+  command_num = ParseKey(key, type, wbuf_str, db);
+  if (command_num < 0) {
+    detail = "ParseKey failed";
+    return command_num;
+  }
+
+  // no data to send: the key does not exist
+  if (command_num == 0 || wbuf_str.empty()) {
+    return 0;
+  }
+
+  s = migrate_cli->Send(&wbuf_str);
+  if (!s.ok()) {
+    LOG(ERROR) << "Connect slots target, Send error: " << s.ToString();
+    detail = "Connect slots target, Send error: " + s.ToString();
+    KillMigrateClient(migrate_cli);
+    return -1;
+  }
+
+  return command_num;
+}
+
+bool PikaMigrate::MigrateRecv(net::NetCli* migrate_cli, int need_receive, std::string& detail) {
+  pstd::Status s;
+  std::string reply;
+  int64_t ret;
+
+  if (nullptr == migrate_cli || need_receive < 0) {
+    return false;
+  }
+
+  net::RedisCmdArgsType argv;
+  while (need_receive) {
+    s = migrate_cli->Recv(&argv);
+    if (!s.ok()) {
+      LOG(ERROR) << "Connect slots target, Recv error: " << s.ToString();
+      detail = "Connect slots target, Recv error: " + s.ToString();
+      KillMigrateClient(migrate_cli);
+      return false;
+    }
+
+    reply = argv[0];
+    need_receive--;
+
+    // set    returns OK
+    // zadd   returns the number added
+    // hset   returns 0 or 1
+    // hmset  returns OK
+    // sadd   returns the number added
+    // rpush  returns the list length
+    // xadd   returns the stream id
+    if (argv.size() == 1 &&
+        (kInnerReplOk == pstd::StringToLower(reply) || pstd::string2int(reply.data(), reply.size(), &ret))) {
+      // keep receiving the remaining responses
+      if (need_receive > 0) {
+        continue;
+      }
+
+      // all responses have been received
+      break;
+    }
+
+    // failed
+    detail = "something wrong with slots migrate, reply: " + reply;
+    LOG(ERROR) << "something wrong with slots migrate, reply: " << reply;
+    return false;
+  }
+
+  return true;
+}
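+
+// ParseKey rebuilds one key as a sequence of plain Redis write commands
+// (SET/HMSET/ZADD/SADD/RPUSH/XADD, plus a trailing EXPIRE for the types
+// that carry a separate TTL), so the target only needs the vanilla command set.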
reply:" << reply; + return false; + } + + return true; +} + +// return -1 is error; 0 don't migrate; >0 the number of commond +int PikaMigrate::ParseKey(const std::string& key, const char type, std::string& wbuf_str, const std::shared_ptr& db) { + int command_num = -1; + int64_t ttl = 0; + rocksdb::Status s; + switch (type) { + case 'k': + command_num = ParseKKey(key, wbuf_str, db); + break; + case 'h': + command_num = ParseHKey(key, wbuf_str, db); + break; + case 'l': + command_num = ParseLKey(key, wbuf_str, db); + break; + case 'z': + command_num = ParseZKey(key, wbuf_str, db); + break; + case 's': + command_num = ParseSKey(key, wbuf_str, db); + break; + case 'm': + command_num = ParseMKey(key, wbuf_str, db); + break; + default: + LOG(INFO) << "ParseKey key[" << key << "], the type[" << type << "] is not support."; + return -1; + break; + } + + // error or key is not existed + if (command_num <= 0) { + LOG(INFO) << "ParseKey key[" << key << "], parse return " << command_num + << ", the key maybe is not exist or expired."; + return command_num; + } + + // skip kv, stream because kv and stream cmd: SET key value ttl + if (type == 'k' || type == 'm') { + return command_num; + } + + ttl = TTLByType(type, key, db); + + //-1 indicates the key is valid forever + if (ttl == -1) { + return command_num; + } + + // key is expired or not exist, don't migrate + if (ttl == 0 or ttl == -2) { + wbuf_str.clear(); + return 0; + } + + // no kv, because kv cmd: SET key value ttl + if (SetTTL(key, wbuf_str, ttl)) { + command_num += 1; + } + + return command_num; +} + +bool PikaMigrate::SetTTL(const std::string& key, std::string& wbuf_str, int64_t ttl) { + //-1 indicates the key is valid forever + if (ttl == -1) { + return false; + } + + // if ttl = -2 indicates, the key is not existed + if (ttl < 0) { + LOG(INFO) << "SetTTL key[" << key << "], ttl is " << ttl; + ttl = 0; + } + + net::RedisCmdArgsType argv; + std::string cmd; + + argv.emplace_back("EXPIRE"); + argv.emplace_back(key); + argv.emplace_back(std::to_string(ttl)); + + net::SerializeRedisCommand(argv, &cmd); + wbuf_str.append(cmd); + + return true; +} + +// return -1 is error; 0 don't migrate; >0 the number of commond +int PikaMigrate::ParseKKey(const std::string& key, std::string& wbuf_str, const std::shared_ptr& db) { + net::RedisCmdArgsType argv; + std::string cmd; + std::string value; + int64_t ttl = 0; + rocksdb::Status s; + + s = db->storage()->Get(key, &value); + + // if key is not existed, don't migrate + if (s.IsNotFound()) { + return 0; + } + + if (!s.ok()) { + return -1; + } + + argv.emplace_back("SET"); + argv.emplace_back(key); + argv.emplace_back(value); + + ttl = TTLByType('k', key, db); + + // ttl = -1 indicates the key is valid forever, dont process + // key is expired or not exist, dont migrate + // todo check ttl + if (ttl == 0 || ttl == -2) { + wbuf_str.clear(); + return 0; + } + + if (ttl > 0) { + argv.emplace_back("EX"); + argv.emplace_back(std::to_string(ttl)); + } + net::SerializeRedisCommand(argv, &cmd); + wbuf_str.append(cmd); + return 1; +} + +int64_t PikaMigrate::TTLByType(const char key_type, const std::string& key, const std::shared_ptr& db) { + return db->storage()->TTL(key); +} + +int PikaMigrate::ParseZKey(const std::string& key, std::string& wbuf_str, const std::shared_ptr& db) { + int command_num = 0; + + int64_t next_cursor = 0; + std::vector score_members; + do { + score_members.clear(); + rocksdb::Status s = db->storage()->ZScan(key, next_cursor, "*", MAX_MEMBERS_NUM, &score_members, &next_cursor); + if (s.ok()) 
{ + if (score_members.empty()) { + break; + } + + net::RedisCmdArgsType argv; + std::string cmd; + argv.emplace_back("ZADD"); + argv.emplace_back(key); + + for (const auto &score_member : score_members) { + argv.emplace_back(std::to_string(score_member.score)); + argv.emplace_back(score_member.member); + } + + net::SerializeRedisCommand(argv, &cmd); + wbuf_str.append(cmd); + command_num++; + } else if (s.IsNotFound()) { + wbuf_str.clear(); + return 0; + } else { + wbuf_str.clear(); + return -1; + } + } while (next_cursor > 0); + + return command_num; +} + +// return -1 is error; 0 don't migrate; >0 the number of commond +int PikaMigrate::ParseHKey(const std::string& key, std::string& wbuf_str, const std::shared_ptr& db) { + int64_t next_cursor = 0; + int command_num = 0; + std::vector field_values; + do { + field_values.clear(); + rocksdb::Status s = db->storage()->HScan(key, next_cursor, "*", MAX_MEMBERS_NUM, &field_values, &next_cursor); + if (s.ok()) { + if (field_values.empty()) { + break; + } + + net::RedisCmdArgsType argv; + std::string cmd; + argv.emplace_back("HMSET"); + argv.emplace_back(key); + + for (const auto &field_value : field_values) { + argv.emplace_back(field_value.field); + argv.emplace_back(field_value.value); + } + + net::SerializeRedisCommand(argv, &cmd); + wbuf_str.append(cmd); + command_num++; + } else if (s.IsNotFound()) { + wbuf_str.clear(); + return 0; + } else { + wbuf_str.clear(); + return -1; + } + } while (next_cursor > 0); + + return command_num; +} + +// return -1 is error; 0 don't migrate; >0 the number of commond +int PikaMigrate::ParseSKey(const std::string& key, std::string& wbuf_str, const std::shared_ptr& db) { + int command_num = 0; + int64_t next_cursor = 0; + std::vector members; + + do { + members.clear(); + rocksdb::Status s = db->storage()->SScan(key, next_cursor, "*", MAX_MEMBERS_NUM, &members, &next_cursor); + + if (s.ok()) { + if (members.empty()) { + break; + } + + net::RedisCmdArgsType argv; + std::string cmd; + argv.emplace_back("SADD"); + argv.emplace_back(key); + + for (const auto &member : members) { + argv.emplace_back(member); + } + + net::SerializeRedisCommand(argv, &cmd); + wbuf_str.append(cmd); + command_num++; + } else if (s.IsNotFound()) { + wbuf_str.clear(); + return 0; + } else { + wbuf_str.clear(); + return -1; + } + } while (next_cursor > 0); + + return command_num; +} + +int PikaMigrate::ParseMKey(const std::string& key, std::string& wbuf_str, const std::shared_ptr& db) { + int command_num = 0; + std::vector id_messages; + storage::StreamScanArgs arg; + storage::StreamUtils::StreamParseIntervalId("-", arg.start_sid, &arg.start_ex, 0); + storage::StreamUtils::StreamParseIntervalId("+", arg.end_sid, &arg.end_ex, UINT64_MAX); + auto s = db->storage()->XRange(key, arg, id_messages); + + if (s.ok()) { + net::RedisCmdArgsType argv; + std::string cmd; + argv.emplace_back("XADD"); + argv.emplace_back(key); + for (auto &fv : id_messages) { + std::vector message; + storage::StreamUtils::DeserializeMessage(fv.value, message); + storage::streamID sid; + sid.DeserializeFrom(fv.field); + argv.emplace_back(sid.ToString()); + for (auto &m : message) { + argv.emplace_back(m); + } + } + net::SerializeRedisCommand(argv, &cmd); + wbuf_str.append(cmd); + command_num++; + } else if (s.IsNotFound()) { + wbuf_str.clear(); + return 0; + } else { + wbuf_str.clear(); + return -1; + } + return command_num; +} + +// return -1 is error; 0 don't migrate; >0 the number of commond +int PikaMigrate::ParseLKey(const std::string& key, std::string& wbuf_str, 
const std::shared_ptr& db) { + int64_t left = 0; + int command_num = 0; + std::vector values; + + net::RedisCmdArgsType argv; + std::string cmd; + + // del old key, before migrate list; prevent redo when failed + argv.emplace_back("DEL"); + argv.emplace_back(key); + net::SerializeRedisCommand(argv, &cmd); + wbuf_str.append(cmd); + command_num++; + + do { + values.clear(); + rocksdb::Status s = db->storage()->LRange(key, left, left + (MAX_MEMBERS_NUM - 1), &values); + if (s.ok()) { + if (values.empty()) { + break; + } + + net::RedisCmdArgsType argv; + std::string cmd; + + argv.emplace_back("RPUSH"); + argv.emplace_back(key); + + for (const auto &value : values) { + argv.emplace_back(value); + } + + net::SerializeRedisCommand(argv, &cmd); + wbuf_str.append(cmd); + command_num++; + + left += MAX_MEMBERS_NUM; + } else if (s.IsNotFound()) { + wbuf_str.clear(); + return 0; + } else { + wbuf_str.clear(); + return -1; + } + } while (!values.empty()); + + if (command_num == 1) { + wbuf_str.clear(); + command_num = 0; + } + + return command_num; +} + +/* * + * do migrate a key-value for slotsmgrt/slotsmgrtone commands + * return value: + * -1 - error happens + * >=0 - # of success migration (0 or 1) + * */ +static int SlotsMgrtOne(const std::string &host, const int port, int timeout, const std::string& key, const char type, + std::string& detail, const std::shared_ptr& db) { + int send_command_num = 0; + rocksdb::Status s; + std::map type_status; + + send_command_num = g_pika_server->pika_migrate_->MigrateKey(host, port, timeout, key, type, detail, db); + + // the key is migrated to target, delete key and slotsinfo + if (send_command_num >= 1) { + std::vector keys; + keys.emplace_back(key); + int64_t count = db->storage()->Del(keys); + if (count > 0) { + WriteDelKeyToBinlog(key, db); + } + + // del slots info + RemSlotKeyByType(std::string(1, type), key, db); + return 1; + } + + // key is not existed, only del slotsinfo + if (send_command_num == 0) { + // del slots info + RemSlotKeyByType(std::string(1, type), key, db); + return 0; + } + return -1; +} + +void RemSlotKeyByType(const std::string& type, const std::string& key, const std::shared_ptr& db) { + uint32_t crc; + int hastag; + uint32_t slotNum = GetSlotsID(g_pika_conf->default_slot_num(), key, &crc, &hastag); + + std::string slot_key = GetSlotKey(slotNum); + int32_t res = 0; + + std::vector members; + members.emplace_back(type + key); + rocksdb::Status s = db->storage()->SRem(slot_key, members, &res); + if (!s.ok()) { + LOG(ERROR) << "srem key[" << key << "] from slotKey[" << slot_key << "] failed, error: " << s.ToString(); + return; + } + + if (hastag) { + std::string tag_key = GetSlotsTagKey(crc); + s = db->storage()->SRem(tag_key, members, &res); + if (!s.ok()) { + LOG(ERROR) << "srem key[" << key << "] from tagKey[" << tag_key << "] failed, error: " << s.ToString(); + return; + } + } +} + +/* * + * do migrate mutli key-value(s) for {slotsmgrt/slotsmgrtone}with tag commands + * return value: + * -1 - error happens + * >=0 - # of success migration + * */ +static int SlotsMgrtTag(const std::string& host, const int port, int timeout, const std::string& key, const char type, + std::string& detail, const std::shared_ptr& db) { + int count = 0; + uint32_t crc; + int hastag; + GetSlotsID(g_pika_conf->default_slot_num(), key, &crc, &hastag); + if (!hastag) { + if (type == 0) { + return 0; + } + return SlotsMgrtOne(host, port, timeout, key, type, detail, db); + } + + std::string tag_key = GetSlotsTagKey(crc); + std::vector members; + + // get all 
keys that have the same crc + rocksdb::Status s = db->storage()->SMembers(tag_key, &members); + if (!s.ok()) { + return -1; + } + + auto iter = members.begin(); + for (; iter != members.end(); iter++) { + std::string key = *iter; + char type = key.at(0); + key.erase(key.begin()); + int ret = SlotsMgrtOne(host, port, timeout, key, type, detail, db); + + // the key is migrated to target + if (ret == 1) { + count++; + continue; + } + + if (ret == 0) { + LOG(WARNING) << "slots migrate tag failed, key: " << key << ", detail: " << detail; + continue; + } + + return -1; + } + + return count; +} + +std::string GetSlotKey(uint32_t slot) { + return SlotKeyPrefix + std::to_string(slot); +} + +// add key to slotkey +void AddSlotKey(const std::string& type, const std::string& key, const std::shared_ptr& db) { + if (g_pika_conf->slotmigrate() != true) { + return; + } + + rocksdb::Status s; + int32_t res = -1; + uint32_t crc; + int hastag; + uint32_t slotID = GetSlotsID(g_pika_conf->default_slot_num(), key, &crc, &hastag); + std::string slot_key = GetSlotKey(slotID); + std::vector members; + members.emplace_back(type + key); + s = db->storage()->SAdd(slot_key, members, &res); + if (!s.ok()) { + LOG(ERROR) << "sadd key[" << key << "] to slotKey[" << slot_key << "] failed, error: " << s.ToString(); + return; + } + + // if res == 0, indicate the key is existed; may return, + // prevent write slot_key success, but write tag_key failed, so always write tag_key + if (hastag) { + std::string tag_key = GetSlotsTagKey(crc); + s = db->storage()->SAdd(tag_key, members, &res); + if (!s.ok()) { + LOG(ERROR) << "sadd key[" << key << "] to tagKey[" << tag_key << "] failed, error: " << s.ToString(); + return; + } + } +} + +// del key from slotkey +void RemSlotKey(const std::string& key, const std::shared_ptr& db) { + if (g_pika_conf->slotmigrate() != true) { + return; + } + std::string type; + if (GetKeyType(key, type, db) < 0) { + LOG(WARNING) << "SRem key: " << key << " from slotKey error"; + return; + } + std::string slotKey = GetSlotKey(GetSlotID(g_pika_conf->default_slot_num(), key)); + int32_t count = 0; + std::vector members(1, type + key); + rocksdb::Status s = db->storage()->SRem(slotKey, members, &count); + if (!s.ok()) { + LOG(WARNING) << "SRem key: " << key << " from slotKey, error: " << s.ToString(); + return; + } +} + +int GetKeyType(const std::string& key, std::string& key_type, const std::shared_ptr& db) { + enum storage::DataType type; + rocksdb::Status s = db->storage()->GetType(key, type); + if (!s.ok()) { + LOG(WARNING) << "Get key type error: " << key << " " << s.ToString(); + key_type = ""; + return -1; + } + auto key_type_char = storage::DataTypeToTag(type); + if (key_type_char == DataTypeToTag(storage::DataType::kNones)) { + LOG(WARNING) << "Get key type error: " << key; + key_type = ""; + return -1; + } + key_type = key_type_char; + return 1; +} + +// get slotstagkey by key +std::string GetSlotsTagKey(uint32_t crc) { + return SlotTagPrefix + std::to_string(crc); +} + +// delete key from db && cache +int DeleteKey(const std::string& key, const char key_type, const std::shared_ptr& db) { + int32_t res = 0; + std::string slotKey = GetSlotKey(GetSlotID(g_pika_conf->default_slot_num(), key)); + + // delete slotkey + std::vector members; + members.emplace_back(key_type + key); + rocksdb::Status s = db->storage()->SRem(slotKey, members, &res); + if (!s.ok()) { + if (s.IsNotFound()) { + LOG(INFO) << "Del key Srem key " << key << " not found"; + return 0; + } else { + LOG(WARNING) << "Del key Srem key: " 
<< key << " from slotKey, error: " << s.ToString();
+      return -1;
+    }
+  }
+
+  // delete from cache
+  if (PIKA_CACHE_NONE != g_pika_conf->cache_mode()
+    && PIKA_CACHE_STATUS_OK == db->cache()->CacheStatus()) {
+    db->cache()->Del(members);
+  }
+
+  // delete key from db
+  members.clear();
+  members.emplace_back(key);
+  std::map<storage::DataType, storage::Status> type_status;
+  int64_t del_nums = db->storage()->Del(members);
+  if (0 > del_nums) {
+    LOG(WARNING) << "Del key: " << key << " at slot " << GetSlotID(g_pika_conf->default_slot_num(), key) << " error";
+    return -1;
+  }
+
+  return 1;
+}
+
+void SlotsMgrtTagSlotCmd::DoInitial() {
+  if (!CheckArg(argv_.size())) {
+    res_.SetRes(CmdRes::kWrongNum, kCmdNameSlotsMgrtTagSlot);
+    return;
+  }
+  // Remember that the first arg is the command name
+  auto it = argv_.begin() + 1;
+  dest_ip_ = *it++;
+  pstd::StringToLower(dest_ip_);
+
+  std::string str_dest_port = *it++;
+  if (!pstd::string2int(str_dest_port.data(), str_dest_port.size(), &dest_port_)) {
+    std::string detail = "invalid port number " + str_dest_port;
+    res_.SetRes(CmdRes::kErrOther, detail);
+    return;
+  }
+  if (dest_port_ < 0 || dest_port_ > 65535) {
+    std::string detail = "invalid port number " + std::to_string(dest_port_);
+    res_.SetRes(CmdRes::kErrOther, detail);
+    return;
+  }
+
+  if ((dest_ip_ == "127.0.0.1" || dest_ip_ == g_pika_server->host()) && dest_port_ == g_pika_server->port()) {
+    res_.SetRes(CmdRes::kErrOther, "destination address error");
+    return;
+  }
+
+  std::string str_timeout_ms = *it++;
+  if (!pstd::string2int(str_timeout_ms.data(), str_timeout_ms.size(), &timeout_ms_)) {
+    res_.SetRes(CmdRes::kInvalidInt);
+    return;
+  }
+  if (timeout_ms_ < 0) {
+    std::string detail = "invalid timeout number " + std::to_string(timeout_ms_);
+    res_.SetRes(CmdRes::kErrOther, detail);
+    return;
+  }
+  if (timeout_ms_ == 0) {
+    timeout_ms_ = 100;
+  }
+
+  std::string str_slot_num = *it++;
+  if (!pstd::string2int(str_slot_num.data(), str_slot_num.size(), &slot_id_)) {
+    res_.SetRes(CmdRes::kInvalidInt);
+    return;
+  }
+  if (slot_id_ < 0 || slot_id_ >= g_pika_conf->default_slot_num()) {
+    std::string detail = "invalid slot number " + std::to_string(slot_id_);
+    res_.SetRes(CmdRes::kErrOther, detail);
+    return;
+  }
+}
+
+void SlotsMgrtTagSlotCmd::Do() {
+  if (!g_pika_conf->slotmigrate()) {
+    LOG(WARNING) << "Not in slotmigrate mode";
+    res_.SetRes(CmdRes::kErrOther, "not set slotmigrate");
+    return;
+  }
+
+  int32_t len = 0;
+  int ret = 0;
+  std::string detail;
+  std::string slot_key = GetSlotKey(static_cast<uint32_t>(slot_id_));
+
+  // first get the size of slot_key, to avoid a pointlessly slow SSCAN when the slot is empty
+  rocksdb::Status s = db_->storage()->SCard(slot_key, &len);
+  if (len < 0) {
+    detail = "Get the len of slot Error";
+  }
+  // mutex between SlotsMgrtTagSlotCmd, SlotsMgrtTagOneCmd and migrator_thread
+  if (len > 0 && g_pika_server->pika_migrate_->Trylock()) {
+    g_pika_server->pika_migrate_->CleanMigrateClient();
+    int64_t next_cursor = 0;
+    std::vector<std::string> members;
+    rocksdb::Status s = db_->storage()->SScan(slot_key, 0, "*", 1, &members, &next_cursor);
+    if (s.ok()) {
+      for (const auto &member : members) {
+        std::string key = member;
+        char type = key.at(0);
+        key.erase(key.begin());
+        ret = SlotsMgrtTag(dest_ip_, static_cast<int>(dest_port_), static_cast<int>(timeout_ms_), key, type, detail, db_);
+      }
+    }
+    // unlock
+    g_pika_server->pika_migrate_->Unlock();
+  } else {
+    LOG(WARNING) << "pika migrate is running, try again later, slot_id_: " << slot_id_;
+  }
+  if (ret == 0) {
+    LOG(WARNING) << "slots migrate 
without tag failed, slot_id_: " << slot_id_ << ", detail: " << detail; + } + if (len >= 0 && ret >= 0) { + res_.AppendArrayLen(2); + // the number of keys migrated + res_.AppendInteger(ret); + // the number of keys remained + res_.AppendInteger(len - ret); + } else { + res_.SetRes(CmdRes::kErrOther, detail); + } + + return; +} + +// check key type +int SlotsMgrtTagOneCmd::KeyTypeCheck(const std::shared_ptr& db) { + enum storage::DataType type; + std::string key_type; + rocksdb::Status s = db->storage()->GetType(key_, type); + if (!s.ok()) { + if (s.IsNotFound()) { + LOG(WARNING) << "Migrate slot key " << key_ << " not found"; + res_.AppendInteger(0); + } else { + LOG(WARNING) << "Migrate slot key: " << key_ << " error: " << s.ToString(); + res_.SetRes(CmdRes::kErrOther, "migrate slot error"); + } + return -1; + } + key_type_ = storage::DataTypeToTag(type); + if (type == storage::DataType::kNones) { + LOG(WARNING) << "Migrate slot key: " << key_ << " not found"; + res_.AppendInteger(0); + return -1; + } + return 0; +} + +void SlotsMgrtTagOneCmd::DoInitial() { + if (!CheckArg(argv_.size())) { + res_.SetRes(CmdRes::kWrongNum, kCmdNameSlotsMgrtTagSlot); + return; + } + // Remember the first args is the opt name + auto it = argv_.begin() + 1; + dest_ip_ = *it++; + pstd::StringToLower(dest_ip_); + + std::string str_dest_port = *it++; + if (!pstd::string2int(str_dest_port.data(), str_dest_port.size(), &dest_port_)) { + std::string detail = "invalid port number " + std::to_string(dest_port_); + res_.SetRes(CmdRes::kErrOther, detail); + return; + } + if (dest_port_ < 0 || dest_port_ > 65535) { + std::string detail = "invalid port number " + std::to_string(dest_port_); + res_.SetRes(CmdRes::kErrOther, detail); + return; + } + + if ((dest_ip_ == "127.0.0.1" || dest_ip_ == g_pika_server->host()) && dest_port_ == g_pika_server->port()) { + res_.SetRes(CmdRes::kErrOther, "destination address error"); + return; + } + + std::string str_timeout_ms = *it++; + if (!pstd::string2int(str_timeout_ms.data(), str_timeout_ms.size(), &timeout_ms_)) { + res_.SetRes(CmdRes::kInvalidInt); + return; + } + if (timeout_ms_ < 0) { + std::string detail = "invalid timeout number " + std::to_string(timeout_ms_); + res_.SetRes(CmdRes::kErrOther, detail); + return; + } + if (timeout_ms_ == 0) { + timeout_ms_ = 100; + } + + key_ = *it++; +} + +void SlotsMgrtTagOneCmd::Do() { + if (!g_pika_conf->slotmigrate()) { + LOG(WARNING) << "Not in slotmigrate mode"; + res_.SetRes(CmdRes::kErrOther, "not set slotmigrate"); + return; + } + + int64_t ret = 0; + int32_t len = 0; + int hastag = 0; + uint32_t crc = 0; + std::string detail; + rocksdb::Status s; + std::map type_status; + + // if you need migrates key, if the key is not existed, return + GetSlotsID(g_pika_conf->default_slot_num(), key_, &crc, &hastag); + if (!hastag) { + std::vector keys; + keys.emplace_back(key_); + + // check the key is not existed + ret = db_->storage()->Exists(keys); + + // when the key is not existed, ret = 0 + if (ret == -1) { + res_.SetRes(CmdRes::kErrOther, "exists internal error"); + return; + } + + if (ret == 0) { + res_.AppendInteger(0); + return; + } + + // else need to migrate + } else { + // key is tag_key, check the number of the tag_key + std::string tag_key = GetSlotsTagKey(crc); + s = db_->storage()->SCard(tag_key, &len); + if (s.IsNotFound()) { + res_.AppendInteger(0); + return; + } + if (!s.ok() || len == -1) { + res_.SetRes(CmdRes::kErrOther, "can't get the number of tag_key"); + return; + } + + if (len == 0) { + res_.AppendInteger(0); + 
return;
+    }
+
+    // else fall through to migrate
+  }
+
+  // lock batch migration: don't run slotsmgrttagslot while slotsmgrttagone is running;
+  // pika_server thread exit (~PikaMigrate) and the dispatch thread's CronHandle need the lock too
+  g_pika_server->pika_migrate_->Lock();
+
+  // if the key does not exist, return
+  if (!hastag) {
+    std::vector<std::string> keys;
+    keys.emplace_back(key_);
+    // the key may have been deleted by another thread
+    ret = db_->storage()->Exists(keys);
+
+    // Exists() returns -1 on internal error, 0 when the key does not exist
+    if (ret == -1) {
+      detail = "exists internal error";
+    } else if (KeyTypeCheck(db_) != 0) {
+      detail = "can't get the key type";
+      ret = -1;
+    } else {
+      ret = SlotsMgrtTag(dest_ip_, static_cast<int>(dest_port_), static_cast<int>(timeout_ms_), key_, key_type_, detail, db_);
+    }
+  } else {
+    // the key itself may not exist: it is a tag key, migrate all keys with the same tag
+    ret = SlotsMgrtTag(dest_ip_, static_cast<int>(dest_port_), static_cast<int>(timeout_ms_), key_, 0, detail, db_);
+  }
+
+  // unlock the record lock
+  g_pika_server->pika_migrate_->Unlock();
+
+  if (ret >= 0) {
+    res_.AppendInteger(ret);
+  } else {
+    if (detail.size() == 0) {
+      detail = "Unknown Error";
+    }
+    res_.SetRes(CmdRes::kErrOther, detail);
+  }
+
+  return;
+}
+
+/* *
+ * slotsinfo [start] [count]
+ * */
+void SlotsInfoCmd::DoInitial() {
+  if (!CheckArg(argv_.size())) {
+    res_.SetRes(CmdRes::kWrongNum, kCmdNameSlotsInfo);
+    return;
+  }
+
+  if (argv_.size() >= 2) {
+    if (!pstd::string2int(argv_[1].data(), argv_[1].size(), &begin_)) {
+      res_.SetRes(CmdRes::kInvalidInt);
+      return;
+    }
+
+    if (begin_ < 0 || begin_ >= end_) {
+      std::string detail = "invalid slot begin = " + argv_[1];
+      res_.SetRes(CmdRes::kErrOther, detail);
+      return;
+    }
+  }
+
+  if (argv_.size() >= 3) {
+    int64_t count = 0;
+    if (!pstd::string2int(argv_[2].data(), argv_[2].size(), &count)) {
+      res_.SetRes(CmdRes::kInvalidInt);
+      return;
+    }
+
+    if (count < 0) {
+      std::string detail = "invalid slot count = " + argv_[2];
+      res_.SetRes(CmdRes::kErrOther, detail);
+      return;
+    }
+
+    if (begin_ + count < end_) {
+      end_ = begin_ + count;
+    }
+  }
+
+  if (argv_.size() >= 4) {
+    res_.SetRes(CmdRes::kWrongNum, kCmdNameSlotsInfo);
+    return;
+  }
+}
+
+void SlotsInfoCmd::Do() {
+  int slotNum = g_pika_conf->default_slot_num();
+  std::vector<int> slots_slot(slotNum, 0);
+  std::vector<int> slots_size(slotNum, 0);
+  int n = 0;
+  int32_t len = 0;
+  std::string slot_key;
+
+  for (auto i = static_cast<uint32_t>(begin_); i < end_; i++) {
+    slot_key = GetSlotKey(i);
+    len = 0;
+    rocksdb::Status s = db_->storage()->SCard(slot_key, &len);
+    if (!s.ok() || len == 0) {
+      continue;
+    }
+
+    slots_slot[n] = static_cast<int>(i);
+    slots_size[n] = len;
+    n++;
+  }
+
+  res_.AppendArrayLen(n);
+  for (int i = 0; i < n; i++) {
+    res_.AppendArrayLen(2);
+    res_.AppendInteger(slots_slot[i]);
+    res_.AppendInteger(slots_size[i]);
+  }
+
+  return;
+}
+
+void SlotsMgrtTagSlotAsyncCmd::DoInitial() {
+  if (!CheckArg(argv_.size())) {
+    res_.SetRes(CmdRes::kWrongNum, kCmdNameSlotsMgrtTagSlotAsync);
+    return;
+  }
+  // argv_[0] is the command name, skip it
+  auto it = argv_.begin() + 1;
+  dest_ip_ = *it++;
+  pstd::StringToLower(dest_ip_);
+
+  std::string str_dest_port = *it++;
+  if (!pstd::string2int(str_dest_port.data(), str_dest_port.size(), &dest_port_) || dest_port_ <= 0) {
+    res_.SetRes(CmdRes::kInvalidInt);
+    return;
+  }
+
+  if ((dest_ip_ == "127.0.0.1" || dest_ip_ == g_pika_server->host()) && dest_port_ == g_pika_server->port()) {
+    res_.SetRes(CmdRes::kErrOther, "destination address error");
+    return;
+  }
+
+  std::string str_timeout_ms 
= *it++; + if (!pstd::string2int(str_timeout_ms.data(), str_timeout_ms.size(), &timeout_ms_) || timeout_ms_ <= 0) { + res_.SetRes(CmdRes::kInvalidInt); + return; + } + + std::string str_max_bulks = *it++; + if (!pstd::string2int(str_max_bulks.data(), str_max_bulks.size(), &max_bulks_) || max_bulks_ <= 0) { + res_.SetRes(CmdRes::kInvalidInt); + return; + } + + std::string str_max_bytes_ = *it++; + if (!pstd::string2int(str_max_bytes_.data(), str_max_bytes_.size(), &max_bytes_) || max_bytes_ <= 0) { + res_.SetRes(CmdRes::kInvalidInt); + return; + } + + std::string str_slot_num = *it++; + if (!pstd::string2int(str_slot_num.data(), str_slot_num.size(), &slot_id_) || slot_id_ < 0 || + slot_id_ >= g_pika_conf->default_slot_num()) { + res_.SetRes(CmdRes::kInvalidInt); + return; + } + + std::string str_keys_num = *it++; + if (!pstd::string2int(str_keys_num.data(), str_keys_num.size(), &keys_num_) || keys_num_ < 0) { + res_.SetRes(CmdRes::kInvalidInt); + return; + } + return; +} + +void SlotsMgrtTagSlotAsyncCmd::Do() { + // check whether open slotmigrate + if (!g_pika_conf->slotmigrate()) { + res_.SetRes(CmdRes::kErrOther, "please open slotmigrate and reload slot"); + return; + } + + int32_t remained = 0; + std::string slotKey = GetSlotKey(static_cast(slot_id_)); + storage::Status status = db_->storage()->SCard(slotKey, &remained); + if (status.IsNotFound()) { + LOG(INFO) << "find no record in slot " << slot_id_; + res_.AppendArrayLen(2); + res_.AppendInteger(0); + res_.AppendInteger(remained); + return; + } + if (!status.ok()) { + LOG(WARNING) << "Slot batch migrate keys get result error"; + res_.SetRes(CmdRes::kErrOther, "Slot batch migrating keys get result error"); + return; + } + + bool ret = g_pika_server->SlotsMigrateBatch(dest_ip_, dest_port_, timeout_ms_, slot_id_, keys_num_, db_); + if (!ret) { + LOG(WARNING) << "Slot batch migrate keys error"; + res_.SetRes(CmdRes::kErrOther, "Slot batch migrating keys error, may be currently migrating"); + return; + } + + res_.AppendArrayLen(2); + res_.AppendInteger(0); + res_.AppendInteger(remained); + return; +} + +void SlotsMgrtAsyncStatusCmd::DoInitial() { + if (!CheckArg(argv_.size())) { + res_.SetRes(CmdRes::kWrongNum, kCmdNameSlotsMgrtAsyncStatus); + } + return; +} + +void SlotsMgrtAsyncStatusCmd::Do() { + std::string status; + std::string ip; + int64_t port = -1, slots = -1, moved = -1, remained = -1; + bool migrating = false; + g_pika_server->GetSlotsMgrtSenderStatus(&ip, &port, &slots, &migrating, &moved, &remained); + std::string mstatus = migrating ? 
"yes" : "no"; + res_.AppendArrayLen(5); + status = "dest server: " + ip + ":" + std::to_string(port); + res_.AppendStringLenUint64(status.size()); + res_.AppendContent(status); + status = "slot number: " + std::to_string(slots); + res_.AppendStringLenUint64(status.size()); + res_.AppendContent(status); + status = "migrating : " + mstatus; + res_.AppendStringLenUint64(status.size()); + res_.AppendContent(status); + status = "moved keys : " + std::to_string(moved); + res_.AppendStringLenUint64(status.size()); + res_.AppendContent(status); + status = "remain keys: " + std::to_string(remained); + res_.AppendStringLenUint64(status.size()); + res_.AppendContent(status); + + return; +} + +void SlotsMgrtAsyncCancelCmd::DoInitial() { + if (!CheckArg(argv_.size())) { + res_.SetRes(CmdRes::kWrongNum, kCmdNameSlotsMgrtAsyncCancel); + } + return; +} + +void SlotsMgrtAsyncCancelCmd::Do() { + bool ret = g_pika_server->SlotsMigrateAsyncCancel(); + if (!ret) { + res_.SetRes(CmdRes::kErrOther, "slotsmgrt-async-cancel error"); + } + res_.SetRes(CmdRes::kOk); + return; +} + +void SlotsDelCmd::DoInitial() { + if (!CheckArg(argv_.size())) { + res_.SetRes(CmdRes::kWrongNum, kCmdNameSlotsDel); + } + slots_.assign(argv_.begin(), argv_.end()); + return; +} + +void SlotsDelCmd::Do() { + std::vector keys; + std::vector::const_iterator iter; + for (iter = slots_.begin(); iter != slots_.end(); iter++) { + keys.emplace_back(SlotKeyPrefix + *iter); + } + std::map type_status; + int64_t count = db_->storage()->Del(keys); + if (count >= 0) { + res_.AppendInteger(count); + } else { + res_.SetRes(CmdRes::kErrOther, "SlotsDel error"); + } + return; +} + +/* * + * slotshashkey [key1 key2...] + * */ +void SlotsHashKeyCmd::DoInitial() { + if (!CheckArg(argv_.size())) { + res_.SetRes(CmdRes::kWrongNum, kCmdNameSlotsHashKey); + return; + } + + auto iter = argv_.begin(); + keys_.assign(++iter, argv_.end()); + return; +} + +void SlotsHashKeyCmd::Do() { + std::vector::const_iterator keys_it; + + res_.AppendArrayLenUint64(keys_.size()); + for (keys_it = keys_.begin(); keys_it != keys_.end(); ++keys_it) { + res_.AppendInteger(GetSlotsID(g_pika_conf->default_slot_num(), *keys_it, nullptr, nullptr)); + } + + return; +} + +void SlotsScanCmd::DoInitial() { + if (!CheckArg(argv_.size())) { + res_.SetRes(CmdRes::kWrongNum, kCmdNameSlotsScan); + return; + } + key_ = SlotKeyPrefix + argv_[1]; + if (std::stoll(argv_[1].data()) < 0 || std::stoll(argv_[1].data()) >= g_pika_conf->default_slot_num()) { + res_.SetRes(CmdRes::kWrongNum, kCmdNameSlotsScan); + return; + } + if (!pstd::string2int(argv_[2].data(), argv_[2].size(), &cursor_)) { + res_.SetRes(CmdRes::kWrongNum, kCmdNameSlotsScan); + return; + } + size_t argc = argv_.size(), index = 3; + while (index < argc) { + std::string opt = argv_[index]; + if (!strcasecmp(opt.data(), "match") || !strcasecmp(opt.data(), "count")) { + index++; + if (index >= argc) { + res_.SetRes(CmdRes::kSyntaxErr); + return; + } + if (!strcasecmp(opt.data(), "match")) { + pattern_ = argv_[index]; + } else if (!pstd::string2int(argv_[index].data(), argv_[index].size(), &count_)) { + res_.SetRes(CmdRes::kInvalidInt); + return; + } + } else { + res_.SetRes(CmdRes::kSyntaxErr); + return; + } + index++; + } + if (count_ < 0) { + res_.SetRes(CmdRes::kSyntaxErr); + return; + } + return; +} + +void SlotsScanCmd::Do() { + std::vector members; + rocksdb::Status s = db_->storage()->SScan(key_, cursor_, pattern_, count_, &members, &cursor_); + + if (members.size() <= 0) { + cursor_ = 0; + } + res_.AppendContent("*2"); + + char 
buf[32]; + int64_t len = pstd::ll2string(buf, sizeof(buf), cursor_); + res_.AppendStringLen(len); + res_.AppendContent(buf); + + res_.AppendArrayLenUint64(members.size()); + auto iter_member = members.begin(); + for (; iter_member != members.end(); iter_member++) { + res_.AppendStringLenUint64(iter_member->size()); + res_.AppendContent(*iter_member); + } + return; +} + +void SlotsMgrtExecWrapperCmd::DoInitial() { + if (!CheckArg(argv_.size())) { + res_.SetRes(CmdRes::kWrongNum, kCmdNameSlotsMgrtExecWrapper); + } + auto it = argv_.begin() + 1; + key_ = *it++; + pstd::StringToLower(key_); + return; +} + +// return 0 means key doesn't exist, or key is not migrating +// return 1 means key is migrating +// return -1 means something wrong +void SlotsMgrtExecWrapperCmd::Do() { + res_.AppendArrayLen(2); + int ret = g_pika_server->SlotsMigrateOne(key_, db_); + switch (ret) { + case 0: + res_.AppendInteger(0); + res_.AppendInteger(0); + return; + case 1: + res_.AppendInteger(1); + res_.AppendInteger(1); + return; + default: + res_.AppendInteger(-1); + res_.AppendInteger(-1); + return; + } + return; +} + +void SlotsReloadCmd::DoInitial() { + if (!CheckArg(argv_.size())) { + res_.SetRes(CmdRes::kWrongNum, kCmdNameSlotsReload); + } + return; +} + +void SlotsReloadCmd::Do() { + g_pika_server->Bgslotsreload(db_); + const PikaServer::BGSlotsReload &info = g_pika_server->bgslots_reload(); + char buf[256]; + snprintf(buf, sizeof(buf), "+%s : %lld", info.s_start_time.c_str(), g_pika_server->GetSlotsreloadingCursor()); + res_.AppendContent(buf); + return; +} + +void SlotsReloadOffCmd::DoInitial() { + if (!CheckArg(argv_.size())) { + res_.SetRes(CmdRes::kWrongNum, kCmdNameSlotsReloadOff); + } + return; +} + +void SlotsReloadOffCmd::Do() { + g_pika_server->SetSlotsreloading(false); + res_.SetRes(CmdRes::kOk); + return; +} + +void SlotsCleanupCmd::DoInitial() { + if (!CheckArg(argv_.size())) { + res_.SetRes(CmdRes::kWrongNum, kCmdNameSlotsCleanup); + } + + auto iter = argv_.begin() + 1; + std::string slot; + long slotLong = 0; + std::vector slots; + for (; iter != argv_.end(); iter++) { + slot = *iter; + if (!pstd::string2int(slot.data(), slot.size(), &slotLong) || slotLong < 0) { + res_.SetRes(CmdRes::kInvalidInt); + return; + } + slots.emplace_back(static_cast(slotLong)); + } + cleanup_slots_.swap(slots); + return; +} + +void SlotsCleanupCmd::Do() { + g_pika_server->Bgslotscleanup(cleanup_slots_, db_); + std::vector cleanup_slots(g_pika_server->GetCleanupSlots()); + res_.AppendArrayLenUint64(cleanup_slots.size()); + auto iter = cleanup_slots.begin(); + for (; iter != cleanup_slots.end(); iter++) { + res_.AppendInteger(*iter); + } + return; +} + +void SlotsCleanupOffCmd::DoInitial() { + if (!CheckArg(argv_.size())) { + res_.SetRes(CmdRes::kWrongNum, kCmdNameSlotsCleanupOff); + } + return; +} + +void SlotsCleanupOffCmd::Do() { + g_pika_server->StopBgslotscleanup(); + res_.SetRes(CmdRes::kOk); + return; +} diff --git a/tools/pika_migrate/src/pika_stable_log.cc b/tools/pika_migrate/src/pika_stable_log.cc new file mode 100644 index 0000000000..ba51d9171c --- /dev/null +++ b/tools/pika_migrate/src/pika_stable_log.cc @@ -0,0 +1,225 @@ +// Copyright (c) 2015-present, Qihoo, Inc. All rights reserved. +// This source code is licensed under the BSD-style license found in the +// LICENSE file in the root directory of this source tree. An additional grant +// of patent rights can be found in the PATENTS file in the same directory. 
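+
+// Retention model used by PurgeFiles() below, sketched for reference. The
+// knob names are assumed to follow the usual conf/pika.conf spelling:
+//   binlog-file-size : bytes written to one write2file before rolling
+//   expire-logs-nums : keep at most this many binlog files
+//   expire-logs-days : purge files whose mtime is older than this many days
+// e.g. with expire-logs-nums=10 and 14 files on disk, the four oldest are
+// purge candidates, each still gated by BinlogCloudPurge() so a file that a
+// slave is reading is never deleted.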
+ +#include +#include + +#include + +#include "include/pika_rm.h" +#include "include/pika_server.h" +#include "include/pika_stable_log.h" +#include "pstd/include/env.h" +#include "include/pika_conf.h" + +using pstd::Status; + +extern PikaServer* g_pika_server; +extern std::unique_ptr g_pika_rm; + +StableLog::StableLog(std::string db_name, std::string log_path) + : purging_(false), db_name_(std::move(db_name)), log_path_(std::move(log_path)) { + stable_logger_ = std::make_shared(log_path_, g_pika_conf->binlog_file_size()); + std::map binlogs; + if (!GetBinlogFiles(&binlogs)) { + LOG(FATAL) << log_path_ << " Could not get binlog files!"; + } + if (!binlogs.empty()) { + UpdateFirstOffset(binlogs.begin()->first); + } +} + +StableLog::~StableLog() = default; + +void StableLog::Leave() { + Close(); + RemoveStableLogDir(); +} + +void StableLog::Close() { stable_logger_->Close(); } + +void StableLog::RemoveStableLogDir() { + std::string logpath = log_path_; + if (logpath[logpath.length() - 1] == '/') { + logpath.erase(logpath.length() - 1); + } + logpath.append("_deleting/"); + if (pstd::RenameFile(log_path_, logpath) != 0) { + LOG(WARNING) << "Failed to move log to trash, error: " << strerror(errno); + return; + } + g_pika_server->PurgeDir(logpath); + + LOG(WARNING) << "DB StableLog: " << db_name_ << " move to trash success"; +} + +bool StableLog::PurgeStableLogs(uint32_t to, bool manual) { + // Only one thread can go through + bool expect = false; + if (!purging_.compare_exchange_strong(expect, true)) { + LOG(WARNING) << "purge process already exist"; + return false; + } + auto arg = new PurgeStableLogArg(); + arg->to = to; + arg->manual = manual; + arg->logger = shared_from_this(); + g_pika_server->PurgelogsTaskSchedule(&DoPurgeStableLogs, static_cast(arg)); + return true; +} + +void StableLog::ClearPurge() { purging_ = false; } + +void StableLog::DoPurgeStableLogs(void* arg) { + std::unique_ptr purge_arg(static_cast(arg)); + purge_arg->logger->PurgeFiles(purge_arg->to, purge_arg->manual); + purge_arg->logger->ClearPurge(); +} + +bool StableLog::PurgeFiles(uint32_t to, bool manual) { + std::map binlogs; + if (!GetBinlogFiles(&binlogs)) { + LOG(WARNING) << log_path_ << " Could not get binlog files!"; + return false; + } + + int delete_num = 0; + struct stat file_stat; + auto remain_expire_num = static_cast(binlogs.size() - g_pika_conf->expire_logs_nums()); + std::shared_ptr master_db = nullptr; + std::map::iterator it; + for (it = binlogs.begin(); it != binlogs.end(); ++it) { + if ((manual && it->first <= to) // Manual purgelogsto + || (remain_expire_num > 0) // Expire num trigger + || (binlogs.size() - delete_num > 10 // At lease remain 10 files + && stat(((log_path_ + it->second)).c_str(), &file_stat) == 0 && + file_stat.st_mtime < time(nullptr) - g_pika_conf->expire_logs_days() * 24 * 3600)) { // Expire time trigger + // We check this every time to avoid lock when we do file deletion + master_db = g_pika_rm->GetSyncMasterDBByName(DBInfo(db_name_)); + if (!master_db) { + LOG(WARNING) << "DB: " << db_name_ << "Not Found"; + return false; + } + + if (!master_db->BinlogCloudPurge(it->first)) { + LOG(WARNING) << log_path_ << " Could not purge " << (it->first) << ", since it is already be used"; + return false; + } + + // Do delete + if (pstd::DeleteFile(log_path_ + it->second)) { + ++delete_num; + --remain_expire_num; + } else { + LOG(WARNING) << log_path_ << " Purge log file : " << (it->second) << " failed! 
error: delete file failed"; + } + } else { + // Break when face the first one not satisfied + // Since the binlogs is order by the file index + break; + } + } + if (delete_num != 0) { + std::map binlogs; + if (!GetBinlogFiles(&binlogs)) { + LOG(WARNING) << log_path_ << " Could not get binlog files!"; + return false; + } + auto it = binlogs.begin(); + if (it != binlogs.end()) { + UpdateFirstOffset(it->first); + } + } + if (delete_num != 0) { + LOG(INFO) << log_path_ << " Success purge " << delete_num << " binlog file"; + } + return true; +} + +bool StableLog::GetBinlogFiles(std::map* binlogs) { + std::vector children; + int ret = pstd::GetChildren(log_path_, children); + if (ret) { + LOG(WARNING) << log_path_ << " Get all files in log path failed! error:" << ret; + return false; + } + + int64_t index = 0; + std::string sindex; + std::vector::iterator it; + for (it = children.begin(); it != children.end(); ++it) { + if ((*it).compare(0, kBinlogPrefixLen, kBinlogPrefix) != 0) { + continue; + } + sindex = (*it).substr(kBinlogPrefixLen); + if (pstd::string2int(sindex.c_str(), sindex.size(), &index) == 1) { + binlogs->insert(std::pair(static_cast(index), *it)); + } + } + return true; +} + +void StableLog::UpdateFirstOffset(uint32_t filenum) { + PikaBinlogReader binlog_reader; + int res = binlog_reader.Seek(stable_logger_, filenum, 0); + if (res != 0) { + LOG(WARNING) << "Binlog reader init failed"; + return; + } + + BinlogItem item; + BinlogOffset offset; + while (true) { + std::string binlog; + Status s = binlog_reader.Get(&binlog, &(offset.filenum), &(offset.offset)); + if (s.IsEndFile()) { + return; + } + if (!s.ok()) { + LOG(WARNING) << "Binlog reader get failed"; + return; + } + if (!PikaBinlogTransverter::BinlogItemWithoutContentDecode(TypeFirst, binlog, &item)) { + LOG(WARNING) << "Binlog item decode failed"; + return; + } + // exec_time == 0, could be padding binlog + if (item.exec_time() != 0) { + break; + } + } + + std::lock_guard l(offset_rwlock_); + first_offset_.b_offset = offset; + first_offset_.l_offset.term = item.term_id(); + first_offset_.l_offset.index = item.logic_id(); +} + +Status StableLog::PurgeFileAfter(uint32_t filenum) { + std::map binlogs; + bool res = GetBinlogFiles(&binlogs); + if (!res) { + return Status::Corruption("GetBinlogFiles failed"); + } + for (auto& it : binlogs) { + if (it.first > filenum) { + // Do delete + auto filename = log_path_ + it.second; + if (!pstd::DeleteFile(filename)) { + return Status::IOError("pstd::DeleteFile faield, filename = " + filename); + } + LOG(WARNING) << "Delete file " << filename; + } + } + return Status::OK(); +} + +Status StableLog::TruncateTo(const LogOffset& offset) { + Status s = PurgeFileAfter(offset.b_offset.filenum); + if (!s.ok()) { + return s; + } + return stable_logger_->Truncate(offset.b_offset.filenum, offset.b_offset.offset, offset.l_offset.index); +} diff --git a/tools/pika_migrate/src/pika_statistic.cc b/tools/pika_migrate/src/pika_statistic.cc new file mode 100644 index 0000000000..b7ab7a8c53 --- /dev/null +++ b/tools/pika_migrate/src/pika_statistic.cc @@ -0,0 +1,111 @@ +// Copyright (c) 2015-present, Qihoo, Inc. All rights reserved. +// This source code is licensed under the BSD-style license found in the +// LICENSE file in the root directory of this source tree. An additional grant +// of patent rights can be found in the PATENTS file in the same directory. 
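+
+// Rate math used by ResetLastSecQuerynum() below: the per-second figure is
+// derived from two counter snapshots rather than a timer tick, i.e.
+//   qps = (cur_query - last_query) * 1000000 / (cur_time_us - last_time_us)
+// e.g. 1500 new queries over a 500000 us window report as 3000 qps. The
+// cur_time_us <= last_time clamp only avoids a zero or negative divisor; it
+// does not smooth the series.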
+ +#include "include/pika_statistic.h" + +#include "pstd/include/env.h" + +#include "include/pika_command.h" + +/* QpsStatistic */ + +QpsStatistic::QpsStatistic() + : querynum(0), + write_querynum(0), + last_querynum(0), + last_write_querynum(0), + last_sec_querynum(0), + last_sec_write_querynum(0), + last_time_us(0) {} + +QpsStatistic::QpsStatistic(const QpsStatistic& other) { + querynum = other.querynum.load(); + write_querynum = other.write_querynum.load(); + last_querynum = other.last_querynum.load(); + last_write_querynum = other.last_write_querynum.load(); + last_sec_querynum = other.last_sec_querynum.load(); + last_sec_write_querynum = other.last_sec_write_querynum.load(); + last_time_us = other.last_time_us.load(); +} + +void QpsStatistic::IncreaseQueryNum(bool is_write) { + querynum++; + if (is_write) { + write_querynum++; + } +} + +void QpsStatistic::ResetLastSecQuerynum() { + uint64_t last_query = last_querynum.load(); + uint64_t last_write_query = last_write_querynum.load(); + uint64_t cur_query = querynum.load(); + uint64_t cur_write_query = write_querynum.load(); + uint64_t last_time = last_time_us.load(); + if (cur_write_query < last_write_query) { + cur_write_query = last_write_query; + } + if (cur_query < last_query) { + cur_query = last_query; + } + uint64_t delta_query = cur_query - last_query; + uint64_t delta_write_query = cur_write_query - last_write_query; + uint64_t cur_time_us = pstd::NowMicros(); + if (cur_time_us <= last_time) { + cur_time_us = last_time + 1; + } + uint64_t delta_time_us = cur_time_us - last_time; + last_sec_querynum.store(delta_query * 1000000 / (delta_time_us)); + last_sec_write_querynum.store(delta_write_query * 1000000 / (delta_time_us)); + last_querynum.store(cur_query); + last_write_querynum.store(cur_write_query); + + last_time_us.store(cur_time_us); +} + +/* Statistic */ + +Statistic::Statistic() { + pthread_rwlockattr_t db_stat_rw_attr; + pthread_rwlockattr_init(&db_stat_rw_attr); +} + +QpsStatistic Statistic::DBStat(const std::string& db_name) { + std::shared_lock l(db_stat_rw); + return db_stat[db_name]; +} + +std::unordered_map Statistic::AllDBStat() { + std::shared_lock l(db_stat_rw); + return db_stat; +} + +void Statistic::UpdateDBQps(const std::string& db_name, const std::string& command, bool is_write) { + bool db_exist = true; + std::unordered_map::iterator iter; + { + std::shared_lock l(db_stat_rw); + auto search = db_stat.find(db_name); + if (search == db_stat.end()) { + db_exist = false; + } else { + iter = search; + } + } + if (db_exist) { + iter->second.IncreaseQueryNum(is_write); + } else { + { + std::lock_guard l(db_stat_rw); + db_stat[db_name].IncreaseQueryNum(is_write); + } + } +} + +void Statistic::ResetDBLastSecQuerynum() { + std::shared_lock l(db_stat_rw); + for (auto& stat : db_stat) { + stat.second.ResetLastSecQuerynum(); + } +} diff --git a/tools/pika_migrate/src/pika_stream.cc b/tools/pika_migrate/src/pika_stream.cc new file mode 100644 index 0000000000..3bddf8c564 --- /dev/null +++ b/tools/pika_migrate/src/pika_stream.cc @@ -0,0 +1,540 @@ +// Copyright (c) 2023-present, Qihoo, Inc. All rights reserved. +// This source code is licensed under the BSD-style license found in the +// LICENSE file in the root directory of this source tree. An additional grant +// of patent rights can be found in the PATENTS file in the same directory. 
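+
+// Command shapes accepted by the parsers in this file, sketched with
+// placeholder values:
+//   XADD mystream [NOMKSTREAM] [MAXLEN|MINID [=|~] threshold] <*|ms-seq> field value ...
+//   XREAD [COUNT n] [BLOCK ms] STREAMS key [key ...] id [id ...]
+//   XREADGROUP GROUP g c [COUNT n] [BLOCK ms] [NOACK] STREAMS key ... id ...
+// LIMIT is recognized but rejected: this port never trims approximately, so
+// = and ~ are both treated as exact.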
+ +#include "include/pika_stream.h" +#include +#include +#include +#include + +#include "glog/logging.h" +#include "include/pika_command.h" +#include "include/pika_db.h" +#include "include/pika_slot_command.h" +#include "include/pika_define.h" +#include "storage/storage.h" + +// s : rocksdb::Status +// res : CmdRes +#define TRY_CATCH_ERROR(s, res) \ + do { \ + if (!s.ok()) { \ + LOG(ERROR) << s.ToString(); \ + res.SetRes(CmdRes::kErrOther, s.ToString()); \ + return; \ + } \ + } while (0) + +void ParseAddOrTrimArgsOrReply(CmdRes &res, const PikaCmdArgsType &argv, storage::StreamAddTrimArgs &args, int *idpos, + bool is_xadd) { + int i = 2; + bool limit_given = false; + for (; i < argv.size(); ++i) { + size_t moreargs = argv.size() - 1 - i; + const std::string &opt = argv[i]; + + if (is_xadd && strcasecmp(opt.c_str(), "*") == 0 && opt.size() == 1) { + // case: XADD mystream * field value [field value ...] + break; + + } else if (strcasecmp(opt.c_str(), "maxlen") == 0 && moreargs) { + // case: XADD mystream ... MAXLEN [= | ~] threshold ... + if (args.trim_strategy != storage::StreamTrimStrategy::TRIM_STRATEGY_NONE) { + res.SetRes(CmdRes::kSyntaxErr, "syntax error, MAXLEN and MINID options at the same time are not compatible"); + return; + } + const auto &next = argv[i + 1]; + if (moreargs >= 2 && (next == "~" || next == "=")) { + // we allways not do approx trim, so we ignore the ~ and = + i++; + } + // parse threshold as uint64 + if (!storage::StreamUtils::string2uint64(argv[i + 1].c_str(), args.maxlen)) { + res.SetRes(CmdRes::kInvalidParameter, "Invalid MAXLEN argument"); + } + i++; + args.trim_strategy = storage::StreamTrimStrategy::TRIM_STRATEGY_MAXLEN; + args.trim_strategy_arg_idx = i; + + } else if (strcasecmp(opt.c_str(), "minid") == 0 && moreargs) { + // case: XADD mystream ... MINID [= | ~] threshold ... + if (args.trim_strategy != storage::StreamTrimStrategy::TRIM_STRATEGY_NONE) { + res.SetRes(CmdRes::kSyntaxErr, "syntax error, MAXLEN and MINID options at the same time are not compatible"); + return; + } + const auto &next = argv[i + 1]; + if (moreargs >= 2 && (next == "~" || next == "=") && next.size() == 1) { + // we allways not do approx trim, so we ignore the ~ and = + i++; + } + // parse threshold as stremID + if (!storage::StreamUtils::StreamParseID(argv[i + 1], args.minid, 0)) { + res.SetRes(CmdRes::kInvalidParameter, "Invalid stream ID specified as stream "); + return; + } + i++; + args.trim_strategy = storage::StreamTrimStrategy::TRIM_STRATEGY_MINID; + args.trim_strategy_arg_idx = i; + + } else if (strcasecmp(opt.c_str(), "limit") == 0 && moreargs) { + // case: XADD mystream ... ~ threshold LIMIT count ... + // we do not need approx trim, so we do not support LIMIT option + res.SetRes(CmdRes::kSyntaxErr, "syntax error, Pika do not support LIMIT option"); + return; + + } else if (is_xadd && strcasecmp(opt.c_str(), "nomkstream") == 0) { + // case: XADD mystream ... NOMKSTREAM ... + args.no_mkstream = true; + + } else if (is_xadd) { + // case: XADD mystream ... ID ... 
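+      // A sketch of what reaches this branch: "XADD mystream
+      // 1526919030474-55 field value" supplies an explicit ms-seq id, and a
+      // bare ms part such as "1526919030474" is also accepted, with
+      // seq_given recording whether the "-seq" half was present.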
+ if (!storage::StreamUtils::StreamParseStrictID(argv[i], args.id, 0, &args.seq_given)) { + res.SetRes(CmdRes::kInvalidParameter, "Invalid stream ID specified as stream "); + return; + } + args.id_given = true; + break; + } else { + res.SetRes(CmdRes::kSyntaxErr); + return; + } + } // end for + + if (idpos) { + *idpos = i; + } else if (is_xadd) { + res.SetRes(CmdRes::kErrOther, "idpos is null, xadd comand must parse idpos"); + } +} + +/* XREADGROUP GROUP group consumer [COUNT count] [BLOCK milliseconds] + * [NOACK] STREAMS key [key ...] id [id ...] + * XREAD [COUNT count] [BLOCK milliseconds] STREAMS key [key ...] id + * [id ...] */ +void ParseReadOrReadGroupArgsOrReply(CmdRes &res, const PikaCmdArgsType &argv, storage::StreamReadGroupReadArgs &args, + bool is_xreadgroup) { + int streams_arg_idx{0}; // the index of stream keys arg + size_t streams_cnt{0}; // the count of stream keys + + for (int i = 1; i < argv.size(); ++i) { + size_t moreargs = argv.size() - i - 1; + const std::string &o = argv[i]; + if (strcasecmp(o.c_str(), "BLOCK") == 0 && moreargs) { + i++; + if (!storage::StreamUtils::string2uint64(argv[i].c_str(), args.block)) { + res.SetRes(CmdRes::kInvalidParameter, "Invalid BLOCK argument"); + return; + } + } else if (strcasecmp(o.c_str(), "COUNT") == 0 && moreargs) { + i++; + if (!storage::StreamUtils::string2int32(argv[i].c_str(), args.count)) { + res.SetRes(CmdRes::kInvalidParameter, "Invalid COUNT argument"); + return; + } + if (args.count < 0) args.count = 0; + } else if (strcasecmp(o.c_str(), "STREAMS") == 0 && moreargs) { + streams_arg_idx = i + 1; + streams_cnt = argv.size() - streams_arg_idx; + if (streams_cnt % 2 != 0) { + res.SetRes(CmdRes::kSyntaxErr, "Unbalanced list of streams: for each stream key an ID must be specified"); + return; + } + streams_cnt /= 2; + break; + } else if (strcasecmp(o.c_str(), "GROUP") == 0 && moreargs >= 2) { + if (!is_xreadgroup) { + res.SetRes(CmdRes::kSyntaxErr, "The GROUP option is only supported by XREADGROUP. You called XREAD instead."); + return; + } + args.group_name = argv[i + 1]; + args.consumer_name = argv[i + 2]; + i += 2; + } else if (strcasecmp(o.c_str(), "NOACK") == 0) { + if (!is_xreadgroup) { + res.SetRes(CmdRes::kSyntaxErr, "The NOACK option is only supported by XREADGROUP. 
You called XREAD instead.");
+        return;
+      }
+      args.noack_ = true;
+    } else {
+      res.SetRes(CmdRes::kSyntaxErr);
+      return;
+    }
+  }
+
+  if (streams_arg_idx == 0) {
+    res.SetRes(CmdRes::kSyntaxErr);
+    return;
+  }
+
+  if (is_xreadgroup && args.group_name.empty()) {
+    res.SetRes(CmdRes::kSyntaxErr, "Missing GROUP option for XREADGROUP");
+    return;
+  }
+
+  // collect keys and ids
+  for (auto i = streams_arg_idx + streams_cnt; i < argv.size(); ++i) {
+    auto key_idx = i - streams_cnt;
+    args.keys.push_back(argv[key_idx]);
+    args.unparsed_ids.push_back(argv[i]);
+  }
+}
+
+void AppendMessagesToRes(CmdRes &res, std::vector<storage::IdMessage> &id_messages, const DB* db) {
+  assert(db);
+  res.AppendArrayLenUint64(id_messages.size());
+  for (auto &fv : id_messages) {
+    std::vector<std::string> message;
+    if (!storage::StreamUtils::DeserializeMessage(fv.value, message)) {
+      LOG(ERROR) << "Deserialize message failed";
+      res.SetRes(CmdRes::kErrOther, "Deserialize message failed");
+      return;
+    }
+
+    assert(message.size() % 2 == 0);
+    res.AppendArrayLen(2);
+    storage::streamID sid;
+    sid.DeserializeFrom(fv.field);
+    res.AppendString(sid.ToString());  // the field here is the stream id
+    res.AppendArrayLenUint64(message.size());
+    for (auto &m : message) {
+      res.AppendString(m);
+    }
+  }
+}
+
+void XAddCmd::DoInitial() {
+  if (!CheckArg(argv_.size())) {
+    res_.SetRes(CmdRes::kWrongNum, kCmdNameXAdd);
+    return;
+  }
+  key_ = argv_[1];
+
+  int idpos{-1};
+  ParseAddOrTrimArgsOrReply(res_, argv_, args_, &idpos, true);
+  if (res_.ret() != CmdRes::kNone) {
+    return;
+  } else if (idpos < 0) {
+    LOG(ERROR) << "Invalid idpos: " << idpos;
+    res_.SetRes(CmdRes::kErrOther);
+    return;
+  }
+
+  field_pos_ = idpos + 1;
+  if ((argv_.size() - field_pos_) % 2 == 1 || (argv_.size() - field_pos_) < 2) {
+    res_.SetRes(CmdRes::kWrongNum, kCmdNameXAdd);
+    return;
+  }
+}
+
+void XAddCmd::Do() {
+  std::string message;
+  if (!storage::StreamUtils::SerializeMessage(argv_, message, field_pos_)) {
+    res_.SetRes(CmdRes::kErrOther, "Serialize message failed");
+    return;
+  }
+
+  auto s = db_->storage()->XAdd(key_, message, args_);
+  if (s.IsInvalidArgument()) {
+    res_.SetRes(CmdRes::kMultiKey);
+    return;
+  } else if (!s.ok()) {
+    res_.SetRes(CmdRes::kErrOther, s.ToString());
+    return;
+  }
+
+  // rewrite the id in argv_ if it was not fully given
+  if (!args_.id_given || !args_.seq_given) {
+    assert(field_pos_ > 0);
+    argv_[field_pos_ - 1] = args_.id.ToString();
+  }
+
+  res_.AppendString(args_.id.ToString());
+  AddSlotKey("m", key_, db_);
+}
+
+void XRangeCmd::DoInitial() {
+  if (!CheckArg(argv_.size())) {
+    res_.SetRes(CmdRes::kWrongNum, kCmdNameXRange);
+    return;
+  }
+  key_ = argv_[1];
+  if (!storage::StreamUtils::StreamParseIntervalId(argv_[2], args_.start_sid, &args_.start_ex, 0) ||
+      !storage::StreamUtils::StreamParseIntervalId(argv_[3], args_.end_sid, &args_.end_ex, UINT64_MAX)) {
+    res_.SetRes(CmdRes::kInvalidParameter, "Invalid stream ID specified as stream ");
+    return;
+  }
+  if (args_.start_ex && args_.start_sid.ms == UINT64_MAX && args_.start_sid.seq == UINT64_MAX) {
+    res_.SetRes(CmdRes::kInvalidParameter, "invalid start id");
+    return;
+  }
+  if (args_.end_ex && args_.end_sid.ms == 0 && args_.end_sid.seq == 0) {
+    res_.SetRes(CmdRes::kInvalidParameter, "invalid end id");
+    return;
+  }
+  if (argv_.size() == 6) {
+    if (strcasecmp(argv_[4].data(), "count") != 0) {
+      res_.SetRes(CmdRes::kSyntaxErr);
+      return;
+    }
+    if (!storage::StreamUtils::string2int32(argv_[5].c_str(), args_.limit)) {
+      res_.SetRes(CmdRes::kInvalidParameter, "COUNT should be an integer greater than 0 and not bigger than INT32_MAX");
+      return;
+    }
+  }
+}
+
+void XRangeCmd::Do() {
+  std::vector<storage::IdMessage> id_messages;
+
+  if (args_.start_sid <= args_.end_sid) {
+    auto s = db_->storage()->XRange(key_, args_, id_messages);
+    if (s.IsInvalidArgument()) {
+      res_.SetRes(CmdRes::kMultiKey);
+      return;
+    } else if (!s.ok() && !s.IsNotFound()) {
+      res_.SetRes(CmdRes::kErrOther, s.ToString());
+      return;
+    }
+  }
+  AppendMessagesToRes(res_, id_messages, db_.get());
+}
+
+void XRevrangeCmd::Do() {
+  std::vector<storage::IdMessage> id_messages;
+
+  if (args_.start_sid >= args_.end_sid) {
+    auto s = db_->storage()->XRevrange(key_, args_, id_messages);
+    if (s.IsInvalidArgument()) {
+      res_.SetRes(CmdRes::kMultiKey);
+      return;
+    } else if (!s.ok() && !s.IsNotFound()) {
+      res_.SetRes(CmdRes::kErrOther, s.ToString());
+      return;
+    }
+  }
+
+  AppendMessagesToRes(res_, id_messages, db_.get());
+}
+
+void XDelCmd::DoInitial() {
+  if (!CheckArg(argv_.size())) {
+    res_.SetRes(CmdRes::kWrongNum, kCmdNameXDel);
+    return;
+  }
+
+  key_ = argv_[1];
+  for (size_t i = 2; i < argv_.size(); i++) {
+    storage::streamID id;
+    if (!storage::StreamUtils::StreamParseStrictID(argv_[i], id, 0, nullptr)) {
+      res_.SetRes(CmdRes::kInvalidParameter, "Invalid stream ID specified as stream ");
+      return;
+    }
+    if (res_.ret() != CmdRes::kNone) {
+      return;
+    }
+    ids_.emplace_back(id);
+  }
+}
+
+void XDelCmd::Do() {
+  int32_t count{0};
+  auto s = db_->storage()->XDel(key_, ids_, count);
+  if (s.IsInvalidArgument()) {
+    res_.SetRes(CmdRes::kMultiKey);
+    return;
+  } else if (!s.ok() && !s.IsNotFound()) {
+    res_.SetRes(CmdRes::kErrOther, s.ToString());
+    return;
+  }
+
+  if (count > INT_MAX) {
+    return res_.SetRes(CmdRes::kErrOther, "count is larger than INT_MAX");
+  }
+
+  res_.AppendInteger(count);
+}
+
+void XLenCmd::DoInitial() {
+  if (!CheckArg(argv_.size())) {
+    res_.SetRes(CmdRes::kWrongNum, kCmdNameXLen);
+    return;
+  }
+  key_ = argv_[1];
+}
+
+void XLenCmd::Do() {
+  int32_t len{0};
+  auto s = db_->storage()->XLen(key_, len);
+  if (s.IsNotFound()) {
+    res_.SetRes(CmdRes::kNotFound);
+    return;
+  } else if (s.IsInvalidArgument()) {
+    res_.SetRes(CmdRes::kMultiKey);
+    return;
+  } else if (!s.ok()) {
+    res_.SetRes(CmdRes::kErrOther, s.ToString());
+    return;
+  }
+
+  if (len > INT_MAX) {
+    return res_.SetRes(CmdRes::kErrOther, "stream's length is larger than INT_MAX");
+  }
+
+  res_.AppendInteger(len);
+  return;
+}
+
+void XReadCmd::DoInitial() {
+  if (!CheckArg(argv_.size())) {
+    res_.SetRes(CmdRes::kWrongNum, kCmdNameXRead);
+    return;
+  }
+
+  ParseReadOrReadGroupArgsOrReply(res_, argv_, args_, false);
+}
+
+void XReadCmd::Do() {
+  std::vector<std::vector<storage::IdMessage>> results;
+  // a nonexistent key does not trigger an error, it is simply ignored;
+  // keep the keys that did resolve and return them to the client.
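+  // A sketch: "XREAD COUNT 5 STREAMS s1 s2 0-0 0-0" with only s1 populated
+  // replies with a single [s1, entries] pair instead of erroring on s2.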
+  std::vector<std::string> reserved_keys;
+  auto s = db_->storage()->XRead(args_, results, reserved_keys);
+
+  if (s.IsInvalidArgument()) {
+    res_.SetRes(CmdRes::kMultiKey);
+    return;
+  } else if (!s.ok() && s.ToString() ==
+                 "The > ID can be specified only when calling "
+                 "XREADGROUP using the GROUP <group> "
+                 "<consumer> option.") {
+    res_.SetRes(CmdRes::kSyntaxErr, s.ToString());
+    return;
+  } else if (!s.ok()) {
+    res_.SetRes(CmdRes::kErrOther, s.ToString());
+    return;
+  }
+
+  if (results.empty()) {
+    res_.AppendArrayLen(-1);
+    return;
+  }
+
+  assert(results.size() == reserved_keys.size());
+
+  // append one [key, entries] pair per resolved stream
+  res_.AppendArrayLenUint64(results.size());
+  for (size_t i = 0; i < results.size(); ++i) {
+    res_.AppendArrayLen(2);
+    res_.AppendString(reserved_keys[i]);
+    AppendMessagesToRes(res_, results[i], db_.get());
+  }
+}
+
+void XTrimCmd::DoInitial() {
+  if (!CheckArg(argv_.size())) {
+    res_.SetRes(CmdRes::kWrongNum, kCmdNameXTrim);
+    return;
+  }
+
+  key_ = argv_[1];
+  ParseAddOrTrimArgsOrReply(res_, argv_, args_, nullptr, false);
+  if (res_.ret() != CmdRes::kNone) {
+    return;
+  }
+}
+
+void XTrimCmd::Do() {
+  int32_t count{0};
+  auto s = db_->storage()->XTrim(key_, args_, count);
+  if (s.IsInvalidArgument()) {
+    res_.SetRes(CmdRes::kMultiKey);
+    return;
+  } else if (!s.ok() && !s.IsNotFound()) {
+    res_.SetRes(CmdRes::kErrOther, s.ToString());
+    return;
+  }
+
+  if (count > INT_MAX) {
+    return res_.SetRes(CmdRes::kErrOther, "count is larger than INT_MAX");
+  }
+
+  res_.AppendInteger(count);
+  return;
+}
+
+void XInfoCmd::DoInitial() {
+  if (!CheckArg(argv_.size())) {
+    res_.SetRes(CmdRes::kWrongNum, kCmdNameXInfo);
+    return;
+  }
+
+  subcmd_ = argv_[1];
+  key_ = argv_[2];
+  if (!strcasecmp(subcmd_.c_str(), "STREAM")) {
+    if (argv_.size() > 3 && strcasecmp(argv_[3].c_str(), "FULL") == 0) {
+      is_full_ = true;
+      if (argv_.size() > 4 && !storage::StreamUtils::string2uint64(argv_[4].c_str(), count_)) {
+        res_.SetRes(CmdRes::kInvalidParameter, "invalid count");
+        return;
+      }
+    } else if (argv_.size() > 3) {
+      res_.SetRes(CmdRes::kSyntaxErr);
+      return;
+    }
+
+  } else if (!strcasecmp(subcmd_.c_str(), "GROUPS")) {
+    // XINFO GROUPS <key> takes no further arguments
+    if (argv_.size() != 3) {
+      res_.SetRes(CmdRes::kSyntaxErr);
+      return;
+    }
+
+  } else if (!strcasecmp(subcmd_.c_str(), "CONSUMERS")) {
+    // XINFO CONSUMERS <key> <groupname>
+    if (argv_.size() != 4) {
+      res_.SetRes(CmdRes::kSyntaxErr);
+      return;
+    }
+    cgroupname_ = argv_[3];
+  } else {
+    res_.SetRes(CmdRes::kSyntaxErr);
+    return;
+  }
+}
+
+void XInfoCmd::Do() {
+  if (!strcasecmp(subcmd_.c_str(), "STREAM")) {
+    this->StreamInfo(db_);
+  } else if (!strcasecmp(subcmd_.c_str(), "GROUPS")) {
+    // Korpse: TODO:
+    // this->GroupsInfo(db_);
+  } else if (!strcasecmp(subcmd_.c_str(), "CONSUMERS")) {
+    // Korpse: TODO:
+    // this->ConsumersInfo(db_);
+  } else {
+    res_.SetRes(CmdRes::kSyntaxErr);
+    return;
+  }
+}
+
+void XInfoCmd::StreamInfo(std::shared_ptr<DB>& db) {
+  storage::StreamInfoResult info;
+  auto s = db_->storage()->XInfo(key_, info);
+  if (s.IsInvalidArgument()) {
+    res_.SetRes(CmdRes::kMultiKey);
+    return;
+  } else if (!s.ok() && !s.IsNotFound()) {
+    res_.SetRes(CmdRes::kErrOther, s.ToString());
+    return;
+  } else if (s.IsNotFound()) {
+    res_.SetRes(CmdRes::kNotFound);
+    return;
+  }
+
+  // append the stream info
+  res_.AppendArrayLen(10);
+  res_.AppendString("length");
+  res_.AppendInteger(static_cast<int64_t>(info.length));
+  res_.AppendString("last-generated-id");
+  res_.AppendString(info.last_id_str);
+  res_.AppendString("max-deleted-entry-id");
+  res_.AppendString(info.max_deleted_entry_id_str);
+ 
res_.AppendString("entries-added"); + res_.AppendInteger(static_cast(info.entries_added)); + res_.AppendString("recorded-first-entry-id"); + res_.AppendString(info.first_id_str); +} diff --git a/tools/pika_migrate/src/pika_transaction.cc b/tools/pika_migrate/src/pika_transaction.cc new file mode 100644 index 0000000000..85381dcf8d --- /dev/null +++ b/tools/pika_migrate/src/pika_transaction.cc @@ -0,0 +1,313 @@ +// Copyright (c) 2018-present, Qihoo, Inc. All rights reserved. +// This source code is licensed under the BSD-style license found in the +// LICENSE file in the root directory of this source tree. An additional grant +// of patent rights can be found in the PATENTS file in the same directory. + +#include + +#include "include/pika_transaction.h" +#include "include/pika_admin.h" +#include "include/pika_client_conn.h" +#include "include/pika_define.h" +#include "include/pika_list.h" +#include "include/pika_rm.h" +#include "include/pika_server.h" +#include "src/pstd/include/scope_record_lock.h" + +extern std::unique_ptr g_pika_server; +extern std::unique_ptr g_pika_rm; + +void MultiCmd::Do() { + auto conn = GetConn(); + auto client_conn = std::dynamic_pointer_cast(conn); + if (conn == nullptr || client_conn == nullptr) { + res_.SetRes(CmdRes::kErrOther, name()); + return; + } + if (client_conn->IsInTxn()) { + res_.SetRes(CmdRes::kErrOther, "MULTI calls can not be nested"); + return; + } + client_conn->SetTxnStartState(true); + res_.SetRes(CmdRes::kOk); +} + +void MultiCmd::DoInitial() { + if (!CheckArg(argv_.size())) { + res_.SetRes(CmdRes::kWrongNum, name()); + return; + } +} + +void ExecCmd::Do() { + auto conn = GetConn(); + auto client_conn = std::dynamic_pointer_cast(conn); + std::vector res_vec = {}; + std::vector> resp_strs; + for (size_t i = 0; i < cmds_.size(); ++i) { + resp_strs.emplace_back(std::make_shared()); + } + auto resp_strs_iter = resp_strs.begin(); + std::for_each(cmds_.begin(), cmds_.end(), [&client_conn, &res_vec, &resp_strs_iter](CmdInfo& each_cmd_info) { + each_cmd_info.cmd_->SetResp(*resp_strs_iter++); + auto& cmd = each_cmd_info.cmd_; + auto& db = each_cmd_info.db_; + auto sync_db = each_cmd_info.sync_db_; + cmd->res() = {}; + if (cmd->name() == kCmdNameFlushall) { + auto flushall = std::dynamic_pointer_cast(cmd); + flushall->FlushAllWithoutLock(); + client_conn->SetTxnFailedIfKeyExists(); + } else if (cmd->name() == kCmdNameFlushdb) { + auto flushdb = std::dynamic_pointer_cast(cmd); + flushdb->DoWithoutLock(); + if (cmd->res().ok()) { + cmd->res().SetRes(CmdRes::kOk); + } + client_conn->SetTxnFailedIfKeyExists(each_cmd_info.db_->GetDBName()); + } else { + cmd->Do(); + if (cmd->res().ok() && cmd->is_write()) { + cmd->DoBinlog(); + auto db_keys = cmd->current_key(); + for (auto& item : db_keys) { + item = cmd->db_name().append(item); + } + if (cmd->IsNeedUpdateCache()) { + cmd->DoUpdateCache(); + } + client_conn->SetTxnFailedFromKeys(db_keys); + } + } + res_vec.emplace_back(cmd->res()); + }); + + res_.AppendArrayLen(res_vec.size()); + for (auto& r : res_vec) { + res_.AppendStringRaw(r.message()); + } +} + +void ExecCmd::Execute() { + auto conn = GetConn(); + auto client_conn = std::dynamic_pointer_cast(conn); + if (client_conn == nullptr) { + res_.SetRes(CmdRes::kErrOther, name()); + return; + } + if (!client_conn->IsInTxn()) { + res_.SetRes(CmdRes::kErrOther, "EXEC without MULTI"); + return; + } + if (IsTxnFailedAndSetState()) { + client_conn->ExitTxn(); + return; + } + SetCmdsVec(); + Lock(); + Do(); + + Unlock(); + ServeToBLrPopWithKeys(); + 
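+  // list_cmd_ only staged LPUSH/RPUSH entries so the wake-up above could
+  // serve blocked BLPOP/BRPOP clients after the locks were dropped; it can
+  // be cleared before leaving the transaction.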
list_cmd_.clear(); + client_conn->ExitTxn(); +} + +void ExecCmd::DoInitial() { + if (!CheckArg(argv_.size())) { + res_.SetRes(CmdRes::kWrongNum, name()); + return; + } + auto conn = GetConn(); + auto client_conn = std::dynamic_pointer_cast(conn); + if (client_conn == nullptr) { + res_.SetRes(CmdRes::kErrOther, name()); + return; + } +} + +bool ExecCmd::IsTxnFailedAndSetState() { + auto conn = GetConn(); + auto client_conn = std::dynamic_pointer_cast(conn); + if (client_conn->IsTxnInitFailed()) { + res_.SetRes(CmdRes::kTxnAbort, "Transaction discarded because of previous errors."); + return true; + } + if (client_conn->IsTxnWatchFailed()) { + res_.AppendStringLen(-1); + return true; + } + return false; +} + +void ExecCmd::Lock() { + g_pika_server->DBLockShared(); + std::for_each(lock_db_.begin(), lock_db_.end(), [](auto& need_lock_db) { + need_lock_db->DBLock(); + }); + if (is_lock_rm_dbs_) { + g_pika_rm->DBLock(); + } + + std::for_each(r_lock_dbs_.begin(), r_lock_dbs_.end(), [this](auto& need_lock_db) { + if (lock_db_keys_.count(need_lock_db) != 0) { + pstd::lock::MultiRecordLock record_lock(need_lock_db->LockMgr()); + record_lock.Lock(lock_db_keys_[need_lock_db]); + } + need_lock_db->DBLockShared(); + }); +} + +void ExecCmd::Unlock() { + std::for_each(r_lock_dbs_.begin(), r_lock_dbs_.end(), [this](auto& need_lock_db) { + if (lock_db_keys_.count(need_lock_db) != 0) { + pstd::lock::MultiRecordLock record_lock(need_lock_db->LockMgr()); + record_lock.Unlock(lock_db_keys_[need_lock_db]); + } + need_lock_db->DBUnlockShared(); + }); + if (is_lock_rm_dbs_) { + g_pika_rm->DBUnlock(); + } + std::for_each(lock_db_.begin(), lock_db_.end(), [](auto& need_lock_db) { + need_lock_db->DBUnlock(); + }); + g_pika_server->DBUnlockShared(); +} + +void ExecCmd::SetCmdsVec() { + auto client_conn = std::dynamic_pointer_cast(GetConn()); + auto cmd_que = client_conn->GetTxnCmdQue(); + + while (!cmd_que.empty()) { + auto cmd = cmd_que.front(); + auto cmd_db = client_conn->GetCurrentTable(); + auto db = g_pika_server->GetDB(cmd_db); + auto sync_db = g_pika_rm->GetSyncMasterDBByName(DBInfo(cmd->db_name())); + cmds_.emplace_back(cmd, db, sync_db); + if (cmd->name() == kCmdNameSelect) { + cmd->Do(); + } else if (cmd->name() == kCmdNameFlushdb) { + is_lock_rm_dbs_ = true; + lock_db_.emplace(g_pika_server->GetDB(cmd_db)); + } else if (cmd->name() == kCmdNameFlushall) { + is_lock_rm_dbs_ = true; + for (const auto& db_item : g_pika_server->GetDB()) { + lock_db_.emplace(db_item.second); + } + } else { + r_lock_dbs_.emplace(db); + if (lock_db_keys_.count(db) == 0) { + lock_db_keys_.emplace(db, std::vector{}); + } + auto cmd_keys = cmd->current_key(); + lock_db_keys_[db].insert(lock_db_keys_[db].end(), cmd_keys.begin(), cmd_keys.end()); + if (cmd->name() == kCmdNameLPush || cmd->name() == kCmdNameRPush) { + list_cmd_.insert(list_cmd_.end(), cmds_.back()); + } + } + cmd_que.pop(); + } +} + +void ExecCmd::ServeToBLrPopWithKeys() { + for (auto each_list_cmd : list_cmd_) { + auto push_keys = each_list_cmd.cmd_->current_key(); + //PS: currently, except for blpop/brpop, there are three cmds inherited from BlockingBaseCmd: lpush, rpush, rpoplpush + //For rpoplpush which has 2 keys(source and receiver), push_keys[0] fetchs the receiver, push_keys[1] fetchs the source.(see RpopLpushCmd::current_key() + auto push_key = push_keys[0]; + if (auto push_list_cmd = std::dynamic_pointer_cast(each_list_cmd.cmd_); + push_list_cmd != nullptr) { + push_list_cmd->TryToServeBLrPopWithThisKey(push_key, each_list_cmd.db_); + } + } +} + +void 
WatchCmd::Execute() { + Do(); +} + +void WatchCmd::Do() { + auto mp = std::map{}; + for (const auto& key : keys_) { + auto type_count = db_->storage()->IsExist(key, &mp); + if (type_count > 1) { + res_.SetRes(CmdRes::CmdRet::kErrOther, "EXEC WATCH watch key must be unique"); + return; + } + mp.clear(); + } + + + auto conn = GetConn(); + auto client_conn = std::dynamic_pointer_cast(conn); + if (client_conn == nullptr) { + res_.SetRes(CmdRes::kErrOther, name()); + return; + } + if (client_conn->IsInTxn()) { + res_.SetRes(CmdRes::CmdRet::kErrOther, "WATCH inside MULTI is not allowed"); + return; + } + client_conn->AddKeysToWatch(db_keys_); + res_.SetRes(CmdRes::kOk); +} + +void WatchCmd::DoInitial() { + if (!CheckArg(argv_.size())) { + res_.SetRes(CmdRes::kWrongNum, name()); + return; + } + size_t pos = 1; + while (pos < argv_.size()) { + keys_.emplace_back(argv_[pos]); + db_keys_.push_back(db_name() + "_" + argv_[pos++]); + } +} + +void UnwatchCmd::Do() { + auto conn = GetConn(); + auto client_conn = std::dynamic_pointer_cast(conn); + if (client_conn == nullptr) { + res_.SetRes(CmdRes::kErrOther, name()); + return; + } + if (client_conn->IsTxnExecing()) { + res_.SetRes(CmdRes::CmdRet::kOk); + return ; + } + client_conn->RemoveWatchedKeys(); + if (client_conn->IsTxnWatchFailed()) { + client_conn->SetTxnWatchFailState(false); + } + res_.SetRes(CmdRes::CmdRet::kOk); +} + +void UnwatchCmd::DoInitial() { + if (!CheckArg(argv_.size())) { + res_.SetRes(CmdRes::kWrongNum, name()); + return; + } +} + +void DiscardCmd::DoInitial() { + if (!CheckArg(argv_.size())) { + res_.SetRes(CmdRes::kWrongNum, name()); + return; + } +} + +void DiscardCmd::Do() { + auto conn = GetConn(); + auto client_conn = std::dynamic_pointer_cast(conn); + if (client_conn == nullptr) { + res_.SetRes(CmdRes::kErrOther, name()); + return; + } + if (!client_conn->IsInTxn()) { + res_.SetRes(CmdRes::kErrOther, "DISCARD without MULTI"); + return; + } + client_conn->ExitTxn(); + res_.SetRes(CmdRes::CmdRet::kOk); +} diff --git a/tools/pika_migrate/src/pika_zset.cc b/tools/pika_migrate/src/pika_zset.cc new file mode 100644 index 0000000000..6b62dbf93b --- /dev/null +++ b/tools/pika_migrate/src/pika_zset.cc @@ -0,0 +1,1544 @@ +// Copyright (c) 2015-present, Qihoo, Inc. All rights reserved. +// This source code is licensed under the BSD-style license found in the +// LICENSE file in the root directory of this source tree. An additional grant +// of patent rights can be found in the PATENTS file in the same directory. 
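+
+// Arity sketch for ZADD as parsed below: "ZADD key score member
+// [score member ...]" gives argv_.size() == 2 + 2*k, so an odd size is a
+// syntax error and scores sit at the even indices starting from 2.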
+
+#include "include/pika_zset.h"
+#include "include/pika_slot_command.h"
+
+#include
+
+#include "pstd/include/pstd_string.h"
+#include "include/pika_cache.h"
+
+void ZAddCmd::DoInitial() {
+  if (!CheckArg(argv_.size())) {
+    res_.SetRes(CmdRes::kWrongNum, kCmdNameZAdd);
+    return;
+  }
+  size_t argc = argv_.size();
+  if (argc % 2 == 1) {
+    res_.SetRes(CmdRes::kSyntaxErr);
+    return;
+  }
+  key_ = argv_[1];
+  score_members.clear();
+  double score;
+  size_t index = 2;
+  for (; index < argc; index += 2) {
+    if (pstd::string2d(argv_[index].data(), argv_[index].size(), &score) == 0) {
+      res_.SetRes(CmdRes::kInvalidFloat);
+      return;
+    }
+    score_members.push_back({score, argv_[index + 1]});
+  }
+}
+
+void ZAddCmd::Do() {
+  int32_t count = 0;
+  s_ = db_->storage()->ZAdd(key_, score_members, &count);
+  if (s_.ok()) {
+    res_.AppendInteger(count);
+    AddSlotKey("z", key_, db_);
+  } else if (s_.IsInvalidArgument()) {
+    res_.SetRes(CmdRes::kMultiKey);
+  } else {
+    res_.SetRes(CmdRes::kErrOther, s_.ToString());
+  }
+}
+
+void ZAddCmd::DoThroughDB() {
+  Do();
+}
+
+void ZAddCmd::DoUpdateCache() {
+  if (s_.ok()) {
+    db_->cache()->ZAddIfKeyExist(key_, score_members);
+  }
+}
+
+void ZCardCmd::DoInitial() {
+  if (!CheckArg(argv_.size())) {
+    res_.SetRes(CmdRes::kWrongNum, kCmdNameZCard);
+    return;
+  }
+  key_ = argv_[1];
+}
+
+void ZCardCmd::Do() {
+  int32_t card = 0;
+  s_ = db_->storage()->ZCard(key_, &card);
+  if (s_.ok() || s_.IsNotFound()) {
+    res_.AppendInteger(card);
+  } else if (s_.IsInvalidArgument()) {
+    res_.SetRes(CmdRes::kMultiKey);
+  } else {
+    res_.SetRes(CmdRes::kErrOther, "zcard error");
+  }
+}
+
+void ZCardCmd::ReadCache() {
+  res_.SetRes(CmdRes::kCacheMiss);
+}
+
+void ZCardCmd::DoThroughDB() {
+  res_.clear();
+  Do();
+}
+
+void ZCardCmd::DoUpdateCache() {
+  return;
+}
+
+void ZScanCmd::DoInitial() {
+  if (!CheckArg(argv_.size())) {
+    res_.SetRes(CmdRes::kWrongNum, kCmdNameZScan);
+    return;
+  }
+  key_ = argv_[1];
+  if (pstd::string2int(argv_[2].data(), argv_[2].size(), &cursor_) == 0) {
+    res_.SetRes(CmdRes::kWrongNum, kCmdNameZScan);
+    return;
+  }
+  size_t argc = argv_.size();
+  size_t index = 3;
+  while (index < argc) {
+    std::string opt = argv_[index];
+    if ((strcasecmp(opt.data(), "match") == 0) || (strcasecmp(opt.data(), "count") == 0)) {
+      index++;
+      if (index >= argc) {
+        res_.SetRes(CmdRes::kSyntaxErr);
+        return;
+      }
+      if (strcasecmp(opt.data(), "match") == 0) {
+        pattern_ = argv_[index];
+      } else if (pstd::string2int(argv_[index].data(), argv_[index].size(), &count_) == 0) {
+        res_.SetRes(CmdRes::kInvalidInt);
+        return;
+      }
+    } else {
+      res_.SetRes(CmdRes::kSyntaxErr);
+      return;
+    }
+    index++;
+  }
+  if (count_ < 0) {
+    res_.SetRes(CmdRes::kSyntaxErr);
+    return;
+  }
+}
+
+void ZScanCmd::Do() {
+  int64_t next_cursor = 0;
+  std::vector<storage::ScoreMember> score_members;
+  rocksdb::Status s = db_->storage()->ZScan(key_, cursor_, pattern_, count_, &score_members, &next_cursor);
+  if (s.ok() || s.IsNotFound()) {
+    res_.AppendContent("*2");
+    char buf[32];
+    int64_t len = pstd::ll2string(buf, sizeof(buf), next_cursor);
+    res_.AppendStringLen(len);
+    res_.AppendContent(buf);
+
+    res_.AppendArrayLenUint64(score_members.size() * 2);
+    for (const auto& score_member : score_members) {
+      res_.AppendString(score_member.member);
+
+      len = pstd::d2string(buf, sizeof(buf), score_member.score);
+      res_.AppendStringLen(len);
+      res_.AppendContent(buf);
+    }
+  } else if (s.IsInvalidArgument()) {
+    res_.SetRes(CmdRes::kMultiKey);
+  } else {
+    res_.SetRes(CmdRes::kErrOther, s.ToString());
+  }
+}
+
+void ZIncrbyCmd::DoInitial() {
+  if (!CheckArg(argv_.size())) {
+    res_.SetRes(CmdRes::kWrongNum, kCmdNameZIncrby);
+    return;
+  }
+  key_ = argv_[1];
+  if (pstd::string2d(argv_[2].data(), argv_[2].size(), &by_) == 0) {
+    res_.SetRes(CmdRes::kInvalidFloat);
+    return;
+  }
+  member_ = argv_[3];
+}
+
+void ZIncrbyCmd::Do() {
+  double score = 0.0;
+  s_ = db_->storage()->ZIncrby(key_, member_, by_, &score);
+  if (s_.ok()) {
+    score_ = score;
+    char buf[32];
+    int64_t len = pstd::d2string(buf, sizeof(buf), score);
+    res_.AppendStringLen(len);
+    res_.AppendContent(buf);
+    AddSlotKey("z", key_, db_);
+  } else if (s_.IsInvalidArgument()) {
+    res_.SetRes(CmdRes::kMultiKey);
+  } else {
+    res_.SetRes(CmdRes::kErrOther, s_.ToString());
+  }
+}
+
+void ZIncrbyCmd::DoThroughDB() {
+  Do();
+}
+
+void ZIncrbyCmd::DoUpdateCache() {
+  if (s_.ok()) {
+    db_->cache()->ZIncrbyIfKeyExist(key_, member_, by_, this, db_);
+  }
+}
+
+void ZsetRangeParentCmd::DoInitial() {
+  if (argv_.size() == 5 && (strcasecmp(argv_[4].data(), "withscores") == 0)) {
+    is_ws_ = true;
+  } else if (argv_.size() != 4) {
+    res_.SetRes(CmdRes::kSyntaxErr);
+    return;
+  }
+  key_ = argv_[1];
+  if (pstd::string2int(argv_[2].data(), argv_[2].size(), &start_) == 0) {
+    res_.SetRes(CmdRes::kInvalidInt);
+    return;
+  }
+  if (pstd::string2int(argv_[3].data(), argv_[3].size(), &stop_) == 0) {
+    res_.SetRes(CmdRes::kInvalidInt);
+    return;
+  }
+}
+
+void ZRangeCmd::DoInitial() {
+  if (!CheckArg(argv_.size())) {
+    res_.SetRes(CmdRes::kWrongNum, kCmdNameZRange);
+    return;
+  }
+  ZsetRangeParentCmd::DoInitial();
+}
+
+void ZRangeCmd::Do() {
+  std::vector<storage::ScoreMember> score_members;
+  s_ = db_->storage()->ZRange(key_, static_cast<int32_t>(start_), static_cast<int32_t>(stop_), &score_members);
+  if (s_.ok() || s_.IsNotFound()) {
+    if (is_ws_) {
+      char buf[32];
+      int64_t len = 0;
+      res_.AppendArrayLenUint64(score_members.size() * 2);
+      for (const auto& sm : score_members) {
+        res_.AppendStringLenUint64(sm.member.size());
+        res_.AppendContent(sm.member);
+        len = pstd::d2string(buf, sizeof(buf), sm.score);
+        res_.AppendStringLen(len);
+        res_.AppendContent(buf);
+      }
+    } else {
+      res_.AppendArrayLenUint64(score_members.size());
+      for (const auto& sm : score_members) {
+        res_.AppendStringLenUint64(sm.member.size());
+        res_.AppendContent(sm.member);
+      }
+    }
+  } else if (s_.IsInvalidArgument()) {
+    res_.SetRes(CmdRes::kMultiKey);
+  } else {
+    res_.SetRes(CmdRes::kErrOther, s_.ToString());
+  }
+}
+
+void ZRangeCmd::ReadCache() {
+  std::vector<storage::ScoreMember> score_members;
+  auto s = db_->cache()->ZRange(key_, start_, stop_, &score_members, db_);
+  if (s.ok()) {
+    if (is_ws_) {
+      char buf[32];
+      int64_t len;
+      res_.AppendArrayLen(score_members.size() * 2);
+      for (const auto& sm : score_members) {
+        res_.AppendStringLen(sm.member.size());
+        res_.AppendContent(sm.member);
+        len = pstd::d2string(buf, sizeof(buf), sm.score);
+        res_.AppendStringLen(len);
+        res_.AppendContent(buf);
+      }
+    } else {
+      res_.AppendArrayLen(score_members.size());
+      for (const auto& sm : score_members) {
+        res_.AppendStringLen(sm.member.size());
+        res_.AppendContent(sm.member);
+      }
+    }
+  } else if (s.IsNotFound()) {
+    res_.SetRes(CmdRes::kCacheMiss);
+  } else {
+    res_.SetRes(CmdRes::kErrOther, s.ToString());
+  }
+  return;
+}
+
+void ZRangeCmd::DoThroughDB() {
+  res_.clear();
+  Do();
+}
+
+void ZRangeCmd::DoUpdateCache() {
+  if (s_.ok()) {
+    db_->cache()->PushKeyToAsyncLoadQueue(PIKA_KEY_TYPE_ZSET, key_, db_);
+  }
+}
+
+void ZRevrangeCmd::DoInitial() {
+  if (!CheckArg(argv_.size())) {
+    res_.SetRes(CmdRes::kWrongNum, kCmdNameZRevrange);
+    return;
+  }
+ 
+void ZRevrangeCmd::DoInitial() {
+  if (!CheckArg(argv_.size())) {
+    res_.SetRes(CmdRes::kWrongNum, kCmdNameZRevrange);
+    return;
+  }
+  ZsetRangeParentCmd::DoInitial();
+}
+
+void ZRevrangeCmd::Do() {
+  std::vector<storage::ScoreMember> score_members;
+  s_ = db_->storage()->ZRevrange(key_, static_cast<int32_t>(start_), static_cast<int32_t>(stop_), &score_members);
+  if (s_.ok() || s_.IsNotFound()) {
+    if (is_ws_) {
+      char buf[32];
+      int64_t len = 0;
+      res_.AppendArrayLenUint64(score_members.size() * 2);
+      for (const auto& sm : score_members) {
+        res_.AppendStringLenUint64(sm.member.size());
+        res_.AppendContent(sm.member);
+        len = pstd::d2string(buf, sizeof(buf), sm.score);
+        res_.AppendStringLen(len);
+        res_.AppendContent(buf);
+      }
+    } else {
+      res_.AppendArrayLenUint64(score_members.size());
+      for (const auto& sm : score_members) {
+        res_.AppendStringLenUint64(sm.member.size());
+        res_.AppendContent(sm.member);
+      }
+    }
+  } else if (s_.IsInvalidArgument()) {
+    res_.SetRes(CmdRes::kMultiKey);
+  } else {
+    res_.SetRes(CmdRes::kErrOther, s_.ToString());
+  }
+}
+
+void ZRevrangeCmd::ReadCache() {
+  std::vector<storage::ScoreMember> score_members;
+  auto s = db_->cache()->ZRevrange(key_, start_, stop_, &score_members, db_);
+
+  if (s.ok()) {
+    if (is_ws_) {
+      char buf[32];
+      int64_t len;
+      res_.AppendArrayLen(score_members.size() * 2);
+      for (const auto& sm : score_members) {
+        res_.AppendStringLen(sm.member.size());
+        res_.AppendContent(sm.member);
+        len = pstd::d2string(buf, sizeof(buf), sm.score);
+        res_.AppendStringLen(len);
+        res_.AppendContent(buf);
+      }
+    } else {
+      res_.AppendArrayLen(score_members.size());
+      for (const auto& sm : score_members) {
+        res_.AppendStringLen(sm.member.size());
+        res_.AppendContent(sm.member);
+      }
+    }
+  } else if (s.IsNotFound()) {
+    res_.SetRes(CmdRes::kCacheMiss);
+  } else {
+    res_.SetRes(CmdRes::kErrOther, s.ToString());
+  }
+  return;
+}
+
+void ZRevrangeCmd::DoThroughDB() {
+  res_.clear();
+  Do();
+}
+
+void ZRevrangeCmd::DoUpdateCache() {
+  if (s_.ok()) {
+    db_->cache()->PushKeyToAsyncLoadQueue(PIKA_KEY_TYPE_ZSET, key_, db_);
+  }
+}
+
+int32_t DoScoreStrRange(std::string begin_score, std::string end_score, bool* left_close, bool* right_close,
+                        double* min_score, double* max_score) {
+  if (!begin_score.empty() && begin_score.at(0) == '(') {
+    *left_close = false;
+    begin_score.erase(begin_score.begin());
+  }
+  if (begin_score == "-inf") {
+    *min_score = storage::ZSET_SCORE_MIN;
+  } else if (begin_score == "inf" || begin_score == "+inf") {
+    *min_score = storage::ZSET_SCORE_MAX;
+  } else if (pstd::string2d(begin_score.data(), begin_score.size(), min_score) == 0) {
+    return -1;
+  }
+
+  if (!end_score.empty() && end_score.at(0) == '(') {
+    *right_close = false;
+    end_score.erase(end_score.begin());
+  }
+  if (end_score == "+inf" || end_score == "inf") {
+    *max_score = storage::ZSET_SCORE_MAX;
+  } else if (end_score == "-inf") {
+    *max_score = storage::ZSET_SCORE_MIN;
+  } else if (pstd::string2d(end_score.data(), end_score.size(), max_score) == 0) {
+    return -1;
+  }
+  return 0;
+}
+
+static void FitLimit(int64_t& count, int64_t& offset, const int64_t size) {
+  count = count >= 0 ? count : size;
+  offset = (offset >= 0 && offset < size) ? offset : size;
+  count = (offset + count < size) ? count : size - offset;
+}
+
+void ZsetRangebyscoreParentCmd::DoInitial() {
+  key_ = argv_[1];
+  min_ = argv_[2];
+  max_ = argv_[3];
+  int32_t ret = DoScoreStrRange(argv_[2], argv_[3], &left_close_, &right_close_, &min_score_, &max_score_);
+  if (ret == -1) {
+    res_.SetRes(CmdRes::kErrOther, "min or max is not a float");
+    return;
+  }
+  size_t argc = argv_.size();
+  if (argc < 5) {
+    return;
+  }
+  size_t index = 4;
+  while (index < argc) {
+    if (strcasecmp(argv_[index].data(), "withscores") == 0) {
+      with_scores_ = true;
+    } else if (strcasecmp(argv_[index].data(), "limit") == 0) {
+      if (index + 3 > argc) {
+        res_.SetRes(CmdRes::kSyntaxErr);
+        return;
+      }
+      index++;
+      if (pstd::string2int(argv_[index].data(), argv_[index].size(), &offset_) == 0) {
+        res_.SetRes(CmdRes::kInvalidInt);
+        return;
+      }
+      index++;
+      if (pstd::string2int(argv_[index].data(), argv_[index].size(), &count_) == 0) {
+        res_.SetRes(CmdRes::kInvalidInt);
+        return;
+      }
+    } else {
+      res_.SetRes(CmdRes::kSyntaxErr);
+      return;
+    }
+    index++;
+  }
+}
+
+void ZRangebyscoreCmd::DoInitial() {
+  if (!CheckArg(argv_.size())) {
+    res_.SetRes(CmdRes::kWrongNum, kCmdNameZRangebyscore);
+    return;
+  }
+  ZsetRangebyscoreParentCmd::DoInitial();
+}
+
+void ZRangebyscoreCmd::Do() {
+  if (min_score_ == storage::ZSET_SCORE_MAX || max_score_ == storage::ZSET_SCORE_MIN) {
+    res_.AppendContent("*0");
+    return;
+  }
+  std::vector<storage::ScoreMember> score_members;
+  s_ = db_->storage()->ZRangebyscore(key_, min_score_, max_score_, left_close_, right_close_, &score_members);
+  if (s_.IsInvalidArgument()) {
+    res_.SetRes(CmdRes::kMultiKey);
+    return;
+  } else if (!s_.ok() && !s_.IsNotFound()) {
+    res_.SetRes(CmdRes::kErrOther, s_.ToString());
+    return;
+  }
+  FitLimit(count_, offset_, static_cast<int64_t>(score_members.size()));
+  size_t index = offset_;
+  size_t end = offset_ + count_;
+  if (with_scores_) {
+    char buf[32];
+    int64_t len;
+    res_.AppendArrayLen(count_ * 2);
+    for (; index < end; index++) {
+      res_.AppendStringLenUint64(score_members[index].member.size());
+      res_.AppendContent(score_members[index].member);
+      len = pstd::d2string(buf, sizeof(buf), score_members[index].score);
+      res_.AppendStringLen(len);
+      res_.AppendContent(buf);
+    }
+  } else {
+    res_.AppendArrayLen(count_);
+    for (; index < end; index++) {
+      res_.AppendStringLenUint64(score_members[index].member.size());
+      res_.AppendContent(score_members[index].member);
+    }
+  }
+}
+
+void ZRangebyscoreCmd::ReadCache() {
+  if (min_score_ == storage::ZSET_SCORE_MAX || max_score_ == storage::ZSET_SCORE_MIN) {
+    res_.AppendContent("*0");
+    return;
+  }
+
+  std::vector<storage::ScoreMember> score_members;
+  min_ = std::to_string(min_score_);
+  max_ = std::to_string(max_score_);
+  auto s = db_->cache()->ZRangebyscore(key_, min_, max_, &score_members, this);
+  if (s.ok()) {
+    auto sm_count = score_members.size();
+    if (with_scores_) {
+      char buf[32];
+      int64_t len;
+      res_.AppendArrayLen(sm_count * 2);
+      for (auto& item : score_members) {
+        res_.AppendStringLen(item.member.size());
+        res_.AppendContent(item.member);
+        len = pstd::d2string(buf, sizeof(buf), item.score);
+        res_.AppendStringLen(len);
+        res_.AppendContent(buf);
+      }
+    } else {
+      res_.AppendArrayLen(sm_count);
+      for (auto& item : score_members) {
+        res_.AppendStringLen(item.member.size());
+        res_.AppendContent(item.member);
+      }
+    }
+  } else if (s.IsNotFound()) {
+    res_.SetRes(CmdRes::kCacheMiss);
+  } else {
+    res_.SetRes(CmdRes::kErrOther, s.ToString());
+  }
+}
+
+void ZRangebyscoreCmd::DoThroughDB() {
+  res_.clear();
+  Do();
+}
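FitLimit above clamps a LIMIT offset/count pair against the result size: a negative count means take everything, an offset past the end yields an empty slice, and count is trimmed so offset + count never runs past the end. A self-contained check of those three cases, with FitLimit copied verbatim:

#include <cassert>
#include <cstdint>

static void FitLimit(int64_t& count, int64_t& offset, const int64_t size) {
  count = count >= 0 ? count : size;
  offset = (offset >= 0 && offset < size) ? offset : size;
  count = (offset + count < size) ? count : size - offset;
}

int main() {
  int64_t count = -1, offset = 0;
  FitLimit(count, offset, 10);        // LIMIT 0 -1 -> whole range
  assert(count == 10 && offset == 0);

  count = 5; offset = 8;
  FitLimit(count, offset, 10);        // trimmed to the 2 trailing elements
  assert(count == 2 && offset == 8);

  count = 3; offset = 42;
  FitLimit(count, offset, 10);        // offset past the end -> empty slice
  assert(count == 0 && offset == 10);
}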
+void ZRangebyscoreCmd::DoUpdateCache() {
+  if (s_.ok()) {
+    db_->cache()->PushKeyToAsyncLoadQueue(PIKA_KEY_TYPE_ZSET, key_, db_);
+  }
+}
+
+void ZRevrangebyscoreCmd::DoInitial() {
+  if (!CheckArg(argv_.size())) {
+    res_.SetRes(CmdRes::kWrongNum, kCmdNameZRevrangebyscore);
+    return;
+  }
+  ZsetRangebyscoreParentCmd::DoInitial();
+  // ZREVRANGEBYSCORE takes max before min, so swap the parsed bounds back.
+  std::swap(min_score_, max_score_);
+  std::swap(left_close_, right_close_);
+}
+
+void ZRevrangebyscoreCmd::Do() {
+  if (min_score_ == storage::ZSET_SCORE_MAX || max_score_ == storage::ZSET_SCORE_MIN) {
+    res_.AppendContent("*0");
+    return;
+  }
+  std::vector<storage::ScoreMember> score_members;
+  s_ = db_->storage()->ZRevrangebyscore(key_, min_score_, max_score_, left_close_, right_close_, &score_members);
+  if (s_.IsInvalidArgument()) {
+    res_.SetRes(CmdRes::kMultiKey);
+    return;
+  } else if (!s_.ok() && !s_.IsNotFound()) {
+    res_.SetRes(CmdRes::kErrOther, s_.ToString());
+    return;
+  }
+  FitLimit(count_, offset_, static_cast<int64_t>(score_members.size()));
+  int64_t index = offset_;
+  int64_t end = offset_ + count_;
+  if (with_scores_) {
+    char buf[32];
+    int64_t len = 0;
+    res_.AppendArrayLen(count_ * 2);
+    for (; index < end; index++) {
+      res_.AppendStringLenUint64(score_members[index].member.size());
+      res_.AppendContent(score_members[index].member);
+      len = pstd::d2string(buf, sizeof(buf), score_members[index].score);
+      res_.AppendStringLen(len);
+      res_.AppendContent(buf);
+    }
+  } else {
+    res_.AppendArrayLen(count_);
+    for (; index < end; index++) {
+      res_.AppendStringLenUint64(score_members[index].member.size());
+      res_.AppendContent(score_members[index].member);
+    }
+  }
+}
+
+void ZRevrangebyscoreCmd::ReadCache() {
+  if (min_score_ == storage::ZSET_SCORE_MAX || max_score_ == storage::ZSET_SCORE_MIN
+      || max_score_ < min_score_) {
+    res_.AppendContent("*0");
+    return;
+  }
+  std::vector<storage::ScoreMember> score_members;
+  auto s = db_->cache()->ZRevrangebyscore(key_, min_, max_, &score_members, this, db_);
+  if (s.ok()) {
+    auto sm_count = score_members.size();
+    if (with_scores_) {
+      char buf[32];
+      int64_t len;
+      res_.AppendArrayLen(sm_count * 2);
+      for (auto& item : score_members) {
+        res_.AppendStringLen(item.member.size());
+        res_.AppendContent(item.member);
+        len = pstd::d2string(buf, sizeof(buf), item.score);
+        res_.AppendStringLen(len);
+        res_.AppendContent(buf);
+      }
+    } else {
+      res_.AppendArrayLen(sm_count);
+      for (auto& item : score_members) {
+        res_.AppendStringLen(item.member.size());
+        res_.AppendContent(item.member);
+      }
+    }
+  } else if (s.IsNotFound()) {
+    res_.SetRes(CmdRes::kCacheMiss);
+  } else {
+    res_.SetRes(CmdRes::kErrOther, s.ToString());
+  }
+}
+
+void ZRevrangebyscoreCmd::DoThroughDB() {
+  res_.clear();
+  Do();
+}
+
+void ZRevrangebyscoreCmd::DoUpdateCache() {
+  if (s_.ok()) {
+    db_->cache()->PushKeyToAsyncLoadQueue(PIKA_KEY_TYPE_ZSET, key_, db_);
+  }
+}
+
+void ZCountCmd::DoInitial() {
+  if (!CheckArg(argv_.size())) {
+    res_.SetRes(CmdRes::kWrongNum, kCmdNameZCount);
+    return;
+  }
+  key_ = argv_[1];
+  min_ = argv_[2];
+  max_ = argv_[3];
+  int32_t ret = DoScoreStrRange(argv_[2], argv_[3], &left_close_, &right_close_, &min_score_, &max_score_);
+  if (ret == -1) {
+    res_.SetRes(CmdRes::kErrOther, "min or max is not a float");
+    return;
+  }
+}
+
+void ZCountCmd::Do() {
+  if (min_score_ == storage::ZSET_SCORE_MAX || max_score_ == storage::ZSET_SCORE_MIN) {
+    res_.AppendContent(":0");  // ZCOUNT is an integer reply, not an array
+    return;
+  }
+
+  int32_t count = 0;
+  s_ = db_->storage()->ZCount(key_, min_score_, max_score_, left_close_, right_close_, &count);
+  if (s_.ok() || s_.IsNotFound()) {
+    res_.AppendInteger(count);
+  } else if (s_.IsInvalidArgument()) {
+    res_.SetRes(CmdRes::kMultiKey);
+  } else {
+    res_.SetRes(CmdRes::kErrOther, s_.ToString());
+  }
+}
+
+void ZCountCmd::ReadCache() {
+  if (min_score_ == storage::ZSET_SCORE_MAX || max_score_ == storage::ZSET_SCORE_MIN) {
+    res_.AppendContent(":0");
+    return;
+  }
+  uint64_t count = 0;
+  auto s = db_->cache()->ZCount(key_, min_, max_, &count, this);
+  if (s.ok()) {
+    res_.AppendInteger(count);
+  } else if (s.IsNotFound()) {
+    res_.SetRes(CmdRes::kCacheMiss);
+  } else {
+    res_.SetRes(CmdRes::kErrOther, s.ToString());
+  }
+}
+
+void ZCountCmd::DoThroughDB() {
+  res_.clear();
+  Do();
+}
+
+void ZCountCmd::DoUpdateCache() {
+  if (s_.ok()) {
+    db_->cache()->PushKeyToAsyncLoadQueue(PIKA_KEY_TYPE_ZSET, key_, db_);
+  }
+}
+
+void ZRemCmd::DoInitial() {
+  if (!CheckArg(argv_.size())) {
+    res_.SetRes(CmdRes::kWrongNum, kCmdNameZRem);
+    return;
+  }
+  key_ = argv_[1];
+  auto iter = argv_.begin() + 2;
+  members_.assign(iter, argv_.end());
+}
+
+void ZRemCmd::Do() {
+  s_ = db_->storage()->ZRem(key_, members_, &deleted_);
+  if (s_.ok() || s_.IsNotFound()) {
+    res_.AppendInteger(deleted_);
+  } else if (s_.IsInvalidArgument()) {
+    res_.SetRes(CmdRes::kMultiKey);
+  } else {
+    res_.SetRes(CmdRes::kErrOther, s_.ToString());
+  }
+}
+
+void ZRemCmd::DoThroughDB() {
+  Do();
+}
+
+void ZRemCmd::DoUpdateCache() {
+  if (s_.ok() && deleted_ > 0) {
+    db_->cache()->ZRem(key_, members_, db_);
+  }
+}
+
+void ZsetUIstoreParentCmd::DoInitial() {
+  dest_key_ = argv_[1];
+  if (pstd::string2int(argv_[2].data(), argv_[2].size(), &num_keys_) == 0) {
+    res_.SetRes(CmdRes::kInvalidInt);
+    return;
+  }
+  if (num_keys_ < 1) {
+    res_.SetRes(CmdRes::kErrOther, "at least 1 input key is needed for ZUNIONSTORE/ZINTERSTORE");
+    return;
+  }
+  auto argc = argv_.size();
+  if (argc < static_cast<size_t>(num_keys_ + 3)) {
+    res_.SetRes(CmdRes::kSyntaxErr);
+    return;
+  }
+  keys_.assign(argv_.begin() + 3, argv_.begin() + 3 + num_keys_);
+  weights_.assign(num_keys_, 1);
+  auto index = num_keys_ + 3;
+  while (index < argc) {
+    if (strcasecmp(argv_[index].data(), "weights") == 0) {
+      index++;
+      if (argc < index + num_keys_) {
+        res_.SetRes(CmdRes::kSyntaxErr);
+        return;
+      }
+      double weight;
+      auto base = index;
+      for (; index < base + num_keys_; index++) {
+        if (pstd::string2d(argv_[index].data(), argv_[index].size(), &weight) == 0) {
+          res_.SetRes(CmdRes::kErrOther, "weight value is not a float");
+          return;
+        }
+        weights_[index - base] = weight;
+      }
+    } else if (strcasecmp(argv_[index].data(), "aggregate") == 0) {
+      index++;
+      if (argc < index + 1) {
+        res_.SetRes(CmdRes::kSyntaxErr);
+        return;
+      }
+      if (strcasecmp(argv_[index].data(), "sum") == 0) {
+        aggregate_ = storage::SUM;
+      } else if (strcasecmp(argv_[index].data(), "min") == 0) {
+        aggregate_ = storage::MIN;
+      } else if (strcasecmp(argv_[index].data(), "max") == 0) {
+        aggregate_ = storage::MAX;
+      } else {
+        res_.SetRes(CmdRes::kSyntaxErr);
+        return;
+      }
+      index++;
+    } else {
+      res_.SetRes(CmdRes::kSyntaxErr);
+      return;
+    }
+  }
+}
+
+void ZUnionstoreCmd::DoInitial() {
+  if (!CheckArg(argv_.size())) {
+    res_.SetRes(CmdRes::kWrongNum, kCmdNameZUnionstore);
+    return;
+  }
+  ZsetUIstoreParentCmd::DoInitial();
+}
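In ZsetUIstoreParentCmd::DoInitial above, WEIGHTS scales each input zset's scores and AGGREGATE chooses how scores of colliding members combine (SUM, MIN, or MAX; weights default to 1). A small standalone sketch of the weighted SUM case over in-memory maps; the real work happens inside storage::ZUnionstore, so this only illustrates the semantics:

#include <iostream>
#include <map>
#include <string>
#include <vector>

using ZSet = std::map<std::string, double>;

// ZUNIONSTORE with per-input weights and AGGREGATE SUM.
ZSet UnionSum(const std::vector<ZSet>& inputs, const std::vector<double>& weights) {
  ZSet dest;
  for (size_t i = 0; i < inputs.size(); ++i) {
    for (const auto& [member, score] : inputs[i]) {
      dest[member] += score * weights[i];  // missing members start at 0.0
    }
  }
  return dest;
}

int main() {
  ZSet a{{"x", 1.0}, {"y", 2.0}};
  ZSet b{{"y", 3.0}, {"z", 4.0}};
  // Equivalent of: ZUNIONSTORE dest 2 a b WEIGHTS 1 10 AGGREGATE SUM
  for (const auto& [m, s] : UnionSum({a, b}, {1.0, 10.0})) {
    std::cout << m << " " << s << "\n";  // x 1, y 32, z 40
  }
}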
+void ZUnionstoreCmd::Do() {
+  int32_t count = 0;
+  s_ = db_->storage()->ZUnionstore(dest_key_, keys_, weights_, aggregate_, value_to_dest_, &count);
+  if (s_.ok()) {
+    res_.AppendInteger(count);
+    AddSlotKey("z", dest_key_, db_);
+  } else if (s_.IsInvalidArgument()) {
+    res_.SetRes(CmdRes::kMultiKey);
+  } else {
+    res_.SetRes(CmdRes::kErrOther, s_.ToString());
+  }
+}
+
+void ZUnionstoreCmd::DoThroughDB() {
+  Do();
+}
+
+void ZUnionstoreCmd::DoUpdateCache() {
+  if (s_.ok()) {
+    std::vector<std::string> v;
+    v.emplace_back(dest_key_);
+    db_->cache()->Del(v);
+  }
+}
+
+void ZUnionstoreCmd::DoBinlog() {
+  PikaCmdArgsType del_args;
+  del_args.emplace_back("del");
+  del_args.emplace_back(dest_key_);
+  std::shared_ptr<Cmd> del_cmd = std::make_unique<DelCmd>(kCmdNameDel, -2, kCmdFlagsWrite | kCmdFlagsKv | kCmdFlagsDoThroughDB);
+  del_cmd->Initial(del_args, db_name_);
+  del_cmd->SetConn(GetConn());
+  del_cmd->SetResp(resp_.lock());
+  del_cmd->DoBinlog();
+
+  if (value_to_dest_.empty()) {
+    // The union produced an empty set; the DEL above is enough to simulate
+    // overwriting dest_key with an empty set.
+    return;
+  }
+
+  PikaCmdArgsType initial_args;
+  initial_args.emplace_back("zadd");
+  initial_args.emplace_back(dest_key_);
+  auto first_pair = value_to_dest_.begin();
+  char buf[32];
+  int64_t d_len = pstd::d2string(buf, sizeof(buf), first_pair->second);
+  initial_args.emplace_back(buf);
+  initial_args.emplace_back(first_pair->first);
+  value_to_dest_.erase(value_to_dest_.begin());
+  zadd_cmd_->Initial(initial_args, db_name_);
+  zadd_cmd_->SetConn(GetConn());
+  zadd_cmd_->SetResp(resp_.lock());
+
+  auto& zadd_argv = zadd_cmd_->argv();
+  size_t data_size = d_len + zadd_argv[3].size();
+  constexpr size_t kDataSize = 131072;  // 128KB
+  for (const auto& it : value_to_dest_) {
+    if (data_size >= kDataSize) {
+      // Flush once the accumulated ZADD args reach 128KB (131072 bytes).
+      zadd_cmd_->DoBinlog();
+      zadd_argv.clear();
+      zadd_argv.emplace_back("zadd");
+      zadd_argv.emplace_back(dest_key_);
+      data_size = 0;
+    }
+    d_len = pstd::d2string(buf, sizeof(buf), it.second);
+    zadd_argv.emplace_back(buf);
+    zadd_argv.emplace_back(it.first);
+    data_size += (d_len + it.first.size());
+  }
+  zadd_cmd_->DoBinlog();
+}
+
+void ZInterstoreCmd::DoInitial() {
+  if (!CheckArg(argv_.size())) {
+    res_.SetRes(CmdRes::kWrongNum, kCmdNameZInterstore);
+    return;
+  }
+  ZsetUIstoreParentCmd::DoInitial();
+}
+
+void ZInterstoreCmd::Do() {
+  int32_t count = 0;
+  s_ = db_->storage()->ZInterstore(dest_key_, keys_, weights_, aggregate_, value_to_dest_, &count);
+  if (s_.ok()) {
+    res_.AppendInteger(count);
+  } else if (s_.IsInvalidArgument()) {
+    res_.SetRes(CmdRes::kMultiKey);
+  } else {
+    res_.SetRes(CmdRes::kErrOther, s_.ToString());
+  }
+}
+
+void ZInterstoreCmd::DoThroughDB() {
+  Do();
+}
+
+void ZInterstoreCmd::DoUpdateCache() {
+  if (s_.ok()) {
+    std::vector<std::string> v;
+    v.emplace_back(dest_key_);
+    db_->cache()->Del(v);
+  }
+}
+
+void ZInterstoreCmd::DoBinlog() {
+  PikaCmdArgsType del_args;
+  del_args.emplace_back("del");
+  del_args.emplace_back(dest_key_);
+  std::shared_ptr<Cmd> del_cmd = std::make_unique<DelCmd>(kCmdNameDel, -2, kCmdFlagsWrite | kCmdFlagsKv | kCmdFlagsDoThroughDB);
+  del_cmd->Initial(del_args, db_name_);
+  del_cmd->SetConn(GetConn());
+  del_cmd->SetResp(resp_.lock());
+  del_cmd->DoBinlog();
+
+  if (value_to_dest_.empty()) {
+    // The intersection produced an empty set; the DEL above is enough to
+    // simulate overwriting dest_key with an empty set.
+    return;
+  }
+
+  PikaCmdArgsType initial_args;
+  initial_args.emplace_back("zadd");
+  initial_args.emplace_back(dest_key_);
+  char buf[32];
+  int64_t d_len = pstd::d2string(buf, sizeof(buf), value_to_dest_[0].score);
+  initial_args.emplace_back(buf);
+  initial_args.emplace_back(value_to_dest_[0].member);
+  zadd_cmd_->Initial(initial_args, db_name_);
+  zadd_cmd_->SetConn(GetConn());
zadd_cmd_->SetResp(resp_.lock()); + + auto& zadd_argv = zadd_cmd_->argv(); + size_t data_size = d_len + value_to_dest_[0].member.size(); + constexpr size_t kDataSize = 131072; //128KB + for (size_t i = 1; i < value_to_dest_.size(); i++) { + if (data_size >= kDataSize) { + // If the binlog has reached the size of 128KB. (131,072 bytes = 128KB) + zadd_cmd_->DoBinlog(); + zadd_argv.clear(); + zadd_argv.emplace_back("zadd"); + zadd_argv.emplace_back(dest_key_); + data_size = 0; + } + d_len = pstd::d2string(buf, sizeof(buf), value_to_dest_[i].score); + zadd_argv.emplace_back(buf); + zadd_argv.emplace_back(value_to_dest_[i].member); + data_size += (value_to_dest_[i].member.size() + d_len); + } + zadd_cmd_->DoBinlog(); +} + +void ZsetRankParentCmd::DoInitial() { + key_ = argv_[1]; + member_ = argv_[2]; +} + +void ZRankCmd::DoInitial() { + if (!CheckArg(argv_.size())) { + res_.SetRes(CmdRes::kWrongNum, kCmdNameZRank); + return; + } + ZsetRankParentCmd::DoInitial(); +} + +void ZRankCmd::Do() { + int32_t rank = 0; + s_ = db_->storage()->ZRank(key_, member_, &rank); + if (s_.ok()) { + res_.AppendInteger(rank); + } else if (s_.IsNotFound()) { + res_.AppendContent("$-1"); + } else if (s_.IsInvalidArgument()) { + res_.SetRes(CmdRes::kMultiKey); + } else { + res_.SetRes(CmdRes::kErrOther, s_.ToString()); + } +} + +void ZRankCmd::ReadCache() { + int64_t rank = 0; + auto s = db_->cache()->ZRank(key_, member_, &rank, db_); + if (s.ok()) { + res_.AppendInteger(rank); + } else if (s.IsNotFound()) { + res_.SetRes(CmdRes::kCacheMiss); + } else { + res_.SetRes(CmdRes::kErrOther, s.ToString()); + } +} + +void ZRankCmd::DoThroughDB() { + res_.clear(); + Do(); +} + +void ZRankCmd::DoUpdateCache() { + if (s_.ok()) { + db_->cache()->PushKeyToAsyncLoadQueue(PIKA_KEY_TYPE_ZSET, key_, db_); + } +} + +void ZRevrankCmd::DoInitial() { + if (!CheckArg(argv_.size())) { + res_.SetRes(CmdRes::kWrongNum, kCmdNameZRevrank); + return; + } + ZsetRankParentCmd::DoInitial(); +} + +void ZRevrankCmd::Do() { + int32_t revrank = 0; + s_ = db_->storage()->ZRevrank(key_, member_, &revrank); + if (s_.ok()) { + res_.AppendInteger(revrank); + } else if (s_.IsNotFound()) { + res_.AppendContent("$-1"); + } else if (s_.IsInvalidArgument()) { + res_.SetRes(CmdRes::kMultiKey); + } else { + res_.SetRes(CmdRes::kErrOther, s_.ToString()); + } +} + +void ZRevrankCmd::ReadCache() { + int64_t revrank = 0; + auto s = db_->cache()->ZRevrank(key_, member_, &revrank, db_); + if (s.ok()) { + res_.AppendInteger(revrank); + } else if (s.IsNotFound()) { + res_.SetRes(CmdRes::kCacheMiss); + } else { + res_.SetRes(CmdRes::kErrOther, s.ToString()); + } +} + +void ZRevrankCmd::DoThroughDB() { + res_.clear(); + Do(); +} + +void ZRevrankCmd::DoUpdateCache() { + if (s_.ok()) { + db_->cache()->PushKeyToAsyncLoadQueue(PIKA_KEY_TYPE_ZSET, key_, db_); + } +} + +void ZScoreCmd::DoInitial() { + if (!CheckArg(argv_.size())) { + res_.SetRes(CmdRes::kWrongNum, kCmdNameZScore); + return; + } + key_ = argv_[1]; + member_ = argv_[2]; +} + +void ZScoreCmd::Do() { + double score = 0.0; + s_ = db_->storage()->ZScore(key_, member_, &score); + if (s_.ok()) { + char buf[32]; + int64_t len = pstd::d2string(buf, sizeof(buf), score); + res_.AppendStringLen(len); + res_.AppendContent(buf); + } else if (s_.IsNotFound()) { + res_.AppendContent("$-1"); + } else if (s_.IsInvalidArgument()) { + res_.SetRes(CmdRes::kMultiKey); + } else { + res_.SetRes(CmdRes::kErrOther, s_.ToString()); + } +} + +void ZScoreCmd::ReadCache() { + double score = 0.0; + auto s = db_->cache()->ZScore(key_, member_, 
&score, db_);
+  if (s.ok()) {
+    char buf[32];
+    int64_t len = pstd::d2string(buf, sizeof(buf), score);
+    res_.AppendStringLen(len);
+    res_.AppendContent(buf);
+  } else if (s.IsNotFound()) {
+    res_.SetRes(CmdRes::kCacheMiss);
+  } else {
+    res_.SetRes(CmdRes::kErrOther, s.ToString());
+  }
+}
+
+void ZScoreCmd::DoThroughDB() {
+  res_.clear();
+  Do();
+}
+
+void ZScoreCmd::DoUpdateCache() {
+  return;
+}
+
+static int32_t DoMemberRange(const std::string& raw_min_member, const std::string& raw_max_member, bool* left_close,
+                             bool* right_close, std::string* min_member, std::string* max_member) {
+  if (raw_min_member == "-") {
+    *min_member = "-";
+  } else if (raw_min_member == "+") {
+    *min_member = "+";
+  } else {
+    if (!raw_min_member.empty() && raw_min_member.at(0) == '(') {
+      *left_close = false;
+    } else if (!raw_min_member.empty() && raw_min_member.at(0) == '[') {
+      *left_close = true;
+    } else {
+      return -1;
+    }
+    min_member->assign(raw_min_member.begin() + 1, raw_min_member.end());
+  }
+
+  if (raw_max_member == "+") {
+    *max_member = "+";
+  } else if (raw_max_member == "-") {
+    *max_member = "-";
+  } else {
+    if (!raw_max_member.empty() && raw_max_member.at(0) == '(') {
+      *right_close = false;
+    } else if (!raw_max_member.empty() && raw_max_member.at(0) == '[') {
+      *right_close = true;
+    } else {
+      return -1;
+    }
+    max_member->assign(raw_max_member.begin() + 1, raw_max_member.end());
+  }
+  return 0;
+}
+
+void ZsetRangebylexParentCmd::DoInitial() {
+  key_ = argv_[1];
+  min_ = argv_[2];
+  max_ = argv_[3];
+  int32_t ret = DoMemberRange(argv_[2], argv_[3], &left_close_, &right_close_, &min_member_, &max_member_);
+  if (ret == -1) {
+    res_.SetRes(CmdRes::kErrOther, "min or max not valid string range item");
+    return;
+  }
+  size_t argc = argv_.size();
+  if (argc == 4) {
+    return;
+  } else if (argc != 7 || strcasecmp(argv_[4].data(), "limit") != 0) {
+    res_.SetRes(CmdRes::kSyntaxErr);
+    return;
+  }
+  if (pstd::string2int(argv_[5].data(), argv_[5].size(), &offset_) == 0) {
+    res_.SetRes(CmdRes::kInvalidInt);
+    return;
+  }
+  if (pstd::string2int(argv_[6].data(), argv_[6].size(), &count_) == 0) {
+    res_.SetRes(CmdRes::kInvalidInt);
+    return;
+  }
+}
+
+void ZRangebylexCmd::DoInitial() {
+  if (!CheckArg(argv_.size())) {
+    res_.SetRes(CmdRes::kWrongNum, kCmdNameZRangebylex);
+    return;
+  }
+  ZsetRangebylexParentCmd::DoInitial();
+}
+
+void ZRangebylexCmd::Do() {
+  if (min_member_ == "+" || max_member_ == "-") {
+    res_.AppendContent("*0");
+    return;
+  }
+  std::vector<std::string> members;
+  s_ = db_->storage()->ZRangebylex(key_, min_member_, max_member_, left_close_, right_close_, &members);
+  if (s_.IsInvalidArgument()) {
+    res_.SetRes(CmdRes::kMultiKey);
+    return;
+  } else if (!s_.ok() && !s_.IsNotFound()) {
+    res_.SetRes(CmdRes::kErrOther, s_.ToString());
+    return;
+  }
+  FitLimit(count_, offset_, static_cast<int64_t>(members.size()));
+
+  res_.AppendArrayLen(count_);
+  size_t index = offset_;
+  size_t end = offset_ + count_;
+  for (; index < end; index++) {
+    res_.AppendStringLenUint64(members[index].size());
+    res_.AppendContent(members[index]);
+  }
+}
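DoMemberRange above follows the Redis lex-range syntax: bare '-' and '+' are the open endpoints, and any other bound must start with '[' (inclusive) or '(' (exclusive). A trimmed, single-endpoint mirror of that rule with a few probes (illustrative only, not the pika function itself):

#include <iostream>
#include <string>

// Single-endpoint version of DoMemberRange's parsing rule:
// "-" / "+" pass through; otherwise '[' means inclusive, '(' exclusive.
static bool ParseLexBound(const std::string& raw, bool* inclusive, std::string* member) {
  if (raw == "-" || raw == "+") {
    *member = raw;
    return true;
  }
  if (raw.empty() || (raw[0] != '[' && raw[0] != '(')) return false;  // bare "abc" is rejected
  *inclusive = (raw[0] == '[');
  member->assign(raw.begin() + 1, raw.end());
  return true;
}

int main() {
  bool inc = true;
  std::string m;
  std::cout << ParseLexBound("[aaa", &inc, &m) << " " << inc << " " << m << "\n";  // 1 1 aaa
  std::cout << ParseLexBound("(ccc", &inc, &m) << " " << inc << " " << m << "\n";  // 1 0 ccc
  std::cout << ParseLexBound("ccc", &inc, &m) << "\n";                             // 0 (invalid)
}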
+void ZRangebylexCmd::ReadCache() {
+  if (min_member_ == "+" || max_member_ == "-") {
+    res_.AppendContent("*0");
+    return;
+  }
+  std::vector<std::string> members;
+  auto s = db_->cache()->ZRangebylex(key_, min_, max_, &members, db_);
+  if (s.ok()) {
+    FitLimit(count_, offset_, static_cast<int64_t>(members.size()));
+
+    res_.AppendArrayLen(count_);
+    size_t index = offset_;
+    size_t end = offset_ + count_;
+    for (; index < end; index++) {
+      res_.AppendStringLen(members[index].size());
+      res_.AppendContent(members[index]);
+    }
+  } else if (s.IsNotFound()) {
+    res_.SetRes(CmdRes::kCacheMiss);
+  } else {
+    res_.SetRes(CmdRes::kErrOther, s.ToString());
+  }
+}
+
+void ZRangebylexCmd::DoThroughDB() {
+  res_.clear();
+  Do();
+}
+
+void ZRangebylexCmd::DoUpdateCache() {
+  if (s_.ok()) {
+    db_->cache()->PushKeyToAsyncLoadQueue(PIKA_KEY_TYPE_ZSET, key_, db_);
+  }
+}
+
+void ZRevrangebylexCmd::DoInitial() {
+  if (!CheckArg(argv_.size())) {
+    res_.SetRes(CmdRes::kWrongNum, kCmdNameZRevrangebylex);
+    return;
+  }
+  ZsetRangebylexParentCmd::DoInitial();
+  // ZREVRANGEBYLEX takes max before min, so swap the parsed bounds back.
+  std::swap(min_member_, max_member_);
+  std::swap(left_close_, right_close_);
+}
+
+void ZRevrangebylexCmd::Do() {
+  if (min_member_ == "+" || max_member_ == "-") {
+    res_.AppendContent("*0");
+    return;
+  }
+  std::vector<std::string> members;
+  s_ = db_->storage()->ZRangebylex(key_, min_member_, max_member_, left_close_, right_close_, &members);
+  if (s_.IsInvalidArgument()) {
+    res_.SetRes(CmdRes::kMultiKey);
+    return;
+  }
+  if (!s_.ok() && !s_.IsNotFound()) {
+    res_.SetRes(CmdRes::kErrOther, s_.ToString());
+    return;
+  }
+  FitLimit(count_, offset_, static_cast<int64_t>(members.size()));
+
+  res_.AppendArrayLen(count_);
+  int64_t index = static_cast<int64_t>(members.size()) - 1 - offset_;
+  int64_t end = index - count_;
+  for (; index > end; index--) {
+    res_.AppendStringLenUint64(members[index].size());
+    res_.AppendContent(members[index]);
+  }
+}
+
+void ZRevrangebylexCmd::ReadCache() {
+  if (min_member_ == "+" || max_member_ == "-") {
+    res_.AppendContent("*0");
+    return;
+  }
+  std::vector<std::string> members;
+  auto s = db_->cache()->ZRevrangebylex(key_, min_, max_, &members, db_);
+  if (s.ok()) {
+    auto size = count_ < static_cast<int64_t>(members.size()) ? count_ : static_cast<int64_t>(members.size());
+    res_.AppendArrayLen(size);
+    for (int64_t i = 0; i < size; ++i) {
+      res_.AppendString(members[i]);
+    }
+  } else if (s.IsNotFound()) {
+    res_.SetRes(CmdRes::kCacheMiss);
+  } else {
+    res_.SetRes(CmdRes::kErrOther, s.ToString());
+  }
+}
+
+void ZRevrangebylexCmd::DoThroughDB() {
+  res_.clear();
+  Do();
+}
+
+void ZRevrangebylexCmd::DoUpdateCache() {
+  if (s_.ok()) {
+    db_->cache()->PushKeyToAsyncLoadQueue(PIKA_KEY_TYPE_ZSET, key_, db_);
+  }
+}
+
+void ZLexcountCmd::DoInitial() {
+  if (!CheckArg(argv_.size())) {
+    res_.SetRes(CmdRes::kWrongNum, kCmdNameZLexcount);
+    return;
+  }
+  key_ = argv_[1];
+  min_ = argv_[2];
+  max_ = argv_[3];
+  int32_t ret = DoMemberRange(argv_[2], argv_[3], &left_close_, &right_close_, &min_member_, &max_member_);
+  if (ret == -1) {
+    res_.SetRes(CmdRes::kErrOther, "min or max not valid string range item");
+    return;
+  }
+}
+
+void ZLexcountCmd::Do() {
+  if (min_member_ == "+" || max_member_ == "-") {
+    res_.AppendContent(":0");
+    return;
+  }
+  int32_t count = 0;
+  s_ = db_->storage()->ZLexcount(key_, min_member_, max_member_, left_close_, right_close_, &count);
+  if (s_.IsInvalidArgument()) {
+    res_.SetRes(CmdRes::kMultiKey);
+    return;
+  } else if (!s_.ok() && !s_.IsNotFound()) {
+    res_.SetRes(CmdRes::kErrOther, s_.ToString());
+    return;
+  }
+  res_.AppendInteger(count);
+}
+
+void ZLexcountCmd::ReadCache() {
+  if (min_member_ == "+" || max_member_ == "-") {
+    res_.AppendContent(":0");
+    return;
+  }
+  uint64_t count = 0;
+  auto s = db_->cache()->ZLexcount(key_, min_, max_, &count, db_);
+  if (s.ok()) {
+    res_.AppendInteger(count);
+  } else if (s.IsNotFound()) {
+    res_.SetRes(CmdRes::kCacheMiss);
+  } else {
+    res_.SetRes(CmdRes::kErrOther, s.ToString());
+  }
+}
+
+void ZLexcountCmd::DoThroughDB() {
+  res_.clear();
+  Do();
+}
+
+void ZLexcountCmd::DoUpdateCache() {
+  if (s_.ok()) {
+    db_->cache()->PushKeyToAsyncLoadQueue(PIKA_KEY_TYPE_ZSET, key_, db_);
+  }
+}
+
+void ZRemrangebyrankCmd::DoInitial() {
+  if (!CheckArg(argv_.size())) {
+    res_.SetRes(CmdRes::kWrongNum, kCmdNameZRemrangebyrank);
+    return;
+  }
+  key_ = argv_[1];
+  min_ = argv_[2];
+  max_ = argv_[3];
+  if (pstd::string2int(argv_[2].data(), argv_[2].size(), &start_rank_) == 0) {
+    res_.SetRes(CmdRes::kInvalidInt);
+    return;
+  }
+  if (pstd::string2int(argv_[3].data(), argv_[3].size(), &stop_rank_) == 0) {
+    res_.SetRes(CmdRes::kInvalidInt);
+    return;
+  }
+}
+
+void ZRemrangebyrankCmd::Do() {
+  // Record the deleted count in ele_deleted_ so DoUpdateCache() sees it too.
+  s_ = db_->storage()->ZRemrangebyrank(key_, static_cast<int32_t>(start_rank_), static_cast<int32_t>(stop_rank_), &ele_deleted_);
+  if (s_.ok() || s_.IsNotFound()) {
+    res_.AppendInteger(ele_deleted_);
+  } else if (s_.IsInvalidArgument()) {
+    res_.SetRes(CmdRes::kMultiKey);
+  } else {
+    res_.SetRes(CmdRes::kErrOther, s_.ToString());
+  }
+}
+
+void ZRemrangebyrankCmd::DoThroughDB() {
+  Do();
+}
+
+void ZRemrangebyrankCmd::DoUpdateCache() {
+  if (s_.ok()) {
+    db_->cache()->ZRemrangebyrank(key_, min_, max_, ele_deleted_, db_);
+  }
+}
+
+void ZRemrangebyscoreCmd::DoInitial() {
+  if (!CheckArg(argv_.size())) {
+    res_.SetRes(CmdRes::kWrongNum, kCmdNameZRemrangebyscore);
+    return;
+  }
+  key_ = argv_[1];
+  // Keep the raw bounds for DoUpdateCache(), which passes them to the cache.
+  min_ = argv_[2];
+  max_ = argv_[3];
+  int32_t ret = DoScoreStrRange(argv_[2], argv_[3], &left_close_, &right_close_, &min_score_, &max_score_);
+  if (ret == -1) {
+    res_.SetRes(CmdRes::kErrOther, "min or max is not a float");
+    return;
+  }
+}
+
+void ZRemrangebyscoreCmd::Do() {
+  if (min_score_ == storage::ZSET_SCORE_MAX || max_score_ == storage::ZSET_SCORE_MIN) {
+    res_.AppendContent(":0");
+    return;
+  }
+  int32_t count = 0;
+  s_ = db_->storage()->ZRemrangebyscore(key_, min_score_, max_score_, left_close_, right_close_, &count);
+  if (s_.IsInvalidArgument()) {
+    res_.SetRes(CmdRes::kMultiKey);
+    return;
+  } else if (!s_.ok() && !s_.IsNotFound()) {
+    res_.SetRes(CmdRes::kErrOther, s_.ToString());
+    return;
+  }
+  res_.AppendInteger(count);
+}
+
+void ZRemrangebyscoreCmd::DoThroughDB() {
+  Do();
+}
+
+void ZRemrangebyscoreCmd::DoUpdateCache() {
+  if (s_.ok()) {
+    db_->cache()->ZRemrangebyscore(key_, min_, max_, db_);
+  }
+}
+
+void ZRemrangebylexCmd::DoInitial() {
+  if (!CheckArg(argv_.size())) {
+    res_.SetRes(CmdRes::kWrongNum, kCmdNameZRemrangebylex);
+    return;
+  }
+  key_ = argv_[1];
+  // Keep the raw bounds for DoUpdateCache(), which passes them to the cache.
+  min_ = argv_[2];
+  max_ = argv_[3];
+  int32_t ret = DoMemberRange(argv_[2], argv_[3], &left_close_, &right_close_, &min_member_, &max_member_);
+  if (ret == -1) {
+    res_.SetRes(CmdRes::kErrOther, "min or max not valid string range item");
+    return;
+  }
+}
+
+void ZRemrangebylexCmd::Do() {
+  if (min_member_ == "+" || max_member_ == "-") {
+    res_.AppendContent(":0");  // ZREMRANGEBYLEX is an integer reply
+    return;
+  }
+  int32_t count = 0;
+
+  s_ = db_->storage()->ZRemrangebylex(key_, min_member_, max_member_, left_close_, right_close_, &count);
+  if (s_.IsInvalidArgument()) {
+    res_.SetRes(CmdRes::kMultiKey);
+    return;
+  } else if (!s_.ok() && !s_.IsNotFound()) {
+    res_.SetRes(CmdRes::kErrOther, s_.ToString());
+    return;
+  }
+  res_.AppendInteger(count);
+}
+
+void ZRemrangebylexCmd::DoThroughDB() {
+  Do();
+}
+
+void ZRemrangebylexCmd::DoUpdateCache() {
+  if (s_.ok()) {
+    db_->cache()->ZRemrangebylex(key_, min_, max_, db_);
+  }
+}
+
+void ZPopmaxCmd::DoInitial() {
+  if (!CheckArg(argv_.size())) {
+    res_.SetRes(CmdRes::kWrongNum, kCmdNameZPopmax);
+    return;
+  }
+  key_ = argv_[1];
+  count_ = 1;
+  if (argv_.size() > 3) {
+    res_.SetRes(CmdRes::kWrongNum, kCmdNameZPopmax);
+  } else if (argv_.size() == 3) {
+    if (pstd::string2int(argv_[2].data(), argv_[2].size(), static_cast<int64_t*>(&count_)) == 0) {
+      res_.SetRes(CmdRes::kInvalidInt);
+    }
+  }
+}
+
+void ZPopmaxCmd::Do() {
+  std::vector<storage::ScoreMember> score_members;
+  rocksdb::Status s = db_->storage()->ZPopMax(key_, count_, &score_members);
+  if (s.ok() || s.IsNotFound()) {
+    char buf[32];
+    int64_t len = 0;
+    res_.AppendArrayLenUint64(score_members.size() * 2);
+    for (const auto& sm : score_members) {
+      res_.AppendString(sm.member);
+      len = pstd::d2string(buf, sizeof(buf), sm.score);
+      res_.AppendStringLen(len);
+      res_.AppendContent(buf);
+    }
+  } else if (s.IsInvalidArgument()) {
+    res_.SetRes(CmdRes::kMultiKey);
+  } else {
+    res_.SetRes(CmdRes::kErrOther, s.ToString());
+  }
+}
+
+void ZPopminCmd::DoInitial() {
+  if (!CheckArg(argv_.size())) {
+    res_.SetRes(CmdRes::kWrongNum, kCmdNameZPopmin);
+    return;
+  }
+  key_ = argv_[1];
+  count_ = 1;
+  if (argv_.size() > 3) {
+    res_.SetRes(CmdRes::kWrongNum, kCmdNameZPopmin);
+  } else if (argv_.size() == 3) {
+    if (pstd::string2int(argv_[2].data(), argv_[2].size(), static_cast<int64_t*>(&count_)) == 0) {
+      res_.SetRes(CmdRes::kInvalidInt);
+    }
+  }
+}
+
+void ZPopminCmd::Do() {
+  std::vector<storage::ScoreMember> score_members;
+  rocksdb::Status s = db_->storage()->ZPopMin(key_, count_, &score_members);
+  if (s.ok() || s.IsNotFound()) {
+    char buf[32];
+    int64_t len = 0;
+    res_.AppendArrayLenUint64(score_members.size() * 2);
+    for (const auto& sm : score_members) {
+      res_.AppendString(sm.member);
+      len = pstd::d2string(buf, sizeof(buf), sm.score);
+      res_.AppendStringLen(len);
+      res_.AppendContent(buf);
+    }
+  } else if (s.IsInvalidArgument()) {
+    res_.SetRes(CmdRes::kMultiKey);
+  } else {
+    res_.SetRes(CmdRes::kErrOther, s.ToString());
+  }
+}
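Throughout this file a zset score leaves the server as a RESP bulk string: pstd::d2string renders the double into a small buffer and the reply is framed as $<len> followed by the payload. A sketch of that framing that substitutes snprintf("%.17g", ...) for pstd::d2string (an assumption about comparable formatting, not a quote of it):

#include <cstdio>
#include <iostream>
#include <string>

// Frame a zset score as a RESP bulk string, e.g. 1.5 -> "$3\r\n1.5\r\n".
std::string BulkScore(double score) {
  char buf[32];
  int len = snprintf(buf, sizeof(buf), "%.17g", score);  // stand-in for pstd::d2string
  return "$" + std::to_string(len) + "\r\n" + std::string(buf, len) + "\r\n";
}

int main() {
  std::cout << BulkScore(1.5);   // $3 followed by 1.5
  std::cout << BulkScore(32.0);  // $2 followed by 32
}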
diff --git a/tools/pika_migrate/src/pstd/CMakeLists.txt b/tools/pika_migrate/src/pstd/CMakeLists.txt
new file mode 100644
index 0000000000..306e2cc518
--- /dev/null
+++ b/tools/pika_migrate/src/pstd/CMakeLists.txt
@@ -0,0 +1,58 @@
+cmake_minimum_required(VERSION 3.18)
+
+set(CMAKE_CXX_STANDARD 17)
+project(pstd)
+
+# Force the use of the user-defined memcmp
+add_compile_options(-fno-builtin-memcmp -pipe)
+
+
+set(CMAKE_SYSTEM_PROCESSOR ${CMAKE_HOST_SYSTEM_PROCESSOR})
+if(CMAKE_SYSTEM_PROCESSOR MATCHES "x86_64")
+  add_compile_options(-msse)
+endif()
+
+add_compile_options(-W -Wextra -Wall -Wsign-compare)
+add_compile_options(-Wno-unused-parameter -Wno-redundant-decls -Wwrite-strings)
+add_compile_options(-Wpointer-arith -Wreorder -Wswitch -Wsign-promo -Woverloaded-virtual)
+add_compile_options(-Wnon-virtual-dtor -Wno-missing-field-initializers)
+
+set(DISABLE_WARNING_AS_ERROR ON)
+if(NOT DISABLE_WARNING_AS_ERROR)
+  add_compile_options(-Werror)
+endif()
+
+
+add_subdirectory(tests)
+add_subdirectory(examples)
+
+aux_source_directory(./src DIR_SRCS)
+
+add_library(pstd STATIC ${DIR_SRCS})
+
+add_dependencies(pstd
+  rocksdb
+  glog
+  gflags
+  fmt
+)
+
+target_include_directories(pstd
+  PUBLIC ${PROJECT_SOURCE_DIR}/..
+  ${PROJECT_SOURCE_DIR}/include
+  ${INSTALL_INCLUDEDIR}
+)
+
+set(PSTD_DEP_LIBS ${GLOG_LIBRARY} ${GFLAGS_LIBRARY} ${FMT_LIBRARY} ${LIBUNWIND_LIBRARY})
+
+if(CMAKE_CXX_COMPILER_ID STREQUAL "GNU")
+  if(CMAKE_CXX_COMPILER_VERSION LESS 9)
+    list(APPEND PSTD_DEP_LIBS stdc++fs)
+  endif()
+elseif(CMAKE_CXX_COMPILER_ID STREQUAL "Clang")
+  if(CMAKE_CXX_COMPILER_VERSION LESS 9)
+    list(APPEND PSTD_DEP_LIBS c++fs)
+  endif()
+endif()
+
+target_link_libraries(pstd PUBLIC ${PSTD_DEP_LIBS})
diff --git a/tools/pika_migrate/src/pstd/doc.h b/tools/pika_migrate/src/pstd/doc.h
new file mode 100644
index 0000000000..08319542ad
--- /dev/null
+++ b/tools/pika_migrate/src/pstd/doc.h
@@ -0,0 +1,6 @@
+// Copyright (c) 2015-present, Qihoo, Inc. All rights reserved.
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree. An additional grant
+// of patent rights can be found in the PATENTS file in the same directory.
+
+// pstd, short for ``pika standard'', is the SDK for pika.
diff --git a/tools/pika_migrate/src/pstd/examples/CMakeLists.txt b/tools/pika_migrate/src/pstd/examples/CMakeLists.txt
new file mode 100644
index 0000000000..4aeab08f12
--- /dev/null
+++ b/tools/pika_migrate/src/pstd/examples/CMakeLists.txt
@@ -0,0 +1,30 @@
+cmake_minimum_required(VERSION 3.18)
+
+aux_source_directory(../src DIR_SRCS)
+set(CMAKE_CXX_STANDARD 17)
+
+file(GLOB_RECURSE PSTD_EXAMPLES_SOURCE "${PROJECT_SOURCE_DIR}/examples/*.cc")
+
+
+foreach(pstd_example_source ${PSTD_EXAMPLES_SOURCE})
+  get_filename_component(pstd_example_filename ${pstd_example_source} NAME)
+  string(REPLACE ".cc" "" pstd_example_name ${pstd_example_filename})
+
+  add_executable(${pstd_example_name} ${pstd_example_source})
+  target_include_directories(${pstd_example_name}
+    PUBLIC ${PROJECT_SOURCE_DIR}/include
+    PUBLIC ${PROJECT_SOURCE_DIR}/..
+    ${ROCKSDB_INCLUDE_DIR}
+    ${ROCKSDB_SOURCE_DIR}
+  )
+  add_dependencies(${pstd_example_name} pstd glog gflags ${LIBUNWIND_NAME})
+
+  target_link_libraries(${pstd_example_name}
+    PUBLIC storage
+    PUBLIC pstd
+    PUBLIC ${GLOG_LIBRARY}
+    PUBLIC ${GFLAGS_LIBRARY}
+    PUBLIC ${LIBUNWIND_LIBRARY}
+    PUBLIC pthread
+  )
+endforeach()
diff --git a/tools/pika_migrate/src/pstd/examples/conf_example.cc b/tools/pika_migrate/src/pstd/examples/conf_example.cc
new file mode 100644
index 0000000000..8c8f17c06e
--- /dev/null
+++ b/tools/pika_migrate/src/pstd/examples/conf_example.cc
@@ -0,0 +1,22 @@
+#include "pstd/include/base_conf.h"
+#include "pstd/include/xdebug.h"
+
+using namespace pstd;
+
+int main() {
+  BaseConf b("./conf/pika.conf");
+
+  if (b.LoadConf() == 0) {
+    log_info("LoadConf ok");
+  } else {
+    log_info("LoadConf error");
+  }
+
+  b.SetConfInt("port", 99999);
+  b.SetConfStr("pidfile", "./anan.pid");
+  b.WriteBack();
+  b.DumpConf();
+  b.WriteSampleConf();
+
+  return 0;
+}
diff --git a/tools/pika_migrate/src/pstd/examples/hash_example.cc b/tools/pika_migrate/src/pstd/examples/hash_example.cc
new file mode 100644
index 0000000000..114a99c63e
--- /dev/null
+++ b/tools/pika_migrate/src/pstd/examples/hash_example.cc
@@ -0,0 +1,20 @@
+#include <iostream>
+#include <string>
+
+#include "pstd/include/pstd_hash.h"
+
+using namespace pstd;
+int main() {
+  std::string input = "grape";
+  std::string output1 = sha256(input);
+  std::string output2 = md5(input);
+
+  std::cout << "sha256('" << input << "'): " << output1 << std::endl;
+  std::cout << "md5('" << input << "'): " << output2 << std::endl;
+
+  std::cout << "input is Sha256 " << isSha256(input) << std::endl;
+
+  std::cout << "output1 is Sha256 " << isSha256(output1) << std::endl;
+
+  return 0;
+}
diff --git a/tools/pika_migrate/src/pstd/include/base_conf.h b/tools/pika_migrate/src/pstd/include/base_conf.h
new file mode 100644
index 0000000000..d89d27fb31
--- /dev/null
+++ b/tools/pika_migrate/src/pstd/include/base_conf.h
@@ -0,0 +1,82 @@
+// Copyright (c) 2015-present, Qihoo, Inc. All rights reserved.
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree. An additional grant
+// of patent rights can be found in the PATENTS file in the same directory.
+
+#ifndef __PSTD_INCLUDE_BASE_CONF_H__
+#define __PSTD_INCLUDE_BASE_CONF_H__
+
+#include <cstdint>
+#include <cstdlib>
+
+#include <memory>
+#include <string>
+#include <utility>
+#include <vector>
+
+#include "pstd/include/pstd_define.h"
+
+namespace pstd {
+
+class BaseConf {
+ public:
+  struct Rep {
+    std::string path;
+    enum ConfType {
+      kConf = 0,
+      kComment = 1,
+    };
+
+    struct ConfItem {
+      ConfType type;  // 0 means conf, 1 means comment
+      std::string name;
+      std::string value;
+      ConfItem(ConfType t, std::string v) : type(t), value(std::move(v)) {}
+      ConfItem(ConfType t, std::string n, std::string v) : type(t), name(std::move(n)), value(std::move(v)) {}
+    };
+
+    explicit Rep(std::string p) : path(std::move(p)) {}
+    std::vector<ConfItem> item;
+  };
+
+  explicit BaseConf(const std::string& path);
+  virtual ~BaseConf();
+
+  int LoadConf();
+  int32_t ReloadConf();
+
+  // return false if the item doesn't exist
+  bool GetConfInt(const std::string& name, int* value) const;
+  bool GetConfIntHuman(const std::string& name, int* value) const;
+  bool GetConfInt64(const std::string& name, int64_t* value) const;
+  bool GetConfInt64Human(const std::string& name, int64_t* value) const;
+
+  bool GetConfStr(const std::string& name, std::string* value) const;
+  bool GetConfBool(const std::string& name, bool* value) const;
+  bool GetConfStrVec(const std::string& name, std::vector<std::string>* value) const;
+  bool GetConfDouble(const std::string& name, double* value) const;
+  bool GetConfStrMulti(const std::string& name, std::vector<std::string>* values) const;
+
+  bool SetConfInt(const std::string& name, int value);
+  bool SetConfInt64(const std::string& name, int64_t value);
+
+  bool SetConfStr(const std::string& name, const std::string& value);
+  bool SetConfBool(const std::string& name, bool value);
+  bool SetConfStrVec(const std::string& name, const std::vector<std::string>& value);
+  bool SetConfDouble(const std::string& name, double value);
+
+  bool CheckConfExist(const std::string& name) const;
+
+  void DumpConf() const;
+  bool WriteBack();
+  void WriteSampleConf() const;
+
+  void PushConfItem(const Rep::ConfItem& item);
+
+ private:
+  std::unique_ptr<Rep> rep_;
+};
+
+}  // namespace pstd
+
+#endif  // __PSTD_INCLUDE_BASE_CONF_H__
diff --git a/tools/pika_migrate/src/pstd/include/env.h b/tools/pika_migrate/src/pstd/include/env.h
new file mode 100644
index 0000000000..f11680206f
--- /dev/null
+++ b/tools/pika_migrate/src/pstd/include/env.h
@@ -0,0 +1,162 @@
+#ifndef __PSTD_ENV_H__
+#define __PSTD_ENV_H__
+
+#include <cstdint>
+#include <memory>
+#include <string>
+#include <vector>
+
+#include "pstd/include/pstd_status.h"
+#include "pstd/include/noncopyable.h"
+
+namespace pstd {
+
+class WritableFile;
+class SequentialFile;
+class RWFile;
+class RandomRWFile;
+
+using TimeType = uint64_t;
+
+/*
+ * Set the resource limits of a process
+ */
+int SetMaxFileDescriptorNum(int64_t max_file_descriptor_num);
+
+/*
+ * Set size of initial mmap size
+ */
+void SetMmapBoundSize(size_t size);
+
+extern const size_t kPageSize;
+
+/*
+ * File Operations
+ */
+int IsDir(const std::string& path);
+int DeleteDir(const std::string& path);
+bool DeleteDirIfExist(const std::string& path);
+int CreateDir(const std::string& path);
+int CreatePath(const std::string& path, mode_t mode = 0755);
+uint64_t Du(const std::string& filename);
+
+/*
+ * Whether the file exists: returns true if it does, false otherwise
+ */
+bool FileExists(const std::string& path);
+
+bool DeleteFile(const std::string& fname);
+
+int RenameFile(const std::string& oldname, const std::string& newname);
+
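The factories below return a Status and hand ownership back through a unique_ptr, so a typical write path is NewWritableFile -> Append -> Sync -> Close. A minimal sketch against these declarations (error handling trimmed; assumes the translation unit links against pstd):

#include <memory>
#include <string>

#include "pstd/include/env.h"

// Write one line through pstd's WritableFile abstraction.
pstd::Status WriteGreeting(const std::string& path) {
  std::unique_ptr<pstd::WritableFile> file;
  pstd::Status s = pstd::NewWritableFile(path, file);
  if (!s.ok()) return s;
  s = file->Append("hello pstd\n");  // buffered append
  if (!s.ok()) return s;
  s = file->Sync();                  // flush to disk
  if (!s.ok()) return s;
  return file->Close();
}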
+class FileLock : public pstd::noncopyable {
+ public:
+  FileLock() = default;
+  virtual ~FileLock() = default;
+
+  int fd_ = -1;
+  std::string name_;
+};
+
+int GetChildren(const std::string& dir, std::vector<std::string>& result);
+void GetDescendant(const std::string& dir, std::vector<std::string>& result);
+
+TimeType NowMicros();
+
+TimeType NowMillis();
+
+void SleepForMicroseconds(int micros);
+
+Status NewSequentialFile(const std::string& fname, std::unique_ptr<SequentialFile>& result);
+
+Status NewWritableFile(const std::string& fname, std::unique_ptr<WritableFile>& result);
+
+Status NewRWFile(const std::string& fname, std::unique_ptr<RWFile>& result);
+
+Status AppendSequentialFile(const std::string& fname, SequentialFile** result);
+
+Status AppendWritableFile(const std::string& fname, std::unique_ptr<WritableFile>& result, uint64_t write_len = 0);
+
+Status NewRandomRWFile(const std::string& fname, std::unique_ptr<RandomRWFile>& result);
+
+// A file abstraction for sequential writing. The implementation
+// must provide buffering since callers may append small fragments
+// at a time to the file.
+class WritableFile : public pstd::noncopyable {
+ public:
+  WritableFile() = default;
+  virtual ~WritableFile();
+
+  virtual Status Append(const Slice& data) = 0;
+  virtual Status Close() = 0;
+  virtual Status Flush() = 0;
+  virtual Status Sync() = 0;
+  virtual Status Trim(uint64_t offset) = 0;
+  virtual uint64_t Filesize() = 0;
+};
+
+// An abstraction for a sequentially readable file
+class SequentialFile {
+ public:
+  SequentialFile() = default;
+  virtual ~SequentialFile();
+  // virtual Status Read(size_t n, char *&result, char *scratch) = 0;
+  virtual Status Read(size_t n, Slice* result, char* scratch) = 0;
+  virtual Status Skip(uint64_t n) = 0;
+  // virtual Status Close() = 0;
+  virtual char* ReadLine(char* buf, int n) = 0;
+};
+
+class RWFile : public pstd::noncopyable {
+ public:
+  RWFile() = default;
+  virtual ~RWFile();
+  virtual char* GetData() = 0;
+};
+
+// A file abstraction for random reading and writing.
+class RandomRWFile : public pstd::noncopyable {
+ public:
+  RandomRWFile() = default;
+  virtual ~RandomRWFile() = default;
+
+  // Write data from Slice data to file starting from offset
+  // Returns IOError on failure, but does not guarantee
+  // atomicity of a write. Returns OK status on success.
+  //
+  // Safe for concurrent use.
+  virtual Status Write(uint64_t offset, const Slice& data) = 0;
+  // Read up to "n" bytes from the file starting at "offset".
+  // "scratch[0..n-1]" may be written by this routine. Sets "*result"
+  // to the data that was read (including if fewer than "n" bytes were
+  // successfully read). May set "*result" to point at data in
+  // "scratch[0..n-1]", so "scratch[0..n-1]" must be live when
+  // "*result" is used. If an error was encountered, returns a non-OK
+  // status.
+  //
+  // Safe for concurrent use by multiple threads.
+  virtual Status Read(uint64_t offset, size_t n, Slice* result, char* scratch) const = 0;
+  virtual Status Close() = 0;  // closes the file
+  virtual Status Sync() = 0;   // sync data
+
+  /*
+   * Sync data and/or metadata as well.
+   * By default, sync only data.
+   * Override this method for environments where we need to sync
+   * metadata as well.
+   */
+  virtual Status Fsync() { return Sync(); }
+
+  /*
+   * Pre-allocate space for a file.
+ */ + virtual Status Allocate(off_t offset, off_t len) { + (void)offset; + (void)len; + return Status::OK(); + } +}; +} // namespace pstd +#endif // __PSTD_ENV_H__ diff --git a/tools/pika_migrate/src/pstd/include/fmacros.h b/tools/pika_migrate/src/pstd/include/fmacros.h new file mode 100644 index 0000000000..4c67133354 --- /dev/null +++ b/tools/pika_migrate/src/pstd/include/fmacros.h @@ -0,0 +1,66 @@ +/* + * Copyright (c) 2009-2012, Salvatore Sanfilippo + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * * Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of Redis nor the names of its contributors may be used + * to endorse or promote products derived from this software without + * specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef ___PSTD_FMACRO_H +#define ___PSTD_FMACRO_H + +#ifndef _BSD_SOURCE +# define _BSD_SOURCE +#endif + +#if defined(__linux__) +# define _GNU_SOURCE_REDIS +# define _DEFAULT_SOURCE +#endif + +#if defined(_AIX) +# define _ALL_SOURCE +#endif + +#if defined(__linux__) || defined(__OpenBSD__) +# define _XOPEN_SOURCE 700 +/* + * On NetBSD, _XOPEN_SOURCE undefines _NETBSD_SOURCE and + * thus hides inet_aton etc. + */ +#elif !defined(__NetBSD__) +# define _XOPEN_SOURCE +#endif + +#if defined(__sun) +# define _POSIX_C_SOURCE 199506L +#endif + +#ifndef _LARGEFILE_SOURCE +# define _LARGEFILE_SOURCE +#endif + +#define _FILE_OFFSET_BITS 64 + +#endif diff --git a/tools/pika_migrate/src/pstd/include/lock_mgr.h b/tools/pika_migrate/src/pstd/include/lock_mgr.h new file mode 100644 index 0000000000..978e9dd17a --- /dev/null +++ b/tools/pika_migrate/src/pstd/include/lock_mgr.h @@ -0,0 +1,57 @@ +// Copyright (c) 2017-present The storage Authors. All rights reserved. +// This source code is licensed under the BSD-style license found in the +// LICENSE file in the root directory of this source tree. An additional grant +// of patent rights can be found in the PATENTS file in the same directory. 
+
+#ifndef __SRC_LOCK_MGR_H__
+#define __SRC_LOCK_MGR_H__
+
+#include <memory>
+#include <string>
+
+#include "pstd/include/mutex.h"
+#include "pstd/include/noncopyable.h"
+
+namespace pstd {
+
+namespace lock {
+struct LockMap;
+struct LockMapStripe;
+
+class LockMgr : public pstd::noncopyable {
+ public:
+  LockMgr(size_t default_num_stripes, int64_t max_num_locks, const std::shared_ptr<MutexFactory>& factory);
+
+  ~LockMgr();
+
+  // Attempt to lock key. If OK status is returned, the caller is responsible
+  // for calling UnLock() on this key.
+  Status TryLock(const std::string& key);
+
+  // Unlock a key locked by TryLock().
+  void UnLock(const std::string& key);
+
+ private:
+  // Default number of lock map stripes
+  const size_t default_num_stripes_ [[maybe_unused]];
+
+  // Limit on number of keys locked per column family
+  const int64_t max_num_locks_;
+
+  // Used to allocate mutexes/condvars to use when locking keys
+  std::shared_ptr<MutexFactory> mutex_factory_;
+
+  // Map to locked key info
+  std::shared_ptr<LockMap> lock_map_;
+
+  Status Acquire(const std::shared_ptr<LockMapStripe>& stripe, const std::string& key);
+
+  Status AcquireLocked(const std::shared_ptr<LockMapStripe>& stripe, const std::string& key);
+
+  void UnLockKey(const std::string& key, const std::shared_ptr<LockMapStripe>& stripe);
+};
+
+}  // namespace lock
+}  // namespace pstd
+#endif  // __SRC_LOCK_MGR_H__
diff --git a/tools/pika_migrate/src/pstd/include/mutex.h b/tools/pika_migrate/src/pstd/include/mutex.h
new file mode 100644
index 0000000000..3d67d426aa
--- /dev/null
+++ b/tools/pika_migrate/src/pstd/include/mutex.h
@@ -0,0 +1,86 @@
+// Copyright (c) 2017-present The storage Authors. All rights reserved.
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree. An additional grant
+// of patent rights can be found in the PATENTS file in the same directory.
+
+#ifndef __SRC_MUTEX_H__
+#define __SRC_MUTEX_H__
+
+#include <memory>
+
+#include "pstd/include/pstd_status.h"
+
+namespace pstd::lock {
+
+using Status = pstd::Status;
+
+class Mutex {
+ public:
+  virtual ~Mutex() = default;
+
+  // Attempt to acquire lock. Return OK on success, or other Status on failure.
+  // If returned status is OK, Storage will eventually call UnLock().
+  virtual Status Lock() = 0;
+
+  // Attempt to acquire lock. If timeout is non-negative, operation may be
+  // failed after this many microseconds.
+  // Returns OK on success,
+  // TimedOut if timed out,
+  // or other Status on failure.
+  // If returned status is OK, Storage will eventually call UnLock().
+  virtual Status TryLockFor(int64_t timeout_time) = 0;
+
+  // Unlock Mutex that was successfully locked by Lock() or TryLockUntil()
+  virtual void UnLock() = 0;
+};
+
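LockMgr above hands out striped per-key locks built from a pluggable MutexFactory, and every successful TryLock() must be paired with an UnLock(). A usage sketch (MutexFactoryImpl is the default factory declared in mutex_impl.h further below; the stripe and lock-limit values here are arbitrary):

#include <memory>
#include <string>

#include "pstd/include/lock_mgr.h"
#include "pstd/include/mutex_impl.h"

// Guard a per-key critical section with pstd's striped lock manager.
void TouchKey(const std::string& key) {
  auto factory = std::make_shared<pstd::lock::MutexFactoryImpl>();
  pstd::lock::LockMgr mgr(16 /* stripes */, -1 /* lock limit, arbitrary */, factory);
  if (mgr.TryLock(key).ok()) {
    // ... mutate state owned by `key` ...
    mgr.UnLock(key);  // pair every successful TryLock with UnLock
  }
}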
+class CondVar {
+ public:
+  virtual ~CondVar() = default;
+
+  // Block current thread until condition variable is notified by a call to
+  // Notify() or NotifyAll(). Wait() will be called with mutex locked.
+  // Returns OK if notified.
+  // Returns non-OK if Storage should stop waiting and fail the operation.
+  // May return OK spuriously even if not notified.
+  virtual Status Wait(std::shared_ptr<Mutex> mutex) = 0;
+
+  // Block current thread until condition variable is notified by a call to
+  // Notify() or NotifyAll(), or if the timeout is reached.
+  // Wait() will be called with mutex locked.
+  //
+  // If timeout is non-negative, operation should be failed after this many
+  // microseconds.
+  // If implementing a custom version of this class, the implementation may
+  // choose to ignore the timeout.
+  //
+  // Returns OK if notified.
+  // Returns TimedOut if timeout is reached.
+  // Returns other status if Storage should otherwise stop waiting and
+  // fail the operation.
+  // May return OK spuriously even if not notified.
+  virtual Status WaitFor(std::shared_ptr<Mutex> mutex, int64_t timeout_time) = 0;
+
+  // If any threads are waiting on *this, unblock at least one of the
+  // waiting threads.
+  virtual void Notify() = 0;
+
+  // Unblocks all threads waiting on *this.
+  virtual void NotifyAll() = 0;
+};
+
+// Factory class that can allocate mutexes and condition variables.
+class MutexFactory {
+ public:
+  // Create a Mutex object.
+  virtual std::shared_ptr<Mutex> AllocateMutex() = 0;
+
+  // Create a CondVar object.
+  virtual std::shared_ptr<CondVar> AllocateCondVar() = 0;
+
+  virtual ~MutexFactory() = default;
+};
+
+}  // namespace pstd::lock
+
+#endif  // __SRC_MUTEX_H__
diff --git a/tools/pika_migrate/src/pstd/include/mutex_impl.h b/tools/pika_migrate/src/pstd/include/mutex_impl.h
new file mode 100644
index 0000000000..529bb130d4
--- /dev/null
+++ b/tools/pika_migrate/src/pstd/include/mutex_impl.h
@@ -0,0 +1,23 @@
+// Copyright (c) 2017-present The storage Authors. All rights reserved.
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree. An additional grant
+// of patent rights can be found in the PATENTS file in the same directory.
+
+#ifndef __SRC_MUTEX_IMPL_H__
+#define __SRC_MUTEX_IMPL_H__
+
+#include "pstd/include/mutex.h"
+
+#include <memory>
+
+namespace pstd {
+namespace lock {
+// Default implementation of MutexFactory.
+class MutexFactoryImpl : public MutexFactory {
+ public:
+  std::shared_ptr<Mutex> AllocateMutex() override;
+  std::shared_ptr<CondVar> AllocateCondVar() override;
+};
+}  // namespace lock
+}  // namespace pstd
+#endif  // __SRC_MUTEX_IMPL_H__
diff --git a/tools/pika_migrate/src/pstd/include/noncopyable.h b/tools/pika_migrate/src/pstd/include/noncopyable.h
new file mode 100644
index 0000000000..6a3518fb19
--- /dev/null
+++ b/tools/pika_migrate/src/pstd/include/noncopyable.h
@@ -0,0 +1,23 @@
+// Copyright (c) 2023-present, Qihoo, Inc. All rights reserved.
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree. An additional grant
+// of patent rights can be found in the PATENTS file in the same directory.
+
+#ifndef PIKA_NONCOPYABLE_H_
+#define PIKA_NONCOPYABLE_H_
+
+namespace pstd {
+
+class noncopyable {
+ protected:
+  noncopyable() = default;
+  ~noncopyable() = default;
+
+ private:
+  noncopyable(const noncopyable&) = delete;
+  void operator=(const noncopyable&) = delete;
+};
+
+}  // namespace pstd
+
+#endif
diff --git a/tools/pika_migrate/src/pstd/include/pika_codis_slot.h b/tools/pika_migrate/src/pstd/include/pika_codis_slot.h
new file mode 100644
index 0000000000..cb21fd0968
--- /dev/null
+++ b/tools/pika_migrate/src/pstd/include/pika_codis_slot.h
@@ -0,0 +1,22 @@
+// Copyright (c) 2023-present, Qihoo, Inc. All rights reserved.
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree. An additional grant
+// of patent rights can be found in the PATENTS file in the same directory.
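Because Wait()/WaitFor() on the CondVar interface above may return OK spuriously, a caller should re-check its predicate in a loop while holding the mutex. A sketch of that pattern against those interfaces (the Ready predicate is hypothetical):

#include <memory>

#include "pstd/include/mutex.h"

// Wait until some condition holds, tolerating spurious wakeups.
void WaitUntilReady(const std::shared_ptr<pstd::lock::Mutex>& mu,
                    const std::shared_ptr<pstd::lock::CondVar>& cv,
                    bool (*Ready)()) {  // hypothetical predicate
  mu->Lock();
  while (!Ready()) {
    cv->Wait(mu);  // may return OK spuriously, hence the loop
  }
  mu->UnLock();
}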
+ +#ifndef PIKA_CODIS_SLOT_H_ +#define PIKA_CODIS_SLOT_H_ + +#include +#include +#include + +using CRCU32 = uint32_t; + +// get the slot number by key +CRCU32 GetSlotsID(int slot_num, const std::string& str, CRCU32* pcrc, int* phastag); + +// get slot number of the key +CRCU32 GetSlotID(int slot_num, const std::string& str); + +#endif + diff --git a/tools/pika_migrate/src/pstd/include/posix.h b/tools/pika_migrate/src/pstd/include/posix.h new file mode 100644 index 0000000000..3371432d8b --- /dev/null +++ b/tools/pika_migrate/src/pstd/include/posix.h @@ -0,0 +1,158 @@ +/* $begin csapp.h */ +#ifndef __CSAPP_H__ +# define __CSAPP_H__ + +# include +# include +# include +# include +# include +# include +# include +# include +# include +# include +# include +# include +# include +# include +# include +# include +# include +# include +# include +# include +# include + +/* Default file permissions are DEF_MODE & ~DEF_UMASK */ +/* $begin createmasks */ +# define DEF_MODE (S_IRUSR | S_IWUSR | S_IRGRP | S_IWGRP | S_IROTH | S_IWOTH) +# define DEF_UMASK (S_IWGRP | S_IWOTH) +/* $end createmasks */ + +/* Simplifies calls to bind(), connect(), and accept() */ +/* $begin sockaddrdef */ +using SA = struct sockaddr; +/* $end sockaddrdef */ + +/* Persistent state for the robust I/O (Rio) package */ +/* $begin rio_t */ +# define RIO_BUFSIZE 8192 +using rio_t = struct { + int rio_fd; /* descriptor for this internal buf */ + int rio_cnt; /* unread bytes in internal buf */ + char* rio_bufptr; /* next unread byte in internal buf */ + char rio_buf[RIO_BUFSIZE]; /* internal buffer */ +}; +/* $end rio_t */ + +/* External variables */ +extern char** environ; /* defined by libc */ + +/* Misc constants */ +# define MAXLINE 8192 /* max text line length */ +# define MAXBUF 8192 /* max I/O buffer size */ +# define LISTENQ 1024 /* second argument to listen() */ + +/* Process control wrappers */ +pid_t Fork(); +void Execve(const char* filename, char* const argv[], char* const envp[]); +pid_t Wait(int* status); +pid_t Waitpid(pid_t pid, int* iptr, int options); +void Kill(pid_t pid, int signum); +unsigned int Sleep(unsigned int secs); +void Pause(); +unsigned int Alarm(unsigned int seconds); +void Setpgid(pid_t pid, pid_t pgid); +pid_t Getpgrp(); + +/* Signal wrappers */ +using handler_t = void (int); +handler_t* Signal(int signum, handler_t* handler); +void Sigprocmask(int how, const sigset_t* set, sigset_t* oldset); +void Sigemptyset(sigset_t* set); +void Sigfillset(sigset_t* set); +void Sigaddset(sigset_t* set, int signum); +void Sigdelset(sigset_t* set, int signum); +int Sigismember(const sigset_t* set, int signum); + +/* Unix I/O wrappers */ +int Open(const char* pathname, int flags, mode_t mode); +ssize_t Read(int fd, void* buf, size_t count); +ssize_t Write(int fd, const void* buf, size_t count); +off_t Lseek(int fildes, off_t offset, int whence); +void Close(int fd); +int Select(int n, fd_set* readfds, fd_set* writefds, fd_set* exceptfds, struct timeval* timeout); +int Dup2(int fd1, int fd2); +void Stat(const char* filename, struct stat* buf); +void Fstat(int fd, struct stat* buf); + +/* Memory mapping wrappers */ +void* Mmap(void* addr, size_t len, int prot, int flags, int fd, off_t offset); +void Munmap(void* start, size_t length); + +/* Standard I/O wrappers */ +void Fclose(FILE* fp); +FILE* Fdopen(int fd, const char* type); +char* Fgets(char* ptr, int n, FILE* stream); +FILE* Fopen(const char* filename, const char* mode); +void Fputs(const char* ptr, FILE* stream); +size_t Fread(void* ptr, size_t size, size_t 
+void Fwrite(const void* ptr, size_t size, size_t nmemb, FILE* stream);
+
+/* Dynamic storage allocation wrappers */
+void* Malloc(size_t size);
+void* Realloc(void* ptr, size_t size);
+void* Calloc(size_t nmemb, size_t size);
+void Free(void* ptr);
+
+/* Sockets interface wrappers */
+int Socket(int domain, int type, int protocol);
+void Setsockopt(int s, int level, int optname, const void* optval, int optlen);
+void Bind(int sockfd, struct sockaddr* my_addr, int addrlen);
+void Listen(int s, int backlog);
+int Accept(int s, struct sockaddr* addr, socklen_t* addrlen);
+void Connect(int sockfd, struct sockaddr* serv_addr, int addrlen);
+
+/* DNS wrappers */
+struct hostent* Gethostbyname(const char* name);
+struct hostent* Gethostbyaddr(const char* addr, int len, int type);
+
+/* Pthreads thread control wrappers */
+void Pthread_create(pthread_t* tidp, pthread_attr_t* attrp, void* (*routine)(void*), void* argp);
+void Pthread_join(pthread_t tid, void** thread_return);
+void Pthread_cancel(pthread_t tid);
+void Pthread_detach(pthread_t tid);
+void Pthread_exit(void* retval);
+pthread_t Pthread_self();
+void Pthread_once(pthread_once_t* once_control, void (*init_function)());
+
+/* POSIX semaphore wrappers */
+void Sem_init(sem_t* sem, int pshared, unsigned int value);
+void P(sem_t* sem);
+void V(sem_t* sem);
+
+/* Rio (Robust I/O) package */
+ssize_t rio_readn(int fd, void* usrbuf, size_t n);
+ssize_t rio_writen(int fd, void* usrbuf, size_t n);
+void rio_readinitb(rio_t* rp, int fd);
+ssize_t rio_readnb(rio_t* rp, void* usrbuf, size_t n);
+ssize_t rio_readlineb(rio_t* rp, void* usrbuf, size_t maxlen);
+
+/* Wrappers for Rio package */
+ssize_t Rio_readn(int fd, void* ptr, size_t n);
+void Rio_writen(int fd, void* usrbuf, size_t n);
+void Rio_readinitb(rio_t* rp, int fd);
+ssize_t Rio_readnb(rio_t* rp, void* usrbuf, size_t n);
+ssize_t Rio_readlineb(rio_t* rp, void* usrbuf, size_t maxlen);
+
+/* Client/server helper functions */
+int open_clientfd(char* hostname, int portno);
+int open_listenfd(int portno);
+
+/* Wrappers for client/server helper functions */
+int Open_clientfd(char* hostname, int port);
+int Open_listenfd(int port);
+
+#endif /* __CSAPP_H__ */
+/* $end csapp.h */
diff --git a/tools/pika_migrate/src/pstd/include/pstd_coding.h b/tools/pika_migrate/src/pstd/include/pstd_coding.h
new file mode 100644
index 0000000000..f601b5e337
--- /dev/null
+++ b/tools/pika_migrate/src/pstd/include/pstd_coding.h
@@ -0,0 +1,154 @@
+// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file. See the AUTHORS file for names of contributors.
+//
+// Endian-neutral encoding:
+// * Fixed-length numbers are encoded with least-significant byte first
+// * In addition we support variable length "varint" encoding
+// * Strings are encoded prefixed by their length in varint format
+
+#ifndef __PSTD_CODING_H__
+#define __PSTD_CODING_H__
+
+#include <cstdint>
+#include <cstring>
+#include <string>
+
+#include "pstd/include/pstd_slice.h"
+
+namespace pstd {
+
+// Standard Put...
routines append to a string +extern void PutFixed16(std::string* dst, uint16_t value); +extern void PutFixed32(std::string* dst, uint32_t value); +extern void PutFixed64(std::string* dst, uint64_t value); +extern void PutVarint32(std::string* dst, uint32_t value); +extern void PutVarint64(std::string* dst, uint64_t value); +extern void PutLengthPrefixedString(std::string* dst, const std::string& value); + +extern void GetFixed16(std::string* dst, uint16_t* value); +extern void GetFixed32(std::string* dst, uint32_t* value); +extern void GetFixed64(std::string* dst, uint64_t* value); +extern bool GetVarint32(std::string* input, uint32_t* value); +extern bool GetVarint64(std::string* input, uint64_t* value); + +extern void GetFixed16(Slice* dst, uint16_t* value); +extern void GetFixed32(Slice* dst, uint32_t* value); +extern void GetFixed64(Slice* dst, uint64_t* value); +extern bool GetVarint32(Slice* input, uint32_t* value); +extern bool GetVarint64(Slice* input, uint64_t* value); + +extern const char* GetLengthPrefixedSlice(const char* p, const char* limit, Slice* result); +extern bool GetLengthPrefixedSlice(Slice* input, Slice* result); +extern bool GetLengthPrefixedString(std::string* input, std::string* result); + +// Pointer-based variants of GetVarint... These either store a value +// in *v and return a pointer just past the parsed value, or return +// nullptr on error. These routines only look at bytes in the range +// [p..limit-1] +extern const char* GetVarint32Ptr(const char* p, const char* limit, uint32_t* v); +extern const char* GetVarint64Ptr(const char* p, const char* limit, uint64_t* v); + +// Returns the length of the varint32 or varint64 encoding of "v" +extern int VarintLength(uint64_t v); + +// Lower-level versions of Put... that write directly into a character buffer +// REQUIRES: dst has enough space for the value being written +extern void EncodeFixed16(char* buf, uint16_t value); +extern void EncodeFixed32(char* buf, uint32_t value); +extern void EncodeFixed64(char* buf, uint64_t value); + +// Lower-level versions of Put... that write directly into a character buffer +// and return a pointer just past the last byte written. +// REQUIRES: dst has enough space for the value being written +extern char* EncodeVarint32(char* dst, uint32_t value); +extern char* EncodeVarint64(char* dst, uint64_t value); + +// Lower-level versions of Get... that read directly from a character buffer +// without any bounds checking. 
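+//
+// A quick worked example of the varint encoding used here (illustrative
+// comment, not part of the original header): each byte stores 7 payload
+// bits, and the high bit marks "more bytes follow". Encoding 300
+// (0b100101100) therefore takes two bytes, 0xAC 0x02:
+//
+//   std::string buf;
+//   PutVarint32(&buf, 300);  // buf == "\xAC\x02"
+//   uint32_t v = 0;
+//   GetVarint32(&buf, &v);   // v == 300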
+
+inline uint16_t DecodeFixed16(const char* ptr) {
+  // Load the raw bytes
+  uint16_t result;
+  memcpy(&result, ptr, sizeof(result));  // gcc optimizes this to a plain load
+  return result;
+}
+
+inline uint32_t DecodeFixed32(const char* ptr) {
+  // Load the raw bytes
+  uint32_t result;
+  memcpy(&result, ptr, sizeof(result));  // gcc optimizes this to a plain load
+  return result;
+}
+
+inline uint64_t DecodeFixed64(const char* ptr) {
+  // Load the raw bytes
+  uint64_t result;
+  memcpy(&result, ptr, sizeof(result));  // gcc optimizes this to a plain load
+  return result;
+}
+
+inline void GetFixed16(std::string* dst, uint16_t* value) {
+  if (!dst || !value) {
+    return;
+  }
+  *value = DecodeFixed16(dst->data());
+  dst->erase(0, sizeof(uint16_t));
+}
+
+inline void GetFixed32(std::string* dst, uint32_t* value) {
+  if (!dst || !value) {
+    return;
+  }
+  *value = DecodeFixed32(dst->data());
+  dst->erase(0, sizeof(uint32_t));
+}
+
+inline void GetFixed64(std::string* dst, uint64_t* value) {
+  if (!dst || !value) {
+    return;
+  }
+  *value = DecodeFixed64(dst->data());
+  dst->erase(0, sizeof(uint64_t));
+}
+
+inline void GetFixed16(Slice* dst, uint16_t* value) {
+  if (!dst || !value) {
+    return;
+  }
+  *value = DecodeFixed16(dst->data());
+  dst->remove_prefix(sizeof(uint16_t) / sizeof(char));
+}
+
+inline void GetFixed32(Slice* dst, uint32_t* value) {
+  if (!dst || !value) {
+    return;
+  }
+  *value = DecodeFixed32(dst->data());
+  dst->remove_prefix(sizeof(uint32_t) / sizeof(char));
+}
+
+inline void GetFixed64(Slice* dst, uint64_t* value) {
+  if (!dst || !value) {
+    return;
+  }
+  *value = DecodeFixed64(dst->data());
+  dst->remove_prefix(sizeof(uint64_t) / sizeof(char));
+}
+
+// Internal routine for use by fallback path of GetVarint32Ptr
+extern const char* GetVarint32PtrFallback(const char* p, const char* limit, uint32_t* value);
+inline const char* GetVarint32Ptr(const char* p, const char* limit, uint32_t* value) {
+  if (p < limit) {
+    uint32_t result = *(reinterpret_cast<const unsigned char*>(p));
+    if ((result & 128) == 0) {
+      *value = result;
+      return p + 1;
+    }
+  }
+  return GetVarint32PtrFallback(p, limit, value);
+}
+
+}  // namespace pstd
+
+#endif  // __PSTD_CODING_H__
diff --git a/tools/pika_migrate/src/pstd/include/pstd_defer.h b/tools/pika_migrate/src/pstd/include/pstd_defer.h
new file mode 100644
index 0000000000..7c97c311d0
--- /dev/null
+++ b/tools/pika_migrate/src/pstd/include/pstd_defer.h
@@ -0,0 +1,99 @@
+// Copyright (c) 2015-present, Qihoo, Inc. All rights reserved.
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree. An additional grant
+// of patent rights can be found in the PATENTS file in the same directory.
+
+#ifndef __PSTD_DEFER_H__
+#define __PSTD_DEFER_H__
+
+#include <functional>
+#include <utility>
+
+namespace pstd {
+
+// The defer class for C++11
+//
+// Usage:
+// void f() {
+//   FILE* fp = fopen(xxx);
+//   if (!fp) return;
+//
+//   DEFER {
+//     // it'll be executed when f() exits.
+//     fclose(fp);
+//   };
+//
+//   ... // Do your business
+// }
+//
+// An example for timing a function's cost:
+//
+// #define STAT_FUNC_COST
+//   // !!! omits std::chrono namespace
+//   auto _start_ = steady_clock::now();
+//   DEFER {
+//     auto end = steady_clock::now();
+//     cout << "Used:" << duration_cast<milliseconds>(end - _start_).count();
+//   }
+//
+// // Insert it as the first line of your function.
+// void f() {
+//   STAT_FUNC_COST;
+//   // when f() exits, it will print its running time.
+// }
+//
+
+// CTAD: See https://en.cppreference.com/w/cpp/language/class_template_argument_deduction
+#if __cpp_deduction_guides >= 201606
+
+template <typename F>
+class ExecuteOnScopeExit {
+ public:
+  ExecuteOnScopeExit(F&& f) : func_(std::move(f)) {}
+  ExecuteOnScopeExit(const F& f) : func_(f) {}
+  ~ExecuteOnScopeExit() { func_(); }
+
+  ExecuteOnScopeExit(const ExecuteOnScopeExit& e) = delete;
+  ExecuteOnScopeExit& operator=(const ExecuteOnScopeExit& f) = delete;
+
+ private:
+  F func_;
+};
+
+#else
+
+class ExecuteOnScopeExit {
+ public:
+  ExecuteOnScopeExit() = default;
+
+  // movable
+  ExecuteOnScopeExit(ExecuteOnScopeExit&&) = default;
+  ExecuteOnScopeExit& operator=(ExecuteOnScopeExit&&) = default;
+
+  // non copyable
+  ExecuteOnScopeExit(const ExecuteOnScopeExit& e) = delete;
+  void operator=(const ExecuteOnScopeExit& f) = delete;
+
+  template <typename F>
+  ExecuteOnScopeExit(F&& f) : func_(std::forward<F>(f)) {}
+
+  ~ExecuteOnScopeExit() noexcept {
+    if (func_) func_();
+  }
+
+ private:
+  std::function<void()> func_;
+};
+
+#endif
+
+}  // namespace pstd
+
+#define _CONCAT(a, b) a##b
+#define _MAKE_DEFER_(line) pstd::ExecuteOnScopeExit _CONCAT(defer, line) = [&]()
+
+// !!! DEFER
+#undef DEFER
+#define DEFER _MAKE_DEFER_(__LINE__)
+
+#endif  // __PSTD_DEFER_H__
diff --git a/tools/pika_migrate/src/pstd/include/pstd_define.h b/tools/pika_migrate/src/pstd/include/pstd_define.h
new file mode 100644
index 0000000000..294dc482b7
--- /dev/null
+++ b/tools/pika_migrate/src/pstd/include/pstd_define.h
@@ -0,0 +1,9 @@
+#ifndef __PSTD_DEFINE_H__
+#define __PSTD_DEFINE_H__
+
+#define SPACE ' '
+#define COLON ':'
+#define COMMENT '#'
+#define COMMA ','
+
+#endif
diff --git a/tools/pika_migrate/src/pstd/include/pstd_hash.h b/tools/pika_migrate/src/pstd/include/pstd_hash.h
new file mode 100644
index 0000000000..deb8924160
--- /dev/null
+++ b/tools/pika_migrate/src/pstd/include/pstd_hash.h
@@ -0,0 +1,141 @@
+/*
+ * Updated to C++, zedwood.com 2012
+ * Based on Olivier Gay's version
+ * See Modified BSD License below:
+ *
+ * FIPS 180-2 SHA-224/256/384/512 implementation
+ * Issue date: 04/30/2005
+ * http://www.ouah.org/ogay/sha2/
+ *
+ * Copyright (C) 2005, 2007 Olivier Gay
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of the project nor the names of its contributors
+ *    may be used to endorse or promote products derived from this software
+ *    without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE PROJECT AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED.
IN NO EVENT SHALL THE PROJECT OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + */ + +/* MD5 + converted to C++ class by Frank Thilo (thilo@unix-ag.org) + for bzflag (http://www.bzflag.org) + + based on: + + md5.h and md5.c + reference implementation of RFC 1321 + + Copyright (C) 1991-2, RSA Data Security, Inc. Created 1991. All +rights reserved. + +License to copy and use this software is granted provided that it +is identified as the "RSA Data Security, Inc. MD5 Message-Digest +Algorithm" in all material mentioning or referencing this software +or this function. + +License is also granted to make and use derivative works provided +that such works are identified as "derived from the RSA Data +Security, Inc. MD5 Message-Digest Algorithm" in all material +mentioning or referencing the derived work. + +RSA Data Security, Inc. makes no representations concerning either +the merchantability of this software or the suitability of this +software for any particular purpose. It is provided "as is" +without express or implied warranty of any kind. + +These notices must be retained in any copies of any part of this +documentation and/or software. + +*/ + +// Copyright (c) 2015-present, Qihoo, Inc. All rights reserved. +// This source code is licensed under the BSD-style license found in the +// LICENSE file in the root directory of this source tree. An additional grant +// of patent rights can be found in the PATENTS file in the same directory. 
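+//
+// Usage sketch (illustrative comment, not part of the original header):
+//
+//   std::string hex = pstd::md5("hello");        // 32-char hex digest
+//   std::string raw = pstd::md5("hello", true);  // raw 16-byte digest
+//   std::string sum = pstd::sha256("hello");     // 64-char hex digest
+//   bool ok = pstd::isSha256(sum);               // sanity-check the format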
+
+#ifndef __PSTD_HASH_H__
+#define __PSTD_HASH_H__
+#include <cstring>
+#include <iostream>
+#include <string>
+
+namespace pstd {
+
+std::string md5(const std::string& str, bool raw = false);
+std::string sha256(const std::string& input, bool raw = false);
+
+bool isSha256(const std::string& input);
+
+// a small class for calculating MD5 hashes of strings or byte arrays
+// it is not meant to be fast or secure
+//
+// usage: 1) feed it blocks of uchars with update()
+//        2) finalize()
+//        3) get hexdigest() string
+//        or
+//        MD5(std::string).hexdigest()
+//
+// assumes that char is 8 bit and int is 32 bit
+class MD5 {
+ public:
+  using size_type = unsigned int;  // must be 32bit
+
+  MD5();
+  MD5(const std::string& text);
+  void update(const unsigned char* input, size_type length);
+  void update(const char* input, size_type length);
+  MD5& finalize();
+  std::string hexdigest() const;
+  std::string rawdigest() const;
+  friend std::ostream& operator<<(std::ostream& /*out*/, MD5 md5);
+
+ private:
+  void init();
+  using uint1 = unsigned char;  // 8bit
+  using uint4 = unsigned int;   // 32bit
+  enum { blocksize = 64 };      // VC6 won't eat a const static int here
+
+  void transform(const uint1 block[blocksize]);
+  static void decode(uint4 output[], const uint1 input[], size_type len);
+  static void encode(uint1 output[], const uint4 input[], size_type len);
+
+  bool finalized;
+  uint1 buffer[blocksize];  // bytes that didn't fit in last 64 byte chunk
+  uint4 count[2];           // 64bit counter for number of bits (lo, hi)
+  uint4 state[4];           // digest so far
+  uint1 digest[16];         // the result
+
+  // low level logic operations
+  static inline uint4 F(uint4 x, uint4 y, uint4 z);
+  static inline uint4 G(uint4 x, uint4 y, uint4 z);
+  static inline uint4 H(uint4 x, uint4 y, uint4 z);
+  static inline uint4 I(uint4 x, uint4 y, uint4 z);
+  static inline uint4 rotate_left(uint4 x, int n);
+  static inline void FF(uint4& a, uint4 b, uint4 c, uint4 d, uint4 x, uint4 s, uint4 ac);
+  static inline void GG(uint4& a, uint4 b, uint4 c, uint4 d, uint4 x, uint4 s, uint4 ac);
+  static inline void HH(uint4& a, uint4 b, uint4 c, uint4 d, uint4 x, uint4 s, uint4 ac);
+  static inline void II(uint4& a, uint4 b, uint4 c, uint4 d, uint4 x, uint4 s, uint4 ac);
+};
+
+}  // namespace pstd
+
+#endif  // __PSTD_HASH_H__
diff --git a/tools/pika_migrate/src/pstd/include/pstd_mutex.h b/tools/pika_migrate/src/pstd/include/pstd_mutex.h
new file mode 100644
index 0000000000..b1ea9c8203
--- /dev/null
+++ b/tools/pika_migrate/src/pstd/include/pstd_mutex.h
@@ -0,0 +1,75 @@
+#ifndef __PSTD_MUTEXLOCK_H__
+#define __PSTD_MUTEXLOCK_H__
+
+#include <atomic>
+#include <condition_variable>
+#include <functional>
+#include <memory>
+#include <mutex>
+#include <shared_mutex>
+#include <string>
+#include <thread>
+#include <unordered_map>
+#include <vector>
+#include "noncopyable.h"
+
+namespace pstd {
+
+using Mutex = std::mutex;
+using CondVar = std::condition_variable;
+using RWMutex = std::shared_mutex;
+
+using OnceType = std::once_flag;
+
+template <typename F, typename... Args>
+void InitOnce(OnceType& once, F&& f, Args&&... args) {
+  return std::call_once(once, std::forward<F>(f), std::forward<Args>(args)...);
+}
+
+class RefMutex : public pstd::noncopyable {
+ public:
+  RefMutex() = default;
+  ~RefMutex() = default;
+
+  // Lock and Unlock will increase and decrease refs_,
+  // should check refs before Unlock
+  void Lock();
+  void Unlock();
+
+  void Ref();
+  void Unref();
+  bool IsLastRef() { return refs_ == 1; }
+
+ private:
+  std::mutex mu_;
+  int refs_ = 0;
+};
+
+class RecordMutex : public pstd::noncopyable {
+ public:
+  RecordMutex() = default;
+  ~RecordMutex();
+
+  void MultiLock(const std::vector<std::string>& keys);
+  void Lock(const std::string& key);
+  void MultiUnlock(const std::vector<std::string>& keys);
+  void Unlock(const std::string& key);
+
+ private:
+  Mutex mutex_;
+
+  std::unordered_map<std::string, RefMutex*> records_;
+};
+
+class RecordLock : public pstd::noncopyable {
+ public:
+  RecordLock(RecordMutex* mu, std::string key) : mu_(mu), key_(std::move(key)) { mu_->Lock(key_); }
+  ~RecordLock() { mu_->Unlock(key_); }
+
+ private:
+  RecordMutex* const mu_;
+  std::string key_;
+};
+
+}  // namespace pstd
+
+#endif
diff --git a/tools/pika_migrate/src/pstd/include/pstd_slice.h b/tools/pika_migrate/src/pstd/include/pstd_slice.h
new file mode 100644
index 0000000000..9b0402ceea
--- /dev/null
+++ b/tools/pika_migrate/src/pstd/include/pstd_slice.h
@@ -0,0 +1,114 @@
+// Copyright (c) 2015-present, Qihoo, Inc. All rights reserved.
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree. An additional grant
+// of patent rights can be found in the PATENTS file in the same directory.
+
+// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file. See the AUTHORS file for names of contributors.
+//
+// Slice is a simple structure containing a pointer into some external
+// storage and a size. The user of a Slice must ensure that the slice
+// is not used after the corresponding external storage has been
+// deallocated.
+//
+// Multiple threads can invoke const methods on a Slice without
+// external synchronization, but if any of the threads may call a
+// non-const method, all threads accessing the same Slice must use
+// external synchronization.
+
+#ifndef __PSTD_SLICE_H__
+#define __PSTD_SLICE_H__
+
+#include <cassert>
+#include <cstddef>
+#include <cstring>
+#include <string>
+
+namespace pstd {
+
+class Slice {
+ public:
+  // Create an empty slice.
+  Slice() = default;
+
+  // Create a slice that refers to d[0,n-1].
+  Slice(const char* d, size_t n) : data_(d), size_(n) {}
+
+  // Create a slice that refers to the contents of "s"
+  Slice(const std::string& s) : data_(s.data()), size_(s.size()) {}
+
+  // Create a slice that refers to s[0,strlen(s)-1]
+  Slice(const char* s) : data_(s), size_(strlen(s)) {}
+
+  // Return a pointer to the beginning of the referenced data
+  const char* data() const { return data_; }
+
+  // Return the length (in bytes) of the referenced data
+  size_t size() const { return size_; }
+
+  // Return true iff the length of the referenced data is zero
+  bool empty() const { return size_ == 0; }
+
+  // Return the ith byte in the referenced data.
+  // REQUIRES: n < size()
+  char operator[](size_t n) const {
+    assert(n < size());
+    return data_[n];
+  }
+
+  // Change this slice to refer to an empty array
+  void clear() {
+    data_ = "";
+    size_ = 0;
+  }
+
+  // Drop the first "n" bytes from this slice.
+  void remove_prefix(size_t n) {
+    assert(n <= size());
+    data_ += n;
+    size_ -= n;
+  }
+
+  // Return a string that contains the copy of the referenced data.
+  std::string ToString() const { return std::string(data_, size_); }
+
+  // Three-way comparison. Returns value:
+  //   <  0 iff "*this" <  "b",
+  //   == 0 iff "*this" == "b",
+  //   >  0 iff "*this" >  "b"
+  int compare(const Slice& b) const;
+
+  // Return true iff "x" is a prefix of "*this"
+  bool starts_with(const Slice& x) const { return ((size_ >= x.size_) && (memcmp(data_, x.data_, x.size_) == 0)); }
+
+ private:
+  const char* data_{""};
+  size_t size_ = 0;
+
+  // Intentionally copyable
+};
+
+inline bool operator==(const Slice& x, const Slice& y) {
+  return ((x.size() == y.size()) && (memcmp(x.data(), y.data(), x.size()) == 0));
+}
+
+inline bool operator!=(const Slice& x, const Slice& y) { return !(x == y); }
+
+inline int Slice::compare(const Slice& b) const {
+  const size_t min_len = (size_ < b.size_) ? size_ : b.size_;
+  int r = memcmp(data_, b.data_, min_len);
+  if (r == 0) {
+    if (size_ < b.size_) {
+      r = -1;
+    } else if (size_ > b.size_) {
+      r = +1;
+    }
+  }
+
+  return r;
+}
+
+}  // namespace pstd
+
+#endif  // __PSTD_SLICE_H__
diff --git a/tools/pika_migrate/src/pstd/include/pstd_status.h b/tools/pika_migrate/src/pstd/include/pstd_status.h
new file mode 100644
index 0000000000..e73282657f
--- /dev/null
+++ b/tools/pika_migrate/src/pstd/include/pstd_status.h
@@ -0,0 +1,129 @@
+#ifndef __PSTD_STATUS_H__
+#define __PSTD_STATUS_H__
+
+#include <string>
+#include "pstd/include/pstd_slice.h"
+
+namespace pstd {
+
+class Status {
+ public:
+  // Create a success status.
+  Status() = default;
+  ~Status() { delete[] state_; }
+
+  // Copy the specified status.
+  Status(const Status& s);
+  void operator=(const Status& s);
+
+  // Return a success status.
+  static Status OK() { return {}; }
+
+  // Return error status of an appropriate type.
+  static Status NotFound(const Slice& msg, const Slice& msg2 = Slice()) { return Status(kNotFound, msg, msg2); }
+  static Status Corruption(const Slice& msg, const Slice& msg2 = Slice()) { return Status(kCorruption, msg, msg2); }
+  static Status NotSupported(const Slice& msg, const Slice& msg2 = Slice()) { return Status(kNotSupported, msg, msg2); }
+  static Status InvalidArgument(const Slice& msg, const Slice& msg2 = Slice()) { return Status(kInvalidArgument, msg, msg2); }
+  static Status IOError(const Slice& msg, const Slice& msg2 = Slice()) { return Status(kIOError, msg, msg2); }
+  static Status EndFile(const Slice& msg, const Slice& msg2 = Slice()) { return Status(kEndFile, msg, msg2); }
+
+  static Status Incomplete(const Slice& msg, const Slice& msg2 = Slice()) { return Status(kIncomplete, msg, msg2); }
+
+  static Status Complete(const Slice& msg, const Slice& msg2 = Slice()) { return Status(kComplete, msg, msg2); }
+
+  static Status Timeout(const Slice& msg, const Slice& msg2 = Slice()) { return Status(kTimeout, msg, msg2); }
+
+  static Status AuthFailed(const Slice& msg, const Slice& msg2 = Slice()) { return Status(kAuthFailed, msg, msg2); }
+
+  static Status Busy(const Slice& msg, const Slice& msg2 = Slice()) { return Status(kBusy, msg, msg2); }
+
+  static Status ItemNotExist(const Slice& msg, const Slice& msg2 = Slice()) { return Status(kItemNotExist, msg, msg2); }
+
+  static Status Error(const Slice& msg, const Slice& msg2 = Slice()) { return Status(kError, msg, msg2); }
+
+  // Returns true if the status indicates success.
+  bool ok() const { return !state_; }
+
+  // Returns true if the status indicates a NotFound error.
+  bool IsNotFound() const { return code() == kNotFound; }
+
+  // Returns true if the status indicates a Corruption error.
+  bool IsCorruption() const { return code() == kCorruption; }
+
+  // Returns true if the status indicates a NotSupported error.
+  bool IsNotSupported() const { return code() == kNotSupported; }
+
+  // Returns true if the status indicates an IOError.
+  bool IsIOError() const { return code() == kIOError; }
+
+  // Returns true if the status indicates an EOF.
+  bool IsEndFile() const { return code() == kEndFile; }
+
+  // Returns true if the status is complete.
+  bool IsComplete() const { return code() == kComplete; }
+
+  // Returns true if the status is Incomplete
+  bool IsIncomplete() const { return code() == kIncomplete; }
+
+  // Returns true if the status is InvalidArgument
+  bool IsInvalidArgument() const { return code() == kInvalidArgument; }
+
+  // Returns true if the status is Timeout
+  bool IsTimeout() const { return code() == kTimeout; }
+
+  // Returns true if the status is AuthFailed
+  bool IsAuthFailed() const { return code() == kAuthFailed; }
+
+  // Return true if the status is Busy
+  bool IsBusy() const { return code() == kBusy; }
+
+  bool IsError() const { return code() == kError; }
+
+  // Return a string representation of this status suitable for printing.
+  // Returns the string "OK" for success.
+  std::string ToString() const;
+
+ private:
+  // OK status has a null state_. Otherwise, state_ is a new[] array
+  // of the following form:
+  //    state_[0..3] == length of message
+  //    state_[4]    == code
+  //    state_[5..]  == message
+  const char* state_{nullptr};
+
+  enum Code {
+    kOk = 0,
+    kNotFound = 1,
+    kCorruption = 2,
+    kNotSupported = 3,
+    kInvalidArgument = 4,
+    kIOError = 5,
+    kEndFile = 6,
+    kIncomplete = 7,
+    kComplete = 8,
+    kTimeout = 9,
+    kAuthFailed = 10,
+    kBusy = 11,
+    kItemNotExist = 12,
+    kError = 13
+  };
+
+  Code code() const { return !state_ ? kOk : static_cast<Code>(state_[4]); }
+
+  Status(Code code, const Slice& msg, const Slice& msg2);
+  static const char* CopyState(const char* s);
+};
+
+inline Status::Status(const Status& s) { state_ = !s.state_ ? nullptr : CopyState(s.state_); }
+inline void Status::operator=(const Status& s) {
+  // The following condition catches both aliasing (when this == &s),
+  // and the common case where both s and *this are ok.
+  if (&s != this && state_ != s.state_) {
+    delete[] state_;
+    state_ = !s.state_ ? nullptr : CopyState(s.state_);
+  }
+}
+
+}  // namespace pstd
+
+#endif  // __PSTD_STATUS_H__
diff --git a/tools/pika_migrate/src/pstd/include/pstd_string.h b/tools/pika_migrate/src/pstd/include/pstd_string.h
new file mode 100644
index 0000000000..ae1645783c
--- /dev/null
+++ b/tools/pika_migrate/src/pstd/include/pstd_string.h
@@ -0,0 +1,69 @@
+// Copyright (c) 2015-present, Qihoo, Inc. All rights reserved.
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree. An additional grant
+// of patent rights can be found in the PATENTS file in the same directory.
+
+/*
+ * Copyright (c) 2009-2012, Salvatore Sanfilippo
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ *   * Redistributions of source code must retain the above copyright notice,
+ *     this list of conditions and the following disclaimer.
+ *   * Redistributions in binary form must reproduce the above copyright
+ *     notice, this list of conditions and the following disclaimer in the
+ *     documentation and/or other materials provided with the distribution.
+ *   * Neither the name of Redis nor the names of its contributors may be used
+ *     to endorse or promote products derived from this software without
+ *     specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef __PSTD_STRING_H__
+#define __PSTD_STRING_H__
+
+#include <string>
+#include <unordered_set>
+#include <vector>
+
+namespace pstd {
+
+int stringmatchlen(const char* pattern, int patternLen, const char* string, int stringLen, int nocase);
+int stringmatch(const char* p, const char* s, int nocase);
+long long memtoll(const char* p, int* err);
+int ll2string(char* dst, size_t dstlen, long long svalue);
+int string2int(const char* s, size_t slen, long long* value);
+int string2int(const char* s, size_t slen, long* lval);
+int string2int(const char* s, size_t slen, unsigned long* lval);
+int d2string(char* buf, size_t len, double value);
+int string2d(const char* s, size_t slen, double* dval);
+std::vector<std::string>& StringSplit(const std::string& s, char delim, std::vector<std::string>& elems);
+void StringSplit2Set(const std::string& s, char delim, std::unordered_set<std::string>& elems);
+std::string Set2String(const std::unordered_set<std::string>& elems, char delim);
+std::string StringConcat(const std::vector<std::string>& elems, char delim);
+std::string& StringToLower(std::string& ori);
+std::string& StringToUpper(std::string& ori);
+std::string IpPortString(const std::string& ip, int port);
+std::string ToRead(const std::string& str);
+bool ParseIpPortString(const std::string& ip_port, std::string& ip, int& port);
+std::string StringTrim(const std::string& ori, const std::string& charlist = " ");
+std::string getRandomHexChars(size_t len);
+
+bool isspace(const std::string& str);
+
+}  // namespace pstd
+
+#endif  // __PSTD_STRING_H__
diff --git a/tools/pika_migrate/src/pstd/include/random.h b/tools/pika_migrate/src/pstd/include/random.h
new file mode 100644
index 0000000000..b5636c1604
--- /dev/null
+++ b/tools/pika_migrate/src/pstd/include/random.h
@@ -0,0 +1,21 @@
+#ifndef __PSTD_INCLUDE_RANDOM_H__
+#define __PSTD_INCLUDE_RANDOM_H__
+
+#include <cstdlib>
+#include <ctime>
+
+namespace pstd {
+
+class Random {
+ public:
+  Random() { srand(time(nullptr)); }
+
+  /*
+   * return a random number in [1...n]
+   */
+  static uint32_t Uniform(int n) { return (rand() % n) + 1; }
+};
+
+}  // namespace pstd
+
+#endif  // __PSTD_INCLUDE_RANDOM_H__
diff --git a/tools/pika_migrate/src/pstd/include/rsync.h b/tools/pika_migrate/src/pstd/include/rsync.h
new file mode 100644
index 0000000000..386ddf2d44
--- /dev/null
+++ b/tools/pika_migrate/src/pstd/include/rsync.h
@@ -0,0 +1,33 @@
+#ifndef __PSTD_RSYNC_H__
+#define __PSTD_RSYNC_H__
+
+#include <string>
+#include <utility>
+
+namespace pstd {
+const std::string kRsyncSecretFile = "pstd_rsync.secret";
+const std::string kRsyncConfFile = "pstd_rsync.conf";
+const std::string kRsyncLogFile = "pstd_rsync.log";
+const std::string kRsyncPidFile = "pstd_rsync.pid";
+const std::string kRsyncLockFile = "pstd_rsync.lock";
+const std::string kRsyncSubDir = "rsync";
+const std::string kRsyncUser = "rsync_user";
+struct RsyncRemote {
+  std::string host;
+  int port;
+  std::string module;
+  int kbps;  // speed limit
+  RsyncRemote(std::string _host, const int _port, std::string _module, const int _kbps)
+      : host(std::move(_host)), port(_port), module(std::move(_module)), kbps(_kbps) {}
+};
+
+int StartRsync(const std::string& raw_path, const std::string& module, const std::string& ip, int port,
+               const std::string& passwd);
+int StopRsync(const std::string& path);
+int RsyncSendFile(const std::string& local_file_path, const std::string& remote_file_path,
+                  const std::string& secret_file_path, const RsyncRemote& remote);
+int RsyncSendClearTarget(const std::string& local_dir_path, const std::string& remote_dir_path,
+                         const std::string& secret_file_path, const RsyncRemote& remote);
+
+}  // namespace pstd
+#endif
diff --git a/tools/pika_migrate/src/pstd/include/scope_record_lock.h b/tools/pika_migrate/src/pstd/include/scope_record_lock.h
new file mode 100644
index 0000000000..2ca3c93a21
--- /dev/null
+++ b/tools/pika_migrate/src/pstd/include/scope_record_lock.h
@@ -0,0 +1,57 @@
+// Copyright (c) 2017-present The storage Authors. All rights reserved.
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree. An additional grant
+// of patent rights can be found in the PATENTS file in the same directory.
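+//
+// Usage sketch (illustrative only, not part of the original header): the
+// RAII guards declared below serialize writers touching the same key.
+//
+//   void WriteKey(const std::shared_ptr<pstd::lock::LockMgr>& mgr,
+//                 const rocksdb::Slice& key) {
+//     pstd::lock::ScopeRecordLock guard(mgr, key);  // locks key here
+//     // ... mutate the record ...
+//   }  // guard unlocks key on scope exit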
+
+#ifndef __SRC_SCOPE_RECORD_LOCK_H__
+#define __SRC_SCOPE_RECORD_LOCK_H__
+
+#include <memory>
+#include <string>
+#include <utility>
+#include <vector>
+
+#include "pstd/include/lock_mgr.h"
+#include "pstd/include/noncopyable.h"
+#include "rocksdb/slice.h"
+
+namespace pstd::lock {
+
+using Slice = rocksdb::Slice;
+
+class ScopeRecordLock final : public pstd::noncopyable {
+ public:
+  ScopeRecordLock(const std::shared_ptr<LockMgr>& lock_mgr, const Slice& key) : lock_mgr_(lock_mgr), key_(key) {
+    lock_mgr_->TryLock(key_.ToString());
+  }
+  ~ScopeRecordLock() { lock_mgr_->UnLock(key_.ToString()); }
+
+ private:
+  std::shared_ptr<LockMgr> const lock_mgr_;
+  Slice key_;
+};
+
+class MultiScopeRecordLock final : public pstd::noncopyable {
+ public:
+  MultiScopeRecordLock(const std::shared_ptr<LockMgr>& lock_mgr, const std::vector<std::string>& keys);
+  ~MultiScopeRecordLock();
+
+ private:
+  std::shared_ptr<LockMgr> const lock_mgr_;
+  std::vector<std::string> keys_;
+};
+
+class MultiRecordLock : public noncopyable {
+ public:
+  explicit MultiRecordLock(const std::shared_ptr<LockMgr>& lock_mgr) : lock_mgr_(lock_mgr) {}
+  ~MultiRecordLock() = default;
+
+  void Lock(const std::vector<std::string>& keys);
+  void Unlock(const std::vector<std::string>& keys);
+
+ private:
+  std::shared_ptr<LockMgr> const lock_mgr_;
+};
+
+}  // namespace pstd::lock
+#endif  // __SRC_SCOPE_RECORD_LOCK_H__
diff --git a/tools/pika_migrate/src/pstd/include/testutil.h b/tools/pika_migrate/src/pstd/include/testutil.h
new file mode 100644
index 0000000000..f5a5d84950
--- /dev/null
+++ b/tools/pika_migrate/src/pstd/include/testutil.h
@@ -0,0 +1,21 @@
+#ifndef __PSTD_INCLUDE_TESTUTIL_H__
+#define __PSTD_INCLUDE_TESTUTIL_H__
+
+#include <cstddef>
+#include <string>
+
+namespace pstd {
+
+extern char* get_date_time();
+extern int GetTestDirectory(std::string* result);
+extern void current_time_str(char* str, size_t max_len);
+
+#define output(fmt, args...) do { \
+    char __time_str__[1024]; \
+    current_time_str(__time_str__, sizeof(__time_str__)); \
+    printf("[%s %s %d]" fmt "\n", __time_str__, __FILE_NAME__, __LINE__, ##args); \
+  } while (0)
+
+}  // namespace pstd
+
+#endif  // __PSTD_INCLUDE_TESTUTIL_H__
diff --git a/tools/pika_migrate/src/pstd/include/version.h b/tools/pika_migrate/src/pstd/include/version.h
new file mode 100644
index 0000000000..c4e9e5f55b
--- /dev/null
+++ b/tools/pika_migrate/src/pstd/include/version.h
@@ -0,0 +1,19 @@
+// Copyright (c) 2015-present, Qihoo, Inc. All rights reserved.
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree. An additional grant
+// of patent rights can be found in the PATENTS file in the same directory.
+
+// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree. An additional grant
+// of patent rights can be found in the PATENTS file in the same directory.
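+//
+// Illustrative use of the version macros below (comment only, not part of
+// the original header):
+//
+//   char ver[16];
+//   snprintf(ver, sizeof(ver), "%d.%d.%d", PSTD_MAJOR, PSTD_MINOR, PSTD_PATCH);  // "1.0.1"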
+
+#ifndef __PSTD_VERSION__
+# define __PSTD_VERSION__
+
+# define PSTD_MAJOR 1
+# define PSTD_MINOR 0
+# define PSTD_PATCH 1
+
+#endif
diff --git a/tools/pika_migrate/src/pstd/include/xdebug.h b/tools/pika_migrate/src/pstd/include/xdebug.h
new file mode 100644
index 0000000000..bee6243efa
--- /dev/null
+++ b/tools/pika_migrate/src/pstd/include/xdebug.h
@@ -0,0 +1,86 @@
+/**
+ * @file xdebug.h
+ * @brief debug macros
+ * @author chenzongzhi
+ * @version 1.0.0
+ * @date 2014-04-25
+ */
+
+#ifndef __XDEBUG_H__
+# define __XDEBUG_H__
+# include <errno.h>
+# include <stdio.h>
+# include <stdlib.h>
+# include <string.h>
+# include <sys/time.h>
+
+# ifdef __XDEBUG__
+# define pint(x) qf_debug("%s = %d", #x, x)
+# define psize(x) qf_debug("%s = %zu", #x, x)
+# define pstr(x) qf_debug("%s = %s", #x, x)
+// If A does not hold, log message M and exit
+# define qf_check(A, M, ...) \
+  if (!(A)) { \
+    log_err(M, ##__VA_ARGS__); \
+    errno = 0; \
+    exit(-1); \
+  }
+
+// Used to check whether execution reaches this point
+# define sentinel(M, ...) \
+  { \
+    qf_debug(M, ##__VA_ARGS__); \
+    errno = 0; \
+  }
+
+# define qf_bin_debug(buf, size) \
+  { fwrite(buf, 1, size, stderr); }
+
+# define _debug_time_def timeval s1, e;
+# define _debug_getstart gettimeofday(&s1, nullptr)
+# define _debug_getend gettimeofday(&e, nullptr)
+# define _debug_time ((int)(((e.tv_sec - s1.tv_sec) * 1000 + (e.tv_usec - s1.tv_usec) / 1000)))
+
+# define clean_errno() (errno == 0 ? "None" : strerror(errno))
+# define log_err(M, ...) \
+  { \
+    fprintf(stderr, "[ERROR] (%s:%d %s errno: %s) " M "\n", __FILE__, __LINE__, get_date_time().c_str(), clean_errno(), ##__VA_ARGS__); \
+    exit(-1); \
+  }
+# define log_warn(M, ...) \
+  fprintf(stderr, "[WARN] (%s:%d: errno: %s) " M "\n", __FILE__, __LINE__, clean_errno(), ##__VA_ARGS__)
+# define log_info(M, ...) fprintf(stderr, "[INFO] (%s:%d) " M "\n", __FILE__, __LINE__, ##__VA_ARGS__)
+
+# else
+
+# define pint(x) \
+  {}
+# define psize(x) \
+  {}
+# define pstr(x) \
+  {}
+# define qf_bin_debug(buf, size) \
+  {}
+
+# define _debug_time_def \
+  {}
+# define _debug_getstart \
+  {}
+# define _debug_getend \
+  {}
+# define _debug_time 0
+
+# define sentinel(M, ...) \
+  {}
+# define qf_check(A, M, ...) \
+  {}
+# define log_err(M, ...) \
+  {}
+# define log_warn(M, ...) \
+  {}
+# define log_info(M, ...) \
+  {}
+
+# endif
+
+#endif  //__XDEBUG_H__
+
+/* vim: set ts=4 sw=4 sts=4 tw=100 */
diff --git a/tools/pika_migrate/src/pstd/src/base_conf.cc b/tools/pika_migrate/src/pstd/src/base_conf.cc
new file mode 100644
index 0000000000..e73878d702
--- /dev/null
+++ b/tools/pika_migrate/src/pstd/src/base_conf.cc
@@ -0,0 +1,381 @@
+// Copyright (c) 2015-present, Qihoo, Inc. All rights reserved.
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree. An additional grant
+// of patent rights can be found in the PATENTS file in the same directory.
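+//
+// The parser in LoadConf() below accepts lines of the form "name : value";
+// a leading '#' marks a comment line. For example (illustrative only, not a
+// real pika.conf excerpt):
+//
+//   # networking
+//   port : 9221
+//   thread-num : 4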
+ +#include "pstd/include/base_conf.h" + +#include +#include + +#include +#include + +#include "pstd/include/env.h" +#include "pstd/include/pstd_string.h" +#include "pstd/include/xdebug.h" + +namespace pstd { + +static const int kConfItemLen = 1024 * 1024; + +BaseConf::BaseConf(const std::string& path) : rep_(std::make_unique(path)) {} + +BaseConf::~BaseConf() = default; + +int BaseConf::LoadConf() { + if (!FileExists(rep_->path)) { + return -1; + } + std::unique_ptr sequential_file; + NewSequentialFile(rep_->path, sequential_file); + // read conf items + + char line[kConfItemLen]; + char name[kConfItemLen]; + char value[kConfItemLen]; + int line_len = 0; + int name_len = 0; + int value_len = 0; + int sep_sign = 0; + Rep::ConfType type = Rep::kConf; + + while (sequential_file->ReadLine(line, kConfItemLen) != nullptr) { + sep_sign = 0; + name_len = 0; + value_len = 0; + type = Rep::kComment; + line_len = static_cast(strlen(line)); + for (int i = 0; i < line_len; i++) { + if (i == 0 && line[i] == COMMENT) { + type = Rep::kComment; + break; + } + switch (line[i]) { + case '\r': + case '\n': + break; + case SPACE: + if (value_len == 0) { // Allow spaces in value + break; + } + case COLON: + if (sep_sign == 0) { + type = Rep::kConf; + sep_sign = 1; + break; + } + default: + if (sep_sign == 0) { + name[name_len++] = line[i]; + } else { + value[value_len++] = line[i]; + } + } + } + + if (type == Rep::kConf) { + rep_->item.emplace_back(Rep::kConf, std::string(name, name_len), std::string(value, value_len)); + } else { + rep_->item.emplace_back(Rep::kComment, std::string(line, line_len)); + } + } + + // sequential_file->Close(); + return 0; +} + +int BaseConf::ReloadConf() { + auto rep = std::move(rep_); + rep_ = std::make_unique(rep->path); + if (LoadConf() == -1) { + rep_ = std::move(rep); + return -1; + } + return 0; +} + +bool BaseConf::GetConfInt(const std::string& name, int* value) const { + for (auto& i : rep_->item) { + if (i.type == Rep::kComment) { + continue; + } + if (name == i.name) { + (*value) = atoi(i.value.c_str()); + return true; + } + } + return false; +} + +bool BaseConf::GetConfIntHuman(const std::string& name, int* value) const { + for (auto& i : rep_->item) { + if (i.type == Rep::kComment) { + continue; + } + if (name == i.name) { + auto c_str = i.value.c_str(); + (*value) = static_cast(strtoll(c_str, nullptr, 10)); + char last = c_str[i.value.size() - 1]; + if (last == 'K' || last == 'k') { + (*value) *= (1 << 10); + } else if (last == 'M' || last == 'm') { + (*value) *= (1 << 20); + } else if (last == 'G' || last == 'g') { + (*value) *= (1 << 30); + } + return true; + } + } + return false; +} + +bool BaseConf::GetConfInt64Human(const std::string& name, int64_t* value) const { + for (auto& i : rep_->item) { + if (i.type == Rep::kComment) { + continue; + } + if (name == i.name) { + auto c_str = i.value.c_str(); + (*value) = strtoll(c_str, nullptr, 10); + char last = c_str[i.value.size() - 1]; + if (last == 'K' || last == 'k') { + (*value) *= (1 << 10); + } else if (last == 'M' || last == 'm') { + (*value) *= (1 << 20); + } else if (last == 'G' || last == 'g') { + (*value) *= (1 << 30); + } + return true; + } + } + return false; +} + +bool BaseConf::GetConfInt64(const std::string& name, int64_t* value) const { + for (auto& i : rep_->item) { + if (i.type == Rep::kComment) { + continue; + } + if (name == i.name) { + (*value) = strtoll(i.value.c_str(), nullptr, 10); + return true; + } + } + return false; +} + +bool BaseConf::GetConfStr(const std::string& name, std::string* val) 
+  for (auto& i : rep_->item) {
+    if (i.type == Rep::kComment) {
+      continue;
+    }
+    if (name == i.name) {
+      (*val) = i.value;
+      return true;
+    }
+  }
+  return false;
+}
+
+bool BaseConf::GetConfStrVec(const std::string& name, std::vector<std::string>* value) const {
+  for (auto& i : rep_->item) {
+    if (i.type == Rep::kComment) {
+      continue;
+    }
+    if (name == i.name) {
+      std::string val_str = i.value;
+      std::string::size_type pos;
+      while (true) {
+        pos = val_str.find(',');
+        if (pos == std::string::npos) {
+          value->push_back(StringTrim(val_str));
+          break;
+        }
+        value->push_back(StringTrim(val_str.substr(0, pos)));
+        val_str = val_str.substr(pos + 1);
+      }
+      return true;
+    }
+  }
+  return false;
+}
+
+bool BaseConf::GetConfBool(const std::string& name, bool* value) const {
+  for (auto& i : rep_->item) {
+    if (i.type == Rep::kComment) {
+      continue;
+    }
+    if (name == i.name) {
+      if (i.value == "true" || i.value == "1" || i.value == "yes") {
+        (*value) = true;
+      } else if (i.value == "false" || i.value == "0" || i.value == "no") {
+        (*value) = false;
+      }
+      return true;
+    }
+  }
+  return false;
+}
+
+bool BaseConf::GetConfDouble(const std::string& name, double* value) const {
+  for (auto& item : rep_->item) {
+    if (item.type == Rep::kComment) {
+      continue;
+    }
+    if (name == item.name) {
+      *value = std::strtod(item.value.c_str(), nullptr);
+      return true;
+    }
+  }
+  return false;
+}
+
+bool BaseConf::GetConfStrMulti(const std::string& name, std::vector<std::string>* values) const {
+  for (auto& i : rep_->item) {
+    if (i.type == Rep::kComment) {
+      continue;
+    }
+    if (name == i.name) {
+      values->emplace_back(i.value);
+    }
+  }
+  return true;
+}
+
+bool BaseConf::SetConfInt(const std::string& name, const int value) {
+  for (auto& i : rep_->item) {
+    if (i.type == Rep::kComment) {
+      continue;
+    }
+    if (name == i.name) {
+      i.value = std::to_string(value);
+      return true;
+    }
+  }
+  return false;
+}
+
+bool BaseConf::SetConfInt64(const std::string& name, const int64_t value) {
+  for (auto& i : rep_->item) {
+    if (i.type == Rep::kComment) {
+      continue;
+    }
+    if (name == i.name) {
+      i.value = std::to_string(value);
+      return true;
+    }
+  }
+  return false;
+}
+
+bool BaseConf::SetConfStr(const std::string& name, const std::string& value) {
+  for (auto& i : rep_->item) {
+    if (i.type == Rep::kComment) {
+      continue;
+    }
+    if (name == i.name) {
+      i.value = value;
+      return true;
+    }
+  }
+  return false;
+}
+
+bool BaseConf::SetConfBool(const std::string& name, const bool value) {
+  for (auto& i : rep_->item) {
+    if (i.type == Rep::kComment) {
+      continue;
+    }
+    if (name == i.name) {
+      if (value) {
+        i.value = "true";
+      } else {
+        i.value = "false";
+      }
+      return true;
+    }
+  }
+  return false;
+}
+
+bool BaseConf::SetConfStrVec(const std::string& name, const std::vector<std::string>& value) {
+  std::string value_str = StringConcat(value, COMMA);
+  return SetConfStr(name, value_str);
+}
+
+bool BaseConf::SetConfDouble(const std::string& name, const double value) {
+  for (auto& i : rep_->item) {
+    if (i.type == Rep::kComment) {
+      continue;
+    }
+    if (name == i.name) {
+      i.value = std::to_string(value);
+      return true;
+    }
+  }
+  return false;
+}
+
+bool BaseConf::CheckConfExist(const std::string& name) const {
+  for (auto& i : rep_->item) {
+    if (i.type == Rep::kComment) {
+      continue;
+    }
+    if (name == i.name) {
+      return true;
+    }
+  }
+  return false;
+}
+
+void BaseConf::DumpConf() const {
+  int cnt = 1;
+  for (auto& i : rep_->item) {
+    if (i.type == Rep::kConf) {
+      LOG(INFO) << fmt::format("{:2} {} {}", cnt++, i.name, i.value);
+    }
+  }
+}
+
+bool BaseConf::WriteBack() {
+  std::unique_ptr<WritableFile> write_file;
+  std::string tmp_path = rep_->path + ".tmp";
+  Status ret = NewWritableFile(tmp_path, write_file);
+  LOG(INFO) << "ret " << ret.ToString();
+  if (!write_file) {
+    return false;
+  }
+  std::string tmp;
+  for (auto& i : rep_->item) {
+    if (i.type == Rep::kConf) {
+      tmp = i.name + " : " + i.value + "\n";
+      write_file->Append(tmp);
+    } else {
+      write_file->Append(i.value);
+    }
+  }
+  // should only use rename syscall, refer 'man rename'
+  // if we delete rep_->path, and then system crash before rename, we will lose old config
+  RenameFile(tmp_path, rep_->path);
+  return true;
+}
+
+void BaseConf::WriteSampleConf() const {
+  std::unique_ptr<WritableFile> write_file;
+  std::string sample_path = rep_->path + ".sample";
+  Status ret = NewWritableFile(sample_path, write_file);
+  std::string tmp;
+  for (auto& i : rep_->item) {
+    if (i.type == Rep::kConf) {
+      tmp = i.name + " :\n";
+      write_file->Append(tmp);
+    } else {
+      write_file->Append(i.value);
+    }
+  }
+}
+
+void BaseConf::PushConfItem(const Rep::ConfItem& item) { rep_->item.push_back(item); }
+
+}  // namespace pstd
diff --git a/tools/pika_migrate/src/pstd/src/build_version.cc b/tools/pika_migrate/src/pstd/src/build_version.cc
new file mode 100644
index 0000000000..7e8f1fd035
--- /dev/null
+++ b/tools/pika_migrate/src/pstd/src/build_version.cc
@@ -0,0 +1,9 @@
+// Copyright (c) 2015-present, Qihoo, Inc. All rights reserved.
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree. An additional grant
+// of patent rights can be found in the PATENTS file in the same directory.
+
+#include "pstd/include/version.h"
+const char* pstd_build_git_sha = "pstd_build_git_sha:2f67b928b3ccd2f23109802aa9932a7af45abcd9";
+const char* pstd_build_git_date = "pstd_build_git_date:2023-03-27";
+const char* pstd_build_compile_date = __DATE__;
diff --git a/tools/pika_migrate/src/pstd/src/build_version.cc.in b/tools/pika_migrate/src/pstd/src/build_version.cc.in
new file mode 100644
index 0000000000..f6befc6c3b
--- /dev/null
+++ b/tools/pika_migrate/src/pstd/src/build_version.cc.in
@@ -0,0 +1,4 @@
+#include "pstd/include/version.h"
+const char* pstd_build_git_sha = "pstd_build_git_sha:@@GIT_SHA@@";
+const char* pstd_build_git_date = "pstd_build_git_date:@@GIT_DATE_TIME@@";
+const char* pstd_build_compile_date = __DATE__;
diff --git a/tools/pika_migrate/src/pstd/src/build_version.h b/tools/pika_migrate/src/pstd/src/build_version.h
new file mode 100644
index 0000000000..c8b7804e62
--- /dev/null
+++ b/tools/pika_migrate/src/pstd/src/build_version.h
@@ -0,0 +1,17 @@
+// Copyright (c) 2015-present, Qihoo, Inc. All rights reserved.
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree. An additional grant
+// of patent rights can be found in the PATENTS file in the same directory.
+//
+// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree. An additional grant
+// of patent rights can be found in the PATENTS file in the same directory.
+//
+#pragma once
+
+// this variable tells us about the git revision
+extern const char* pstd_build_git_sha;
+
+// Date on which the code was compiled:
+extern const char* pstd_build_compile_date;
diff --git a/tools/pika_migrate/src/pstd/src/env.cc b/tools/pika_migrate/src/pstd/src/env.cc
new file mode 100644
index 0000000000..1abfe35cf2
--- /dev/null
+++ b/tools/pika_migrate/src/pstd/src/env.cc
@@ -0,0 +1,689 @@
+#include "pstd/include/env.h"
+
+#include <dirent.h>
+#include <fcntl.h>
+#include <sys/mman.h>
+#include <sys/resource.h>
+#include <sys/stat.h>
+#include <sys/time.h>
+#include <unistd.h>
+
+#include <cassert>
+#include <chrono>
+#include <cstdio>
+#include <cstring>
+#include <memory>
+#include <thread>
+
+#if __has_include(<filesystem>)
+#include <filesystem>
+namespace filesystem = std::filesystem;
+#elif __has_include(<experimental/filesystem>)
+#include <experimental/filesystem>
+namespace filesystem = std::experimental::filesystem;
+#endif
+
+#include <glog/logging.h>
+
+namespace pstd {
+
+/*
+ * Set the resource limits of a process
+ */
+
+/*
+ * 0: success.
+ * -1: set failed.
+ * -2: get resource limits failed.
+ */
+const size_t kPageSize = getpagesize();
+
+int SetMaxFileDescriptorNum(int64_t max_file_descriptor_num) {
+  // Try to Set the number of file descriptor
+  struct rlimit limit;
+  if (getrlimit(RLIMIT_NOFILE, &limit) != -1) {
+    if (limit.rlim_cur < static_cast<rlim_t>(max_file_descriptor_num)) {
+      // rlim_cur could be set by any user while rlim_max are
+      // changeable only by root.
+      limit.rlim_cur = max_file_descriptor_num;
+      if (limit.rlim_cur > limit.rlim_max) {
+        limit.rlim_max = max_file_descriptor_num;
+      }
+      if (setrlimit(RLIMIT_NOFILE, &limit) != -1) {
+        return 0;
+      } else {
+        return -1;
+      }
+    } else {
+      return 0;
+    }
+  } else {
+    return -2;
+  }
+}
+
+/*
+ * size of initial mmap size
+ */
+size_t kMmapBoundSize = 1024 * 1024 * 4;
+
+void SetMmapBoundSize(size_t size) { kMmapBoundSize = size; }
+
+static Status IOError(const std::string& context, int err_number) {
+  return Status::IOError(context, strerror(err_number));
+}
+
+int CreateDir(const std::string& path) {
+  try {
+    if (filesystem::create_directory(path)) {
+      return 0;
+    }
+  } catch (const filesystem::filesystem_error& e) {
+    LOG(WARNING) << e.what();
+  } catch (const std::exception& e) {
+    LOG(WARNING) << e.what();
+  }
+  return -1;
+}
+
+bool FileExists(const std::string& path) {
+  try {
+    return filesystem::exists(path);
+  } catch (const filesystem::filesystem_error& e) {
+    LOG(WARNING) << e.what();
+  } catch (const std::exception& e) {
+    LOG(WARNING) << e.what();
+  }
+  return false;
+}
+
+bool DeleteFile(const std::string& fname) {
+  try {
+    return filesystem::remove(fname);
+  } catch (const filesystem::filesystem_error& e) {
+    LOG(WARNING) << e.what();
+  } catch (const std::exception& e) {
+    LOG(WARNING) << e.what();
+  }
+  return false;
+}
+
+/**
+ ** CreatePath - ensure all directories in path exist
+ ** Algorithm takes the pessimistic view and works top-down to ensure
+ ** each directory in path exists, rather than optimistically creating
+ ** the last element and working backwards.
+ */
+int CreatePath(const std::string& path, mode_t mode) {
+  try {
+    if (!filesystem::create_directories(path)) {
+      return -1;
+    }
+    filesystem::permissions(path, static_cast<filesystem::perms>(mode));
+    return 0;
+  } catch (const filesystem::filesystem_error& e) {
+    LOG(WARNING) << e.what();
+  } catch (const std::exception& e) {
+    LOG(WARNING) << e.what();
+  }
+
+  return -1;
+}
+
+int GetChildren(const std::string& dir, std::vector<std::string>& result) {
+  result.clear();
+  if (filesystem::is_empty(dir)) {
+    return -1;
+  }
+  for (auto& de : filesystem::directory_iterator(dir)) {
+    result.emplace_back(de.path().filename());
+  }
+  return 0;
+}
+
+void GetDescendant(const std::string& dir, std::vector<std::string>& result) {
+  result.clear();
+  for (auto& de : filesystem::recursive_directory_iterator(dir)) {
+    result.emplace_back(de.path());
+  }
+}
+
+int RenameFile(const std::string& oldname, const std::string& newname) {
+  try {
+    filesystem::rename(oldname, newname);
+    return 0;
+  } catch (const filesystem::filesystem_error& e) {
+    LOG(WARNING) << e.what();
+  } catch (const std::exception& e) {
+    LOG(WARNING) << e.what();
+  }
+  return -1;
+}
+
+int IsDir(const std::string& path) {
+  std::error_code ec;
+  if (filesystem::is_directory(path, ec)) {
+    return 0;
+  } else if (filesystem::is_regular_file(path, ec)) {
+    return 1;
+  }
+  return -1;
+}
+
+int DeleteDir(const std::string& path) {
+  try {
+    if (filesystem::remove_all(path) == 0) {
+      return -1;
+    }
+    return 0;
+  } catch (const filesystem::filesystem_error& e) {
+    LOG(WARNING) << e.what();
+  } catch (const std::exception& e) {
+    LOG(WARNING) << e.what();
+  }
+  return -1;
+}
+
+bool DeleteDirIfExist(const std::string& path) {
+  return !(IsDir(path) == 0 && DeleteDir(path) != 0);
+}
+
+uint64_t Du(const std::string& path) {
+  uint64_t sum = 0;
+  try {
+    if (!filesystem::exists(path)) {
+      return 0;
+    }
+    if (filesystem::is_symlink(path)) {
+      filesystem::path symlink_path = filesystem::read_symlink(path);
+      sum = Du(symlink_path);
+    } else if (filesystem::is_directory(path)) {
+      for (const auto& entry : filesystem::directory_iterator(path)) {
+        if (entry.is_symlink()) {
+          sum += Du(filesystem::read_symlink(entry.path()));
+        } else if (entry.is_directory()) {
+          sum += Du(entry.path());
+        } else if (entry.is_regular_file()) {
+          sum += entry.file_size();
+        }
+      }
+    } else if (filesystem::is_regular_file(path)) {
+      sum = filesystem::file_size(path);
+    }
+  } catch (const filesystem::filesystem_error& ex) {
+    LOG(WARNING) << "Error accessing path: " << ex.what();
+  }
+
+  return sum;
+}
+
+TimeType NowMicros() {
+  auto now = std::chrono::system_clock::now();
+  return std::chrono::duration_cast<std::chrono::microseconds>(now.time_since_epoch()).count();
+}
+
+TimeType NowMillis() {
+  auto now = std::chrono::system_clock::now();
+  return std::chrono::duration_cast<std::chrono::milliseconds>(now.time_since_epoch()).count();
+}
+
+void SleepForMicroseconds(int micros) { std::this_thread::sleep_for(std::chrono::microseconds(micros)); }
+
+SequentialFile::~SequentialFile() = default;
+
+class PosixSequentialFile : public SequentialFile {
+ private:
+  std::string filename_;
+  FILE* file_ = nullptr;
+
+ public:
+  virtual void setUnBuffer() { setbuf(file_, nullptr); }
+
+  PosixSequentialFile(std::string fname, FILE* f) : filename_(std::move(fname)), file_(f) { setbuf(file_, nullptr); }
+
+  ~PosixSequentialFile() override {
+    if (file_) {
+      fclose(file_);
+    }
+  }
+
+  Status Read(size_t n, Slice* result, char* scratch) override {
+    Status s;
+    size_t r = fread(scratch, 1, n, file_);
+
+    *result = Slice(scratch, r);
+
+    if (r < n) {
+      if (feof(file_) != 0) {
+        s = Status::EndFile(filename_, "end file");
+        // We leave status as ok if we hit the end of the file
+      } else {
+        // A partial read with an error: return a non-ok status
+        s = IOError(filename_, errno);
+      }
+    }
+    return s;
+  }
+
+  Status Skip(uint64_t n) override {
+    if (fseek(file_, static_cast<long>(n), SEEK_CUR) != 0) {
+      return IOError(filename_, errno);
+    }
+    return Status::OK();
+  }
+
+  char* ReadLine(char* buf, int n) override { return fgets(buf, n, file_); }
+
+  virtual Status Close() {
+    if (fclose(file_) != 0) {
+      return IOError(filename_, errno);
+    }
+    file_ = nullptr;
+    return Status::OK();
+  }
+};
+
+WritableFile::~WritableFile() = default;
+
+// We preallocate up to an extra megabyte and use memcpy to append new
+// data to the file. This is safe since we either properly close the
+// file before reading from it, or for log files, the reading code
+// knows enough to skip zero suffixes.
+class PosixMmapFile : public WritableFile {
+ private:
+  std::string filename_;
+  int fd_ = -1;
+  size_t page_size_ = 0;
+  size_t map_size_ = 0;        // How much extra memory to map at a time
+  char* base_ = nullptr;       // The mapped region
+  char* limit_ = nullptr;      // Limit of the mapped region
+  char* dst_ = nullptr;        // Where to write next (in range [base_,limit_])
+  char* last_sync_ = nullptr;  // Where have we synced up to
+  uint64_t file_offset_ = 0;   // Offset of base_ in file
+  uint64_t write_len_ = 0;     // The data that written in the file
+
+  // Have we done an munmap of unsynced data?
+  bool pending_sync_ = false;
+
+  // Roundup x to a multiple of y
+  static size_t Roundup(size_t x, size_t y) { return ((x + y - 1) / y) * y; }
+
+  static size_t TrimDown(size_t x, size_t y) { return (x / y) * y; }
+  size_t TruncateToPageBoundary(size_t s) {
+    s -= (s & (page_size_ - 1));
+    assert((s % page_size_) == 0);
+    return s;
+  }
+
+  bool UnmapCurrentRegion() {
+    bool result = true;
+    if (base_) {
+      if (last_sync_ < limit_) {
+        // Defer syncing this data until next Sync() call, if any
+        pending_sync_ = true;
+      }
+      if (munmap(base_, limit_ - base_) != 0) {
+        result = false;
+      }
+      file_offset_ += limit_ - base_;
+      base_ = nullptr;
+      limit_ = nullptr;
+      last_sync_ = nullptr;
+      dst_ = nullptr;
+
+      // Increase the amount we map the next time, but capped at 1MB
+      if (map_size_ < (1 << 20)) {
+        map_size_ *= 2;
+      }
+    }
+    return result;
+  }
+
+  bool MapNewRegion() {
+    assert(base_ == nullptr);
+#if defined(__APPLE__)
+    if (ftruncate(fd_, file_offset_ + map_size_) != 0) {
+#else
+    if (posix_fallocate(fd_, static_cast<off_t>(file_offset_), static_cast<off_t>(map_size_)) != 0) {
+#endif
+      LOG(WARNING) << "ftruncate error";
+      return false;
+    }
+    void* ptr = mmap(nullptr, map_size_, PROT_READ | PROT_WRITE, MAP_SHARED, fd_, static_cast<off_t>(file_offset_));
+    if (ptr == MAP_FAILED) {  // NOLINT
+      LOG(WARNING) << "mmap failed";
+      return false;
+    }
+    base_ = reinterpret_cast<char*>(ptr);
+    limit_ = base_ + map_size_;
+    dst_ = base_ + write_len_;
+    write_len_ = 0;
+    last_sync_ = base_;
+    return true;
+  }
+
+ public:
+  PosixMmapFile(std::string fname, int fd, size_t page_size, uint64_t write_len = 0)
+      : filename_(std::move(fname)),
+        fd_(fd),
+        page_size_(page_size),
+        map_size_(Roundup(kMmapBoundSize, page_size)),
+        write_len_(write_len) {
+    if (write_len_ != 0) {
+      while (map_size_ < write_len_) {
+        map_size_ += (1024 * 1024);
+      }
+    }
+    assert((page_size & (page_size - 1)) == 0);
+  }
+
+  ~PosixMmapFile() override {
+    if (fd_ >= 0) {
+      PosixMmapFile::Close();
+    }
+  }
+
+  Status Append(const Slice& data) override {
+    const char* src = data.data();
+    const char* src = data.data();
+    size_t left = data.size();
+    while (left > 0) {
+      assert(base_ <= dst_);
+      assert(dst_ <= limit_);
+      size_t avail = limit_ - dst_;
+      if (!avail) {
+        if (!UnmapCurrentRegion() || !MapNewRegion()) {
+          return IOError(filename_, errno);
+        }
+      }
+      size_t n = (left <= avail) ? left : avail;
+      memcpy(dst_, src, n);
+      dst_ += n;
+      src += n;
+      left -= n;
+    }
+    return Status::OK();
+  }
+
+  Status Close() override {
+    Status s;
+    size_t unused = limit_ - dst_;
+    if (!UnmapCurrentRegion()) {
+      s = IOError(filename_, errno);
+    } else if (unused > 0) {
+      // Trim the extra space at the end of the file
+      if (ftruncate(fd_, static_cast<off_t>(file_offset_ - unused)) < 0) {
+        s = IOError(filename_, errno);
+      }
+    }
+
+    if (close(fd_) < 0) {
+      if (s.ok()) {
+        s = IOError(filename_, errno);
+      }
+    }
+
+    fd_ = -1;
+    base_ = nullptr;
+    limit_ = nullptr;
+    return s;
+  }
+
+  Status Flush() override { return Status::OK(); }
+
+  Status Sync() override {
+    Status s;
+
+    if (pending_sync_) {
+      // Some unmapped data was not synced
+      pending_sync_ = false;
+#if defined(__APPLE__)
+      if (fsync(fd_) < 0) {
+#else
+      if (fdatasync(fd_) < 0) {
+#endif
+        s = IOError(filename_, errno);
+      }
+    }
+
+    if (dst_ > last_sync_) {
+      // Find the beginnings of the pages that contain the first and last
+      // bytes to be synced.
+      size_t p1 = TruncateToPageBoundary(last_sync_ - base_);
+      size_t p2 = TruncateToPageBoundary(dst_ - base_ - 1);
+      last_sync_ = dst_;
+      if (msync(base_ + p1, p2 - p1 + page_size_, MS_SYNC) < 0) {
+        s = IOError(filename_, errno);
+      }
+    }
+
+    return s;
+  }
+
+  Status Trim(uint64_t target) override {
+    if (!UnmapCurrentRegion()) {
+      return IOError(filename_, errno);
+    }
+
+    file_offset_ = target;
+
+    if (!MapNewRegion()) {
+      return IOError(filename_, errno);
+    }
+    return Status::OK();
+  }
+
+  uint64_t Filesize() override { return write_len_ + file_offset_ + (dst_ - base_); }
+};
+
+RWFile::~RWFile() = default;
+
+class MmapRWFile : public RWFile {
+ public:
+  MmapRWFile(std::string fname, int fd, size_t page_size)
+      : filename_(std::move(fname)), fd_(fd), page_size_(page_size), map_size_(Roundup(65536, page_size)) {
+    DoMapRegion();
+  }
+
+  ~MmapRWFile() override {
+    if (fd_ >= 0) {
+      munmap(base_, map_size_);
+    }
+  }
+
+  bool DoMapRegion() {
+#if defined(__APPLE__)
+    if (ftruncate(fd_, map_size_) != 0) {
+#else
+    if (posix_fallocate(fd_, 0, static_cast<off_t>(map_size_)) != 0) {
+#endif
+      return false;
+    }
+    void* ptr = mmap(nullptr, map_size_, PROT_READ | PROT_WRITE, MAP_SHARED, fd_, 0);
+    if (ptr == MAP_FAILED) {  // NOLINT
+      return false;
+    }
+    base_ = reinterpret_cast<char*>(ptr);
+    return true;
+  }
+
+  char* GetData() override { return base_; }
+  char* base() { return base_; }
+
+ private:
+  static size_t Roundup(size_t x, size_t y) { return ((x + y - 1) / y) * y; }
+  std::string filename_;
+  int fd_ = -1;
+  size_t page_size_ [[maybe_unused]] = 0;
+  size_t map_size_ = 0;
+  char* base_ = nullptr;
+};
+
+class PosixRandomRWFile : public RandomRWFile {
+ private:
+  const std::string filename_;
+  int fd_ = -1;
+  bool pending_sync_ = false;
+  bool pending_fsync_ = false;
+  // bool fallocate_with_keep_size_;
+
+ public:
+  PosixRandomRWFile(std::string fname, int fd) : filename_(std::move(fname)), fd_(fd) {
+    // fallocate_with_keep_size_ = options.fallocate_with_keep_size;
+  }
+
+  ~PosixRandomRWFile() override {
+    if (fd_ >= 0) {
+      // TODO(clang-tidy): Calling a virtual method during destruction bypasses
+      // virtual dispatch, so the next-line clang-tidy check is disabled for now.
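+      // (By destruction time this is the most-derived class, so the unqualified
+      // call below statically resolves to PosixRandomRWFile::Close().)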
+      Close();  // NOLINT
+    }
+  }
+
+  Status Write(uint64_t offset, const Slice& data) override {
+    const char* src = data.data();
+    size_t left = data.size();
+    Status s;
+    pending_sync_ = true;
+    pending_fsync_ = true;
+
+    while (left != 0) {
+      ssize_t done = pwrite(fd_, src, left, static_cast<off_t>(offset));
+      if (done < 0) {
+        if (errno == EINTR) {
+          continue;
+        }
+        return IOError(filename_, errno);
+      }
+
+      left -= done;
+      src += done;
+      offset += done;
+    }
+
+    return Status::OK();
+  }
+
+  Status Read(uint64_t offset, size_t n, Slice* result, char* scratch) const override {
+    Status s;
+    ssize_t r = -1;
+    size_t left = n;
+    char* ptr = scratch;
+    while (left > 0) {
+      r = pread(fd_, ptr, left, static_cast<off_t>(offset));
+      if (r <= 0) {
+        if (errno == EINTR) {
+          continue;
+        }
+        break;
+      }
+      ptr += r;
+      offset += r;
+      left -= r;
+    }
+    *result = Slice(scratch, (r < 0) ? 0 : n - left);
+    if (r < 0) {
+      s = IOError(filename_, errno);
+    }
+    return s;
+  }
+
+  Status Close() override {
+    Status s = Status::OK();
+    if (fd_ >= 0 && close(fd_) < 0) {
+      s = IOError(filename_, errno);
+    }
+    fd_ = -1;
+    return s;
+  }
+
+  Status Sync() override {
+#if defined(__APPLE__)
+    if (pending_sync_ && fsync(fd_) < 0) {
+#else
+    if (pending_sync_ && fdatasync(fd_) < 0) {
+#endif
+      return IOError(filename_, errno);
+    }
+    pending_sync_ = false;
+    return Status::OK();
+  }
+
+  Status Fsync() override {
+    if (pending_fsync_ && fsync(fd_) < 0) {
+      return IOError(filename_, errno);
+    }
+    pending_fsync_ = false;
+    pending_sync_ = false;
+    return Status::OK();
+  }
+
+  // virtual Status Allocate(off_t offset, off_t len) override {
+  //   TEST_KILL_RANDOM(rocksdb_kill_odds);
+  //   int alloc_status = fallocate(
+  //       fd_, fallocate_with_keep_size_ ? FALLOC_FL_KEEP_SIZE : 0, offset, len);
+  //   if (alloc_status == 0) {
+  //     return Status::OK();
+  //   } else {
+  //     return IOError(filename_, errno);
+  //   }
+  // }
+};
+
+Status NewSequentialFile(const std::string& fname, std::unique_ptr<SequentialFile>& result) {
+  FILE* f = fopen(fname.c_str(), "r");
+  if (!f) {
+    return IOError(fname, errno);
+  } else {
+    result = std::make_unique<PosixSequentialFile>(fname, f);
+    return Status::OK();
+  }
+}
+
+Status NewWritableFile(const std::string& fname, std::unique_ptr<WritableFile>& result) {
+  Status s;
+  const int fd = open(fname.c_str(), O_CREAT | O_RDWR | O_TRUNC | O_CLOEXEC, 0644);
+  if (fd < 0) {
+    s = IOError(fname, errno);
+  } else {
+    result = std::make_unique<PosixMmapFile>(fname, fd, kPageSize);
+  }
+  return s;
+}
+
+Status NewRWFile(const std::string& fname, std::unique_ptr<RWFile>& result) {
+  Status s;
+  const int fd = open(fname.c_str(), O_CREAT | O_RDWR | O_CLOEXEC, 0644);
+  if (fd < 0) {
+    s = IOError(fname, errno);
+  } else {
+    result = std::make_unique<MmapRWFile>(fname, fd, kPageSize);
+  }
+  return s;
+}
+
+Status AppendWritableFile(const std::string& fname, std::unique_ptr<WritableFile>& result, uint64_t write_len) {
+  Status s;
+  const int fd = open(fname.c_str(), O_RDWR | O_CLOEXEC, 0644);
+  if (fd < 0) {
+    s = IOError(fname, errno);
+  } else {
+    result = std::make_unique<PosixMmapFile>(fname, fd, kPageSize, write_len);
+  }
+  return s;
+}
+
+Status NewRandomRWFile(const std::string& fname, std::unique_ptr<RandomRWFile>& result) {
+  Status s;
+  const int fd = open(fname.c_str(), O_CREAT | O_RDWR, 0644);
+  if (fd < 0) {
+    s = IOError(fname, errno);
+  } else {
+    result = std::make_unique<PosixRandomRWFile>(fname, fd);
+  }
+  return s;
+}
+
+}  // namespace pstd
diff --git a/tools/pika_migrate/src/pstd/src/lock_mgr.cc b/tools/pika_migrate/src/pstd/src/lock_mgr.cc
new file mode 100644
index 0000000000..e3f1e68f6f
--- /dev/null
+++ b/tools/pika_migrate/src/pstd/src/lock_mgr.cc
@@ -0,0 +1,177 @@
+// Copyright (c) 2017-present The storage Authors. All rights reserved.
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree. An additional grant
+// of patent rights can be found in the PATENTS file in the same directory.
+
+#ifndef __STDC_FORMAT_MACROS
+#  define __STDC_FORMAT_MACROS
+#endif
+
+#include "pstd/include/lock_mgr.h"
+
+#include <atomic>
+#include <cassert>
+#include <memory>
+#include <string>
+#include <unordered_set>
+#include <vector>
+
+#include "pstd/include/mutex.h"
+
+namespace pstd::lock {
+
+struct LockMapStripe {
+  explicit LockMapStripe(const std::shared_ptr<MutexFactory>& factory) {
+    stripe_mutex = factory->AllocateMutex();
+    stripe_cv = factory->AllocateCondVar();
+    assert(stripe_mutex);
+    assert(stripe_cv);
+  }
+
+  // Mutex must be held before modifying keys map
+  std::shared_ptr<Mutex> stripe_mutex;
+
+  // Condition Variable per stripe for waiting on a lock
+  std::shared_ptr<CondVar> stripe_cv;
+
+  // Locked keys
+  std::unordered_set<std::string> keys;
+};
+
+// Map of #num_stripes LockMapStripes
+struct LockMap {
+  explicit LockMap(size_t num_stripes, const std::shared_ptr<MutexFactory>& factory) : num_stripes_(num_stripes) {
+    lock_map_stripes_.reserve(num_stripes);
+    for (size_t i = 0; i < num_stripes; i++) {
+      auto stripe = std::make_shared<LockMapStripe>(factory);
+      lock_map_stripes_.push_back(stripe);
+    }
+  }
+
+  ~LockMap() = default;
+
+  // Number of separate LockMapStripes to create, each with their own Mutex
+  const size_t num_stripes_;
+
+  // Count of keys that are currently locked.
+  // (Only maintained if LockMgr::max_num_locks_ is positive.)
+  std::atomic<int64_t> lock_cnt{0};
+
+  std::vector<std::shared_ptr<LockMapStripe>> lock_map_stripes_;
+
+  size_t GetStripe(const std::string& key) const;
+};
+
+size_t LockMap::GetStripe(const std::string& key) const {
+  assert(num_stripes_ > 0);
+  size_t stripe = std::hash<std::string>{}(key) % num_stripes_;
+  return stripe;
+}
+
+LockMgr::LockMgr(size_t default_num_stripes, int64_t max_num_locks, const std::shared_ptr<MutexFactory>& mutex_factory)
+    : default_num_stripes_(default_num_stripes),
+      max_num_locks_(max_num_locks),
+      mutex_factory_(mutex_factory),
+      lock_map_(std::make_shared<LockMap>(default_num_stripes, mutex_factory)) {}
+
+LockMgr::~LockMgr() = default;
+
+Status LockMgr::TryLock(const std::string& key) {
+#ifdef LOCKLESS
+  return Status::OK();
+#else
+  size_t stripe_num = lock_map_->GetStripe(key);
+  assert(lock_map_->lock_map_stripes_.size() > stripe_num);
+  auto stripe = lock_map_->lock_map_stripes_.at(stripe_num);
+
+  return Acquire(stripe, key);
+#endif
+}
+
+// Helper function for TryLock().
+Status LockMgr::Acquire(const std::shared_ptr<LockMapStripe>& stripe, const std::string& key) {
+  Status result;
+
+  // we wait indefinitely to acquire the lock
+  result = stripe->stripe_mutex->Lock();
+
+  if (!result.ok()) {
+    // failed to acquire mutex
+    return result;
+  }
+
+  // Acquire lock if we are able to
+  result = AcquireLocked(stripe, key);
+
+  if (!result.ok()) {
+    // If we weren't able to acquire the lock, we will keep retrying
+    do {
+      result = stripe->stripe_cv->Wait(stripe->stripe_mutex);
+      if (result.ok()) {
+        result = AcquireLocked(stripe, key);
+      }
+    } while (!result.ok());
+  }
+
+  stripe->stripe_mutex->UnLock();
+
+  return result;
+}
+
+// Try to lock this key after we have acquired the mutex.
+// REQUIRED: Stripe mutex must be held.
+Status LockMgr::AcquireLocked(const std::shared_ptr<LockMapStripe>& stripe, const std::string& key) {
+  Status result;
+  // Check if this key is already locked
+  if (stripe->keys.find(key) != stripe->keys.end()) {
+    // Lock already held
+    result = Status::Busy("LockTimeout");
+  } else {  // Lock not held.
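+    // (Note: lock_cnt is shared by all stripes, so the limit check below is
+    // approximate; concurrent stripes can overshoot max_num_locks_ slightly.)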
+    // Check lock limit
+    if (max_num_locks_ > 0 && lock_map_->lock_cnt.load(std::memory_order_acquire) >= max_num_locks_) {
+      result = Status::Busy("LockLimit");
+    } else {
+      // acquire lock
+      stripe->keys.insert(key);
+
+      // Maintain lock count if there is a limit on the number of locks
+      if (max_num_locks_ != 0) {
+        lock_map_->lock_cnt++;
+      }
+    }
+  }
+
+  return result;
+}
+
+void LockMgr::UnLockKey(const std::string& key, const std::shared_ptr<LockMapStripe>& stripe) {
+#ifdef LOCKLESS
+#else
+  auto stripe_iter = stripe->keys.find(key);
+  if (stripe_iter != stripe->keys.end()) {
+    // Found the key locked. unlock it.
+    stripe->keys.erase(stripe_iter);
+    if (max_num_locks_ > 0) {
+      // Maintain lock count if there is a limit on the number of locks.
+      assert(lock_map_->lock_cnt.load(std::memory_order_relaxed) > 0);
+      lock_map_->lock_cnt--;
+    }
+  } else {
+    // This key is either not locked or locked by someone else.
+  }
+#endif
+}
+
+void LockMgr::UnLock(const std::string& key) {
+  // Lock the mutex for the stripe that this key hashes to
+  size_t stripe_num = lock_map_->GetStripe(key);
+  assert(lock_map_->lock_map_stripes_.size() > stripe_num);
+  auto stripe = lock_map_->lock_map_stripes_.at(stripe_num);
+
+  stripe->stripe_mutex->Lock();
+  UnLockKey(key, stripe);
+  stripe->stripe_mutex->UnLock();
+
+  // Signal waiting threads to retry locking
+  stripe->stripe_cv->NotifyAll();
+}
+}  // namespace pstd::lock
diff --git a/tools/pika_migrate/src/pstd/src/mutex_impl.cc b/tools/pika_migrate/src/pstd/src/mutex_impl.cc
new file mode 100644
index 0000000000..987760d94d
--- /dev/null
+++ b/tools/pika_migrate/src/pstd/src/mutex_impl.cc
@@ -0,0 +1,118 @@
+// Copyright (c) 2017-present The storage Authors. All rights reserved.
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree. An additional grant
+// of patent rights can be found in the PATENTS file in the same directory.
+
+#include <chrono>
+#include <condition_variable>
+#include <memory>
+#include <mutex>
+
+#include "pstd/include/mutex.h"
+#include "pstd/include/mutex_impl.h"
+
+namespace pstd::lock {
+
+class MutexImpl : public Mutex {
+ public:
+  MutexImpl() = default;
+  ~MutexImpl() override = default;
+
+  Status Lock() override;
+
+  Status TryLockFor(int64_t timeout_time) override;
+
+  void UnLock() override { mutex_.unlock(); }
+
+  friend class CondVarImpl;
+
+ private:
+  std::mutex mutex_;
+};
+
+class CondVarImpl : public CondVar {
+ public:
+  CondVarImpl() = default;
+  ~CondVarImpl() override = default;
+
+  Status Wait(std::shared_ptr<Mutex> mutex) override;
+
+  Status WaitFor(std::shared_ptr<Mutex> mutex, int64_t timeout_time) override;
+
+  void Notify() override { cv_.notify_one(); }
+
+  void NotifyAll() override { cv_.notify_all(); }
+
+ private:
+  std::condition_variable cv_;
+};
+
+std::shared_ptr<Mutex> MutexFactoryImpl::AllocateMutex() { return std::shared_ptr<Mutex>(new MutexImpl()); }
+
+std::shared_ptr<CondVar> MutexFactoryImpl::AllocateCondVar() { return std::shared_ptr<CondVar>(new CondVarImpl()); }
+
+Status MutexImpl::Lock() {
+  mutex_.lock();
+  return Status::OK();
+}
+
+Status MutexImpl::TryLockFor(int64_t timeout_time) {
+  bool locked = true;
+
+  if (timeout_time == 0) {
+    locked = mutex_.try_lock();
+  } else {
+    // Previously, this code used a std::timed_mutex. However, this was changed
+    // due to known bugs in gcc versions < 4.9.
+    // https://gcc.gnu.org/bugzilla/show_bug.cgi?id=54562
+    //
+    // Since this mutex isn't held for long and only a single mutex is ever
+    // held at a time, it is reasonable to ignore the lock timeout_time here
+    // and only check it when waiting on the condition_variable.
+    mutex_.lock();
+  }
+
+  if (!locked) {
+    // timeout acquiring mutex
+    return Status::Timeout("MutexTimeout");
+  }
+
+  return Status::OK();
+}
+
+Status CondVarImpl::Wait(std::shared_ptr<Mutex> mutex) {
+  auto mutex_impl = reinterpret_cast<MutexImpl*>(mutex.get());
+
+  std::unique_lock<std::mutex> lock(mutex_impl->mutex_, std::adopt_lock);
+  cv_.wait(lock);
+
+  // Make sure unique_lock doesn't unlock mutex when it destructs
+  lock.release();
+
+  return Status::OK();
+}
+
+Status CondVarImpl::WaitFor(std::shared_ptr<Mutex> mutex, int64_t timeout_time) {
+  Status s;
+
+  auto mutex_impl = reinterpret_cast<MutexImpl*>(mutex.get());
+  std::unique_lock<std::mutex> lock(mutex_impl->mutex_, std::adopt_lock);
+
+  if (timeout_time < 0) {
+    // If timeout is negative, do not use a timeout
+    cv_.wait(lock);
+  } else {
+    auto duration = std::chrono::microseconds(timeout_time);
+    auto cv_status = cv_.wait_for(lock, duration);
+
+    // Check if the wait stopped due to timing out.
+    if (cv_status == std::cv_status::timeout) {
+      s = Status::Timeout("MutexTimeout");
+    }
+  }
+
+  // Make sure unique_lock doesn't unlock mutex when it destructs
+  lock.release();
+
+  // CV was signaled, or we spuriously woke up (but didn't time out)
+  return s;
+}
+}  // namespace pstd::lock
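Taken together, lock_mgr.cc and mutex_impl.cc compose like this: LockMgr hashes a key to a stripe, takes that stripe's Mutex, and waits on the stripe's CondVar until AcquireLocked() admits the key. A minimal usage sketch follows; the header paths, the pstd::lock namespace, and the MutexFactoryImpl name are taken from this patch, while the surrounding main() is purely illustrative:

```cpp
#include <memory>

#include "pstd/include/lock_mgr.h"
#include "pstd/include/mutex_impl.h"

using namespace pstd::lock;

int main() {
  // 16 stripes; at most 1024 keys locked at once (values <= 0 disable the limit).
  auto factory = std::make_shared<MutexFactoryImpl>();
  LockMgr mgr(16, 1024, factory);

  if (mgr.TryLock("user:42").ok()) {  // blocks on the stripe CV until the key is free
    // ... mutate whatever record "user:42" guards ...
    mgr.UnLock("user:42");  // NotifyAll() wakes waiters hashed to the same stripe
  }
  return 0;
}
```

Striping keeps unrelated keys on different mutexes, so contention is paid only by keys that happen to hash to the same stripe.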
diff --git a/tools/pika_migrate/src/pstd/src/pika_codis_slot.cc b/tools/pika_migrate/src/pstd/src/pika_codis_slot.cc
new file mode 100644
index 0000000000..731cf480b3
--- /dev/null
+++ b/tools/pika_migrate/src/pstd/src/pika_codis_slot.cc
@@ -0,0 +1,52 @@
+// Copyright (c) 2023-present The storage Authors. All rights reserved.
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree. An additional grant
+// of patent rights can be found in the PATENTS file in the same directory.
+
+#include <zlib.h>
+
+#include "pstd/include/pika_codis_slot.h"
+
+// get slot tag
+static const char *GetSlotsTag(const std::string &str, int *plen) {
+  const char *s = str.data();
+  int i, j, n = static_cast<int>(str.length());
+  for (i = 0; i < n && s[i] != '{'; i++) {
+  }
+  if (i == n) {
+    return nullptr;
+  }
+  i++;
+  for (j = i; j < n && s[j] != '}'; j++) {
+  }
+  if (j == n) {
+    return nullptr;
+  }
+  if (plen != nullptr) {
+    *plen = j - i;
+  }
+  return s + i;
+}
+
+// get slot number of the key
+CRCU32 GetSlotID(int slot_num, const std::string &str) { return GetSlotsID(slot_num, str, nullptr, nullptr); }
+
+// get the slot number by key
+CRCU32 GetSlotsID(int slot_num, const std::string &str, CRCU32 *pcrc, int *phastag) {
+  const char *s = str.data();
+  int taglen;
+  int hastag = 0;
+  const char *tag = GetSlotsTag(str, &taglen);
+  if (tag == nullptr) {
+    tag = s, taglen = static_cast<int>(str.length());
+  } else {
+    hastag = 1;
+  }
+  auto crc = crc32(0L, (const Bytef*)tag, taglen);
+  if (pcrc != nullptr) {
+    *pcrc = CRCU32(crc);
+  }
+  if (phastag != nullptr) {
+    *phastag = hastag;
+  }
+  return static_cast<CRCU32>(crc) % slot_num;
+}
diff --git a/tools/pika_migrate/src/pstd/src/posix.cc b/tools/pika_migrate/src/pstd/src/posix.cc
new file mode 100644
index 0000000000..53957d99cc
--- /dev/null
+++ b/tools/pika_migrate/src/pstd/src/posix.cc
@@ -0,0 +1,752 @@
+#include "pstd/include/posix.h"
+#include "pstd/include/xdebug.h"
+
+#include <cerrno>
+#include <cstring>
+
+#include <glog/logging.h>
+/*********************************************
+ * Wrappers for Unix process control functions
+ ********************************************/
+
+/* $begin forkwrapper */
+pid_t Fork() {
+  pid_t pid;
+
+  if ((pid = fork()) < 0) {
+    LOG(ERROR) << "Fork error: " << strerror(errno);
+  }
+  return pid;
+}
+/* $end forkwrapper */
+
+void Execve(const char* filename, char* const argv[], char* const envp[]) {
+  if (execve(filename, argv, envp) < 0) {
+    LOG(ERROR) << "Execve error: " << strerror(errno);
+  }
+}
+
+/* $begin wait */
+pid_t Wait(int* status) {
+  pid_t pid;
+
+  if ((pid = wait(status)) < 0) {
+    LOG(ERROR) << "Wait error: " << strerror(errno);
+  }
+  return pid;
+}
+/* $end wait */
+
+pid_t Waitpid(pid_t pid, int* iptr, int options) {
+  pid_t retpid;
+
+  if ((retpid = waitpid(pid, iptr, options)) < 0) {
+    LOG(ERROR) << "Waitpid error: " << strerror(errno);
+  }
+  return (retpid);
+}
+
+/* $begin kill */
+void Kill(pid_t pid, int signum) {
+  int rc;
+
+  if ((rc = kill(pid, signum)) < 0) {
+    LOG(ERROR) << "Kill error: " << strerror(errno);
+  }
+}
+/* $end kill */
+
+void Pause() { (void)pause(); }
+
+unsigned int Sleep(unsigned int secs) { return sleep(secs); }
+
+unsigned int Alarm(unsigned int seconds) { return alarm(seconds); }
+
+void Setpgid(pid_t pid, pid_t pgid) {
+  int rc;
+
+  if ((rc = setpgid(pid, pgid)) < 0) {
+    LOG(ERROR) << "Setpgid error: " << strerror(errno);
+  }
+}
+
+pid_t Getpgrp() { return getpgrp(); }
+
+/************************************
+ * Wrappers for Unix signal functions
+ ***********************************/
+
+/* $begin sigaction */
+handler_t* Signal(int signum, handler_t* handler) {
+  struct sigaction action;
+  struct sigaction old_action;
+
+  action.sa_handler = handler;
+  sigemptyset(&action.sa_mask); /* block sigs of type being handled */
+  action.sa_flags = SA_RESTART; /* restart syscalls if possible */
+
+  if (sigaction(signum, &action, &old_action) < 0) {
+    LOG(ERROR) << "Signal error: " << strerror(errno);
+  }
+  return (old_action.sa_handler);
+}
+/* $end sigaction */
+
+void Sigprocmask(int how, const sigset_t* set,
sigset_t* oldset) { + if (sigprocmask(how, set, oldset) < 0) { + LOG(ERROR) << "Sigprocmask error: " << strerror(errno); + } +} + +void Sigemptyset(sigset_t* set) { + if (sigemptyset(set) < 0) { + LOG(ERROR) << "Sigemptyset error: " << strerror(errno); + } +} + +void Sigfillset(sigset_t* set) { + if (sigfillset(set) < 0) { + LOG(ERROR) << "Sigfillset error: " << strerror(errno); + } +} + +void Sigaddset(sigset_t* set, int signum) { + if (sigaddset(set, signum) < 0) { + LOG(ERROR) << "Sigaddset error: " << strerror(errno); + } +} + +void Sigdelset(sigset_t* set, int signum) { + if (sigdelset(set, signum) < 0) { + LOG(ERROR) << "Sigdelset error: " << strerror(errno); + } +} + +int Sigismember(const sigset_t* set, int signum) { + int rc; + if (rc = sigismember(set, signum); rc < 0) { + LOG(ERROR) << "Sigismember error: " << strerror(errno); + } + return rc; +} + +/******************************** + * Wrappers for Unix I/O routines + ********************************/ + +int Open(const char* pathname, int flags, mode_t mode) { + int rc; + + if ((rc = open(pathname, flags, mode)) < 0) { + LOG(ERROR) << "Open error: " << strerror(errno); + } + return rc; +} + +ssize_t Read(int fd, void* buf, size_t count) { + ssize_t rc; + + if ((rc = read(fd, buf, count)) < 0) { + LOG(ERROR) << "Read error: " << strerror(errno); + } + return rc; +} + +ssize_t Write(int fd, const void* buf, size_t count) { + ssize_t rc; + + if ((rc = write(fd, buf, count)) < 0) { + LOG(ERROR) << "Write error: " << strerror(errno); + } + return rc; +} + +off_t Lseek(int fildes, off_t offset, int whence) { + off_t rc; + + if ((rc = lseek(fildes, offset, whence)) < 0) { + LOG(ERROR) << "Lseek error: " << strerror(errno); + } + return rc; +} + +void Close(int fd) { + int rc; + + if ((rc = close(fd)) < 0) { + LOG(ERROR) << "Close error: " << strerror(errno); + } +} + +int Select(int n, fd_set* readfds, fd_set* writefds, fd_set* exceptfds, struct timeval* timeout) { + int rc; + + if ((rc = select(n, readfds, writefds, exceptfds, timeout)) < 0) { + LOG(ERROR) << "Select error: " << strerror(errno); + } + return rc; +} + +int Dup2(int fd1, int fd2) { + int rc; + + if ((rc = dup2(fd1, fd2)) < 0) { + LOG(ERROR) << "Dup2 error: " << strerror(errno); + } + return rc; +} + +void Stat(const char* filename, struct stat* buf) { + if (stat(filename, buf) < 0) { + LOG(ERROR) << "Stat error: " << strerror(errno); + } +} + +void Fstat(int fd, struct stat* buf) { + if (fstat(fd, buf) < 0) { + LOG(ERROR) << "Fstat error: " << strerror(errno); + } +} + +/*************************************** + * Wrappers for memory mapping functions + ***************************************/ +void* Mmap(void* addr, size_t len, int prot, int flags, int fd, off_t offset) { + void* ptr; + + if ((ptr = mmap(addr, len, prot, flags, fd, offset)) == ((void*)-1)) { // NOLINT + LOG(ERROR) << "mmap error: " << strerror(errno); + } + return (ptr); +} + +void Munmap(void* start, size_t length) { + if (munmap(start, length) < 0) { + LOG(ERROR) << "munmap error: " << strerror(errno); + } +} + +/*************************************************** + * Wrappers for dynamic storage allocation functions + ***************************************************/ + +void* Malloc(size_t size) { + void* p; + + if (!(p = malloc(size))) { + LOG(ERROR) << "Malloc error: " << strerror(errno); + } + return p; +} + +void* Realloc(void* ptr, size_t size) { + void* p; + + if (!(p = realloc(ptr, size))) { + LOG(ERROR) << "Realloc error: " << strerror(errno); + } + return p; +} + +void* Calloc(size_t 
nmemb, size_t size) { + void* p; + + if (!(p = calloc(nmemb, size))) { + LOG(ERROR) << "Calloc error: " << strerror(errno); + } + return p; +} + +void Free(void* ptr) { free(ptr); } + +/****************************************** + * Wrappers for the Standard I/O functions. + ******************************************/ +void Fclose(FILE* fp) { + if (fclose(fp) != 0) { + LOG(ERROR) << "Fclose error: " << strerror(errno); + } +} + +FILE* Fdopen(int fd, const char* type) { + FILE* fp; + + if (!(fp = fdopen(fd, type))) { + LOG(ERROR) << "Fdopen error: " << strerror(errno); + } + + return fp; +} + +char* Fgets(char* ptr, int n, FILE* stream) { + char* rptr; + + if (!(rptr = fgets(ptr, n, stream)) && ferror(stream)) { + LOG(ERROR) << "Fgets error"; + } + + return rptr; +} + +FILE* Fopen(const char* filename, const char* mode) { + FILE* fp; + + if (!(fp = fopen(filename, mode))) { + LOG(ERROR) << "Fopen error: " << strerror(errno); + } + + return fp; +} + +void Fputs(const char* ptr, FILE* stream) { + if (fputs(ptr, stream) == EOF) { + LOG(ERROR) << "Fputs error: " << strerror(errno); + } +} + +size_t Fread(void* ptr, size_t size, size_t nmemb, FILE* stream) { + size_t n; + + if (((n = fread(ptr, size, nmemb, stream)) < nmemb) && (ferror(stream) != 0)) { + LOG(ERROR) << "Fread error: " << strerror(errno); + } + return n; +} + +void Fwrite(const void* ptr, size_t size, size_t nmemb, FILE* stream) { + if (fwrite(ptr, size, nmemb, stream) < nmemb) { + LOG(ERROR) << "Fwrite error: " << strerror(errno); + } +} + +/**************************** + * Sockets interface wrappers + ****************************/ + +int Socket(int domain, int type, int protocol) { + int rc; + + if ((rc = socket(domain, type, protocol)) < 0) { + LOG(ERROR) << "Socket error: " << strerror(errno); + } + return rc; +} + +void Setsockopt(int s, int level, int optname, const void* optval, int optlen) { + if (setsockopt(s, level, optname, optval, optlen) < 0) { + LOG(ERROR) << "Setsockopt error: " << strerror(errno); + } +} + +void Bind(int sockfd, struct sockaddr* my_addr, int addrlen) { + if (bind(sockfd, my_addr, addrlen) < 0) { + LOG(ERROR) << "Bind error: " << strerror(errno); + } +} + +void Listen(int s, int backlog) { + if (listen(s, backlog) < 0) { + LOG(ERROR) << "Listen error: " << strerror(errno); + } +} + +int Accept(int s, struct sockaddr* addr, socklen_t* addrlen) { + int rc; + + if (rc = accept(s, addr, addrlen); rc < 0) { + LOG(ERROR) << "Accept error: " << strerror(errno); + } + return rc; +} + +void Connect(int sockfd, struct sockaddr* serv_addr, int addrlen) { + if (connect(sockfd, serv_addr, addrlen) < 0) { + LOG(ERROR) << "Connect error: " << strerror(errno); + } +} + +/************************ + * DNS interface wrappers + ***********************/ + +/* $begin gethostbyname */ +struct hostent* Gethostbyname(const char* name) { + struct hostent* p; + + if (!(p = gethostbyname(name))) { + LOG(ERROR) << "Gethostbyname error: DNS error " << h_errno; + } + return p; +} +/* $end gethostbyname */ + +struct hostent* Gethostbyaddr(const char* addr, int len, int type) { + struct hostent* p; + + if (!(p = gethostbyaddr(addr, len, type))) { + LOG(ERROR) << "Gethostbyaddr error: DNS error " << h_errno; + } + return p; +} + +/************************************************ + * Wrappers for Pthreads thread control functions + ************************************************/ + +void Pthread_create(pthread_t* tidp, pthread_attr_t* attrp, void* (*routine)(void*), void* argp) { + int rc; + + if (rc = pthread_create(tidp, attrp, 
routine, argp); rc != 0) { + LOG(ERROR) << "Pthread_create error: " << strerror(rc); + } +} + +void Pthread_cancel(pthread_t tid) { + int rc; + + if (rc = pthread_cancel(tid); rc != 0) { + LOG(ERROR) << "Pthread_cancel error: " << strerror(rc); + } +} + +void Pthread_join(pthread_t tid, void** thread_return) { + int rc; + + if ((rc = pthread_join(tid, thread_return)) != 0) { + LOG(ERROR) << "Pthread_join error: " << strerror(rc); + } +} + +/* $begin detach */ +void Pthread_detach(pthread_t tid) { + int rc; + + if ((rc = pthread_detach(tid)) != 0) { + LOG(ERROR) << "Pthread_detach error: " << strerror(rc); + } +} +/* $end detach */ + +void Pthread_exit(void* retval) { pthread_exit(retval); } + +pthread_t Pthread_self() { return pthread_self(); } + +void Pthread_once(pthread_once_t* once_control, void (*init_function)()) { pthread_once(once_control, init_function); } + +/******************************* + * Wrappers for Posix semaphores + *******************************/ + +void Sem_init(sem_t* sem, int pshared, unsigned int value) { +// TODO(clang-tidy) : should use c11 cond or mutex instead of Posix sem + if (sem_init(sem, pshared, value) < 0) { // NOLINT + LOG(ERROR) << "Sem_init error: " << strerror(errno); + } +} + +void P(sem_t* sem) { + if (sem_wait(sem) < 0) { + LOG(ERROR) << "P error: " << strerror(errno); + } +} + +void V(sem_t* sem) { + if (sem_post(sem) < 0) { + LOG(ERROR) << "V error: " << strerror(errno); + } +} + +/********************************************************************* + * The Rio package - robust I/O functions + **********************************************************************/ +/* + * rio_readn - robustly read n bytes (unbuffered) + */ +/* $begin rio_readn */ +ssize_t rio_readn(int fd, void* usrbuf, size_t n) { + size_t nleft = n; + ssize_t nread; + char* bufp = static_cast(usrbuf); + + while (nleft > 0) { + if ((nread = read(fd, bufp, nleft)) < 0) { + if (errno == EINTR) { /* interrupted by sig handler return */ + nread = 0; /* and call read() again */ + } else { + return -1; /* errno set by read() */ +} + } else if (nread == 0) { + break; /* EOF */ +} + nleft -= nread; + bufp += nread; + } + return static_cast(n - nleft); /* return >= 0 */ +} +/* $end rio_readn */ + +/* + * rio_writen - robustly write n bytes (unbuffered) + */ +/* $begin rio_writen */ +ssize_t rio_writen(int fd, void* usrbuf, size_t n) { + size_t nleft = n; + ssize_t nwritten; + char* bufp = static_cast(usrbuf); + + while (nleft > 0) { + if ((nwritten = write(fd, bufp, nleft)) <= 0) { + if (errno == EINTR) { /* interrupted by sig handler return */ + nwritten = 0; /* and call write() again */ + } else { + return -1; /* errorno set by write() */ +} + } + nleft -= nwritten; + bufp += nwritten; + } + return static_cast(n); +} +/* $end rio_writen */ + +/* + * rio_read - This is a wrapper for the Unix read() function that + * transfers min(n, rio_cnt) bytes from an internal buffer to a user + * buffer, where n is the number of bytes requested by the user and + * rio_cnt is the number of unread bytes in the internal buffer. On + * entry, rio_read() refills the internal buffer via a call to + * read() if the internal buffer is empty. 
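+ * For example, with rio_cnt = 10 unread bytes buffered, a request for
+ * n = 4096 copies just those 10 bytes and returns 10; the next call
+ * refills the buffer with a single read() before copying again.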
+ */ +/* $begin rio_read */ +static ssize_t rio_read(rio_t* rp, char* usrbuf, size_t n) { + int cnt; + + while (rp->rio_cnt <= 0) { /* refill if buf is empty */ + rp->rio_cnt = static_cast(read(rp->rio_fd, rp->rio_buf, sizeof(rp->rio_buf))); + if (rp->rio_cnt < 0) { + if (errno != EINTR) { /* interrupted by sig handler return */ + return -1; +} + } else if (rp->rio_cnt == 0) { /* EOF */ + return 0; + } else { + rp->rio_bufptr = rp->rio_buf; /* reset buffer ptr */ +} + } + + /* Copy min(n, rp->rio_cnt) bytes from internal buf to user buf */ + cnt = static_cast(n); + if (rp->rio_cnt < static_cast(n)) { + cnt = rp->rio_cnt; + } + memcpy(usrbuf, rp->rio_bufptr, cnt); + rp->rio_bufptr += cnt; + rp->rio_cnt -= cnt; + return cnt; +} +/* $end rio_read */ + +/* + * rio_readinitb - Associate a descriptor with a read buffer and reset buffer + */ +/* $begin rio_readinitb */ +void rio_readinitb(rio_t* rp, int fd) { + rp->rio_fd = fd; + rp->rio_cnt = 0; + rp->rio_bufptr = rp->rio_buf; +} +/* $end rio_readinitb */ + +/* + * rio_readnb - Robustly read n bytes (buffered) + */ +/* $begin rio_readnb */ +ssize_t rio_readnb(rio_t* rp, void* usrbuf, size_t n) { + size_t nleft = n; + ssize_t nread; + char* bufp = static_cast(usrbuf); + + while (nleft > 0) { + if ((nread = rio_read(rp, bufp, nleft)) < 0) { + if (errno == EINTR) { /* interrupted by sig handler return */ + nread = 0; /* call read() again */ + } else { + return -1; /* errno set by read() */ + } + } else if (nread == 0) { + break; /* EOF */ + } + nleft -= nread; + bufp += nread; + } + return static_cast(n - nleft); /* return >= 0 */ +} +/* $end rio_readnb */ + +/* + * rio_readlineb - robustly read a text line (buffered) + */ +/* $begin rio_readlineb */ +ssize_t rio_readlineb(rio_t* rp, void* usrbuf, size_t maxlen) { + size_t n; + int rc; + char c; + char *bufp = static_cast(usrbuf); + + for (n = 1; n < maxlen; n++) { + if ((rc = static_cast(rio_read(rp, &c, 1))) == 1) { + *bufp++ = c; + if (c == '\n') { break; +} + } else if (rc == 0) { + if (n == 1) { + return 0; /* EOF, no data read */ + } else { + break; /* EOF, some data was read */ +} + } else { + return -1; /* error */ +} + } + *bufp = 0; + return static_cast(n); +} +/* $end rio_readlineb */ + +/********************************** + * Wrappers for robust I/O routines + **********************************/ +ssize_t Rio_readn(int fd, void* ptr, size_t nbytes) { + ssize_t n; + + if ((n = rio_readn(fd, ptr, nbytes)) < 0) { + LOG(ERROR) << "Rio_readn error: " << strerror(errno); + } + return n; +} + +void Rio_writen(int fd, void* usrbuf, size_t n) { + if (rio_writen(fd, usrbuf, n) != static_cast(n)) { + LOG(ERROR) << "Rio_writen error: " << strerror(errno); + } +} + +void Rio_readinitb(rio_t* rp, int fd) { rio_readinitb(rp, fd); } + +ssize_t Rio_readnb(rio_t* rp, void* usrbuf, size_t n) { + ssize_t rc; + + if ((rc = rio_readnb(rp, usrbuf, n)) < 0) { + LOG(ERROR) << "Rio_readnb error: " << strerror(errno); + } + return rc; +} + +ssize_t Rio_readlineb(rio_t* rp, void* usrbuf, size_t maxlen) { + ssize_t rc; + + if ((rc = rio_readlineb(rp, usrbuf, maxlen)) < 0) { + LOG(ERROR) << "Rio_readlineb error: " << strerror(errno); + } + return rc; +} + +/******************************** + * Client/server helper functions + ********************************/ +/* + * open_clientfd - open connection to server at + * and return a socket descriptor ready for reading and writing. + * Returns -1 and sets errno on Unix error. + * Returns -2 and sets h_errno on DNS (gethostbyname) error. 
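+ * Typical (hypothetical) use:
+ *   char host[] = "127.0.0.1";
+ *   int fd = open_clientfd(host, 9221);  // then wrap with rio_readinitb(&rio, fd)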
+ */ +/* $begin open_clientfd */ +int open_clientfd(char* hostname, int port) { + int clientfd; + struct hostent* hp; + struct sockaddr_in serveraddr; + + if ((clientfd = socket(AF_INET, SOCK_STREAM, 0)) < 0) { + return -1; /* check errno for cause of error */ + } + + /* Fill in the server's IP address and port */ + if (!(hp = gethostbyname(hostname))) { + return -2; /* check h_errno for cause of error */ + } + memset(&serveraddr, 0, sizeof(serveraddr)); + serveraddr.sin_family = AF_INET; + memmove(&serveraddr.sin_addr.s_addr, hp->h_addr_list[0], hp->h_length); + serveraddr.sin_port = htons(port); + + /* Establish a connection with the server */ + if (connect(clientfd, reinterpret_cast(&serveraddr), sizeof(serveraddr)) < 0) { + return -1; + } + return clientfd; +} +/* $end open_clientfd */ + +/* + * open_listenfd - open and return a listening socket on port + * Returns -1 and sets errno on Unix error. + */ +/* $begin open_listenfd */ +int open_listenfd(int port) { + int listenfd; + int optval = 1; + struct sockaddr_in serveraddr; + + /* Create a socket descriptor */ + if ((listenfd = socket(AF_INET, SOCK_STREAM, 0)) < 0) { + return -1; + } + + /* Eliminates "Address already in use" error from bind. */ + if (setsockopt(listenfd, SOL_SOCKET, SO_REUSEADDR, &optval, sizeof(int)) < 0) { + return -1; + } + + /* Listenfd will be an endpoint for all requests to port + on any IP address for this host */ + memset(&serveraddr, 0, sizeof(serveraddr)); + serveraddr.sin_family = AF_INET; + serveraddr.sin_addr.s_addr = htonl(INADDR_ANY); + serveraddr.sin_port = htons(static_cast(port)); + if (bind(listenfd, reinterpret_cast(&serveraddr), sizeof(serveraddr)) < 0) { + return -1; + } + + /* Make it a listening socket ready to accept connection requests */ + if (listen(listenfd, LISTENQ) < 0) { + return -1; +} + return listenfd; +} +/* $end open_listenfd */ + +/****************************************** + * Wrappers for the client/server helper routines + ******************************************/ +int Open_clientfd(char* hostname, int port) { + int rc; + + if ((rc = open_clientfd(hostname, port)) < 0) { + if (rc == -1) { + LOG(ERROR) << "Open_clientfd Unix error: " << strerror(errno); + } else { + LOG(ERROR) << "Open_clientfd DNS error: DNS error " << h_errno; + } + } + return rc; +} + +int Open_listenfd(int port) { + int rc; + + if ((rc = open_listenfd(port)) < 0) { + LOG(ERROR) << "Open_listenfd error: " << strerror(errno); + } + return rc; +} diff --git a/tools/pika_migrate/src/pstd/src/pstd_coding.cc b/tools/pika_migrate/src/pstd/src/pstd_coding.cc new file mode 100644 index 0000000000..8d2b0e67f3 --- /dev/null +++ b/tools/pika_migrate/src/pstd/src/pstd_coding.cc @@ -0,0 +1,204 @@ +// Copyright (c) 2011 The LevelDB Authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. See the AUTHORS file for names of contributors. 
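pstd_coding.cc provides LevelDB-style fixed-width and varint coding: each varint byte carries 7 payload bits, low bits first, with the high bit set on every byte except the last. A round-trip sketch (only the pstd_coding.h include path is taken from this patch; the main() is illustrative):

```cpp
#include <cassert>
#include <cstdint>
#include <string>

#include "pstd/include/pstd_coding.h"

int main() {
  std::string buf;
  pstd::PutVarint32(&buf, 300);  // 300 = 0b1'0010'1100, encoded as 0xAC 0x02
  assert(buf.size() == 2);       // values < 128 take 1 byte, < 16384 take 2, ...

  uint32_t v = 0;
  pstd::GetVarint32(&buf, &v);   // consumes the encoded prefix from buf
  assert(v == 300 && buf.empty());
  return 0;
}
```

The 32-bit worst case is five bytes (ceil(32/7)), which is exactly the buffer size PutVarint32 uses below.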
+
+#include "pstd/include/pstd_coding.h"
+#include "pstd/include/pstd_slice.h"
+
+namespace pstd {
+
+void EncodeFixed16(char* buf, uint16_t value) { memcpy(buf, &value, sizeof(value)); }
+
+void EncodeFixed32(char* buf, uint32_t value) { memcpy(buf, &value, sizeof(value)); }
+
+void EncodeFixed64(char* buf, uint64_t value) { memcpy(buf, &value, sizeof(value)); }
+
+void PutFixed16(std::string* dst, uint16_t value) {
+  char buf[sizeof(value)];
+  EncodeFixed16(buf, value);
+  dst->append(buf, sizeof(buf));
+}
+
+void PutFixed32(std::string* dst, uint32_t value) {
+  char buf[sizeof(value)];
+  EncodeFixed32(buf, value);
+  dst->append(buf, sizeof(buf));
+}
+
+void PutFixed64(std::string* dst, uint64_t value) {
+  char buf[sizeof(value)];
+  EncodeFixed64(buf, value);
+  dst->append(buf, sizeof(buf));
+}
+
+char* EncodeVarint32(char* dst, uint32_t v) {
+  // Operate on characters as unsigneds
+  auto ptr = reinterpret_cast<unsigned char*>(dst);
+  static const int B = 128;
+  if (v < (1 << 7)) {
+    *(ptr++) = v;
+  } else if (v < (1 << 14)) {
+    *(ptr++) = v | B;
+    *(ptr++) = v >> 7;
+  } else if (v < (1 << 21)) {
+    *(ptr++) = v | B;
+    *(ptr++) = (v >> 7) | B;
+    *(ptr++) = v >> 14;
+  } else if (v < (1 << 28)) {
+    *(ptr++) = v | B;
+    *(ptr++) = (v >> 7) | B;
+    *(ptr++) = (v >> 14) | B;
+    *(ptr++) = v >> 21;
+  } else {
+    *(ptr++) = v | B;
+    *(ptr++) = (v >> 7) | B;
+    *(ptr++) = (v >> 14) | B;
+    *(ptr++) = (v >> 21) | B;
+    *(ptr++) = v >> 28;
+  }
+  return reinterpret_cast<char*>(ptr);
+}
+
+void PutVarint32(std::string* dst, uint32_t v) {
+  char buf[5];
+  char* ptr = EncodeVarint32(buf, v);
+  dst->append(buf, ptr - buf);
+}
+
+char* EncodeVarint64(char* dst, uint64_t v) {
+  static const int B = 128;
+  auto ptr = reinterpret_cast<unsigned char*>(dst);
+  while (v >= B) {
+    *(ptr++) = (v & (B - 1)) | B;
+    v >>= 7;
+  }
+  *(ptr++) = static_cast<unsigned char>(v);
+  return reinterpret_cast<char*>(ptr);
+}
+
+void PutVarint64(std::string* dst, uint64_t v) {
+  char buf[10];
+  char* ptr = EncodeVarint64(buf, v);
+  dst->append(buf, ptr - buf);
+}
+
+void PutLengthPrefixedString(std::string* dst, const std::string& value) {
+  PutVarint32(dst, value.size());
+  dst->append(value.data(), value.size());
+}
+
+int VarintLength(uint64_t v) {
+  int len = 1;
+  while (v >= 128) {
+    v >>= 7;
+    len++;
+  }
+  return len;
+}
+
+const char* GetVarint32PtrFallback(const char* p, const char* limit, uint32_t* value) {
+  uint32_t result = 0;
+  for (uint32_t shift = 0; shift <= 28 && p < limit; shift += 7) {
+    uint32_t byte = *(reinterpret_cast<const unsigned char*>(p));
+    p++;
+    if ((byte & 128) != 0U) {
+      // More bytes are present
+      result |= ((byte & 127) << shift);
+    } else {
+      result |= (byte << shift);
+      *value = result;
+      return reinterpret_cast<const char*>(p);
+    }
+  }
+  return nullptr;
+}
+
+bool GetVarint32(std::string* input, uint32_t* value) {
+  const char* p = input->data();
+  const char* limit = p + input->size();
+  const char* q = GetVarint32Ptr(p, limit, value);
+  if (!q) {
+    return false;
+  } else {
+    (*input).erase(0, q - p);
+    return true;
+  }
+}
+
+bool GetVarint32(Slice* input, uint32_t* value) {
+  const char* p = input->data();
+  const char* limit = p + input->size();
+  const char* q = GetVarint32Ptr(p, limit, value);
+  if (!q) {
+    return false;
+  } else {
+    *input = Slice(q, limit - q);
+    return true;
+  }
+}
+
+const char* GetVarint64Ptr(const char* p, const char* limit, uint64_t* value) {
+  uint64_t result = 0;
+  for (uint32_t shift = 0; shift <= 63 && p < limit; shift += 7) {
+    uint64_t byte = *(reinterpret_cast<const unsigned char*>(p));
+    p++;
+    if ((byte & 128) != 0U) {
+      // More bytes are present
+      result |= ((byte & 127) <<
shift); + } else { + result |= (byte << shift); + *value = result; + return reinterpret_cast(p); + } + } + return nullptr; +} + +bool GetVarint64(Slice* input, uint64_t* value) { + const char* p = input->data(); + const char* limit = p + input->size(); + const char* q = GetVarint64Ptr(p, limit, value); + if (!q) { + return false; + } else { + *input = Slice(q, limit - q); + return true; + } +} + +const char* GetLengthPrefixedSlice(const char* p, const char* limit, Slice* result) { + uint32_t len; + p = GetVarint32Ptr(p, limit, &len); + if (!p) { + return nullptr; + } + if (p + len > limit) { + return nullptr; + } + *result = Slice(p, len); + return p + len; +} + +bool GetLengthPrefixedSlice(Slice* input, Slice* result) { + uint32_t len; + if (GetVarint32(input, &len) && input->size() >= len) { + *result = Slice(input->data(), len); + input->remove_prefix(len); + return true; + } else { + return false; + } +} + +bool GetLengthPrefixedString(std::string* input, std::string* result) { + uint32_t len; + if (GetVarint32(input, &len) && input->size() >= len) { + *result = (*input).substr(0, len); + input->erase(0, len); + return true; + } else { + return false; + } +} + +} // namespace pstd diff --git a/tools/pika_migrate/src/pstd/src/pstd_hash.cc b/tools/pika_migrate/src/pstd/src/pstd_hash.cc new file mode 100644 index 0000000000..9fb4cba77d --- /dev/null +++ b/tools/pika_migrate/src/pstd/src/pstd_hash.cc @@ -0,0 +1,583 @@ +/* + * Updated to C++, zedwood.com 2012 + * Based on Olivier Gay's version + * See Modified BSD License below: + * + * FIPS 180-2 SHA-224/256/384/512 implementation + * Issue date: 04/30/2005 + * http://www.ouah.org/ogay/sha2/ + * + * Copyright (C) 2005, 2007 Olivier Gay + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. Neither the name of the project nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE PROJECT AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE PROJECT OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + */ + +/* MD5 + converted to C++ class by Frank Thilo (thilo@unix-ag.org) + for bzflag (http://www.bzflag.org) + + based on: + + md5.h and md5.c + reference implemantion of RFC 1321 + + Copyright (C) 1991-2, RSA Data Security, Inc. Created 1991. All + rights reserved. 
+ + License to copy and use this software is granted provided that it + is identified as the "RSA Data Security, Inc. MD5 Message-Digest + Algorithm" in all material mentioning or referencing this software + or this function. + + License is also granted to make and use derivative works provided + that such works are identified as "derived from the RSA Data + Security, Inc. MD5 Message-Digest Algorithm" in all material + mentioning or referencing the derived work. + + RSA Data Security, Inc. makes no representations concerning either + the merchantability of this software or the suitability of this + software for any particular purpose. It is provided "as is" + without express or implied warranty of any kind. + + These notices must be retained in any copies of any part of this + documentation and/or software. +*/ + +// Copyright (c) 2015-present, Qihoo, Inc. All rights reserved. +// This source code is licensed under the BSD-style license found in the +// LICENSE file in the root directory of this source tree. An additional grant +// of patent rights can be found in the PATENTS file in the same directory. + +#include "pstd/include/pstd_hash.h" +#include +#include +#include +#include + +namespace pstd { + +class SHA256 { + protected: + using uint8 = unsigned char; + using uint32 = unsigned int; + using uint64 = uint64_t; + + const static uint32 sha256_k[]; + static const unsigned int SHA224_256_BLOCK_SIZE = (512 / 8); + + public: + void init(); + void update(const unsigned char* message, unsigned int len); + void final(unsigned char* digest); + static const unsigned int DIGEST_SIZE = (256 / 8); + + protected: + void transform(const unsigned char* message, unsigned int block_nb); + unsigned int m_tot_len; + unsigned int m_len; + unsigned char m_block[2 * SHA224_256_BLOCK_SIZE]; + uint32 m_h[8]; +}; + +#define SHA2_SHFR(x, n) ((x) >> (n)) +#define SHA2_ROTR(x, n) (((x) >> (n)) | ((x) << ((sizeof(x) << 3) - (n)))) +#define SHA2_ROTL(x, n) (((x) << (n)) | ((x) >> ((sizeof(x) << 3) - (n)))) +#define SHA2_CH(x, y, z) (((x) & (y)) ^ (~(x) & (z))) +#define SHA2_MAJ(x, y, z) (((x) & (y)) ^ ((x) & (z)) ^ ((y) & (z))) +#define SHA256_F1(x) (SHA2_ROTR(x, 2) ^ SHA2_ROTR(x, 13) ^ SHA2_ROTR(x, 22)) +#define SHA256_F2(x) (SHA2_ROTR(x, 6) ^ SHA2_ROTR(x, 11) ^ SHA2_ROTR(x, 25)) +#define SHA256_F3(x) (SHA2_ROTR(x, 7) ^ SHA2_ROTR(x, 18) ^ SHA2_SHFR(x, 3)) +#define SHA256_F4(x) (SHA2_ROTR(x, 17) ^ SHA2_ROTR(x, 19) ^ SHA2_SHFR(x, 10)) +#define SHA2_UNPACK32(x, str) \ + { \ + *((str) + 3) = (uint8)((x)); \ + *((str) + 2) = (uint8)((x) >> 8); \ + *((str) + 1) = (uint8)((x) >> 16); \ + *((str) + 0) = (uint8)((x) >> 24); \ + } +#define SHA2_PACK32(str, x) \ + { \ + *(x) = ((uint32) * ((str) + 3)) | ((uint32) * ((str) + 2) << 8) | ((uint32) * ((str) + 1) << 16) | \ + ((uint32) * ((str) + 0) << 24); \ + } + +const unsigned int SHA256::sha256_k[64] = { // UL = uint32 + 0x428a2f98, 0x71374491, 0xb5c0fbcf, 0xe9b5dba5, 0x3956c25b, 0x59f111f1, 0x923f82a4, 0xab1c5ed5, + 0xd807aa98, 0x12835b01, 0x243185be, 0x550c7dc3, 0x72be5d74, 0x80deb1fe, 0x9bdc06a7, 0xc19bf174, + 0xe49b69c1, 0xefbe4786, 0x0fc19dc6, 0x240ca1cc, 0x2de92c6f, 0x4a7484aa, 0x5cb0a9dc, 0x76f988da, + 0x983e5152, 0xa831c66d, 0xb00327c8, 0xbf597fc7, 0xc6e00bf3, 0xd5a79147, 0x06ca6351, 0x14292967, + 0x27b70a85, 0x2e1b2138, 0x4d2c6dfc, 0x53380d13, 0x650a7354, 0x766a0abb, 0x81c2c92e, 0x92722c85, + 0xa2bfe8a1, 0xa81a664b, 0xc24b8b70, 0xc76c51a3, 0xd192e819, 0xd6990624, 0xf40e3585, 0x106aa070, + 0x19a4c116, 0x1e376c08, 0x2748774c, 0x34b0bcb5, 0x391c0cb3, 0x4ed8aa4a, 
0x5b9cca4f, 0x682e6ff3, + 0x748f82ee, 0x78a5636f, 0x84c87814, 0x8cc70208, 0x90befffa, 0xa4506ceb, 0xbef9a3f7, 0xc67178f2}; + +void SHA256::transform(const unsigned char* message, unsigned int block_nb) { + uint32 w[64]; + uint32 wv[8]; + uint32 t1; + uint32 t2; + const unsigned char* sub_block; + int i; + int j; + for (i = 0; i < static_cast(block_nb); i++) { + sub_block = message + (i << 6); + for (j = 0; j < 16; j++) { + SHA2_PACK32(&sub_block[j << 2], &w[j]); + } + for (j = 16; j < 64; j++) { + w[j] = SHA256_F4(w[j - 2]) + w[j - 7] + SHA256_F3(w[j - 15]) + w[j - 16]; + } + for (j = 0; j < 8; j++) { + wv[j] = m_h[j]; + } + for (j = 0; j < 64; j++) { + t1 = wv[7] + SHA256_F2(wv[4]) + SHA2_CH(wv[4], wv[5], wv[6]) + sha256_k[j] + w[j]; + t2 = SHA256_F1(wv[0]) + SHA2_MAJ(wv[0], wv[1], wv[2]); + wv[7] = wv[6]; + wv[6] = wv[5]; + wv[5] = wv[4]; + wv[4] = wv[3] + t1; + wv[3] = wv[2]; + wv[2] = wv[1]; + wv[1] = wv[0]; + wv[0] = t1 + t2; + } + for (j = 0; j < 8; j++) { + m_h[j] += wv[j]; + } + } +} + +void SHA256::init() { + m_h[0] = 0x6a09e667; + m_h[1] = 0xbb67ae85; + m_h[2] = 0x3c6ef372; + m_h[3] = 0xa54ff53a; + m_h[4] = 0x510e527f; + m_h[5] = 0x9b05688c; + m_h[6] = 0x1f83d9ab; + m_h[7] = 0x5be0cd19; + m_len = 0; + m_tot_len = 0; +} + +void SHA256::update(const unsigned char* message, unsigned int len) { + unsigned int block_nb; + unsigned int new_len; + unsigned int rem_len; + unsigned int tmp_len; + const unsigned char* shifted_message; + tmp_len = SHA224_256_BLOCK_SIZE - m_len; + rem_len = len < tmp_len ? len : tmp_len; + memcpy(&m_block[m_len], message, rem_len); + if (m_len + len < SHA224_256_BLOCK_SIZE) { + m_len += len; + return; + } + new_len = len - rem_len; + block_nb = new_len / SHA224_256_BLOCK_SIZE; + shifted_message = message + rem_len; + transform(m_block, 1); + transform(shifted_message, block_nb); + rem_len = new_len % SHA224_256_BLOCK_SIZE; + memcpy(m_block, &shifted_message[block_nb << 6], rem_len); + m_len = rem_len; + m_tot_len += (block_nb + 1) << 6; +} + +void SHA256::final(unsigned char* digest) { + unsigned int block_nb; + unsigned int pm_len; + unsigned int len_b; + int i; + block_nb = (1 + static_cast((SHA224_256_BLOCK_SIZE - 9) < (m_len % SHA224_256_BLOCK_SIZE))); + len_b = (m_tot_len + m_len) << 3; + pm_len = block_nb << 6; + memset(m_block + m_len, 0, pm_len - m_len); + m_block[m_len] = 0x80; + SHA2_UNPACK32(len_b, m_block + pm_len - 4); + transform(m_block, block_nb); + for (i = 0; i < 8; i++) { + SHA2_UNPACK32(m_h[i], &digest[i << 2]); + } +} + +std::string sha256(const std::string& input, bool raw) { + unsigned char digest[SHA256::DIGEST_SIZE]; + memset(digest, 0, SHA256::DIGEST_SIZE); + + SHA256 ctx = SHA256(); + ctx.init(); + ctx.update((unsigned char*)input.c_str(), input.length()); // NOLINT + ctx.final(digest); + + if (raw) { + std::string res; + for (unsigned char i : digest) { + res.append(1, static_cast(i)); + } + return res; + } + char buf[2 * SHA256::DIGEST_SIZE + 1]; + buf[2 * SHA256::DIGEST_SIZE] = 0; + for (size_t i = 0; i < SHA256::DIGEST_SIZE; i++) { + sprintf(buf + i * 2, "%02x", digest[i]); + } + return {buf}; +} + +bool isSha256(const std::string& input) { + if (input.size() != SHA256::DIGEST_SIZE * 2) { + return false; + } + for (const auto& item : input) { + if ((item < 'a' || item > 'f') && (item < '0' || item > '9')) { + return false; + } + } + return true; +} +// MD5 hash function + +// Constants for MD5Transform routine. 
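+// (S11..S44 are the per-round left-rotation amounts from RFC 1321; each of
+// the four rounds cycles through its own four values.)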
+#define S11 7
+#define S12 12
+#define S13 17
+#define S14 22
+#define S21 5
+#define S22 9
+#define S23 14
+#define S24 20
+#define S31 4
+#define S32 11
+#define S33 16
+#define S34 23
+#define S41 6
+#define S42 10
+#define S43 15
+#define S44 21
+
+///////////////////////////////////////////////
+
+// F, G, H and I are basic MD5 functions.
+inline MD5::uint4 MD5::F(uint4 x, uint4 y, uint4 z) { return (x & y) | (~x & z); }
+
+inline MD5::uint4 MD5::G(uint4 x, uint4 y, uint4 z) { return (x & z) | (y & ~z); }
+
+inline MD5::uint4 MD5::H(uint4 x, uint4 y, uint4 z) { return x ^ y ^ z; }
+
+inline MD5::uint4 MD5::I(uint4 x, uint4 y, uint4 z) { return y ^ (x | ~z); }
+
+// rotate_left rotates x left n bits.
+inline MD5::uint4 MD5::rotate_left(uint4 x, int n) { return (x << n) | (x >> (32 - n)); }
+
+// FF, GG, HH, and II transformations for rounds 1, 2, 3, and 4.
+// Rotation is separate from addition to prevent recomputation.
+inline void MD5::FF(uint4& a, uint4 b, uint4 c, uint4 d, uint4 x, uint4 s, uint4 ac) {
+  a = rotate_left(a + F(b, c, d) + x + ac, static_cast<int>(s)) + b;
+}
+
+inline void MD5::GG(uint4& a, uint4 b, uint4 c, uint4 d, uint4 x, uint4 s, uint4 ac) {
+  a = rotate_left(a + G(b, c, d) + x + ac, static_cast<int>(s)) + b;
+}
+
+inline void MD5::HH(uint4& a, uint4 b, uint4 c, uint4 d, uint4 x, uint4 s, uint4 ac) {
+  a = rotate_left(a + H(b, c, d) + x + ac, static_cast<int>(s)) + b;
+}
+
+inline void MD5::II(uint4& a, uint4 b, uint4 c, uint4 d, uint4 x, uint4 s, uint4 ac) {
+  a = rotate_left(a + I(b, c, d) + x + ac, static_cast<int>(s)) + b;
+}
+
+//////////////////////////////////////////////
+
+// default ctor, just initialize
+MD5::MD5() { init(); }
+
+//////////////////////////////////////////////
+
+// nifty shortcut ctor, compute MD5 for string and finalize it right away
+MD5::MD5(const std::string& text) {
+  init();
+  update(text.c_str(), text.length());
+  finalize();
+}
+
+//////////////////////////////
+
+void MD5::init() {
+  finalized = false;
+
+  count[0] = 0;
+  count[1] = 0;
+
+  // load magic initialization constants.
+  state[0] = 0x67452301;
+  state[1] = 0xefcdab89;
+  state[2] = 0x98badcfe;
+  state[3] = 0x10325476;
+}
+
+//////////////////////////////
+
+// decodes input (unsigned char) into output (uint4). Assumes len is a multiple of 4.
+void MD5::decode(uint4 output[], const uint1 input[], size_type len) {
+  for (unsigned int i = 0, j = 0; j < len; i++, j += 4) {
+    output[i] = (static_cast<uint4>(input[j])) | ((static_cast<uint4>(input[j + 1])) << 8) |
+                ((static_cast<uint4>(input[j + 2])) << 16) | ((static_cast<uint4>(input[j + 3])) << 24);
+  }
+}
+
+//////////////////////////////
+
+// encodes input (uint4) into output (unsigned char). Assumes len is
+// a multiple of 4.
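+// Example: input[0] = 0x04030201 yields output[0..3] = 01 02 03 04, the
+// little-endian layout that decode() above reverses.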
+void MD5::encode(uint1 output[], const uint4 input[], size_type len) { + for (size_type i = 0, j = 0; j < len; i++, j += 4) { + output[j] = input[i] & 0xff; + output[j + 1] = (input[i] >> 8) & 0xff; + output[j + 2] = (input[i] >> 16) & 0xff; + output[j + 3] = (input[i] >> 24) & 0xff; + } +} + +////////////////////////////// + +// apply MD5 algo on a block +void MD5::transform(const uint1 block[blocksize]) { + uint4 a = state[0]; + uint4 b = state[1]; + uint4 c = state[2]; + uint4 d = state[3]; + uint4 x[16]; + decode(x, block, blocksize); + + /* Round 1 */ + FF(a, b, c, d, x[0], S11, 0xd76aa478); /* 1 */ + FF(d, a, b, c, x[1], S12, 0xe8c7b756); /* 2 */ + FF(c, d, a, b, x[2], S13, 0x242070db); /* 3 */ + FF(b, c, d, a, x[3], S14, 0xc1bdceee); /* 4 */ + FF(a, b, c, d, x[4], S11, 0xf57c0faf); /* 5 */ + FF(d, a, b, c, x[5], S12, 0x4787c62a); /* 6 */ + FF(c, d, a, b, x[6], S13, 0xa8304613); /* 7 */ + FF(b, c, d, a, x[7], S14, 0xfd469501); /* 8 */ + FF(a, b, c, d, x[8], S11, 0x698098d8); /* 9 */ + FF(d, a, b, c, x[9], S12, 0x8b44f7af); /* 10 */ + FF(c, d, a, b, x[10], S13, 0xffff5bb1); /* 11 */ + FF(b, c, d, a, x[11], S14, 0x895cd7be); /* 12 */ + FF(a, b, c, d, x[12], S11, 0x6b901122); /* 13 */ + FF(d, a, b, c, x[13], S12, 0xfd987193); /* 14 */ + FF(c, d, a, b, x[14], S13, 0xa679438e); /* 15 */ + FF(b, c, d, a, x[15], S14, 0x49b40821); /* 16 */ + + /* Round 2 */ + GG(a, b, c, d, x[1], S21, 0xf61e2562); /* 17 */ + GG(d, a, b, c, x[6], S22, 0xc040b340); /* 18 */ + GG(c, d, a, b, x[11], S23, 0x265e5a51); /* 19 */ + GG(b, c, d, a, x[0], S24, 0xe9b6c7aa); /* 20 */ + GG(a, b, c, d, x[5], S21, 0xd62f105d); /* 21 */ + GG(d, a, b, c, x[10], S22, 0x2441453); /* 22 */ + GG(c, d, a, b, x[15], S23, 0xd8a1e681); /* 23 */ + GG(b, c, d, a, x[4], S24, 0xe7d3fbc8); /* 24 */ + GG(a, b, c, d, x[9], S21, 0x21e1cde6); /* 25 */ + GG(d, a, b, c, x[14], S22, 0xc33707d6); /* 26 */ + GG(c, d, a, b, x[3], S23, 0xf4d50d87); /* 27 */ + GG(b, c, d, a, x[8], S24, 0x455a14ed); /* 28 */ + GG(a, b, c, d, x[13], S21, 0xa9e3e905); /* 29 */ + GG(d, a, b, c, x[2], S22, 0xfcefa3f8); /* 30 */ + GG(c, d, a, b, x[7], S23, 0x676f02d9); /* 31 */ + GG(b, c, d, a, x[12], S24, 0x8d2a4c8a); /* 32 */ + + /* Round 3 */ + HH(a, b, c, d, x[5], S31, 0xfffa3942); /* 33 */ + HH(d, a, b, c, x[8], S32, 0x8771f681); /* 34 */ + HH(c, d, a, b, x[11], S33, 0x6d9d6122); /* 35 */ + HH(b, c, d, a, x[14], S34, 0xfde5380c); /* 36 */ + HH(a, b, c, d, x[1], S31, 0xa4beea44); /* 37 */ + HH(d, a, b, c, x[4], S32, 0x4bdecfa9); /* 38 */ + HH(c, d, a, b, x[7], S33, 0xf6bb4b60); /* 39 */ + HH(b, c, d, a, x[10], S34, 0xbebfbc70); /* 40 */ + HH(a, b, c, d, x[13], S31, 0x289b7ec6); /* 41 */ + HH(d, a, b, c, x[0], S32, 0xeaa127fa); /* 42 */ + HH(c, d, a, b, x[3], S33, 0xd4ef3085); /* 43 */ + HH(b, c, d, a, x[6], S34, 0x4881d05); /* 44 */ + HH(a, b, c, d, x[9], S31, 0xd9d4d039); /* 45 */ + HH(d, a, b, c, x[12], S32, 0xe6db99e5); /* 46 */ + HH(c, d, a, b, x[15], S33, 0x1fa27cf8); /* 47 */ + HH(b, c, d, a, x[2], S34, 0xc4ac5665); /* 48 */ + + /* Round 4 */ + II(a, b, c, d, x[0], S41, 0xf4292244); /* 49 */ + II(d, a, b, c, x[7], S42, 0x432aff97); /* 50 */ + II(c, d, a, b, x[14], S43, 0xab9423a7); /* 51 */ + II(b, c, d, a, x[5], S44, 0xfc93a039); /* 52 */ + II(a, b, c, d, x[12], S41, 0x655b59c3); /* 53 */ + II(d, a, b, c, x[3], S42, 0x8f0ccc92); /* 54 */ + II(c, d, a, b, x[10], S43, 0xffeff47d); /* 55 */ + II(b, c, d, a, x[1], S44, 0x85845dd1); /* 56 */ + II(a, b, c, d, x[8], S41, 0x6fa87e4f); /* 57 */ + II(d, a, b, c, x[15], S42, 0xfe2ce6e0); /* 58 */ + II(c, d, a, b, x[6], 
S43, 0xa3014314); /* 59 */ + II(b, c, d, a, x[13], S44, 0x4e0811a1); /* 60 */ + II(a, b, c, d, x[4], S41, 0xf7537e82); /* 61 */ + II(d, a, b, c, x[11], S42, 0xbd3af235); /* 62 */ + II(c, d, a, b, x[2], S43, 0x2ad7d2bb); /* 63 */ + II(b, c, d, a, x[9], S44, 0xeb86d391); /* 64 */ + + state[0] += a; + state[1] += b; + state[2] += c; + state[3] += d; + + // Zeroize sensitive information. + memset(x, 0, sizeof x); +} + +////////////////////////////// + +// MD5 block update operation. Continues an MD5 message-digest +// operation, processing another message block +void MD5::update(const unsigned char input[], size_type length) { + // compute number of bytes mod 64 + size_type index = count[0] / 8 % blocksize; + + // Update number of bits + if ((count[0] += (length << 3)) < (length << 3)) { + count[1]++; + } + count[1] += (length >> 29); + + // number of bytes we need to fill in buffer + size_type firstpart = 64 - index; + + size_type i; + + // transform as many times as possible. + if (length >= firstpart) { + // fill buffer first, transform + memcpy(&buffer[index], input, firstpart); + transform(buffer); + + // transform chunks of blocksize (64 bytes) + for (i = firstpart; i + blocksize <= length; i += blocksize) { + transform(&input[i]); + } + + index = 0; + } else { + i = 0; + } + + // buffer remaining input + memcpy(&buffer[index], &input[i], length - i); +} + +////////////////////////////// + +// for convenience provide a verson with signed char +void MD5::update(const char input[], size_type length) { + update(reinterpret_cast(input), length); +} + +////////////////////////////// + +// MD5 finalization. Ends an MD5 message-digest operation, writing the +// the message digest and zeroizing the context. +MD5& MD5::finalize() { + static unsigned char padding[64] = {0x80, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}; + + if (!finalized) { + // Save number of bits + unsigned char bits[8]; + encode(bits, count, 8); + + // pad out to 56 mod 64. + size_type index = count[0] / 8 % 64; + size_type padLen = (index < 56) ? (56 - index) : (120 - index); + update(padding, padLen); + + // Append length (before padding) + update(bits, 8); + + // Store state in digest + encode(digest, state, 16); + + // Zeroize sensitive information. 
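+    // buffer and count still hold raw message material, so they are wiped;
+    // state/digest keep only the finished hash value.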
+    memset(buffer, 0, sizeof buffer);
+    memset(count, 0, sizeof count);
+
+    finalized = true;
+  }
+
+  return *this;
+}
+
+//////////////////////////////
+
+// return hex representation of digest as string
+std::string MD5::hexdigest() const {
+  if (!finalized) {
+    return "";
+  }
+
+  char buf[33];
+  for (int i = 0; i < 16; i++) {
+    sprintf(buf + i * 2, "%02x", digest[i]);
+  }
+  buf[32] = 0;
+
+  return {buf};
+}
+
+std::string MD5::rawdigest() const {
+  if (!finalized) {
+    return "";
+  }
+  std::string res;
+  for (unsigned char i : digest) {
+    res.append(1, static_cast<char>(i));
+  }
+  return res;
+}
+
+//////////////////////////////
+
+std::ostream& operator<<(std::ostream& out, MD5 md5) { return out << md5.hexdigest(); }
+
+//////////////////////////////
+
+std::string md5(const std::string& str, bool raw) {
+  MD5 md5 = MD5(str);
+
+  if (raw) {
+    return md5.rawdigest();
+  }
+  return md5.hexdigest();
+}
+
+}  // namespace pstd
diff --git a/tools/pika_migrate/src/pstd/src/pstd_mutex.cc b/tools/pika_migrate/src/pstd/src/pstd_mutex.cc
new file mode 100644
index 0000000000..1734c6eedb
--- /dev/null
+++ b/tools/pika_migrate/src/pstd/src/pstd_mutex.cc
@@ -0,0 +1,74 @@
+#include "pstd/include/pstd_mutex.h"
+
+#include <string>
+
+namespace pstd {
+
+void RefMutex::Ref() { refs_++; }
+
+void RefMutex::Unref() {
+  --refs_;
+  if (refs_ == 0) {
+    delete this;
+  }
+}
+
+void RefMutex::Lock() { mu_.lock(); }
+
+void RefMutex::Unlock() { mu_.unlock(); }
+
+RecordMutex::~RecordMutex() {
+  mutex_.lock();
+
+  auto it = records_.begin();
+  for (; it != records_.end(); it++) {
+    delete it->second;
+  }
+  mutex_.unlock();
+}
+
+void RecordMutex::Lock(const std::string& key) {
+  mutex_.lock();
+  auto it = records_.find(key);
+
+  if (it != records_.end()) {
+    RefMutex* ref_mutex = it->second;
+    ref_mutex->Ref();
+    mutex_.unlock();
+
+    ref_mutex->Lock();
+  } else {
+    auto ref_mutex = new RefMutex();
+
+    records_.emplace(key, ref_mutex);
+    ref_mutex->Ref();
+    mutex_.unlock();
+
+    ref_mutex->Lock();
+  }
+}
+
+void RecordMutex::Unlock(const std::string& key) {
+  mutex_.lock();
+  auto it = records_.find(key);
+
+  if (it != records_.end()) {
+    RefMutex* ref_mutex = it->second;
+
+    if (ref_mutex->IsLastRef()) {
+      records_.erase(it);
+    }
+    ref_mutex->Unlock();
+    ref_mutex->Unref();
+  }
+
+  mutex_.unlock();
+}
+
+}  // namespace pstd
diff --git a/tools/pika_migrate/src/pstd/src/pstd_status.cc b/tools/pika_migrate/src/pstd/src/pstd_status.cc
new file mode 100644
index 0000000000..7cfd37d6ee
--- /dev/null
+++ b/tools/pika_migrate/src/pstd/src/pstd_status.cc
@@ -0,0 +1,95 @@
+// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file. See the AUTHORS file for names of contributors.
+
+#include "pstd/include/pstd_status.h"
+#include <cassert>
+#include <cstdio>
+#include <cstring>
+
+namespace pstd {
+
+const char* Status::CopyState(const char* state) {
+  uint32_t size;
+  memcpy(&size, state, sizeof(size));
+  char* result = new char[size + 5];
+  memcpy(result, state, size + 5);
+  return result;
+}
+
+Status::Status(Code code, const Slice& msg, const Slice& msg2) {
+  assert(code != kOk);
+  const uint32_t len1 = static_cast<uint32_t>(msg.size());
+  const uint32_t len2 = static_cast<uint32_t>(msg2.size());
+  const uint32_t size = len1 + (len2 != 0U ? (2 + len2) : 0);
+  char* result = new char[size + 5];
+  memcpy(result, &size, sizeof(size));
+  result[4] = static_cast<char>(code);
+  memcpy(result + 5, msg.data(), len1);
+  if (len2 != 0U) {
+    result[5 + len1] = ':';
+    result[6 + len1] = ' ';
+    memcpy(result + 7 + len1, msg2.data(), len2);
+  }
+  state_ = result;
+}
+
+std::string Status::ToString() const {
+  if (!state_) {
+    return "OK";
+  } else {
+    char tmp[30];
+    const char* type;
+    switch (code()) {
+      case kOk:
+        type = "OK";
+        break;
+      case kNotFound:
+        type = "NotFound: ";
+        break;
+      case kCorruption:
+        type = "Corruption: ";
+        break;
+      case kNotSupported:
+        type = "Not implemented: ";
+        break;
+      case kInvalidArgument:
+        type = "Invalid argument: ";
+        break;
+      case kIOError:
+        type = "IO error: ";
+        break;
+      case kEndFile:
+        type = "End file: ";
+        break;
+      case kIncomplete:
+        type = "InComplete: ";
+        break;
+      case kComplete:
+        type = "Complete: ";
+        break;
+      case kTimeout:
+        type = "Timeout: ";
+        break;
+      case kAuthFailed:
+        type = "AuthFailed: ";
+        break;
+      case kBusy:
+        type = "Busy: ";
+        break;
+      case kError:
+        type = "";
+        break;
+      default:
+        snprintf(tmp, sizeof(tmp), "Unknown code(%d): ", static_cast<int>(code()));
+        type = tmp;
+        break;
+    }
+    std::string result(type);
+    uint32_t length;
+    memcpy(&length, state_, sizeof(length));
+    result.append(state_ + 5, length);
+    return result;
+  }
+}
+
+}  // namespace pstd
diff --git a/tools/pika_migrate/src/pstd/src/pstd_string.cc b/tools/pika_migrate/src/pstd/src/pstd_string.cc
new file mode 100644
index 0000000000..15c7f865c4
--- /dev/null
+++ b/tools/pika_migrate/src/pstd/src/pstd_string.cc
@@ -0,0 +1,763 @@
+// Copyright (c) 2015-present, Qihoo, Inc. All rights reserved.
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree. An additional grant
+// of patent rights can be found in the PATENTS file in the same directory.
+
+/*
+ * Copyright (c) 2009-2012, Salvatore Sanfilippo
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ *   * Redistributions of source code must retain the above copyright notice,
+ *     this list of conditions and the following disclaimer.
+ *   * Redistributions in binary form must reproduce the above copyright
+ *     notice, this list of conditions and the following disclaimer in the
+ *     documentation and/or other materials provided with the distribution.
+ *   * Neither the name of Redis nor the names of its contributors may be used
+ *     to endorse or promote products derived from this software without
+ *     specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+#include "pstd/include/pstd_string.h"
+
+#include <strings.h>
+#include <sys/time.h>
+#include <unistd.h>
+#include <algorithm>
+#include <cctype>
+#include <cfloat>
+#include <climits>
+#include <cmath>
+#include <cstdio>
+#include <cstdlib>
+#include <cstring>
+
+#include <sstream>
+
+#include "pstd/include/pstd_defer.h"
+
+namespace pstd {
+
+/* Glob-style pattern matching. */
+int stringmatchlen(const char* pattern, int patternLen, const char* string, int stringLen, int nocase) {
+  while (patternLen != 0) {
+    switch (pattern[0]) {
+      case '*':
+        while (pattern[1] == '*') {
+          pattern++;
+          patternLen--;
+        }
+        if (patternLen == 1) {
+          return 1; /* match */
+        }
+        while (stringLen != 0) {
+          if (stringmatchlen(pattern + 1, patternLen - 1, string, stringLen, nocase) != 0) {
+            return 1; /* match */
+          }
+          string++;
+          stringLen--;
+        }
+        return 0; /* no match */
+        break;
+      case '?':
+        if (stringLen == 0) {
+          return 0; /* no match */
+        }
+        string++;
+        stringLen--;
+        break;
+      case '[': {
+        int nott;
+        int match;
+
+        pattern++;
+        patternLen--;
+        nott = static_cast<int>(pattern[0] == '^');
+        if (nott != 0) {
+          pattern++;
+          patternLen--;
+        }
+        match = 0;
+        while (true) {
+          if (pattern[0] == '\\') {
+            pattern++;
+            patternLen--;
+            if (pattern[0] == string[0]) {
+              match = 1;
+            }
+          } else if (pattern[0] == ']') {
+            break;
+          } else if (patternLen == 0) {
+            pattern--;
+            patternLen++;
+            break;
+          } else if (pattern[1] == '-' && patternLen >= 3) {
+            int start = pattern[0];
+            int end = pattern[2];
+            int c = string[0];
+            if (start > end) {
+              int t = start;
+              start = end;
+              end = t;
+            }
+            if (nocase != 0) {
+              start = tolower(start);
+              end = tolower(end);
+              c = tolower(c);
+            }
+            pattern += 2;
+            patternLen -= 2;
+            if (c >= start && c <= end) {
+              match = 1;
+            }
+          } else {
+            if (nocase == 0) {
+              if (pattern[0] == string[0]) {
+                match = 1;
+              }
+            } else {
+              if (tolower(static_cast<int>(pattern[0])) == tolower(static_cast<int>(string[0]))) {
+                match = 1;
+              }
+            }
+          }
+          pattern++;
+          patternLen--;
+        }
+        if (nott != 0) {
+          match = static_cast<int>(match == 0);
+        }
+        if (match == 0) {
+          return 0; /* no match */
+        }
+        string++;
+        stringLen--;
+        break;
+      }
+      case '\\':
+        if (patternLen >= 2) {
+          pattern++;
+          patternLen--;
+        }
+        /* fall through */
+      default:
+        if (nocase == 0) {
+          if (pattern[0] != string[0]) {
+            return 0; /* no match */
+          }
+        } else {
+          if (tolower(static_cast<int>(pattern[0])) != tolower(static_cast<int>(string[0]))) {
+            return 0; /* no match */
+          }
+        }
+        string++;
+        stringLen--;
+        break;
+    }
+    pattern++;
+    patternLen--;
+    if (stringLen == 0) {
+      while (*pattern == '*') {
+        pattern++;
+        patternLen--;
+      }
+      break;
+    }
+  }
+  if (patternLen == 0 && stringLen == 0) {
+    return 1;
+  }
+  return 0;
+}
+
+int stringmatch(const char* pattern, const char* string, int nocase) {
+  return stringmatchlen(pattern, static_cast<int>(strlen(pattern)),
+                        string, static_cast<int>(strlen(string)), nocase);
+}
+
+/* Convert a string representing an amount of memory into the number of
+ * bytes, so for instance memtoll("1Gb") will return 1073741824, that is
+ * (1024*1024*1024).
+ *
+ * On parsing error, if *err is not null, it's set to 1, otherwise it's
+ * set to 0 */
+long long memtoll(const char* p, int* err) {
+  const char* u;
+  char buf[128];
+  long mul; /* unit multiplier */
+  long long val;
+  unsigned int digits;
+
+  if (err) {
+    *err = 0;
+  }
+  /* Search the first non digit character.
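+   * For example, given "512mb", u is left pointing at the "mb" suffix
+   * while the digits "512" are copied into buf below.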
+   */
+  u = p;
+  if (*u == '-') {
+    u++;
+  }
+  while ((*u != 0) && (isdigit(*u) != 0)) {
+    u++;
+  }
+  if (*u == '\0' || (strcasecmp(u, "b") == 0)) {
+    mul = 1;
+  } else if (strcasecmp(u, "k") == 0) {
+    mul = 1000;
+  } else if (strcasecmp(u, "kb") == 0) {
+    mul = 1024;
+  } else if (strcasecmp(u, "m") == 0) {
+    mul = 1000 * 1000;
+  } else if (strcasecmp(u, "mb") == 0) {
+    mul = 1024 * 1024;
+  } else if (strcasecmp(u, "g") == 0) {
+    mul = 1000L * 1000 * 1000;
+  } else if (strcasecmp(u, "gb") == 0) {
+    mul = 1024L * 1024 * 1024;
+  } else {
+    if (err) {
+      *err = 1;
+    }
+    mul = 1;
+  }
+  digits = u - p;
+  if (digits >= sizeof(buf)) {
+    if (err) {
+      *err = 1;
+    }
+    return LLONG_MAX;
+  }
+  memcpy(buf, p, digits);
+  buf[digits] = '\0';
+  val = strtoll(buf, nullptr, 10);
+  return val * mul;
+}
+
+/* Return the number of digits of 'v' when converted to string in radix 10.
+ * See ll2string() for more information. */
+uint32_t digits10(uint64_t v) {
+  if (v < 10) {
+    return 1;
+  }
+  if (v < 100) {
+    return 2;
+  }
+  if (v < 1000) {
+    return 3;
+  }
+  if (v < 1000000000000UL) {
+    if (v < 100000000UL) {
+      if (v < 1000000) {
+        if (v < 10000) {
+          return 4;
+        }
+        return 5 + static_cast<uint32_t>(v >= 100000);
+      }
+      return 7 + static_cast<uint32_t>(v >= 10000000UL);
+    }
+    if (v < 10000000000UL) {
+      return 9 + static_cast<uint32_t>(v >= 1000000000UL);
+    }
+    return 11 + static_cast<uint32_t>(v >= 100000000000UL);
+  }
+  return 12 + digits10(v / 1000000000000UL);
+}
+
+/* Convert a long long into a string. Returns the number of
+ * characters needed to represent the number.
+ * If the buffer is not big enough to store the string, 0 is returned.
+ *
+ * Based on the following article (that apparently does not provide a
+ * novel approach but only publicizes an already used technique):
+ *
+ * https://www.facebook.com/notes/facebook-engineering/three-optimization-tips-for-c/10151361643253920
+ *
+ * Modified in order to handle signed integers since the original code was
+ * designed for unsigned integers. */
+int ll2string(char* dst, size_t dstlen, long long svalue) {
+  static const char digits[201] =
+      "0001020304050607080910111213141516171819"
+      "2021222324252627282930313233343536373839"
+      "4041424344454647484950515253545556575859"
+      "6061626364656667686970717273747576777879"
+      "8081828384858687888990919293949596979899";
+  int negative;
+  unsigned long long value;
+
+  /* The main loop works with 64bit unsigned integers for simplicity, so
+   * we convert the number here and remember if it is negative. */
+  if (svalue < 0) {
+    if (svalue != LLONG_MIN) {
+      value = -svalue;
+    } else {
+      value = (static_cast<unsigned long long>(LLONG_MAX) + 1);
+    }
+    negative = 1;
+  } else {
+    value = svalue;
+    negative = 0;
+  }
+
+  /* Check length. */
+  uint32_t const length = digits10(value) + negative;
+  if (length >= dstlen) {
+    return 0;
+  }
+
+  /* Null term. */
+  uint32_t next = length;
+  dst[next] = '\0';
+  next--;
+  while (value >= 100) {
+    int const i = static_cast<int>((value % 100) * 2);
+    value /= 100;
+    dst[next] = digits[i + 1];
+    dst[next - 1] = digits[i];
+    next -= 2;
+  }
+
+  /* Handle last 1-2 digits. */
+  if (value < 10) {
+    dst[next] = static_cast<char>('0' + value);
+  } else {
+    auto i = static_cast<uint32_t>(value) * 2;
+    dst[next] = digits[i + 1];
+    dst[next - 1] = digits[i];
+  }
+
+  /* Add sign. */
+  if (negative != 0) {
+    dst[0] = '-';
+  }
+  return static_cast<int>(length);
+}
+
+/* Convert a string into a long long. Returns 1 if the string could be parsed
+ * into a (non-overflowing) long long, 0 otherwise. The value will be set to
+ * the parsed value when appropriate.
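+ * For example, string2int("123", 3, &v) sets v to 123 and returns 1,
+ * while "12a", " 1", and "+1" are rejected and return 0.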
+ */
+int string2int(const char* s, size_t slen, long long* value) {
+  const char* p = s;
+  size_t plen = 0;
+  int negative = 0;
+  unsigned long long v;
+
+  if (plen == slen) {
+    return 0;
+  }
+
+  /* Special case: first and only digit is 0. */
+  if (slen == 1 && p[0] == '0') {
+    if (value) {
+      *value = 0;
+    }
+    return 1;
+  }
+
+  if (p[0] == '-') {
+    negative = 1;
+    p++;
+    plen++;
+
+    /* Abort on only a negative sign. */
+    if (plen == slen) {
+      return 0;
+    }
+  }
+
+  while (plen < slen && p[0] == '0') {
+    p++;
+    plen++;
+  }
+
+  if (plen == slen) {
+    if (value) {
+      *value = 0;
+    }
+    return 1;
+  }
+
+  /* First digit should be 1-9, otherwise the string should just be 0. */
+  if (p[0] >= '1' && p[0] <= '9') {
+    v = p[0] - '0';
+    p++;
+    plen++;
+  } else if (p[0] == '0' && slen == 1) {
+    *value = 0;
+    return 1;
+  } else {
+    return 0;
+  }
+
+  while (plen < slen && p[0] >= '0' && p[0] <= '9') {
+    if (v > (ULLONG_MAX / 10)) { /* Overflow. */
+      return 0;
+    }
+    v *= 10;
+
+    if (v > (ULLONG_MAX - (p[0] - '0'))) { /* Overflow. */
+      return 0;
+    }
+    v += p[0] - '0';
+
+    p++;
+    plen++;
+  }
+
+  /* Return if not all bytes were used. */
+  if (plen < slen) {
+    return 0;
+  }
+
+  if (negative != 0) {
+    if (v > (static_cast<unsigned long long>(-(LLONG_MIN + 1)) + 1)) { /* Overflow. */
+      return 0;
+    }
+    if (value) {
+      *value = static_cast<long long>(-v);
+    }
+  } else {
+    if (v > LLONG_MAX) { /* Overflow. */
+      return 0;
+    }
+    if (value) {
+      *value = static_cast<long long>(v);
+    }
+  }
+  return 1;
+}
+
+/* Convert a string into a long. Returns 1 if the string could be parsed into a
+ * (non-overflowing) long, 0 otherwise. The value will be set to the parsed
+ * value when appropriate. */
+int string2int(const char* s, size_t slen, long* lval) {
+  long long llval;
+
+  if (string2int(s, slen, &llval) == 0) {
+    return 0;
+  }
+
+  if (llval < LONG_MIN || llval > LONG_MAX) {
+    return 0;
+  }
+
+  *lval = static_cast<long>(llval);
+  return 1;
+}
+
+/* Convert a string into a unsigned long. Returns 1 if the string could be parsed into a
+ * (non-overflowing) unsigned long, 0 otherwise. The value will be set to the parsed
+ * value when appropriate. */
+int string2int(const char* s, size_t slen, unsigned long* lval) {
+  long long llval;
+
+  if (string2int(s, slen, &llval) == 0) {
+    return 0;
+  }
+
+  if (llval > static_cast<long long>(ULONG_MAX)) {
+    return 0;
+  }
+
+  *lval = static_cast<unsigned long>(llval);
+  return 1;
+}
+
+/* Convert a double to a string representation. Returns the number of bytes
+ * required. The representation should always be parsable by strtod(3). */
+int d2string(char* buf, size_t len, double value) {
+  if (std::isnan(value)) {
+    len = snprintf(buf, len, "nan");
+  } else if (std::isinf(value)) {
+    if (value < 0) {
+      len = snprintf(buf, len, "-inf");
+    } else {
+      len = snprintf(buf, len, "inf");
+    }
+  } else if (value == 0) {
+    /* See: http://en.wikipedia.org/wiki/Signed_zero, "Comparisons". */
+    if (1.0 / value < 0) {
+      len = snprintf(buf, len, "-0");
+    } else {
+      len = snprintf(buf, len, "0");
+    }
+  } else {
+#if (DBL_MANT_DIG >= 52) && (LLONG_MAX == 0x7fffffffffffffffLL)
+    /* Check if the float is in a safe range to be casted into a
+     * long long. We are assuming that long long is 64 bit here.
+     * Also we are assuming that there are no implementations around where
+     * double has precision < 52 bit.
+     *
+     * Under these assumptions we test if a double is inside an interval
+     * where casting to long long is safe. Then using two castings we
+     * make sure the decimal part is zero. If all this is true we use
+     * integer printing function that is much faster.
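+     *
+     * For example, 3.0 lies inside (min, max) and survives the
+     * double -> long long -> double round trip unchanged, so it is
+     * printed through ll2string as "3"; 3.5 fails the round trip and
+     * falls back to snprintf("%.17g").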
+     */
+    double min = -4503599627370495; /* -(2^52 - 1) */
+    double max = 4503599627370496;  /* 2^52 */
+    if (value > min && value < max && value == (static_cast<double>(static_cast<long long>(value)))) {
+      len = ll2string(buf, len, static_cast<long long>(value));
+    } else  // NOLINT
+#endif
+      len = snprintf(buf, len, "%.17g", value);
+  }
+
+  return static_cast<int>(len);
+}
+
+int string2d(const char* s, size_t slen, double* dval) {
+  char* pEnd;
+  double d = strtod(s, &pEnd);
+  if (pEnd != s + slen) {
+    return 0;
+  }
+
+  if (dval) {
+    *dval = d;
+  }
+  return 1;
+}
+
+/* Generate the Redis "Run ID", a SHA1-sized random number that identifies a
+ * given execution of Redis, so that if you are talking with an instance
+ * having run_id == A, and you reconnect and it has run_id == B, you can be
+ * sure that it is either a different instance or it was restarted. */
+std::string getRandomHexChars(const size_t len) {
+  FILE* fp = fopen("/dev/urandom", "r");
+  DEFER {
+    if (fp) {
+      fclose(fp);
+      fp = nullptr;
+    }
+  };
+
+  char charset[] = "0123456789abcdef";
+  unsigned int j{0};
+  std::string buf(len, '\0');
+  char* p = buf.data();
+
+  if (!fp || !fread(p, len, 1, fp)) {
+    /* If we can't read from /dev/urandom, do some reasonable effort
+     * in order to create some entropy, since this function is used to
+     * generate run_id and cluster instance IDs */
+    char* x = p;
+    unsigned int l = len;
+    struct timeval tv;
+    pid_t pid = getpid();
+
+    /* Use time and PID to fill the initial array. */
+    gettimeofday(&tv, nullptr);
+    if (l >= sizeof(tv.tv_usec)) {
+      memcpy(x, &tv.tv_usec, sizeof(tv.tv_usec));
+      l -= sizeof(tv.tv_usec);
+      x += sizeof(tv.tv_usec);
+    }
+    if (l >= sizeof(tv.tv_sec)) {
+      memcpy(x, &tv.tv_sec, sizeof(tv.tv_sec));
+      l -= sizeof(tv.tv_sec);
+      x += sizeof(tv.tv_sec);
+    }
+    if (l >= sizeof(pid)) {
+      memcpy(x, &pid, sizeof(pid));
+      l -= sizeof(pid);
+      x += sizeof(pid);
+    }
+    /* Finally xor it with rand() output, that was already seeded with
+     * time() at startup. */
+    for (j = 0; j < len; j++) {
+      p[j] = static_cast<char>(p[j] ^ rand());
+    }
+  }
+  /* Turn it into hex digits taking just 4 bits out of 8 for every byte.
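+   * E.g. a raw byte 0x3a maps to charset[0x3a & 0x0F] == charset[10] == 'a'.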
+   */
+  for (j = 0; j < len; j++) {
+    p[j] = charset[p[j] & 0x0F];
+  }
+  return std::string(p, len);
+}
+
+std::vector<std::string>& StringSplit(const std::string& s, char delim, std::vector<std::string>& elems) {
+  elems.clear();
+  std::stringstream ss(s);
+  std::string item;
+  while (std::getline(ss, item, delim)) {
+    if (!item.empty()) {
+      elems.push_back(item);
+    }
+  }
+  return elems;
+}
+
+void StringSplit2Set(const std::string& s, char delim, std::unordered_set<std::string>& elems) {
+  elems.clear();
+  std::stringstream ss(s);
+  std::string item;
+  while (std::getline(ss, item, delim)) {
+    item = pstd::StringTrim(item);
+    if (!item.empty()) {
+      elems.emplace(item);
+    }
+  }
+}
+
+std::string Set2String(const std::unordered_set<std::string>& elems, char delim) {
+  std::string value;
+  for (const auto& e : elems) {
+    value.append(e);
+    value.append(1, delim);
+  }
+  if (!value.empty()) {
+    value.resize(value.size() - 1);
+  }
+  return value;
+}
+
+std::string StringConcat(const std::vector<std::string>& elems, char delim) {
+  std::string result;
+  auto it = elems.begin();
+  while (it != elems.end()) {
+    result.append(*it);
+    result.append(1, delim);
+    ++it;
+  }
+  if (!result.empty()) {
+    result.resize(result.size() - 1);
+  }
+  return result;
+}
+
+std::string& StringToLower(std::string& ori) {
+  std::transform(ori.begin(), ori.end(), ori.begin(), ::tolower);
+  return ori;
+}
+
+std::string& StringToUpper(std::string& ori) {
+  std::transform(ori.begin(), ori.end(), ori.begin(), ::toupper);
+  return ori;
+}
+
+std::string IpPortString(const std::string& ip, int port) {
+  if (ip.empty()) {
+    return {};
+  }
+  char buf[10];
+  if (ll2string(buf, sizeof(buf), port) <= 0) {
+    return {};
+  }
+  return (ip + ":" + buf);
+}
+
+std::string ToRead(const std::string& str) {
+  std::string read;
+  if (str.empty()) {
+    return read;
+  }
+  read.append(1, '"');
+  char buf[16];
+  std::string::const_iterator iter = str.begin();
+  while (iter != str.end()) {
+    switch (*iter) {
+      case '\\':
+      case '"':
+        read.append(1, '\\');
+        read.append(1, *iter);
+        break;
+      case '\n':
+        read.append("\\n");
+        break;
+      case '\r':
+        read.append("\\r");
+        break;
+      case '\t':
+        read.append("\\t");
+        break;
+      case '\a':
+        read.append("\\a");
+        break;
+      case '\b':
+        read.append("\\b");
+        break;
+      default:
+        if (isprint(*iter) != 0) {
+          read.append(1, *iter);
+        } else {
+          snprintf(buf, sizeof(buf), "\\x%02x", static_cast<unsigned char>(*iter));
+          read.append(buf);
+        }
+        break;
+    }
+    iter++;
+  }
+  read.append(1, '"');
+  return read;
+}
+
+bool ParseIpPortString(const std::string& ip_port, std::string& ip, int& port) {
+  if (ip_port.empty()) {
+    return false;
+  }
+  size_t pos = ip_port.find(':');
+  if (pos == std::string::npos) {
+    return false;
+  }
+  ip = ip_port.substr(0, pos);
+  std::string port_str = ip_port.substr(pos + 1);
+  long lport = 0;
+  if (1 != string2int(port_str.data(), port_str.size(), &lport)) {
+    return false;
+  }
+  port = static_cast<int>(lport);
+  return true;
+}
+
+// Trim charlist
+std::string StringTrim(const std::string& ori, const std::string& charlist) {
+  if (ori.empty()) {
+    return ori;
+  }
+
+  size_t pos = 0;
+  size_t rpos = ori.size() - 1;
+  while (pos < ori.size()) {
+    bool meet = false;
+    for (char c : charlist) {
+      if (ori.at(pos) == c) {
+        meet = true;
+        break;
+      }
+    }
+    if (!meet) {
+      break;
+    }
+    ++pos;
+  }
+  while (rpos > 0) {
+    bool meet = false;
+    for (char c : charlist) {
+      if (ori.at(rpos) == c) {
+        meet = true;
+        break;
+      }
+    }
+    if (!meet) {
+      break;
+    }
+    --rpos;
+  }
+  return ori.substr(pos, rpos - pos + 1);
+}
+
+bool isspace(const std::string& str) {
+  return std::count_if(str.begin(), str.end(), [](unsigned char c) { return std::isspace(c); }) != 0;
+}
+
+}  // namespace pstd
diff --git a/tools/pika_migrate/src/pstd/src/rsync.cc b/tools/pika_migrate/src/pstd/src/rsync.cc
new file mode 100644
index 0000000000..5748cfa5ac
--- /dev/null
+++ b/tools/pika_migrate/src/pstd/src/rsync.cc
@@ -0,0 +1,174 @@
+#include <unistd.h>
+#include <fstream>
+#include <memory>
+#include <sstream>
+#include <glog/logging.h>
+
+#include "pstd/include/env.h"
+#include "pstd/include/rsync.h"
+#include "pstd/include/xdebug.h"
+
+#ifdef __FreeBSD__
+#  include <sys/types.h>
+#  include <sys/wait.h>
+#endif
+
+namespace pstd {
+// Clean files for rsync info, such as the lock, log, pid, conf file
+static bool CleanRsyncInfo(const std::string& path) { return pstd::DeleteDirIfExist(path + kRsyncSubDir); }
+
+int StartRsync(const std::string& raw_path, const std::string& module, const std::string& ip, const int port,
+               const std::string& passwd) {
+  // Sanity check
+  if (raw_path.empty() || module.empty() || passwd.empty()) {
+    return -1;
+  }
+  std::string path(raw_path);
+  if (path.back() != '/') {
+    path += "/";
+  }
+  std::string rsync_path = path + kRsyncSubDir + "/";
+  CreatePath(rsync_path);
+
+  // Generate secret file
+  std::string secret_file(rsync_path + kRsyncSecretFile);
+  std::ofstream secret_stream(secret_file.c_str());
+  if (!secret_stream) {
+    LOG(WARNING) << "Open rsync secret file failed!";
+    return -1;
+  }
+  secret_stream << kRsyncUser << ":" << passwd;
+  secret_stream.close();
+
+  // Generate conf file
+  std::string conf_file(rsync_path + kRsyncConfFile);
+  std::ofstream conf_stream(conf_file.c_str());
+  if (!conf_stream) {
+    LOG(WARNING) << "Open rsync conf file failed!";
+    return -1;
+  }
+
+  if (geteuid() == 0) {
+    conf_stream << "uid = root" << std::endl;
+    conf_stream << "gid = root" << std::endl;
+  }
+  conf_stream << "use chroot = no" << std::endl;
+  conf_stream << "max connections = 10" << std::endl;
+  conf_stream << "lock file = " << rsync_path + kRsyncLockFile << std::endl;
+  conf_stream << "log file = " << rsync_path + kRsyncLogFile << std::endl;
+  conf_stream << "pid file = " << rsync_path + kRsyncPidFile << std::endl;
+  conf_stream << "list = no" << std::endl;
+  conf_stream << "strict modes = no" << std::endl;
+  conf_stream << "auth users = " << kRsyncUser << std::endl;
+  conf_stream << "secrets file = " << secret_file << std::endl;
+  conf_stream << "[" << module << "]" << std::endl;
+  conf_stream << "path = " << path << std::endl;
+  conf_stream << "read only = no" << std::endl;
+  conf_stream.close();
+
+  // Execute rsync command
+  std::stringstream ss;
+  ss << "rsync --daemon --config=" << conf_file;
+  ss << " --address=" << ip;
+  if (port != 873) {
+    ss << " --port=" << port;
+  }
+  std::string rsync_start_cmd = ss.str();
+  int ret = system(rsync_start_cmd.c_str());
+  if (ret == 0 || (WIFEXITED(ret) && !WEXITSTATUS(ret))) {
+    return 0;
+  }
+  LOG(WARNING) << "Start rsync daemon failed : " << ret << "!";
+  return ret;
+}
+
+int StopRsync(const std::string& raw_path) {
+  // Sanity check
+  if (raw_path.empty()) {
+    LOG(WARNING) << "empty rsync path!";
+    return -1;
+  }
+  std::string path(raw_path);
+  if (path.back() != '/') {
+    path += "/";
+  }
+
+  std::string pid_file(path + kRsyncSubDir + "/" + kRsyncPidFile);
+  if (!FileExists(pid_file)) {
+    LOG(WARNING) << "no rsync pid file found";
+    return 0;  // rsync daemon does not exist
+  }
+
+  // Kill Rsync
+  std::unique_ptr<SequentialFile> sequential_file;
+  if (!NewSequentialFile(pid_file, sequential_file).ok()) {
+    LOG(WARNING) << "no rsync pid file found";
+    return 0;
+  };
+
+  char line[32];
+  if (!(sequential_file->ReadLine(line, 32))) {
+    LOG(WARNING) << "read rsync pid file err";
+    return 0;
+  };
+
+  pid_t pid = atoi(line);
+
+  if (pid <= 1) {
+    LOG(WARNING) << "read rsync pid err";
+    return 0;
+  }
+
+  std::string rsync_stop_cmd = "kill -- -$(ps -o pgid -p" + std::to_string(pid) + " | grep -o '[0-9]*')";
+  int ret = system(rsync_stop_cmd.c_str());
+  if (ret == 0 || (WIFEXITED(ret) && !WEXITSTATUS(ret))) {
+    LOG(INFO) << "Stop rsync success!";
+  } else {
+    LOG(WARNING) << "Stop rsync daemon failed : " << ret << "!";
+  }
+  CleanRsyncInfo(path);
+  return ret;
+}
+
+int RsyncSendFile(const std::string& local_file_path, const std::string& remote_file_path,
+                  const std::string& secret_file_path, const RsyncRemote& remote) {
+  std::stringstream ss;
+  ss << "rsync -avP --bwlimit=" << remote.kbps << " --password-file=" << secret_file_path
+     << " --port=" << remote.port << " " << local_file_path << " " << kRsyncUser << "@" << remote.host
+     << "::" << remote.module << "/" << remote_file_path;
+  std::string rsync_cmd = ss.str();
+  int ret = system(rsync_cmd.c_str());
+  if (ret == 0 || (WIFEXITED(ret) && !WEXITSTATUS(ret))) {
+    return 0;
+  }
+  LOG(WARNING) << "Rsync send file failed : " << ret << "!";
+  return ret;
+}
+
+int RsyncSendClearTarget(const std::string& local_dir_path, const std::string& remote_dir_path,
+                         const std::string& secret_file_path, const RsyncRemote& remote) {
+  if (local_dir_path.empty() || remote_dir_path.empty()) {
+    return -2;
+  }
+  std::string local_dir(local_dir_path);
+  std::string remote_dir(remote_dir_path);
+  if (local_dir_path.back() != '/') {
+    local_dir.append("/");
+  }
+  if (remote_dir_path.back() != '/') {
+    remote_dir.append("/");
+  }
+  std::stringstream ss;
+  ss << "rsync -avP --delete --port=" << remote.port << " --password-file=" << secret_file_path << " " << local_dir
+     << " " << kRsyncUser << "@" << remote.host << "::" << remote.module << "/" << remote_dir;
+  std::string rsync_cmd = ss.str();
+  int ret = system(rsync_cmd.c_str());
+  if (ret == 0 || (WIFEXITED(ret) && !WEXITSTATUS(ret))) {
+    return 0;
+  }
+  LOG(WARNING) << "Rsync send file failed : " << ret << "!";
+  return ret;
+}
+
+}  // namespace pstd
diff --git a/tools/pika_migrate/src/pstd/src/scope_record_lock.cc b/tools/pika_migrate/src/pstd/src/scope_record_lock.cc
new file mode 100644
index 0000000000..4aba3e5ca0
--- /dev/null
+++ b/tools/pika_migrate/src/pstd/src/scope_record_lock.cc
@@ -0,0 +1,76 @@
+// Copyright (c) 2017-present The storage Authors. All rights reserved.
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree. An additional grant
+// of patent rights can be found in the PATENTS file in the same directory.
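+
+// MultiScopeRecordLock below sorts its keys and skips duplicates before
+// locking, so concurrent callers always acquire per-key locks in the same
+// global order, which prevents lock-ordering deadlocks. A minimal usage
+// sketch (assuming an existing std::shared_ptr<LockMgr> named lock_mgr):
+//
+//   std::vector<std::string> keys = {"k2", "k1", "k2"};
+//   pstd::lock::MultiScopeRecordLock guard(lock_mgr, keys);  // locks k1, then k2 (once)
+//   /* ... mutate the guarded records ... */
+//   // the destructor releases the same keys on scope exit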
+
+#include <algorithm>
+
+#include "pstd/include/scope_record_lock.h"
+
+namespace pstd::lock {
+
+MultiScopeRecordLock::MultiScopeRecordLock(const std::shared_ptr<LockMgr>& lock_mgr,
+                                           const std::vector<std::string>& keys)
+    : lock_mgr_(lock_mgr), keys_(keys) {
+  std::string pre_key;
+  std::sort(keys_.begin(), keys_.end());
+  if (!keys_.empty() && keys_[0].empty()) {
+    lock_mgr_->TryLock(pre_key);
+  }
+
+  for (const auto& key : keys_) {
+    if (pre_key != key) {
+      lock_mgr_->TryLock(key);
+      pre_key = key;
+    }
+  }
+}
+
+MultiScopeRecordLock::~MultiScopeRecordLock() {
+  std::string pre_key;
+  if (!keys_.empty() && keys_[0].empty()) {
+    lock_mgr_->UnLock(pre_key);
+  }
+
+  for (const auto& key : keys_) {
+    if (pre_key != key) {
+      lock_mgr_->UnLock(key);
+      pre_key = key;
+    }
+  }
+}
+
+void MultiRecordLock::Lock(const std::vector<std::string>& keys) {
+  std::vector<std::string> internal_keys = keys;
+  std::sort(internal_keys.begin(), internal_keys.end());
+  // init to be ""
+  std::string pre_key;
+  // consider internal_keys "" "" "a"
+  if (!internal_keys.empty()) {
+    lock_mgr_->TryLock(internal_keys.front());
+    pre_key = internal_keys.front();
+  }
+
+  for (const auto& key : internal_keys) {
+    if (pre_key != key) {
+      lock_mgr_->TryLock(key);
+      pre_key = key;
+    }
+  }
+}
+
+void MultiRecordLock::Unlock(const std::vector<std::string>& keys) {
+  std::vector<std::string> internal_keys = keys;
+  std::sort(internal_keys.begin(), internal_keys.end());
+  std::string pre_key;
+  if (!internal_keys.empty()) {
+    lock_mgr_->UnLock(internal_keys.front());
+    pre_key = internal_keys.front();
+  }
+
+  for (const auto& key : internal_keys) {
+    if (pre_key != key) {
+      lock_mgr_->UnLock(key);
+      pre_key = key;
+    }
+  }
+}
+}  // namespace pstd::lock
diff --git a/tools/pika_migrate/src/pstd/src/testutil.cc b/tools/pika_migrate/src/pstd/src/testutil.cc
new file mode 100644
index 0000000000..1618fa4bf4
--- /dev/null
+++ b/tools/pika_migrate/src/pstd/src/testutil.cc
@@ -0,0 +1,42 @@
+#include "pstd/include/testutil.h"
+
+#include <sys/time.h>
+#include <unistd.h>
+
+#include <cstdio>
+#include <cstdlib>
+#include <ctime>
+
+#include "pstd/include/random.h"
+
+namespace pstd {
+
+void current_time_str(char* str, size_t max_len) {
+  struct timeval tv;
+  struct tm tmm;
+
+  gettimeofday(&tv, nullptr);
+
+  localtime_r(&(tv.tv_sec), &tmm);
+  snprintf(str, max_len, "%04d-%02d-%02dT%02d:%02d:%02d.%06ld",
+           tmm.tm_year + 1900,
+           tmm.tm_mon + 1,
+           tmm.tm_mday,
+           tmm.tm_hour,
+           tmm.tm_min,
+           tmm.tm_sec,
+           tv.tv_usec);  // NOLINT: tv_usec's type differs between macOS and Ubuntu
+}
+
+int GetTestDirectory(std::string* result) {
+  const char* env = getenv("TEST_TMPDIR");
+  if (env && env[0] != '\0') {
+    *result = env;
+  } else {
+    char buf[100];
+    snprintf(buf, sizeof(buf), "/tmp/pstdtest-%d", static_cast<int>(geteuid()));
+    *result = buf;
+  }
+  return 0;
+}
+
+}  // namespace pstd
diff --git a/tools/pika_migrate/src/pstd/tests/CMakeLists.txt b/tools/pika_migrate/src/pstd/tests/CMakeLists.txt
new file mode 100644
index 0000000000..2b68833202
--- /dev/null
+++ b/tools/pika_migrate/src/pstd/tests/CMakeLists.txt
@@ -0,0 +1,35 @@
+cmake_minimum_required(VERSION 3.18)
+
+include(GoogleTest)
+aux_source_directory(../src DIR_SRCS)
+set(CMAKE_CXX_STANDARD 17)
+
+file(GLOB_RECURSE PSTD_TEST_SOURCE "${PROJECT_SOURCE_DIR}/tests/*.cc")
+
+foreach(pstd_test_source ${PSTD_TEST_SOURCE})
+  get_filename_component(pstd_test_filename ${pstd_test_source} NAME)
+  string(REPLACE ".cc" "" pstd_test_name ${pstd_test_filename})
+
+  add_executable(${pstd_test_name} ${pstd_test_source})
+  target_include_directories(${pstd_test_name}
+    PUBLIC ${PROJECT_SOURCE_DIR}/include
+    PUBLIC ${PROJECT_SOURCE_DIR}/..
+    ${ROCKSDB_INCLUDE_DIR}
+    ${ROCKSDB_SOURCE_DIR}
+  )
+
+  add_dependencies(${pstd_test_name} pstd gtest glog gflags ${LIBUNWIND_NAME})
+  target_link_libraries(${pstd_test_name}
+    PUBLIC pstd
+    PUBLIC ${GTEST_LIBRARY}
+    PUBLIC ${GTEST_MAIN_LIBRARY}
+    PUBLIC ${GLOG_LIBRARY}
+    PUBLIC ${GFLAGS_LIBRARY}
+    PUBLIC ${LIBUNWIND_LIBRARY}
+  )
+  add_test(NAME ${pstd_test_name}
+    COMMAND ${pstd_test_name}
+    WORKING_DIRECTORY .)
+endforeach()
diff --git a/tools/pika_migrate/src/pstd/tests/base_conf_test.cc b/tools/pika_migrate/src/pstd/tests/base_conf_test.cc
new file mode 100644
index 0000000000..865883e736
--- /dev/null
+++ b/tools/pika_migrate/src/pstd/tests/base_conf_test.cc
@@ -0,0 +1,84 @@
+// Copyright (c) 2015-present, Qihoo, Inc. All rights reserved.
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree. An additional grant
+// of patent rights can be found in the PATENTS file in the same directory.
+
+#include <memory>
+#include <vector>
+
+#include "glog/logging.h"
+#include "gtest/gtest.h"
+#include "pstd/include/base_conf.h"
+#include "pstd/include/env.h"
+#include "pstd/include/testutil.h"
+
+namespace pstd {
+
+class BaseConfTest : public ::testing::Test {
+ public:
+  BaseConfTest() {
+    GetTestDirectory(&tmpdir_);
+    DeleteDirIfExist(tmpdir_);
+    CreateDir(tmpdir_);
+    test_conf_ = tmpdir_ + "/test.conf";
+  }
+
+  Status CreateSampleConf() {
+    std::vector<std::string> sample_conf = {
+        "test_int : 1\n",
+        "test_str : abkxk\n",
+        "test_vec : four, five, six\n",
+        "test_bool : yes\n",
+    };
+
+    std::unique_ptr<WritableFile> write_file;
+    Status ret = NewWritableFile(test_conf_, write_file);
+    if (!ret.ok()) {
+      return ret;
+    }
+    for (std::string& item : sample_conf) {
+      write_file->Append(item);
+    }
+
+    return Status::OK();
+  }
+
+  void ASSERT_OK(const Status& s) { ASSERT_TRUE(s.ok()); }
+
+ protected:
+  std::string tmpdir_;
+  std::string test_conf_;
+};
+
+TEST_F(BaseConfTest, WriteReadConf) {
+  ASSERT_OK(CreateSampleConf());
+  auto conf = std::make_unique<BaseConf>(test_conf_);
+  ASSERT_EQ(conf->LoadConf(), 0);
+
+  // Write configuration
+  ASSERT_TRUE(conf->SetConfInt("test_int", 1345));
+  ASSERT_TRUE(conf->SetConfStr("test_str", "kdkbixk"));
+  ASSERT_TRUE(conf->SetConfStr("test_vec", "one, two, three"));
+  ASSERT_TRUE(conf->SetConfBool("test_bool", false));
+  // Cover test
+  ASSERT_TRUE(conf->SetConfInt("test_int", 13985));
+  ASSERT_TRUE(conf->WriteBack());
+
+  // Read configuration
+  int test_int;
+  std::string test_str;
+  bool test_bool;
+  std::vector<std::string> values;
+  ASSERT_TRUE(conf->GetConfInt("test_int", &test_int));
+  ASSERT_EQ(test_int, 13985);
+  ASSERT_TRUE(conf->GetConfStr("test_str", &test_str));
+  ASSERT_EQ(test_str, "kdkbixk");
+  ASSERT_TRUE(conf->GetConfBool("test_bool", &test_bool));
+  ASSERT_EQ(test_bool, false);
+  ASSERT_TRUE(conf->GetConfStrVec("test_vec", &values));
+  ASSERT_EQ(values[0], "one");
+  ASSERT_EQ(values[1], "two");
+  ASSERT_EQ(values[2], "three");
+}
+
+}  // namespace pstd
diff --git a/tools/pika_migrate/src/pstd/tests/slash_coding_test.cc b/tools/pika_migrate/src/pstd/tests/slash_coding_test.cc
new file mode 100644
index 0000000000..1ddbedd341
--- /dev/null
+++ b/tools/pika_migrate/src/pstd/tests/slash_coding_test.cc
@@ -0,0 +1,199 @@
+// Copyright (c) 2015-present, Qihoo, Inc. All rights reserved.
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree. An additional grant
+// of patent rights can be found in the PATENTS file in the same directory.
+
+// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file. See the AUTHORS file for names of contributors.
+
+#include <vector>
+
+#include "gtest/gtest.h"
+#include "pstd/include/pstd_coding.h"
+#include "pstd_status.h"
+
+namespace pstd {
+
+class Coding : public ::testing::Test {
+ public:
+  void ASSERT_OK(const Status& s) { ASSERT_TRUE(s.ok()); }
+};
+
+TEST_F(Coding, Fixed32) {
+  std::string s;
+  for (uint32_t v = 0; v < 100000; v++) {
+    PutFixed32(&s, v);
+  }
+
+  const char* p = s.data();
+  for (uint32_t v = 0; v < 100000; v++) {
+    uint32_t actual = DecodeFixed32(p);
+    ASSERT_EQ(v, actual);
+    p += sizeof(uint32_t);
+  }
+}
+
+TEST_F(Coding, Fixed64) {
+  std::string s;
+  for (int power = 0; power <= 63; power++) {
+    uint64_t v = static_cast<uint64_t>(1) << power;
+    PutFixed64(&s, v - 1);
+    PutFixed64(&s, v + 0);
+    PutFixed64(&s, v + 1);
+  }
+
+  const char* p = s.data();
+  for (int power = 0; power <= 63; power++) {
+    uint64_t v = static_cast<uint64_t>(1) << power;
+    uint64_t actual;
+    actual = DecodeFixed64(p);
+    ASSERT_EQ(v - 1, actual);
+    p += sizeof(uint64_t);
+
+    actual = DecodeFixed64(p);
+    ASSERT_EQ(v + 0, actual);
+    p += sizeof(uint64_t);
+
+    actual = DecodeFixed64(p);
+    ASSERT_EQ(v + 1, actual);
+    p += sizeof(uint64_t);
+  }
+}
+
+// Test that encoding routines generate little-endian encodings
+TEST_F(Coding, EncodingOutput) {
+  std::string dst;
+  PutFixed32(&dst, 0x04030201);
+  ASSERT_EQ(4, dst.size());
+  ASSERT_EQ(0x01, static_cast<int>(dst[0]));
+  ASSERT_EQ(0x02, static_cast<int>(dst[1]));
+  ASSERT_EQ(0x03, static_cast<int>(dst[2]));
+  ASSERT_EQ(0x04, static_cast<int>(dst[3]));
+
+  dst.clear();
+  PutFixed64(&dst, 0x0807060504030201ULL);
+  ASSERT_EQ(8, dst.size());
+  ASSERT_EQ(0x01, static_cast<int>(dst[0]));
+  ASSERT_EQ(0x02, static_cast<int>(dst[1]));
+  ASSERT_EQ(0x03, static_cast<int>(dst[2]));
+  ASSERT_EQ(0x04, static_cast<int>(dst[3]));
+  ASSERT_EQ(0x05, static_cast<int>(dst[4]));
+  ASSERT_EQ(0x06, static_cast<int>(dst[5]));
+  ASSERT_EQ(0x07, static_cast<int>(dst[6]));
+  ASSERT_EQ(0x08, static_cast<int>(dst[7]));
+}
+
+TEST_F(Coding, Varint32) {
+  std::string s;
+  for (uint32_t i = 0; i < (32 * 32); i++) {
+    uint32_t v = (i / 32) << (i % 32);
+    PutVarint32(&s, v);
+  }
+
+  const char* p = s.data();
+  const char* limit = p + s.size();
+  for (uint32_t i = 0; i < (32 * 32); i++) {
+    uint32_t expected = (i / 32) << (i % 32);
+    uint32_t actual;
+    const char* start = p;
+    p = GetVarint32Ptr(p, limit, &actual);
+    ASSERT_TRUE(p != nullptr);
+    ASSERT_EQ(expected, actual);
+    ASSERT_EQ(VarintLength(actual), p - start);
+  }
+  ASSERT_EQ(p, s.data() + s.size());
+}
+
+TEST_F(Coding, Varint64) {
+  // Construct the list of values to check
+  std::vector<uint64_t> values;
+  // Some special values
+  values.push_back(0);
+  values.push_back(100);
+  values.push_back(~static_cast<uint64_t>(0));
+  values.push_back(~static_cast<uint64_t>(0) - 1);
+  for (uint32_t k = 0; k < 64; k++) {
+    // Test values near powers of two
+    const uint64_t power = 1ULL << k;
+    values.push_back(power);
+    values.push_back(power - 1);
+    values.push_back(power + 1);
+  }
+
+  std::string s;
+  for (auto value : values) {
+    PutVarint64(&s, value);
+  }
+
+  const char* p = s.data();
+  const char* limit = p + s.size();
+  for (auto& value : values) {
+    ASSERT_TRUE(p < limit);
+    uint64_t actual;
+    const char* start = p;
+    p = GetVarint64Ptr(p, limit, &actual);
+    ASSERT_TRUE(p != nullptr);
+    ASSERT_EQ(value, actual);
+    ASSERT_EQ(VarintLength(actual), p - start);
+  }
+  ASSERT_EQ(p, limit);
+}
+
+TEST_F(Coding, Varint32Overflow) {
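+  // Five bytes with the continuation (high) bit set plus a sixth byte
+  // encode more than 32 bits of payload, so decoding must fail.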
+  uint32_t result;
+  std::string input("\x81\x82\x83\x84\x85\x11");
+  ASSERT_TRUE(GetVarint32Ptr(input.data(), input.data() + input.size(), &result) == nullptr);
+}
+
+TEST_F(Coding, Varint32Truncation) {
+  uint32_t large_value = (1U << 31) + 100;
+  std::string s;
+  PutVarint32(&s, large_value);
+  uint32_t result;
+  for (size_t len = 0; len < s.size() - 1; len++) {
+    ASSERT_TRUE(GetVarint32Ptr(s.data(), s.data() + len, &result) == nullptr);
+  }
+  ASSERT_TRUE(GetVarint32Ptr(s.data(), s.data() + s.size(), &result) != nullptr);
+  ASSERT_EQ(large_value, result);
+}
+
+TEST_F(Coding, Varint64Overflow) {
+  uint64_t result;
+  std::string input("\x81\x82\x83\x84\x85\x81\x82\x83\x84\x85\x11");
+  ASSERT_TRUE(GetVarint64Ptr(input.data(), input.data() + input.size(), &result) == nullptr);
+}
+
+TEST_F(Coding, Varint64Truncation) {
+  uint64_t large_value = (1ULL << 63) + 100ULL;
+  std::string s;
+  PutVarint64(&s, large_value);
+  uint64_t result;
+  for (size_t len = 0; len < s.size() - 1; len++) {
+    ASSERT_TRUE(GetVarint64Ptr(s.data(), s.data() + len, &result) == nullptr);
+  }
+  ASSERT_TRUE(GetVarint64Ptr(s.data(), s.data() + s.size(), &result) != nullptr);
+  ASSERT_EQ(large_value, result);
+}
+
+TEST_F(Coding, Strings) {
+  std::string s;
+  PutLengthPrefixedString(&s, "");
+  PutLengthPrefixedString(&s, "foo");
+  PutLengthPrefixedString(&s, "bar");
+  PutLengthPrefixedString(&s, std::string(200, 'x'));
+
+  Slice input(s);
+  Slice v;
+  ASSERT_TRUE(GetLengthPrefixedSlice(&input, &v));
+  ASSERT_EQ("", v.ToString());
+  ASSERT_TRUE(GetLengthPrefixedSlice(&input, &v));
+  ASSERT_EQ("foo", v.ToString());
+  ASSERT_TRUE(GetLengthPrefixedSlice(&input, &v));
+  ASSERT_EQ("bar", v.ToString());
+  ASSERT_TRUE(GetLengthPrefixedSlice(&input, &v));
+  ASSERT_EQ(std::string(200, 'x'), v.ToString());
+  ASSERT_EQ("", input.ToString());
+}
+
+}  // namespace pstd
diff --git a/tools/pika_migrate/src/pstd/tests/slash_env_test.cc b/tools/pika_migrate/src/pstd/tests/slash_env_test.cc
new file mode 100644
index 0000000000..e2d5ca4660
--- /dev/null
+++ b/tools/pika_migrate/src/pstd/tests/slash_env_test.cc
@@ -0,0 +1,33 @@
+// Copyright (c) 2015-present, Qihoo, Inc. All rights reserved.
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree. An additional grant
+// of patent rights can be found in the PATENTS file in the same directory.
+
+// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file. See the AUTHORS file for names of contributors.
+
+#include "gtest/gtest.h"
+#include "pstd/include/env.h"
+#include "pstd/include/testutil.h"
+
+namespace pstd {
+
+class EnvTest : public ::testing::Test {};
+
+TEST_F(EnvTest, SetMaxFileDescriptorNum) {
+  ASSERT_EQ(0, SetMaxFileDescriptorNum(10));
+  // ASSERT_NE(0, SetMaxFileDescriptorNum(2147483647));
+}
+
+TEST_F(EnvTest, FileOps) {
+  std::string tmp_dir;
+  GetTestDirectory(&tmp_dir);
+
+  ASSERT_TRUE(DeleteDirIfExist(tmp_dir));
+  ASSERT_TRUE(!FileExists(tmp_dir));
+  ASSERT_EQ(-1, DeleteDir(tmp_dir));
+  // ASSERT_NE(0, SetMaxFileDescriptorNum(2147483647));
+}
+
+}  // namespace pstd
diff --git a/tools/pika_migrate/src/pstd/tests/slash_string_test.cc b/tools/pika_migrate/src/pstd/tests/slash_string_test.cc
new file mode 100644
index 0000000000..01e428a2e0
--- /dev/null
+++ b/tools/pika_migrate/src/pstd/tests/slash_string_test.cc
@@ -0,0 +1,130 @@
+// Copyright (c) 2015-present, Qihoo, Inc. All rights reserved.
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree. An additional grant
+// of patent rights can be found in the PATENTS file in the same directory.
+
+// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file. See the AUTHORS file for names of contributors.
+
+#include <climits>
+#include <cstring>
+
+#include "gtest/gtest.h"
+#include "pstd/include/pstd_string.h"
+
+namespace pstd {
+
+class StringTest : public ::testing::Test {};
+
+TEST_F(StringTest, StringTrim) {
+  ASSERT_EQ(StringTrim(" computer "), "computer");
+  ASSERT_EQ(StringTrim(" comp uter "), "comp uter");
+  ASSERT_EQ(StringTrim(" \n computer \n ", "\n "), "computer");
+  ASSERT_EQ(StringTrim(" \n", "\r\n "), "");
+}
+
+TEST_F(StringTest, ParseIpPort) {
+  std::string ip;
+  int port;
+  ASSERT_TRUE(ParseIpPortString("192.168.1.1:9221", ip, port));
+  ASSERT_EQ(ip, "192.168.1.1");
+  ASSERT_EQ(port, 9221);
+}
+
+TEST_F(StringTest, test_string2ll) {
+  char buf[32];
+  long long v;
+
+  /* May not start with +. */
+  strcpy(buf, "+1");
+  ASSERT_EQ(string2int(buf, strlen(buf), &v), 0);
+
+  /* Leading space. */
+  strcpy(buf, " 1");
+  ASSERT_EQ(string2int(buf, strlen(buf), &v), 0);
+
+  /* Trailing space. */
+  strcpy(buf, "1 ");
+  ASSERT_EQ(string2int(buf, strlen(buf), &v), 0);
+
+  strcpy(buf, "-1");
+  ASSERT_EQ(string2int(buf, strlen(buf), &v), 1);
+  ASSERT_EQ(v, -1);
+
+  strcpy(buf, "0");
+  ASSERT_EQ(string2int(buf, strlen(buf), &v), 1);
+  ASSERT_EQ(v, 0);
+
+  strcpy(buf, "1");
+  ASSERT_EQ(string2int(buf, strlen(buf), &v), 1);
+  ASSERT_EQ(v, 1);
+
+  strcpy(buf, "99");
+  ASSERT_EQ(string2int(buf, strlen(buf), &v), 1);
+  ASSERT_EQ(v, 99);
+
+  strcpy(buf, "-99");
+  ASSERT_EQ(string2int(buf, strlen(buf), &v), 1);
+  ASSERT_EQ(v, -99);
+
+  strcpy(buf, "-9223372036854775808");
+  ASSERT_EQ(string2int(buf, strlen(buf), &v), 1);
+  ASSERT_EQ(v, LLONG_MIN);
+
+  strcpy(buf, "-9223372036854775809"); /* overflow */
+  ASSERT_EQ(string2int(buf, strlen(buf), &v), 0);
+
+  strcpy(buf, "9223372036854775807");
+  ASSERT_EQ(string2int(buf, strlen(buf), &v), 1);
+  ASSERT_EQ(v, LLONG_MAX);
+
+  strcpy(buf, "9223372036854775808"); /* overflow */
+  ASSERT_EQ(string2int(buf, strlen(buf), &v), 0);
+}
+
+TEST_F(StringTest, test_string2l) {
+  char buf[32];
+  long v;
+
+  /* May not start with +.
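+   * (string2int is stricter than strtol(3): a leading '+' is rejected.)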
+   */
+  strcpy(buf, "+1");
+  ASSERT_EQ(string2int(buf, strlen(buf), &v), 0);
+
+  strcpy(buf, "-1");
+  ASSERT_EQ(string2int(buf, strlen(buf), &v), 1);
+  ASSERT_EQ(v, -1);
+
+  strcpy(buf, "0");
+  ASSERT_EQ(string2int(buf, strlen(buf), &v), 1);
+  ASSERT_EQ(v, 0);
+
+  strcpy(buf, "1");
+  ASSERT_EQ(string2int(buf, strlen(buf), &v), 1);
+  ASSERT_EQ(v, 1);
+
+  strcpy(buf, "99");
+  ASSERT_EQ(string2int(buf, strlen(buf), &v), 1);
+  ASSERT_EQ(v, 99);
+
+  strcpy(buf, "-99");
+  ASSERT_EQ(string2int(buf, strlen(buf), &v), 1);
+  ASSERT_EQ(v, -99);
+
+#if LONG_MAX != LLONG_MAX
+  strcpy(buf, "-2147483648");
+  ASSERT_EQ(string2int(buf, strlen(buf), &v), 1);
+  ASSERT_EQ(v, LONG_MIN);
+
+  strcpy(buf, "-2147483649"); /* overflow */
+  ASSERT_EQ(string2int(buf, strlen(buf), &v), 0);
+
+  strcpy(buf, "2147483647");
+  ASSERT_EQ(string2int(buf, strlen(buf), &v), 1);
+  ASSERT_EQ(v, LONG_MAX);
+
+  strcpy(buf, "2147483648"); /* overflow */
+  ASSERT_EQ(string2int(buf, strlen(buf), &v), 0);
+#endif
+}
+
+}  // namespace pstd
diff --git a/tools/pika_migrate/src/rsync_client.cc b/tools/pika_migrate/src/rsync_client.cc
new file mode 100644
index 0000000000..61fab0e0d1
--- /dev/null
+++ b/tools/pika_migrate/src/rsync_client.cc
@@ -0,0 +1,526 @@
+// Copyright (c) 2023-present, Qihoo, Inc. All rights reserved.
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree. An additional grant
+// of patent rights can be found in the PATENTS file in the same directory.
+
+#include <algorithm>
+#include <fstream>
+
+#include "rocksdb/env.h"
+#include "pstd/include/pstd_defer.h"
+#include "include/pika_server.h"
+#include "include/rsync_client.h"
+
+using namespace net;
+using namespace pstd;
+using namespace RsyncService;
+
+extern PikaServer* g_pika_server;
+
+const int kFlushIntervalUs = 10 * 1000 * 1000;
+const int kBytesPerRequest = 4 << 20;
+const int kThrottleCheckCycle = 10;
+
+namespace rsync {
+RsyncClient::RsyncClient(const std::string& dir, const std::string& db_name)
+    : snapshot_uuid_(""), dir_(dir), db_name_(db_name),
+      state_(IDLE), max_retries_(10), master_ip_(""), master_port_(0),
+      parallel_num_(g_pika_conf->max_rsync_parallel_num()) {
+  wo_mgr_.reset(new WaitObjectManager());
+  client_thread_ = std::make_unique<RsyncClientThread>(3000, 60, wo_mgr_.get());
+  client_thread_->set_thread_name("RsyncClientThread");
+  work_threads_.resize(GetParallelNum());
+  finished_work_cnt_.store(0);
+}
+
+void RsyncClient::Copy(const std::set<std::string>& file_set, int index) {
+  Status s = Status::OK();
+  for (const auto& file : file_set) {
+    while (state_.load() == RUNNING) {
+      LOG(INFO) << "copy remote file, filename: " << file;
+      s = CopyRemoteFile(file, index);
+      if (!s.ok()) {
+        LOG(WARNING) << "copy remote file failed, msg: " << s.ToString();
+        continue;
+      }
+      break;
+    }
+    if (state_.load() != RUNNING) {
+      break;
+    }
+  }
+  if (!error_stopped_.load()) {
+    LOG(INFO) << "work_thread index: " << index << " copy remote files done";
+  }
+  finished_work_cnt_.fetch_add(1);
+  cond_.notify_all();
+}
+
+bool RsyncClient::Init() {
+  if (state_ != IDLE) {
+    LOG(WARNING) << "State should be IDLE when Init";
+    return false;
+  }
+  master_ip_ = g_pika_server->master_ip();
+  master_port_ = g_pika_server->master_port() + kPortShiftRsync2;
+  file_set_.clear();
+  client_thread_->StartThread();
+  bool ret = ComparisonUpdate();
+  if (!ret) {
+    LOG(WARNING) << "RsyncClient recover failed";
+    client_thread_->StopThread();
+    state_.store(IDLE);
+    return false;
+  }
+  finished_work_cnt_.store(0);
+  LOG(INFO) << "RsyncClient recover success";
success"; + return true; +} + +void* RsyncClient::ThreadMain() { + if (file_set_.empty()) { + LOG(INFO) << "No remote files need copy, RsyncClient exit and going to delete dir:" << dir_; + DeleteDirIfExist(dir_); + state_.store(STOP); + all_worker_exited_.store(true); + return nullptr; + } + + Status s = Status::OK(); + LOG(INFO) << "RsyncClient begin to copy remote files"; + std::vector > file_vec(GetParallelNum()); + int index = 0; + for (const auto& file : file_set_) { + file_vec[index++ % GetParallelNum()].insert(file); + } + all_worker_exited_.store(false); + for (int i = 0; i < GetParallelNum(); i++) { + work_threads_[i] = std::move(std::thread(&RsyncClient::Copy, this, file_vec[i], i)); + } + + std::string meta_file_path = GetLocalMetaFilePath(); + std::ofstream outfile; + outfile.open(meta_file_path, std::ios_base::app); + if (!outfile.is_open()) { + LOG(ERROR) << "unable to open meta file " << meta_file_path << ", error:" << strerror(errno); + error_stopped_.store(true); + state_.store(STOP); + } + DEFER { + outfile.close(); + }; + + std::string meta_rep; + uint64_t start_time = pstd::NowMicros(); + + while (state_.load() == RUNNING) { + uint64_t elapse = pstd::NowMicros() - start_time; + if (elapse < kFlushIntervalUs) { + int wait_for_us = kFlushIntervalUs - elapse; + std::unique_lock lock(mu_); + cond_.wait_for(lock, std::chrono::microseconds(wait_for_us)); + } + + if (state_.load() != RUNNING) { + break; + } + + start_time = pstd::NowMicros(); + std::map files_map; + { + std::lock_guard guard(mu_); + files_map.swap(meta_table_); + } + for (const auto& file : files_map) { + meta_rep.append(file.first + ":" + file.second); + meta_rep.append("\n"); + } + outfile << meta_rep; + outfile.flush(); + meta_rep.clear(); + + if (finished_work_cnt_.load() == GetParallelNum()) { + break; + } + } + + for (int i = 0; i < GetParallelNum(); i++) { + work_threads_[i].join(); + } + finished_work_cnt_.store(0); + state_.store(STOP); + if (!error_stopped_.load()) { + LOG(INFO) << "RsyncClient copy remote files done"; + } else { + if (DeleteDirIfExist(dir_)) { + //the dir_ doesn't not exist OR it's existing but successfully deleted + LOG(ERROR) << "RsyncClient stopped with errors, deleted:" << dir_; + } else { + //the dir_ exists but failed to delete + LOG(ERROR) << "RsyncClient stopped with errors, but failed to delete " << dir_ << " when cleaning"; + } + } + all_worker_exited_.store(true); + return nullptr; +} + +Status RsyncClient::CopyRemoteFile(const std::string& filename, int index) { + const std::string filepath = dir_ + "/" + filename; + std::unique_ptr writer(new RsyncWriter(filepath)); + Status s = Status::OK(); + size_t offset = 0; + int retries = 0; + + DEFER { + if (writer) { + writer->Close(); + writer.reset(); + } + if (!s.ok()) { + DeleteFile(filepath); + } + }; + + while (retries < max_retries_) { + if (state_.load() != RUNNING) { + break; + } + size_t copy_file_begin_time = pstd::NowMicros(); + size_t count = Throttle::GetInstance().ThrottledByThroughput(kBytesPerRequest); + if (count == 0) { + std::this_thread::sleep_for(std::chrono::milliseconds(1000 / kThrottleCheckCycle)); + continue; + } + RsyncRequest request; + request.set_reader_index(index); + request.set_type(kRsyncFile); + request.set_db_name(db_name_); + /* + * Since the slot field is written in protobuffer, + * slot_id is set to the default value 0 for compatibility + * with older versions, but slot_id is not used + */ + request.set_slot_id(0); + FileRequest* file_req = request.mutable_file_req(); + 
+    file_req->set_filename(filename);
+    file_req->set_offset(offset);
+    file_req->set_count(count);
+
+    std::string to_send;
+    request.SerializeToString(&to_send);
+    WaitObject* wo = wo_mgr_->UpdateWaitObject(index, filename, kRsyncFile, offset);
+    s = client_thread_->Write(master_ip_, master_port_, to_send);
+    if (!s.ok()) {
+      LOG(WARNING) << "send rsync request failed";
+      continue;
+    }
+
+    std::shared_ptr<RsyncResponse> resp = nullptr;
+    s = wo->Wait(resp);
+    if (s.IsTimeout() || resp == nullptr) {
+      LOG(WARNING) << s.ToString();
+      retries++;
+      continue;
+    }
+
+    if (resp->code() != RsyncService::kOk) {
+      return Status::IOError("kRsyncFile request failed, master response error code");
+    }
+
+    size_t ret_count = resp->file_resp().count();
+    size_t elapse_time_us = pstd::NowMicros() - copy_file_begin_time;
+    Throttle::GetInstance().ReturnUnusedThroughput(count, ret_count, elapse_time_us);
+
+    if (resp->snapshot_uuid() != snapshot_uuid_) {
+      LOG(WARNING) << "receive newer dump, reset state to STOP, local_snapshot_uuid:"
+                   << snapshot_uuid_ << ", remote snapshot uuid: " << resp->snapshot_uuid();
+      state_.store(STOP);
+      error_stopped_.store(true);
+      return s;
+    }
+
+    s = writer->Write((uint64_t)offset, ret_count, resp->file_resp().data().c_str());
+    if (!s.ok()) {
+      LOG(WARNING) << "rsync client write file error";
+      break;
+    }
+
+    offset += resp->file_resp().count();
+    if (resp->file_resp().eof()) {
+      s = writer->Fsync();
+      if (!s.ok()) {
+        return s;
+      }
+      mu_.lock();
+      meta_table_[filename] = "";
+      mu_.unlock();
+      break;
+    }
+    retries = 0;
+  }
+
+  return s;
+}
+
+Status RsyncClient::Start() {
+  StartThread();
+  return Status::OK();
+}
+
+Status RsyncClient::Stop() {
+  if (state_ == IDLE) {
+    return Status::OK();
+  }
+  LOG(WARNING) << "RsyncClient stop ...";
+  state_ = STOP;
+  cond_.notify_all();
+  StopThread();
+  client_thread_->StopThread();
+  JoinThread();
+  client_thread_->JoinThread();
+  state_ = IDLE;
+  return Status::OK();
+}
+
+bool RsyncClient::ComparisonUpdate() {
+  std::string local_snapshot_uuid;
+  std::string remote_snapshot_uuid;
+  std::set<std::string> local_file_set;
+  std::set<std::string> remote_file_set;
+  std::map<std::string, std::string> local_file_map;
+
+  Status s = PullRemoteMeta(&remote_snapshot_uuid, &remote_file_set);
+  if (!s.ok()) {
+    LOG(WARNING) << "copy remote meta failed! error: " << s.ToString();
error:" << s.ToString(); + return false; + } + + s = LoadLocalMeta(&local_snapshot_uuid, &local_file_map); + if (!s.ok()) { + LOG(WARNING) << "load local meta failed"; + return false; + } + for (auto const& file : local_file_map) { + local_file_set.insert(file.first); + } + + std::set expired_files; + if (remote_snapshot_uuid != local_snapshot_uuid) { + snapshot_uuid_ = remote_snapshot_uuid; + file_set_ = remote_file_set; + expired_files = local_file_set; + } else { + std::set newly_files; + set_difference(remote_file_set.begin(), remote_file_set.end(), + local_file_set.begin(), local_file_set.end(), + inserter(newly_files, newly_files.begin())); + set_difference(local_file_set.begin(), local_file_set.end(), + remote_file_set.begin(), remote_file_set.end(), + inserter(expired_files, expired_files.begin())); + file_set_.insert(newly_files.begin(), newly_files.end()); + } + + s = CleanUpExpiredFiles(local_snapshot_uuid != remote_snapshot_uuid, expired_files); + if (!s.ok()) { + LOG(WARNING) << "clean up expired files failed"; + return false; + } + s = UpdateLocalMeta(snapshot_uuid_, expired_files, &local_file_map); + if (!s.ok()) { + LOG(WARNING) << "update local meta failed"; + return false; + } + + state_.store(RUNNING); + error_stopped_.store(false); + LOG(INFO) << "copy meta data done, db name: " << db_name_ + << " snapshot_uuid: " << snapshot_uuid_ + << " file count: " << file_set_.size() + << " expired file count: " << expired_files.size() + << " local file count: " << local_file_set.size() + << " remote file count: " << remote_file_set.size() + << " remote snapshot_uuid: " << remote_snapshot_uuid + << " local snapshot_uuid: " << local_snapshot_uuid + << " file_set_: " << file_set_.size(); + for_each(file_set_.begin(), file_set_.end(), + [](auto& file) {LOG(WARNING) << "file_set: " << file;}); + return true; +} + +Status RsyncClient::PullRemoteMeta(std::string* snapshot_uuid, std::set* file_set) { + Status s; + int retries = 0; + RsyncRequest request; + request.set_reader_index(0); + request.set_db_name(db_name_); + /* + * Since the slot field is written in protobuffer, + * slot_id is set to the default value 0 for compatibility + * with older versions, but slot_id is not used + */ + request.set_slot_id(0); + request.set_type(kRsyncMeta); + std::string to_send; + request.SerializeToString(&to_send); + while (retries < max_retries_) { + WaitObject* wo = wo_mgr_->UpdateWaitObject(0, "", kRsyncMeta, kInvalidOffset); + s = client_thread_->Write(master_ip_, master_port_, to_send); + if (!s.ok()) { + retries++; + } + std::shared_ptr resp; + s = wo->Wait(resp); + if (s.IsTimeout()) { + LOG(WARNING) << "rsync PullRemoteMeta request timeout, " + << "retry times: " << retries; + retries++; + continue; + } + + if (resp.get() == nullptr || resp->code() != RsyncService::kOk) { + s = Status::IOError("kRsyncMeta request failed! 
+Status RsyncClient::LoadLocalMeta(std::string* snapshot_uuid, std::map<std::string, std::string>* file_map) {
+  std::string meta_file_path = GetLocalMetaFilePath();
+  if (!FileExists(meta_file_path)) {
+    LOG(WARNING) << kDumpMetaFileName << " not exist";
+    return Status::OK();
+  }
+
+  FILE* fp;
+  char* line = nullptr;
+  size_t len = 0;
+  ssize_t read = 0;
+  int32_t line_num = 0;
+
+  std::atomic_int8_t retry_times = 5;
+
+  while (retry_times > 0) {
+    retry_times--;
+    fp = fopen(meta_file_path.c_str(), "r");
+    if (fp == nullptr) {
+      LOG(WARNING) << "open meta file failed, meta_path: " << dir_;
+    } else {
+      break;
+    }
+  }
+
+  // if the file cannot be read from disk, use the remote file directly
+  if (fp == nullptr) {
+    LOG(WARNING) << "open meta file failed, meta_path: " << meta_file_path << ", retry times: " << retry_times;
+    return Status::IOError("open meta file failed, dir: ", meta_file_path);
+  }
+
+  while ((read = getline(&line, &len, fp)) != -1) {
+    std::string str(line);
+    std::string::size_type pos;
+    while ((pos = str.find("\r")) != std::string::npos) {
+      str.erase(pos, 1);
+    }
+    while ((pos = str.find("\n")) != std::string::npos) {
+      str.erase(pos, 1);
+    }
+
+    if (str.empty()) {
+      continue;
+    }
+
+    if (line_num == 0) {
+      *snapshot_uuid = str.erase(0, kUuidPrefix.size());
+    } else {
+      if ((pos = str.find(":")) != std::string::npos) {
+        std::string filename = str.substr(0, pos);
+        std::string checksum = str.substr(pos + 1, str.size());
+        (*file_map)[filename] = checksum;
+      }
+    }
+
+    line_num++;
+  }
+  fclose(fp);
+  return Status::OK();
+}
+
+Status RsyncClient::CleanUpExpiredFiles(bool need_reset_path, const std::set<std::string>& files) {
+  if (need_reset_path) {
+    std::string db_path = dir_ + (dir_.back() == '/' ? "" : "/");
+    pstd::DeleteDirIfExist(db_path);
+    int db_instance_num = g_pika_conf->db_instance_num();
+    for (int idx = 0; idx < db_instance_num; idx++) {
+      pstd::CreatePath(db_path + std::to_string(idx));
+    }
+    return Status::OK();
+  }
+
+  std::string db_path = dir_ + (dir_.back() == '/' ? "" : "/");
+  for (const auto& file : files) {
+    bool b = pstd::DeleteDirIfExist(db_path + file);
+    if (!b) {
+      LOG(WARNING) << "delete file failed, file: " << file;
+      return Status::IOError("delete file failed");
+    }
+  }
+  return Status::OK();
+}
+
+Status RsyncClient::UpdateLocalMeta(const std::string& snapshot_uuid, const std::set<std::string>& expired_files,
+                                    std::map<std::string, std::string>* localFileMap) {
+  if (localFileMap->empty()) {
+    return Status::OK();
+  }
+
+  for (const auto& item : expired_files) {
+    localFileMap->erase(item);
+  }
+
+  std::string meta_file_path = GetLocalMetaFilePath();
+  pstd::DeleteFile(meta_file_path);
+
+  std::unique_ptr<pstd::WritableFile> file;
+  pstd::Status s = pstd::NewWritableFile(meta_file_path, file);
+  if (!s.ok()) {
+    LOG(WARNING) << "create meta file failed, meta_file_path: " << meta_file_path;
+    return s;
+  }
+  file->Append(kUuidPrefix + snapshot_uuid + "\n");
+
+  for (const auto& item : *localFileMap) {
+    std::string line = item.first + ":" + item.second + "\n";
+    file->Append(line);
+  }
+  s = file->Close();
+  if (!s.ok()) {
+    LOG(WARNING) << "flush meta file failed, meta_file_path: " << meta_file_path;
+    return s;
+  }
+  return Status::OK();
+}
+
+std::string RsyncClient::GetLocalMetaFilePath() {
+  std::string db_path = dir_ + (dir_.back() == '/' ? "" : "/");
+  return db_path + kDumpMetaFileName;
+}
+
+int RsyncClient::GetParallelNum() {
+  return parallel_num_;
+}
+
+}  // end namespace rsync
+
diff --git a/tools/pika_migrate/src/rsync_client_thread.cc b/tools/pika_migrate/src/rsync_client_thread.cc
new file mode 100644
index 0000000000..8e93a4c69b
--- /dev/null
+++ b/tools/pika_migrate/src/rsync_client_thread.cc
@@ -0,0 +1,45 @@
+// Copyright (c) 2023-present, Qihoo, Inc. All rights reserved.
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree. An additional grant
+// of patent rights can be found in the PATENTS file in the same directory.
+
+#include "include/rsync_client_thread.h"
+#include "include/rsync_client.h"
+#include "include/pika_define.h"
+
+using namespace pstd;
+using namespace net;
+using namespace RsyncService;
+
+namespace rsync {
+class RsyncClient;
+RsyncClientConn::RsyncClientConn(int fd, const std::string& ip_port,
+                                 net::Thread* thread, void* worker_specific_data, NetMultiplexer* mpx)
+    : PbConn(fd, ip_port, thread, mpx), cb_handler_(worker_specific_data) {}
+
+RsyncClientConn::~RsyncClientConn() {}
+
+// The client side is asynchronous: CopyRemoteFile/PullRemoteMeta park on a
+// WaitObject, and DealMessage (running on the client thread) hands every
+// parsed RsyncResponse to WaitObjectManager::WakeUp, which matches it to the
+// parked waiter and wakes it.
+int RsyncClientConn::DealMessage() {
+  RsyncResponse* response = new RsyncResponse();
+  ::google::protobuf::io::ArrayInputStream input(rbuf_ + cur_pos_ - header_len_, header_len_);
+  ::google::protobuf::io::CodedInputStream decoder(&input);
+  decoder.SetTotalBytesLimit(PIKA_MAX_CONN_RBUF);
+  bool success = response->ParseFromCodedStream(&decoder) && decoder.ConsumedEntireMessage();
+  if (!success) {
+    delete response;
+    LOG(WARNING) << "ParseFromArray FAILED! msg_len: " << header_len_;
+    return -1;
+  }
+  WaitObjectManager* handler = (WaitObjectManager*)cb_handler_;
+  handler->WakeUp(response);
+  return 0;
+}
+
+RsyncClientThread::RsyncClientThread(int cron_interval, int keepalive_timeout, void* scheduler)
+    : ClientThread(&conn_factory_, cron_interval, keepalive_timeout, &handle_, nullptr),
+      conn_factory_(scheduler) {}
+
+RsyncClientThread::~RsyncClientThread() {}
+}  // end namespace rsync
+
diff --git a/tools/pika_migrate/src/rsync_server.cc b/tools/pika_migrate/src/rsync_server.cc
new file mode 100644
index 0000000000..5696719980
--- /dev/null
+++ b/tools/pika_migrate/src/rsync_server.cc
@@ -0,0 +1,249 @@
+// Copyright (c) 2023-present, Qihoo, Inc. All rights reserved.
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree. An additional grant
+// of patent rights can be found in the PATENTS file in the same directory.
+
+#include <glog/logging.h>
+
+#include <algorithm>
+#include <memory>
+
+#include "pstd_hash.h"
+#include "include/pika_server.h"
+#include "include/rsync_server.h"
+#include "pstd/include/pstd_defer.h"
+
+extern PikaServer* g_pika_server;
+namespace rsync {
+
+using namespace net;
+using namespace pstd;
+using namespace RsyncService;
+
+void RsyncWriteResp(RsyncService::RsyncResponse& response, std::shared_ptr<net::PbConn> conn) {
+  std::string reply_str;
+  if (!response.SerializeToString(&reply_str) || (conn->WriteResp(reply_str) != 0)) {
+    LOG(WARNING) << "Process FileRsync request serialization failed";
+    conn->NotifyClose();
+    return;
+  }
+  conn->NotifyWrite();
+}
+
+RsyncServer::RsyncServer(const std::set<std::string>& ips, const int port) {
+  work_thread_ = std::make_unique<net::ThreadPool>(2, 100000, "RsyncServerWork");
+  rsync_server_thread_ = std::make_unique<RsyncServerThread>(ips, port, 1 * 1000, this);
+}
+
+RsyncServer::~RsyncServer() {
+  // TODO: handle destroy
+  LOG(INFO) << "Rsync server destroyed";
+}
+
+void RsyncServer::Schedule(net::TaskFunc func, void* arg) {
+  work_thread_->Schedule(func, arg);
+}
+
+int RsyncServer::Start() {
+  LOG(INFO) << "start RsyncServer ...";
+  rsync_server_thread_->set_thread_name("RsyncServerThread");
+  int res = rsync_server_thread_->StartThread();
+  if (res != net::kSuccess) {
+    LOG(FATAL) << "Start rsync Server Thread Error. ret_code: " << res << " message: "
+               << (res == net::kBindError ? ": bind port conflict" : ": other error");
+  }
+  res = work_thread_->start_thread_pool();
+  if (res != net::kSuccess) {
+    LOG(FATAL) << "Start rsync Server ThreadPool Error, ret_code: " << res << " message: "
+               << (res == net::kCreateThreadError ? ": create thread error " : ": other error");
+  }
+  LOG(INFO) << "RsyncServer started ...";
+  return res;
+}
+
+int RsyncServer::Stop() {
+  LOG(INFO) << "stop RsyncServer ...";
+  work_thread_->stop_thread_pool();
+  rsync_server_thread_->StopThread();
+  return 0;
+}
+
+RsyncServerConn::RsyncServerConn(int connfd, const std::string& ip_port, Thread* thread,
+                                 void* worker_specific_data, NetMultiplexer* mpx)
+    : PbConn(connfd, ip_port, thread, mpx), data_(worker_specific_data) {
+  readers_.resize(kMaxRsyncParallelNum);
+  for (int i = 0; i < kMaxRsyncParallelNum; i++) {
+    readers_[i].reset(new RsyncReader());
+  }
+}
+
+RsyncServerConn::~RsyncServerConn() {
+  std::lock_guard guard(mu_);
+  for (size_t i = 0; i < readers_.size(); i++) {
+    readers_[i].reset();
+  }
+}
+
+int RsyncServerConn::DealMessage() {
+  std::shared_ptr<RsyncService::RsyncRequest> req = std::make_shared<RsyncService::RsyncRequest>();
+  bool parse_res = req->ParseFromArray(rbuf_ + cur_pos_ - header_len_, header_len_);
+  if (!parse_res) {
+    LOG(WARNING) << "Pika rsync server connection pb parse error.";
+    return -1;
+  }
+  switch (req->type()) {
+    case RsyncService::kRsyncMeta: {
+      auto task_arg =
+          new RsyncServerTaskArg(req, std::dynamic_pointer_cast<RsyncServerConn>(shared_from_this()));
+      ((RsyncServer*)(data_))->Schedule(&RsyncServerConn::HandleMetaRsyncRequest, task_arg);
+      break;
+    }
+    case RsyncService::kRsyncFile: {
+      auto task_arg =
+          new RsyncServerTaskArg(req, std::dynamic_pointer_cast<RsyncServerConn>(shared_from_this()));
+      ((RsyncServer*)(data_))->Schedule(&RsyncServerConn::HandleFileRsyncRequest, task_arg);
+      break;
+    }
+    default: {
+      LOG(WARNING) << "Invalid RsyncRequest type";
+    }
+  }
+  return 0;
+}
+
+void RsyncServerConn::HandleMetaRsyncRequest(void* arg) {
+  std::unique_ptr<RsyncServerTaskArg> task_arg(static_cast<RsyncServerTaskArg*>(arg));
+  const std::shared_ptr<RsyncService::RsyncRequest> req = task_arg->req;
+  std::shared_ptr<RsyncServerConn> conn = task_arg->conn;
+  std::string db_name = req->db_name();
+  std::shared_ptr<DB> db = g_pika_server->GetDB(db_name);
+
+  RsyncService::RsyncResponse response;
+  response.set_reader_index(req->reader_index());
+  response.set_code(RsyncService::kOk);
+  response.set_type(RsyncService::kRsyncMeta);
+  response.set_db_name(db_name);
+  /*
+   * Since the slot field is written in protobuf,
+   * slot_id is set to the default value 0 for compatibility
+   * with older versions, but slot_id is not used.
+   */
+  response.set_slot_id(0);
+
+  std::string snapshot_uuid;
+  if (!db || db->IsBgSaving()) {
+    LOG(WARNING) << "waiting bgsave done...";
+    response.set_snapshot_uuid(snapshot_uuid);
+    response.set_code(RsyncService::kErr);
+    RsyncWriteResp(response, conn);
+    return;
+  }
+
+  std::vector<std::string> filenames;
+  g_pika_server->GetDumpMeta(db_name, &filenames, &snapshot_uuid);
+  response.set_snapshot_uuid(snapshot_uuid);
+
+  LOG(INFO) << "Rsync Meta request, snapshot_uuid: " << snapshot_uuid
+            << " files count: " << filenames.size() << " file list: ";
+  std::for_each(filenames.begin(), filenames.end(), [](auto& file) {
+    LOG(INFO) << "rsync snapshot file: " << file;
+  });
+
+  RsyncService::MetaResponse* meta_resp = response.mutable_meta_resp();
+  for (const auto& filename : filenames) {
+    meta_resp->add_filenames(filename);
+  }
+  RsyncWriteResp(response, conn);
+}
+
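+// HandleFileRsyncRequest below answers one (filename, offset, count) window
+// at a time. An illustrative exchange for a 3 MB file pulled in 1 MB windows
+// (sizes are made-up; the real window size is set by the client throttle):
+//   req(offset=0,   count=1MB) -> resp(count=1MB, eof=false)
+//   req(offset=1MB, count=1MB) -> resp(count=1MB, eof=false)
+//   req(offset=2MB, count=1MB) -> resp(count=1MB, eof=true, checksum=...)
+// The client advances its offset by resp.count() and stops at eof.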
+void RsyncServerConn::HandleFileRsyncRequest(void* arg) {
+  std::unique_ptr<RsyncServerTaskArg> task_arg(static_cast<RsyncServerTaskArg*>(arg));
+  const std::shared_ptr<RsyncService::RsyncRequest> req = task_arg->req;
+  std::shared_ptr<RsyncServerConn> conn = task_arg->conn;
+
+  std::string db_name = req->db_name();
+  std::string filename = req->file_req().filename();
+  size_t offset = req->file_req().offset();
+  size_t count = req->file_req().count();
+
+  RsyncService::RsyncResponse response;
+  response.set_reader_index(req->reader_index());
+  response.set_code(RsyncService::kOk);
+  response.set_type(RsyncService::kRsyncFile);
+  response.set_db_name(db_name);
+  /*
+   * Since the slot field is written in protobuf,
+   * slot_id is set to the default value 0 for compatibility
+   * with older versions, but slot_id is not used.
+   */
+  response.set_slot_id(0);
+
+  std::string snapshot_uuid;
+  Status s = g_pika_server->GetDumpUUID(db_name, &snapshot_uuid);
+  response.set_snapshot_uuid(snapshot_uuid);
+  if (!s.ok()) {
+    LOG(WARNING) << "rsyncserver get snapshotUUID failed";
+    response.set_code(RsyncService::kErr);
+    RsyncWriteResp(response, conn);
+    return;
+  }
+
+  std::shared_ptr<DB> db = g_pika_server->GetDB(db_name);
+  if (!db) {
+    LOG(WARNING) << "cannot find db for db_name: " << db_name;
+    response.set_code(RsyncService::kErr);
+    RsyncWriteResp(response, conn);
+    return;
+  }
+
+  const std::string filepath = db->bgsave_info().path + "/" + filename;
+  char* buffer = new char[req->file_req().count() + 1];
+  size_t bytes_read{0};
+  std::string checksum = "";
+  bool is_eof = false;
+  std::shared_ptr<RsyncReader> reader = conn->readers_[req->reader_index()];
+  s = reader->Read(filepath, offset, count, buffer,
+                   &bytes_read, &checksum, &is_eof);
+  if (!s.ok()) {
+    response.set_code(RsyncService::kErr);
+    RsyncWriteResp(response, conn);
+    delete []buffer;
+    return;
+  }
+
+  RsyncService::FileResponse* file_resp = response.mutable_file_resp();
+  file_resp->set_data(buffer, bytes_read);
+  file_resp->set_eof(is_eof);
+  file_resp->set_checksum(checksum);
+  file_resp->set_filename(filename);
+  file_resp->set_count(bytes_read);
+  file_resp->set_offset(offset);
+
+  RsyncWriteResp(response, conn);
+  delete []buffer;
+}
+
+RsyncServerThread::RsyncServerThread(const std::set<std::string>& ips, int port, int cron_interval, RsyncServer* arg)
+    : HolyThread(ips, port, &conn_factory_, cron_interval, &handle_, true), conn_factory_(arg) {}
+
+RsyncServerThread::~RsyncServerThread() {
+  LOG(WARNING) << "RsyncServerThread destroyed";
+}
+
+void RsyncServerThread::RsyncServerHandle::FdClosedHandle(int fd, const std::string& ip_port) const {
+  LOG(WARNING) << "ip_port: " << ip_port << " connection closed";
+}
+
+void RsyncServerThread::RsyncServerHandle::FdTimeoutHandle(int fd, const std::string& ip_port) const {
+  LOG(WARNING) << "ip_port: " << ip_port << " connection timeout";
+}
+
+bool RsyncServerThread::RsyncServerHandle::AccessHandle(int fd, std::string& ip_port) const {
+  LOG(WARNING) << "fd: " << fd << " ip_port: " << ip_port << " connection accepted";
+  return true;
+}
+
+void RsyncServerThread::RsyncServerHandle::CronHandle() const {
+}
+
+}  // end namespace rsync
+
diff --git a/tools/pika_migrate/src/rsync_service.proto b/tools/pika_migrate/src/rsync_service.proto
new file mode 100644
index 0000000000..ee23b3e8a4
--- /dev/null
+++ b/tools/pika_migrate/src/rsync_service.proto
@@ -0,0 +1,51 @@
+syntax = "proto2";
+package RsyncService;
+
+enum Type {
+  kRsyncMeta = 1;
+  kRsyncFile = 2;
+}
+
+enum StatusCode {
+  kOk = 1;
+  kErr = 2;
+}
+
+message MetaResponse {
+  repeated string filenames = 1;
+}
+
+message FileRequest {
+  required string filename = 1;
+  required uint64 count = 2;
+  required uint64 offset = 3;
+}
+
+message FileResponse {
+  required int32 eof = 1;
+  required uint64 count = 2;
+  required uint64 offset = 3;
+  required bytes data = 4;
+  required string checksum = 5;
+  required string filename = 6;
+}
+
+message RsyncRequest {
+  required Type type = 1;
+  required int32 reader_index = 2;
+  required string db_name = 3;
+ 
required uint32 slot_id = 4; + optional FileRequest file_req = 5; +} + +message RsyncResponse { + required Type type = 1; + required int32 reader_index = 2; + required string snapshot_uuid = 3; + required string db_name = 4; + required uint32 slot_id = 5; + required StatusCode code = 6; + optional MetaResponse meta_resp = 7; + optional FileResponse file_resp = 8; +} + diff --git a/tools/pika_migrate/src/storage/CMakeLists.txt b/tools/pika_migrate/src/storage/CMakeLists.txt new file mode 100644 index 0000000000..e12cae9b7d --- /dev/null +++ b/tools/pika_migrate/src/storage/CMakeLists.txt @@ -0,0 +1,44 @@ +cmake_minimum_required(VERSION 3.18) + +set (CMAKE_CXX_STANDARD 17) +project (storage) + +# Other CMake modules +add_subdirectory(tests) +# add_subdirectory(examples) +# add_subdirectory(benchmark) + +add_definitions(-DROCKSDB_PLATFORM_POSIX -DROCKSDB_LIB_IO_POSIX) +add_compile_options("-fno-builtin-memcmp") + +set(CMAKE_SYSTEM_PROCESSOR ${CMAKE_HOST_SYSTEM_PROCESSOR}) +if(CMAKE_SYSTEM_PROCESSOR MATCHES "x86_64") + add_compile_options(-msse) +endif() + +aux_source_directory(./src DIR_SRCS) + +add_library(storage STATIC ${DIR_SRCS} ) + +add_dependencies(storage rocksdb gtest glog gflags fmt ${LIBUNWIND_NAME} pstd) +# TODO fix rocksdb include path +target_include_directories(storage + PUBLIC ${CMAKE_SOURCE_DIR} + PUBLIC ${PROJECT_SOURCE_DIR} + PUBLIC ${PROJECT_SOURCE_DIR}/include + ${INSTALL_INCLUDEDIR} + ${ROCKSDB_SOURCE_DIR} +) + +target_link_libraries(storage + PUBLIC ${ROCKSDB_LIBRARY} + ${SNAPPY_LIBRARY} + ${ZSTD_LIBRARY} + ${LZ4_LIBRARY} + ${ZLIB_LIBRARY} + ${JEMALLOC_LIBRARY} + ${GLOG_LIBRARY} + ${GFLAGS_LIBRARY} + ${FMT_LIBRARY} + ${LIBUNWIND_LIBRARY} + PUBLIC pstd) diff --git a/tools/pika_migrate/src/storage/LICENSE b/tools/pika_migrate/src/storage/LICENSE new file mode 100644 index 0000000000..75c1e56bfa --- /dev/null +++ b/tools/pika_migrate/src/storage/LICENSE @@ -0,0 +1,15 @@ +Copyright (c) 2015-2020, Qihoo360 +All rights reserved. + +Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: + +1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. + +2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. + +3. Neither the name of the copyright holder nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; +OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+
+
diff --git a/tools/pika_migrate/src/storage/README.md b/tools/pika_migrate/src/storage/README.md
new file mode 100644
index 0000000000..e70eb783bf
--- /dev/null
+++ b/tools/pika_migrate/src/storage/README.md
@@ -0,0 +1,8 @@
+# storage
+[![Build Status](https://travis-ci.org/KernelMaker/storage.svg?branch=master)](https://travis-ci.org/KernelMaker/storage)
+
+
+
+
+
+Qihoo
diff --git a/tools/pika_migrate/src/storage/benchmark/CMakeLists.txt b/tools/pika_migrate/src/storage/benchmark/CMakeLists.txt new file mode 100644 index 0000000000..67afc0ea06 --- /dev/null +++ b/tools/pika_migrate/src/storage/benchmark/CMakeLists.txt @@ -0,0 +1,30 @@ +cmake_minimum_required (VERSION 3.18) + +aux_source_directory(../src DIR_SRCS) + + +file(GLOB_RECURSE STORAGE_BENCHMARK_SOURCE "${PROJECT_SOURCE_DIR}/benchmark/*.cc") + + +foreach(storage_benchmark_source ${STORAGE_BENCHMARK_SOURCE}) + get_filename_component(storage_benchmark_filename ${storage_benchmark_source} NAME) + string(REPLACE ".cc" "" storage_benchmark_name ${storage_benchmark_filename}) + + add_executable(${storage_benchmark_name} EXCLUDE_FROM_ALL ${storage_benchmark_filename}) + target_include_directories(${storage_benchmark_name} + PUBLIC ${PROJECT_SOURCE_DIR}/include + PUBLIC ${PROJECT_SOURCE_DIR}/.. + ${ROCKSDB_INCLUDE_DIR} + ${ROCKSDB_SOURCE_DIR} + ) + add_dependencies(${storage_benchmark_name} storage pstd glog gflags ${LIBUNWIND_NAME}) + + target_link_libraries(${storage_benchmark_name} + PUBLIC storage + PUBLIC pstd + PUBLIC ${GLOG_LIBRARY} + PUBLIC ${GFLAGS_LIBRARY} + PUBLIC ${LIBUNWIND_LIBRARY} + PUBLIC pthread + ) +endforeach() diff --git a/tools/pika_migrate/src/storage/benchmark/storage_bench.cc b/tools/pika_migrate/src/storage/benchmark/storage_bench.cc new file mode 100644 index 0000000000..eb50080e64 --- /dev/null +++ b/tools/pika_migrate/src/storage/benchmark/storage_bench.cc @@ -0,0 +1,238 @@ +// Copyright (c) 2017-present The storage Authors. All rights reserved. +// This source code is licensed under the BSD-style license found in the +// LICENSE file in the root directory of this source tree. An additional grant +// of patent rights can be found in the PATENTS file in the same directory. 
+ +#include +#include +#include +#include + +#include "storage/storage.h" + +const int KEYLENGTH = 1024 * 10; +const int VALUELENGTH = 1024 * 10; +const int THREADNUM = 20; +const int HASH_TABLE_FIELD_SIZE = 10000000; + +using namespace storage; +using namespace std::chrono; + +static const std::string key(KEYLENGTH, 'a'); +static const std::string value(VALUELENGTH, 'a'); + +void BenchSet() { + printf("====== Set ======\n"); + storage::Options options; + options.create_if_missing = true; + storage::Storage db; + storage::Status s = db.Open(options, "./db"); + + if (!s.ok()) { + printf("Open db failed, error: %s\n", s.ToString().c_str()); + return; + } + + std::vector jobs; + size_t kv_num = 10000; + jobs.clear(); + auto start = std::chrono::system_clock::now(); + for (size_t i = 0; i < THREADNUM; ++i) { + jobs.emplace_back( + [&db](size_t kv_num) { + for (size_t j = 0; j < kv_num; ++j) { + db.Set(key, value); + } + }, + kv_num); + } + + for (auto& job : jobs) { + job.join(); + } + auto end = system_clock::now(); + duration elapsed_seconds = end - start; + auto cost = duration_cast(elapsed_seconds).count(); + std::cout << "Test case 1, Set " << THREADNUM * kv_num << " Cost: " << cost + << "s QPS: " << (THREADNUM * kv_num) / cost << std::endl; + + kv_num = 100000; + jobs.clear(); + start = system_clock::now(); + for (size_t i = 0; i < THREADNUM; ++i) { + jobs.emplace_back( + [&db](size_t kv_num) { + for (size_t j = 0; j < kv_num; ++j) { + db.Set(key, value); + } + }, + kv_num); + } + + for (auto& job : jobs) { + job.join(); + } + end = system_clock::now(); + elapsed_seconds = end - start; + cost = duration_cast(elapsed_seconds).count(); + std::cout << "Test case 2, Set " << THREADNUM * kv_num << " Cost: " << cost + << "s QPS: " << (THREADNUM * kv_num) / cost << std::endl; +} + +void BenchHGetall() { + printf("====== HGetall ======\n"); + storage::Options options; + options.create_if_missing = true; + storage::Storage db; + storage::Status s = db.Open(options, "./db"); + + if (!s.ok()) { + printf("Open db failed, error: %s\n", s.ToString().c_str()); + return; + } + + int32_t ret = 0; + Storage::FieldValue fv; + std::vector fields; + std::vector fvs_in; + std::vector fvs_out; + + // 1. Create the hash table then insert hash table 10000 field + // 2. HGetall the hash table 10000 field (statistics cost time) + fvs_in.clear(); + for (size_t i = 0; i < 10000; ++i) { + fv.field = "field_" + std::to_string(i); + fv.value = "value_" + std::to_string(i); + fvs_in.push_back(fv); + } + db.HMSet("HGETALL_KEY1", fvs_in); + + fvs_out.clear(); + auto start = system_clock::now(); + db.HGetall("HGETALL_KEY1", &fvs_out); + auto end = system_clock::now(); + duration elapsed_seconds = end - start; + auto cost = duration_cast(elapsed_seconds).count(); + std::cout << "Test case 1, HGetall " << fvs_out.size() << " Field HashTable Cost: " << cost << "ms" << std::endl; + + // 1. Create the hash table then insert hash table 10000000 field + // 2. Delete the hash table + // 3. Create the hash table whos key same as before, + // then insert the hash table 10000 field + // 4. 
HGetall the hash table 10000 field (statistics cost time) + fvs_in.clear(); + for (size_t i = 0; i < HASH_TABLE_FIELD_SIZE; ++i) { + fv.field = "field_" + std::to_string(i); + fv.value = "value_" + std::to_string(i); + fvs_in.push_back(fv); + } + db.HMSet("HGETALL_KEY2", fvs_in); + std::vector del_keys({"HGETALL_KEY2"}); + std::map type_status; + db.Del(del_keys); + fvs_in.clear(); + for (size_t i = 0; i < 10000; ++i) { + fv.field = "field_" + std::to_string(i); + fv.value = "value_" + std::to_string(i); + fvs_in.push_back(fv); + } + db.HMSet("HGETALL_KEY2", fvs_in); + + fvs_out.clear(); + start = system_clock::now(); + db.HGetall("HGETALL_KEY2", &fvs_out); + end = system_clock::now(); + elapsed_seconds = end - start; + cost = duration_cast(elapsed_seconds).count(); + std::cout << "Test case 2, HGetall " << fvs_out.size() << " Field HashTable Cost: " << cost << "ms" << std::endl; + + // 1. Create the hash table then insert hash table 10000000 field + // 2. Delete hash table 9990000 field, the hash table remain 10000 field + // 3. HGetall the hash table 10000 field (statistics cost time) + fvs_in.clear(); + for (size_t i = 0; i < HASH_TABLE_FIELD_SIZE; ++i) { + fv.field = "field_" + std::to_string(i); + fv.value = "value_" + std::to_string(i); + fvs_in.push_back(fv); + } + db.HMSet("HGETALL_KEY3", fvs_in); + fields.clear(); + for (size_t i = 0; i < HASH_TABLE_FIELD_SIZE - 10000; ++i) { + fields.push_back("field_" + std::to_string(i)); + } + db.HDel("HGETALL_KEY3", fields, &ret); + + fvs_out.clear(); + start = system_clock::now(); + db.HGetall("HGETALL_KEY3", &fvs_out); + end = system_clock::now(); + elapsed_seconds = end - start; + cost = duration_cast(elapsed_seconds).count(); + std::cout << "Test case 3, HGetall " << fvs_out.size() << " Field HashTable Cost: " << cost << "ms" << std::endl; +} + +void BenchScan() { + printf("====== Scan ======\n"); + storage::Options options; + options.create_if_missing = true; + storage::Storage db; + storage::Status s = db.Open(options, "./db"); + + if (!s.ok()) { + printf("Open db failed, error: %s\n", s.ToString().c_str()); + return; + } + + std::vector jobs; + size_t kv_num = 10000000; + jobs.clear(); + auto start = std::chrono::system_clock::now(); + for (size_t i = 0; i < THREADNUM; ++i) { + jobs.emplace_back( + [&db](size_t kv_num) { + for (size_t j = 0; j < kv_num; ++j) { + std::string key_prefix = key + std::to_string(j); + db.Set(key_prefix, value); + } + }, + kv_num); + } + + for (auto& job : jobs) { + job.join(); + } + auto end = system_clock::now(); + duration elapsed_seconds = end - start; + auto cost = duration_cast(elapsed_seconds).count(); + std::cout << "Test case 1, Set " << THREADNUM * kv_num << " Cost: " << cost + << "s QPS: " << (THREADNUM * kv_num) / cost << std::endl; + + // Scan 100000 + std::vector keys; + start = system_clock::now(); + db.Scan(0, "*", 100000, &keys); + end = system_clock::now(); + elapsed_seconds = end - start; + cost = duration_cast(elapsed_seconds).count(); + std::cout << "Test case 2, Scan " << 100000 << " Cost: " << cost << "s" << std::endl; + + // Scan 10000000 + keys.clear(); + start = system_clock::now(); + db.Scan(0, "*", kv_num, &keys); + end = system_clock::now(); + elapsed_seconds = end - start; + cost = duration_cast(elapsed_seconds).count(); + std::cout << "Test case 3, Scan " << kv_num << " Cost: " << cost << "s" << std::endl; +} + +int main(int argc, char** argv) { + // keys + BenchSet(); + + // hashes + BenchHGetall(); + + // Iterator + BenchScan(); +} diff --git 
a/tools/pika_migrate/src/storage/detect_environment b/tools/pika_migrate/src/storage/detect_environment new file mode 100755 index 0000000000..e002020726 --- /dev/null +++ b/tools/pika_migrate/src/storage/detect_environment @@ -0,0 +1,92 @@ +#!/bin/sh + +OUTPUT=$1 +if test -z "$OUTPUT"; then + echo "usage: $0 " >&2 + exit 1 +fi + +# Delete existing output, if it exists +rm -f "$OUTPUT" +touch "$OUTPUT" + +if test -z "$CXX"; then + CXX=g++ +fi + +# Test whether Snappy library is installed +# http://code.google.com/p/snappy/ +$CXX $CFLAGS -x c++ - -o /dev/null 2>/dev/null < + int main() {} +EOF +if [ "$?" = 0 ]; then + ROCKSDB_LDFLAGS="$ROCKSDB_LDFLAGS -lsnappy" +fi + +# Test whether gflags library is installed +# http://gflags.github.io/gflags/ +# check if the namespace is gflags +$CXX $CFLAGS -x c++ - -o /dev/null 2>/dev/null << EOF + #include + using namespace gflags; + int main() {} +EOF +if [ "$?" = 0 ]; then + ROCKSDB_LDFLAGS="$ROCKSDB_LDFLAGS -lgflags" +else + # check if namespace is google + $CXX $CFLAGS -x c++ - -o /dev/null 2>/dev/null << EOF + #include + using namespace google; + int main() {} +EOF +if [ "$?" = 0 ]; then + ROCKSDB_LDFLAGS="$ROCKSDB_LDFLAGS -lgflags" +fi +fi + +# Test whether zlib library is installed +$CXX $CFLAGS $COMMON_FLAGS -x c++ - -o /dev/null 2>/dev/null < + int main() {} +EOF +if [ "$?" = 0 ]; then + ROCKSDB_LDFLAGS="$ROCKSDB_LDFLAGS -lz" +fi + +# Test whether bzip library is installed +$CXX $CFLAGS $COMMON_FLAGS -x c++ - -o /dev/null 2>/dev/null < + int main() {} +EOF +if [ "$?" = 0 ]; then + ROCKSDB_LDFLAGS="$ROCKSDB_LDFLAGS -lbz2" +fi + +# Test whether lz4 library is installed +$CXX $CFLAGS $COMMON_FLAGS -x c++ - -o /dev/null 2>/dev/null < + #include + int main() {} +EOF +if [ "$?" = 0 ]; then + ROCKSDB_LDFLAGS="$ROCKSDB_LDFLAGS -llz4" +fi + +# Test whether zstd library is installed +$CXX $CFLAGS $COMMON_FLAGS -x c++ - -o /dev/null 2>/dev/null < + int main() {} +EOF +if [ "$?" = 0 ]; then + ROCKSDB_LDFLAGS="$ROCKSDB_LDFLAGS -lzstd" +fi + + + +# Test processor nums +PROCESSOR_NUMS=$(cat /proc/cpuinfo | grep processor | wc -l) + +echo "ROCKSDB_LDFLAGS=$ROCKSDB_LDFLAGS" >> "$OUTPUT" +echo "PROCESSOR_NUMS=$PROCESSOR_NUMS" >> "$OUTPUT" diff --git a/tools/pika_migrate/src/storage/examples/CMakeLists.txt b/tools/pika_migrate/src/storage/examples/CMakeLists.txt new file mode 100644 index 0000000000..a356c34729 --- /dev/null +++ b/tools/pika_migrate/src/storage/examples/CMakeLists.txt @@ -0,0 +1,30 @@ +cmake_minimum_required (VERSION 3.18) + +aux_source_directory(../src DIR_SRCS) + + +file(GLOB_RECURSE STORAGE_EXAMPLES_SOURCE "${PROJECT_SOURCE_DIR}/examples/*.cc") + + +foreach(storage_example_source ${STORAGE_EXAMPLES_SOURCE}) + get_filename_component(storage_example_filename ${storage_example_source} NAME) + string(REPLACE ".cc" "" storage_example_name ${storage_example_filename}) + + add_executable(${storage_example_name} EXCLUDE_FROM_ALL ${storage_example_filename}) + target_include_directories(${storage_example_name} + PUBLIC ${PROJECT_SOURCE_DIR}/include + PUBLIC ${PROJECT_SOURCE_DIR}/.. 
+ ${ROCKSDB_INCLUDE_DIR} + ${ROCKSDB_SOURCE_DIR} + ) + add_dependencies(${storage_example_name} storage pstd glog gflags ${LIBUNWIND_NAME}) + + target_link_libraries(${storage_example_name} + PUBLIC storage + PUBLIC pstd + PUBLIC ${GLOG_LIBRARY} + PUBLIC ${GFLAGS_LIBRARY} + PUBLIC ${LIBUNWIND_LIBRARY} + PUBLIC pthread + ) +endforeach() diff --git a/tools/pika_migrate/src/storage/examples/hashes_example.cc b/tools/pika_migrate/src/storage/examples/hashes_example.cc new file mode 100644 index 0000000000..0a766e595f --- /dev/null +++ b/tools/pika_migrate/src/storage/examples/hashes_example.cc @@ -0,0 +1,113 @@ +// Copyright (c) 2017-present, Qihoo, Inc. All rights reserved. +// This source code is licensed under the BSD-style license found in the +// LICENSE file in the root directory of this source tree. An additional grant +// of patent rights can be found in the PATENTS file in the same directory. + +#include + +#include "storage/storage.h" + +using namespace storage; + +int main() { + storage::Storage db; + StorageOptions storage_options; + storage_options.options.create_if_missing = true; + storage::Status s = db.Open(storage_options, "./db"); + if (s.ok()) { + printf("Open success\n"); + } else { + printf("Open failed, error: %s\n", s.ToString().c_str()); + return -1; + } + // HSet + int32_t res; + s = db.HSet("TEST_KEY1", "TEST_FIELD1", "TEST_VALUE1", &res); + printf("HSet return: %s, res = %d\n", s.ToString().c_str(), res); + s = db.HSet("TEST_KEY1", "TEST_FIELD2", "TEST_VALUE2", &res); + printf("HSet return: %s, res = %d\n", s.ToString().c_str(), res); + + s = db.HSet("TEST_KEY2", "TEST_FIELD1", "TEST_VALUE1", &res); + printf("HSet return: %s, res = %d\n", s.ToString().c_str(), res); + s = db.HSet("TEST_KEY2", "TEST_FIELD2", "TEST_VALUE2", &res); + printf("HSet return: %s, res = %d\n", s.ToString().c_str(), res); + s = db.HSet("TEST_KEY2", "TEST_FIELD3", "TEST_VALUE3", &res); + printf("HSet return: %s, res = %d\n", s.ToString().c_str(), res); + + // HGet + std::string value; + s = db.HGet("TEST_KEY1", "TEST_FIELD1", &value); + printf("HGet return: %s, value = %s\n", s.ToString().c_str(), value.c_str()); + s = db.HGet("TEST_KEY1", "TEST_FIELD2", &value); + printf("HGet return: %s, value = %s\n", s.ToString().c_str(), value.c_str()); + s = db.HGet("TEST_KEY1", "TEST_FIELD3", &value); + printf("HGet return: %s, value = %s\n", s.ToString().c_str(), value.c_str()); + s = db.HGet("TEST_KEY_NOT_EXIST", "TEST_FIELD", &value); + printf("HGet return: %s, value = %s\n", s.ToString().c_str(), value.c_str()); + + // HMSet + std::vector fvs; + fvs.push_back({"TEST_FIELD1", "TEST_VALUE1"}); + fvs.push_back({"TEST_FIELD2", "TEST_VALUE2"}); + s = db.HMSet("TEST_HASH", fvs); + printf("HMset return: %s\n", s.ToString().c_str()); + + // HMGet + std::vector vss; + std::vector fields; + fields.push_back("TEST_FIELD1"); + fields.push_back("TEST_FIELD2"); + s = db.HMGet("TEST_HASH", fields, &vss); + printf("HMget return: %s\n", s.ToString().c_str()); + for (uint32_t idx = 0; idx != fields.size(); idx++) { + printf("idx = %d, field = %s, value = %s\n", idx, fields[idx].c_str(), vss[idx].value.c_str()); + } + + // HLEN + s = db.HLen("TEST_HASH", &res); + printf("HLen return : %s, len = %d\n", s.ToString().c_str(), res); + + // Compact + s = db.Compact(storage::DataType::kHashes); + printf("Compact return: %s\n", s.ToString().c_str()); + + // Expire + std::map key_status; + db.Expire("TEST_KEY1", 1, &key_status); + printf("Expire return: %s\n", s.ToString().c_str()); + 
std::this_thread::sleep_for(std::chrono::milliseconds(2500)); + s = db.HGet("TEST_KEY1", "TEST_FIELD1", &value); + printf("HGet return: %s, value = %s\n", s.ToString().c_str(), value.c_str()); + s = db.HGet("TEST_KEY1", "TEST_FIELD2", &value); + printf("HGet return: %s, value = %s\n", s.ToString().c_str(), value.c_str()); + + s = db.HGet("TEST_KEY2", "TEST_FIELD1", &value); + printf("HGet return: %s, value = %s\n", s.ToString().c_str(), value.c_str()); + s = db.HGet("TEST_KEY2", "TEST_FIELD2", &value); + printf("HGet return: %s, value = %s\n", s.ToString().c_str(), value.c_str()); + s = db.HGet("TEST_KEY2", "TEST_FIELD3", &value); + printf("HGet return: %s, value = %s\n", s.ToString().c_str(), value.c_str()); + + // Compact + s = db.Compact(storage::DataType::kHashes); + printf("Compact return: %s\n", s.ToString().c_str()); + + s = db.HGet("TEST_KEY2", "TEST_FIELD1", &value); + printf("HGet return: %s, value = %s\n", s.ToString().c_str(), value.c_str()); + s = db.HGet("TEST_KEY2", "TEST_FIELD2", &value); + printf("HGet return: %s, value = %s\n", s.ToString().c_str(), value.c_str()); + s = db.HGet("TEST_KEY2", "TEST_FIELD3", &value); + printf("HGet return: %s, value = %s\n", s.ToString().c_str(), value.c_str()); + + // Exists + s = db.HSet("TEST_KEY1", "TEST_FIELD1", "TEST_VALUE1", &res); + printf("HSet return: %s, res = %d\n", s.ToString().c_str(), res); + s = db.HExists("TEST_KEY1", "TEST_FIELD1"); + printf("HExists return: %s\n", s.ToString().c_str()); + + // HIncrby + int64_t hincrby_value; + s = db.HIncrby("TEST_KEY1", "TEST_HINCRBY_FIELD", 100, &hincrby_value); + printf("HIncrby return: %s, value = %lld\n", s.ToString().c_str(), hincrby_value); + return 0; +} diff --git a/tools/pika_migrate/src/storage/examples/sets_example.cc b/tools/pika_migrate/src/storage/examples/sets_example.cc new file mode 100644 index 0000000000..af002adc59 --- /dev/null +++ b/tools/pika_migrate/src/storage/examples/sets_example.cc @@ -0,0 +1,35 @@ +// Copyright (c) 2017-present, Qihoo, Inc. All rights reserved. +// This source code is licensed under the BSD-style license found in the +// LICENSE file in the root directory of this source tree. An additional grant +// of patent rights can be found in the PATENTS file in the same directory. + +#include + +#include "storage/storage.h" + +using namespace storage; + +int main() { + storage::Storage db; + StorageOptions storage_options; + storage_options.options.create_if_missing = true; + storage::Status s = db.Open(storage_options, "./db"); + if (s.ok()) { + printf("Open success\n"); + } else { + printf("Open failed, error: %s\n", s.ToString().c_str()); + return -1; + } + // SAdd + int32_t ret = 0; + std::vector members{"MM1", "MM2", "MM3", "MM2"}; + s = db.SAdd("SADD_KEY", members, &ret); + printf("SAdd return: %s, ret = %d\n", s.ToString().c_str(), ret); + + // SCard + ret = 0; + s = db.SCard("SADD_KEY", &ret); + printf("SCard, return: %s, scard ret = %d\n", s.ToString().c_str(), ret); + + return 0; +} diff --git a/tools/pika_migrate/src/storage/examples/strings_example.cc b/tools/pika_migrate/src/storage/examples/strings_example.cc new file mode 100644 index 0000000000..a4241b8b95 --- /dev/null +++ b/tools/pika_migrate/src/storage/examples/strings_example.cc @@ -0,0 +1,173 @@ +// Copyright (c) 2017-present, Qihoo, Inc. All rights reserved. +// This source code is licensed under the BSD-style license found in the +// LICENSE file in the root directory of this source tree. 
An additional grant +// of patent rights can be found in the PATENTS file in the same directory. + +#include + +#include "storage/storage.h" + +using namespace storage; + +int main() { + storage::Storage db; + StorageOptions storage_options; + storage_options.options.create_if_missing = true; + storage::Status s = db.Open(storage_options, "./db"); + if (s.ok()) { + printf("Open success\n"); + } else { + printf("Open failed, error: %s\n", s.ToString().c_str()); + return -1; + } + + int32_t ret; + // Set + s = db.Set("TEST_KEY", "TEST_VALUE"); + printf("Set return: %s\n", s.ToString().c_str()); + + // Get + std::string value; + s = db.Get("TEST_KEY", &value); + printf("Get return: %s, value: %s\n", s.ToString().c_str(), value.c_str()); + + // SetBit + s = db.SetBit("SETBIT_KEY", 7, 1, &ret); + printf("SetBit return: %s, ret: %d\n", s.ToString().c_str(), ret); + + // GetSet + s = db.GetSet("TEST_KEY", "Hello", &value); + printf("GetSet return: %s, old_value: %s", s.ToString().c_str(), value.c_str()); + + // SetBit + s = db.SetBit("SETBIT_KEY", 7, 1, &ret); + printf("Setbit return: %s\n", s.ToString().c_str()); + + // GetBit + s = db.GetBit("SETBIT_KEY", 7, &ret); + printf("GetBit return: %s, ret: %d\n", s.ToString().c_str(), ret); + + // MSet + std::vector kvs; + kvs.push_back({"TEST_KEY1", "TEST_VALUE1"}); + kvs.push_back({"TEST_KEY2", "TEST_VALUE2"}); + s = db.MSet(kvs); + printf("MSet return: %s\n", s.ToString().c_str()); + + // MGet + std::vector vss; + std::vector keys{"TEST_KEY1", "TEST_KEY2", "TEST_KEY_NOT_EXIST"}; + s = db.MGet(keys, &vss); + printf("MGet return: %s\n", s.ToString().c_str()); + for (size_t idx = 0; idx != keys.size(); idx++) { + printf("idx = %d, keys = %s, value = %s\n", idx, keys[idx].c_str(), vss[idx].value.c_str()); + } + + // Setnx + s = db.Setnx("TEST_KEY", "TEST_VALUE", &ret); + printf("Setnx return: %s, value: %s, ret: %d\n", s.ToString().c_str(), value.c_str(), ret); + + // MSetnx + s = db.MSetnx(kvs, &ret); + printf("MSetnx return: %s, ret: %d\n", s.ToString().c_str(), ret); + + // Setrange + s = db.Setrange("TEST_KEY", 10, "APPEND_VALUE", &ret); + printf("Setrange return: %s, ret: %d\n", s.ToString().c_str(), ret); + + // Getrange + s = db.Getrange("TEST_KEY", 0, -1, &value); + printf("Getrange return: %s, value: %s\n", s.ToString().c_str(), value.c_str()); + + // Append + std::string append_value; + s = db.Set("TEST_KEY", "TEST_VALUE"); + s = db.Append("TEST_KEY", "APPEND_VALUE", &ret); + s = db.Get("TEST_KEY", &append_value); + printf("Append return: %s, value: %s, ret: %d\n", s.ToString().c_str(), append_value.c_str(), ret); + + // BitCount + s = db.BitCount("TEST_KEY", 0, -1, &ret, false); + printf("BitCount return: %s, ret: %d\n", s.ToString().c_str(), ret); + + // BitCount + s = db.BitCount("TEST_KEY", 0, -1, &ret, true); + printf("BitCount return: %s, ret: %d\n", s.ToString().c_str(), ret); + + // BitOp + int64_t bitop_ret; + s = db.Set("BITOP_KEY1", "FOOBAR"); + s = db.Set("BITOP_KEY2", "ABCDEF"); + s = db.Set("BITOP_KEY3", "STORAGE"); + std::vector src_keys{"BITOP_KEY1", "BITOP_KEY2", "BITOP_KEY3"}; + // and + s = db.BitOp(storage::BitOpType::kBitOpAnd, "BITOP_DESTKEY", src_keys, &bitop_ret); + printf("BitOp return: %s, ret: %d\n", s.ToString().c_str(), bitop_ret); + // or + s = db.BitOp(storage::BitOpType::kBitOpOr, "BITOP_DESTKEY", src_keys, &bitop_ret); + printf("BitOp return: %s, ret: %d\n", s.ToString().c_str(), bitop_ret); + // xor + s = db.BitOp(storage::BitOpType::kBitOpXor, "BITOP_DESTKEY", src_keys, &bitop_ret); + printf("BitOp return: 
%s, ret: %d\n", s.ToString().c_str(), bitop_ret); + // not + std::vector not_keys{"BITOP_KEY1"}; + s = db.BitOp(storage::BitOpType::kBitOpNot, "BITOP_DESTKEY", not_keys, &bitop_ret); + printf("BitOp return: %s, ret: %d\n", s.ToString().c_str(), bitop_ret); + + // BitPos + int64_t bitpos_ret; + s = db.Set("BITPOS_KEY", "\xff\x00\x00"); + // bitpos key bit + s = db.BitPos("BITPOS_KEY", 1, &bitpos_ret); + printf("BitPos return: %s, ret: %d\n", s.ToString().c_str(), bitpos_ret); + // bitpos key bit [start] + s = db.BitPos("BITPOS_KEY", 1, 0, &bitpos_ret); + printf("BitPos return: %s, ret: %d\n", s.ToString().c_str(), bitpos_ret); + // bitpos key bit [start] [end] + s = db.BitPos("BITPOS_KEY", 1, 0, 4, &bitpos_ret); + printf("BitPos return: %s, ret: %d\n", s.ToString().c_str(), bitpos_ret); + + // Decrby + int64_t decrby_ret; + s = db.Set("TEST_KEY", "12345"); + s = db.Decrby("TEST_KEY", 5, &decrby_ret); + printf("Decrby return: %s, ret: %d\n", s.ToString().c_str(), decrby_ret); + + // Incrby + int64_t incrby_ret; + s = db.Incrby("INCRBY_KEY", 5, &incrby_ret); + printf("Incrby return: %s, ret: %d\n", s.ToString().c_str(), incrby_ret); + + // Incrbyfloat + s = db.Set("INCRBYFLOAT_KEY", "10.50"); + s = db.Incrbyfloat("INCRBYFLOAT_KEY", "0.1", &value); + printf("Incrbyfloat return: %s, value: %s\n", s.ToString().c_str(), value.c_str()); + + // Setex + s = db.Setex("TEST_KEY", "TEST_VALUE", 1); + printf("Setex return: %s\n", s.ToString().c_str()); + std::this_thread::sleep_for(std::chrono::milliseconds(2000)); + s = db.Get("TEST_KEY", &value); + printf("Get return: %s, value: %s\n", s.ToString().c_str(), value.c_str()); + + // Strlen + s = db.Set("TEST_KEY", "TEST_VALUE"); + int32_t len = 0; + s = db.Strlen("TEST_KEY", &len); + printf("Strlen return: %s, strlen: %d\n", s.ToString().c_str(), len); + + // Expire + std::map key_status; + s = db.Set("EXPIRE_KEY", "EXPIREVALUE"); + printf("Set return: %s\n", s.ToString().c_str()); + db.Expire("EXPIRE_KEY", 1, &key_status); + std::this_thread::sleep_for(std::chrono::milliseconds(2500)); + s = db.Get("EXPIRE_KEY", &value); + printf("Get return: %s, value: %s\n", s.ToString().c_str(), value.c_str()); + + // Compact + s = db.Compact(storage::DataType::kStrings); + printf("Compact return: %s\n", s.ToString().c_str()); + + return 0; +} diff --git a/tools/pika_migrate/src/storage/include/storage/backupable.h b/tools/pika_migrate/src/storage/include/storage/backupable.h new file mode 100644 index 0000000000..e190993c29 --- /dev/null +++ b/tools/pika_migrate/src/storage/include/storage/backupable.h @@ -0,0 +1,73 @@ +// Copyright (c) 2017-present, Qihoo, Inc. All rights reserved. +// This source code is licensed under the BSD-style license found in the +// LICENSE file in the root directory of this source tree. An additional grant +// of patent rights can be found in the PATENTS file in the same directory. + +#ifndef SRC_BACKUPABLE_H_ +#define SRC_BACKUPABLE_H_ + +#include + +#include "rocksdb/db.h" + +#include "db_checkpoint.h" +#include "storage.h" +#include "util.h" + +namespace storage { + +inline const std::string DEFAULT_BK_PATH = "dump"; // Default backup root dir +inline const std::string DEFAULT_RS_PATH = "db"; // Default restore root dir + +// Arguments which will used by BackupSave Thread +// p_engine for BackupEngine handler +// backup_dir +struct BackupSaveArgs { + void* p_engine = nullptr; + const std::string backup_dir; + // rocksdb instance number, consistent will instance index in storage. 
+  int index_ = 0;
+  Status res;
+
+  BackupSaveArgs(void* _p_engine, std::string _backup_dir, int index)
+      : p_engine(_p_engine), backup_dir(std::move(_backup_dir)), index_(index) {}
+};
+
+struct BackupContent {
+  std::vector<std::string> live_files;
+  rocksdb::VectorLogPtr live_wal_files;
+  uint64_t manifest_file_size = 0;
+  uint64_t sequence_number = 0;
+};
+
+class BackupEngine {
+ public:
+  ~BackupEngine();
+  static Status Open(Storage* db, std::shared_ptr<BackupEngine>& backup_engine_ret, int inst_count);
+
+  Status SetBackupContent();
+
+  Status CreateNewBackup(const std::string& dir);
+
+  void StopBackup();
+
+  Status CreateNewBackupSpecify(const std::string& dir, int index);
+
+ private:
+  BackupEngine() = default;
+
+  std::map<int, std::unique_ptr<rocksdb::DBCheckpoint>> engines_;
+  std::map<int, BackupContent> backup_content_;
+  std::map<int, pthread_t> backup_pthread_ts_;
+
+  Status NewCheckpoint(rocksdb::DB* rocksdb_db, int index);
+  std::string GetSaveDirByIndex(const std::string& _dir, int index) const {
+    std::string backup_dir = _dir.empty() ? DEFAULT_BK_PATH : _dir;
+    return backup_dir + ((backup_dir.back() != '/') ? "/" : "") + std::to_string(index);
+  }
+  Status WaitBackupPthread();
+};
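+
+// A hedged usage sketch of BackupEngine (inferred from the declarations
+// above; the storage pointer, instance count and path are example values):
+//   std::shared_ptr<BackupEngine> engine;
+//   Status s = BackupEngine::Open(db, engine, db_instance_num);
+//   if (s.ok()) { s = engine->CreateNewBackup("/data/dump"); }
+// Each rocksdb instance is checkpointed into its own numbered sub-directory,
+// see GetSaveDirByIndex.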
+
+}  // namespace storage
+#endif  // SRC_BACKUPABLE_H_
+
diff --git a/tools/pika_migrate/src/storage/include/storage/build_version.h b/tools/pika_migrate/src/storage/include/storage/build_version.h
new file mode 100644
index 0000000000..351b22f134
--- /dev/null
+++ b/tools/pika_migrate/src/storage/include/storage/build_version.h
@@ -0,0 +1,15 @@
+// Copyright (c) 2017-present, Qihoo, Inc. All rights reserved.
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree. An additional grant
+// of patent rights can be found in the PATENTS file in the same directory.
+
+#ifndef INCLUDE_STORAGE_BUILD_VERSION_H_
+#define INCLUDE_STORAGE_BUILD_VERSION_H_
+
+// this variable tells us about the git revision
+extern const char* blackwidow_build_git_sha;
+
+// Date on which the code was compiled:
+extern const char* blackwidow_build_compile_date;
+
+#endif  // INCLUDE_STORAGE_BUILD_VERSION_H_
diff --git a/tools/pika_migrate/src/storage/include/storage/db_checkpoint.h b/tools/pika_migrate/src/storage/include/storage/db_checkpoint.h
new file mode 100644
index 0000000000..100081052e
--- /dev/null
+++ b/tools/pika_migrate/src/storage/include/storage/db_checkpoint.h
@@ -0,0 +1,43 @@
+// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree. An additional grant
+// of patent rights can be found in the PATENTS file in the same directory.
+//
+// A checkpoint is an openable snapshot of a database at a point in time.
+
+#ifndef ROCKSDB_LITE
+
+#  include <string>
+#  include <vector>
+#  include "rocksdb/status.h"
+#  include "rocksdb/transaction_log.h"
+
+namespace rocksdb {
+
+class DB;
+
+class DBCheckpoint {
+ public:
+  // Creates a Checkpoint object to be used for creating openable snapshots
+  static Status Create(DB* db, DBCheckpoint** checkpoint_ptr);
+
+  // Builds an openable snapshot of RocksDB on the same disk. It accepts an
+  // output directory on the same disk, and under that directory it creates
+  // (1) hard-linked SST files pointing to the existing live SST files (the
+  //     SST files are copied if the output directory is on a different
+  //     filesystem), and
+  // (2) copies of the manifest and the other required files.
+  // The directory should not already exist; it is created by this API and
+  // will be an absolute path.
+  virtual Status CreateCheckpoint(const std::string& checkpoint_dir) = 0;
+
+  virtual Status GetCheckpointFiles(std::vector<std::string>& live_files, VectorLogPtr& live_wal_files,
+                                    uint64_t& manifest_file_size, uint64_t& sequence_number) = 0;
+
+  virtual Status CreateCheckpointWithFiles(const std::string& checkpoint_dir, std::vector<std::string>& live_files,
+                                           VectorLogPtr& live_wal_files, uint64_t manifest_file_size,
+                                           uint64_t sequence_number) = 0;
+
+  virtual ~DBCheckpoint() = default;
+};
+
+}  // namespace rocksdb
+#endif  // !ROCKSDB_LITE
diff --git a/tools/pika_migrate/src/storage/include/storage/slot_indexer.h b/tools/pika_migrate/src/storage/include/storage/slot_indexer.h
new file mode 100644
index 0000000000..92a49aeda2
--- /dev/null
+++ b/tools/pika_migrate/src/storage/include/storage/slot_indexer.h
@@ -0,0 +1,28 @@
+// Copyright (c) 2023-present, Qihoo, Inc. All rights reserved.
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree. An additional grant
+// of patent rights can be found in the PATENTS file in the same directory.
+
+#ifndef __SLOT_INDEXER_H__
+#define __SLOT_INDEXER_H__
+
+#include <cstdint>
+#include <vector>
+
+namespace storage {
+// Maps slots to rocksdb instance indexes
+// TODO(wangshaoyi): temporarily mock return
+class SlotIndexer {
+ public:
+  SlotIndexer() = delete;
+  SlotIndexer(uint32_t inst_num) : inst_num_(inst_num) {}
+  ~SlotIndexer() {}
+  uint32_t GetInstanceID(uint32_t slot_id) { return slot_id % inst_num_; }
+  void ReshardSlots(const std::vector<uint32_t>& slots) {}
+
+ private:
+  uint32_t inst_num_ = 3;
+};
+}  // namespace storage
+
+#endif
diff --git a/tools/pika_migrate/src/storage/include/storage/storage.h b/tools/pika_migrate/src/storage/include/storage/storage.h
new file mode 100644
index 0000000000..dd41b3ea94
--- /dev/null
+++ b/tools/pika_migrate/src/storage/include/storage/storage.h
@@ -0,0 +1,1156 @@
+// Copyright (c) 2017-present, Qihoo, Inc. All rights reserved.
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree. An additional grant
+// of patent rights can be found in the PATENTS file in the same directory.
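+
+// Storage fans a logical db out over several rocksdb instances. A rough
+// sketch of the routing (illustrative only; the real lookup is
+// Storage::GetDBInstance below, which relies on SlotIndexer):
+//   slot = hash(key) % slot_num        // key  -> slot
+//   inst = slot % db_instance_num      // slot -> rocksdb instance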
+
+#ifndef INCLUDE_STORAGE_STORAGE_H_
+#define INCLUDE_STORAGE_STORAGE_H_
+
+#include <cstdint>
+#include <limits>
+#include <map>
+#include <memory>
+#include <string>
+#include <unordered_map>
+#include <vector>
+
+#include "rocksdb/convenience.h"
+#include "rocksdb/filter_policy.h"
+#include "rocksdb/options.h"
+#include "rocksdb/rate_limiter.h"
+#include "rocksdb/slice.h"
+#include "rocksdb/status.h"
+#include "rocksdb/table.h"
+
+#include "slot_indexer.h"
+#include "pstd/include/pstd_mutex.h"
+#include "src/base_data_value_format.h"
+
+namespace storage {
+
+inline constexpr double ZSET_SCORE_MAX = std::numeric_limits<double>::max();
+inline constexpr double ZSET_SCORE_MIN = std::numeric_limits<double>::lowest();
+
+inline const std::string PROPERTY_TYPE_ROCKSDB_CUR_SIZE_ALL_MEM_TABLES = "rocksdb.cur-size-all-mem-tables";
+inline const std::string PROPERTY_TYPE_ROCKSDB_ESTIMATE_TABLE_READER_MEM = "rocksdb.estimate-table-readers-mem";
+inline const std::string PROPERTY_TYPE_ROCKSDB_BACKGROUND_ERRORS = "rocksdb.background-errors";
+inline const std::string PROPERTY_TYPE_ROCKSDB_BlOCK_CACHE_USAGE = "rocksdb.block-cache-usage";
+
+inline const std::string ALL_DB = "all";
+inline const std::string STRINGS_DB = "strings";
+inline const std::string HASHES_DB = "hashes";
+inline const std::string LISTS_DB = "lists";
+inline const std::string ZSETS_DB = "zsets";
+inline const std::string SETS_DB = "sets";
+inline const std::string STREAMS_DB = "streams";
+
+inline constexpr size_t BATCH_DELETE_LIMIT = 100;
+inline constexpr size_t COMPACT_THRESHOLD_COUNT = 2000;
+
+using Options = rocksdb::Options;
+using BlockBasedTableOptions = rocksdb::BlockBasedTableOptions;
+using Status = rocksdb::Status;
+using Slice = rocksdb::Slice;
+
+class Redis;
+enum class OptionType;
+
+struct StreamAddTrimArgs;
+struct StreamReadGroupReadArgs;
+struct StreamScanArgs;
+struct streamID;
+struct StreamInfoResult;
+
+template <typename T1, typename T2>
+class LRUCache;
+
+struct StorageOptions {
+  rocksdb::Options options;
+  rocksdb::BlockBasedTableOptions table_options;
+  size_t block_cache_size = 0;
+  bool share_block_cache = false;
+  size_t statistics_max_size = 0;
+  int db_statistics_level = 0;
+  bool enable_db_statistics = false;
+  size_t small_compaction_threshold = 5000;
+  size_t small_compaction_duration_threshold = 10000;
+  struct CompactParam {
+    // for LongestNotCompactionSstCompact function
+    int compact_every_num_of_files_;
+    int force_compact_file_age_seconds_;
+    int force_compact_min_delete_ratio_;
+    int dont_compact_sst_created_in_seconds_;
+    int best_delete_min_ratio_;
+  };
+  CompactParam compact_param_;
+  Status ResetOptions(const OptionType& option_type, const std::unordered_map<std::string, std::string>& options_map);
+};
+
+struct KeyValue {
+  std::string key;
+  std::string value;
+  bool operator==(const KeyValue& kv) const { return (kv.key == key && kv.value == value); }
+  bool operator<(const KeyValue& kv) const { return key < kv.key; }
+};
+
+struct KeyInfo {
+  uint64_t keys = 0;
+  uint64_t expires = 0;
+  uint64_t avg_ttl = 0;
+  uint64_t invaild_keys = 0;
+
+  KeyInfo() : keys(0), expires(0), avg_ttl(0), invaild_keys(0) {}
+
+  KeyInfo(uint64_t k, uint64_t e, uint64_t a, uint64_t i) : keys(k), expires(e), avg_ttl(a), invaild_keys(i) {}
+
+  KeyInfo operator + (const KeyInfo& info) {
+    KeyInfo res;
+    res.keys = keys + info.keys;
+    res.expires = expires + info.expires;
+    res.avg_ttl = avg_ttl + info.avg_ttl;
+    res.invaild_keys = invaild_keys + info.invaild_keys;
+    return res;
+  }
+};
+
+struct ValueStatus {
+  std::string value;
+  Status status;
+  int64_t ttl_millsec;
+  bool operator==(const ValueStatus& vs) const { return (vs.value 
== value && vs.status == status && vs.ttl_millsec == ttl_millsec); } +}; + +struct FieldValue { + std::string field; + std::string value; + FieldValue() = default; + FieldValue(const std::string& k, const std::string& v) : field(k), value(v) {} + FieldValue(std::string&& k, std::string&& v) : field(std::move(k)), value(std::move(v)) {} + bool operator==(const FieldValue& fv) const { return (fv.field == field && fv.value == value); } +}; + +struct IdMessage { + std::string field; + std::string value; + bool operator==(const IdMessage& fv) const { return (fv.field == field && fv.value == value); } +}; + +struct KeyVersion { + std::string key; + uint64_t version = 0; + bool operator==(const KeyVersion& kv) const { return (kv.key == key && kv.version == version); } +}; + +struct ScoreMember { + ScoreMember() : score(0.0), member("") {} + ScoreMember(double t_score, const std::string& t_member) : score(t_score), member(t_member) {} + double score; + std::string member; + bool operator==(const ScoreMember& sm) const { return (sm.score == score && sm.member == member); } +}; + +enum BeforeOrAfter { Before, After }; + +enum class OptionType { + kDB, + kColumnFamily, +}; + +enum ColumnFamilyType { kMeta, kData, kMetaAndData }; + +enum AGGREGATE { SUM, MIN, MAX }; + +enum BitOpType { kBitOpAnd = 1, kBitOpOr, kBitOpXor, kBitOpNot, kBitOpDefault }; + +enum Operation { + kNone = 0, + kCleanAll, + kCompactRange, + kCompactOldestOrBestDeleteRatioSst, +}; + +struct BGTask { + DataType type; + Operation operation; + std::vector argv; + + BGTask(const DataType& _type = DataType::kAll, const Operation& _opeation = Operation::kNone, + const std::vector& _argv = {}) + : type(_type), operation(_opeation), argv(_argv) {} +}; + +class Storage { + public: + Storage(); // for unit test only + Storage(int db_instance_num, int slot_num, bool is_classic_mode); + ~Storage(); + + Status Open(const StorageOptions& storage_options, const std::string& db_path); + + Status LoadCursorStartKey(const DataType& dtype, int64_t cursor, char* type, std::string* start_key); + + Status StoreCursorStartKey(const DataType& dtype, int64_t cursor, char type, const std::string& next_key); + + std::unique_ptr& GetDBInstance(const Slice& key); + + std::unique_ptr& GetDBInstance(const std::string& key); + + // Strings Commands + + // Set key to hold the string value. if key + // already holds a value, it is overwritten + Status Set(const Slice& key, const Slice& value); + + // Set key to hold the string value. if key exist + Status Setxx(const Slice& key, const Slice& value, int32_t* ret, int64_t ttl_millsec = 0); + + // Get the value of key. If the key does not exist + // the special value nil is returned + Status Get(const Slice& key, std::string* value); + + // Get the value and ttl of key. If the key does not exist + // the special value nil is returned. If the key has no ttl, ttl is -1 + Status GetWithTTL(const Slice& key, std::string* value, int64_t* ttl_millsec); + + // Atomically sets key to value and returns the old value stored at key + // Returns an error when key exists but does not hold a string value. 
+ Status GetSet(const Slice& key, const Slice& value, std::string* old_value); + + // Sets or clears the bit at offset in the string value stored at key + Status SetBit(const Slice& key, int64_t offset, int32_t value, int32_t* ret); + + // Returns the bit value at offset in the string value stored at key + Status GetBit(const Slice& key, int64_t offset, int32_t* ret); + + // Sets the given keys to their respective values + // MSET replaces existing values with new values + Status MSet(const std::vector& kvs); + + // Returns the values of all specified keys. For every key + // that does not hold a string value or does not exist, the + // special value nil is returned + Status MGet(const std::vector& keys, std::vector* vss); + + // Returns the values of all specified keyswithTTL. For every key + // that does not hold a string value or does not exist, the + // special value nil is returned + Status MGetWithTTL(const std::vector& keys, std::vector* vss); + + // Set key to hold string value if key does not exist + // return 1 if the key was set + // return 0 if the key was not set + Status Setnx(const Slice& key, const Slice& value, int32_t* ret, int64_t ttl_millsec = 0); + + // Sets the given keys to their respective values. + // MSETNX will not perform any operation at all even + // if just a single key already exists. + Status MSetnx(const std::vector& kvs, int32_t* ret); + + // Set key to hold string new_value if key currently hold the give value + // return 1 if the key currently hold the give value And override success + // return 0 if the key doesn't exist And override fail + // return -1 if the key currently does not hold the given value And override fail + Status Setvx(const Slice& key, const Slice& value, const Slice& new_value, int32_t* ret, int64_t ttl_millsec = 0); + + // delete the key that holds a given value + // return 1 if the key currently hold the give value And delete success + // return 0 if the key doesn't exist And del fail + // return -1 if the key currently does not hold the given value And del fail + Status Delvx(const Slice& key, const Slice& value, int32_t* ret); + + // Set key to hold string value if key does not exist + // return the length of the string after it was modified by the command + Status Setrange(const Slice& key, int64_t start_offset, const Slice& value, int32_t* ret); + + // Returns the substring of the string value stored at key, + // determined by the offsets start and end (both are inclusive) + Status Getrange(const Slice& key, int64_t start_offset, int64_t end_offset, std::string* ret); + + Status GetrangeWithValue(const Slice& key, int64_t start_offset, int64_t end_offset, + std::string* ret, std::string* value, int64_t* ttl_millsec); + + // If key already exists and is a string, this command appends the value at + // the end of the string + // return the length of the string after the append operation + Status Append(const Slice& key, const Slice& value, int32_t* ret, int64_t* expired_timestamp_millsec, std::string& out_new_value); + + // Count the number of set bits (population counting) in a string. 
+  // Count the number of set bits (population counting) in a string.
+  // return the number of bits set to 1
+  // note: if you need to count within a specified range, set have_range to true
+  Status BitCount(const Slice& key, int64_t start_offset, int64_t end_offset, int32_t* ret, bool have_range);
+
+  // Perform a bitwise operation between multiple keys
+  // and store the result in the destination key
+  Status BitOp(BitOpType op, const std::string& dest_key, const std::vector<std::string>& src_keys, std::string& value_to_dest, int64_t* ret);
+
+  // Return the position of the first bit set to 1 or 0 in a string
+  // BitPos key 0
+  Status BitPos(const Slice& key, int32_t bit, int64_t* ret);
+  // BitPos key 0 [start]
+  Status BitPos(const Slice& key, int32_t bit, int64_t start_offset, int64_t* ret);
+  // BitPos key 0 [start] [end]
+  Status BitPos(const Slice& key, int32_t bit, int64_t start_offset, int64_t end_offset, int64_t* ret);
+
+  // Decrements the number stored at key by decrement
+  // return the value of key after the decrement
+  Status Decrby(const Slice& key, int64_t value, int64_t* ret);
+
+  // Increments the number stored at key by increment.
+  // If the key does not exist, it is set to 0 before performing the operation
+  Status Incrby(const Slice& key, int64_t value, int64_t* ret, int64_t* expired_timestamp_millsec);
+
+  // Increment the string representing a floating point number
+  // stored at key by the specified increment.
+  Status Incrbyfloat(const Slice& key, const Slice& value, std::string* ret, int64_t* expired_timestamp_sec);
+
+  // Set key to hold the string value and set key to timeout after a given
+  // number of milliseconds
+  Status Setex(const Slice& key, const Slice& value, int64_t ttl_millsec);
+
+  // Returns the length of the string value stored at key. An error
+  // is returned when key holds a non-string value.
+  Status Strlen(const Slice& key, int32_t* len);
+
+  // PKSETEXAT has the same effect and semantic as SETEX, but instead of
+  // specifying the number of seconds representing the TTL (time to live), it
+  // takes an absolute Unix timestamp in milliseconds. A timestamp in the
+  // past will delete the key immediately.
+  Status PKSetexAt(const Slice& key, const Slice& value, int64_t time_stamp_millsec_);
+
+  // Hashes Commands
+
+  // Sets field in the hash stored at key to value. If key does not exist, a new
+  // key holding a hash is created. If field already exists in the hash, it is
+  // overwritten.
+  Status HSet(const Slice& key, const Slice& field, const Slice& value, int32_t* res);
+
+  // Returns the value associated with field in the hash stored at key.
+  // the value associated with field, or nil when field is not present in the
+  // hash or key does not exist.
+  Status HGet(const Slice& key, const Slice& field, std::string* value);
+
+  // Sets the specified fields to their respective values in the hash stored at
+  // key. This command overwrites any specified fields already existing in the
+  // hash. If key does not exist, a new key holding a hash is created.
+  Status HMSet(const Slice& key, const std::vector<FieldValue>& fvs);
+
+  // Returns the values associated with the specified fields in the hash stored
+  // at key.
+  // For every field that does not exist in the hash, a nil value is returned.
+  // Because non-existing keys are treated as empty hashes, running HMGET
+  // against a non-existing key will return a list of nil values.
+  Status HMGet(const Slice& key, const std::vector<std::string>& fields, std::vector<ValueStatus>* vss);
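+  // Hash basics sketch (illustrative names):
+  //
+  //   int32_t res = 0;
+  //   st.HSet("myhash", "f1", "v1", &res);
+  //   std::string v;
+  //   st.HGet("myhash", "f1", &v);  // v == "v1"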
+  // Returns all fields and values of the hash stored at key. In the returned
+  // value, every field name is followed by its value, so the length of the
+  // reply is twice the size of the hash.
+  Status HGetall(const Slice& key, std::vector<FieldValue>* fvs);
+
+  Status HGetallWithTTL(const Slice& key, std::vector<FieldValue>* fvs, int64_t* ttl_millsec);
+
+  // Returns all field names in the hash stored at key.
+  Status HKeys(const Slice& key, std::vector<std::string>* fields);
+
+  // Returns all values in the hash stored at key.
+  Status HVals(const Slice& key, std::vector<std::string>* values);
+
+  // Sets field in the hash stored at key to value, only if field does not yet
+  // exist. If key does not exist, a new key holding a hash is created. If field
+  // already exists, this operation has no effect.
+  Status HSetnx(const Slice& key, const Slice& field, const Slice& value, int32_t* ret);
+
+  // Returns the number of fields contained in the hash stored at key.
+  // Return 0 when key does not exist.
+  Status HLen(const Slice& key, int32_t* ret);
+
+  // Returns the string length of the value associated with field in the hash
+  // stored at key. If the key or the field do not exist, 0 is returned.
+  Status HStrlen(const Slice& key, const Slice& field, int32_t* len);
+
+  // Returns if field is an existing field in the hash stored at key.
+  // Return Status::Ok() if the hash contains field.
+  // Return Status::NotFound() if the hash does not contain field,
+  // or key does not exist.
+  Status HExists(const Slice& key, const Slice& field);
+
+  // Increments the number stored at field in the hash stored at key by
+  // increment. If key does not exist, a new key holding a hash is created. If
+  // field does not exist the value is set to 0 before the operation is
+  // performed.
+  Status HIncrby(const Slice& key, const Slice& field, int64_t value, int64_t* ret);
+
+  // Increment the specified field of a hash stored at key, and representing a
+  // floating point number, by the specified increment. If the increment value
+  // is negative, the result is to have the hash field value decremented instead
+  // of incremented. If the field does not exist, it is set to 0 before
+  // performing the operation. An error is returned if one of the following
+  // conditions occur:
+  //
+  // The field contains a value of the wrong type (not a string).
+  // The current field content or the specified increment are not parsable as a
+  // double precision floating point number.
+  Status HIncrbyfloat(const Slice& key, const Slice& field, const Slice& by, std::string* new_value);
+
+  // Removes the specified fields from the hash stored at key. Specified fields
+  // that do not exist within this hash are ignored. If key does not exist, it
+  // is treated as an empty hash and this command returns 0.
+  Status HDel(const Slice& key, const std::vector<std::string>& fields, int32_t* ret);
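+  // HIncrby sketch: missing fields start at 0 (illustrative):
+  //
+  //   int64_t n = 0;
+  //   st.HIncrby("myhash", "counter", 5, &n);   // n == 5
+  //   st.HIncrby("myhash", "counter", -2, &n);  // n == 3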
+  // See SCAN for HSCAN documentation.
+  Status HScan(const Slice& key, int64_t cursor, const std::string& pattern, int64_t count,
+               std::vector<FieldValue>* field_values, int64_t* next_cursor);
+
+  // Iterate over a Hash table of fields
+  // return next_field that the user needs to use as the start_field argument
+  // in the next call
+  Status HScanx(const Slice& key, const std::string& start_field, const std::string& pattern, int64_t count,
+                std::vector<FieldValue>* field_values, std::string* next_field);
+
+  // Iterate over a Hash table of fields by specified range
+  // return next_field that the user needs to use as the start_field argument
+  // in the next call
+  Status PKHScanRange(const Slice& key, const Slice& field_start, const std::string& field_end, const Slice& pattern,
+                      int32_t limit, std::vector<FieldValue>* field_values, std::string* next_field);
+
+  // Apart from the reversed ordering, PKHRSCANRANGE is similar to PKHScanRange
+  Status PKHRScanRange(const Slice& key, const Slice& field_start, const std::string& field_end, const Slice& pattern,
+                       int32_t limit, std::vector<FieldValue>* field_values, std::string* next_field);
+
+  // Sets Commands
+
+  // Add the specified members to the set stored at key. Specified members that
+  // are already a member of this set are ignored. If key does not exist, a new
+  // set is created before adding the specified members.
+  Status SAdd(const Slice& key, const std::vector<std::string>& members, int32_t* ret);
+
+  // Returns the set cardinality (number of elements) of the set stored at key.
+  Status SCard(const Slice& key, int32_t* ret);
+
+  // Returns the members of the set resulting from the difference between the
+  // first set and all the successive sets.
+  //
+  // For example:
+  // key1 = {a, b, c, d}
+  // key2 = {c}
+  // key3 = {a, c, e}
+  // SDIFF key1 key2 key3 = {b, d}
+  Status SDiff(const std::vector<std::string>& keys, std::vector<std::string>* members);
+
+  // This command is equal to SDIFF, but instead of returning the resulting set,
+  // it is stored in destination.
+  // If destination already exists, it is overwritten.
+  //
+  // For example:
+  // destination = {};
+  // key1 = {a, b, c, d}
+  // key2 = {c}
+  // key3 = {a, c, e}
+  // SDIFFSTORE destination key1 key2 key3
+  // destination = {b, d}
+  Status SDiffstore(const Slice& destination, const std::vector<std::string>& keys, std::vector<std::string>& value_to_dest, int32_t* ret);
+
+  // Returns the members of the set resulting from the intersection of all the
+  // given sets.
+  //
+  // For example:
+  // key1 = {a, b, c, d}
+  // key2 = {c}
+  // key3 = {a, c, e}
+  // SINTER key1 key2 key3 = {c}
+  Status SInter(const std::vector<std::string>& keys, std::vector<std::string>* members);
+
+  // This command is equal to SINTER, but instead of returning the resulting
+  // set, it is stored in destination.
+  // If destination already exists, it is overwritten.
+  //
+  // For example:
+  // destination = {}
+  // key1 = {a, b, c, d}
+  // key2 = {a, c}
+  // key3 = {a, c, e}
+  // SINTERSTORE destination key1 key2 key3
+  // destination = {a, c}
+  Status SInterstore(const Slice& destination, const std::vector<std::string>& keys, std::vector<std::string>& value_to_dest, int32_t* ret);
+
+  // Returns if member is a member of the set stored at key.
+  Status SIsmember(const Slice& key, const Slice& member, int32_t* ret);
+
+  // Returns all the members of the set value stored at key.
+  // This has the same effect as running SINTER with one argument key.
+  Status SMembers(const Slice& key, std::vector<std::string>* members);
+
+  Status SMembersWithTTL(const Slice& key, std::vector<std::string>* members, int64_t* ttl_millsec);
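+  // Set-algebra sketch mirroring the examples in the comments above
+  // (illustrative):
+  //
+  //   int32_t n = 0;
+  //   st.SAdd("key1", {"a", "b", "c", "d"}, &n);
+  //   st.SAdd("key2", {"c"}, &n);
+  //   std::vector<std::string> diff;
+  //   st.SDiff({"key1", "key2"}, &diff);  // diff == {a, b, d}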
+  // Remove the specified members from the set stored at key. Specified members
+  // that are not a member of this set are ignored. If key does not exist, it is
+  // treated as an empty set and this command returns 0.
+  Status SRem(const Slice& key, const std::vector<std::string>& members, int32_t* ret);
+
+  // Removes and returns several random elements specified by count from the set value stored at key.
+  Status SPop(const Slice& key, std::vector<std::string>* members, int64_t count);
+
+  // When called with just the key argument, return a random element from the
+  // set value stored at key.
+  // When called with the additional count argument, return an array of count
+  // distinct elements if count is positive. If called with a negative count the
+  // behavior changes and the command is allowed to return the same element
+  // multiple times. In this case the number of returned elements is the
+  // absolute value of the specified count
+  Status SRandmember(const Slice& key, int32_t count, std::vector<std::string>* members);
+
+  // Move member from the set at source to the set at destination. This
+  // operation is atomic. In every given moment the element will appear to be a
+  // member of source or destination for other clients.
+  //
+  // If the source set does not exist or does not contain the specified element,
+  // no operation is performed and 0 is returned. Otherwise, the element is
+  // removed from the source set and added to the destination set. When the
+  // specified element already exists in the destination set, it is only removed
+  // from the source set.
+  Status SMove(const Slice& source, const Slice& destination, const Slice& member, int32_t* ret);
+
+  // Returns the members of the set resulting from the union of all the given
+  // sets.
+  //
+  // For example:
+  // key1 = {a, b, c, d}
+  // key2 = {c}
+  // key3 = {a, c, e}
+  // SUNION key1 key2 key3 = {a, b, c, d, e}
+  Status SUnion(const std::vector<std::string>& keys, std::vector<std::string>* members);
+
+  // This command is equal to SUNION, but instead of returning the resulting
+  // set, it is stored in destination.
+  // If destination already exists, it is overwritten.
+  //
+  // For example:
+  // key1 = {a, b}
+  // key2 = {c, d}
+  // key3 = {c, d, e}
+  // SUNIONSTORE destination key1 key2 key3
+  // destination = {a, b, c, d, e}
+  Status SUnionstore(const Slice& destination, const std::vector<std::string>& keys, std::vector<std::string>& value_to_dest, int32_t* ret);
+
+  // See SCAN for SSCAN documentation.
+  Status SScan(const Slice& key, int64_t cursor, const std::string& pattern, int64_t count,
+               std::vector<std::string>* members, int64_t* next_cursor);
+
+  // Lists Commands
+
+  // Insert all the specified values at the head of the list stored at key. If
+  // key does not exist, it is created as empty list before performing the push
+  // operations.
+  Status LPush(const Slice& key, const std::vector<std::string>& values, uint64_t* ret);
+
+  // Insert all the specified values at the tail of the list stored at key. If
+  // key does not exist, it is created as empty list before performing the push
+  // operation.
+  Status RPush(const Slice& key, const std::vector<std::string>& values, uint64_t* ret);
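+  // List push sketch: LPush prepends each value in turn (illustrative):
+  //
+  //   uint64_t len = 0;
+  //   st.LPush("mylist", {"c", "b", "a"}, &len);  // list is now a, b, c
+  //   st.RPush("mylist", {"d"}, &len);            // list is now a, b, c, d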
+  // Returns the specified elements of the list stored at key. The offsets start
+  // and stop are zero-based indexes, with 0 being the first element of the list
+  // (the head of the list), 1 being the next element and so on.
+  Status LRange(const Slice& key, int64_t start, int64_t stop, std::vector<std::string>* ret);
+
+  Status LRangeWithTTL(const Slice& key, int64_t start, int64_t stop, std::vector<std::string>* ret, int64_t* ttl_millsec);
+
+  // Trim an existing list so that it will contain only the specified range of
+  // elements. Both start and stop are zero-based indexes; out-of-range indexes
+  // are clamped to the list boundaries.
+  Status LTrim(const Slice& key, int64_t start, int64_t stop);
+
+  // Returns the length of the list stored at key. If key does not exist, it is
+  // interpreted as an empty list and 0 is returned. An error is returned when
+  // the value stored at key is not a list.
+  Status LLen(const Slice& key, uint64_t* len);
+
+  // Removes and returns the first elements of the list stored at key.
+  Status LPop(const Slice& key, int64_t count, std::vector<std::string>* elements);
+
+  // Removes and returns the last elements of the list stored at key.
+  Status RPop(const Slice& key, int64_t count, std::vector<std::string>* elements);
+
+  // Returns the element at index index in the list stored at key. The index is
+  // zero-based, so 0 means the first element, 1 the second element and so on.
+  // Negative indices can be used to designate elements starting at the tail of
+  // the list. Here, -1 means the last element, -2 means the penultimate and so
+  // forth.
+  Status LIndex(const Slice& key, int64_t index, std::string* element);
+
+  // Inserts value in the list stored at key either before or after the
+  // reference value pivot.
+  // When key does not exist, it is considered an empty list and no operation is
+  // performed.
+  // An error is returned when key exists but does not hold a list value.
+  Status LInsert(const Slice& key, const BeforeOrAfter& before_or_after, const std::string& pivot,
+                 const std::string& value, int64_t* ret);
+
+  // Inserts value at the head of the list stored at key, only if key already
+  // exists and holds a list. In contrary to LPUSH, no operation will be
+  // performed when key does not yet exist.
+  Status LPushx(const Slice& key, const std::vector<std::string>& values, uint64_t* len);
+
+  // Inserts value at the tail of the list stored at key, only if key already
+  // exists and holds a list. In contrary to RPUSH, no operation will be
+  // performed when key does not yet exist.
+  Status RPushx(const Slice& key, const std::vector<std::string>& values, uint64_t* len);
+
+  // Removes the first count occurrences of elements equal to value from the
+  // list stored at key. The count argument influences the operation in the
+  // following ways:
+  //
+  // count > 0: Remove elements equal to value moving from head to tail.
+  // count < 0: Remove elements equal to value moving from tail to head.
+  // count = 0: Remove all elements equal to value.
+  // For example, LREM list -2 "hello" will remove the last two occurrences of
+  // "hello" in the list stored at list.
+  //
+  // Note that non-existing keys are treated like empty lists, so when key does
+  // not exist, the command will always return 0.
+  Status LRem(const Slice& key, int64_t count, const Slice& value, uint64_t* ret);
+
+  // Sets the list element at index to value. For more information on the index
+  // argument, see LINDEX.
+  //
+  // An error is returned for out of range indexes.
+  Status LSet(const Slice& key, int64_t index, const Slice& value);
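+  // LRem count-sign sketch, following the comment above (illustrative):
+  //
+  //   // list = hello, foo, hello, foo, hello
+  //   uint64_t removed = 0;
+  //   st.LRem("list", -2, "hello", &removed);
+  //   // removed == 2; list = hello, foo, foo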
+  // Atomically returns and removes the last element (tail) of the list stored
+  // at source, and pushes the element at the first element (head) of the list
+  // stored at destination.
+  //
+  // For example: consider source holding the list a,b,c, and destination
+  // holding the list x,y,z. Executing RPOPLPUSH results in source holding a,b
+  // and destination holding c,x,y,z.
+  //
+  // If source does not exist, the value nil is returned and no operation is
+  // performed. If source and destination are the same, the operation is
+  // equivalent to removing the last element from the list and pushing it as
+  // first element of the list, so it can be considered as a list rotation
+  // command.
+  Status RPoplpush(const Slice& source, const Slice& destination, std::string* element);
+
+  // Zsets Commands
+
+  // Pops up to count members with the highest scores from the sorted set and
+  // returns them in score_members. If the sorted set holds fewer than count
+  // members, all of them are popped. Among members with equal scores, the
+  // lexicographically greater elements are popped first.
+  Status ZPopMax(const Slice& key, int64_t count, std::vector<ScoreMember>* score_members);
+
+  // Pops up to count members with the lowest scores from the sorted set and
+  // returns them in score_members. If the sorted set holds fewer than count
+  // members, all of them are popped. Among members with equal scores, the
+  // lexicographically smaller elements are popped first.
+  Status ZPopMin(const Slice& key, int64_t count, std::vector<ScoreMember>* score_members);
+
+  // Adds all the specified members with the specified scores to the sorted set
+  // stored at key. It is possible to specify multiple score / member pairs. If
+  // a specified member is already a member of the sorted set, the score is
+  // updated and the element reinserted at the right position to ensure the
+  // correct ordering.
+  //
+  // If key does not exist, a new sorted set with the specified members as sole
+  // members is created, like if the sorted set was empty. If the key exists but
+  // does not hold a sorted set, an error is returned.
+  // The score values should be the string representation of a double precision
+  // floating point number. +inf and -inf values are valid values as well.
+  Status ZAdd(const Slice& key, const std::vector<ScoreMember>& score_members, int32_t* ret);
+
+  // Returns the sorted set cardinality (number of elements) of the sorted set
+  // stored at key.
+  Status ZCard(const Slice& key, int32_t* ret);
+
+  // Returns the number of elements in the sorted set at key with a score
+  // between min and max.
+  //
+  // The min and max arguments have the same semantic as described for
+  // ZRANGEBYSCORE.
+  //
+  // Note: the command has a complexity of just O(log(N)) because it uses
+  // elements ranks (see ZRANK) to get an idea of the range. Because of this
+  // there is no need to do a work proportional to the size of the range.
+  Status ZCount(const Slice& key, double min, double max, bool left_close, bool right_close, int32_t* ret);
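+  // Sorted-set sketch using the ScoreMember struct above (illustrative):
+  //
+  //   int32_t n = 0;
+  //   st.ZAdd("myzset", {{1.0, "a"}, {2.0, "b"}, {3.0, "c"}}, &n);
+  //   st.ZCount("myzset", 1.0, 2.0, true /*left closed*/, true /*right closed*/, &n);  // n == 2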
+  // Increments the score of member in the sorted set stored at key by
+  // increment. If member does not exist in the sorted set, it is added with
+  // increment as its score (as if its previous score was 0.0). If key does not
+  // exist, a new sorted set with the specified member as its sole member is
+  // created.
+  //
+  // An error is returned when key exists but does not hold a sorted set.
+  //
+  // The score value should be the string representation of a numeric value, and
+  // accepts double precision floating point numbers. It is possible to provide
+  // a negative value to decrement the score.
+  Status ZIncrby(const Slice& key, const Slice& member, double increment, double* ret);
+
+  // Returns the specified range of elements in the sorted set stored at key.
+  // The elements are considered to be ordered from the lowest to the highest
+  // score. Lexicographical order is used for elements with equal score.
+  //
+  // See ZREVRANGE when you need the elements ordered from highest to lowest
+  // score (and descending lexicographical order for elements with equal score).
+  //
+  // Both start and stop are zero-based indexes, where 0 is the first element, 1
+  // is the next element and so on. They can also be negative numbers indicating
+  // offsets from the end of the sorted set, with -1 being the last element of
+  // the sorted set, -2 the penultimate element and so on.
+  //
+  // start and stop are inclusive ranges, so for example ZRANGE myzset 0 1 will
+  // return both the first and the second element of the sorted set.
+  //
+  // Out of range indexes will not produce an error. If start is larger than the
+  // largest index in the sorted set, or start > stop, an empty list is
+  // returned. If stop is larger than the end of the sorted set Redis will treat
+  // it like it is the last element of the sorted set.
+  //
+  // It is possible to pass the WITHSCORES option in order to return the scores
+  // of the elements together with the elements. The returned list will contain
+  // value1,score1,...,valueN,scoreN instead of value1,...,valueN. Client
+  // libraries are free to return a more appropriate data type (suggestion: an
+  // array with (value, score) arrays/tuples).
+  Status ZRange(const Slice& key, int32_t start, int32_t stop, std::vector<ScoreMember>* score_members);
+
+  Status ZRangeWithTTL(const Slice& key, int32_t start, int32_t stop, std::vector<ScoreMember>* score_members,
+                       int64_t* ttl_millsec);
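+  // ZRange index sketch: inclusive, zero-based, negatives count from the end
+  // (illustrative):
+  //
+  //   std::vector<storage::ScoreMember> sms;
+  //   st.ZRange("myzset", 0, -1, &sms);  // the whole set, lowest score first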
+  // Returns all the elements in the sorted set at key with a score between min
+  // and max (including elements with score equal to min or max). The elements
+  // are considered to be ordered from low to high scores.
+  //
+  // The elements having the same score are returned in lexicographical order
+  // (this follows from a property of the sorted set implementation in Redis and
+  // does not involve further computation).
+  //
+  // The optional LIMIT argument can be used to only get a range of the matching
+  // elements (similar to SELECT LIMIT offset, count in SQL). Keep in mind that
+  // if offset is large, the sorted set needs to be traversed for offset
+  // elements before getting to the elements to return, which can add up to O(N)
+  // time complexity.
+  //
+  // The optional WITHSCORES argument makes the command return both the element
+  // and its score, instead of the element alone. This option is available since
+  // Redis 2.0.
+  //
+  // Exclusive intervals and infinity
+  // min and max can be -inf and +inf, so that you are not required to know the
+  // highest or lowest score in the sorted set to get all elements from or up to
+  // a certain score.
+  //
+  // By default, the interval specified by min and max is closed (inclusive). It
+  // is possible to specify an open interval (exclusive) by prefixing the score
+  // with the character (. For example:
+  //
+  // ZRANGEBYSCORE zset (1 5
+  // Will return all elements with 1 < score <= 5 while:
+  //
+  // ZRANGEBYSCORE zset (5 (10
+  // Will return all the elements with 5 < score < 10 (5 and 10 excluded).
+  //
+  // Return value
+  // Array reply: list of elements in the specified score range (optionally with
+  // their scores).
+  Status ZRangebyscore(const Slice& key, double min, double max, bool left_close, bool right_close,
+                       std::vector<ScoreMember>* score_members);
+
+  // Returns all the elements in the sorted set at key with a score between min
+  // and max (including elements with score equal to min or max). The elements
+  // are considered to be ordered from low to high scores.
+  //
+  // The elements having the same score are returned in lexicographical order
+  // (this follows from a property of the sorted set implementation in Redis and
+  // does not involve further computation).
+  //
+  // The optional LIMIT argument can be used to only get a range of the matching
+  // elements (similar to SELECT LIMIT offset, count in SQL). Keep in mind that
+  // if offset is large, the sorted set needs to be traversed for offset
+  // elements before getting to the elements to return, which can add up to O(N)
+  // time complexity.
+  //
+  // The optional WITHSCORES argument makes the command return both the element
+  // and its score, instead of the element alone. This option is available since
+  // Redis 2.0.
+  //
+  // Exclusive intervals and infinity
+  // min and max can be -inf and +inf, so that you are not required to know the
+  // highest or lowest score in the sorted set to get all elements from or up to
+  // a certain score.
+  //
+  // By default, the interval specified by min and max is closed (inclusive). It
+  // is possible to specify an open interval (exclusive) by prefixing the score
+  // with the character (. For example:
+  //
+  // ZRANGEBYSCORE zset (1 5
+  // Will return all elements with 1 < score <= 5 while:
+  //
+  // ZRANGEBYSCORE zset (5 (10
+  // Will return all the elements with 5 < score < 10 (5 and 10 excluded).
+  //
+  // Return value
+  // Array reply: list of elements in the specified score range (optionally with
+  // their scores).
+  Status ZRangebyscore(const Slice& key, double min, double max, bool left_close, bool right_close, int64_t count,
+                       int64_t offset, std::vector<ScoreMember>* score_members);
+
+  // Returns the rank of member in the sorted set stored at key, with the scores
+  // ordered from low to high. The rank (or index) is 0-based, which means that
+  // the member with the lowest score has rank 0.
+  //
+  // Use ZREVRANK to get the rank of an element with the scores ordered from
+  // high to low.
+  Status ZRank(const Slice& key, const Slice& member, int32_t* rank);
+
+  // Removes the specified members from the sorted set stored at key. Non
+  // existing members are ignored.
+  //
+  // An error is returned when key exists and does not hold a sorted set.
+  Status ZRem(const Slice& key, const std::vector<std::string>& members, int32_t* ret);
+
+  // Removes all elements in the sorted set stored at key with rank between
+  // start and stop. Both start and stop are 0-based indexes with 0 being the
+  // element with the lowest score. These indexes can be negative numbers, where
+  // they indicate offsets starting at the element with the highest score. For
+  // example: -1 is the element with the highest score, -2 the element with the
+  // second highest score and so forth.
+  Status ZRemrangebyrank(const Slice& key, int32_t start, int32_t stop, int32_t* ret);
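+  // Interval-flag sketch: left_close/right_close encode the '(' prefix from
+  // ZRANGEBYSCORE (illustrative):
+  //
+  //   std::vector<storage::ScoreMember> sms;
+  //   // ZRANGEBYSCORE myzset (1 5  =>  1 < score <= 5
+  //   st.ZRangebyscore("myzset", 1.0, 5.0, false /*left open*/, true /*right closed*/, &sms);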
+  // Removes all elements in the sorted set stored at key with a score between
+  // min and max (inclusive).
+  Status ZRemrangebyscore(const Slice& key, double min, double max, bool left_close, bool right_close, int32_t* ret);
+
+  // Returns the specified range of elements in the sorted set stored at key.
+  // The elements are considered to be ordered from the highest to the lowest
+  // score. Descending lexicographical order is used for elements with equal
+  // score.
+  //
+  // Apart from the reversed ordering, ZREVRANGE is similar to ZRANGE.
+  Status ZRevrange(const Slice& key, int32_t start, int32_t stop, std::vector<ScoreMember>* score_members);
+
+  // Returns all the elements in the sorted set at key with a score between max
+  // and min (including elements with score equal to max or min). In contrary to
+  // the default ordering of sorted sets, for this command the elements are
+  // considered to be ordered from high to low scores.
+  //
+  // The elements having the same score are returned in reverse lexicographical
+  // order.
+  //
+  // Apart from the reversed ordering, ZREVRANGEBYSCORE is similar to
+  // ZRANGEBYSCORE.
+  Status ZRevrangebyscore(const Slice& key, double min, double max, bool left_close, bool right_close,
+                          std::vector<ScoreMember>* score_members);
+
+  // Returns all the elements in the sorted set at key with a score between max
+  // and min (including elements with score equal to max or min). In contrary to
+  // the default ordering of sorted sets, for this command the elements are
+  // considered to be ordered from high to low scores.
+  //
+  // The elements having the same score are returned in reverse lexicographical
+  // order.
+  //
+  // Apart from the reversed ordering, ZREVRANGEBYSCORE is similar to
+  // ZRANGEBYSCORE.
+  Status ZRevrangebyscore(const Slice& key, double min, double max, bool left_close, bool right_close, int64_t count,
+                          int64_t offset, std::vector<ScoreMember>* score_members);
+
+  // Returns the rank of member in the sorted set stored at key, with the scores
+  // ordered from high to low. The rank (or index) is 0-based, which means that
+  // the member with the highest score has rank 0.
+  Status ZRevrank(const Slice& key, const Slice& member, int32_t* rank);
+
+  // Returns the score of member in the sorted set at key.
+  //
+  // If member does not exist in the sorted set, or key does not exist, nil is
+  // returned.
+  Status ZScore(const Slice& key, const Slice& member, double* ret);
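+  // Reverse-order sketch (illustrative):
+  //
+  //   int32_t rank = 0;
+  //   st.ZRevrank("myzset", "c", &rank);  // rank 0 == highest score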
+  // Computes the union of numkeys sorted sets given by the specified keys, and
+  // stores the result in destination. It is mandatory to provide the number of
+  // input keys (numkeys) before passing the input keys and the other (optional)
+  // arguments.
+  //
+  // By default, the resulting score of an element is the sum of its scores in
+  // the sorted sets where it exists.
+  //
+  // Using the WEIGHTS option, it is possible to specify a multiplication factor
+  // for each input sorted set. This means that the score of every element in
+  // every input sorted set is multiplied by this factor before being passed to
+  // the aggregation function. When WEIGHTS is not given, the multiplication
+  // factors default to 1.
+  //
+  // With the AGGREGATE option, it is possible to specify how the results of the
+  // union are aggregated. This option defaults to SUM, where the score of an
+  // element is summed across the inputs where it exists. When this option is
+  // set to either MIN or MAX, the resulting set will contain the minimum or
+  // maximum score of an element across the inputs where it exists.
+  //
+  // If destination already exists, it is overwritten.
+  Status ZUnionstore(const Slice& destination, const std::vector<std::string>& keys, const std::vector<double>& weights,
+                     AGGREGATE agg, std::map<std::string, double>& value_to_dest, int32_t* ret);
+
+  // Computes the intersection of numkeys sorted sets given by the specified
+  // keys, and stores the result in destination. It is mandatory to provide the
+  // number of input keys (numkeys) before passing the input keys and the other
+  // (optional) arguments.
+  //
+  // By default, the resulting score of an element is the sum of its scores in
+  // the sorted sets where it exists. Because intersection requires an element
+  // to be a member of every given sorted set, this results in the score of
+  // every element in the resulting sorted set to be equal to the number of
+  // input sorted sets.
+  //
+  // For a description of the WEIGHTS and AGGREGATE options, see ZUNIONSTORE.
+  //
+  // If destination already exists, it is overwritten.
+  Status ZInterstore(const Slice& destination, const std::vector<std::string>& keys, const std::vector<double>& weights,
+                     AGGREGATE agg, std::vector<ScoreMember>& value_to_dest, int32_t* ret);
+
+  // When all the elements in a sorted set are inserted with the same score, in
+  // order to force lexicographical ordering, this command returns all the
+  // elements in the sorted set at key with a value between min and max.
+  //
+  // If the elements in the sorted set have different scores, the returned
+  // elements are unspecified.
+  //
+  // The elements are considered to be ordered from lower to higher strings as
+  // compared byte-by-byte using the memcmp() C function. Longer strings are
+  // considered greater than shorter strings if the common part is identical.
+  //
+  // The optional LIMIT argument can be used to only get a range of the matching
+  // elements (similar to SELECT LIMIT offset, count in SQL). Keep in mind that
+  // if offset is large, the sorted set needs to be traversed for offset
+  // elements before getting to the elements to return, which can add up to O(N)
+  // time complexity.
+  Status ZRangebylex(const Slice& key, const Slice& min, const Slice& max, bool left_close, bool right_close,
+                     std::vector<std::string>* members);
+
+  // When all the elements in a sorted set are inserted with the same score, in
+  // order to force lexicographical ordering, this command returns the number of
+  // elements in the sorted set at key with a value between min and max.
+  //
+  // The min and max arguments have the same meaning as described for
+  // ZRANGEBYLEX.
+  //
+  // Note: the command has a complexity of just O(log(N)) because it uses
+  // elements ranks (see ZRANK) to get an idea of the range. Because of this
+  // there is no need to do a work proportional to the size of the range.
+  Status ZLexcount(const Slice& key, const Slice& min, const Slice& max, bool left_close, bool right_close,
+                   int32_t* ret);
+
+  // When all the elements in a sorted set are inserted with the same score, in
+  // order to force lexicographical ordering, this command removes all elements
+  // in the sorted set stored at key between the lexicographical range specified
+  // by min and max.
+  //
+  // The meaning of min and max are the same of the ZRANGEBYLEX command.
+  // Similarly, this command actually returns the same elements that ZRANGEBYLEX
+  // would return if called with the same min and max arguments.
+  Status ZRemrangebylex(const Slice& key, const Slice& min, const Slice& max, bool left_close, bool right_close,
+                        int32_t* ret);
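+  // ZUNIONSTORE sketch with weights and SUM aggregation; the key names and
+  // weights are illustrative:
+  //
+  //   int32_t n = 0;
+  //   std::map<std::string, double> dest;
+  //   st.ZUnionstore("out", {"zset1", "zset2"}, {2.0, 3.0}, storage::SUM, dest, &n);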
+  // See SCAN for ZSCAN documentation.
+  Status ZScan(const Slice& key, int64_t cursor, const std::string& pattern, int64_t count,
+               std::vector<ScoreMember>* score_members, int64_t* next_cursor);
+
+  Status XAdd(const Slice& key, const std::string& serialized_message, StreamAddTrimArgs& args);
+  Status XDel(const Slice& key, const std::vector<streamID>& ids, int32_t& ret);
+  Status XTrim(const Slice& key, StreamAddTrimArgs& args, int32_t& count);
+  Status XRange(const Slice& key, const StreamScanArgs& args, std::vector<IdMessage>& id_messages);
+  Status XRevrange(const Slice& key, const StreamScanArgs& args, std::vector<IdMessage>& id_messages);
+  Status XLen(const Slice& key, int32_t& len);
+  Status XRead(const StreamReadGroupReadArgs& args, std::vector<std::vector<IdMessage>>& results,
+               std::vector<std::string>& reserved_keys);
+  Status XInfo(const Slice& key, StreamInfoResult& result);
+  // Keys Commands
+
+  // Note:
+  // If any error happens, check type_status for
+  // the error message
+
+  // Set a timeout on key, in milliseconds
+  // return -1 if an operation exception occurred in the database
+  // return >=0 on success
+  int32_t Expire(const Slice& key, int64_t ttl_millsec);
+
+  // Removes the specified keys
+  // return -1 if an operation exception occurred in the database
+  // return >=0 the number of keys that were removed
+  int64_t Del(const std::vector<std::string>& keys);
+
+  // Iterate over a collection of elements
+  // return an updated cursor that the user needs to use as the cursor argument
+  // in the next call
+  int64_t Scan(const DataType& dtype, int64_t cursor, const std::string& pattern, int64_t count,
+               std::vector<std::string>* keys);
+
+  // Iterate over a collection of elements by specified range
+  // return a next_key that the user needs to use as the key_start argument
+  // in the next call
+  Status PKScanRange(const DataType& data_type, const Slice& key_start, const Slice& key_end, const Slice& pattern,
+                     int32_t limit, std::vector<std::string>* keys, std::vector<KeyValue>* kvs, std::string* next_key);
+
+  // Apart from the reversed ordering, PKRSCANRANGE is similar to PKScanRange
+  Status PKRScanRange(const DataType& data_type, const Slice& key_start, const Slice& key_end, const Slice& pattern,
+                      int32_t limit, std::vector<std::string>* keys, std::vector<KeyValue>* kvs, std::string* next_key);
+
+  // Traverses the database of the specified type, removing the Key that matches
+  // the pattern
+  Status PKPatternMatchDelWithRemoveKeys(const std::string& pattern, int64_t* ret, std::vector<std::string>* remove_keys, const int64_t& max_count);
+
+  // Iterate over a collection of elements
+  // return next_key that the user needs to use as the start_key argument
+  // in the next call
+  Status Scanx(const DataType& data_type, const std::string& start_key, const std::string& pattern, int64_t count,
+               std::vector<std::string>* keys, std::string* next_key);
+
+  // Returns if key exists.
+  // return -1 if an operation exception occurred in the database
+  // return >=0 the number of keys existing
+  int64_t Exists(const std::vector<std::string>& keys);
+
+  // Returns the number of types under which the key exists
+  // the out-param type_status reports the status for every type
+  int64_t IsExist(const Slice& key, std::map<DataType, Status>* type_status);
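+  // Cursor-scan sketch: iterate until the returned cursor is 0 (illustrative):
+  //
+  //   int64_t cursor = 0;
+  //   std::vector<std::string> keys;
+  //   do {
+  //     keys.clear();
+  //     cursor = st.Scan(storage::DataType::kAll, cursor, "*", 100, &keys);
+  //   } while (cursor != 0);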
+  // EXPIREAT has the same effect and semantic as EXPIRE, but instead of
+  // specifying the number of seconds representing the TTL (time to live), it
+  // takes an absolute Unix timestamp (milliseconds since January 1, 1970). A
+  // timestamp in the past will delete the key immediately.
+  // return -1 if an operation exception occurred in the database
+  // return 0 if key does not exist
+  // return >=1 if the timeout was set
+  int32_t Expireat(const Slice& key, int64_t timestamp_millsec);
+
+  // Remove the existing timeout on key, turning the key from volatile (a key
+  // with an expire set) to persistent (a key that will never expire as no
+  // timeout is associated).
+  // return -1 if an operation exception occurred in the database
+  // return 0 if key does not exist or does not have an associated timeout
+  // return >=1 if the timeout was removed
+  int32_t Persist(const Slice& key);
+
+  // Returns the remaining time to live of a key that has a timeout.
+  // return -3 if an operation exception occurred in the database
+  // return -2 if the key does not exist
+  // return -1 if the key exists but has no associated expire
+  // return > 0 TTL in seconds
+  int64_t TTL(const Slice& key);
+
+  // Returns the remaining time to live of a key that has a timeout.
+  // return -3 if an operation exception occurred in the database
+  // return -2 if the key does not exist
+  // return -1 if the key exists but has no associated expire
+  // return > 0 TTL in milliseconds
+  int64_t PTTL(const Slice& key);
+
+  // Returns the data type of the key
+  Status GetType(const std::string& key, enum DataType& type);
+
+  // Returns all data types of the key
+  Status Type(const std::string& key, std::vector<std::string>& types);
+
+  Status Keys(const DataType& data_type, const std::string& pattern, std::vector<std::string>* keys);
+
+  // Dynamically enable/disable the WAL
+  void DisableWal(const bool is_wal_disable);
+
+  // Iterate through all the data in the database.
+  void ScanDatabase(const DataType& type);
+
+  // HyperLogLog
+  enum {
+    kMaxKeys = 255,
+    kPrecision = 17,
+  };
+  // Adds all the element arguments to the HyperLogLog data structure stored
+  // at the variable name specified as first argument.
+  Status PfAdd(const Slice& key, const std::vector<std::string>& values, bool* update);
+
+  // When called with a single key, returns the approximated cardinality
+  // computed by the HyperLogLog data structure stored at the specified
+  // variable, which is 0 if the variable does not exist.
+  Status PfCount(const std::vector<std::string>& keys, int64_t* result);
+
+  // Merge multiple HyperLogLog values into a unique value that will
+  // approximate the cardinality of the union of the observed Sets of the source
+  // HyperLogLog structures.
+  Status PfMerge(const std::vector<std::string>& keys, std::string& value_to_dest);
+
+  // Admin Commands
+  Status StartBGThread();
+  Status RunBGTask();
+  Status AddBGTask(const BGTask& bg_task);
+
+  Status Compact(const DataType& type, bool sync = false);
+  Status CompactRange(const DataType& type, const std::string& start, const std::string& end, bool sync = false);
+  Status DoCompactRange(const DataType& type, const std::string& start, const std::string& end);
+  Status DoCompactSpecificKey(const DataType& type, const std::string& key);
+  /**
+   * LongestNotCompactionSstCompact will execute the compact command for any cf in the given type
+   * @param type. data type like `kStrings`
+   * @param sync. if true, the call blocks until the compaction finishes
+   * @return Status
+   */
+  Status LongestNotCompactionSstCompact(const DataType& type, bool sync = false);
+
+  Status SetMaxCacheStatisticKeys(uint32_t max_cache_statistic_keys);
+  Status SetSmallCompactionThreshold(uint32_t small_compaction_threshold);
+  Status SetSmallCompactionDurationThreshold(uint32_t small_compaction_duration_threshold);
+
+  std::string GetCurrentTaskType();
+  Status GetUsage(const std::string& property, uint64_t* result);
+  Status GetUsage(const std::string& property, std::map<std::string, uint64_t>* type_result);
+  uint64_t GetProperty(const std::string& property);
+
+  Status GetKeyNum(std::vector<KeyInfo>* key_infos);
+  Status StopScanKeyNum();
+
+  rocksdb::DB* GetDBByIndex(int index);
+
+  Status SetOptions(const OptionType& option_type, const std::string& db_type,
+                    const std::unordered_map<std::string, std::string>& options);
+  void SetCompactRangeOptions(const bool is_canceled);
+  Status EnableDymayticOptions(const OptionType& option_type,
+                               const std::string& db_type, const std::unordered_map<std::string, std::string>& options);
+  Status EnableAutoCompaction(const OptionType& option_type,
+                              const std::string& db_type, const std::unordered_map<std::string, std::string>& options);
+  void GetRocksDBInfo(std::string& info);
+
+  const StorageOptions& GetStorageOptions();
+  // get hash cf handle in insts_[idx]
+  std::vector<rocksdb::ColumnFamilyHandle*> GetHashCFHandles(const int idx);
+  // get DefaultWriteOptions in insts_[idx]
+  rocksdb::WriteOptions GetDefaultWriteOptions(const int idx) const;
+
+ private:
+  std::vector<std::unique_ptr<Redis>> insts_;
+  std::unique_ptr<SlotIndexer> slot_indexer_;
+  std::atomic<bool> is_opened_ = {false};
+  int db_instance_num_ = 3;
+  int slot_num_ = 1024;
+  bool is_classic_mode_ = true;
+  StorageOptions storage_options_;
+
+  std::unique_ptr<LRUCache<std::string, std::string>> cursors_store_;
+
+  // Storage starts a background thread for compaction tasks
+  pthread_t bg_tasks_thread_id_ = 0;
+  pstd::Mutex bg_tasks_mutex_;
+  pstd::CondVar bg_tasks_cond_var_;
+  std::queue<BGTask> bg_tasks_queue_;
+
+  std::atomic<int> current_task_type_ = {kNone};
+  std::atomic<bool> bg_tasks_should_exit_ = {false};
+
+  // For scanning keys in the database
+  std::atomic<bool> scan_keynum_exit_ = {false};
+  Status MGetWithTTL(const Slice& key, std::string* value, int64_t* ttl_millsec);
+};
+
+}  // namespace storage
+#endif  // INCLUDE_STORAGE_STORAGE_H_
diff --git a/tools/pika_migrate/src/storage/include/storage/storage_define.h b/tools/pika_migrate/src/storage/include/storage/storage_define.h
new file mode 100644
index 0000000000..59fa44c495
--- /dev/null
+++ b/tools/pika_migrate/src/storage/include/storage/storage_define.h
@@ -0,0 +1,135 @@
+// Copyright (c) 2023-present The storage Authors. All rights reserved.
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree. An additional grant
+// of patent rights can be found in the PATENTS file in the same directory.
+
+#ifndef STORAGE_DEFINE_H_
+#define STORAGE_DEFINE_H_
+
+#include <cstring>
+#include <string>
+#include "stdint.h"
+
+#include "rocksdb/slice.h"
+
+namespace storage {
+using Slice = rocksdb::Slice;
+
+// remove 'unused parameter' warning
+#define UNUSED(expr) \
+  do {               \
+    (void)(expr);    \
+  } while (0)
+
+const int kPrefixReserveLength = 8;
+const int kVersionLength = 8;
+const int kScoreLength = 8;
+const int kSuffixReserveLength = 16;
+const int kListValueIndexLength = 16;
+/*
+ * Used to store a fixed-size value for the Type field
+ */
+const int kTypeLength = 1;
+const int kTimestampLength = 8;
+
+/*
+ * kMetaCF is used to store the metadata of all types of
+ * data and all information of type string
+ */
+enum ColumnFamilyIndex {
+  kMetaCF = 0,
+  kHashesDataCF = 1,
+  kSetsDataCF = 2,
+  kListsDataCF = 3,
+  kZsetsDataCF = 4,
+  kZsetsScoreCF = 5,
+  kStreamsDataCF = 6,
+};
+
+const static char kNeedTransformCharacter = '\u0000';
+const static char* kEncodedTransformCharacter = "\u0000\u0001";
+const static char* kEncodedKeyDelim = "\u0000\u0000";
+const static int kEncodedKeyDelimSize = 2;
+
+inline char* EncodeUserKey(const Slice& user_key, char* dst_ptr, size_t nzero) {
+  // no \u0000 exists in user_key, memcpy user_key directly.
+  if (nzero == 0) {
+    memcpy(dst_ptr, user_key.data(), user_key.size());
+    dst_ptr += user_key.size();
+    memcpy(dst_ptr, kEncodedKeyDelim, 2);
+    dst_ptr += 2;
+    return dst_ptr;
+  }
+
+  // \u0000 exists in user_key, iterate and replace.
+  size_t pos = 0;
+  const char* user_data = user_key.data();
+  for (size_t i = 0; i < user_key.size(); i++) {
+    if (user_data[i] == kNeedTransformCharacter) {
+      size_t sub_len = i - pos;
+      if (sub_len != 0) {
+        memcpy(dst_ptr, user_data + pos, sub_len);
+        dst_ptr += sub_len;
+      }
+      memcpy(dst_ptr, kEncodedTransformCharacter, 2);
+      dst_ptr += 2;
+      pos = i + 1;
+    }
+  }
+  if (pos != user_key.size()) {
+    memcpy(dst_ptr, user_data + pos, user_key.size() - pos);
+  }
+
+  memcpy(dst_ptr, kEncodedKeyDelim, 2);
+  dst_ptr += 2;
+  return dst_ptr;
+}
+
+inline const char* DecodeUserKey(const char* ptr, int length, std::string* user_key) {
+  const char* ret_ptr = ptr;
+  user_key->resize(length - kEncodedKeyDelimSize);
+  bool zero_ahead = false;
+  bool delim_found = false;
+  int output_idx = 0;
+
+  for (int idx = 0; idx < length; idx++) {
+    switch (ptr[idx]) {
+      case '\u0000': {
+        delim_found = zero_ahead;
+        zero_ahead = true;
+        break;
+      }
+      case '\u0001': {
+        (*user_key)[output_idx++] = zero_ahead ? '\u0000' : ptr[idx];
+        zero_ahead = false;
+        break;
+      }
+      default: {
+        (*user_key)[output_idx++] = ptr[idx];
+        zero_ahead = false;
+        break;
+      }
+    }
+    if (delim_found) {
+      user_key->resize(output_idx);
+      ret_ptr = ptr + idx + 1;
+      break;
+    }
+  }
+  return ret_ptr;
+}
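+// Round-trip sketch: EncodeUserKey escapes embedded \u0000 bytes as
+// \u0000\u0001 and appends the two-byte delimiter; DecodeUserKey reverses the
+// transform (illustrative; buffer size is an assumption):
+//
+//   std::string raw("a\0b", 3), decoded;
+//   char buf[32];
+//   char* end = EncodeUserKey(Slice(raw), buf, 1 /*nzero*/);
+//   DecodeUserKey(buf, static_cast<int>(end - buf), &decoded);  // decoded == raw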
+inline const char* SeekUserkeyDelim(const char* ptr, int length) {
+  bool zero_ahead = false;
+  for (int i = 0; i < length; i++) {
+    if (ptr[i] == kNeedTransformCharacter && zero_ahead) {
+      return ptr + i + 1;
+    }
+    zero_ahead = ptr[i] == kNeedTransformCharacter;
+  }
+  // TODO: handle invalid format
+  return ptr;
+}
+
+}  // end namespace storage
+#endif
diff --git a/tools/pika_migrate/src/storage/include/storage/util.h b/tools/pika_migrate/src/storage/include/storage/util.h
new file mode 100644
index 0000000000..d50f0ea081
--- /dev/null
+++ b/tools/pika_migrate/src/storage/include/storage/util.h
@@ -0,0 +1,33 @@
+// Copyright (c) 2017-present The storage Authors. All rights reserved.
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree. An additional grant
+// of patent rights can be found in the PATENTS file in the same directory.
+
+#ifndef SRC_UTIL_H_
+#define SRC_UTIL_H_
+
+#include <dirent.h>
+#include <sys/stat.h>
+#include <sys/types.h>
+#include <unistd.h>
+#include <cstdint>
+#include <string>
+
+namespace storage {
+
+int Int64ToStr(char* dst, size_t dstlen, int64_t svalue);
+int StrToInt64(const char* s, size_t slen, int64_t* value);
+int StringMatch(const char* pattern, uint64_t pattern_len, const char* string, uint64_t string_len, int nocase);
+int StrToLongDouble(const char* s, size_t slen, long double* ldval);
+int LongDoubleToStr(long double ldval, std::string* value);
+int do_mkdir(const char* path, mode_t mode);
+int mkpath(const char* path, mode_t mode);
+int delete_dir(const char* dirname);
+int is_dir(const char* filename);
+int CalculateStartAndEndKey(const std::string& key, std::string* start_key, std::string* end_key);
+bool isTailWildcard(const std::string& pattern);
+void GetFilepath(const char* path, const char* filename, char* filepath);
+bool DeleteFiles(const char* path);
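+// Usage sketch for the glob matcher above (illustrative):
+//
+//   // returns nonzero on match; nocase=0 keeps it case-sensitive
+//   int hit = StringMatch("user:*", 6, "user:42", 7, 0);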
+}  // namespace storage
+
+#endif  // SRC_UTIL_H_
diff --git a/tools/pika_migrate/src/storage/include/storage/version.h b/tools/pika_migrate/src/storage/include/storage/version.h
new file mode 100644
index 0000000000..7237eb2141
--- /dev/null
+++ b/tools/pika_migrate/src/storage/include/storage/version.h
@@ -0,0 +1,13 @@
+// Copyright (c) 2017-present, Qihoo, Inc. All rights reserved.
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree. An additional grant
+// of patent rights can be found in the PATENTS file in the same directory.
+
+#ifndef INCLUDE_STORAGE_VERSION_H_
+#define INCLUDE_STORAGE_VERSION_H_
+
+#define STORAGE_MAJOR 1
+#define STORAGE_MINOR 0
+#define STORAGE_PATCH 0
+
+#endif  // INCLUDE_STORAGE_VERSION_H_
diff --git a/tools/pika_migrate/src/storage/src/backupable.cc b/tools/pika_migrate/src/storage/src/backupable.cc
new file mode 100644
index 0000000000..4acd8dee72
--- /dev/null
+++ b/tools/pika_migrate/src/storage/src/backupable.cc
@@ -0,0 +1,149 @@
+// Copyright (c) 2017-present, Qihoo, Inc. All rights reserved.
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree. An additional grant
+// of patent rights can be found in the PATENTS file in the same directory.
+//
+#include <memory>
+#include <utility>
+
+#include "storage/backupable.h"
+#include "storage/storage.h"
+
+namespace storage {
+
+BackupEngine::~BackupEngine() {
+  // Wait all children threads
+  StopBackup();
+  WaitBackupPthread();
+}
+
+Status BackupEngine::NewCheckpoint(rocksdb::DB* rocksdb_db, int index) {
+  rocksdb::DBCheckpoint* checkpoint;
+  Status s = rocksdb::DBCheckpoint::Create(rocksdb_db, &checkpoint);
+  if (!s.ok()) {
+    return s;
+  }
+  engines_.insert(std::make_pair(index, std::unique_ptr<rocksdb::DBCheckpoint>(checkpoint)));
+  return s;
+}
+
+Status BackupEngine::Open(storage::Storage* storage, std::shared_ptr<BackupEngine>& backup_engine_ret, int inst_count) {
+  // BackupEngine() is private, can't use make_shared
+  backup_engine_ret = std::shared_ptr<BackupEngine>(new BackupEngine());
+  if (!backup_engine_ret) {
+    return Status::Corruption("New BackupEngine failed!");
+  }
+
+  // Create BackupEngine for each rocksdb instance
+  rocksdb::Status s;
+  rocksdb::DB* rocksdb_db;
+  for (int index = 0; index < inst_count; index++) {
+    if (!(rocksdb_db = storage->GetDBByIndex(index))) {
+      s = Status::Corruption("Invalid db index");
+    }
+
+    if (s.ok()) {
+      s = backup_engine_ret->NewCheckpoint(rocksdb_db, index);
+    }
+
+    if (!s.ok()) {
+      backup_engine_ret = nullptr;
+      break;
+    }
+  }
+  return s;
+}
+
+Status BackupEngine::SetBackupContent() {
+  Status s;
+  for (const auto& engine : engines_) {
+    // Get backup content
+    BackupContent bcontent;
+    s = engine.second->GetCheckpointFiles(bcontent.live_files, bcontent.live_wal_files, bcontent.manifest_file_size,
+                                          bcontent.sequence_number);
+    if (!s.ok()) {
+      return s;
+    }
+    backup_content_[engine.first] = std::move(bcontent);
+  }
+  return s;
+}
+
+Status BackupEngine::CreateNewBackupSpecify(const std::string& backup_dir, int index) {
+  auto it_engine = engines_.find(index);
+  auto it_content = backup_content_.find(index);
+  std::string dir = GetSaveDirByIndex(backup_dir, index);
+  delete_dir(dir.c_str());
+
+  if (it_content != backup_content_.end() && it_engine != engines_.end()) {
+    Status s = it_engine->second->CreateCheckpointWithFiles(
+        dir, it_content->second.live_files, it_content->second.live_wal_files, it_content->second.manifest_file_size,
+        it_content->second.sequence_number);
+    if (!s.ok()) {
+      return s;
+    }
+
+  } else {
+    return Status::Corruption("Invalid db index");
+  }
+  return Status::OK();
+}
+
+void* ThreadFuncSaveSpecify(void* arg) {
+  auto arg_ptr = static_cast<BackupSaveArgs*>(arg);
+  auto p = static_cast<BackupEngine*>(arg_ptr->p_engine);
+  arg_ptr->res = p->CreateNewBackupSpecify(arg_ptr->backup_dir, arg_ptr->index_);
+  pthread_exit(&(arg_ptr->res));
+}
+
+Status BackupEngine::WaitBackupPthread() {
+  Status s = Status::OK();
+  for (auto& pthread : backup_pthread_ts_) {
+    void* res;
+    if (pthread_join(pthread.second, &res) != 0) {
+      // join failed; fall through and inspect the thread's recorded status
+    }
+    Status cur_s = *(static_cast<Status*>(res));
+    if (!cur_s.ok()) {
+      StopBackup();  // stop others when someone failed
+      s = cur_s;
+    }
+  }
+  backup_pthread_ts_.clear();
+  return s;
+}
+
+Status BackupEngine::CreateNewBackup(const std::string& dir) {
+  Status s = Status::OK();
+  // ensure cleaning up the pointers after the function has finished.
+  std::vector<std::unique_ptr<BackupSaveArgs>> args;
+  args.reserve(engines_.size());
+  for (const auto& engine : engines_) {
+    pthread_t tid;
+    auto arg = std::make_unique<BackupSaveArgs>(reinterpret_cast<void*>(this), dir, engine.first);
+    args.push_back(std::move(arg));
+    if (pthread_create(&tid, nullptr, &ThreadFuncSaveSpecify, args.back().get()) != 0) {
+      s = Status::Corruption("pthread_create failed.");
+      break;
+    }
+    if (!(backup_pthread_ts_.insert(std::make_pair(engine.first, tid)).second)) {
+      backup_pthread_ts_[engine.first] = tid;
+    }
+  }
+
+  // Wait threads stop
+  if (!s.ok()) {
+    StopBackup();
+  }
+  s = WaitBackupPthread();
+
+  return s;
+}
+
+void BackupEngine::StopBackup() {
+  // DEPRECATED
+}
+
+}  // namespace storage
+
diff --git a/tools/pika_migrate/src/storage/src/base_data_key_format.h b/tools/pika_migrate/src/storage/src/base_data_key_format.h
new file mode 100644
index 0000000000..32be63a909
--- /dev/null
+++ b/tools/pika_migrate/src/storage/src/base_data_key_format.h
@@ -0,0 +1,188 @@
+// Copyright (c) 2017-present, Qihoo, Inc. All rights reserved.
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree. An additional grant
+// of patent rights can be found in the PATENTS file in the same directory.
+
+#ifndef SRC_BASE_DATA_KEY_FORMAT_H_
+#define SRC_BASE_DATA_KEY_FORMAT_H_
+
+#include <algorithm>
+
+#include "src/coding.h"
+#include "storage/storage_define.h"
+
+namespace storage {
+
+using Slice = rocksdb::Slice;
+/*
+ * used for Hash/Set/Zset's member data key. format:
+ * | reserve1 | key | version | data | reserve2 |
+ * |    8B    |     |   8B    |      |   16B    |
+ */
+class BaseDataKey {
+ public:
+  BaseDataKey(const Slice& key, uint64_t version, const Slice& data)
+      : key_(key), version_(version), data_(data) {}
+
+  ~BaseDataKey() {
+    if (start_ != space_) {
+      delete[] start_;
+    }
+  }
+
+  Slice EncodeSeekKey() {
+    size_t meta_size = sizeof(reserve1_) + sizeof(version_);
+    size_t usize = key_.size() + data_.size() + kEncodedKeyDelimSize;
+    size_t nzero = std::count(key_.data(), key_.data() + key_.size(), kNeedTransformCharacter);
+    usize += nzero;
+    size_t needed = meta_size + usize;
+    char* dst;
+    if (needed <= sizeof(space_)) {
+      dst = space_;
+    } else {
+      dst = new char[needed];
+
+      // Need to allocate space, delete previous space
+      if (start_ != space_) {
+        delete[] start_;
+      }
+    }
+
+    start_ = dst;
+    // reserve1: 8 byte
+    memcpy(dst, reserve1_, sizeof(reserve1_));
+    dst += sizeof(reserve1_);
+    // key
+    dst = EncodeUserKey(key_, dst, nzero);
+    // version 8 byte
+    EncodeFixed64(dst, version_);
+    dst += sizeof(version_);
+    // data
+    memcpy(dst, data_.data(), data_.size());
+    dst += data_.size();
+    return Slice(start_, needed);
+  }
+
+  Slice Encode() {
+    size_t meta_size = sizeof(reserve1_) + sizeof(version_) + sizeof(reserve2_);
+    size_t usize = key_.size() + data_.size() + kEncodedKeyDelimSize;
+    size_t nzero = std::count(key_.data(), key_.data() + key_.size(), kNeedTransformCharacter);
+    usize += nzero;
+    size_t needed = meta_size + usize;
+    char* dst;
+    if (needed <= sizeof(space_)) {
+      dst = space_;
+    } else {
+      dst = new char[needed];
+
+      // Need to allocate space, delete previous space
+      if (start_ != space_) {
+        delete[] start_;
+      }
+    }
+
+    start_ = dst;
+    // reserve1: 8 byte
+    memcpy(dst, reserve1_, sizeof(reserve1_));
+    dst += sizeof(reserve1_);
+    // key
+    dst = EncodeUserKey(key_, dst, nzero);
+    // version 8 byte
+    EncodeFixed64(dst, version_);
+    dst += sizeof(version_);
+    // data
+    memcpy(dst, data_.data(), data_.size());
+    dst += data_.size();
+    // TODO(wangshaoyi): too much for reserve
+    // reserve2: 16 byte
+    memcpy(dst, reserve2_, sizeof(reserve2_));
+    return Slice(start_, needed);
+  }
+
+ private:
+  char* start_ = nullptr;
+  char space_[200];
+  char reserve1_[8] = {0};
+  Slice key_;
+  uint64_t version_ = uint64_t(-1);
+  Slice data_;
+  char reserve2_[16] = {0};
+};
+
+class ParsedBaseDataKey {
+ public:
+  explicit ParsedBaseDataKey(const std::string* key) {
+    const char* ptr = key->data();
+    const char* end_ptr = key->data() + key->size();
+    decode(ptr, end_ptr);
+  }
+
+  explicit ParsedBaseDataKey(const Slice& key) {
+    const char* ptr = key.data();
+    const char* end_ptr = key.data() + key.size();
+    decode(ptr, end_ptr);
+  }
+
+  void decode(const char* ptr, const char* end_ptr) {
+    const char* start = ptr;
+    // skip head reserve1_
+    ptr += sizeof(reserve1_);
+    // skip tail reserve2_
+    end_ptr -= kSuffixReserveLength;
+    // user key
+    ptr = DecodeUserKey(ptr, std::distance(ptr, end_ptr), &key_str_);
+
+    version_ = DecodeFixed64(ptr);
+    ptr += sizeof(version_);
+    data_ = Slice(ptr, std::distance(ptr, end_ptr));
+  }
+
+  virtual ~ParsedBaseDataKey() = default;
+
+  Slice Key() { return Slice(key_str_); }
+
+  uint64_t Version() { return version_; }
+
+  Slice Data() { return data_; }
+
+ protected:
+  std::string key_str_;
+  char reserve1_[8] = {0};
+  uint64_t version_ = (uint64_t)(-1);
+  Slice data_;
+};
+
+class ParsedHashesDataKey : public ParsedBaseDataKey {
+ public:
+  explicit ParsedHashesDataKey(const std::string* key) : ParsedBaseDataKey(key) {}
+  explicit ParsedHashesDataKey(const Slice& key) : ParsedBaseDataKey(key) {}
+  Slice field() { return data_; }
+};
+
+class ParsedSetsMemberKey : public ParsedBaseDataKey {
+ public:
+  explicit ParsedSetsMemberKey(const std::string* key) : ParsedBaseDataKey(key) {}
+  explicit ParsedSetsMemberKey(const Slice& key) : ParsedBaseDataKey(key) {}
+  Slice member() { return data_; }
+};
+
+class ParsedZSetsMemberKey : public ParsedBaseDataKey {
+ public:
+  explicit ParsedZSetsMemberKey(const std::string* key) : ParsedBaseDataKey(key) {}
+  explicit ParsedZSetsMemberKey(const Slice& key) : ParsedBaseDataKey(key) {}
+  Slice member() { return data_; }
+};
+
+class ParsedStreamDataKey : public ParsedBaseDataKey {
+ public:
+  explicit ParsedStreamDataKey(const std::string* key) : ParsedBaseDataKey(key) {}
+  explicit ParsedStreamDataKey(const Slice& key) : ParsedBaseDataKey(key) {}
+  Slice id() { return data_; }
+};
+
+using HashesDataKey = BaseDataKey;
+using SetsMemberKey = BaseDataKey;
+using ZSetsMemberKey = BaseDataKey;
+using StreamDataKey = BaseDataKey;
+
+}  // namespace storage
+#endif  // SRC_BASE_DATA_KEY_FORMAT_H_
diff --git a/tools/pika_migrate/src/storage/src/base_data_value_format.h b/tools/pika_migrate/src/storage/src/base_data_value_format.h
new file mode 100644
index 0000000000..be6735f54c
--- /dev/null
+++ b/tools/pika_migrate/src/storage/src/base_data_value_format.h
@@ -0,0 +1,115 @@
+// Copyright (c) 2023-present, Qihoo, Inc. All rights reserved.
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree. An additional grant
+// of patent rights can be found in the PATENTS file in the same directory.
+
+#ifndef SRC_BASE_DATA_VALUE_FORMAT_H_
+#define SRC_BASE_DATA_VALUE_FORMAT_H_
+
+#include <string>
+
+#include "rocksdb/env.h"
+#include "rocksdb/slice.h"
+
+#include "base_value_format.h"
+#include "src/coding.h"
+#include "src/mutex.h"
+#include "storage/storage_define.h"
+
+namespace storage {
+/*
+* hash/set/zset/list data value format
+* | value | reserve | ctime |
+* |       |   16B   |  8B   |
+*/
+class BaseDataValue : public InternalValue {
+public:
+  /*
+   * The type header of the value is initialized to DataType::kNones (the null type)
+   */
+  explicit BaseDataValue(const rocksdb::Slice& user_value) : InternalValue(DataType::kNones, user_value) {}
+  virtual ~BaseDataValue() {}
+
+  virtual rocksdb::Slice Encode() {
+    size_t usize = user_value_.size();
+    size_t needed = usize + kSuffixReserveLength + kTimestampLength;
+    char* dst = ReAllocIfNeeded(needed);
+    char* start_pos = dst;
+
+    memcpy(dst, user_value_.data(), user_value_.size());
+    dst += user_value_.size();
+    memcpy(dst, reserve_, kSuffixReserveLength);
+    dst += kSuffixReserveLength;
+    uint64_t ctime = ctime_ > 0 ? (ctime_ | (1ULL << 63)) : 0;
+    EncodeFixed64(dst, ctime);
+    dst += kTimestampLength;
+    return rocksdb::Slice(start_pos, needed);
+  }
+
+private:
+  const size_t kDefaultValueSuffixLength = kSuffixReserveLength + kTimestampLength;
+};
+
+class ParsedBaseDataValue : public ParsedInternalValue {
+public:
+  // Use this constructor after rocksdb::DB::Get(), since we use this in
+  // the implementation of user interfaces and may need to modify the
+  // original value suffix, so the value_ must point to the string
+  explicit ParsedBaseDataValue(std::string* value) : ParsedInternalValue(value) {
+    if (value_->size() >= kBaseDataValueSuffixLength) {
+      user_value_ = rocksdb::Slice(value_->data(), value_->size() - kBaseDataValueSuffixLength);
+      memcpy(reserve_, value_->data() + user_value_.size(), kSuffixReserveLength);
+      uint64_t ctime = DecodeFixed64(value_->data() + user_value_.size() + kSuffixReserveLength);
+      ctime_ = (ctime & ~(1ULL << 63));
+    }
+  }
+
+  // Use this constructor in rocksdb::CompactionFilter::Filter(),
+  // since we use this in Compaction process, all we need to do is parsing
+  // the rocksdb::Slice, so don't need to modify the original value, value_ can be
+  // set to nullptr
+  explicit ParsedBaseDataValue(const rocksdb::Slice& value) : ParsedInternalValue(value) {
+    if (value.size() >= kBaseDataValueSuffixLength) {
+      user_value_ = rocksdb::Slice(value.data(), value.size() - kBaseDataValueSuffixLength);
+      memcpy(reserve_, value.data() + user_value_.size(), kSuffixReserveLength);
+      uint64_t ctime = DecodeFixed64(value.data() + user_value_.size() + kSuffixReserveLength);
+      ctime_ = (ctime & ~(1ULL << 63));
+    }
+  }
+
+  virtual ~ParsedBaseDataValue() = default;
+
+  void SetEtimeToValue() override {}
+
+  void SetCtimeToValue() override {
+    if (value_) {
+      char* dst = const_cast<char*>(value_->data()) + value_->size() - kTimestampLength;
+      uint64_t ctime = ctime_ > 0 ? (ctime_ | (1ULL << 63)) : 0;
+      EncodeFixed64(dst, ctime);
+    }
+  }
+
+  void SetReserveToValue() {
+    if (value_) {
+      char* dst = const_cast<char*>(value_->data()) + value_->size() - kBaseDataValueSuffixLength;
+      memcpy(dst, reserve_, kSuffixReserveLength);
+    }
+  }
+
+  virtual void StripSuffix() override {
+    if (value_) {
+      value_->erase(value_->size() - kBaseDataValueSuffixLength, kBaseDataValueSuffixLength);
+    }
+  }
+
+  static size_t GetkBaseDataValueSuffixLength() { return kBaseDataValueSuffixLength; }
+
+protected:
+  virtual void SetVersionToValue() override {}
+
+private:
+  static const size_t kBaseDataValueSuffixLength = kSuffixReserveLength + kTimestampLength;
+};
+
+}  // namespace storage
+#endif  // SRC_BASE_DATA_VALUE_FORMAT_H_
diff --git a/tools/pika_migrate/src/storage/src/base_filter.h b/tools/pika_migrate/src/storage/src/base_filter.h
new file mode 100644
index 0000000000..934b2d96d7
--- /dev/null
+++ b/tools/pika_migrate/src/storage/src/base_filter.h
@@ -0,0 +1,264 @@
+// Copyright (c) 2017-present, Qihoo, Inc. All rights reserved.
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree. An additional grant
+// of patent rights can be found in the PATENTS file in the same directory.
+
+#ifndef SRC_BASE_FILTER_H_
+#define SRC_BASE_FILTER_H_
+
+#include <memory>
+#include <string>
+#include <vector>
+
+#include "glog/logging.h"
+#include "rocksdb/compaction_filter.h"
+#include "src/base_data_key_format.h"
+#include "src/base_value_format.h"
+#include "src/base_meta_value_format.h"
+#include "src/lists_meta_value_format.h"
+#include "src/pika_stream_meta_value.h"
+#include "src/strings_value_format.h"
+#include "src/zsets_data_key_format.h"
+#include "src/debug.h"
+
+namespace storage {
+
+class BaseMetaFilter : public rocksdb::CompactionFilter {
+ public:
+  BaseMetaFilter() = default;
+  bool Filter(int level, const rocksdb::Slice& key, const rocksdb::Slice& value, std::string* new_value,
+              bool* value_changed) const override {
+    auto cur_time = pstd::NowMillis();
+    /*
+     * For the filtering of meta information, because the field designs of string
+     * and list are different, their filtering policies are written separately.
+ * The field designs of the remaining zset,set,hash and stream in meta-value + * are the same, so the same filtering strategy is used + */ + ParsedBaseKey parsed_key(key); + auto type = static_cast(static_cast(value[0])); + DEBUG("==========================START=========================="); + if (type == DataType::kStrings) { + ParsedStringsValue parsed_strings_value(value); + DEBUG("[string type] key: %s, value = %s, timestamp: %llu, cur_time: %llu", parsed_key.Key().ToString().c_str(), + parsed_strings_value.UserValue().ToString().c_str(), parsed_strings_value.Etime(), cur_time); + if (parsed_strings_value.Etime() != 0 && parsed_strings_value.Etime() < cur_time) { + DEBUG("Drop[Stale]"); + return true; + } else { + DEBUG("Reserve"); + return false; + } + } else if (type == DataType::kStreams) { + ParsedStreamMetaValue parsed_stream_meta_value(value); + DEBUG("[stream meta type], key: %s, entries_added = %llu, first_id: %s, last_id: %s, version: %llu", + parsed_key.Key().ToString().c_str(), parsed_stream_meta_value.entries_added(), + parsed_stream_meta_value.first_id().ToString().c_str(), + parsed_stream_meta_value.last_id().ToString().c_str(), + parsed_stream_meta_value.version()); + return false; + } else if (type == DataType::kLists) { + ParsedListsMetaValue parsed_lists_meta_value(value); + DEBUG("[list meta type], key: %s, count = %d, timestamp: %llu, cur_time: %llu, version: %llu", parsed_key.Key().ToString().c_str(), + parsed_lists_meta_value.Count(), parsed_lists_meta_value.Etime(), cur_time, + parsed_lists_meta_value.Version()); + + if (parsed_lists_meta_value.Etime() != 0 && parsed_lists_meta_value.Etime() < cur_time && + parsed_lists_meta_value.Version() < cur_time) { + DEBUG("Drop[Stale & version < cur_time]"); + return true; + } + if (parsed_lists_meta_value.Count() == 0 && parsed_lists_meta_value.Version() < cur_time) { + DEBUG("Drop[Empty & version < cur_time]"); + return true; + } + DEBUG("Reserve"); + return false; + } else { + ParsedBaseMetaValue parsed_base_meta_value(value); + DEBUG("[%s meta type] key: %s, count = %d, timestamp: %llu, cur_time: %llu, version: %llu", + DataTypeToString(type), parsed_key.Key().ToString().c_str(), parsed_base_meta_value.Count(), + parsed_base_meta_value.Etime(), cur_time, parsed_base_meta_value.Version()); + + if (parsed_base_meta_value.Etime() != 0 && parsed_base_meta_value.Etime() < cur_time && + parsed_base_meta_value.Version() < cur_time) { + DEBUG("Drop[Stale & version < cur_time]"); + return true; + } + if (parsed_base_meta_value.Count() == 0 && parsed_base_meta_value.Version() < cur_time) { + DEBUG("Drop[Empty & version < cur_time]"); + return true; + } + DEBUG("Reserve"); + return false; + } + } + + const char* Name() const override { return "BaseMetaFilter"; } +}; + +class BaseMetaFilterFactory : public rocksdb::CompactionFilterFactory { + public: + BaseMetaFilterFactory() = default; + std::unique_ptr CreateCompactionFilter( + const rocksdb::CompactionFilter::Context& context) override { + return std::unique_ptr(new BaseMetaFilter()); + } + const char* Name() const override { return "BaseMetaFilterFactory"; } +}; + +class BaseDataFilter : public rocksdb::CompactionFilter { + public: + BaseDataFilter(rocksdb::DB* db, std::vector* cf_handles_ptr, enum DataType type) + : db_(db), + cf_handles_ptr_(cf_handles_ptr), + type_(type) + {} + + bool Filter(int level, const Slice& key, const rocksdb::Slice& value, std::string* new_value, + bool* value_changed) const override { + UNUSED(level); + UNUSED(value); + UNUSED(new_value); + 
UNUSED(value_changed); + ParsedBaseDataKey parsed_base_data_key(key); + TRACE("==========================START=========================="); + TRACE("[DataFilter], key: %s, data = %s, version = %llu", parsed_base_data_key.Key().ToString().c_str(), + parsed_base_data_key.Data().ToString().c_str(), parsed_base_data_key.Version()); + + const char* ptr = key.data(); + int key_size = key.size(); + ptr = SeekUserkeyDelim(ptr + kPrefixReserveLength, key_size - kPrefixReserveLength); + std::string meta_key_enc(key.data(), std::distance(key.data(), ptr)); + meta_key_enc.append(kSuffixReserveLength, kNeedTransformCharacter); + + if (meta_key_enc != cur_key_) { + cur_meta_etime_ = 0; + cur_meta_version_ = 0; + meta_not_found_ = true; + cur_key_ = meta_key_enc; + std::string meta_value; + // destroyed when close the database, Reserve Current key value + if (cf_handles_ptr_->empty()) { + return false; + } + Status s = db_->Get(default_read_options_, (*cf_handles_ptr_)[0], cur_key_, &meta_value); + if (s.ok()) { + /* + * The elimination policy for keys of the Data type is that if the key + * type obtained from MetaCF is inconsistent with the key type in Data, + * it needs to be eliminated + */ + auto type = static_cast(static_cast(meta_value[0])); + if (type != type_) { + return true; + } else if (type == DataType::kStreams) { + ParsedStreamMetaValue parsed_stream_meta_value(meta_value); + meta_not_found_ = false; + cur_meta_version_ = parsed_stream_meta_value.version(); + cur_meta_etime_ = 0; // stream do not support ttl + } else if (type == DataType::kHashes || type == DataType::kSets || type == DataType::kZSets) { + ParsedBaseMetaValue parsed_base_meta_value(&meta_value); + meta_not_found_ = false; + cur_meta_version_ = parsed_base_meta_value.Version(); + cur_meta_etime_ = parsed_base_meta_value.Etime(); + } else { + return true; + } + } else if (s.IsNotFound()) { + meta_not_found_ = true; + } else { + cur_key_ = ""; + TRACE("Reserve[Get meta_key faild]"); + return false; + } + } + + if (meta_not_found_) { + TRACE("Drop[Meta key not exist]"); + return true; + } + + pstd::TimeType unix_time = pstd::NowMillis(); + if (cur_meta_etime_ != 0 && cur_meta_etime_ < unix_time) { + TRACE("Drop[Timeout]"); + return true; + } + + if (cur_meta_version_ > parsed_base_data_key.Version()) { + TRACE("Drop[data_key_version < cur_meta_version]"); + return true; + } else { + TRACE("Reserve[data_key_version == cur_meta_version]"); + return false; + } + } + + /* + // Only judge by meta value ttl + virtual rocksdb::CompactionFilter::Decision FilterBlobByKey(int level, const Slice& key, + uint64_t expire_time, std::string* new_value, std::string* skip_until) const override { + UNUSED(level); + UNUSED(expire_time); + UNUSED(new_value); + UNUSED(skip_until); + bool unused_value_changed; + bool should_remove = Filter(level, key, Slice{}, new_value, &unused_value_changed); + if (should_remove) { + return CompactionFilter::Decision::kRemove; + } + return CompactionFilter::Decision::kKeep; + } + */ + + const char* Name() const override { return "BaseDataFilter"; } + + private: + rocksdb::DB* db_ = nullptr; + std::vector* cf_handles_ptr_ = nullptr; + rocksdb::ReadOptions default_read_options_; + mutable std::string cur_key_; + mutable bool meta_not_found_ = false; + mutable uint64_t cur_meta_version_ = 0; + mutable uint64_t cur_meta_etime_ = 0; + enum DataType type_ = DataType::kNones; +}; + +class BaseDataFilterFactory : public rocksdb::CompactionFilterFactory { + public: + BaseDataFilterFactory(rocksdb::DB** db_ptr, std::vector* 
handles_ptr, enum DataType type)
+      : db_ptr_(db_ptr), cf_handles_ptr_(handles_ptr), type_(type) {}
+  std::unique_ptr<rocksdb::CompactionFilter> CreateCompactionFilter(
+      const rocksdb::CompactionFilter::Context& context) override {
+    return std::make_unique<BaseDataFilter>(*db_ptr_, cf_handles_ptr_, type_);
+  }
+  const char* Name() const override { return "BaseDataFilterFactory"; }
+
+ private:
+  rocksdb::DB** db_ptr_ = nullptr;
+  std::vector<rocksdb::ColumnFamilyHandle*>* cf_handles_ptr_ = nullptr;
+  enum DataType type_ = DataType::kNones;
+};
+
+using HashesMetaFilter = BaseMetaFilter;
+using HashesMetaFilterFactory = BaseMetaFilterFactory;
+using HashesDataFilter = BaseDataFilter;
+using HashesDataFilterFactory = BaseDataFilterFactory;
+
+using SetsMetaFilter = BaseMetaFilter;
+using SetsMetaFilterFactory = BaseMetaFilterFactory;
+using SetsMemberFilter = BaseDataFilter;
+using SetsMemberFilterFactory = BaseDataFilterFactory;
+
+using ZSetsMetaFilter = BaseMetaFilter;
+using ZSetsMetaFilterFactory = BaseMetaFilterFactory;
+using ZSetsDataFilter = BaseDataFilter;
+using ZSetsDataFilterFactory = BaseDataFilterFactory;
+
+using MetaFilter = BaseMetaFilter;
+using MetaFilterFactory = BaseMetaFilterFactory;
+}  // namespace storage
+#endif  // SRC_BASE_FILTER_H_
diff --git a/tools/pika_migrate/src/storage/src/base_key_format.h b/tools/pika_migrate/src/storage/src/base_key_format.h
new file mode 100644
index 0000000000..75d4d156fe
--- /dev/null
+++ b/tools/pika_migrate/src/storage/src/base_key_format.h
@@ -0,0 +1,99 @@
+// Copyright (c) 2017-present, Qihoo, Inc. All rights reserved.
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree. An additional grant
+// of patent rights can be found in the PATENTS file in the same directory.
+
+#ifndef SRC_BASE_KEY_FORMAT_H_
+#define SRC_BASE_KEY_FORMAT_H_
+
+#include "storage/storage_define.h"
+
+namespace storage {
+/*
+* used for string data key or hash/zset/set/list's meta key.
format: +* | reserve1 | key | reserve2 | +* | 8B | | 16B | +*/ + +class BaseKey { + public: + BaseKey(const Slice& key) : key_(key) {} + + ~BaseKey() { + if (start_ != space_) { + delete[] start_; + } + } + + Slice Encode() { + size_t meta_size = sizeof(reserve1_) + sizeof(reserve2_); + size_t nzero = std::count(key_.data(), key_.data() + key_.size(), kNeedTransformCharacter); + size_t usize = nzero + kEncodedKeyDelimSize + key_.size(); + size_t needed = meta_size + usize; + char* dst; + if (needed <= sizeof(space_)) { + dst = space_; + } else { + dst = new char[needed]; + + // Need to allocate space, delete previous space + if (start_ != space_) { + delete[] start_; + } + } + + start_ = dst; + // reserve1: 8 byte + memcpy(dst, reserve1_, sizeof(reserve1_)); + dst += sizeof(reserve1_); + // key + dst = EncodeUserKey(key_, dst, nzero); + // TODO(wangshaoyi): no need to reserve tailing, + // since we already set delimiter + memcpy(dst, reserve2_, sizeof(reserve2_)); + return Slice(start_, needed); + } + + private: + char* start_ = nullptr; + char space_[200]; + char reserve1_[8] = {0}; + Slice key_; + char reserve2_[16] = {0}; +}; + +class ParsedBaseKey { + public: + explicit ParsedBaseKey(const std::string* key) { + const char* ptr = key->data(); + const char* end_ptr = key->data() + key->size(); + decode(ptr, end_ptr); + } + + explicit ParsedBaseKey(const Slice& key) { + const char* ptr = key.data(); + const char* end_ptr = key.data() + key.size(); + decode(ptr, end_ptr); + } + + void decode(const char* ptr, const char* end_ptr) { + // skip head reserve + ptr += kPrefixReserveLength; + // skip tail reserve2_ + end_ptr -= kSuffixReserveLength; + DecodeUserKey(ptr, std::distance(ptr, end_ptr), &key_str_); + } + + virtual ~ParsedBaseKey() = default; + + Slice Key() { return Slice(key_str_); } + +protected: + std::string key_str_; +}; + +using ParsedBaseMetaKey = ParsedBaseKey; +using BaseMetaKey = BaseKey; + +} // namespace storage +#endif // SRC_BASE_KEY_FORMAT_H_ diff --git a/tools/pika_migrate/src/storage/src/base_meta_value_format.h b/tools/pika_migrate/src/storage/src/base_meta_value_format.h new file mode 100644 index 0000000000..588c980624 --- /dev/null +++ b/tools/pika_migrate/src/storage/src/base_meta_value_format.h @@ -0,0 +1,225 @@ +// Copyright (c) 2017-present, Qihoo, Inc. All rights reserved. +// This source code is licensed under the BSD-style license found in the +// LICENSE file in the root directory of this source tree. An additional grant +// of patent rights can be found in the PATENTS file in the same directory. 
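Editor's note: since every meta lookup has to go through this transform, call sites wrap the raw user key before touching the meta column family. A hypothetical call site is sketched below; the helper name and the cf handle are placeholders, not part of this patch:

    #include <string>
    #include "rocksdb/db.h"
    #include "src/base_key_format.h"

    // Hypothetical helper: encode the user key, then read the meta CF.
    rocksdb::Status GetMetaValue(rocksdb::DB* db, rocksdb::ColumnFamilyHandle* meta_cf,
                                 const rocksdb::Slice& user_key, std::string* meta_value) {
      storage::BaseMetaKey base_meta_key(user_key);
      // Raw user keys never reach RocksDB; only the escaped, delimited form does.
      return db->Get(rocksdb::ReadOptions(), meta_cf, base_meta_key.Encode(), meta_value);
    }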
+
+#ifndef SRC_BASE_META_VALUE_FORMAT_H_
+#define SRC_BASE_META_VALUE_FORMAT_H_
+
+#include <string>
+
+#include "pstd/include/env.h"
+#include "storage/storage_define.h"
+#include "src/base_value_format.h"
+
+namespace storage {
+
+/*
+*| type | value | version | reserve | cdate | timestamp |
+*|  1B  |       |   8B    |   16B   |  8B   |    8B     |
+*/
+// TODO(wangshaoyi): reformat encode, AppendTimestampAndVersion
+class BaseMetaValue : public InternalValue {
+ public:
+  /*
+   * Constructing MetaValue requires passing in a type value
+   */
+  explicit BaseMetaValue(DataType type, const Slice& user_value) : InternalValue(type, user_value) {}
+  rocksdb::Slice Encode() override {
+    size_t usize = user_value_.size();
+    size_t needed = usize + kVersionLength + kSuffixReserveLength + 2 * kTimestampLength + kTypeLength;
+    char* dst = ReAllocIfNeeded(needed);
+    memcpy(dst, &type_, sizeof(type_));
+    dst += sizeof(type_);
+    char* start_pos = dst;
+
+    memcpy(dst, user_value_.data(), user_value_.size());
+    dst += user_value_.size();
+    EncodeFixed64(dst, version_);
+    dst += sizeof(version_);
+    memcpy(dst, reserve_, sizeof(reserve_));
+    dst += sizeof(reserve_);
+    // The most significant bit is 1 for milliseconds and 0 for seconds.
+    // Data written by older versions was stored in seconds; newer data is stored in milliseconds.
+    uint64_t ctime = ctime_ > 0 ? (ctime_ | (1ULL << 63)) : 0;
+    EncodeFixed64(dst, ctime);
+    dst += sizeof(ctime_);
+    uint64_t etime = etime_ > 0 ? (etime_ | (1ULL << 63)) : 0;
+    EncodeFixed64(dst, etime);
+    return {start_, needed};
+  }
+
+  uint64_t UpdateVersion() {
+    pstd::TimeType unix_time = pstd::NowMillis();
+    if (version_ >= unix_time) {
+      version_++;
+    } else {
+      version_ = unix_time;
+    }
+    return version_;
+  }
+};
+
+class ParsedBaseMetaValue : public ParsedInternalValue {
+ public:
+  // Use this constructor after rocksdb::DB::Get();
+  explicit ParsedBaseMetaValue(std::string* internal_value_str) : ParsedInternalValue(internal_value_str) {
+    if (internal_value_str->size() >= kBaseMetaValueSuffixLength) {
+      size_t offset = 0;
+      type_ = static_cast<DataType>(static_cast<uint8_t>((*internal_value_str)[0]));
+      offset += kTypeLength;
+      user_value_ = Slice(internal_value_str->data() + offset,
+                          internal_value_str->size() - kBaseMetaValueSuffixLength - offset);
+      offset += user_value_.size();
+      version_ = DecodeFixed64(internal_value_str->data() + offset);
+      offset += sizeof(version_);
+      memcpy(reserve_, internal_value_str->data() + offset, sizeof(reserve_));
+      offset += sizeof(reserve_);
+      uint64_t ctime = DecodeFixed64(internal_value_str->data() + offset);
+      offset += sizeof(ctime_);
+      uint64_t etime = DecodeFixed64(internal_value_str->data() + offset);
+
+      ctime_ = (ctime & ~(1ULL << 63));
+      // if ctime_ == ctime, the millisecond flag was absent, so ctime was stored in seconds
+      if (ctime_ == ctime) {
+        ctime_ *= 1000;
+      }
+      etime_ = (etime & ~(1ULL << 63));
+      // if etime_ == etime, the millisecond flag was absent, so etime was stored in seconds
+      if (etime == etime_) {
+        etime_ *= 1000;
+      }
+    }
+    count_ = DecodeFixed32(internal_value_str->data() + kTypeLength);
+  }
+
+  // Use this constructor in rocksdb::CompactionFilter::Filter();
+  explicit ParsedBaseMetaValue(const Slice& internal_value_slice) : ParsedInternalValue(internal_value_slice) {
+    if (internal_value_slice.size() >= kBaseMetaValueSuffixLength) {
+      size_t offset = 0;
+      type_ = static_cast<DataType>(static_cast<uint8_t>(internal_value_slice[0]));
+      offset += kTypeLength;
+      user_value_ = Slice(internal_value_slice.data() + offset,
+                          internal_value_slice.size() - kBaseMetaValueSuffixLength - offset);
+      offset += user_value_.size();
+      version_ = DecodeFixed64(internal_value_slice.data() + offset);
+      offset += sizeof(uint64_t);
+      memcpy(reserve_, internal_value_slice.data() + offset, sizeof(reserve_));
+      offset += sizeof(reserve_);
+      uint64_t ctime = DecodeFixed64(internal_value_slice.data() + offset);
+      offset += sizeof(ctime_);
+      uint64_t etime = DecodeFixed64(internal_value_slice.data() + offset);
+
+      ctime_ = (ctime & ~(1ULL << 63));
+      // if ctime_ == ctime, the millisecond flag was absent, so ctime was stored in seconds
+      if (ctime_ == ctime) {
+        ctime_ *= 1000;
+      }
+      etime_ = (etime & ~(1ULL << 63));
+      // if etime_ == etime, the millisecond flag was absent, so etime was stored in seconds
+      if (etime == etime_) {
+        etime_ *= 1000;
+      }
+    }
+    count_ = DecodeFixed32(internal_value_slice.data() + kTypeLength);
+  }
+
+  void StripSuffix() override {
+    if (value_) {
+      value_->erase(value_->size() - kBaseMetaValueSuffixLength, kBaseMetaValueSuffixLength);
+    }
+  }
+
+  void SetVersionToValue() override {
+    if (value_) {
+      char* dst = const_cast<char*>(value_->data()) + value_->size() - kBaseMetaValueSuffixLength;
+      EncodeFixed64(dst, version_);
+    }
+  }
+
+  void SetCtimeToValue() override {
+    if (value_) {
+      char* dst = const_cast<char*>(value_->data()) + value_->size() - 2 * kTimestampLength;
+      uint64_t ctime = ctime_ > 0 ? (ctime_ | (1ULL << 63)) : 0;
+      EncodeFixed64(dst, ctime);
+    }
+  }
+
+  void SetEtimeToValue() override {
+    if (value_) {
+      char* dst = const_cast<char*>(value_->data()) + value_->size() - kTimestampLength;
+      uint64_t etime = etime_ > 0 ? (etime_ | (1ULL << 63)) : 0;
+      EncodeFixed64(dst, etime);
+    }
+  }
+
+  uint64_t InitialMetaValue() {
+    this->SetCount(0);
+    this->SetEtime(0);
+    this->SetCtime(0);
+    return this->UpdateVersion();
+  }
+
+  bool IsValid() override {
+    return !IsStale() && Count() != 0;
+  }
+
+  bool check_set_count(size_t count) {
+    if (count > INT32_MAX) {
+      return false;
+    }
+    return true;
+  }
+
+  int32_t Count() { return count_; }
+
+  void SetCount(int32_t count) {
+    count_ = count;
+    if (value_) {
+      char* dst = const_cast<char*>(value_->data());
+      EncodeFixed32(dst + kTypeLength, count_);
+    }
+  }
+
+  bool CheckModifyCount(int32_t delta) {
+    int64_t count = count_;
+    count += delta;
+    if (count < 0 || count > INT32_MAX) {
+      return false;
+    }
+    return true;
+  }
+
+  void ModifyCount(int32_t delta) {
+    count_ += delta;
+    if (value_) {
+      char* dst = const_cast<char*>(value_->data());
+      EncodeFixed32(dst + kTypeLength, count_);
+    }
+  }
+
+  uint64_t UpdateVersion() {
+    pstd::TimeType unix_time = pstd::NowMillis();
+    if (version_ >= unix_time) {
+      version_++;
+    } else {
+      version_ = unix_time;
+    }
+    SetVersionToValue();
+    return version_;
+  }
+
+ private:
+  static const size_t kBaseMetaValueSuffixLength = kVersionLength + kSuffixReserveLength + 2 * kTimestampLength;
+  int32_t count_ = 0;
+};
+
+using HashesMetaValue = BaseMetaValue;
+using ParsedHashesMetaValue = ParsedBaseMetaValue;
+using SetsMetaValue = BaseMetaValue;
+using ParsedSetsMetaValue = ParsedBaseMetaValue;
+using ZSetsMetaValue = BaseMetaValue;
+using ParsedZSetsMetaValue = ParsedBaseMetaValue;
+
+}  // namespace storage
+#endif  // SRC_BASE_META_VALUE_FORMAT_H_
diff --git a/tools/pika_migrate/src/storage/src/base_value_format.h b/tools/pika_migrate/src/storage/src/base_value_format.h
new file mode 100644
index 0000000000..14e0175f46
--- /dev/null
+++ b/tools/pika_migrate/src/storage/src/base_value_format.h
@@ -0,0 +1,160 @@
+// Copyright (c) 2017-present, Qihoo, Inc. All rights reserved.
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
An additional grant +// of patent rights can be found in the PATENTS file in the same directory. + +#ifndef SRC_BASE_VALUE_FORMAT_H_ +#define SRC_BASE_VALUE_FORMAT_H_ + +#include + +#include "rocksdb/env.h" +#include "rocksdb/slice.h" + +#include "src/coding.h" +#include "src/mutex.h" + +#include "pstd/include/env.h" + +namespace storage { + +enum class DataType : uint8_t { kStrings = 0, kHashes = 1, kSets = 2, kLists = 3, kZSets = 4, kStreams = 5, kNones = 6, kAll = 7 }; +constexpr int DataTypeNum = int(DataType::kNones); + +constexpr char DataTypeTag[] = { 'k', 'h', 's', 'l', 'z', 'x', 'n', 'a'}; +constexpr char* DataTypeStrings[] = { "string", "hash", "set", "list", "zset", "streams", "none", "all"}; + +constexpr char* DataTypeToString(DataType type) { + if (type < DataType::kStrings || type > DataType::kNones) { + return DataTypeStrings[static_cast(DataType::kNones)]; + } + return DataTypeStrings[static_cast(type)]; +} + +constexpr char DataTypeToTag(DataType type) { + if (type < DataType::kStrings || type > DataType::kNones) { + return DataTypeTag[static_cast(DataType::kNones)]; + } + return DataTypeTag[static_cast(type)]; +} + +class InternalValue { +public: + explicit InternalValue(DataType type, const rocksdb::Slice& user_value) : type_(type), user_value_(user_value) { + ctime_ = pstd::NowMillis(); + } + + virtual ~InternalValue() { + if (start_ != space_) { + delete[] start_; + } + } + void SetEtime(uint64_t etime = 0) { etime_ = etime; } + void setCtime(uint64_t ctime) { ctime_ = ctime; } + rocksdb::Status SetRelativeTimeInMillsec(int64_t ttl_millsec) { + pstd::TimeType unix_time = pstd::NowMillis(); + etime_ = unix_time + ttl_millsec; + return rocksdb::Status::OK(); + } + void SetVersion(uint64_t version = 0) { version_ = version; } + + char* ReAllocIfNeeded(size_t needed) { + char* dst; + if (needed <= sizeof(space_)) { + dst = space_; + } else { + dst = new char[needed]; + if (start_ != space_) { + delete[] start_; + } + } + start_ = dst; + return dst; + } + + virtual rocksdb::Slice Encode() = 0; + +protected: + char space_[200]; + char* start_ = nullptr; + rocksdb::Slice user_value_; + uint64_t version_ = 0; + uint64_t etime_ = 0; + uint64_t ctime_ = 0; + DataType type_; + char reserve_[16] = {0}; +}; + +class ParsedInternalValue { +public: + // Use this constructor after rocksdb::DB::Get(), since we use this in + // the implement of user interfaces and may need to modify the + // original value suffix, so the value_ must point to the string + explicit ParsedInternalValue(std::string* value) : value_(value) {} + + // Use this constructor in rocksdb::CompactionFilter::Filter(), + // since we use this in Compaction process, all we need to do is parsing + // the rocksdb::Slice, so don't need to modify the original value, value_ can be + // set to nullptr + explicit ParsedInternalValue(const rocksdb::Slice& value) {} + + virtual ~ParsedInternalValue() = default; + + rocksdb::Slice UserValue() { return user_value_; } + + uint64_t Version() { return version_; } + + void SetVersion(uint64_t version) { + version_ = version; + SetVersionToValue(); + } + + uint64_t Etime() { return etime_; } + + void SetEtime(uint64_t etime) { + etime_ = etime; + SetEtimeToValue(); + } + + void SetCtime(uint64_t ctime) { + ctime_ = ctime; + SetCtimeToValue(); + } + + void SetRelativeTimestamp(int64_t ttl_millsec) { + pstd::TimeType unix_time = pstd::NowMillis(); + etime_ = unix_time + ttl_millsec; + SetEtimeToValue(); + } + + bool IsPermanentSurvival() { return etime_ == 0; } + + bool IsStale() { + if 
(etime_ == 0) { + return false; + } + pstd::TimeType unix_time = pstd::NowMillis(); + return etime_ < unix_time; + } + + virtual bool IsValid() { + return !IsStale(); + } + + virtual void StripSuffix() = 0; + +protected: + virtual void SetVersionToValue() = 0; + virtual void SetEtimeToValue() = 0; + virtual void SetCtimeToValue() = 0; + std::string* value_ = nullptr; + rocksdb::Slice user_value_; + uint64_t version_ = 0 ; + uint64_t ctime_ = 0; + uint64_t etime_ = 0; + DataType type_; + char reserve_[16] = {0}; //unused +}; + +} // namespace storage +#endif // SRC_BASE_VALUE_FORMAT_H_ diff --git a/tools/pika_migrate/src/storage/src/build_version.cc.in b/tools/pika_migrate/src/storage/src/build_version.cc.in new file mode 100644 index 0000000000..1ad5231ac8 --- /dev/null +++ b/tools/pika_migrate/src/storage/src/build_version.cc.in @@ -0,0 +1,9 @@ +// Copyright (c) 2017-present, Qihoo, Inc. All rights reserved. +// This source code is licensed under the BSD-style license found in the +// LICENSE file in the root directory of this source tree. An additional grant +// of patent rights can be found in the PATENTS file in the same directory. + +#include "storage/build_version.h" +const char* storage_build_git_sha = "storage_build_git_sha:@@GIT_SHA@@"; +const char* storage_build_git_date = "storage_build_git_date:@@GIT_DATE_TIME@@"; +const char* storage_build_compile_date = __DATE__; diff --git a/tools/pika_migrate/src/storage/src/coding.h b/tools/pika_migrate/src/storage/src/coding.h new file mode 100644 index 0000000000..824bf7a080 --- /dev/null +++ b/tools/pika_migrate/src/storage/src/coding.h @@ -0,0 +1,86 @@ +// Copyright (c) 2017-present, Qihoo, Inc. All rights reserved. +// This source code is licensed under the BSD-style license found in the +// LICENSE file in the root directory of this source tree. An additional grant +// of patent rights can be found in the PATENTS file in the same directory. 
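Editor's note: one detail worth calling out from base_value_format.h is that ctime/etime are stored with bit 63 as a unit flag, so every reader has to normalize before comparing against the clock. A minimal sketch of the rule the parsers apply:

    #include <cstdint>

    // Bit 63 set: remaining bits are milliseconds. Bit 63 clear: legacy value
    // in seconds, normalized by multiplying by 1000. Zero means "no timestamp".
    uint64_t NormalizeToMillis(uint64_t raw) {
      const uint64_t kMsFlag = 1ULL << 63;
      if ((raw & kMsFlag) != 0) {
        return raw & ~kMsFlag;
      }
      return raw * 1000;  // covers raw == 0 as well, since 0 * 1000 == 0
    }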
+
+#ifndef SRC_CODING_H_
+#define SRC_CODING_H_
+
+#undef STORAGE_PLATFORM_IS_LITTLE_ENDIAN
+
+#if defined(__APPLE__)
+# include <machine/endian.h>  // __BYTE_ORDER
+# define __BYTE_ORDER __DARWIN_BYTE_ORDER
+# define __LITTLE_ENDIAN __DARWIN_LITTLE_ENDIAN
+#elif defined(__FreeBSD__)
+# include <sys/endian.h>
+# include <sys/types.h>
+# define STORAGE_PLATFORM_IS_LITTLE_ENDIAN (_BYTE_ORDER == _LITTLE_ENDIAN)
+#else
+# include <endian.h>  // __BYTE_ORDER
+#endif
+
+#ifndef STORAGE_PLATFORM_IS_LITTLE_ENDIAN
+# define STORAGE_PLATFORM_IS_LITTLE_ENDIAN (__BYTE_ORDER == __LITTLE_ENDIAN)
+#endif
+#include <string.h>
+
+namespace storage {
+static const bool kLittleEndian = STORAGE_PLATFORM_IS_LITTLE_ENDIAN;
+#undef STORAGE_PLATFORM_IS_LITTLE_ENDIAN
+
+inline void EncodeFixed32(char* buf, uint32_t value) {
+  if (kLittleEndian) {
+    memcpy(buf, &value, sizeof(value));
+  } else {
+    buf[0] = value & 0xff;
+    buf[1] = (value >> 8) & 0xff;
+    buf[2] = (value >> 16) & 0xff;
+    buf[3] = (value >> 24) & 0xff;
+  }
+}
+
+inline void EncodeFixed64(char* buf, uint64_t value) {
+  if (kLittleEndian) {
+    memcpy(buf, &value, sizeof(value));
+  } else {
+    buf[0] = value & 0xff;
+    buf[1] = (value >> 8) & 0xff;
+    buf[2] = (value >> 16) & 0xff;
+    buf[3] = (value >> 24) & 0xff;
+    buf[4] = (value >> 32) & 0xff;
+    buf[5] = (value >> 40) & 0xff;
+    buf[6] = (value >> 48) & 0xff;
+    buf[7] = (value >> 56) & 0xff;
+  }
+}
+
+inline uint32_t DecodeFixed32(const char* ptr) {
+  if (kLittleEndian) {
+    // Load the raw bytes
+    uint32_t result;
+    memcpy(&result, ptr, sizeof(result));  // gcc optimizes this to a plain load
+    return result;
+  } else {
+    return ((static_cast<uint32_t>(static_cast<unsigned char>(ptr[0]))) |
+            (static_cast<uint32_t>(static_cast<unsigned char>(ptr[1])) << 8) |
+            (static_cast<uint32_t>(static_cast<unsigned char>(ptr[2])) << 16) |
+            (static_cast<uint32_t>(static_cast<unsigned char>(ptr[3])) << 24));
+  }
+}
+
+inline uint64_t DecodeFixed64(const char* ptr) {
+  if (kLittleEndian) {
+    // Load the raw bytes
+    uint64_t result;
+    memcpy(&result, ptr, sizeof(result));  // gcc optimizes this to a plain load
+    return result;
+  } else {
+    uint64_t lo = DecodeFixed32(ptr);
+    uint64_t hi = DecodeFixed32(ptr + 4);
+    return (hi << 32) | lo;
+  }
+}
+
+}  // namespace storage
+#endif  // SRC_CODING_H_
diff --git a/tools/pika_migrate/src/storage/src/custom_comparator.h b/tools/pika_migrate/src/storage/src/custom_comparator.h
new file mode 100644
index 0000000000..185fc1d678
--- /dev/null
+++ b/tools/pika_migrate/src/storage/src/custom_comparator.h
@@ -0,0 +1,261 @@
+// Copyright (c) 2017-present, Qihoo, Inc. All rights reserved.
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree. An additional grant
+// of patent rights can be found in the PATENTS file in the same directory.
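Editor's note: a quick round-trip check for the fixed-width helpers in coding.h; on little-endian hardware both directions compile down to a plain memcpy:

    #include <cassert>
    #include "src/coding.h"

    void CodingRoundTrip() {
      char buf[8];
      storage::EncodeFixed64(buf, 0x1122334455667788ULL);
      assert(storage::DecodeFixed64(buf) == 0x1122334455667788ULL);
      storage::EncodeFixed32(buf, 0xCAFEBABEU);
      assert(storage::DecodeFixed32(buf) == 0xCAFEBABEU);
    }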
+ +#ifndef INCLUDE_CUSTOM_COMPARATOR_H_ +#define INCLUDE_CUSTOM_COMPARATOR_H_ + +#include "rocksdb/comparator.h" +#include "glog/logging.h" + +#include "storage/storage_define.h" +#include "src/debug.h" +#include "src/coding.h" + +namespace storage { +/* list data key pattern +* | reserve1 | key | version | index | reserve2 | +* | 8B | | 8B | 8B | 16B | +*/ +class ListsDataKeyComparatorImpl : public rocksdb::Comparator { + public: + ListsDataKeyComparatorImpl() = default; + + // keep compatible with floyd + const char* Name() const override { return "floyd.ListsDataKeyComparator"; } + + int Compare(const rocksdb::Slice& a, const rocksdb::Slice& b) const override { + assert(!a.empty() && !b.empty()); + const char* ptr_a = a.data(); + const char* ptr_b = b.data(); + auto a_size = static_cast(a.size()); + auto b_size = static_cast(b.size()); + + ptr_a += kPrefixReserveLength; + ptr_b += kPrefixReserveLength; + ptr_a = SeekUserkeyDelim(ptr_a, a_size - kPrefixReserveLength); + ptr_b = SeekUserkeyDelim(ptr_b, b_size - kPrefixReserveLength); + + rocksdb::Slice a_prefix(a.data(), std::distance(a.data(), ptr_a)); + rocksdb::Slice b_prefix(b.data(), std::distance(b.data(), ptr_b)); + if (a_prefix != b_prefix) { + return a_prefix.compare(b_prefix); + } + + if (ptr_a - a.data() == a_size && ptr_b - b.data() == b_size) { + return 0; + } else if (ptr_a - a.data() == a_size) { + return -1; + } else if (ptr_b - b.data() == b_size) { + return 1; + } + + uint64_t version_a = DecodeFixed64(ptr_a); + uint64_t version_b = DecodeFixed64(ptr_b); + ptr_a += sizeof(uint64_t); + ptr_b += sizeof(uint64_t); + if (version_a != version_b) { + return version_a < version_b ? -1 : 1; + } + if (ptr_a - a.data() == a_size && ptr_b - b.data() == b_size) { + return 0; + } else if (ptr_a - a.data() == a_size) { + return -1; + } else if (ptr_b - b.data() == b_size) { + return 1; + } + + uint64_t index_a = DecodeFixed64(ptr_a); + uint64_t index_b = DecodeFixed64(ptr_b); + ptr_a += sizeof(uint64_t); + ptr_b += sizeof(uint64_t); + if (index_a != index_b) { + return index_a < index_b ? 
-1 : 1;
+    } else {
+      return 0;
+    }
+  }
+
+  bool Equal(const rocksdb::Slice& a, const rocksdb::Slice& b) const override { return Compare(a, b) == 0; }
+
+  void FindShortestSeparator(std::string* start, const rocksdb::Slice& limit) const override {}
+
+  void FindShortSuccessor(std::string* key) const override {}
+};
+
+/* zset score key pattern
+ * | <Reserve1> | <Key>          | <Version> | <Score> | <Member> | <Reserve2> |
+ * |  8 Bytes   | Key Size Bytes |  8 Bytes  | 8 Bytes |          |    16B     |
+ */
+class ZSetsScoreKeyComparatorImpl : public rocksdb::Comparator {
+ public:
+  // keep compatible with floyd
+  const char* Name() const override { return "floyd.ZSetsScoreKeyComparator"; }
+  int Compare(const rocksdb::Slice& a, const rocksdb::Slice& b) const override {
+    assert(a.size() > kPrefixReserveLength);
+    assert(b.size() > kPrefixReserveLength);
+
+    const char* ptr_a = a.data();
+    const char* ptr_b = b.data();
+    auto a_size = static_cast<int32_t>(a.size());
+    auto b_size = static_cast<int32_t>(b.size());
+
+    ptr_a += kPrefixReserveLength;
+    ptr_b += kPrefixReserveLength;
+    const char* p_a = SeekUserkeyDelim(ptr_a, a_size - kPrefixReserveLength);
+    const char* p_b = SeekUserkeyDelim(ptr_b, b_size - kPrefixReserveLength);
+    rocksdb::Slice p_a_prefix = Slice(ptr_a, std::distance(ptr_a, p_a));
+    rocksdb::Slice p_b_prefix = Slice(ptr_b, std::distance(ptr_b, p_b));
+    int ret = p_a_prefix.compare(p_b_prefix);
+    if (ret != 0) {
+      return ret;
+    }
+
+    ptr_a = p_a;
+    ptr_b = p_b;
+    // compare version
+    uint64_t version_a = DecodeFixed64(ptr_a);
+    uint64_t version_b = DecodeFixed64(ptr_b);
+    if (version_a != version_b) {
+      return version_a < version_b ? -1 : 1;
+    }
+
+    ptr_a += kVersionLength;
+    ptr_b += kVersionLength;
+    // compare score
+    uint64_t a_i = DecodeFixed64(ptr_a);
+    uint64_t b_i = DecodeFixed64(ptr_b);
+
+    const void* ptr_a_score = reinterpret_cast<const void*>(&a_i);
+    const void* ptr_b_score = reinterpret_cast<const void*>(&b_i);
+    double a_score = *reinterpret_cast<const double*>(ptr_a_score);
+    double b_score = *reinterpret_cast<const double*>(ptr_b_score);
+    if (a_score != b_score) {
+      return a_score < b_score ? -1 : 1;
+    }
+
+    // compare rest of the key, including: member and reserve
+    ptr_a += kScoreLength;
+    ptr_b += kScoreLength;
+    rocksdb::Slice rest_a(ptr_a, a_size - std::distance(a.data(), ptr_a));
+    rocksdb::Slice rest_b(ptr_b, b_size - std::distance(b.data(), ptr_b));
+    return rest_a.compare(rest_b);
+  }
+
+  bool Equal(const rocksdb::Slice& a, const rocksdb::Slice& b) const override { return Compare(a, b) == 0; }
+
+  // Advanced functions: these are used to reduce the space requirements
+  // for internal data structures like index blocks.
+
+  // If *start < limit, changes *start to a short string in [start,limit).
+  // Simple comparator implementations may return with *start unchanged,
+  // i.e., an implementation of this method that does nothing is correct.
+  // TODO(wangshaoyi): need reformat, if pkey differs, why return limit directly?
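  // Editor's note, not part of the original patch: the score field stores the
  // raw bit pattern of a double in a fixed64 slot, which is why Compare()
  // above reloads eight bytes with DecodeFixed64 and then reinterprets them.
  // The same decode step as a sketch, assuming the little-endian layout that
  // EncodeFixed64 writes:
  //
  //   double DecodeScore(const char* ptr) {
  //     uint64_t bits = DecodeFixed64(ptr);
  //     double score;
  //     memcpy(&score, &bits, sizeof(score));  // the defined way to type-pun in C++
  //     return score;
  //   }
  //
  // The prefix and version comparisons run first, so this value ordering only
  // ever applies within a single (key, version) group.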
+ void FindShortestSeparator(std::string* start, const rocksdb::Slice& limit) const override { + assert(start->size() > kPrefixReserveLength); + assert(limit.size() > kPrefixReserveLength); + + const char* head_start = start->data(); + const char* head_limit = limit.data(); + const char* ptr_start = start->data(); + const char* ptr_limit = limit.data(); + ptr_start += kPrefixReserveLength; + ptr_limit += kPrefixReserveLength; + ptr_start = SeekUserkeyDelim(ptr_start, start->size() - std::distance(head_start, ptr_start)); + ptr_limit = SeekUserkeyDelim(ptr_limit, limit.size() - std::distance(head_limit, ptr_limit)); + + ptr_start += kVersionLength; + ptr_limit += kVersionLength; + + size_t start_head_to_version_length = std::distance(head_start, ptr_start); + size_t limit_head_to_version_length = std::distance(head_limit, ptr_limit); + + rocksdb::Slice key_start_prefix(start->data(), start_head_to_version_length); + rocksdb::Slice key_limit_prefix(start->data(), limit_head_to_version_length); + if (key_start_prefix.compare(key_limit_prefix) != 0) { + return; + } + + uint64_t start_i = DecodeFixed64(ptr_start); + uint64_t limit_i = DecodeFixed64(ptr_limit); + const void* ptr_start_score = reinterpret_cast(&start_i); + const void* ptr_limit_score = reinterpret_cast(&limit_i); + double start_score = *reinterpret_cast(ptr_start_score); + double limit_score = *reinterpret_cast(ptr_limit_score); + ptr_start += sizeof(uint64_t); + ptr_limit += sizeof(uint64_t); + if (start_score < limit_score) { + if (start_score + 1 < limit_score) { + start->resize(start_head_to_version_length); + start_score += 1; + const void* addr_start_score = reinterpret_cast(&start_score); + char dst[sizeof(uint64_t)]; + EncodeFixed64(dst, *reinterpret_cast(addr_start_score)); + start->append(dst, sizeof(uint64_t)); + } + return; + } + + size_t head_to_score_length = start_head_to_version_length + kScoreLength; + + std::string start_rest(ptr_start, start->size() - head_to_score_length); + std::string limit_rest(ptr_limit, limit.size() - head_to_score_length); + // Find length of common prefix + size_t min_length = std::min(start_rest.size(), limit_rest.size()); + size_t diff_index = 0; + while ((diff_index < min_length) && (start_rest[diff_index] == limit_rest[diff_index])) { + diff_index++; + } + + if (diff_index >= min_length) { + // Do not shorten if one string is a prefix of the other + } else { + auto key_start_member_byte = static_cast(start_rest[diff_index]); + auto key_limit_member_byte = static_cast(limit_rest[diff_index]); + if (key_start_member_byte >= key_limit_member_byte) { + // Cannot shorten since limit is smaller than start or start is + // already the shortest possible. + return; + } + assert(key_start_member_byte < key_limit_member_byte); + + if (diff_index < limit_rest.size() - 1 || key_start_member_byte + 1 < key_limit_member_byte) { + start_rest[diff_index]++; + start_rest.resize(diff_index + 1); + start->resize(head_to_score_length); + start->append(start_rest); + } else { + // v + // A A 1 A A A + // A A 2 + // + // Incrementing the current byte will make start bigger than limit, we + // will skip this byte, and find the first non 0xFF byte in start and + // increment it. 
+ diff_index++; + + while (diff_index < start_rest.size()) { + // Keep moving until we find the first non 0xFF byte to + // increment it + if (static_cast(start_rest[diff_index]) < static_cast(0xff)) { + start_rest[diff_index]++; + start_rest.resize(diff_index + 1); + start->resize(head_to_score_length); + start->append(start_rest); + break; + } + diff_index++; + } + } + } + } + + // Changes *key to a short string >= *key. + // Simple comparator implementations may return with *key unchanged, + // i.e., an implementation of this method that does nothing is correct. + void FindShortSuccessor(std::string* key) const override {} +}; + +} // namespace storage +#endif // INCLUDE_CUSTOM_COMPARATOR_H_ diff --git a/tools/pika_migrate/src/storage/src/db_checkpoint.cc b/tools/pika_migrate/src/storage/src/db_checkpoint.cc new file mode 100644 index 0000000000..0490d62a41 --- /dev/null +++ b/tools/pika_migrate/src/storage/src/db_checkpoint.cc @@ -0,0 +1,265 @@ +// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. +// This source code is licensed under the BSD-style license found in the +// LICENSE file in the root directory of this source tree. An additional grant +// of patent rights can be found in the PATENTS file in the same directory. +// +// Copyright (c) 2012 Facebook. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +#ifndef ROCKSDB_LITE + +# include "storage/db_checkpoint.h" + +# ifndef __STDC_FORMAT_MACROS +# define __STDC_FORMAT_MACROS +# endif + +# include + +#include +# include "file/file_util.h" +# include "rocksdb/db.h" +// #include "file/filename.h" + +namespace rocksdb { + +class DBCheckpointImpl : public DBCheckpoint { + public: + // Creates a DBCheckPoint object to be used for creating openable snapshots + explicit DBCheckpointImpl(DB* db) : db_(db) {} + + // Builds an openable snapshot of RocksDB on the same disk, which + // accepts an output directory on the same disk, and under the directory + // (1) hard-linked SST files pointing to existing live SST files + // SST files will be copied if output directory is on a different filesystem + // (2) a copied manifest files and other files + // The directory should not already exist and will be created by this API. 
+ // The directory will be an absolute path + using DBCheckpoint::CreateCheckpoint; + Status CreateCheckpoint(const std::string& checkpoint_dir) override; + + using DBCheckpoint::GetCheckpointFiles; + Status GetCheckpointFiles(std::vector& live_files, VectorLogPtr& live_wal_files, + uint64_t& manifest_file_size, uint64_t& sequence_number) override; + + using DBCheckpoint::CreateCheckpointWithFiles; + Status CreateCheckpointWithFiles(const std::string& checkpoint_dir, std::vector& live_files, + VectorLogPtr& live_wal_files, uint64_t manifest_file_size, + uint64_t sequence_number) override; + + private: + DB* db_; +}; + +Status DBCheckpoint::Create(DB* db, DBCheckpoint** checkpoint_ptr) { + *checkpoint_ptr = new DBCheckpointImpl(db); + return Status::OK(); +} + +Status DBCheckpoint::CreateCheckpoint(const std::string& checkpoint_dir) { return Status::NotSupported(""); } + +// Builds an openable snapshot of RocksDB +Status DBCheckpointImpl::CreateCheckpoint(const std::string& checkpoint_dir) { + std::vector live_files; + VectorLogPtr live_wal_files; + uint64_t manifest_file_size; + uint64_t sequence_number; + Status s = GetCheckpointFiles(live_files, live_wal_files, manifest_file_size, sequence_number); + if (s.ok()) { + s = CreateCheckpointWithFiles(checkpoint_dir, live_files, live_wal_files, manifest_file_size, sequence_number); + } + return s; +} + +Status DBCheckpointImpl::GetCheckpointFiles(std::vector& live_files, VectorLogPtr& live_wal_files, + uint64_t& manifest_file_size, uint64_t& sequence_number) { + Status s; + sequence_number = db_->GetLatestSequenceNumber(); + + s = db_->DisableFileDeletions(); + if (s.ok()) { + // this will return live_files prefixed with "/" + s = db_->GetLiveFiles(live_files, &manifest_file_size); + } + + // if we have more than one column family, we need to also get WAL files + if (s.ok()) { + s = db_->GetSortedWalFiles(live_wal_files); + } + + if (!s.ok()) { + db_->EnableFileDeletions(false); + } + + return s; +} + +Status DBCheckpointImpl::CreateCheckpointWithFiles(const std::string& checkpoint_dir, + std::vector& live_files, VectorLogPtr& live_wal_files, + uint64_t manifest_file_size, uint64_t sequence_number) { + bool same_fs = true; + + Status s = db_->GetEnv()->FileExists(checkpoint_dir); + if (s.ok()) { + return Status::InvalidArgument("Directory exists"); + } else if (!s.IsNotFound()) { + assert(s.IsIOError()); + return s; + } + + // if wal_dir eq db path, rocksdb will clear it when opening + // make wal_dir valid in that case + std::string wal_dir = db_->GetOptions().wal_dir; + if (wal_dir.empty()) { + wal_dir = db_->GetOptions().db_paths[0].path; + } + + size_t wal_size = live_wal_files.size(); + Log(db_->GetOptions().info_log, "Started the snapshot process -- creating snapshot in directory %s", + checkpoint_dir.c_str()); + + std::string full_private_path = checkpoint_dir + ".tmp"; + + // create snapshot directory + s = db_->GetEnv()->CreateDir(full_private_path); + + // copy/hard link live_files + std::string manifest_fname; + std::string current_fname; + for (size_t i = 0; s.ok() && i < live_files.size(); ++i) { + uint64_t number; + FileType type; + bool ok = ParseFileName(live_files[i], &number, &type); + if (!ok) { + s = Status::Corruption("Can't parse file name. 
This is very bad"); + break; + } + // we should only get sst, options, manifest and current files here + assert(type == kTableFile || type == kDescriptorFile || type == kCurrentFile || type == kOptionsFile); + assert(!live_files[i].empty() && live_files[i][0] == '/'); + if (type == kCurrentFile) { + // We will craft the current file manually to ensure it's consistent with + // the manifest number. This is necessary because current's file contents + // can change during checkpoint creation. + current_fname = live_files[i]; + continue; + } else if (type == kDescriptorFile) { + manifest_fname = live_files[i]; + } + std::string src_fname = live_files[i]; + + // rules: + // * if it's kTableFile, then it's shared + // * if it's kDescriptorFile, limit the size to manifest_file_size + // * always copy if cross-device link + if ((type == kTableFile) && same_fs) { + Log(db_->GetOptions().info_log, "Hard Linking %s", src_fname.c_str()); + s = db_->GetEnv()->LinkFile(db_->GetName() + src_fname, full_private_path + src_fname); + if (s.IsNotSupported()) { + same_fs = false; + s = Status::OK(); + } + } + if ((type != kTableFile) || (!same_fs)) { + Log(db_->GetOptions().info_log, "Copying %s", src_fname.c_str()); +# if (ROCKSDB_MAJOR < 5 || (ROCKSDB_MAJOR == 5 && ROCKSDB_MINOR < 3)) + s = CopyFile(db_->GetEnv(), db_->GetName() + src_fname, full_private_path + src_fname, + (type == kDescriptorFile) ? manifest_file_size : 0); +# else + s = CopyFile(db_->GetFileSystem(), db_->GetName() + src_fname, full_private_path + src_fname, + (type == kDescriptorFile) ? manifest_file_size : 0, false, nullptr, Temperature::kUnknown); +# endif + } + } + if (s.ok() && !current_fname.empty() && !manifest_fname.empty()) { +// 5.17.2 Createfile with new argv use_fsync +# if (ROCKSDB_MAJOR < 5 || (ROCKSDB_MAJOR == 5 && ROCKSDB_MINOR < 17)) + s = CreateFile(db_->GetEnv(), full_private_path + current_fname, manifest_fname.substr(1) + "\n"); +# else + s = CreateFile(db_->GetFileSystem(), full_private_path + current_fname, manifest_fname.substr(1) + "\n", false); +# endif + } + // Log(db_->GetOptions().info_log, + // "Number of log files %" ROCKSDB_PRIszt, live_wal_files.size()); + + // Link WAL files. Copy exact size of last one because it is the only one + // that has changes after the last flush. 
+ for (size_t i = 0; s.ok() && i < wal_size; ++i) { + if ((live_wal_files[i]->Type() == kAliveLogFile) && (live_wal_files[i]->StartSequence() >= sequence_number)) { + if (i + 1 == wal_size) { + Log(db_->GetOptions().info_log, "Copying %s", live_wal_files[i]->PathName().c_str()); +# if (ROCKSDB_MAJOR < 5 || (ROCKSDB_MAJOR == 5 && ROCKSDB_MINOR < 3)) + s = CopyFile(db_->GetEnv(), wal_dir + live_wal_files[i]->PathName(), + full_private_path + live_wal_files[i]->PathName(), live_wal_files[i]->SizeFileBytes()); +# else + s = CopyFile(db_->GetFileSystem(), wal_dir + live_wal_files[i]->PathName(), + full_private_path + live_wal_files[i]->PathName(), live_wal_files[i]->SizeFileBytes(), false, + nullptr, Temperature::kUnknown); +# endif + break; + } + if (same_fs) { + // we only care about live log files + Log(db_->GetOptions().info_log, "Hard Linking %s", live_wal_files[i]->PathName().c_str()); + s = db_->GetEnv()->LinkFile(wal_dir + live_wal_files[i]->PathName(), + full_private_path + live_wal_files[i]->PathName()); + if (s.IsNotSupported()) { + same_fs = false; + s = Status::OK(); + } + } + if (!same_fs) { + Log(db_->GetOptions().info_log, "Copying %s", live_wal_files[i]->PathName().c_str()); +# if (ROCKSDB_MAJOR < 5 || (ROCKSDB_MAJOR == 5 && ROCKSDB_MINOR < 3)) + s = CopyFile(db_->GetEnv(), wal_dir + live_wal_files[i]->PathName(), + full_private_path + live_wal_files[i]->PathName(), 0); +# else + s = CopyFile(db_->GetFileSystem(), wal_dir + live_wal_files[i]->PathName(), + full_private_path + live_wal_files[i]->PathName(), 0, false, nullptr, Temperature::kUnknown); +# endif + } + } + } + + // we copied all the files, enable file deletions + db_->EnableFileDeletions(false); + + if (s.ok()) { + // move tmp private backup to real snapshot directory + s = db_->GetEnv()->RenameFile(full_private_path, checkpoint_dir); + } + if (s.ok()) { + std::unique_ptr checkpoint_directory; + db_->GetEnv()->NewDirectory(checkpoint_dir, &checkpoint_directory); + if (checkpoint_directory) { + s = checkpoint_directory->Fsync(); + } + } + + if (!s.ok()) { + // clean all the files we might have created + Log(db_->GetOptions().info_log, "Snapshot failed -- %s", s.ToString().c_str()); + // we have to delete the dir and all its children + std::vector subchildren; + db_->GetEnv()->GetChildren(full_private_path, &subchildren); + for (auto& subchild : subchildren) { + std::string subchild_path = full_private_path.append("/" + subchild); + Status s1 = db_->GetEnv()->DeleteFile(subchild_path); + Log(db_->GetOptions().info_log, "Delete file %s -- %s", subchild_path.c_str(), s1.ToString().c_str()); + } + // finally delete the private dir + Status s1 = db_->GetEnv()->DeleteDir(full_private_path); + Log(db_->GetOptions().info_log, "Delete dir %s -- %s", full_private_path.c_str(), s1.ToString().c_str()); + return s; + } + + // here we know that we succeeded and installed the new snapshot + Log(db_->GetOptions().info_log, "Snapshot DONE. All is good"); + Log(db_->GetOptions().info_log, "Snapshot sequence number: %" PRIu64, sequence_number); + + return s; +} +} // namespace rocksdb + +#endif // ROCKSDB_LITE diff --git a/tools/pika_migrate/src/storage/src/debug.h b/tools/pika_migrate/src/storage/src/debug.h new file mode 100644 index 0000000000..94c32c70b1 --- /dev/null +++ b/tools/pika_migrate/src/storage/src/debug.h @@ -0,0 +1,32 @@ +// Copyright (c) 2017-present, Qihoo, Inc. All rights reserved. +// This source code is licensed under the BSD-style license found in the +// LICENSE file in the root directory of this source tree. 
An additional grant +// of patent rights can be found in the PATENTS file in the same directory. + +#ifndef SRC_DEBUG_H_ +#define SRC_DEBUG_H_ + +#ifndef NDEBUG +# define TRACE(M, ...) fprintf(stderr, "[TRACE] (%s:%d) " M "\n", __FILE__, __LINE__, ##__VA_ARGS__) +# define DEBUG(M, ...) fprintf(stderr, "[Debug] (%s:%d) " M "\n", __FILE__, __LINE__, ##__VA_ARGS__) +#else +# define TRACE(M, ...) {} +# define DEBUG(M, ...) {} +#endif // NDEBUG + +static std::string get_printable_key(const std::string& key) { + std::string res; + for (int i = 0; i < key.size(); i++) { + if (std::isprint(key[i])) { + res.append(1, key[i]); + } else { + char tmp[3]; + snprintf(tmp, 2, "%02x", key[i] & 0xFF); + res.append(tmp, 2); + } + } + return res; +} + + +#endif // SRC_DEBUG_H_ diff --git a/tools/pika_migrate/src/storage/src/lists_data_key_format.h b/tools/pika_migrate/src/storage/src/lists_data_key_format.h new file mode 100644 index 0000000000..1c5ab5ec1b --- /dev/null +++ b/tools/pika_migrate/src/storage/src/lists_data_key_format.h @@ -0,0 +1,118 @@ +// Copyright (c) 2017-present, Qihoo, Inc. All rights reserved. +// This source code is licensed under the BSD-style license found in the +// LICENSE file in the root directory of this source tree. An additional grant +// of patent rights can be found in the PATENTS file in the same directory. + +#ifndef SRC_LISTS_DATA_KEY_FORMAT_H_ +#define SRC_LISTS_DATA_KEY_FORMAT_H_ + +#include "src/coding.h" +#include "storage/storage_define.h" + +namespace storage { +/* +* used for List data key. format: +* | reserve1 | key | version | index | reserve2 | +* | 8B | | 8B | 8B | 16B | +*/ +class ListsDataKey { +public: + ListsDataKey(const Slice& key, uint64_t version, uint64_t index) + : key_(key), version_(version), index_(index) {} + + ~ListsDataKey() { + if (start_ != space_) { + delete[] start_; + } + } + + Slice Encode() { + size_t meta_size = sizeof(reserve1_) + sizeof(version_) + sizeof(reserve2_); + size_t usize = key_.size() + sizeof(index_) + kEncodedKeyDelimSize; + size_t nzero = std::count(key_.data(), key_.data() + key_.size(), kNeedTransformCharacter); + usize += nzero; + size_t needed = meta_size + usize; + char* dst; + if (needed <= sizeof(space_)) { + dst = space_; + } else { + dst = new char[needed]; + + // Need to allocate space, delete previous space + if (start_ != space_) { + delete[] start_; + } + } + + start_ = dst; + // reserve1: 8 byte + memcpy(dst, reserve1_, sizeof(reserve1_)); + dst += sizeof(reserve1_); + dst = EncodeUserKey(key_, dst, nzero); + // version 8 byte + EncodeFixed64(dst, version_); + dst += sizeof(version_); + // index + EncodeFixed64(dst, index_); + dst += sizeof(index_); + // TODO(wangshaoyi): too much for reserve + // reserve2: 16 byte + memcpy(dst, reserve2_, sizeof(reserve2_)); + return Slice(start_, needed); + } + +private: + char* start_ = nullptr; + char space_[200]; + char reserve1_[8] = {0}; + Slice key_; + uint64_t version_ = uint64_t(-1); + uint64_t index_ = 0; + char reserve2_[16] = {0}; +}; + +class ParsedListsDataKey { + public: + explicit ParsedListsDataKey(const std::string* key) { + const char* ptr = key->data(); + const char* end_ptr = key->data() + key->size(); + decode(ptr, end_ptr); + } + + explicit ParsedListsDataKey(const Slice& key) { + const char* ptr = key.data(); + const char* end_ptr = key.data() + key.size(); + decode(ptr, end_ptr); + } + + void decode(const char* ptr, const char* end_ptr) { + const char* start = ptr; + // skip head reserve1_ + ptr += sizeof(reserve1_); + // skip tail reserve2_ + 
+    end_ptr -= sizeof(reserve2_);
+
+    ptr = DecodeUserKey(ptr, std::distance(ptr, end_ptr), &key_str_);
+    version_ = DecodeFixed64(ptr);
+    ptr += sizeof(version_);
+    index_ = DecodeFixed64(ptr);
+  }
+
+  virtual ~ParsedListsDataKey() = default;
+
+  Slice key() { return Slice(key_str_); }
+
+  uint64_t Version() { return version_; }
+
+  uint64_t index() { return index_; }
+
+ private:
+  std::string key_str_;
+  char reserve1_[8] = {0};
+  uint64_t version_ = (uint64_t)(-1);
+  uint64_t index_ = 0;
+  char reserve2_[16] = {0};
+};
+
+}  // namespace storage
+#endif  // SRC_LISTS_DATA_KEY_FORMAT_H_
diff --git a/tools/pika_migrate/src/storage/src/lists_filter.h b/tools/pika_migrate/src/storage/src/lists_filter.h
new file mode 100644
index 0000000000..92186d5149
--- /dev/null
+++ b/tools/pika_migrate/src/storage/src/lists_filter.h
@@ -0,0 +1,153 @@
+// Copyright (c) 2017-present, Qihoo, Inc. All rights reserved.
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree. An additional grant
+// of patent rights can be found in the PATENTS file in the same directory.
+
+#ifndef SRC_LISTS_FILTER_H_
+#define SRC_LISTS_FILTER_H_
+
+#include <memory>
+#include <string>
+#include <vector>
+
+#include "rocksdb/compaction_filter.h"
+#include "rocksdb/db.h"
+#include "src/debug.h"
+#include "src/lists_data_key_format.h"
+#include "src/lists_meta_value_format.h"
+#include "src/base_value_format.h"
+
+namespace storage {
+
+/*
+ * Because the meta-data filtering strategy for lists
+ * is integrated into base_filter.h, it has been removed here.
+ */
+
+class ListsDataFilter : public rocksdb::CompactionFilter {
+ public:
+  ListsDataFilter(rocksdb::DB* db, std::vector<rocksdb::ColumnFamilyHandle*>* cf_handles_ptr, enum DataType type)
+      : db_(db),
+        cf_handles_ptr_(cf_handles_ptr),
+        type_(type)
+  {}
+
+  bool Filter(int level, const rocksdb::Slice& key, const rocksdb::Slice& value, std::string* new_value,
+              bool* value_changed) const override {
+    UNUSED(level);
+    UNUSED(value);
+    UNUSED(new_value);
+    UNUSED(value_changed);
+    ParsedListsDataKey parsed_lists_data_key(key);
+    TRACE("==========================START==========================");
+    TRACE("[DataFilter], key: %s, index = %llu, data = %s, version = %llu", parsed_lists_data_key.key().ToString().c_str(),
+          parsed_lists_data_key.index(), value.ToString().c_str(), parsed_lists_data_key.Version());
+
+    const char* ptr = key.data();
+    int key_size = key.size();
+    ptr = SeekUserkeyDelim(ptr + kPrefixReserveLength, key_size - kPrefixReserveLength);
+    std::string meta_key_enc(key.data(), std::distance(key.data(), ptr));
+    meta_key_enc.append(kSuffixReserveLength, kNeedTransformCharacter);
+
+    if (meta_key_enc != cur_key_) {
+      cur_key_ = meta_key_enc;
+      cur_meta_etime_ = 0;
+      cur_meta_version_ = 0;
+      meta_not_found_ = true;
+      std::string meta_value;
+      // handles are destroyed when the database is closed; reserve the current key's value
+      if (cf_handles_ptr_->empty()) {
+        return false;
+      }
+      rocksdb::Status s = db_->Get(default_read_options_, (*cf_handles_ptr_)[0], cur_key_, &meta_value);
+      if (s.ok()) {
+        /*
+         * The elimination policy for keys of a data type is that if the key
+         * type recorded in the meta CF is inconsistent with the type of the
+         * data key, the data key needs to be eliminated.
+         */
+        auto type = static_cast<DataType>(static_cast<uint8_t>(meta_value[0]));
+        if (type != type_) {
+          return true;
+        }
+        ParsedListsMetaValue parsed_lists_meta_value(&meta_value);
+        meta_not_found_ = false;
+        cur_meta_version_ = parsed_lists_meta_value.Version();
+        cur_meta_etime_ = parsed_lists_meta_value.Etime();
+      } else if (s.IsNotFound()) {
+        meta_not_found_ = true;
+      } else {
+        cur_key_ = "";
+        TRACE("Reserve[Get meta_key failed]");
+        return false;
+      }
+    }
+
+    if (meta_not_found_) {
+      TRACE("Drop[Meta key not exist]");
+      return true;
+    }
+
+    pstd::TimeType unix_time = pstd::NowMillis();
+    if (cur_meta_etime_ != 0 && cur_meta_etime_ < static_cast<uint64_t>(unix_time)) {
+      TRACE("Drop[Timeout]");
+      return true;
+    }
+
+    if (cur_meta_version_ > parsed_lists_data_key.Version()) {
+      TRACE("Drop[list_data_key_version < cur_meta_version]");
+      return true;
+    } else {
+      TRACE("Reserve[list_data_key_version == cur_meta_version]");
+      return false;
+    }
+  }
+
+  /*
+  // Only judge by meta value ttl
+  virtual rocksdb::CompactionFilter::Decision FilterBlobByKey(int level, const Slice& key,
+      std::string* new_value, std::string* skip_until) const {
+    UNUSED(level);
+    UNUSED(new_value);
+    UNUSED(skip_until);
+    bool unused_value_changed;
+    bool should_remove = Filter(level, key, Slice{}, new_value, &unused_value_changed);
+    if (should_remove) {
+      return CompactionFilter::Decision::kRemove;
+    }
+    return CompactionFilter::Decision::kKeep;
+  }
+  */
+
+  const char* Name() const override { return "ListsDataFilter"; }
+
+ private:
+  rocksdb::DB* db_ = nullptr;
+  std::vector<rocksdb::ColumnFamilyHandle*>* cf_handles_ptr_ = nullptr;
+  rocksdb::ReadOptions default_read_options_;
+  mutable std::string cur_key_;
+  mutable bool meta_not_found_ = false;
+  mutable uint64_t cur_meta_version_ = 0;
+  mutable uint64_t cur_meta_etime_ = 0;
+  enum DataType type_ = DataType::kNones;
+};
+
+class ListsDataFilterFactory : public rocksdb::CompactionFilterFactory {
+ public:
+  ListsDataFilterFactory(rocksdb::DB** db_ptr, std::vector<rocksdb::ColumnFamilyHandle*>* handles_ptr, enum DataType type)
+      : db_ptr_(db_ptr), cf_handles_ptr_(handles_ptr), type_(type) {}
+
+  std::unique_ptr<rocksdb::CompactionFilter> CreateCompactionFilter(
+      const rocksdb::CompactionFilter::Context& context) override {
+    return std::unique_ptr<rocksdb::CompactionFilter>(new ListsDataFilter(*db_ptr_, cf_handles_ptr_, type_));
+  }
+  const char* Name() const override { return "ListsDataFilterFactory"; }
+
+ private:
+  rocksdb::DB** db_ptr_ = nullptr;
+  std::vector<rocksdb::ColumnFamilyHandle*>* cf_handles_ptr_ = nullptr;
+  enum DataType type_ = DataType::kNones;
+};
+
+}  // namespace storage
+#endif  // SRC_LISTS_FILTER_H_
diff --git a/tools/pika_migrate/src/storage/src/lists_meta_value_format.h b/tools/pika_migrate/src/storage/src/lists_meta_value_format.h
new file mode 100644
index 0000000000..b417d9a186
--- /dev/null
+++ b/tools/pika_migrate/src/storage/src/lists_meta_value_format.h
@@ -0,0 +1,284 @@
+// Copyright (c) 2017-present, Qihoo, Inc. All rights reserved.
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree. An additional grant
+// of patent rights can be found in the PATENTS file in the same directory.
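+// A minimal usage sketch (assumed, not part of the original files) of the
+// ListsDataFilterFactory defined in lists_filter.h above. Redis::Open() in
+// redis.cc later in this patch wires it up the same way; the local names
+// below are illustrative only.
+//
+//   rocksdb::DB* db = nullptr;                          // set by rocksdb::DB::Open()
+//   std::vector<rocksdb::ColumnFamilyHandle*> handles;  // filled by rocksdb::DB::Open()
+//   rocksdb::ColumnFamilyOptions list_data_cf_ops;
+//   list_data_cf_ops.compaction_filter_factory =
+//       std::make_shared<storage::ListsDataFilterFactory>(&db, &handles, storage::DataType::kLists);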
+
+#ifndef SRC_LISTS_META_VALUE_FORMAT_H_
+#define SRC_LISTS_META_VALUE_FORMAT_H_
+
+#include <string>
+
+#include "src/base_value_format.h"
+#include "storage/storage_define.h"
+
+namespace storage {
+
+const uint64_t InitalLeftIndex = 9223372036854775807;
+const uint64_t InitalRightIndex = 9223372036854775808U;
+
+/*
+ * | type | list_size | version | left index | right index | reserve | cdate | timestamp |
+ * |  1B  |    8B     |   8B    |     8B     |     8B      |   16B   |  8B   |    8B     |
+ */
+class ListsMetaValue : public InternalValue {
+ public:
+  explicit ListsMetaValue(const rocksdb::Slice& user_value)
+      : InternalValue(DataType::kLists, user_value), left_index_(InitalLeftIndex), right_index_(InitalRightIndex) {}
+
+  rocksdb::Slice Encode() override {
+    size_t usize = user_value_.size();
+    size_t needed = usize + kVersionLength + 2 * kListValueIndexLength +
+                    kSuffixReserveLength + 2 * kTimestampLength + kTypeLength;
+    char* dst = ReAllocIfNeeded(needed);
+    memcpy(dst, &type_, sizeof(type_));
+    dst += sizeof(type_);
+    char* start_pos = dst;
+
+    memcpy(dst, user_value_.data(), usize);
+    dst += usize;
+    EncodeFixed64(dst, version_);
+    dst += kVersionLength;
+    EncodeFixed64(dst, left_index_);
+    dst += kListValueIndexLength;
+    EncodeFixed64(dst, right_index_);
+    dst += kListValueIndexLength;
+    memcpy(dst, reserve_, sizeof(reserve_));
+    dst += kSuffixReserveLength;
+    // The most significant bit is 1 for milliseconds and 0 for seconds.
+    // Data written earlier was stored in seconds; data written later is stored in milliseconds.
+    uint64_t ctime = ctime_ > 0 ? (ctime_ | (1ULL << 63)) : 0;
+    EncodeFixed64(dst, ctime);
+    dst += kTimestampLength;
+    uint64_t etime = etime_ > 0 ? (etime_ | (1ULL << 63)) : 0;
+    EncodeFixed64(dst, etime);
+    return {start_, needed};
+  }
+
+  uint64_t UpdateVersion() {
+    pstd::TimeType unix_time = pstd::NowMillis();
+    if (version_ >= static_cast<uint64_t>(unix_time)) {
+      version_++;
+    } else {
+      version_ = static_cast<uint64_t>(unix_time);
+    }
+    return version_;
+  }
+
+  uint64_t LeftIndex() { return left_index_; }
+
+  void ModifyLeftIndex(uint64_t index) { left_index_ -= index; }
+
+  uint64_t RightIndex() { return right_index_; }
+
+  void ModifyRightIndex(uint64_t index) { right_index_ += index; }
+
+ private:
+  uint64_t left_index_ = 0;
+  uint64_t right_index_ = 0;
+};
+
+class ParsedListsMetaValue : public ParsedInternalValue {
+ public:
+  // Use this constructor after rocksdb::DB::Get();
+  explicit ParsedListsMetaValue(std::string* internal_value_str)
+      : ParsedInternalValue(internal_value_str) {
+    assert(internal_value_str->size() >= kListsMetaValueSuffixLength);
+    if (internal_value_str->size() >= kListsMetaValueSuffixLength) {
+      size_t offset = 0;
+      type_ = static_cast<DataType>(static_cast<uint8_t>((*internal_value_str)[0]));
+      offset += kTypeLength;
+      user_value_ = rocksdb::Slice(internal_value_str->data() + kTypeLength,
+                                   internal_value_str->size() - kListsMetaValueSuffixLength - kTypeLength);
+      offset += user_value_.size();
+      version_ = DecodeFixed64(internal_value_str->data() + offset);
+      offset += kVersionLength;
+      left_index_ = DecodeFixed64(internal_value_str->data() + offset);
+      offset += kListValueIndexLength;
+      right_index_ = DecodeFixed64(internal_value_str->data() + offset);
+      offset += kListValueIndexLength;
+      memcpy(reserve_, internal_value_str->data() + offset, sizeof(reserve_));
+      offset += kSuffixReserveLength;
+      uint64_t ctime = DecodeFixed64(internal_value_str->data() + offset);
+      offset += kTimestampLength;
+      uint64_t etime = DecodeFixed64(internal_value_str->data() + offset);
+      offset += kTimestampLength;
+
+      ctime_ = (ctime & ~(1ULL << 63));
+      // if ctime_ == ctime, ctime_ was stored in seconds
+      if (ctime_ == ctime) {
+        ctime_ *= 1000;
+      }
+      etime_ = (etime & ~(1ULL << 63));
+      // if etime_ == etime, etime_ was stored in seconds
+      if (etime == etime_) {
+        etime_ *= 1000;
+      }
+    }
+    count_ = DecodeFixed64(internal_value_str->data() + kTypeLength);
+  }
+
+  // Use this constructor in rocksdb::CompactionFilter::Filter();
+  explicit ParsedListsMetaValue(const rocksdb::Slice& internal_value_slice)
+      : ParsedInternalValue(internal_value_slice) {
+    assert(internal_value_slice.size() >= kListsMetaValueSuffixLength);
+    if (internal_value_slice.size() >= kListsMetaValueSuffixLength) {
+      size_t offset = 0;
+      type_ = static_cast<DataType>(static_cast<uint8_t>(internal_value_slice[0]));
+      offset += kTypeLength;
+      user_value_ = rocksdb::Slice(internal_value_slice.data() + kTypeLength,
+                                   internal_value_slice.size() - kListsMetaValueSuffixLength - kTypeLength);
+      offset += user_value_.size();
+      version_ = DecodeFixed64(internal_value_slice.data() + offset);
+      offset += kVersionLength;
+      left_index_ = DecodeFixed64(internal_value_slice.data() + offset);
+      offset += kListValueIndexLength;
+      right_index_ = DecodeFixed64(internal_value_slice.data() + offset);
+      offset += kListValueIndexLength;
+      memcpy(reserve_, internal_value_slice.data() + offset, sizeof(reserve_));
+      offset += kSuffixReserveLength;
+      uint64_t ctime = DecodeFixed64(internal_value_slice.data() + offset);
+      offset += kTimestampLength;
+      uint64_t etime = DecodeFixed64(internal_value_slice.data() + offset);
+      offset += kTimestampLength;
+
+      ctime_ = (ctime & ~(1ULL << 63));
+      // if ctime_ == ctime, ctime_ was stored in seconds
+      if (ctime_ == ctime) {
+        ctime_ *= 1000;
+      }
+      etime_ = (etime & ~(1ULL << 63));
+      // if etime_ == etime, etime_ was stored in seconds
+      if (etime == etime_) {
+        etime_ *= 1000;
+      }
+    }
+    count_ = DecodeFixed64(internal_value_slice.data() + kTypeLength);
+  }
+
+  void StripSuffix() override {
+    if (value_) {
+      value_->erase(value_->size() - kListsMetaValueSuffixLength, kListsMetaValueSuffixLength);
+    }
+  }
+
+  void SetVersionToValue() override {
+    if (value_) {
+      char* dst = const_cast<char*>(value_->data()) + value_->size() - kListsMetaValueSuffixLength;
+      EncodeFixed64(dst, version_);
+    }
+  }
+
+  void SetCtimeToValue() override {
+    if (value_) {
+      char* dst = const_cast<char*>(value_->data()) + value_->size() - 2 * kTimestampLength;
+      uint64_t ctime = ctime_ > 0 ? (ctime_ | (1ULL << 63)) : 0;
+      EncodeFixed64(dst, ctime);
+    }
+  }
+
+  void SetEtimeToValue() override {
+    if (value_) {
+      char* dst = const_cast<char*>(value_->data()) + value_->size() - kTimestampLength;
+      uint64_t etime = etime_ > 0 ? (etime_ | (1ULL << 63)) : 0;
+      EncodeFixed64(dst, etime);
+    }
+  }
+
+  void SetIndexToValue() {
+    if (value_) {
+      char* dst = const_cast<char*>(value_->data()) + value_->size() - kListsMetaValueSuffixLength + kVersionLength;
+      EncodeFixed64(dst, left_index_);
+      dst += sizeof(left_index_);
+      EncodeFixed64(dst, right_index_);
+    }
+  }
+
+  uint64_t InitialMetaValue() {
+    this->SetCount(0);
+    this->set_left_index(InitalLeftIndex);
+    this->set_right_index(InitalRightIndex);
+    this->SetEtime(0);
+    this->SetCtime(0);
+    return this->UpdateVersion();
+  }
+
+  bool IsValid() override {
+    return !IsStale() && Count() != 0;
+  }
+
+  uint64_t Count() { return count_; }
+
+  void SetCount(uint64_t count) {
+    count_ = count;
+    if (value_) {
+      char* dst = const_cast<char*>(value_->data());
+      EncodeFixed64(dst + kTypeLength, count_);
+    }
+  }
+
+  void ModifyCount(uint64_t delta) {
+    count_ += delta;
+    if (value_) {
+      char* dst = const_cast<char*>(value_->data());
+      EncodeFixed64(dst + kTypeLength, count_);
+    }
+  }
+
+  uint64_t UpdateVersion() {
+    pstd::TimeType unix_time = pstd::NowMillis();
+    if (version_ >= static_cast<uint64_t>(unix_time)) {
+      version_++;
+    } else {
+      version_ = static_cast<uint64_t>(unix_time);
+    }
+    SetVersionToValue();
+    return version_;
+  }
+
+  uint64_t LeftIndex() { return left_index_; }
+
+  void set_left_index(uint64_t index) {
+    left_index_ = index;
+    if (value_) {
+      char* dst = const_cast<char*>(value_->data()) + value_->size() - kListsMetaValueSuffixLength + kVersionLength;
+      EncodeFixed64(dst, left_index_);
+    }
+  }
+
+  void ModifyLeftIndex(uint64_t index) {
+    left_index_ -= index;
+    if (value_) {
+      char* dst = const_cast<char*>(value_->data()) + value_->size() - kListsMetaValueSuffixLength + kVersionLength;
+      EncodeFixed64(dst, left_index_);
+    }
+  }
+
+  uint64_t RightIndex() { return right_index_; }
+
+  void set_right_index(uint64_t index) {
+    right_index_ = index;
+    if (value_) {
+      char* dst = const_cast<char*>(value_->data()) + value_->size() - kListsMetaValueSuffixLength + kVersionLength + kListValueIndexLength;
+      EncodeFixed64(dst, right_index_);
+    }
+  }
+
+  void ModifyRightIndex(uint64_t index) {
+    right_index_ += index;
+    if (value_) {
+      char* dst = const_cast<char*>(value_->data()) + value_->size() - kListsMetaValueSuffixLength + kVersionLength + kListValueIndexLength;
+      EncodeFixed64(dst, right_index_);
+    }
+  }
+
+ private:
+  const size_t kListsMetaValueSuffixLength = kVersionLength + 2 * kListValueIndexLength + kSuffixReserveLength + 2 * kTimestampLength;
+
+ private:
+  uint64_t count_ = 0;
+  uint64_t left_index_ = 0;
+  uint64_t right_index_ = 0;
+};
+
+}  // namespace storage
+#endif  // SRC_LISTS_META_VALUE_FORMAT_H_
diff --git a/tools/pika_migrate/src/storage/src/lock_mgr.h b/tools/pika_migrate/src/storage/src/lock_mgr.h
new file mode 100644
index 0000000000..e07530f22c
--- /dev/null
+++ b/tools/pika_migrate/src/storage/src/lock_mgr.h
@@ -0,0 +1,21 @@
+// Copyright (c) 2017-present, Qihoo, Inc. All rights reserved.
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree. An additional grant
+// of patent rights can be found in the PATENTS file in the same directory.
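+// A short worked example (hypothetical helper, not from the original tree) of
+// the seconds-vs-milliseconds flag used by the lists meta value above: the top
+// bit of a stored ctime/etime marks a millisecond value, while legacy values
+// without the flag are seconds and are scaled up on read.
+//
+//   static inline uint64_t DecodeListsTimestampMs(uint64_t raw) {
+//     uint64_t value = raw & ~(1ULL << 63);  // strip the precision flag
+//     // flag absent (value unchanged) -> legacy seconds, convert to ms
+//     return value == raw ? value * 1000 : value;
+//   }
+//   // DecodeListsTimestampMs(1700000000)                   == 1700000000000
+//   // DecodeListsTimestampMs(1700000000000 | (1ULL << 63)) == 1700000000000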
+ +#ifndef SRC_LOCK_MGR_H_ +#define SRC_LOCK_MGR_H_ + +#include +#include + +#include "pstd/include/lock_mgr.h" + +#include "src/mutex.h" + +namespace storage { + +using LockMgr = pstd::lock::LockMgr; + +} // namespace storage +#endif // SRC_LOCK_MGR_H_ diff --git a/tools/pika_migrate/src/storage/src/lru_cache.h b/tools/pika_migrate/src/storage/src/lru_cache.h new file mode 100644 index 0000000000..f2634e752c --- /dev/null +++ b/tools/pika_migrate/src/storage/src/lru_cache.h @@ -0,0 +1,297 @@ +// Copyright (c) 2017-present, Qihoo, Inc. All rights reserved. +// This source code is licensed under the BSD-style license found in the +// LICENSE file in the root directory of this source tree. An additional grant +// of patent rights can be found in the PATENTS file in the same directory. + +#ifndef SRC_LRU_CACHE_H_ +#define SRC_LRU_CACHE_H_ + +#include +#include +#include + +#include "rocksdb/status.h" + +#include "pstd/include/pstd_mutex.h" + +namespace storage { + +template +struct LRUHandle { + T1 key; + T2 value; + size_t charge; + LRUHandle* next; + LRUHandle* prev; +}; + +template +class HandleTable { + public: + HandleTable(); + ~HandleTable(); + + size_t TableSize(); + LRUHandle* Lookup(const T1& key); + LRUHandle* Remove(const T1& key); + LRUHandle* Insert(const T1& key, LRUHandle* handle); + + private: + std::unordered_map*> table_; +}; + +template +HandleTable::HandleTable() = default; + +template +HandleTable::~HandleTable() = default; + +template +size_t HandleTable::TableSize() { + return table_.size(); +} + +template +LRUHandle* HandleTable::Lookup(const T1& key) { + if (table_.find(key) != table_.end()) { + return table_[key]; + } else { + return nullptr; + } +} + +template +LRUHandle* HandleTable::Remove(const T1& key) { + LRUHandle* old = nullptr; + if (table_.find(key) != table_.end()) { + old = table_[key]; + table_.erase(key); + } + return old; +} + +template +LRUHandle* HandleTable::Insert(const T1& key, LRUHandle* const handle) { + LRUHandle* old = nullptr; + if (table_.find(key) != table_.end()) { + old = table_[key]; + table_.erase(key); + } + table_.insert({key, handle}); + return old; +} + +template +class LRUCache { + public: + LRUCache(); + ~LRUCache(); + + size_t Size(); + size_t TotalCharge(); + size_t Capacity(); + void SetCapacity(size_t capacity); + + rocksdb::Status Lookup(const T1& key, T2* value); + rocksdb::Status Insert(const T1& key, const T2& value, size_t charge = 1); + rocksdb::Status Remove(const T1& key); + rocksdb::Status Clear(); + + // Just for test + bool LRUAndHandleTableConsistent(); + bool LRUAsExpected(const std::vector>& expect); + + private: + void LRU_Trim(); + void LRU_Remove(LRUHandle* e); + void LRU_Append(LRUHandle* e); + void LRU_MoveToHead(LRUHandle* e); + bool FinishErase(LRUHandle* e); + + // Initialized before use. + size_t capacity_ = 0; + size_t usage_ = 0; + size_t size_ = 0; + + pstd::Mutex mutex_; + + // Dummy head of LRU list. + // lru.prev is newest entry, lru.next is oldest entry. + LRUHandle lru_; + + HandleTable handle_table_; +}; + +template +LRUCache::LRUCache() { + // Make empty circular linked lists. 
+ lru_.next = &lru_; + lru_.prev = &lru_; +} + +template +LRUCache::~LRUCache() { + Clear(); +} + +template +size_t LRUCache::Size() { + std::lock_guard l(mutex_); + return size_; +} + +template +size_t LRUCache::TotalCharge() { + std::lock_guard l(mutex_); + return usage_; +} + +template +size_t LRUCache::Capacity() { + std::lock_guard l(mutex_); + return capacity_; +} + +template +void LRUCache::SetCapacity(size_t capacity) { + std::lock_guard l(mutex_); + capacity_ = capacity; + LRU_Trim(); +} + +template +rocksdb::Status LRUCache::Lookup(const T1& key, T2* const value) { + std::lock_guard l(mutex_); + LRUHandle* handle = handle_table_.Lookup(key); + if (handle) { + LRU_MoveToHead(handle); + *value = handle->value; + } + return (!handle) ? rocksdb::Status::NotFound() : rocksdb::Status::OK(); +} + +template +rocksdb::Status LRUCache::Insert(const T1& key, const T2& value, size_t charge) { + std::lock_guard l(mutex_); + if (capacity_ == 0) { + return rocksdb::Status::Corruption("capacity is empty"); + } else { + auto handle = new LRUHandle(); + handle->key = key; + handle->value = value; + handle->charge = charge; + LRU_Append(handle); + size_++; + usage_ += charge; + FinishErase(handle_table_.Insert(key, handle)); + LRU_Trim(); + } + return rocksdb::Status::OK(); +} + +template +rocksdb::Status LRUCache::Remove(const T1& key) { + std::lock_guard l(mutex_); + bool erased = FinishErase(handle_table_.Remove(key)); + return erased ? rocksdb::Status::OK() : rocksdb::Status::NotFound(); +} + +template +rocksdb::Status LRUCache::Clear() { + std::lock_guard l(mutex_); + LRUHandle* old = nullptr; + while (lru_.next != &lru_) { + old = lru_.next; + bool erased = FinishErase(handle_table_.Remove(old->key)); + if (!erased) { // to avoid unused variable when compiled NDEBUG + assert(erased); + } + } + return rocksdb::Status::OK(); +} + +template +bool LRUCache::LRUAndHandleTableConsistent() { + size_t count = 0; + std::lock_guard l(mutex_); + LRUHandle* handle = nullptr; + LRUHandle* current = lru_.prev; + while (current != &lru_) { + handle = handle_table_.Lookup(current->key); + if (!handle || handle != current) { + return false; + } else { + count++; + current = current->prev; + } + } + return count == handle_table_.TableSize(); +} + +template +bool LRUCache::LRUAsExpected(const std::vector>& expect) { + if (Size() != expect.size()) { + return false; + } else { + size_t idx = 0; + LRUHandle* current = lru_.prev; + while (current != &lru_) { + if (current->key != expect[idx].first || current->value != expect[idx].second) { + return false; + } else { + idx++; + current = current->prev; + } + } + } + return true; +} + +template +void LRUCache::LRU_Trim() { + LRUHandle* old = nullptr; + while (usage_ > capacity_ && lru_.next != &lru_) { + old = lru_.next; + bool erased = FinishErase(handle_table_.Remove(old->key)); + if (!erased) { // to avoid unused variable when compiled NDEBUG + assert(erased); + } + } +} + +template +void LRUCache::LRU_Remove(LRUHandle* const e) { + e->next->prev = e->prev; + e->prev->next = e->next; +} + +template +void LRUCache::LRU_Append(LRUHandle* const e) { + // Make "e" newest entry by inserting just before lru_ + e->next = &lru_; + e->prev = lru_.prev; + e->prev->next = e; + e->next->prev = e; +} + +template +void LRUCache::LRU_MoveToHead(LRUHandle* const e) { + LRU_Remove(e); + LRU_Append(e); +} + +template +bool LRUCache::FinishErase(LRUHandle* const e) { + bool erased = false; + if (e) { + LRU_Remove(e); + size_--; + usage_ -= e->charge; + delete e; + erased = true; + } 
+ return erased; +} + +} // namespace storage +#endif // SRC_LRU_CACHE_H_ diff --git a/tools/pika_migrate/src/storage/src/murmurhash.cc b/tools/pika_migrate/src/storage/src/murmurhash.cc new file mode 100644 index 0000000000..9c42fcb4ed --- /dev/null +++ b/tools/pika_migrate/src/storage/src/murmurhash.cc @@ -0,0 +1,197 @@ +// Copyright (c) 2017-present, Qihoo, Inc. All rights reserved. +// This source code is licensed under the BSD-style license found in the +// LICENSE file in the root directory of this source tree. An additional grant +// of patent rights can be found in the PATENTS file in the same directory. + +/* + Murmurhash from http://sites.google.com/site/murmurhash/ + + All code is released to the public domain. For business purposes, Murmurhash + is under the MIT license. +*/ +#include "src/murmurhash.h" + +#if defined(__x86_64__) + +// ------------------------------------------------------------------- +// +// The same caveats as 32-bit MurmurHash2 apply here - beware of alignment +// and endian-ness issues if used across multiple platforms. +// +// 64-bit hash for 64-bit platforms + +uint64_t MurmurHash64A(const void* key, int len, unsigned int seed) { + const uint64_t m = 0xc6a4a7935bd1e995; + const int r = 47; + + uint64_t h = seed ^ (len * m); + + auto data = static_cast(key); + auto end = data + (len / 8); + + while (data != end) { + uint64_t k = *data++; + + k *= m; + k ^= k >> r; + k *= m; + + h ^= k; + h *= m; + } + + auto data2 = reinterpret_cast(data); + + switch (len & 7) { + case 7: + h ^= (static_cast(data2[6])) << 48; + [[fallthrough]]; + case 6: + h ^= (static_cast(data2[5])) << 40; + [[fallthrough]]; + case 5: + h ^= (static_cast(data2[4])) << 32; + [[fallthrough]]; + case 4: + h ^= (static_cast(data2[3])) << 24; + [[fallthrough]]; + case 3: + h ^= (static_cast(data2[2])) << 16; + [[fallthrough]]; + case 2: + h ^= (static_cast(data2[1])) << 8; + [[fallthrough]]; + case 1: + h ^= (static_cast(data2[0])); + h *= m; + } + + h ^= h >> r; + h *= m; + h ^= h >> r; + + return h; +} + +#elif defined(__i386__) + +// ------------------------------------------------------------------- +// +// Note - This code makes a few assumptions about how your machine behaves - +// +// 1. We can read a 4-byte value from any address without crashing +// 2. sizeof(int) == 4 +// +// And it has a few limitations - +// +// 1. It will not work incrementally. +// 2. It will not produce the same results on little-endian and big-endian +// machines. + +unsigned int MurmurHash2(const void* key, int len, unsigned int seed) { + // 'm' and 'r' are mixing constants generated offline. + // They're not really 'magic', they just happen to work well. + + const unsigned int m = 0x5bd1e995; + const int r = 24; + + // Initialize the hash to a 'random' value + + unsigned int h = seed ^ len; + + // Mix 4 bytes at a time into the hash + + auto data = (const unsigned char*)key; + + while (len >= 4) { + unsigned int k = *(unsigned int*)data; + + k *= m; + k ^= k >> r; + k *= m; + + h *= m; + h ^= k; + + data += 4; + len -= 4; + } + + // Handle the last few bytes of the input array + + switch (len) { + case 3: + h ^= data[2] << 16; + [[fallthrough]]; + case 2: + h ^= data[1] << 8; + [[fallthrough]]; + case 1: + h ^= data[0]; + h *= m; + } + + // Do a few final mixes of the hash to ensure the last few + // bytes are well-incorporated. 
+ + h ^= h >> 13; + h *= m; + h ^= h >> 15; + + return h; +} + +#else + +// ------------------------------------------------------------------- +// +// Same as MurmurHash2, but endian- and alignment-neutral. +// Half the speed though, alas. + +unsigned int MurmurHashNeutral2(const void* key, int len, unsigned int seed) { + const unsigned int m = 0x5bd1e995; + const int r = 24; + + unsigned int h = seed ^ len; + + auto data = static_cast(key); + + while (len >= 4) { + unsigned int k; + + k = data[0]; + k |= data[1] << 8; + k |= data[2] << 16; + k |= data[3] << 24; + + k *= m; + k ^= k >> r; + k *= m; + + h *= m; + h ^= k; + + data += 4; + len -= 4; + } + + switch (len) { + case 3: + h ^= data[2] << 16; + [[fallthrough]]; + case 2: + h ^= data[1] << 8; + [[fallthrough]]; + case 1: + h ^= data[0]; + h *= m; + } + + h ^= h >> 13; + h *= m; + h ^= h >> 15; + + return h; +} + +#endif diff --git a/tools/pika_migrate/src/storage/src/murmurhash.h b/tools/pika_migrate/src/storage/src/murmurhash.h new file mode 100644 index 0000000000..6692033a24 --- /dev/null +++ b/tools/pika_migrate/src/storage/src/murmurhash.h @@ -0,0 +1,45 @@ +// Copyright (c) 2017-present, Qihoo, Inc. All rights reserved. +// This source code is licensed under the BSD-style license found in the +// LICENSE file in the root directory of this source tree. An additional grant +// of patent rights can be found in the PATENTS file in the same directory. + +/* + Murmurhash from http://sites.google.com/site/murmurhash/ + + All code is released to the public domain. For business purposes, Murmurhash + is under the MIT license. +*/ +#ifndef SRC_MURMURHASH_H_ +#define SRC_MURMURHASH_H_ + +#include +#include "rocksdb/slice.h" + +#if defined(__x86_64__) +# define MURMUR_HASH MurmurHash64A +uint64_t MurmurHash64A(const void* key, int len, unsigned int seed); +# define MurmurHash MurmurHash64A +typedef uint64_t murmur_t; + +#elif defined(__i386__) +# define MURMUR_HASH MurmurHash2 +unsigned int MurmurHash2(const void* key, int len, unsigned int seed); +# define MurmurHash MurmurHash2 +typedef unsigned int murmur_t; + +#else +# define MURMUR_HASH MurmurHashNeutral2 +unsigned int MurmurHashNeutral2(const void* key, int len, unsigned int seed); +# define MurmurHash MurmurHashNeutral2 +using murmur_t = unsigned int; +#endif + +// Allow slice to be hashable by murmur hash. +namespace storage { +using Slice = rocksdb::Slice; +struct murmur_hash { + size_t operator()(const Slice& slice) const { return MurmurHash(slice.data(), static_cast(slice.size()), 0); } +}; +} // namespace storage +#endif // SRC_MURMURHASH_H_ + diff --git a/tools/pika_migrate/src/storage/src/mutex.h b/tools/pika_migrate/src/storage/src/mutex.h new file mode 100644 index 0000000000..f8efb55e47 --- /dev/null +++ b/tools/pika_migrate/src/storage/src/mutex.h @@ -0,0 +1,24 @@ +// Copyright (c) 2017-present, Qihoo, Inc. All rights reserved. +// This source code is licensed under the BSD-style license found in the +// LICENSE file in the root directory of this source tree. An additional grant +// of patent rights can be found in the PATENTS file in the same directory. 
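+// A brief usage sketch (assumed, not from the original files) for the
+// LRUCache template defined in lru_cache.h above; redis.cc later in this
+// patch uses the same pattern for its scan-cursor and statistics stores.
+//
+//   storage::LRUCache<std::string, std::string> cache;
+//   cache.SetCapacity(2);
+//   cache.Insert("k1", "v1");
+//   cache.Insert("k2", "v2");
+//   cache.Insert("k3", "v3");  // evicts "k1", the least recently used entry
+//   std::string value;
+//   rocksdb::Status s = cache.Lookup("k1", &value);  // s.IsNotFound() == true
+//   s = cache.Lookup("k3", &value);                  // OK, value == "v3"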
+ +#ifndef SRC_MUTEX_H_ +#define SRC_MUTEX_H_ + +#include + +#include "rocksdb/status.h" + +#include "pstd/include/mutex.h" + +namespace storage { + +using Status = rocksdb::Status; + +using Mutex = pstd::lock::Mutex; +using CondVar = pstd::lock::CondVar; +using MutexFactory = pstd::lock::MutexFactory; + +} // namespace storage +#endif // SRC_MUTEX_H_ diff --git a/tools/pika_migrate/src/storage/src/mutex_impl.h b/tools/pika_migrate/src/storage/src/mutex_impl.h new file mode 100644 index 0000000000..2cd47c4bca --- /dev/null +++ b/tools/pika_migrate/src/storage/src/mutex_impl.h @@ -0,0 +1,20 @@ +// Copyright (c) 2017-present, Qihoo, Inc. All rights reserved. +// This source code is licensed under the BSD-style license found in the +// LICENSE file in the root directory of this source tree. An additional grant +// of patent rights can be found in the PATENTS file in the same directory. + +#ifndef SRC_MUTEX_IMPL_H_ +#define SRC_MUTEX_IMPL_H_ + +#include "src/mutex.h" + +#include "pstd/include/mutex_impl.h" + +#include + +namespace storage { + +using MutexFactoryImpl = pstd::lock::MutexFactoryImpl; + +} // namespace storage +#endif // SRC_MUTEX_IMPL_H_ diff --git a/tools/pika_migrate/src/storage/src/options_helper.cc b/tools/pika_migrate/src/storage/src/options_helper.cc new file mode 100644 index 0000000000..a7a7c401b1 --- /dev/null +++ b/tools/pika_migrate/src/storage/src/options_helper.cc @@ -0,0 +1,98 @@ +// Copyright (c) 2017-present, Qihoo, Inc. All rights reserved. +// This source code is licensed under the BSD-style license found in the +// LICENSE file in the root directory of this source tree. An additional grant +// of patent rights can be found in the PATENTS file in the same directory. + +#include "src/options_helper.h" + +#include + +namespace storage { + +// strToInt may throw exception +static bool strToInt(const std::string& value, int* num, int base = 10) { + size_t end; + *num = std::stoi(value, &end, base); + return end >= value.size(); +} + +// strToUint64 may throw exception +static bool strToUint64(const std::string& value, uint64_t* num, int base = 10) { + size_t end; + *num = std::stoull(value, &end, base); + return end >= value.size(); +} + +// strToUint32 may throw exception +static bool strToUint32(const std::string& value, uint32_t* num, int base = 10) { + uint64_t uint64Val; + if (!strToUint64(value, &uint64Val)) { + return false; + } + if ((uint64Val >> 32LL) == 0) { + *num = static_cast(uint64Val); + } else { + throw std::out_of_range(value); + } + return true; +} + +// strToBool may throw exception +static bool strToBool(const std::string& value, bool* boolVal, int base = 10) { + if (value != "true" && value != "false") { + throw std::invalid_argument(value); + } + *boolVal = value == "true"; + return true; +} + +bool ParseOptionMember(const MemberType& member_type, const std::string& value, char* member_address) { + switch (member_type) { + case MemberType::kInt: { + int intVal; + if (!strToInt(value, &intVal)) { + return false; + } + *reinterpret_cast(member_address) = intVal; + break; + } + case MemberType::kUint: { + uint32_t uint32Val; + if (!strToUint32(value, &uint32Val)) { + return false; + } + *reinterpret_cast(member_address) = static_cast(uint32Val); + break; + } + case MemberType::kUint64T: { + uint64_t uint64Val; + if (!strToUint64(value, &uint64Val)) { + return false; + } + *reinterpret_cast(member_address) = uint64Val; + break; + } + case MemberType::kSizeT: { + uint64_t uint64Val; + if (!strToUint64(value, &uint64Val)) { + return false; + } + 
*reinterpret_cast(member_address) = static_cast(uint64Val); + break; + } + case MemberType::kBool: { + bool boolVal; + if (!strToBool(value, &boolVal)) { + return false; + } + *reinterpret_cast(member_address) = static_cast(boolVal); + break; + } + default: { + return false; + } + } + return true; +} + +} // namespace storage diff --git a/tools/pika_migrate/src/storage/src/options_helper.h b/tools/pika_migrate/src/storage/src/options_helper.h new file mode 100644 index 0000000000..5907e2116f --- /dev/null +++ b/tools/pika_migrate/src/storage/src/options_helper.h @@ -0,0 +1,79 @@ +// Copyright (c) 2017-present, Qihoo, Inc. All rights reserved. +// This source code is licensed under the BSD-style license found in the +// LICENSE file in the root directory of this source tree. An additional grant +// of patent rights can be found in the PATENTS file in the same directory. + +#ifndef SRC_OPTIONS_HELPER_H +#define SRC_OPTIONS_HELPER_H + +#include + +#include + +namespace storage { + +enum class MemberType { + kInt, + kUint, + kUint64T, + kSizeT, + kUnknown, + kBool, +}; + +struct MemberTypeInfo { + int offset; + MemberType type; +}; + +// offset_of is used to get the offset of a class data member with non standard-layout +// http://en.cppreference.com/w/cpp/concept/StandardLayoutType +// https://gist.github.com/graphitemaster/494f21190bb2c63c5516 +template +inline int offset_of(T1 T2::*member) { + static T2 obj; + return int(size_t(&(obj.*member)) - size_t(&obj)); +} + +static std::unordered_map mutable_db_options_member_type_info = { + {"max_background_jobs", {offsetof(struct rocksdb::DBOptions, max_background_jobs), MemberType::kInt}}, + {"max_background_compactions", {offsetof(struct rocksdb::DBOptions, max_background_compactions), MemberType::kInt}}, + // {"base_background_compactions", {offsetof(struct rocksdb::DBOptions, base_background_compactions), + // MemberType::kInt}}, + {"max_open_files", {offsetof(struct rocksdb::DBOptions, max_open_files), MemberType::kInt}}, + {"bytes_per_sync", {offsetof(struct rocksdb::DBOptions, bytes_per_sync), MemberType::kUint64T}}, + {"delayed_write_rate", {offsetof(struct rocksdb::DBOptions, delayed_write_rate), MemberType::kUint64T}}, + {"max_total_wal_size", {offsetof(struct rocksdb::DBOptions, max_total_wal_size), MemberType::kUint64T}}, + {"wal_bytes_per_sync", {offsetof(struct rocksdb::DBOptions, wal_bytes_per_sync), MemberType::kUint64T}}, + {"stats_dump_period_sec", {offsetof(struct rocksdb::DBOptions, stats_dump_period_sec), MemberType::kUint}}, +}; + +static std::unordered_map mutable_cf_options_member_type_info = { + {"max_write_buffer_number", {offset_of(&rocksdb::ColumnFamilyOptions::max_write_buffer_number), MemberType::kInt}}, + {"write_buffer_size", {offset_of(&rocksdb::ColumnFamilyOptions::write_buffer_size), MemberType::kSizeT}}, + {"target_file_size_base", {offset_of(&rocksdb::ColumnFamilyOptions::target_file_size_base), MemberType::kUint64T}}, + {"target_file_size_multiplier", + {offset_of(&rocksdb::ColumnFamilyOptions::target_file_size_multiplier), MemberType::kInt}}, + {"arena_block_size", {offset_of(&rocksdb::ColumnFamilyOptions::arena_block_size), MemberType::kSizeT}}, + {"level0_file_num_compaction_trigger", + {offset_of(&rocksdb::ColumnFamilyOptions::level0_file_num_compaction_trigger), MemberType::kInt}}, + {"level0_slowdown_writes_trigger", + {offset_of(&rocksdb::ColumnFamilyOptions::level0_slowdown_writes_trigger), MemberType::kInt}}, + {"level0_stop_writes_trigger", + 
{offset_of(&rocksdb::ColumnFamilyOptions::level0_stop_writes_trigger), MemberType::kInt}},
+    {"max_compaction_bytes", {offset_of(&rocksdb::ColumnFamilyOptions::max_compaction_bytes), MemberType::kUint64T}},
+    {"soft_pending_compaction_bytes_limit",
+     {offset_of(&rocksdb::ColumnFamilyOptions::soft_pending_compaction_bytes_limit), MemberType::kUint64T}},
+    {"hard_pending_compaction_bytes_limit",
+     {offset_of(&rocksdb::ColumnFamilyOptions::hard_pending_compaction_bytes_limit), MemberType::kUint64T}},
+    {"disable_auto_compactions",
+     {offset_of(&rocksdb::ColumnFamilyOptions::disable_auto_compactions), MemberType::kBool}},
+    {"ttl", {offset_of(&rocksdb::AdvancedColumnFamilyOptions::ttl), MemberType::kUint64T}},
+    {"periodic_compaction_seconds",
+     {offset_of(&rocksdb::AdvancedColumnFamilyOptions::periodic_compaction_seconds), MemberType::kUint64T}},
+};
+
+extern bool ParseOptionMember(const MemberType& member_type, const std::string& value, char* member_address);
+
+}  // namespace storage
+#endif  // SRC_OPTIONS_HELPER_H
diff --git a/tools/pika_migrate/src/storage/src/pika_stream_meta_value.h b/tools/pika_migrate/src/storage/src/pika_stream_meta_value.h
new file mode 100644
index 0000000000..d505eb9094
--- /dev/null
+++ b/tools/pika_migrate/src/storage/src/pika_stream_meta_value.h
@@ -0,0 +1,517 @@
+// Copyright (c) 2023-present, Qihoo, Inc. All rights reserved.
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree. An additional grant
+// of patent rights can be found in the PATENTS file in the same directory.
+
+#pragma once
+
+#include <string>
+
+#include "glog/logging.h"
+#include "pika_stream_types.h"
+#include "src/coding.h"
+#include "storage/storage.h"
+#include "storage/storage_define.h"
+#include "src/base_value_format.h"
+
+
+/*
+ * | type | group_id_ | entries_added_ | first_id_ms | first_id_seq | last_id_ms | last_id_seq | max_deleted_entry_ms | max_deleted_entry_seq | length | version |
+ * |  1B  |    4B     |       8B       |     8B      |      8B      |     8B     |     8B      |          8B          |          8B           |   4B   |   8B    |
+ */
+
+namespace storage {
+
+static const uint64_t kDefaultStreamValueLength =
+    sizeof(tree_id_t) + sizeof(uint64_t) + 3 * sizeof(streamID) + sizeof(int32_t) + sizeof(uint64_t) + kTypeLength;
+class StreamMetaValue {
+ public:
+  explicit StreamMetaValue() : type_(DataType::kStreams) {}
+
+  // used only when creating a new stream
+  void InitMetaValue() {
+    groups_id_ = kINVALID_TREE_ID;
+    entries_added_ = 0;
+    first_id_ = streamID();
+    last_id_ = streamID();
+    max_deleted_entry_id_ = streamID();
+    length_ = 0;
+
+    // We do not reset version_ here, because we want to keep the version of the old stream meta.
+    // Each time we delete a stream, we increase the version of the stream meta, so that the old stream data will
+    // not be seen by a new stream with the same key.
+    ++version_;
+
+    uint64_t needed = kDefaultStreamValueLength;
+    value_.resize(needed);
+
+    char* dst = &value_[0];
+
+    memcpy(dst, &type_, sizeof(type_));
+    dst += sizeof(type_);
+    // Encode each member into the string
+    EncodeFixed32(dst, groups_id_);
+    dst += sizeof(tree_id_t);
+
+    EncodeFixed64(dst, entries_added_);
+    dst += sizeof(uint64_t);
+
+    EncodeFixed64(dst, first_id_.ms);
+    dst += sizeof(uint64_t);
+    EncodeFixed64(dst, first_id_.seq);
+    dst += sizeof(uint64_t);
+
+    EncodeFixed64(dst, last_id_.ms);
+    dst += sizeof(uint64_t);
+    EncodeFixed64(dst, last_id_.seq);
+    dst += sizeof(uint64_t);
+
+    EncodeFixed64(dst, max_deleted_entry_id_.ms);
+    dst += sizeof(uint64_t);
+    EncodeFixed64(dst, max_deleted_entry_id_.seq);
+    dst += sizeof(uint64_t);
+
+    EncodeFixed32(dst, length_);
+    dst += sizeof(length_);
+
+    EncodeFixed64(dst, version_);
+  }
+
+  // used only when parsing an existing stream meta value:
+  // value_ = std::move(value);
+  void ParseFrom(std::string& value) {
+    value_ = std::move(value);
+    assert(value_.size() == kDefaultStreamValueLength);
+    if (value_.size() != kDefaultStreamValueLength) {
+      LOG(ERROR) << "Invalid stream meta value length: " << value_.size()
+                 << " expected: " << kDefaultStreamValueLength;
+      return;
+    }
+    char* pos = &value_[0];
+    type_ = static_cast<DataType>(static_cast<uint8_t>(value_[0]));
+    pos += kTypeLength;
+    groups_id_ = DecodeFixed32(pos);
+    pos += sizeof(tree_id_t);
+
+    entries_added_ = DecodeFixed64(pos);
+    pos += sizeof(uint64_t);
+
+    first_id_.ms = DecodeFixed64(pos);
+    pos += sizeof(uint64_t);
+    first_id_.seq = DecodeFixed64(pos);
+    pos += sizeof(uint64_t);
+
+    last_id_.ms = DecodeFixed64(pos);
+    pos += sizeof(uint64_t);
+    last_id_.seq = DecodeFixed64(pos);
+    pos += sizeof(uint64_t);
+
+    max_deleted_entry_id_.ms = DecodeFixed64(pos);
+    pos += sizeof(uint64_t);
+    max_deleted_entry_id_.seq = DecodeFixed64(pos);
+    pos += sizeof(uint64_t);
+
+    length_ = static_cast<int32_t>(DecodeFixed32(pos));
+    pos += sizeof(length_);
+
+    version_ = static_cast<uint64_t>(DecodeFixed64(pos));
+  }
+
+  uint64_t version() const { return version_; }
+
+  tree_id_t groups_id() const { return groups_id_; }
+
+  uint64_t entries_added() const { return entries_added_; }
+
+  void ModifyEntriesAdded(uint64_t delta) { set_entries_added(entries_added_ + delta); }
+
+  streamID first_id() const { return first_id_; }
+
+  streamID last_id() const { return last_id_; }
+
+  streamID max_deleted_entry_id() const { return max_deleted_entry_id_; }
+
+  int32_t length() const { return length_; }
+
+  std::string& value() { return value_; }
+
+  std::string ToString() {
+    return "stream_meta: " + std::string("groups_id: ") + std::to_string(groups_id_) +
+           std::string(", entries_added: ") + std::to_string(entries_added_) + std::string(", first_id: ") +
+           first_id_.ToString() + std::string(", last_id: ") + last_id_.ToString() +
+           std::string(", max_deleted_entry_id: ") + max_deleted_entry_id_.ToString() + std::string(", length: ") +
+           std::to_string(length_) + std::string(", version: ") + std::to_string(version_);
+  }
+
+  void set_groups_id(tree_id_t groups_id) {
+    assert(value_.size() == kDefaultStreamValueLength);
+    groups_id_ = groups_id;
+    char* dst = const_cast<char*>(value_.data() + kTypeLength);
+    EncodeFixed32(dst, groups_id_);
+  }
+
+  void set_entries_added(uint64_t entries_added) {
+    assert(value_.size() == kDefaultStreamValueLength);
+    entries_added_ = entries_added;
+    char* dst = const_cast<char*>(value_.data()) + sizeof(tree_id_t) + kTypeLength;
+    EncodeFixed64(dst, entries_added_);
+  }
+
+  void set_first_id(streamID first_id) {
assert(value_.size() == kDefaultStreamValueLength); + first_id_ = first_id; + char* dst = const_cast(value_.data()) + sizeof(tree_id_t) + sizeof(uint64_t) + kTypeLength; + EncodeFixed64(dst, first_id_.ms); + dst += sizeof(uint64_t); + EncodeFixed64(dst, first_id_.seq); + } + + void set_last_id(streamID last_id) { + assert(value_.size() == kDefaultStreamValueLength); + last_id_ = last_id; + char* dst = const_cast(value_.data()) + sizeof(tree_id_t) + sizeof(uint64_t) + sizeof(streamID) + kTypeLength; + EncodeFixed64(dst, last_id_.ms); + dst += sizeof(uint64_t); + EncodeFixed64(dst, last_id_.seq); + } + + void set_max_deleted_entry_id(streamID max_deleted_entry_id) { + assert(value_.size() == kDefaultStreamValueLength); + max_deleted_entry_id_ = max_deleted_entry_id; + char* dst = const_cast(value_.data()) + sizeof(tree_id_t) + sizeof(uint64_t) + 2 * sizeof(streamID) + kTypeLength; + EncodeFixed64(dst, max_deleted_entry_id_.ms); + dst += sizeof(uint64_t); + EncodeFixed64(dst, max_deleted_entry_id_.seq); + } + + void set_length(int32_t length) { + assert(value_.size() == kDefaultStreamValueLength); + length_ = length; + char* dst = const_cast(value_.data()) + sizeof(tree_id_t) + sizeof(uint64_t) + 3 * sizeof(streamID) + kTypeLength; + EncodeFixed32(dst, length_); + } + + void set_version(uint64_t version) { + assert(value_.size() == kDefaultStreamValueLength); + version_ = version; + char* dst = + const_cast(value_.data()) + sizeof(tree_id_t) + sizeof(uint64_t) + 3 * sizeof(streamID) + sizeof(length_) + kTypeLength; + EncodeFixed64(dst, version_); + } + + private: + tree_id_t groups_id_ = kINVALID_TREE_ID; + uint64_t entries_added_{0}; + streamID first_id_; + streamID last_id_; + streamID max_deleted_entry_id_; + int32_t length_{0}; // number of the messages in the stream + uint64_t version_{0}; + DataType type_; + std::string value_{}; +}; + +// Used only for reading ! 
+class ParsedStreamMetaValue { + public: + ParsedStreamMetaValue(const Slice& value) { + assert(value.size() == kDefaultStreamValueLength); + if (value.size() != kDefaultStreamValueLength) { + LOG(ERROR) << "Invalid stream meta value length: " << value.size() + << " expected: " << kDefaultStreamValueLength; + return; + } + char* pos = const_cast(value.data()); + type_ = static_cast(static_cast((value)[0])); + pos += kTypeLength; + groups_id_ = DecodeFixed32(pos); + pos += sizeof(tree_id_t); + + entries_added_ = DecodeFixed64(pos); + pos += sizeof(uint64_t); + + first_id_.ms = DecodeFixed64(pos); + pos += sizeof(uint64_t); + first_id_.seq = DecodeFixed64(pos); + pos += sizeof(uint64_t); + + last_id_.ms = DecodeFixed64(pos); + pos += sizeof(uint64_t); + last_id_.seq = DecodeFixed64(pos); + pos += sizeof(uint64_t); + + max_deleted_entry_id_.ms = DecodeFixed64(pos); + pos += sizeof(uint64_t); + max_deleted_entry_id_.seq = DecodeFixed64(pos); + pos += sizeof(uint64_t); + + length_ = static_cast(DecodeFixed32(pos)); + pos += sizeof(length_); + + version_ = static_cast(DecodeFixed64(pos)); + } + + uint64_t version() const { return version_; } + + tree_id_t groups_id() const { return groups_id_; } + + uint64_t entries_added() const { return entries_added_; } + + streamID first_id() const { return first_id_; } + + streamID last_id() const { return last_id_; } + + streamID max_deleted_entry_id() const { return max_deleted_entry_id_; } + + int32_t length() const { return length_; } + + std::string ToString() { + return "stream_meta: " + std::string("groups_id: ") + std::to_string(groups_id_) + + std::string(", entries_added: ") + std::to_string(entries_added_) + std::string(", first_id: ") + + first_id_.ToString() + std::string(", last_id: ") + last_id_.ToString() + + std::string(", max_deleted_entry_id: ") + max_deleted_entry_id_.ToString() + std::string(", length: ") + + std::to_string(length_) + std::string(", version: ") + std::to_string(version_); + } + + private: + tree_id_t groups_id_ = kINVALID_TREE_ID; + uint64_t entries_added_{0}; + streamID first_id_; + streamID last_id_; + streamID max_deleted_entry_id_; + int32_t length_{0}; // number of the messages in the stream + uint64_t version_{0}; + DataType type_; +}; + +static const uint64_t kDefaultStreamCGroupValueLength = sizeof(streamID) + sizeof(uint64_t) + 2 * sizeof(tree_id_t) + kTypeLength; + +class StreamCGroupMetaValue { + public: + explicit StreamCGroupMetaValue() = default; + + // tid and consumers should be set at beginning + void Init(tree_id_t tid, tree_id_t consumers) { + pel_ = tid; + consumers_ = consumers; + uint64_t needed = kDefaultStreamCGroupValueLength; + assert(value_.size() == 0); + if (value_.size() != 0) { + LOG(ERROR) << "Init on a existed stream cgroup meta value!"; + return; + } + value_.resize(needed); + + char* dst = &value_[0]; + + memcpy(dst, &last_id_, sizeof(streamID)); + dst += sizeof(uint64_t); + memcpy(dst, &entries_read_, sizeof(uint64_t)); + dst += sizeof(uint64_t); + memcpy(dst, &pel_, sizeof(tree_id_t)); + dst += sizeof(tree_id_t); + memcpy(dst, &consumers_, sizeof(tree_id_t)); + } + + void ParseFrom(std::string& value) { + value_ = std::move(value); + assert(value_.size() == kDefaultStreamCGroupValueLength); + if (value_.size() != kDefaultStreamCGroupValueLength) { + LOG(ERROR) << "Invalid stream cgroup meta value length: " << value_.size() + << " expected: " << kDefaultStreamValueLength; + return; + } + if (value_.size() == kDefaultStreamCGroupValueLength) { + auto pos = value_.data(); + 
memcpy(&last_id_, pos, sizeof(streamID)); + pos += sizeof(streamID); + memcpy(&entries_read_, pos, sizeof(uint64_t)); + pos += sizeof(uint64_t); + memcpy(&pel_, pos, sizeof(tree_id_t)); + pos += sizeof(tree_id_t); + memcpy(&consumers_, pos, sizeof(tree_id_t)); + } + } + + streamID last_id() { return last_id_; } + + void set_last_id(streamID last_id) { + assert(value_.size() == kDefaultStreamCGroupValueLength); + last_id_ = last_id; + char* dst = const_cast(value_.data()); + memcpy(dst, &last_id_, sizeof(streamID)); + } + + uint64_t entries_read() { return entries_read_; } + + void set_entries_read(uint64_t entries_read) { + assert(value_.size() == kDefaultStreamCGroupValueLength); + entries_read_ = entries_read; + char* dst = const_cast(value_.data()) + sizeof(streamID); + memcpy(dst, &entries_read_, sizeof(uint64_t)); + } + + // pel and consumers were set in constructor, can't be modified + tree_id_t pel() { return pel_; } + + tree_id_t consumers() { return consumers_; } + + std::string& value() { return value_; } + + private: + std::string value_; + + streamID last_id_; + uint64_t entries_read_ = 0; + tree_id_t pel_ = 0; + tree_id_t consumers_ = 0; +}; + +static const uint64_t kDefaultStreamConsumerValueLength = sizeof(stream_ms_t) * 2 + sizeof(tree_id_t); +class StreamConsumerMetaValue { + public: + // pel must been set at beginning + StreamConsumerMetaValue() = default; + + void ParseFrom(std::string& value) { + value_ = std::move(value); + assert(value_.size() == kDefaultStreamConsumerValueLength); + if (value_.size() != kDefaultStreamConsumerValueLength) { + LOG(ERROR) << "Invalid stream consumer meta value length: " << value_.size() + << " expected: " << kDefaultStreamConsumerValueLength; + return; + } + if (value_.size() == kDefaultStreamConsumerValueLength) { + auto pos = value_.data(); + memcpy(&seen_time_, pos, sizeof(stream_ms_t)); + pos += sizeof(stream_ms_t); + memcpy(&active_time_, pos, sizeof(stream_ms_t)); + pos += sizeof(stream_ms_t); + memcpy(&pel_, pos, sizeof(tree_id_t)); + } + } + + void Init(tree_id_t pel) { + pel_ = pel; + assert(value_.size() == 0); + if (value_.size() != 0) { + LOG(ERROR) << "Invalid stream consumer meta value length: " << value_.size() << " expected: 0"; + return; + } + uint64_t needed = kDefaultStreamConsumerValueLength; + value_.resize(needed); + char* dst = &value_[0]; + + memcpy(dst, &seen_time_, sizeof(stream_ms_t)); + dst += sizeof(stream_ms_t); + memcpy(dst, &active_time_, sizeof(stream_ms_t)); + dst += sizeof(stream_ms_t); + memcpy(dst, &pel_, sizeof(tree_id_t)); + } + + stream_ms_t seen_time() { return seen_time_; } + + void set_seen_time(stream_ms_t seen_time) { + seen_time_ = seen_time; + assert(value_.size() == kDefaultStreamConsumerValueLength); + char* dst = const_cast(value_.data()); + memcpy(dst, &seen_time_, sizeof(stream_ms_t)); + } + + stream_ms_t active_time() { return active_time_; } + + void set_active_time(stream_ms_t active_time) { + active_time_ = active_time; + assert(value_.size() == kDefaultStreamConsumerValueLength); + char* dst = const_cast(value_.data()) + sizeof(stream_ms_t); + memcpy(dst, &active_time_, sizeof(stream_ms_t)); + } + + // pel was set in constructor, can't be modified + tree_id_t pel_tid() { return pel_; } + + std::string& value() { return value_; } + + private: + std::string value_; + + stream_ms_t seen_time_ = 0; + stream_ms_t active_time_ = 0; + tree_id_t pel_ = 0; +}; + +static const uint64_t kDefaultStreamPelMetaValueLength = sizeof(stream_ms_t) + sizeof(uint64_t) + sizeof(tree_id_t); +class 
StreamPelMeta { + public: + // consumer must been set at beginning + StreamPelMeta() = default; + + void Init(std::string consumer, stream_ms_t delivery_time) { + consumer_ = std::move(consumer); + delivery_time_ = delivery_time; + uint64_t needed = kDefaultStreamPelMetaValueLength; + assert(value_.size() == 0); + if (value_.size() != 0) { + LOG(ERROR) << "Init on a existed stream pel meta value!"; + return; + } + value_.resize(needed); + char* dst = &value_[0]; + + memcpy(dst, &delivery_time_, sizeof(stream_ms_t)); + dst += sizeof(stream_ms_t); + memcpy(dst, &delivery_count_, sizeof(uint64_t)); + dst += sizeof(uint64_t); + memcpy(dst, &cname_len_, sizeof(uint64_t)); + dst += sizeof(uint64_t); + memcpy(dst, consumer_.data(), cname_len_); + } + + void ParseFrom(std::string& value) { + value_ = std::move(value); + assert(value_.size() == kDefaultStreamPelMetaValueLength); + if (value_.size() != kDefaultStreamPelMetaValueLength) { + LOG(ERROR) << "Invalid stream pel meta value length: "; + return; + } + auto pos = value_.data(); + memcpy(&delivery_time_, pos, sizeof(stream_ms_t)); + pos += sizeof(stream_ms_t); + memcpy(&delivery_count_, pos, sizeof(uint64_t)); + pos += sizeof(uint64_t); + memcpy(&cname_len_, pos, sizeof(uint64_t)); + pos += sizeof(uint64_t); + consumer_.assign(pos, cname_len_); + } + + stream_ms_t delivery_time() { return delivery_time_; } + + void set_delivery_time(stream_ms_t delivery_time) { + assert(value_.size() == kDefaultStreamPelMetaValueLength); + delivery_time_ = delivery_time; + char* dst = const_cast(value_.data()); + memcpy(dst, &delivery_time_, sizeof(stream_ms_t)); + } + + uint64_t delivery_count() { return delivery_count_; } + + void set_delivery_count(uint64_t delivery_count) { + assert(value_.size() == kDefaultStreamPelMetaValueLength); + delivery_count_ = delivery_count; + char* dst = const_cast(value_.data()); + memcpy(dst + sizeof(stream_ms_t), &delivery_count_, sizeof(uint64_t)); + } + + std::string& consumer() { return consumer_; } + + std::string& value() { return value_; } + + private: + std::string value_; + + stream_ms_t delivery_time_ = 0; + uint64_t delivery_count_ = 1; + uint64_t cname_len_ = 0; + std::string consumer_; +}; + +} // namespace storage diff --git a/tools/pika_migrate/src/storage/src/pika_stream_types.h b/tools/pika_migrate/src/storage/src/pika_stream_types.h new file mode 100644 index 0000000000..69c4733334 --- /dev/null +++ b/tools/pika_migrate/src/storage/src/pika_stream_types.h @@ -0,0 +1,87 @@ +// Copyright (c) 2023-present, Qihoo, Inc. All rights reserved. +// This source code is licensed under the BSD-style license found in the +// LICENSE file in the root directory of this source tree. An additional grant +// of patent rights can be found in the PATENTS file in the same directory. 
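+// A minimal write/read sketch (assumed, not from the original files) for the
+// stream meta classes in pika_stream_meta_value.h above: StreamMetaValue
+// builds the serialized bytes on the write path, and ParsedStreamMetaValue
+// wraps the stored bytes on the read path.
+//
+//   storage::StreamMetaValue meta;
+//   meta.InitMetaValue();  // fresh stream; bumps the version
+//   meta.set_last_id(storage::streamID(1700000000000ULL, 0));
+//   // meta.value() is what gets written to RocksDB; reading it back:
+//   storage::ParsedStreamMetaValue parsed(meta.value());
+//   assert(parsed.last_id() == storage::streamID(1700000000000ULL, 0));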
+ +#pragma once + +#include +#include +#include +#include +#include +#include "src/coding.h" + +namespace storage { + +#define kINVALID_TREE_ID 0 + +using streamID = struct streamID { + streamID(uint64_t _ms, uint64_t _seq) : ms(_ms), seq(_seq) {} + bool operator==(const streamID& other) const { return ms == other.ms && seq == other.seq; } + bool operator<(const streamID& other) const { return ms < other.ms || (ms == other.ms && seq < other.seq); } + bool operator>(const streamID& other) const { return ms > other.ms || (ms == other.ms && seq > other.seq); } + bool operator<=(const streamID& other) const { return ms < other.ms || (ms == other.ms && seq <= other.seq); } + bool operator>=(const streamID& other) const { return ms > other.ms || (ms == other.ms && seq >= other.seq); } + std::string ToString() const { return std::to_string(ms) + "-" + std::to_string(seq); } + + // We must store the streamID in memory in big-endian format. This way, our comparison of the serialized streamID byte + // code will be equivalent to the comparison of the uint64_t numbers. + inline void EncodeUint64InBigEndian(char* buf, uint64_t value) const { + if (kLittleEndian) { + // little endian, reverse the bytes + for (int i = 7; i >= 0; --i) { + buf[i] = static_cast(value & 0xff); + value >>= 8; + } + } else { + // big endian, just copy the bytes + memcpy(buf, &value, sizeof(value)); + } + } + + inline uint64_t DecodeUint64OfBigEndian(const char* ptr) { + uint64_t value; + if (kLittleEndian) { + // little endian, reverse the bytes + value = 0; + for (int i = 0; i < 8; ++i) { + value <<= 8; + value |= static_cast(ptr[i]); + } + } else { + // big endian, just copy the bytes + memcpy(&value, ptr, sizeof(value)); + } + return value; + } + + std::string Serialize() const { + std::string dst; + dst.resize(sizeof(ms) + sizeof(seq)); + EncodeUint64InBigEndian(&dst[0], ms); + EncodeUint64InBigEndian(&dst[0] + sizeof(ms), seq); + return dst; + } + + void DeserializeFrom(std::string& src) { + assert(src.size() == sizeof(ms) + sizeof(seq)); + ms = DecodeUint64OfBigEndian(&src[0]); + seq = DecodeUint64OfBigEndian(&src[0] + sizeof(ms)); + } + + streamID() = default; + uint64_t ms = 0; /* Unix time in milliseconds. */ + uint64_t seq = 0; /* Sequence number. */ +}; + +static const streamID kSTREAMID_MAX = streamID(UINT64_MAX, UINT64_MAX); +static const streamID kSTREAMID_MIN = streamID(0, 0); + +enum StreamTrimStrategy { TRIM_STRATEGY_NONE, TRIM_STRATEGY_MAXLEN, TRIM_STRATEGY_MINID }; + +using tree_id_t = uint32_t; + +using stream_ms_t = uint64_t; + +} // namespace storage diff --git a/tools/pika_migrate/src/storage/src/redis.cc b/tools/pika_migrate/src/storage/src/redis.cc new file mode 100644 index 0000000000..077fe15dd0 --- /dev/null +++ b/tools/pika_migrate/src/storage/src/redis.cc @@ -0,0 +1,766 @@ +// Copyright (c) 2017-present, Qihoo, Inc. All rights reserved. +// This source code is licensed under the BSD-style license found in the +// LICENSE file in the root directory of this source tree. An additional grant +// of patent rights can be found in the PATENTS file in the same directory. 
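+// A small illustration (assumed values, not from the original files) of why
+// pika_stream_types.h above serializes streamID fields big-endian: byte-wise
+// comparison of the serialized form must agree with numeric comparison of
+// (ms, seq), so each uint64_t is written most-significant byte first.
+//
+//   storage::streamID a(1, 2);
+//   storage::streamID b(1, 10);
+//   assert(a < b);                          // numeric order
+//   assert(a.Serialize() < b.Serialize());  // lexicographic order agrees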
+ +#include + +#include "rocksdb/env.h" + +#include "src/redis.h" +#include "src/lists_filter.h" +#include "src/base_filter.h" +#include "src/zsets_filter.h" +#include "pstd/include/pstd_defer.h" + +namespace storage { + +constexpr const char* ErrTypeMessage = "WRONGTYPE"; + +const rocksdb::Comparator* ListsDataKeyComparator() { + static ListsDataKeyComparatorImpl ldkc; + return &ldkc; +} + +rocksdb::Comparator* ZSetsScoreKeyComparator() { + static ZSetsScoreKeyComparatorImpl zsets_score_key_compare; + return &zsets_score_key_compare; +} + +Redis::Redis(Storage* const s, int32_t index) + : storage_(s), index_(index), + lock_mgr_(std::make_shared(1000, 0, std::make_shared())), + small_compaction_threshold_(5000), + small_compaction_duration_threshold_(10000) { + statistics_store_ = std::make_unique>(); + scan_cursors_store_ = std::make_unique>(); + spop_counts_store_ = std::make_unique>(); + default_compact_range_options_.exclusive_manual_compaction = false; + default_compact_range_options_.change_level = true; + spop_counts_store_->SetCapacity(1000); + scan_cursors_store_->SetCapacity(5000); + //env_ = rocksdb::Env::Instance(); + handles_.clear(); +} + +Redis::~Redis() { + rocksdb::CancelAllBackgroundWork(db_, true); + std::vector tmp_handles = handles_; + handles_.clear(); + for (auto handle : tmp_handles) { + delete handle; + } + // delete env_; + delete db_; + + if (default_compact_range_options_.canceled) { + delete default_compact_range_options_.canceled; + } +} + +Status Redis::Open(const StorageOptions& storage_options, const std::string& db_path) { + statistics_store_->SetCapacity(storage_options.statistics_max_size); + small_compaction_threshold_ = storage_options.small_compaction_threshold; + + rocksdb::BlockBasedTableOptions table_ops(storage_options.table_options); + table_ops.filter_policy.reset(rocksdb::NewBloomFilterPolicy(10, true)); + + rocksdb::Options ops(storage_options.options); + ops.create_missing_column_families = true; + if (storage_options.enable_db_statistics) { + db_statistics_ = rocksdb::CreateDBStatistics(); + db_statistics_->set_stats_level(static_cast(storage_options.db_statistics_level)); + ops.statistics = db_statistics_; + } + + /* + * Because zset, set, the hash, list, stream type meta + * information exists kMetaCF, so we delete the various + * types of MetaCF before + */ + // meta & string column-family options + rocksdb::ColumnFamilyOptions meta_cf_ops(storage_options.options); + meta_cf_ops.compaction_filter_factory = std::make_shared(); + rocksdb::BlockBasedTableOptions meta_table_ops(table_ops); + + rocksdb::BlockBasedTableOptions string_table_ops(table_ops); + if (!storage_options.share_block_cache && storage_options.block_cache_size > 0) { + meta_table_ops.block_cache = rocksdb::NewLRUCache(storage_options.block_cache_size); + } + meta_cf_ops.table_factory.reset(rocksdb::NewBlockBasedTableFactory(meta_table_ops)); + + // hash column-family options + rocksdb::ColumnFamilyOptions hash_data_cf_ops(storage_options.options); + hash_data_cf_ops.compaction_filter_factory = std::make_shared(&db_, &handles_, DataType::kHashes); + rocksdb::BlockBasedTableOptions hash_data_cf_table_ops(table_ops); + if (!storage_options.share_block_cache && storage_options.block_cache_size > 0) { + hash_data_cf_table_ops.block_cache = rocksdb::NewLRUCache(storage_options.block_cache_size); + } + hash_data_cf_ops.table_factory.reset(rocksdb::NewBlockBasedTableFactory(hash_data_cf_table_ops)); + + // list column-family options + rocksdb::ColumnFamilyOptions 
+  // list column-family options
+  rocksdb::ColumnFamilyOptions list_data_cf_ops(storage_options.options);
+  list_data_cf_ops.compaction_filter_factory = std::make_shared<ListsDataFilterFactory>(&db_, &handles_, DataType::kLists);
+  list_data_cf_ops.comparator = ListsDataKeyComparator();
+
+  rocksdb::BlockBasedTableOptions list_data_cf_table_ops(table_ops);
+  if (!storage_options.share_block_cache && storage_options.block_cache_size > 0) {
+    list_data_cf_table_ops.block_cache = rocksdb::NewLRUCache(storage_options.block_cache_size);
+  }
+  list_data_cf_ops.table_factory.reset(rocksdb::NewBlockBasedTableFactory(list_data_cf_table_ops));
+
+  // set column-family options
+  rocksdb::ColumnFamilyOptions set_data_cf_ops(storage_options.options);
+  set_data_cf_ops.compaction_filter_factory = std::make_shared<BaseDataFilterFactory>(&db_, &handles_, DataType::kSets);
+  rocksdb::BlockBasedTableOptions set_data_cf_table_ops(table_ops);
+  if (!storage_options.share_block_cache && storage_options.block_cache_size > 0) {
+    set_data_cf_table_ops.block_cache = rocksdb::NewLRUCache(storage_options.block_cache_size);
+  }
+  set_data_cf_ops.table_factory.reset(rocksdb::NewBlockBasedTableFactory(set_data_cf_table_ops));
+
+  // zset column-family options
+  rocksdb::ColumnFamilyOptions zset_data_cf_ops(storage_options.options);
+  rocksdb::ColumnFamilyOptions zset_score_cf_ops(storage_options.options);
+  zset_data_cf_ops.compaction_filter_factory = std::make_shared<BaseDataFilterFactory>(&db_, &handles_, DataType::kZSets);
+  zset_score_cf_ops.compaction_filter_factory = std::make_shared<ZSetsScoreFilterFactory>(&db_, &handles_, DataType::kZSets);
+  zset_score_cf_ops.comparator = ZSetsScoreKeyComparator();
+
+  rocksdb::BlockBasedTableOptions zset_meta_cf_table_ops(table_ops);
+  rocksdb::BlockBasedTableOptions zset_data_cf_table_ops(table_ops);
+  rocksdb::BlockBasedTableOptions zset_score_cf_table_ops(table_ops);
+  if (!storage_options.share_block_cache && storage_options.block_cache_size > 0) {
+    zset_data_cf_table_ops.block_cache = rocksdb::NewLRUCache(storage_options.block_cache_size);
+  }
+  zset_data_cf_ops.table_factory.reset(rocksdb::NewBlockBasedTableFactory(zset_data_cf_table_ops));
+  zset_score_cf_ops.table_factory.reset(rocksdb::NewBlockBasedTableFactory(zset_score_cf_table_ops));
+
+  // stream column-family options
+  rocksdb::ColumnFamilyOptions stream_data_cf_ops(storage_options.options);
+  stream_data_cf_ops.compaction_filter_factory = std::make_shared<BaseDataFilterFactory>(&db_, &handles_, DataType::kStreams);
+  rocksdb::BlockBasedTableOptions stream_data_cf_table_ops(table_ops);
+  if (!storage_options.share_block_cache && storage_options.block_cache_size > 0) {
+    stream_data_cf_table_ops.block_cache = rocksdb::NewLRUCache(storage_options.block_cache_size);
+  }
+  stream_data_cf_ops.table_factory.reset(rocksdb::NewBlockBasedTableFactory(stream_data_cf_table_ops));
+
+  std::vector<rocksdb::ColumnFamilyDescriptor> column_families;
+  // meta & string cf
+  column_families.emplace_back(rocksdb::kDefaultColumnFamilyName, meta_cf_ops);
+  // hash CF
+  column_families.emplace_back("hash_data_cf", hash_data_cf_ops);
+  // set CF
+  column_families.emplace_back("set_data_cf", set_data_cf_ops);
+  // list CF
+  column_families.emplace_back("list_data_cf", list_data_cf_ops);
+  // zset CF
+  column_families.emplace_back("zset_data_cf", zset_data_cf_ops);
+  column_families.emplace_back("zset_score_cf", zset_score_cf_ops);
+  // stream CF
+  column_families.emplace_back("stream_data_cf", stream_data_cf_ops);
+  // non-owning registration, so the member listener receives SST deletion
+  // events during OBD compaction
+  ops.listeners.emplace_back(std::shared_ptr<rocksdb::EventListener>(&listener_, [](rocksdb::EventListener*) {}));
+
+  return rocksdb::DB::Open(ops, db_path, column_families, &handles_, &db_);
+}
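Stripped of the pika-specific filter factories and caches, `Redis::Open()` is the standard RocksDB multi-column-family open. A minimal self-contained sketch of that pattern against stock RocksDB (the path and the reduced CF list are illustrative):

```cpp
#include <cassert>
#include <vector>

#include "rocksdb/db.h"

int main() {
  rocksdb::Options ops;
  ops.create_if_missing = true;
  ops.create_missing_column_families = true;

  std::vector<rocksdb::ColumnFamilyDescriptor> cfs = {
      {rocksdb::kDefaultColumnFamilyName, rocksdb::ColumnFamilyOptions()},  // meta & string
      {"hash_data_cf", rocksdb::ColumnFamilyOptions()},
      {"set_data_cf", rocksdb::ColumnFamilyOptions()},
  };

  std::vector<rocksdb::ColumnFamilyHandle*> handles;
  rocksdb::DB* db = nullptr;
  rocksdb::Status s = rocksdb::DB::Open(ops, "/tmp/multi_cf_demo", cfs, &handles, &db);
  assert(s.ok());

  // handles[i] lines up with cfs[i]; writes through a handle land in that CF.
  db->Put(rocksdb::WriteOptions(), handles[1], "field_key", "field_value");

  for (auto* h : handles) {
    db->DestroyColumnFamilyHandle(h);
  }
  delete db;
  return 0;
}
```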
+
+Status Redis::GetScanStartPoint(const DataType& type, const Slice& key, const Slice& pattern,
+                                int64_t cursor, std::string* start_point) {
+  std::string index_key;
+  index_key.append(1, DataTypeTag[static_cast<size_t>(type)]);
+  index_key.append("_");
+  index_key.append(key.ToString());
+  index_key.append("_");
+  index_key.append(pattern.ToString());
+  index_key.append("_");
+  index_key.append(std::to_string(cursor));
+  return scan_cursors_store_->Lookup(index_key, start_point);
+}
+
+Status Redis::StoreScanNextPoint(const DataType& type, const Slice& key, const Slice& pattern, int64_t cursor,
+                                 const std::string& next_point) {
+  std::string index_key;
+  index_key.append(1, DataTypeTag[static_cast<size_t>(type)]);
+  index_key.append("_");
+  index_key.append(key.ToString());
+  index_key.append("_");
+  index_key.append(pattern.ToString());
+  index_key.append("_");
+  index_key.append(std::to_string(cursor));
+  return scan_cursors_store_->Insert(index_key, next_point);
+}
+
+Status Redis::SetMaxCacheStatisticKeys(size_t max_cache_statistic_keys) {
+  statistics_store_->SetCapacity(max_cache_statistic_keys);
+  return Status::OK();
+}
+
+/*
+ * CompactRange no longer supports compacting a single data type; it always
+ * walks every column family.
+ */
+Status Redis::CompactRange(const rocksdb::Slice* begin, const rocksdb::Slice* end) {
+  db_->CompactRange(default_compact_range_options_, begin, end);
+  db_->CompactRange(default_compact_range_options_, handles_[kHashesDataCF], begin, end);
+  db_->CompactRange(default_compact_range_options_, handles_[kSetsDataCF], begin, end);
+  db_->CompactRange(default_compact_range_options_, handles_[kListsDataCF], begin, end);
+  db_->CompactRange(default_compact_range_options_, handles_[kZsetsDataCF], begin, end);
+  db_->CompactRange(default_compact_range_options_, handles_[kZsetsScoreCF], begin, end);
+  db_->CompactRange(default_compact_range_options_, handles_[kStreamsDataCF], begin, end);
+  return Status::OK();
+}
+
+void SelectColumnFamilyHandles(const DataType& option_type, const ColumnFamilyType& type,
+                               std::vector<int>& handleIdxVec) {
+  switch (option_type) {
+    case DataType::kStrings:
+      handleIdxVec.push_back(kMetaCF);
+      break;
+    case DataType::kHashes:
+      if (type == kMeta || type == kMetaAndData) {
+        handleIdxVec.push_back(kMetaCF);
+      }
+      if (type == kData || type == kMetaAndData) {
+        handleIdxVec.push_back(kHashesDataCF);
+      }
+      break;
+    case DataType::kSets:
+      if (type == kMeta || type == kMetaAndData) {
+        handleIdxVec.push_back(kMetaCF);
+      }
+      if (type == kData || type == kMetaAndData) {
+        handleIdxVec.push_back(kSetsDataCF);
+      }
+      break;
+    case DataType::kLists:
+      if (type == kMeta || type == kMetaAndData) {
+        handleIdxVec.push_back(kMetaCF);
+      }
+      if (type == kData || type == kMetaAndData) {
+        handleIdxVec.push_back(kListsDataCF);
+      }
+      break;
+    case DataType::kZSets:
+      if (type == kMeta || type == kMetaAndData) {
+        handleIdxVec.push_back(kMetaCF);
+      }
+      if (type == kData || type == kMetaAndData) {
+        handleIdxVec.push_back(kZsetsDataCF);
+        handleIdxVec.push_back(kZsetsScoreCF);
+      }
+      break;
+    case DataType::kStreams:
+      if (type == kMeta || type == kMetaAndData) {
+        handleIdxVec.push_back(kMetaCF);
+      }
+      if (type == kData || type == kMetaAndData) {
+        handleIdxVec.push_back(kStreamsDataCF);
+      }
+      break;
+    case DataType::kAll:
+      for (auto s = kMetaCF; s <= kStreamsDataCF; s = static_cast<ColumnFamilyIndex>(s + 1)) {
+        handleIdxVec.push_back(s);
+      }
+      break;
+    default:
+      break;
+  }
+}
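Both cursor helpers above derive the cache key for the scan-cursor LRU store as `<type tag>_<key>_<pattern>_<cursor>`. A tiny standalone sketch of that composition (the `'h'` tag and the sample values are illustrative):

```cpp
#include <iostream>
#include <string>

int main() {
  std::string index_key;
  index_key.append(1, 'h');                // DataTypeTag entry for hashes
  index_key.append("_");
  index_key.append("myhash");              // user key
  index_key.append("_");
  index_key.append("field*");              // match pattern
  index_key.append("_");
  index_key.append(std::to_string(1536));  // cursor
  std::cout << index_key << "\n";          // prints: h_myhash_field*_1536
  return 0;
}
```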
+
+Status Redis::LongestNotCompactionSstCompact(const DataType& option_type, std::vector<Status>* compact_result_vec,
+                                             const ColumnFamilyType& type) {
+  bool no_compact = false;
+  bool to_compact = true;
+  if (!in_compact_flag_.compare_exchange_weak(no_compact, to_compact, std::memory_order_relaxed,
+                                              std::memory_order_relaxed)) {
+    return Status::Busy("compact running");
+  }
+
+  DEFER { in_compact_flag_.store(false); };
+  std::vector<int> handleIdxVec;
+  SelectColumnFamilyHandles(option_type, type, handleIdxVec);
+  if (handleIdxVec.empty()) {
+    return Status::Corruption("Invalid data type");
+  }
+
+  if (compact_result_vec) {
+    compact_result_vec->clear();
+  }
+
+  // sort for convenient traversal
+  std::vector<rocksdb::LiveFileMetaData> metadata;
+  db_->GetLiveFilesMetaData(&metadata);
+  std::sort(metadata.begin(), metadata.end(), [](const auto& a, const auto& b) { return a.name < b.name; });
+
+  // turn the listener on before compacting and turn it off after
+  listener_.Start();
+  DEFER {
+    listener_.End();
+    listener_.Clear();
+  };
+
+  for (auto idx : handleIdxVec) {
+    rocksdb::TablePropertiesCollection props;
+    Status s = db_->GetPropertiesOfAllTables(handles_[idx], &props);
+    if (!s.ok()) {
+      if (compact_result_vec) {
+        compact_result_vec->push_back(
+            Status::Corruption(handles_[idx]->GetName() +
+                               " LongestNotCompactionSstCompact GetPropertiesOfAllTables error: " + s.ToString()));
+      }
+      continue;
+    }
+
+    // clear the deleted-sst-file records, because the listener is reused
+    // across column families
+    listener_.Clear();
+
+    // The main goal of this compaction is to reclaim disk space and drop
+    // tombstones. Scheduling a compaction is pointless when there are too few
+    // live files; the threshold is hard-coded to 1 here.
+    if (props.size() <= 1) {
+      // LOG(WARNING) << "LongestNotCompactionSstCompact " << handles_[idx]->GetName() << " only one file";
+      if (compact_result_vec) {
+        compact_result_vec->push_back(Status::OK());
+      }
+      continue;
+    }
+
+    size_t max_files_to_compact = 1;
+    const StorageOptions& storageOptions = storage_->GetStorageOptions();
+    if (props.size() / storageOptions.compact_param_.compact_every_num_of_files_ > max_files_to_compact) {
+      max_files_to_compact = props.size() / storageOptions.compact_param_.compact_every_num_of_files_;
+    }
+
+    // sort for convenient traversal
+    std::vector<std::pair<std::string, std::shared_ptr<const rocksdb::TableProperties>>> props_vec(props.begin(),
+                                                                                                   props.end());
+    std::sort(props_vec.begin(), props_vec.end(), [](const auto& a, const auto& b) { return a.first < b.first; });
+
+    int64_t now =
+        std::chrono::duration_cast<std::chrono::milliseconds>(std::chrono::system_clock::now().time_since_epoch())
+            .count();
+
+    auto force_compact_min_ratio =
+        static_cast<double>(storageOptions.compact_param_.force_compact_min_delete_ratio_) / 100.0;
+    auto best_delete_min_ratio = static_cast<double>(storageOptions.compact_param_.best_delete_min_ratio_) / 100.0;
+
+    std::string best_filename;
+    double best_delete_ratio = 0;
+    uint64_t total_keys = 0, deleted_keys = 0;
+    rocksdb::Slice start_key, stop_key, best_start_key, best_stop_key;
+    Status compact_result;
+    auto metadata_iter = metadata.begin();
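+
+    // Walk the SSTs in filename order: files that are old enough and exceed
+    // force_compact_min_ratio are compacted immediately; otherwise the file
+    // with the highest delete ratio is remembered as the best candidate for
+    // one final CompactRange after the loop.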
+    for (const auto& iter : props_vec) {
+      auto file_path = iter.first;
+
+      // Some SST files present in props_vec may already have been dropped by a
+      // CompactRange issued earlier in this loop, so skip them.
+      if (listener_.Contains(file_path)) {
+        continue;
+      }
+
+      uint64_t file_creation_time = iter.second->file_creation_time;
+      if (file_creation_time == 0) {
+        // Fall back to the file modification time to avoid repeatedly
+        // compacting the same file; a file_creation_time of 0 means rocksdb
+        // does not know it.
+        auto s = rocksdb::Env::Default()->GetFileModificationTime(file_path, &file_creation_time);
+        if (!s.ok()) {
+          LOG(WARNING) << handles_[idx]->GetName() << " Failed to get the file creation time: " << file_path << " in "
+                       << handles_[idx]->GetName() << ", err: " << s.ToString();
+          continue;
+        }
+      }
+
+      while (metadata_iter != metadata.end() && file_path.substr(file_path.find_last_of('/')) != metadata_iter->name) {
+        ++metadata_iter;
+      }
+      if (metadata_iter == metadata.end()) {
+        // We reach here only when some SST files were created after the
+        // GetLiveFilesMetaData call but before GetPropertiesOfAllTables.
+        break;
+      }
+
+      start_key = metadata_iter->smallestkey;
+      stop_key = metadata_iter->largestkey;
+      total_keys = metadata_iter->num_entries;
+      deleted_keys = metadata_iter->num_deletions;
+      ++metadata_iter;
+
+      double delete_ratio =
+          (total_keys != 0) ? static_cast<double>(deleted_keys) / static_cast<double>(total_keys) : 0;
+
+      // pick the file according to the force-compact policy
+      if (file_creation_time <
+              static_cast<uint64_t>(now / 1000 - storageOptions.compact_param_.force_compact_file_age_seconds_) &&
+          delete_ratio >= force_compact_min_ratio) {
+        compact_result = db_->CompactRange(default_compact_range_options_, handles_[idx], &start_key, &stop_key);
+        if (--max_files_to_compact == 0) {
+          break;
+        }
+        continue;
+      }
+
+      // Don't compact an SST created within the last
+      // `dont_compact_sst_created_in_seconds_`. Since props_vec is sorted by
+      // filename, the creation times of the remaining files cannot be smaller,
+      // so stop here.
+      if (file_creation_time >
+          static_cast<uint64_t>(now / 1000 - storageOptions.compact_param_.dont_compact_sst_created_in_seconds_)) {
+        break;
+      }
+
+      // remember the file with the highest delete ratio
+      if (total_keys != 0 && delete_ratio > best_delete_ratio) {
+        best_delete_ratio = delete_ratio;
+        best_filename = iter.first;
+        best_start_key = start_key;
+        start_key.clear();
+        best_stop_key = stop_key;
+        stop_key.clear();
+      }
+    }
+
+    // If max_files_to_compact dropped to zero above, the force-compact budget
+    // is spent and no further file should be compacted.
+    if (best_delete_ratio > best_delete_min_ratio && !best_start_key.empty() && !best_stop_key.empty() &&
+        max_files_to_compact != 0) {
+      compact_result =
+          db_->CompactRange(default_compact_range_options_, handles_[idx], &best_start_key, &best_stop_key);
+    }
+
+    if (!compact_result.ok()) {
+      if (compact_result_vec) {
+        compact_result_vec->push_back(
+            Status::Corruption(handles_[idx]->GetName() + " Failed to do compaction " + compact_result.ToString()));
+      }
+      continue;
+    }
+
+    if (compact_result_vec) {
+      compact_result_vec->push_back(Status::OK());
+    }
+  }
+  return Status::OK();
+}
+
+Status Redis::SetSmallCompactionThreshold(uint64_t small_compaction_threshold) {
+  small_compaction_threshold_ = small_compaction_threshold;
+  return Status::OK();
+}
+
+Status Redis::SetSmallCompactionDurationThreshold(uint64_t small_compaction_duration_threshold) {
+  small_compaction_duration_threshold_ = small_compaction_duration_threshold;
+  return Status::OK();
+}
+
+Status Redis::UpdateSpecificKeyStatistics(const DataType& dtype, const std::string& key, uint64_t count) {
+  if ((statistics_store_->Capacity() != 0U) && (count != 0U) && (small_compaction_threshold_ != 0U)) {
+    KeyStatistics data;
+    std::string lkp_key;
+    lkp_key.append(1, DataTypeTag[static_cast<size_t>(dtype)]);
+    lkp_key.append(key);
+    statistics_store_->Lookup(lkp_key, &data);
+    data.AddModifyCount(count);
+    statistics_store_->Insert(lkp_key, data);
+    AddCompactKeyTaskIfNeeded(dtype, key, data.ModifyCount(), data.AvgDuration());
+  }
+  return Status::OK();
+}
+
+Status Redis::UpdateSpecificKeyDuration(const DataType& dtype, const std::string& key, uint64_t duration) {
+  if ((statistics_store_->Capacity() != 0U) && (duration != 0U) && (small_compaction_duration_threshold_ != 0U)) {
+    KeyStatistics data;
+    std::string lkp_key;
+    lkp_key.append(1, DataTypeTag[static_cast<size_t>(dtype)]);
+    lkp_key.append(key);
+    statistics_store_->Lookup(lkp_key, &data);
+    data.AddDuration(duration);
+    statistics_store_->Insert(lkp_key, data);
+    AddCompactKeyTaskIfNeeded(dtype, key, data.ModifyCount(), data.AvgDuration());
+  }
+  return Status::OK();
+}
+
+Status Redis::AddCompactKeyTaskIfNeeded(const DataType& dtype, const std::string& key, uint64_t total, uint64_t duration) {
+  if (total < small_compaction_threshold_ || duration < small_compaction_duration_threshold_) {
+    return Status::OK();
+  } else {
+    std::string lkp_key(1, DataTypeTag[static_cast<size_t>(dtype)]);
+    lkp_key.append(key);
+    storage_->AddBGTask({dtype, kCompactRange, {key}});
+    statistics_store_->Remove(lkp_key);
+  }
+  return Status::OK();
+}
+
+Status Redis::SetOptions(const OptionType& option_type, const std::unordered_map<std::string, std::string>& options) {
+  if (option_type == OptionType::kDB) {
+    return db_->SetDBOptions(options);
+  }
+  if (handles_.empty()) {
+    return db_->SetOptions(db_->DefaultColumnFamily(), options);
+  }
+  Status s;
+  for (auto handle : handles_) {
+    s = db_->SetOptions(handle, options);
+    if (!s.ok()) {
+      break;
+    }
+  }
+  return s;
+}
+
+void Redis::GetRocksDBInfo(std::string& info, const char* prefix) {
+  std::ostringstream string_stream;
+  string_stream << "#" << prefix << "RocksDB" << "\r\n";
+
+  auto write_aggregated_int_property = [&](const Slice& property, const char* metric) {
+    uint64_t value = 0;
+    db_->GetAggregatedIntProperty(property, &value);
+    string_stream << prefix << metric << ':' << value << "\r\n";
+  };
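+
+  // String-valued properties have no aggregated form, so they are emitted once
+  // per column family, keyed as "<prefix><metric>_<cf name>:<value>".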
+  auto write_property = [&](const Slice& property, const char* metric) {
+    if (handles_.size() == 0) {
+      std::string value;
+      db_->GetProperty(db_->DefaultColumnFamily(), property, &value);
+      string_stream << prefix << metric << "_" << db_->DefaultColumnFamily()->GetName() << ':' << value << "\r\n";
+    } else {
+      for (auto handle : handles_) {
+        std::string value;
+        db_->GetProperty(handle, property, &value);
+        string_stream << prefix << metric << "_" << handle->GetName() << ':' << value << "\r\n";
+      }
+    }
+  };
+
+  auto write_ticker_count = [&](uint32_t tick_type, const char* metric) {
+    if (db_statistics_ == nullptr) {
+      return;
+    }
+    uint64_t count = db_statistics_->getTickerCount(tick_type);
+    string_stream << prefix << metric << ':' << count << "\r\n";
+  };
+
+  auto mapToString = [&](const std::map<std::string, std::string>& map_data, const char* prefix) {
+    for (const auto& kv : map_data) {
+      std::string str_data;
+      str_data += kv.first + ": " + kv.second + "\r\n";
+      string_stream << prefix << str_data;
+    }
+  };
+
+  // memtables num
+  write_aggregated_int_property(rocksdb::DB::Properties::kNumImmutableMemTable, "num_immutable_mem_table");
+  write_aggregated_int_property(rocksdb::DB::Properties::kNumImmutableMemTableFlushed, "num_immutable_mem_table_flushed");
+  write_aggregated_int_property(rocksdb::DB::Properties::kMemTableFlushPending, "mem_table_flush_pending");
+  write_aggregated_int_property(rocksdb::DB::Properties::kNumRunningFlushes, "num_running_flushes");
+
+  // compaction
+  write_aggregated_int_property(rocksdb::DB::Properties::kCompactionPending, "compaction_pending");
+  write_aggregated_int_property(rocksdb::DB::Properties::kNumRunningCompactions, "num_running_compactions");
+
+  // background errors
+  write_aggregated_int_property(rocksdb::DB::Properties::kBackgroundErrors, "background_errors");
+
+  // memtables size
+  write_aggregated_int_property(rocksdb::DB::Properties::kCurSizeActiveMemTable, "cur_size_active_mem_table");
+  write_aggregated_int_property(rocksdb::DB::Properties::kCurSizeAllMemTables, "cur_size_all_mem_tables");
+  write_aggregated_int_property(rocksdb::DB::Properties::kSizeAllMemTables, "size_all_mem_tables");
+
+  // keys
+  write_aggregated_int_property(rocksdb::DB::Properties::kEstimateNumKeys, "estimate_num_keys");
+
+  // table readers mem
+  write_aggregated_int_property(rocksdb::DB::Properties::kEstimateTableReadersMem, "estimate_table_readers_mem");
+
+  // snapshot
+  write_aggregated_int_property(rocksdb::DB::Properties::kNumSnapshots, "num_snapshots");
+
+  // version
+  write_aggregated_int_property(rocksdb::DB::Properties::kNumLiveVersions, "num_live_versions");
+  write_aggregated_int_property(rocksdb::DB::Properties::kCurrentSuperVersionNumber, "current_super_version_number");
+
+  // live data size
+  write_aggregated_int_property(rocksdb::DB::Properties::kEstimateLiveDataSize, "estimate_live_data_size");
+
+  // sst files
+  write_property(rocksdb::DB::Properties::kNumFilesAtLevelPrefix + "0", "num_files_at_level0");
+  write_property(rocksdb::DB::Properties::kNumFilesAtLevelPrefix + "1", "num_files_at_level1");
+  write_property(rocksdb::DB::Properties::kNumFilesAtLevelPrefix + "2", "num_files_at_level2");
+  write_property(rocksdb::DB::Properties::kNumFilesAtLevelPrefix + "3", "num_files_at_level3");
+  write_property(rocksdb::DB::Properties::kNumFilesAtLevelPrefix + "4", "num_files_at_level4");
+  write_property(rocksdb::DB::Properties::kNumFilesAtLevelPrefix + "5", "num_files_at_level5");
+  write_property(rocksdb::DB::Properties::kNumFilesAtLevelPrefix + "6", "num_files_at_level6");
+  write_property(rocksdb::DB::Properties::kCompressionRatioAtLevelPrefix + "0", "compression_ratio_at_level0");
+  write_property(rocksdb::DB::Properties::kCompressionRatioAtLevelPrefix + "1",
"compression_ratio_at_level1"); + write_property(rocksdb::DB::Properties::kCompressionRatioAtLevelPrefix+"2", "compression_ratio_at_level2"); + write_property(rocksdb::DB::Properties::kCompressionRatioAtLevelPrefix+"3", "compression_ratio_at_level3"); + write_property(rocksdb::DB::Properties::kCompressionRatioAtLevelPrefix+"4", "compression_ratio_at_level4"); + write_property(rocksdb::DB::Properties::kCompressionRatioAtLevelPrefix+"5", "compression_ratio_at_level5"); + write_property(rocksdb::DB::Properties::kCompressionRatioAtLevelPrefix+"6", "compression_ratio_at_level6"); + write_aggregated_int_property(rocksdb::DB::Properties::kTotalSstFilesSize, "total_sst_files_size"); + write_aggregated_int_property(rocksdb::DB::Properties::kLiveSstFilesSize, "live_sst_files_size"); + + // pending compaction bytes + write_aggregated_int_property(rocksdb::DB::Properties::kEstimatePendingCompactionBytes, "estimate_pending_compaction_bytes"); + + // block cache + write_aggregated_int_property(rocksdb::DB::Properties::kBlockCacheCapacity, "block_cache_capacity"); + write_aggregated_int_property(rocksdb::DB::Properties::kBlockCacheUsage, "block_cache_usage"); + write_aggregated_int_property(rocksdb::DB::Properties::kBlockCachePinnedUsage, "block_cache_pinned_usage"); + + // blob files + write_aggregated_int_property(rocksdb::DB::Properties::kNumBlobFiles, "num_blob_files"); + write_aggregated_int_property(rocksdb::DB::Properties::kBlobStats, "blob_stats"); + write_aggregated_int_property(rocksdb::DB::Properties::kTotalBlobFileSize, "total_blob_file_size"); + write_aggregated_int_property(rocksdb::DB::Properties::kLiveBlobFileSize, "live_blob_file_size"); + + write_aggregated_int_property(rocksdb::DB::Properties::kBlobCacheCapacity, "blob_cache_capacity"); + write_aggregated_int_property(rocksdb::DB::Properties::kBlobCacheUsage, "blob_cache_usage"); + write_aggregated_int_property(rocksdb::DB::Properties::kBlobCachePinnedUsage, "blob_cache_pinned_usage"); + + //rocksdb ticker + { + // memtables num + write_ticker_count(rocksdb::Tickers::MEMTABLE_HIT, "memtable_hit"); + write_ticker_count(rocksdb::Tickers::MEMTABLE_MISS, "memtable_miss"); + + write_ticker_count(rocksdb::Tickers::BYTES_WRITTEN, "bytes_written"); + write_ticker_count(rocksdb::Tickers::BYTES_READ, "bytes_read"); + write_ticker_count(rocksdb::Tickers::ITER_BYTES_READ, "iter_bytes_read"); + write_ticker_count(rocksdb::Tickers::GET_HIT_L0, "get_hit_l0"); + write_ticker_count(rocksdb::Tickers::GET_HIT_L1, "get_hit_l1"); + write_ticker_count(rocksdb::Tickers::GET_HIT_L2_AND_UP, "get_hit_l2_and_up"); + + write_ticker_count(rocksdb::Tickers::BLOOM_FILTER_USEFUL, "bloom_filter_useful"); + write_ticker_count(rocksdb::Tickers::BLOOM_FILTER_FULL_POSITIVE, "bloom_filter_full_positive"); + write_ticker_count(rocksdb::Tickers::BLOOM_FILTER_FULL_TRUE_POSITIVE, "bloom_filter_full_true_positive"); + write_ticker_count(rocksdb::Tickers::BLOOM_FILTER_PREFIX_CHECKED, "bloom_filter_prefix_checked"); + write_ticker_count(rocksdb::Tickers::BLOOM_FILTER_PREFIX_USEFUL, "bloom_filter_prefix_useful"); + + // compaction + write_ticker_count(rocksdb::Tickers::COMPACTION_KEY_DROP_NEWER_ENTRY, "compaction_key_drop_newer_entry"); + write_ticker_count(rocksdb::Tickers::COMPACTION_KEY_DROP_OBSOLETE, "compaction_key_drop_obsolete"); + write_ticker_count(rocksdb::Tickers::COMPACTION_KEY_DROP_USER, "compaction_key_drop_user"); + write_ticker_count(rocksdb::Tickers::COMPACTION_OPTIMIZED_DEL_DROP_OBSOLETE, "compaction_optimized_del_drop_obsolete"); + 
write_ticker_count(rocksdb::Tickers::COMPACT_READ_BYTES, "compact_read_bytes"); + write_ticker_count(rocksdb::Tickers::COMPACT_WRITE_BYTES, "compact_write_bytes"); + write_ticker_count(rocksdb::Tickers::FLUSH_WRITE_BYTES, "flush_write_bytes"); + + // keys + write_ticker_count(rocksdb::Tickers::NUMBER_KEYS_READ, "number_keys_read"); + write_ticker_count(rocksdb::Tickers::NUMBER_KEYS_WRITTEN, "number_keys_written"); + write_ticker_count(rocksdb::Tickers::NUMBER_KEYS_UPDATED, "number_keys_updated"); + write_ticker_count(rocksdb::Tickers::NUMBER_OF_RESEEKS_IN_ITERATION, "number_of_reseeks_in_iteration"); + + write_ticker_count(rocksdb::Tickers::NUMBER_DB_SEEK, "number_db_seek"); + write_ticker_count(rocksdb::Tickers::NUMBER_DB_NEXT, "number_db_next"); + write_ticker_count(rocksdb::Tickers::NUMBER_DB_PREV, "number_db_prev"); + write_ticker_count(rocksdb::Tickers::NUMBER_DB_SEEK_FOUND, "number_db_seek_found"); + write_ticker_count(rocksdb::Tickers::NUMBER_DB_NEXT_FOUND, "number_db_next_found"); + write_ticker_count(rocksdb::Tickers::NUMBER_DB_PREV_FOUND, "number_db_prev_found"); + write_ticker_count(rocksdb::Tickers::LAST_LEVEL_READ_BYTES, "last_level_read_bytes"); + write_ticker_count(rocksdb::Tickers::LAST_LEVEL_READ_COUNT, "last_level_read_count"); + write_ticker_count(rocksdb::Tickers::NON_LAST_LEVEL_READ_BYTES, "non_last_level_read_bytes"); + write_ticker_count(rocksdb::Tickers::NON_LAST_LEVEL_READ_COUNT, "non_last_level_read_count"); + + // background errors + write_ticker_count(rocksdb::Tickers::STALL_MICROS, "stall_micros"); + + // sst files + write_ticker_count(rocksdb::Tickers::NO_FILE_OPENS, "no_file_opens"); + write_ticker_count(rocksdb::Tickers::NO_FILE_ERRORS, "no_file_errors"); + + // block cache + write_ticker_count(rocksdb::Tickers::BLOCK_CACHE_INDEX_HIT, "block_cache_index_hit"); + write_ticker_count(rocksdb::Tickers::BLOCK_CACHE_INDEX_MISS, "block_cache_index_miss"); + write_ticker_count(rocksdb::Tickers::BLOCK_CACHE_FILTER_HIT, "block_cache_filter_hit"); + write_ticker_count(rocksdb::Tickers::BLOCK_CACHE_FILTER_MISS, "block_cache_filter_miss"); + write_ticker_count(rocksdb::Tickers::BLOCK_CACHE_DATA_HIT, "block_cache_data_hit"); + write_ticker_count(rocksdb::Tickers::BLOCK_CACHE_DATA_MISS, "block_cache_data_miss"); + write_ticker_count(rocksdb::Tickers::BLOCK_CACHE_BYTES_READ, "block_cache_bytes_read"); + write_ticker_count(rocksdb::Tickers::BLOCK_CACHE_BYTES_WRITE, "block_cache_bytes_write"); + + // blob files + write_ticker_count(rocksdb::Tickers::BLOB_DB_NUM_KEYS_WRITTEN, "blob_db_num_keys_written"); + write_ticker_count(rocksdb::Tickers::BLOB_DB_NUM_KEYS_READ, "blob_db_num_keys_read"); + write_ticker_count(rocksdb::Tickers::BLOB_DB_BYTES_WRITTEN, "blob_db_bytes_written"); + write_ticker_count(rocksdb::Tickers::BLOB_DB_BYTES_READ, "blob_db_bytes_read"); + write_ticker_count(rocksdb::Tickers::BLOB_DB_NUM_SEEK, "blob_db_num_seek"); + write_ticker_count(rocksdb::Tickers::BLOB_DB_NUM_NEXT, "blob_db_num_next"); + write_ticker_count(rocksdb::Tickers::BLOB_DB_NUM_PREV, "blob_db_num_prev"); + write_ticker_count(rocksdb::Tickers::BLOB_DB_BLOB_FILE_BYTES_WRITTEN, "blob_db_blob_file_bytes_written"); + write_ticker_count(rocksdb::Tickers::BLOB_DB_BLOB_FILE_BYTES_READ, "blob_db_blob_file_bytes_read"); + + write_ticker_count(rocksdb::Tickers::BLOB_DB_GC_NUM_FILES, "blob_db_gc_num_files"); + write_ticker_count(rocksdb::Tickers::BLOB_DB_GC_NUM_NEW_FILES, "blob_db_gc_num_new_files"); + write_ticker_count(rocksdb::Tickers::BLOB_DB_GC_NUM_KEYS_RELOCATED, "blob_db_gc_num_keys_relocated"); + 
write_ticker_count(rocksdb::Tickers::BLOB_DB_GC_BYTES_RELOCATED, "blob_db_gc_bytes_relocated");
+
+    write_ticker_count(rocksdb::Tickers::BLOB_DB_CACHE_MISS, "blob_db_cache_miss");
+    write_ticker_count(rocksdb::Tickers::BLOB_DB_CACHE_HIT, "blob_db_cache_hit");
+    write_ticker_count(rocksdb::Tickers::BLOB_DB_CACHE_BYTES_READ, "blob_db_cache_bytes_read");
+    write_ticker_count(rocksdb::Tickers::BLOB_DB_CACHE_BYTES_WRITE, "blob_db_cache_bytes_write");
+  }
+  // column family stats
+  std::map<std::string, std::string> mapvalues;
+  db_->rocksdb::DB::GetMapProperty(rocksdb::DB::Properties::kCFStats, &mapvalues);
+  mapToString(mapvalues, prefix);
+  info.append(string_stream.str());
+}
+
+void Redis::SetWriteWalOptions(const bool is_wal_disable) {
+  default_write_options_.disableWAL = is_wal_disable;
+}
+
+void Redis::SetCompactRangeOptions(const bool is_canceled) {
+  if (!default_compact_range_options_.canceled) {
+    default_compact_range_options_.canceled = new std::atomic<bool>(is_canceled);
+  } else {
+    default_compact_range_options_.canceled->store(is_canceled);
+  }
+}
+
+Status Redis::GetProperty(const std::string& property, uint64_t* out) {
+  std::string value;
+  for (const auto& handle : handles_) {
+    db_->GetProperty(handle, property, &value);
+    *out += std::strtoull(value.c_str(), nullptr, 10);
+  }
+  return Status::OK();
+}
+
+Status Redis::ScanKeyNum(std::vector<KeyInfo>* key_infos) {
+  key_infos->resize(DataTypeNum);
+  rocksdb::Status s;
+  s = ScanStringsKeyNum(&((*key_infos)[0]));
+  if (!s.ok()) {
+    return s;
+  }
+  s = ScanHashesKeyNum(&((*key_infos)[1]));
+  if (!s.ok()) {
+    return s;
+  }
+  s = ScanListsKeyNum(&((*key_infos)[2]));
+  if (!s.ok()) {
+    return s;
+  }
+  s = ScanZsetsKeyNum(&((*key_infos)[3]));
+  if (!s.ok()) {
+    return s;
+  }
+  s = ScanSetsKeyNum(&((*key_infos)[4]));
+  if (!s.ok()) {
+    return s;
+  }
+  s = ScanStreamsKeyNum(&((*key_infos)[5]));
+  if (!s.ok()) {
+    return s;
+  }
+
+  return Status::OK();
+}
+
+void Redis::ScanDatabase() {
+  ScanStrings();
+  ScanHashes();
+  ScanLists();
+  ScanZsets();
+  ScanSets();
+}
+
+}  // namespace storage
diff --git a/tools/pika_migrate/src/storage/src/redis.h b/tools/pika_migrate/src/storage/src/redis.h
new file mode 100644
index 0000000000..54c6e10d46
--- /dev/null
+++ b/tools/pika_migrate/src/storage/src/redis.h
@@ -0,0 +1,544 @@
+// Copyright (c) 2017-present, Qihoo, Inc. All rights reserved.
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree. An additional grant
+// of patent rights can be found in the PATENTS file in the same directory.
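`Redis::KeyStatistics` (declared in the header below) smooths per-key scan latencies with a windowed average that discards the single minimum and maximum sample. A self-contained sketch of that computation (the window handling is simplified here to a minimum of three samples):

```cpp
#include <cassert>
#include <cstdint>
#include <deque>

// Average the window with the single min and max dropped, as
// KeyStatistics::AvgDuration() does once its window is full.
uint64_t TrimmedAvg(const std::deque<uint64_t>& window) {
  if (window.size() < 3) return 0;  // need at least one sample besides min/max
  uint64_t min = window[0], max = window[0], sum = 0;
  for (uint64_t d : window) {
    if (d < min) min = d;
    if (d > max) max = d;
    sum += d;
  }
  return (sum - max - min) / (window.size() - 2);
}

int main() {
  std::deque<uint64_t> w = {10, 1000, 20, 30, 1};  // 1 and 1000 are outliers
  assert(TrimmedAvg(w) == 20);                     // (10 + 20 + 30) / 3
  return 0;
}
```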
+
+#ifndef SRC_REDIS_H_
+#define SRC_REDIS_H_
+
+#include <atomic>
+#include <deque>
+#include <map>
+#include <memory>
+#include <mutex>
+#include <set>
+#include <string>
+#include <unordered_map>
+#include <vector>
+
+#include "rocksdb/db.h"
+#include "rocksdb/slice.h"
+#include "rocksdb/status.h"
+
+#include "src/debug.h"
+#include "src/lock_mgr.h"
+#include "src/lru_cache.h"
+#include "src/mutex_impl.h"
+#include "src/type_iterator.h"
+#include "src/custom_comparator.h"
+#include "storage/storage.h"
+#include "storage/storage_define.h"
+#include "pstd/include/env.h"
+#include "src/redis_streams.h"
+#include "pstd/include/pika_codis_slot.h"
+
+#define SPOP_COMPACT_THRESHOLD_COUNT 500
+#define SPOP_COMPACT_THRESHOLD_DURATION (1000 * 1000)  // 1000ms
+
+namespace storage {
+using Status = rocksdb::Status;
+using Slice = rocksdb::Slice;
+
+class Redis {
+ public:
+  Redis(Storage* storage, int32_t index);
+  virtual ~Redis();
+
+  rocksdb::DB* GetDB() { return db_; }
+
+  struct KeyStatistics {
+    size_t window_size;
+    std::deque<uint64_t> durations;
+
+    uint64_t modify_count;
+
+    KeyStatistics() : KeyStatistics(10) {}
+
+    KeyStatistics(size_t size) : window_size(size + 2), modify_count(0) {}
+
+    void AddDuration(uint64_t duration) {
+      durations.push_back(duration);
+      while (durations.size() > window_size) {
+        durations.pop_front();
+      }
+    }
+    uint64_t AvgDuration() {
+      if (durations.size() < window_size) {
+        return 0;
+      }
+      uint64_t min = durations[0];
+      uint64_t max = durations[0];
+      uint64_t sum = 0;
+      for (auto duration : durations) {
+        if (duration < min) {
+          min = duration;
+        }
+        if (duration > max) {
+          max = duration;
+        }
+        sum += duration;
+      }
+      return (sum - max - min) / (durations.size() - 2);
+    }
+    void AddModifyCount(uint64_t count) {
+      modify_count += count;
+    }
+    uint64_t ModifyCount() {
+      return modify_count;
+    }
+  };
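+
+  // RAII guard: records the start time on construction and, on destruction,
+  // feeds the elapsed microseconds for the guarded key into
+  // UpdateSpecificKeyDuration().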
+  struct KeyStatisticsDurationGuard {
+    Redis* ctx;
+    std::string key;
+    uint64_t start_us;
+    DataType dtype;
+    KeyStatisticsDurationGuard(Redis* that, const DataType type, const std::string& key)
+        : ctx(that), key(key), start_us(pstd::NowMicros()), dtype(type) {}
+    ~KeyStatisticsDurationGuard() {
+      uint64_t end_us = pstd::NowMicros();
+      uint64_t duration = end_us > start_us ? end_us - start_us : 0;
+      ctx->UpdateSpecificKeyDuration(dtype, key, duration);
+    }
+  };
+  int GetIndex() const { return index_; }
+
+  Status SetOptions(const OptionType& option_type, const std::unordered_map<std::string, std::string>& options);
+  void SetWriteWalOptions(const bool is_wal_disable);
+  void SetCompactRangeOptions(const bool is_canceled);
+
+  // Common Commands
+  Status Open(const StorageOptions& storage_options, const std::string& db_path);
+
+  virtual Status CompactRange(const rocksdb::Slice* begin, const rocksdb::Slice* end);
+
+  virtual Status LongestNotCompactionSstCompact(const DataType& option_type, std::vector<Status>* compact_result_vec,
+                                                const ColumnFamilyType& type = kMetaAndData);
+
+  virtual Status GetProperty(const std::string& property, uint64_t* out);
+
+  Status ScanKeyNum(std::vector<KeyInfo>* key_info);
+  Status ScanStringsKeyNum(KeyInfo* key_info);
+  Status ScanHashesKeyNum(KeyInfo* key_info);
+  Status ScanListsKeyNum(KeyInfo* key_info);
+  Status ScanZsetsKeyNum(KeyInfo* key_info);
+  Status ScanSetsKeyNum(KeyInfo* key_info);
+  Status ScanStreamsKeyNum(KeyInfo* key_info);
+
+  // Keys Commands
+  virtual Status StringsExpire(const Slice& key, int64_t ttl_millsec, std::string&& prefetch_meta = {});
+  virtual Status HashesExpire(const Slice& key, int64_t ttl_millsec, std::string&& prefetch_meta = {});
+  virtual Status ListsExpire(const Slice& key, int64_t ttl_millsec, std::string&& prefetch_meta = {});
+  virtual Status ZsetsExpire(const Slice& key, int64_t ttl_millsec, std::string&& prefetch_meta = {});
+  virtual Status SetsExpire(const Slice& key, int64_t ttl_millsec, std::string&& prefetch_meta = {});
+
+  virtual Status StringsDel(const Slice& key, std::string&& prefetch_meta = {});
+  virtual Status HashesDel(const Slice& key, std::string&& prefetch_meta = {});
+  virtual Status ListsDel(const Slice& key, std::string&& prefetch_meta = {});
+  virtual Status ZsetsDel(const Slice& key, std::string&& prefetch_meta = {});
+  virtual Status SetsDel(const Slice& key, std::string&& prefetch_meta = {});
+  virtual Status StreamsDel(const Slice& key, std::string&& prefetch_meta = {});
+
+  virtual Status StringsExpireat(const Slice& key, int64_t timestamp_millsec, std::string&& prefetch_meta = {});
+  virtual Status HashesExpireat(const Slice& key, int64_t timestamp_millsec, std::string&& prefetch_meta = {});
+  virtual Status ListsExpireat(const Slice& key, int64_t timestamp_millsec, std::string&& prefetch_meta = {});
+  virtual Status SetsExpireat(const Slice& key, int64_t timestamp_millsec, std::string&& prefetch_meta = {});
+  virtual Status ZsetsExpireat(const Slice& key, int64_t timestamp_millsec, std::string&& prefetch_meta = {});
+
+  virtual Status StringsPersist(const Slice& key, std::string&& prefetch_meta = {});
+  virtual Status HashesPersist(const Slice& key, std::string&& prefetch_meta = {});
+  virtual Status ListsPersist(const Slice& key, std::string&& prefetch_meta = {});
+  virtual Status ZsetsPersist(const Slice& key, std::string&& prefetch_meta = {});
+  virtual Status SetsPersist(const Slice& key, std::string&& prefetch_meta = {});
+
+  virtual Status StringsTTL(const Slice& key, int64_t* ttl_millsec, std::string&& prefetch_meta = {});
+  virtual Status HashesTTL(const Slice& key, int64_t* ttl_millsec, std::string&& prefetch_meta = {});
+  virtual Status ListsTTL(const Slice& key, int64_t* ttl_millsec, std::string&& prefetch_meta = {});
+  virtual Status ZsetsTTL(const Slice& key, int64_t* ttl_millsec, std::string&& prefetch_meta = {});
+  virtual Status SetsTTL(const Slice& key, int64_t* ttl_millsec, std::string&& prefetch_meta = {});
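+
+  // Each Keys command above has one variant per data type because every type
+  // keeps its meta value in kMetaCF under the same user key; the optional
+  // prefetch_meta argument lets a caller that already fetched that meta value
+  // skip the second kMetaCF lookup.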
+
+  // Strings Commands
+  Status Append(const Slice& key, const Slice& value, int32_t* ret, int64_t* expired_timestamp_millsec, std::string& out_new_value);
+  Status BitCount(const Slice& key, int64_t start_offset, int64_t end_offset, int32_t* ret, bool have_range);
+  Status BitOp(BitOpType op, const std::string& dest_key, const std::vector<std::string>& src_keys, std::string& value_to_dest, int64_t* ret);
+  Status Decrby(const Slice& key, int64_t value, int64_t* ret);
+  Status Get(const Slice& key, std::string* value);
+  Status HyperloglogGet(const Slice& key, std::string* value);
+  Status MGet(const Slice& key, std::string* value);
+  Status GetWithTTL(const Slice& key, std::string* value, int64_t* ttl_millsec);
+  Status MGetWithTTL(const Slice& key, std::string* value, int64_t* ttl_millsec);
+  Status GetBit(const Slice& key, int64_t offset, int32_t* ret);
+  Status Getrange(const Slice& key, int64_t start_offset, int64_t end_offset, std::string* ret);
+  Status GetrangeWithValue(const Slice& key, int64_t start_offset, int64_t end_offset,
+                           std::string* ret, std::string* value, int64_t* ttl_millsec);
+  Status GetSet(const Slice& key, const Slice& value, std::string* old_value);
+  Status Incrby(const Slice& key, int64_t value, int64_t* ret, int64_t* expired_timestamp_millsec);
+  Status Incrbyfloat(const Slice& key, const Slice& value, std::string* ret, int64_t* expired_timestamp_sec);
+  Status MSet(const std::vector<KeyValue>& kvs);
+  Status MSetnx(const std::vector<KeyValue>& kvs, int32_t* ret);
+  Status Set(const Slice& key, const Slice& value);
+  Status HyperloglogSet(const Slice& key, const Slice& value);
+  Status Setxx(const Slice& key, const Slice& value, int32_t* ret, int64_t ttl_millsec = 0);
+  Status SetBit(const Slice& key, int64_t offset, int32_t value, int32_t* ret);
+  Status Setex(const Slice& key, const Slice& value, int64_t ttl_millsec);
+  Status Setnx(const Slice& key, const Slice& value, int32_t* ret, int64_t ttl_millsec = 0);
+  Status Setvx(const Slice& key, const Slice& value, const Slice& new_value, int32_t* ret, int64_t ttl_millsec = 0);
+  Status Delvx(const Slice& key, const Slice& value, int32_t* ret);
+  Status Setrange(const Slice& key, int64_t start_offset, const Slice& value, int32_t* ret);
+  Status Strlen(const Slice& key, int32_t* len);
+
+  Status BitPos(const Slice& key, int32_t bit, int64_t* ret);
+  Status BitPos(const Slice& key, int32_t bit, int64_t start_offset, int64_t* ret);
+  Status BitPos(const Slice& key, int32_t bit, int64_t start_offset, int64_t end_offset, int64_t* ret);
+  Status PKSetexAt(const Slice& key, const Slice& value, int64_t time_stamp_millsec);
+
+  Status Exists(const Slice& key);
+  Status Del(const Slice& key);
+  Status Expire(const Slice& key, int64_t ttl_millsec);
+  Status Expireat(const Slice& key, int64_t timestamp_millsec);
+  Status Persist(const Slice& key);
+  Status TTL(const Slice& key, int64_t* ttl_millsec);
+  Status PKPatternMatchDelWithRemoveKeys(const std::string& pattern, int64_t* ret, std::vector<std::string>* remove_keys, const int64_t& max_count);
+
+  Status GetType(const Slice& key, enum DataType& type);
+  Status IsExist(const Slice& key);
+  // Hash Commands
+  Status HDel(const Slice& key, const std::vector<std::string>& fields, int32_t* ret);
+  Status HExists(const Slice& key, const Slice& field);
+  Status HGet(const Slice& key, const Slice& field, std::string* value);
+  Status HGetall(const Slice& key, std::vector<FieldValue>* fvs);
+  Status HGetallWithTTL(const Slice& key, std::vector<FieldValue>* fvs, int64_t* ttl_millsec);
+  Status HIncrby(const Slice& key, const Slice& field, int64_t value, int64_t* ret);
+  Status HIncrbyfloat(const Slice& key, const Slice& field, const Slice& by, std::string* new_value);
+  Status HKeys(const Slice& key, std::vector<std::string>* fields);
+  Status HLen(const Slice& key, int32_t* ret, std::string&& prefetch_meta = {});
+  Status HMGet(const Slice& key, const std::vector<std::string>& fields, std::vector<ValueStatus>* vss);
+  Status HMSet(const Slice& key, const std::vector<FieldValue>& fvs);
+  Status HSet(const Slice& key, const Slice& field, const Slice& value, int32_t* res);
+  Status HSetnx(const Slice& key, const Slice& field, const Slice& value, int32_t* ret);
+  Status HVals(const Slice& key, std::vector<std::string>* values);
+  Status HStrlen(const Slice& key, const Slice& field, int32_t* len);
+  Status HScan(const Slice& key, int64_t cursor, const std::string& pattern, int64_t count,
+               std::vector<FieldValue>* field_values, int64_t* next_cursor);
+  Status HScanx(const Slice& key, const std::string& start_field, const std::string& pattern, int64_t count,
+                std::vector<FieldValue>* field_values, std::string* next_field);
+  Status PKHScanRange(const Slice& key, const Slice& field_start, const std::string& field_end, const Slice& pattern,
+                      int32_t limit, std::vector<FieldValue>* field_values, std::string* next_field);
+  Status PKHRScanRange(const Slice& key, const Slice& field_start, const std::string& field_end, const Slice& pattern,
+                       int32_t limit, std::vector<FieldValue>* field_values, std::string* next_field);
+
+  Status SetMaxCacheStatisticKeys(size_t max_cache_statistic_keys);
+  Status SetSmallCompactionThreshold(uint64_t small_compaction_threshold);
+  Status SetSmallCompactionDurationThreshold(uint64_t small_compaction_duration_threshold);
+
+  std::vector<rocksdb::ColumnFamilyHandle*> GetStringCFHandles() { return {handles_[kMetaCF]}; }
+
+  std::vector<rocksdb::ColumnFamilyHandle*> GetHashCFHandles() {
+    return {handles_.begin() + kMetaCF, handles_.begin() + kHashesDataCF + 1};
+  }
+
+  std::vector<rocksdb::ColumnFamilyHandle*> GetListCFHandles() {
+    return {handles_.begin() + kMetaCF, handles_.begin() + kListsDataCF + 1};
+  }
+
+  std::vector<rocksdb::ColumnFamilyHandle*> GetSetCFHandles() {
+    return {handles_.begin() + kMetaCF, handles_.begin() + kSetsDataCF + 1};
+  }
+
+  std::vector<rocksdb::ColumnFamilyHandle*> GetZsetCFHandles() {
+    return {handles_.begin() + kMetaCF, handles_.begin() + kZsetsScoreCF + 1};
+  }
+
+  std::vector<rocksdb::ColumnFamilyHandle*> GetStreamCFHandles() {
+    return {handles_.begin() + kMetaCF, handles_.end()};
+  }
+  void GetRocksDBInfo(std::string& info, const char* prefix);
+
+  // Sets Commands
+  Status SAdd(const Slice& key, const std::vector<std::string>& members, int32_t* ret);
+  Status SCard(const Slice& key, int32_t* ret, std::string&& prefetch_meta = {});
+  Status SDiff(const std::vector<std::string>& keys, std::vector<std::string>* members);
+  Status SDiffstore(const Slice& destination, const std::vector<std::string>& keys, std::vector<std::string>& value_to_dest, int32_t* ret);
+  Status SInter(const std::vector<std::string>& keys, std::vector<std::string>* members);
+  Status SInterstore(const Slice& destination, const std::vector<std::string>& keys, std::vector<std::string>& value_to_dest, int32_t* ret);
+  Status SIsmember(const Slice& key, const Slice& member, int32_t* ret);
+  Status SMembers(const Slice& key, std::vector<std::string>* members);
+  Status SMembersWithTTL(const Slice& key, std::vector<std::string>* members, int64_t* ttl_millsec);
+  Status SMove(const Slice& source, const Slice& destination, const Slice& member, int32_t* ret);
+  Status SPop(const Slice& key, std::vector<std::string>* members, int64_t cnt);
+  Status SRandmember(const Slice& key, int32_t count, std::vector<std::string>* members);
+  Status SRem(const Slice& key, const std::vector<std::string>& members, int32_t* ret);
+  Status SUnion(const std::vector<std::string>& keys, std::vector<std::string>* members);
+  Status SUnionstore(const Slice& destination, const std::vector<std::string>& keys,
+                     std::vector<std::string>& value_to_dest, int32_t* ret);
+  Status SScan(const Slice& key, int64_t cursor, const std::string& pattern, int64_t count,
+               std::vector<std::string>* members, int64_t* next_cursor);
+  Status AddAndGetSpopCount(const std::string& key, uint64_t* count);
+  Status ResetSpopCount(const std::string& key);
+
+  // Lists commands
+  Status LIndex(const Slice& key, int64_t index, std::string* element);
+  Status LInsert(const Slice& key, const BeforeOrAfter& before_or_after, const std::string& pivot,
+                 const std::string& value, int64_t* ret);
+  Status LLen(const Slice& key, uint64_t* len, std::string&& prefetch_meta = {});
+  Status LPop(const Slice& key, int64_t count, std::vector<std::string>* elements);
+  Status LPush(const Slice& key, const std::vector<std::string>& values, uint64_t* ret);
+  Status LPushx(const Slice& key, const std::vector<std::string>& values, uint64_t* len);
+  Status LRange(const Slice& key, int64_t start, int64_t stop, std::vector<std::string>* ret);
+  Status LRangeWithTTL(const Slice& key, int64_t start, int64_t stop, std::vector<std::string>* ret, int64_t* ttl_millsec);
+  Status LRem(const Slice& key, int64_t count, const Slice& value, uint64_t* ret);
+  Status LSet(const Slice& key, int64_t index, const Slice& value);
+  Status LTrim(const Slice& key, int64_t start, int64_t stop);
+  Status RPop(const Slice& key, int64_t count, std::vector<std::string>* elements);
+  Status RPoplpush(const Slice& source, const Slice& destination, std::string* element);
+  Status RPush(const Slice& key, const std::vector<std::string>& values, uint64_t* ret);
+  Status RPushx(const Slice& key, const std::vector<std::string>& values, uint64_t* len);
+
+  // Zsets Commands
+  Status ZAdd(const Slice& key, const std::vector<ScoreMember>& score_members, int32_t* ret);
+  Status ZCard(const Slice& key, int32_t* card, std::string&& prefetch_meta = {});
+  Status ZCount(const Slice& key, double min, double max, bool left_close, bool right_close, int32_t* ret);
+  Status ZIncrby(const Slice& key, const Slice& member, double increment, double* ret);
+  Status ZRange(const Slice& key, int32_t start, int32_t stop, std::vector<ScoreMember>* score_members);
+  Status ZRangeWithTTL(const Slice& key, int32_t start, int32_t stop, std::vector<ScoreMember>* score_members, int64_t* ttl_millsec);
+  Status ZRangebyscore(const Slice& key, double min, double max, bool left_close, bool right_close, int64_t count,
+                       int64_t offset, std::vector<ScoreMember>* score_members);
+  Status ZRank(const Slice& key, const Slice& member, int32_t* rank);
+  Status ZRem(const Slice& key, const std::vector<std::string>& members, int32_t* ret);
+  Status ZRemrangebyrank(const Slice& key, int32_t start, int32_t stop, int32_t* ret);
+  Status ZRemrangebyscore(const Slice& key, double min, double max, bool left_close, bool right_close, int32_t* ret);
+  Status ZRevrange(const Slice& key, int32_t start, int32_t stop, std::vector<ScoreMember>* score_members);
+  Status ZRevrangebyscore(const Slice& key, double min, double max, bool left_close, bool right_close, int64_t count,
+                          int64_t offset, std::vector<ScoreMember>* score_members);
+  Status ZRevrank(const Slice& key, const Slice& member, int32_t* rank);
+  Status ZScore(const Slice& key, const Slice& member, double* score);
+  Status ZGetAll(const Slice& key, double weight, std::map<std::string, double>* value_to_dest);
+  Status ZUnionstore(const Slice& destination, const std::vector<std::string>& keys, const std::vector<double>& weights,
+                     AGGREGATE agg, std::map<std::string, double>& value_to_dest, int32_t* ret);
+  Status ZInterstore(const Slice& destination, const std::vector<std::string>& keys, const std::vector<double>& weights,
+                     AGGREGATE agg, std::vector<ScoreMember>& value_to_dest, int32_t* ret);
+  Status ZRangebylex(const Slice& key, const Slice& min, const Slice& max, bool left_close, bool right_close,
+                     std::vector<std::string>* members);
+  Status ZLexcount(const Slice& key, const Slice& min, const Slice& max, bool left_close, bool right_close,
+                   int32_t* ret);
+  Status ZRemrangebylex(const Slice& key, const Slice& min, const Slice& max, bool left_close, bool right_close,
+                        int32_t* ret);
+  Status ZScan(const Slice& key, int64_t cursor, const std::string& pattern, int64_t count,
+               std::vector<ScoreMember>* score_members, int64_t* next_cursor);
+  Status ZPopMax(const Slice& key, int64_t count, std::vector<ScoreMember>* score_members);
+  Status ZPopMin(const Slice& key, int64_t count, std::vector<ScoreMember>* score_members);
+
+  //===--------------------------------------------------------------------===//
+  // Stream Commands
+  //===--------------------------------------------------------------------===//
+  Status XAdd(const Slice& key, const std::string& serialized_message, StreamAddTrimArgs& args);
+  Status XDel(const Slice& key, const std::vector<streamID>& ids, int32_t& count);
+  Status XTrim(const Slice& key, StreamAddTrimArgs& args, int32_t& count);
+  Status XRange(const Slice& key, const StreamScanArgs& args, std::vector<IdMessage>& id_messages, std::string&& prefetch_meta = {});
+  Status XRevrange(const Slice& key, const StreamScanArgs& args, std::vector<IdMessage>& id_messages);
+  Status XLen(const Slice& key, int32_t& len);
+  Status XRead(const StreamReadGroupReadArgs& args, std::vector<std::vector<IdMessage>>& results,
+               std::vector<std::string>& reserved_keys);
+  Status XInfo(const Slice& key, StreamInfoResult& result);
+  Status ScanStream(const ScanStreamOptions& option, std::vector<IdMessage>& id_messages, std::string& next_field,
+                    rocksdb::ReadOptions& read_options);
+  // get and parse the stream meta if found
+  // @return ok only when the stream meta exists
+  Status GetStreamMeta(StreamMetaValue& stream_meta, const rocksdb::Slice& key, rocksdb::ReadOptions& read_options, std::string&& prefetch_meta = {});
+
+  // Before calling this function, the caller should ensure that the ids are valid
+  Status DeleteStreamMessages(const rocksdb::Slice& key, const StreamMetaValue& stream_meta,
+                              const std::vector<streamID>& ids, rocksdb::ReadOptions& read_options);
+
+  // Before calling this function, the caller should ensure that the ids are valid
+  Status DeleteStreamMessages(const rocksdb::Slice& key, const StreamMetaValue& stream_meta,
+                              const std::vector<std::string>& serialized_ids, rocksdb::ReadOptions& read_options);
+
+  Status TrimStream(int32_t& count, StreamMetaValue& stream_meta, const rocksdb::Slice& key, StreamAddTrimArgs& args,
+                    rocksdb::ReadOptions& read_options);
+
+  void ScanDatabase();
+  void ScanStrings();
+  void ScanHashes();
+  void ScanLists();
+  void ScanZsets();
+  void ScanSets();
+
+  TypeIterator* CreateIterator(const DataType& type, const std::string& pattern, const Slice* lower_bound, const Slice* upper_bound) {
+    return CreateIterator(DataTypeTag[static_cast<size_t>(type)], pattern, lower_bound, upper_bound);
+  }
+
+  TypeIterator* CreateIterator(const char& type, const std::string& pattern, const Slice* lower_bound, const Slice* upper_bound) {
+    rocksdb::ReadOptions options;
+    options.fill_cache = false;
+    options.iterate_lower_bound = lower_bound;
+    options.iterate_upper_bound = upper_bound;
+    switch (type) {
+      case 'k':
+        return new StringsIterator(options, db_, handles_[kMetaCF], pattern);
+      case 'h':
+        return new HashesIterator(options, db_, handles_[kMetaCF], pattern);
+      case 's':
+        return new SetsIterator(options, db_, handles_[kMetaCF], pattern);
+      case 'l':
+        return new ListsIterator(options, db_, handles_[kMetaCF], pattern);
+      case 'z':
+        return new ZsetsIterator(options, db_, handles_[kMetaCF], pattern);
+      case 'x':
+        return new StreamsIterator(options, db_, handles_[kMetaCF], pattern);
+      case 'a':
+        return new AllIterator(options, db_, handles_[kMetaCF], pattern);
+      default:
+        LOG(WARNING) << "Invalid datatype to create iterator";
+        return nullptr;
+    }
+    return nullptr;
+  }
+
+  enum DataType GetMetaValueType(const std::string& meta_value) {
+    DataType meta_type = static_cast<DataType>(static_cast<uint8_t>(meta_value[0]));
+    return meta_type;
+  }
+
+  inline bool ExpectedMetaValue(enum DataType type, const std::string& meta_value) {
+    auto meta_type = static_cast<DataType>(static_cast<uint8_t>(meta_value[0]));
+    if (type == meta_type) {
+      return true;
+    }
+    return false;
+  }
+
+  inline bool ExpectedStale(const std::string& meta_value) {
+    auto meta_type = static_cast<DataType>(static_cast<uint8_t>(meta_value[0]));
+    switch (meta_type) {
+      case DataType::kZSets:
+      case DataType::kSets:
+      case DataType::kHashes: {
+        ParsedBaseMetaValue parsed_meta_value(meta_value);
+        return (parsed_meta_value.IsStale() || parsed_meta_value.Count() == 0);
+      }
+      case DataType::kLists: {
+        ParsedListsMetaValue parsed_lists_meta_value(meta_value);
+        return (parsed_lists_meta_value.IsStale() || parsed_lists_meta_value.Count() == 0);
+      }
+      case DataType::kStrings: {
+        ParsedStringsValue parsed_strings_value(meta_value);
+        return parsed_strings_value.IsStale();
+      }
+      case DataType::kStreams: {
+        StreamMetaValue stream_meta_value;
+        return stream_meta_value.length() == 0;
+      }
+      default: {
+        return false;
+      }
+    }
+  }
+
+ private:
+  Status GenerateStreamID(const StreamMetaValue& stream_meta, StreamAddTrimArgs& args);
+
+  Status StreamScanRange(const Slice& key, const uint64_t version, const Slice& id_start, const std::string& id_end,
+                         const Slice& pattern, int32_t limit, std::vector<IdMessage>& id_messages, std::string& next_id,
+                         rocksdb::ReadOptions& read_options);
+  Status StreamReScanRange(const Slice& key, const uint64_t version, const Slice& id_start, const std::string& id_end,
+                           const Slice& pattern, int32_t limit, std::vector<IdMessage>& id_values, std::string& next_id,
+                           rocksdb::ReadOptions& read_options);
+
+  struct TrimRet {
+    // the count of deleted messages
+    int32_t count{0};
+    // the next field after trim
+    std::string next_field;
+    // the max deleted field, will be empty if no message is deleted
+    std::string max_deleted_field;
+  };
+
+  Status TrimByMaxlen(TrimRet& trim_ret, StreamMetaValue& stream_meta, const rocksdb::Slice& key,
+                      const StreamAddTrimArgs& args, rocksdb::ReadOptions& read_options);
+
+  Status TrimByMinid(TrimRet& trim_ret, StreamMetaValue& stream_meta, const rocksdb::Slice& key,
+                     const StreamAddTrimArgs& args, rocksdb::ReadOptions& read_options);
+
+  inline Status SetFirstID(const rocksdb::Slice& key, StreamMetaValue& stream_meta, rocksdb::ReadOptions& read_options);
+
+  inline Status SetLastID(const rocksdb::Slice& key, StreamMetaValue& stream_meta, rocksdb::ReadOptions& read_options);
+
+  inline Status SetFirstOrLastID(const rocksdb::Slice& key, StreamMetaValue& stream_meta, bool is_set_first,
+                                 rocksdb::ReadOptions& read_options);
+
+  class OBDSstListener : public rocksdb::EventListener {
+   public:
+    void OnTableFileDeleted(const rocksdb::TableFileDeletionInfo& info) override {
+      std::lock_guard lk(mu_);
+      if (!running_) {
+        return;
+      }
+      deletedFileNameInOBDCompact_.emplace(info.file_path);
+    }
+
+    void Clear() {
+      std::lock_guard lk(mu_);
+      deletedFileNameInOBDCompact_.clear();
+    }
+    bool Contains(const std::string& str) {
+      std::lock_guard lk(mu_);
+      return deletedFileNameInOBDCompact_.find(str) != deletedFileNameInOBDCompact_.end();
+    }
+
+    // turn recording on/off
+    void Start() {
+      std::lock_guard lk(mu_);
+      running_ = true;
+    }
+    void End() {
+      std::lock_guard lk(mu_);
+      running_ = false;
+    }
+
+    std::mutex mu_;
+    bool running_ = false;
+    // names of files (.sst) deleted while an OBD compact is running
+    std::set<std::string> deletedFileNameInOBDCompact_;
+  };
+
+ public:
+  inline rocksdb::WriteOptions GetDefaultWriteOptions() const { return default_write_options_; }
+
+ private:
+  int32_t index_ = 0;
+  Storage* const storage_;
+  std::shared_ptr<LockMgr> lock_mgr_;
+  rocksdb::DB* db_ = nullptr;
+  std::shared_ptr<rocksdb::Statistics> db_statistics_ = nullptr;
+  // TODO(wangshaoyi): separate env for each rocksdb instance
+  // rocksdb::Env* env_ = nullptr;
+
+  std::vector<rocksdb::ColumnFamilyHandle*> handles_;
+  rocksdb::WriteOptions default_write_options_;
+  rocksdb::ReadOptions default_read_options_;
+  rocksdb::CompactRangeOptions default_compact_range_options_;
+  std::atomic<bool> in_compact_flag_{false};
+  OBDSstListener listener_;  // records SST files deleted while an OBD compact is running
+
+  // For Scan
+  std::unique_ptr<LRUCache<std::string, std::string>> scan_cursors_store_;
+  std::unique_ptr<LRUCache<std::string, size_t>> spop_counts_store_;
+
+  Status GetScanStartPoint(const DataType& type, const Slice& key, const Slice& pattern, int64_t cursor, std::string* start_point);
+  Status StoreScanNextPoint(const DataType& type, const Slice& key, const Slice& pattern, int64_t cursor, const std::string& next_point);
+
+  // For Statistics
+  std::atomic_uint64_t small_compaction_threshold_;
+  std::atomic_uint64_t small_compaction_duration_threshold_;
+  std::unique_ptr<LRUCache<std::string, KeyStatistics>> statistics_store_;
+
+  Status UpdateSpecificKeyStatistics(const DataType& dtype, const std::string& key, uint64_t count);
+  Status UpdateSpecificKeyDuration(const DataType& dtype, const std::string& key, uint64_t duration);
+  Status AddCompactKeyTaskIfNeeded(const DataType& dtype, const std::string& key, uint64_t count, uint64_t duration);
+};
+
+}  // namespace storage
+#endif  // SRC_REDIS_H_
diff --git a/tools/pika_migrate/src/storage/src/redis_hashes.cc b/tools/pika_migrate/src/storage/src/redis_hashes.cc
new file mode 100644
index 0000000000..1a947c07e7
--- /dev/null
+++ b/tools/pika_migrate/src/storage/src/redis_hashes.cc
@@ -0,0 +1,1398 @@
+// Copyright (c) 2017-present, Qihoo, Inc. All rights reserved.
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree. An additional grant
+// of patent rights can be found in the PATENTS file in the same directory.
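The hash commands below all follow the meta/data split visible in the code: a `kMetaCF` entry holds `{count, version, ...}` for the user key, and each field lives in `hash_data_cf` under a composite `(key, version, field)` key, so dropping a whole hash only has to bump the version. A toy in-memory model of that scheme — not the real on-disk encoding — using plain `std::map`s:

```cpp
#include <cstdint>
#include <iostream>
#include <map>
#include <string>
#include <tuple>

struct Meta { uint64_t count = 0; uint64_t version = 0; };

int main() {
  std::map<std::string, Meta> meta_cf;
  std::map<std::tuple<std::string, uint64_t, std::string>, std::string> data_cf;

  // HSET myhash f1 v1
  Meta& m = meta_cf["myhash"];
  data_cf[{"myhash", m.version, "f1"}] = "v1";
  m.count = 1;

  // DEL myhash: O(1) -- bump the version and zero the count; the old field row
  // becomes unreachable and is left for the compaction filter to reclaim.
  m.version += 1;
  m.count = 0;

  // HGET myhash f1 now misses, because it looks up the new version.
  bool found = data_cf.count({"myhash", m.version, "f1"}) != 0;
  std::cout << (found ? "found" : "missing") << "\n";  // prints: missing
  return 0;
}
```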
+
+#include "src/redis.h"
+
+#include <memory>
+
+#include <unordered_set>
+#include <vector>
+
+#include "pstd/include/pika_codis_slot.h"
+#include "src/base_filter.h"
+#include "src/scope_record_lock.h"
+#include "src/scope_snapshot.h"
+#include "src/base_data_key_format.h"
+#include "src/base_data_value_format.h"
+#include "storage/util.h"
+
+namespace storage {
+Status Redis::ScanHashesKeyNum(KeyInfo* key_info) {
+  uint64_t keys = 0;
+  uint64_t expires = 0;
+  uint64_t ttl_sum = 0;
+  uint64_t invaild_keys = 0;
+
+  rocksdb::ReadOptions iterator_options;
+  const rocksdb::Snapshot* snapshot;
+  ScopeSnapshot ss(db_, &snapshot);
+  iterator_options.snapshot = snapshot;
+  iterator_options.fill_cache = false;
+
+  pstd::TimeType curtime = pstd::NowMillis();
+
+  rocksdb::Iterator* iter = db_->NewIterator(iterator_options, handles_[kMetaCF]);
+  for (iter->SeekToFirst(); iter->Valid(); iter->Next()) {
+    if (!ExpectedMetaValue(DataType::kHashes, iter->value().ToString())) {
+      continue;
+    }
+    ParsedHashesMetaValue parsed_hashes_meta_value(iter->value());
+    if (parsed_hashes_meta_value.IsStale() || parsed_hashes_meta_value.Count() == 0) {
+      invaild_keys++;
+    } else {
+      keys++;
+      if (!parsed_hashes_meta_value.IsPermanentSurvival()) {
+        expires++;
+        ttl_sum += parsed_hashes_meta_value.Etime() - curtime;
+      }
+    }
+  }
+  delete iter;
+
+  key_info->keys = keys;
+  key_info->expires = expires;
+  key_info->avg_ttl = (expires != 0) ? ttl_sum / expires : 0;
+  key_info->invaild_keys = invaild_keys;
+  return Status::OK();
+}
+
+Status Redis::HDel(const Slice& key, const std::vector<std::string>& fields, int32_t* ret) {
+  uint32_t statistic = 0;
+  std::vector<std::string> filtered_fields;
+  std::unordered_set<std::string> field_set;
+  for (const auto& field : fields) {
+    if (field_set.find(field) == field_set.end()) {
+      field_set.insert(field);
+      filtered_fields.push_back(field);
+    }
+  }
+
+  rocksdb::WriteBatch batch;
+  rocksdb::ReadOptions read_options;
+  const rocksdb::Snapshot* snapshot;
+
+  std::string meta_value;
+  int32_t del_cnt = 0;
+  uint64_t version = 0;
+  ScopeRecordLock l(lock_mgr_, key);
+  ScopeSnapshot ss(db_, &snapshot);
+  read_options.snapshot = snapshot;
+
+  BaseMetaKey base_meta_key(key);
+  Status s = db_->Get(read_options, handles_[kMetaCF], base_meta_key.Encode(), &meta_value);
+  if (s.ok() && !ExpectedMetaValue(DataType::kHashes, meta_value)) {
+    if (ExpectedStale(meta_value)) {
+      s = Status::NotFound();
+    } else {
+      return Status::InvalidArgument(
+          "WRONGTYPE, key: " + key.ToString() + ", expect type: " +
+          DataTypeStrings[static_cast<size_t>(DataType::kHashes)] + ", get type: " +
+          DataTypeStrings[static_cast<size_t>(GetMetaValueType(meta_value))]);
+    }
+  }
+  if (s.ok()) {
+    ParsedHashesMetaValue parsed_hashes_meta_value(&meta_value);
+    if (parsed_hashes_meta_value.IsStale() || parsed_hashes_meta_value.Count() == 0) {
+      *ret = 0;
+      return Status::OK();
+    } else {
+      std::string data_value;
+      version = parsed_hashes_meta_value.Version();
+      for (const auto& field : filtered_fields) {
+        HashesDataKey hashes_data_key(key, version, field);
+        s = db_->Get(read_options, handles_[kHashesDataCF], hashes_data_key.Encode(), &data_value);
+        if (s.ok()) {
+          del_cnt++;
+          statistic++;
+          batch.Delete(handles_[kHashesDataCF], hashes_data_key.Encode());
+        } else if (s.IsNotFound()) {
+          continue;
+        } else {
+          return s;
+        }
+      }
+      *ret = del_cnt;
+      if (!parsed_hashes_meta_value.CheckModifyCount(-del_cnt)) {
+        return Status::InvalidArgument("hash size overflow");
+      }
+      parsed_hashes_meta_value.ModifyCount(-del_cnt);
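+      // write the decremented count back to the meta CF in the same batch, so
+      // the meta update and the field deletions commit atomically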
base_meta_key.Encode(), meta_value); + } + } else if (s.IsNotFound()) { + *ret = 0; + return Status::OK(); + } else { + return s; + } + s = db_->Write(default_write_options_, &batch); + UpdateSpecificKeyStatistics(DataType::kHashes, key.ToString(), statistic); + return s; +} + +Status Redis::HExists(const Slice& key, const Slice& field) { + std::string value; + return HGet(key, field, &value); +} + +Status Redis::HGet(const Slice& key, const Slice& field, std::string* value) { + std::string meta_value; + uint64_t version = 0; + rocksdb::ReadOptions read_options; + const rocksdb::Snapshot* snapshot; + ScopeSnapshot ss(db_, &snapshot); + read_options.snapshot = snapshot; + + BaseMetaKey base_meta_key(key); + Status s = db_->Get(read_options, handles_[kMetaCF], base_meta_key.Encode(), &meta_value); + if (s.ok() && !ExpectedMetaValue(DataType::kHashes, meta_value)) { + if (ExpectedStale(meta_value)) { + s = Status::NotFound(); + } else { + return Status::InvalidArgument( + "WRONGTYPE, key: " + key.ToString() + ", expect type: " + + DataTypeStrings[static_cast(DataType::kHashes)] + ", get type: " + + DataTypeStrings[static_cast(GetMetaValueType(meta_value))]); + } + } + if (s.ok()) { + ParsedHashesMetaValue parsed_hashes_meta_value(&meta_value); + if (parsed_hashes_meta_value.IsStale()) { + return Status::NotFound("Stale"); + } else if (parsed_hashes_meta_value.Count() == 0) { + return Status::NotFound(); + } else { + version = parsed_hashes_meta_value.Version(); + HashesDataKey data_key(key, version, field); + s = db_->Get(read_options, handles_[kHashesDataCF], data_key.Encode(), value); + if (s.ok()) { + ParsedBaseDataValue parsed_internal_value(value); + parsed_internal_value.StripSuffix(); + } + } + } + return s; +} + +Status Redis::HGetall(const Slice& key, std::vector* fvs) { + rocksdb::ReadOptions read_options; + const rocksdb::Snapshot* snapshot; + + std::string meta_value; + uint64_t version = 0; + ScopeSnapshot ss(db_, &snapshot); + read_options.snapshot = snapshot; + + BaseMetaKey base_meta_key(key); + Status s = db_->Get(read_options, handles_[kMetaCF], base_meta_key.Encode(), &meta_value); + if (s.ok() && !ExpectedMetaValue(DataType::kHashes, meta_value)) { + if (ExpectedStale(meta_value)) { + s = Status::NotFound(); + } else { + return Status::InvalidArgument( + "WRONGTYPE, key: " + key.ToString() + ", expect type: " + + DataTypeStrings[static_cast(DataType::kHashes)] + ", get type: " + + DataTypeStrings[static_cast(GetMetaValueType(meta_value))]); + } + } + if (s.ok()) { + ParsedHashesMetaValue parsed_hashes_meta_value(&meta_value); + if (parsed_hashes_meta_value.IsStale()) { + return Status::NotFound("Stale"); + } else if (parsed_hashes_meta_value.Count() == 0) { + return Status::NotFound(); + } else { + version = parsed_hashes_meta_value.Version(); + HashesDataKey hashes_data_key(key, version, ""); + Slice prefix = hashes_data_key.EncodeSeekKey(); + KeyStatisticsDurationGuard guard(this, DataType::kHashes, key.ToString()); + auto iter = db_->NewIterator(read_options, handles_[kHashesDataCF]); + for (iter->Seek(prefix); iter->Valid() && iter->key().starts_with(prefix); iter->Next()) { + ParsedHashesDataKey parsed_hashes_data_key(iter->key()); + ParsedBaseDataValue parsed_internal_value(iter->value()); + fvs->push_back({parsed_hashes_data_key.field().ToString(), parsed_internal_value.UserValue().ToString()}); + } + delete iter; + } + } + return s; +} + +Status Redis::HGetallWithTTL(const Slice& key, std::vector* fvs, int64_t* ttl_millsec) { + rocksdb::ReadOptions read_options; + 
const rocksdb::Snapshot* snapshot; + + std::string meta_value; + uint64_t version = 0; + ScopeSnapshot ss(db_, &snapshot); + read_options.snapshot = snapshot; + BaseMetaKey base_meta_key(key); + Status s = db_->Get(read_options, handles_[kMetaCF], base_meta_key.Encode(), &meta_value); + if (s.ok() && !ExpectedMetaValue(DataType::kHashes, meta_value)) { + if (ExpectedStale(meta_value)) { + s = Status::NotFound(); + } else { + return Status::InvalidArgument( + "WRONGTYPE, key: " + key.ToString() + ", expect type: " + + DataTypeStrings[static_cast(DataType::kHashes)] + ", get type: " + + DataTypeStrings[static_cast(GetMetaValueType(meta_value))]); + } + } + if (s.ok()) { + ParsedHashesMetaValue parsed_hashes_meta_value(&meta_value); + if (parsed_hashes_meta_value.Count() == 0) { + return Status::NotFound(); + } else if (parsed_hashes_meta_value.IsStale()) { + return Status::NotFound("Stale"); + } else { + // ttl + *ttl_millsec = parsed_hashes_meta_value.Etime(); + if (*ttl_millsec == 0) { + *ttl_millsec = -1; + } else { + pstd::TimeType curtime = pstd::NowMillis(); + *ttl_millsec = *ttl_millsec - curtime >= 0 ? *ttl_millsec - curtime : -2; + } + + version = parsed_hashes_meta_value.Version(); + HashesDataKey hashes_data_key(key, version, ""); + Slice prefix = hashes_data_key.EncodeSeekKey(); + KeyStatisticsDurationGuard guard(this, DataType::kHashes, key.ToString()); + auto iter = db_->NewIterator(read_options, handles_[kHashesDataCF]); + for (iter->Seek(prefix); iter->Valid() && iter->key().starts_with(prefix); iter->Next()) { + ParsedHashesDataKey parsed_hashes_data_key(iter->key()); + ParsedBaseDataValue parsed_internal_value(iter->value()); + fvs->push_back({parsed_hashes_data_key.field().ToString(), parsed_internal_value.UserValue().ToString()}); + } + delete iter; + } + } + return s; +} + +Status Redis::HIncrby(const Slice& key, const Slice& field, int64_t value, int64_t* ret) { + *ret = 0; + rocksdb::WriteBatch batch; + ScopeRecordLock l(lock_mgr_, key); + + uint64_t version = 0; + uint32_t statistic = 0; + std::string old_value; + std::string meta_value; + + + BaseMetaKey base_meta_key(key); + Status s = db_->Get(default_read_options_, handles_[kMetaCF], base_meta_key.Encode(), &meta_value); + char value_buf[32] = {0}; + char meta_value_buf[4] = {0}; + if (s.ok() && !ExpectedMetaValue(DataType::kHashes, meta_value)) { + if (ExpectedStale(meta_value)) { + s = Status::NotFound(); + } else { + return Status::InvalidArgument( + "WRONGTYPE, key: " + key.ToString() + ", expect type: " + + DataTypeStrings[static_cast(DataType::kHashes)] + ", get type: " + + DataTypeStrings[static_cast(GetMetaValueType(meta_value))]); + } + } + if (s.ok()) { + ParsedHashesMetaValue parsed_hashes_meta_value(&meta_value); + if (parsed_hashes_meta_value.IsStale() || parsed_hashes_meta_value.Count() == 0) { + version = parsed_hashes_meta_value.UpdateVersion(); + parsed_hashes_meta_value.SetCount(1); + parsed_hashes_meta_value.SetEtime(0); + batch.Put(handles_[kMetaCF], base_meta_key.Encode(), meta_value); + HashesDataKey hashes_data_key(key, version, field); + Int64ToStr(value_buf, 32, value); + BaseDataValue internal_value(value_buf); + batch.Put(handles_[kHashesDataCF], hashes_data_key.Encode(), internal_value.Encode()); + *ret = value; + } else { + version = parsed_hashes_meta_value.Version(); + HashesDataKey hashes_data_key(key, version, field); + s = db_->Get(default_read_options_, handles_[kHashesDataCF], hashes_data_key.Encode(), &old_value); + if (s.ok()) { + ParsedBaseDataValue 
parsed_internal_value(&old_value); + parsed_internal_value.StripSuffix(); + int64_t ival = 0; + if (StrToInt64(old_value.data(), old_value.size(), &ival) == 0) { + return Status::Corruption("hash value is not an integer"); + } + if ((value >= 0 && LLONG_MAX - value < ival) || (value < 0 && LLONG_MIN - value > ival)) { + return Status::InvalidArgument("Overflow"); + } + *ret = ival + value; + Int64ToStr(value_buf, 32, *ret); + BaseDataValue internal_value(value_buf); + batch.Put(handles_[kHashesDataCF], hashes_data_key.Encode(), internal_value.Encode()); + statistic++; + } else if (s.IsNotFound()) { + Int64ToStr(value_buf, 32, value); + if (!parsed_hashes_meta_value.CheckModifyCount(1)) { + return Status::InvalidArgument("hash size overflow"); + } + BaseDataValue internal_value(value_buf); + parsed_hashes_meta_value.ModifyCount(1); + batch.Put(handles_[kMetaCF], base_meta_key.Encode(), meta_value); + batch.Put(handles_[kHashesDataCF], hashes_data_key.Encode(), internal_value.Encode()); + *ret = value; + } else { + return s; + } + } + } else if (s.IsNotFound()) { + EncodeFixed32(meta_value_buf, 1); + HashesMetaValue hashes_meta_value(DataType::kHashes, Slice(meta_value_buf, 4)); + version = hashes_meta_value.UpdateVersion(); + batch.Put(handles_[kMetaCF], base_meta_key.Encode(), hashes_meta_value.Encode()); + HashesDataKey hashes_data_key(key, version, field); + + Int64ToStr(value_buf, 32, value); + BaseDataValue internal_value(value_buf); + batch.Put(handles_[kHashesDataCF], hashes_data_key.Encode(), internal_value.Encode()); + *ret = value; + } else { + return s; + } + s = db_->Write(default_write_options_, &batch); + UpdateSpecificKeyStatistics(DataType::kHashes, key.ToString(), statistic); + return s; +} + +Status Redis::HIncrbyfloat(const Slice& key, const Slice& field, const Slice& by, std::string* new_value) { + new_value->clear(); + rocksdb::WriteBatch batch; + ScopeRecordLock l(lock_mgr_, key); + + uint64_t version = 0; + uint32_t statistic = 0; + std::string meta_value; + std::string old_value_str; + long double long_double_by; + + if (StrToLongDouble(by.data(), by.size(), &long_double_by) == -1) { + return Status::Corruption("value is not a vaild float"); + } + + + BaseMetaKey base_meta_key(key); + Status s = db_->Get(default_read_options_, handles_[kMetaCF], base_meta_key.Encode(), &meta_value); + char meta_value_buf[4] = {0}; + if (s.ok() && !ExpectedMetaValue(DataType::kHashes, meta_value)) { + if (ExpectedStale(meta_value)) { + s = Status::NotFound(); + } else { + return Status::InvalidArgument( + "WRONGTYPE, key: " + key.ToString() + ", expect type: " + + DataTypeStrings[static_cast(DataType::kHashes)] + ", get type: " + + DataTypeStrings[static_cast(GetMetaValueType(meta_value))]); + } + } + if (s.ok()) { + ParsedHashesMetaValue parsed_hashes_meta_value(&meta_value); + if (parsed_hashes_meta_value.IsStale() || parsed_hashes_meta_value.Count() == 0) { + version = parsed_hashes_meta_value.UpdateVersion(); + parsed_hashes_meta_value.SetCount(1); + parsed_hashes_meta_value.SetEtime(0); + batch.Put(handles_[kMetaCF], base_meta_key.Encode(), meta_value); + HashesDataKey hashes_data_key(key, version, field); + + LongDoubleToStr(long_double_by, new_value); + BaseDataValue inter_value(*new_value); + batch.Put(handles_[kHashesDataCF], hashes_data_key.Encode(), inter_value.Encode()); + } else { + version = parsed_hashes_meta_value.Version(); + HashesDataKey hashes_data_key(key, version, field); + s = db_->Get(default_read_options_, handles_[kHashesDataCF], hashes_data_key.Encode(), 
&old_value_str); + if (s.ok()) { + long double total; + long double old_value; + ParsedBaseDataValue parsed_internal_value(&old_value_str); + parsed_internal_value.StripSuffix(); + if (StrToLongDouble(old_value_str.data(), old_value_str.size(), &old_value) == -1) { + return Status::Corruption("value is not a vaild float"); + } + + total = old_value + long_double_by; + if (LongDoubleToStr(total, new_value) == -1) { + return Status::InvalidArgument("Overflow"); + } + BaseDataValue internal_value(*new_value); + batch.Put(handles_[kHashesDataCF], hashes_data_key.Encode(), internal_value.Encode()); + statistic++; + } else if (s.IsNotFound()) { + LongDoubleToStr(long_double_by, new_value); + if (!parsed_hashes_meta_value.CheckModifyCount(1)) { + return Status::InvalidArgument("hash size overflow"); + } + parsed_hashes_meta_value.ModifyCount(1); + BaseDataValue internal_value(*new_value); + batch.Put(handles_[kMetaCF], base_meta_key.Encode(), meta_value); + batch.Put(handles_[kHashesDataCF], hashes_data_key.Encode(), internal_value.Encode()); + } else { + return s; + } + } + } else if (s.IsNotFound()) { + EncodeFixed32(meta_value_buf, 1); + HashesMetaValue hashes_meta_value(DataType::kHashes, Slice(meta_value_buf, 4)); + version = hashes_meta_value.UpdateVersion(); + batch.Put(handles_[kMetaCF], base_meta_key.Encode(), hashes_meta_value.Encode()); + + HashesDataKey hashes_data_key(key, version, field); + LongDoubleToStr(long_double_by, new_value); + BaseDataValue internal_value(*new_value); + batch.Put(handles_[kHashesDataCF], hashes_data_key.Encode(), internal_value.Encode()); + } else { + return s; + } + s = db_->Write(default_write_options_, &batch); + UpdateSpecificKeyStatistics(DataType::kHashes, key.ToString(), statistic); + return s; +} + +Status Redis::HKeys(const Slice& key, std::vector* fields) { + rocksdb::ReadOptions read_options; + const rocksdb::Snapshot* snapshot; + + std::string meta_value; + uint64_t version = 0; + ScopeSnapshot ss(db_, &snapshot); + read_options.snapshot = snapshot; + + BaseMetaKey base_meta_key(key); + Status s = db_->Get(read_options, handles_[kMetaCF], base_meta_key.Encode(), &meta_value); + if (s.ok() && !ExpectedMetaValue(DataType::kHashes, meta_value)) { + if (ExpectedStale(meta_value)) { + s = Status::NotFound(); + } else { + return Status::InvalidArgument( + "WRONGTYPE, key: " + key.ToString() + ", expect type: " + + DataTypeStrings[static_cast(DataType::kHashes)] + ", get type: " + + DataTypeStrings[static_cast(GetMetaValueType(meta_value))]); + } + } + if (s.ok()) { + ParsedHashesMetaValue parsed_hashes_meta_value(&meta_value); + if (parsed_hashes_meta_value.IsStale()) { + return Status::NotFound("Stale"); + } else if (parsed_hashes_meta_value.Count() == 0) { + return Status::NotFound(); + } else { + version = parsed_hashes_meta_value.Version(); + HashesDataKey hashes_data_key(key, version, ""); + Slice prefix = hashes_data_key.EncodeSeekKey(); + KeyStatisticsDurationGuard guard(this, DataType::kHashes, key.ToString()); + auto iter = db_->NewIterator(read_options, handles_[kHashesDataCF]); + for (iter->Seek(prefix); iter->Valid() && iter->key().starts_with(prefix); iter->Next()) { + ParsedHashesDataKey parsed_hashes_data_key(iter->key()); + fields->push_back(parsed_hashes_data_key.field().ToString()); + } + delete iter; + } + } + return s; +} + +Status Redis::HLen(const Slice& key, int32_t* ret, std::string&& prefetch_meta) { + *ret = 0; + Status s; + std::string meta_value(std::move(prefetch_meta)); + + // meta_value is empty means no meta value get 
before, + // we should get meta first + if (meta_value.empty()) { + BaseMetaKey base_meta_key(key); + s = db_->Get(default_read_options_, handles_[kMetaCF], base_meta_key.Encode(), &meta_value); + if (s.ok() && !ExpectedMetaValue(DataType::kHashes, meta_value)) { + if (ExpectedStale(meta_value)) { + s = Status::NotFound(); + } else { + return Status::InvalidArgument( + "WRONGTYPE, key: " + key.ToString() + ", expect type: " + + DataTypeStrings[static_cast(DataType::kHashes)] + ", get type: " + + DataTypeStrings[static_cast(GetMetaValueType(meta_value))]); + } + } + } + if (s.ok()) { + ParsedHashesMetaValue parsed_hashes_meta_value(&meta_value); + if (parsed_hashes_meta_value.IsStale()) { + *ret = 0; + return Status::NotFound("Stale"); + } else if (parsed_hashes_meta_value.Count() == 0) { + return Status::NotFound(); + } else { + *ret = parsed_hashes_meta_value.Count(); + } + } else if (s.IsNotFound()) { + *ret = 0; + } + return s; +} + +Status Redis::HMGet(const Slice& key, const std::vector& fields, std::vector* vss) { + vss->clear(); + + uint64_t version = 0; + bool is_stale = false; + std::string value; + std::string meta_value; + rocksdb::ReadOptions read_options; + const rocksdb::Snapshot* snapshot; + ScopeSnapshot ss(db_, &snapshot); + read_options.snapshot = snapshot; + BaseMetaKey base_meta_key(key); + Status s = db_->Get(read_options, handles_[kMetaCF], base_meta_key.Encode(), &meta_value); + if (s.ok() && !ExpectedMetaValue(DataType::kHashes, meta_value)) { + if (ExpectedStale(meta_value)) { + s = Status::NotFound(); + } else { + return Status::InvalidArgument( + "WRONGTYPE, key: " + key.ToString() + ", expect type: " + + DataTypeStrings[static_cast(DataType::kHashes)] + ", get type: " + + DataTypeStrings[static_cast(GetMetaValueType(meta_value))]); + } + } + if (s.ok()) { + ParsedHashesMetaValue parsed_hashes_meta_value(&meta_value); + if ((is_stale = parsed_hashes_meta_value.IsStale()) || parsed_hashes_meta_value.Count() == 0) { + for (size_t idx = 0; idx < fields.size(); ++idx) { + vss->push_back({std::string(), Status::NotFound()}); + } + return Status::NotFound(is_stale ? 
"Stale" : ""); + } else { + version = parsed_hashes_meta_value.Version(); + for (const auto& field : fields) { + HashesDataKey hashes_data_key(key, version, field); + s = db_->Get(read_options, handles_[kHashesDataCF], hashes_data_key.Encode(), &value); + if (s.ok()) { + ParsedBaseDataValue parsed_internal_value(&value); + parsed_internal_value.StripSuffix(); + vss->push_back({value, Status::OK()}); + } else if (s.IsNotFound()) { + vss->push_back({std::string(), Status::NotFound()}); + } else { + vss->clear(); + return s; + } + } + } + return Status::OK(); + } else if (s.IsNotFound()) { + for (size_t idx = 0; idx < fields.size(); ++idx) { + vss->push_back({std::string(), Status::NotFound()}); + } + } + return s; +} + +Status Redis::HMSet(const Slice& key, const std::vector& fvs) { + uint32_t statistic = 0; + std::unordered_set fields; + std::vector filtered_fvs; + for (auto iter = fvs.rbegin(); iter != fvs.rend(); ++iter) { + std::string field = iter->field; + if (fields.find(field) == fields.end()) { + fields.insert(field); + filtered_fvs.push_back(*iter); + } + } + + rocksdb::WriteBatch batch; + ScopeRecordLock l(lock_mgr_, key); + + uint64_t version = 0; + std::string meta_value; + + BaseMetaKey base_meta_key(key); + Status s = db_->Get(default_read_options_, handles_[kMetaCF], base_meta_key.Encode(), &meta_value); + char meta_value_buf[4] = {0}; + if (s.ok() && !ExpectedMetaValue(DataType::kHashes, meta_value)) { + if (ExpectedStale(meta_value)) { + s = Status::NotFound(); + } else { + return Status::InvalidArgument( + "WRONGTYPE, key: " + key.ToString() + ", expect type: " + + DataTypeStrings[static_cast(DataType::kHashes)] + ", get type: " + + DataTypeStrings[static_cast(GetMetaValueType(meta_value))]); + } + } + if (s.ok()) { + ParsedHashesMetaValue parsed_hashes_meta_value(&meta_value); + if (parsed_hashes_meta_value.IsStale() || parsed_hashes_meta_value.Count() == 0) { + version = parsed_hashes_meta_value.InitialMetaValue(); + if (!parsed_hashes_meta_value.check_set_count(static_cast(filtered_fvs.size()))) { + return Status::InvalidArgument("hash size overflow"); + } + parsed_hashes_meta_value.SetCount(static_cast(filtered_fvs.size())); + batch.Put(handles_[kMetaCF], base_meta_key.Encode(), meta_value); + for (const auto& fv : filtered_fvs) { + HashesDataKey hashes_data_key(key, version, fv.field); + BaseDataValue inter_value(fv.value); + batch.Put(handles_[kHashesDataCF], hashes_data_key.Encode(), inter_value.Encode()); + } + } else { + int32_t count = 0; + std::string data_value; + version = parsed_hashes_meta_value.Version(); + for (const auto& fv : filtered_fvs) { + HashesDataKey hashes_data_key(key, version, fv.field); + BaseDataValue inter_value(fv.value); + s = db_->Get(default_read_options_, handles_[kHashesDataCF], hashes_data_key.Encode(), &data_value); + if (s.ok()) { + statistic++; + batch.Put(handles_[kHashesDataCF], hashes_data_key.Encode(), inter_value.Encode()); + } else if (s.IsNotFound()) { + count++; + batch.Put(handles_[kHashesDataCF], hashes_data_key.Encode(), inter_value.Encode()); + } else { + return s; + } + } + if (!parsed_hashes_meta_value.CheckModifyCount(count)) { + return Status::InvalidArgument("hash size overflow"); + } + parsed_hashes_meta_value.ModifyCount(count); + batch.Put(handles_[kMetaCF], base_meta_key.Encode(), meta_value); + } + } else if (s.IsNotFound()) { + EncodeFixed32(meta_value_buf, filtered_fvs.size()); + HashesMetaValue hashes_meta_value(DataType::kHashes, Slice(meta_value_buf, 4)); + version = hashes_meta_value.UpdateVersion(); + 
batch.Put(handles_[kMetaCF], base_meta_key.Encode(), hashes_meta_value.Encode()); + for (const auto& fv : filtered_fvs) { + HashesDataKey hashes_data_key(key, version, fv.field); + BaseDataValue inter_value(fv.value); + batch.Put(handles_[kHashesDataCF], hashes_data_key.Encode(), inter_value.Encode()); + } + } + s = db_->Write(default_write_options_, &batch); + UpdateSpecificKeyStatistics(DataType::kHashes, key.ToString(), statistic); + return s; +} + +Status Redis::HSet(const Slice& key, const Slice& field, const Slice& value, int32_t* res) { + rocksdb::WriteBatch batch; + ScopeRecordLock l(lock_mgr_, key); + + uint64_t version = 0; + uint32_t statistic = 0; + std::string meta_value; + + BaseMetaKey base_meta_key(key); + Status s = db_->Get(default_read_options_, handles_[kMetaCF], base_meta_key.Encode(), &meta_value); + char meta_value_buf[4] = {0}; + if (s.ok() && !ExpectedMetaValue(DataType::kHashes, meta_value)) { + if (ExpectedStale(meta_value)) { + s = Status::NotFound(); + } else { + return Status::InvalidArgument( + "WRONGTYPE, key: " + key.ToString() + ", expect type: " + + DataTypeStrings[static_cast(DataType::kHashes)] + ", get type: " + + DataTypeStrings[static_cast(GetMetaValueType(meta_value))]); + } + } + if (s.ok()) { + ParsedHashesMetaValue parsed_hashes_meta_value(&meta_value); + if (parsed_hashes_meta_value.IsStale() || parsed_hashes_meta_value.Count() == 0) { + version = parsed_hashes_meta_value.InitialMetaValue(); + parsed_hashes_meta_value.SetCount(1); + batch.Put(handles_[kMetaCF], base_meta_key.Encode(), meta_value); + HashesDataKey data_key(key, version, field); + BaseDataValue internal_value(value); + batch.Put(handles_[kHashesDataCF], data_key.Encode(), internal_value.Encode()); + *res = 1; + } else { + version = parsed_hashes_meta_value.Version(); + std::string data_value; + HashesDataKey hashes_data_key(key, version, field); + s = db_->Get(default_read_options_, handles_[kHashesDataCF], hashes_data_key.Encode(), &data_value); + if (s.ok()) { + *res = 0; + if (data_value == value.ToString()) { + return Status::OK(); + } else { + BaseDataValue internal_value(value); + batch.Put(handles_[kHashesDataCF], hashes_data_key.Encode(), internal_value.Encode()); + statistic++; + } + } else if (s.IsNotFound()) { + if (!parsed_hashes_meta_value.CheckModifyCount(1)) { + return Status::InvalidArgument("hash size overflow"); + } + parsed_hashes_meta_value.ModifyCount(1); + BaseDataValue internal_value(value); + batch.Put(handles_[kMetaCF], base_meta_key.Encode(), meta_value); + batch.Put(handles_[kHashesDataCF], hashes_data_key.Encode(), internal_value.Encode()); + *res = 1; + } else { + return s; + } + } + } else if (s.IsNotFound()) { + EncodeFixed32(meta_value_buf, 1); + HashesMetaValue hashes_meta_value(DataType::kHashes, Slice(meta_value_buf, 4)); + version = hashes_meta_value.UpdateVersion(); + batch.Put(handles_[kMetaCF], base_meta_key.Encode(), hashes_meta_value.Encode()); + HashesDataKey data_key(key, version, field); + BaseDataValue internal_value(value); + batch.Put(handles_[kHashesDataCF], data_key.Encode(), internal_value.Encode()); + *res = 1; + } else { + return s; + } + s = db_->Write(default_write_options_, &batch); + UpdateSpecificKeyStatistics(DataType::kHashes, key.ToString(), statistic); + return s; +} + +Status Redis::HSetnx(const Slice& key, const Slice& field, const Slice& value, int32_t* ret) { + rocksdb::WriteBatch batch; + ScopeRecordLock l(lock_mgr_, key); + + uint64_t version = 0; + std::string meta_value; + + BaseMetaKey base_meta_key(key); + 
BaseDataValue internal_value(value); + Status s = db_->Get(default_read_options_, handles_[kMetaCF], base_meta_key.Encode(), &meta_value); + char meta_value_buf[4] = {0}; + if (s.ok() && !ExpectedMetaValue(DataType::kHashes, meta_value)) { + if (ExpectedStale(meta_value)) { + s = Status::NotFound(); + } else { + return Status::InvalidArgument( + "WRONGTYPE, key: " + key.ToString() + ", expect type: " + + DataTypeStrings[static_cast(DataType::kHashes)] + ", get type: " + + DataTypeStrings[static_cast(GetMetaValueType(meta_value))]); + } + } + if (s.ok()) { + ParsedHashesMetaValue parsed_hashes_meta_value(&meta_value); + if (parsed_hashes_meta_value.IsStale() || parsed_hashes_meta_value.Count() == 0) { + version = parsed_hashes_meta_value.InitialMetaValue(); + parsed_hashes_meta_value.SetCount(1); + batch.Put(handles_[kMetaCF], base_meta_key.Encode(), meta_value); + HashesDataKey hashes_data_key(key, version, field); + batch.Put(handles_[kHashesDataCF], hashes_data_key.Encode(), internal_value.Encode()); + *ret = 1; + } else { + version = parsed_hashes_meta_value.Version(); + HashesDataKey hashes_data_key(key, version, field); + std::string data_value; + s = db_->Get(default_read_options_, handles_[kHashesDataCF], hashes_data_key.Encode(), &data_value); + if (s.ok()) { + *ret = 0; + } else if (s.IsNotFound()) { + if (!parsed_hashes_meta_value.CheckModifyCount(1)) { + return Status::InvalidArgument("hash size overflow"); + } + parsed_hashes_meta_value.ModifyCount(1); + batch.Put(handles_[kMetaCF], base_meta_key.Encode(), meta_value); + batch.Put(handles_[kHashesDataCF], hashes_data_key.Encode(), internal_value.Encode()); + *ret = 1; + } else { + return s; + } + } + } else if (s.IsNotFound()) { + EncodeFixed32(meta_value_buf, 1); + HashesMetaValue hashes_meta_value(DataType::kHashes, Slice(meta_value_buf, 4)); + version = hashes_meta_value.UpdateVersion(); + batch.Put(handles_[kMetaCF], base_meta_key.Encode(), hashes_meta_value.Encode()); + HashesDataKey hashes_data_key(key, version, field); + batch.Put(handles_[kHashesDataCF], hashes_data_key.Encode(), internal_value.Encode()); + *ret = 1; + } else { + return s; + } + return db_->Write(default_write_options_, &batch); +} + +Status Redis::HVals(const Slice& key, std::vector* values) { + rocksdb::ReadOptions read_options; + const rocksdb::Snapshot* snapshot; + + std::string meta_value; + uint64_t version = 0; + ScopeSnapshot ss(db_, &snapshot); + read_options.snapshot = snapshot; + + BaseMetaKey base_meta_key(key); + Status s = db_->Get(read_options, handles_[kMetaCF], base_meta_key.Encode(), &meta_value); + if (s.ok() && !ExpectedMetaValue(DataType::kHashes, meta_value)) { + if (ExpectedStale(meta_value)) { + s = Status::NotFound(); + } else { + return Status::InvalidArgument( + "WRONGTYPE, key: " + key.ToString() + ", expect type: " + + DataTypeStrings[static_cast(DataType::kHashes)] + ", get type: " + + DataTypeStrings[static_cast(GetMetaValueType(meta_value))]); + } + } + if (s.ok()) { + ParsedHashesMetaValue parsed_hashes_meta_value(&meta_value); + if (parsed_hashes_meta_value.IsStale()) { + return Status::NotFound("Stale"); + } else if (parsed_hashes_meta_value.Count() == 0) { + return Status::NotFound(); + } else { + version = parsed_hashes_meta_value.Version(); + HashesDataKey hashes_data_key(key, version, ""); + Slice prefix = hashes_data_key.EncodeSeekKey(); + KeyStatisticsDurationGuard guard(this, DataType::kHashes, key.ToString()); + auto iter = db_->NewIterator(read_options, handles_[kHashesDataCF]); + for (iter->Seek(prefix); 
iter->Valid() && iter->key().starts_with(prefix); iter->Next()) { + ParsedBaseDataValue parsed_internal_value(iter->value()); + values->push_back(parsed_internal_value.UserValue().ToString()); + } + delete iter; + } + } + return s; +} + +Status Redis::HStrlen(const Slice& key, const Slice& field, int32_t* len) { + std::string value; + Status s = HGet(key, field, &value); + if (s.ok()) { + *len = static_cast(value.size()); + } else { + *len = 0; + } + return s; +} + +Status Redis::HScan(const Slice& key, int64_t cursor, const std::string& pattern, int64_t count, + std::vector* field_values, int64_t* next_cursor) { + *next_cursor = 0; + field_values->clear(); + if (cursor < 0) { + *next_cursor = 0; + return Status::OK(); + } + + int64_t rest = count; + int64_t step_length = count; + rocksdb::ReadOptions read_options; + const rocksdb::Snapshot* snapshot; + + std::string meta_value; + ScopeSnapshot ss(db_, &snapshot); + read_options.snapshot = snapshot; + + BaseMetaKey base_meta_key(key); + Status s = db_->Get(read_options, handles_[kMetaCF], base_meta_key.Encode(), &meta_value); + if (s.ok() && !ExpectedMetaValue(DataType::kHashes, meta_value)) { + if (ExpectedStale(meta_value)) { + s = Status::NotFound(); + } else { + return Status::InvalidArgument( + "WRONGTYPE, key: " + key.ToString() + ", expect type: " + + DataTypeStrings[static_cast(DataType::kHashes)] + ", get type: " + + DataTypeStrings[static_cast(GetMetaValueType(meta_value))]); + } + } + if (s.ok()) { + ParsedHashesMetaValue parsed_hashes_meta_value(&meta_value); + if (parsed_hashes_meta_value.IsStale() || parsed_hashes_meta_value.Count() == 0) { + *next_cursor = 0; + return Status::NotFound(); + } else { + std::string sub_field; + std::string start_point; + uint64_t version = parsed_hashes_meta_value.Version(); + s = GetScanStartPoint(DataType::kHashes, key, pattern, cursor, &start_point); + if (s.IsNotFound()) { + cursor = 0; + if (isTailWildcard(pattern)) { + start_point = pattern.substr(0, pattern.size() - 1); + } + } + if (isTailWildcard(pattern)) { + sub_field = pattern.substr(0, pattern.size() - 1); + } + + HashesDataKey hashes_data_prefix(key, version, sub_field); + HashesDataKey hashes_start_data_key(key, version, start_point); + std::string prefix = hashes_data_prefix.EncodeSeekKey().ToString(); + KeyStatisticsDurationGuard guard(this, DataType::kHashes, key.ToString()); + rocksdb::Iterator* iter = db_->NewIterator(read_options, handles_[kHashesDataCF]); + for (iter->Seek(hashes_start_data_key.Encode()); iter->Valid() && rest > 0 && iter->key().starts_with(prefix); + iter->Next()) { + ParsedHashesDataKey parsed_hashes_data_key(iter->key()); + std::string field = parsed_hashes_data_key.field().ToString(); + if (StringMatch(pattern.data(), pattern.size(), field.data(), field.size(), 0) != 0) { + ParsedBaseDataValue parsed_internal_value(iter->value()); + field_values->emplace_back(field, parsed_internal_value.UserValue().ToString()); + } + rest--; + } + + if (iter->Valid() && (iter->key().compare(prefix) <= 0 || iter->key().starts_with(prefix))) { + *next_cursor = cursor + step_length; + ParsedHashesDataKey parsed_hashes_data_key(iter->key()); + std::string next_field = parsed_hashes_data_key.field().ToString(); + StoreScanNextPoint(DataType::kHashes, key, pattern, *next_cursor, next_field); + } else { + *next_cursor = 0; + } + delete iter; + } + } else { + *next_cursor = 0; + return s; + } + return Status::OK(); +} + +Status Redis::HScanx(const Slice& key, const std::string& start_field, const std::string& pattern, int64_t 
count, + std::vector* field_values, std::string* next_field) { + next_field->clear(); + field_values->clear(); + + int64_t rest = count; + std::string meta_value; + rocksdb::ReadOptions read_options; + const rocksdb::Snapshot* snapshot; + ScopeSnapshot ss(db_, &snapshot); + read_options.snapshot = snapshot; + + BaseMetaKey base_meta_key(key); + Status s = db_->Get(read_options, handles_[kMetaCF], base_meta_key.Encode(), &meta_value); + if (s.ok() && !ExpectedMetaValue(DataType::kHashes, meta_value)) { + if (ExpectedStale(meta_value)) { + s = Status::NotFound(); + } else { + return Status::InvalidArgument( + "WRONGTYPE, key: " + key.ToString() + ", expect type: " + + DataTypeStrings[static_cast(DataType::kHashes)] + ", get type: " + + DataTypeStrings[static_cast(GetMetaValueType(meta_value))]); + } + } + if (s.ok()) { + ParsedHashesMetaValue parsed_hashes_meta_value(&meta_value); + if (parsed_hashes_meta_value.IsStale() || parsed_hashes_meta_value.Count() == 0) { + *next_field = ""; + return Status::NotFound(); + } else { + uint64_t version = parsed_hashes_meta_value.Version(); + HashesDataKey hashes_data_prefix(key, version, Slice()); + HashesDataKey hashes_start_data_key(key, version, start_field); + std::string prefix = hashes_data_prefix.EncodeSeekKey().ToString(); + KeyStatisticsDurationGuard guard(this, DataType::kHashes, key.ToString()); + rocksdb::Iterator* iter = db_->NewIterator(read_options, handles_[kHashesDataCF]); + for (iter->Seek(hashes_start_data_key.Encode()); iter->Valid() && rest > 0 && iter->key().starts_with(prefix); + iter->Next()) { + ParsedHashesDataKey parsed_hashes_data_key(iter->key()); + std::string field = parsed_hashes_data_key.field().ToString(); + if (StringMatch(pattern.data(), pattern.size(), field.data(), field.size(), 0) != 0) { + ParsedBaseDataValue parsed_value(iter->value()); + field_values->emplace_back(field, parsed_value.UserValue().ToString()); + } + rest--; + } + + if (iter->Valid() && iter->key().starts_with(prefix)) { + ParsedHashesDataKey parsed_hashes_data_key(iter->key()); + *next_field = parsed_hashes_data_key.field().ToString(); + } else { + *next_field = ""; + } + delete iter; + } + } else { + *next_field = ""; + return s; + } + return Status::OK(); +} + +Status Redis::PKHScanRange(const Slice& key, const Slice& field_start, const std::string& field_end, + const Slice& pattern, int32_t limit, std::vector* field_values, + std::string* next_field) { + next_field->clear(); + field_values->clear(); + + int64_t remain = limit; + std::string meta_value; + rocksdb::ReadOptions read_options; + const rocksdb::Snapshot* snapshot; + ScopeSnapshot ss(db_, &snapshot); + read_options.snapshot = snapshot; + + bool start_no_limit = field_start.compare("") == 0; + bool end_no_limit = field_end.empty(); + + if (!start_no_limit && !end_no_limit && (field_start.compare(field_end) > 0)) { + return Status::InvalidArgument("error in given range"); + } + + BaseMetaKey base_meta_key(key); + Status s = db_->Get(read_options, handles_[kMetaCF], base_meta_key.Encode(), &meta_value); + if (s.ok() && !ExpectedMetaValue(DataType::kHashes, meta_value)) { + if (ExpectedStale(meta_value)) { + s = Status::NotFound(); + } else { + return Status::InvalidArgument( + "WRONGTYPE, key: " + key.ToString() + ", expect type: " + + DataTypeStrings[static_cast(DataType::kHashes)] + ", get type: " + + DataTypeStrings[static_cast(GetMetaValueType(meta_value))]); + } + } + if (s.ok()) { + ParsedHashesMetaValue parsed_hashes_meta_value(&meta_value); + if 
(parsed_hashes_meta_value.IsStale() || parsed_hashes_meta_value.Count() == 0) { + return Status::NotFound(); + } else { + uint64_t version = parsed_hashes_meta_value.Version(); + HashesDataKey hashes_data_prefix(key, version, Slice()); + HashesDataKey hashes_start_data_key(key, version, field_start); + std::string prefix = hashes_data_prefix.EncodeSeekKey().ToString(); + KeyStatisticsDurationGuard guard(this, DataType::kHashes, key.ToString()); + rocksdb::Iterator* iter = db_->NewIterator(read_options, handles_[kHashesDataCF]); + for (iter->Seek(start_no_limit ? prefix : hashes_start_data_key.Encode()); + iter->Valid() && remain > 0 && iter->key().starts_with(prefix); iter->Next()) { + ParsedHashesDataKey parsed_hashes_data_key(iter->key()); + std::string field = parsed_hashes_data_key.field().ToString(); + if (!end_no_limit && field.compare(field_end) > 0) { + break; + } + if (StringMatch(pattern.data(), pattern.size(), field.data(), field.size(), 0) != 0) { + ParsedBaseDataValue parsed_internal_value(iter->value()); + field_values->push_back({field, parsed_internal_value.UserValue().ToString()}); + } + remain--; + } + + if (iter->Valid() && iter->key().starts_with(prefix)) { + ParsedHashesDataKey parsed_hashes_data_key(iter->key()); + if (end_no_limit || parsed_hashes_data_key.field().compare(field_end) <= 0) { + *next_field = parsed_hashes_data_key.field().ToString(); + } + } + delete iter; + } + } else { + return s; + } + return Status::OK(); +} + +Status Redis::PKHRScanRange(const Slice& key, const Slice& field_start, const std::string& field_end, + const Slice& pattern, int32_t limit, std::vector* field_values, + std::string* next_field) { + next_field->clear(); + field_values->clear(); + + int64_t remain = limit; + std::string meta_value; + rocksdb::ReadOptions read_options; + const rocksdb::Snapshot* snapshot; + ScopeSnapshot ss(db_, &snapshot); + read_options.snapshot = snapshot; + + bool start_no_limit = field_start.compare("") == 0; + bool end_no_limit = field_end.empty(); + + if (!start_no_limit && !end_no_limit && (field_start.compare(field_end) < 0)) { + return Status::InvalidArgument("error in given range"); + } + + BaseMetaKey base_meta_key(key); + Status s = db_->Get(read_options, handles_[kMetaCF], base_meta_key.Encode(), &meta_value); + if (s.ok() && !ExpectedMetaValue(DataType::kHashes, meta_value)) { + if (ExpectedStale(meta_value)) { + s = Status::NotFound(); + } else { + return Status::InvalidArgument( + "WRONGTYPE, key: " + key.ToString() + ", expect type: " + + DataTypeStrings[static_cast(DataType::kHashes)] + ", get type: " + + DataTypeStrings[static_cast(GetMetaValueType(meta_value))]); + } + } + if (s.ok()) { + ParsedHashesMetaValue parsed_hashes_meta_value(&meta_value); + if (parsed_hashes_meta_value.IsStale() || parsed_hashes_meta_value.Count() == 0) { + return Status::NotFound(); + } else { + uint64_t version = parsed_hashes_meta_value.Version(); + uint64_t start_key_version = start_no_limit ? version + 1 : version; + std::string start_key_field = start_no_limit ? 
"" : field_start.ToString(); + HashesDataKey hashes_data_prefix(key, version, Slice()); + HashesDataKey hashes_start_data_key(key, start_key_version, start_key_field); + std::string prefix = hashes_data_prefix.EncodeSeekKey().ToString(); + KeyStatisticsDurationGuard guard(this, DataType::kHashes, key.ToString()); + rocksdb::Iterator* iter = db_->NewIterator(read_options, handles_[kHashesDataCF]); + for (iter->SeekForPrev(hashes_start_data_key.Encode().ToString()); + iter->Valid() && remain > 0 && iter->key().starts_with(prefix); iter->Prev()) { + ParsedHashesDataKey parsed_hashes_data_key(iter->key()); + std::string field = parsed_hashes_data_key.field().ToString(); + if (!end_no_limit && field.compare(field_end) < 0) { + break; + } + if (StringMatch(pattern.data(), pattern.size(), field.data(), field.size(), 0) != 0) { + ParsedBaseDataValue parsed_value(iter->value()); + field_values->push_back({field, parsed_value.UserValue().ToString()}); + } + remain--; + } + + if (iter->Valid() && iter->key().starts_with(prefix)) { + ParsedHashesDataKey parsed_hashes_data_key(iter->key()); + if (end_no_limit || parsed_hashes_data_key.field().compare(field_end) >= 0) { + *next_field = parsed_hashes_data_key.field().ToString(); + } + } + delete iter; + } + } else { + return s; + } + return Status::OK(); +} + +Status Redis::HashesExpire(const Slice& key, int64_t ttl_millsec, std::string&& prefetch_meta) { + std::string meta_value(std::move(prefetch_meta)); + ScopeRecordLock l(lock_mgr_, key); + BaseMetaKey base_meta_key(key); + Status s; + + // meta_value is empty means no meta value get before, + // we should get meta first + if (meta_value.empty()) { + s = db_->Get(default_read_options_, handles_[kMetaCF], base_meta_key.Encode(), &meta_value); + if (s.ok() && !ExpectedMetaValue(DataType::kHashes, meta_value)) { + if (ExpectedStale(meta_value)) { + s = Status::NotFound(); + } else { + return Status::InvalidArgument( + "WRONGTYPE, key: " + key.ToString() + ", expect type: " + + DataTypeStrings[static_cast(DataType::kHashes)] + ", get type: " + + DataTypeStrings[static_cast(GetMetaValueType(meta_value))]); + } + } + } + if (s.ok()) { + ParsedHashesMetaValue parsed_hashes_meta_value(&meta_value); + if (parsed_hashes_meta_value.IsStale()) { + return Status::NotFound("Stale"); + } else if (parsed_hashes_meta_value.Count() == 0) { + return Status::NotFound(); + } + + if (ttl_millsec > 0) { + parsed_hashes_meta_value.SetRelativeTimestamp(ttl_millsec); + s = db_->Put(default_write_options_, handles_[kMetaCF], base_meta_key.Encode(), meta_value); + } else { + parsed_hashes_meta_value.InitialMetaValue(); + s = db_->Put(default_write_options_, handles_[kMetaCF], base_meta_key.Encode(), meta_value); + } + } + return s; +} + +Status Redis::HashesDel(const Slice& key, std::string&& prefetch_meta) { + std::string meta_value(std::move(prefetch_meta)); + ScopeRecordLock l(lock_mgr_, key); + BaseMetaKey base_meta_key(key); + Status s; + + // meta_value is empty means no meta value get before, + // we should get meta first + if (meta_value.empty()) { + s = db_->Get(default_read_options_, handles_[kMetaCF], base_meta_key.Encode(), &meta_value); + if (s.ok() && !ExpectedMetaValue(DataType::kHashes, meta_value)) { + if (ExpectedStale(meta_value)) { + s = Status::NotFound(); + } else { + return Status::InvalidArgument( + "WRONGTYPE, key: " + key.ToString() + ", expect type: " + + DataTypeStrings[static_cast(DataType::kHashes)] + ", get type: " + + DataTypeStrings[static_cast(GetMetaValueType(meta_value))]); + } + } + } + if 
(s.ok()) { + ParsedHashesMetaValue parsed_hashes_meta_value(&meta_value); + if (parsed_hashes_meta_value.IsStale()) { + return Status::NotFound("Stale"); + } else if (parsed_hashes_meta_value.Count() == 0) { + return Status::NotFound(); + } else { + uint32_t statistic = parsed_hashes_meta_value.Count(); + parsed_hashes_meta_value.InitialMetaValue(); + s = db_->Put(default_write_options_, handles_[kMetaCF], base_meta_key.Encode(), meta_value); + UpdateSpecificKeyStatistics(DataType::kHashes, key.ToString(), statistic); + } + } + return s; +} + +Status Redis::HashesExpireat(const Slice& key, int64_t timestamp_millsec, std::string&& prefetch_meta) { + std::string meta_value(std::move(prefetch_meta)); + ScopeRecordLock l(lock_mgr_, key); + BaseMetaKey base_meta_key(key); + Status s; + + // meta_value is empty means no meta value get before, + // we should get meta first + if (meta_value.empty()) { + s = db_->Get(default_read_options_, handles_[kMetaCF], base_meta_key.Encode(), &meta_value); + if (s.ok() && !ExpectedMetaValue(DataType::kHashes, meta_value)) { + if (ExpectedStale(meta_value)) { + s = Status::NotFound(); + } else { + return Status::InvalidArgument( + "WRONGTYPE, key: " + key.ToString() + ", expect type: " + + DataTypeStrings[static_cast(DataType::kHashes)] + ", get type: " + + DataTypeStrings[static_cast(GetMetaValueType(meta_value))]); + } + } + } + if (s.ok()) { + ParsedHashesMetaValue parsed_hashes_meta_value(&meta_value); + if (parsed_hashes_meta_value.IsStale()) { + return Status::NotFound("Stale"); + } else if (parsed_hashes_meta_value.Count() == 0) { + return Status::NotFound(); + } else { + if (timestamp_millsec > 0) { + parsed_hashes_meta_value.SetEtime(static_cast(timestamp_millsec)); + } else { + parsed_hashes_meta_value.InitialMetaValue(); + } + s = db_->Put(default_write_options_, handles_[kMetaCF], base_meta_key.Encode(), meta_value); + } + } + return s; +} + +Status Redis::HashesPersist(const Slice& key, std::string&& prefetch_meta) { + std::string meta_value(std::move(prefetch_meta)); + ScopeRecordLock l(lock_mgr_, key); + BaseMetaKey base_meta_key(key); + Status s; + + // meta_value is empty means no meta value get before, + // we should get meta first + if (meta_value.empty()) { + s = db_->Get(default_read_options_, handles_[kMetaCF], base_meta_key.Encode(), &meta_value); + if (s.ok() && !ExpectedMetaValue(DataType::kHashes, meta_value)) { + if (ExpectedStale(meta_value)) { + s = Status::NotFound(); + } else { + return Status::InvalidArgument( + "WRONGTYPE, key: " + key.ToString() + ", expect type: " + + DataTypeStrings[static_cast(DataType::kHashes)] + ", get type: " + + DataTypeStrings[static_cast(GetMetaValueType(meta_value))]); + } + } + } + if (s.ok()) { + ParsedHashesMetaValue parsed_hashes_meta_value(&meta_value); + if (parsed_hashes_meta_value.IsStale()) { + return Status::NotFound("Stale"); + } else if (parsed_hashes_meta_value.Count() == 0) { + return Status::NotFound(); + } else { + uint64_t timestamp = parsed_hashes_meta_value.Etime(); + if (timestamp == 0) { + return Status::NotFound("Not have an associated timeout"); + } else { + parsed_hashes_meta_value.SetEtime(0); + s = db_->Put(default_write_options_, handles_[kMetaCF], base_meta_key.Encode(), meta_value); + } + } + } + return s; +} + +Status Redis::HashesTTL(const Slice& key, int64_t* ttl_millsec, std::string&& prefetch_meta) { + std::string meta_value(std::move(prefetch_meta)); + Status s; + BaseMetaKey base_meta_key(key); + + // meta_value is empty means no meta value get before, + // we 
should get meta first + if (meta_value.empty()) { + s = db_->Get(default_read_options_, handles_[kMetaCF], base_meta_key.Encode(), &meta_value); + if (s.ok() && !ExpectedMetaValue(DataType::kHashes, meta_value)) { + if (ExpectedStale(meta_value)) { + s = Status::NotFound(); + } else { + return Status::InvalidArgument( + "WRONGTYPE, key: " + key.ToString() + ", expect type: " + + DataTypeStrings[static_cast(DataType::kHashes)] + ", get type: " + + DataTypeStrings[static_cast(GetMetaValueType(meta_value))]); + } + } + } + if (s.ok()) { + ParsedHashesMetaValue parsed_hashes_meta_value(&meta_value); + if (parsed_hashes_meta_value.IsStale()) { + *ttl_millsec = -2; + return Status::NotFound("Stale"); + } else if (parsed_hashes_meta_value.Count() == 0) { + *ttl_millsec = -2; + return Status::NotFound(); + } else { + *ttl_millsec = parsed_hashes_meta_value.Etime(); + if (*ttl_millsec == 0) { + *ttl_millsec = -1; + } else { + pstd::TimeType curtime = pstd::NowMillis(); + *ttl_millsec = *ttl_millsec - curtime >= 0 ? *ttl_millsec - curtime : -2; + } + } + } else if (s.IsNotFound()) { + *ttl_millsec = -2; + } + return s; +} + +void Redis::ScanHashes() { + rocksdb::ReadOptions iterator_options; + const rocksdb::Snapshot* snapshot; + ScopeSnapshot ss(db_, &snapshot); + iterator_options.snapshot = snapshot; + iterator_options.fill_cache = false; + auto current_time = static_cast(time(nullptr)); + + LOG(INFO) << "***************" << "rocksdb instance: " << index_ << " Hashes Meta Data***************"; + auto meta_iter = db_->NewIterator(iterator_options, handles_[kMetaCF]); + for (meta_iter->SeekToFirst(); meta_iter->Valid(); meta_iter->Next()) { + if (!ExpectedMetaValue(DataType::kHashes, meta_iter->value().ToString())) { + continue; + } + ParsedHashesMetaValue parsed_hashes_meta_value(meta_iter->value()); + int32_t survival_time = 0; + if (parsed_hashes_meta_value.Etime() != 0) { + survival_time = parsed_hashes_meta_value.Etime() > current_time ? parsed_hashes_meta_value.Etime() - current_time : -1; + } + ParsedBaseMetaKey parsed_meta_key(meta_iter->key()); + + LOG(INFO) << fmt::format("[key : {:<30}] [count : {:<10}] [timestamp : {:<10}] [version : {}] [survival_time : {}]", + parsed_meta_key.Key().ToString(), parsed_hashes_meta_value.Count(), + parsed_hashes_meta_value.Etime(), parsed_hashes_meta_value.Version(), survival_time); + } + delete meta_iter; + + LOG(INFO) << "***************Hashes Field Data***************"; + auto field_iter = db_->NewIterator(iterator_options, handles_[kHashesDataCF]); + for (field_iter->SeekToFirst(); field_iter->Valid(); field_iter->Next()) { + + ParsedHashesDataKey parsed_hashes_data_key(field_iter->key()); + ParsedBaseDataValue parsed_internal_value(field_iter->value()); + + LOG(INFO) << fmt::format("[key : {:<30}] [field : {:<20}] [value : {:<20}] [version : {}]", + parsed_hashes_data_key.Key().ToString(), parsed_hashes_data_key.field().ToString(), + parsed_internal_value.UserValue().ToString(), parsed_hashes_data_key.Version()); + } + delete field_iter; +} + +} // namespace storage diff --git a/tools/pika_migrate/src/storage/src/redis_hyperloglog.cc b/tools/pika_migrate/src/storage/src/redis_hyperloglog.cc new file mode 100644 index 0000000000..c9cd1dd4c1 --- /dev/null +++ b/tools/pika_migrate/src/storage/src/redis_hyperloglog.cc @@ -0,0 +1,173 @@ +// Copyright (c) 2017-present, Qihoo, Inc. All rights reserved. +// This source code is licensed under the BSD-style license found in the +// LICENSE file in the root directory of this source tree. 
An additional grant
+// of patent rights can be found in the PATENTS file in the same directory.
+
+
+#include <algorithm>
+#include <cmath>
+#include <cstring>
+#include <string>
+
+#include "src/storage_murmur3.h"
+#include "storage/storage_define.h"
+#include "src/redis.h"
+#include "src/mutex.h"
+#include "src/redis_hyperloglog.h"
+#include "src/scope_record_lock.h"
+
+namespace storage {
+
+const int32_t HLL_HASH_SEED = 313;
+
+HyperLogLog::HyperLogLog(uint8_t precision, std::string origin_register) {
+  b_ = precision;
+  m_ = 1 << precision;
+  alpha_ = Alpha();
+  register_ = std::make_unique<char[]>(m_);
+  for (uint32_t i = 0; i < m_; ++i) {
+    register_[i] = 0;
+  }
+  if (!origin_register.empty()) {
+    for (uint32_t i = 0; i < m_; ++i) {
+      register_[i] = origin_register[i];
+    }
+  }
+}
+
+HyperLogLog::~HyperLogLog() = default;
+
+std::string HyperLogLog::Add(const char* value, uint32_t len) {
+  uint32_t hash_value;
+  MurmurHash3_x86_32(value, static_cast<int>(len), HLL_HASH_SEED, static_cast<void*>(&hash_value));
+  uint32_t index = hash_value & ((1 << b_) - 1);
+  uint8_t rank = Nctz((hash_value >> b_), static_cast<int>(32 - b_));
+  if (rank > register_[index]) {
+    register_[index] = static_cast<char>(rank);
+  }
+  std::string result(m_, 0);
+  for (uint32_t i = 0; i < m_; ++i) {
+    result[i] = register_[i];
+  }
+  return result;
+}
+
+double HyperLogLog::Estimate() const {
+  double estimate = FirstEstimate();
+  if (estimate <= 2.5 * m_) {
+    uint32_t zeros = CountZero();
+    if (zeros != 0) {
+      estimate = m_ * log(static_cast<double>(m_) / zeros);
+    }
+  } else if (estimate > pow(2, 32) / 30.0) {
+    estimate = log1p(estimate * -1 / pow(2, 32)) * pow(2, 32) * -1;
+  }
+  return estimate;
+}
+
+double HyperLogLog::FirstEstimate() const {
+  double estimate;
+  double sum = 0.0;
+  for (uint32_t i = 0; i < m_; i++) {
+    sum += 1.0 / (1 << register_[i]);
+  }
+
+  estimate = alpha_ * m_ * m_ / sum;
+  return estimate;
+}
+
+double HyperLogLog::Alpha() const {
+  switch (m_) {
+    case 16:
+      return 0.673;
+    case 32:
+      return 0.697;
+    case 64:
+      return 0.709;
+    default:
+      return 0.7213 / (1 + 1.079 / m_);
+  }
+}
+
+uint32_t HyperLogLog::CountZero() const {
+  uint32_t count = 0;
+  for (uint32_t i = 0; i < m_; i++) {
+    if (register_[i] == 0) {
+      count++;
+    }
+  }
+  return count;
+}
+
+std::string HyperLogLog::Merge(const HyperLogLog& hll) {
+  if (m_ != hll.m_) {
+    // TODO(shq) the number of registers doesn't match
+  }
+  for (uint32_t r = 0; r < m_; r++) {
+    if (register_[r] < hll.register_[r]) {
+      register_[r] = static_cast<char>(register_[r] | hll.register_[r]);
+    }
+  }
+
+  std::string result(m_, 0);
+  for (uint32_t i = 0; i < m_; ++i) {
+    result[i] = register_[i];
+  }
+  return result;
+}
+
+// ::__builtin_ctz(x): the number of trailing '0' bits below the lowest '1' bit of x
+uint8_t HyperLogLog::Nctz(uint32_t x, int b) { return static_cast<uint8_t>(std::min(b, ::__builtin_ctz(x))) + 1; }
+
+bool IsHyperloglogObj(const std::string* internal_value_str) {
+  size_t kStringsValueSuffixLength = 2 * kTimestampLength + kSuffixReserveLength;
+  char reserve[16] = {0};
+  size_t offset = internal_value_str->size() - kStringsValueSuffixLength;
+  memcpy(reserve, internal_value_str->data() + offset, kSuffixReserveLength);
+
+  // if the first bit of the reserve field is 0 the object is a plain string;
+  // otherwise it is a hyperloglog
+  return (reserve[0] & hyperloglog_reserve_flag) != 0;
+}
+
+Status Redis::HyperloglogGet(const Slice& key, std::string* value) {
+  value->clear();
+
+  BaseKey base_key(key);
+  Status s = db_->Get(default_read_options_, base_key.Encode(), value);
+  std::string meta_value = *value;
+  if (!s.ok()) {
+    return s;
}
+  if (!ExpectedMetaValue(DataType::kStrings, meta_value)) {
+    if (ExpectedStale(meta_value)) {
+      s = Status::NotFound();
+    } else {
+      return Status::InvalidArgument("WRONGTYPE, key: " + key.ToString() +
+                                     ", expect type: hyperloglog, get type: " +
+                                     DataTypeStrings[static_cast<int>(GetMetaValueType(meta_value))]);
+    }
+  } else if (!IsHyperloglogObj(value)) {
+    return Status::InvalidArgument("WRONGTYPE, key: " + key.ToString() +
+                                   ", expect type: hyperloglog, get type: " +
+                                   DataTypeStrings[static_cast<int>(GetMetaValueType(meta_value))]);
+  } else {
+    ParsedStringsValue parsed_strings_value(value);
+    if (parsed_strings_value.IsStale()) {
+      value->clear();
+      return Status::NotFound("Stale");
+    } else {
+      parsed_strings_value.StripSuffix();
+    }
+  }
+  return s;
+}
+
+Status Redis::HyperloglogSet(const Slice& key, const Slice& value) {
+  HyperloglogValue hyperloglog_value(value);
+  ScopeRecordLock l(lock_mgr_, key);
+
+  BaseKey base_key(key);
+  return db_->Put(default_write_options_, base_key.Encode(), hyperloglog_value.Encode());
+}
+
+}  // namespace storage
\ No newline at end of file
diff --git a/tools/pika_migrate/src/storage/src/redis_hyperloglog.h b/tools/pika_migrate/src/storage/src/redis_hyperloglog.h
new file mode 100644
index 0000000000..b255580d5c
--- /dev/null
+++ b/tools/pika_migrate/src/storage/src/redis_hyperloglog.h
@@ -0,0 +1,39 @@
+// Copyright (c) 2017-present, Qihoo, Inc. All rights reserved.
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree. An additional grant
+// of patent rights can be found in the PATENTS file in the same directory.
+
+#ifndef SRC_REDIS_HYPERLOGLOG_H_
+#define SRC_REDIS_HYPERLOGLOG_H_
+
+#include <cstdint>
+#include <memory>
+#include <string>
+#include <vector>
+
+namespace storage {
+
+class HyperLogLog {
+ public:
+  HyperLogLog(uint8_t precision, std::string origin_register);
+  ~HyperLogLog();
+
+  double Estimate() const;
+  double FirstEstimate() const;
+  uint32_t CountZero() const;
+  double Alpha() const;
+  uint8_t Nctz(uint32_t x, int b);
+
+  std::string Add(const char* value, uint32_t len);
+  std::string Merge(const HyperLogLog& hll);
+
+ protected:
+  uint32_t m_ = 0;  // number of registers (1 << b_)
+  uint32_t b_ = 0;  // register index bit width (the precision)
+  double alpha_ = 0;
+  std::unique_ptr<char[]> register_;
+};
+
+}  // namespace storage
+
+#endif  // SRC_REDIS_HYPERLOGLOG_H_
diff --git a/tools/pika_migrate/src/storage/src/redis_lists.cc b/tools/pika_migrate/src/storage/src/redis_lists.cc
new file mode 100644
index 0000000000..cdf4ff122d
--- /dev/null
+++ b/tools/pika_migrate/src/storage/src/redis_lists.cc
@@ -0,0 +1,1343 @@
+// Copyright (c) 2017-present, Qihoo, Inc. All rights reserved.
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree. An additional grant
+// of patent rights can be found in the PATENTS file in the same directory.
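+
+// Layout sketch, inferred from the code in this file (the sentinel semantics of
+// the two indexes are an assumption): a list is a meta entry plus one data
+// entry per node, addressed by a monotonically growing internal index:
+//
+//   kMetaCF      : BaseMetaKey(user_key)                 -> ListsMetaValue
+//                  { count, version, etime, left_index, right_index }
+//   kListsDataCF : ListsDataKey(user_key, version, idx)  -> BaseDataValue
+//
+// left_index/right_index appear to be exclusive bounds, so live nodes occupy
+// (left_index, right_index), and LIndex below maps a user-visible index i to
+// an internal one roughly like:
+//
+//   uint64_t target = i >= 0 ? left_index + i + 1   // non-negative: from head
+//                            : right_index + i;     // negative: from tail
+//
+// which is why it checks left_index < target && target < right_index before
+// issuing the point Get on kListsDataCF.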
+ +#include + +#include +#include + +#include "pstd/include/pika_codis_slot.h" +#include "src/base_data_value_format.h" +#include "src/lists_filter.h" +#include "src/redis.h" +#include "src/scope_record_lock.h" +#include "src/scope_snapshot.h" +#include "storage/util.h" +#include "src/debug.h" + +namespace storage { +Status Redis::ScanListsKeyNum(KeyInfo* key_info) { + uint64_t keys = 0; + uint64_t expires = 0; + uint64_t ttl_sum = 0; + uint64_t invaild_keys = 0; + + rocksdb::ReadOptions iterator_options; + const rocksdb::Snapshot* snapshot; + ScopeSnapshot ss(db_, &snapshot); + iterator_options.snapshot = snapshot; + iterator_options.fill_cache = false; + + pstd::TimeType curtime = pstd::NowMillis(); + + rocksdb::Iterator* iter = db_->NewIterator(iterator_options, handles_[kMetaCF]); + for (iter->SeekToFirst(); iter->Valid(); iter->Next()) { + if (!ExpectedMetaValue(DataType::kLists, iter->value().ToString())) { + continue; + } + ParsedListsMetaValue parsed_lists_meta_value(iter->value()); + if (parsed_lists_meta_value.IsStale() || parsed_lists_meta_value.Count() == 0) { + invaild_keys++; + } else { + keys++; + if (!parsed_lists_meta_value.IsPermanentSurvival()) { + expires++; + ttl_sum += parsed_lists_meta_value.Etime() - curtime; + } + } + } + delete iter; + + key_info->keys = keys; + key_info->expires = expires; + key_info->avg_ttl = (expires != 0) ? ttl_sum / expires : 0; + key_info->invaild_keys = invaild_keys; + return Status::OK(); +} + +Status Redis::LIndex(const Slice& key, int64_t index, std::string* element) { + rocksdb::ReadOptions read_options; + const rocksdb::Snapshot* snapshot; + + ScopeSnapshot ss(db_, &snapshot); + read_options.snapshot = snapshot; + std::string meta_value; + + BaseMetaKey base_meta_key(key); + Status s = db_->Get(read_options, handles_[kMetaCF], base_meta_key.Encode(), &meta_value); + if (s.ok() && !ExpectedMetaValue(DataType::kLists, meta_value)) { + if (ExpectedStale(meta_value)) { + s = Status::NotFound(); + } else { + return Status::InvalidArgument( + "WRONGTYPE, key: " + key.ToString() + ", expect type: " + + DataTypeStrings[static_cast(DataType::kLists)] + ", get type: " + + DataTypeStrings[static_cast(GetMetaValueType(meta_value))]); + } + } + if (s.ok()) { + ParsedListsMetaValue parsed_lists_meta_value(&meta_value); + uint64_t version = parsed_lists_meta_value.Version(); + if (parsed_lists_meta_value.IsStale()) { + return Status::NotFound("Stale"); + } else if (parsed_lists_meta_value.Count() == 0) { + return Status::NotFound(); + } else { + uint64_t target_index = + index >= 0 ? 
parsed_lists_meta_value.LeftIndex() + index + 1 : parsed_lists_meta_value.RightIndex() + index; + if (parsed_lists_meta_value.LeftIndex() < target_index && target_index < parsed_lists_meta_value.RightIndex()) { + ListsDataKey lists_data_key(key, version, target_index); + s = db_->Get(read_options, handles_[kListsDataCF], lists_data_key.Encode(), element); + if (s.ok()) { + ParsedBaseDataValue parsed_value(element); + parsed_value.StripSuffix(); + } + } else { + return Status::NotFound(); + } + } + } + return s; +} + +Status Redis::LInsert(const Slice& key, const BeforeOrAfter& before_or_after, const std::string& pivot, + const std::string& value, int64_t* ret) { + *ret = 0; + rocksdb::WriteBatch batch; + ScopeRecordLock l(lock_mgr_, key); + std::string meta_value; + + BaseMetaKey base_meta_key(key); + Status s = db_->Get(default_read_options_, handles_[kMetaCF], base_meta_key.Encode(), &meta_value); + if (s.ok() && !ExpectedMetaValue(DataType::kLists, meta_value)) { + if (ExpectedStale(meta_value)) { + s = Status::NotFound(); + } else { + return Status::InvalidArgument( + "WRONGTYPE, key: " + key.ToString() + ", expect type: " + + DataTypeStrings[static_cast(DataType::kLists)] + ", get type: " + + DataTypeStrings[static_cast(GetMetaValueType(meta_value))]); + } + } + if (s.ok()) { + ParsedListsMetaValue parsed_lists_meta_value(&meta_value); + if (parsed_lists_meta_value.IsStale()) { + return Status::NotFound("Stale"); + } else if (parsed_lists_meta_value.Count() == 0) { + return Status::NotFound(); + } else { + bool find_pivot = false; + uint64_t pivot_index = 0; + uint64_t version = parsed_lists_meta_value.Version(); + uint64_t current_index = parsed_lists_meta_value.LeftIndex() + 1; + rocksdb::Iterator* iter = db_->NewIterator(default_read_options_, handles_[kListsDataCF]); + ListsDataKey start_data_key(key, version, current_index); + for (iter->Seek(start_data_key.Encode()); iter->Valid() && current_index < parsed_lists_meta_value.RightIndex(); + iter->Next(), current_index++) { + ParsedBaseDataValue parsed_value(iter->value()); + if (pivot.compare(parsed_value.UserValue().ToString()) == 0) { + find_pivot = true; + pivot_index = current_index; + break; + } + } + delete iter; + if (!find_pivot) { + *ret = -1; + return Status::NotFound(); + } else { + uint64_t target_index; + std::vector list_nodes; + uint64_t mid_index = parsed_lists_meta_value.LeftIndex() + + (parsed_lists_meta_value.RightIndex() - parsed_lists_meta_value.LeftIndex()) / 2; + if (pivot_index <= mid_index) { + target_index = (before_or_after == Before) ? 
pivot_index - 1 : pivot_index; + current_index = parsed_lists_meta_value.LeftIndex() + 1; + rocksdb::Iterator* first_half_iter = db_->NewIterator(default_read_options_, handles_[kListsDataCF]); + ListsDataKey start_data_key(key, version, current_index); + for (first_half_iter->Seek(start_data_key.Encode()); first_half_iter->Valid() && current_index <= pivot_index; + first_half_iter->Next(), current_index++) { + ParsedBaseDataValue parsed_value(first_half_iter->value()); + if (current_index == pivot_index) { + if (before_or_after == After) { + list_nodes.push_back(parsed_value.UserValue().ToString()); + } + break; + } + list_nodes.push_back(parsed_value.UserValue().ToString()); + } + delete first_half_iter; + + current_index = parsed_lists_meta_value.LeftIndex(); + for (const auto& node : list_nodes) { + ListsDataKey lists_data_key(key, version, current_index++); + BaseDataValue i_val(node); + batch.Put(handles_[kListsDataCF], lists_data_key.Encode(), i_val.Encode()); + } + parsed_lists_meta_value.ModifyLeftIndex(1); + } else { + target_index = (before_or_after == Before) ? pivot_index : pivot_index + 1; + current_index = pivot_index; + rocksdb::Iterator* after_half_iter = db_->NewIterator(default_read_options_, handles_[kListsDataCF]); + ListsDataKey start_data_key(key, version, current_index); + for (after_half_iter->Seek(start_data_key.Encode()); + after_half_iter->Valid() && current_index < parsed_lists_meta_value.RightIndex(); + after_half_iter->Next(), current_index++) { + if (current_index == pivot_index && before_or_after == BeforeOrAfter::After) { + continue; + } + ParsedBaseDataValue parsed_value(after_half_iter->value()); + list_nodes.push_back(parsed_value.UserValue().ToString()); + } + delete after_half_iter; + + current_index = target_index + 1; + for (const auto& node : list_nodes) { + ListsDataKey lists_data_key(key, version, current_index++); + BaseDataValue i_val(node); + batch.Put(handles_[kListsDataCF], lists_data_key.Encode(), i_val.Encode()); + } + parsed_lists_meta_value.ModifyRightIndex(1); + } + parsed_lists_meta_value.ModifyCount(1); + batch.Put(handles_[kMetaCF], base_meta_key.Encode(), meta_value); + ListsDataKey lists_target_key(key, version, target_index); + BaseDataValue i_val(value); + batch.Put(handles_[kListsDataCF], lists_target_key.Encode(), i_val.Encode()); + *ret = static_cast(parsed_lists_meta_value.Count()); + return db_->Write(default_write_options_, &batch); + } + } + } else if (s.IsNotFound()) { + *ret = 0; + } + return s; +} + +Status Redis::LLen(const Slice& key, uint64_t* len, std::string&& prefetch_meta) { + *len = 0; + Status s; + + // meta_value is empty means no meta value get before, + // we should get meta first + std::string meta_value(std::move(prefetch_meta)); + if (meta_value.empty()) { + BaseMetaKey base_meta_key(key); + s = db_->Get(default_read_options_, handles_[kMetaCF], base_meta_key.Encode(), &meta_value); + if (s.ok() && !ExpectedMetaValue(DataType::kLists, meta_value)) { + if (ExpectedStale(meta_value)) { + s = Status::NotFound(); + } else { + return Status::InvalidArgument( + "WRONGTYPE, key: " + key.ToString() + ", expect type: " + + DataTypeStrings[static_cast(DataType::kLists)] + ", get type: " + + DataTypeStrings[static_cast(GetMetaValueType(meta_value))]); + } + } + } + if (s.ok()) { + ParsedListsMetaValue parsed_lists_meta_value(&meta_value); + if (parsed_lists_meta_value.IsStale()) { + return Status::NotFound("Stale"); + } else if (parsed_lists_meta_value.Count() == 0) { + return Status::NotFound(); + } else { + *len = 
parsed_lists_meta_value.Count(); + return s; + } + } + return s; +} + +Status Redis::LPop(const Slice& key, int64_t count, std::vector* elements) { + uint32_t statistic = 0; + elements->clear(); + + rocksdb::WriteBatch batch; + ScopeRecordLock l(lock_mgr_, key); + + std::string meta_value; + + BaseMetaKey base_meta_key(key); + Status s = db_->Get(default_read_options_, handles_[kMetaCF], base_meta_key.Encode(), &meta_value); + if (s.ok() && !ExpectedMetaValue(DataType::kLists, meta_value)) { + if (ExpectedStale(meta_value)) { + s = Status::NotFound(); + } else { + return Status::InvalidArgument( + "WRONGTYPE, key: " + key.ToString() + ", expect type: " + + DataTypeStrings[static_cast(DataType::kLists)] + ", get type: " + + DataTypeStrings[static_cast(GetMetaValueType(meta_value))]); + } + } + if (s.ok()) { + ParsedListsMetaValue parsed_lists_meta_value(&meta_value); + if (parsed_lists_meta_value.IsStale()) { + return Status::NotFound("Stale"); + } else if (parsed_lists_meta_value.Count() == 0) { + return Status::NotFound(); + } else { + auto size = static_cast(parsed_lists_meta_value.Count()); + uint64_t version = parsed_lists_meta_value.Version(); + int32_t start_index = 0; + auto stop_index = static_cast(count<=size?count-1:size-1); + int32_t cur_index = 0; + ListsDataKey lists_data_key(key, version, parsed_lists_meta_value.LeftIndex()+1); + rocksdb::Iterator* iter = db_->NewIterator(default_read_options_, handles_[kListsDataCF]); + for (iter->Seek(lists_data_key.Encode()); iter->Valid() && cur_index <= stop_index; iter->Next(), ++cur_index) { + statistic++; + ParsedBaseDataValue parsed_base_data_value(iter->value()); + elements->push_back(parsed_base_data_value.UserValue().ToString()); + batch.Delete(handles_[kListsDataCF],iter->key()); + + parsed_lists_meta_value.ModifyCount(-1); + parsed_lists_meta_value.ModifyLeftIndex(-1); + } + batch.Put(handles_[kMetaCF], base_meta_key.Encode(), meta_value); + delete iter; + } + } + if (batch.Count() != 0U) { + s = db_->Write(default_write_options_, &batch); + if (s.ok()) { + batch.Clear(); + } + UpdateSpecificKeyStatistics(DataType::kLists, key.ToString(), statistic); + } + return s; +} + +Status Redis::LPush(const Slice& key, const std::vector& values, uint64_t* ret) { + *ret = 0; + rocksdb::WriteBatch batch; + ScopeRecordLock l(lock_mgr_, key); + + uint64_t index = 0; + uint64_t version = 0; + std::string meta_value; + + BaseMetaKey base_meta_key(key); + Status s = db_->Get(default_read_options_, handles_[kMetaCF], base_meta_key.Encode(), &meta_value); + if (s.ok() && !ExpectedMetaValue(DataType::kLists, meta_value)) { + if (ExpectedStale(meta_value)) { + s = Status::NotFound(); + } else { + return Status::InvalidArgument( + "WRONGTYPE, key: " + key.ToString() + ", expect type: " + + DataTypeStrings[static_cast(DataType::kLists)] + ", get type: " + + DataTypeStrings[static_cast(GetMetaValueType(meta_value))]); + } + } + if (s.ok()) { + ParsedListsMetaValue parsed_lists_meta_value(&meta_value); + if (parsed_lists_meta_value.IsStale() || parsed_lists_meta_value.Count() == 0) { + version = parsed_lists_meta_value.InitialMetaValue(); + } else { + version = parsed_lists_meta_value.Version(); + } + for (const auto& value : values) { + index = parsed_lists_meta_value.LeftIndex(); + parsed_lists_meta_value.ModifyLeftIndex(1); + parsed_lists_meta_value.ModifyCount(1); + ListsDataKey lists_data_key(key, version, index); + BaseDataValue i_val(value); + batch.Put(handles_[kListsDataCF], lists_data_key.Encode(), i_val.Encode()); + } + 
batch.Put(handles_[kMetaCF], base_meta_key.Encode(), meta_value); + *ret = parsed_lists_meta_value.Count(); + } else if (s.IsNotFound()) { + char str[8]; + EncodeFixed64(str, values.size()); + ListsMetaValue lists_meta_value(Slice(str, sizeof(uint64_t))); + version = lists_meta_value.UpdateVersion(); + for (const auto& value : values) { + index = lists_meta_value.LeftIndex(); + lists_meta_value.ModifyLeftIndex(1); + ListsDataKey lists_data_key(key, version, index); + BaseDataValue i_val(value); + batch.Put(handles_[kListsDataCF], lists_data_key.Encode(), i_val.Encode()); + } + batch.Put(handles_[kMetaCF], base_meta_key.Encode(), lists_meta_value.Encode()); + *ret = lists_meta_value.RightIndex() - lists_meta_value.LeftIndex() - 1; + } else { + return s; + } + return db_->Write(default_write_options_, &batch); +} + +Status Redis::LPushx(const Slice& key, const std::vector& values, uint64_t* len) { + *len = 0; + rocksdb::WriteBatch batch; + ScopeRecordLock l(lock_mgr_, key); + + std::string meta_value; + + BaseMetaKey base_meta_key(key); + Status s = db_->Get(default_read_options_, handles_[kMetaCF], base_meta_key.Encode(), &meta_value); + if (s.ok() && !ExpectedMetaValue(DataType::kLists, meta_value)) { + if (ExpectedStale(meta_value)) { + s = Status::NotFound(); + } else { + return Status::InvalidArgument( + "WRONGTYPE, key: " + key.ToString() + ", expect type: " + + DataTypeStrings[static_cast(DataType::kLists)] + ", get type: " + + DataTypeStrings[static_cast(GetMetaValueType(meta_value))]); + } + } + if (s.ok()) { + ParsedListsMetaValue parsed_lists_meta_value(&meta_value); + if (parsed_lists_meta_value.IsStale()) { + return Status::NotFound("Stale"); + } else if (parsed_lists_meta_value.Count() == 0) { + return Status::NotFound(); + } else { + uint64_t version = parsed_lists_meta_value.Version(); + for (const auto& value : values) { + uint64_t index = parsed_lists_meta_value.LeftIndex(); + parsed_lists_meta_value.ModifyCount(1); + parsed_lists_meta_value.ModifyLeftIndex(1); + ListsDataKey lists_data_key(key, version, index); + BaseDataValue i_val(value); + batch.Put(handles_[kListsDataCF], lists_data_key.Encode(), i_val.Encode()); + } + batch.Put(handles_[kMetaCF], base_meta_key.Encode(), meta_value); + *len = parsed_lists_meta_value.Count(); + return db_->Write(default_write_options_, &batch); + } + } + return s; +} + +Status Redis::LRange(const Slice& key, int64_t start, int64_t stop, std::vector* ret) { + rocksdb::ReadOptions read_options; + const rocksdb::Snapshot* snapshot; + + ScopeSnapshot ss(db_, &snapshot); + read_options.snapshot = snapshot; + + std::string meta_value; + BaseMetaKey base_meta_key(key); + Status s = db_->Get(read_options, handles_[kMetaCF], base_meta_key.Encode(), &meta_value); + if (s.ok() && !ExpectedMetaValue(DataType::kLists, meta_value)) { + if (ExpectedStale(meta_value)) { + s = Status::NotFound(); + } else { + return Status::InvalidArgument( + "WRONGTYPE, key: " + key.ToString() + ", expect type: " + + DataTypeStrings[static_cast(DataType::kLists)] + ", get type: " + + DataTypeStrings[static_cast(GetMetaValueType(meta_value))]); + } + } + if (s.ok()) { + ParsedListsMetaValue parsed_lists_meta_value(&meta_value); + if (parsed_lists_meta_value.IsStale()) { + return Status::NotFound("Stale"); + } else if (parsed_lists_meta_value.Count() == 0) { + return Status::NotFound(); + } else { + uint64_t version = parsed_lists_meta_value.Version(); + uint64_t origin_left_index = parsed_lists_meta_value.LeftIndex() + 1; + uint64_t origin_right_index = 
parsed_lists_meta_value.RightIndex() - 1; + uint64_t sublist_left_index = start >= 0 ? origin_left_index + start : origin_right_index + start + 1; + uint64_t sublist_right_index = stop >= 0 ? origin_left_index + stop : origin_right_index + stop + 1; + + if (sublist_left_index > sublist_right_index || sublist_left_index > origin_right_index || + sublist_right_index < origin_left_index) { + return Status::OK(); + } else { + if (sublist_left_index < origin_left_index) { + sublist_left_index = origin_left_index; + } + if (sublist_right_index > origin_right_index) { + sublist_right_index = origin_right_index; + } + rocksdb::Iterator* iter = db_->NewIterator(read_options, handles_[kListsDataCF]); + uint64_t current_index = sublist_left_index; + ListsDataKey start_data_key(key, version, current_index); + for (iter->Seek(start_data_key.Encode()); iter->Valid() && current_index <= sublist_right_index; + iter->Next(), current_index++) { + ParsedBaseDataValue parsed_value(iter->value()); + ret->push_back(parsed_value.UserValue().ToString()); + } + delete iter; + return Status::OK(); + } + } + } else { + return s; + } +} + +Status Redis::LRangeWithTTL(const Slice& key, int64_t start, int64_t stop, std::vector* ret, int64_t* ttl_millsec) { + rocksdb::ReadOptions read_options; + const rocksdb::Snapshot* snapshot; + + ScopeSnapshot ss(db_, &snapshot); + read_options.snapshot = snapshot; + + std::string meta_value; + BaseMetaKey base_meta_key(key); + Status s = db_->Get(read_options, handles_[kMetaCF], base_meta_key.Encode(), &meta_value); + if (s.ok() && !ExpectedMetaValue(DataType::kLists, meta_value)) { + if (ExpectedStale(meta_value)) { + s = Status::NotFound(); + } else { + return Status::InvalidArgument( + "WRONGTYPE, key: " + key.ToString() + ", expect type: " + + DataTypeStrings[static_cast(DataType::kLists)] + ", get type: " + + DataTypeStrings[static_cast(GetMetaValueType(meta_value))]); + } + } + if (s.ok()) { + ParsedListsMetaValue parsed_lists_meta_value(&meta_value); + if (parsed_lists_meta_value.Count() == 0) { + return Status::NotFound(); + } else if (parsed_lists_meta_value.IsStale()) { + return Status::NotFound("Stale"); + } else { + // ttl + *ttl_millsec = parsed_lists_meta_value.Etime(); + if (*ttl_millsec == 0) { + *ttl_millsec = -1; + } else { + pstd::TimeType curtime = pstd::NowMillis(); + *ttl_millsec = *ttl_millsec - curtime >= 0 ? *ttl_millsec - curtime : -2; + } + + uint64_t version = parsed_lists_meta_value.Version(); + uint64_t origin_left_index = parsed_lists_meta_value.LeftIndex() + 1; + uint64_t origin_right_index = parsed_lists_meta_value.RightIndex() - 1; + uint64_t sublist_left_index = start >= 0 ? origin_left_index + start : origin_right_index + start + 1; + uint64_t sublist_right_index = stop >= 0 ? 
origin_left_index + stop : origin_right_index + stop + 1; + + if (sublist_left_index > sublist_right_index + || sublist_left_index > origin_right_index + || sublist_right_index < origin_left_index) { + return Status::OK(); + } else { + if (sublist_left_index < origin_left_index) { + sublist_left_index = origin_left_index; + } + if (sublist_right_index > origin_right_index) { + sublist_right_index = origin_right_index; + } + rocksdb::Iterator* iter = db_->NewIterator(read_options, handles_[kListsDataCF]); + uint64_t current_index = sublist_left_index; + ListsDataKey start_data_key(key, version, current_index); + for (iter->Seek(start_data_key.Encode()); + iter->Valid() && current_index <= sublist_right_index; + iter->Next(), current_index++) { + ParsedBaseDataValue parsed_value(iter->value()); + ret->push_back(parsed_value.UserValue().ToString()); + } + delete iter; + return Status::OK(); + } + } + } else { + return s; + } +} + +Status Redis::LRem(const Slice& key, int64_t count, const Slice& value, uint64_t* ret) { + *ret = 0; + rocksdb::WriteBatch batch; + ScopeRecordLock l(lock_mgr_, key); + std::string meta_value; + + BaseMetaKey base_meta_key(key); + Status s = db_->Get(default_read_options_, handles_[kMetaCF], base_meta_key.Encode(), &meta_value); + if (s.ok() && !ExpectedMetaValue(DataType::kLists, meta_value)) { + if (ExpectedStale(meta_value)) { + s = Status::NotFound(); + } else { + return Status::InvalidArgument( + "WRONGTYPE, key: " + key.ToString() + ", expect type: " + + DataTypeStrings[static_cast(DataType::kLists)] + ", get type: " + + DataTypeStrings[static_cast(GetMetaValueType(meta_value))]); + } + } + if (s.ok()) { + ParsedListsMetaValue parsed_lists_meta_value(&meta_value); + if (parsed_lists_meta_value.IsStale()) { + return Status::NotFound("Stale"); + } else if (parsed_lists_meta_value.Count() == 0) { + return Status::NotFound(); + } else { + uint64_t current_index; + std::vector target_index; + std::vector delete_index; + uint64_t rest = (count < 0) ? -count : count; + uint64_t version = parsed_lists_meta_value.Version(); + uint64_t start_index = parsed_lists_meta_value.LeftIndex() + 1; + uint64_t stop_index = parsed_lists_meta_value.RightIndex() - 1; + ListsDataKey start_data_key(key, version, start_index); + ListsDataKey stop_data_key(key, version, stop_index); + if (count >= 0) { + current_index = start_index; + rocksdb::Iterator* iter = db_->NewIterator(default_read_options_, handles_[kListsDataCF]); + for (iter->Seek(start_data_key.Encode()); + iter->Valid() && current_index <= stop_index && ((count == 0) || rest != 0); + iter->Next(), current_index++) { + ParsedBaseDataValue parsed_value(iter->value()); + if (value.compare(parsed_value.UserValue()) == 0) { + target_index.push_back(current_index); + if (count != 0) { + rest--; + } + } + } + delete iter; + } else { + current_index = stop_index; + rocksdb::Iterator* iter = db_->NewIterator(default_read_options_, handles_[kListsDataCF]); + for (iter->Seek(stop_data_key.Encode()); + iter->Valid() && current_index >= start_index && ((count == 0) || rest != 0); + iter->Prev(), current_index--) { + ParsedBaseDataValue parsed_value(iter->value()); + if (value.compare(parsed_value.UserValue()) == 0) { + target_index.push_back(current_index); + if (count != 0) { + rest--; + } + } + } + delete iter; + } + if (target_index.empty()) { + *ret = 0; + return Status::NotFound(); + } else { + rest = target_index.size(); + uint64_t sublist_left_index = (count >= 0) ? 
target_index[0] : target_index[target_index.size() - 1]; + uint64_t sublist_right_index = (count >= 0) ? target_index[target_index.size() - 1] : target_index[0]; + uint64_t left_part_len = sublist_right_index - start_index; + uint64_t right_part_len = stop_index - sublist_left_index; + if (left_part_len <= right_part_len) { + uint64_t left = sublist_right_index; + current_index = sublist_right_index; + ListsDataKey sublist_right_key(key, version, sublist_right_index); + rocksdb::Iterator* iter = db_->NewIterator(default_read_options_, handles_[kListsDataCF]); + for (iter->Seek(sublist_right_key.Encode()); iter->Valid() && current_index >= start_index; + iter->Prev(), current_index--) { + ParsedBaseDataValue parsed_value(iter->value()); + if (value.compare(parsed_value.UserValue()) == 0 && rest > 0) { + rest--; + } else { + ListsDataKey lists_data_key(key, version, left--); + batch.Put(handles_[kListsDataCF], lists_data_key.Encode(), iter->value()); + } + } + delete iter; + uint64_t left_index = parsed_lists_meta_value.LeftIndex(); + for (uint64_t idx = 0; idx < target_index.size(); ++idx) { + delete_index.push_back(left_index + idx + 1); + } + parsed_lists_meta_value.ModifyLeftIndex(-target_index.size()); + } else { + uint64_t right = sublist_left_index; + current_index = sublist_left_index; + ListsDataKey sublist_left_key(key, version, sublist_left_index); + rocksdb::Iterator* iter = db_->NewIterator(default_read_options_, handles_[kListsDataCF]); + for (iter->Seek(sublist_left_key.Encode()); iter->Valid() && current_index <= stop_index; + iter->Next(), current_index++) { + ParsedBaseDataValue parsed_value(iter->value()); + if ((value.compare(parsed_value.UserValue()) == 0) && rest > 0) { + rest--; + } else { + ListsDataKey lists_data_key(key, version, right++); + batch.Put(handles_[kListsDataCF], lists_data_key.Encode(), iter->value()); + } + } + delete iter; + uint64_t right_index = parsed_lists_meta_value.RightIndex(); + for (uint64_t idx = 0; idx < target_index.size(); ++idx) { + delete_index.push_back(right_index - idx - 1); + } + parsed_lists_meta_value.ModifyRightIndex(-target_index.size()); + } + parsed_lists_meta_value.ModifyCount(-target_index.size()); + batch.Put(handles_[kMetaCF], base_meta_key.Encode(), meta_value); + for (const auto& idx : delete_index) { + ListsDataKey lists_data_key(key, version, idx); + batch.Delete(handles_[kListsDataCF], lists_data_key.Encode()); + } + *ret = target_index.size(); + return db_->Write(default_write_options_, &batch); + } + } + } else if (s.IsNotFound()) { + *ret = 0; + } + return s; +} + +Status Redis::LSet(const Slice& key, int64_t index, const Slice& value) { + uint32_t statistic = 0; + ScopeRecordLock l(lock_mgr_, key); + std::string meta_value; + + BaseMetaKey base_meta_key(key); + Status s = db_->Get(default_read_options_, handles_[kMetaCF], base_meta_key.Encode(), &meta_value); + if (s.ok() && !ExpectedMetaValue(DataType::kLists, meta_value)) { + if (ExpectedStale(meta_value)) { + s = Status::NotFound(); + } else { + return Status::InvalidArgument( + "WRONGTYPE, key: " + key.ToString() + ", expect type: " + + DataTypeStrings[static_cast(DataType::kLists)] + ", get type: " + + DataTypeStrings[static_cast(GetMetaValueType(meta_value))]); + } + } + if (s.ok()) { + ParsedListsMetaValue parsed_lists_meta_value(&meta_value); + if (parsed_lists_meta_value.IsStale()) { + return Status::NotFound("Stale"); + } else if (parsed_lists_meta_value.Count() == 0) { + return Status::NotFound(); + } else { + uint64_t version = 
parsed_lists_meta_value.Version(); + uint64_t target_index = + index >= 0 ? parsed_lists_meta_value.LeftIndex() + index + 1 : parsed_lists_meta_value.RightIndex() + index; + if (target_index <= parsed_lists_meta_value.LeftIndex() || + target_index >= parsed_lists_meta_value.RightIndex()) { + return Status::Corruption("index out of range"); + } + ListsDataKey lists_data_key(key, version, target_index); + BaseDataValue i_val(value); + s = db_->Put(default_write_options_, handles_[kListsDataCF], lists_data_key.Encode(), i_val.Encode()); + statistic++; + UpdateSpecificKeyStatistics(DataType::kLists, key.ToString(), statistic); + return s; + } + } + return s; +} + +Status Redis::LTrim(const Slice& key, int64_t start, int64_t stop) { + rocksdb::WriteBatch batch; + ScopeRecordLock l(lock_mgr_, key); + + uint32_t statistic = 0; + std::string meta_value; + + BaseMetaKey base_meta_key(key); + Status s = db_->Get(default_read_options_, handles_[kMetaCF], base_meta_key.Encode(), &meta_value); + if (s.ok() && !ExpectedMetaValue(DataType::kLists, meta_value)) { + if (ExpectedStale(meta_value)) { + s = Status::NotFound(); + } else { + return Status::InvalidArgument( + "WRONGTYPE, key: " + key.ToString() + ", expect type: " + + DataTypeStrings[static_cast(DataType::kLists)] + ", get type: " + + DataTypeStrings[static_cast(GetMetaValueType(meta_value))]); + } + } + if (s.ok()) { + ParsedListsMetaValue parsed_lists_meta_value(&meta_value); + uint64_t version = parsed_lists_meta_value.Version(); + if (parsed_lists_meta_value.IsStale()) { + return Status::NotFound("Stale"); + } else if (parsed_lists_meta_value.Count() == 0) { + return Status::NotFound(); + } else { + uint64_t origin_left_index = parsed_lists_meta_value.LeftIndex() + 1; + uint64_t origin_right_index = parsed_lists_meta_value.RightIndex() - 1; + uint64_t sublist_left_index = start >= 0 ? origin_left_index + start : origin_right_index + start + 1; + uint64_t sublist_right_index = stop >= 0 ? 
origin_left_index + stop : origin_right_index + stop + 1; + + if (sublist_left_index > sublist_right_index || sublist_left_index > origin_right_index || + sublist_right_index < origin_left_index) { + parsed_lists_meta_value.InitialMetaValue(); + batch.Put(handles_[kMetaCF], base_meta_key.Encode(), meta_value); + } else { + if (sublist_left_index < origin_left_index) { + sublist_left_index = origin_left_index; + } + + if (sublist_right_index > origin_right_index) { + sublist_right_index = origin_right_index; + } + + uint64_t delete_node_num = + (sublist_left_index - origin_left_index) + (origin_right_index - sublist_right_index); + parsed_lists_meta_value.ModifyLeftIndex(-(sublist_left_index - origin_left_index)); + parsed_lists_meta_value.ModifyRightIndex(-(origin_right_index - sublist_right_index)); + parsed_lists_meta_value.ModifyCount(-delete_node_num); + batch.Put(handles_[kMetaCF], base_meta_key.Encode(), meta_value); + for (uint64_t idx = origin_left_index; idx < sublist_left_index; ++idx) { + statistic++; + ListsDataKey lists_data_key(key, version, idx); + batch.Delete(handles_[kListsDataCF], lists_data_key.Encode()); + } + for (uint64_t idx = origin_right_index; idx > sublist_right_index; --idx) { + statistic++; + ListsDataKey lists_data_key(key, version, idx); + batch.Delete(handles_[kListsDataCF], lists_data_key.Encode()); + } + } + } + } else { + return s; + } + s = db_->Write(default_write_options_, &batch); + UpdateSpecificKeyStatistics(DataType::kLists, key.ToString(), statistic); + return s; +} + +Status Redis::RPop(const Slice& key, int64_t count, std::vector* elements) { + uint32_t statistic = 0; + elements->clear(); + + rocksdb::WriteBatch batch; + ScopeRecordLock l(lock_mgr_, key); + + std::string meta_value; + + BaseMetaKey base_meta_key(key); + Status s = db_->Get(default_read_options_, handles_[kMetaCF], base_meta_key.Encode(), &meta_value); + if (s.ok() && !ExpectedMetaValue(DataType::kLists, meta_value)) { + if (ExpectedStale(meta_value)) { + s = Status::NotFound(); + } else { + return Status::InvalidArgument( + "WRONGTYPE, key: " + key.ToString() + ", expect type: " + + DataTypeStrings[static_cast(DataType::kLists)] + ", get type: " + + DataTypeStrings[static_cast(GetMetaValueType(meta_value))]); + } + } + if (s.ok()) { + ParsedListsMetaValue parsed_lists_meta_value(&meta_value); + if (parsed_lists_meta_value.IsStale()) { + return Status::NotFound("Stale"); + } else if (parsed_lists_meta_value.Count() == 0) { + return Status::NotFound(); + } else { + auto size = static_cast(parsed_lists_meta_value.Count()); + uint64_t version = parsed_lists_meta_value.Version(); + int32_t start_index = 0; + auto stop_index = static_cast(count<=size?count-1:size-1); + int32_t cur_index = 0; + ListsDataKey lists_data_key(key, version, parsed_lists_meta_value.RightIndex()-1); + rocksdb::Iterator* iter = db_->NewIterator(default_read_options_, handles_[kListsDataCF]); + for (iter->SeekForPrev(lists_data_key.Encode()); iter->Valid() && cur_index <= stop_index; iter->Prev(), ++cur_index) { + statistic++; + ParsedBaseDataValue parsed_value(iter->value()); + elements->push_back(parsed_value.UserValue().ToString()); + batch.Delete(handles_[kListsDataCF],iter->key()); + + parsed_lists_meta_value.ModifyCount(-1); + parsed_lists_meta_value.ModifyRightIndex(-1); + } + batch.Put(handles_[kMetaCF], base_meta_key.Encode(), meta_value); + delete iter; + } + } + if (batch.Count() != 0U) { + s = db_->Write(default_write_options_, &batch); + if (s.ok()) { + batch.Clear(); + } + 
UpdateSpecificKeyStatistics(DataType::kLists, key.ToString(), statistic); + } + return s; +} + +Status Redis::RPoplpush(const Slice& source, const Slice& destination, std::string* element) { + element->clear(); + uint32_t statistic = 0; + Status s; + rocksdb::WriteBatch batch; + MultiScopeRecordLock l(lock_mgr_, {source.ToString(), destination.ToString()}); + if (source.compare(destination) == 0) { + std::string meta_value; + BaseMetaKey base_source(source); + s = db_->Get(default_read_options_, handles_[kMetaCF], base_source.Encode(), &meta_value); + if (s.ok() && !ExpectedMetaValue(DataType::kLists, meta_value)) { + if (ExpectedStale(meta_value)) { + s = Status::NotFound(); + } else { + return Status::InvalidArgument( + "WRONGTYPE, key: " + destination.ToString() + ", expect type: " + + DataTypeStrings[static_cast(DataType::kLists)] + ", get type: " + + DataTypeStrings[static_cast(GetMetaValueType(meta_value))]); + } + } + if (s.ok()) { + ParsedListsMetaValue parsed_lists_meta_value(&meta_value); + if (parsed_lists_meta_value.IsStale()) { + return Status::NotFound("Stale"); + } else if (parsed_lists_meta_value.Count() == 0) { + return Status::NotFound(); + } else { + std::string target; + uint64_t version = parsed_lists_meta_value.Version(); + uint64_t last_node_index = parsed_lists_meta_value.RightIndex() - 1; + ListsDataKey lists_data_key(source, version, last_node_index); + s = db_->Get(default_read_options_, handles_[kListsDataCF], lists_data_key.Encode(), &target); + if (s.ok()) { + *element = target; + ParsedBaseDataValue parsed_value(element); + parsed_value.StripSuffix(); + if (parsed_lists_meta_value.Count() == 1) { + return Status::OK(); + } else { + uint64_t target_index = parsed_lists_meta_value.LeftIndex(); + ListsDataKey lists_target_key(source, version, target_index); + batch.Delete(handles_[kListsDataCF], lists_data_key.Encode()); + batch.Put(handles_[kListsDataCF], lists_target_key.Encode(), target); + statistic++; + parsed_lists_meta_value.ModifyRightIndex(-1); + parsed_lists_meta_value.ModifyLeftIndex(1); + batch.Put(handles_[kMetaCF], base_source.Encode(), meta_value); + s = db_->Write(default_write_options_, &batch); + UpdateSpecificKeyStatistics(DataType::kLists, source.ToString(), statistic); + return s; + } + } else { + return s; + } + } + } else { + return s; + } + } + + uint64_t version; + std::string target; + std::string source_meta_value; + BaseMetaKey base_source(source); + s = db_->Get(default_read_options_, handles_[kMetaCF], base_source.Encode(), &source_meta_value); + if (s.ok() && !ExpectedMetaValue(DataType::kLists, source_meta_value)) { + if (ExpectedStale(source_meta_value)) { + s = Status::NotFound(); + } else { + return Status::InvalidArgument( + "WRONGTYPE, key: " + source.ToString() + ", expect type: " + + DataTypeStrings[static_cast(DataType::kLists)] + ", get type: " + + DataTypeStrings[static_cast(GetMetaValueType(source_meta_value))]); + } + } + if (s.ok()) { + ParsedListsMetaValue parsed_lists_meta_value(&source_meta_value); + if (parsed_lists_meta_value.IsStale()) { + return Status::NotFound("Stale"); + } else if (parsed_lists_meta_value.Count() == 0) { + return Status::NotFound(); + } else { + version = parsed_lists_meta_value.Version(); + uint64_t last_node_index = parsed_lists_meta_value.RightIndex() - 1; + ListsDataKey lists_data_key(source, version, last_node_index); + s = db_->Get(default_read_options_, handles_[kListsDataCF], lists_data_key.Encode(), &target); + if (s.ok()) { + batch.Delete(handles_[kListsDataCF], 
lists_data_key.Encode()); + statistic++; + parsed_lists_meta_value.ModifyCount(-1); + parsed_lists_meta_value.ModifyRightIndex(-1); + batch.Put(handles_[kMetaCF], base_source.Encode(), source_meta_value); + } else { + return s; + } + } + } else { + return s; + } + + std::string destination_meta_value; + BaseMetaKey base_destination(destination); + s = db_->Get(default_read_options_, handles_[kMetaCF], base_destination.Encode(), &destination_meta_value); + if (s.ok() && !ExpectedMetaValue(DataType::kLists, destination_meta_value)) { + if (ExpectedStale(destination_meta_value)) { + s = Status::NotFound(); + } else { + return Status::InvalidArgument( + "WRONGTYPE, key: " + destination.ToString() + ", expect type: " + + DataTypeStrings[static_cast(DataType::kLists)] + ", get type: " + + DataTypeStrings[static_cast(GetMetaValueType(destination_meta_value))]); + } + } + if (s.ok()) { + ParsedListsMetaValue parsed_lists_meta_value(&destination_meta_value); + if (parsed_lists_meta_value.IsStale() || parsed_lists_meta_value.Count() == 0) { + version = parsed_lists_meta_value.InitialMetaValue(); + } else { + version = parsed_lists_meta_value.Version(); + } + uint64_t target_index = parsed_lists_meta_value.LeftIndex(); + ListsDataKey lists_data_key(destination, version, target_index); + batch.Put(handles_[kListsDataCF], lists_data_key.Encode(), target); + parsed_lists_meta_value.ModifyCount(1); + parsed_lists_meta_value.ModifyLeftIndex(1); + batch.Put(handles_[kMetaCF], base_destination.Encode(), destination_meta_value); + } else if (s.IsNotFound()) { + char str[8]; + EncodeFixed64(str, 1); + ListsMetaValue lists_meta_value(Slice(str, sizeof(uint64_t))); + version = lists_meta_value.UpdateVersion(); + uint64_t target_index = lists_meta_value.LeftIndex(); + ListsDataKey lists_data_key(destination, version, target_index); + batch.Put(handles_[kListsDataCF], lists_data_key.Encode(), target); + lists_meta_value.ModifyLeftIndex(1); + batch.Put(handles_[kMetaCF], base_destination.Encode(), lists_meta_value.Encode()); + } else { + return s; + } + + s = db_->Write(default_write_options_, &batch); + UpdateSpecificKeyStatistics(DataType::kLists, source.ToString(), statistic); + if (s.ok()) { + ParsedBaseDataValue parsed_value(&target); + parsed_value.StripSuffix(); + *element = target; + } + return s; +} + +Status Redis::RPush(const Slice& key, const std::vector& values, uint64_t* ret) { + *ret = 0; + rocksdb::WriteBatch batch; + + uint64_t index = 0; + uint64_t version = 0; + std::string meta_value; + + BaseMetaKey base_meta_key(key); + Status s = db_->Get(default_read_options_, handles_[kMetaCF], base_meta_key.Encode(), &meta_value); + if (s.ok() && !ExpectedMetaValue(DataType::kLists, meta_value)) { + if (ExpectedStale(meta_value)) { + s = Status::NotFound(); + } else { + return Status::InvalidArgument( + "WRONGTYPE, key: " + key.ToString() + ", expect type: " + + DataTypeStrings[static_cast(DataType::kLists)] + ", get type: " + + DataTypeStrings[static_cast(GetMetaValueType(meta_value))]); + } + } + if (s.ok()) { + ParsedListsMetaValue parsed_lists_meta_value(&meta_value); + if (parsed_lists_meta_value.IsStale() || parsed_lists_meta_value.Count() == 0) { + version = parsed_lists_meta_value.InitialMetaValue(); + } else { + version = parsed_lists_meta_value.Version(); + } + for (const auto& value : values) { + index = parsed_lists_meta_value.RightIndex(); + parsed_lists_meta_value.ModifyRightIndex(1); + parsed_lists_meta_value.ModifyCount(1); + ListsDataKey lists_data_key(key, version, index); + 
BaseDataValue i_val(value); + batch.Put(handles_[kListsDataCF], lists_data_key.Encode(), i_val.Encode()); + } + batch.Put(handles_[kMetaCF], base_meta_key.Encode(), meta_value); + *ret = parsed_lists_meta_value.Count(); + } else if (s.IsNotFound()) { + char str[8]; + EncodeFixed64(str, values.size()); + ListsMetaValue lists_meta_value(Slice(str, sizeof(uint64_t))); + version = lists_meta_value.UpdateVersion(); + for (const auto& value : values) { + index = lists_meta_value.RightIndex(); + lists_meta_value.ModifyRightIndex(1); + ListsDataKey lists_data_key(key, version, index); + BaseDataValue i_val(value); + batch.Put(handles_[kListsDataCF], lists_data_key.Encode(), i_val.Encode()); + } + batch.Put(handles_[kMetaCF], base_meta_key.Encode(), lists_meta_value.Encode()); + *ret = lists_meta_value.RightIndex() - lists_meta_value.LeftIndex() - 1; + } else { + return s; + } + return db_->Write(default_write_options_, &batch); +} + +Status Redis::RPushx(const Slice& key, const std::vector& values, uint64_t* len) { + *len = 0; + rocksdb::WriteBatch batch; + + ScopeRecordLock l(lock_mgr_, key); + std::string meta_value; + + BaseMetaKey base_meta_key(key); + Status s = db_->Get(default_read_options_, handles_[kMetaCF], base_meta_key.Encode(), &meta_value); + if (s.ok() && !ExpectedMetaValue(DataType::kLists, meta_value)) { + if (ExpectedStale(meta_value)) { + s = Status::NotFound(); + } else { + return Status::InvalidArgument( + "WRONGTYPE, key: " + key.ToString() + ", expect type: " + + DataTypeStrings[static_cast(DataType::kLists)] + ", get type: " + + DataTypeStrings[static_cast(GetMetaValueType(meta_value))]); + } + } + if (s.ok()) { + ParsedListsMetaValue parsed_lists_meta_value(&meta_value); + if (parsed_lists_meta_value.IsStale()) { + return Status::NotFound("Stale"); + } else if (parsed_lists_meta_value.Count() == 0) { + return Status::NotFound(); + } else { + uint64_t version = parsed_lists_meta_value.Version(); + for (const auto& value : values) { + uint64_t index = parsed_lists_meta_value.RightIndex(); + parsed_lists_meta_value.ModifyCount(1); + parsed_lists_meta_value.ModifyRightIndex(1); + ListsDataKey lists_data_key(key, version, index); + BaseDataValue i_val(value); + batch.Put(handles_[kListsDataCF], lists_data_key.Encode(), i_val.Encode()); + } + batch.Put(handles_[kMetaCF], base_meta_key.Encode(), meta_value); + *len = parsed_lists_meta_value.Count(); + return db_->Write(default_write_options_, &batch); + } + } + return s; +} + +Status Redis::ListsExpire(const Slice& key, int64_t ttl_millsec, std::string&& prefetch_meta) { + std::string meta_value(std::move(prefetch_meta)); + ScopeRecordLock l(lock_mgr_, key); + BaseMetaKey base_meta_key(key); + Status s; + + // meta_value is empty means no meta value get before, + // we should get meta first + if (meta_value.empty()) { + Status s = db_->Get(default_read_options_, handles_[kMetaCF], base_meta_key.Encode(), &meta_value); + if (s.ok() && !ExpectedMetaValue(DataType::kLists, meta_value)) { + if (ExpectedStale(meta_value)) { + s = Status::NotFound(); + } else { + return Status::InvalidArgument( + "WRONGTYPE, key: " + key.ToString() + ", expect type: " + + DataTypeStrings[static_cast(DataType::kLists)] + ", get type: " + + DataTypeStrings[static_cast(GetMetaValueType(meta_value))]); + } + } + } + if (s.ok()) { + ParsedListsMetaValue parsed_lists_meta_value(&meta_value); + if (parsed_lists_meta_value.IsStale()) { + return Status::NotFound("Stale"); + } else if (parsed_lists_meta_value.Count() == 0) { + return Status::NotFound(); + } + + 
if (ttl_millsec > 0) { + parsed_lists_meta_value.SetRelativeTimestamp(ttl_millsec); + s = db_->Put(default_write_options_, handles_[kMetaCF], base_meta_key.Encode(), meta_value); + } else { + parsed_lists_meta_value.InitialMetaValue(); + s = db_->Put(default_write_options_, handles_[kMetaCF], base_meta_key.Encode(), meta_value); + } + } + return s; +} + +Status Redis::ListsDel(const Slice& key, std::string&& prefetch_meta) { + std::string meta_value(std::move(prefetch_meta)); + ScopeRecordLock l(lock_mgr_, key); + BaseMetaKey base_meta_key(key); + Status s; + + // meta_value is empty means no meta value get before, + // we should get meta first + if (meta_value.empty()) { + s = db_->Get(default_read_options_, handles_[kMetaCF], base_meta_key.Encode(), &meta_value); + if (s.ok() && !ExpectedMetaValue(DataType::kLists, meta_value)) { + if (ExpectedStale(meta_value)) { + s = Status::NotFound(); + } else { + return Status::InvalidArgument( + "WRONGTYPE, key: " + key.ToString() + ", expect type: " + + DataTypeStrings[static_cast(DataType::kLists)] + ", get type: " + + DataTypeStrings[static_cast(GetMetaValueType(meta_value))]); + } + } + } + if (s.ok()) { + ParsedListsMetaValue parsed_lists_meta_value(&meta_value); + if (parsed_lists_meta_value.IsStale()) { + return Status::NotFound("Stale"); + } else if (parsed_lists_meta_value.Count() == 0) { + return Status::NotFound(); + } else { + uint64_t statistic = parsed_lists_meta_value.Count(); + parsed_lists_meta_value.InitialMetaValue(); + s = db_->Put(default_write_options_, handles_[kMetaCF], base_meta_key.Encode(), meta_value); + UpdateSpecificKeyStatistics(DataType::kLists, key.ToString(), statistic); + } + } + return s; +} + +Status Redis::ListsExpireat(const Slice& key, int64_t timestamp_millsec, std::string&& prefetch_meta) { + std::string meta_value(std::move(prefetch_meta)); + ScopeRecordLock l(lock_mgr_, key); + BaseMetaKey base_meta_key(key); + Status s; + + // meta_value is empty means no meta value get before, + // we should get meta first + if (meta_value.empty()) { + s = db_->Get(default_read_options_, handles_[kMetaCF], base_meta_key.Encode(), &meta_value); + if (s.ok() && !ExpectedMetaValue(DataType::kLists, meta_value)) { + if (ExpectedStale(meta_value)) { + s = Status::NotFound(); + } else { + return Status::InvalidArgument( + "WRONGTYPE, key: " + key.ToString() + ", expect type: " + + DataTypeStrings[static_cast(DataType::kLists)] + ", get type: " + + DataTypeStrings[static_cast(GetMetaValueType(meta_value))]); + } + } + } + if (s.ok()) { + ParsedListsMetaValue parsed_lists_meta_value(&meta_value); + if (parsed_lists_meta_value.IsStale()) { + return Status::NotFound("Stale"); + } else if (parsed_lists_meta_value.Count() == 0) { + return Status::NotFound(); + } else { + if (timestamp_millsec > 0) { + parsed_lists_meta_value.SetEtime(static_cast(timestamp_millsec)); + } else { + parsed_lists_meta_value.InitialMetaValue(); + } + return db_->Put(default_write_options_, handles_[kMetaCF], base_meta_key.Encode(), meta_value); + } + } + return s; +} + +Status Redis::ListsPersist(const Slice& key, std::string&& prefetch_meta) { + std::string meta_value(std::move(prefetch_meta)); + ScopeRecordLock l(lock_mgr_, key); + BaseMetaKey base_meta_key(key); + Status s; + + // meta_value is empty means no meta value get before, + // we should get meta first + if (meta_value.empty()) { + s = db_->Get(default_read_options_, handles_[kMetaCF], base_meta_key.Encode(), &meta_value); + if (s.ok() && !ExpectedMetaValue(DataType::kLists, meta_value)) { + 
if (ExpectedStale(meta_value)) { + s = Status::NotFound(); + } else { + return Status::InvalidArgument( + "WRONGTYPE, key: " + key.ToString() + ", expect type: " + + DataTypeStrings[static_cast(DataType::kLists)] + ", get type: " + + DataTypeStrings[static_cast(GetMetaValueType(meta_value))]); + } + } + } + if (s.ok()) { + ParsedListsMetaValue parsed_lists_meta_value(&meta_value); + if (parsed_lists_meta_value.IsStale()) { + return Status::NotFound("Stale"); + } else if (parsed_lists_meta_value.Count() == 0) { + return Status::NotFound(); + } else { + // Check if the list has set expiration time before attempting to persist + if (parsed_lists_meta_value.Etime() == 0) { + return Status::NotFound("Not have an associated timeout"); + } else { + parsed_lists_meta_value.SetEtime(0); + return db_->Put(default_write_options_, handles_[kMetaCF], base_meta_key.Encode(), meta_value); + } + } + } + return s; +} + +Status Redis::ListsTTL(const Slice& key, int64_t* ttl_millsec, std::string&& prefetch_meta) { + std::string meta_value(std::move(prefetch_meta)); + BaseMetaKey base_meta_key(key); + Status s; + + // meta_value is empty means no meta value get before, + // we should get meta first + if (meta_value.empty()) { + s = db_->Get(default_read_options_, handles_[kMetaCF], base_meta_key.Encode(), &meta_value); + if (s.ok() && !ExpectedMetaValue(DataType::kLists, meta_value)) { + if (ExpectedStale(meta_value)) { + s = Status::NotFound(); + } else { + return Status::InvalidArgument( + "WRONGTYPE, key: " + key.ToString() + ", expect type: " + + DataTypeStrings[static_cast(DataType::kLists)] + ", get type: " + + DataTypeStrings[static_cast(GetMetaValueType(meta_value))]); + } + } + } + if (s.ok()) { + ParsedListsMetaValue parsed_lists_meta_value(&meta_value); + if (parsed_lists_meta_value.IsStale()) { + *ttl_millsec = -2; + return Status::NotFound("Stale"); + } else if (parsed_lists_meta_value.Count() == 0) { + *ttl_millsec = -2; + return Status::NotFound(); + } else { + // Return -1 for lists with no set expiration, and calculate remaining time for others + *ttl_millsec = parsed_lists_meta_value.Etime(); + if (*ttl_millsec == 0) { + *ttl_millsec = -1; + } else { + pstd::TimeType curtime = pstd::NowMillis(); + *ttl_millsec = *ttl_millsec - curtime >= 0 ? *ttl_millsec - curtime : -2; + } + } + } else if (s.IsNotFound()) { + *ttl_millsec = -2; + } + return s; +} + +void Redis::ScanLists() { + rocksdb::ReadOptions iterator_options; + const rocksdb::Snapshot* snapshot; + ScopeSnapshot ss(db_, &snapshot); + iterator_options.snapshot = snapshot; + iterator_options.fill_cache = false; + auto current_time = static_cast(time(nullptr)); + + LOG(INFO) << "*************** " << "rocksdb instance: " << index_ << " List Meta ***************"; + auto meta_iter = db_->NewIterator(iterator_options, handles_[kMetaCF]); + for (meta_iter->SeekToFirst(); meta_iter->Valid(); meta_iter->Next()) { + if (!ExpectedMetaValue(DataType::kLists, meta_iter->value().ToString())) { + continue; + } + ParsedListsMetaValue parsed_lists_meta_value(meta_iter->value()); + ParsedBaseMetaKey parsed_meta_key(meta_iter->value()); + int32_t survival_time = 0; + if (parsed_lists_meta_value.Etime() != 0) { + survival_time = parsed_lists_meta_value.Etime() - current_time > 0 + ? 
parsed_lists_meta_value.Etime() - current_time + : -1; + } + + LOG(INFO) << fmt::format( + "[key : {:<30}] [count : {:<10}] [left index : {:<10}] [right index : {:<10}] [timestamp : {:<10}] [version : " + "{}] [survival_time : {}]", + parsed_meta_key.Key().ToString(), parsed_lists_meta_value.Count(), parsed_lists_meta_value.LeftIndex(), + parsed_lists_meta_value.RightIndex(), parsed_lists_meta_value.Etime(), parsed_lists_meta_value.Version(), + survival_time); + } + delete meta_iter; + + LOG(INFO) << "*************** " << "rocksdb instance: " << index_ << " List Data***************"; + auto data_iter = db_->NewIterator(iterator_options, handles_[kListsDataCF]); + for (data_iter->SeekToFirst(); data_iter->Valid(); data_iter->Next()) { + ParsedListsDataKey parsed_lists_data_key(data_iter->key()); + ParsedBaseDataValue parsed_value(data_iter->value()); + + LOG(INFO) << fmt::format("[key : {:<30}] [index : {:<10}] [data : {:<20}] [version : {}]", + parsed_lists_data_key.key().ToString(), parsed_lists_data_key.index(), + parsed_value.UserValue().ToString(), parsed_lists_data_key.Version()); + } + delete data_iter; +} + +} // namespace storage diff --git a/tools/pika_migrate/src/storage/src/redis_sets.cc b/tools/pika_migrate/src/storage/src/redis_sets.cc new file mode 100644 index 0000000000..5f33d9574b --- /dev/null +++ b/tools/pika_migrate/src/storage/src/redis_sets.cc @@ -0,0 +1,1645 @@ +// Copyright (c) 2017-present, Qihoo, Inc. All rights reserved. +// This source code is licensed under the BSD-style license found in the +// LICENSE file in the root directory of this source tree. An additional grant +// of patent rights can be found in the PATENTS file in the same directory. + +#include "src/redis.h" + +#include +#include +#include +#include + +#include +#include + +#include "src/base_filter.h" +#include "src/scope_snapshot.h" +#include "src/scope_record_lock.h" +#include "src/base_data_value_format.h" +#include "pstd/include/env.h" +#include "pstd/include/pika_codis_slot.h" +#include "storage/util.h" + +namespace storage { +rocksdb::Status Redis::ScanSetsKeyNum(KeyInfo* key_info) { + uint64_t keys = 0; + uint64_t expires = 0; + uint64_t ttl_sum = 0; + uint64_t invaild_keys = 0; + + rocksdb::ReadOptions iterator_options; + const rocksdb::Snapshot* snapshot; + ScopeSnapshot ss(db_, &snapshot); + iterator_options.snapshot = snapshot; + iterator_options.fill_cache = false; + + pstd::TimeType curtime = pstd::NowMillis(); + + rocksdb::Iterator* iter = db_->NewIterator(iterator_options, handles_[kMetaCF]); + for (iter->SeekToFirst(); iter->Valid(); iter->Next()) { + if (!ExpectedMetaValue(DataType::kSets, iter->value().ToString())) { + continue; + } + ParsedSetsMetaValue parsed_sets_meta_value(iter->value()); + if (parsed_sets_meta_value.IsStale() || parsed_sets_meta_value.Count() == 0) { + invaild_keys++; + } else { + keys++; + if (!parsed_sets_meta_value.IsPermanentSurvival()) { + expires++; + ttl_sum += parsed_sets_meta_value.Etime() - curtime; + } + } + } + delete iter; + + key_info->keys = keys; + key_info->expires = expires; + key_info->avg_ttl = (expires != 0) ? 
ttl_sum / expires : 0; + key_info->invaild_keys = invaild_keys; + return rocksdb::Status::OK(); +} + +rocksdb::Status Redis::SAdd(const Slice& key, const std::vector& members, int32_t* ret) { + std::unordered_set unique; + std::vector filtered_members; + for (const auto& member : members) { + if (unique.find(member) == unique.end()) { + unique.insert(member); + filtered_members.push_back(member); + } + } + + rocksdb::WriteBatch batch; + ScopeRecordLock l(lock_mgr_, key); + uint64_t version = 0; + std::string meta_value; + + BaseMetaKey base_meta_key(key); + rocksdb::Status s = db_->Get(default_read_options_, handles_[kMetaCF], base_meta_key.Encode(), &meta_value); + if (s.ok() && !ExpectedMetaValue(DataType::kSets, meta_value)) { + if (ExpectedStale(meta_value)) { + s = Status::NotFound(); + } else { + return Status::InvalidArgument( + "WRONGTYPE, key: " + key.ToString() + ", expect type: " + + DataTypeStrings[static_cast(DataType::kSets)] + ", get type: " + + DataTypeStrings[static_cast(GetMetaValueType(meta_value))]); + } + } + if (s.ok()) { + ParsedSetsMetaValue parsed_sets_meta_value(&meta_value); + if (parsed_sets_meta_value.IsStale() || parsed_sets_meta_value.Count() == 0) { + version = parsed_sets_meta_value.InitialMetaValue(); + if (!parsed_sets_meta_value.check_set_count(static_cast(filtered_members.size()))) { + return Status::InvalidArgument("set size overflow"); + } + parsed_sets_meta_value.SetCount(static_cast(filtered_members.size())); + batch.Put(handles_[kMetaCF], base_meta_key.Encode(), meta_value); + for (const auto& member : filtered_members) { + SetsMemberKey sets_member_key(key, version, member); + BaseDataValue iter_value(Slice{}); + batch.Put(handles_[kSetsDataCF], sets_member_key.Encode(), iter_value.Encode()); + } + *ret = static_cast(filtered_members.size()); + } else { + int32_t cnt = 0; + std::string member_value; + version = parsed_sets_meta_value.Version(); + for (const auto& member : filtered_members) { + SetsMemberKey sets_member_key(key, version, member); + s = db_->Get(default_read_options_, handles_[kSetsDataCF], sets_member_key.Encode(), &member_value); + if (s.ok()) { + } else if (s.IsNotFound()) { + cnt++; + BaseDataValue iter_value(Slice{}); + batch.Put(handles_[kSetsDataCF], sets_member_key.Encode(), iter_value.Encode()); + } else { + return s; + } + } + *ret = cnt; + if (cnt == 0) { + return rocksdb::Status::OK(); + } else { + if (!parsed_sets_meta_value.CheckModifyCount(cnt)) { + return Status::InvalidArgument("set size overflow"); + } + parsed_sets_meta_value.ModifyCount(cnt); + batch.Put(handles_[kMetaCF], base_meta_key.Encode(), meta_value); + } + } + } else if (s.IsNotFound()) { + char str[4]; + EncodeFixed32(str, filtered_members.size()); + SetsMetaValue sets_meta_value(DataType::kSets, Slice(str, 4)); + version = sets_meta_value.UpdateVersion(); + batch.Put(handles_[kMetaCF], base_meta_key.Encode(), sets_meta_value.Encode()); + for (const auto& member : filtered_members) { + SetsMemberKey sets_member_key(key, version, member); + BaseDataValue i_val(Slice{}); + batch.Put(handles_[kSetsDataCF], sets_member_key.Encode(), i_val.Encode()); + } + *ret = static_cast(filtered_members.size()); + } else { + return s; + } + return db_->Write(default_write_options_, &batch); +} + +rocksdb::Status Redis::SCard(const Slice& key, int32_t* ret, std::string&& meta) { + *ret = 0; + std::string meta_value(std::move(meta)); + rocksdb::Status s; + if (meta_value.empty()) { + BaseMetaKey base_meta_key(key); + s = db_->Get(default_read_options_, handles_[kMetaCF], 
base_meta_key.Encode(), &meta_value); + if (s.ok() && !ExpectedMetaValue(DataType::kSets, meta_value)) { + if (ExpectedStale(meta_value)) { + s = Status::NotFound(); + } else { + return Status::InvalidArgument( + "WRONGTYPE, key: " + key.ToString() + ", expect type: " + + DataTypeStrings[static_cast(DataType::kSets)] + ", get type: " + + DataTypeStrings[static_cast(GetMetaValueType(meta_value))]); + } + } + } + + if (s.ok()) { + ParsedSetsMetaValue parsed_sets_meta_value(&meta_value); + if (parsed_sets_meta_value.IsStale()) { + return rocksdb::Status::NotFound("Stale"); + } else { + *ret = parsed_sets_meta_value.Count(); + if (*ret == 0) { + return rocksdb::Status::NotFound("Deleted"); + } + } + } + return s; +} + +rocksdb::Status Redis::SDiff(const std::vector& keys, std::vector* members) { + if (keys.empty()) { + return rocksdb::Status::Corruption("SDiff invalid parameter, no keys"); + } + + rocksdb::ReadOptions read_options; + const rocksdb::Snapshot* snapshot; + + std::string meta_value; + uint64_t version = 0; + ScopeSnapshot ss(db_, &snapshot); + read_options.snapshot = snapshot; + std::vector vaild_sets; + rocksdb::Status s; + + for (uint32_t idx = 1; idx < keys.size(); ++idx) { + BaseMetaKey base_meta_key(keys[idx]); + s = db_->Get(read_options, handles_[kMetaCF], base_meta_key.Encode(), &meta_value); + if (s.ok() && !ExpectedMetaValue(DataType::kSets, meta_value)) { + if (ExpectedStale(meta_value)) { + s = Status::NotFound(); + } else { + return Status::InvalidArgument( + "WRONGTYPE, key: " + keys[idx] + ", expect type: " + + DataTypeStrings[static_cast(DataType::kSets)] + ", get type: " + + DataTypeStrings[static_cast(GetMetaValueType(meta_value))]); + } + } + if (s.ok()) { + ParsedSetsMetaValue parsed_sets_meta_value(&meta_value); + if (!parsed_sets_meta_value.IsStale() && parsed_sets_meta_value.Count() != 0) { + vaild_sets.push_back({keys[idx], parsed_sets_meta_value.Version()}); + } + } else if (!s.IsNotFound()) { + return s; + } + } + + BaseMetaKey base_meta_key0(keys[0]); + s = db_->Get(read_options, handles_[kMetaCF], base_meta_key0.Encode(), &meta_value); + if (s.ok() && !ExpectedMetaValue(DataType::kSets, meta_value)) { + if (ExpectedStale(meta_value)) { + s = Status::NotFound(); + } else { + return Status::InvalidArgument( + "WRONGTYPE, key: " + keys[0] + ", expect type: " + + DataTypeStrings[static_cast(DataType::kSets)] + ", get type: " + + DataTypeStrings[static_cast(GetMetaValueType(meta_value))]); + } + } + if (s.ok()) { + ParsedSetsMetaValue parsed_sets_meta_value(&meta_value); + if (!parsed_sets_meta_value.IsStale() && parsed_sets_meta_value.Count() != 0) { + bool found; + Slice prefix; + std::string member_value; + version = parsed_sets_meta_value.Version(); + SetsMemberKey sets_member_key(keys[0], version, Slice()); + prefix = sets_member_key.EncodeSeekKey(); + KeyStatisticsDurationGuard guard(this, DataType::kSets, keys[0]); + auto iter = db_->NewIterator(read_options, handles_[kSetsDataCF]); + for (iter->Seek(prefix); iter->Valid() && iter->key().starts_with(prefix); iter->Next()) { + ParsedSetsMemberKey parsed_sets_member_key(iter->key()); + Slice member = parsed_sets_member_key.member(); + + found = false; + for (const auto& key_version : vaild_sets) { + SetsMemberKey sets_member_key(key_version.key, key_version.version, member); + s = db_->Get(read_options, handles_[kSetsDataCF], sets_member_key.Encode(), &member_value); + if (s.ok()) { + found = true; + break; + } else if (!s.IsNotFound()) { + delete iter; + return s; + } + } + if (!found) { + 
members->push_back(member.ToString()); + } + } + delete iter; + } + } else if (!s.IsNotFound()) { + return s; + } + return rocksdb::Status::OK(); +} + +rocksdb::Status Redis::SDiffstore(const Slice& destination, const std::vector& keys, std::vector& value_to_dest, int32_t* ret) { + if (keys.empty()) { + return rocksdb::Status::Corruption("SDiffsotre invalid parameter, no keys"); + } + + rocksdb::WriteBatch batch; + rocksdb::ReadOptions read_options; + const rocksdb::Snapshot* snapshot; + + std::string meta_value; + uint64_t version = 0; + ScopeRecordLock l(lock_mgr_, destination); + ScopeSnapshot ss(db_, &snapshot); + read_options.snapshot = snapshot; + std::vector vaild_sets; + rocksdb::Status s; + + for (uint32_t idx = 1; idx < keys.size(); ++idx) { + BaseMetaKey base_meta_key(keys[idx]); + s = db_->Get(read_options, handles_[kMetaCF], base_meta_key.Encode(), &meta_value); + if (s.ok() && !ExpectedMetaValue(DataType::kSets, meta_value)) { + if (ExpectedStale(meta_value)) { + s = Status::NotFound(); + } else { + return Status::InvalidArgument( + "WRONGTYPE, key: " + keys[idx] + ", expect type: " + + DataTypeStrings[static_cast(DataType::kSets)] + ", get type: " + + DataTypeStrings[static_cast(GetMetaValueType(meta_value))]); + } + } + if (s.ok()) { + ParsedSetsMetaValue parsed_sets_meta_value(&meta_value); + if (!parsed_sets_meta_value.IsStale() && parsed_sets_meta_value.Count() != 0) { + vaild_sets.push_back({keys[idx], parsed_sets_meta_value.Version()}); + } + } else if (!s.IsNotFound()) { + return s; + } + } + + std::vector members; + BaseMetaKey base_meta_key0(keys[0]); + s = db_->Get(read_options, handles_[kMetaCF], base_meta_key0.Encode(), &meta_value); + if (s.ok() && !ExpectedMetaValue(DataType::kSets, meta_value)) { + if (ExpectedStale(meta_value)) { + s = Status::NotFound(); + } else { + return Status::InvalidArgument( + "WRONGTYPE, key: " + keys[0] + ", expect type: " + + DataTypeStrings[static_cast(DataType::kSets)] + ", get type: " + + DataTypeStrings[static_cast(GetMetaValueType(meta_value))]); + } + } + if (s.ok()) { + ParsedSetsMetaValue parsed_sets_meta_value(&meta_value); + if (!parsed_sets_meta_value.IsStale() && parsed_sets_meta_value.Count() != 0) { + bool found; + std::string member_value; + version = parsed_sets_meta_value.Version(); + SetsMemberKey sets_member_key(keys[0], version, Slice()); + Slice prefix = sets_member_key.EncodeSeekKey(); + KeyStatisticsDurationGuard guard(this, DataType::kSets, keys[0]); + auto iter = db_->NewIterator(read_options, handles_[kSetsDataCF]); + for (iter->Seek(prefix); iter->Valid() && iter->key().starts_with(prefix); iter->Next()) { + ParsedSetsMemberKey parsed_sets_member_key(iter->key()); + Slice member = parsed_sets_member_key.member(); + + found = false; + for (const auto& key_version : vaild_sets) { + SetsMemberKey sets_member_key(key_version.key, key_version.version, member); + s = db_->Get(read_options, handles_[kSetsDataCF], sets_member_key.Encode(), &member_value); + if (s.ok()) { + found = true; + break; + } else if (!s.IsNotFound()) { + delete iter; + return s; + } + } + if (!found) { + members.push_back(member.ToString()); + } + } + delete iter; + } + } else if (!s.IsNotFound()) { + return s; + } + + uint32_t statistic = 0; + BaseMetaKey base_destination(destination); + s = db_->Get(read_options, handles_[kMetaCF], base_destination.Encode(), &meta_value); + if (s.ok() && !ExpectedMetaValue(DataType::kSets, meta_value)) { + if (ExpectedStale(meta_value)) { + s = Status::NotFound(); + } else { + return 
Status::InvalidArgument( + "WRONGTYPE, key: " + destination.ToString() + ", expect type: " + + DataTypeStrings[static_cast(DataType::kSets)] + ", get type: " + + DataTypeStrings[static_cast(GetMetaValueType(meta_value))]); + } + } + if (s.ok()) { + ParsedSetsMetaValue parsed_sets_meta_value(&meta_value); + statistic = parsed_sets_meta_value.Count(); + version = parsed_sets_meta_value.InitialMetaValue(); + if (!parsed_sets_meta_value.check_set_count(static_cast(members.size()))) { + return Status::InvalidArgument("set size overflow"); + } + parsed_sets_meta_value.SetCount(static_cast(members.size())); + batch.Put(handles_[kMetaCF], base_destination.Encode(), meta_value); + } else if (s.IsNotFound()) { + char str[4]; + EncodeFixed32(str, members.size()); + SetsMetaValue sets_meta_value(DataType::kSets, Slice(str, 4)); + version = sets_meta_value.UpdateVersion(); + batch.Put(handles_[kMetaCF], base_destination.Encode(), sets_meta_value.Encode()); + } else { + return s; + } + for (const auto& member : members) { + SetsMemberKey sets_member_key(destination, version, member); + BaseDataValue iter_value(Slice{}); + batch.Put(handles_[kSetsDataCF], sets_member_key.Encode(), iter_value.Encode()); + } + *ret = static_cast(members.size()); + s = db_->Write(default_write_options_, &batch); + UpdateSpecificKeyStatistics(DataType::kSets, destination.ToString(), statistic); + value_to_dest = std::move(members); + return s; +} + +rocksdb::Status Redis::SInter(const std::vector& keys, std::vector* members) { + if (keys.empty()) { + return rocksdb::Status::Corruption("SInter invalid parameter, no keys"); + } + + rocksdb::ReadOptions read_options; + const rocksdb::Snapshot* snapshot; + + std::string meta_value; + uint64_t version = 0; + ScopeSnapshot ss(db_, &snapshot); + read_options.snapshot = snapshot; + std::vector vaild_sets; + rocksdb::Status s; + + for (uint32_t idx = 1; idx < keys.size(); ++idx) { + BaseMetaKey base_meta_key(keys[idx]); + s = db_->Get(read_options, handles_[kMetaCF], base_meta_key.Encode(), &meta_value); + if (s.ok() && !ExpectedMetaValue(DataType::kSets, meta_value)) { + if (ExpectedStale(meta_value)) { + s = Status::NotFound(); + } else { + return Status::InvalidArgument( + "WRONGTYPE, key: " + keys[idx] + ", expect type: " + + DataTypeStrings[static_cast(DataType::kSets)] + ", get type: " + + DataTypeStrings[static_cast(GetMetaValueType(meta_value))]); + } + } + if (s.ok()) { + ParsedSetsMetaValue parsed_sets_meta_value(&meta_value); + if (parsed_sets_meta_value.IsStale() || parsed_sets_meta_value.Count() == 0) { + return rocksdb::Status::OK(); + } else { + vaild_sets.push_back({keys[idx], parsed_sets_meta_value.Version()}); + } + } else if (s.IsNotFound()) { + return rocksdb::Status::OK(); + } else { + return s; + } + } + + BaseMetaKey base_meta_key0(keys[0]); + s = db_->Get(read_options, handles_[kMetaCF], base_meta_key0.Encode(), &meta_value); + if (s.ok() && !ExpectedMetaValue(DataType::kSets, meta_value)) { + if (ExpectedStale(meta_value)) { + s = Status::NotFound(); + } else { + return Status::InvalidArgument( + "WRONGTYPE, key: " + keys[0] + ", expect type: " + + DataTypeStrings[static_cast(DataType::kSets)] + ", get type: " + + DataTypeStrings[static_cast(GetMetaValueType(meta_value))]); + } + } + if (s.ok()) { + ParsedSetsMetaValue parsed_sets_meta_value(&meta_value); + if (parsed_sets_meta_value.IsStale() || parsed_sets_meta_value.Count() == 0) { + return rocksdb::Status::OK(); + } else { + bool reliable; + std::string member_value; + version = 
parsed_sets_meta_value.Version(); + SetsMemberKey sets_member_key(keys[0], version, Slice()); + KeyStatisticsDurationGuard guard(this, DataType::kSets, keys[0]); + Slice prefix = sets_member_key.EncodeSeekKey(); + auto iter = db_->NewIterator(read_options, handles_[kSetsDataCF]); + for (iter->Seek(prefix); iter->Valid() && iter->key().starts_with(prefix); iter->Next()) { + ParsedSetsMemberKey parsed_sets_member_key(iter->key()); + Slice member = parsed_sets_member_key.member(); + + reliable = true; + for (const auto& key_version : vaild_sets) { + SetsMemberKey sets_member_key(key_version.key, key_version.version, member); + s = db_->Get(read_options, handles_[kSetsDataCF], sets_member_key.Encode(), &member_value); + if (s.ok()) { + continue; + } else if (s.IsNotFound()) { + reliable = false; + break; + } else { + delete iter; + return s; + } + } + if (reliable) { + members->push_back(member.ToString()); + } + } + delete iter; + } + } else if (s.IsNotFound()) { + return rocksdb::Status::OK(); + } else { + return s; + } + return rocksdb::Status::OK(); +} + +rocksdb::Status Redis::SInterstore(const Slice& destination, const std::vector& keys, std::vector& value_to_dest, int32_t* ret) { + if (keys.empty()) { + return rocksdb::Status::Corruption("SInterstore invalid parameter, no keys"); + } + + rocksdb::WriteBatch batch; + rocksdb::ReadOptions read_options; + const rocksdb::Snapshot* snapshot; + + std::string meta_value; + uint64_t version = 0; + bool have_invalid_sets = false; + ScopeRecordLock l(lock_mgr_, destination); + ScopeSnapshot ss(db_, &snapshot); + read_options.snapshot = snapshot; + std::vector vaild_sets; + rocksdb::Status s; + + for (uint32_t idx = 1; idx < keys.size(); ++idx) { + BaseMetaKey base_meta_key(keys[idx]); + s = db_->Get(read_options, handles_[kMetaCF], base_meta_key.Encode(), &meta_value); + if (s.ok() && !ExpectedMetaValue(DataType::kSets, meta_value)) { + if (ExpectedStale(meta_value)) { + s = Status::NotFound(); + } else { + return Status::InvalidArgument( + "WRONGTYPE, key: " + keys[idx] + ", expect type: " + + DataTypeStrings[static_cast(DataType::kSets)] + ", get type: " + + DataTypeStrings[static_cast(GetMetaValueType(meta_value))]); + } + } + if (s.ok()) { + ParsedSetsMetaValue parsed_sets_meta_value(&meta_value); + if (parsed_sets_meta_value.IsStale() || parsed_sets_meta_value.Count() == 0) { + have_invalid_sets = true; + break; + } else { + vaild_sets.push_back({keys[idx], parsed_sets_meta_value.Version()}); + } + } else if (s.IsNotFound()) { + have_invalid_sets = true; + break; + } else { + return s; + } + } + + std::vector members; + if (!have_invalid_sets) { + BaseMetaKey base_meta_key0(keys[0]); + s = db_->Get(read_options, handles_[kMetaCF], base_meta_key0.Encode(), &meta_value); + if (s.ok() && !ExpectedMetaValue(DataType::kSets, meta_value)) { + if (ExpectedStale(meta_value)) { + s = Status::NotFound(); + } else { + return Status::InvalidArgument( + "WRONGTYPE, key: " + keys[0] + ", expect type: " + + DataTypeStrings[static_cast(DataType::kSets)] + ", get type: " + + DataTypeStrings[static_cast(GetMetaValueType(meta_value))]); + } + } + if (s.ok()) { + ParsedSetsMetaValue parsed_sets_meta_value(&meta_value); + if (parsed_sets_meta_value.IsStale() || parsed_sets_meta_value.Count() == 0) { + have_invalid_sets = true; + } else { + bool reliable; + std::string member_value; + version = parsed_sets_meta_value.Version(); + SetsMemberKey sets_member_key(keys[0], version, Slice()); + Slice prefix = sets_member_key.EncodeSeekKey(); + 
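+      // The loop below walks every member of keys[0] and probes it against each
+      // remaining valid set with a point Get on kSetsDataCF; a member survives
+      // only if it is found in all of them, i.e. O(|keys[0]| * (n - 1)) point
+      // lookups under one snapshot. Illustrative semantics, with hypothetical
+      // keys:
+      //
+      //   SADD s1 a b c d ; SADD s2 b c ; SADD s3 b c e
+      //   SINTERSTORE dest s1 s2 s3   => dest = {b, c}, reply (integer) 2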
+      KeyStatisticsDurationGuard guard(this, DataType::kSets, keys[0]);
+      auto iter = db_->NewIterator(read_options, handles_[kSetsDataCF]);
+      for (iter->Seek(prefix); iter->Valid() && iter->key().starts_with(prefix); iter->Next()) {
+        ParsedSetsMemberKey parsed_sets_member_key(iter->key());
+        Slice member = parsed_sets_member_key.member();
+
+        reliable = true;
+        for (const auto& key_version : vaild_sets) {
+          SetsMemberKey sets_member_key(key_version.key, key_version.version, member);
+          s = db_->Get(read_options, handles_[kSetsDataCF], sets_member_key.Encode(), &member_value);
+          if (s.ok()) {
+            continue;
+          } else if (s.IsNotFound()) {
+            reliable = false;
+            break;
+          } else {
+            delete iter;
+            return s;
+          }
+        }
+        if (reliable) {
+          members.push_back(member.ToString());
+        }
+      }
+      delete iter;
+      }
+    } else if (s.IsNotFound()) {
+      // keys[0] does not exist: the intersection result stays empty
+    } else {
+      return s;
+    }
+  }
+
+  uint32_t statistic = 0;
+  BaseMetaKey base_destination(destination);
+  s = db_->Get(read_options, handles_[kMetaCF], base_destination.Encode(), &meta_value);
+  if (s.ok() && !ExpectedMetaValue(DataType::kSets, meta_value)) {
+    if (ExpectedStale(meta_value)) {
+      s = Status::NotFound();
+    } else {
+      return Status::InvalidArgument(
+          "WRONGTYPE, key: " + destination.ToString() + ", expect type: " +
+          DataTypeStrings[static_cast<int>(DataType::kSets)] + ", get type: " +
+          DataTypeStrings[static_cast<int>(GetMetaValueType(meta_value))]);
+    }
+  }
+  if (s.ok()) {
+    ParsedSetsMetaValue parsed_sets_meta_value(&meta_value);
+    statistic = parsed_sets_meta_value.Count();
+    version = parsed_sets_meta_value.InitialMetaValue();
+    if (!parsed_sets_meta_value.check_set_count(static_cast<int32_t>(members.size()))) {
+      return Status::InvalidArgument("set size overflow");
+    }
+    parsed_sets_meta_value.SetCount(static_cast<int32_t>(members.size()));
+    batch.Put(handles_[kMetaCF], base_destination.Encode(), meta_value);
+  } else if (s.IsNotFound()) {
+    char str[4];
+    EncodeFixed32(str, members.size());
+    SetsMetaValue sets_meta_value(DataType::kSets, Slice(str, 4));
+    version = sets_meta_value.UpdateVersion();
+    batch.Put(handles_[kMetaCF], base_destination.Encode(), sets_meta_value.Encode());
+  } else {
+    return s;
+  }
+  for (const auto& member : members) {
+    SetsMemberKey sets_member_key(destination, version, member);
+    BaseDataValue iter_value(Slice{});
+    batch.Put(handles_[kSetsDataCF], sets_member_key.Encode(), iter_value.Encode());
+  }
+  *ret = static_cast<int32_t>(members.size());
+  s = db_->Write(default_write_options_, &batch);
+  UpdateSpecificKeyStatistics(DataType::kSets, destination.ToString(), statistic);
+  value_to_dest = std::move(members);
+  return s;
+}
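+
+// Note on the *store commands above: rather than deleting the destination's
+// old members one by one, InitialMetaValue() bumps the meta version, so every
+// SetsMemberKey{destination, old_version, member} stops matching the new
+// prefix and is expected to be reclaimed lazily (e.g. by a compaction filter).
+// A minimal sketch of the idea, with illustrative field names rather than the
+// real encoding:
+//
+//   struct SetsMeta { uint64_t version; int32_t count; uint64_t etime; };
+//   // overwrite: meta.version++;  old data-CF entries become unreachable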
+
+rocksdb::Status Redis::SIsmember(const Slice& key, const Slice& member, int32_t* ret) {
+  *ret = 0;
+  rocksdb::ReadOptions read_options;
+  const rocksdb::Snapshot* snapshot;
+
+  std::string meta_value;
+  uint64_t version = 0;
+  ScopeSnapshot ss(db_, &snapshot);
+  read_options.snapshot = snapshot;
+
+  BaseMetaKey base_meta_key(key);
+  rocksdb::Status s = db_->Get(read_options, handles_[kMetaCF], base_meta_key.Encode(), &meta_value);
+  if (s.ok() && !ExpectedMetaValue(DataType::kSets, meta_value)) {
+    if (ExpectedStale(meta_value)) {
+      s = Status::NotFound();
+    } else {
+      return Status::InvalidArgument(
+          "WRONGTYPE, key: " + key.ToString() + ", expect type: " +
+          DataTypeStrings[static_cast<int>(DataType::kSets)] + ", get type: " +
+          DataTypeStrings[static_cast<int>(GetMetaValueType(meta_value))]);
+    }
+  }
+  if (s.ok()) {
+    ParsedSetsMetaValue parsed_sets_meta_value(&meta_value);
+    if (parsed_sets_meta_value.IsStale()) {
+      return rocksdb::Status::NotFound("Stale");
+    } else if (parsed_sets_meta_value.Count() == 0) {
+      return rocksdb::Status::NotFound();
+    } else {
+      std::string member_value;
+      version = parsed_sets_meta_value.Version();
+      SetsMemberKey sets_member_key(key, version, member);
+      s = db_->Get(read_options, handles_[kSetsDataCF], sets_member_key.Encode(), &member_value);
+      *ret = s.ok() ? 1 : 0;
+    }
+  } else if (s.IsNotFound()) {
+    *ret = 0;
+  }
+  return s;
+}
+
+rocksdb::Status Redis::SMembers(const Slice& key, std::vector<std::string>* members) {
+  rocksdb::ReadOptions read_options;
+  const rocksdb::Snapshot* snapshot;
+
+  std::string meta_value;
+  uint64_t version = 0;
+  ScopeSnapshot ss(db_, &snapshot);
+  read_options.snapshot = snapshot;
+
+  BaseMetaKey base_meta_key(key);
+  rocksdb::Status s = db_->Get(read_options, handles_[kMetaCF], base_meta_key.Encode(), &meta_value);
+  if (s.ok() && !ExpectedMetaValue(DataType::kSets, meta_value)) {
+    if (ExpectedStale(meta_value)) {
+      s = Status::NotFound();
+    } else {
+      return Status::InvalidArgument(
+          "WRONGTYPE, key: " + key.ToString() + ", expect type: " +
+          DataTypeStrings[static_cast<int>(DataType::kSets)] + ", get type: " +
+          DataTypeStrings[static_cast<int>(GetMetaValueType(meta_value))]);
+    }
+  }
+  if (s.ok()) {
+    ParsedSetsMetaValue parsed_sets_meta_value(&meta_value);
+    if (parsed_sets_meta_value.IsStale()) {
+      return rocksdb::Status::NotFound("Stale");
+    } else if (parsed_sets_meta_value.Count() == 0) {
+      return rocksdb::Status::NotFound();
+    } else {
+      version = parsed_sets_meta_value.Version();
+      SetsMemberKey sets_member_key(key, version, Slice());
+      Slice prefix = sets_member_key.EncodeSeekKey();
+      KeyStatisticsDurationGuard guard(this, DataType::kSets, key.ToString());
+      auto iter = db_->NewIterator(read_options, handles_[kSetsDataCF]);
+      for (iter->Seek(prefix); iter->Valid() && iter->key().starts_with(prefix); iter->Next()) {
+        ParsedSetsMemberKey parsed_sets_member_key(iter->key());
+        members->push_back(parsed_sets_member_key.member().ToString());
+      }
+      delete iter;
+    }
+  }
+  return s;
+}
+
+Status Redis::SMembersWithTTL(const Slice& key,
+                              std::vector<std::string>* members,
+                              int64_t* ttl_millsec) {
+  rocksdb::ReadOptions read_options;
+  const rocksdb::Snapshot* snapshot;
+
+  std::string meta_value;
+  uint64_t version = 0;
+  ScopeSnapshot ss(db_, &snapshot);
+  read_options.snapshot = snapshot;
+  BaseMetaKey base_meta_key(key);
+  rocksdb::Status s = db_->Get(read_options, handles_[kMetaCF], base_meta_key.Encode(), &meta_value);
+  if (s.ok() && !ExpectedMetaValue(DataType::kSets, meta_value)) {
+    if (ExpectedStale(meta_value)) {
+      s = Status::NotFound();
+    } else {
+      return Status::InvalidArgument(
+          "WRONGTYPE, key: " + key.ToString() + ", expect type: " +
+          DataTypeStrings[static_cast<int>(DataType::kSets)] + ", get type: " +
+          DataTypeStrings[static_cast<int>(GetMetaValueType(meta_value))]);
+    }
+  }
+  if (s.ok()) {
+    ParsedSetsMetaValue parsed_sets_meta_value(&meta_value);
+    if (parsed_sets_meta_value.Count() == 0) {
+      return Status::NotFound();
+    } else if (parsed_sets_meta_value.IsStale()) {
+      return Status::NotFound("Stale");
+    } else {
+      // ttl
+      *ttl_millsec = parsed_sets_meta_value.Etime();
+      if (*ttl_millsec
== 0) { + *ttl_millsec = -1; + } else { + pstd::TimeType curtime = pstd::NowMillis(); + *ttl_millsec = *ttl_millsec - curtime >= 0 ? *ttl_millsec - curtime : -2; + } + + version = parsed_sets_meta_value.Version(); + SetsMemberKey sets_member_key(key, version, Slice()); + Slice prefix = sets_member_key.EncodeSeekKey(); + KeyStatisticsDurationGuard guard(this, DataType::kSets, key.ToString()); + auto iter = db_->NewIterator(read_options, handles_[kSetsDataCF]); + for (iter->Seek(prefix); + iter->Valid() && iter->key().starts_with(prefix); + iter->Next()) { + ParsedSetsMemberKey parsed_sets_member_key(iter->key()); + members->push_back(parsed_sets_member_key.member().ToString()); + } + delete iter; + } + } + return s; +} + +rocksdb::Status Redis::SMove(const Slice& source, const Slice& destination, const Slice& member, int32_t* ret) { + *ret = 0; + rocksdb::WriteBatch batch; + rocksdb::ReadOptions read_options; + + uint64_t version = 0; + uint32_t statistic = 0; + std::string meta_value; + std::vector keys{source.ToString(), destination.ToString()}; + MultiScopeRecordLock ml(lock_mgr_, keys); + + if (source == destination) { + *ret = 1; + return rocksdb::Status::OK(); + } + + BaseMetaKey base_source(source); + rocksdb::Status s = db_->Get(default_read_options_, handles_[kMetaCF], base_source.Encode(), &meta_value); + if (s.ok() && !ExpectedMetaValue(DataType::kSets, meta_value)) { + if (ExpectedStale(meta_value)) { + s = Status::NotFound(); + } else { + return Status::InvalidArgument( + "WRONGTYPE, key: " + source.ToString() + ", expect type: " + + DataTypeStrings[static_cast(DataType::kSets)] + ", get type: " + + DataTypeStrings[static_cast(GetMetaValueType(meta_value))]); + } + } + if (s.ok()) { + ParsedSetsMetaValue parsed_sets_meta_value(&meta_value); + if (parsed_sets_meta_value.IsStale()) { + return rocksdb::Status::NotFound("Stale"); + } else if (parsed_sets_meta_value.Count() == 0) { + return rocksdb::Status::NotFound(); + } else { + std::string member_value; + version = parsed_sets_meta_value.Version(); + SetsMemberKey sets_member_key(source, version, member); + s = db_->Get(default_read_options_, handles_[kSetsDataCF], sets_member_key.Encode(), &member_value); + if (s.ok()) { + *ret = 1; + if (!parsed_sets_meta_value.CheckModifyCount(-1)) { + return Status::InvalidArgument("set size overflow"); + } + parsed_sets_meta_value.ModifyCount(-1); + batch.Put(handles_[kMetaCF], base_source.Encode(), meta_value); + batch.Delete(handles_[kSetsDataCF], sets_member_key.Encode()); + statistic++; + } else if (s.IsNotFound()) { + *ret = 0; + return rocksdb::Status::NotFound(); + } else { + return s; + } + } + } else if (s.IsNotFound()) { + *ret = 0; + return rocksdb::Status::NotFound(); + } else { + return s; + } + + BaseMetaKey base_destination(destination); + s = db_->Get(default_read_options_, handles_[kMetaCF], base_destination.Encode(), &meta_value); + if (s.ok() && !ExpectedMetaValue(DataType::kSets, meta_value)) { + if (ExpectedStale(meta_value)) { + s = Status::NotFound(); + } else { + return Status::InvalidArgument( + "WRONGTYPE, key: " + destination.ToString() + ", expect type: " + + DataTypeStrings[static_cast(DataType::kSets)] + ", get type: " + + DataTypeStrings[static_cast(GetMetaValueType(meta_value))]); + } + } + if (s.ok()) { + ParsedSetsMetaValue parsed_sets_meta_value(&meta_value); + if (parsed_sets_meta_value.IsStale() || parsed_sets_meta_value.Count() == 0) { + version = parsed_sets_meta_value.InitialMetaValue(); + parsed_sets_meta_value.SetCount(1); + 
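+      // SMOVE semantics implemented here: the member was already removed from
+      // the source above; a stale or empty destination is re-initialized to
+      // hold exactly the moved member. Illustrative outcome, hypothetical keys:
+      //
+      //   SADD src a b ; SMOVE src dst a   => src = {b}, dst = {a}, reply 1
+      //   SMOVE src dst missing            => reply 0, nothing written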
batch.Put(handles_[kMetaCF], base_destination.Encode(), meta_value); + SetsMemberKey sets_member_key(destination, version, member); + BaseDataValue i_val(Slice{}); + batch.Put(handles_[kSetsDataCF], sets_member_key.Encode(), i_val.Encode()); + } else { + std::string member_value; + version = parsed_sets_meta_value.Version(); + SetsMemberKey sets_member_key(destination, version, member); + s = db_->Get(default_read_options_, handles_[kSetsDataCF], sets_member_key.Encode(), &member_value); + if (s.IsNotFound()) { + if (!parsed_sets_meta_value.CheckModifyCount(1)) { + return Status::InvalidArgument("set size overflow"); + } + parsed_sets_meta_value.ModifyCount(1); + BaseDataValue iter_value(Slice{}); + batch.Put(handles_[kMetaCF], base_destination.Encode(), meta_value); + batch.Put(handles_[kSetsDataCF], sets_member_key.Encode(), iter_value.Encode()); + } else if (!s.ok()) { + return s; + } + } + } else if (s.IsNotFound()) { + char str[4]; + EncodeFixed32(str, 1); + SetsMetaValue sets_meta_value(DataType::kSets, Slice(str, 4)); + version = sets_meta_value.UpdateVersion(); + batch.Put(handles_[kMetaCF], base_destination.Encode(), sets_meta_value.Encode()); + SetsMemberKey sets_member_key(destination, version, member); + BaseDataValue iter_value(Slice{}); + batch.Put(handles_[kSetsDataCF], sets_member_key.Encode(), iter_value.Encode()); + } else { + return s; + } + s = db_->Write(default_write_options_, &batch); + UpdateSpecificKeyStatistics(DataType::kSets, source.ToString(), 1); + return s; +} + +rocksdb::Status Redis::SPop(const Slice& key, std::vector* members, int64_t cnt) { + std::default_random_engine engine; + + std::string meta_value; + rocksdb::WriteBatch batch; + ScopeRecordLock l(lock_mgr_, key); + + BaseMetaKey base_meta_key(key); + Status s = db_->Get(default_read_options_, handles_[kMetaCF], base_meta_key.Encode(), &meta_value); + if (s.ok() && !ExpectedMetaValue(DataType::kSets, meta_value)) { + if (ExpectedStale(meta_value)) { + s = Status::NotFound(); + } else { + return Status::InvalidArgument( + "WRONGTYPE, key: " + key.ToString() + ", expect type: " + + DataTypeStrings[static_cast(DataType::kSets)] + ", get type: " + + DataTypeStrings[static_cast(GetMetaValueType(meta_value))]); + } + } + if (s.ok()) { + ParsedSetsMetaValue parsed_sets_meta_value(&meta_value); + if (parsed_sets_meta_value.IsStale()) { + return Status::NotFound("Stale"); + } else if (parsed_sets_meta_value.Count() == 0) { + return Status::NotFound(); + } else { + int32_t length = parsed_sets_meta_value.Count(); + if (length < cnt) { + int32_t size = parsed_sets_meta_value.Count(); + int32_t cur_index = 0; + uint64_t version = parsed_sets_meta_value.Version(); + SetsMemberKey sets_member_key(key, version, Slice()); + auto iter = db_->NewIterator(default_read_options_, handles_[kSetsDataCF]); + for (iter->Seek(sets_member_key.EncodeSeekKey()); + iter->Valid() && cur_index < size; + iter->Next(), cur_index++) { + + batch.Delete(handles_[kSetsDataCF], iter->key()); + ParsedSetsMemberKey parsed_sets_member_key(iter->key()); + members->push_back(parsed_sets_member_key.member().ToString()); + + } + + //parsed_sets_meta_value.ModifyCount(-cnt); + //batch.Put(handles_[kMetaCF], key, meta_value); + batch.Delete(handles_[kMetaCF], base_meta_key.Encode()); + delete iter; + + } else { + engine.seed(time(nullptr)); + int32_t cur_index = 0; + int32_t size = parsed_sets_meta_value.Count(); + int32_t target_index = -1; + uint64_t version = parsed_sets_meta_value.Version(); + std::unordered_set sets_index; + int32_t modnum = 
size; + + for (int64_t cur_round = 0; + cur_round < cnt; + cur_round++) { + do { + target_index = static_cast( engine() % modnum); + } while (sets_index.find(target_index) != sets_index.end()); + sets_index.insert(target_index); + } + + SetsMemberKey sets_member_key(key, version, Slice()); + int64_t del_count = 0; + KeyStatisticsDurationGuard guard(this, DataType::kSets, key.ToString()); + auto iter = db_->NewIterator(default_read_options_, handles_[kSetsDataCF]); + for (iter->Seek(sets_member_key.EncodeSeekKey()); + iter->Valid() && cur_index < size; + iter->Next(), cur_index++) { + if (del_count == cnt) { + break; + } + if (sets_index.find(cur_index) != sets_index.end()) { + del_count++; + batch.Delete(handles_[kSetsDataCF], iter->key()); + ParsedSetsMemberKey parsed_sets_member_key(iter->key()); + members->push_back(parsed_sets_member_key.member().ToString()); + } + } + + if (!parsed_sets_meta_value.CheckModifyCount(static_cast(-cnt))) { + return Status::InvalidArgument("set size overflow"); + } + parsed_sets_meta_value.ModifyCount(static_cast(-cnt)); + batch.Put(handles_[kMetaCF], base_meta_key.Encode(), meta_value); + delete iter; + } + } + } else { + return s; + } + return db_->Write(default_write_options_, &batch); +} + +rocksdb::Status Redis::ResetSpopCount(const std::string& key) { return spop_counts_store_->Remove(key); } + +rocksdb::Status Redis::AddAndGetSpopCount(const std::string& key, uint64_t* count) { + size_t old_count = 0; + spop_counts_store_->Lookup(key, &old_count); + spop_counts_store_->Insert(key, old_count + 1); + *count = old_count + 1; + return rocksdb::Status::OK(); +} + +rocksdb::Status Redis::SRandmember(const Slice& key, int32_t count, std::vector* members) { + if (count == 0) { + return rocksdb::Status::OK(); + } + + members->clear(); + auto last_seed = pstd::NowMicros(); + std::default_random_engine engine; + + std::string meta_value; + rocksdb::WriteBatch batch; + ScopeRecordLock l(lock_mgr_, key); + std::vector targets; + std::unordered_set unique; + + + BaseMetaKey base_meta_key(key); + rocksdb::Status s = db_->Get(default_read_options_, handles_[kMetaCF], base_meta_key.Encode(), &meta_value); + if (s.ok() && !ExpectedMetaValue(DataType::kSets, meta_value)) { + if (ExpectedStale(meta_value)) { + s = Status::NotFound(); + } else { + return Status::InvalidArgument( + "WRONGTYPE, key: " + key.ToString() + ", expect type: " + + DataTypeStrings[static_cast(DataType::kSets)] + ", get type: " + + DataTypeStrings[static_cast(GetMetaValueType(meta_value))]); + } + } + if (s.ok()) { + ParsedSetsMetaValue parsed_sets_meta_value(&meta_value); + if (parsed_sets_meta_value.IsStale()) { + return rocksdb::Status::NotFound("Stale"); + } else if (parsed_sets_meta_value.Count() == 0) { + return rocksdb::Status::NotFound(); + } else { + int32_t size = parsed_sets_meta_value.Count(); + uint64_t version = parsed_sets_meta_value.Version(); + if (count > 0) { + count = count <= size ? 
count : size; + while (targets.size() < static_cast(count)) { + engine.seed(last_seed); + last_seed = static_cast(engine()); + auto pos = static_cast(last_seed % size); + if (unique.find(pos) == unique.end()) { + unique.insert(pos); + targets.push_back(pos); + } + } + } else { + count = -count; + while (targets.size() < static_cast(count)) { + engine.seed(last_seed); + last_seed = static_cast(engine()); + targets.push_back(static_cast(last_seed % size)); + } + } + std::sort(targets.begin(), targets.end()); + + int32_t cur_index = 0; + int32_t idx = 0; + SetsMemberKey sets_member_key(key, version, Slice()); + KeyStatisticsDurationGuard guard(this, DataType::kSets, key.ToString()); + auto iter = db_->NewIterator(default_read_options_, handles_[kSetsDataCF]); + for (iter->Seek(sets_member_key.EncodeSeekKey()); iter->Valid() && cur_index < size; iter->Next(), cur_index++) { + if (static_cast(idx) >= targets.size()) { + break; + } + ParsedSetsMemberKey parsed_sets_member_key(iter->key()); + while (static_cast(idx) < targets.size() && cur_index == targets[idx]) { + idx++; + members->push_back(parsed_sets_member_key.member().ToString()); + } + } + + std::shuffle(members->begin(), members->end(), engine); + delete iter; + } + } + return s; +} + +rocksdb::Status Redis::SRem(const Slice& key, const std::vector& members, int32_t* ret) { + *ret = 0; + rocksdb::WriteBatch batch; + ScopeRecordLock l(lock_mgr_, key); + + uint64_t version = 0; + uint32_t statistic = 0; + std::string meta_value; + + BaseMetaKey base_meta_key(key); + rocksdb::Status s = db_->Get(default_read_options_, handles_[kMetaCF], base_meta_key.Encode(), &meta_value); + if (s.ok() && !ExpectedMetaValue(DataType::kSets, meta_value)) { + if (ExpectedStale(meta_value)) { + s = Status::NotFound(); + } else { + return Status::InvalidArgument( + "WRONGTYPE, key: " + key.ToString() + ", expect type: " + + DataTypeStrings[static_cast(DataType::kSets)] + ", get type: " + + DataTypeStrings[static_cast(GetMetaValueType(meta_value))]); + } + } + if (s.ok()) { + ParsedSetsMetaValue parsed_sets_meta_value(&meta_value); + if (parsed_sets_meta_value.IsStale()) { + return rocksdb::Status::NotFound("stale"); + } else if (parsed_sets_meta_value.Count() == 0) { + return rocksdb::Status::NotFound(); + } else { + int32_t cnt = 0; + std::string member_value; + version = parsed_sets_meta_value.Version(); + for (const auto& member : members) { + SetsMemberKey sets_member_key(key, version, member); + s = db_->Get(default_read_options_, handles_[kSetsDataCF], sets_member_key.Encode(), &member_value); + if (s.ok()) { + cnt++; + statistic++; + batch.Delete(handles_[kSetsDataCF], sets_member_key.Encode()); + } else if (s.IsNotFound()) { + } else { + return s; + } + } + *ret = cnt; + if (!parsed_sets_meta_value.CheckModifyCount(-cnt)) { + return Status::InvalidArgument("set size overflow"); + } + parsed_sets_meta_value.ModifyCount(-cnt); + batch.Put(handles_[kMetaCF], base_meta_key.Encode(), meta_value); + } + } else if (s.IsNotFound()) { + *ret = 0; + return rocksdb::Status::NotFound(); + } else { + return s; + } + s = db_->Write(default_write_options_, &batch); + UpdateSpecificKeyStatistics(DataType::kSets, key.ToString(), statistic); + return s; +} + +rocksdb::Status Redis::SUnion(const std::vector& keys, std::vector* members) { + if (keys.empty()) { + return rocksdb::Status::Corruption("SUnion invalid parameter, no keys"); + } + + rocksdb::ReadOptions read_options; + const rocksdb::Snapshot* snapshot; + + std::string meta_value; + ScopeSnapshot ss(db_, 
&snapshot); + read_options.snapshot = snapshot; + std::vector vaild_sets; + rocksdb::Status s; + + for (const auto & key : keys) { + BaseMetaKey base_meta_key(key); + s = db_->Get(read_options, handles_[kMetaCF], base_meta_key.Encode(), &meta_value); + if (s.ok() && !ExpectedMetaValue(DataType::kSets, meta_value)) { + if (ExpectedStale(meta_value)) { + s = Status::NotFound(); + } else { + return Status::InvalidArgument( + "WRONGTYPE, key: " + key + ", expect type: " + + DataTypeStrings[static_cast(DataType::kSets)] + ", get type: " + + DataTypeStrings[static_cast(GetMetaValueType(meta_value))]); + } + } + if (s.ok()) { + ParsedSetsMetaValue parsed_sets_meta_value(&meta_value); + if (!parsed_sets_meta_value.IsStale() && parsed_sets_meta_value.Count() != 0) { + vaild_sets.push_back({key, parsed_sets_meta_value.Version()}); + } + } else if (!s.IsNotFound()) { + return s; + } + } + + Slice prefix; + std::map result_flag; + for (const auto& key_version : vaild_sets) { + SetsMemberKey sets_member_key(key_version.key, key_version.version, Slice()); + prefix = sets_member_key.EncodeSeekKey(); + KeyStatisticsDurationGuard guard(this, DataType::kSets, key_version.key); + auto iter = db_->NewIterator(read_options, handles_[kSetsDataCF]); + for (iter->Seek(prefix); iter->Valid() && iter->key().starts_with(prefix); iter->Next()) { + ParsedSetsMemberKey parsed_sets_member_key(iter->key()); + std::string member = parsed_sets_member_key.member().ToString(); + if (result_flag.find(member) == result_flag.end()) { + members->push_back(member); + result_flag[member] = true; + } + } + delete iter; + } + return rocksdb::Status::OK(); +} + +rocksdb::Status Redis::SUnionstore(const Slice& destination, const std::vector& keys, std::vector& value_to_dest, int32_t* ret) { + if (keys.empty()) { + return rocksdb::Status::Corruption("SUnionstore invalid parameter, no keys"); + } + + rocksdb::WriteBatch batch; + rocksdb::ReadOptions read_options; + const rocksdb::Snapshot* snapshot; + + std::string meta_value; + uint64_t version = 0; + ScopeRecordLock l(lock_mgr_, destination); + ScopeSnapshot ss(db_, &snapshot); + read_options.snapshot = snapshot; + std::vector vaild_sets; + rocksdb::Status s; + + for (const auto & key : keys) { + BaseMetaKey base_meta_key(key); + s = db_->Get(read_options, handles_[kMetaCF], base_meta_key.Encode(), &meta_value); + if (s.ok() && !ExpectedMetaValue(DataType::kSets, meta_value)) { + if (ExpectedStale(meta_value)) { + s = Status::NotFound(); + } else { + return Status::InvalidArgument( + "WRONGTYPE, key: " + key + ", expect type: " + + DataTypeStrings[static_cast(DataType::kSets)] + ", get type: " + + DataTypeStrings[static_cast(GetMetaValueType(meta_value))]); + } + } + if (s.ok()) { + ParsedSetsMetaValue parsed_sets_meta_value(&meta_value); + if (!parsed_sets_meta_value.IsStale() && parsed_sets_meta_value.Count() != 0) { + vaild_sets.push_back({key, parsed_sets_meta_value.Version()}); + } + } else if (!s.IsNotFound()) { + return s; + } + } + + Slice prefix; + std::vector members; + std::map result_flag; + for (const auto& key_version : vaild_sets) { + SetsMemberKey sets_member_key(key_version.key, key_version.version, Slice()); + prefix = sets_member_key.EncodeSeekKey(); + KeyStatisticsDurationGuard guard(this, DataType::kSets, key_version.key); + auto iter = db_->NewIterator(read_options, handles_[kSetsDataCF]); + for (iter->Seek(prefix); iter->Valid() && iter->key().starts_with(prefix); iter->Next()) { + ParsedSetsMemberKey parsed_sets_member_key(iter->key()); + std::string member = 
parsed_sets_member_key.member().ToString(); + if (result_flag.find(member) == result_flag.end()) { + members.push_back(member); + result_flag[member] = true; + } + } + delete iter; + } + + uint32_t statistic = 0; + BaseMetaKey base_destination(destination); + s = db_->Get(read_options, handles_[kMetaCF], base_destination.Encode(), &meta_value); + if (s.ok() && !ExpectedMetaValue(DataType::kSets, meta_value)) { + if (ExpectedStale(meta_value)) { + s = Status::NotFound(); + } else { + return Status::InvalidArgument( + "WRONGTYPE, key: " + destination.ToString() + ", expect type: " + + DataTypeStrings[static_cast(DataType::kSets)] + ", get type: " + + DataTypeStrings[static_cast(GetMetaValueType(meta_value))]); + } + } + if (s.ok()) { + ParsedSetsMetaValue parsed_sets_meta_value(&meta_value); + statistic = parsed_sets_meta_value.Count(); + version = parsed_sets_meta_value.InitialMetaValue(); + if (!parsed_sets_meta_value.check_set_count(static_cast(members.size()))) { + return Status::InvalidArgument("set size overflow"); + } + parsed_sets_meta_value.SetCount(static_cast(members.size())); + batch.Put(handles_[kMetaCF], destination, meta_value); + } else if (s.IsNotFound()) { + char str[4]; + EncodeFixed32(str, members.size()); + SetsMetaValue sets_meta_value(DataType::kSets, Slice(str, 4)); + version = sets_meta_value.UpdateVersion(); + batch.Put(handles_[kMetaCF], base_destination.Encode(), sets_meta_value.Encode()); + } else { + return s; + } + for (const auto& member : members) { + SetsMemberKey sets_member_key(destination, version, member); + BaseDataValue i_val(Slice{}); + batch.Put(handles_[kSetsDataCF], sets_member_key.Encode(), i_val.Encode()); + } + *ret = static_cast(members.size()); + s = db_->Write(default_write_options_, &batch); + UpdateSpecificKeyStatistics(DataType::kSets, destination.ToString(), statistic); + value_to_dest = std::move(members); + return s; +} + +rocksdb::Status Redis::SScan(const Slice& key, int64_t cursor, const std::string& pattern, int64_t count, + std::vector* members, int64_t* next_cursor) { + *next_cursor = 0; + members->clear(); + if (cursor < 0) { + *next_cursor = 0; + return rocksdb::Status::OK(); + } + + int64_t rest = count; + int64_t step_length = count; + rocksdb::ReadOptions read_options; + const rocksdb::Snapshot* snapshot; + + std::string meta_value; + ScopeSnapshot ss(db_, &snapshot); + read_options.snapshot = snapshot; + + BaseMetaKey base_meta_key(key); + rocksdb::Status s = db_->Get(read_options, handles_[kMetaCF], base_meta_key.Encode(), &meta_value); + if (s.ok() && !ExpectedMetaValue(DataType::kSets, meta_value)) { + if (ExpectedStale(meta_value)) { + s = Status::NotFound(); + } else { + return Status::InvalidArgument( + "WRONGTYPE, key: " + key.ToString() + ", expect type: " + + DataTypeStrings[static_cast(DataType::kSets)] + ", get type: " + + DataTypeStrings[static_cast(GetMetaValueType(meta_value))]); + } + } + if (s.ok()) { + ParsedSetsMetaValue parsed_sets_meta_value(&meta_value); + if (parsed_sets_meta_value.IsStale() || parsed_sets_meta_value.Count() == 0) { + *next_cursor = 0; + return rocksdb::Status::NotFound(); + } else { + std::string sub_member; + std::string start_point; + uint64_t version = parsed_sets_meta_value.Version(); + s = GetScanStartPoint(DataType::kSets, key, pattern, cursor, &start_point); + if (s.IsNotFound()) { + cursor = 0; + if (isTailWildcard(pattern)) { + start_point = pattern.substr(0, pattern.size() - 1); + } + } + if (isTailWildcard(pattern)) { + sub_member = pattern.substr(0, pattern.size() - 1); + } 
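+      // SSCAN cursors here are not rocksdb iterator handles: GetScanStartPoint()
+      // and StoreScanNextPoint() persist the last member seen for a
+      // (key, pattern, cursor) triple, and each call re-seeks from that member.
+      // Illustrative call sequence on a hypothetical key:
+      //
+      //   SSCAN myset 0 MATCH m* COUNT 2   => cursor 2, members {m1, m2}
+      //   SSCAN myset 2 MATCH m* COUNT 2   => cursor 0, members {m3}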
+ + SetsMemberKey sets_member_prefix(key, version, sub_member); + SetsMemberKey sets_member_key(key, version, start_point); + std::string prefix = sets_member_prefix.EncodeSeekKey().ToString(); + KeyStatisticsDurationGuard guard(this, DataType::kSets, key.ToString()); + rocksdb::Iterator* iter = db_->NewIterator(read_options, handles_[kSetsDataCF]); + for (iter->Seek(sets_member_key.EncodeSeekKey()); iter->Valid() && rest > 0 && iter->key().starts_with(prefix); + iter->Next()) { + ParsedSetsMemberKey parsed_sets_member_key(iter->key()); + std::string member = parsed_sets_member_key.member().ToString(); + if (StringMatch(pattern.data(), pattern.size(), member.data(), member.size(), 0) != 0) { + members->push_back(member); + } + rest--; + } + + if (iter->Valid() && (iter->key().compare(prefix) <= 0 || iter->key().starts_with(prefix))) { + *next_cursor = cursor + step_length; + ParsedSetsMemberKey parsed_sets_member_key(iter->key()); + std::string next_member = parsed_sets_member_key.member().ToString(); + StoreScanNextPoint(DataType::kSets, key, pattern, *next_cursor, next_member); + } else { + *next_cursor = 0; + } + delete iter; + } + } else { + *next_cursor = 0; + return s; + } + return rocksdb::Status::OK(); +} + +rocksdb::Status Redis::SetsExpire(const Slice& key, int64_t ttl_millsec, std::string&& prefetch_meta) { + std::string meta_value(std::move(prefetch_meta)); + ScopeRecordLock l(lock_mgr_, key); + BaseMetaKey base_meta_key(key); + rocksdb::Status s; + + // meta_value is empty means no meta value get before, + // we should get meta first + if (meta_value.empty()) { + s = db_->Get(default_read_options_, handles_[kMetaCF], base_meta_key.Encode(), &meta_value); + if (s.ok() && !ExpectedMetaValue(DataType::kSets, meta_value)) { + if (ExpectedStale(meta_value)) { + s = Status::NotFound(); + } else { + return Status::InvalidArgument( + "WRONGTYPE, key: " + key.ToString() + ", expect type: " + + DataTypeStrings[static_cast(DataType::kSets)] + ", get type: " + + DataTypeStrings[static_cast(GetMetaValueType(meta_value))]); + } + } + } + if (s.ok()) { + ParsedSetsMetaValue parsed_sets_meta_value(&meta_value); + if (parsed_sets_meta_value.IsStale()) { + return rocksdb::Status::NotFound("Stale"); + } else if (parsed_sets_meta_value.Count() == 0) { + return rocksdb::Status::NotFound(); + } + + if (ttl_millsec > 0) { + parsed_sets_meta_value.SetRelativeTimestamp(ttl_millsec); + s = db_->Put(default_write_options_, handles_[kMetaCF], base_meta_key.Encode(), meta_value); + } else { + parsed_sets_meta_value.InitialMetaValue(); + s = db_->Put(default_write_options_, handles_[kMetaCF], base_meta_key.Encode(), meta_value); + } + } + return s; +} + +rocksdb::Status Redis::SetsDel(const Slice& key, std::string&& prefetch_meta) { + std::string meta_value(std::move(prefetch_meta)); + ScopeRecordLock l(lock_mgr_, key); + rocksdb::Status s; + BaseMetaKey base_meta_key(key); + + // meta_value is empty means no meta value get before, + // we should get meta first + if (meta_value.empty()) { + s = db_->Get(default_read_options_, handles_[kMetaCF], base_meta_key.Encode(), &meta_value); + if (s.ok() && !ExpectedMetaValue(DataType::kSets, meta_value)) { + if (ExpectedStale(meta_value)) { + s = Status::NotFound(); + } else { + return Status::InvalidArgument( + "WRONGTYPE, key: " + key.ToString() + ", expect type: " + + DataTypeStrings[static_cast(DataType::kSets)] + ", get type: " + + DataTypeStrings[static_cast(GetMetaValueType(meta_value))]); + } + } + } + if (s.ok()) { + ParsedSetsMetaValue 
        parsed_sets_meta_value(&meta_value);
+    if (parsed_sets_meta_value.IsStale()) {
+      return rocksdb::Status::NotFound("Stale");
+    } else if (parsed_sets_meta_value.Count() == 0) {
+      return rocksdb::Status::NotFound();
+    } else {
+      uint32_t statistic = parsed_sets_meta_value.Count();
+      parsed_sets_meta_value.InitialMetaValue();
+      s = db_->Put(default_write_options_, handles_[kMetaCF], base_meta_key.Encode(), meta_value);
+      UpdateSpecificKeyStatistics(DataType::kSets, key.ToString(), statistic);
+    }
+  }
+  return s;
+}
+
+rocksdb::Status Redis::SetsExpireat(const Slice& key, int64_t timestamp_millsec, std::string&& prefetch_meta) {
+  std::string meta_value(std::move(prefetch_meta));
+  ScopeRecordLock l(lock_mgr_, key);
+  BaseMetaKey base_meta_key(key);
+  Status s;
+
+  // an empty meta_value means the meta was not prefetched by the caller,
+  // so fetch it first
+  if (meta_value.empty()) {
+    s = db_->Get(default_read_options_, handles_[kMetaCF], base_meta_key.Encode(), &meta_value);
+    if (s.ok() && !ExpectedMetaValue(DataType::kSets, meta_value)) {
+      if (ExpectedStale(meta_value)) {
+        s = Status::NotFound();
+      } else {
+        return Status::InvalidArgument(
+            "WRONGTYPE, key: " + key.ToString() + ", expect type: " +
+            DataTypeStrings[static_cast<int>(DataType::kSets)] + ", get type: " +
+            DataTypeStrings[static_cast<int>(GetMetaValueType(meta_value))]);
+      }
+    }
+  }
+  if (s.ok()) {
+    ParsedSetsMetaValue parsed_sets_meta_value(&meta_value);
+    if (parsed_sets_meta_value.IsStale()) {
+      return rocksdb::Status::NotFound("Stale");
+    } else if (parsed_sets_meta_value.Count() == 0) {
+      return rocksdb::Status::NotFound();
+    } else {
+      if (timestamp_millsec > 0) {
+        parsed_sets_meta_value.SetEtime(static_cast<uint64_t>(timestamp_millsec));
+      } else {
+        parsed_sets_meta_value.InitialMetaValue();
+      }
+      return db_->Put(default_write_options_, handles_[kMetaCF], base_meta_key.Encode(), meta_value);
+    }
+  }
+  return s;
+}
+
+rocksdb::Status Redis::SetsPersist(const Slice& key, std::string&& prefetch_meta) {
+  std::string meta_value(std::move(prefetch_meta));
+  ScopeRecordLock l(lock_mgr_, key);
+  BaseMetaKey base_meta_key(key);
+  rocksdb::Status s;
+
+  // an empty meta_value means the meta was not prefetched by the caller,
+  // so fetch it first
+  if (meta_value.empty()) {
+    s = db_->Get(default_read_options_, handles_[kMetaCF], base_meta_key.Encode(), &meta_value);
+    if (s.ok() && !ExpectedMetaValue(DataType::kSets, meta_value)) {
+      if (ExpectedStale(meta_value)) {
+        s = Status::NotFound();
+      } else {
+        return Status::InvalidArgument(
+            "WRONGTYPE, key: " + key.ToString() + ", expect type: " +
+            DataTypeStrings[static_cast<int>(DataType::kSets)] + ", get type: " +
+            DataTypeStrings[static_cast<int>(GetMetaValueType(meta_value))]);
+      }
+    }
+  }
+  if (s.ok()) {
+    ParsedSetsMetaValue parsed_sets_meta_value(&meta_value);
+    if (parsed_sets_meta_value.IsStale()) {
+      return rocksdb::Status::NotFound("Stale");
+    } else if (parsed_sets_meta_value.Count() == 0) {
+      return rocksdb::Status::NotFound();
+    } else {
+      uint64_t timestamp = parsed_sets_meta_value.Etime();
+      if (timestamp == 0) {
+        return rocksdb::Status::NotFound("Not have an associated timeout");
+      } else {
+        parsed_sets_meta_value.SetEtime(0);
+        return db_->Put(default_write_options_, handles_[kMetaCF], base_meta_key.Encode(), meta_value);
+      }
+    }
+  }
+  return s;
+}
+
+rocksdb::Status Redis::SetsTTL(const Slice& key, int64_t* ttl_millsec, std::string&& prefetch_meta) {
+  std::string meta_value(std::move(prefetch_meta));
+  BaseMetaKey base_meta_key(key);
+  rocksdb::Status s;
+
+  //
meta_value is empty means no meta value get before, + // we should get meta first + if (meta_value.empty()) { + s = db_->Get(default_read_options_, handles_[kMetaCF], base_meta_key.Encode(), &meta_value); + if (s.ok() && !ExpectedMetaValue(DataType::kSets, meta_value)) { + if (ExpectedStale(meta_value)) { + s = Status::NotFound(); + } else { + return Status::InvalidArgument( + "WRONGTYPE, key: " + key.ToString() + ", expect type: " + + DataTypeStrings[static_cast(DataType::kSets)] + ", get type: " + + DataTypeStrings[static_cast(GetMetaValueType(meta_value))]); + } + } + } + if (s.ok()) { + ParsedSetsMetaValue parsed_setes_meta_value(&meta_value); + if (parsed_setes_meta_value.IsStale()) { + *ttl_millsec = -2; + return rocksdb::Status::NotFound("Stale"); + } else if (parsed_setes_meta_value.Count() == 0) { + *ttl_millsec = -2; + return rocksdb::Status::NotFound(); + } else { + *ttl_millsec = parsed_setes_meta_value.Etime(); + if (*ttl_millsec == 0) { + *ttl_millsec = -1; + } else { + pstd::TimeType curtime = pstd::NowMillis(); + *ttl_millsec = *ttl_millsec - curtime >= 0 ? *ttl_millsec - curtime : -2; + } + } + } else if (s.IsNotFound()) { + *ttl_millsec = -2; + } + return s; +} + +void Redis::ScanSets() { + rocksdb::ReadOptions iterator_options; + const rocksdb::Snapshot* snapshot; + ScopeSnapshot ss(db_, &snapshot); + iterator_options.snapshot = snapshot; + iterator_options.fill_cache = false; + auto current_time = static_cast(time(nullptr)); + + LOG(INFO) << "***************Sets Meta Data***************"; + auto meta_iter = db_->NewIterator(iterator_options, handles_[kMetaCF]); + for (meta_iter->SeekToFirst(); meta_iter->Valid(); meta_iter->Next()) { + if (!ExpectedMetaValue(DataType::kSets, meta_iter->value().ToString())) { + continue; + } + ParsedSetsMetaValue parsed_sets_meta_value(meta_iter->value()); + ParsedBaseMetaKey parsed_meta_key(meta_iter->key()); + int32_t survival_time = 0; + if (parsed_sets_meta_value.Etime() != 0) { + survival_time = parsed_sets_meta_value.Etime() - current_time > 0 + ? parsed_sets_meta_value.Etime() - current_time + : -1; + } + + LOG(INFO) << fmt::format("[key : {:<30}] [count : {:<10}] [timestamp : {:<10}] [version : {}] [survival_time : {}]", + parsed_meta_key.Key().ToString(), parsed_sets_meta_value.Count(), parsed_sets_meta_value.Etime(), + parsed_sets_meta_value.Version(), survival_time); + } + delete meta_iter; + + LOG(INFO) << "***************Sets Member Data***************"; + auto member_iter = db_->NewIterator(iterator_options, handles_[kSetsDataCF]); + for (member_iter->SeekToFirst(); member_iter->Valid(); member_iter->Next()) { + ParsedSetsMemberKey parsed_sets_member_key(member_iter->key()); + + LOG(INFO) << fmt::format("[key : {:<30}] [member : {:<20}] [version : {}]", parsed_sets_member_key.Key().ToString(), + parsed_sets_member_key.member().ToString(), parsed_sets_member_key.Version()); + } + delete member_iter; +} + +} // namespace storage diff --git a/tools/pika_migrate/src/storage/src/redis_streams.cc b/tools/pika_migrate/src/storage/src/redis_streams.cc new file mode 100644 index 0000000000..f3abdc5b08 --- /dev/null +++ b/tools/pika_migrate/src/storage/src/redis_streams.cc @@ -0,0 +1,980 @@ +// Copyright (c) 2017-present, Qihoo, Inc. All rights reserved. +// This source code is licensed under the BSD-style license found in the +// LICENSE file in the root directory of this source tree. An additional grant +// of patent rights can be found in the PATENTS file in the same directory. 
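+
+// Streams reuse the two-column-family layout of the other types in this
+// directory: kMetaCF holds one StreamMetaValue per key (version, length,
+// first/last id, max_deleted_entry_id, entries_added), and kStreamsDataCF holds
+// one entry per message keyed by StreamDataKey{key, version, serialized id}.
+// A rough sketch of the layout (illustrative, not the exact byte encoding):
+//
+//   kMetaCF:        "mystream"        -> {version=3, length=2, last_id=5-1, ...}
+//   kStreamsDataCF: "mystream|3|5-0"  -> serialized field/value message
+//   kStreamsDataCF: "mystream|3|5-1"  -> serialized field/value message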
+ +#include +#include +#include +#include +#include +#include + +#include "rocksdb/slice.h" +#include "rocksdb/status.h" + +#include "src/redis.h" +#include "src/base_data_key_format.h" +#include "src/base_filter.h" +#include "src/debug.h" +#include "src/pika_stream_meta_value.h" +#include "src/scope_record_lock.h" +#include "src/scope_snapshot.h" +#include "storage/storage.h" +#include "storage/util.h" + +namespace storage { + +Status Redis::XAdd(const Slice& key, const std::string& serialized_message, StreamAddTrimArgs& args) { + // With the lock, we do not need snapshot for read. + // And it's bugy to use snapshot for read when we try to add message with trim. + // such as: XADD key 1-0 field value MINID 1-0 + + // 1 get stream meta + rocksdb::Status s; + StreamMetaValue stream_meta; + s = GetStreamMeta(stream_meta, key, default_read_options_); + if (s.IsNotFound() && args.no_mkstream) { + return Status::NotFound("no_mkstream"); + } else if (s.IsNotFound()) { + stream_meta.InitMetaValue(); + } else if (!s.ok()) { + return Status::Corruption("error from XADD, get stream meta failed: " + s.ToString()); + } + + if (stream_meta.length() == 0) { + if (args.no_mkstream) { + return Status::NotFound("no_mkstream"); + } + stream_meta.InitMetaValue(); + } + + if (stream_meta.last_id().ms == UINT64_MAX && stream_meta.last_id().seq == UINT64_MAX) { + return Status::Corruption("Fatal! Sequence number overflow !"); + } + + // 2 append the message to storage + s = GenerateStreamID(stream_meta, args); + if (!s.ok()) { + return s; + } + +#ifdef DEBUG + // check the serialized current id is larger than last_id + std::string serialized_last_id = stream_meta.last_id().Serialize(); + std::string current_id = args.id.Serialize(); + assert(current_id > serialized_last_id); +#endif + + StreamDataKey stream_data_key(key, stream_meta.version(), args.id.Serialize()); + s = db_->Put(default_write_options_, handles_[kStreamsDataCF], stream_data_key.Encode(), serialized_message); + if (!s.ok()) { + return Status::Corruption("error from XADD, insert stream message failed 1: " + s.ToString()); + } + + // 3 update stream meta + if (stream_meta.length() == 0) { + stream_meta.set_first_id(args.id); + } + stream_meta.set_entries_added(stream_meta.entries_added() + 1); + stream_meta.set_last_id(args.id); + stream_meta.set_length(stream_meta.length() + 1); + // 4 trim the stream if needed + if (args.trim_strategy != StreamTrimStrategy::TRIM_STRATEGY_NONE) { + int32_t count{0}; + s = TrimStream(count, stream_meta, key, args, default_read_options_); + if (!s.ok()) { + return Status::Corruption("error from XADD, trim stream failed: " + s.ToString()); + } + (void)count; + } + + // 5 update stream meta + BaseMetaKey base_meta_key(key); + s = db_->Put(default_write_options_, handles_[kMetaCF], base_meta_key.Encode(), stream_meta.value()); + if (!s.ok()) { + return s; + } + + return Status::OK(); +} + +Status Redis::XTrim(const Slice& key, StreamAddTrimArgs& args, int32_t& count) { + + // 1 get stream meta + rocksdb::Status s; + StreamMetaValue stream_meta; + s = GetStreamMeta(stream_meta, key, default_read_options_); + if (!s.ok()) { + return s; + } + + // 2 do the trim + count = 0; + s = TrimStream(count, stream_meta, key, args, default_read_options_); + if (!s.ok()) { + return s; + } + + // 3 update stream meta + BaseMetaKey base_meta_key(key); + s = db_->Put(default_write_options_, handles_[kMetaCF], base_meta_key.Encode(), stream_meta.value()); + if (!s.ok()) { + return s; + } + + return Status::OK(); +} + +Status 
Redis::XDel(const Slice& key, const std::vector& ids, int32_t& count) { + + // 1 try to get stream meta + StreamMetaValue stream_meta; + auto s = GetStreamMeta(stream_meta, key, default_read_options_); + if (!s.ok()) { + return s; + } + + // 2 do the delete + if (ids.size() > INT32_MAX) { + return Status::InvalidArgument("Too many IDs specified"); + } + count = static_cast(ids.size()); + std::string unused; + for (auto id : ids) { + StreamDataKey stream_data_key(key, stream_meta.version(), id.Serialize()); + s = db_->Get(default_read_options_, handles_[kStreamsDataCF], stream_data_key.Encode(), &unused); + if (s.IsNotFound()) { + --count; + continue; + } else if (!s.ok()) { + return s; + } + } + s = DeleteStreamMessages(key, stream_meta, ids, default_read_options_); + if (!s.ok()) { + return s; + } + + // 3 update stream meta + stream_meta.set_length(stream_meta.length() - count); + for (const auto& id : ids) { + if (id > stream_meta.max_deleted_entry_id()) { + stream_meta.set_max_deleted_entry_id(id); + } + if (id == stream_meta.first_id()) { + s = SetFirstID(key, stream_meta, default_read_options_); + } else if (id == stream_meta.last_id()) { + s = SetLastID(key, stream_meta, default_read_options_); + } + if (!s.ok()) { + return s; + } + } + + return db_->Put(default_write_options_, handles_[kMetaCF], BaseMetaKey(key).Encode(), stream_meta.value()); +} + +Status Redis::XRange(const Slice& key, const StreamScanArgs& args, std::vector& field_values, std::string&& prefetch_meta) { + rocksdb::ReadOptions read_options; + const rocksdb::Snapshot* snapshot; + ScopeSnapshot ss(db_, &snapshot); + read_options.snapshot = snapshot; + + // 1 get stream meta + rocksdb::Status s; + StreamMetaValue stream_meta; + s = GetStreamMeta(stream_meta, key, read_options, std::move(prefetch_meta)); + if (!s.ok()) { + return s; + } + + // 2 do the scan + std::string next_field; + ScanStreamOptions options(key, stream_meta.version(), args.start_sid, args.end_sid, args.limit, args.start_ex, + args.end_ex, false); + s = ScanStream(options, field_values, next_field, read_options); + (void)next_field; + + return s; +} + +Status Redis::XRevrange(const Slice& key, const StreamScanArgs& args, std::vector& field_values) { + rocksdb::ReadOptions read_options; + const rocksdb::Snapshot* snapshot; + ScopeSnapshot ss(db_, &snapshot); + read_options.snapshot = snapshot; + + // 1 get stream meta + rocksdb::Status s; + StreamMetaValue stream_meta; + s = GetStreamMeta(stream_meta, key, read_options); + if (!s.ok()) { + return s; + } + + // 2 do the scan + std::string next_field; + ScanStreamOptions options(key, stream_meta.version(), args.start_sid, args.end_sid, args.limit, args.start_ex, + args.end_ex, true); + s = ScanStream(options, field_values, next_field, read_options); + (void)next_field; + + return s; +} + +Status Redis::XLen(const Slice& key, int32_t& len) { + rocksdb::ReadOptions read_options; + const rocksdb::Snapshot* snapshot; + ScopeSnapshot ss(db_, &snapshot); + read_options.snapshot = snapshot; + + // 1 get stream meta + rocksdb::Status s; + StreamMetaValue stream_meta; + s = GetStreamMeta(stream_meta, key, read_options); + if (!s.ok()) { + return s; + } + + len = stream_meta.length(); + return Status::OK(); +} + +Status Redis::XRead(const StreamReadGroupReadArgs& args, std::vector>& results, + std::vector& reserved_keys) { + rocksdb::ReadOptions read_options; + const rocksdb::Snapshot* snapshot; + ScopeSnapshot ss(db_, &snapshot); + read_options.snapshot = snapshot; + + // 1 prepare stream_metas + 
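+  // ID handling for XREAD (step 2.1 below): "$" resolves to the stream's
+  // current last_id, so only entries added later would match, and "<" is
+  // rejected because it is only meaningful for XREADGROUP. Illustrative
+  // requests against a hypothetical stream:
+  //
+  //   XREAD COUNT 10 STREAMS mystream 0   => every entry in mystream
+  //   XREAD COUNT 10 STREAMS mystream $   => empty here; only entries written
+  //                                          after this call would qualify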
rocksdb::Status s; + std::vector> streammeta_idx; + for (int i = 0; i < args.unparsed_ids.size(); i++) { + const auto& key = args.keys[i]; + + StreamMetaValue stream_meta; + auto s = GetStreamMeta(stream_meta, key, read_options); + if (s.IsNotFound()) { + continue; + } else if (!s.ok()) { + return s; + } + + streammeta_idx.emplace_back(std::move(stream_meta), i); + } + + if (streammeta_idx.empty()) { + return Status::OK(); + } + + // 2 do the scan + for (const auto& stream_meta_id : streammeta_idx) { + const auto& stream_meta = stream_meta_id.first; + const auto& idx = stream_meta_id.second; + const auto& unparsed_id = args.unparsed_ids[idx]; + const auto& key = args.keys[idx]; + + // 2.1 try to parse id + storage::streamID id; + if (unparsed_id == "<") { + return Status::Corruption( + "The > ID can be specified only when calling " + "XREADGROUP using the GROUP " + " option."); + } else if (unparsed_id == "$") { + id = stream_meta.last_id(); + } else { + if (!storage::StreamUtils::StreamParseStrictID(unparsed_id, id, 0, nullptr)) { + return Status::Corruption("Invalid stream ID specified as stream "); + } + } + + // 2.2 scan + std::vector field_values; + std::string next_field; + ScanStreamOptions options(key, stream_meta.version(), id, storage::kSTREAMID_MAX, args.count, true); + auto s = ScanStream(options, field_values, next_field, read_options); + (void)next_field; + if (!s.ok() && !s.IsNotFound()) { + return s; + } + results.emplace_back(std::move(field_values)); + reserved_keys.emplace_back(args.keys[idx]); + } + + return Status::OK(); +} + +Status Redis::XInfo(const Slice& key, StreamInfoResult& result) { + // 1 get stream meta + rocksdb::Status s; + StreamMetaValue stream_meta; + s = GetStreamMeta(stream_meta, key, default_read_options_); + if (!s.ok()) { + return s; + } + + // 2 fill the result + result.length = stream_meta.length(); + result.last_id_str = stream_meta.last_id().ToString(); + result.max_deleted_entry_id_str = stream_meta.max_deleted_entry_id().ToString(); + result.entries_added = stream_meta.entries_added(); + result.first_id_str = stream_meta.first_id().ToString(); + + return Status::OK(); +} + +Status Redis::ScanStreamsKeyNum(KeyInfo* key_info) { + uint64_t keys = 0; + uint64_t expires = 0; + uint64_t ttl_sum = 0; + uint64_t invaild_keys = 0; + + rocksdb::ReadOptions iterator_options; + const rocksdb::Snapshot* snapshot; + ScopeSnapshot ss(db_, &snapshot); + iterator_options.snapshot = snapshot; + iterator_options.fill_cache = false; + + rocksdb::Iterator* iter = db_->NewIterator(iterator_options, handles_[kMetaCF]); + for (iter->SeekToFirst(); iter->Valid(); iter->Next()) { + if (!ExpectedMetaValue(DataType::kStreams, iter->value().ToString())) { + continue; + } + ParsedStreamMetaValue parsed_stream_meta_value(iter->value()); + if (parsed_stream_meta_value.length() == 0) { + invaild_keys++; + } else { + keys++; + } + } + delete iter; + + key_info->keys = keys; + key_info->invaild_keys = invaild_keys; + return Status::OK(); +} + +Status Redis::StreamsDel(const Slice& key, std::string&& prefetch_meta) { + std::string meta_value(std::move(prefetch_meta)); + BaseMetaKey base_meta_key(key); + Status s; + + // value is empty means no meta value get before, + // we should get meta first + if (meta_value.empty()) { + s = db_->Get(default_read_options_, handles_[kMetaCF], base_meta_key.Encode(), &meta_value); + if (s.ok() && !ExpectedMetaValue(DataType::kStreams, meta_value)) { + if (ExpectedStale(meta_value)) { + s = Status::NotFound(); + } else { + return 
Status::InvalidArgument(
+            "WRONGTYPE, key: " + key.ToString() + ", expected type: " +
+            DataTypeStrings[static_cast<int>(DataType::kStreams)] + ", get type: " +
+            DataTypeStrings[static_cast<int>(GetMetaValueType(meta_value))]);
+      }
+    }
+  }
+  if (s.ok()) {
+    StreamMetaValue stream_meta_value;
+    stream_meta_value.ParseFrom(meta_value);
+    if (stream_meta_value.length() == 0) {
+      return Status::NotFound();
+    } else {
+      uint32_t statistic = stream_meta_value.length();
+      stream_meta_value.InitMetaValue();
+      s = db_->Put(default_write_options_, handles_[kMetaCF], base_meta_key.Encode(), stream_meta_value.value());
+      UpdateSpecificKeyStatistics(DataType::kStreams, key.ToString(), statistic);
+    }
+  }
+  return s;
+}
+
+Status Redis::GetStreamMeta(StreamMetaValue& stream_meta, const rocksdb::Slice& key,
+                            rocksdb::ReadOptions& read_options, std::string&& prefetch_meta) {
+  std::string value(std::move(prefetch_meta));
+  BaseMetaKey base_meta_key(key);
+  Status s;
+
+  // an empty value means the meta was not prefetched by the caller,
+  // so fetch it first
+  if (value.empty()) {
+    s = db_->Get(read_options, handles_[kMetaCF], base_meta_key.Encode(), &value);
+    if (s.ok() && !ExpectedMetaValue(DataType::kStreams, value)) {
+      if (ExpectedStale(value)) {
+        s = Status::NotFound();
+      } else {
+        return Status::InvalidArgument(
+            "WRONGTYPE, key: " + key.ToString() + ", expected type: " +
+            DataTypeStrings[static_cast<int>(DataType::kStreams)] + ", got type: " +
+            DataTypeStrings[static_cast<int>(GetMetaValueType(value))]);
+      }
+    }
+  }
+  if (s.ok()) {
+    stream_meta.ParseFrom(value);
+    return Status::OK();
+  }
+  return s;
+}
+
+Status Redis::TrimStream(int32_t& count, StreamMetaValue& stream_meta, const rocksdb::Slice& key,
+                         StreamAddTrimArgs& args, rocksdb::ReadOptions& read_options) {
+  count = 0;
+  // 1 do the trim
+  TrimRet trim_ret;
+  Status s;
+  if (args.trim_strategy == StreamTrimStrategy::TRIM_STRATEGY_MAXLEN) {
+    s = TrimByMaxlen(trim_ret, stream_meta, key, args, read_options);
+  } else {
+    assert(args.trim_strategy == StreamTrimStrategy::TRIM_STRATEGY_MINID);
+    s = TrimByMinid(trim_ret, stream_meta, key, args, read_options);
+  }
+
+  if (!s.ok()) {
+    return s;
+  }
+
+  if (trim_ret.count == 0) {
+    return s;
+  }
+
+  // 2 update stream meta
+  streamID first_id;
+  streamID max_deleted_id;
+  if (stream_meta.length() == trim_ret.count) {
+    // all the messages in the stream were deleted
+    first_id = kSTREAMID_MIN;
+  } else {
+    first_id.DeserializeFrom(trim_ret.next_field);
+  }
+  assert(!trim_ret.max_deleted_field.empty());
+  max_deleted_id.DeserializeFrom(trim_ret.max_deleted_field);
+
+  stream_meta.set_first_id(first_id);
+  if (max_deleted_id > stream_meta.max_deleted_entry_id()) {
+    stream_meta.set_max_deleted_entry_id(max_deleted_id);
+  }
+  stream_meta.set_length(stream_meta.length() - trim_ret.count);
+
+  count = trim_ret.count;
+  return Status::OK();
+}
+
+Status Redis::ScanStream(const ScanStreamOptions& op, std::vector<IdMessage>& field_values,
+                         std::string& next_field, rocksdb::ReadOptions& read_options) {
+  std::string start_field;
+  std::string end_field;
+  Slice pattern = "*";  // match all the fields from start_field to end_field
+  Status s;
+
+  // 1 do the scan
+  if (op.is_reverse) {
+    start_field = op.end_sid.Serialize();
+    if (op.start_sid == kSTREAMID_MAX) {
+      start_field = "";
+    } else {
+      start_field = op.start_sid.Serialize();
+    }
+    s = StreamReScanRange(op.key, op.version, start_field, end_field, pattern, op.limit, field_values, next_field,
+                          read_options);
+  } else {
+    start_field = op.start_sid.Serialize();
+    if (op.end_sid == kSTREAMID_MAX) {
+      end_field = "";
+    } else {
+      end_field = op.end_sid.Serialize();
+    }
+    s = StreamScanRange(op.key, op.version, start_field, end_field, pattern, op.limit, field_values, next_field,
+                        read_options);
+  }
+
+  // 2 exclude the start_sid and end_sid if needed
+  if (op.start_ex && !field_values.empty()) {
+    streamID sid;
+    sid.DeserializeFrom(field_values.front().field);
+    if (sid == op.start_sid) {
+      field_values.erase(field_values.begin());
+    }
+  }
+
+  if (op.end_ex && !field_values.empty()) {
+    streamID sid;
+    sid.DeserializeFrom(field_values.back().field);
+    if (sid == op.end_sid) {
+      field_values.pop_back();
+    }
+  }
+
+  return s;
+}
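+
+// XADD id-generation rules enforced by GenerateStreamID below, shown against a
+// stream whose last_id is 5-3 (illustrative):
+//
+//   XADD key *      => now_ms-0, or 5-4 if now_ms ties with 5
+//   XADD key 5-*    => 5-4
+//   XADD key 5-3    => error: new IDs must be strictly greater than last_id
+//   XADD key 4-9    => error: new IDs must be strictly greater than last_id
+//   XADD key 0-0    => error: 0-0 is never a valid explicit ID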
+Status Redis::TrimStream(int32_t& count, StreamMetaValue& stream_meta, const rocksdb::Slice& key,
+                         StreamAddTrimArgs& args, rocksdb::ReadOptions& read_options) {
+  count = 0;
+  // 1 do the trim
+  TrimRet trim_ret;
+  Status s;
+  if (args.trim_strategy == StreamTrimStrategy::TRIM_STRATEGY_MAXLEN) {
+    s = TrimByMaxlen(trim_ret, stream_meta, key, args, read_options);
+  } else {
+    assert(args.trim_strategy == StreamTrimStrategy::TRIM_STRATEGY_MINID);
+    s = TrimByMinid(trim_ret, stream_meta, key, args, read_options);
+  }
+
+  if (!s.ok()) {
+    return s;
+  }
+
+  if (trim_ret.count == 0) {
+    return s;
+  }
+
+  // 2 update stream meta
+  streamID first_id;
+  streamID max_deleted_id;
+  if (stream_meta.length() == trim_ret.count) {
+    // all the messages in the stream were deleted
+    first_id = kSTREAMID_MIN;
+  } else {
+    first_id.DeserializeFrom(trim_ret.next_field);
+  }
+  assert(!trim_ret.max_deleted_field.empty());
+  max_deleted_id.DeserializeFrom(trim_ret.max_deleted_field);
+
+  stream_meta.set_first_id(first_id);
+  if (max_deleted_id > stream_meta.max_deleted_entry_id()) {
+    stream_meta.set_max_deleted_entry_id(max_deleted_id);
+  }
+  stream_meta.set_length(stream_meta.length() - trim_ret.count);
+
+  count = trim_ret.count;
+  return Status::OK();
+}
+
+Status Redis::ScanStream(const ScanStreamOptions& op, std::vector<storage::FieldValue>& field_values,
+                         std::string& next_field, rocksdb::ReadOptions& read_options) {
+  std::string start_field;
+  std::string end_field;
+  Slice pattern = "*";  // match all the fields from start_field to end_field
+  Status s;
+
+  // 1 do the scan
+  if (op.is_reverse) {
+    end_field = op.end_sid.Serialize();
+    if (op.start_sid == kSTREAMID_MAX) {
+      start_field = "";
+    } else {
+      start_field = op.start_sid.Serialize();
+    }
+    s = StreamReScanRange(op.key, op.version, start_field, end_field, pattern, op.limit, field_values, next_field,
+                          read_options);
+  } else {
+    start_field = op.start_sid.Serialize();
+    if (op.end_sid == kSTREAMID_MAX) {
+      end_field = "";
+    } else {
+      end_field = op.end_sid.Serialize();
+    }
+    s = StreamScanRange(op.key, op.version, start_field, end_field, pattern, op.limit, field_values, next_field,
+                        read_options);
+  }
+
+  // 2 exclude the start_sid and end_sid if needed
+  if (op.start_ex && !field_values.empty()) {
+    streamID sid;
+    sid.DeserializeFrom(field_values.front().field);
+    if (sid == op.start_sid) {
+      field_values.erase(field_values.begin());
+    }
+  }
+
+  if (op.end_ex && !field_values.empty()) {
+    streamID sid;
+    sid.DeserializeFrom(field_values.back().field);
+    if (sid == op.end_sid) {
+      field_values.pop_back();
+    }
+  }
+
+  return s;
+}
+
+Status Redis::GenerateStreamID(const StreamMetaValue& stream_meta, StreamAddTrimArgs& args) {
+  auto& id = args.id;
+  if (args.id_given && args.seq_given && id.ms == 0 && id.seq == 0) {
+    return Status::InvalidArgument("The ID specified in XADD must be greater than 0-0");
+  }
+
+  if (!args.id_given || !args.seq_given) {
+    // if id not given, generate one
+    if (!args.id_given) {
+      id.ms = StreamUtils::GetCurrentTimeMs();
+
+      if (id.ms < stream_meta.last_id().ms) {
+        id.ms = stream_meta.last_id().ms;
+        if (stream_meta.last_id().seq == UINT64_MAX) {
+          id.ms++;
+          id.seq = 0;
+        } else {
+          id.seq = stream_meta.last_id().seq + 1;
+        }
+        return Status::OK();
+      }
+    }
+
+    // generate seq
+    auto last_id = stream_meta.last_id();
+    if (id.ms < last_id.ms) {
+      return Status::InvalidArgument("The ID specified in XADD is equal or smaller");
+    } else if (id.ms == last_id.ms) {
+      if (last_id.seq == UINT64_MAX) {
+        return Status::InvalidArgument("The ID specified in XADD is equal or smaller");
+      }
+      id.seq = last_id.seq + 1;
+    } else {
+      id.seq = 0;
+    }
+
+  } else {
+    // Full ID given, check id
+    auto last_id = stream_meta.last_id();
+    if (id.ms < last_id.ms || (id.ms == last_id.ms && id.seq <= last_id.seq)) {
+      return Status::InvalidArgument("INVALID ID given");
+    }
+  }
+  return Status::OK();
+}
+
+Status Redis::TrimByMaxlen(TrimRet& trim_ret, StreamMetaValue& stream_meta, const rocksdb::Slice& key,
+                           const StreamAddTrimArgs& args, rocksdb::ReadOptions& read_options) {
+  Status s;
+  // delete the messages in batches to avoid using too much memory
+  while (stream_meta.length() - trim_ret.count > args.maxlen) {
+    auto cur_batch =
+        (std::min(static_cast<int32_t>(stream_meta.length() - trim_ret.count - args.maxlen), kDEFAULT_TRIM_BATCH_SIZE));
+    std::vector<storage::FieldValue> id_messages;
+
+    ScanStreamOptions options(key, stream_meta.version(), stream_meta.first_id(), kSTREAMID_MAX,
+                              cur_batch, false, false, false);
+    s = ScanStream(options, id_messages, trim_ret.next_field, read_options);
+    if (!s.ok()) {
+      assert(!s.IsNotFound());
+      return s;
+    }
+
+    assert(id_messages.size() == static_cast<size_t>(cur_batch));
+    trim_ret.count += cur_batch;
+    trim_ret.max_deleted_field = id_messages.back().field;
+
+    // delete this batch of messages
+    std::vector<std::string> ids_to_del;
+    ids_to_del.reserve(id_messages.size());
+    for (auto& fv : id_messages) {
+      ids_to_del.emplace_back(std::move(fv.field));
+    }
+    s = DeleteStreamMessages(key, stream_meta, ids_to_del, read_options);
+    if (!s.ok()) {
+      return s;
+    }
+  }
+
+  s = Status::OK();
+  return s;
+}
+
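+// Worked example for the MAXLEN strategy above (numbers are illustrative):
+// with stream length 2500, args.maxlen == 400 and kDEFAULT_TRIM_BATCH_SIZE ==
+// 1000, the loop runs three times with cur_batch == 1000, 1000 and 100, so
+// trim_ret.count ends up as 2100 and exactly maxlen entries remain.
+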
+Status Redis::TrimByMinid(TrimRet& trim_ret, StreamMetaValue& stream_meta, const rocksdb::Slice& key,
+                          const StreamAddTrimArgs& args, rocksdb::ReadOptions& read_options) {
+  Status s;
+  std::string serialized_min_id;
+  trim_ret.next_field = stream_meta.first_id().Serialize();
+  serialized_min_id = args.minid.Serialize();
+
+  // delete the messages in batches to avoid using too much memory
+  while (trim_ret.next_field < serialized_min_id && stream_meta.length() - trim_ret.count > 0) {
+    auto cur_batch = static_cast<int32_t>(
+        std::min(static_cast<int32_t>(stream_meta.length() - trim_ret.count), kDEFAULT_TRIM_BATCH_SIZE));
+    std::vector<storage::FieldValue> id_messages;
+
+    ScanStreamOptions options(key, stream_meta.version(), stream_meta.first_id(), args.minid, cur_batch,
+                              false, false, false);
+    s = ScanStream(options, id_messages, trim_ret.next_field, read_options);
+    if (!s.ok()) {
+      assert(!s.IsNotFound());
+      return s;
+    }
+
+    if (!id_messages.empty()) {
+      if (id_messages.back().field == serialized_min_id) {
+        // the message whose id matches the minid must not be deleted
+        id_messages.pop_back();
+        trim_ret.next_field = serialized_min_id;
+      }
+      // double check
+      if (!id_messages.empty()) {
+        trim_ret.max_deleted_field = id_messages.back().field;
+      }
+    }
+
+    assert(id_messages.size() <= static_cast<size_t>(cur_batch));
+    trim_ret.count += static_cast<int32_t>(id_messages.size());
+
+    // do the delete in batch
+    std::vector<std::string> fields_to_del;
+    fields_to_del.reserve(id_messages.size());
+    for (auto& fv : id_messages) {
+      fields_to_del.emplace_back(std::move(fv.field));
+    }
+
+    s = DeleteStreamMessages(key, stream_meta, fields_to_del, read_options);
+    if (!s.ok()) {
+      return s;
+    }
+  }
+
+  s = Status::OK();
+  return s;
+}
+
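+// Worked example for the MINID strategy above (illustrative): for a stream
+// holding ids 1-1, 2-1 and 3-1 with args.minid == 3-1, the scan returns ids up
+// to and including 3-1, the final id is popped back (entries with id >= minid
+// must survive), and only 1-1 and 2-1 are deleted.
+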
+Status Redis::StreamScanRange(const Slice& key, const uint64_t version, const Slice& id_start,
+                              const std::string& id_end, const Slice& pattern, int32_t limit,
+                              std::vector<storage::FieldValue>& id_messages, std::string& next_id,
+                              rocksdb::ReadOptions& read_options) {
+  next_id.clear();
+  id_messages.clear();
+
+  auto remain = limit;
+  std::string meta_value;
+
+  bool start_no_limit = id_start.compare("") == 0;
+  bool end_no_limit = id_end.empty();
+
+  if (!start_no_limit && !end_no_limit && (id_start.compare(id_end) > 0)) {
+    return Status::InvalidArgument("error in given range");
+  }
+
+  StreamDataKey streams_data_prefix(key, version, Slice());
+  StreamDataKey streams_start_data_key(key, version, id_start);
+  std::string prefix = streams_data_prefix.EncodeSeekKey().ToString();
+  rocksdb::Iterator* iter = db_->NewIterator(read_options, handles_[kStreamsDataCF]);
+  for (iter->Seek(start_no_limit ? prefix : streams_start_data_key.Encode());
+       iter->Valid() && remain > 0 && iter->key().starts_with(prefix); iter->Next()) {
+    ParsedStreamDataKey parsed_streams_data_key(iter->key());
+    std::string id = parsed_streams_data_key.id().ToString();
+    if (!end_no_limit && id.compare(id_end) > 0) {
+      break;
+    }
+    if (StringMatch(pattern.data(), pattern.size(), id.data(), id.size(), 0) != 0) {
+      id_messages.push_back({id, iter->value().ToString()});
+    }
+    remain--;
+  }
+
+  if (iter->Valid() && iter->key().starts_with(prefix)) {
+    ParsedStreamDataKey parsed_streams_data_key(iter->key());
+    if (end_no_limit || parsed_streams_data_key.id().compare(id_end) <= 0) {
+      next_id = parsed_streams_data_key.id().ToString();
+    }
+  }
+  delete iter;
+
+  return Status::OK();
+}
+
+Status Redis::StreamReScanRange(const Slice& key, const uint64_t version, const Slice& id_start,
+                                const std::string& id_end, const Slice& pattern, int32_t limit,
+                                std::vector<storage::FieldValue>& id_messages, std::string& next_id,
+                                rocksdb::ReadOptions& read_options) {
+  next_id.clear();
+  id_messages.clear();
+
+  auto remain = limit;
+  std::string meta_value;
+
+  bool start_no_limit = id_start.compare("") == 0;
+  bool end_no_limit = id_end.empty();
+
+  if (!start_no_limit && !end_no_limit && (id_start.compare(id_end) < 0)) {
+    return Status::InvalidArgument("error in given range");
+  }
+
+  uint64_t start_key_version = start_no_limit ? version + 1 : version;
+  std::string start_key_id = start_no_limit ? "" : id_start.ToString();
+  StreamDataKey streams_data_prefix(key, version, Slice());
+  StreamDataKey streams_start_data_key(key, start_key_version, start_key_id);
+  std::string prefix = streams_data_prefix.EncodeSeekKey().ToString();
+  rocksdb::Iterator* iter = db_->NewIterator(read_options, handles_[kStreamsDataCF]);
+  for (iter->SeekForPrev(streams_start_data_key.Encode().ToString());
+       iter->Valid() && remain > 0 && iter->key().starts_with(prefix); iter->Prev()) {
+    ParsedStreamDataKey parsed_streams_data_key(iter->key());
+    std::string id = parsed_streams_data_key.id().ToString();
+    if (!end_no_limit && id.compare(id_end) < 0) {
+      break;
+    }
+    if (StringMatch(pattern.data(), pattern.size(), id.data(), id.size(), 0) != 0) {
+      id_messages.push_back({id, iter->value().ToString()});
+    }
+    remain--;
+  }
+
+  if (iter->Valid() && iter->key().starts_with(prefix)) {
+    ParsedStreamDataKey parsed_streams_data_key(iter->key());
+    if (end_no_limit || parsed_streams_data_key.id().compare(id_end) >= 0) {
+      next_id = parsed_streams_data_key.id().ToString();
+    }
+  }
+  delete iter;
+
+  return Status::OK();
+}
+
+Status Redis::DeleteStreamMessages(const rocksdb::Slice& key, const StreamMetaValue& stream_meta,
+                                   const std::vector<streamID>& ids, rocksdb::ReadOptions& read_options) {
+  std::vector<std::string> serialized_ids;
+  serialized_ids.reserve(ids.size());
+  for (const auto& id : ids) {
+    serialized_ids.emplace_back(id.Serialize());
+  }
+  return DeleteStreamMessages(key, stream_meta, serialized_ids, read_options);
+}
+
+Status Redis::DeleteStreamMessages(const rocksdb::Slice& key, const StreamMetaValue& stream_meta,
+                                   const std::vector<std::string>& serialized_ids,
+                                   rocksdb::ReadOptions& read_options) {
+  rocksdb::WriteBatch batch;
+  for (auto& sid : serialized_ids) {
+    StreamDataKey stream_data_key(key, stream_meta.version(), sid);
+    batch.Delete(handles_[kStreamsDataCF], stream_data_key.Encode());
+  }
+  return db_->Write(default_write_options_, &batch);
+}
+
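+// Key-layout note for the scans and deletes above (as used in this file):
+// every stream entry lives in the kStreamsDataCF column family under
+// StreamDataKey(key, version, serialized_id), so deleting a batch of entries
+// is a single WriteBatch against that column family and never touches the
+// meta column family; the caller is responsible for rewriting the meta value.
+// A hypothetical call (some_id is illustrative, not part of this file):
+//
+//   std::vector<streamID> ids = {some_id};
+//   Status s = DeleteStreamMessages(key, stream_meta, ids, read_options);
+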
+inline Status Redis::SetFirstID(const rocksdb::Slice& key, StreamMetaValue& stream_meta,
+                                rocksdb::ReadOptions& read_options) {
+  return SetFirstOrLastID(key, stream_meta, true, read_options);
+}
+
+inline Status Redis::SetLastID(const rocksdb::Slice& key, StreamMetaValue& stream_meta,
+                               rocksdb::ReadOptions& read_options) {
+  return SetFirstOrLastID(key, stream_meta, false, read_options);
+}
+
+inline Status Redis::SetFirstOrLastID(const rocksdb::Slice& key, StreamMetaValue& stream_meta, bool is_set_first,
+                                      rocksdb::ReadOptions& read_options) {
+  if (stream_meta.length() == 0) {
+    stream_meta.set_first_id(kSTREAMID_MIN);
+    return Status::OK();
+  }
+
+  std::vector<storage::FieldValue> id_messages;
+  std::string next_field;
+
+  storage::Status s;
+  if (is_set_first) {
+    ScanStreamOptions option(key, stream_meta.version(), kSTREAMID_MIN, kSTREAMID_MAX, 1);
+    s = ScanStream(option, id_messages, next_field, read_options);
+  } else {
+    bool is_reverse = true;
+    ScanStreamOptions option(key, stream_meta.version(), kSTREAMID_MAX, kSTREAMID_MIN, 1, false, false, is_reverse);
+    s = ScanStream(option, id_messages, next_field, read_options);
+  }
+  (void)next_field;
+
+  if (!s.ok() && !s.IsNotFound()) {
+    LOG(ERROR) << "Internal error: scan stream failed: " << s.ToString();
+    return Status::Corruption("Internal error: scan stream failed: " + s.ToString());
+  }
+
+  if (id_messages.empty()) {
+    LOG(ERROR) << "Internal error: no messages found but stream length is not 0";
+    return Status::Corruption("Internal error: no messages found but stream length is not 0");
+  }
+
+  streamID id;
+  id.DeserializeFrom(id_messages[0].field);
+  stream_meta.set_first_id(id);
+  return Status::OK();
+}
+
+bool StreamUtils::StreamGenericParseID(const std::string& var, streamID& id, uint64_t missing_seq, bool strict,
+                                       bool* seq_given) {
+  char buf[128];
+  if (var.size() > sizeof(buf) - 1) {
+    return false;
+  }
+
+  memcpy(buf, var.data(), var.size());
+  buf[var.size()] = '\0';
+
+  if (strict && (buf[0] == '-' || buf[0] == '+') && buf[1] == '\0') {
+    // res.SetRes(CmdRes::kInvalidParameter, "Invalid stream ID specified as stream ");
+    return false;
+  }
+
+  if (seq_given != nullptr) {
+    *seq_given = true;
+  }
+
+  if (buf[0] == '-' && buf[1] == '\0') {
+    id.ms = 0;
+    id.seq = 0;
+    return true;
+  } else if (buf[0] == '+' && buf[1] == '\0') {
+    id.ms = UINT64_MAX;
+    id.seq = UINT64_MAX;
+    return true;
+  }
+
+  uint64_t ms;
+  uint64_t seq;
+  char* dot = strchr(buf, '-');
+  if (dot) {
+    *dot = '\0';
+  }
+  if (!StreamUtils::string2uint64(buf, ms)) {
+    return false;
+  }
+  if (dot) {
+    auto seqlen = strlen(dot + 1);
+    if (seq_given != nullptr && seqlen == 1 && *(dot + 1) == '*') {
+      seq = 0;
+      *seq_given = false;
+    } else if (!StreamUtils::string2uint64(dot + 1, seq)) {
+      return false;
+    }
+  } else {
+    seq = missing_seq;
+  }
+  id.ms = ms;
+  id.seq = seq;
+  return true;
+}
+
+bool StreamUtils::StreamParseID(const std::string& var, streamID& id, uint64_t missing_seq) {
+  return StreamGenericParseID(var, id, missing_seq, false, nullptr);
+}
+
+bool StreamUtils::StreamParseStrictID(const std::string& var, streamID& id, uint64_t missing_seq, bool* seq_given) {
+  return StreamGenericParseID(var, id, missing_seq, true, seq_given);
+}
+
+bool StreamUtils::StreamParseIntervalId(const std::string& var, streamID& id, bool* exclude, uint64_t missing_seq) {
+  if (exclude != nullptr) {
+    *exclude = (var.size() > 1 && var[0] == '(');
+  }
+  if (exclude != nullptr && *exclude) {
+    return StreamParseStrictID(var.substr(1), id, missing_seq, nullptr);
+  } else {
+    return StreamParseID(var, id, missing_seq);
+  }
+}
+
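+// Examples of the ID forms accepted above (behavior as implemented):
+//
+//   streamID id;
+//   StreamUtils::StreamParseID("5-3", id, 0);   // id = {5, 3}
+//   StreamUtils::StreamParseID("5", id, 0);     // id = {5, 0}, seq from missing_seq
+//   StreamUtils::StreamParseID("-", id, 0);     // id = {0, 0} (minimum)
+//   StreamUtils::StreamParseID("+", id, 0);     // id = {UINT64_MAX, UINT64_MAX}
+//   StreamUtils::StreamParseStrictID("+", id, 0, nullptr);  // returns false
+//   bool ex;
+//   StreamUtils::StreamParseIntervalId("(5-3", id, &ex, 0); // ex = true, id = {5, 3}
+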
+bool StreamUtils::string2uint64(const char* s, uint64_t& value) {
+  if (!s || !*s) {
+    return false;
+  }
+
+  char* end;
+  errno = 0;
+  uint64_t tmp = strtoull(s, &end, 10);
+  if (*end || errno == ERANGE) {
+    // Conversion either didn't consume the entire string, or overflow occurred
+    return false;
+  }
+
+  value = tmp;
+  return true;
+}
+
+bool StreamUtils::string2int64(const char* s, int64_t& value) {
+  if (!s || !*s) {
+    return false;
+  }
+
+  char* end;
+  errno = 0;
+  int64_t tmp = std::strtoll(s, &end, 10);
+  if (*end || errno == ERANGE) {
+    // Conversion either didn't consume the entire string, or overflow occurred
+    return false;
+  }
+
+  value = tmp;
+  return true;
+}
+
+bool StreamUtils::string2int32(const char* s, int32_t& value) {
+  if (!s || !*s) {
+    return false;
+  }
+
+  char* end;
+  errno = 0;
+  long tmp = strtol(s, &end, 10);
+  if (*end || errno == ERANGE || tmp < INT_MIN || tmp > INT_MAX) {
+    // Conversion either didn't consume the entire string,
+    // or overflow or underflow occurred
+    return false;
+  }
+
+  value = static_cast<int32_t>(tmp);
+  return true;
+}
+
+bool StreamUtils::SerializeMessage(const std::vector<std::string>& field_values, std::string& message, int field_pos) {
+  assert(field_values.size() - field_pos >= 2 && (field_values.size() - field_pos) % 2 == 0);
+  assert(message.empty());
+  // count the size of the serialized message
+  uint32_t size = 0;
+  for (size_t i = field_pos; i < field_values.size(); i++) {
+    size += field_values[i].size() + sizeof(uint32_t);
+  }
+  message.reserve(size);
+
+  // serialize message
+  for (size_t i = field_pos; i < field_values.size(); i++) {
+    uint32_t len = static_cast<uint32_t>(field_values[i].size());
+    message.append(reinterpret_cast<const char*>(&len), sizeof(len));
+    message.append(field_values[i]);
+  }
+
+  return true;
+}
+
+bool StreamUtils::DeserializeMessage(const std::string& message, std::vector<std::string>& parsed_message) {
+  uint32_t pos = 0;
+  while (pos < message.size()) {
+    // make sure there are enough bytes left to hold the length field itself
+    if (pos + sizeof(uint32_t) > message.size()) {
+      LOG(ERROR) << "Invalid message format, failed to parse message";
+      return false;
+    }
+    // Read the length of the next field value from the message
+    uint32_t len = *reinterpret_cast<const uint32_t*>(&message[pos]);
+    pos += sizeof(uint32_t);
+
+    // Check if the calculated end of the string is still within the message bounds
+    if (pos + len > message.size()) {
+      LOG(ERROR) << "Invalid message format, failed to parse message";
+      return false;  // Error: not enough data in the message string
+    }
+
+    // Extract the field value and add it to the vector
+    parsed_message.push_back(message.substr(pos, len));
+    pos += len;
+  }
+
+  return true;
+}
+
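+// Round-trip sketch for the length-prefixed encoding above (illustrative):
+//
+//   std::vector<std::string> kvs = {"field1", "value1"};
+//   std::string buf;
+//   StreamUtils::SerializeMessage(kvs, buf, 0);  // buf = len|"field1"|len|"value1"
+//   std::vector<std::string> out;
+//   StreamUtils::DeserializeMessage(buf, out);   // out == kvs
+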
+uint64_t StreamUtils::GetCurrentTimeMs() {
+  return std::chrono::duration_cast<std::chrono::milliseconds>(std::chrono::system_clock::now().time_since_epoch())
+      .count();
+}
+}  // namespace storage
diff --git a/tools/pika_migrate/src/storage/src/redis_streams.h b/tools/pika_migrate/src/storage/src/redis_streams.h
new file mode 100644
index 0000000000..848fe94900
--- /dev/null
+++ b/tools/pika_migrate/src/storage/src/redis_streams.h
@@ -0,0 +1,143 @@
+// Copyright (c) 2023-present, Qihoo, Inc. All rights reserved.
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree. An additional grant
+// of patent rights can be found in the PATENTS file in the same directory.
+
+#pragma once
+
+#include <cstdint>
+#include <string>
+#include <vector>
+#include "pika_stream_meta_value.h"
+#include "pika_stream_types.h"
+#include "rocksdb/options.h"
+#include "rocksdb/slice.h"
+#include "rocksdb/status.h"
+
+namespace storage {
+
+// the max number of entries deleted per batch in the XTRIM command, to avoid
+// using too much memory. e.g. if an XTRIM command needs to trim 10000 entries,
+// the implementation will issue RocksDB delete operations
+// (10000 / kDEFAULT_TRIM_BATCH_SIZE) times
+const static int32_t kDEFAULT_TRIM_BATCH_SIZE = 1000;
+
+struct StreamAddTrimArgs {
+  // XADD options
+  streamID id;
+  bool id_given{false};
+  bool seq_given{false};
+  bool no_mkstream{false};
+
+  // XADD + XTRIM common options
+  StreamTrimStrategy trim_strategy{TRIM_STRATEGY_NONE};
+  int trim_strategy_arg_idx{0};
+
+  // TRIM_STRATEGY_MAXLEN option
+  uint64_t maxlen{0};
+  // TRIM_STRATEGY_MINID option
+  streamID minid;
+};
+
+struct StreamReadGroupReadArgs {
+  // XREAD + XREADGROUP common options
+  std::vector<std::string> keys;
+  std::vector<std::string> unparsed_ids;
+  int32_t count{INT32_MAX};  // The limit of read, in redis this is uint64_t, but PKHScanRange only supports int32_t
+  uint64_t block{0};         // 0 means no block
+
+  // XREADGROUP options
+  std::string group_name;
+  std::string consumer_name;
+  bool noack_{false};
+};
+
+struct StreamScanArgs {
+  streamID start_sid;
+  streamID end_sid;
+  int32_t limit{INT32_MAX};
+  bool start_ex{false};    // exclude first message
+  bool end_ex{false};      // exclude last message
+  bool is_reverse{false};  // scan in reverse order
+};
+
+struct StreamInfoResult {
+  int32_t length{0};
+  std::string last_id_str;
+  std::string max_deleted_entry_id_str;
+  uint64_t entries_added{0};
+  std::string first_id_str;
+};
+
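+// Sketch of how an XTRIM MAXLEN request might fill StreamAddTrimArgs
+// (illustrative; command parsing and validation are elided):
+//
+//   StreamAddTrimArgs args;
+//   args.trim_strategy = StreamTrimStrategy::TRIM_STRATEGY_MAXLEN;
+//   args.maxlen = 1000;  // keep at most 1000 entries
+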
+class StreamUtils {
+ public:
+  StreamUtils() = default;
+  ~StreamUtils() = default;
+
+  static bool string2uint64(const char* s, uint64_t& value);
+  static bool string2int64(const char* s, int64_t& value);
+  static bool string2int32(const char* s, int32_t& value);
+
+  static uint64_t GetCurrentTimeMs();
+
+  // serialize the message to a string.
+  // format: {field1.size, field1, value1.size, value1, field2.size, field2, ...}
+  static bool SerializeMessage(const std::vector<std::string>& field_values, std::string& serialized_message,
+                               int field_pos);
+
+  // deserialize the message from a string with the format of SerializeMessage.
+  static bool DeserializeMessage(const std::string& message, std::vector<std::string>& parsed_message);
+
+  // Parse a stream ID in the format given by clients to Pika, that is
+  // <ms>-<seq>, and convert it into a streamID structure. The ID may be in
+  // incomplete form, just stating the milliseconds time part of the stream. In
+  // such a case the missing part is set according to the value of the
+  // 'missing_seq' parameter.
+  //
+  // The IDs "-" and "+" specify respectively the minimum and maximum IDs
+  // that can be represented. If 'strict' is set to 1, "-" and "+" will be
+  // treated as an invalid ID.
+  //
+  // The ID form <ms>-* specifies a milliseconds-only ID, leaving the sequence
+  // part to be autogenerated. When a non-NULL 'seq_given' argument is provided,
+  // this form is accepted and the argument is set to 0 unless the sequence part
+  // is specified.
+  static bool StreamGenericParseID(const std::string& var, streamID& id, uint64_t missing_seq, bool strict,
+                                   bool* seq_given);
+
+  // Wrapper for StreamGenericParseID() with 'strict' argument set to
+  // 0, to be used when - and + are acceptable IDs.
+  static bool StreamParseID(const std::string& var, streamID& id, uint64_t missing_seq);
+
+  // Wrapper for StreamGenericParseID() with 'strict' argument set to
+  // 1, to be used when we want to return an error if the special IDs + or -
+  // are provided.
+  static bool StreamParseStrictID(const std::string& var, streamID& id, uint64_t missing_seq, bool* seq_given);
+
+  // Helper for parsing a stream ID that is a range query interval. When the
+  // exclude argument is NULL, StreamParseID() is called and the interval
+  // is treated as closed (inclusive). Otherwise, the exclude argument is set if
+  // the interval is open (the "(" prefix) and StreamParseStrictID() is
+  // called in that case.
+  static bool StreamParseIntervalId(const std::string& var, streamID& id, bool* exclude, uint64_t missing_seq);
+};
+
+struct ScanStreamOptions {
+  const rocksdb::Slice key;  // the key of the stream
+  uint64_t version;          // the version of the stream
+  streamID start_sid;
+  streamID end_sid;
+  int32_t limit;
+  bool start_ex;    // exclude first message
+  bool end_ex;      // exclude last message
+  bool is_reverse;  // scan in reverse order
+  ScanStreamOptions(const rocksdb::Slice skey, uint64_t version, streamID start_sid, streamID end_sid, int32_t count,
+                    bool start_ex = false, bool end_ex = false, bool is_reverse = false)
+      : key(skey),
+        version(version),
+        start_sid(start_sid),
+        end_sid(end_sid),
+        limit(count),
+        start_ex(start_ex),
+        end_ex(end_ex),
+        is_reverse(is_reverse) {}
+};
+}  // namespace storage
diff --git a/tools/pika_migrate/src/storage/src/redis_strings.cc b/tools/pika_migrate/src/storage/src/redis_strings.cc
new file mode 100644
index 0000000000..e402f175ca
--- /dev/null
+++ b/tools/pika_migrate/src/storage/src/redis_strings.cc
@@ -0,0 +1,1767 @@
+// Copyright (c) 2017-present, Qihoo, Inc. All rights reserved.
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree. An additional grant
+// of patent rights can be found in the PATENTS file in the same directory.
+
+#include <algorithm>
+#include <climits>
+#include <cstdlib>
+#include <cstring>
+#include <limits>
+
+#include <memory>
+#include <string>
+
+#include "pstd/include/pika_codis_slot.h"
+#include "src/base_key_format.h"
+#include "src/scope_record_lock.h"
+#include "src/scope_snapshot.h"
+#include "src/strings_filter.h"
+#include "src/redis.h"
+#include "storage/util.h"
+
+namespace storage {
+Status Redis::ScanStringsKeyNum(KeyInfo* key_info) {
+  uint64_t keys = 0;
+  uint64_t expires = 0;
+  uint64_t ttl_sum = 0;
+  uint64_t invaild_keys = 0;
+
+  rocksdb::ReadOptions iterator_options;
+  const rocksdb::Snapshot* snapshot;
+  ScopeSnapshot ss(db_, &snapshot);
+  iterator_options.snapshot = snapshot;
+  iterator_options.fill_cache = false;
+
+  pstd::TimeType curtime = pstd::NowMillis();
+
+  // Note: This is a string type and does not need to pass the column family as
+  // a parameter, use the default column family
+  rocksdb::Iterator* iter = db_->NewIterator(iterator_options);
+  for (iter->SeekToFirst(); iter->Valid(); iter->Next()) {
+    if (!ExpectedMetaValue(DataType::kStrings, iter->value().ToString())) {
+      continue;
+    }
+    ParsedStringsValue parsed_strings_value(iter->value());
+    if (parsed_strings_value.IsStale()) {
+      invaild_keys++;
+    } else {
+      keys++;
+      if (!parsed_strings_value.IsPermanentSurvival()) {
+        expires++;
+        ttl_sum += parsed_strings_value.Etime() - curtime;
+      }
+    }
+  }
+  delete iter;
+
+  key_info->keys = keys;
+  key_info->expires = expires;
+  key_info->avg_ttl = (expires != 0) ? ttl_sum / expires : 0;
+  key_info->invaild_keys = invaild_keys;
+  return Status::OK();
+}
+
+Status Redis::Append(const Slice& key, const Slice& value, int32_t* ret, int64_t* expired_timestamp_millsec, std::string& out_new_value) {
+  std::string old_value;
+  *ret = 0;
+  *expired_timestamp_millsec = 0;
+  ScopeRecordLock l(lock_mgr_, key);
+
+  BaseKey base_key(key);
+  Status s = db_->Get(default_read_options_, base_key.Encode(), &old_value);
+  if (s.ok() && !ExpectedMetaValue(DataType::kStrings, old_value)) {
+    if (ExpectedStale(old_value)) {
+      s = Status::NotFound();
+    } else {
+      return Status::InvalidArgument(
+          "WRONGTYPE, key: " + key.ToString() + ", expect type: " +
+          DataTypeStrings[static_cast<int>(DataType::kStrings)] + ", get type: " +
+          DataTypeStrings[static_cast<int>(GetMetaValueType(old_value))]);
+    }
+  }
+  if (s.ok()) {
+    ParsedStringsValue parsed_strings_value(&old_value);
+    if (parsed_strings_value.IsStale()) {
+      *ret = static_cast<int32_t>(value.size());
+      StringsValue strings_value(value);
+      return db_->Put(default_write_options_, base_key.Encode(), strings_value.Encode());
+    } else {
+      uint64_t timestamp = parsed_strings_value.Etime();
+      std::string old_user_value = parsed_strings_value.UserValue().ToString();
+      std::string new_value = old_user_value + value.ToString();
+      out_new_value = new_value;
+      StringsValue strings_value(new_value);
+      strings_value.SetEtime(timestamp);
+      *ret = static_cast<int32_t>(new_value.size());
+      *expired_timestamp_millsec = timestamp;
+      return db_->Put(default_write_options_, base_key.Encode(), strings_value.Encode());
+    }
+  } else if (s.IsNotFound()) {
+    *ret = static_cast<int32_t>(value.size());
+    StringsValue strings_value(value);
+    *expired_timestamp_millsec = 0;
+    return db_->Put(default_write_options_, base_key.Encode(), strings_value.Encode());
+  }
+  return s;
+}
+
+int GetBitCount(const unsigned char* value, int64_t bytes) {
+  int bit_num = 0;
+  static const unsigned char bitsinbyte[256] = {
+      0, 1, 1, 2, 1, 2, 2, 3, 1, 2, 2, 3, 2, 3, 3, 4, 1, 2, 2, 3, 2, 3, 3, 4, 2, 3, 3, 4, 3, 4, 4, 5, 1, 2, 2, 3, 2,
+      3, 3, 4, 2, 3, 3, 4, 3, 4, 4, 5, 2, 3, 3, 4, 3, 4, 4, 5, 3, 4, 4, 5, 4, 5, 5, 6, 1, 2, 2, 3, 2, 3, 3, 4, 2, 3,
+      3, 4, 3, 4, 4, 5, 2, 3, 3, 4, 3, 4, 4, 5, 3, 4, 4, 5, 4, 5, 5, 6, 2, 3, 3, 4, 3, 4, 4, 5, 3, 4, 4, 5, 4, 5, 5,
+      6, 3, 4, 4, 5, 4, 5, 5, 6, 4, 5, 5, 6, 5, 6, 6, 7, 1, 2, 2, 3, 2, 3, 3, 4, 2, 3, 3, 4, 3, 4, 4, 5, 2, 3, 3, 4,
+      3, 4, 4, 5, 3, 4, 4, 5, 4, 5, 5, 6, 2, 3, 3, 4, 3, 4, 4, 5, 3, 4, 4, 5, 4, 5, 5, 6, 3, 4, 4, 5, 4, 5, 5, 6, 4,
+      5, 5, 6, 5, 6, 6, 7, 2, 3, 3, 4, 3, 4, 4, 5, 3, 4, 4, 5, 4, 5, 5, 6, 3, 4, 4, 5, 4, 5, 5, 6, 4, 5, 5, 6, 5, 6,
+      6, 7, 3, 4, 4, 5, 4, 5, 5, 6, 4, 5, 5, 6, 5, 6, 6, 7, 4, 5, 5, 6, 5, 6, 6, 7, 5, 6, 6, 7, 6, 7, 7, 8};
+  for (int i = 0; i < bytes; i++) {
+    bit_num += bitsinbyte[static_cast<unsigned int>(value[i])];
+  }
+  return bit_num;
+}
+
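+// The 256-entry table above precomputes per-byte population counts; the same
+// value can be computed with the classic SWAR steps (shown for comparison
+// only, not used by this file):
+//
+//   int popcount_byte(unsigned char b) {
+//     b = b - ((b >> 1) & 0x55);           // 2-bit sums
+//     b = (b & 0x33) + ((b >> 2) & 0x33);  // 4-bit sums
+//     return (b + (b >> 4)) & 0x0F;        // 8-bit sum
+//   }
+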
+Status Redis::BitCount(const Slice& key, int64_t start_offset, int64_t end_offset, int32_t* ret,
+                       bool have_range) {
+  *ret = 0;
+  std::string value;
+
+  BaseKey base_key(key);
+  Status s = db_->Get(default_read_options_, base_key.Encode(), &value);
+  if (s.ok() && !ExpectedMetaValue(DataType::kStrings, value)) {
+    if (ExpectedStale(value)) {
+      s = Status::NotFound();
+    } else {
+      return Status::InvalidArgument(
+          "WRONGTYPE, key: " + key.ToString() + ", expect type: " +
+          DataTypeStrings[static_cast<int>(DataType::kStrings)] + ", get type: " +
+          DataTypeStrings[static_cast<int>(GetMetaValueType(value))]);
+    }
+  }
+  if (s.ok()) {
+    ParsedStringsValue parsed_strings_value(&value);
+    if (parsed_strings_value.IsStale()) {
+      return Status::NotFound("Stale");
+    } else {
+      parsed_strings_value.StripSuffix();
+      const auto bit_value = reinterpret_cast<const unsigned char*>(value.data());
+      auto value_length = static_cast<int64_t>(value.length());
+      if (have_range) {
+        if (start_offset < 0) {
+          start_offset = start_offset + value_length;
+        }
+        if (end_offset < 0) {
+          end_offset = end_offset + value_length;
+        }
+        if (start_offset < 0) {
+          start_offset = 0;
+        }
+        if (end_offset < 0) {
+          end_offset = 0;
+        }
+
+        if (end_offset >= value_length) {
+          end_offset = value_length - 1;
+        }
+        if (start_offset > end_offset) {
+          return Status::OK();
+        }
+      } else {
+        start_offset = 0;
+        end_offset = std::max(value_length - 1, static_cast<int64_t>(0));
+      }
+      *ret = GetBitCount(bit_value + start_offset, end_offset - start_offset + 1);
+    }
+  } else {
+    return s;
+  }
+  return Status::OK();
+}
+
+std::string BitOpOperate(BitOpType op, const std::vector<std::string>& src_values, int64_t max_len) {
+  char byte;
+  char output;
+  auto dest_value = std::make_unique<char[]>(max_len);
+  for (int64_t j = 0; j < max_len; j++) {
+    if (j < static_cast<int64_t>(src_values[0].size())) {
+      output = src_values[0][j];
+    } else {
+      output = 0;
+    }
+    if (op == kBitOpNot) {
+      output = static_cast<char>(~output);
+    }
+    for (size_t i = 1; i < src_values.size(); i++) {
+      if (static_cast<int64_t>(src_values[i].size()) - 1 >= j) {
+        byte = src_values[i][j];
+      } else {
+        byte = 0;
+      }
+      switch (op) {
+        case kBitOpNot:
+          break;
+        case kBitOpAnd:
+          output = static_cast<char>(output & byte);
+          break;
+        case kBitOpOr:
+          output = static_cast<char>(output | byte);
+          break;
+        case kBitOpXor:
+          output = static_cast<char>(output ^ byte);
+          break;
+        case kBitOpDefault:
+          break;
+      }
+    }
+    dest_value[j] = output;
+  }
+  std::string dest_str(dest_value.get(), max_len);
+  return dest_str;
+}
+
+Status Redis::BitOp(BitOpType op, const std::string& dest_key, const std::vector<std::string>& src_keys, std::string& value_to_dest, int64_t* ret) {
+  Status s;
+  if (op == kBitOpNot && src_keys.size() != 1) {
+    return Status::InvalidArgument("the number of source keys is not right");
+  } else if (src_keys.empty()) {
+    return Status::InvalidArgument("the number of source keys is not right");
+  }
+
+  int64_t max_len = 0;
+  int64_t value_len = 0;
+  std::vector<std::string> src_values;
+  for (const auto& src_key : src_keys) {
+    std::string value;
+    BaseKey base_key(src_key);
+    s = db_->Get(default_read_options_, base_key.Encode(), &value);
+    if (s.ok() && !ExpectedMetaValue(DataType::kStrings, value)) {
+      if (ExpectedStale(value)) {
+        s = Status::NotFound();
+      } else {
+        return Status::InvalidArgument(
+            "WRONGTYPE, key: " + src_key + ", expect type: " +
+            DataTypeStrings[static_cast<int>(DataType::kStrings)] + ", get type: " +
+            DataTypeStrings[static_cast<int>(GetMetaValueType(value))]);
+      }
+    }
+    if (s.ok()) {
+      ParsedStringsValue parsed_strings_value(&value);
+      if (parsed_strings_value.IsStale()) {
+        src_values.emplace_back("");
+        value_len = 0;
+      } else {
+        parsed_strings_value.StripSuffix();
+        src_values.push_back(value);
+        value_len = static_cast<int64_t>(value.size());
+      }
+    } else if (s.IsNotFound()) {
+      src_values.emplace_back("");
+      value_len = 0;
+    } else {
+      return s;
+    }
+    max_len = std::max(max_len, value_len);
+  }
+
+  std::string dest_value = BitOpOperate(op, src_values, max_len);
+  value_to_dest = dest_value;
+  *ret = static_cast<int64_t>(dest_value.size());
+
+  StringsValue strings_value(Slice(dest_value.c_str(), max_len));
+  ScopeRecordLock l(lock_mgr_, dest_key);
+  BaseKey base_dest_key(dest_key);
+  return db_->Put(default_write_options_, base_dest_key.Encode(),
strings_value.Encode()); +} + +Status Redis::Decrby(const Slice& key, int64_t value, int64_t* ret) { + std::string old_value; + std::string new_value; + ScopeRecordLock l(lock_mgr_, key); + + BaseKey base_key(key); + Status s = db_->Get(default_read_options_, base_key.Encode(), &old_value); + if (s.ok() && !ExpectedMetaValue(DataType::kStrings, old_value)) { + if (ExpectedStale(old_value)) { + s = Status::NotFound(); + } else { + return Status::InvalidArgument( + "WRONGTYPE, key: " + key.ToString() + ", expect type: " + + DataTypeStrings[static_cast(DataType::kStrings)] + ", get type: " + + DataTypeStrings[static_cast(GetMetaValueType(old_value))]); + } + } + if (s.ok()) { + ParsedStringsValue parsed_strings_value(&old_value); + if (parsed_strings_value.IsStale()) { + *ret = -value; + new_value = std::to_string(*ret); + StringsValue strings_value(new_value); + return db_->Put(default_write_options_, base_key.Encode(), strings_value.Encode()); + } else { + uint64_t timestamp = parsed_strings_value.Etime(); + std::string old_user_value = parsed_strings_value.UserValue().ToString(); + char* end = nullptr; + errno = 0; + int64_t ival = strtoll(old_user_value.c_str(), &end, 10); + if (errno == ERANGE || *end != 0) { + return Status::Corruption("Value is not a integer"); + } + if ((value >= 0 && LLONG_MIN + value > ival) || (value < 0 && LLONG_MAX + value < ival)) { + return Status::InvalidArgument("Overflow"); + } + *ret = ival - value; + new_value = std::to_string(*ret); + StringsValue strings_value(new_value); + strings_value.SetEtime(timestamp); + return db_->Put(default_write_options_, base_key.Encode(), strings_value.Encode()); + } + } else if (s.IsNotFound()) { + *ret = -value; + new_value = std::to_string(*ret); + StringsValue strings_value(new_value); + return db_->Put(default_write_options_, base_key.Encode(), strings_value.Encode()); + } else { + return s; + } +} + +Status Redis::Get(const Slice& key, std::string* value) { + value->clear(); + + BaseKey base_key(key); + Status s = db_->Get(default_read_options_, base_key.Encode(), value); + std::string meta_value = *value; + if (s.ok() && !ExpectedMetaValue(DataType::kStrings, meta_value)) { + if (ExpectedStale(meta_value)) { + s = Status::NotFound(); + } else { + return Status::InvalidArgument( + "WRONGTYPE, key: " + key.ToString() + ", expect type: " + + DataTypeStrings[static_cast(DataType::kStrings)] + ", get type: " + + DataTypeStrings[static_cast(GetMetaValueType(meta_value))]); + } + } + if (s.ok()) { + ParsedStringsValue parsed_strings_value(value); + if (parsed_strings_value.IsStale()) { + value->clear(); + return Status::NotFound("Stale"); + } else { + parsed_strings_value.StripSuffix(); + } + } + return s; +} + +Status Redis::MGet(const Slice& key, std::string* value) { + value->clear(); + + BaseKey base_key(key); + Status s = db_->Get(default_read_options_, base_key.Encode(), value); + std::string meta_value = *value; + if (s.ok() && !ExpectedMetaValue(DataType::kStrings, meta_value)) { + return Status::NotFound(); + } + if (s.ok()) { + ParsedStringsValue parsed_strings_value(value); + if (parsed_strings_value.IsStale()) { + value->clear(); + return Status::NotFound("Stale"); + } else { + parsed_strings_value.StripSuffix(); + } + } + return s; +} + +void ClearValueAndSetTTL(std::string* value, int64_t* ttl, int64_t ttl_value) { + value->clear(); + *ttl = ttl_value; +} + +int64_t CalculateTTL(int64_t expiry_time) { + pstd::TimeType current_time = pstd::NowMillis(); + return expiry_time - current_time >= 0 ? 
expiry_time - current_time : -2; +} + +Status HandleParsedStringsValue(ParsedStringsValue& parsed_strings_value, std::string* value, int64_t* ttl_millsec) { + if (parsed_strings_value.IsStale()) { + ClearValueAndSetTTL(value, ttl_millsec, -2); + return Status::NotFound("Stale"); + } else { + parsed_strings_value.StripSuffix(); + int64_t expiry_time = parsed_strings_value.Etime(); + *ttl_millsec = (expiry_time == 0) ? -1 : CalculateTTL(expiry_time); + } + return Status::OK(); +} + +Status Redis::GetWithTTL(const Slice& key, std::string* value, int64_t* ttl_millsec) { + value->clear(); + BaseKey base_key(key); + Status s = db_->Get(default_read_options_, base_key.Encode(), value); + std::string meta_value = *value; + + if (s.ok() && !ExpectedMetaValue(DataType::kStrings, meta_value)) { + if (ExpectedStale(meta_value)) { + s = Status::NotFound(); + } else { + return Status::InvalidArgument( + "WRONGTYPE, key: " + key.ToString() + ", expect type: " + + DataTypeStrings[static_cast(DataType::kStrings)] + " get type: " + + DataTypeStrings[static_cast(GetMetaValueType(meta_value))]); + } + } + + if (s.ok()) { + ParsedStringsValue parsed_strings_value(value); + return HandleParsedStringsValue(parsed_strings_value, value, ttl_millsec); + } else if (s.IsNotFound()) { + ClearValueAndSetTTL(value, ttl_millsec, -2); + } + + return s; +} + +Status Redis::MGetWithTTL(const Slice& key, std::string* value, int64_t* ttl_millsec) { + value->clear(); + BaseKey base_key(key); + Status s = db_->Get(default_read_options_, base_key.Encode(), value); + std::string meta_value = *value; + + if (s.ok() && !ExpectedMetaValue(DataType::kStrings, meta_value)) { + s = Status::NotFound(); + } + + if (s.ok()) { + ParsedStringsValue parsed_strings_value(value); + return HandleParsedStringsValue(parsed_strings_value, value, ttl_millsec); + } else if (s.IsNotFound()) { + ClearValueAndSetTTL(value, ttl_millsec, -2); + } + + return s; +} + +Status Redis::GetBit(const Slice& key, int64_t offset, int32_t* ret) { + std::string meta_value; + + BaseKey base_key(key); + Status s = db_->Get(default_read_options_, base_key.Encode(), &meta_value); + if (s.ok() || s.IsNotFound()) { + std::string data_value; + if (s.ok() && !ExpectedMetaValue(DataType::kStrings, meta_value)) { + if (ExpectedStale(meta_value)) { + s = Status::NotFound(); + } else { + return Status::InvalidArgument( + "WRONGTYPE, key: " + key.ToString() + ", expect type: " + + DataTypeStrings[static_cast(DataType::kStrings)] + ", get type: " + + DataTypeStrings[static_cast(GetMetaValueType(meta_value))]); + } + } + if (s.ok()) { + ParsedStringsValue parsed_strings_value(&meta_value); + if (parsed_strings_value.IsStale()) { + *ret = 0; + return Status::OK(); + } else { + data_value = parsed_strings_value.UserValue().ToString(); + } + } + size_t byte = offset >> 3; + size_t bit = 7 - (offset & 0x7); + if (byte + 1 > data_value.length()) { + *ret = 0; + } else { + *ret = ((data_value[byte] & (1 << bit)) >> bit); + } + } else { + return s; + } + return Status::OK(); +} + +Status Redis::Getrange(const Slice& key, int64_t start_offset, int64_t end_offset, std::string* ret) { + *ret = ""; + std::string value; + + BaseKey base_key(key); + Status s = db_->Get(default_read_options_, base_key.Encode(), &value); + if (s.ok() && !ExpectedMetaValue(DataType::kStrings, value)) { + if (ExpectedStale(value)) { + s = Status::NotFound(); + } else { + return Status::InvalidArgument( + "WRONGTYPE, key: " + key.ToString() + ", expect type: " + + DataTypeStrings[static_cast(DataType::kStrings)] + 
", get type: " + + DataTypeStrings[static_cast(GetMetaValueType(value))]); + } + } + if (s.ok()) { + ParsedStringsValue parsed_strings_value(&value); + if (parsed_strings_value.IsStale()) { + return Status::NotFound("Stale"); + } else { + parsed_strings_value.StripSuffix(); + auto size = static_cast(value.size()); + int64_t start_t = start_offset >= 0 ? start_offset : size + start_offset; + int64_t end_t = end_offset >= 0 ? end_offset : size + end_offset; + if (start_t > size - 1 || (start_t != 0 && start_t > end_t) || (start_t != 0 && end_t < 0)) { + return Status::OK(); + } + if (start_t < 0) { + start_t = 0; + } + if (end_t >= size) { + end_t = size - 1; + } + if (start_t == 0 && end_t < 0) { + end_t = 0; + } + *ret = value.substr(start_t, end_t - start_t + 1); + return Status::OK(); + } + } else { + return s; + } +} + +Status Redis::GetrangeWithValue(const Slice& key, int64_t start_offset, int64_t end_offset, + std::string* ret, std::string* value, int64_t* ttl_millsec) { + *ret = ""; + BaseKey base_key(key); + Status s = db_->Get(default_read_options_, base_key.Encode(), value); + std::string meta_value = *value; + if (s.ok() && !ExpectedMetaValue(DataType::kStrings, meta_value)) { + if (ExpectedStale(meta_value)) { + s = Status::NotFound(); + } else { + return Status::InvalidArgument( + "WRONGTYPE, key: " + key.ToString() + ", expect type: " + + DataTypeStrings[static_cast(DataType::kStrings)] + ", get type: " + + DataTypeStrings[static_cast(GetMetaValueType(meta_value))]); + } + } + if (s.ok()) { + ParsedStringsValue parsed_strings_value(value); + if (parsed_strings_value.IsStale()) { + value->clear(); + *ttl_millsec = -2; + return Status::NotFound("Stale"); + } else { + parsed_strings_value.StripSuffix(); + // get ttl + *ttl_millsec = parsed_strings_value.Etime(); + if (*ttl_millsec == 0) { + *ttl_millsec = -1; + } else { + pstd::TimeType curtime = pstd::NowMillis(); + *ttl_millsec = *ttl_millsec - curtime >= 0 ? *ttl_millsec - curtime : -2; + } + + int64_t size = value->size(); + int64_t start_t = start_offset >= 0 ? start_offset : size + start_offset; + int64_t end_t = end_offset >= 0 ? 
end_offset : size + end_offset; + if (start_t > size - 1 || + (start_t != 0 && start_t > end_t) || + (start_t != 0 && end_t < 0) + ) { + return Status::OK(); + } + if (start_t < 0) { + start_t = 0; + } + if (end_t >= size) { + end_t = size - 1; + } + if (start_t == 0 && end_t < 0) { + end_t = 0; + } + *ret = value->substr(start_t, end_t-start_t+1); + return Status::OK(); + } + } else if (s.IsNotFound()) { + value->clear(); + *ttl_millsec = -2; + } + return s; +} + +Status Redis::GetSet(const Slice& key, const Slice& value, std::string* old_value) { + ScopeRecordLock l(lock_mgr_, key); + + BaseKey base_key(key); + Status s = db_->Get(default_read_options_, base_key.Encode(), old_value); + std::string meta_value = *old_value; + if (s.ok() && !ExpectedMetaValue(DataType::kStrings, meta_value)) { + if (ExpectedStale(meta_value)) { + s = Status::NotFound(); + } else { + return Status::InvalidArgument( + "WRONGTYPE, key: " + key.ToString() + ", expect type: " + + DataTypeStrings[static_cast(DataType::kStrings)] + ", get type: " + + DataTypeStrings[static_cast(GetMetaValueType(meta_value))]); + } + } + if (s.ok()) { + ParsedStringsValue parsed_strings_value(old_value); + if (parsed_strings_value.IsStale()) { + *old_value = ""; + } else { + parsed_strings_value.StripSuffix(); + } + } else if (!s.IsNotFound()) { + return s; + } + StringsValue strings_value(value); + return db_->Put(default_write_options_, base_key.Encode(), strings_value.Encode()); +} + +Status Redis::Incrby(const Slice& key, int64_t value, int64_t* ret, int64_t* expired_timestamp_millsec) { + std::string old_value; + std::string new_value; + ScopeRecordLock l(lock_mgr_, key); + BaseKey base_key(key); + Status s = db_->Get(default_read_options_, base_key.Encode(), &old_value); + char buf[32] = {0}; + if (s.ok() && !ExpectedMetaValue(DataType::kStrings, old_value)) { + if (ExpectedStale(old_value)) { + s = Status::NotFound(); + } else { + return Status::InvalidArgument("WRONGTYPE, key: " + key.ToString() + + ", expect type: " + DataTypeStrings[static_cast(DataType::kStrings)] + + ", get type: " + DataTypeStrings[static_cast(GetMetaValueType(old_value))]); + } + } + if (s.ok()) { + ParsedStringsValue parsed_strings_value(&old_value); + if (parsed_strings_value.IsStale()) { + *ret = value; + Int64ToStr(buf, 32, value); + StringsValue strings_value(buf); + return db_->Put(default_write_options_, base_key.Encode(), strings_value.Encode()); + } else { + uint64_t timestamp = parsed_strings_value.Etime(); + std::string old_user_value = parsed_strings_value.UserValue().ToString(); + char* end = nullptr; + int64_t ival = strtoll(old_user_value.c_str(), &end, 10); + if (*end != 0) { + return Status::Corruption("Value is not a integer"); + } + if ((value >= 0 && LLONG_MAX - value < ival) || (value < 0 && LLONG_MIN - value > ival)) { + return Status::InvalidArgument("Overflow"); + } + *ret = ival + value; + new_value = std::to_string(*ret); + StringsValue strings_value(new_value); + strings_value.SetEtime(timestamp); + *expired_timestamp_millsec = timestamp; + return db_->Put(default_write_options_, base_key.Encode(), strings_value.Encode()); + } + } else if (s.IsNotFound()) { + *ret = value; + Int64ToStr(buf, 32, value); + StringsValue strings_value(buf); + return db_->Put(default_write_options_, base_key.Encode(), strings_value.Encode()); + } else { + return s; + } +} + +Status Redis::Incrbyfloat(const Slice& key, const Slice& value, std::string* ret, int64_t* expired_timestamp_sec) { + std::string old_value; + std::string new_value; + 
*expired_timestamp_sec = 0; + long double long_double_by; + if (StrToLongDouble(value.data(), value.size(), &long_double_by) == -1) { + return Status::Corruption("Value is not a vaild float"); + } + + BaseKey base_key(key); + ScopeRecordLock l(lock_mgr_, key); + Status s = db_->Get(default_read_options_, base_key.Encode(), &old_value); + if (s.ok() && !ExpectedMetaValue(DataType::kStrings, old_value)) { + if (ExpectedStale(old_value)) { + s = Status::NotFound(); + } else { + return Status::InvalidArgument( + "WRONGTYPE, key: " + key.ToString() + ", expect type: " + + DataTypeStrings[static_cast(DataType::kStrings)] + ", get type: " + + DataTypeStrings[static_cast(GetMetaValueType(old_value))]); + } + } + if (s.ok()) { + ParsedStringsValue parsed_strings_value(&old_value); + if (parsed_strings_value.IsStale()) { + LongDoubleToStr(long_double_by, &new_value); + *ret = new_value; + StringsValue strings_value(new_value); + return db_->Put(default_write_options_, base_key.Encode(), strings_value.Encode()); + } else { + uint64_t timestamp = parsed_strings_value.Etime(); + std::string old_user_value = parsed_strings_value.UserValue().ToString(); + long double total; + long double old_number; + if (StrToLongDouble(old_user_value.data(), old_user_value.size(), &old_number) == -1) { + return Status::Corruption("Value is not a vaild float"); + } + total = old_number + long_double_by; + if (LongDoubleToStr(total, &new_value) == -1) { + return Status::InvalidArgument("Overflow"); + } + *ret = new_value; + StringsValue strings_value(new_value); + strings_value.SetEtime(timestamp); + *expired_timestamp_sec = timestamp; + return db_->Put(default_write_options_, base_key.Encode(), strings_value.Encode()); + } + } else if (s.IsNotFound()) { + LongDoubleToStr(long_double_by, &new_value); + *ret = new_value; + StringsValue strings_value(new_value); + return db_->Put(default_write_options_, base_key.Encode(), strings_value.Encode()); + } else { + return s; + } +} + +Status Redis::MSet(const std::vector& kvs) { + std::vector keys; + keys.reserve(kvs.size()); + for (const auto& kv : kvs) { + keys.push_back(kv.key); + } + + MultiScopeRecordLock ml(lock_mgr_, keys); + rocksdb::WriteBatch batch; + for (const auto& kv : kvs) { + BaseKey base_key(kv.key); + StringsValue strings_value(kv.value); + batch.Put(base_key.Encode(), strings_value.Encode()); + } + return db_->Write(default_write_options_, &batch); +} + +Status Redis::MSetnx(const std::vector& kvs, int32_t* ret) { + Status s; + bool exists = false; + *ret = 0; + std::string value; + for (const auto & kv : kvs) { + BaseKey base_key(kv.key); + s = db_->Get(default_read_options_, base_key.Encode(), &value); + if (!s.ok() && !s.IsNotFound()) { + return s; + } + if (s.ok() && !ExpectedStale(value)) { + exists = true; + break; + } + // when reaches here, either s is not found or s is ok but expired + } + if (!exists) { + s = MSet(kvs); + if (s.ok()) { + *ret = 1; + } + } + return s; +} + +Status Redis::Set(const Slice& key, const Slice& value) { + StringsValue strings_value(value); + ScopeRecordLock l(lock_mgr_, key); + + BaseKey base_key(key); + return db_->Put(default_write_options_, base_key.Encode(), strings_value.Encode()); +} + +Status Redis::Setxx(const Slice& key, const Slice& value, int32_t* ret, int64_t ttl_millsec) { + bool not_found = true; + std::string old_value; + StringsValue strings_value(value); + + BaseKey base_key(key); + ScopeRecordLock l(lock_mgr_, key); + Status s = db_->Get(default_read_options_, base_key.Encode(), &old_value); + if (s.ok() 
&& !ExpectedMetaValue(DataType::kStrings, old_value)) { + if (ExpectedStale(old_value)) { + s = Status::NotFound(); + } else { + return Status::InvalidArgument( + "WRONGTYPE, key: " + key.ToString() + ", expect type: " + + DataTypeStrings[static_cast(DataType::kStrings)] + ", get type: " + + DataTypeStrings[static_cast(GetMetaValueType(old_value))]); + } + } + if (s.ok()) { + ParsedStringsValue parsed_strings_value(old_value); + if (!parsed_strings_value.IsStale()) { + not_found = false; + } + } else if (!s.IsNotFound()) { + return s; + } + + if (not_found) { + *ret = 0; + return s; + } else { + *ret = 1; + if (ttl_millsec > 0) { + strings_value.SetRelativeTimeInMillsec(ttl_millsec); + } + return db_->Put(default_write_options_, base_key.Encode(), strings_value.Encode()); + } +} + +Status Redis::SetBit(const Slice& key, int64_t offset, int32_t on, int32_t* ret) { + std::string meta_value; + if (offset < 0) { + return Status::InvalidArgument("offset < 0"); + } + + BaseKey base_key(key); + ScopeRecordLock l(lock_mgr_, key); + Status s = db_->Get(default_read_options_, base_key.Encode(), &meta_value); + if (s.ok() && !ExpectedMetaValue(DataType::kStrings, meta_value)) { + if (ExpectedStale(meta_value)) { + s = Status::NotFound(); + } else { + return Status::InvalidArgument( + "WRONGTYPE, key: " + key.ToString() + ", expect type: " + + DataTypeStrings[static_cast(DataType::kStrings)] + ", get type: " + + DataTypeStrings[static_cast(GetMetaValueType(meta_value))]); + } + } + if (s.ok() || s.IsNotFound()) { + std::string data_value; + uint64_t timestamp = 0; + if (s.ok()) { + ParsedStringsValue parsed_strings_value(&meta_value); + if (!parsed_strings_value.IsStale()) { + data_value = parsed_strings_value.UserValue().ToString(); + timestamp = parsed_strings_value.Etime(); + } + } + size_t byte = offset >> 3; + size_t bit = 7 - (offset & 0x7); + char byte_val; + size_t value_lenth = data_value.length(); + if (byte + 1 > value_lenth) { + *ret = 0; + byte_val = 0; + } else { + *ret = ((data_value[byte] & (1 << bit)) >> bit); + byte_val = data_value[byte]; + } + if (*ret == on) { + return Status::OK(); + } + byte_val = static_cast(byte_val & (~(1 << bit))); + byte_val = static_cast(byte_val | ((on & 0x1) << bit)); + if (byte + 1 <= value_lenth) { + data_value.replace(byte, 1, &byte_val, 1); + } else { + data_value.append(byte + 1 - value_lenth - 1, 0); + data_value.append(1, byte_val); + } + StringsValue strings_value(data_value); + strings_value.SetEtime(timestamp); + return db_->Put(rocksdb::WriteOptions(), base_key.Encode(), strings_value.Encode()); + } else { + return s; + } +} + +Status Redis::Setex(const Slice& key, const Slice& value, int64_t ttl_millsec) { + if (ttl_millsec <= 0) { + return Status::InvalidArgument("invalid expire time"); + } + StringsValue strings_value(value); + auto s = strings_value.SetRelativeTimeInMillsec(ttl_millsec); + if (s != Status::OK()) { + return s; + } + + BaseKey base_key(key); + ScopeRecordLock l(lock_mgr_, key); + return db_->Put(default_write_options_, base_key.Encode(), strings_value.Encode()); +} + +Status Redis::Setnx(const Slice& key, const Slice& value, int32_t* ret, int64_t ttl_millsec) { + *ret = 0; + std::string old_value; + + BaseKey base_key(key); + ScopeRecordLock l(lock_mgr_, key); + Status s = db_->Get(default_read_options_, base_key.Encode(), &old_value); + if (!s.ok() && !s.IsNotFound()) { + return s; + } + if (s.ok() && !ExpectedStale(old_value)) { + return s; + } + // when reaches here, either s is not found or s is ok but expired + s = 
Status::NotFound(); + + StringsValue strings_value(value); + if (ttl_millsec > 0) { + strings_value.SetRelativeTimeInMillsec(ttl_millsec); + } + s = db_->Put(default_write_options_, base_key.Encode(), strings_value.Encode()); + if (s.ok()) { + *ret = 1; + } + return s; +} + +Status Redis::Setvx(const Slice& key, const Slice& value, const Slice& new_value, int32_t* ret, + int64_t ttl_millsec) { + *ret = 0; + std::string old_value; + + BaseKey base_key(key); + ScopeRecordLock l(lock_mgr_, key); + Status s = db_->Get(default_read_options_, base_key.Encode(), &old_value); + if (s.ok() && !ExpectedMetaValue(DataType::kStrings, old_value)) { + if (ExpectedStale(old_value)) { + s = Status::NotFound(); + } else { + return Status::InvalidArgument( + "WRONGTYPE, key: " + key.ToString() + ", expect type: " + + DataTypeStrings[static_cast(DataType::kStrings)] + ", get type: " + + DataTypeStrings[static_cast(GetMetaValueType(old_value))]); + } + } + if (s.ok()) { + ParsedStringsValue parsed_strings_value(&old_value); + if (parsed_strings_value.IsStale()) { + *ret = 0; + } else { + if (value.compare(parsed_strings_value.UserValue()) == 0) { + StringsValue strings_value(new_value); + if (ttl_millsec > 0) { + strings_value.SetRelativeTimeInMillsec(ttl_millsec); + } + s = db_->Put(default_write_options_, base_key.Encode(), strings_value.Encode()); + if (!s.ok()) { + return s; + } + *ret = 1; + } else { + *ret = -1; + } + } + } else if (s.IsNotFound()) { + *ret = 0; + } else { + return s; + } + return Status::OK(); +} + +Status Redis::Delvx(const Slice& key, const Slice& value, int32_t* ret) { + *ret = 0; + std::string old_value; + + BaseKey base_key(key); + ScopeRecordLock l(lock_mgr_, key); + Status s = db_->Get(default_read_options_, base_key.Encode(), &old_value); + if (s.ok() && !ExpectedMetaValue(DataType::kStrings, old_value)) { + if (ExpectedStale(old_value)) { + s = Status::NotFound(); + } else { + return Status::InvalidArgument( + "WRONGTYPE, key: " + key.ToString() + ", expect type: " + + DataTypeStrings[static_cast(DataType::kStrings)] + ", get type: " + + DataTypeStrings[static_cast(GetMetaValueType(old_value))]); + } + } + if (s.ok()) { + ParsedStringsValue parsed_strings_value(&old_value); + if (parsed_strings_value.IsStale()) { + *ret = 0; + return Status::NotFound("Stale"); + } else { + if (value.compare(parsed_strings_value.UserValue()) == 0) { + *ret = 1; + return db_->Delete(default_write_options_, base_key.Encode()); + } else { + *ret = -1; + } + } + } else if (s.IsNotFound()) { + *ret = 0; + } + return s; +} + +Status Redis::Setrange(const Slice& key, int64_t start_offset, const Slice& value, int32_t* ret) { + std::string old_value; + std::string new_value; + if (start_offset < 0) { + return Status::InvalidArgument("offset < 0"); + } + ScopeRecordLock l(lock_mgr_, key); + + BaseKey base_key(key); + Status s = db_->Get(default_read_options_, base_key.Encode(), &old_value); + if (s.ok() && !ExpectedMetaValue(DataType::kStrings, old_value)) { + if (ExpectedStale(old_value)) { + s = Status::NotFound(); + } else { + return Status::InvalidArgument( + "WRONGTYPE, key: " + key.ToString() + ", expect type: " + + DataTypeStrings[static_cast(DataType::kStrings)] + ", get type: " + + DataTypeStrings[static_cast(GetMetaValueType(old_value))]); + } + } + if (s.ok()) { + uint64_t timestamp = 0; + ParsedStringsValue parsed_strings_value(&old_value); + parsed_strings_value.StripSuffix(); + if (parsed_strings_value.IsStale()) { + std::string tmp(start_offset, '\0'); + new_value = 
tmp.append(value.data()); + *ret = static_cast(new_value.length()); + } else { + timestamp = parsed_strings_value.Etime(); + if (static_cast(start_offset) > old_value.length()) { + old_value.resize(start_offset); + new_value = old_value.append(value.data()); + } else { + std::string head = old_value.substr(0, start_offset); + std::string tail; + if ((start_offset + value.size()) < old_value.length()) { + tail = old_value.substr(start_offset + value.size()); + } + new_value = head + value.data() + tail; + } + } + *ret = static_cast(new_value.length()); + StringsValue strings_value(new_value); + strings_value.SetEtime(timestamp); + return db_->Put(default_write_options_, base_key.Encode(), strings_value.Encode()); + } else if (s.IsNotFound()) { + std::string tmp(start_offset, '\0'); + new_value = tmp.append(value.data()); + *ret = static_cast(new_value.length()); + StringsValue strings_value(new_value); + return db_->Put(default_write_options_, base_key.Encode(), strings_value.Encode()); + } + return s; +} + +Status Redis::Strlen(const Slice& key, int32_t* len) { + std::string value; + Status s = Get(key, &value); + if (s.ok()) { + *len = static_cast(value.size()); + } else { + *len = 0; + } + return s; +} + +int32_t GetBitPos(const unsigned char* s, unsigned int bytes, int bit) { + uint64_t word = 0; + uint64_t skip_val = 0; + auto value = const_cast(s); + auto l = reinterpret_cast(value); + int pos = 0; + if (bit == 0) { + skip_val = std::numeric_limits::max(); + } else { + skip_val = 0; + } + // skip 8 bytes at one time, find the first int64 that should not be skipped + while (bytes >= sizeof(*l)) { + if (*l != skip_val) { + break; + } + l++; + bytes = bytes - sizeof(*l); + pos += static_cast(8 * sizeof(*l)); + } + auto c = reinterpret_cast(l); + for (size_t j = 0; j < sizeof(*l); j++) { + word = word << 8; + if (bytes != 0U) { + word = word | *c; + c++; + bytes--; + } + } + if (bit == 1 && word == 0) { + return -1; + } + // set each bit of mask to 0 except msb + uint64_t mask = std::numeric_limits::max(); + mask = mask >> 1; + mask = ~(mask); + while (mask != 0U) { + if (static_cast((word & mask) != 0) == bit) { + return pos; + } + pos++; + mask = mask >> 1; + } + return pos; +} + +Status Redis::BitPos(const Slice& key, int32_t bit, int64_t* ret) { + Status s; + std::string value; + + BaseKey base_key(key); + s = db_->Get(default_read_options_, base_key.Encode(), &value); + if (s.ok() && !ExpectedMetaValue(DataType::kStrings, value)) { + if (ExpectedStale(value)) { + s = Status::NotFound(); + } else { + return Status::InvalidArgument( + "WRONGTYPE, key: " + key.ToString() + ", expect type: " + + DataTypeStrings[static_cast(DataType::kStrings)] + ", get type: " + + DataTypeStrings[static_cast(GetMetaValueType(value))]); + } + } + if (s.ok()) { + ParsedStringsValue parsed_strings_value(&value); + if (parsed_strings_value.IsStale()) { + if (bit == 1) { + *ret = -1; + } else if (bit == 0) { + *ret = 0; + } + return Status::NotFound("Stale"); + } else { + parsed_strings_value.StripSuffix(); + const auto bit_value = reinterpret_cast(value.data()); + auto value_length = static_cast(value.length()); + int64_t start_offset = 0; + int64_t end_offset = std::max(value_length - 1, static_cast(0)); + int64_t bytes = end_offset - start_offset + 1; + int64_t pos = GetBitPos(bit_value + start_offset, bytes, bit); + if (pos == (8 * bytes) && bit == 0) { + pos = -1; + } + if (pos != -1) { + pos = pos + 8 * start_offset; + } + *ret = pos; + } + } else { + return s; + } + return Status::OK(); +} + +Status 
Redis::BitPos(const Slice& key, int32_t bit, int64_t start_offset, int64_t* ret) { + Status s; + std::string value; + + BaseKey base_key(key); + s = db_->Get(default_read_options_, base_key.Encode(), &value); + if (s.ok() && !ExpectedMetaValue(DataType::kStrings, value)) { + if (ExpectedStale(value)) { + s = Status::NotFound(); + } else { + return Status::InvalidArgument( + "WRONGTYPE, key: " + key.ToString() + ", expect type: " + + DataTypeStrings[static_cast(DataType::kStrings)] + ", get type: " + + DataTypeStrings[static_cast(GetMetaValueType(value))]); + } + } + if (s.ok()) { + ParsedStringsValue parsed_strings_value(&value); + if (parsed_strings_value.IsStale()) { + if (bit == 1) { + *ret = -1; + } else if (bit == 0) { + *ret = 0; + } + return Status::NotFound("Stale"); + } else { + parsed_strings_value.StripSuffix(); + const auto bit_value = reinterpret_cast(value.data()); + auto value_length = static_cast(value.length()); + int64_t end_offset = std::max(value_length - 1, static_cast(0)); + if (start_offset < 0) { + start_offset = start_offset + value_length; + } + if (start_offset < 0) { + start_offset = 0; + } + if (start_offset > end_offset) { + *ret = -1; + return Status::OK(); + } + if (start_offset > value_length - 1) { + *ret = -1; + return Status::OK(); + } + int64_t bytes = end_offset - start_offset + 1; + int64_t pos = GetBitPos(bit_value + start_offset, bytes, bit); + if (pos == (8 * bytes) && bit == 0) { + pos = -1; + } + if (pos != -1) { + pos = pos + 8 * start_offset; + } + *ret = pos; + } + } else { + return s; + } + return Status::OK(); +} + +Status Redis::BitPos(const Slice& key, int32_t bit, int64_t start_offset, int64_t end_offset, int64_t* ret) { + Status s; + std::string value; + + BaseKey base_key(key); + s = db_->Get(default_read_options_, base_key.Encode(), &value); + if (s.ok() && !ExpectedMetaValue(DataType::kStrings, value)) { + if (ExpectedStale(value)) { + s = Status::NotFound(); + } else { + return Status::InvalidArgument( + "WRONGTYPE, key: " + key.ToString() + ", expect type: " + + DataTypeStrings[static_cast(DataType::kStrings)] + ", get type: " + + DataTypeStrings[static_cast(GetMetaValueType(value))]); + } + } + if (s.ok()) { + ParsedStringsValue parsed_strings_value(&value); + if (parsed_strings_value.IsStale()) { + if (bit == 1) { + *ret = -1; + } else if (bit == 0) { + *ret = 0; + } + return Status::NotFound("Stale"); + } else { + parsed_strings_value.StripSuffix(); + const auto bit_value = reinterpret_cast(value.data()); + auto value_length = static_cast(value.length()); + if (start_offset < 0) { + start_offset = start_offset + value_length; + } + if (start_offset < 0) { + start_offset = 0; + } + if (end_offset < 0) { + end_offset = end_offset + value_length; + } + // converting to int64_t just avoid warning + if (end_offset > static_cast(value.length()) - 1) { + end_offset = value_length - 1; + } + if (end_offset < 0) { + end_offset = 0; + } + if (start_offset > end_offset) { + *ret = -1; + return Status::OK(); + } + if (start_offset > value_length - 1) { + *ret = -1; + return Status::OK(); + } + int64_t bytes = end_offset - start_offset + 1; + int64_t pos = GetBitPos(bit_value + start_offset, bytes, bit); + if (pos == (8 * bytes) && bit == 0) { + pos = -1; + } + if (pos != -1) { + pos = pos + 8 * start_offset; + } + *ret = pos; + } + } else { + return s; + } + return Status::OK(); +} + +//TODO(wangshaoyi): timestamp uint64_t +Status Redis::PKSetexAt(const Slice& key, const Slice& value, int64_t time_stamp_millsec_) { + StringsValue 
strings_value(value);
+  if (time_stamp_millsec_ < 0) {
+    time_stamp_millsec_ = pstd::NowMillis() - 1;
+  }
+  BaseKey base_key(key);
+  ScopeRecordLock l(lock_mgr_, key);
+  strings_value.SetEtime(uint64_t(time_stamp_millsec_));
+  return db_->Put(default_write_options_, base_key.Encode(), strings_value.Encode());
+}
+
+Status Redis::StringsExpire(const Slice& key, int64_t ttl_millsec, std::string&& prefetch_meta) {
+  std::string value(std::move(prefetch_meta));
+
+  BaseKey base_key(key);
+  ScopeRecordLock l(lock_mgr_, key);
+  Status s;
+  // an empty value means the meta value has not been fetched yet,
+  // so fetch it first
+  if (value.empty()) {
+    s = db_->Get(default_read_options_, base_key.Encode(), &value);
+    if (s.ok() && !ExpectedMetaValue(DataType::kStrings, value)) {
+      if (ExpectedStale(value)) {
+        s = Status::NotFound();
+      } else {
+        return Status::InvalidArgument(
+            "WRONGTYPE, key: " + key.ToString() + ", expect type: " +
+            DataTypeStrings[static_cast<int>(DataType::kStrings)] + ", get type: " +
+            DataTypeStrings[static_cast<int>(GetMetaValueType(value))]);
+      }
+    }
+  }
+  if (s.ok()) {
+    ParsedStringsValue parsed_strings_value(&value);
+    if (parsed_strings_value.IsStale()) {
+      return Status::NotFound("Stale");
+    }
+    if (ttl_millsec > 0) {
+      parsed_strings_value.SetRelativeTimestamp(ttl_millsec);
+      return db_->Put(default_write_options_, base_key.Encode(), value);
+    } else {
+      return db_->Delete(default_write_options_, base_key.Encode());
+    }
+  }
+  return s;
+}
+
+Status Redis::StringsDel(const Slice& key, std::string&& prefetch_meta) {
+  std::string value(std::move(prefetch_meta));
+  ScopeRecordLock l(lock_mgr_, key);
+  BaseKey base_key(key);
+  Status s;
+
+  // an empty value means the meta value has not been fetched yet,
+  // so fetch it first
+  if (value.empty()) {
+    s = db_->Get(default_read_options_, base_key.Encode(), &value);
+    if (s.ok() && !ExpectedMetaValue(DataType::kStrings, value)) {
+      if (ExpectedStale(value)) {
+        s = Status::NotFound();
+      } else {
+        return Status::InvalidArgument(
+            "WRONGTYPE, key: " + key.ToString() + ", expect type: " +
+            DataTypeStrings[static_cast<int>(DataType::kStrings)] + ", get type: " +
+            DataTypeStrings[static_cast<int>(GetMetaValueType(value))]);
+      }
+    }
+  }
+  if (s.ok()) {
+    ParsedStringsValue parsed_strings_value(&value);
+    if (parsed_strings_value.IsStale()) {
+      return Status::NotFound("Stale");
+    }
+    return db_->Delete(default_write_options_, base_key.Encode());
+  }
+  return s;
+}
+
+Status Redis::StringsExpireat(const Slice& key, int64_t timestamp_millsec, std::string&& prefetch_meta) {
+  std::string value(std::move(prefetch_meta));
+  ScopeRecordLock l(lock_mgr_, key);
+  BaseKey base_key(key);
+  Status s;
+
+  // an empty value means the meta value has not been fetched yet,
+  // so fetch it first
+  if (value.empty()) {
+    s = db_->Get(default_read_options_, base_key.Encode(), &value);
+    if (s.ok() && !ExpectedMetaValue(DataType::kStrings, value)) {
+      if (ExpectedStale(value)) {
+        s = Status::NotFound();
+      } else {
+        return Status::InvalidArgument(
+            "WRONGTYPE, key: " + key.ToString() + ", expect type: " +
+            DataTypeStrings[static_cast<int>(DataType::kStrings)] + ", get type: " +
+            DataTypeStrings[static_cast<int>(GetMetaValueType(value))]);
+      }
+    }
+  }
+  if (s.ok()) {
+    ParsedStringsValue parsed_strings_value(&value);
+    if (parsed_strings_value.IsStale()) {
+      return Status::NotFound("Stale");
+    } else {
+      if (timestamp_millsec > 0) {
+        parsed_strings_value.SetEtime(static_cast<uint64_t>(timestamp_millsec));
+        return db_->Put(default_write_options_, base_key.Encode(), value);
+      }
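// Editor's note (not part of the patch): in the prefetch fallback used by
// StringsExpire/StringsDel/StringsExpireat above, the result of db_->Get()
// must be assigned to the outer `s`. Redeclaring `Status s` inside the
// `if (value.empty())` block would shadow it, leave the outer status at its
// ok() default, and let a missing key fall through to parsing an empty meta
// value. A minimal, self-contained sketch of that hazard:
#include <cassert>

struct MiniStatus {               // stand-in for rocksdb::Status
  bool found = true;              // a default-constructed status reports ok()
  bool ok() const { return found; }
};

MiniStatus FailingGet() { return MiniStatus{false}; }  // stand-in for db_->Get

int main() {
  MiniStatus s;                   // outer status
  {
    MiniStatus s = FailingGet();  // shadows the outer `s`; the failure is lost
    (void)s;
  }
  assert(s.ok());                 // outer status never observed the failure
  return 0;
}
+      }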
else { + return db_->Delete(default_write_options_, base_key.Encode()); + } + } + } + return s; +} + +Status Redis::StringsPersist(const Slice& key, std::string&& prefetch_meta) { + std::string value(std::move(prefetch_meta)); + ScopeRecordLock l(lock_mgr_, key); + BaseKey base_key(key); + Status s; + + // value is empty means no meta value get before, + // we should get meta first + if (value.empty()) { + s = db_->Get(default_read_options_, base_key.Encode(), &value); + if (s.ok() && !ExpectedMetaValue(DataType::kStrings, value)) { + if (ExpectedStale(value)) { + s = Status::NotFound(); + } else { + return Status::InvalidArgument( + "WRONGTYPE, key: " + key.ToString() + ", expect type: " + + DataTypeStrings[static_cast(DataType::kStrings)] + ", get type: " + + DataTypeStrings[static_cast(GetMetaValueType(value))]); + } + } + } + if (s.ok()) { + ParsedStringsValue parsed_strings_value(&value); + if (parsed_strings_value.IsStale()) { + return Status::NotFound("Stale"); + } else { + uint64_t timestamp = parsed_strings_value.Etime(); + if (timestamp == 0) { + return Status::NotFound("Not have an associated timeout"); + } else { + parsed_strings_value.SetEtime(0); + return db_->Put(default_write_options_, base_key.Encode(), value); + } + } + } + return s; +} + +Status Redis::StringsTTL(const Slice& key, int64_t* ttl_millsec, std::string&& prefetch_meta) { + std::string value(std::move(prefetch_meta)); + ScopeRecordLock l(lock_mgr_, key); + BaseKey base_key(key); + Status s; + + // value is empty means no meta value get before, + // we should get meta first + if (value.empty()) { + s = db_->Get(default_read_options_, base_key.Encode(), &value); + if (s.ok() && !ExpectedMetaValue(DataType::kStrings, value)) { + if (ExpectedStale(value)) { + s = Status::NotFound(); + } else { + return Status::InvalidArgument( + "WRONGTYPE, key: " + key.ToString() + ", expect type: " + + DataTypeStrings[static_cast(DataType::kStrings)] + ", get type: " + + DataTypeStrings[static_cast(GetMetaValueType(value))]); + } + } + } + if (s.ok()) { + ParsedStringsValue parsed_strings_value(&value); + if (parsed_strings_value.IsStale()) { + *ttl_millsec = -2; + return Status::NotFound("Stale"); + } else { + *ttl_millsec = parsed_strings_value.Etime(); + if (*ttl_millsec == 0) { + *ttl_millsec = -1; + } else { + pstd::TimeType curtime = pstd::NowMillis(); + *ttl_millsec = *ttl_millsec - curtime >= 0 ? *ttl_millsec - curtime : -2; + } + } + } else if (s.IsNotFound()) { + *ttl_millsec = -2; + } + return s; +} + +void Redis::ScanStrings() { + rocksdb::ReadOptions iterator_options; + const rocksdb::Snapshot* snapshot; + ScopeSnapshot ss(db_, &snapshot); + iterator_options.snapshot = snapshot; + iterator_options.fill_cache = false; + auto current_time = static_cast(time(nullptr)); + + LOG(INFO) << "***************" << "rocksdb instance: " << index_ << " " << "String Data***************"; + auto iter = db_->NewIterator(iterator_options); + for (iter->SeekToFirst(); iter->Valid(); iter->Next()) { + if (!ExpectedMetaValue(DataType::kStrings, iter->value().ToString())) { + continue; + } + ParsedBaseKey parsed_strings_key(iter->key()); + ParsedStringsValue parsed_strings_value(iter->value()); + int32_t survival_time = 0; + if (parsed_strings_value.Etime() != 0) { + survival_time = + parsed_strings_value.Etime() - current_time > 0 ? 
parsed_strings_value.Etime() - current_time : -1;
+    }
+    LOG(INFO) << fmt::format("[key : {:<30}] [value : {:<30}] [timestamp : {:<10}] [version : {}] [survival_time : {}]", parsed_strings_key.Key().ToString(), parsed_strings_value.UserValue().ToString(), parsed_strings_value.Etime(), parsed_strings_value.Version(), survival_time);
+  }
+  delete iter;
+}
+
+rocksdb::Status Redis::Exists(const Slice& key) {
+  std::string meta_value;
+  uint64_t llen = 0;
+  int32_t ret = 0;
+  BaseMetaKey base_meta_key(key);
+  std::vector<storage::IdMessage> id_messages;
+  storage::StreamScanArgs arg;
+  storage::StreamUtils::StreamParseIntervalId("-", arg.start_sid, &arg.start_ex, 0);
+  storage::StreamUtils::StreamParseIntervalId("+", arg.end_sid, &arg.end_ex, UINT64_MAX);
+  rocksdb::Status s = db_->Get(default_read_options_, handles_[kMetaCF], base_meta_key.Encode(), &meta_value);
+  if (s.ok()) {
+    auto type = static_cast<DataType>(static_cast<uint8_t>(meta_value[0]));
+    switch (type) {
+      case DataType::kSets:
+        return SCard(key, &ret, std::move(meta_value));
+      case DataType::kZSets:
+        return ZCard(key, &ret, std::move(meta_value));
+      case DataType::kHashes:
+        return HLen(key, &ret, std::move(meta_value));
+      case DataType::kLists:
+        return LLen(key, &llen, std::move(meta_value));
+      case DataType::kStreams:
+        return XRange(key, arg, id_messages, std::move(meta_value));
+      case DataType::kStrings:
+        return ExpectedStale(meta_value) ? rocksdb::Status::NotFound() : rocksdb::Status::OK();
+      default:
+        return rocksdb::Status::NotFound();
+    }
+  }
+  return rocksdb::Status::NotFound();
+}
+
+rocksdb::Status Redis::Del(const Slice& key) {
+  std::string meta_value;
+  BaseMetaKey base_meta_key(key);
+  rocksdb::Status s = db_->Get(default_read_options_, handles_[kMetaCF], base_meta_key.Encode(), &meta_value);
+  if (s.ok()) {
+    auto type = static_cast<DataType>(static_cast<uint8_t>(meta_value[0]));
+    switch (type) {
+      case DataType::kSets:
+        return SetsDel(key, std::move(meta_value));
+      case DataType::kZSets:
+        return ZsetsDel(key, std::move(meta_value));
+      case DataType::kHashes:
+        return HashesDel(key, std::move(meta_value));
+      case DataType::kLists:
+        return ListsDel(key, std::move(meta_value));
+      case DataType::kStrings:
+        return StringsDel(key, std::move(meta_value));
+      case DataType::kStreams:
+        return StreamsDel(key, std::move(meta_value));
+      default:
+        return rocksdb::Status::NotFound();
+    }
+  }
+  return rocksdb::Status::NotFound();
+}
+
+rocksdb::Status Redis::Expire(const Slice& key, int64_t ttl_millsec) {
+  std::string meta_value;
+  BaseMetaKey base_meta_key(key);
+  rocksdb::Status s = db_->Get(default_read_options_, handles_[kMetaCF], base_meta_key.Encode(), &meta_value);
+  if (s.ok()) {
+    auto type = static_cast<DataType>(static_cast<uint8_t>(meta_value[0]));
+    switch (type) {
+      case DataType::kSets:
+        return SetsExpire(key, ttl_millsec, std::move(meta_value));
+      case DataType::kZSets:
+        return ZsetsExpire(key, ttl_millsec, std::move(meta_value));
+      case DataType::kHashes:
+        return HashesExpire(key, ttl_millsec, std::move(meta_value));
+      case DataType::kLists:
+        return ListsExpire(key, ttl_millsec, std::move(meta_value));
+      case DataType::kStrings:
+        return StringsExpire(key, ttl_millsec, std::move(meta_value));
+      default:
+        return rocksdb::Status::NotFound();
+    }
+  }
+  return rocksdb::Status::NotFound();
+}
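// Editor's note (not part of the patch): the generic key-space commands above
// (Exists/Del/Expire/...) all dispatch on the one-byte DataType tag that every
// encoded meta value starts with. A self-contained sketch of the pattern; the
// tag values below are illustrative, the real ones come from the storage
// DataType definition.
#include <cassert>
#include <cstdint>
#include <string>

enum class Tag : uint8_t { kStrings, kHashes, kSets, kLists, kZSets, kStreams };

Tag TagOf(const std::string& meta_value) {
  return static_cast<Tag>(static_cast<uint8_t>(meta_value[0]));
}

int main() {
  std::string meta;
  meta.push_back(static_cast<char>(Tag::kZSets));
  meta += "...rest of the encoded meta value...";
  switch (TagOf(meta)) {     // mirrors the switch in Exists()/Del()/Expire()
    case Tag::kZSets:
      break;                 // would route to the zset handler here
    default:
      assert(false);
  }
  return 0;
}
+
+rocksdb::Status Redis::Expireat(const Slice& key, int64_t timestamp_millsec) {
+  std::string meta_value;
+  BaseMetaKey base_meta_key(key);
+  rocksdb::Status s = db_->Get(default_read_options_, handles_[kMetaCF], base_meta_key.Encode(), &meta_value);
+  if (s.ok()) {
+    auto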
type = static_cast(static_cast(meta_value[0])); + switch (type) { + case DataType::kSets: + return SetsExpireat(key, timestamp_millsec, std::move(meta_value)); + case DataType::kZSets: + return ZsetsExpireat(key, timestamp_millsec, std::move(meta_value)); + case DataType::kHashes: + return HashesExpireat(key, timestamp_millsec, std::move(meta_value)); + case DataType::kLists: + return ListsExpireat(key, timestamp_millsec, std::move(meta_value)); + case DataType::kStrings: + return StringsExpireat(key, timestamp_millsec, std::move(meta_value)); + default: + return rocksdb::Status::NotFound(); + } + } + return rocksdb::Status::NotFound(); +} + +rocksdb::Status Redis::Persist(const Slice& key) { + std::string meta_value; + BaseMetaKey base_meta_key(key); + rocksdb::Status s = db_->Get(default_read_options_, handles_[kMetaCF], base_meta_key.Encode(), &meta_value); + if (s.ok()) { + auto type = static_cast(static_cast(meta_value[0])); + switch (type) { + case DataType::kSets: + return SetsPersist(key, std::move(meta_value)); + case DataType::kZSets: + return ZsetsPersist(key, std::move(meta_value)); + case DataType::kHashes: + return HashesPersist(key, std::move(meta_value)); + case DataType::kLists: + return ListsPersist(key, std::move(meta_value)); + case DataType::kStrings: + return StringsPersist(key, std::move(meta_value)); + default: + return rocksdb::Status::NotFound(); + } + } + return rocksdb::Status::NotFound(); +} + +rocksdb::Status Redis::TTL(const Slice& key, int64_t* ttl_millsec) { + std::string meta_value; + BaseMetaKey base_meta_key(key); + rocksdb::Status s = db_->Get(default_read_options_, handles_[kMetaCF], base_meta_key.Encode(), &meta_value); + if (s.ok()) { + auto type = static_cast(static_cast(meta_value[0])); + switch (type) { + case DataType::kSets: + return SetsTTL(key, ttl_millsec, std::move(meta_value)); + case DataType::kZSets: + return ZsetsTTL(key, ttl_millsec, std::move(meta_value)); + case DataType::kHashes: + return HashesTTL(key, ttl_millsec, std::move(meta_value)); + case DataType::kLists: + return ListsTTL(key, ttl_millsec, std::move(meta_value)); + case DataType::kStrings: + return StringsTTL(key, ttl_millsec, std::move(meta_value)); + default: + return rocksdb::Status::NotFound(); + } + } + return rocksdb::Status::NotFound(); +} + +rocksdb::Status Redis::GetType(const storage::Slice& key, enum DataType& type) { + std::string meta_value; + BaseMetaKey base_meta_key(key); + rocksdb::Status s = db_->Get(default_read_options_, handles_[kMetaCF], base_meta_key.Encode(), &meta_value); + if (s.ok()) { + type = static_cast(static_cast(meta_value[0])); + } + return Status::OK(); +} + +rocksdb::Status Redis::IsExist(const storage::Slice& key) { + std::string meta_value; + BaseMetaKey base_meta_key(key); + rocksdb::Status s = db_->Get(default_read_options_, handles_[kMetaCF], base_meta_key.Encode(), &meta_value); + if (s.ok()) { + if (ExpectedStale(meta_value)) { + return Status::NotFound(); + } + return Status::OK(); + } + return rocksdb::Status::NotFound(); +} + +/* + * Example Delete the specified prefix key + */ +rocksdb::Status Redis::PKPatternMatchDelWithRemoveKeys(const std::string& pattern, int64_t* ret, std::vector* remove_keys, const int64_t& max_count) { + rocksdb::ReadOptions iterator_options; + const rocksdb::Snapshot* snapshot; + ScopeSnapshot ss(db_, &snapshot); + iterator_options.snapshot = snapshot; + iterator_options.fill_cache = false; + + std::string key; + std::string meta_value; + int64_t total_delete = 0; + rocksdb::Status s; + 
rocksdb::WriteBatch batch;
+  rocksdb::Iterator* iter = db_->NewIterator(iterator_options, handles_[kMetaCF]);
+  iter->SeekToFirst();
+  while (iter->Valid() && static_cast<int64_t>(batch.Count()) < max_count) {
+    auto meta_type = static_cast<DataType>(static_cast<uint8_t>(iter->value()[0]));
+    ParsedBaseMetaKey parsed_meta_key(iter->key().ToString());
+    key = iter->key().ToString();
+    meta_value = iter->value().ToString();
+
+    if (meta_type == DataType::kStrings) {
+      ParsedStringsValue parsed_strings_value(&meta_value);
+      if (!parsed_strings_value.IsStale() &&
+          (StringMatch(pattern.data(), pattern.size(), parsed_meta_key.Key().data(), parsed_meta_key.Key().size(), 0) != 0)) {
+        batch.Delete(key);
+        remove_keys->push_back(parsed_meta_key.Key().ToString());
+      }
+    } else if (meta_type == DataType::kLists) {
+      ParsedListsMetaValue parsed_lists_meta_value(&meta_value);
+      if (!parsed_lists_meta_value.IsStale() && (parsed_lists_meta_value.Count() != 0U) &&
+          (StringMatch(pattern.data(), pattern.size(), parsed_meta_key.Key().data(), parsed_meta_key.Key().size(), 0) != 0)) {
+        parsed_lists_meta_value.InitialMetaValue();
+        batch.Put(handles_[kMetaCF], iter->key(), meta_value);
+        remove_keys->push_back(parsed_meta_key.Key().ToString());
+      }
+    } else if (meta_type == DataType::kStreams) {
+      StreamMetaValue stream_meta_value;
+      stream_meta_value.ParseFrom(meta_value);
+      if ((stream_meta_value.length() != 0) &&
+          (StringMatch(pattern.data(), pattern.size(), parsed_meta_key.Key().data(), parsed_meta_key.Key().size(), 0) != 0)) {
+        stream_meta_value.InitMetaValue();
+        batch.Put(handles_[kMetaCF], key, stream_meta_value.value());
+        remove_keys->push_back(parsed_meta_key.Key().ToString());
+      }
+    } else {
+      ParsedBaseMetaValue parsed_meta_value(&meta_value);
+      if (!parsed_meta_value.IsStale() && (parsed_meta_value.Count() != 0) &&
+          (StringMatch(pattern.data(), pattern.size(), parsed_meta_key.Key().data(), parsed_meta_key.Key().size(), 0) != 0)) {
+        parsed_meta_value.InitialMetaValue();
+        batch.Put(handles_[kMetaCF], iter->key(), meta_value);
+        remove_keys->push_back(parsed_meta_key.Key().ToString());
+      }
+    }
+    iter->Next();
+  }
+  if (batch.Count() != 0U) {
+    s = db_->Write(default_write_options_, &batch);
+    if (s.ok()) {
+      total_delete += static_cast<int64_t>(batch.Count());
+      batch.Clear();
+    } else {
+      remove_keys->erase(remove_keys->end() - batch.Count(), remove_keys->end());
+    }
+  }
+
+  *ret = total_delete;
+  delete iter;
+  return s;
+}
+
+} // namespace storage
diff --git a/tools/pika_migrate/src/storage/src/redis_zsets.cc b/tools/pika_migrate/src/storage/src/redis_zsets.cc
new file mode 100644
index 0000000000..fa6c78f912
--- /dev/null
+++ b/tools/pika_migrate/src/storage/src/redis_zsets.cc
@@ -0,0 +1,2013 @@
+// Copyright (c) 2017-present, Qihoo, Inc. All rights reserved.
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree. An additional grant
+// of patent rights can be found in the PATENTS file in the same directory.
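// Editor's note (not part of the patch): the zset implementation in this new
// file keeps two column families in lockstep: kZsetsDataCF maps member ->
// score, and kZsetsScoreCF maps (score, member) -> empty, so member lookups
// and ordered-by-score scans are both cheap. A minimal in-memory sketch of
// that invariant (std::map/std::set stand in for the column families):
#include <cassert>
#include <map>
#include <set>
#include <string>
#include <utility>

struct MiniZSet {
  std::map<std::string, double> member_to_score;         // kZsetsDataCF analogue
  std::set<std::pair<double, std::string>> score_index;  // kZsetsScoreCF analogue

  bool Add(const std::string& member, double score) {
    auto it = member_to_score.find(member);
    bool is_new = (it == member_to_score.end());
    if (!is_new) {
      if (it->second == score) {
        return false;                           // same score: nothing to do
      }
      score_index.erase({it->second, member});  // drop the stale score entry
    }
    member_to_score[member] = score;
    score_index.insert({score, member});
    return is_new;  // mirrors ZAdd counting only newly inserted members
  }
};

int main() {
  MiniZSet z;
  assert(z.Add("a", 1.0));                       // new member
  assert(!z.Add("a", 2.0));                      // score update, not a new member
  assert(z.score_index.count({2.0, "a"}) == 1);  // both indexes stay consistent
  assert(z.score_index.count({1.0, "a"}) == 0);
  return 0;
}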
+
+
+#include <algorithm>
+#include <cmath>
+#include <limits>
+#include <map>
+#include <memory>
+#include <unordered_set>
+
+#include <fmt/core.h>
+#include <glog/logging.h>
+
+#include "src/base_key_format.h"
+#include "src/base_data_value_format.h"
+#include "pstd/include/pika_codis_slot.h"
+#include "src/scope_record_lock.h"
+#include "src/scope_snapshot.h"
+#include "src/zsets_filter.h"
+#include "src/redis.h"
+#include "storage/util.h"
+
+namespace storage {
+Status Redis::ScanZsetsKeyNum(KeyInfo* key_info) {
+  uint64_t keys = 0;
+  uint64_t expires = 0;
+  uint64_t ttl_sum = 0;
+  uint64_t invaild_keys = 0;
+
+  rocksdb::ReadOptions iterator_options;
+  const rocksdb::Snapshot* snapshot;
+  ScopeSnapshot ss(db_, &snapshot);
+  iterator_options.snapshot = snapshot;
+  iterator_options.fill_cache = false;
+
+  pstd::TimeType curtime = pstd::NowMillis();
+
+  rocksdb::Iterator* iter = db_->NewIterator(iterator_options, handles_[kMetaCF]);
+  for (iter->SeekToFirst(); iter->Valid(); iter->Next()) {
+    if (!ExpectedMetaValue(DataType::kZSets, iter->value().ToString())) {
+      continue;
+    }
+    ParsedZSetsMetaValue parsed_zsets_meta_value(iter->value());
+    if (parsed_zsets_meta_value.IsStale() || parsed_zsets_meta_value.Count() == 0) {
+      invaild_keys++;
+    } else {
+      keys++;
+      if (!parsed_zsets_meta_value.IsPermanentSurvival()) {
+        expires++;
+        ttl_sum += parsed_zsets_meta_value.Etime() - curtime;
+      }
+    }
+  }
+  delete iter;
+
+  key_info->keys = keys;
+  key_info->expires = expires;
+  key_info->avg_ttl = (expires != 0) ? ttl_sum / expires : 0;
+  key_info->invaild_keys = invaild_keys;
+  return Status::OK();
+}
+
+Status Redis::ZPopMax(const Slice& key, const int64_t count, std::vector<ScoreMember>* score_members) {
+  uint32_t statistic = 0;
+  score_members->clear();
+  rocksdb::WriteBatch batch;
+  ScopeRecordLock l(lock_mgr_, key);
+  std::string meta_value;
+
+  BaseMetaKey base_meta_key(key);
+  Status s = db_->Get(default_read_options_, handles_[kMetaCF], base_meta_key.Encode(), &meta_value);
+  if (s.ok() && !ExpectedMetaValue(DataType::kZSets, meta_value)) {
+    if (ExpectedStale(meta_value)) {
+      s = Status::NotFound();
+    } else {
+      return Status::InvalidArgument(
+          "WRONGTYPE, key: " + key.ToString() + ", expected type: " +
+          DataTypeStrings[static_cast<int>(DataType::kZSets)] + ", got type: " +
+          DataTypeStrings[static_cast<int>(GetMetaValueType(meta_value))]);
+    }
+  }
+  if (s.ok()) {
+    ParsedZSetsMetaValue parsed_zsets_meta_value(&meta_value);
+    if (parsed_zsets_meta_value.IsStale()) {
+      return Status::NotFound("Stale");
+    } else if (parsed_zsets_meta_value.Count() == 0) {
+      return Status::NotFound();
+    } else {
+      int64_t num = parsed_zsets_meta_value.Count();
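// Editor's note (not part of the patch): ZPopMax can SeekForPrev() from the
// maximum double because the score column family stores scores in a
// memcmp-ordered encoding. The usual trick for making IEEE-754 doubles sort
// as unsigned bytes looks like this; a sketch only, since the exact encoding
// lives in the zset key-format headers, which this hunk does not show.
#include <cassert>
#include <cstdint>
#include <cstring>

uint64_t OrderedDoubleBits(double d) {
  uint64_t u;
  std::memcpy(&u, &d, sizeof(u));
  // Negative doubles: flip every bit, so "more negative" sorts lower.
  // Non-negative doubles: set the sign bit, so they sort above all negatives.
  return (u & 0x8000000000000000ULL) ? ~u : (u | 0x8000000000000000ULL);
}

int main() {
  const double xs[] = {-2.5, -1.0, -0.0, 0.0, 0.5, 3.0};
  for (size_t i = 0; i + 1 < sizeof(xs) / sizeof(xs[0]); ++i) {
    assert(OrderedDoubleBits(xs[i]) <= OrderedDoubleBits(xs[i + 1]));
  }
  return 0;
}
+      num = num <= count ?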
num : count; + uint64_t version = parsed_zsets_meta_value.Version(); + ZSetsScoreKey zsets_score_key(key, version, std::numeric_limits::max(), Slice()); + KeyStatisticsDurationGuard guard(this, DataType::kZSets, key.ToString()); + rocksdb::Iterator* iter = db_->NewIterator(default_read_options_, handles_[kZsetsScoreCF]); + int32_t del_cnt = 0; + for (iter->SeekForPrev(zsets_score_key.Encode()); iter->Valid() && del_cnt < num; iter->Prev()) { + ParsedZSetsScoreKey parsed_zsets_score_key(iter->key()); + score_members->emplace_back( + ScoreMember{parsed_zsets_score_key.score(), parsed_zsets_score_key.member().ToString()}); + ZSetsMemberKey zsets_member_key(key, version, parsed_zsets_score_key.member()); + ++statistic; + ++del_cnt; + batch.Delete(handles_[kZsetsDataCF], zsets_member_key.Encode()); + batch.Delete(handles_[kZsetsScoreCF], iter->key()); + } + delete iter; + if (!parsed_zsets_meta_value.CheckModifyCount(-del_cnt)) { + return Status::InvalidArgument("zset size overflow"); + } + parsed_zsets_meta_value.ModifyCount(-del_cnt); + batch.Put(handles_[kMetaCF], base_meta_key.Encode(), meta_value); + s = db_->Write(default_write_options_, &batch); + UpdateSpecificKeyStatistics(DataType::kZSets, key.ToString(), statistic); + return s; + } + } else { + return s; + } +} + +Status Redis::ZPopMin(const Slice& key, const int64_t count, std::vector* score_members) { + uint32_t statistic = 0; + score_members->clear(); + rocksdb::WriteBatch batch; + ScopeRecordLock l(lock_mgr_, key); + std::string meta_value; + + BaseMetaKey base_meta_key(key); + Status s = db_->Get(default_read_options_, handles_[kMetaCF], base_meta_key.Encode(), &meta_value); + if (s.ok() && !ExpectedMetaValue(DataType::kZSets, meta_value)) { + if (ExpectedStale(meta_value)) { + s = Status::NotFound(); + } else { + return Status::InvalidArgument( + "WRONGTYPE, key: " + key.ToString() + ", expected type: " + + DataTypeStrings[static_cast(DataType::kZSets)] + ", got type: " + + DataTypeStrings[static_cast(GetMetaValueType(meta_value))]); + } + } + if (s.ok()) { + ParsedZSetsMetaValue parsed_zsets_meta_value(&meta_value); + if (parsed_zsets_meta_value.IsStale()) { + return Status::NotFound("Stale"); + } else if (parsed_zsets_meta_value.Count() == 0) { + return Status::NotFound(); + } else { + int64_t num = parsed_zsets_meta_value.Count(); + num = num <= count ? 
num : count;
+      uint64_t version = parsed_zsets_meta_value.Version();
+      ZSetsScoreKey zsets_score_key(key, version, std::numeric_limits<double>::lowest(), Slice());
+      KeyStatisticsDurationGuard guard(this, DataType::kZSets, key.ToString());
+      rocksdb::Iterator* iter = db_->NewIterator(default_read_options_, handles_[kZsetsScoreCF]);
+      int32_t del_cnt = 0;
+      for (iter->Seek(zsets_score_key.Encode()); iter->Valid() && del_cnt < num; iter->Next()) {
+        ParsedZSetsScoreKey parsed_zsets_score_key(iter->key());
+        score_members->emplace_back(
+            ScoreMember{parsed_zsets_score_key.score(), parsed_zsets_score_key.member().ToString()});
+        ZSetsMemberKey zsets_member_key(key, version, parsed_zsets_score_key.member());
+        ++statistic;
+        ++del_cnt;
+        batch.Delete(handles_[kZsetsDataCF], zsets_member_key.Encode());
+        batch.Delete(handles_[kZsetsScoreCF], iter->key());
+      }
+      delete iter;
+      if (!parsed_zsets_meta_value.CheckModifyCount(-del_cnt)) {
+        return Status::InvalidArgument("zset size overflow");
+      }
+      parsed_zsets_meta_value.ModifyCount(-del_cnt);
+      batch.Put(handles_[kMetaCF], base_meta_key.Encode(), meta_value);
+      s = db_->Write(default_write_options_, &batch);
+      UpdateSpecificKeyStatistics(DataType::kZSets, key.ToString(), statistic);
+      return s;
+    }
+  } else {
+    return s;
+  }
+}
+
+Status Redis::ZAdd(const Slice& key, const std::vector<ScoreMember>& score_members, int32_t* ret) {
+  *ret = 0;
+  uint32_t statistic = 0;
+  std::unordered_set<std::string> unique;
+  std::vector<ScoreMember> filtered_score_members;
+  for (const auto& sm : score_members) {
+    if (unique.find(sm.member) == unique.end()) {
+      unique.insert(sm.member);
+      filtered_score_members.push_back(sm);
+    }
+  }
+
+  char score_buf[8];
+  uint64_t version = 0;
+  std::string meta_value;
+  rocksdb::WriteBatch batch;
+  ScopeRecordLock l(lock_mgr_, key);
+
+  BaseMetaKey base_meta_key(key);
+  Status s = db_->Get(default_read_options_, handles_[kMetaCF], base_meta_key.Encode(), &meta_value);
+  if (s.ok() && !ExpectedMetaValue(DataType::kZSets, meta_value)) {
+    if (ExpectedStale(meta_value)) {
+      s = Status::NotFound();
+    } else {
+      return Status::InvalidArgument(
+          "WRONGTYPE, key: " + key.ToString() + ", expected type: " +
+          DataTypeStrings[static_cast<int>(DataType::kZSets)] + ", got type: " +
+          DataTypeStrings[static_cast<int>(GetMetaValueType(meta_value))]);
+    }
+  }
+  if (s.ok()) {
+    bool valid = true;
+    ParsedZSetsMetaValue parsed_zsets_meta_value(&meta_value);
+    if (parsed_zsets_meta_value.IsStale() || parsed_zsets_meta_value.Count() == 0) {
+      valid = false;
+      version = parsed_zsets_meta_value.InitialMetaValue();
+    } else {
+      valid = true;
+      version = parsed_zsets_meta_value.Version();
+    }
+
+    int32_t cnt = 0;
+    std::string data_value;
+    for (const auto& sm : filtered_score_members) {
+      bool not_found = true;
+      ZSetsMemberKey zsets_member_key(key, version, sm.member);
+      if (valid) {
+        s = db_->Get(default_read_options_, handles_[kZsetsDataCF], zsets_member_key.Encode(), &data_value);
+        if (s.ok()) {
+          ParsedBaseDataValue parsed_value(&data_value);
+          parsed_value.StripSuffix();
+          not_found = false;
+          uint64_t tmp = DecodeFixed64(data_value.data());
+          const void* ptr_tmp = reinterpret_cast<const void*>(&tmp);
+          double old_score = *reinterpret_cast<const double*>(ptr_tmp);
+          if (old_score == sm.score) {
+            continue;
+          } else {
+            ZSetsScoreKey zsets_score_key(key, version, old_score, sm.member);
+            batch.Delete(handles_[kZsetsScoreCF], zsets_score_key.Encode());
+            // delete the old zsets_score_key and overwrite zsets_member_key;
+            // they live in different column families, so we accumulate 1
+            statistic++;
+          }
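// Editor's note (not part of the patch): ZAdd above stores each score as the
// raw 8-byte image of the double, written with EncodeFixed64 and read back
// with DecodeFixed64. A sketch of that round trip using memcpy for the type
// pun (equivalent to the pointer casts above); the *LE helpers below are
// illustrative stand-ins for the real coding functions.
#include <cassert>
#include <cstdint>
#include <cstring>

void EncodeFixed64LE(char* buf, uint64_t v) { std::memcpy(buf, &v, sizeof(v)); }
uint64_t DecodeFixed64LE(const char* buf) {
  uint64_t v;
  std::memcpy(&v, buf, sizeof(v));
  return v;
}

int main() {
  const double score = 3.14159;
  uint64_t bits;
  std::memcpy(&bits, &score, sizeof(bits));  // double -> uint64_t bit image
  char buf[8];
  EncodeFixed64LE(buf, bits);                // what goes into kZsetsDataCF
  double round_trip;
  const uint64_t decoded = DecodeFixed64LE(buf);
  std::memcpy(&round_trip, &decoded, sizeof(round_trip));
  assert(round_trip == score);               // scores survive the round trip
  return 0;
}
+        } else if (!s.IsNotFound()) {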
+ return s; + } + } + + const void* ptr_score = reinterpret_cast(&sm.score); + EncodeFixed64(score_buf, *reinterpret_cast(ptr_score)); + BaseDataValue zsets_member_i_val(Slice(score_buf, sizeof(uint64_t))); + batch.Put(handles_[kZsetsDataCF], zsets_member_key.Encode(), zsets_member_i_val.Encode()); + + ZSetsScoreKey zsets_score_key(key, version, sm.score, sm.member); + BaseDataValue zsets_score_i_val(Slice{}); + batch.Put(handles_[kZsetsScoreCF], zsets_score_key.Encode(), zsets_score_i_val.Encode()); + if (not_found) { + cnt++; + } + } + if (!parsed_zsets_meta_value.CheckModifyCount(cnt)) { + return Status::InvalidArgument("zset size overflow"); + } + parsed_zsets_meta_value.ModifyCount(cnt); + batch.Put(handles_[kMetaCF], base_meta_key.Encode(), meta_value); + *ret = cnt; + } else if (s.IsNotFound()) { + char buf[4]; + EncodeFixed32(buf, filtered_score_members.size()); + ZSetsMetaValue zsets_meta_value(DataType::kZSets, Slice(buf, 4)); + version = zsets_meta_value.UpdateVersion(); + batch.Put(handles_[kMetaCF], base_meta_key.Encode(), zsets_meta_value.Encode()); + for (const auto& sm : filtered_score_members) { + ZSetsMemberKey zsets_member_key(key, version, sm.member); + const void* ptr_score = reinterpret_cast(&sm.score); + EncodeFixed64(score_buf, *reinterpret_cast(ptr_score)); + BaseDataValue zsets_member_i_val(Slice(score_buf, sizeof(uint64_t))); + batch.Put(handles_[kZsetsDataCF], zsets_member_key.Encode(), zsets_member_i_val.Encode()); + + ZSetsScoreKey zsets_score_key(key, version, sm.score, sm.member); + BaseDataValue zsets_score_i_val(Slice{}); + batch.Put(handles_[kZsetsScoreCF], zsets_score_key.Encode(), zsets_score_i_val.Encode()); + } + *ret = static_cast(filtered_score_members.size()); + } else { + return s; + } + s = db_->Write(default_write_options_, &batch); + UpdateSpecificKeyStatistics(DataType::kZSets, key.ToString(), statistic); + return s; +} + +Status Redis::ZCard(const Slice& key, int32_t* card, std::string&& prefetch_meta) { + *card = 0; + Status s; + + // meta_value is empty means no meta value get before, + // we should get meta first + std::string meta_value(std::move(prefetch_meta)); + if (meta_value.empty()) { + BaseMetaKey base_meta_key(key); + s = db_->Get(default_read_options_, handles_[kMetaCF], base_meta_key.Encode(), &meta_value); + if (s.ok() && !ExpectedMetaValue(DataType::kZSets, meta_value)) { + if (ExpectedStale(meta_value)) { + s = Status::NotFound(); + } else { + return Status::InvalidArgument( + "WRONGTYPE, key: " + key.ToString() + ", expected type: " + + DataTypeStrings[static_cast(DataType::kZSets)] + ", got type: " + + DataTypeStrings[static_cast(GetMetaValueType(meta_value))]); + } + } + } + + if (s.ok()) { + ParsedZSetsMetaValue parsed_zsets_meta_value(&meta_value); + if (parsed_zsets_meta_value.IsStale()) { + *card = 0; + return Status::NotFound("Stale"); + } else if (parsed_zsets_meta_value.Count() == 0) { + *card = 0; + return Status::NotFound(); + } else { + *card = parsed_zsets_meta_value.Count(); + } + } + return s; +} + +Status Redis::ZCount(const Slice& key, double min, double max, bool left_close, bool right_close, int32_t* ret) { + *ret = 0; + rocksdb::ReadOptions read_options; + const rocksdb::Snapshot* snapshot = nullptr; + + std::string meta_value; + ScopeSnapshot ss(db_, &snapshot); + read_options.snapshot = snapshot; + + + BaseMetaKey base_meta_key(key); + Status s = db_->Get(read_options, handles_[kMetaCF], base_meta_key.Encode(), &meta_value); + if (s.ok() && !ExpectedMetaValue(DataType::kZSets, meta_value)) { + if 
(ExpectedStale(meta_value)) { + s = Status::NotFound(); + } else { + return Status::InvalidArgument( + "WRONGTYPE, key: " + key.ToString() + ", expected type: " + + DataTypeStrings[static_cast(DataType::kZSets)] + ", got type: " + + DataTypeStrings[static_cast(GetMetaValueType(meta_value))]); + } + } + if (s.ok()) { + ParsedZSetsMetaValue parsed_zsets_meta_value(&meta_value); + if (parsed_zsets_meta_value.IsStale()) { + return Status::NotFound("Stale"); + } else if (parsed_zsets_meta_value.Count() == 0) { + return Status::NotFound(); + } else { + uint64_t version = parsed_zsets_meta_value.Version(); + int32_t cnt = 0; + int32_t cur_index = 0; + int32_t stop_index = parsed_zsets_meta_value.Count() - 1; + ScoreMember score_member; + ZSetsScoreKey zsets_score_key(key, version, min, Slice()); + KeyStatisticsDurationGuard guard(this, DataType::kZSets, key.ToString()); + rocksdb::Iterator* iter = db_->NewIterator(read_options, handles_[kZsetsScoreCF]); + for (iter->Seek(zsets_score_key.Encode()); iter->Valid() && cur_index <= stop_index; iter->Next(), ++cur_index) { + bool left_pass = false; + bool right_pass = false; + ParsedZSetsScoreKey parsed_zsets_score_key(iter->key()); + if (parsed_zsets_score_key.key() != key) { + break; + } + if (parsed_zsets_score_key.Version() != version) { + break; + } + if ((left_close && min <= parsed_zsets_score_key.score()) || + (!left_close && min < parsed_zsets_score_key.score())) { + left_pass = true; + } + if ((right_close && parsed_zsets_score_key.score() <= max) || + (!right_close && parsed_zsets_score_key.score() < max)) { + right_pass = true; + } + if (left_pass && right_pass) { + cnt++; + } else if (!right_pass) { + break; + } + } + delete iter; + *ret = cnt; + } + } + return s; +} + +Status Redis::ZIncrby(const Slice& key, const Slice& member, double increment, double* ret) { + *ret = 0; + uint32_t statistic = 0; + double score = 0; + char score_buf[8]; + uint64_t version = 0; + std::string meta_value; + rocksdb::WriteBatch batch; + ScopeRecordLock l(lock_mgr_, key); + + BaseMetaKey base_meta_key(key); + Status s = db_->Get(default_read_options_, handles_[kMetaCF], base_meta_key.Encode(), &meta_value); + if (s.ok() && !ExpectedMetaValue(DataType::kZSets, meta_value)) { + if (ExpectedStale(meta_value)) { + s = Status::NotFound(); + } else { + return Status::InvalidArgument( + "WRONGTYPE, key: " + key.ToString() + ", expected type: " + + DataTypeStrings[static_cast(DataType::kZSets)] + ", got type: " + + DataTypeStrings[static_cast(GetMetaValueType(meta_value))]); + } + } + if (s.ok()) { + ParsedZSetsMetaValue parsed_zsets_meta_value(&meta_value); + if (parsed_zsets_meta_value.IsStale() || parsed_zsets_meta_value.Count() == 0) { + version = parsed_zsets_meta_value.InitialMetaValue(); + } else { + version = parsed_zsets_meta_value.Version(); + } + std::string data_value; + ZSetsMemberKey zsets_member_key(key, version, member); + s = db_->Get(default_read_options_, handles_[kZsetsDataCF], zsets_member_key.Encode(), &data_value); + if (s.ok()) { + ParsedBaseDataValue parsed_value(&data_value); + parsed_value.StripSuffix(); + uint64_t tmp = DecodeFixed64(data_value.data()); + const void* ptr_tmp = reinterpret_cast(&tmp); + double old_score = *reinterpret_cast(ptr_tmp); + score = old_score + increment; + ZSetsScoreKey zsets_score_key(key, version, old_score, member); + batch.Delete(handles_[kZsetsScoreCF], zsets_score_key.Encode()); + // delete old zsets_score_key and overwirte zsets_member_key + // but in different column_families so we accumulative 1 + 
statistic++; + } else if (s.IsNotFound()) { + score = increment; + if (!parsed_zsets_meta_value.CheckModifyCount(1)) { + return Status::InvalidArgument("zset size overflow"); + } + parsed_zsets_meta_value.ModifyCount(1); + batch.Put(handles_[kMetaCF], base_meta_key.Encode(), meta_value); + } else { + return s; + } + } else if (s.IsNotFound()) { + char buf[4]; + EncodeFixed32(buf, 1); + ZSetsMetaValue zsets_meta_value(DataType::kZSets, Slice(buf, 4)); + version = zsets_meta_value.UpdateVersion(); + batch.Put(handles_[kMetaCF], base_meta_key.Encode(), zsets_meta_value.Encode()); + score = increment; + } else { + return s; + } + ZSetsMemberKey zsets_member_key(key, version, member); + const void* ptr_score = reinterpret_cast(&score); + EncodeFixed64(score_buf, *reinterpret_cast(ptr_score)); + BaseDataValue zsets_member_i_val(Slice(score_buf, sizeof(uint64_t))); + batch.Put(handles_[kZsetsDataCF], zsets_member_key.Encode(), zsets_member_i_val.Encode()); + + ZSetsScoreKey zsets_score_key(key, version, score, member); + BaseDataValue zsets_score_i_val(Slice{}); + batch.Put(handles_[kZsetsScoreCF], zsets_score_key.Encode(), zsets_score_i_val.Encode()); + *ret = score; + s = db_->Write(default_write_options_, &batch); + UpdateSpecificKeyStatistics(DataType::kZSets, key.ToString(), statistic); + return s; +} + +Status Redis::ZRange(const Slice& key, int32_t start, int32_t stop, std::vector* score_members) { + score_members->clear(); + rocksdb::ReadOptions read_options; + const rocksdb::Snapshot* snapshot = nullptr; + + std::string meta_value; + ScopeSnapshot ss(db_, &snapshot); + read_options.snapshot = snapshot; + + + BaseMetaKey base_meta_key(key); + Status s = db_->Get(read_options, handles_[kMetaCF], base_meta_key.Encode(), &meta_value); + if (s.ok() && !ExpectedMetaValue(DataType::kZSets, meta_value)) { + if (ExpectedStale(meta_value)) { + s = Status::NotFound(); + } else { + return Status::InvalidArgument( + "WRONGTYPE, key: " + key.ToString() + ", expected type: " + + DataTypeStrings[static_cast(DataType::kZSets)] + ", got type: " + + DataTypeStrings[static_cast(GetMetaValueType(meta_value))]); + } + } + if (s.ok()) { + ParsedZSetsMetaValue parsed_zsets_meta_value(&meta_value); + if (parsed_zsets_meta_value.IsStale()) { + return Status::NotFound("Stale"); + } else if (parsed_zsets_meta_value.Count() == 0) { + return Status::NotFound(); + } else { + int32_t count = parsed_zsets_meta_value.Count(); + uint64_t version = parsed_zsets_meta_value.Version(); + int32_t start_index = start >= 0 ? start : count + start; + int32_t stop_index = stop >= 0 ? stop : count + stop; + start_index = start_index <= 0 ? 0 : start_index; + stop_index = stop_index >= count ? 
count - 1 : stop_index; + if (start_index > stop_index || start_index >= count || stop_index < 0) { + return s; + } + int32_t cur_index = 0; + ScoreMember score_member; + ZSetsScoreKey zsets_score_key(key, version, std::numeric_limits::lowest(), Slice()); + KeyStatisticsDurationGuard guard(this, DataType::kZSets, key.ToString()); + rocksdb::Iterator* iter = db_->NewIterator(read_options, handles_[kZsetsScoreCF]); + for (iter->Seek(zsets_score_key.Encode()); iter->Valid() && cur_index <= stop_index; iter->Next(), ++cur_index) { + if (cur_index >= start_index) { + ParsedZSetsScoreKey parsed_zsets_score_key(iter->key()); + score_member.score = parsed_zsets_score_key.score(); + score_member.member = parsed_zsets_score_key.member().ToString(); + score_members->push_back(score_member); + } + } + delete iter; + } + } + return s; +} + +Status Redis::ZRangeWithTTL(const Slice& key, int32_t start, int32_t stop, std::vector* score_members, + int64_t* ttl_millsec) { + score_members->clear(); + rocksdb::ReadOptions read_options; + const rocksdb::Snapshot* snapshot = nullptr; + + std::string meta_value; + ScopeSnapshot ss(db_, &snapshot); + read_options.snapshot = snapshot; + + BaseMetaKey base_meta_key(key); + Status s = db_->Get(read_options, handles_[kMetaCF], base_meta_key.Encode(), &meta_value); + if (s.ok() && !ExpectedMetaValue(DataType::kZSets, meta_value)) { + if (ExpectedStale(meta_value)) { + s = Status::NotFound(); + } else { + return Status::InvalidArgument( + "WRONGTYPE, key: " + key.ToString() + ", expected type: " + + DataTypeStrings[static_cast(DataType::kZSets)] + ", got type: " + + DataTypeStrings[static_cast(GetMetaValueType(meta_value))]); + } + } + if (s.ok()) { + ParsedZSetsMetaValue parsed_zsets_meta_value(&meta_value); + if (parsed_zsets_meta_value.Count() == 0) { + return Status::NotFound(); + } else if (parsed_zsets_meta_value.IsStale()) { + return Status::NotFound("Stale"); + } else { + // ttl + *ttl_millsec = parsed_zsets_meta_value.Etime(); + if (*ttl_millsec == 0) { + *ttl_millsec = -1; + } else { + pstd::TimeType curtime = pstd::NowMillis(); + *ttl_millsec = *ttl_millsec - curtime >= 0 ? *ttl_millsec - curtime : -2; + } + + int32_t count = parsed_zsets_meta_value.Count(); + uint64_t version = parsed_zsets_meta_value.Version(); + int32_t start_index = start >= 0 ? start : count + start; + int32_t stop_index = stop >= 0 ? stop : count + stop; + start_index = start_index <= 0 ? 0 : start_index; + stop_index = stop_index >= count ? 
count - 1 : stop_index; + if (start_index > stop_index + || start_index >= count + || stop_index < 0) { + return s; + } + int32_t cur_index = 0; + ScoreMember score_member; + ZSetsScoreKey zsets_score_key(key, version, + std::numeric_limits::lowest(), Slice()); + KeyStatisticsDurationGuard guard(this, DataType::kZSets, key.ToString()); + rocksdb::Iterator* iter = db_->NewIterator(read_options, handles_[kZsetsScoreCF]); + for (iter->Seek(zsets_score_key.Encode()); + iter->Valid() && cur_index <= stop_index; + iter->Next(), ++cur_index) { + if (cur_index >= start_index) { + ParsedZSetsScoreKey parsed_zsets_score_key(iter->key()); + score_member.score = parsed_zsets_score_key.score(); + score_member.member = parsed_zsets_score_key.member().ToString(); + score_members->push_back(score_member); + } + } + delete iter; + } + } + return s; +} + +Status Redis::ZRangebyscore(const Slice& key, double min, double max, bool left_close, bool right_close, + int64_t count, int64_t offset, std::vector* score_members) { + score_members->clear(); + rocksdb::ReadOptions read_options; + const rocksdb::Snapshot* snapshot = nullptr; + + std::string meta_value; + ScopeSnapshot ss(db_, &snapshot); + read_options.snapshot = snapshot; + + BaseMetaKey base_meta_key(key); + Status s = db_->Get(read_options, handles_[kMetaCF], base_meta_key.Encode(), &meta_value); + if (s.ok() && !ExpectedMetaValue(DataType::kZSets, meta_value)) { + if (ExpectedStale(meta_value)) { + s = Status::NotFound(); + } else { + return Status::InvalidArgument( + "WRONGTYPE, key: " + key.ToString() + ", expected type: " + + DataTypeStrings[static_cast(DataType::kZSets)] + ", got type: " + + DataTypeStrings[static_cast(GetMetaValueType(meta_value))]); + } + } + if (s.ok()) { + ParsedZSetsMetaValue parsed_zsets_meta_value(&meta_value); + if (parsed_zsets_meta_value.IsStale()) { + return Status::NotFound("Stale"); + } else if (parsed_zsets_meta_value.Count() == 0) { + return Status::NotFound(); + } else if (offset >= 0 && count != 0) { + uint64_t version = parsed_zsets_meta_value.Version(); + int32_t index = 0; + int32_t stop_index = parsed_zsets_meta_value.Count() - 1; + int64_t skipped = 0; + ScoreMember score_member; + ZSetsScoreKey zsets_score_key(key, version, min, Slice()); + KeyStatisticsDurationGuard guard(this, DataType::kZSets, key.ToString()); + rocksdb::Iterator* iter = db_->NewIterator(read_options, handles_[kZsetsScoreCF]); + for (iter->Seek(zsets_score_key.Encode()); iter->Valid() && index <= stop_index; iter->Next(), ++index) { + bool left_pass = false; + bool right_pass = false; + ParsedZSetsScoreKey parsed_zsets_score_key(iter->key()); + if (parsed_zsets_score_key.key() != key) { + break; + } + if (parsed_zsets_score_key.Version() != version) { + break; + } + if ((left_close && min <= parsed_zsets_score_key.score()) || + (!left_close && min < parsed_zsets_score_key.score())) { + left_pass = true; + } + if ((right_close && parsed_zsets_score_key.score() <= max) || + (!right_close && parsed_zsets_score_key.score() < max)) { + right_pass = true; + } + if (left_pass && right_pass) { + // skip offset + if (skipped < offset) { + ++skipped; + continue; + } + score_member.score = parsed_zsets_score_key.score(); + score_member.member = parsed_zsets_score_key.member().ToString(); + score_members->push_back(score_member); + if (count > 0 && score_members->size() == static_cast(count)) { + break; + } + } + if (!right_pass) { + break; + } + } + delete iter; + } + } + return s; +} + +Status Redis::ZRank(const Slice& key, const Slice& member, 
int32_t* rank) { + *rank = -1; + rocksdb::ReadOptions read_options; + const rocksdb::Snapshot* snapshot = nullptr; + + std::string meta_value; + ScopeSnapshot ss(db_, &snapshot); + read_options.snapshot = snapshot; + + BaseMetaKey base_meta_key(key); + Status s = db_->Get(read_options, handles_[kMetaCF], base_meta_key.Encode(), &meta_value); + if (s.ok() && !ExpectedMetaValue(DataType::kZSets, meta_value)) { + if (ExpectedStale(meta_value)) { + s = Status::NotFound(); + } else { + return Status::InvalidArgument( + "WRONGTYPE, key: " + key.ToString() + ", expected type: " + + DataTypeStrings[static_cast(DataType::kZSets)] + ", got type: " + + DataTypeStrings[static_cast(GetMetaValueType(meta_value))]); + } + } + if (s.ok()) { + ParsedZSetsMetaValue parsed_zsets_meta_value(&meta_value); + if (parsed_zsets_meta_value.IsStale()) { + return Status::NotFound("Stale"); + } else if (parsed_zsets_meta_value.Count() == 0) { + return Status::NotFound(); + } else { + bool found = false; + uint64_t version = parsed_zsets_meta_value.Version(); + int32_t index = 0; + int32_t stop_index = parsed_zsets_meta_value.Count() - 1; + ScoreMember score_member; + ZSetsScoreKey zsets_score_key(key, version, std::numeric_limits::lowest(), Slice()); + KeyStatisticsDurationGuard guard(this, DataType::kZSets, key.ToString()); + rocksdb::Iterator* iter = db_->NewIterator(read_options, handles_[kZsetsScoreCF]); + for (iter->Seek(zsets_score_key.Encode()); iter->Valid() && index <= stop_index; iter->Next(), ++index) { + ParsedZSetsScoreKey parsed_zsets_score_key(iter->key()); + if (parsed_zsets_score_key.member().compare(member) == 0) { + found = true; + break; + } + } + delete iter; + if (found) { + *rank = index; + return Status::OK(); + } else { + return Status::NotFound(); + } + } + } + return s; +} + +Status Redis::ZRem(const Slice& key, const std::vector& members, int32_t* ret) { + *ret = 0; + uint32_t statistic = 0; + std::unordered_set unique; + std::vector filtered_members; + for (const auto& member : members) { + if (unique.find(member) == unique.end()) { + unique.insert(member); + filtered_members.push_back(member); + } + } + + std::string meta_value; + rocksdb::WriteBatch batch; + ScopeRecordLock l(lock_mgr_, key); + + BaseMetaKey base_meta_key(key); + Status s = db_->Get(default_read_options_, handles_[kMetaCF], base_meta_key.Encode(), &meta_value); + if (s.ok() && !ExpectedMetaValue(DataType::kZSets, meta_value)) { + if (ExpectedStale(meta_value)) { + s = Status::NotFound(); + } else { + return Status::InvalidArgument( + "WRONGTYPE, key: " + key.ToString() + ", expected type: " + + DataTypeStrings[static_cast(DataType::kZSets)] + ", got type: " + + DataTypeStrings[static_cast(GetMetaValueType(meta_value))]); + } + } + if (s.ok()) { + ParsedZSetsMetaValue parsed_zsets_meta_value(&meta_value); + if (parsed_zsets_meta_value.IsStale()) { + return Status::NotFound("Stale"); + } else if (parsed_zsets_meta_value.Count() == 0) { + return Status::NotFound(); + } else { + int32_t del_cnt = 0; + std::string data_value; + uint64_t version = parsed_zsets_meta_value.Version(); + for (const auto& member : filtered_members) { + ZSetsMemberKey zsets_member_key(key, version, member); + s = db_->Get(default_read_options_, handles_[kZsetsDataCF], zsets_member_key.Encode(), &data_value); + if (s.ok()) { + del_cnt++; + statistic++; + ParsedBaseDataValue parsed_value(&data_value); + parsed_value.StripSuffix(); + uint64_t tmp = DecodeFixed64(data_value.data()); + const void* ptr_tmp = reinterpret_cast(&tmp); + double score = 
*reinterpret_cast(ptr_tmp); + batch.Delete(handles_[kZsetsDataCF], zsets_member_key.Encode()); + + ZSetsScoreKey zsets_score_key(key, version, score, member); + batch.Delete(handles_[kZsetsScoreCF], zsets_score_key.Encode()); + } else if (!s.IsNotFound()) { + return s; + } + } + *ret = del_cnt; + if (!parsed_zsets_meta_value.CheckModifyCount(-del_cnt)) { + return Status::InvalidArgument("zset size overflow"); + } + parsed_zsets_meta_value.ModifyCount(-del_cnt); + batch.Put(handles_[kMetaCF], base_meta_key.Encode(), meta_value); + } + } else { + return s; + } + s = db_->Write(default_write_options_, &batch); + UpdateSpecificKeyStatistics(DataType::kZSets, key.ToString(), statistic); + return s; +} + +Status Redis::ZRemrangebyrank(const Slice& key, int32_t start, int32_t stop, int32_t* ret) { + *ret = 0; + uint32_t statistic = 0; + std::string meta_value; + rocksdb::WriteBatch batch; + ScopeRecordLock l(lock_mgr_, key); + + BaseMetaKey base_meta_key(key); + Status s = db_->Get(default_read_options_, handles_[kMetaCF], base_meta_key.Encode(), &meta_value); + if (s.ok() && !ExpectedMetaValue(DataType::kZSets, meta_value)) { + if (ExpectedStale(meta_value)) { + s = Status::NotFound(); + } else { + return Status::InvalidArgument( + "WRONGTYPE, key: " + key.ToString() + ", expected type: " + + DataTypeStrings[static_cast(DataType::kZSets)] + ", got type: " + + DataTypeStrings[static_cast(GetMetaValueType(meta_value))]); + } + } + if (s.ok()) { + ParsedZSetsMetaValue parsed_zsets_meta_value(&meta_value); + if (parsed_zsets_meta_value.IsStale()) { + return Status::NotFound("Stale"); + } else if (parsed_zsets_meta_value.Count() == 0) { + return Status::NotFound(); + } else { + std::string member; + int32_t del_cnt = 0; + int32_t cur_index = 0; + int32_t count = parsed_zsets_meta_value.Count(); + uint64_t version = parsed_zsets_meta_value.Version(); + int32_t start_index = start >= 0 ? start : count + start; + int32_t stop_index = stop >= 0 ? stop : count + stop; + start_index = start_index <= 0 ? 0 : start_index; + stop_index = stop_index >= count ? 
count - 1 : stop_index; + if (start_index > stop_index || start_index >= count) { + return s; + } + ZSetsScoreKey zsets_score_key(key, version, std::numeric_limits::lowest(), Slice()); + KeyStatisticsDurationGuard guard(this, DataType::kZSets, key.ToString()); + rocksdb::Iterator* iter = db_->NewIterator(default_read_options_, handles_[kZsetsScoreCF]); + for (iter->Seek(zsets_score_key.Encode()); iter->Valid() && cur_index <= stop_index; iter->Next(), ++cur_index) { + if (cur_index >= start_index) { + ParsedZSetsScoreKey parsed_zsets_score_key(iter->key()); + ZSetsMemberKey zsets_member_key(key, version, parsed_zsets_score_key.member()); + batch.Delete(handles_[kZsetsDataCF], zsets_member_key.Encode()); + batch.Delete(handles_[kZsetsScoreCF], iter->key()); + del_cnt++; + statistic++; + } + } + delete iter; + *ret = del_cnt; + if (!parsed_zsets_meta_value.CheckModifyCount(-del_cnt)) { + return Status::InvalidArgument("zset size overflow"); + } + parsed_zsets_meta_value.ModifyCount(-del_cnt); + batch.Put(handles_[kMetaCF], base_meta_key.Encode(), meta_value); + } + } else { + return s; + } + s = db_->Write(default_write_options_, &batch); + UpdateSpecificKeyStatistics(DataType::kZSets, key.ToString(), statistic); + return s; +} + +Status Redis::ZRemrangebyscore(const Slice& key, double min, double max, bool left_close, bool right_close, + int32_t* ret) { + *ret = 0; + uint32_t statistic = 0; + std::string meta_value; + rocksdb::WriteBatch batch; + ScopeRecordLock l(lock_mgr_, key); + + BaseMetaKey base_meta_key(key); + Status s = db_->Get(default_read_options_, handles_[kMetaCF], base_meta_key.Encode(), &meta_value); + if (s.ok() && !ExpectedMetaValue(DataType::kZSets, meta_value)) { + if (ExpectedStale(meta_value)) { + s = Status::NotFound(); + } else { + return Status::InvalidArgument( + "WRONGTYPE, key: " + key.ToString() + ", expected type: " + + DataTypeStrings[static_cast(DataType::kZSets)] + ", got type: " + + DataTypeStrings[static_cast(GetMetaValueType(meta_value))]); + } + } + if (s.ok()) { + ParsedZSetsMetaValue parsed_zsets_meta_value(&meta_value); + if (parsed_zsets_meta_value.IsStale()) { + return Status::NotFound("Stale"); + } else if (parsed_zsets_meta_value.Count() == 0) { + return Status::NotFound(); + } else { + std::string member; + int32_t del_cnt = 0; + int32_t cur_index = 0; + int32_t stop_index = parsed_zsets_meta_value.Count() - 1; + uint64_t version = parsed_zsets_meta_value.Version(); + ZSetsScoreKey zsets_score_key(key, version, min, Slice()); + KeyStatisticsDurationGuard guard(this, DataType::kZSets, key.ToString()); + rocksdb::Iterator* iter = db_->NewIterator(default_read_options_, handles_[kZsetsScoreCF]); + for (iter->Seek(zsets_score_key.Encode()); iter->Valid() && cur_index <= stop_index; iter->Next(), ++cur_index) { + bool left_pass = false; + bool right_pass = false; + ParsedZSetsScoreKey parsed_zsets_score_key(iter->key()); + if (parsed_zsets_score_key.key() != key) { + break; + } + if (parsed_zsets_score_key.Version() != version) { + break; + } + if ((left_close && min <= parsed_zsets_score_key.score()) || + (!left_close && min < parsed_zsets_score_key.score())) { + left_pass = true; + } + if ((right_close && parsed_zsets_score_key.score() <= max) || + (!right_close && parsed_zsets_score_key.score() < max)) { + right_pass = true; + } + if (left_pass && right_pass) { + ZSetsMemberKey zsets_member_key(key, version, parsed_zsets_score_key.member()); + batch.Delete(handles_[kZsetsDataCF], zsets_member_key.Encode()); + batch.Delete(handles_[kZsetsScoreCF], 
iter->key()); + del_cnt++; + statistic++; + } + if (!right_pass) { + break; + } + } + delete iter; + *ret = del_cnt; + if (!parsed_zsets_meta_value.CheckModifyCount(-del_cnt)) { + return Status::InvalidArgument("zset size overflow"); + } + parsed_zsets_meta_value.ModifyCount(-del_cnt); + batch.Put(handles_[kMetaCF], base_meta_key.Encode(), meta_value); + } + } else { + return s; + } + s = db_->Write(default_write_options_, &batch); + UpdateSpecificKeyStatistics(DataType::kZSets, key.ToString(), statistic); + return s; +} + +Status Redis::ZRevrange(const Slice& key, int32_t start, int32_t stop, std::vector* score_members) { + score_members->clear(); + rocksdb::ReadOptions read_options; + const rocksdb::Snapshot* snapshot = nullptr; + + std::string meta_value; + ScopeSnapshot ss(db_, &snapshot); + read_options.snapshot = snapshot; + + + BaseMetaKey base_meta_key(key); + Status s = db_->Get(read_options, handles_[kMetaCF], base_meta_key.Encode(), &meta_value); + if (s.ok() && !ExpectedMetaValue(DataType::kZSets, meta_value)) { + if (ExpectedStale(meta_value)) { + s = Status::NotFound(); + } else { + return Status::InvalidArgument( + "WRONGTYPE, key: " + key.ToString() + ", expected type: " + + DataTypeStrings[static_cast(DataType::kZSets)] + ", got type: " + + DataTypeStrings[static_cast(GetMetaValueType(meta_value))]); + } + } + if (s.ok()) { + ParsedZSetsMetaValue parsed_zsets_meta_value(&meta_value); + if (parsed_zsets_meta_value.IsStale()) { + return Status::NotFound("Stale"); + } else if (parsed_zsets_meta_value.Count() == 0) { + return Status::NotFound(); + } else { + int32_t count = parsed_zsets_meta_value.Count(); + uint64_t version = parsed_zsets_meta_value.Version(); + int32_t start_index = stop >= 0 ? count - stop - 1 : -stop - 1; + int32_t stop_index = start >= 0 ? count - start - 1 : -start - 1; + start_index = start_index <= 0 ? 0 : start_index; + stop_index = stop_index >= count ? 
count - 1 : stop_index; + if (start_index > stop_index || start_index >= count || stop_index < 0) { + return s; + } + int32_t cur_index = count - 1; + ScoreMember score_member; + ZSetsScoreKey zsets_score_key(key, version, std::numeric_limits::max(), Slice()); + KeyStatisticsDurationGuard guard(this, DataType::kZSets, key.ToString()); + rocksdb::Iterator* iter = db_->NewIterator(read_options, handles_[kZsetsScoreCF]); + for (iter->SeekForPrev(zsets_score_key.Encode()); iter->Valid() && cur_index >= start_index; + iter->Prev(), --cur_index) { + if (cur_index <= stop_index) { + ParsedZSetsScoreKey parsed_zsets_score_key(iter->key()); + score_member.score = parsed_zsets_score_key.score(); + score_member.member = parsed_zsets_score_key.member().ToString(); + score_members->push_back(score_member); + } + } + delete iter; + } + } + return s; +} + +Status Redis::ZRevrangebyscore(const Slice& key, double min, double max, bool left_close, bool right_close, + int64_t count, int64_t offset, std::vector* score_members) { + score_members->clear(); + rocksdb::ReadOptions read_options; + const rocksdb::Snapshot* snapshot = nullptr; + + std::string meta_value; + ScopeSnapshot ss(db_, &snapshot); + read_options.snapshot = snapshot; + + BaseMetaKey base_meta_key(key); + Status s = db_->Get(read_options, handles_[kMetaCF], base_meta_key.Encode(), &meta_value); + if (s.ok() && !ExpectedMetaValue(DataType::kZSets, meta_value)) { + if (ExpectedStale(meta_value)) { + s = Status::NotFound(); + } else { + return Status::InvalidArgument( + "WRONGTYPE, key: " + key.ToString() + ", expected type: " + + DataTypeStrings[static_cast(DataType::kZSets)] + ", got type: " + + DataTypeStrings[static_cast(GetMetaValueType(meta_value))]); + } + } + if (s.ok()) { + ParsedZSetsMetaValue parsed_zsets_meta_value(&meta_value); + if (parsed_zsets_meta_value.IsStale()) { + return Status::NotFound("Stale"); + } else if (parsed_zsets_meta_value.Count() == 0) { + return Status::NotFound(); + } else if (offset >= 0 && count != 0) { + uint64_t version = parsed_zsets_meta_value.Version(); + int32_t left = parsed_zsets_meta_value.Count(); + int64_t skipped = 0; + ScoreMember score_member; + ZSetsScoreKey zsets_score_key(key, version, std::nextafter(max, std::numeric_limits::max()), Slice()); + KeyStatisticsDurationGuard guard(this, DataType::kZSets, key.ToString()); + rocksdb::Iterator* iter = db_->NewIterator(read_options, handles_[kZsetsScoreCF]); + for (iter->SeekForPrev(zsets_score_key.Encode()); iter->Valid() && left > 0; iter->Prev(), --left) { + bool left_pass = false; + bool right_pass = false; + ParsedZSetsScoreKey parsed_zsets_score_key(iter->key()); + if (parsed_zsets_score_key.key() != key) { + break; + } + if (parsed_zsets_score_key.Version() != version) { + break; + } + if ((left_close && min <= parsed_zsets_score_key.score()) || + (!left_close && min < parsed_zsets_score_key.score())) { + left_pass = true; + } + if ((right_close && parsed_zsets_score_key.score() <= max) || + (!right_close && parsed_zsets_score_key.score() < max)) { + right_pass = true; + } + if (left_pass && right_pass) { + // skip offset + if (skipped < offset) { + ++skipped; + continue; + } + score_member.score = parsed_zsets_score_key.score(); + score_member.member = parsed_zsets_score_key.member().ToString(); + score_members->push_back(score_member); + if (count > 0 and score_members->size() == static_cast(count)) { + break; + } + } + if (!left_pass) { + break; + } + } + delete iter; + } + } + return s; +} + +Status Redis::ZRevrank(const Slice& key, const 
Slice& member, int32_t* rank) { + *rank = -1; + rocksdb::ReadOptions read_options; + const rocksdb::Snapshot* snapshot = nullptr; + + std::string meta_value; + ScopeSnapshot ss(db_, &snapshot); + read_options.snapshot = snapshot; + + + BaseMetaKey base_meta_key(key); + Status s = db_->Get(read_options, handles_[kMetaCF], base_meta_key.Encode(), &meta_value); + if (s.ok() && !ExpectedMetaValue(DataType::kZSets, meta_value)) { + if (ExpectedStale(meta_value)) { + s = Status::NotFound(); + } else { + return Status::InvalidArgument( + "WRONGTYPE, key: " + key.ToString() + ", expected type: " + + DataTypeStrings[static_cast(DataType::kZSets)] + ", got type: " + + DataTypeStrings[static_cast(GetMetaValueType(meta_value))]); + } + } + if (s.ok()) { + ParsedZSetsMetaValue parsed_zsets_meta_value(&meta_value); + if (parsed_zsets_meta_value.IsStale()) { + return Status::NotFound("Stale"); + } else if (parsed_zsets_meta_value.Count() == 0) { + return Status::NotFound(); + } else { + bool found = false; + int32_t rev_index = 0; + int32_t left = parsed_zsets_meta_value.Count(); + uint64_t version = parsed_zsets_meta_value.Version(); + ZSetsScoreKey zsets_score_key(key, version, std::numeric_limits::max(), Slice()); + KeyStatisticsDurationGuard guard(this, DataType::kZSets, key.ToString()); + rocksdb::Iterator* iter = db_->NewIterator(read_options, handles_[kZsetsScoreCF]); + for (iter->SeekForPrev(zsets_score_key.Encode()); iter->Valid() && left > 0; iter->Prev(), --left, ++rev_index) { + ParsedZSetsScoreKey parsed_zsets_score_key(iter->key()); + if (parsed_zsets_score_key.member().compare(member) == 0) { + found = true; + break; + } + } + delete iter; + if (found) { + *rank = rev_index; + } else { + return Status::NotFound(); + } + } + } + return s; +} + +Status Redis::ZScore(const Slice& key, const Slice& member, double* score) { + *score = 0; + rocksdb::ReadOptions read_options; + const rocksdb::Snapshot* snapshot = nullptr; + + std::string meta_value; + ScopeSnapshot ss(db_, &snapshot); + read_options.snapshot = snapshot; + + + BaseMetaKey base_meta_key(key); + Status s = db_->Get(read_options, handles_[kMetaCF], base_meta_key.Encode(), &meta_value); + if (s.ok() && !ExpectedMetaValue(DataType::kZSets, meta_value) && !ExpectedMetaValue(DataType::kSets, meta_value)) { + if (ExpectedStale(meta_value)) { + s = Status::NotFound(); + } else { + return Status::InvalidArgument( + "WRONGTYPE, key: " + key.ToString() + ", expected type: " + + DataTypeStrings[static_cast(DataType::kZSets)] + ", got type: " + + DataTypeStrings[static_cast(GetMetaValueType(meta_value))]); + } + } + if (s.ok()) { + ParsedZSetsMetaValue parsed_zsets_meta_value(&meta_value); + uint64_t version = parsed_zsets_meta_value.Version(); + if (parsed_zsets_meta_value.IsStale()) { + return Status::NotFound("Stale"); + } else if (parsed_zsets_meta_value.Count() == 0) { + return Status::NotFound(); + } else { + std::string data_value; + ZSetsMemberKey zsets_member_key(key, version, member); + s = db_->Get(read_options, handles_[kZsetsDataCF], zsets_member_key.Encode(), &data_value); + if (s.ok()) { + ParsedBaseDataValue parsed_value(&data_value); + parsed_value.StripSuffix(); + uint64_t tmp = DecodeFixed64(data_value.data()); + const void* ptr_tmp = reinterpret_cast(&tmp); + *score = *reinterpret_cast(ptr_tmp); + } else if (s.IsNotFound()) { + return Status::NotFound("Invalid member"); + } else { + return s; + } + } + } else if (!s.IsNotFound()) { + return s; + } + return s; +} + +Status Redis::ZGetAll(const Slice& key, double weight, 
+Status Redis::ZGetAll(const Slice& key, double weight, std::map<std::string, double>* value_to_dest) {
+  Status s;
+  rocksdb::ReadOptions read_options;
+  const rocksdb::Snapshot* snapshot = nullptr;
+  ScopeSnapshot ss(db_, &snapshot);
+  read_options.snapshot = snapshot;
+  std::string meta_value;
+
+  BaseMetaKey base_meta_key(key);
+  s = db_->Get(read_options, handles_[kMetaCF], base_meta_key.Encode(), &meta_value);
+  if (s.ok() && !ExpectedMetaValue(DataType::kZSets, meta_value) && !ExpectedMetaValue(DataType::kSets, meta_value)) {
+    if (ExpectedStale(meta_value)) {
+      s = Status::NotFound();
+    } else {
+      return Status::InvalidArgument(
+          "WRONGTYPE, key: " + key.ToString() + ", expected type: " +
+          DataTypeStrings[static_cast<int>(DataType::kZSets)] + ", got type: " +
+          DataTypeStrings[static_cast<int>(GetMetaValueType(meta_value))]);
+    }
+  }
+  if (s.ok()) {
+    ParsedZSetsMetaValue parsed_zsets_meta_value(&meta_value);
+    if (!parsed_zsets_meta_value.IsStale() && parsed_zsets_meta_value.Count() != 0) {
+      int32_t cur_index = 0;
+      int32_t stop_index = parsed_zsets_meta_value.Count() - 1;
+      uint64_t version = parsed_zsets_meta_value.Version();
+      ZSetsScoreKey zsets_score_key(key.ToString(), version, std::numeric_limits<double>::lowest(), Slice());
+      Slice seek_key = zsets_score_key.Encode();
+      rocksdb::Iterator* iter = db_->NewIterator(read_options, handles_[kZsetsScoreCF]);
+      for (iter->Seek(seek_key); iter->Valid() && cur_index <= stop_index; iter->Next(), ++cur_index) {
+        ParsedZSetsScoreKey parsed_zsets_score_key(iter->key());
+        double score = parsed_zsets_score_key.score() * weight;
+        score = (score == -0.0) ? 0 : score;
+        value_to_dest->insert(std::make_pair(parsed_zsets_score_key.member().ToString(), score));
+      }
+      delete iter;
+    }
+  }
+  return s;
+}
+
+Status Redis::ZUnionstore(const Slice& destination, const std::vector<std::string>& keys,
+                          const std::vector<double>& weights, const AGGREGATE agg,
+                          std::map<std::string, double>& value_to_dest, int32_t* ret) {
+  *ret = 0;
+  uint32_t statistic = 0;
+  rocksdb::WriteBatch batch;
+  rocksdb::ReadOptions read_options;
+  const rocksdb::Snapshot* snapshot = nullptr;
+
+  uint64_t version;
+  std::string meta_value;
+  ScoreMember sm;
+  ScopeSnapshot ss(db_, &snapshot);
+  read_options.snapshot = snapshot;
+  ScopeRecordLock l(lock_mgr_, destination);
+  std::map<std::string, double> member_score_map;
+
+  Status s;
+  for (size_t idx = 0; idx < keys.size(); ++idx) {
+    BaseMetaKey base_meta_key(keys[idx]);
+    s = db_->Get(read_options, handles_[kMetaCF], base_meta_key.Encode(), &meta_value);
+    if (s.ok() && !ExpectedMetaValue(DataType::kZSets, meta_value)) {
+      if (ExpectedStale(meta_value)) {
+        s = Status::NotFound();
+      } else {
+        return Status::InvalidArgument(
+            "WRONGTYPE, key: " + keys[idx] + ", expected type: " +
+            DataTypeStrings[static_cast<int>(DataType::kZSets)] + ", got type: " +
+            DataTypeStrings[static_cast<int>(GetMetaValueType(meta_value))]);
+      }
+    }
+    if (s.ok()) {
+      ParsedZSetsMetaValue parsed_zsets_meta_value(&meta_value);
+      if (!parsed_zsets_meta_value.IsStale() && parsed_zsets_meta_value.Count() != 0) {
+        int32_t cur_index = 0;
+        int32_t stop_index = parsed_zsets_meta_value.Count() - 1;
+        double score = 0;
+        double weight = idx < weights.size() ? weights[idx] : 1;
+        version = parsed_zsets_meta_value.Version();
+        ZSetsScoreKey zsets_score_key(keys[idx], version, std::numeric_limits<double>::lowest(), Slice());
+        KeyStatisticsDurationGuard guard(this, DataType::kZSets, keys[idx]);
+        rocksdb::Iterator* iter = db_->NewIterator(read_options, handles_[kZsetsScoreCF]);
+        for (iter->Seek(zsets_score_key.Encode()); iter->Valid() && cur_index <= stop_index;
+             iter->Next(), ++cur_index) {
+          ParsedZSetsScoreKey parsed_zsets_score_key(iter->key());
+          sm.score = parsed_zsets_score_key.score();
+          sm.member = parsed_zsets_score_key.member().ToString();
+          if (member_score_map.find(sm.member) == member_score_map.end()) {
+            score = weight * sm.score;
+            member_score_map[sm.member] = (score == -0.0) ? 0 : score;
+          } else {
+            score = member_score_map[sm.member];
+            switch (agg) {
+              case SUM:
+                score += weight * sm.score;
+                break;
+              case MIN:
+                score = std::min(score, weight * sm.score);
+                break;
+              case MAX:
+                score = std::max(score, weight * sm.score);
+                break;
+            }
+            member_score_map[sm.member] = (score == -0.0) ? 0 : score;
+          }
+        }
+        delete iter;
+      }
+    } else if (!s.IsNotFound()) {
+      return s;
+    }
+  }
+
+  BaseMetaKey base_destination(destination);
+  s = db_->Get(read_options, handles_[kMetaCF], base_destination.Encode(), &meta_value);
+  if (s.ok() && !ExpectedMetaValue(DataType::kZSets, meta_value)) {
+    if (ExpectedStale(meta_value)) {
+      s = Status::NotFound();
+    } else {
+      return Status::InvalidArgument(
+          "WRONGTYPE, key: " + destination.ToString() + ", expected type: " +
+          DataTypeStrings[static_cast<int>(DataType::kZSets)] + ", got type: " +
+          DataTypeStrings[static_cast<int>(GetMetaValueType(meta_value))]);
+    }
+  }
+  if (s.ok()) {
+    ParsedZSetsMetaValue parsed_zsets_meta_value(&meta_value);
+    statistic = parsed_zsets_meta_value.Count();
+    version = parsed_zsets_meta_value.InitialMetaValue();
+    if (!parsed_zsets_meta_value.check_set_count(static_cast<int32_t>(member_score_map.size()))) {
+      return Status::InvalidArgument("zset size overflow");
+    }
+    parsed_zsets_meta_value.SetCount(static_cast<int32_t>(member_score_map.size()));
+    batch.Put(handles_[kMetaCF], base_destination.Encode(), meta_value);
+  } else {
+    char buf[4];
+    EncodeFixed32(buf, member_score_map.size());
+    ZSetsMetaValue zsets_meta_value(DataType::kZSets, Slice(buf, 4));
+    version = zsets_meta_value.UpdateVersion();
+    batch.Put(handles_[kMetaCF], base_destination.Encode(), zsets_meta_value.Encode());
+  }
+
+  char score_buf[8];
+  for (const auto& sm : member_score_map) {
+    ZSetsMemberKey zsets_member_key(destination, version, sm.first);
+
+    const void* ptr_score = reinterpret_cast<const void*>(&sm.second);
+    EncodeFixed64(score_buf, *reinterpret_cast<const uint64_t*>(ptr_score));
+    BaseDataValue member_i_val(Slice(score_buf, sizeof(uint64_t)));
+    batch.Put(handles_[kZsetsDataCF], zsets_member_key.Encode(), member_i_val.Encode());
+
+    ZSetsScoreKey zsets_score_key(destination, version, sm.second, sm.first);
+    BaseDataValue score_i_val(Slice{});
+    batch.Put(handles_[kZsetsScoreCF], zsets_score_key.Encode(), score_i_val.Encode());
+  }
+  *ret = static_cast<int32_t>(member_score_map.size());
+  s = db_->Write(default_write_options_, &batch);
+  UpdateSpecificKeyStatistics(DataType::kZSets, destination.ToString(), statistic);
+  value_to_dest = std::move(member_score_map);
+  return s;
+}
+
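The AGGREGATE handling above is the whole of ZUnionstore's merge policy; a compact standalone restatement (SUM/MIN/MAX mirror the switch in the loop):

    #include <algorithm>

    enum AGGREGATE { SUM, MIN, MAX };

    // Fold one weighted input score into the running aggregate for a member,
    // exactly as the member_score_map update above does.
    double Aggregate(AGGREGATE agg, double current, double weight, double score) {
      switch (agg) {
        case SUM: return current + weight * score;
        case MIN: return std::min(current, weight * score);
        case MAX: return std::max(current, weight * score);
      }
      return current;
    }
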
+Status Redis::ZInterstore(const Slice& destination, const std::vector<std::string>& keys,
+                          const std::vector<double>& weights, const AGGREGATE agg,
+                          std::vector<ScoreMember>& value_to_dest, int32_t* ret) {
+  if (keys.empty()) {
+    return Status::Corruption("ZInterstore invalid parameter, no keys");
+  }
+
+  *ret = 0;
+  uint32_t statistic = 0;
+  rocksdb::WriteBatch batch;
+  rocksdb::ReadOptions read_options;
+  const rocksdb::Snapshot* snapshot = nullptr;
+  ScopeSnapshot ss(db_, &snapshot);
+  read_options.snapshot = snapshot;
+  ScopeRecordLock l(lock_mgr_, destination);
+
+  std::string meta_value;
+  uint64_t version = 0;
+  bool have_invalid_zsets = false;
+  ScoreMember item;
+  std::vector<KeyVersion> valid_zsets;
+  std::vector<ScoreMember> score_members;
+  std::vector<ScoreMember> final_score_members;
+  Status s;
+
+  int32_t cur_index = 0;
+  int32_t stop_index = 0;
+  for (size_t idx = 0; idx < keys.size(); ++idx) {
+    BaseMetaKey base_meta_key(keys[idx]);
+    s = db_->Get(read_options, handles_[kMetaCF], base_meta_key.Encode(), &meta_value);
+    if (s.ok() && !ExpectedMetaValue(DataType::kZSets, meta_value)) {
+      if (ExpectedStale(meta_value)) {
+        s = Status::NotFound();
+      } else {
+        return Status::InvalidArgument(
+            "WRONGTYPE, key: " + keys[idx] + ", expected type: " +
+            DataTypeStrings[static_cast<int>(DataType::kZSets)] + ", got type: " +
+            DataTypeStrings[static_cast<int>(GetMetaValueType(meta_value))]);
+      }
+    }
+    if (s.ok()) {
+      ParsedZSetsMetaValue parsed_zsets_meta_value(&meta_value);
+      if (parsed_zsets_meta_value.IsStale() || parsed_zsets_meta_value.Count() == 0) {
+        have_invalid_zsets = true;
+      } else {
+        valid_zsets.push_back({keys[idx], parsed_zsets_meta_value.Version()});
+        if (idx == 0) {
+          stop_index = parsed_zsets_meta_value.Count() - 1;
+        }
+      }
+    } else if (s.IsNotFound()) {
+      have_invalid_zsets = true;
+    } else {
+      return s;
+    }
+  }
+
+  if (!have_invalid_zsets) {
+    ZSetsScoreKey zsets_score_key(valid_zsets[0].key, valid_zsets[0].version,
+                                  std::numeric_limits<double>::lowest(), Slice());
+    KeyStatisticsDurationGuard guard(this, DataType::kZSets, valid_zsets[0].key);
+    rocksdb::Iterator* iter = db_->NewIterator(read_options, handles_[kZsetsScoreCF]);
+    for (iter->Seek(zsets_score_key.Encode()); iter->Valid() && cur_index <= stop_index;
+         iter->Next(), ++cur_index) {
+      ParsedZSetsScoreKey parsed_zsets_score_key(iter->key());
+      double score = parsed_zsets_score_key.score();
+      std::string member = parsed_zsets_score_key.member().ToString();
+      score_members.push_back({score, member});
+    }
+    delete iter;
+
+    std::string data_value;
+    for (const auto& sm : score_members) {
+      bool reliable = true;
+      item.member = sm.member;
+      item.score = sm.score * (!weights.empty() ? weights[0] : 1);
+      for (size_t idx = 1; idx < valid_zsets.size(); ++idx) {
+        double weight = idx < weights.size() ? weights[idx] : 1;
+        ZSetsMemberKey zsets_member_key(valid_zsets[idx].key, valid_zsets[idx].version, item.member);
+        s = db_->Get(read_options, handles_[kZsetsDataCF], zsets_member_key.Encode(), &data_value);
+        if (s.ok()) {
+          ParsedBaseDataValue parsed_value(&data_value);
+          parsed_value.StripSuffix();
+          uint64_t tmp = DecodeFixed64(data_value.data());
+          const void* ptr_tmp = reinterpret_cast<const void*>(&tmp);
+          double score = *reinterpret_cast<const double*>(ptr_tmp);
+          switch (agg) {
+            case SUM:
+              item.score += weight * score;
+              break;
+            case MIN:
+              item.score = std::min(item.score, weight * score);
+              break;
+            case MAX:
+              item.score = std::max(item.score, weight * score);
+              break;
+          }
+        } else if (s.IsNotFound()) {
+          reliable = false;
+          break;
+        } else {
+          return s;
+        }
+      }
+      if (reliable) {
+        final_score_members.push_back(item);
+      }
+    }
+  }
+
+  BaseMetaKey base_destination(destination);
+  s = db_->Get(read_options, handles_[kMetaCF], base_destination.Encode(), &meta_value);
+  if (s.ok() && !ExpectedMetaValue(DataType::kZSets, meta_value)) {
+    if (ExpectedStale(meta_value)) {
+      s = Status::NotFound();
+    } else {
+      return Status::InvalidArgument(
+          "WRONGTYPE, key: " + destination.ToString() + ", expected type: " +
+          DataTypeStrings[static_cast<int>(DataType::kZSets)] + ", got type: " +
+          DataTypeStrings[static_cast<int>(GetMetaValueType(meta_value))]);
+    }
+  }
+  if (s.ok()) {
+    ParsedZSetsMetaValue parsed_zsets_meta_value(&meta_value);
+    statistic = parsed_zsets_meta_value.Count();
+    version = parsed_zsets_meta_value.InitialMetaValue();
+    if (!parsed_zsets_meta_value.check_set_count(static_cast<int32_t>(final_score_members.size()))) {
+      return Status::InvalidArgument("zset size overflow");
+    }
+    parsed_zsets_meta_value.SetCount(static_cast<int32_t>(final_score_members.size()));
+    batch.Put(handles_[kMetaCF], base_destination.Encode(), meta_value);
+  } else {
+    char buf[4];
+    EncodeFixed32(buf, final_score_members.size());
+    ZSetsMetaValue zsets_meta_value(DataType::kZSets, Slice(buf, 4));
+    version = zsets_meta_value.UpdateVersion();
+    batch.Put(handles_[kMetaCF], base_destination.Encode(), zsets_meta_value.Encode());
+  }
+  char score_buf[8];
+  for (const auto& sm : final_score_members) {
+    ZSetsMemberKey zsets_member_key(destination, version, sm.member);
+
+    const void* ptr_score = reinterpret_cast<const void*>(&sm.score);
+    EncodeFixed64(score_buf, *reinterpret_cast<const uint64_t*>(ptr_score));
+    BaseDataValue member_i_val(Slice(score_buf, sizeof(uint64_t)));
+    batch.Put(handles_[kZsetsDataCF], zsets_member_key.Encode(), member_i_val.Encode());
+
+    ZSetsScoreKey zsets_score_key(destination, version, sm.score, sm.member);
+    BaseDataValue zsets_score_i_val(Slice{});
+    batch.Put(handles_[kZsetsScoreCF], zsets_score_key.Encode(), zsets_score_i_val.Encode());
+  }
+  *ret = static_cast<int32_t>(final_score_members.size());
+  s = db_->Write(default_write_options_, &batch);
+  UpdateSpecificKeyStatistics(DataType::kZSets, destination.ToString(), statistic);
+  value_to_dest = std::move(final_score_members);
+  return s;
+}
+
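A hypothetical call sketch for the intersection above (this assumes a Storage-level wrapper `db.ZInterstore` that forwards to this method with the same signature):

    std::vector<storage::ScoreMember> stored;
    int32_t ret = 0;
    // dest = intersection of z1 and z2; each member's score is
    // 1.0*score(z1) + 2.0*score(z2) under SUM aggregation.
    storage::Status st = db.ZInterstore("dest", {"z1", "z2"}, {1.0, 2.0},
                                        storage::SUM, stored, &ret);
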
+Status Redis::ZRangebylex(const Slice& key, const Slice& min, const Slice& max, bool left_close, bool right_close,
+                          std::vector<std::string>* members) {
+  members->clear();
+  rocksdb::ReadOptions read_options;
+  const rocksdb::Snapshot* snapshot = nullptr;
+
+  std::string meta_value;
+  ScopeSnapshot ss(db_, &snapshot);
+  read_options.snapshot = snapshot;
+
+  bool left_no_limit = min.compare("-") == 0;
+  bool right_not_limit = max.compare("+") == 0;
+
+  BaseMetaKey base_meta_key(key);
+  Status s = db_->Get(read_options, handles_[kMetaCF], base_meta_key.Encode(), &meta_value);
+  if (s.ok() && !ExpectedMetaValue(DataType::kZSets, meta_value)) {
+    if (ExpectedStale(meta_value)) {
+      s = Status::NotFound();
+    } else {
+      return Status::InvalidArgument(
+          "WRONGTYPE, key: " + key.ToString() + ", expected type: " +
+          DataTypeStrings[static_cast<int>(DataType::kZSets)] + ", got type: " +
+          DataTypeStrings[static_cast<int>(GetMetaValueType(meta_value))]);
+    }
+  }
+  if (s.ok()) {
+    ParsedZSetsMetaValue parsed_zsets_meta_value(&meta_value);
+    if (parsed_zsets_meta_value.IsStale() || parsed_zsets_meta_value.Count() == 0) {
+      return Status::NotFound();
+    } else {
+      uint64_t version = parsed_zsets_meta_value.Version();
+      int32_t cur_index = 0;
+      int32_t stop_index = parsed_zsets_meta_value.Count() - 1;
+      ZSetsMemberKey zsets_member_key(key, version, Slice());
+      KeyStatisticsDurationGuard guard(this, DataType::kZSets, key.ToString());
+      rocksdb::Iterator* iter = db_->NewIterator(read_options, handles_[kZsetsDataCF]);
+      for (iter->Seek(zsets_member_key.Encode()); iter->Valid() && cur_index <= stop_index;
+           iter->Next(), ++cur_index) {
+        bool left_pass = false;
+        bool right_pass = false;
+        ParsedZSetsMemberKey parsed_zsets_member_key(iter->key());
+        Slice member = parsed_zsets_member_key.member();
+        if (left_no_limit || (left_close && min.compare(member) <= 0) || (!left_close && min.compare(member) < 0)) {
+          left_pass = true;
+        }
+        if (right_not_limit || (right_close && max.compare(member) >= 0) || (!right_close && max.compare(member) > 0)) {
+          right_pass = true;
+        }
+        if (left_pass && right_pass) {
+          members->push_back(member.ToString());
+        }
+        if (!right_pass) {
+          break;
+        }
+      }
+      delete iter;
+    }
+  }
+  return s;
+}
+
+Status Redis::ZLexcount(const Slice& key, const Slice& min, const Slice& max, bool left_close, bool right_close,
+                        int32_t* ret) {
+  std::vector<std::string> members;
+  Status s = ZRangebylex(key, min, max, left_close, right_close, &members);
+  *ret = static_cast<int32_t>(members.size());
+  return s;
+}
+
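Usage sketch for the lexicographic helpers above (hypothetical caller; "-" and "+" are the unbounded sentinels, as in Redis ZRANGEBYLEX):

    std::vector<std::string> members;
    // Members m with "a" <= m < "c" (left-closed, right-open).
    storage::Status st = inst->ZRangebylex("myzset", "a", "c",
                                           /*left_close=*/true, /*right_close=*/false, &members);

    int32_t n = 0;
    // Counts every member, since "-" and "+" disable both bounds.
    st = inst->ZLexcount("myzset", "-", "+", true, true, &n);
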
+Status Redis::ZRemrangebylex(const Slice& key, const Slice& min, const Slice& max, bool left_close,
+                             bool right_close, int32_t* ret) {
+  *ret = 0;
+  uint32_t statistic = 0;
+  rocksdb::WriteBatch batch;
+  rocksdb::ReadOptions read_options;
+  const rocksdb::Snapshot* snapshot = nullptr;
+
+  ScopeSnapshot ss(db_, &snapshot);
+  read_options.snapshot = snapshot;
+  ScopeRecordLock l(lock_mgr_, key);
+
+  bool left_no_limit = min.compare("-") == 0;
+  bool right_not_limit = max.compare("+") == 0;
+
+  int32_t del_cnt = 0;
+  std::string meta_value;
+
+  BaseMetaKey base_meta_key(key);
+  Status s = db_->Get(read_options, handles_[kMetaCF], base_meta_key.Encode(), &meta_value);
+  if (s.ok() && !ExpectedMetaValue(DataType::kZSets, meta_value)) {
+    if (ExpectedStale(meta_value)) {
+      s = Status::NotFound();
+    } else {
+      return Status::InvalidArgument(
+          "WRONGTYPE, key: " + key.ToString() + ", expected type: " +
+          DataTypeStrings[static_cast<int>(DataType::kZSets)] + ", got type: " +
+          DataTypeStrings[static_cast<int>(GetMetaValueType(meta_value))]);
+    }
+  }
+  if (s.ok()) {
+    ParsedZSetsMetaValue parsed_zsets_meta_value(&meta_value);
+    if (parsed_zsets_meta_value.IsStale() || parsed_zsets_meta_value.Count() == 0) {
+      return Status::NotFound();
+    } else {
+      uint64_t version = parsed_zsets_meta_value.Version();
+      int32_t cur_index = 0;
+      int32_t stop_index = parsed_zsets_meta_value.Count() - 1;
+      ZSetsMemberKey zsets_member_key(key, version, Slice());
+      KeyStatisticsDurationGuard guard(this, DataType::kZSets, key.ToString());
+      rocksdb::Iterator* iter = db_->NewIterator(read_options, handles_[kZsetsDataCF]);
+      for (iter->Seek(zsets_member_key.Encode()); iter->Valid() && cur_index <= stop_index;
+           iter->Next(), ++cur_index) {
+        bool left_pass = false;
+        bool right_pass = false;
+        ParsedZSetsMemberKey parsed_zsets_member_key(iter->key());
+        Slice member = parsed_zsets_member_key.member();
+        if (left_no_limit || (left_close && min.compare(member) <= 0) || (!left_close && min.compare(member) < 0)) {
+          left_pass = true;
+        }
+        if (right_not_limit || (right_close && max.compare(member) >= 0) || (!right_close && max.compare(member) > 0)) {
+          right_pass = true;
+        }
+        if (left_pass && right_pass) {
+          batch.Delete(handles_[kZsetsDataCF], iter->key());
+
+          ParsedBaseDataValue parsed_value(iter->value());
+          uint64_t tmp = DecodeFixed64(parsed_value.UserValue().data());
+          const void* ptr_tmp = reinterpret_cast<const void*>(&tmp);
+          double score = *reinterpret_cast<const double*>(ptr_tmp);
+          ZSetsScoreKey zsets_score_key(key, version, score, member);
+          batch.Delete(handles_[kZsetsScoreCF], zsets_score_key.Encode());
+          del_cnt++;
+          statistic++;
+        }
+        if (!right_pass) {
+          break;
+        }
+      }
+      delete iter;
+    }
+    if (del_cnt > 0) {
+      if (!parsed_zsets_meta_value.CheckModifyCount(-del_cnt)) {
+        return Status::InvalidArgument("zset size overflow");
+      }
+      parsed_zsets_meta_value.ModifyCount(-del_cnt);
+      batch.Put(handles_[kMetaCF], base_meta_key.Encode(), meta_value);
+      *ret = del_cnt;
+    }
+  } else {
+    return s;
+  }
+  s = db_->Write(default_write_options_, &batch);
+  UpdateSpecificKeyStatistics(DataType::kZSets, key.ToString(), statistic);
+  return s;
+}
+
+Status Redis::ZsetsExpire(const Slice& key, int64_t ttl_millsec, std::string&& prefetch_meta) {
+  std::string meta_value(std::move(prefetch_meta));
+  ScopeRecordLock l(lock_mgr_, key);
+  BaseMetaKey base_meta_key(key);
+  Status s;
+
+  // An empty meta_value means the meta value has not been fetched yet; fetch it first.
+  if (meta_value.empty()) {
+    s = db_->Get(default_read_options_, handles_[kMetaCF], base_meta_key.Encode(), &meta_value);
+    if (s.ok() && !ExpectedMetaValue(DataType::kZSets, meta_value)) {
+      if (ExpectedStale(meta_value)) {
+        s = Status::NotFound();
+      } else {
+        return Status::InvalidArgument(
+            "WRONGTYPE, key: " + key.ToString() + ", expected type: " +
+            DataTypeStrings[static_cast<int>(DataType::kZSets)] + ", got type: " +
+            DataTypeStrings[static_cast<int>(GetMetaValueType(meta_value))]);
+      }
+    }
+  }
+  if (s.ok()) {
+    ParsedZSetsMetaValue parsed_zsets_meta_value(&meta_value);
+    if (parsed_zsets_meta_value.IsStale()) {
+      return Status::NotFound("Stale");
+    } else if (parsed_zsets_meta_value.Count() == 0) {
+      return Status::NotFound();
+    }
+
+    if (ttl_millsec > 0) {
+      parsed_zsets_meta_value.SetRelativeTimestamp(ttl_millsec);
+    } else {
+      parsed_zsets_meta_value.InitialMetaValue();
+    }
+    s = db_->Put(default_write_options_, handles_[kMetaCF], base_meta_key.Encode(), meta_value);
+  }
+  return s;
+}
+
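Note the TTL convention here: the argument is in milliseconds, and a non-positive TTL falls into InitialMetaValue(), which logically drops the zset. A hypothetical caller:

    // Expire "myzset" 10 seconds from now (TTLs are millisecond-granular).
    storage::Status st = inst->ZsetsExpire("myzset", 10 * 1000, std::string());
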
+Status Redis::ZsetsDel(const Slice& key, std::string&& prefetch_meta) {
+  std::string meta_value(std::move(prefetch_meta));
+  ScopeRecordLock l(lock_mgr_, key);
+  BaseMetaKey base_meta_key(key);
+  Status s;
+
+  // An empty meta_value means the meta value has not been fetched yet; fetch it first.
+  if (meta_value.empty()) {
+    s = db_->Get(default_read_options_, handles_[kMetaCF], base_meta_key.Encode(), &meta_value);
+    if (s.ok() && !ExpectedMetaValue(DataType::kZSets, meta_value)) {
+      if (ExpectedStale(meta_value)) {
+        s = Status::NotFound();
+      } else {
+        return Status::InvalidArgument(
+            "WRONGTYPE, key: " + key.ToString() + ", expected type: " +
+            DataTypeStrings[static_cast<int>(DataType::kZSets)] + ", got type: " +
+            DataTypeStrings[static_cast<int>(GetMetaValueType(meta_value))]);
+      }
+    }
+  }
+  if (s.ok()) {
+    ParsedZSetsMetaValue parsed_zsets_meta_value(&meta_value);
+    if (parsed_zsets_meta_value.IsStale()) {
+      return Status::NotFound("Stale");
+    } else if (parsed_zsets_meta_value.Count() == 0) {
+      return Status::NotFound();
+    } else {
+      uint32_t statistic = parsed_zsets_meta_value.Count();
+      parsed_zsets_meta_value.InitialMetaValue();
+      s = db_->Put(default_write_options_, handles_[kMetaCF], base_meta_key.Encode(), meta_value);
+      UpdateSpecificKeyStatistics(DataType::kZSets, key.ToString(), statistic);
+    }
+  }
+  return s;
+}
+
+Status Redis::ZsetsExpireat(const Slice& key, int64_t timestamp_millsec, std::string&& prefetch_meta) {
+  std::string meta_value(std::move(prefetch_meta));
+  ScopeRecordLock l(lock_mgr_, key);
+  BaseMetaKey base_meta_key(key);
+  Status s;
+
+  // An empty meta_value means the meta value has not been fetched yet; fetch it first.
+  if (meta_value.empty()) {
+    s = db_->Get(default_read_options_, handles_[kMetaCF], base_meta_key.Encode(), &meta_value);
+    if (s.ok() && !ExpectedMetaValue(DataType::kZSets, meta_value)) {
+      if (ExpectedStale(meta_value)) {
+        s = Status::NotFound();
+      } else {
+        return Status::InvalidArgument(
+            "WRONGTYPE, key: " + key.ToString() + ", expected type: " +
+            DataTypeStrings[static_cast<int>(DataType::kZSets)] + ", got type: " +
+            DataTypeStrings[static_cast<int>(GetMetaValueType(meta_value))]);
+      }
+    }
+  }
+  if (s.ok()) {
+    ParsedZSetsMetaValue parsed_zsets_meta_value(&meta_value);
+    if (parsed_zsets_meta_value.IsStale()) {
+      return Status::NotFound("Stale");
+    } else if (parsed_zsets_meta_value.Count() == 0) {
+      return Status::NotFound();
+    } else {
+      if (timestamp_millsec > 0) {
+        parsed_zsets_meta_value.SetEtime(uint64_t(timestamp_millsec));
+      } else {
+        parsed_zsets_meta_value.InitialMetaValue();
+      }
+      return db_->Put(default_write_options_, handles_[kMetaCF], base_meta_key.Encode(), meta_value);
+    }
+  }
+  return s;
+}
+
+Status Redis::ZScan(const Slice& key, int64_t cursor, const std::string& pattern, int64_t count,
+                    std::vector<ScoreMember>* score_members, int64_t* next_cursor) {
+  *next_cursor = 0;
+  score_members->clear();
+  if (cursor < 0) {
+    *next_cursor = 0;
+    return Status::OK();
+  }
+
+  int64_t rest = count;
+  int64_t step_length = count;
+  rocksdb::ReadOptions read_options;
+  const rocksdb::Snapshot* snapshot;
+
+  std::string meta_value;
+  ScopeSnapshot ss(db_, &snapshot);
+  read_options.snapshot = snapshot;
+
+  BaseMetaKey base_meta_key(key);
+  Status s = db_->Get(read_options, handles_[kMetaCF], base_meta_key.Encode(), &meta_value);
+  if (s.ok() && !ExpectedMetaValue(DataType::kZSets, meta_value)) {
+    if (ExpectedStale(meta_value)) {
+      s = Status::NotFound();
+    } else {
+      return Status::InvalidArgument(
+          "WRONGTYPE, key: " + key.ToString() + ", expected type: " +
+          DataTypeStrings[static_cast<int>(DataType::kZSets)] + ", got type: " +
+          DataTypeStrings[static_cast<int>(GetMetaValueType(meta_value))]);
+    }
+  }
+  if (s.ok()) {
+    ParsedZSetsMetaValue parsed_zsets_meta_value(&meta_value);
+    if (parsed_zsets_meta_value.IsStale() || parsed_zsets_meta_value.Count() == 0) {
+      *next_cursor = 0;
+      return Status::NotFound();
+    } else {
+      std::string sub_member;
+      std::string start_point;
+      uint64_t version = parsed_zsets_meta_value.Version();
+      s = GetScanStartPoint(DataType::kZSets, key, pattern, cursor, &start_point);
+      if (s.IsNotFound()) {
+        cursor = 0;
+        if (isTailWildcard(pattern)) {
+          start_point = pattern.substr(0, pattern.size() - 1);
+        }
+      }
+      if (isTailWildcard(pattern)) {
+        sub_member = pattern.substr(0, pattern.size() - 1);
+      }
+
+      ZSetsMemberKey zsets_member_prefix(key, version, sub_member);
+      ZSetsMemberKey zsets_member_key(key, version, start_point);
+      std::string prefix = zsets_member_prefix.EncodeSeekKey().ToString();
+      KeyStatisticsDurationGuard guard(this, DataType::kZSets, key.ToString());
+      rocksdb::Iterator* iter = db_->NewIterator(read_options, handles_[kZsetsDataCF]);
+      for (iter->Seek(zsets_member_key.Encode()); iter->Valid() && rest > 0 && iter->key().starts_with(prefix);
+           iter->Next()) {
+        ParsedZSetsMemberKey parsed_zsets_member_key(iter->key());
+        std::string member = parsed_zsets_member_key.member().ToString();
+        if (StringMatch(pattern.data(), pattern.size(), member.data(), member.size(), 0) != 0) {
+          ParsedBaseDataValue parsed_value(iter->value());
+          uint64_t tmp = DecodeFixed64(parsed_value.UserValue().data());
+          const void* ptr_tmp = reinterpret_cast<const void*>(&tmp);
+          double score = *reinterpret_cast<const double*>(ptr_tmp);
+          score_members->push_back({score, member});
+        }
+        rest--;
+      }
+
+      if (iter->Valid() && (iter->key().compare(prefix) <= 0 || iter->key().starts_with(prefix))) {
+        *next_cursor = cursor + step_length;
+        ParsedZSetsMemberKey parsed_zsets_member_key(iter->key());
+        std::string next_member = parsed_zsets_member_key.member().ToString();
+        StoreScanNextPoint(DataType::kZSets, key, pattern, *next_cursor, next_member);
+      } else {
+        *next_cursor = 0;
+      }
+      delete iter;
+    }
+  } else {
+    *next_cursor = 0;
+    return s;
+  }
+  return Status::OK();
+}
+
+Status Redis::ZsetsPersist(const Slice& key, std::string&& prefetch_meta) {
+  std::string meta_value(std::move(prefetch_meta));
+  BaseMetaKey base_meta_key(key);
+  ScopeRecordLock l(lock_mgr_, key);
+  Status s;
+
+  // An empty meta_value means the meta value has not been fetched yet; fetch it first.
+  if (meta_value.empty()) {
+    s = db_->Get(default_read_options_, handles_[kMetaCF], base_meta_key.Encode(), &meta_value);
+    if (s.ok() && !ExpectedMetaValue(DataType::kZSets, meta_value)) {
+      if (ExpectedStale(meta_value)) {
+        s = Status::NotFound();
+      } else {
+        return Status::InvalidArgument(
+            "WRONGTYPE, key: " + key.ToString() + ", expected type: " +
+            DataTypeStrings[static_cast<int>(DataType::kZSets)] + ", got type: " +
+            DataTypeStrings[static_cast<int>(GetMetaValueType(meta_value))]);
+      }
+    }
+  }
+  if (s.ok()) {
+    ParsedZSetsMetaValue parsed_zsets_meta_value(&meta_value);
+    if (parsed_zsets_meta_value.IsStale()) {
+      return Status::NotFound("Stale");
+    } else if (parsed_zsets_meta_value.Count() == 0) {
+      return Status::NotFound();
+    } else {
+      uint64_t timestamp = parsed_zsets_meta_value.Etime();
+      if (timestamp == 0) {
+        return Status::NotFound("Not have an associated timeout");
+      } else {
+        parsed_zsets_meta_value.SetEtime(0);
+        return db_->Put(default_write_options_, handles_[kMetaCF], base_meta_key.Encode(), meta_value);
+      }
+    }
+  }
+  return s;
+}
+
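ZScan follows the usual SCAN contract: each call returns up to `count` matching members plus a next_cursor, and a returned cursor of 0 means the iteration is finished. A hypothetical driver loop:

    int64_t cursor = 0;
    std::vector<storage::ScoreMember> page;
    do {
      page.clear();
      storage::Status st = inst->ZScan("myzset", cursor, "user:*", 100, &page, &cursor);
      if (!st.ok() && !st.IsNotFound()) {
        break;  // a real error, not just a missing/expired key
      }
      // consume `page` ...
    } while (cursor != 0);
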
"WRONGTYPE, key: " + key.ToString() + ", expected type: " + + DataTypeStrings[static_cast(DataType::kZSets)] + ", got type: " + + DataTypeStrings[static_cast(GetMetaValueType(meta_value))]); + } + } + } + if (s.ok()) { + ParsedZSetsMetaValue parsed_zsets_meta_value(&meta_value); + if (parsed_zsets_meta_value.IsStale()) { + *ttl_millsec = -2; + return Status::NotFound("Stale"); + } else if (parsed_zsets_meta_value.Count() == 0) { + *ttl_millsec = -2; + return Status::NotFound(); + } else { + *ttl_millsec = parsed_zsets_meta_value.Etime(); + if (*ttl_millsec == 0) { + *ttl_millsec = -1; + } else { + pstd::TimeType curtime = pstd::NowMillis(); + *ttl_millsec = *ttl_millsec - curtime >= 0 ? *ttl_millsec - curtime : -2; + } + } + } else if (s.IsNotFound()) { + *ttl_millsec = -2; + } + return s; +} + +void Redis::ScanZsets() { + rocksdb::ReadOptions iterator_options; + const rocksdb::Snapshot* snapshot; + ScopeSnapshot ss(db_, &snapshot); + iterator_options.snapshot = snapshot; + iterator_options.fill_cache = false; + auto current_time = static_cast(time(nullptr)); + + LOG(INFO) << "***************" << "rocksdb instance: " << index_ << " ZSets Meta Data***************"; + auto meta_iter = db_->NewIterator(iterator_options, handles_[kMetaCF]); + for (meta_iter->SeekToFirst(); meta_iter->Valid(); meta_iter->Next()) { + if (!ExpectedMetaValue(DataType::kZSets, meta_iter->value().ToString())) { + continue; + } + ParsedBaseMetaKey parsed_meta_key(meta_iter->key()); + ParsedZSetsMetaValue parsed_zsets_meta_value(meta_iter->value()); + int32_t survival_time = 0; + if (parsed_zsets_meta_value.Etime() != 0) { + survival_time = parsed_zsets_meta_value.Etime() - current_time > 0 + ? parsed_zsets_meta_value.Etime() - current_time + : -1; + } + + LOG(INFO) << fmt::format("[key : {:<30}] [count : {:<10}] [timestamp : {:<10}] [version : {}] [survival_time : {}]", + parsed_meta_key.Key().ToString(), parsed_zsets_meta_value.Count(), parsed_zsets_meta_value.Etime(), + parsed_zsets_meta_value.Version(), survival_time); + } + delete meta_iter; + + LOG(INFO) << "***************" << "rocksdb instance: " << index_ << " ZSets Member To Score Data***************"; + auto member_iter = db_->NewIterator(iterator_options, handles_[kZsetsDataCF]); + for (member_iter->SeekToFirst(); member_iter->Valid(); member_iter->Next()) { + ParsedZSetsMemberKey parsed_zsets_member_key(member_iter->key()); + ParsedBaseDataValue parsed_value(member_iter->value()); + + uint64_t tmp = DecodeFixed64(parsed_value.UserValue().data()); + const void* ptr_tmp = reinterpret_cast(&tmp); + double score = *reinterpret_cast(ptr_tmp); + + LOG(INFO) << fmt::format("[key : {:<30}] [member : {:<20}] [score : {:<20}] [version : {}]", + parsed_zsets_member_key.Key().ToString(), parsed_zsets_member_key.member().ToString(), + score, parsed_zsets_member_key.Version()); + } + delete member_iter; + + LOG(INFO) << "***************" << "rocksdb instance: " << index_ << " ZSets Score To Member Data***************"; + auto score_iter = db_->NewIterator(iterator_options, handles_[kZsetsScoreCF]); + for (score_iter->SeekToFirst(); score_iter->Valid(); score_iter->Next()) { + ParsedZSetsScoreKey parsed_zsets_score_key(score_iter->key()); + + LOG(INFO) << fmt::format("[key : {:<30}] [score : {:<20}] [member : {:<20}] [version : {}]", + parsed_zsets_score_key.key().ToString(), parsed_zsets_score_key.score(), + parsed_zsets_score_key.member().ToString(), parsed_zsets_score_key.Version()); + } + delete score_iter; +} + +} // namespace storage diff --git 
diff --git a/tools/pika_migrate/src/storage/src/scope_record_lock.h b/tools/pika_migrate/src/storage/src/scope_record_lock.h
new file mode 100644
index 0000000000..37c14b3076
--- /dev/null
+++ b/tools/pika_migrate/src/storage/src/scope_record_lock.h
@@ -0,0 +1,24 @@
+// Copyright (c) 2017-present, Qihoo, Inc. All rights reserved.
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree. An additional grant
+// of patent rights can be found in the PATENTS file in the same directory.
+
+#ifndef SRC_SCOPE_RECORD_LOCK_H_
+#define SRC_SCOPE_RECORD_LOCK_H_
+
+#include <algorithm>
+#include <string>
+#include <utility>
+#include <vector>
+
+#include "pstd/include/scope_record_lock.h"
+#include "src/lock_mgr.h"
+#include "storage/storage.h"
+
+namespace storage {
+
+using ScopeRecordLock = pstd::lock::ScopeRecordLock;
+using MultiScopeRecordLock = pstd::lock::MultiScopeRecordLock;
+
+} // namespace storage
+#endif // SRC_SCOPE_RECORD_LOCK_H_
diff --git a/tools/pika_migrate/src/storage/src/scope_snapshot.h b/tools/pika_migrate/src/storage/src/scope_snapshot.h
new file mode 100644
index 0000000000..8fecfc6985
--- /dev/null
+++ b/tools/pika_migrate/src/storage/src/scope_snapshot.h
@@ -0,0 +1,27 @@
+// Copyright (c) 2017-present, Qihoo, Inc. All rights reserved.
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree. An additional grant
+// of patent rights can be found in the PATENTS file in the same directory.
+
+#ifndef SRC_SCOPE_SNAPSHOT_H_
+#define SRC_SCOPE_SNAPSHOT_H_
+
+#include "rocksdb/db.h"
+
+#include "pstd/include/noncopyable.h"
+
+namespace storage {
+class ScopeSnapshot : public pstd::noncopyable {
+ public:
+  ScopeSnapshot(rocksdb::DB* db, const rocksdb::Snapshot** snapshot) : db_(db), snapshot_(snapshot) {
+    *snapshot_ = db_->GetSnapshot();
+  }
+  ~ScopeSnapshot() { db_->ReleaseSnapshot(*snapshot_); }
+
+ private:
+  rocksdb::DB* const db_;
+  const rocksdb::Snapshot** snapshot_;
+};
+
+} // namespace storage
+#endif // SRC_SCOPE_SNAPSHOT_H_
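ScopeSnapshot pins one consistent RocksDB view for the duration of a read path and releases it through RAII; a minimal sketch of the pattern (assumes an open rocksdb::DB* db):

    rocksdb::ReadOptions read_options;
    const rocksdb::Snapshot* snapshot = nullptr;
    {
      storage::ScopeSnapshot ss(db, &snapshot);  // calls db->GetSnapshot()
      read_options.snapshot = snapshot;
      // every read issued through read_options sees the same point-in-time view
    }  // destructor calls db->ReleaseSnapshot(snapshot)
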
diff --git a/tools/pika_migrate/src/storage/src/storage.cc b/tools/pika_migrate/src/storage/src/storage.cc
new file mode 100644
index 0000000000..cc7ca864f0
--- /dev/null
+++ b/tools/pika_migrate/src/storage/src/storage.cc
@@ -0,0 +1,2003 @@
+// Copyright (c) 2017-present, Qihoo, Inc. All rights reserved.
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree. An additional grant
+// of patent rights can be found in the PATENTS file in the same directory.
+
+#include <algorithm>
+#include <utility>
+
+#include <glog/logging.h>
+
+#include "storage/util.h"
+#include "storage/storage.h"
+#include "scope_snapshot.h"
+#include "src/lru_cache.h"
+#include "src/mutex_impl.h"
+#include "src/options_helper.h"
+#include "src/redis_hyperloglog.h"
+#include "src/type_iterator.h"
+#include "src/redis.h"
+#include "include/pika_conf.h"
+#include "pstd/include/pika_codis_slot.h"
+
+namespace storage {
+extern std::string BitOpOperate(BitOpType op, const std::vector<std::string>& src_values, int64_t max_len);
+class Redis;
+Status StorageOptions::ResetOptions(const OptionType& option_type,
+                                    const std::unordered_map<std::string, std::string>& options_map) {
+  std::unordered_map<std::string, MemberTypeInfo>& options_member_type_info = mutable_cf_options_member_type_info;
+  char* opt = reinterpret_cast<char*>(static_cast<rocksdb::ColumnFamilyOptions*>(&options));
+  if (option_type == OptionType::kDB) {
+    options_member_type_info = mutable_db_options_member_type_info;
+    opt = reinterpret_cast<char*>(static_cast<rocksdb::DBOptions*>(&options));
+  }
+  for (const auto& option_member : options_map) {
+    try {
+      auto iter = options_member_type_info.find(option_member.first);
+      if (iter == options_member_type_info.end()) {
+        return Status::InvalidArgument("Unsupported option member: " + option_member.first);
+      }
+      const auto& member_info = iter->second;
+      if (!ParseOptionMember(member_info.type, option_member.second, opt + member_info.offset)) {
+        return Status::InvalidArgument("Error parsing option member " + option_member.first);
+      }
+    } catch (std::exception& e) {
+      return Status::InvalidArgument("Error parsing option member " + option_member.first + ":" +
+                                     std::string(e.what()));
+    }
+  }
+  return Status::OK();
+}
+
+// for unit test only
+Storage::Storage() : Storage(3, 1024, true) {}
+
+Storage::Storage(int db_instance_num, int slot_num, bool is_classic_mode) {
+  cursors_store_ = std::make_unique<LRUCache<std::string, std::string>>();
+  cursors_store_->SetCapacity(5000);
+  slot_indexer_ = std::make_unique<SlotIndexer>(db_instance_num);
+  is_classic_mode_ = is_classic_mode;
+  db_instance_num_ = db_instance_num;
+  slot_num_ = slot_num;
+
+  Status s = StartBGThread();
+  if (!s.ok()) {
+    LOG(FATAL) << "start bg thread failed, " << s.ToString();
+  }
+}
+
+Storage::~Storage() {
+  bg_tasks_should_exit_ = true;
+  bg_tasks_cond_var_.notify_one();
+
+  if (is_opened_) {
+    int ret = 0;
+    if ((ret = pthread_join(bg_tasks_thread_id_, nullptr)) != 0) {
+      LOG(ERROR) << "pthread_join failed with bgtask thread error " << ret;
+    }
+    for (auto& inst : insts_) {
+      inst.reset();
+    }
+  }
+}
+
+static std::string AppendSubDirectory(const std::string& db_path, int index) {
+  if (db_path.back() == '/') {
+    return db_path + std::to_string(index);
+  } else {
+    return db_path + "/" + std::to_string(index);
+  }
+}
+
+std::vector<rocksdb::ColumnFamilyHandle*> Storage::GetHashCFHandles(const int idx) {
+  return insts_[idx]->GetHashCFHandles();
+}
+
+rocksdb::WriteOptions Storage::GetDefaultWriteOptions(const int idx) const {
+  return insts_[idx]->GetDefaultWriteOptions();
+}
+
+Status Storage::Open(const StorageOptions& storage_options, const std::string& db_path) {
+  mkpath(db_path.c_str(), 0755);
+
+  int inst_count = db_instance_num_;
+  storage_options_ = storage_options;
+  for (int index = 0; index < inst_count; index++) {
+    insts_.emplace_back(std::make_unique<Redis>(this, index));
+    Status s = insts_.back()->Open(storage_options, AppendSubDirectory(db_path, index));
+    if (!s.ok()) {
+      LOG(FATAL) << "open db failed, " << s.ToString();
+    }
+  }
+
+  is_opened_.store(true);
+  return Status::OK();
+}
+
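Each Redis instance owns its own RocksDB directory below db_path, derived by AppendSubDirectory (illustrative paths only):

    // db_instance_num_ == 3, db_path == "/data/pika":
    //   instance 0 -> /data/pika/0
    //   instance 1 -> /data/pika/1
    //   instance 2 -> /data/pika/2
    // A trailing slash is tolerated: "/data/pika/" yields the same layout.
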
+Status Storage::LoadCursorStartKey(const DataType& dtype, int64_t cursor, char* type, std::string* start_key) {
+  std::string index_key = DataTypeTag[static_cast<int>(dtype)] + std::to_string(cursor);
+  std::string index_value;
+  Status s = cursors_store_->Lookup(index_key, &index_value);
+  if (!s.ok() || index_value.size() < 3) {
+    return s;
+  }
+  *type = index_value[0];
+  *start_key = index_value.substr(1);
+  return s;
+}
+
+Status Storage::StoreCursorStartKey(const DataType& dtype, int64_t cursor, char type, const std::string& next_key) {
+  std::string index_key = DataTypeTag[static_cast<int>(dtype)] + std::to_string(cursor);
+  // format: data_type tag(1B) | start_key
+  std::string index_value(1, type);
+  index_value.append(next_key);
+  return cursors_store_->Insert(index_key, index_value);
+}
+
+std::unique_ptr<Redis>& Storage::GetDBInstance(const Slice& key) { return GetDBInstance(key.ToString()); }
+
+std::unique_ptr<Redis>& Storage::GetDBInstance(const std::string& key) {
+  auto inst_index = slot_indexer_->GetInstanceID(GetSlotID(slot_num_, key));
+  return insts_[inst_index];
+}
+
+// Strings Commands
+Status Storage::Set(const Slice& key, const Slice& value) {
+  auto& inst = GetDBInstance(key);
+  return inst->Set(key, value);
+}
+
+Status Storage::Setxx(const Slice& key, const Slice& value, int32_t* ret, int64_t ttl_millsec) {
+  auto& inst = GetDBInstance(key);
+  return inst->Setxx(key, value, ret, ttl_millsec);
+}
+
+Status Storage::Get(const Slice& key, std::string* value) {
+  auto& inst = GetDBInstance(key);
+  return inst->Get(key, value);
+}
+
+Status Storage::GetWithTTL(const Slice& key, std::string* value, int64_t* ttl_millsec) {
+  auto& inst = GetDBInstance(key);
+  return inst->GetWithTTL(key, value, ttl_millsec);
+}
+
+Status Storage::MGetWithTTL(const Slice& key, std::string* value, int64_t* ttl_millsec) {
+  auto& inst = GetDBInstance(key);
+  return inst->MGetWithTTL(key, value, ttl_millsec);
+}
+
+Status Storage::GetSet(const Slice& key, const Slice& value, std::string* old_value) {
+  auto& inst = GetDBInstance(key);
+  return inst->GetSet(key, value, old_value);
+}
+
+Status Storage::SetBit(const Slice& key, int64_t offset, int32_t value, int32_t* ret) {
+  auto& inst = GetDBInstance(key);
+  return inst->SetBit(key, offset, value, ret);
+}
+
+Status Storage::GetBit(const Slice& key, int64_t offset, int32_t* ret) {
+  auto& inst = GetDBInstance(key);
+  return inst->GetBit(key, offset, ret);
+}
+
+Status Storage::MSet(const std::vector<KeyValue>& kvs) {
+  Status s;
+  for (const auto& kv : kvs) {
+    auto& inst = GetDBInstance(kv.key);
+    s = inst->Set(Slice(kv.key), Slice(kv.value));
+    if (!s.ok()) {
+      return s;
+    }
+  }
+  return s;
+}
+
+Status Storage::MGet(const std::vector<std::string>& keys, std::vector<ValueStatus>* vss) {
+  vss->clear();
+  Status s;
+  for (const auto& key : keys) {
+    auto& inst = GetDBInstance(key);
+    std::string value;
+    s = inst->MGet(key, &value);
+    if (s.ok()) {
+      vss->push_back({value, Status::OK()});
+    } else if (s.IsNotFound()) {
+      vss->push_back({std::string(), Status::NotFound()});
+    } else {
+      vss->clear();
+      return s;
+    }
+  }
+  return Status::OK();
+}
+
+Status Storage::MGetWithTTL(const std::vector<std::string>& keys, std::vector<ValueStatus>* vss) {
+  vss->clear();
+  Status s;
+  for (const auto& key : keys) {
+    auto& inst = GetDBInstance(key);
+    std::string value;
+    int64_t ttl_millsec;
+    s = inst->MGetWithTTL(key, &value, &ttl_millsec);
+    if (s.ok()) {
+      vss->push_back({value, Status::OK(), ttl_millsec});
+    } else if (s.IsNotFound()) {
+      vss->push_back({std::string(), Status::NotFound(), ttl_millsec});
+    } else {
+      vss->clear();
+      return s;
+    }
+  }
+  return Status::OK();
+}
+
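Multi-get routes each key to its owning instance and reports per-key status; a hypothetical caller sketch (assumes `db` is an opened storage::Storage):

    std::vector<storage::ValueStatus> vss;
    storage::Status st = db.MGet({"k1", "k2", "missing"}, &vss);
    // vss[i].status.ok()         -> vss[i].value holds the string
    // vss[i].status.IsNotFound() -> the key is absent and the value is empty
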
+Status Storage::Setnx(const Slice& key, const Slice& value, int32_t* ret, int64_t ttl_millsec) {
+  auto& inst = GetDBInstance(key);
+  return inst->Setnx(key, value, ret, ttl_millsec);
+}
+
+// disallowed in codis, only runs in pika classic mode
+// TODO: Not concurrent safe now, merge wuxianrong's bugfix after floyd's PR review finishes.
+Status Storage::MSetnx(const std::vector<KeyValue>& kvs, int32_t* ret) {
+  assert(is_classic_mode_);
+  Status s;
+  for (const auto& kv : kvs) {
+    auto& inst = GetDBInstance(kv.key);
+    s = inst->IsExist(Slice(kv.key));
+    if (!s.IsNotFound()) {
+      return s;
+    }
+  }
+
+  for (const auto& kv : kvs) {
+    auto& inst = GetDBInstance(kv.key);
+    s = inst->Set(Slice(kv.key), Slice(kv.value));
+    if (!s.ok()) {
+      return s;
+    }
+  }
+  if (s.ok()) {
+    *ret = 1;
+  }
+  return s;
+}
+
+Status Storage::Setvx(const Slice& key, const Slice& value, const Slice& new_value, int32_t* ret,
+                      int64_t ttl_millsec) {
+  auto& inst = GetDBInstance(key);
+  return inst->Setvx(key, value, new_value, ret, ttl_millsec);
+}
+
+Status Storage::Delvx(const Slice& key, const Slice& value, int32_t* ret) {
+  auto& inst = GetDBInstance(key);
+  return inst->Delvx(key, value, ret);
+}
+
+Status Storage::Setrange(const Slice& key, int64_t start_offset, const Slice& value, int32_t* ret) {
+  auto& inst = GetDBInstance(key);
+  return inst->Setrange(key, start_offset, value, ret);
+}
+
+Status Storage::Getrange(const Slice& key, int64_t start_offset, int64_t end_offset, std::string* ret) {
+  auto& inst = GetDBInstance(key);
+  return inst->Getrange(key, start_offset, end_offset, ret);
+}
+
+Status Storage::GetrangeWithValue(const Slice& key, int64_t start_offset, int64_t end_offset,
+                                  std::string* ret, std::string* value, int64_t* ttl_millsec) {
+  auto& inst = GetDBInstance(key);
+  return inst->GetrangeWithValue(key, start_offset, end_offset, ret, value, ttl_millsec);
+}
+
+Status Storage::Append(const Slice& key, const Slice& value, int32_t* ret, int64_t* expired_timestamp_millsec,
+                       std::string& out_new_value) {
+  auto& inst = GetDBInstance(key);
+  return inst->Append(key, value, ret, expired_timestamp_millsec, out_new_value);
+}
+
+Status Storage::BitCount(const Slice& key, int64_t start_offset, int64_t end_offset, int32_t* ret, bool have_range) {
+  auto& inst = GetDBInstance(key);
+  return inst->BitCount(key, start_offset, end_offset, ret, have_range);
+}
+
+// disallowed in codis proxy, only runs in classic mode
+Status Storage::BitOp(BitOpType op, const std::string& dest_key, const std::vector<std::string>& src_keys,
+                      std::string& value_to_dest, int64_t* ret) {
+  assert(is_classic_mode_);
+  if (op == storage::BitOpType::kBitOpNot && src_keys.size() >= 2) {
+    return Status::InvalidArgument();
+  }
+  Status s;
+  int64_t max_len = 0;
+  int64_t value_len = 0;
+  std::vector<std::string> src_values;
+  for (const auto& src_key : src_keys) {
+    auto& inst = GetDBInstance(src_key);
+    std::string value;
+    s = inst->Get(Slice(src_key), &value);
+    if (s.ok()) {
+      src_values.push_back(value);
+      value_len = value.size();
+    } else {
+      if (!s.IsNotFound()) {
+        return s;
+      }
+      src_values.push_back("");
+      value_len = 0;
+    }
+    max_len = std::max(max_len, value_len);
+  }
+
+  std::string dest_value = BitOpOperate(op, src_values, max_len);
+  value_to_dest = dest_value;
+  *ret = dest_value.size();
+
+  auto& dest_inst = GetDBInstance(dest_key);
+  return dest_inst->Set(Slice(dest_key), Slice(dest_value));
+}
+
+Status Storage::BitPos(const Slice& key, int32_t bit, int64_t* ret) {
+  auto& inst = GetDBInstance(key);
+  return inst->BitPos(key, bit, ret);
+}
+
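BitOp reads every source key, folds the values with BitOpOperate, and writes the result to the destination key. A hypothetical classic-mode call (this assumes the And enumerator follows the kBitOpNot naming seen above):

    int64_t dest_len = 0;
    std::string dest_value;
    // dest = src1 AND src2, the same shape as Redis BITOP AND.
    storage::Status st = db.BitOp(storage::BitOpType::kBitOpAnd, "dest",
                                  {"src1", "src2"}, dest_value, &dest_len);
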
+Status Storage::BitPos(const Slice& key, int32_t bit, int64_t start_offset, int64_t* ret) {
+  auto& inst = GetDBInstance(key);
+  return inst->BitPos(key, bit, start_offset, ret);
+}
+
+Status Storage::BitPos(const Slice& key, int32_t bit, int64_t start_offset, int64_t end_offset, int64_t* ret) {
+  auto& inst = GetDBInstance(key);
+  return inst->BitPos(key, bit, start_offset, end_offset, ret);
+}
+
+Status Storage::Decrby(const Slice& key, int64_t value, int64_t* ret) {
+  auto& inst = GetDBInstance(key);
+  return inst->Decrby(key, value, ret);
+}
+
+Status Storage::Incrby(const Slice& key, int64_t value, int64_t* ret, int64_t* expired_timestamp_millsec) {
+  auto& inst = GetDBInstance(key);
+  return inst->Incrby(key, value, ret, expired_timestamp_millsec);
+}
+
+Status Storage::Incrbyfloat(const Slice& key, const Slice& value, std::string* ret, int64_t* expired_timestamp_sec) {
+  auto& inst = GetDBInstance(key);
+  return inst->Incrbyfloat(key, value, ret, expired_timestamp_sec);
+}
+
+Status Storage::Setex(const Slice& key, const Slice& value, int64_t ttl_millsec) {
+  auto& inst = GetDBInstance(key);
+  return inst->Setex(key, value, ttl_millsec);
+}
+
+Status Storage::Strlen(const Slice& key, int32_t* len) {
+  auto& inst = GetDBInstance(key);
+  return inst->Strlen(key, len);
+}
+
+Status Storage::PKSetexAt(const Slice& key, const Slice& value, int64_t time_stamp_millsec_) {
+  auto& inst = GetDBInstance(key);
+  if (time_stamp_millsec_ < 0) {
+    time_stamp_millsec_ = pstd::NowMillis() - 1;
+  }
+  return inst->PKSetexAt(key, value, time_stamp_millsec_);
+}
+
+// Hashes Commands
+Status Storage::HSet(const Slice& key, const Slice& field, const Slice& value, int32_t* res) {
+  auto& inst = GetDBInstance(key);
+  return inst->HSet(key, field, value, res);
+}
+
+Status Storage::HGet(const Slice& key, const Slice& field, std::string* value) {
+  auto& inst = GetDBInstance(key);
+  return inst->HGet(key, field, value);
+}
+
+Status Storage::HMSet(const Slice& key, const std::vector<FieldValue>& fvs) {
+  auto& inst = GetDBInstance(key);
+  return inst->HMSet(key, fvs);
+}
+
+Status Storage::HMGet(const Slice& key, const std::vector<std::string>& fields, std::vector<ValueStatus>* vss) {
+  auto& inst = GetDBInstance(key);
+  return inst->HMGet(key, fields, vss);
+}
+
+Status Storage::HGetall(const Slice& key, std::vector<FieldValue>* fvs) {
+  auto& inst = GetDBInstance(key);
+  return inst->HGetall(key, fvs);
+}
+
+Status Storage::HGetallWithTTL(const Slice& key, std::vector<FieldValue>* fvs, int64_t* ttl_millsec) {
+  auto& inst = GetDBInstance(key);
+  return inst->HGetallWithTTL(key, fvs, ttl_millsec);
+}
+
+Status Storage::HKeys(const Slice& key, std::vector<std::string>* fields) {
+  auto& inst = GetDBInstance(key);
+  return inst->HKeys(key, fields);
+}
+
+Status Storage::HVals(const Slice& key, std::vector<std::string>* values) {
+  auto& inst = GetDBInstance(key);
+  return inst->HVals(key, values);
+}
+
+Status Storage::HSetnx(const Slice& key, const Slice& field, const Slice& value, int32_t* ret) {
+  auto& inst = GetDBInstance(key);
+  return inst->HSetnx(key, field, value, ret);
+}
+
+Status Storage::HLen(const Slice& key, int32_t* ret) {
+  auto& inst = GetDBInstance(key);
+  return inst->HLen(key, ret);
+}
+
+Status Storage::HStrlen(const Slice& key, const Slice& field, int32_t* len) {
+  auto& inst = GetDBInstance(key);
+  return inst->HStrlen(key, field, len);
+}
+
+Status Storage::HExists(const Slice& key, const Slice& field) {
+  auto& inst = GetDBInstance(key);
+  return inst->HExists(key, field);
+}
+
+Status Storage::HIncrby(const Slice& key, const Slice& field, int64_t value, int64_t* ret) {
+  auto& inst = GetDBInstance(key);
+  return inst->HIncrby(key, field, value, ret);
+}
+
+Status Storage::HIncrbyfloat(const Slice& key, const Slice& field, const Slice& by, std::string* new_value) {
+  auto& inst = GetDBInstance(key);
+  return inst->HIncrbyfloat(key, field, by, new_value);
+}
+
+Status Storage::HDel(const Slice& key, const std::vector<std::string>& fields, int32_t* ret) {
+  auto& inst = GetDBInstance(key);
+  return inst->HDel(key, fields, ret);
+}
+
+Status Storage::HScan(const Slice& key, int64_t cursor, const std::string& pattern, int64_t count,
+                      std::vector<FieldValue>* field_values, int64_t* next_cursor) {
+  auto& inst = GetDBInstance(key);
+  return inst->HScan(key, cursor, pattern, count, field_values, next_cursor);
+}
+
+Status Storage::HScanx(const Slice& key, const std::string& start_field, const std::string& pattern, int64_t count,
+                       std::vector<FieldValue>* field_values, std::string* next_field) {
+  auto& inst = GetDBInstance(key);
+  return inst->HScanx(key, start_field, pattern, count, field_values, next_field);
+}
+
+Status Storage::PKHScanRange(const Slice& key, const Slice& field_start, const std::string& field_end,
+                             const Slice& pattern, int32_t limit, std::vector<FieldValue>* field_values,
+                             std::string* next_field) {
+  auto& inst = GetDBInstance(key);
+  return inst->PKHScanRange(key, field_start, field_end, pattern, limit, field_values, next_field);
+}
+
+Status Storage::PKHRScanRange(const Slice& key, const Slice& field_start, const std::string& field_end,
+                              const Slice& pattern, int32_t limit, std::vector<FieldValue>* field_values,
+                              std::string* next_field) {
+  auto& inst = GetDBInstance(key);
+  return inst->PKHRScanRange(key, field_start, field_end, pattern, limit, field_values, next_field);
+}
+
+// Sets Commands
+Status Storage::SAdd(const Slice& key, const std::vector<std::string>& members, int32_t* ret) {
+  auto& inst = GetDBInstance(key);
+  return inst->SAdd(key, members, ret);
+}
+
+Status Storage::SCard(const Slice& key, int32_t* ret) {
+  auto& inst = GetDBInstance(key);
+  return inst->SCard(key, ret);
+}
+
+Status Storage::SDiff(const std::vector<std::string>& keys, std::vector<std::string>* members) {
+  if (keys.empty()) {
+    return rocksdb::Status::Corruption("SDiff invalid parameter, no keys");
+  }
+  members->clear();
+
+  Status s;
+  // in codis mode, users should guarantee keys will be hashed to the same slot
+  if (!is_classic_mode_) {
+    auto& inst = GetDBInstance(keys[0]);
+    s = inst->SDiff(keys, members);
+    return s;
+  }
+
+  auto& inst = GetDBInstance(keys[0]);
+  std::vector<std::string> keys0_members;
+  s = inst->SMembers(Slice(keys[0]), &keys0_members);
+  if (!s.ok() && !s.IsNotFound()) {
+    return s;
+  }
+
+  for (const auto& member : keys0_members) {
+    int32_t exist = 0;
+    for (size_t idx = 1; idx < keys.size(); idx++) {
+      Slice pkey = Slice(keys[idx]);
+      auto& inst = GetDBInstance(pkey);
+      s = inst->SIsmember(pkey, Slice(member), &exist);
+      if (!s.ok() && !s.IsNotFound()) {
+        return s;
+      }
+      if (exist) break;
+    }
+    if (!exist) {
+      members->push_back(member);
+    }
+  }
+  return Status::OK();
+}
+
+Status Storage::SDiffstore(const Slice& destination, const std::vector<std::string>& keys,
+                           std::vector<std::string>& value_to_dest, int32_t* ret) {
+  Status s;
+
+  // in codis mode, users should guarantee keys will be hashed to the same slot
+  if (!is_classic_mode_) {
+    auto& inst = GetDBInstance(keys[0]);
+    s = inst->SDiffstore(destination, keys, value_to_dest, ret);
+    return s;
+  }
+
+  s = SDiff(keys, &value_to_dest);
+  if (!s.ok()) {
+    return s;
+  }
+
+  auto& inst = GetDBInstance(destination);
+  s = inst->SetsDel(destination);
+  if (!s.ok() && !s.IsNotFound()) {
+    return s;
+  }
+
+  s = inst->SAdd(destination, value_to_dest, ret);
+  return s;
+}
+
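SDiff keeps exactly the members of the first set that appear in none of the remaining sets; a hypothetical classic-mode caller:

    std::vector<std::string> diff;
    // diff = members(s1) - members(s2) - members(s3)
    storage::Status st = db.SDiff({"s1", "s2", "s3"}, &diff);
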
+Status Storage::SInter(const std::vector<std::string>& keys, std::vector<std::string>* members) {
+  Status s;
+  members->clear();
+
+  // in codis mode, users should guarantee keys will be hashed to the same slot
+  if (!is_classic_mode_) {
+    auto& inst = GetDBInstance(keys[0]);
+    s = inst->SInter(keys, members);
+    return s;
+  }
+
+  std::vector<std::string> key0_members;
+  auto& inst = GetDBInstance(keys[0]);
+  s = inst->SMembers(keys[0], &key0_members);
+  if (s.IsNotFound()) {
+    return Status::OK();
+  }
+  if (!s.ok()) {
+    return s;
+  }
+
+  for (const auto& member : key0_members) {
+    int32_t exist = 1;
+    for (size_t idx = 1; idx < keys.size(); idx++) {
+      Slice pkey(keys[idx]);
+      auto& inst = GetDBInstance(keys[idx]);
+      s = inst->SIsmember(keys[idx], member, &exist);
+      if (s.ok() && exist > 0) {
+        continue;
+      } else if (!s.IsNotFound()) {
+        return s;
+      } else {
+        break;
+      }
+    }
+    if (exist > 0) {
+      members->push_back(member);
+    }
+  }
+  return Status::OK();
+}
+
+Status Storage::SInterstore(const Slice& destination, const std::vector<std::string>& keys,
+                            std::vector<std::string>& value_to_dest, int32_t* ret) {
+  Status s;
+
+  // in codis mode, users should guarantee keys will be hashed to the same slot
+  if (!is_classic_mode_) {
+    auto& inst = GetDBInstance(keys[0]);
+    s = inst->SInterstore(destination, keys, value_to_dest, ret);
+    return s;
+  }
+
+  s = SInter(keys, &value_to_dest);
+  if (!s.ok()) {
+    return s;
+  }
+
+  auto& dest_inst = GetDBInstance(destination);
+  s = dest_inst->Del(destination);
+  if (!s.ok() && !s.IsNotFound()) {
+    return s;
+  }
+
+  s = dest_inst->SAdd(destination, value_to_dest, ret);
+  return s;
+}
+
+Status Storage::SIsmember(const Slice& key, const Slice& member, int32_t* ret) {
+  auto& inst = GetDBInstance(key);
+  return inst->SIsmember(key, member, ret);
+}
+
+Status Storage::SMembers(const Slice& key, std::vector<std::string>* members) {
+  auto& inst = GetDBInstance(key);
+  return inst->SMembers(key, members);
+}
+
+Status Storage::SMembersWithTTL(const Slice& key, std::vector<std::string>* members, int64_t* ttl_millsec) {
+  auto& inst = GetDBInstance(key);
+  return inst->SMembersWithTTL(key, members, ttl_millsec);
+}
+
+Status Storage::SMove(const Slice& source, const Slice& destination, const Slice& member, int32_t* ret) {
+  Status s;
+
+  // in codis mode, users should guarantee keys will be hashed to the same slot
+  if (!is_classic_mode_) {
+    auto& inst = GetDBInstance(source);
+    s = inst->SMove(source, destination, member, ret);
+    return s;
+  }
+
+  auto& src_inst = GetDBInstance(source);
+  s = src_inst->SIsmember(source, member, ret);
+  if (s.IsNotFound()) {
+    *ret = 0;
+    return s;
+  }
+  if (!s.ok()) {
+    return s;
+  }
+
+  s = src_inst->SRem(source, std::vector<std::string>{member.ToString()}, ret);
+  if (!s.ok()) {
+    return s;
+  }
+  auto& dest_inst = GetDBInstance(destination);
+  int32_t unused_ret;
+  return dest_inst->SAdd(destination, std::vector<std::string>{member.ToString()}, &unused_ret);
+}
+
+Status Storage::SPop(const Slice& key, std::vector<std::string>* members, int64_t count) {
+  auto& inst = GetDBInstance(key);
+  Status status = inst->SPop(key, members, count);
+  return status;
+}
+
+Status Storage::SRandmember(const Slice& key, int32_t count, std::vector<std::string>* members) {
+  auto& inst = GetDBInstance(key);
+  return inst->SRandmember(key, count, members);
+}
+
+Status Storage::SRem(const Slice& key, const std::vector<std::string>& members, int32_t* ret) {
+  auto& inst = GetDBInstance(key);
+  return inst->SRem(key, members, ret);
+}
+
+Status Storage::SUnion(const std::vector<std::string>& keys, std::vector<std::string>* members) {
+  Status s;
+  members->clear();
+  // in codis mode, users should guarantee keys will be hashed to the same slot
+  if (!is_classic_mode_) {
+    auto& inst = GetDBInstance(keys[0]);
+    return inst->SUnion(keys, members);
+  }
+
+  using Iter = std::vector<std::string>::iterator;
+  using Uset = std::unordered_set<std::string>;
+  Uset member_set;
+  for (const auto& key : keys) {
+    std::vector<std::string> vec;
+    auto& inst = GetDBInstance(key);
+    s = inst->SMembers(key, &vec);
+    if (s.IsNotFound()) {
+      continue;
+    }
+    if (!s.ok()) {
+      return s;
+    }
+    std::copy(std::move_iterator<Iter>(vec.begin()), std::move_iterator<Iter>(vec.end()),
+              std::insert_iterator<Uset>(member_set, member_set.begin()));
+  }
+
+  std::copy(member_set.begin(), member_set.end(), std::back_inserter(*members));
+  return Status::OK();
+}
+
+Status Storage::SUnionstore(const Slice& destination, const std::vector<std::string>& keys,
+                            std::vector<std::string>& value_to_dest, int32_t* ret) {
+  Status s;
+  value_to_dest.clear();
+
+  // in codis mode, users should guarantee keys will be hashed to the same slot
+  if (!is_classic_mode_) {
+    auto& inst = GetDBInstance(destination);
+    s = inst->SUnionstore(destination, keys, value_to_dest, ret);
+    return s;
+  }
+
+  s = SUnion(keys, &value_to_dest);
+  if (!s.ok()) {
+    return s;
+  }
+  *ret = value_to_dest.size();
+  auto& dest_inst = GetDBInstance(destination);
+  s = dest_inst->Del(destination);
+  if (!s.ok() && !s.IsNotFound()) {
+    return s;
+  }
+  int32_t unused_ret;
+  return dest_inst->SAdd(destination, value_to_dest, &unused_ret);
+}
+
+Status Storage::SScan(const Slice& key, int64_t cursor, const std::string& pattern, int64_t count,
+                      std::vector<std::string>* members, int64_t* next_cursor) {
+  auto& inst = GetDBInstance(key);
+  return inst->SScan(key, cursor, pattern, count, members, next_cursor);
+}
+
+Status Storage::LPush(const Slice& key, const std::vector<std::string>& values, uint64_t* ret) {
+  auto& inst = GetDBInstance(key);
+  return inst->LPush(key, values, ret);
+}
+
+Status Storage::RPush(const Slice& key, const std::vector<std::string>& values, uint64_t* ret) {
+  auto& inst = GetDBInstance(key);
+  return inst->RPush(key, values, ret);
+}
+
+Status Storage::LRange(const Slice& key, int64_t start, int64_t stop, std::vector<std::string>* ret) {
+  ret->clear();
+  auto& inst = GetDBInstance(key);
+  return inst->LRange(key, start, stop, ret);
+}
+
+Status Storage::LRangeWithTTL(const Slice& key, int64_t start, int64_t stop, std::vector<std::string>* ret,
+                              int64_t* ttl_millsec) {
+  auto& inst = GetDBInstance(key);
+  return inst->LRangeWithTTL(key, start, stop, ret, ttl_millsec);
+}
+
+Status Storage::LTrim(const Slice& key, int64_t start, int64_t stop) {
+  auto& inst = GetDBInstance(key);
+  return inst->LTrim(key, start, stop);
+}
+
+Status Storage::LLen(const Slice& key, uint64_t* len) {
+  auto& inst = GetDBInstance(key);
+  return inst->LLen(key, len);
+}
+
+Status Storage::LPop(const Slice& key, int64_t count, std::vector<std::string>* elements) {
+  elements->clear();
+  auto& inst = GetDBInstance(key);
+  return inst->LPop(key, count, elements);
+}
+
+Status Storage::RPop(const Slice& key, int64_t count, std::vector<std::string>* elements) {
+  elements->clear();
+  auto& inst = GetDBInstance(key);
+  return inst->RPop(key, count, elements);
+}
+
+Status Storage::LIndex(const Slice& key, int64_t index, std::string* element) {
+  element->clear();
+  auto& inst = GetDBInstance(key);
+  return inst->LIndex(key, index, element);
+}
+
+Status Storage::LInsert(const Slice& key, const BeforeOrAfter& before_or_after, const std::string& pivot,
+                        const std::string& value, int64_t* ret) {
+  auto& inst = GetDBInstance(key);
+  return inst->LInsert(key, before_or_after, pivot, value, ret);
+}
+
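A small list round trip over the commands above (hypothetical caller):

    uint64_t len = 0;
    std::vector<std::string> range;
    db.LPush("mylist", {"c", "b", "a"}, &len);  // head pushes: list is a, b, c
    db.LRange("mylist", 0, -1, &range);         // range == {"a", "b", "c"}
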
inst->LPushx(key, values, len); +} + +Status Storage::RPushx(const Slice& key, const std::vector& values, uint64_t* len) { + auto& inst = GetDBInstance(key); + return inst->RPushx(key, values, len); +} + +Status Storage::LRem(const Slice& key, int64_t count, const Slice& value, uint64_t* ret) { + auto& inst = GetDBInstance(key); + return inst->LRem(key, count, value, ret); +} + +Status Storage::LSet(const Slice& key, int64_t index, const Slice& value) { + auto& inst = GetDBInstance(key); + return inst->LSet(key, index, value); +} + +Status Storage::RPoplpush(const Slice& source, const Slice& destination, std::string* element) { + Status s; + element->clear(); + + // in codis mode, users should garentee keys will be hashed to same slot + if (!is_classic_mode_) { + auto& inst = GetDBInstance(source); + s = inst->RPoplpush(source, destination, element); + return s; + } + + auto& source_inst = GetDBInstance(source); + if (source.compare(destination) == 0) { + s = source_inst->RPoplpush(source, destination, element); + return s; + } + + std::vector elements; + s = source_inst->RPop(source, 1, &elements); + if (!s.ok()) { + return s; + } + *element = elements.front(); + std::vector values; + values.emplace_back(*element); + auto& dest_inst = GetDBInstance(destination); + uint64_t ret; + uint64_t llen = 0; + s = dest_inst->LPush(destination, elements, &ret); + if (!s.ok()) { + source_inst->RPush(source, values, &llen); + } + return s; +} + +Status Storage::ZPopMax(const Slice& key, const int64_t count, std::vector* score_members) { + score_members->clear(); + auto& inst = GetDBInstance(key); + return inst->ZPopMax(key, count, score_members); +} + +Status Storage::ZPopMin(const Slice& key, const int64_t count, std::vector* score_members) { + score_members->clear(); + auto& inst = GetDBInstance(key); + return inst->ZPopMin(key, count, score_members); +} + +Status Storage::ZAdd(const Slice& key, const std::vector& score_members, int32_t* ret) { + auto& inst = GetDBInstance(key); + return inst->ZAdd(key, score_members, ret); +} + +Status Storage::ZCard(const Slice& key, int32_t* ret) { + auto& inst = GetDBInstance(key); + return inst->ZCard(key, ret); +} + +Status Storage::ZCount(const Slice& key, double min, double max, bool left_close, bool right_close, int32_t* ret) { + auto& inst = GetDBInstance(key); + return inst->ZCount(key, min, max, left_close, right_close, ret); +} + +Status Storage::ZIncrby(const Slice& key, const Slice& member, double increment, double* ret) { + auto& inst = GetDBInstance(key); + return inst->ZIncrby(key, member, increment, ret); +} + +Status Storage::ZRange(const Slice& key, int32_t start, int32_t stop, std::vector* score_members) { + score_members->clear(); + auto& inst = GetDBInstance(key); + return inst->ZRange(key, start, stop, score_members); +} +Status Storage::ZRangeWithTTL(const Slice& key, int32_t start, int32_t stop, std::vector* score_members, + int64_t * ttl_millsec) { + score_members->clear(); + auto& inst = GetDBInstance(key); + return inst->ZRangeWithTTL(key, start, stop, score_members, ttl_millsec); +} + +Status Storage::ZRangebyscore(const Slice& key, double min, double max, bool left_close, bool right_close, + std::vector* score_members) { + // maximum number of zset is std::numeric_limits::max() + score_members->clear(); + auto& inst = GetDBInstance(key); + return inst->ZRangebyscore(key, min, max, left_close, right_close, std::numeric_limits::max(), 0, + score_members); +} + +Status Storage::ZRangebyscore(const Slice& key, double min, double max, bool 
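+// The overload above pins count to std::numeric_limits<int32_t>::max() and
+// offset to 0, i.e. "return everything in range"; the overload below lets
+// callers page through the range.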
+Status Storage::ZRangebyscore(const Slice& key, double min, double max, bool left_close, bool right_close,
+                              int64_t count, int64_t offset, std::vector<ScoreMember>* score_members) {
+  score_members->clear();
+  auto& inst = GetDBInstance(key);
+  return inst->ZRangebyscore(key, min, max, left_close, right_close, count, offset, score_members);
+}
+
+Status Storage::ZRank(const Slice& key, const Slice& member, int32_t* rank) {
+  auto& inst = GetDBInstance(key);
+  return inst->ZRank(key, member, rank);
+}
+
+Status Storage::ZRem(const Slice& key, const std::vector<std::string>& members, int32_t* ret) {
+  auto& inst = GetDBInstance(key);
+  return inst->ZRem(key, members, ret);
+}
+
+Status Storage::ZRemrangebyrank(const Slice& key, int32_t start, int32_t stop, int32_t* ret) {
+  auto& inst = GetDBInstance(key);
+  return inst->ZRemrangebyrank(key, start, stop, ret);
+}
+
+Status Storage::ZRemrangebyscore(const Slice& key, double min, double max, bool left_close, bool right_close,
+                                 int32_t* ret) {
+  auto& inst = GetDBInstance(key);
+  return inst->ZRemrangebyscore(key, min, max, left_close, right_close, ret);
+}
+
+Status Storage::ZRevrangebyscore(const Slice& key, double min, double max, bool left_close, bool right_close,
+                                 int64_t count, int64_t offset, std::vector<ScoreMember>* score_members) {
+  score_members->clear();
+  auto& inst = GetDBInstance(key);
+  return inst->ZRevrangebyscore(key, min, max, left_close, right_close, count, offset, score_members);
+}
+
+Status Storage::ZRevrange(const Slice& key, int32_t start, int32_t stop, std::vector<ScoreMember>* score_members) {
+  score_members->clear();
+  auto& inst = GetDBInstance(key);
+  return inst->ZRevrange(key, start, stop, score_members);
+}
+
+Status Storage::ZRevrangebyscore(const Slice& key, double min, double max, bool left_close, bool right_close,
+                                 std::vector<ScoreMember>* score_members) {
+  // maximum number of zset is std::numeric_limits<int32_t>::max()
+  score_members->clear();
+  auto& inst = GetDBInstance(key);
+  return inst->ZRevrangebyscore(key, min, max, left_close, right_close, std::numeric_limits<int32_t>::max(),
+                                0, score_members);
+}
+
+Status Storage::ZRevrank(const Slice& key, const Slice& member, int32_t* rank) {
+  auto& inst = GetDBInstance(key);
+  return inst->ZRevrank(key, member, rank);
+}
+
+Status Storage::ZScore(const Slice& key, const Slice& member, double* ret) {
+  auto& inst = GetDBInstance(key);
+  return inst->ZScore(key, member, ret);
+}
+
+Status Storage::ZUnionstore(const Slice& destination, const std::vector<std::string>& keys,
+                            const std::vector<double>& weights, const AGGREGATE agg,
+                            std::map<std::string, double>& value_to_dest, int32_t* ret) {
+  value_to_dest.clear();
+  Status s;
+
+  // in codis mode, users should guarantee keys will be hashed to the same slot
+  if (!is_classic_mode_) {
+    auto& inst = GetDBInstance(keys[0]);
+    s = inst->ZUnionstore(destination, keys, weights, agg, value_to_dest, ret);
+    return s;
+  }
+
+  for (size_t idx = 0; idx < keys.size(); idx++) {
+    Slice key = Slice(keys[idx]);
+    auto& inst = GetDBInstance(key);
+    std::map<std::string, double> member_to_score;
+    double weight = idx >= weights.size() ? 1 : weights[idx];
+    s = inst->ZGetAll(key, weight, &member_to_score);
+    if (!s.ok() && !s.IsNotFound()) {
+      return s;
+    }
+    for (const auto& key_score : member_to_score) {
+      const std::string& member = key_score.first;
+      double score = key_score.second;
+      if (value_to_dest.find(member) == value_to_dest.end()) {
+        value_to_dest[member] = score;
+        continue;
+      }
+      switch (agg) {
+        case SUM:
+          score += value_to_dest[member];
+          break;
+        case MIN:
+          score = std::min(value_to_dest[member], score);
+          break;
+        case MAX:
+          score = std::max(value_to_dest[member], score);
+          break;
+      }
+      value_to_dest[member] = (score == -0.0) ? 0 : score;
+    }
+  }
+
+  auto& inst = GetDBInstance(destination);
+  s = inst->ZsetsDel(destination);
+  if (!s.ok() && !s.IsNotFound()) {
+    return s;
+  }
+  std::vector<ScoreMember> score_members;
+  std::for_each(value_to_dest.begin(), value_to_dest.end(), [&score_members](const auto& kv) {
+    score_members.emplace_back(kv.second, kv.first);
+  });
+  *ret = score_members.size();
+  int32_t unused_ret;
+  return inst->ZAdd(destination, score_members, &unused_ret);
+}
+
+Status Storage::ZInterstore(const Slice& destination, const std::vector<std::string>& keys,
+                            const std::vector<double>& weights, const AGGREGATE agg,
+                            std::vector<ScoreMember>& value_to_dest, int32_t* ret) {
+  Status s;
+  value_to_dest.clear();
+
+  // in codis mode, users should guarantee keys will be hashed to the same slot
+  if (!is_classic_mode_) {
+    auto& inst = GetDBInstance(keys[0]);
+    s = inst->ZInterstore(destination, keys, weights, agg, value_to_dest, ret);
+    return s;
+  }
+
+  Slice key = Slice(keys[0]);
+  auto& inst = GetDBInstance(key);
+  std::map<std::string, double> member_to_score;
+  double weight = weights.empty() ? 1 : weights[0];
+  s = inst->ZGetAll(key, weight, &member_to_score);
+  if (!s.ok() && !s.IsNotFound()) {
+    return s;
+  }
+
+  for (const auto& member_score : member_to_score) {
+    const std::string& member = member_score.first;
+    double score = member_score.second;
+    bool reliable = true;
+
+    for (size_t idx = 1; idx < keys.size(); idx++) {
+      double idx_weight = idx >= weights.size() ? 1 : weights[idx];
+      auto& inst = GetDBInstance(keys[idx]);
+      double ret_score;
+      s = inst->ZScore(keys[idx], member, &ret_score);
+      if (!s.ok() && !s.IsNotFound()) {
+        return s;
+      }
+      if (s.IsNotFound()) {
+        reliable = false;
+        break;
+      }
+      switch (agg) {
+        case SUM:
+          score += ret_score * idx_weight;
+          break;
+        case MIN:
+          score = std::min(score, ret_score * idx_weight);
+          break;
+        case MAX:
+          score = std::max(score, ret_score * idx_weight);
+          break;
+      }
+    }
+    if (reliable) {
+      value_to_dest.emplace_back(score, member);
+    }
+  }
+
+  auto& ninst = GetDBInstance(destination);
+
+  s = ninst->ZsetsDel(destination);
+  if (!s.ok() && !s.IsNotFound()) {
+    return s;
+  }
+  *ret = value_to_dest.size();
+  int32_t unused_ret;
+  return ninst->ZAdd(destination, value_to_dest, &unused_ret);
+}
+
+Status Storage::ZRangebylex(const Slice& key, const Slice& min, const Slice& max, bool left_close,
+                            bool right_close, std::vector<std::string>* members) {
+  members->clear();
+  auto& inst = GetDBInstance(key);
+  return inst->ZRangebylex(key, min, max, left_close, right_close, members);
+}
+
+Status Storage::ZLexcount(const Slice& key, const Slice& min, const Slice& max, bool left_close,
+                          bool right_close, int32_t* ret) {
+  auto& inst = GetDBInstance(key);
+  return inst->ZLexcount(key, min, max, left_close, right_close, ret);
+}
+
+Status Storage::ZRemrangebylex(const Slice& key, const Slice& min, const Slice& max,
+                               bool left_close, bool right_close, int32_t* ret) {
+  auto& inst = GetDBInstance(key);
+  return inst->ZRemrangebylex(key, min, max, left_close, right_close, ret);
+}
+
+Status Storage::ZScan(const Slice& key, int64_t cursor, const std::string& pattern, int64_t count,
+                      std::vector<ScoreMember>* score_members, int64_t* next_cursor) {
+  score_members->clear();
+  auto& inst = GetDBInstance(key);
+  return inst->ZScan(key, cursor, pattern, count, score_members, next_cursor);
+}
+
+Status Storage::XAdd(const Slice& key, const std::string& serialized_message, StreamAddTrimArgs& args) {
+  auto& inst = GetDBInstance(key);
+  return inst->XAdd(key, serialized_message, args);
+}
+
+Status Storage::XDel(const Slice& key, const std::vector<streamID>& ids, int32_t& ret) {
+  auto& inst = GetDBInstance(key);
+  return inst->XDel(key, ids, ret);
+}
+
+Status Storage::XTrim(const Slice& key, StreamAddTrimArgs& args, int32_t& count) {
+  auto& inst = GetDBInstance(key);
+  return inst->XTrim(key, args, count);
+}
+
+Status Storage::XRange(const Slice& key, const StreamScanArgs& args, std::vector<IdMessage>& id_messages) {
+  auto& inst = GetDBInstance(key);
+  return inst->XRange(key, args, id_messages);
+}
+
+Status Storage::XRevrange(const Slice& key, const StreamScanArgs& args, std::vector<IdMessage>& id_messages) {
+  auto& inst = GetDBInstance(key);
+  return inst->XRevrange(key, args, id_messages);
+}
+
+Status Storage::XLen(const Slice& key, int32_t& len) {
+  auto& inst = GetDBInstance(key);
+  return inst->XLen(key, len);
+}
+
+Status Storage::XRead(const StreamReadGroupReadArgs& args, std::vector<std::vector<IdMessage>>& results,
+                      std::vector<std::string>& reserved_keys) {
+  Status s;
+  for (size_t i = 0; i < args.unparsed_ids.size(); i++) {
+    StreamReadGroupReadArgs single_args;
+    single_args.keys.push_back(args.keys[i]);
+    single_args.unparsed_ids.push_back(args.unparsed_ids[i]);
+    single_args.count = args.count;
+    single_args.block = args.block;
+    single_args.group_name = args.group_name;
+    single_args.consumer_name = args.consumer_name;
+    single_args.noack_ = args.noack_;
+    auto& inst = GetDBInstance(args.keys[i]);
+    s = inst->XRead(single_args, results, reserved_keys);
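+    // XREAD fans out per stream key: each key/id pair is routed to the
+    // rocksdb instance that owns the key, and a NotFound from one stream
+    // must not abort reads on the remaining streams.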
+    if (!s.ok() && !s.IsNotFound()) {
+      return s;
+    }
+  }
+  return s;
+}
+
+Status Storage::XInfo(const Slice& key, StreamInfoResult& result) {
+  auto& inst = GetDBInstance(key);
+  return inst->XInfo(key, result);
+}
+
+// Keys Commands
+int32_t Storage::Expire(const Slice& key, int64_t ttl_millsec) {
+  auto& inst = GetDBInstance(key);
+  int32_t ret = 0;
+  Status s = inst->Expire(key, ttl_millsec);
+  if (s.ok()) {
+    ret++;
+  } else if (!s.IsNotFound()) {
+    return -1;
+  }
+  return ret;
+}
+
+int64_t Storage::Del(const std::vector<std::string>& keys) {
+  Status s;
+  int64_t count = 0;
+  for (const auto& key : keys) {
+    auto& inst = GetDBInstance(key);
+    s = inst->Del(key);
+    if (s.ok()) {
+      count++;
+    }
+  }
+  return count;
+}
+
+int64_t Storage::Exists(const std::vector<std::string>& keys) {
+  int64_t count = 0;
+  Status s;
+  for (const auto& key : keys) {
+    auto& inst = GetDBInstance(key);
+    s = inst->Exists(key);
+    if (s.ok()) {
+      count++;
+    } else if (!s.IsNotFound()) {
+      return -1;
+    }
+  }
+  return count;
+}
+
+int64_t Storage::Scan(const DataType& dtype, int64_t cursor, const std::string& pattern, int64_t count,
+                      std::vector<std::string>* keys) {
+  assert(is_classic_mode_);
+  keys->clear();
+  int64_t step_length = count;
+  int64_t cursor_ret = 0;
+  std::string start_key;
+  std::string next_key;
+  std::string prefix;
+  char key_type;
+
+  // invalid cursor
+  if (cursor < 0) {
+    return cursor_ret;
+  }
+
+  // get the seek start key from the cursor
+  prefix = isTailWildcard(pattern) ? pattern.substr(0, pattern.size() - 1) : "";
+  Status s = LoadCursorStartKey(dtype, cursor, &key_type, &start_key);
+  if (!s.ok()) {
+    // if we want to scan all the data types, we start with the strings type
+    key_type = dtype == DataType::kAll ? DataTypeTag[static_cast<int>(DataType::kStrings)] : DataTypeTag[static_cast<int>(dtype)];
+    start_key = prefix;
+    cursor = 0;
+  }
+  // collect types to scan
+  std::vector<char> types;
+  if (DataType::kAll == dtype) {
+    auto iter_end = std::end(DataTypeTag);
+    auto pos = std::find(std::begin(DataTypeTag), iter_end, key_type);
+    if (pos == iter_end) {
+      LOG(WARNING) << "Invalid key_type: " << key_type;
+      return 0;
+    }
+    /*
+     * The reason we need to subtract 2 here is that the last two types of
+     * DataType are all and none, and we don't need these two types when we
+     * traverse with the scan iterator, only the first six data types of DataType
+     */
+    std::copy(pos, iter_end - 2, std::back_inserter(types));
+  } else {
+    types.push_back(DataTypeTag[static_cast<int>(dtype)]);
+  }
+
+  for (const auto& type : types) {
+    std::vector<IterSptr> inst_iters;
+    for (const auto& inst : insts_) {
+      IterSptr iter_sptr;
+      iter_sptr.reset(inst->CreateIterator(type, pattern,
+                                           nullptr/*lower_bound*/, nullptr/*upper_bound*/));
+      inst_iters.push_back(iter_sptr);
+    }
+
+    BaseMetaKey base_start_key(start_key);
+    MergingIterator miter(inst_iters);
+    miter.Seek(base_start_key.Encode().ToString());
+    while (miter.Valid() && count > 0) {
+      keys->push_back(miter.Key());
+      miter.Next();
+      count--;
+    }
+
+    bool is_finish = !miter.Valid();
+    if (miter.Valid() &&
+        (miter.Key().compare(prefix) <= 0 ||
+         miter.Key().substr(0, prefix.size()) == prefix)) {
+      is_finish = false;
+    }
+
+    // for specific type scan, reach the end
+    if (is_finish && dtype != DataType::kAll) {
+      return cursor_ret;
+    }
+
+    // we already collected count elements while the iterator is still valid,
+    // so store the cursor
+    if (!is_finish) {
+      next_key = miter.Key();
+      cursor_ret = cursor + step_length;
+      StoreCursorStartKey(dtype, cursor_ret, type, next_key);
+      return cursor_ret;
+    }
+
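+    // The cursor handed back to the caller is synthetic: cursor_ret advances
+    // by step_length, and StoreCursorStartKey() remembers (type, next_key) so
+    // a follow-up SCAN with cursor_ret resumes from next_key via
+    // LoadCursorStartKey() instead of re-walking the prefix.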
// for all type scan, move to next type, reset start_key + start_key = prefix; + } + return cursor_ret; +} + +Status Storage::PKScanRange(const DataType& data_type, const Slice& key_start, const Slice& key_end, + const Slice& pattern, int32_t limit, std::vector* keys, + std::vector* kvs, std::string* next_key) { + next_key->clear(); + std::string key; + std::string value; + + BaseMetaKey base_key_start(key_start); + BaseMetaKey base_key_end(key_end); + Slice base_key_end_slice(base_key_end.Encode()); + + bool start_no_limit = key_start.empty(); + bool end_no_limit = key_end.empty(); + if (!start_no_limit && !end_no_limit && key_start.compare(key_end) > 0) { + return Status::InvalidArgument("error in given range"); + } + + std::vector inst_iters; + for (const auto& inst : insts_) { + IterSptr iter_sptr; + iter_sptr.reset(inst->CreateIterator(data_type, pattern.ToString(), + nullptr/*lower_bound*/, nullptr/*upper_bound*/)); + inst_iters.push_back(iter_sptr); + } + + MergingIterator miter(inst_iters); + if (start_no_limit) { + miter.SeekToFirst(); + } else { + std::string temp = base_key_start.Encode().ToString(); + miter.Seek(temp); + } + + while (miter.Valid() && limit > 0 && + (end_no_limit || miter.Key().compare(key_end.ToString()) <= 0)) { + if (data_type == DataType::kStrings) { + kvs->push_back({miter.Key(), miter.Value()}); + } else { + keys->push_back(miter.Key()); + } + limit--; + miter.Next(); + } + + if (miter.Valid() && (end_no_limit || miter.Key().compare(key_end.ToString()) <= 0)) { + *next_key = miter.Key(); + } + return Status::OK(); +} + +Status Storage::PKRScanRange(const DataType& data_type, const Slice& key_start, const Slice& key_end, + const Slice& pattern, int32_t limit, std::vector* keys, + std::vector* kvs, std::string* next_key) { + next_key->clear(); + std::string key, value; + BaseMetaKey base_key_start(key_start); + BaseMetaKey base_key_end(key_end); + Slice base_key_start_slice = Slice(base_key_start.Encode()); + + bool start_no_limit = key_start.empty(); + bool end_no_limit = key_end.empty(); + + if (!start_no_limit && !end_no_limit && key_start.compare(key_end) < 0) { + return Status::InvalidArgument("error in given range"); + } + + std::vector inst_iters; + for (const auto& inst : insts_) { + IterSptr iter_sptr; + iter_sptr.reset(inst->CreateIterator(data_type, pattern.ToString(), + nullptr/*lower_bound*/, nullptr/*upper_bound*/)); + inst_iters.push_back(iter_sptr); + } + MergingIterator miter(inst_iters); + if (start_no_limit) { + miter.SeekToLast(); + } else { + miter.SeekForPrev(base_key_start.Encode().ToString()); + } + + while (miter.Valid() && limit > 0 && + (end_no_limit || miter.Key().compare(key_end.ToString()) >= 0)) { + if (data_type == DataType::kStrings) { + kvs->push_back({miter.Key(), miter.Value()}); + } else { + keys->push_back(miter.Key()); + } + limit--; + miter.Prev(); + } + + if (miter.Valid() && (end_no_limit || miter.Key().compare(key_end.ToString()) >= 0)) { + *next_key = miter.Key(); + } + return Status::OK(); +} + +Status Storage::PKPatternMatchDelWithRemoveKeys(const std::string& pattern, int64_t* ret, + std::vector* remove_keys, const int64_t& max_count) { + Status s; + *ret = 0; + for (const auto& inst : insts_) { + int64_t tmp_ret = 0; + s = inst->PKPatternMatchDelWithRemoveKeys(pattern, &tmp_ret, remove_keys, max_count - *ret); + if (!s.ok()) { + return s; + } + *ret += tmp_ret; + if (*ret == max_count) { + return s; + } + } + return s; +} + +Status Storage::Scanx(const DataType& data_type, const std::string& start_key, const 
std::string& pattern, + int64_t count, std::vector* keys, std::string* next_key) { + Status s; + keys->clear(); + next_key->clear(); + + std::vector inst_iters; + for (const auto& inst : insts_) { + IterSptr iter_sptr; + iter_sptr.reset(inst->CreateIterator(data_type, pattern, + nullptr/*lower_bound*/, nullptr/*upper_bound*/)); + inst_iters.push_back(iter_sptr); + } + + BaseMetaKey base_start_key(start_key); + MergingIterator miter(inst_iters); + miter.Seek(base_start_key.Encode().ToString()); + while (miter.Valid() && count > 0) { + keys->push_back(miter.Key()); + miter.Next(); + count--; + } + + std::string prefix = isTailWildcard(pattern) ? pattern.substr(0, pattern.size() - 1) : ""; + if (miter.Valid() && (miter.Key().compare(prefix) <= 0 || miter.Key().substr(0, prefix.size()) == prefix)) { + *next_key = miter.Key(); + } else { + *next_key = ""; + } + return Status::OK(); +} + +int32_t Storage::Expireat(const Slice& key, int64_t timestamp_millsec) { + Status s; + int32_t count = 0; + auto& inst = GetDBInstance(key); + s = inst->Expireat(key, timestamp_millsec); + if (s.ok()) { + count++; + } else if (!s.IsNotFound()) { + return -1; + } + return count; +} + +int32_t Storage::Persist(const Slice& key) { + auto& inst = GetDBInstance(key); + int32_t count = 0; + Status s = inst->Persist(key); + if (s.ok()) { + count++; + } else if (!s.IsNotFound()) { + return -1; + } + return count; +} + +int64_t Storage::PTTL(const Slice& key) { + int64_t ttl_millsec = 0; + auto& inst = GetDBInstance(key); + Status s = inst->TTL(key, &ttl_millsec); + if (s.ok() || s.IsNotFound()) { + return ttl_millsec; + } else if (!s.IsNotFound()) { + return -3; + } + return ttl_millsec; +} + +int64_t Storage::TTL(const Slice& key) { + int64_t ttl_millsec = 0; + auto& inst = GetDBInstance(key); + Status s = inst->TTL(key, &ttl_millsec); + if (s.ok() || s.IsNotFound()) { + return ttl_millsec > 0 ? ttl_millsec / 1000 : ttl_millsec; + } else if (!s.IsNotFound()) { + return -3; + } + return ttl_millsec > 0 ? 
ttl_millsec / 1000 : ttl_millsec;
+}
+
+Status Storage::GetType(const std::string& key, enum DataType& type) {
+  auto& inst = GetDBInstance(key);
+  inst->GetType(key, type);
+  return Status::OK();
+}
+
+Status Storage::Keys(const DataType& data_type, const std::string& pattern, std::vector<std::string>* keys) {
+  keys->clear();
+  std::vector<DataType> types;
+  types.push_back(data_type);
+
+  for (const auto& type : types) {
+    std::vector<IterSptr> inst_iters;
+    for (const auto& inst : insts_) {
+      IterSptr inst_iter;
+      inst_iter.reset(inst->CreateIterator(type, pattern, nullptr /*lower_bound*/, nullptr /*upper_bound*/));
+      inst_iters.push_back(inst_iter);
+    }
+
+    MergingIterator miter(inst_iters);
+    miter.SeekToFirst();
+    while (miter.Valid()) {
+      keys->push_back(miter.Key());
+      miter.Next();
+    }
+  }
+
+  return Status::OK();
+}
+
+void Storage::ScanDatabase(const DataType& type) {
+  for (const auto& inst : insts_) {
+    switch (type) {
+      case DataType::kStrings:
+        inst->ScanStrings();
+        break;
+      case DataType::kHashes:
+        inst->ScanHashes();
+        break;
+      case DataType::kSets:
+        inst->ScanSets();
+        break;
+      case DataType::kZSets:
+        inst->ScanZsets();
+        break;
+      case DataType::kLists:
+        inst->ScanLists();
+        break;
+      case DataType::kStreams:
+        // do nothing
+        break;
+      case DataType::kAll:
+        inst->ScanStrings();
+        inst->ScanHashes();
+        inst->ScanSets();
+        inst->ScanZsets();
+        inst->ScanLists();
+        break;
+    }
+  }
+}
+
+// HyperLogLog
+Status Storage::PfAdd(const Slice& key, const std::vector<std::string>& values, bool* update) {
+  *update = false;
+  if (values.size() >= kMaxKeys) {
+    return Status::InvalidArgument("Invalid the number of key");
+  }
+
+  std::string value;
+  std::string registers;
+  std::string result;
+  auto& inst = GetDBInstance(key);
+  Status s = inst->HyperloglogGet(key, &value);
+  if (s.ok()) {
+    registers = value;
+  } else if (s.IsNotFound()) {
+    registers = "";
+  } else {
+    return s;
+  }
+  HyperLogLog log(kPrecision, registers);
+  auto previous = static_cast<int32_t>(log.Estimate());
+  for (const auto& value : values) {
+    result = log.Add(value.data(), value.size());
+  }
+  HyperLogLog update_log(kPrecision, result);
+  auto now = static_cast<int32_t>(update_log.Estimate());
+  if (previous != now || (s.IsNotFound() && values.empty())) {
+    *update = true;
+  }
+  s = inst->HyperloglogSet(key, result);
+  return s;
+}
+
+Status Storage::PfCount(const std::vector<std::string>& keys, int64_t* result) {
+  if (keys.size() >= kMaxKeys || keys.empty()) {
+    return Status::InvalidArgument("Invalid the number of key");
+  }
+
+  std::string value;
+  std::string first_registers;
+  auto& inst = GetDBInstance(keys[0]);
+  Status s = inst->HyperloglogGet(keys[0], &value);
+  if (s.ok()) {
+    first_registers = std::string(value.data(), value.size());
+  } else if (s.IsNotFound()) {
+    first_registers = "";
+  } else {
+    return s;
+  }
+  HyperLogLog first_log(kPrecision, first_registers);
+  for (size_t i = 1; i < keys.size(); ++i) {
+    std::string value;
+    std::string registers;
+    auto& inst = GetDBInstance(keys[i]);
+    s = inst->HyperloglogGet(keys[i], &value);
+    if (s.ok()) {
+      registers = value;
+    } else if (s.IsNotFound()) {
+      continue;
+    } else {
+      return s;
+    }
+    HyperLogLog log(kPrecision, registers);
+    first_log.Merge(log);
+  }
+  *result = static_cast<int64_t>(first_log.Estimate());
+  return Status::OK();
+}
+
+Status Storage::PfMerge(const std::vector<std::string>& keys, std::string& value_to_dest) {
+  if (keys.size() >= kMaxKeys || keys.empty()) {
+    return Status::InvalidArgument("Invalid the number of key");
+  }
+
+  Status s;
+  std::string value;
+  std::string first_registers;
+  std::string result;
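+  // PfMerge folds the registers of keys[1..] into the HyperLogLog of keys[0]
+  // (register-wise max via HyperLogLog::Merge) and writes the merged value
+  // back to keys[0]; value_to_dest receives the merged raw registers.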
+  auto& inst = GetDBInstance(keys[0]);
+  s = inst->HyperloglogGet(keys[0], &value);
+  if (s.ok()) {
+    first_registers = std::string(value.data(), value.size());
+  } else if (s.IsNotFound()) {
+    first_registers = "";
+  } else {
+    return s;
+  }
+
+  result = first_registers;
+  HyperLogLog first_log(kPrecision, first_registers);
+  for (size_t i = 1; i < keys.size(); ++i) {
+    std::string value;
+    std::string registers;
+    auto& tmp_inst = GetDBInstance(keys[i]);
+    s = tmp_inst->HyperloglogGet(keys[i], &value);
+    if (s.ok()) {
+      registers = std::string(value.data(), value.size());
+    } else if (s.IsNotFound()) {
+      continue;
+    } else {
+      return s;
+    }
+    HyperLogLog log(kPrecision, registers);
+    result = first_log.Merge(log);
+  }
+  auto& ninst = GetDBInstance(keys[0]);
+  s = ninst->HyperloglogSet(keys[0], result);
+  value_to_dest = std::move(result);
+  return s;
+}
+
+static void* StartBGThreadWrapper(void* arg) {
+  auto s = reinterpret_cast<Storage*>(arg);
+  s->RunBGTask();
+  return nullptr;
+}
+
+Status Storage::StartBGThread() {
+  int result = pthread_create(&bg_tasks_thread_id_, nullptr, StartBGThreadWrapper, this);
+  if (result != 0) {
+    char msg[128];
+    snprintf(msg, sizeof(msg), "pthread create: %s", strerror(result));
+    return Status::Corruption(msg);
+  }
+  return Status::OK();
+}
+
+Status Storage::AddBGTask(const BGTask& bg_task) {
+  bg_tasks_mutex_.lock();
+  if (bg_task.type == DataType::kAll) {
+    // if the current task is a global compaction,
+    // clear the bg_tasks_queue_;
+    std::queue<BGTask> empty_queue;
+    bg_tasks_queue_.swap(empty_queue);
+  }
+  bg_tasks_queue_.push(bg_task);
+  bg_tasks_cond_var_.notify_one();
+  bg_tasks_mutex_.unlock();
+  return Status::OK();
+}
+
+Status Storage::RunBGTask() {
+  BGTask task;
+  while (!bg_tasks_should_exit_) {
+    std::unique_lock<std::mutex> lock(bg_tasks_mutex_);
+    bg_tasks_cond_var_.wait(lock, [this]() { return !bg_tasks_queue_.empty() || bg_tasks_should_exit_; });
+
+    if (!bg_tasks_queue_.empty()) {
+      task = bg_tasks_queue_.front();
+      bg_tasks_queue_.pop();
+    }
+    lock.unlock();
+
+    if (bg_tasks_should_exit_) {
+      return Status::Incomplete("bgtask return with bg_tasks_should_exit true");
+    }
+
+    if (task.operation == kCleanAll) {
+      DoCompactRange(task.type, "", "");
+    } else if (task.operation == kCompactOldestOrBestDeleteRatioSst) {
+      LongestNotCompactionSstCompact(task.type, true);
+    } else if (task.operation == kCompactRange) {
+      if (task.argv.size() == 1) {
+        DoCompactSpecificKey(task.type, task.argv[0]);
+      }
+      if (task.argv.size() == 2) {
+        DoCompactRange(task.type, task.argv.front(), task.argv.back());
+      }
+    }
+  }
+  return Status::OK();
+}
+
+Status Storage::LongestNotCompactionSstCompact(const DataType& type, bool sync) {
+  if (sync) {
+    Status s;
+    for (const auto& inst : insts_) {
+      std::vector<Status> compact_result_vec;
+      s = inst->LongestNotCompactionSstCompact(type, &compact_result_vec);
+      for (const auto& compact_result : compact_result_vec) {
+        if (!compact_result.ok()) {
+          LOG(ERROR) << compact_result.ToString();
+        }
+      }
+    }
+    return s;
+  } else {
+    AddBGTask({type, kCompactOldestOrBestDeleteRatioSst});
+  }
+  return Status::OK();
+}
+
+Status Storage::Compact(const DataType& type, bool sync) {
+  if (sync) {
+    return DoCompactRange(type, "", "");
+  } else {
+    AddBGTask({type, kCleanAll});
+  }
+  return Status::OK();
+}
+
+// run CompactRange for all rocksdb instances
+Status Storage::DoCompactRange(const DataType& type, const std::string& start, const std::string& end) {
+  if (type != DataType::kAll) {
+    return Status::InvalidArgument("");
+  }
+
+  std::string start_key, end_key;
+  CalculateStartAndEndKey(start,
&start_key, nullptr); + CalculateStartAndEndKey(end, nullptr, &end_key); + Slice slice_start_key(start_key); + Slice slice_end_key(end_key); + Slice* start_ptr = slice_start_key.empty() ? nullptr : &slice_start_key; + Slice* end_ptr = slice_end_key.empty() ? nullptr : &slice_end_key; + + Status s; + for (const auto& inst : insts_) { + current_task_type_ = Operation::kCleanAll; + s = inst->CompactRange(start_ptr, end_ptr); + if (!s.ok()) { + LOG(ERROR) << "DoCompactRange error: " << s.ToString(); + } + } + current_task_type_ = Operation::kNone; + return s; +} + +Status Storage::CompactRange(const DataType& type, const std::string& start, const std::string& end, bool sync) { + if (sync) { + return DoCompactRange(type, start, end); + } else { + AddBGTask({type, kCompactRange, {start, end}}); + } + return Status::OK(); +} + +Status Storage::DoCompactSpecificKey(const DataType& type, const std::string& key) { + Status s; + auto& inst = GetDBInstance(key); + + std::string start_key; + std::string end_key; + CalculateStartAndEndKey(key, &start_key, &end_key); + Slice slice_begin(start_key); + Slice slice_end(end_key); + s = inst->CompactRange(&slice_begin, &slice_end); + return s; +} + +Status Storage::SetMaxCacheStatisticKeys(uint32_t max_cache_statistic_keys) { + for (const auto& inst : insts_) { + inst->SetMaxCacheStatisticKeys(max_cache_statistic_keys); + } + return Status::OK(); +} + +Status Storage::SetSmallCompactionThreshold(uint32_t small_compaction_threshold) { + for (const auto& inst : insts_) { + inst->SetSmallCompactionThreshold(small_compaction_threshold); + } + return Status::OK(); +} + +Status Storage::SetSmallCompactionDurationThreshold(uint32_t small_compaction_duration_threshold) { + for (const auto& inst : insts_) { + inst->SetSmallCompactionDurationThreshold(small_compaction_duration_threshold); + } + return Status::OK(); +} + +std::string Storage::GetCurrentTaskType() { + int type = current_task_type_; + switch (type) { + case kCleanAll: + return "All"; + case kNone: + default: + return "No"; + } +} + +Status Storage::GetUsage(const std::string& property, uint64_t* const result) { + std::map inst_result; + GetUsage(property, &inst_result); + for (const auto& it : inst_result) { + *result += it.second; + } + return Status::OK(); +} + +Status Storage::GetUsage(const std::string& property, std::map* const inst_result) { + inst_result->clear(); + for (const auto& inst : insts_) { + uint64_t value = 0; + inst->GetProperty(property, &value); + (*inst_result)[inst->GetIndex()] = value; + } + return Status::OK(); +} + +uint64_t Storage::GetProperty(const std::string& property) { + uint64_t out = 0; + uint64_t result = 0; + Status s; + for (const auto& inst : insts_) { + s = inst->GetProperty(property, &out); + result += out; + } + return result; +} + +Status Storage::GetKeyNum(std::vector* key_infos) { + KeyInfo key_info; + key_infos->resize(DataTypeNum); + for (const auto& db : insts_) { + std::vector db_key_infos; + // check the scanner was stopped or not, before scanning the next db + if (scan_keynum_exit_) { + break; + } + auto s = db->ScanKeyNum(&db_key_infos); + if (!s.ok()) { + return s; + } + std::transform(db_key_infos.begin(), db_key_infos.end(), + key_infos->begin(), key_infos->begin(), std::plus<>{}); + } + if (scan_keynum_exit_) { + scan_keynum_exit_ = false; + return Status::Corruption("exit"); + } + return Status::OK(); +} + +Status Storage::StopScanKeyNum() { + scan_keynum_exit_ = true; + return Status::OK(); +} + +rocksdb::DB* Storage::GetDBByIndex(int index) { + if 
(index < 0 || index >= db_instance_num_) {
+    LOG(WARNING) << "Invalid DB Index: " << index << " total: "
+                 << db_instance_num_;
+    return nullptr;
+  }
+  return insts_[index]->GetDB();
+}
+
+Status Storage::SetOptions(const OptionType& option_type, const std::string& db_type,
+                           const std::unordered_map<std::string, std::string>& options) {
+  Status s;
+  for (const auto& inst : insts_) {
+    s = inst->SetOptions(option_type, options);
+    if (!s.ok()) {
+      return s;
+    }
+  }
+  s = EnableDymayticOptions(option_type, db_type, options);
+  return s;
+}
+
+void Storage::SetCompactRangeOptions(const bool is_canceled) {
+  for (const auto& inst : insts_) {
+    inst->SetCompactRangeOptions(is_canceled);
+  }
+}
+
+Status Storage::EnableDymayticOptions(const OptionType& option_type,
+                                      const std::string& db_type, const std::unordered_map<std::string, std::string>& options) {
+  Status s;
+  auto it = options.find("disable_auto_compactions");
+  if (it != options.end() && it->second == "false") {
+    s = EnableAutoCompaction(option_type, db_type, options);
+    LOG(WARNING) << "EnableAutoCompaction " << (s.ok() ? "success" : "failed")
+                 << " when Options get disable_auto_compactions: " << it->second << ", db_type: " << db_type;
+  }
+  return s;
+}
+
+Status Storage::EnableAutoCompaction(const OptionType& option_type,
+                                     const std::string& db_type, const std::unordered_map<std::string, std::string>& options) {
+  Status s;
+
+  for (const auto& inst : insts_) {
+    std::vector<rocksdb::ColumnFamilyHandle*> cfhds;
+    auto string_cfhds = inst->GetStringCFHandles();
+    auto hash_cfhds = inst->GetHashCFHandles();
+    auto list_cfhds = inst->GetListCFHandles();
+    auto set_cfhds = inst->GetSetCFHandles();
+    auto zset_cfhds = inst->GetZsetCFHandles();
+    cfhds.insert(cfhds.end(), string_cfhds.begin(), string_cfhds.end());
+    cfhds.insert(cfhds.end(), hash_cfhds.begin(), hash_cfhds.end());
+    cfhds.insert(cfhds.end(), list_cfhds.begin(), list_cfhds.end());
+    cfhds.insert(cfhds.end(), set_cfhds.begin(), set_cfhds.end());
+    cfhds.insert(cfhds.end(), zset_cfhds.begin(), zset_cfhds.end());
+    s = inst->GetDB()->EnableAutoCompaction(cfhds);
+    if (!s.ok()) {
+      return s;
+    }
+  }
+  return s;
+}
+
+void Storage::GetRocksDBInfo(std::string& info) {
+  char temp[12] = {0};
+  for (const auto& inst : insts_) {
+    snprintf(temp, sizeof(temp), "instance%d_", inst->GetIndex());
+    inst->GetRocksDBInfo(info, temp);
+  }
+}
+
+const StorageOptions& Storage::GetStorageOptions() {
+  return storage_options_;
+}
+
+int64_t Storage::IsExist(const Slice& key, std::map<DataType, Status>* type_status) {
+  int64_t type_count = 0;
+  auto& inst = GetDBInstance(key);
+  Status s = inst->IsExist(key);
+  if (s.ok()) {
+    return 1;
+  }
+  return type_count;
+}
+
+void Storage::DisableWal(const bool is_wal_disable) {
+  for (const auto& inst : insts_) {
+    inst->SetWriteWalOptions(is_wal_disable);
+  }
+}
+
+}  // namespace storage
diff --git a/tools/pika_migrate/src/storage/src/storage_murmur3.h b/tools/pika_migrate/src/storage/src/storage_murmur3.h
new file mode 100644
index 0000000000..958c5dbf1a
--- /dev/null
+++ b/tools/pika_migrate/src/storage/src/storage_murmur3.h
@@ -0,0 +1,151 @@
+#ifndef MURMUR3_H_
+#define MURMUR3_H_
+
+//-----------------------------------------------------------------------------
+// MurmurHash3 was written by Austin Appleby, and is placed in the public
+// domain. The author hereby disclaims copyright to this source code.
+
+// Note - The x86 and x64 versions do _not_ produce the same results, as the
+// algorithms are optimized for their respective platforms.
You can still +// compile and run any of them on any platform, but your performance with the +// non-native version will be less than optimal. + +//----------------------------------------------------------------------------- +// Platform-specific functions and macros + +// Microsoft Visual Studio + +#if defined(_MSC_VER) + +typedef unsigned char uint8_t; +typedef unsigned long uint32_t; +typedef unsigned __int64 uint64_t; + +// Other compilers + +#else // defined(_MSC_VER) + +# include + +#endif // !defined(_MSC_VER) + +namespace storage { + +#define FORCE_INLINE __attribute__((always_inline)) + +inline uint32_t rotl32(uint32_t x, uint8_t r) { return (x << r) | (x >> (32 - r)); } + +#define ROTL32(x, y) rotl32(x, y) + +#define BIG_CONSTANT(x) (x##LLU) + +/* NO-OP for little-endian platforms */ +#if defined(__BYTE_ORDER__) && defined(__ORDER_LITTLE_ENDIAN__) +# if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__ +# define BYTESWAP(x) (x) +# endif +/* if __BYTE_ORDER__ is not predefined (like FreeBSD), use arch */ +#elif defined(__i386) || defined(__x86_64) || defined(__alpha) || defined(__vax) + +# define BYTESWAP(x) (x) +/* use __builtin_bswap32 if available */ +#elif defined(__GNUC__) || defined(__clang__) +# ifdef __has_builtin +# if __has_builtin(__builtin_bswap32) +# define BYTESWAP(x) __builtin_bswap32(x) +# endif // __has_builtin(__builtin_bswap32) +# endif // __has_builtin +#endif // defined(__GNUC__) || defined(__clang__) +/* last resort (big-endian w/o __builtin_bswap) */ +#ifndef BYTESWAP +# define BYTESWAP(x) ((((x)&0xFF) << 24) | (((x) >> 24) & 0xFF) | (((x)&0x0000FF00) << 8) | (((x)&0x00FF0000) >> 8)) +#endif + +//----------------------------------------------------------------------------- +// Block read - if your platform needs to do endian-swapping or can only +// handle aligned reads, do the conversion here + +#define getblock(p, i) BYTESWAP((p)[i]) + +//----------------------------------------------------------------------------- +// Finalization mix - force all bits of a hash block to avalanche + +uint32_t fmix32(uint32_t h) { + h ^= h >> 16; + h *= 0x85ebca6b; + h ^= h >> 13; + h *= 0xc2b2ae35; + h ^= h >> 16; + + return h; +} + +//----------------------------------------------------------------------------- + +#ifdef __cplusplus +extern "C" +#else +extern +#endif + void + MurmurHash3_x86_32(const void* key, int len, uint32_t seed, void* out) { + const auto data = (const uint8_t*)key; + const int nblocks = len / 4; + int i; + + uint32_t h1 = seed; + + uint32_t c1 = 0xcc9e2d51; + uint32_t c2 = 0x1b873593; + + //---------- + // body + + const auto blocks = (const uint32_t*)(data + nblocks * 4); + + for (i = -nblocks; i != 0; i++) { + uint32_t k1 = getblock(blocks, i); + + k1 *= c1; + k1 = ROTL32(k1, 15); + k1 *= c2; + + h1 ^= k1; + h1 = ROTL32(h1, 13); + h1 = h1 * 5 + 0xe6546b64; + } + + //---------- + // tail + { + const auto tail = (data + nblocks * 4); + + uint32_t k1 = 0; + + switch (len & 3) { + case 3: + k1 ^= tail[2] << 16; + case 2: + k1 ^= tail[1] << 8; + case 1: + k1 ^= tail[0]; + k1 *= c1; + k1 = ROTL32(k1, 15); + k1 *= c2; + h1 ^= k1; + }; + } + + //---------- + // finalization + + h1 ^= len; + + h1 = fmix32(h1); + + *(uint32_t*)out = h1; +} + +} // namespace storage + +#endif diff --git a/tools/pika_migrate/src/storage/src/strings_filter.h b/tools/pika_migrate/src/storage/src/strings_filter.h new file mode 100644 index 0000000000..c53478bb11 --- /dev/null +++ b/tools/pika_migrate/src/storage/src/strings_filter.h @@ -0,0 +1,66 @@ +// Copyright (c) 2017-present, 
Qihoo, Inc. All rights reserved. +// This source code is licensed under the BSD-style license found in the +// LICENSE file in the root directory of this source tree. An additional grant +// of patent rights can be found in the PATENTS file in the same directory. + +#ifndef SRC_STRINGS_FILTER_H_ +#define SRC_STRINGS_FILTER_H_ + +#include +#include + +#include "rocksdb/compaction_filter.h" +#include "src/debug.h" +#include "src/strings_value_format.h" + +namespace storage { + +class StringsFilter : public rocksdb::CompactionFilter { + public: + StringsFilter() = default; + bool Filter(int level, const rocksdb::Slice& key, const rocksdb::Slice& value, std::string* new_value, + bool* value_changed) const override { + pstd::TimeType unix_time = pstd::NowMillis(); + auto cur_time = static_cast(unix_time); + ParsedStringsValue parsed_strings_value(value); + TRACE("==========================START=========================="); + TRACE("[StringsFilter], key: %s, value = %s, timestamp: %llu, cur_time: %llu", key.ToString().c_str(), + parsed_strings_value.UserValue().ToString().c_str(), parsed_strings_value.Etime(), cur_time); + + if (parsed_strings_value.Etime() != 0 && parsed_strings_value.Etime() < cur_time) { + TRACE("Drop[Stale]"); + return true; + } else { + TRACE("Reserve"); + return false; + } + } + + /* + virtual rocksdb::CompactionFilter::Decision FilterBlobByKey(int level, const Slice& key, + uint64_t expire_time, std::string* new_value, std::string* skip_until) const override { + int64_t unix_time; + rocksdb::Env::Default()->GetCurrentTime(&unix_time); + auto cur_time = static_cast(unix_time); + if (expire_time !=0 && expire_time < cur_time) { + return CompactionFilter::Decision::kRemove; + } + return CompactionFilter::Decision::kKeep; + } + */ + + const char* Name() const override { return "StringsFilter"; } +}; + +class StringsFilterFactory : public rocksdb::CompactionFilterFactory { + public: + StringsFilterFactory() = default; + std::unique_ptr CreateCompactionFilter( + const rocksdb::CompactionFilter::Context& context) override { + return std::unique_ptr(new StringsFilter()); + } + const char* Name() const override { return "StringsFilterFactory"; } +}; + +} // namespace storage +#endif // SRC_STRINGS_FILTER_H_ diff --git a/tools/pika_migrate/src/storage/src/strings_value_format.h b/tools/pika_migrate/src/storage/src/strings_value_format.h new file mode 100644 index 0000000000..550104b339 --- /dev/null +++ b/tools/pika_migrate/src/storage/src/strings_value_format.h @@ -0,0 +1,163 @@ +// Copyright (c) 2017-present, Qihoo, Inc. All rights reserved. +// This source code is licensed under the BSD-style license found in the +// LICENSE file in the root directory of this source tree. An additional grant +// of patent rights can be found in the PATENTS file in the same directory. 
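+// Timestamp encoding note: ctime_/etime_ below are written with the most
+// significant bit set as a milliseconds marker, e.g. 1700000000000 ms is
+// persisted as (1700000000000 | (1ULL << 63)). Readers clear that bit; if the
+// masked value equals the raw value, the record predates the marker, was
+// written in seconds, and is normalized by multiplying by 1000.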
+
+#ifndef SRC_STRINGS_VALUE_FORMAT_H_
+#define SRC_STRINGS_VALUE_FORMAT_H_
+
+#include <string>
+
+#include "src/base_value_format.h"
+#include "storage/storage_define.h"
+
+namespace storage {
+/*
+ * | type | value | reserve | cdate | timestamp |
+ * |  1B  |       |   16B   |   8B  |     8B    |
+ * The most significant bit of the reserve field is used to distinguish
+ * strings from hyperloglog values.
+ */
+// 0x80 = 0b10000000
+constexpr uint8_t hyperloglog_reserve_flag = 0x80;
+class StringsValue : public InternalValue {
+ public:
+  explicit StringsValue(const rocksdb::Slice& user_value) : InternalValue(DataType::kStrings, user_value) {}
+  virtual rocksdb::Slice Encode() override {
+    size_t usize = user_value_.size();
+    size_t needed = usize + kSuffixReserveLength + 2 * kTimestampLength + kTypeLength;
+    char* dst = ReAllocIfNeeded(needed);
+    memcpy(dst, &type_, sizeof(type_));
+    dst += sizeof(type_);
+
+    memcpy(dst, user_value_.data(), usize);
+    dst += usize;
+    memcpy(dst, reserve_, kSuffixReserveLength);
+    dst += kSuffixReserveLength;
+    // The most significant bit is 1 for milliseconds and 0 for seconds.
+    // Older records stored these fields in seconds; newer ones use milliseconds.
+    uint64_t ctime = ctime_ > 0 ? (ctime_ | (1ULL << 63)) : 0;
+    EncodeFixed64(dst, ctime);
+    dst += kTimestampLength;
+    uint64_t etime = etime_ > 0 ? (etime_ | (1ULL << 63)) : 0;
+    EncodeFixed64(dst, etime);
+    return {start_, needed};
+  }
+};
+
+class HyperloglogValue : public InternalValue {
+ public:
+  explicit HyperloglogValue(const rocksdb::Slice& user_value) : InternalValue(DataType::kStrings, user_value) {}
+  virtual rocksdb::Slice Encode() override {
+    size_t usize = user_value_.size();
+    size_t needed = usize + kSuffixReserveLength + 2 * kTimestampLength + kTypeLength;
+    char* dst = ReAllocIfNeeded(needed);
+    memcpy(dst, &type_, sizeof(type_));
+    dst += sizeof(type_);
+
+    memcpy(dst, user_value_.data(), usize);
+    dst += usize;
+    reserve_[0] |= hyperloglog_reserve_flag;
+    memcpy(dst, reserve_, kSuffixReserveLength);
+    dst += kSuffixReserveLength;
+    EncodeFixed64(dst, ctime_);
+    dst += kTimestampLength;
+    EncodeFixed64(dst, etime_);
+    return {start_, needed};
+  }
+};
+
+class ParsedStringsValue : public ParsedInternalValue {
+ public:
+  // Use this constructor after rocksdb::DB::Get();
+  explicit ParsedStringsValue(std::string* internal_value_str) : ParsedInternalValue(internal_value_str) {
+    if (internal_value_str->size() >= kStringsValueMinLength) {
+      size_t offset = 0;
+      type_ = static_cast<DataType>(static_cast<uint8_t>((*internal_value_str)[0]));
+      offset += kTypeLength;
+      user_value_ = rocksdb::Slice(internal_value_str->data() + offset,
+                                   internal_value_str->size() - kStringsValueSuffixLength - offset);
+      offset += user_value_.size();
+      memcpy(reserve_, internal_value_str->data() + offset, kSuffixReserveLength);
+      offset += kSuffixReserveLength;
+      uint64_t ctime = DecodeFixed64(internal_value_str->data() + offset);
+      offset += sizeof(ctime_);
+      uint64_t etime = DecodeFixed64(internal_value_str->data() + offset);
+
+      ctime_ = (ctime & ~(1ULL << 63));
+      // if ctime_ == ctime, ctime_ was stored in seconds
+      if (ctime_ == ctime) {
+        ctime_ *= 1000;
+      }
+      etime_ = (etime & ~(1ULL << 63));
+      // if etime_ == etime, etime_ was stored in seconds
+      if (etime == etime_) {
+        etime_ *= 1000;
+      }
+    }
+  }
+
+  // Use this constructor in rocksdb::CompactionFilter::Filter();
+  explicit ParsedStringsValue(const rocksdb::Slice& internal_value_slice) : ParsedInternalValue(internal_value_slice) {
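+    // Same decoding as the std::string constructor above, but over a
+    // non-owning slice so the compaction filter can parse values without
+    // copying them.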
+    if (internal_value_slice.size() >= kStringsValueMinLength) {
+      size_t offset = 0;
+      type_ = static_cast<DataType>(static_cast<uint8_t>(internal_value_slice[0]));
+      offset += kTypeLength;
+      user_value_ = rocksdb::Slice(internal_value_slice.data() + offset, internal_value_slice.size() - kStringsValueSuffixLength - offset);
+      offset += user_value_.size();
+      memcpy(reserve_, internal_value_slice.data() + offset, kSuffixReserveLength);
+      offset += kSuffixReserveLength;
+      uint64_t ctime = DecodeFixed64(internal_value_slice.data() + offset);
+      offset += kTimestampLength;
+      uint64_t etime = DecodeFixed64(internal_value_slice.data() + offset);
+
+      ctime_ = (ctime & ~(1ULL << 63));
+      // if ctime_ == ctime, ctime_ was stored in seconds
+      if (ctime_ == ctime) {
+        ctime_ *= 1000;
+      }
+      etime_ = (etime & ~(1ULL << 63));
+      // if etime_ == etime, etime_ was stored in seconds
+      if (etime == etime_) {
+        etime_ *= 1000;
+      }
+    }
+  }
+
+  void StripSuffix() override {
+    if (value_) {
+      value_->erase(0, kTypeLength);
+      value_->erase(value_->size() - kStringsValueSuffixLength, kStringsValueSuffixLength);
+    }
+  }
+
+  // Strings type does not have a version field;
+  void SetVersionToValue() override {}
+
+  void SetCtimeToValue() override {
+    if (value_) {
+      char* dst = const_cast<char*>(value_->data()) + value_->size() -
+                  kStringsValueSuffixLength + kSuffixReserveLength;
+      uint64_t ctime = ctime_ > 0 ? (ctime_ | (1ULL << 63)) : 0;
+      EncodeFixed64(dst, ctime);
+    }
+  }
+
+  void SetEtimeToValue() override {
+    if (value_) {
+      char* dst = const_cast<char*>(value_->data()) + value_->size() -
+                  kStringsValueSuffixLength + kSuffixReserveLength + kTimestampLength;
+      uint64_t etime = etime_ > 0 ? (etime_ | (1ULL << 63)) : 0;
+      EncodeFixed64(dst, etime);
+    }
+  }
+
+ private:
+  const static size_t kStringsValueSuffixLength = 2 * kTimestampLength + kSuffixReserveLength;
+  const static size_t kStringsValueMinLength = kStringsValueSuffixLength + kTypeLength;
+};
+
+}  // namespace storage
+#endif  // SRC_STRINGS_VALUE_FORMAT_H_
diff --git a/tools/pika_migrate/src/storage/src/type_iterator.h b/tools/pika_migrate/src/storage/src/type_iterator.h
new file mode 100644
index 0000000000..35f9f149ab
--- /dev/null
+++ b/tools/pika_migrate/src/storage/src/type_iterator.h
@@ -0,0 +1,521 @@
+// Copyright (c) 2023-present, Qihoo, Inc. All rights reserved.
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree. An additional grant
+// of patent rights can be found in the PATENTS file in the same directory.
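+// Design note: each per-type iterator in this header wraps a raw rocksdb
+// iterator over the shared meta column family and skips entries whose type
+// tag, staleness, element count, or glob pattern disqualify them.
+// MergingIterator then k-way merges one such iterator per rocksdb instance
+// with binary heaps: a min-heap drives forward scans, a max-heap reverse ones.
+//
+// Usage sketch (hypothetical caller; CreateIterator and IterSptr are the
+// declarations used elsewhere in this patch):
+//
+//   std::vector<IterSptr> children;
+//   for (const auto& inst : insts) {
+//     children.emplace_back(inst->CreateIterator(DataType::kStrings, "*",
+//                                                nullptr, nullptr));
+//   }
+//   MergingIterator miter(children);
+//   for (miter.SeekToFirst(); miter.Valid(); miter.Next()) {
+//     keys.push_back(miter.Key());
+//   }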
+ +#ifndef TYPE_ITERATOR_H_ +#define TYPE_ITERATOR_H_ + +#include +#include + +#include "rocksdb/db.h" +#include "rocksdb/options.h" +#include "rocksdb/slice.h" +#include "rocksdb/status.h" +#include "rocksdb/table.h" +#include "glog/logging.h" + +#include "util/heap.h" +#include "storage/util.h" +#include "src/mutex.h" +#include "src/debug.h" +#include "src/base_data_key_format.h" +#include "src/base_key_format.h" +#include "src/base_meta_value_format.h" +#include "src/strings_value_format.h" +#include "src/lists_meta_value_format.h" +#include "src/pika_stream_meta_value.h" +#include "storage/storage_define.h" + +namespace storage { +using ColumnFamilyHandle = rocksdb::ColumnFamilyHandle; +using Comparator = rocksdb::Comparator; + +enum Direction { kForward, kReverse }; + +class TypeIterator { +public: + TypeIterator(const rocksdb::ReadOptions& options, rocksdb::DB* db, + ColumnFamilyHandle* handle) { + raw_iter_.reset(db->NewIterator(options, handle)); + } + + virtual ~TypeIterator() {} + + virtual void Seek(const std::string& start_key) { + raw_iter_->Seek(Slice(start_key)); + while (raw_iter_->Valid() && ShouldSkip()) { + raw_iter_->Next(); + } + } + + void SeekToFirst() { + raw_iter_->SeekToFirst(); + while (raw_iter_->Valid() && ShouldSkip()) { + raw_iter_->Next(); + } + } + + void SeekToLast() { + raw_iter_->SeekToLast(); + while (raw_iter_->Valid() && ShouldSkip()) { + raw_iter_->Prev(); + } + } + + virtual void SeekForPrev(const std::string& start_key) { + raw_iter_->SeekForPrev(Slice(start_key)); + while (raw_iter_->Valid() && ShouldSkip()) { + raw_iter_->Prev(); + } + } + + void Next() { + raw_iter_->Next(); + while (raw_iter_->Valid() && ShouldSkip()) { + raw_iter_->Next(); + } + } + + void Prev() { + raw_iter_->Prev(); + while (raw_iter_->Valid() && ShouldSkip()) { + raw_iter_->Prev(); + } + } + + virtual bool ShouldSkip() { return false; } + + virtual std::string Key() const { return user_key_; } + + virtual std::string Value() const {return user_value_; } + + virtual bool Valid() { return raw_iter_->Valid(); } + + virtual Status status() { return raw_iter_->status(); } + +protected: + std::unique_ptr raw_iter_; + std::string user_key_; + std::string user_value_; + Direction direction_ = kForward; +}; + +/* + * Since the meta of all data types is in a cf, + * it is necessary to skip data that does not + * belong to your type when iterating with an + * iterator + */ + +class StringsIterator : public TypeIterator { +public: + StringsIterator(const rocksdb::ReadOptions& options, rocksdb::DB* db, + ColumnFamilyHandle* handle, + const std::string& pattern) + : TypeIterator(options, db, handle), pattern_(pattern) {} + ~StringsIterator() {} + + bool ShouldSkip() override { + auto type = static_cast(static_cast(raw_iter_->value()[0])); + if (type != DataType::kStrings) { + return true; + } + ParsedStringsValue parsed_value(raw_iter_->value()); + if (parsed_value.IsStale()) { + return true; + } + + ParsedBaseKey parsed_key(raw_iter_->key().ToString()); + if (StringMatch(pattern_.data(), pattern_.size(), + parsed_key.Key().data(), parsed_key.Key().size(), 0) == 0) { + return true; + } + + user_key_ = parsed_key.Key().ToString(); + user_value_ = parsed_value.UserValue().ToString(); + return false; + } +private: + std::string pattern_; +}; + +class HashesIterator : public TypeIterator { +public: + HashesIterator(const rocksdb::ReadOptions& options, rocksdb::DB* db, + ColumnFamilyHandle* handle, + const std::string& pattern) + : TypeIterator(options, db, handle), pattern_(pattern) {} + 
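+  // ShouldSkip() below is driven by TypeIterator's Seek/Next/Prev loops:
+  // returning true advances past entries that are not hash metadata, are
+  // stale or empty, or do not match the glob pattern.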
~HashesIterator() {} + + bool ShouldSkip() override { + auto type = static_cast(static_cast(raw_iter_->value()[0])); + if (type != DataType::kHashes) { + return true; + } + ParsedHashesMetaValue parsed_meta_value(raw_iter_->value()); + if (parsed_meta_value.IsStale() || parsed_meta_value.Count() == 0) { + return true; + } + + ParsedBaseMetaKey parsed_key(raw_iter_->key().ToString()); + if (StringMatch(pattern_.data(), pattern_.size(), + parsed_key.Key().data(), parsed_key.Key().size(), 0) == 0) { + return true; + } + user_key_ = parsed_key.Key().ToString(); + user_value_ = parsed_meta_value.UserValue().ToString(); + return false; + } +private: + std::string pattern_; +}; + +class ListsIterator : public TypeIterator { +public: + ListsIterator(const rocksdb::ReadOptions& options, rocksdb::DB* db, + ColumnFamilyHandle* handle, + const std::string& pattern) + : TypeIterator(options, db, handle), pattern_(pattern) {} + ~ListsIterator() {} + + bool ShouldSkip() override { + auto type = static_cast(static_cast(raw_iter_->value()[0])); + if (type != DataType::kLists) { + return true; + } + ParsedListsMetaValue parsed_meta_value(raw_iter_->value()); + if (parsed_meta_value.IsStale() || parsed_meta_value.Count() == 0) { + return true; + } + + ParsedBaseMetaKey parsed_key(raw_iter_->key().ToString()); + if (StringMatch(pattern_.data(), pattern_.size(), + parsed_key.Key().data(), parsed_key.Key().size(), 0) == 0) { + return true; + } + user_key_ = parsed_key.Key().ToString(); + user_value_ = parsed_meta_value.UserValue().ToString(); + return false; + } +private: + std::string pattern_; +}; + +class SetsIterator : public TypeIterator { +public: + SetsIterator(const rocksdb::ReadOptions& options, rocksdb::DB* db, + ColumnFamilyHandle* handle, + const std::string& pattern) + : TypeIterator(options, db, handle), pattern_(pattern) {} + ~SetsIterator() {} + + bool ShouldSkip() override { + auto type = static_cast(static_cast(raw_iter_->value()[0])); + if (type != DataType::kSets) { + return true; + } + ParsedSetsMetaValue parsed_meta_value(raw_iter_->value()); + if (parsed_meta_value.IsStale() || parsed_meta_value.Count() == 0) { + return true; + } + + ParsedBaseMetaKey parsed_key(raw_iter_->key().ToString()); + if (StringMatch(pattern_.data(), pattern_.size(), + parsed_key.Key().data(), parsed_key.Key().size(), 0) == 0) { + return true; + } + user_key_ = parsed_key.Key().ToString(); + user_value_ = parsed_meta_value.UserValue().ToString(); + return false; + } +private: + std::string pattern_; +}; + +class ZsetsIterator : public TypeIterator { +public: + ZsetsIterator(const rocksdb::ReadOptions& options, rocksdb::DB* db, + ColumnFamilyHandle* handle, + const std::string& pattern) + : TypeIterator(options, db, handle), pattern_(pattern) {} + ~ZsetsIterator() {} + + bool ShouldSkip() override { + auto type = static_cast(static_cast(raw_iter_->value()[0])); + if (type != DataType::kZSets) { + return true; + } + ParsedZSetsMetaValue parsed_meta_value(raw_iter_->value()); + if (parsed_meta_value.IsStale() || parsed_meta_value.Count() == 0) { + return true; + } + + ParsedBaseMetaKey parsed_key(raw_iter_->key().ToString()); + if (StringMatch(pattern_.data(), pattern_.size(), + parsed_key.Key().data(), parsed_key.Key().size(), 0) == 0) { + return true; + } + user_key_ = parsed_key.Key().ToString(); + user_value_ = parsed_meta_value.UserValue().ToString(); + return false; + } +private: + std::string pattern_; +}; + +class StreamsIterator : public TypeIterator { +public: + StreamsIterator(const rocksdb::ReadOptions& 
options, rocksdb::DB* db, + ColumnFamilyHandle* handle, + const std::string& pattern) + : TypeIterator(options, db, handle), pattern_(pattern) {} + ~StreamsIterator() {} + + bool ShouldSkip() override { + auto type = static_cast(static_cast(raw_iter_->value()[0])); + if (type != DataType::kStreams) { + return true; + } + ParsedStreamMetaValue parsed_meta_value(raw_iter_->value()); + if (parsed_meta_value.length() == 0) { + return true; + } + + ParsedBaseMetaKey parsed_key(raw_iter_->key().ToString()); + if (StringMatch(pattern_.data(), pattern_.size(), + parsed_key.Key().data(), parsed_key.Key().size(), 0) == 0) { + return true; + } + user_key_ = parsed_key.Key().ToString(); + // multiple class members defined in StreamMetaValue, + // so user_value_ just return rocksdb raw value + user_value_ = raw_iter_->value().ToString(); + return false; + } +private: + std::string pattern_; +}; + +/* + * This iterator is used for all types of meta data needed for iteration + */ +class AllIterator : public TypeIterator { + public: + AllIterator(const rocksdb::ReadOptions& options, rocksdb::DB* db, ColumnFamilyHandle* handle, + const std::string& pattern) + : TypeIterator(options, db, handle), pattern_(pattern) {} + ~AllIterator() {} + + bool ShouldSkip() override { + std::string user_value; + auto type = static_cast(static_cast(raw_iter_->value()[0])); + switch (type) { + case DataType::kZSets: + case DataType::kSets: + case DataType::kHashes: + case DataType::kStreams: { + ParsedBaseMetaValue parsed_meta_value(raw_iter_->value()); + user_value = parsed_meta_value.UserValue().ToString(); + if (parsed_meta_value.IsStale() || parsed_meta_value.Count() == 0) { + return true; + } + break; + } + + case DataType::kLists: { + ParsedListsMetaValue parsed_meta_list_value(raw_iter_->value()); + user_value = parsed_meta_list_value.UserValue().ToString(); + if (parsed_meta_list_value.IsStale() || parsed_meta_list_value.Count() == 0) { + return true; + } + break; + } + + default: { + ParsedStringsValue parsed_value(raw_iter_->value()); + user_value = parsed_value.UserValue().ToString(); + if (parsed_value.IsStale()) { + return true; + } + break; + } + } + + ParsedBaseMetaKey parsed_key(raw_iter_->key().ToString()); + if (StringMatch(pattern_.data(), pattern_.size(), parsed_key.Key().data(), parsed_key.Key().size(), 0) == 0) { + return true; + } + user_key_ = parsed_key.Key().ToString(); + user_value_ = user_value; + return false; + } + + private: + std::string pattern_; +}; +using IterSptr = std::shared_ptr; + +class MinMergeComparator { +public: + MinMergeComparator() = default; + bool operator() (IterSptr a, IterSptr b) { + + int a_len = a->Key().size(); + int b_len = b->Key().size(); + return a->Key().compare(b->Key()) > 0; + } +}; + +class MaxMergeComparator { +public: + MaxMergeComparator() = default; + bool operator() (IterSptr a, IterSptr b) { + int a_len = a->Key().size(); + int b_len = b->Key().size(); + return a->Key().compare(b->Key()) < 0; + } +}; + +using MergerMinIterHeap = rocksdb::BinaryHeap; +using MergerMaxIterHeap = rocksdb::BinaryHeap; + +class MergingIterator { +public: + MergingIterator(const std::vector& children) + : current_(nullptr), direction_(kForward) { + std::copy(children.begin(), children.end(), std::back_inserter(children_)); + for (const auto& child : children_) { + if (child->Valid()) { + min_heap_.push(child); + } + } + current_ = min_heap_.empty() ? 
+
+}  // end namespace storage
+#endif
diff --git a/tools/pika_migrate/src/storage/src/util.cc b/tools/pika_migrate/src/storage/src/util.cc
new file mode 100644
index 0000000000..82a4bf82b4
--- /dev/null
+++ b/tools/pika_migrate/src/storage/src/util.cc
@@ -0,0 +1,292 @@
+// Copyright (c) 2017-present, Qihoo, Inc. All rights reserved.
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree. An additional grant
+// of patent rights can be found in the PATENTS file in the same directory.
+
+#include <dirent.h>
+#include <sys/stat.h>
+
+#include <algorithm>
+#include <cerrno>
+#include <cmath>
+#include <cstdio>
+#include <cstdlib>
+#include <cstring>
+#include <memory>
+
+#include "pstd/include/pstd_string.h"
+#include "pstd/include/pika_codis_slot.h"
+#include "src/base_key_format.h"
+#include "src/base_data_key_format.h"
+#include "src/coding.h"
+#include "storage/storage_define.h"
+#include "storage/util.h"
+
+namespace storage {
+/* Convert a long long into a string. Returns the number of
+ * characters needed to represent the number.
+ * If the buffer is not big enough to store the string, 0 is returned.
+ *
+ * Based on the following article (that apparently does not provide a
+ * novel approach but only publicizes an already used technique):
+ *
+ * https://www.facebook.com/notes/facebook-engineering/three-optimization-tips-for-c/10151361643253920
+ *
+ * Modified in order to handle signed integers since the original code was
+ * designed for unsigned integers. */
+int Int64ToStr(char* dst, size_t dstlen, int64_t svalue) {
+  return pstd::ll2string(dst, dstlen, svalue);
+}
+
+/* Convert a string into a long long. Returns 1 if the string could be parsed
+ * into a (non-overflowing) long long, 0 otherwise. The value will be set to
+ * the parsed value when appropriate. */
+int StrToInt64(const char* s, size_t slen, int64_t* value) {
+  return pstd::string2int(s, slen, value);
+}
+
+/* Glob-style pattern matching. */
+int StringMatch(const char* pattern, uint64_t pattern_len, const char* str, uint64_t string_len, int nocase) {
+  return pstd::stringmatchlen(pattern, static_cast<int>(pattern_len), str, static_cast<int>(string_len), nocase);
+}
+
+int StrToLongDouble(const char* s, size_t slen, long double* ldval) {
+  char* pEnd;
+  std::string t(s, slen);
+  if (t.find(' ') != std::string::npos) {
+    return -1;
+  }
+  long double d = strtold(s, &pEnd);
+  if (pEnd != s + slen) {
+    return -1;
+  }
+
+  if (ldval) {
+    *ldval = d;
+  }
+  return 0;
+}
+
+int LongDoubleToStr(long double ldval, std::string* value) {
+  char buf[256];
+  int len;
+  if (std::isnan(ldval)) {
+    return -1;
+  } else if (std::isinf(ldval)) {
+    /* Libc in odd systems (Hi Solaris!) will format infinite in a
+     * different way, so better to handle it in an explicit way. */
+    if (ldval > 0) {
+      strcpy(buf, "inf");
+      len = 3;
+    } else {
+      strcpy(buf, "-inf");
+      len = 4;
+    }
+    // Infinity is still reported as an error to the caller; the formatted
+    // buffer above is intentionally never assigned to *value.
+    return -1;
+  } else {
+    /* We use 17 digits precision since with 128 bit floats that precision
+     * after rounding is able to represent most small decimal numbers in a
+     * way that is "non surprising" for the user (that is, most small
+     * decimal numbers will be represented in a way that when converted
+     * back into a string are exactly the same as what the user typed.) */
+    len = snprintf(buf, sizeof(buf), "%.17Lf", ldval);
+    /* Now remove trailing zeroes after the '.' */
+    if (strchr(buf, '.')) {
+      char* p = buf + len - 1;
+      while (*p == '0') {
+        p--;
+        len--;
+      }
+      if (*p == '.') {
+        len--;
+      }
+    }
+    value->assign(buf, len);
+    return 0;
+  }
+}
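+
+/* Worked example (added for illustration; not part of the original file):
+ * round-tripping values through the helpers above. Expected results are
+ * stated in comments and follow from the semantics documented above. */
+inline void NumericHelpersExample() {
+  char buf[32];
+  int len = Int64ToStr(buf, sizeof(buf), -12345);    // writes "-12345"; 0 would mean buffer too small
+  int64_t parsed = 0;
+  StrToInt64(buf, len, &parsed);                     // parses back to -12345
+  std::string repr;
+  LongDoubleToStr(3.14L, &repr);                     // trailing zeros trimmed, typically "3.14"
+  long double back = 0.0L;
+  StrToLongDouble(repr.data(), repr.size(), &back);  // back is approximately 3.14
+  (void)parsed;
+  (void)back;
+}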
+int do_mkdir(const char* path, mode_t mode) {
+  struct stat st;
+  int status = 0;
+
+  if (stat(path, &st) != 0) {
+    /* Directory does not exist. EEXIST for race condition */
+    if (mkdir(path, mode) != 0 && errno != EEXIST) {
+      status = -1;
+    }
+  } else if (!S_ISDIR(st.st_mode)) {
+    errno = ENOTDIR;
+    status = -1;
+  }
+
+  return (status);
+}
+
+/**
+** mkpath - ensure all directories in path exist
+** Algorithm takes the pessimistic view and works top-down to ensure
+** each directory in path exists, rather than optimistically creating
+** the last element and working backwards.
+*/
+int mkpath(const char* path, mode_t mode) {
+  char* pp;
+  char* sp;
+  int status;
+  char* copypath = strdup(path);
+
+  status = 0;
+  pp = copypath;
+  while (status == 0 && (sp = strchr(pp, '/')) != nullptr) {
+    if (sp != pp) {
+      /* Neither root nor double slash in path */
+      *sp = '\0';
+      status = do_mkdir(copypath, mode);
+      *sp = '/';
+    }
+    pp = sp + 1;
+  }
+  if (status == 0) {
+    status = do_mkdir(path, mode);
+  }
+  free(copypath);
+  return (status);
+}
+
+int delete_dir(const char* dirname) {
+  char chBuf[256];
+  DIR* dir = nullptr;
+  struct dirent* ptr;
+  int ret = 0;
+  dir = opendir(dirname);
+  if (nullptr == dir) {
+    return -1;
+  }
+  while ((ptr = readdir(dir)) != nullptr) {
+    ret = strcmp(ptr->d_name, ".");
+    if (0 == ret) {
+      continue;
+    }
+    ret = strcmp(ptr->d_name, "..");
+    if (0 == ret) {
+      continue;
+    }
+    snprintf(chBuf, sizeof(chBuf), "%s/%s", dirname, ptr->d_name);
+    ret = is_dir(chBuf);
+    if (0 == ret) {
+      // is dir
+      ret = delete_dir(chBuf);
+      if (0 != ret) {
+        return -1;
+      }
+    } else if (1 == ret) {
+      // is file
+      ret = remove(chBuf);
+      if (0 != ret) {
+        return -1;
+      }
+    }
+  }
+  (void)closedir(dir);
+  ret = remove(dirname);
+  if (0 != ret) {
+    return -1;
+  }
+  return 0;
+}
+
+int is_dir(const char* filename) {
+  struct stat buf;
+  int ret = stat(filename, &buf);
+  if (0 == ret) {
+    if ((buf.st_mode & S_IFDIR) != 0) {
+      // folder
+      return 0;
+    } else {
+      // file
+      return 1;
+    }
+  }
+  return -1;
+}
+
+int CalculateStartAndEndKey(const std::string& key, std::string* start_key, std::string* end_key) {
+  if (key.empty()) {
+    return 0;
+  }
+  size_t usize = kPrefixReserveLength + key.size() + kEncodedKeyDelimSize;
+  size_t nzero = std::count(key.begin(), key.end(), kNeedTransformCharacter);
+  usize += nzero;
+  auto dst = std::make_unique<char[]>(usize);
+  char* ptr = dst.get();
+  memset(ptr, kNeedTransformCharacter, kPrefixReserveLength);
+  ptr += kPrefixReserveLength;
+  ptr = storage::EncodeUserKey(Slice(key), ptr, nzero);
+  if (start_key) {
+    *start_key = std::string(dst.get(), ptr);
+  }
+  if (end_key) {
+    *end_key = std::string(dst.get(), ptr);
+    // The encoded key's last two characters are "\u0000\u0000", so bump
+    // end_key's final character to '\u0001' to get an exclusive upper bound.
+    end_key->back() = '\u0001';
+  }
+  return 0;
+}
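+
+/* Illustrative sketch (added; not part of the original file). The pair
+ * produced by CalculateStartAndEndKey() brackets every encoded entry of
+ * `key`: end_key differs from start_key only in its final byte, so it acts
+ * as an exclusive upper bound for a RocksDB Seek()/Next() scan. */
+inline bool StartEndKeyOrderedExample(const std::string& key) {
+  std::string start_key;
+  std::string end_key;
+  if (CalculateStartAndEndKey(key, &start_key, &end_key) != 0) {
+    return false;
+  }
+  // lexicographically start_key < end_key, since only the last byte was bumped
+  return start_key < end_key;
+}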
+// requires:
+//   1. pattern's length >= 2
+//   2. the tail character is '*'
+//   3. no other position may contain *, ?, [ or ]
+bool isTailWildcard(const std::string& pattern) {
+  if (pattern.size() < 2 || pattern.back() != '*') {
+    return false;
+  }
+  for (uint32_t idx = 0; idx < pattern.size() - 1; ++idx) {
+    if (pattern[idx] == '*' || pattern[idx] == '?' || pattern[idx] == '[' || pattern[idx] == ']') {
+      return false;
+    }
+  }
+  return true;
+}
+
+void GetFilepath(const char* path, const char* filename, char* filepath) {
+  strcpy(filepath, path);  // NOLINT
+  if (filepath[strlen(path) - 1] != '/') {
+    strcat(filepath, "/");  // NOLINT
+  }
+  strcat(filepath, filename);  // NOLINT
+}
+
+bool DeleteFiles(const char* path) {
+  DIR* dir;
+  struct dirent* dirinfo;
+  struct stat statbuf;
+  char filepath[256] = {0};
+  lstat(path, &statbuf);
+
+  if (S_ISREG(statbuf.st_mode)) {  // regular file
+    remove(path);
+  } else if (S_ISDIR(statbuf.st_mode)) {  // directory
+    if (!(dir = opendir(path))) {
+      return true;
+    }
+    while ((dirinfo = readdir(dir)) != nullptr) {
+      GetFilepath(path, dirinfo->d_name, filepath);
+      if (strcmp(dirinfo->d_name, ".") == 0 || strcmp(dirinfo->d_name, "..") == 0) {  // skip the special entries "." and ".."
+        continue;
+      }
+      DeleteFiles(filepath);
+      rmdir(filepath);
+    }
+    closedir(dir);
+  }
+  return false;
+}
+
+}  // namespace storage
diff --git a/tools/pika_migrate/src/storage/src/zsets_data_key_format.h b/tools/pika_migrate/src/storage/src/zsets_data_key_format.h
new file mode 100644
index 0000000000..3b721a7107
--- /dev/null
+++ b/tools/pika_migrate/src/storage/src/zsets_data_key_format.h
@@ -0,0 +1,127 @@
+// Copyright (c) 2017-present, Qihoo, Inc. All rights reserved.
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree. An additional grant
+// of patent rights can be found in the PATENTS file in the same directory.
+
+#ifndef SRC_ZSETS_DATA_KEY_FORMAT_H_
+#define SRC_ZSETS_DATA_KEY_FORMAT_H_
+
+#include "src/coding.h"
+#include "storage/storage_define.h"
+
+namespace storage {
+
+/* zset score to member data key format:
+ * | reserve1 | key | version | score | member | reserve2 |
+ * |    8B    |     |   8B    |  8B   |        |   16B    |
+ */
+class ZSetsScoreKey {
+ public:
+  ZSetsScoreKey(const Slice& key, uint64_t version, double score, const Slice& member)
+      : key_(key), version_(version), score_(score), member_(member) {}
+
+  ~ZSetsScoreKey() {
+    if (start_ != space_) {
+      delete[] start_;
+    }
+  }
+
+  Slice Encode() {
+    size_t meta_size = sizeof(reserve1_) + sizeof(version_) + sizeof(score_) + sizeof(reserve2_);
+    size_t usize = key_.size() + member_.size() + kEncodedKeyDelimSize;
+    size_t nzero = std::count(key_.data(), key_.data() + key_.size(), kNeedTransformCharacter);
+    usize += nzero;
+    size_t needed = meta_size + usize;
+    char* dst = nullptr;
+    if (needed <= sizeof(space_)) {
+      dst = space_;
+    } else {
+      dst = new char[needed];
+
+      // Need to allocate space, delete previous space
+      if (start_ != space_) {
+        delete[] start_;
+      }
+    }
+
+    start_ = dst;
+    // reserve1: 8 byte
+    memcpy(dst, reserve1_, sizeof(reserve1_));
+    dst += sizeof(reserve1_);
+    // key
+    dst = EncodeUserKey(key_, dst, nzero);
+    // version 8 byte
+    EncodeFixed64(dst, version_);
+    dst += sizeof(version_);
+    // score
+    const void* addr_score = reinterpret_cast<const void*>(&score_);
+    EncodeFixed64(dst, *reinterpret_cast<const uint64_t*>(addr_score));
+    dst += sizeof(score_);
+    // member
+    memcpy(dst, member_.data(), member_.size());
+    dst += member_.size();
+    // reserve2 16 byte
+    memcpy(dst, reserve2_, sizeof(reserve2_));
+    return Slice(start_, needed);
+  }
+
+ private:
+  char* start_ = nullptr;
+  char space_[200];
+  char reserve1_[8] = {0};
+  Slice key_;
+  uint64_t version_ = uint64_t(-1);
+  double score_ = 0.0;
+  Slice member_;
+  char reserve2_[16] = {0};
+};
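+
+// ---------------------------------------------------------------------------
+// Round-trip sketch (added for illustration; not part of the original file).
+// Encodes a score key with the layout documented above, then parses it back;
+// all names here are local to the example.
+// ---------------------------------------------------------------------------
+inline bool ZSetsScoreKeyRoundTripExample() {
+  ZSetsScoreKey score_key(Slice("mykey"), /*version=*/1, /*score=*/3.14, Slice("member"));
+  ParsedZSetsScoreKey parsed(score_key.Encode());
+  // the fixed-width fields and the transformed user key decode losslessly
+  return parsed.key() == Slice("mykey") && parsed.Version() == 1 &&
+         parsed.score() == 3.14 && parsed.member() == Slice("member");
+}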
+class ParsedZSetsScoreKey {
+ public:
+  explicit ParsedZSetsScoreKey(const std::string* key) {
+    const char* ptr = key->data();
+    const char* end_ptr = key->data() + key->size();
+    decode(ptr, end_ptr);
+  }
+
+  explicit ParsedZSetsScoreKey(const Slice& key) {
+    const char* ptr = key.data();
+    const char* end_ptr = key.data() + key.size();
+    decode(ptr, end_ptr);
+  }
+
+  void decode(const char* ptr, const char* end_ptr) {
+    const char* start = ptr;
+    // skip head reserve1_
+    ptr += sizeof(reserve1_);
+    // skip tail reserve2_
+    end_ptr -= sizeof(reserve2_);
+    // user key
+    ptr = DecodeUserKey(ptr, std::distance(ptr, end_ptr), &key_str_);
+    version_ = DecodeFixed64(ptr);
+    ptr += sizeof(version_);
+    uint64_t tmp = DecodeFixed64(ptr);
+    const void* ptr_tmp = reinterpret_cast<const void*>(&tmp);
+    score_ = *reinterpret_cast<const double*>(ptr_tmp);
+    ptr += sizeof(uint64_t);
+    member_ = Slice(ptr, std::distance(ptr, end_ptr));
+  }
+
+  Slice key() { return Slice(key_str_); }
+  uint64_t Version() const { return version_; }
+  double score() const { return score_; }
+  Slice member() { return member_; }
+
+ private:
+  std::string key_str_;
+  char reserve1_[8] = {0};
+  uint64_t version_ = uint64_t(-1);
+  char reserve2_[16] = {0};
+  double score_ = 0.0;
+  Slice member_;
+};
+
+}  // namespace storage
+#endif  // SRC_ZSETS_DATA_KEY_FORMAT_H_
diff --git a/tools/pika_migrate/src/storage/src/zsets_filter.h b/tools/pika_migrate/src/storage/src/zsets_filter.h
new file mode 100644
index 0000000000..629f12e669
--- /dev/null
+++ b/tools/pika_migrate/src/storage/src/zsets_filter.h
@@ -0,0 +1,146 @@
+// Copyright (c) 2017-present, Qihoo, Inc. All rights reserved.
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree. An additional grant
+// of patent rights can be found in the PATENTS file in the same directory.
+
+#ifndef SRC_ZSETS_FILTER_H_
+#define SRC_ZSETS_FILTER_H_
+
+#include <memory>
+#include <string>
+#include <vector>
+
+#include "rocksdb/compaction_filter.h"
+
+#include "base_filter.h"
+#include "base_meta_value_format.h"
+#include "zsets_data_key_format.h"
+
+namespace storage {
+
+class ZSetsScoreFilter : public rocksdb::CompactionFilter {
+ public:
+  ZSetsScoreFilter(rocksdb::DB* db, std::vector<rocksdb::ColumnFamilyHandle*>* handles_ptr, enum DataType type)
+      : db_(db), cf_handles_ptr_(handles_ptr), type_(type) {}
+
+  bool Filter(int level, const rocksdb::Slice& key, const rocksdb::Slice& value, std::string* new_value,
+              bool* value_changed) const override {
+    UNUSED(level);
+    UNUSED(value);
+    UNUSED(new_value);
+    UNUSED(value_changed);
+    ParsedZSetsScoreKey parsed_zsets_score_key(key);
+    TRACE("==========================START==========================");
+    TRACE("[ScoreFilter], key: %s, score = %lf, member = %s, version = %llu",
+          parsed_zsets_score_key.key().ToString().c_str(), parsed_zsets_score_key.score(),
+          parsed_zsets_score_key.member().ToString().c_str(), parsed_zsets_score_key.Version());
+
+    const char* ptr = key.data();
+    int key_size = key.size();
+    ptr = SeekUserkeyDelim(ptr + kPrefixReserveLength, key_size - kPrefixReserveLength);
+    std::string meta_key_enc(key.data(), std::distance(key.data(), ptr));
+    meta_key_enc.append(kSuffixReserveLength, kNeedTransformCharacter);
+
+    if (meta_key_enc != cur_key_) {
+      cur_key_ = meta_key_enc;
+      cur_meta_etime_ = 0;
+      cur_meta_version_ = 0;
+      meta_not_found_ = true;
+      std::string meta_value;
+      // The handles are destroyed when the database closes; in that case
+      // keep the current key/value.
+      if (cf_handles_ptr_->empty()) {
+        return false;
+      }
+      Status s = db_->Get(default_read_options_, (*cf_handles_ptr_)[0], cur_key_, &meta_value);
+      if (s.ok()) {
+        /*
+         * The elimination policy for keys of the Data type is that if the key
+         * type obtained from MetaCF is inconsistent with the key type in Data,
+         * it needs to be eliminated
+         */
+        auto type = static_cast<DataType>(static_cast<uint8_t>(meta_value[0]));
+        if (type != type_) {
+          return true;
+        }
+        ParsedZSetsMetaValue parsed_zsets_meta_value(&meta_value);
+        meta_not_found_ = false;
+        cur_meta_version_ = parsed_zsets_meta_value.Version();
+        cur_meta_etime_ = parsed_zsets_meta_value.Etime();
+      } else if (s.IsNotFound()) {
+        meta_not_found_ = true;
+      } else {
+        cur_key_ = "";
+        TRACE("Reserve[Get meta_key failed]");
+        return false;
+      }
+    }
+
+    if (meta_not_found_) {
+      TRACE("Drop[Meta key not exist]");
+      return true;
+    }
+
+    pstd::TimeType unix_time = pstd::NowMillis();
+    if (cur_meta_etime_ != 0 && cur_meta_etime_ < static_cast<uint64_t>(unix_time)) {
+      TRACE("Drop[Timeout]");
+      return true;
+    }
+    if (cur_meta_version_ > parsed_zsets_score_key.Version()) {
+      TRACE("Drop[score_key_version < cur_meta_version]");
+      return true;
+    } else {
+      TRACE("Reserve[score_key_version == cur_meta_version]");
+      return false;
+    }
+  }
+
+  /*
+  // Only judge by meta value ttl
+  virtual rocksdb::CompactionFilter::Decision FilterBlobByKey(int level, const Slice& key,
+      std::string* new_value, std::string* skip_until) const {
+    UNUSED(level);
+    UNUSED(new_value);
+    UNUSED(skip_until);
+    bool unused_value_changed;
+    bool should_remove = Filter(level, key, Slice{}, new_value, &unused_value_changed);
+    if (should_remove) {
+      return CompactionFilter::Decision::kRemove;
+    }
+    return CompactionFilter::Decision::kKeep;
+  }
+  */
+
+  const char* Name() const override { return "ZSetsScoreFilter"; }
+
+ private:
+  rocksdb::DB* db_ = nullptr;
+  std::vector<rocksdb::ColumnFamilyHandle*>* cf_handles_ptr_ = nullptr;
+  rocksdb::ReadOptions default_read_options_;
+  mutable std::string cur_key_;
+  mutable bool meta_not_found_ = false;
+  mutable uint64_t cur_meta_version_ = 0;
+  mutable uint64_t cur_meta_etime_ = 0;
+  enum DataType type_ = DataType::kNones;
+};
+
+class ZSetsScoreFilterFactory : public rocksdb::CompactionFilterFactory {
+ public:
+  ZSetsScoreFilterFactory(rocksdb::DB** db_ptr, std::vector<rocksdb::ColumnFamilyHandle*>* handles_ptr,
+                          enum DataType type)
+      : db_ptr_(db_ptr), cf_handles_ptr_(handles_ptr), type_(type) {}
+
+  std::unique_ptr<rocksdb::CompactionFilter> CreateCompactionFilter(
+      const rocksdb::CompactionFilter::Context& context) override {
+    return std::make_unique<ZSetsScoreFilter>(*db_ptr_, cf_handles_ptr_, type_);
+  }
+
+  const char* Name() const override { return "ZSetsScoreFilterFactory"; }
+
+ private:
+  rocksdb::DB** db_ptr_ = nullptr;
+  std::vector<rocksdb::ColumnFamilyHandle*>* cf_handles_ptr_ = nullptr;
+  enum DataType type_ = DataType::kNones;
+};
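+
+// ---------------------------------------------------------------------------
+// Registration sketch (added for illustration; not part of the original
+// header). A factory like this is wired into a column family's options;
+// `db_ptr`, `handles` and the options object are caller-owned assumptions,
+// and rocksdb::ColumnFamilyOptions is assumed visible via the includes.
+// ---------------------------------------------------------------------------
+inline void AttachZSetsScoreFilterExample(rocksdb::DB** db_ptr,
+                                          std::vector<rocksdb::ColumnFamilyHandle*>* handles,
+                                          rocksdb::ColumnFamilyOptions* score_cf_opts) {
+  // RocksDB invokes the factory on every compaction of this column family.
+  score_cf_opts->compaction_filter_factory =
+      std::make_shared<ZSetsScoreFilterFactory>(db_ptr, handles, DataType::kZSets);
+}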
+
+}  // namespace storage
+#endif  // SRC_ZSETS_FILTER_H_
diff --git a/tools/pika_migrate/src/storage/tests/CMakeLists.txt b/tools/pika_migrate/src/storage/tests/CMakeLists.txt
new file mode 100644
index 0000000000..09dc7f32cc
--- /dev/null
+++ b/tools/pika_migrate/src/storage/tests/CMakeLists.txt
@@ -0,0 +1,39 @@
+cmake_minimum_required(VERSION 3.18)
+
+include(GoogleTest)
+
+file(GLOB_RECURSE BLACKWINDOW_TEST_SOURCE "${PROJECT_SOURCE_DIR}/tests/*.cc")
+
+aux_source_directory(../src TEST_SRCS)
+
+add_compile_definitions(PIKA_ROOT_DIR="${CMAKE_SOURCE_DIR}")
+
+# set(EXECUTABLE_OUTPUT_PATH ${CMAKE_SOURCE_DIR}/build)
+foreach(blackwindow_test_source ${BLACKWINDOW_TEST_SOURCE})
+  get_filename_component(storage_test_filename ${blackwindow_test_source} NAME)
+  string(REPLACE ".cc" "" blackwindow_test_name ${storage_test_filename})
+
+  # Add the test target
+  add_executable(${blackwindow_test_name} ${blackwindow_test_source})
+  target_include_directories(${blackwindow_test_name}
+    PUBLIC ${CMAKE_SOURCE_DIR}/include
+    PUBLIC ${PROJECT_SOURCE_DIR}/include
+    PUBLIC ${PROJECT_SOURCE_DIR}/..
+    ${ROCKSDB_INCLUDE_DIR}
+    ${ROCKSDB_SOURCE_DIR}
+  )
+  add_dependencies(${blackwindow_test_name} gtest glog gflags ${LIBUNWIND_NAME})
+  target_link_libraries(${blackwindow_test_name}
+    PUBLIC ${GTEST_LIBRARY}
+    PUBLIC ${ROCKSDB_LIBRARY}
+    PUBLIC pstd
+    PUBLIC net
+    PUBLIC storage
+    PUBLIC ${GLOG_LIBRARY}
+    PUBLIC ${GFLAGS_LIBRARY}
+    PUBLIC ${LIBUNWIND_LIBRARY}
+  )
+  add_test(NAME ${blackwindow_test_name}
+    COMMAND ${blackwindow_test_name}
+    WORKING_DIRECTORY .)
+endforeach(blackwindow_test_source ${BLACKWINDOW_TEST_SOURCE})
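+
+# ---------------------------------------------------------------------------
+# Usage note (added for illustration; not part of the original file). Each
+# suite generated above is registered with CTest under its source file name,
+# so after configuring the build a single suite can be run with, e.g.
+#   ctest -R hashes_test --output-on-failure
+# (exact names depend on the sources present under tests/).
+# ---------------------------------------------------------------------------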
diff --git a/tools/pika_migrate/src/storage/tests/custom_comparator_test.cc b/tools/pika_migrate/src/storage/tests/custom_comparator_test.cc
new file mode 100644
index 0000000000..05b472e73e
--- /dev/null
+++ b/tools/pika_migrate/src/storage/tests/custom_comparator_test.cc
@@ -0,0 +1,158 @@
+// Copyright (c) 2017-present, Qihoo, Inc. All rights reserved.
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree. An additional grant
+// of patent rights can be found in the PATENTS file in the same directory.
+
+#include <gtest/gtest.h>
+
+#include <string>
+
+#include "src/custom_comparator.h"
+#include "src/redis.h"
+#include "src/zsets_data_key_format.h"
+#include "storage/storage.h"
+
+using namespace storage;
+
+// FindShortestSeparator
+TEST(ZSetScoreKeyComparator, FindShortestSeparatorTest) {
+  ZSetsScoreKeyComparatorImpl impl;
+
+  // ***************** Group 1 Test *****************
+  ZSetsScoreKey zsets_score_key_start_1("Axlgrep", 1557212501, 3.1415, "abc");
+  ZSetsScoreKey zsets_score_key_limit_1("Axlgreq", 1557212501, 3.1415, "abc");
+  std::string start_1 = zsets_score_key_start_1.Encode().ToString();
+  std::string limit_1 = zsets_score_key_limit_1.Encode().ToString();
+  std::string change_start_1 = start_1;
+  impl.FindShortestSeparator(&change_start_1, Slice(limit_1));
+  // impl.ParseAndPrintZSetsScoreKey("origin  start : ", start_1);
+  // impl.ParseAndPrintZSetsScoreKey("changed start : ", change_start_1);
+  // impl.ParseAndPrintZSetsScoreKey("limit         : ", limit_1);
+  // printf("**********************************************************************\n");
+  ASSERT_TRUE(impl.Compare(change_start_1, start_1) >= 0);
+  ASSERT_TRUE(impl.Compare(change_start_1, limit_1) < 0);
+
+  // ***************** Group 2 Test *****************
+  ZSetsScoreKey zsets_score_key_start_2("Axlgrep", 1557212501, 3.1314, "abc");
+  ZSetsScoreKey zsets_score_key_limit_2("Axlgrep", 1557212502, 3.1314, "abc");
+  std::string start_2 = zsets_score_key_start_2.Encode().ToString();
+  std::string limit_2 = zsets_score_key_limit_2.Encode().ToString();
+  std::string change_start_2 = start_2;
+  impl.FindShortestSeparator(&change_start_2, Slice(limit_2));
+  // impl.ParseAndPrintZSetsScoreKey("origin  start : ", start_2);
+  // impl.ParseAndPrintZSetsScoreKey("changed start : ", change_start_2);
+  // impl.ParseAndPrintZSetsScoreKey("limit         : ", limit_2);
+  // printf("**********************************************************************\n");
+  ASSERT_TRUE(impl.Compare(change_start_2, start_2) >= 0);
+  ASSERT_TRUE(impl.Compare(change_start_2, limit_2) < 0);
+
+  // ***************** Group 3 Test *****************
+  ZSetsScoreKey zsets_score_key_start_3("Axlgrep", 1557212501, 3.1415, "abc");
+  ZSetsScoreKey zsets_score_key_limit_3("Axlgrep", 1557212501, 4.1415, "abc");
+  std::string start_3 = zsets_score_key_start_3.Encode().ToString();
+  std::string limit_3 = zsets_score_key_limit_3.Encode().ToString();
+  std::string change_start_3 = start_3;
+  impl.FindShortestSeparator(&change_start_3, Slice(limit_3));
+  // impl.ParseAndPrintZSetsScoreKey("origin  start : ", start_3);
+  // impl.ParseAndPrintZSetsScoreKey("changed start : ", change_start_3);
+  // impl.ParseAndPrintZSetsScoreKey("limit         : ", limit_3);
+  // printf("**********************************************************************\n");
+  ASSERT_TRUE(impl.Compare(change_start_3, start_3) >= 0);
+  ASSERT_TRUE(impl.Compare(change_start_3, limit_3) < 0);
+
+  // ***************** Group 4 Test *****************
+  ZSetsScoreKey zsets_score_key_start_4("Axlgrep", 1557212501, 3.1415, "abc");
+  ZSetsScoreKey zsets_score_key_limit_4("Axlgrep", 1557212501, 5.1415, "abc");
+  std::string start_4 = zsets_score_key_start_4.Encode().ToString();
+  std::string limit_4 = zsets_score_key_limit_4.Encode().ToString();
+  std::string change_start_4 = start_4;
+  impl.FindShortestSeparator(&change_start_4, Slice(limit_4));
+  // impl.ParseAndPrintZSetsScoreKey("origin  start : ", start_4);
+  // impl.ParseAndPrintZSetsScoreKey("changed start : ", change_start_4);
+  // impl.ParseAndPrintZSetsScoreKey("limit         : ", limit_4);
+  // 
printf("**********************************************************************\n"); + ASSERT_TRUE(impl.Compare(change_start_4, start_4) >= 0); + ASSERT_TRUE(impl.Compare(change_start_4, limit_4) < 0); + + // ***************** Group 5 Test ***************** + ZSetsScoreKey zsets_score_key_start_5("Axlgrep", 1557212501, 3.1415, "abc"); + ZSetsScoreKey zsets_score_key_limit_5("Axlgrep", 1557212501, 3.1415, "abd"); + std::string start_5 = zsets_score_key_start_5.Encode().ToString(); + std::string limit_5 = zsets_score_key_limit_5.Encode().ToString(); + std::string change_start_5 = start_5; + impl.FindShortestSeparator(&change_start_5, Slice(limit_5)); + // impl.ParseAndPrintZSetsScoreKey("origin start : ", start_5); + // impl.ParseAndPrintZSetsScoreKey("changed start : ", change_start_5); + // impl.ParseAndPrintZSetsScoreKey("limit : ", limit_5); + // printf("**********************************************************************\n"); + ASSERT_TRUE(impl.Compare(change_start_5, start_5) >= 0); + ASSERT_TRUE(impl.Compare(change_start_5, limit_5) < 0); + + // ***************** Group 6 Test ***************** + ZSetsScoreKey zsets_score_key_start_6("Axlgrep", 1557212501, 3.1415, "abccccccc"); + ZSetsScoreKey zsets_score_key_limit_6("Axlgrep", 1557212501, 3.1415, "abd"); + std::string start_6 = zsets_score_key_start_6.Encode().ToString(); + std::string limit_6 = zsets_score_key_limit_6.Encode().ToString(); + std::string change_start_6 = start_6; + impl.FindShortestSeparator(&change_start_6, Slice(limit_6)); + // impl.ParseAndPrintZSetsScoreKey("origin start : ", start_6); + // impl.ParseAndPrintZSetsScoreKey("changed start : ", change_start_6); + // impl.ParseAndPrintZSetsScoreKey("limit : ", limit_6); + // printf("**********************************************************************\n"); + ASSERT_TRUE(impl.Compare(change_start_6, start_6) >= 0); + ASSERT_TRUE(impl.Compare(change_start_6, limit_6) < 0); + + // ***************** Group 7 Test ***************** + ZSetsScoreKey zsets_score_key_start_7("Axlgrep", 1557212501, 3.1415, "abcccaccc"); + ZSetsScoreKey zsets_score_key_limit_7("Axlgrep", 1557212501, 3.1415, "abccccccc"); + std::string start_7 = zsets_score_key_start_7.Encode().ToString(); + std::string limit_7 = zsets_score_key_limit_7.Encode().ToString(); + std::string change_start_7 = start_7; + impl.FindShortestSeparator(&change_start_7, Slice(limit_7)); + // impl.ParseAndPrintZSetsScoreKey("origin start : ", start_7); + // impl.ParseAndPrintZSetsScoreKey("changed start : ", change_start_7); + // impl.ParseAndPrintZSetsScoreKey("limit : ", limit_7); + // printf("**********************************************************************\n"); + ASSERT_TRUE(impl.Compare(change_start_7, start_7) >= 0); + ASSERT_TRUE(impl.Compare(change_start_7, limit_7) < 0); + + // ***************** Group 8 Test ***************** + ZSetsScoreKey zsets_score_key_start_8("Axlgrep", 1557212501, 3.1415, ""); + ZSetsScoreKey zsets_score_key_limit_8("Axlgrep", 1557212501, 3.1415, "abccccccc"); + std::string start_8 = zsets_score_key_start_8.Encode().ToString(); + std::string limit_8 = zsets_score_key_limit_8.Encode().ToString(); + std::string change_start_8 = start_8; + impl.FindShortestSeparator(&change_start_8, Slice(limit_8)); + // impl.ParseAndPrintZSetsScoreKey("origin start : ", start_8); + // impl.ParseAndPrintZSetsScoreKey("changed start : ", change_start_8); + // impl.ParseAndPrintZSetsScoreKey("limit : ", limit_8); + // printf("**********************************************************************\n"); + 
ASSERT_TRUE(impl.Compare(change_start_8, start_8) >= 0); + ASSERT_TRUE(impl.Compare(change_start_8, limit_8) < 0); + + // ***************** Group 9 Test ***************** + ZSetsScoreKey zsets_score_key_start_9("Axlgrep", 1557212501, 3.1415, "aaaa"); + ZSetsScoreKey zsets_score_key_limit_9("Axlgrep", 1557212501, 4.1415, ""); + std::string start_9 = zsets_score_key_start_9.Encode().ToString(); + std::string limit_9 = zsets_score_key_limit_9.Encode().ToString(); + std::string change_start_9 = start_9; + impl.FindShortestSeparator(&change_start_9, Slice(limit_9)); + // impl.ParseAndPrintZSetsScoreKey("origin start : ", start_9); + // impl.ParseAndPrintZSetsScoreKey("changed start : ", change_start_9); + // impl.ParseAndPrintZSetsScoreKey("limit : ", limit_9); + // printf("**********************************************************************\n"); + ASSERT_TRUE(impl.Compare(change_start_9, start_9) >= 0); + ASSERT_TRUE(impl.Compare(change_start_9, limit_9) < 0); + + // ***************** Group 10 Test ***************** + ZSetsScoreKey zsets_score_key_start_10("Axlgrep", 1557212502, 3.1415, "abc"); + ZSetsScoreKey zsets_score_key_limit_10("Axlgrep", 1557212752, 3.1415, "abc"); + std::string start_10 = zsets_score_key_start_10.Encode().ToString(); + std::string limit_10 = zsets_score_key_limit_10.Encode().ToString(); + ASSERT_TRUE(impl.Compare(start_10, limit_10) < 0); +} + +int main(int argc, char** argv) { + ::testing::InitGoogleTest(&argc, argv); + return RUN_ALL_TESTS(); +} diff --git a/tools/pika_migrate/src/storage/tests/hashes_filter_test.cc b/tools/pika_migrate/src/storage/tests/hashes_filter_test.cc new file mode 100644 index 0000000000..b3fe587504 --- /dev/null +++ b/tools/pika_migrate/src/storage/tests/hashes_filter_test.cc @@ -0,0 +1,211 @@ +// Copyright (c) 2017-present, Qihoo, Inc. All rights reserved. +// This source code is licensed under the BSD-style license found in the +// LICENSE file in the root directory of this source tree. An additional grant +// of patent rights can be found in the PATENTS file in the same directory. 
+
+#include <gtest/gtest.h>
+// #include
+// #include
+
+// #include "src/redis.h"
+// #include "src/base_filter.h"
+// #include "storage/storage.h"
+
+// using namespace storage;
+
+// // Filter
+// TEST(HashesFilterTest, FilterTest) {
+//   rocksdb::DB* meta_db;
+//   std::string db_path = "./db/hash_meta";
+//   std::vector<rocksdb::ColumnFamilyHandle*> handles;
+
+//   storage::Options options;
+//   options.create_if_missing = true;
+//   rocksdb::Status s = rocksdb::DB::Open(options, db_path, &meta_db);
+
+//   if (s.ok()) {
+//     // create column family
+//     rocksdb::ColumnFamilyHandle* cf;
+//     s = meta_db->CreateColumnFamily(rocksdb::ColumnFamilyOptions(),
+//                                     "data_cf", &cf);
+//     ASSERT_TRUE(s.ok());
+//     // close DB
+//     delete cf;
+//     delete meta_db;
+//   }
+
+//   // Open
+//   rocksdb::ColumnFamilyOptions meta_cf_ops(options);
+//   rocksdb::ColumnFamilyOptions data_cf_ops(options);
+//   std::vector<rocksdb::ColumnFamilyDescriptor> column_families;
+//   // Meta CF
+//   column_families.push_back(rocksdb::ColumnFamilyDescriptor(
+//       rocksdb::kDefaultColumnFamilyName, meta_cf_ops));
+//   // Data CF
+//   column_families.push_back(rocksdb::ColumnFamilyDescriptor(
+//       "data_cf", data_cf_ops));
+
+//   s = rocksdb::DB::Open(options, db_path, column_families, &handles, &meta_db);
+//   ASSERT_TRUE(s.ok());
+
+//   char str[4];
+//   bool filter_result;
+//   bool value_changed;
+//   int32_t version = 0;
+//   std::string new_value;
+
+//   /*************** TEST META FILTER ***************/
+//   HashesMetaFilter* hashes_meta_filter = new HashesMetaFilter();
+//   ASSERT_TRUE(hashes_meta_filter != nullptr);
+
+//   // Timeout timestamp is not set, but it's an empty hash table.
+//   EncodeFixed32(str, 0);
+//   HashesMetaValue tmf_meta_value1(std::string(str, sizeof(int32_t)));
+//   tmf_meta_value1.UpdateVersion();
+//   std::this_thread::sleep_for(std::chrono::milliseconds(1000));
+//   filter_result = hashes_meta_filter->Filter(0, "FILTER_TEST_KEY",
+//       tmf_meta_value1.Encode(), &new_value, &value_changed);
+//   ASSERT_EQ(filter_result, true);
+
+//   // Timeout timestamp is not set, it's not an empty hash table.
+//   EncodeFixed32(str, 1);
+//   HashesMetaValue tmf_meta_value2(std::string(str, sizeof(int32_t)));
+//   tmf_meta_value2.UpdateVersion();
+//   std::this_thread::sleep_for(std::chrono::milliseconds(1000));
+//   filter_result = hashes_meta_filter->Filter(0, "FILTER_TEST_KEY",
+//       tmf_meta_value2.Encode(), &new_value, &value_changed);
+//   ASSERT_EQ(filter_result, false);
+
+//   // Timeout timestamp is set, but not expired.
+//   EncodeFixed32(str, 1);
+//   HashesMetaValue tmf_meta_value3(std::string(str, sizeof(int32_t)));
+//   tmf_meta_value3.UpdateVersion();
+//   tmf_meta_value3.SetRelativeTimestamp(3);
+//   std::this_thread::sleep_for(std::chrono::milliseconds(1000));
+//   filter_result = hashes_meta_filter->Filter(0, "FILTER_TEST_KEY",
+//       tmf_meta_value3.Encode(), &new_value, &value_changed);
+//   ASSERT_EQ(filter_result, false);
+
+//   // Timeout timestamp is set, already expired.
+//   EncodeFixed32(str, 1);
+//   HashesMetaValue tmf_meta_value4(std::string(str, sizeof(int32_t)));
+//   tmf_meta_value4.UpdateVersion();
+//   tmf_meta_value4.SetRelativeTimestamp(1);
+//   std::this_thread::sleep_for(std::chrono::milliseconds(2000));
+//   filter_result = hashes_meta_filter->Filter(0, "FILTER_TEST_KEY",
+//       tmf_meta_value4.Encode(), &new_value, &value_changed);
+//   ASSERT_EQ(filter_result, true);
+//   delete hashes_meta_filter;
+
+//   /*************** TEST DATA FILTER ***************/
+
+//   // No timeout is set, version not outmoded.
+// HashesDataFilter* hashes_data_filter1 +// = new HashesDataFilter(meta_db, &handles); +// ASSERT_TRUE(hashes_data_filter1 != nullptr); +// EncodeFixed32(str, 1); +// HashesMetaValue tdf_meta_value1(std::string(str, sizeof(int32_t))); +// version = tdf_meta_value1.UpdateVersion(); +// s = meta_db->Put(rocksdb::WriteOptions(), handles[0], +// "FILTER_TEST_KEY", tdf_meta_value1.Encode()); +// ASSERT_TRUE(s.ok()); +// HashesDataKey tdf_data_key1("FILTER_TEST_KEY", version, "FILTER_TEST_FIELD"); +// filter_result = hashes_data_filter1->Filter(0, tdf_data_key1.Encode(), +// "FILTER_TEST_VALUE", &new_value, &value_changed); +// ASSERT_EQ(filter_result, false); +// s = meta_db->Delete(rocksdb::WriteOptions(), +// handles[0], "FILTER_TEST_KEY"); +// ASSERT_TRUE(s.ok()); +// delete hashes_data_filter1; + +// // timeout timestamp is set, but not timeout. +// HashesDataFilter* hashes_data_filter2 +// = new HashesDataFilter(meta_db, &handles); +// ASSERT_TRUE(hashes_data_filter2 != nullptr); +// EncodeFixed32(str, 1); +// HashesMetaValue tdf_meta_value2(std::string(str, sizeof(int32_t))); +// version = tdf_meta_value2.UpdateVersion(); +// tdf_meta_value2.SetRelativeTimestamp(1); +// s = meta_db->Put(rocksdb::WriteOptions(), handles[0], +// "FILTER_TEST_KEY", tdf_meta_value2.Encode()); +// ASSERT_TRUE(s.ok()); +// HashesDataKey tdf_data_key2("FILTER_TEST_KEY", version, "FILTER_TEST_FIELD"); +// filter_result = hashes_data_filter2->Filter(0, tdf_data_key2.Encode(), +// "FILTER_TEST_VALUE", &new_value, &value_changed); +// ASSERT_EQ(filter_result, false); +// s = meta_db->Delete(rocksdb::WriteOptions(), +// handles[0], "FILTER_TEST_KEY"); +// ASSERT_TRUE(s.ok()); +// delete hashes_data_filter2; + +// // timeout timestamp is set, already timeout. +// HashesDataFilter* hashes_data_filter3 +// = new HashesDataFilter(meta_db, &handles); +// ASSERT_TRUE(hashes_data_filter3 != nullptr); +// EncodeFixed32(str, 1); +// HashesMetaValue tdf_meta_value3(std::string(str, sizeof(int32_t))); +// version = tdf_meta_value3.UpdateVersion(); +// tdf_meta_value3.SetRelativeTimestamp(1); +// s = meta_db->Put(rocksdb::WriteOptions(), handles[0], +// "FILTER_TEST_KEY", tdf_meta_value3.Encode()); +// ASSERT_TRUE(s.ok()); +// std::this_thread::sleep_for(std::chrono::milliseconds(2000)); +// HashesDataKey tdf_data_key3("FILTER_TEST_KEY", version, "FILTER_TEST_FIELD"); +// filter_result = hashes_data_filter3->Filter(0, tdf_data_key3.Encode(), +// "FILTER_TEST_VALUE", &new_value, &value_changed); +// ASSERT_EQ(filter_result, true); +// s = meta_db->Delete(rocksdb::WriteOptions(), +// handles[0], "FILTER_TEST_KEY"); +// ASSERT_TRUE(s.ok()); +// delete hashes_data_filter3; + +// // No timeout is set, version outmoded. 
+// HashesDataFilter* hashes_data_filter4 +// = new HashesDataFilter(meta_db, &handles); +// ASSERT_TRUE(hashes_data_filter4 != nullptr); +// EncodeFixed32(str, 1); +// HashesMetaValue tdf_meta_value4(std::string(str, sizeof(int32_t))); +// version = tdf_meta_value4.UpdateVersion(); +// s = meta_db->Put(rocksdb::WriteOptions(), handles[0], +// "FILTER_TEST_KEY", tdf_meta_value4.Encode()); +// ASSERT_TRUE(s.ok()); +// HashesDataKey tdf_data_key4("FILTER_TEST_KEY", version, "FILTER_TEST_FIELD"); +// version = tdf_meta_value4.UpdateVersion(); +// s = meta_db->Put(rocksdb::WriteOptions(), handles[0], +// "FILTER_TEST_KEY", tdf_meta_value4.Encode()); +// ASSERT_TRUE(s.ok()); +// filter_result = hashes_data_filter4->Filter(0, tdf_data_key4.Encode(), +// "FILTER_TEST_VALUE", &new_value, &value_changed); +// ASSERT_EQ(filter_result, true); +// s = meta_db->Delete(rocksdb::WriteOptions(), +// handles[0], "FILTER_TEST_KEY"); +// ASSERT_TRUE(s.ok()); +// delete hashes_data_filter4; + +// // Hash table meta data has been clear. +// HashesDataFilter* hashes_data_filter5 +// = new HashesDataFilter(meta_db, &handles); +// ASSERT_TRUE(hashes_data_filter5 != nullptr); +// EncodeFixed32(str, 1); +// HashesMetaValue tdf_meta_value5(std::string(str, sizeof(int32_t))); +// version = tdf_meta_value5.UpdateVersion(); +// s = meta_db->Put(rocksdb::WriteOptions(), handles[0], +// "FILTER_TEST_KEY", tdf_meta_value5.Encode()); +// ASSERT_TRUE(s.ok()); +// HashesDataKey tdf_data_key5("FILTER_TEST_KEY", version, "FILTER_TEST_FIELD"); +// s = meta_db->Delete(rocksdb::WriteOptions(), +// handles[0], "FILTER_TEST_KEY"); +// ASSERT_TRUE(s.ok()); +// filter_result = hashes_data_filter5->Filter(0, tdf_data_key5.Encode(), +// "FILTER_TEST_VALUE", &new_value, &value_changed); +// ASSERT_EQ(filter_result, true); +// delete hashes_data_filter5; + +// // Delete Meta db +// delete meta_db; +// } + +int main(int argc, char** argv) { + ::testing::InitGoogleTest(&argc, argv); + return RUN_ALL_TESTS(); +} diff --git a/tools/pika_migrate/src/storage/tests/hashes_test.cc b/tools/pika_migrate/src/storage/tests/hashes_test.cc new file mode 100644 index 0000000000..8ee0f0490a --- /dev/null +++ b/tools/pika_migrate/src/storage/tests/hashes_test.cc @@ -0,0 +1,2445 @@ +// Copyright (c) 2017-present, Qihoo, Inc. All rights reserved. +// This source code is licensed under the BSD-style license found in the +// LICENSE file in the root directory of this source tree. An additional grant +// of patent rights can be found in the PATENTS file in the same directory. 
+
+#include <gtest/gtest.h>
+
+#include <sys/stat.h>
+
+#include <algorithm>
+#include <chrono>
+#include <map>
+#include <string>
+#include <thread>
+#include <vector>
+
+#include "glog/logging.h"
+
+#include "pstd/include/pika_codis_slot.h"
+#include "pstd/include/env.h"
+#include "storage/storage.h"
+#include "storage/util.h"
+
+using namespace storage;
+
+class HashesTest : public ::testing::Test {
+ public:
+  HashesTest() = default;
+  ~HashesTest() override = default;
+
+  void SetUp() override {
+    std::string path = "./db/hashes";
+    pstd::DeleteDirIfExist(path);
+    mkdir(path.c_str(), 0755);
+    storage_options.options.create_if_missing = true;
+    s = db.Open(storage_options, path);
+  }
+
+  void TearDown() override {
+    std::string path = "./db/hashes";
+    DeleteFiles(path.c_str());
+  }
+
+  static void SetUpTestSuite() {}
+  static void TearDownTestSuite() {}
+
+  StorageOptions storage_options;
+  storage::Storage db;
+  storage::Status s;
+};
+
+static bool field_value_match(storage::Storage* const db, const Slice& key,
+                              const std::vector<storage::FieldValue>& expect_field_value) {
+  std::vector<storage::FieldValue> field_value_out;
+  Status s = db->HGetall(key, &field_value_out);
+  if (!s.ok() && !s.IsNotFound()) {
+    return false;
+  }
+  if (field_value_out.size() != expect_field_value.size()) {
+    return false;
+  }
+  if (s.IsNotFound() && expect_field_value.empty()) {
+    return true;
+  }
+  for (const auto& field_value : expect_field_value) {
+    if (find(field_value_out.begin(), field_value_out.end(), field_value) == field_value_out.end()) {
+      return false;
+    }
+  }
+  return true;
+}
+
+static bool field_value_match(const std::vector<storage::FieldValue>& field_value_out,
+                              const std::vector<storage::FieldValue>& expect_field_value) {
+  if (field_value_out.size() != expect_field_value.size()) {
+    return false;
+  }
+  for (const auto& field_value : expect_field_value) {
+    if (find(field_value_out.begin(), field_value_out.end(), field_value) == field_value_out.end()) {
+      return false;
+    }
+  }
+  return true;
+}
+
+static bool size_match(storage::Storage* const db, const Slice& key, int32_t expect_size) {
+  int32_t size = 0;
+  Status s = db->HLen(key, &size);
+  if (!s.ok() && !s.IsNotFound()) {
+    return false;
+  }
+  if (s.IsNotFound() && (expect_size == 0)) {
+    return true;
+  }
+  return size == expect_size;
+}
+
+static bool make_expired(storage::Storage* const db, const Slice& key) {
+  std::map<storage::DataType, storage::Status> type_status;
+  int ret = db->Expire(key, 1);
+  if ((ret == 0) || !type_status[storage::DataType::kHashes].ok()) {
+    return false;
+  }
+  std::this_thread::sleep_for(std::chrono::milliseconds(2000));
+  return true;
+}
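+
+// ---------------------------------------------------------------------------
+// Sketch test (added for illustration; not one of the original cases). Shows
+// how the helpers above compose: seed a hash, assert size and contents, then
+// expire it and assert emptiness. Key and field names are example-local.
+// ---------------------------------------------------------------------------
+TEST_F(HashesTest, HelperUsageSketch) {
+  std::vector<storage::FieldValue> fvs = {{"F1", "V1"}, {"F2", "V2"}};
+  s = db.HMSet("HELPER_SKETCH_KEY", fvs);
+  ASSERT_TRUE(s.ok());
+  ASSERT_TRUE(size_match(&db, "HELPER_SKETCH_KEY", 2));
+  ASSERT_TRUE(field_value_match(&db, "HELPER_SKETCH_KEY", fvs));
+  // after expiry the key reads as not found, i.e. size 0
+  ASSERT_TRUE(make_expired(&db, "HELPER_SKETCH_KEY"));
+  ASSERT_TRUE(size_match(&db, "HELPER_SKETCH_KEY", 0));
+}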
+
+// HDel
+TEST_F(HashesTest, HDel) {
+  int32_t ret = 0;
+  std::vector<storage::FieldValue> fvs;
+  fvs.push_back({"TEST_FIELD1", "TEST_VALUE1"});
+  fvs.push_back({"TEST_FIELD2", "TEST_VALUE2"});
+  fvs.push_back({"TEST_FIELD3", "TEST_VALUE3"});
+  fvs.push_back({"TEST_FIELD4", "TEST_VALUE4"});
+
+  s = db.HMSet("HDEL_KEY", fvs);
+  ASSERT_TRUE(s.ok());
+
+  std::vector<std::string> fields{"TEST_FIELD1", "TEST_FIELD2", "TEST_FIELD3", "TEST_FIElD2", "TEST_NOT_EXIST_FIELD"};
+  s = db.HDel("HDEL_KEY", fields, &ret);
+  ASSERT_TRUE(s.ok());
+  ASSERT_EQ(ret, 3);
+
+  s = db.HLen("HDEL_KEY", &ret);
+  ASSERT_TRUE(s.ok());
+  ASSERT_EQ(ret, 1);
+
+  // Delete a hash table that does not exist
+  s = db.HDel("HDEL_NOT_EXIST_KEY", fields, &ret);
+  ASSERT_TRUE(s.ok());
+  ASSERT_EQ(ret, 0);
+
+  // Delete a timed-out hash table
+  s = db.HMSet("HDEL_TIMEOUT_KEY", fvs);
+  ASSERT_TRUE(s.ok());
+
+  std::map<storage::DataType, storage::Status> type_status;
+  db.Expire("HDEL_TIMEOUT_KEY", 1);
+  ASSERT_TRUE(type_status[storage::DataType::kHashes].ok());
+  std::this_thread::sleep_for(std::chrono::milliseconds(2000));
+  s = db.HDel("HDEL_TIMEOUT_KEY", fields, &ret);
+  ASSERT_TRUE(s.ok());
+  ASSERT_EQ(ret, 0);
+}
+
+// HExists
+TEST_F(HashesTest, HExistsTest) {
+  int32_t ret;
+  s = db.HSet("HEXIST_KEY", "HEXIST_FIELD", "HEXIST_VALUE", &ret);
+  ASSERT_TRUE(s.ok());
+
+  s = db.HExists("HEXIST_KEY", "HEXIST_FIELD");
+  ASSERT_TRUE(s.ok());
+
+  // If key does not exist.
+  s = db.HExists("HEXIST_NOT_EXIST_KEY", "HEXIST_FIELD");
+  ASSERT_TRUE(s.IsNotFound());
+
+  // If field is not present in the hash
+  s = db.HExists("HEXIST_KEY", "HEXIST_NOT_EXIST_FIELD");
+  ASSERT_TRUE(s.IsNotFound());
+}
+
+// HGet
+TEST_F(HashesTest, HGetTest) {
+  int32_t ret = 0;
+  std::string value;
+  s = db.HSet("HGET_KEY", "HGET_TEST_FIELD", "HGET_TEST_VALUE", &ret);
+  ASSERT_TRUE(s.ok());
+  ASSERT_EQ(ret, 1);
+  s = db.HGet("HGET_KEY", "HGET_TEST_FIELD", &value);
+  ASSERT_TRUE(s.ok());
+  ASSERT_EQ(value, "HGET_TEST_VALUE");
+
+  // If key does not exist.
+  s = db.HGet("HGET_NOT_EXIST_KEY", "HGET_TEST_FIELD", &value);
+  ASSERT_TRUE(s.IsNotFound());
+
+  // If field is not present in the hash
+  s = db.HGet("HGET_KEY", "HGET_NOT_EXIST_FIELD", &value);
+  ASSERT_TRUE(s.IsNotFound());
+}
+
+// HGetall
+TEST_F(HashesTest, HGetall) {
+  int32_t ret = 0;
+  std::vector<storage::FieldValue> mid_fvs_in;
+  mid_fvs_in.push_back({"MID_TEST_FIELD1", "MID_TEST_VALUE1"});
+  mid_fvs_in.push_back({"MID_TEST_FIELD2", "MID_TEST_VALUE2"});
+  mid_fvs_in.push_back({"MID_TEST_FIELD3", "MID_TEST_VALUE3"});
+  s = db.HMSet("B_HGETALL_KEY", mid_fvs_in);
+  ASSERT_TRUE(s.ok());
+
+  std::vector<storage::FieldValue> fvs_out;
+  s = db.HGetall("B_HGETALL_KEY", &fvs_out);
+  ASSERT_TRUE(s.ok());
+  ASSERT_EQ(fvs_out.size(), 3);
+  ASSERT_EQ(fvs_out[0].field, "MID_TEST_FIELD1");
+  ASSERT_EQ(fvs_out[0].value, "MID_TEST_VALUE1");
+  ASSERT_EQ(fvs_out[1].field, "MID_TEST_FIELD2");
+  ASSERT_EQ(fvs_out[1].value, "MID_TEST_VALUE2");
+  ASSERT_EQ(fvs_out[2].field, "MID_TEST_FIELD3");
+  ASSERT_EQ(fvs_out[2].value, "MID_TEST_VALUE3");
+
+  // Insert some kvs whose position is above the "mid kv"
+  std::vector<storage::FieldValue> pre_fvs_in;
+  pre_fvs_in.push_back({"PRE_TEST_FIELD1", "PRE_TEST_VALUE1"});
+  pre_fvs_in.push_back({"PRE_TEST_FIELD2", "PRE_TEST_VALUE2"});
+  pre_fvs_in.push_back({"PRE_TEST_FIELD3", "PRE_TEST_VALUE3"});
+  s = db.HMSet("A_HGETALL_KEY", pre_fvs_in);
+  ASSERT_TRUE(s.ok());
+  fvs_out.clear();
+  s = db.HGetall("B_HGETALL_KEY", &fvs_out);
+  ASSERT_TRUE(s.ok());
+  ASSERT_EQ(fvs_out.size(), 3);
+  ASSERT_EQ(fvs_out[0].field, "MID_TEST_FIELD1");
+  ASSERT_EQ(fvs_out[0].value, "MID_TEST_VALUE1");
+  ASSERT_EQ(fvs_out[1].field, "MID_TEST_FIELD2");
+  ASSERT_EQ(fvs_out[1].value, "MID_TEST_VALUE2");
+  ASSERT_EQ(fvs_out[2].field, "MID_TEST_FIELD3");
+  ASSERT_EQ(fvs_out[2].value, "MID_TEST_VALUE3");
+
+  // Insert some kvs whose position is below the "mid kv"
+  std::vector<storage::FieldValue> suf_fvs_in;
+  suf_fvs_in.push_back({"SUF_TEST_FIELD1", "SUF_TEST_VALUE1"});
+  suf_fvs_in.push_back({"SUF_TEST_FIELD2", "SUF_TEST_VALUE2"});
+  suf_fvs_in.push_back({"SUF_TEST_FIELD3", "SUF_TEST_VALUE3"});
+  s = db.HMSet("C_HGETALL_KEY", suf_fvs_in);
+  ASSERT_TRUE(s.ok());
+  fvs_out.clear();
+  s = db.HGetall("B_HGETALL_KEY", &fvs_out);
+  ASSERT_TRUE(s.ok());
+  ASSERT_EQ(fvs_out.size(), 3);
+  ASSERT_EQ(fvs_out[0].field, "MID_TEST_FIELD1");
+  ASSERT_EQ(fvs_out[0].value, "MID_TEST_VALUE1");
+  ASSERT_EQ(fvs_out[1].field, "MID_TEST_FIELD2");
+  ASSERT_EQ(fvs_out[1].value, "MID_TEST_VALUE2");
+  ASSERT_EQ(fvs_out[2].field, "MID_TEST_FIELD3");
+  ASSERT_EQ(fvs_out[2].value, "MID_TEST_VALUE3");
+
+  // HGetall on a timed-out hash table
+  fvs_out.clear();
+  std::map<storage::DataType, storage::Status> type_status;
+  db.Expire("B_HGETALL_KEY", 1);
+  ASSERT_TRUE(type_status[storage::DataType::kHashes].ok());
std::this_thread::sleep_for(std::chrono::milliseconds(2000)); + s = db.HGetall("B_HGETALL_KEY", &fvs_out); + ASSERT_TRUE(s.IsNotFound()); + ASSERT_EQ(fvs_out.size(), 0); + + // HGetall not exist hash table + fvs_out.clear(); + s = db.HGetall("HGETALL_NOT_EXIST_KEY", &fvs_out); + ASSERT_TRUE(s.IsNotFound()); + ASSERT_EQ(fvs_out.size(), 0); +} + +// HIncrby +TEST_F(HashesTest, HIncrby) { + int32_t ret; + int64_t value; + std::string str_value; + + // ***************** Group 1 Test ***************** + s = db.HSet("GP1_HINCRBY_KEY", "GP1_HINCRBY_FIELD", "1", &ret); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(ret, 1); + + s = db.HIncrby("GP1_HINCRBY_KEY", "GP1_HINCRBY_FIELD", 1, &value); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(value, 2); + + // ***************** Group 2 Test ***************** + s = db.HSet("GP2_HINCRBY_KEY", "GP2_HINCRBY_FIELD", " 1", &ret); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(ret, 1); + + s = db.HIncrby("GP2_HINCRBY_KEY", "GP2_HINCRBY_FIELD", 1, &value); + ASSERT_TRUE(s.IsCorruption()); + ASSERT_EQ(value, 0); + + // ***************** Group 3 Test ***************** + s = db.HSet("GP3_HINCRBY_KEY", "GP3_HINCRBY_FIELD", "1 ", &ret); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(ret, 1); + + s = db.HIncrby("GP3_HINCRBY_KEY", "GP3_HINCRBY_FIELD", 1, &value); + ASSERT_TRUE(s.IsCorruption()); + ASSERT_EQ(value, 0); + + // If key does not exist the value is set to 0 before the + // operation is performed + s = db.HIncrby("HINCRBY_NEW_KEY", "HINCRBY_EXIST_FIELD", 1000, &value); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(value, 1000); + s = db.HGet("HINCRBY_NEW_KEY", "HINCRBY_EXIST_FIELD", &str_value); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(atoll(str_value.data()), 1000); + + // If the hash field contains a string that can not be + // represented as integer + s = db.HSet("HINCRBY_KEY", "HINCRBY_STR_FIELD", "HINCRBY_VALEU", &ret); + ASSERT_TRUE(s.ok()); + s = db.HIncrby("HINCRBY_KEY", "HINCRBY_STR_FIELD", 100, &value); + ASSERT_TRUE(s.IsCorruption()); + + // If field does not exist the value is set to 0 before the + // operation is performed + s = db.HIncrby("HINCRBY_KEY", "HINCRBY_NOT_EXIST_FIELD", 100, &value); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(value, 100); + s = db.HGet("HINCRBY_KEY", "HINCRBY_NOT_EXIST_FIELD", &str_value); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(atoll(str_value.data()), 100); + + s = db.HSet("HINCRBY_KEY", "HINCRBY_NUM_FIELD", "100", &ret); + ASSERT_TRUE(s.ok()); + + // Positive test + s = db.HIncrby("HINCRBY_KEY", "HINCRBY_NUM_FIELD", 100, &value); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(value, 200); + s = db.HGet("HINCRBY_KEY", "HINCRBY_NUM_FIELD", &str_value); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(atoll(str_value.data()), 200); + + // Negative test + s = db.HIncrby("HINCRBY_KEY", "HINCRBY_NUM_FIELD", -100, &value); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(value, 100); + s = db.HGet("HINCRBY_KEY", "HINCRBY_NUM_FIELD", &str_value); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(atoll(str_value.data()), 100); + + // Larger than the maximum number 9223372036854775807 + s = db.HSet("HINCRBY_KEY", "HINCRBY_NUM_FIELD", "10", &ret); + ASSERT_TRUE(s.ok()); + s = db.HIncrby("HINCRBY_KEY", "HINCRBY_NUM_FIELD", 9223372036854775807, &value); + ASSERT_TRUE(s.IsInvalidArgument()); + + // Less than the minimum number -9223372036854775808 + s = db.HSet("HINCRBY_KEY", "HINCRBY_NUM_FIELD", "-10", &ret); + ASSERT_TRUE(s.ok()); + s = db.HIncrby("HINCRBY_KEY", "HINCRBY_NUM_FIELD", -9223372036854775807, &value); + ASSERT_TRUE(s.IsInvalidArgument()); +} + +// HIncrbyfloat +TEST_F(HashesTest, HIncrbyfloat) { + int32_t ret; + std::string new_value; + + // 
***************** Group 1 Test ***************** + s = db.HSet("GP1_HINCRBYFLOAT_KEY", "GP1_HINCRBYFLOAT_FIELD", "1.234", &ret); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(ret, 1); + + s = db.HIncrbyfloat("GP1_HINCRBYFLOAT_KEY", "GP1_HINCRBYFLOAT_FIELD", "1.234", &new_value); + ASSERT_TRUE(s.ok()); + //ASSERT_EQ(new_value, "2.468"); + + // ***************** Group 2 Test ***************** + s = db.HSet("GP2_HINCRBYFLOAT_KEY", "GP2_HINCRBYFLOAT_FIELD", " 1.234", &ret); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(ret, 1); + + s = db.HIncrbyfloat("GP2_HINCRBYFLOAT_KEY", "GP2_HINCRBYFLOAT_FIELD", "1.234", &new_value); + ASSERT_TRUE(s.IsCorruption()); + ASSERT_EQ(new_value, ""); + + // ***************** Group 3 Test ***************** + s = db.HSet("GP3_HINCRBYFLOAT_KEY", "GP3_HINCRBYFLOAT_FIELD", "1.234 ", &ret); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(ret, 1); + + s = db.HIncrbyfloat("GP3_HINCRBYFLOAT_KEY", "GP3_HINCRBYFLOAT_FIELD", "1.234", &new_value); + ASSERT_TRUE(s.IsCorruption()); + ASSERT_EQ(new_value, ""); + + // If the specified increment are not parsable as a double precision + // floating point number + s = db.HIncrbyfloat("HINCRBYFLOAT_KEY", "HINCRBYFLOAT_FIELD", "HINCRBYFLOAT_BY", &new_value); + ASSERT_TRUE(s.IsCorruption()); + s = db.HGet("HINCRBYFLOAT_KEY", "HINCRBYFLOAT_FIELD", &new_value); + ASSERT_TRUE(s.IsNotFound()); + + // If key does not exist the value is set to 0 before the + // operation is performed + s = db.HIncrbyfloat("HINCRBYFLOAT_KEY", "HINCRBYFLOAT_FIELD", "12.3456", &new_value); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(new_value, "12.3456"); + s = db.HGet("HINCRBYFLOAT_KEY", "HINCRBYFLOAT_FIELD", &new_value); + ASSERT_TRUE(s.ok()); + //ASSERT_EQ(new_value, "12.3456"); + s = db.HLen("HINCRBYFLOAT_KEY", &ret); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(ret, 1); + + // If the current field content are not parsable as a double precision + // floating point number + s = db.HSet("HINCRBYFLOAT_KEY", "HINCRBYFLOAT_STR_FIELD", "HINCRBYFLOAT_VALUE", &ret); + ASSERT_TRUE(s.ok()); + s = db.HIncrbyfloat("HINCRBYFLOAT_KEY", "HINCRBYFLOAT_STR_FIELD", "123.456", &new_value); + ASSERT_TRUE(s.IsCorruption()); + s = db.HLen("HINCRBYFLOAT_KEY", &ret); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(ret, 2); + + // If field does not exist the value is set to 0 before the + // operation is performed + s = db.HIncrbyfloat("HINCRBYFLOAT_KEY", "HINCRBYFLOAT_NOT_EXIST_FIELD", "65.4321000", &new_value); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(new_value, "65.4321"); + s = db.HGet("HINCRBYFLOAT_KEY", "HINCRBYFLOAT_NOT_EXIST_FIELD", &new_value); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(new_value, "65.4321"); + s = db.HLen("HINCRBYFLOAT_KEY", &ret); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(ret, 3); + + s = db.HSet("HINCRBYFLOAT_KEY", "HINCRBYFLOAT_NUM_FIELD", "1000", &ret); + ASSERT_TRUE(s.ok()); + + // Positive test + s = db.HIncrbyfloat("HINCRBYFLOAT_KEY", "HINCRBYFLOAT_NUM_FIELD", "+123.456789", &new_value); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(new_value, "1123.456789"); + s = db.HGet("HINCRBYFLOAT_KEY", "HINCRBYFLOAT_NUM_FIELD", &new_value); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(new_value, "1123.456789"); + + // Negative test + s = db.HIncrbyfloat("HINCRBYFLOAT_KEY", "HINCRBYFLOAT_NUM_FIELD", "-123.456789", &new_value); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(new_value, "1000"); + s = db.HGet("HINCRBYFLOAT_KEY", "HINCRBYFLOAT_NUM_FIELD", &new_value); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(new_value, "1000"); + + s = db.HLen("HINCRBYFLOAT_KEY", &ret); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(ret, 4); + + // ***** Special test ***** + // case 1 + s = 
db.HIncrbyfloat("HINCRBYFLOAT_KEY", "HINCRBYFLOAT_SP_FIELD1", "2.0e2", &new_value); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(new_value, "200"); + s = db.HGet("HINCRBYFLOAT_KEY", "HINCRBYFLOAT_SP_FIELD1", &new_value); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(new_value, "200"); + + // case2 + s = db.HIncrbyfloat("HINCRBYFLOAT_KEY", "HINCRBYFLOAT_SP_FIELD2", "5.0e3", &new_value); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(new_value, "5000"); + s = db.HIncrbyfloat("HINCRBYFLOAT_KEY", "HINCRBYFLOAT_SP_FIELD2", "2.0e2", &new_value); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(new_value, "5200"); + s = db.HGet("HINCRBYFLOAT_KEY", "HINCRBYFLOAT_SP_FIELD2", &new_value); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(new_value, "5200"); + + // case 3 + s = db.HIncrbyfloat("HINCRBYFLOAT_KEY", "HINCRBYFLOAT_SP_FIELD3", "5.0e3", &new_value); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(new_value, "5000"); + s = db.HIncrbyfloat("HINCRBYFLOAT_KEY", "HINCRBYFLOAT_SP_FIELD3", "-2.0e2", &new_value); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(new_value, "4800"); + s = db.HGet("HINCRBYFLOAT_KEY", "HINCRBYFLOAT_SP_FIELD3", &new_value); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(new_value, "4800"); + + // case 4 + s = db.HIncrbyfloat("HINCRBYFLOAT_KEY", "HINCRBYFLOAT_SP_FIELD4", ".456789", &new_value); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(new_value, "0.456789"); + s = db.HGet("HINCRBYFLOAT_KEY", "HINCRBYFLOAT_SP_FIELD4", &new_value); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(new_value, "0.456789"); + + // case5 + s = db.HIncrbyfloat("HINCRBYFLOAT_KEY", "HINCRBYFLOAT_SP_FIELD5", "-.456789", &new_value); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(new_value, "-0.456789"); + s = db.HGet("HINCRBYFLOAT_KEY", "HINCRBYFLOAT_SP_FIELD5", &new_value); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(new_value, "-0.456789"); + + // case6 + s = db.HIncrbyfloat("HINCRBYFLOAT_KEY", "HINCRBYFLOAT_SP_FIELD6", "+.456789", &new_value); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(new_value, "0.456789"); + s = db.HGet("HINCRBYFLOAT_KEY", "HINCRBYFLOAT_SP_FIELD6", &new_value); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(new_value, "0.456789"); + + // case7 + s = db.HIncrbyfloat("HINCRBYFLOAT_KEY", "HINCRBYFLOAT_SP_FIELD7", "+.456789", &new_value); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(new_value, "0.456789"); + s = db.HIncrbyfloat("HINCRBYFLOAT_KEY", "HINCRBYFLOAT_SP_FIELD7", "-.456789", &new_value); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(new_value, "0"); + s = db.HGet("HINCRBYFLOAT_KEY", "HINCRBYFLOAT_SP_FIELD7", &new_value); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(new_value, "0"); + + // case8 + s = db.HIncrbyfloat("HINCRBYFLOAT_KEY", "HINCRBYFLOAT_SP_FIELD8", "-00000.456789000", &new_value); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(new_value, "-0.456789"); + s = db.HGet("HINCRBYFLOAT_KEY", "HINCRBYFLOAT_SP_FIELD8", &new_value); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(new_value, "-0.456789"); + + // case9 + s = db.HIncrbyfloat("HINCRBYFLOAT_KEY", "HINCRBYFLOAT_SP_FIELD9", "+00000.456789000", &new_value); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(new_value, "0.456789"); + s = db.HGet("HINCRBYFLOAT_KEY", "HINCRBYFLOAT_SP_FIELD9", &new_value); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(new_value, "0.456789"); + + s = db.HLen("HINCRBYFLOAT_KEY", &ret); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(ret, 13); +} + +// HKeys +TEST_F(HashesTest, HKeys) { + int32_t ret = 0; + std::vector mid_fvs_in; + mid_fvs_in.push_back({"MID_TEST_FIELD1", "MID_TEST_VALUE1"}); + mid_fvs_in.push_back({"MID_TEST_FIELD2", "MID_TEST_VALUE2"}); + mid_fvs_in.push_back({"MID_TEST_FIELD3", "MID_TEST_VALUE3"}); + s = db.HMSet("B_HKEYS_KEY", mid_fvs_in); + ASSERT_TRUE(s.ok()); + + std::vector fields; + s = 
db.HKeys("B_HKEYS_KEY", &fields); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(fields.size(), 3); + ASSERT_EQ(fields[0], "MID_TEST_FIELD1"); + ASSERT_EQ(fields[1], "MID_TEST_FIELD2"); + ASSERT_EQ(fields[2], "MID_TEST_FIELD3"); + + // Insert some kv who's position above "mid kv" + std::vector pre_fvs_in; + pre_fvs_in.push_back({"PRE_TEST_FIELD1", "PRE_TEST_VALUE1"}); + pre_fvs_in.push_back({"PRE_TEST_FIELD2", "PRE_TEST_VALUE2"}); + pre_fvs_in.push_back({"PRE_TEST_FIELD3", "PRE_TEST_VALUE3"}); + s = db.HMSet("A_HKEYS_KEY", pre_fvs_in); + ASSERT_TRUE(s.ok()); + fields.clear(); + s = db.HKeys("B_HKEYS_KEY", &fields); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(fields.size(), 3); + ASSERT_EQ(fields[0], "MID_TEST_FIELD1"); + ASSERT_EQ(fields[1], "MID_TEST_FIELD2"); + ASSERT_EQ(fields[2], "MID_TEST_FIELD3"); + + // Insert some kv who's position below "mid kv" + std::vector suf_fvs_in; + suf_fvs_in.push_back({"SUF_TEST_FIELD1", "SUF_TEST_VALUE1"}); + suf_fvs_in.push_back({"SUF_TEST_FIELD2", "SUF_TEST_VALUE2"}); + suf_fvs_in.push_back({"SUF_TEST_FIELD3", "SUF_TEST_VALUE3"}); + s = db.HMSet("A_HKEYS_KEY", suf_fvs_in); + ASSERT_TRUE(s.ok()); + fields.clear(); + s = db.HKeys("B_HKEYS_KEY", &fields); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(fields.size(), 3); + ASSERT_EQ(fields[0], "MID_TEST_FIELD1"); + ASSERT_EQ(fields[1], "MID_TEST_FIELD2"); + ASSERT_EQ(fields[2], "MID_TEST_FIELD3"); + + // HKeys timeout hash table + fields.clear(); + std::map type_status; + db.Expire("B_HKEYS_KEY", 1); + ASSERT_TRUE(type_status[storage::DataType::kHashes].ok()); + std::this_thread::sleep_for(std::chrono::milliseconds(2000)); + s = db.HKeys("B_HKEYS_KEY", &fields); + ASSERT_TRUE(s.IsNotFound()); + ASSERT_EQ(fields.size(), 0); + + // HKeys not exist hash table + fields.clear(); + s = db.HKeys("HKEYS_NOT_EXIST_KEY", &fields); + ASSERT_TRUE(s.IsNotFound()); + ASSERT_EQ(fields.size(), 0); +} + +// HLen +TEST_F(HashesTest, HLenTest) { + int32_t ret = 0; + + // ***************** Group 1 Test ***************** + std::vector fvs1; + fvs1.push_back({"GP1_TEST_FIELD1", "GP1_TEST_VALUE1"}); + fvs1.push_back({"GP1_TEST_FIELD2", "GP1_TEST_VALUE2"}); + fvs1.push_back({"GP1_TEST_FIELD3", "GP1_TEST_VALUE3"}); + s = db.HMSet("GP1_HLEN_KEY", fvs1); + ASSERT_TRUE(s.ok()); + + s = db.HLen("GP1_HLEN_KEY", &ret); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(ret, 3); + + // ***************** Group 2 Test ***************** + std::vector fvs2; + fvs2.push_back({"GP2_TEST_FIELD1", "GP2_TEST_VALUE1"}); + fvs2.push_back({"GP2_TEST_FIELD2", "GP2_TEST_VALUE2"}); + fvs2.push_back({"GP2_TEST_FIELD3", "GP2_TEST_VALUE3"}); + s = db.HMSet("GP2_HLEN_KEY", fvs2); + ASSERT_TRUE(s.ok()); + + s = db.HDel("GP2_HLEN_KEY", {"GP2_TEST_FIELD1", "GP2_TEST_FIELD2"}, &ret); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(ret, 2); + + s = db.HLen("GP2_HLEN_KEY", &ret); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(ret, 1); + + s = db.HDel("GP2_HLEN_KEY", {"GP2_TEST_FIELD3"}, &ret); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(ret, 1); + + s = db.HLen("GP2_HLEN_KEY", &ret); + ASSERT_TRUE(s.IsNotFound()); + ASSERT_EQ(ret, 0); +} + +// HMGet +TEST_F(HashesTest, HMGetTest) { + int32_t ret = 0; + std::vector vss; + + // ***************** Group 1 Test ***************** + std::vector fvs1; + fvs1.push_back({"TEST_FIELD1", "TEST_VALUE1"}); + fvs1.push_back({"TEST_FIELD2", "TEST_VALUE2"}); + fvs1.push_back({"TEST_FIELD3", "TEST_VALUE3"}); + s = db.HMSet("GP1_HMGET_KEY", fvs1); + ASSERT_TRUE(s.ok()); + + s = db.HLen("GP1_HMGET_KEY", &ret); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(ret, 3); + + std::vector fields1{"TEST_FIELD1", "TEST_FIELD2", 
"TEST_FIELD3", "TEST_NOT_EXIST_FIELD"}; + s = db.HMGet("GP1_HMGET_KEY", fields1, &vss); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(vss.size(), 4); + + ASSERT_TRUE(vss[0].status.ok()); + ASSERT_EQ(vss[0].value, "TEST_VALUE1"); + ASSERT_TRUE(vss[1].status.ok()); + ASSERT_EQ(vss[1].value, "TEST_VALUE2"); + ASSERT_TRUE(vss[2].status.ok()); + ASSERT_EQ(vss[2].value, "TEST_VALUE3"); + ASSERT_TRUE(vss[3].status.IsNotFound()); + ASSERT_EQ(vss[3].value, ""); + + // ***************** Group 2 Test ***************** + std::vector fvs2; + fvs2.push_back({"TEST_FIELD1", "TEST_VALUE1"}); + fvs2.push_back({"TEST_FIELD2", ""}); + s = db.HMSet("GP2_HMGET_KEY", fvs2); + ASSERT_TRUE(s.ok()); + + s = db.HLen("GP2_HMGET_KEY", &ret); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(ret, 2); + + vss.clear(); + std::vector fields2{"TEST_FIELD1", "TEST_FIELD2", "TEST_NOT_EXIST_FIELD"}; + s = db.HMGet("GP2_HMGET_KEY", fields2, &vss); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(vss.size(), 3); + + ASSERT_TRUE(vss[0].status.ok()); + ASSERT_EQ(vss[0].value, "TEST_VALUE1"); + ASSERT_TRUE(vss[1].status.ok()); + ASSERT_EQ(vss[1].value, ""); + ASSERT_TRUE(vss[2].status.IsNotFound()); + ASSERT_EQ(vss[2].value, ""); + + // ***************** Group 3 Test ***************** + vss.clear(); + std::vector fields3{"TEST_FIELD1", "TEST_FIELD2", "TEST_FIELD3"}; + s = db.HMGet("GP3_HMGET_KEY", fields3, &vss); + ASSERT_TRUE(s.IsNotFound()); + ASSERT_EQ(vss.size(), 3); + + ASSERT_TRUE(vss[0].status.IsNotFound()); + ASSERT_EQ(vss[0].value, ""); + ASSERT_TRUE(vss[1].status.IsNotFound()); + ASSERT_EQ(vss[1].value, ""); + ASSERT_TRUE(vss[2].status.IsNotFound()); + ASSERT_EQ(vss[2].value, ""); + + // ***************** Group 4 Test ***************** + std::vector fvs4; + fvs4.push_back({"TEST_FIELD1", "TEST_VALUE1"}); + fvs4.push_back({"TEST_FIELD2", "TEST_VALUE2"}); + fvs4.push_back({"TEST_FIELD3", "TEST_VALUE3"}); + + s = db.HMSet("GP4_HMGET_KEY", fvs4); + ASSERT_TRUE(s.ok()); + + ASSERT_TRUE(make_expired(&db, "GP4_HMGET_KEY")); + + vss.clear(); + std::vector fields4{"TEST_FIELD1", "TEST_FIELD2", "TEST_FIELD3"}; + s = db.HMGet("GP4_HMGET_KEY", fields4, &vss); + ASSERT_TRUE(s.IsNotFound()); + ASSERT_EQ(vss.size(), 3); + + ASSERT_TRUE(vss[0].status.IsNotFound()); + ASSERT_EQ(vss[0].value, ""); + ASSERT_TRUE(vss[1].status.IsNotFound()); + ASSERT_EQ(vss[1].value, ""); + ASSERT_TRUE(vss[2].status.IsNotFound()); + ASSERT_EQ(vss[2].value, ""); +} + +// HMSet +TEST_F(HashesTest, HMSetTest) { + int32_t ret = 0; + std::vector fvs1; + fvs1.push_back({"TEST_FIELD1", "TEST_VALUE1"}); + fvs1.push_back({"TEST_FIELD2", "TEST_VALUE2"}); + + // If field already exists in the hash, it is overwritten + std::vector fvs2; + fvs2.push_back({"TEST_FIELD2", "TEST_VALUE2"}); + fvs2.push_back({"TEST_FIELD3", "TEST_VALUE3"}); + fvs2.push_back({"TEST_FIELD4", "TEST_VALUE4"}); + fvs2.push_back({"TEST_FIELD3", "TEST_VALUE5"}); + + s = db.HMSet("HMSET_KEY", fvs1); + ASSERT_TRUE(s.ok()); + s = db.HLen("HMSET_KEY", &ret); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(ret, 2); + + s = db.HMSet("HMSET_KEY", fvs2); + ASSERT_TRUE(s.ok()); + + s = db.HLen("HMSET_KEY", &ret); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(ret, 4); + + std::vector vss1; + std::vector fields1{"TEST_FIELD1", "TEST_FIELD2", "TEST_FIELD3", "TEST_FIELD4"}; + s = db.HMGet("HMSET_KEY", fields1, &vss1); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(vss1.size(), 4); + + ASSERT_EQ(vss1[0].value, "TEST_VALUE1"); + ASSERT_EQ(vss1[1].value, "TEST_VALUE2"); + ASSERT_EQ(vss1[2].value, "TEST_VALUE5"); + ASSERT_EQ(vss1[3].value, "TEST_VALUE4"); + + std::map type_status; + 
db.Expire("HMSET_KEY", 1); + ASSERT_TRUE(type_status[storage::DataType::kHashes].ok()); + + // The key has timeout + std::this_thread::sleep_for(std::chrono::milliseconds(2000)); + std::vector fvs3; + fvs3.push_back({"TEST_FIELD3", "TEST_VALUE3"}); + fvs3.push_back({"TEST_FIELD4", "TEST_VALUE4"}); + fvs3.push_back({"TEST_FIELD5", "TEST_VALUE5"}); + s = db.HMSet("HMSET_KEY", fvs3); + ASSERT_TRUE(s.ok()); + + s = db.HLen("HMSET_KEY", &ret); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(ret, 3); + + std::vector vss2; + std::vector fields2{"TEST_FIELD3", "TEST_FIELD4", "TEST_FIELD5"}; + s = db.HMGet("HMSET_KEY", fields2, &vss2); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(vss2.size(), 3); + + ASSERT_EQ(vss2[0].value, "TEST_VALUE3"); + ASSERT_EQ(vss2[1].value, "TEST_VALUE4"); + ASSERT_EQ(vss2[2].value, "TEST_VALUE5"); +} + +// HSet +TEST_F(HashesTest, HSetTest) { + int32_t ret = 0; + std::string value; + + // ***************** Group 1 Test ***************** + // If field is a new field in the hash and value was set. + s = db.HSet("GP1_HSET_KEY", "HSET_TEST_FIELD", "HSET_TEST_VALUE", &ret); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(ret, 1); + s = db.HLen("GP1_HSET_KEY", &ret); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(ret, 1); + s = db.HGet("GP1_HSET_KEY", "HSET_TEST_FIELD", &value); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(value, "HSET_TEST_VALUE"); + + // If field already exists in the hash and the value was updated. + s = db.HSet("GP1_HSET_KEY", "HSET_TEST_FIELD", "HSET_TEST_NEW_VALUE", &ret); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(ret, 0); + s = db.HLen("GP1_HSET_KEY", &ret); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(ret, 1); + s = db.HGet("GP1_HSET_KEY", "HSET_TEST_FIELD", &value); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(value, "HSET_TEST_NEW_VALUE"); + + // If field already exists in the hash and the value was equal. 
+ s = db.HSet("GP1_HSET_KEY", "HSET_TEST_FIELD", "HSET_TEST_NEW_VALUE", &ret); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(ret, 0); + s = db.HLen("GP1_HSET_KEY", &ret); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(ret, 1); + s = db.HGet("GP1_HSET_KEY", "HSET_TEST_FIELD", &value); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(value, "HSET_TEST_NEW_VALUE"); + + // ***************** Group 2 Test ***************** + s = db.HSet("GP2_HSET_KEY", "HSET_TEST_FIELD", "HSET_TEST_VALUE", &ret); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(ret, 1); + s = db.HLen("GP2_HSET_KEY", &ret); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(ret, 1); + s = db.HGet("GP2_HSET_KEY", "HSET_TEST_FIELD", &value); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(value, "HSET_TEST_VALUE"); + + ASSERT_TRUE(make_expired(&db, "GP2_HSET_KEY")); + + s = db.HSet("GP2_HSET_KEY", "HSET_TEST_FIELD", "HSET_TEST_VALUE", &ret); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(ret, 1); + s = db.HLen("GP2_HSET_KEY", &ret); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(ret, 1); + s = db.HGet("GP2_HSET_KEY", "HSET_TEST_FIELD", &value); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(value, "HSET_TEST_VALUE"); + + // ***************** Group 3 Test ***************** + s = db.HSet("GP3_HSET_KEY", "HSET_TEST_FIELD", "HSET_TEST_NEW_VALUE", &ret); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(ret, 1); + s = db.HLen("GP3_HSET_KEY", &ret); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(ret, 1); + s = db.HGet("GP3_HSET_KEY", "HSET_TEST_FIELD", &value); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(value, "HSET_TEST_NEW_VALUE"); + + ASSERT_TRUE(make_expired(&db, "GP3_HSET_KEY")); + + s = db.HSet("GP3_HSET_KEY", "HSET_TEST_NEW_FIELD", "HSET_TEST_NEW_VALUE", &ret); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(ret, 1); + s = db.HLen("GP3_HSET_KEY", &ret); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(ret, 1); + s = db.HGet("GP3_HSET_KEY", "HSET_TEST_NEW_FIELD", &value); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(value, "HSET_TEST_NEW_VALUE"); + + // ***************** Group 4 Test ***************** + // hset after string type key expires, should success + s = db.Setex("GP4_HSET_KEY", "STRING_VALUE_WITH_TTL", 1); + ASSERT_TRUE(s.ok()); + std::this_thread::sleep_for(std::chrono::milliseconds(2100)); + s = db.HSet("GP4_HSET_KEY", "HSET_TEST_NEW_FIELD", "HSET_TEST_NEW_VALUE", &ret); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(ret, 1); +} + +// HSetnx +TEST_F(HashesTest, HSetnxTest) { + int32_t ret; + std::string value; + // If field is a new field in the hash and value was set. + s = db.HSetnx("HSETNX_KEY", "HSETNX_TEST_FIELD", "HSETNX_TEST_VALUE", &ret); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(ret, 1); + s = db.HGet("HSETNX_KEY", "HSETNX_TEST_FIELD", &value); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(value, "HSETNX_TEST_VALUE"); + + // If field already exists, this operation has no effect. 
+ s = db.HSetnx("HSETNX_KEY", "HSETNX_TEST_FIELD", "HSETNX_TEST_NEW_VALUE", &ret); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(ret, 0); + s = db.HGet("HSETNX_KEY", "HSETNX_TEST_FIELD", &value); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(value, "HSETNX_TEST_VALUE"); +} + +// HVals +TEST_F(HashesTest, HVals) { + int32_t ret = 0; + std::vector<storage::FieldValue> mid_fvs_in; + mid_fvs_in.push_back({"MID_TEST_FIELD1", "MID_TEST_VALUE1"}); + mid_fvs_in.push_back({"MID_TEST_FIELD2", "MID_TEST_VALUE2"}); + mid_fvs_in.push_back({"MID_TEST_FIELD3", "MID_TEST_VALUE3"}); + s = db.HMSet("B_HVALS_KEY", mid_fvs_in); + ASSERT_TRUE(s.ok()); + + std::vector<std::string> values; + s = db.HVals("B_HVALS_KEY", &values); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(values.size(), 3); + ASSERT_EQ(values[0], "MID_TEST_VALUE1"); + ASSERT_EQ(values[1], "MID_TEST_VALUE2"); + ASSERT_EQ(values[2], "MID_TEST_VALUE3"); + + // Insert some kvs whose position is above the "mid kv" + std::vector<storage::FieldValue> pre_fvs_in; + pre_fvs_in.push_back({"PRE_TEST_FIELD1", "PRE_TEST_VALUE1"}); + pre_fvs_in.push_back({"PRE_TEST_FIELD2", "PRE_TEST_VALUE2"}); + pre_fvs_in.push_back({"PRE_TEST_FIELD3", "PRE_TEST_VALUE3"}); + s = db.HMSet("A_HVALS_KEY", pre_fvs_in); + ASSERT_TRUE(s.ok()); + values.clear(); + s = db.HVals("B_HVALS_KEY", &values); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(values.size(), 3); + ASSERT_EQ(values[0], "MID_TEST_VALUE1"); + ASSERT_EQ(values[1], "MID_TEST_VALUE2"); + ASSERT_EQ(values[2], "MID_TEST_VALUE3"); + + // Insert some kvs whose position is below the "mid kv" + std::vector<storage::FieldValue> suf_fvs_in; + suf_fvs_in.push_back({"SUF_TEST_FIELD1", "SUF_TEST_VALUE1"}); + suf_fvs_in.push_back({"SUF_TEST_FIELD2", "SUF_TEST_VALUE2"}); + suf_fvs_in.push_back({"SUF_TEST_FIELD3", "SUF_TEST_VALUE3"}); + s = db.HMSet("C_HVALS_KEY", suf_fvs_in); + ASSERT_TRUE(s.ok()); + values.clear(); + s = db.HVals("B_HVALS_KEY", &values); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(values.size(), 3); + ASSERT_EQ(values[0], "MID_TEST_VALUE1"); + ASSERT_EQ(values[1], "MID_TEST_VALUE2"); + ASSERT_EQ(values[2], "MID_TEST_VALUE3"); + + // HVals on a timed-out hash table + values.clear(); + std::map<storage::DataType, rocksdb::Status> type_status; + db.Expire("B_HVALS_KEY", 1); + ASSERT_TRUE(type_status[storage::DataType::kHashes].ok()); + std::this_thread::sleep_for(std::chrono::milliseconds(2000)); + s = db.HVals("B_HVALS_KEY", &values); + ASSERT_TRUE(s.IsNotFound()); + ASSERT_EQ(values.size(), 0); + + // HVals on a non-existent hash table + values.clear(); + s = db.HVals("HVALS_NOT_EXIST_KEY", &values); + ASSERT_TRUE(s.IsNotFound()); + ASSERT_EQ(values.size(), 0); +} + +// HStrlen +TEST_F(HashesTest, HStrlenTest) { + int32_t ret = 0; + int32_t len = 0; + s = db.HSet("HSTRLEN_KEY", "HSTRLEN_TEST_FIELD", "HSTRLEN_TEST_VALUE", &ret); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(ret, 1); + + s = db.HStrlen("HSTRLEN_KEY", "HSTRLEN_TEST_FIELD", &len); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(len, 18); + + // If the key or the field does not exist, len is 0 and NotFound is returned + s = db.HStrlen("HSTRLEN_KEY", "HSTRLEN_NOT_EXIST_FIELD", &len); + ASSERT_TRUE(s.IsNotFound()); + ASSERT_EQ(len, 0); +} + +// HScan +TEST_F(HashesTest, HScanTest) { // NOLINT + int64_t cursor = 0; + int64_t next_cursor = 0; + std::vector field_value_out; + + // ***************** Group 1 Test ***************** + // {a,v} {b,v} {c,v} {d,v} {e,v} {f,v} {g,v} {h,v} + // 0 1 2 3 4 5 6 7 + std::vector gp1_field_value{{"a", "v"}, {"b", "v"}, {"c", "v"}, {"d", "v"}, + {"e", "v"}, {"f", "v"}, {"g", "v"}, {"h", "v"}}; + s = db.HMSet("GP1_HSCAN_KEY", gp1_field_value); + ASSERT_TRUE(s.ok()); + ASSERT_TRUE(size_match(&db, "GP1_HSCAN_KEY", 8)); + + s = db.HScan("GP1_HSCAN_KEY", 0, "*", 
3, &field_value_out, &next_cursor); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(field_value_out.size(), 3); + ASSERT_EQ(next_cursor, 3); + ASSERT_TRUE(field_value_match(field_value_out, {{"a", "v"}, {"b", "v"}, {"c", "v"}})); + + field_value_out.clear(); + cursor = next_cursor, next_cursor = 0; + s = db.HScan("GP1_HSCAN_KEY", cursor, "*", 3, &field_value_out, &next_cursor); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(field_value_out.size(), 3); + ASSERT_EQ(next_cursor, 6); + ASSERT_TRUE(field_value_match(field_value_out, {{"d", "v"}, {"e", "v"}, {"f", "v"}})); + + field_value_out.clear(); + cursor = next_cursor, next_cursor = 0; + s = db.HScan("GP1_HSCAN_KEY", cursor, "*", 3, &field_value_out, &next_cursor); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(field_value_out.size(), 2); + ASSERT_EQ(next_cursor, 0); + ASSERT_TRUE(field_value_match(field_value_out, {{"g", "v"}, {"h", "v"}})); + + // ***************** Group 2 Test ***************** + // {a,v} {b,v} {c,v} {d,v} {e,v} {f,v} {g,v} {h,v} + // 0 1 2 3 4 5 6 7 + std::vector gp2_field_value{{"a", "v"}, {"b", "v"}, {"c", "v"}, {"d", "v"}, + {"e", "v"}, {"f", "v"}, {"g", "v"}, {"h", "v"}}; + s = db.HMSet("GP2_HSCAN_KEY", gp2_field_value); + ASSERT_TRUE(s.ok()); + ASSERT_TRUE(size_match(&db, "GP2_HSCAN_KEY", 8)); + + field_value_out.clear(); + cursor = 0, next_cursor = 0; + s = db.HScan("GP2_HSCAN_KEY", cursor, "*", 1, &field_value_out, &next_cursor); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(field_value_out.size(), 1); + ASSERT_EQ(next_cursor, 1); + ASSERT_TRUE(field_value_match(field_value_out, {{"a", "v"}})); + + field_value_out.clear(); + cursor = next_cursor, next_cursor = 0; + s = db.HScan("GP2_HSCAN_KEY", cursor, "*", 1, &field_value_out, &next_cursor); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(field_value_out.size(), 1); + ASSERT_EQ(next_cursor, 2); + ASSERT_TRUE(field_value_match(field_value_out, {{"b", "v"}})); + + field_value_out.clear(); + cursor = next_cursor, next_cursor = 0; + s = db.HScan("GP2_HSCAN_KEY", cursor, "*", 1, &field_value_out, &next_cursor); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(field_value_out.size(), 1); + ASSERT_EQ(next_cursor, 3); + ASSERT_TRUE(field_value_match(field_value_out, {{"c", "v"}})); + + field_value_out.clear(); + cursor = next_cursor, next_cursor = 0; + s = db.HScan("GP2_HSCAN_KEY", cursor, "*", 1, &field_value_out, &next_cursor); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(field_value_out.size(), 1); + ASSERT_EQ(next_cursor, 4); + ASSERT_TRUE(field_value_match(field_value_out, {{"d", "v"}})); + + field_value_out.clear(); + cursor = next_cursor, next_cursor = 0; + s = db.HScan("GP2_HSCAN_KEY", cursor, "*", 1, &field_value_out, &next_cursor); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(field_value_out.size(), 1); + ASSERT_EQ(next_cursor, 5); + ASSERT_TRUE(field_value_match(field_value_out, {{"e", "v"}})); + + field_value_out.clear(); + cursor = next_cursor, next_cursor = 0; + s = db.HScan("GP2_HSCAN_KEY", cursor, "*", 1, &field_value_out, &next_cursor); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(field_value_out.size(), 1); + ASSERT_EQ(next_cursor, 6); + ASSERT_TRUE(field_value_match(field_value_out, {{"f", "v"}})); + + field_value_out.clear(); + cursor = next_cursor, next_cursor = 0; + s = db.HScan("GP2_HSCAN_KEY", cursor, "*", 1, &field_value_out, &next_cursor); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(field_value_out.size(), 1); + ASSERT_EQ(next_cursor, 7); + ASSERT_TRUE(field_value_match(field_value_out, {{"g", "v"}})); + + field_value_out.clear(); + cursor = next_cursor, next_cursor = 0; + s = db.HScan("GP2_HSCAN_KEY", cursor, "*", 1, &field_value_out, &next_cursor); + 
ASSERT_TRUE(s.ok()); + ASSERT_EQ(field_value_out.size(), 1); + ASSERT_EQ(next_cursor, 0); + ASSERT_TRUE(field_value_match(field_value_out, {{"h", "v"}})); + + // ***************** Group 3 Test ***************** + // {a,v} {b,v} {c,v} {d,v} {e,v} {f,v} {g,v} {h,v} + // 0 1 2 3 4 5 6 7 + std::vector gp3_field_value{{"a", "v"}, {"b", "v"}, {"c", "v"}, {"d", "v"}, + {"e", "v"}, {"f", "v"}, {"g", "v"}, {"h", "v"}}; + s = db.HMSet("GP3_HSCAN_KEY", gp3_field_value); + ASSERT_TRUE(s.ok()); + ASSERT_TRUE(size_match(&db, "GP3_HSCAN_KEY", 8)); + + field_value_out.clear(); + cursor = 0, next_cursor = 0; + s = db.HScan("GP3_HSCAN_KEY", cursor, "*", 5, &field_value_out, &next_cursor); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(field_value_out.size(), 5); + ASSERT_EQ(next_cursor, 5); + ASSERT_TRUE(field_value_match(field_value_out, {{"a", "v"}, {"b", "v"}, {"c", "v"}, {"d", "v"}, {"e", "v"}})); + + field_value_out.clear(); + cursor = next_cursor, next_cursor = 0; + s = db.HScan("GP3_HSCAN_KEY", cursor, "*", 5, &field_value_out, &next_cursor); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(field_value_out.size(), 3); + ASSERT_EQ(next_cursor, 0); + ASSERT_TRUE(field_value_match(field_value_out, {{"f", "v"}, {"g", "v"}, {"h", "v"}})); + + // ***************** Group 4 Test ***************** + // {a,v} {b,v} {c,v} {d,v} {e,v} {f,v} {g,v} {h,v} + // 0 1 2 3 4 5 6 7 + std::vector gp4_field_value{{"a", "v"}, {"b", "v"}, {"c", "v"}, {"d", "v"}, + {"e", "v"}, {"f", "v"}, {"g", "v"}, {"h", "v"}}; + s = db.HMSet("GP4_HSCAN_KEY", gp4_field_value); + ASSERT_TRUE(s.ok()); + ASSERT_TRUE(size_match(&db, "GP4_HSCAN_KEY", 8)); + + field_value_out.clear(); + cursor = 0, next_cursor = 0; + s = db.HScan("GP4_HSCAN_KEY", cursor, "*", 10, &field_value_out, &next_cursor); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(field_value_out.size(), 8); + ASSERT_EQ(next_cursor, 0); + ASSERT_TRUE(field_value_match( + field_value_out, + {{"a", "v"}, {"b", "v"}, {"c", "v"}, {"d", "v"}, {"e", "v"}, {"f", "v"}, {"g", "v"}, {"h", "v"}})); + + // ***************** Group 5 Test ***************** + // {a_1_,v} {a_2_,v} {a_3_,v} {b_1_,v} {b_2_,v} {b_3_,v} {c_1_,v} {c_2_,v} {c_3_,v} + // 0 1 2 3 4 5 6 7 8 + std::vector gp5_field_value{{"a_1_", "v"}, {"a_2_", "v"}, {"a_3_", "v"}, {"b_1_", "v"}, {"b_2_", "v"}, + {"b_3_", "v"}, {"c_1_", "v"}, {"c_2_", "v"}, {"c_3_", "v"}}; + s = db.HMSet("GP5_HSCAN_KEY", gp5_field_value); + ASSERT_TRUE(s.ok()); + ASSERT_TRUE(size_match(&db, "GP5_HSCAN_KEY", 9)); + + field_value_out.clear(); + cursor = 0, next_cursor = 0; + s = db.HScan("GP5_HSCAN_KEY", cursor, "*1*", 3, &field_value_out, &next_cursor); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(field_value_out.size(), 1); + ASSERT_EQ(next_cursor, 3); + ASSERT_TRUE(field_value_match(field_value_out, {{"a_1_", "v"}})); + + field_value_out.clear(); + cursor = next_cursor, next_cursor = 0; + s = db.HScan("GP5_HSCAN_KEY", cursor, "*1*", 3, &field_value_out, &next_cursor); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(field_value_out.size(), 1); + ASSERT_EQ(next_cursor, 6); + ASSERT_TRUE(field_value_match(field_value_out, {{"b_1_", "v"}})); + + field_value_out.clear(); + cursor = next_cursor, next_cursor = 0; + s = db.HScan("GP5_HSCAN_KEY", cursor, "*1*", 3, &field_value_out, &next_cursor); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(field_value_out.size(), 1); + ASSERT_EQ(next_cursor, 0); + ASSERT_TRUE(field_value_match(field_value_out, {{"c_1_", "v"}})); + + // ***************** Group 6 Test ***************** + // {a_1_,v} {a_2_,v} {a_3_,v} {b_1_,v} {b_2_,v} {b_3_,v} {c_1_,v} {c_2_,v} {c_3_,v} + // 0 1 2 3 4 5 6 7 8 + std::vector 
gp6_field_value{{"a_1_", "v"}, {"a_2_", "v"}, {"a_3_", "v"}, {"b_1_", "v"}, {"b_2_", "v"}, + {"b_3_", "v"}, {"c_1_", "v"}, {"c_2_", "v"}, {"c_3_", "v"}}; + s = db.HMSet("GP6_HSCAN_KEY", gp6_field_value); + ASSERT_TRUE(s.ok()); + ASSERT_TRUE(size_match(&db, "GP6_HSCAN_KEY", 9)); + + field_value_out.clear(); + cursor = 0, next_cursor = 0; + s = db.HScan("GP6_HSCAN_KEY", cursor, "a*", 3, &field_value_out, &next_cursor); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(field_value_out.size(), 3); + ASSERT_EQ(next_cursor, 0); + ASSERT_TRUE(field_value_match(field_value_out, {{"a_1_", "v"}, {"a_2_", "v"}, {"a_3_", "v"}})); + + field_value_out.clear(); + cursor = 0, next_cursor = 0; + s = db.HScan("GP6_HSCAN_KEY", cursor, "a*", 2, &field_value_out, &next_cursor); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(field_value_out.size(), 2); + ASSERT_EQ(next_cursor, 2); + ASSERT_TRUE(field_value_match(field_value_out, {{"a_1_", "v"}, {"a_2_", "v"}})); + + field_value_out.clear(); + cursor = next_cursor, next_cursor = 0; + s = db.HScan("GP6_HSCAN_KEY", cursor, "a*", 2, &field_value_out, &next_cursor); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(field_value_out.size(), 1); + ASSERT_EQ(next_cursor, 0); + ASSERT_TRUE(field_value_match(field_value_out, {{"a_3_", "v"}})); + + field_value_out.clear(); + cursor = 0, next_cursor = 0; + s = db.HScan("GP6_HSCAN_KEY", cursor, "a*", 1, &field_value_out, &next_cursor); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(field_value_out.size(), 1); + ASSERT_EQ(next_cursor, 1); + ASSERT_TRUE(field_value_match(field_value_out, {{"a_1_", "v"}})); + + field_value_out.clear(); + cursor = next_cursor, next_cursor = 0; + s = db.HScan("GP6_HSCAN_KEY", cursor, "a*", 1, &field_value_out, &next_cursor); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(field_value_out.size(), 1); + ASSERT_EQ(next_cursor, 2); + ASSERT_TRUE(field_value_match(field_value_out, {{"a_2_", "v"}})); + + field_value_out.clear(); + cursor = next_cursor, next_cursor = 0; + s = db.HScan("GP6_HSCAN_KEY", cursor, "a*", 1, &field_value_out, &next_cursor); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(field_value_out.size(), 1); + ASSERT_EQ(next_cursor, 0); + ASSERT_TRUE(field_value_match(field_value_out, {{"a_3_", "v"}})); + + // ***************** Group 7 Test ***************** + // {a_1_,v} {a_2_,v} {a_3_,v} {b_1_,v} {b_2_,v} {b_3_,v} {c_1_,v} {c_2_,v} {c_3_,v} + // 0 1 2 3 4 5 6 7 8 + std::vector gp7_field_value{{"a_1_", "v"}, {"a_2_", "v"}, {"a_3_", "v"}, {"b_1_", "v"}, {"b_2_", "v"}, + {"b_3_", "v"}, {"c_1_", "v"}, {"c_2_", "v"}, {"c_3_", "v"}}; + s = db.HMSet("GP7_HSCAN_KEY", gp7_field_value); + ASSERT_TRUE(s.ok()); + ASSERT_TRUE(size_match(&db, "GP7_HSCAN_KEY", 9)); + + field_value_out.clear(); + cursor = 0, next_cursor = 0; + s = db.HScan("GP7_HSCAN_KEY", cursor, "b*", 3, &field_value_out, &next_cursor); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(field_value_out.size(), 3); + ASSERT_EQ(next_cursor, 0); + ASSERT_TRUE(field_value_match(field_value_out, {{"b_1_", "v"}, {"b_2_", "v"}, {"b_3_", "v"}})); + + field_value_out.clear(); + cursor = 0, next_cursor = 0; + s = db.HScan("GP7_HSCAN_KEY", cursor, "b*", 2, &field_value_out, &next_cursor); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(field_value_out.size(), 2); + ASSERT_EQ(next_cursor, 2); + ASSERT_TRUE(field_value_match(field_value_out, {{"b_1_", "v"}, {"b_2_", "v"}})); + + field_value_out.clear(); + cursor = next_cursor, next_cursor = 0; + s = db.HScan("GP7_HSCAN_KEY", cursor, "b*", 2, &field_value_out, &next_cursor); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(field_value_out.size(), 1); + ASSERT_EQ(next_cursor, 0); + 
ASSERT_TRUE(field_value_match(field_value_out, {{"b_3_", "v"}})); + + field_value_out.clear(); + cursor = 0, next_cursor = 0; + s = db.HScan("GP7_HSCAN_KEY", cursor, "b*", 1, &field_value_out, &next_cursor); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(field_value_out.size(), 1); + ASSERT_EQ(next_cursor, 1); + ASSERT_TRUE(field_value_match(field_value_out, {{"b_1_", "v"}})); + + field_value_out.clear(); + cursor = next_cursor, next_cursor = 0; + s = db.HScan("GP7_HSCAN_KEY", cursor, "b*", 1, &field_value_out, &next_cursor); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(field_value_out.size(), 1); + ASSERT_EQ(next_cursor, 2); + ASSERT_TRUE(field_value_match(field_value_out, {{"b_2_", "v"}})); + + field_value_out.clear(); + cursor = next_cursor, next_cursor = 0; + s = db.HScan("GP7_HSCAN_KEY", cursor, "b*", 1, &field_value_out, &next_cursor); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(field_value_out.size(), 1); + ASSERT_EQ(next_cursor, 0); + ASSERT_TRUE(field_value_match(field_value_out, {{"b_3_", "v"}})); + + // ***************** Group 8 Test ***************** + // {a_1_,v} {a_2_,v} {a_3_,v} {b_1_,v} {b_2_,v} {b_3_,v} {c_1_,v} {c_2_,v} {c_3_,v} + // 0 1 2 3 4 5 6 7 8 + std::vector gp8_field_value{{"a_1_", "v"}, {"a_2_", "v"}, {"a_3_", "v"}, {"b_1_", "v"}, {"b_2_", "v"}, + {"b_3_", "v"}, {"c_1_", "v"}, {"c_2_", "v"}, {"c_3_", "v"}}; + s = db.HMSet("GP8_HSCAN_KEY", gp8_field_value); + ASSERT_TRUE(s.ok()); + ASSERT_TRUE(size_match(&db, "GP8_HSCAN_KEY", 9)); + + field_value_out.clear(); + cursor = 0, next_cursor = 0; + s = db.HScan("GP8_HSCAN_KEY", cursor, "c*", 3, &field_value_out, &next_cursor); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(field_value_out.size(), 3); + ASSERT_EQ(next_cursor, 0); + ASSERT_TRUE(field_value_match(field_value_out, {{"c_1_", "v"}, {"c_2_", "v"}, {"c_3_", "v"}})); + + field_value_out.clear(); + cursor = next_cursor, next_cursor = 0; + s = db.HScan("GP8_HSCAN_KEY", cursor, "c*", 2, &field_value_out, &next_cursor); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(field_value_out.size(), 2); + ASSERT_EQ(next_cursor, 2); + ASSERT_TRUE(field_value_match(field_value_out, {{"c_1_", "v"}, {"c_2_", "v"}})); + + field_value_out.clear(); + cursor = next_cursor, next_cursor = 0; + s = db.HScan("GP8_HSCAN_KEY", cursor, "c*", 2, &field_value_out, &next_cursor); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(field_value_out.size(), 1); + ASSERT_EQ(next_cursor, 0); + ASSERT_TRUE(field_value_match(field_value_out, {{"c_3_", "v"}})); + + field_value_out.clear(); + cursor = 0, next_cursor = 0; + s = db.HScan("GP8_HSCAN_KEY", cursor, "c*", 1, &field_value_out, &next_cursor); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(field_value_out.size(), 1); + ASSERT_EQ(next_cursor, 1); + ASSERT_TRUE(field_value_match(field_value_out, {{"c_1_", "v"}})); + + field_value_out.clear(); + cursor = next_cursor, next_cursor = 0; + s = db.HScan("GP8_HSCAN_KEY", cursor, "c*", 1, &field_value_out, &next_cursor); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(field_value_out.size(), 1); + ASSERT_EQ(next_cursor, 2); + ASSERT_TRUE(field_value_match(field_value_out, {{"c_2_", "v"}})); + + field_value_out.clear(); + cursor = next_cursor, next_cursor = 0; + s = db.HScan("GP8_HSCAN_KEY", cursor, "c*", 1, &field_value_out, &next_cursor); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(field_value_out.size(), 1); + ASSERT_EQ(next_cursor, 0); + ASSERT_TRUE(field_value_match(field_value_out, {{"c_3_", "v"}})); + + // ***************** Group 9 Test ***************** + // {a_1_,v} {a_2_,v} {a_3_,v} {b_1_,v} {b_2_,v} {b_3_,v} {c_1_,v} {c_2_,v} {c_3_,v} + // 0 1 2 3 4 5 6 7 8 + std::vector gp9_field_value{{"a_1_", 
"v"}, {"a_2_", "v"}, {"a_3_", "v"}, {"b_1_", "v"}, {"b_2_", "v"}, + {"b_3_", "v"}, {"c_1_", "v"}, {"c_2_", "v"}, {"c_3_", "v"}}; + s = db.HMSet("GP9_HSCAN_KEY", gp9_field_value); + ASSERT_TRUE(s.ok()); + ASSERT_TRUE(size_match(&db, "GP9_HSCAN_KEY", 9)); + + field_value_out.clear(); + cursor = 0, next_cursor = 0; + s = db.HScan("GP9_HSCAN_KEY", cursor, "d*", 3, &field_value_out, &next_cursor); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(field_value_out.size(), 0); + ASSERT_EQ(next_cursor, 0); + ASSERT_TRUE(field_value_match(field_value_out, {})); + + // ***************** Group 10 Test ***************** + // {a_1_,v} {a_2_,v} {a_3_,v} {b_1_,v} {b_2_,v} {b_3_,v} {c_1_,v} {c_2_,v} {c_3_,v} + // 0 1 2 3 4 5 6 7 8 + std::vector gp10_field_value{{"a_1_", "v"}, {"a_2_", "v"}, {"a_3_", "v"}, {"b_1_", "v"}, {"b_2_", "v"}, + {"b_3_", "v"}, {"c_1_", "v"}, {"c_2_", "v"}, {"c_3_", "v"}}; + s = db.HMSet("GP10_HSCAN_KEY", gp10_field_value); + ASSERT_TRUE(s.ok()); + ASSERT_TRUE(size_match(&db, "GP10_HSCAN_KEY", 9)); + + ASSERT_TRUE(make_expired(&db, "GP10_HSCAN_KEY")); + field_value_out.clear(); + cursor = 0, next_cursor = 0; + s = db.HScan("GP10_HSCAN_KEY", cursor, "*", 10, &field_value_out, &next_cursor); + ASSERT_TRUE(s.IsNotFound()); + ASSERT_EQ(field_value_out.size(), 0); + ASSERT_EQ(next_cursor, 0); + ASSERT_TRUE(field_value_match(field_value_out, {})); + + // ***************** Group 11 Test ***************** + // HScan Not Exist Key + field_value_out.clear(); + cursor = 0, next_cursor = 0; + s = db.HScan("GP11_HSCAN_KEY", cursor, "*", 10, &field_value_out, &next_cursor); + ASSERT_TRUE(s.IsNotFound()); + ASSERT_EQ(field_value_out.size(), 0); + ASSERT_EQ(next_cursor, 0); + ASSERT_TRUE(field_value_match(field_value_out, {})); +} + +// HScanx +TEST_F(HashesTest, HScanxTest) { + std::string start_field; + std::string next_field; + std::vector field_value_out; + + // ***************** Group 1 Test ***************** + // {a,v} {b,v} {c,v} {d,v} {e,v} {f,v} {g,v} {h,v} + // 0 1 2 3 4 5 6 7 + std::vector gp1_field_value{{"a", "v"}, {"b", "v"}, {"c", "v"}, {"d", "v"}, + {"e", "v"}, {"f", "v"}, {"g", "v"}, {"h", "v"}}; + s = db.HMSet("GP1_HSCANX_KEY", gp1_field_value); + ASSERT_TRUE(s.ok()); + ASSERT_TRUE(size_match(&db, "GP1_HSCANX_KEY", 8)); + + s = db.HScanx("GP1_HSCANX_KEY", "", "*", 3, &field_value_out, &next_field); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(field_value_out.size(), 3); + ASSERT_EQ(next_field, "d"); + ASSERT_TRUE(field_value_match(field_value_out, {{"a", "v"}, {"b", "v"}, {"c", "v"}})); + + field_value_out.clear(); + start_field = next_field; + s = db.HScanx("GP1_HSCANX_KEY", start_field, "*", 3, &field_value_out, &next_field); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(field_value_out.size(), 3); + ASSERT_EQ(next_field, "g"); + ASSERT_TRUE(field_value_match(field_value_out, {{"d", "v"}, {"e", "v"}, {"f", "v"}})); + + field_value_out.clear(); + start_field = next_field; + s = db.HScanx("GP1_HSCANX_KEY", start_field, "*", 3, &field_value_out, &next_field); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(field_value_out.size(), 2); + ASSERT_EQ(next_field, ""); + ASSERT_TRUE(field_value_match(field_value_out, {{"g", "v"}, {"h", "v"}})); + + // ***************** Group 2 Test ***************** + // {a,v} {b,v} {c,v} {d,v} {e,v} {f,v} {g,v} {h,v} + // 0 1 2 3 4 5 6 7 + std::vector gp2_field_value{{"a", "v"}, {"b", "v"}, {"c", "v"}, {"d", "v"}, + {"e", "v"}, {"f", "v"}, {"g", "v"}, {"h", "v"}}; + s = db.HMSet("GP2_HSCANX_KEY", gp2_field_value); + ASSERT_TRUE(s.ok()); + ASSERT_TRUE(size_match(&db, "GP2_HSCANX_KEY", 8)); + + s = 
db.HScanx("GP2_HSCANX_KEY", "a", "*", 3, &field_value_out, &next_field); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(field_value_out.size(), 3); + ASSERT_EQ(next_field, "d"); + ASSERT_TRUE(field_value_match(field_value_out, {{"a", "v"}, {"b", "v"}, {"c", "v"}})); + + field_value_out.clear(); + start_field = next_field; + s = db.HScanx("GP2_HSCANX_KEY", start_field, "*", 3, &field_value_out, &next_field); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(field_value_out.size(), 3); + ASSERT_EQ(next_field, "g"); + ASSERT_TRUE(field_value_match(field_value_out, {{"d", "v"}, {"e", "v"}, {"f", "v"}})); + + field_value_out.clear(); + start_field = next_field; + s = db.HScanx("GP2_HSCANX_KEY", start_field, "*", 3, &field_value_out, &next_field); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(field_value_out.size(), 2); + ASSERT_EQ(next_field, ""); + ASSERT_TRUE(field_value_match(field_value_out, {{"g", "v"}, {"h", "v"}})); + + // ***************** Group 3 Test ***************** + // {a,v} {b,v} {c,v} {d,v} {e,v} {f,v} {g,v} {h,v} + // 0 1 2 3 4 5 6 7 + std::vector gp3_field_value{{"a", "v"}, {"b", "v"}, {"c", "v"}, {"d", "v"}, + {"e", "v"}, {"f", "v"}, {"g", "v"}, {"h", "v"}}; + s = db.HMSet("GP3_HSCANX_KEY", gp3_field_value); + ASSERT_TRUE(s.ok()); + ASSERT_TRUE(size_match(&db, "GP3_HSCANX_KEY", 8)); + + field_value_out.clear(); + s = db.HScanx("GP3_HSCANX_KEY", "a", "*", 10, &field_value_out, &next_field); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(field_value_out.size(), 8); + ASSERT_EQ(next_field, ""); + ASSERT_TRUE(field_value_match( + field_value_out, + {{"a", "v"}, {"b", "v"}, {"c", "v"}, {"d", "v"}, {"e", "v"}, {"f", "v"}, {"g", "v"}, {"h", "v"}})); + + // ***************** Group 4 Test ***************** + // {a_f1,v} {a_f2,v} {a_f3,v} {b_f1,v} {b_f2,v} {b_f3,v} {c_f1,v} {c_f2,v}, {c_f3, v} + // 0 1 2 3 4 5 6 7 8 + std::vector gp4_field_value{{"a_f1", "v"}, {"a_f2", "v"}, {"a_f3", "v"}, {"b_f1", "v"}, {"b_f2", "v"}, + {"b_f3", "v"}, {"c_f1", "v"}, {"c_f2", "v"}, {"c_f3", "v"}}; + s = db.HMSet("GP4_HSCANX_KEY", gp4_field_value); + ASSERT_TRUE(s.ok()); + ASSERT_TRUE(size_match(&db, "GP4_HSCANX_KEY", 9)); + + field_value_out.clear(); + s = db.HScanx("GP4_HSCANX_KEY", "a_", "*", 3, &field_value_out, &next_field); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(field_value_out.size(), 3); + ASSERT_EQ(next_field, "b_f1"); + ASSERT_TRUE(field_value_match(field_value_out, {{"a_f1", "v"}, {"a_f2", "v"}, {"a_f3", "v"}})); + + field_value_out.clear(); + s = db.HScanx("GP4_HSCANX_KEY", "b_", "*", 3, &field_value_out, &next_field); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(field_value_out.size(), 3); + ASSERT_EQ(next_field, "c_f1"); + ASSERT_TRUE(field_value_match(field_value_out, {{"b_f1", "v"}, {"b_f2", "v"}, {"b_f3", "v"}})); + + field_value_out.clear(); + s = db.HScanx("GP4_HSCANX_KEY", "c_", "*", 3, &field_value_out, &next_field); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(field_value_out.size(), 3); + ASSERT_EQ(next_field, ""); + ASSERT_TRUE(field_value_match(field_value_out, {{"c_f1", "v"}, {"c_f2", "v"}, {"c_f3", "v"}})); + + field_value_out.clear(); + s = db.HScanx("GP4_HSCANX_KEY", "d_", "*", 3, &field_value_out, &next_field); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(field_value_out.size(), 0); + ASSERT_EQ(next_field, ""); + ASSERT_TRUE(field_value_match(field_value_out, {})); + + // ***************** Group 5 Test ***************** + // {a_f1,v} {a_f2,v} {a_f3,v} {b_f1,v} {b_f2,v} {b_f3,v} {c_f1,v} {c_f2,v}, {c_f3, v} + // 0 1 2 3 4 5 6 7 8 + std::vector gp5_field_value{{"a_f1", "v"}, {"a_f2", "v"}, {"a_f3", "v"}, {"b_f1", "v"}, {"b_f2", "v"}, + {"b_f3", "v"}, 
{"c_f1", "v"}, {"c_f2", "v"}, {"c_f3", "v"}}; + s = db.HMSet("GP5_HSCANX_KEY", gp5_field_value); + ASSERT_TRUE(s.ok()); + ASSERT_TRUE(size_match(&db, "GP5_HSCANX_KEY", 9)); + + field_value_out.clear(); + s = db.HScanx("GP5_HSCANX_KEY", "a_", "a_*", 5, &field_value_out, &next_field); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(field_value_out.size(), 3); + ASSERT_EQ(next_field, "b_f3"); + ASSERT_TRUE(field_value_match(field_value_out, {{"a_f1", "v"}, {"a_f2", "v"}, {"a_f3", "v"}})); + + field_value_out.clear(); + s = db.HScanx("GP5_HSCANX_KEY", "b_", "b_*", 5, &field_value_out, &next_field); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(field_value_out.size(), 3); + ASSERT_EQ(next_field, "c_f3"); + ASSERT_TRUE(field_value_match(field_value_out, {{"b_f1", "v"}, {"b_f2", "v"}, {"b_f3", "v"}})); + + field_value_out.clear(); + s = db.HScanx("GP5_HSCANX_KEY", "c_", "c_*", 5, &field_value_out, &next_field); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(field_value_out.size(), 3); + ASSERT_EQ(next_field, ""); + ASSERT_TRUE(field_value_match(field_value_out, {{"c_f1", "v"}, {"c_f2", "v"}, {"c_f3", "v"}})); + + field_value_out.clear(); + s = db.HScanx("GP5_HSCANX_KEY", "d_", "d_", 5, &field_value_out, &next_field); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(field_value_out.size(), 0); + ASSERT_EQ(next_field, ""); + ASSERT_TRUE(field_value_match(field_value_out, {})); + + // ***************** Group 6 Test ***************** + // {a_f1,v} {a_f2,v} {a_f3,v} {b_f1,v} {b_f2,v} {b_f3,v} {c_f1,v} {c_f2,v}, {c_f3, v} + // 0 1 2 3 4 5 6 7 8 + std::vector gp6_field_value{{"a_f1", "v"}, {"a_f2", "v"}, {"a_f3", "v"}, {"b_f1", "v"}, {"b_f2", "v"}, + {"b_f3", "v"}, {"c_f1", "v"}, {"c_f2", "v"}, {"c_f3", "v"}}; + s = db.HMSet("GP6_HSCANX_KEY", gp6_field_value); + ASSERT_TRUE(s.ok()); + ASSERT_TRUE(size_match(&db, "GP6_HSCANX_KEY", 9)); + + field_value_out.clear(); + s = db.HScanx("GP6_HSCANX_KEY", "a_", "a_*", 5, &field_value_out, &next_field); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(field_value_out.size(), 3); + ASSERT_EQ(next_field, "b_f3"); + ASSERT_TRUE(field_value_match(field_value_out, {{"a_f1", "v"}, {"a_f2", "v"}, {"a_f3", "v"}})); + + field_value_out.clear(); + s = db.HScanx("GP6_HSCANX_KEY", "b_", "b_*", 5, &field_value_out, &next_field); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(field_value_out.size(), 3); + ASSERT_EQ(next_field, "c_f3"); + ASSERT_TRUE(field_value_match(field_value_out, {{"b_f1", "v"}, {"b_f2", "v"}, {"b_f3", "v"}})); + + field_value_out.clear(); + s = db.HScanx("GP6_HSCANX_KEY", "c_", "c_*", 5, &field_value_out, &next_field); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(field_value_out.size(), 3); + ASSERT_EQ(next_field, ""); + ASSERT_TRUE(field_value_match(field_value_out, {{"c_f1", "v"}, {"c_f2", "v"}, {"c_f3", "v"}})); + + field_value_out.clear(); + s = db.HScanx("GP6_HSCANX_KEY", "d_", "d_", 5, &field_value_out, &next_field); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(field_value_out.size(), 0); + ASSERT_EQ(next_field, ""); + ASSERT_TRUE(field_value_match(field_value_out, {})); + + // ***************** Group 7 Test ***************** + // {a_f1,v} {a_f2,v} {a_f3,v} {b_f1,v} {b_f2,v} {b_f3,v} {c_f1,v} {c_f2,v}, {c_f3, v} + // 0 1 2 3 4 5 6 7 8 + std::vector gp7_field_value{{"a_f1", "v"}, {"a_f2", "v"}, {"a_f3", "v"}, {"b_f1", "v"}, {"b_f2", "v"}, + {"b_f3", "v"}, {"c_f1", "v"}, {"c_f2", "v"}, {"c_f3", "v"}}; + s = db.HMSet("GP7_HSCANX_KEY", gp7_field_value); + ASSERT_TRUE(s.ok()); + ASSERT_TRUE(size_match(&db, "GP7_HSCANX_KEY", 9)); + + field_value_out.clear(); + s = db.HScanx("GP7_HSCANX_KEY", "a_", "*f2", 3, &field_value_out, &next_field); + 
ASSERT_TRUE(s.ok()); + ASSERT_EQ(field_value_out.size(), 1); + ASSERT_EQ(next_field, "b_f1"); + ASSERT_TRUE(field_value_match(field_value_out, {{"a_f2", "v"}})); + + start_field = next_field; + field_value_out.clear(); + s = db.HScanx("GP7_HSCANX_KEY", start_field, "*f2", 3, &field_value_out, &next_field); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(field_value_out.size(), 1); + ASSERT_EQ(next_field, "c_f1"); + ASSERT_TRUE(field_value_match(field_value_out, {{"b_f2", "v"}})); + + start_field = next_field; + field_value_out.clear(); + s = db.HScanx("GP7_HSCANX_KEY", start_field, "*f2", 3, &field_value_out, &next_field); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(field_value_out.size(), 1); + ASSERT_EQ(next_field, ""); + ASSERT_TRUE(field_value_match(field_value_out, {{"c_f2", "v"}})); + + // ***************** Group 8 Test ***************** + // {a_f1,v} {a_f2,v} {a_f3,v} {b_f1,v} {b_f2,v} {b_f3,v} {c_f1,v} {c_f2,v}, {c_f3, v} + // 0 1 2 3 4 5 6 7 8 + std::vector gp8_field_value{{"a_f1", "v"}, {"a_f2", "v"}, {"a_f3", "v"}, {"b_f1", "v"}, {"b_f2", "v"}, + {"b_f3", "v"}, {"c_f1", "v"}, {"c_f2", "v"}, {"c_f3", "v"}}; + s = db.HMSet("GP8_HSCANX_KEY", gp8_field_value); + ASSERT_TRUE(s.ok()); + ASSERT_TRUE(size_match(&db, "GP8_HSCANX_KEY", 9)); + + field_value_out.clear(); + s = db.HScanx("GP8_HSCANX_KEY", "a_", "*n*", 3, &field_value_out, &next_field); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(field_value_out.size(), 0); + ASSERT_EQ(next_field, "b_f1"); + ASSERT_TRUE(field_value_match(field_value_out, {})); + + start_field = next_field; + field_value_out.clear(); + s = db.HScanx("GP8_HSCANX_KEY", start_field, "*n*", 3, &field_value_out, &next_field); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(field_value_out.size(), 0); + ASSERT_EQ(next_field, "c_f1"); + ASSERT_TRUE(field_value_match(field_value_out, {})); + + start_field = next_field; + field_value_out.clear(); + s = db.HScanx("GP8_HSCANX_KEY", start_field, "*n*", 3, &field_value_out, &next_field); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(field_value_out.size(), 0); + ASSERT_EQ(next_field, ""); + ASSERT_TRUE(field_value_match(field_value_out, {})); + + // ***************** Group 9 Test ***************** + // {a_f1,v} {a_f2,v} {a_f3,v} {b_f1,v} {b_f2,v} {b_f3,v} {c_f1,v} {c_f2,v}, {c_f3, v} + // 0 1 2 3 4 5 6 7 8 + std::vector gp9_field_value{{"a_f1", "v"}, {"a_f2", "v"}, {"a_f3", "v"}, {"b_f1", "v"}, {"b_f2", "v"}, + {"b_f3", "v"}, {"c_f1", "v"}, {"c_f2", "v"}, {"c_f3", "v"}}; + s = db.HMSet("GP9_HSCANX_KEY", gp9_field_value); + ASSERT_TRUE(s.ok()); + ASSERT_TRUE(size_match(&db, "GP9_HSCANX_KEY", 9)); + + ASSERT_TRUE(make_expired(&db, "GP9_HSCANX_KEY")); + + field_value_out.clear(); + s = db.HScanx("GP9_HSCANX_KEY", "", "*", 10, &field_value_out, &next_field); + ASSERT_TRUE(s.IsNotFound()); + ASSERT_EQ(field_value_out.size(), 0); + ASSERT_EQ(next_field, ""); + ASSERT_TRUE(field_value_match(field_value_out, {})); + + // ***************** Group 10 Test ***************** + // {a_f1,v} {a_f2,v} {a_f3,v} {b_f1,v} {b_f2,v} {b_f3,v} {c_f1,v} {c_f2,v}, {c_f3, v} + // 0 1 2 3 4 5 6 7 8 + std::vector gp10_field_value{{"a_f1", "v"}, {"a_f2", "v"}, {"a_f3", "v"}, {"b_f1", "v"}, {"b_f2", "v"}, + {"b_f3", "v"}, {"c_f1", "v"}, {"c_f2", "v"}, {"c_f3", "v"}}; + + field_value_out.clear(); + s = db.HScanx("GP10_HSCANX_KEY", "", "*", 10, &field_value_out, &next_field); + ASSERT_TRUE(s.IsNotFound()); + ASSERT_EQ(field_value_out.size(), 0); + ASSERT_EQ(next_field, ""); + ASSERT_TRUE(field_value_match(field_value_out, {})); + + // ***************** Group 11 Test ***************** + // {a_f1,v} {a_f2,v} 
{a_f3,v} {b_f1,v} {b_f2,v} {b_f3,v} {c_f1,v} {c_f2,v}, {c_f3, v} + // 0 1 2 3 4 5 6 7 8 + std::vector gp11_field_value{{"a_f1", "v"}, {"a_f2", "v"}, {"a_f3", "v"}, {"b_f1", "v"}, {"b_f2", "v"}, + {"b_f3", "v"}, {"c_f1", "v"}, {"c_f2", "v"}, {"c_f3", "v"}}; + std::vector gp11_del_field{"a_f1", "a_f2", "a_f3", "b_f1", "b_f2", "b_f3", "c_f1", "c_f2", "c_f3"}; + + s = db.HMSet("GP11_HSCANX_KEY", gp11_field_value); + ASSERT_TRUE(s.ok()); + ASSERT_TRUE(size_match(&db, "GP11_HSCANX_KEY", 9)); + + int32_t ret = 0; + s = db.HDel("GP11_HSCANX_KEY", gp11_del_field, &ret); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(ret, 9); + + field_value_out.clear(); + s = db.HScanx("GP11_HSCANX_KEY", "", "*", 10, &field_value_out, &next_field); + ASSERT_TRUE(s.IsNotFound()); + ASSERT_EQ(field_value_out.size(), 0); + ASSERT_EQ(next_field, ""); + ASSERT_TRUE(field_value_match(field_value_out, {})); + + // ***************** Group 12 Test ***************** + // {aaa,v} {aab,v} {aac,v} {aad,v} {aaf,v} {aba,v} {abb,v} {abc,v}, {abd, v}, {abf, v} + // 0 1 2 3 4 5 6 7 8 9 + std::vector gp12_field_value{{"aaa", "v"}, {"aab", "v"}, {"aac", "v"}, {"aad", "v"}, {"aaf", "v"}, + {"aba", "v"}, {"abb", "v"}, {"abc", "v"}, {"abd", "v"}, {"abf", "v"}}; + + s = db.HMSet("GP12_HSCANX_KEY", gp12_field_value); + ASSERT_TRUE(s.ok()); + ASSERT_TRUE(size_match(&db, "GP12_HSCANX_KEY", 10)); + + field_value_out.clear(); + s = db.HScanx("GP12_HSCANX_KEY", "aa", "ab*", 3, &field_value_out, &next_field); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(field_value_out.size(), 0); + ASSERT_EQ(next_field, "aad"); + ASSERT_TRUE(field_value_match(field_value_out, {})); + + field_value_out.clear(); + s = db.HScanx("GP12_HSCANX_KEY", "aad", "ab*", 3, &field_value_out, &next_field); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(field_value_out.size(), 1); + ASSERT_EQ(next_field, "abb"); + ASSERT_TRUE(field_value_match(field_value_out, {{"aba", "v"}})); + + field_value_out.clear(); + s = db.HScanx("GP12_HSCANX_KEY", "abb", "ab*", 3, &field_value_out, &next_field); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(field_value_out.size(), 3); + ASSERT_EQ(next_field, "abf"); + ASSERT_TRUE(field_value_match(field_value_out, {{"abb", "v"}, {"abc", "v"}, {"abd", "v"}})); + + field_value_out.clear(); + s = db.HScanx("GP12_HSCANX_KEY", "abf", "ab*", 3, &field_value_out, &next_field); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(field_value_out.size(), 1); + ASSERT_EQ(next_field, ""); + ASSERT_TRUE(field_value_match(field_value_out, {{"abf", "v"}})); + + field_value_out.clear(); + s = db.HScanx("GP12_HSCANX_KEY", "aa", "ab*", 5, &field_value_out, &next_field); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(field_value_out.size(), 0); + ASSERT_EQ(next_field, "aba"); + ASSERT_TRUE(field_value_match(field_value_out, {})); + + field_value_out.clear(); + s = db.HScanx("GP12_HSCANX_KEY", "aba", "ab*", 5, &field_value_out, &next_field); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(field_value_out.size(), 5); + ASSERT_EQ(next_field, ""); + ASSERT_TRUE( + field_value_match(field_value_out, {{"aba", "v"}, {"abb", "v"}, {"abc", "v"}, {"abd", "v"}, {"abf", "v"}})); +} + +// PKHScanRange +TEST_F(HashesTest, PKHScanRangeTest) { + int32_t ret; + std::string start_field; + std::string next_field; + std::vector field_value_out; + std::vector expect_field_value; + + // ************************** Group 1 Test ************************** + // 0 1 2 3 4 5 6 7 8 9 + // a c e g i k m o q s + // ^ ^ + // field_start field_end/next_field + std::vector gp1_field_value{{"a", "v"}, {"c", "v"}, {"e", "v"}, {"g", "v"}, {"i", "v"}, + {"k", "v"}, {"m", "v"}, {"o", "v"}, 
{"q", "v"}, {"s", "v"}}; + s = db.HMSet("GP1_PKHSCANRANGE_KEY", gp1_field_value); + ASSERT_TRUE(s.ok()); + ASSERT_TRUE(size_match(&db, "GP1_PKHSCANRANGE_KEY", 10)); + + field_value_out.clear(); + expect_field_value.clear(); + s = db.PKHScanRange("GP1_PKHSCANRANGE_KEY", "", "", "*", 10, &field_value_out, &next_field); + ASSERT_TRUE(s.ok()); + for (int32_t idx = 0; idx <= 9; ++idx) { + expect_field_value.push_back(gp1_field_value[idx]); + } + ASSERT_TRUE(field_value_match(field_value_out, expect_field_value)); + ASSERT_EQ(next_field, ""); + + // ************************** Group 2 Test ************************** + // 0 1 2 3 4 5 6 7 8 9 + // a c e g i k m o q s + // ^ ^ + // field_start field_end/next_field + std::vector gp2_field_value{{"a", "v"}, {"c", "v"}, {"e", "v"}, {"g", "v"}, {"i", "v"}, + {"k", "v"}, {"m", "v"}, {"o", "v"}, {"q", "v"}, {"s", "v"}}; + s = db.HMSet("GP2_PKHSCANRANGE_KEY", gp2_field_value); + ASSERT_TRUE(s.ok()); + ASSERT_TRUE(size_match(&db, "GP2_PKHSCANRANGE_KEY", 10)); + + field_value_out.clear(); + expect_field_value.clear(); + s = db.PKHScanRange("GP2_PKHSCANRANGE_KEY", "b", "", "*", 10, &field_value_out, &next_field); + ASSERT_TRUE(s.ok()); + for (int32_t idx = 1; idx <= 9; ++idx) { + expect_field_value.push_back(gp2_field_value[idx]); + } + ASSERT_TRUE(field_value_match(field_value_out, expect_field_value)); + ASSERT_EQ(next_field, ""); + + // ************************** Group 3 Test ************************** + // 0 1 2 3 4 5 6 7 8 9 + // a c e g i k m o q s + // ^ ^ + // field_start field_end + std::vector gp3_field_value{{"a", "v"}, {"c", "v"}, {"e", "v"}, {"g", "v"}, {"i", "v"}, + {"k", "v"}, {"m", "v"}, {"o", "v"}, {"q", "v"}, {"s", "v"}}; + s = db.HMSet("GP3_PKHSCANRANGE_KEY", gp3_field_value); + ASSERT_TRUE(s.ok()); + ASSERT_TRUE(size_match(&db, "GP3_PKHSCANRANGE_KEY", 10)); + + field_value_out.clear(); + expect_field_value.clear(); + s = db.PKHScanRange("GP3_PKHSCANRANGE_KEY", "", "r", "*", 10, &field_value_out, &next_field); + ASSERT_TRUE(s.ok()); + for (int32_t idx = 0; idx <= 8; ++idx) { + expect_field_value.push_back(gp3_field_value[idx]); + } + ASSERT_TRUE(field_value_match(field_value_out, expect_field_value)); + ASSERT_EQ(next_field, ""); + + // ************************** Group 4 Test ************************** + // 0 1 2 3 4 5 6 7 8 9 + // a c e g i k m o q s + // ^ ^ + // field_start field_end + std::vector gp4_field_value{{"a", "v"}, {"c", "v"}, {"e", "v"}, {"g", "v"}, {"i", "v"}, + {"k", "v"}, {"m", "v"}, {"o", "v"}, {"q", "v"}, {"s", "v"}}; + s = db.HMSet("GP4_PKHSCANRANGE_KEY", gp4_field_value); + ASSERT_TRUE(s.ok()); + ASSERT_TRUE(size_match(&db, "GP4_PKHSCANRANGE_KEY", 10)); + + field_value_out.clear(); + expect_field_value.clear(); + s = db.PKHScanRange("GP4_PKHSCANRANGE_KEY", "d", "p", "*", 10, &field_value_out, &next_field); + ASSERT_TRUE(s.ok()); + for (int32_t idx = 2; idx <= 7; ++idx) { + expect_field_value.push_back(gp4_field_value[idx]); + } + ASSERT_TRUE(field_value_match(field_value_out, expect_field_value)); + ASSERT_EQ(next_field, ""); + + // ************************** Group 5 Test ************************** + // 0 1 2 3 4 5 6 7 8 9 + // a c e g i k m o q s + // ^ ^ + // field_start field_end + std::vector gp5_field_value{{"a", "v"}, {"c", "v"}, {"e", "v"}, {"g", "v"}, {"i", "v"}, + {"k", "v"}, {"m", "v"}, {"o", "v"}, {"q", "v"}, {"s", "v"}}; + s = db.HMSet("GP5_PKHSCANRANGE_KEY", gp5_field_value); + ASSERT_TRUE(s.ok()); + ASSERT_TRUE(size_match(&db, "GP5_PKHSCANRANGE_KEY", 10)); + + field_value_out.clear(); + 
expect_field_value.clear(); + s = db.PKHScanRange("GP5_PKHSCANRANGE_KEY", "c", "q", "*", 10, &field_value_out, &next_field); + ASSERT_TRUE(s.ok()); + for (int32_t idx = 1; idx <= 8; ++idx) { + expect_field_value.push_back(gp5_field_value[idx]); + } + ASSERT_TRUE(field_value_match(field_value_out, expect_field_value)); + ASSERT_EQ(next_field, ""); + + // ************************** Group 6 Test ************************** + // 0 1 2 3 4 5 6 7 8 9 + // a c e g i k m o q s + // ^ ^ + // field_start field_end + std::vector gp6_field_value{{"a", "v"}, {"c", "v"}, {"e", "v"}, {"g", "v"}, {"i", "v"}, + {"k", "v"}, {"m", "v"}, {"o", "v"}, {"q", "v"}, {"s", "v"}}; + s = db.HMSet("GP6_PKHSCANRANGE_KEY", gp6_field_value); + ASSERT_TRUE(s.ok()); + ASSERT_TRUE(size_match(&db, "GP6_PKHSCANRANGE_KEY", 10)); + + field_value_out.clear(); + expect_field_value.clear(); + s = db.PKHScanRange("GP6_PKHSCANRANGE_KEY", "i", "k", "*", 10, &field_value_out, &next_field); + ASSERT_TRUE(s.ok()); + for (int32_t idx = 4; idx <= 5; ++idx) { + expect_field_value.push_back(gp6_field_value[idx]); + } + ASSERT_TRUE(field_value_match(field_value_out, expect_field_value)); + ASSERT_EQ(next_field, ""); + + // ************************** Group 7 Test ************************** + // 0 1 2 3 4 5 6 7 8 9 + // a c e g i k m o q s + // ^ + // field_start/field_end + std::vector gp7_field_value{{"a", "v"}, {"c", "v"}, {"e", "v"}, {"g", "v"}, {"i", "v"}, + {"k", "v"}, {"m", "v"}, {"o", "v"}, {"q", "v"}, {"s", "v"}}; + s = db.HMSet("GP7_PKHSCANRANGE_KEY", gp7_field_value); + ASSERT_TRUE(s.ok()); + ASSERT_TRUE(size_match(&db, "GP7_PKHSCANRANGE_KEY", 10)); + + field_value_out.clear(); + expect_field_value.clear(); + s = db.PKHScanRange("GP7_PKHSCANRANGE_KEY", "i", "i", "*", 10, &field_value_out, &next_field); + ASSERT_TRUE(s.ok()); + for (int32_t idx = 4; idx <= 4; ++idx) { + expect_field_value.push_back(gp7_field_value[idx]); + } + ASSERT_TRUE(field_value_match(field_value_out, expect_field_value)); + ASSERT_EQ(next_field, ""); + + // ************************** Group 8 Test ************************** + // 0 1 2 3 4 5 6 7 8 9 + // a c e g i k m o q s + // ^ ^ + // field_end field_start + std::vector gp8_field_value{{"a", "v"}, {"c", "v"}, {"e", "v"}, {"g", "v"}, {"i", "v"}, + {"k", "v"}, {"m", "v"}, {"o", "v"}, {"q", "v"}, {"s", "v"}}; + s = db.HMSet("GP8_PKHSCANRANGE_KEY", gp8_field_value); + ASSERT_TRUE(s.ok()); + ASSERT_TRUE(size_match(&db, "GP8_PKHSCANRANGE_KEY", 10)); + + field_value_out.clear(); + expect_field_value.clear(); + s = db.PKHScanRange("GP8_PKHSCANRANGE_KEY", "k", "i", "*", 10, &field_value_out, &next_field); + ASSERT_TRUE(s.IsInvalidArgument()); + ASSERT_TRUE(field_value_match(field_value_out, expect_field_value)); + ASSERT_EQ(next_field, ""); + + // ************************** Group 9 Test ************************** + // 0 1 2 3 4 5 6 7 8 9 + // a c e g i k m o q s + // ^ ^ ^ + // field_start next_field field_end + std::vector gp9_field_value{{"a", "v"}, {"c", "v"}, {"e", "v"}, {"g", "v"}, {"i", "v"}, + {"k", "v"}, {"m", "v"}, {"o", "v"}, {"q", "v"}, {"s", "v"}}; + s = db.HMSet("GP9_PKHSCANRANGE_KEY", gp9_field_value); + ASSERT_TRUE(s.ok()); + ASSERT_TRUE(size_match(&db, "GP9_PKHSCANRANGE_KEY", 10)); + + field_value_out.clear(); + expect_field_value.clear(); + s = db.PKHScanRange("GP9_PKHSCANRANGE_KEY", "c", "q", "*", 5, &field_value_out, &next_field); + ASSERT_TRUE(s.ok()); + for (int32_t idx = 1; idx <= 5; ++idx) { + expect_field_value.push_back(gp9_field_value[idx]); + } + ASSERT_TRUE(field_value_match(field_value_out, 
expect_field_value)); + ASSERT_EQ(next_field, "m"); + + // ************************** Group 10 Test ************************** + // 0 1 2 3 4 5 6 7 8 9 + // a c e g i k m o q s + // ^ ^ ^ ^ + // field_start deleted next_field field_end + std::vector gp10_field_value{{"a", "v"}, {"c", "v"}, {"e", "v"}, {"g", "v"}, {"i", "v"}, + {"k", "v"}, {"m", "v"}, {"o", "v"}, {"q", "v"}, {"s", "v"}}; + s = db.HMSet("GP10_PKHSCANRANGE_KEY", gp10_field_value); + ASSERT_TRUE(s.ok()); + ASSERT_TRUE(size_match(&db, "GP10_PKHSCANRANGE_KEY", 10)); + s = db.HDel("GP10_PKHSCANRANGE_KEY", {"g"}, &ret); + ASSERT_TRUE(s.ok()); + + field_value_out.clear(); + expect_field_value.clear(); + s = db.PKHScanRange("GP10_PKHSCANRANGE_KEY", "c", "q", "*", 5, &field_value_out, &next_field); + ASSERT_TRUE(s.ok()); + for (int32_t idx = 1; idx <= 6; ++idx) { + if (idx != 3) { + expect_field_value.push_back(gp10_field_value[idx]); + } + } + ASSERT_TRUE(field_value_match(field_value_out, expect_field_value)); + ASSERT_EQ(next_field, "o"); + + // ************************** Group 11 Test ************************** + // 0 1 2 3 4 5 6 7 8 + // a_f1 a_f2 a_f3 b_f1 b_f2 b_f3 c_f1 c_f2 c_f3 + // ^ ^ ^ + // field_start next_field field_end + std::vector gp11_field_value{{"a_f1", "v"}, {"a_f2", "v"}, {"a_f3", "v"}, {"b_f1", "v"}, {"b_f2", "v"}, + {"b_f3", "v"}, {"c_f1", "v"}, {"c_f2", "v"}, {"c_f3", "v"}}; + s = db.HMSet("GP11_PKHSCANRANGE_KEY", gp11_field_value); + ASSERT_TRUE(s.ok()); + ASSERT_TRUE(size_match(&db, "GP11_PKHSCANRANGE_KEY", 9)); + ASSERT_TRUE(s.ok()); + + field_value_out.clear(); + expect_field_value.clear(); + s = db.PKHScanRange("GP11_PKHSCANRANGE_KEY", "a_f1", "c_f3", "*f1", 3, &field_value_out, &next_field); + ASSERT_TRUE(s.ok()); + for (int32_t idx = 0; idx <= 2; ++idx) { + if (idx != 1 && idx != 2) { + expect_field_value.push_back(gp11_field_value[idx]); + } + } + ASSERT_TRUE(field_value_match(field_value_out, expect_field_value)); + ASSERT_EQ(next_field, "b_f1"); + + // ************************** Group 12 Test ************************** + // 0 1 2 3 4 5 6 7 8 + // a c e g i k m o q s + // ^ ^ ^ + // field_start next_field field_end + std::vector gp12_field_value_a{{"c", "v"}, {"e", "v"}, {"g", "v"}, {"i", "v"}, {"k", "v"}, + {"m", "v"}, {"o", "v"}, {"q", "v"}, {"s", "v"}}; + std::vector gp12_field_value_b{{"c", "v"}, {"e", "v"}, {"g", "v"}, {"i", "v"}, {"k", "v"}, + {"m", "v"}, {"o", "v"}, {"q", "v"}, {"s", "v"}}; + std::vector gp12_field_value_c{{"c", "v"}, {"e", "v"}, {"g", "v"}, {"i", "v"}, {"k", "v"}, + {"m", "v"}, {"o", "v"}, {"q", "v"}, {"s", "v"}}; + s = db.HMSet("GP12_PKHSCANRANGE_KEY_A", gp12_field_value_a); + ASSERT_TRUE(s.ok()); + ASSERT_TRUE(size_match(&db, "GP12_PKHSCANRANGE_KEY_A", 9)); + s = db.HMSet("GP12_PKHSCANRANGE_KEY_B", gp12_field_value_b); + ASSERT_TRUE(s.ok()); + ASSERT_TRUE(size_match(&db, "GP12_PKHSCANRANGE_KEY_B", 9)); + s = db.HMSet("GP12_PKHSCANRANGE_KEY_C", gp12_field_value_c); + ASSERT_TRUE(s.ok()); + ASSERT_TRUE(size_match(&db, "GP12_PKHSCANRANGE_KEY_C", 9)); + + field_value_out.clear(); + expect_field_value.clear(); + s = db.PKHScanRange("GP12_PKHSCANRANGE_KEY_B", "a", "o", "*", 5, &field_value_out, &next_field); + ASSERT_TRUE(s.ok()); + for (int32_t idx = 0; idx <= 4; ++idx) { + expect_field_value.push_back(gp12_field_value_b[idx]); + } + ASSERT_TRUE(field_value_match(field_value_out, expect_field_value)); + ASSERT_EQ(next_field, "m"); + + // ************************** Group 13 Test ************************** + // 0 1 2 3 4 5 6 7 8 + // c e g i k m o q s + // ^ ^ ^ + // field_start 
next_field field_end + std::vector gp13_field_value_a{{"c", "v"}, {"e", "v"}, {"g", "v"}, {"i", "v"}, {"k", "v"}, + {"m", "v"}, {"o", "v"}, {"q", "v"}, {"s", "v"}}; + std::vector gp13_field_value_b{{"c", "v"}, {"e", "v"}, {"g", "v"}, {"i", "v"}, {"k", "v"}, + {"m", "v"}, {"o", "v"}, {"q", "v"}, {"s", "v"}}; + std::vector gp13_field_value_c{{"c", "v"}, {"e", "v"}, {"g", "v"}, {"i", "v"}, {"k", "v"}, + {"m", "v"}, {"o", "v"}, {"q", "v"}, {"s", "v"}}; + s = db.HMSet("GP13_PKHSCANRANGE_KEY_A", gp13_field_value_a); + ASSERT_TRUE(s.ok()); + ASSERT_TRUE(size_match(&db, "GP13_PKHSCANRANGE_KEY_A", 9)); + s = db.HMSet("GP13_PKHSCANRANGE_KEY_B", gp13_field_value_b); + ASSERT_TRUE(s.ok()); + ASSERT_TRUE(size_match(&db, "GP13_PKHSCANRANGE_KEY_B", 9)); + s = db.HMSet("GP13_PKHSCANRANGE_KEY_C", gp13_field_value_c); + ASSERT_TRUE(s.ok()); + ASSERT_TRUE(size_match(&db, "GP13_PKHSCANRANGE_KEY_C", 9)); + + field_value_out.clear(); + expect_field_value.clear(); + s = db.PKHScanRange("GP13_PKHSCANRANGE_KEY_B", "e", "q", "*", 5, &field_value_out, &next_field); + ASSERT_TRUE(s.ok()); + for (int32_t idx = 1; idx <= 5; ++idx) { + expect_field_value.push_back(gp13_field_value_b[idx]); + } + ASSERT_TRUE(field_value_match(field_value_out, expect_field_value)); + ASSERT_EQ(next_field, "o"); + + // ************************** Group 14 Test ************************** + // 0 1 2 3 4 5 6 7 8 + // c e g i k m o q s u + // ^ ^ + // field_start next_field/field_end + std::vector gp14_field_value_a{{"c", "v"}, {"e", "v"}, {"g", "v"}, {"i", "v"}, {"k", "v"}, + {"m", "v"}, {"o", "v"}, {"q", "v"}, {"s", "v"}}; + std::vector gp14_field_value_b{{"c", "v"}, {"e", "v"}, {"g", "v"}, {"i", "v"}, {"k", "v"}, + {"m", "v"}, {"o", "v"}, {"q", "v"}, {"s", "v"}}; + std::vector gp14_field_value_c{{"c", "v"}, {"e", "v"}, {"g", "v"}, {"i", "v"}, {"k", "v"}, + {"m", "v"}, {"o", "v"}, {"q", "v"}, {"s", "v"}}; + s = db.HMSet("GP14_PKHSCANRANGE_KEY_A", gp14_field_value_a); + ASSERT_TRUE(s.ok()); + ASSERT_TRUE(size_match(&db, "GP14_PKHSCANRANGE_KEY_A", 9)); + s = db.HMSet("GP14_PKHSCANRANGE_KEY_B", gp14_field_value_b); + ASSERT_TRUE(s.ok()); + ASSERT_TRUE(size_match(&db, "GP14_PKHSCANRANGE_KEY_B", 9)); + s = db.HMSet("GP14_PKHSCANRANGE_KEY_C", gp14_field_value_c); + ASSERT_TRUE(s.ok()); + ASSERT_TRUE(size_match(&db, "GP14_PKHSCANRANGE_KEY_C", 9)); + + field_value_out.clear(); + expect_field_value.clear(); + s = db.PKHScanRange("GP14_PKHSCANRANGE_KEY_B", "u", "", "*", 5, &field_value_out, &next_field); + ASSERT_TRUE(s.ok()); + ASSERT_TRUE(field_value_match(field_value_out, expect_field_value)); + ASSERT_EQ(next_field, ""); +} + +// PKHRScanRange +TEST_F(HashesTest, PKHRScanRangeTest) { + int32_t ret; + std::string start_field; + std::string next_field; + std::vector field_value_out; + std::vector expect_field_value; + + // ************************** Group 1 Test ************************** + // 0 1 2 3 4 5 6 7 8 9 + // a c e g i k m o q s + // ^ ^ + // field_end/next_field field_start + std::vector gp1_field_value{{"a", "v"}, {"c", "v"}, {"e", "v"}, {"g", "v"}, {"i", "v"}, + {"k", "v"}, {"m", "v"}, {"o", "v"}, {"q", "v"}, {"s", "v"}}; + s = db.HMSet("GP1_PKHRSCANRANGE_KEY", gp1_field_value); + ASSERT_TRUE(s.ok()); + ASSERT_TRUE(size_match(&db, "GP1_PKHRSCANRANGE_KEY", 10)); + + field_value_out.clear(); + expect_field_value.clear(); + s = db.PKHRScanRange("GP1_PKHRSCANRANGE_KEY", "", "", "*", 10, &field_value_out, &next_field); + ASSERT_TRUE(s.ok()); + for (int32_t idx = 9; idx >= 0; --idx) { + expect_field_value.push_back(gp1_field_value[idx]); + } + 
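// (The expected order is built from idx 9 down to 0 because the reverse scan is expected + // to return fields in descending field order.) + 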
ASSERT_TRUE(field_value_match(field_value_out, expect_field_value)); + ASSERT_EQ(next_field, ""); + + // ************************** Group 2 Test ************************** + // 0 1 2 3 4 5 6 7 8 9 + // a c e g i k m o q s + // ^ ^ + // field_end field_start + std::vector gp2_field_value{{"a", "v"}, {"c", "v"}, {"e", "v"}, {"g", "v"}, {"i", "v"}, + {"k", "v"}, {"m", "v"}, {"o", "v"}, {"q", "v"}, {"s", "v"}}; + s = db.HMSet("GP2_PKHRSCANRANGE_KEY", gp2_field_value); + ASSERT_TRUE(s.ok()); + ASSERT_TRUE(size_match(&db, "GP2_PKHRSCANRANGE_KEY", 10)); + + field_value_out.clear(); + expect_field_value.clear(); + s = db.PKHRScanRange("GP2_PKHRSCANRANGE_KEY", "", "b", "*", 10, &field_value_out, &next_field); + ASSERT_TRUE(s.ok()); + for (int32_t idx = 9; idx >= 1; --idx) { + expect_field_value.push_back(gp2_field_value[idx]); + } + ASSERT_TRUE(field_value_match(field_value_out, expect_field_value)); + ASSERT_EQ(next_field, ""); + + // ************************** Group 3 Test ************************** + // 0 1 2 3 4 5 6 7 8 9 + // a c e g i k m o q s + // ^ ^ + // field_end/next_field field_start + std::vector gp3_field_value{{"a", "v"}, {"c", "v"}, {"e", "v"}, {"g", "v"}, {"i", "v"}, + {"k", "v"}, {"m", "v"}, {"o", "v"}, {"q", "v"}, {"s", "v"}}; + s = db.HMSet("GP3_PKHRSCANRANGE_KEY", gp3_field_value); + ASSERT_TRUE(s.ok()); + ASSERT_TRUE(size_match(&db, "GP3_PKHRSCANRANGE_KEY", 10)); + + field_value_out.clear(); + expect_field_value.clear(); + s = db.PKHRScanRange("GP3_PKHRSCANRANGE_KEY", "r", "", "*", 10, &field_value_out, &next_field); + ASSERT_TRUE(s.ok()); + for (int32_t idx = 8; idx >= 0; --idx) { + expect_field_value.push_back(gp3_field_value[idx]); + } + ASSERT_TRUE(field_value_match(field_value_out, expect_field_value)); + ASSERT_EQ(next_field, ""); + + // ************************** Group 4 Test ************************** + // 0 1 2 3 4 5 6 7 8 9 + // a c e g i k m o q s + // ^ ^ + // field_end field_start + std::vector gp4_field_value{{"a", "v"}, {"c", "v"}, {"e", "v"}, {"g", "v"}, {"i", "v"}, + {"k", "v"}, {"m", "v"}, {"o", "v"}, {"q", "v"}, {"s", "v"}}; + s = db.HMSet("GP4_PKHRSCANRANGE_KEY", gp4_field_value); + ASSERT_TRUE(s.ok()); + ASSERT_TRUE(size_match(&db, "GP4_PKHRSCANRANGE_KEY", 10)); + + field_value_out.clear(); + expect_field_value.clear(); + s = db.PKHRScanRange("GP4_PKHRSCANRANGE_KEY", "p", "d", "*", 10, &field_value_out, &next_field); + ASSERT_TRUE(s.ok()); + for (int32_t idx = 7; idx >= 2; --idx) { + expect_field_value.push_back(gp4_field_value[idx]); + } + ASSERT_TRUE(field_value_match(field_value_out, expect_field_value)); + ASSERT_EQ(next_field, ""); + + // ************************** Group 5 Test ************************** + // 0 1 2 3 4 5 6 7 8 9 + // a c e g i k m o q s + // ^ ^ + // field_end field_start + std::vector gp5_field_value{{"a", "v"}, {"c", "v"}, {"e", "v"}, {"g", "v"}, {"i", "v"}, + {"k", "v"}, {"m", "v"}, {"o", "v"}, {"q", "v"}, {"s", "v"}}; + s = db.HMSet("GP5_PKHRSCANRANGE_KEY", gp5_field_value); + ASSERT_TRUE(s.ok()); + ASSERT_TRUE(size_match(&db, "GP5_PKHRSCANRANGE_KEY", 10)); + + field_value_out.clear(); + expect_field_value.clear(); + s = db.PKHRScanRange("GP5_PKHRSCANRANGE_KEY", "q", "c", "*", 10, &field_value_out, &next_field); + ASSERT_TRUE(s.ok()); + for (int32_t idx = 8; idx >= 1; --idx) { + expect_field_value.push_back(gp5_field_value[idx]); + } + ASSERT_TRUE(field_value_match(field_value_out, expect_field_value)); + ASSERT_EQ(next_field, ""); + + // ************************** Group 6 Test ************************** + // 0 1 2 3 4 5 6 7 8 9 
+  //  a     c     e     g     i     k     m     o     q     s
+  //                          ^     ^
+  //                  field_end     field_start
+  std::vector<storage::FieldValue> gp6_field_value{{"a", "v"}, {"c", "v"}, {"e", "v"}, {"g", "v"}, {"i", "v"},
+                                                   {"k", "v"}, {"m", "v"}, {"o", "v"}, {"q", "v"}, {"s", "v"}};
+  s = db.HMSet("GP6_PKHRSCANRANGE_KEY", gp6_field_value);
+  ASSERT_TRUE(s.ok());
+  ASSERT_TRUE(size_match(&db, "GP6_PKHRSCANRANGE_KEY", 10));
+
+  field_value_out.clear();
+  expect_field_value.clear();
+  s = db.PKHRScanRange("GP6_PKHRSCANRANGE_KEY", "k", "i", "*", 10, &field_value_out, &next_field);
+  ASSERT_TRUE(s.ok());
+  for (int32_t idx = 5; idx >= 4; --idx) {
+    expect_field_value.push_back(gp6_field_value[idx]);
+  }
+  ASSERT_TRUE(field_value_match(field_value_out, expect_field_value));
+  ASSERT_EQ(next_field, "");
+
+  // ************************** Group 7 Test **************************
+  //  0     1     2     3     4     5     6     7     8     9
+  //  a     c     e     g     i     k     m     o     q     s
+  //                          ^
+  //              field_start/field_end
+  std::vector<storage::FieldValue> gp7_field_value{{"a", "v"}, {"c", "v"}, {"e", "v"}, {"g", "v"}, {"i", "v"},
+                                                   {"k", "v"}, {"m", "v"}, {"o", "v"}, {"q", "v"}, {"s", "v"}};
+  s = db.HMSet("GP7_PKHRSCANRANGE_KEY", gp7_field_value);
+  ASSERT_TRUE(s.ok());
+  ASSERT_TRUE(size_match(&db, "GP7_PKHRSCANRANGE_KEY", 10));
+
+  field_value_out.clear();
+  expect_field_value.clear();
+  s = db.PKHRScanRange("GP7_PKHRSCANRANGE_KEY", "i", "i", "*", 10, &field_value_out, &next_field);
+  ASSERT_TRUE(s.ok());
+  for (int32_t idx = 4; idx >= 4; --idx) {
+    expect_field_value.push_back(gp7_field_value[idx]);
+  }
+  ASSERT_TRUE(field_value_match(field_value_out, expect_field_value));
+  ASSERT_EQ(next_field, "");
+
+  // ************************** Group 8 Test **************************
+  //  0     1     2     3     4     5     6     7     8     9
+  //  a     c     e     g     i     k     m     o     q     s
+  //                          ^     ^
+  //                field_start     field_end
+  std::vector<storage::FieldValue> gp8_field_value{{"a", "v"}, {"c", "v"}, {"e", "v"}, {"g", "v"}, {"i", "v"},
+                                                   {"k", "v"}, {"m", "v"}, {"o", "v"}, {"q", "v"}, {"s", "v"}};
+  s = db.HMSet("GP8_PKHRSCANRANGE_KEY", gp8_field_value);
+  ASSERT_TRUE(s.ok());
+  ASSERT_TRUE(size_match(&db, "GP8_PKHRSCANRANGE_KEY", 10));
+
+  field_value_out.clear();
+  expect_field_value.clear();
+  s = db.PKHRScanRange("GP8_PKHRSCANRANGE_KEY", "i", "k", "*", 10, &field_value_out, &next_field);
+  ASSERT_TRUE(s.IsInvalidArgument());
+  ASSERT_TRUE(field_value_match(field_value_out, expect_field_value));
+  ASSERT_EQ(next_field, "");
+
+  // ************************** Group 9 Test **************************
+  //  0     1     2     3     4     5     6     7     8     9
+  //  a     c     e     g     i     k     m     o     q     s
+  //        ^                 ^                 ^
+  //  field_end         next_field        field_start
+  std::vector<storage::FieldValue> gp9_field_value{{"a", "v"}, {"c", "v"}, {"e", "v"}, {"g", "v"}, {"i", "v"},
+                                                   {"k", "v"}, {"m", "v"}, {"o", "v"}, {"q", "v"}, {"s", "v"}};
+  s = db.HMSet("GP9_PKHRSCANRANGE_KEY", gp9_field_value);
+  ASSERT_TRUE(s.ok());
+  ASSERT_TRUE(size_match(&db, "GP9_PKHRSCANRANGE_KEY", 10));
+
+  field_value_out.clear();
+  expect_field_value.clear();
+  s = db.PKHRScanRange("GP9_PKHRSCANRANGE_KEY", "q", "c", "*", 5, &field_value_out, &next_field);
+  ASSERT_TRUE(s.ok());
+  for (int32_t idx = 8; idx >= 4; --idx) {
+    expect_field_value.push_back(gp9_field_value[idx]);
+  }
+  ASSERT_TRUE(field_value_match(field_value_out, expect_field_value));
+  ASSERT_EQ(next_field, "g");
+
+  // ************************** Group 10 Test **************************
+  //  0     1     2     3     4     5     6     7     8     9
+  //  a     c     e     g     i     k     m     o     q     s
+  //        ^           ^                 ^           ^
+  //  field_end    next_field         deleted    field_start
+  std::vector<storage::FieldValue> gp10_field_value{{"a", "v"}, {"c", "v"}, {"e", "v"}, {"g", "v"}, {"i", "v"},
+                                                    {"k", "v"}, {"m", "v"}, {"o", "v"}, {"q", "v"}, {"s", "v"}};
+  s = db.HMSet("GP10_PKHRSCANRANGE_KEY", gp10_field_value);
+  ASSERT_TRUE(s.ok());
+  ASSERT_TRUE(size_match(&db, "GP10_PKHRSCANRANGE_KEY", 10));
+  s = db.HDel("GP10_PKHRSCANRANGE_KEY", {"m"}, &ret);
+  ASSERT_TRUE(s.ok());
+
+  field_value_out.clear();
+  expect_field_value.clear();
+  s = db.PKHRScanRange("GP10_PKHRSCANRANGE_KEY", "q", "c", "*", 5, &field_value_out, &next_field);
+  ASSERT_TRUE(s.ok());
+  for (int32_t idx = 8; idx >= 3; --idx) {
+    if (idx != 6) {
+      expect_field_value.push_back(gp10_field_value[idx]);
+    }
+  }
+  ASSERT_TRUE(field_value_match(field_value_out, expect_field_value));
+  ASSERT_EQ(next_field, "e");
+
+  // ************************** Group 11 Test **************************
+  //   0      1      2      3      4      5      6      7      8
+  //  a_f1   a_f2   a_f3   b_f1   b_f2   b_f3   c_f1   c_f2   c_f3
+  //   ^                           ^                           ^
+  //  field_end                next_field                field_start
+  std::vector<storage::FieldValue> gp11_field_value{{"a_f1", "v"}, {"a_f2", "v"}, {"a_f3", "v"}, {"b_f1", "v"}, {"b_f2", "v"},
+                                                    {"b_f3", "v"}, {"c_f1", "v"}, {"c_f2", "v"}, {"c_f3", "v"}};
+  s = db.HMSet("GP11_PKHRSCANRANGE_KEY", gp11_field_value);
+  ASSERT_TRUE(s.ok());
+  ASSERT_TRUE(size_match(&db, "GP11_PKHRSCANRANGE_KEY", 9));
+
+  field_value_out.clear();
+  expect_field_value.clear();
+  s = db.PKHRScanRange("GP11_PKHRSCANRANGE_KEY", "c_f3", "a_f1", "*f3", 3, &field_value_out, &next_field);
+  ASSERT_TRUE(s.ok());
+  // The limit of 3 stops the reverse scan after c_f3/c_f2/c_f1, and of those
+  // only "c_f3" matches the pattern "*f3".
+  expect_field_value.push_back(gp11_field_value[8]);
+  ASSERT_TRUE(field_value_match(field_value_out, expect_field_value));
+  ASSERT_EQ(next_field, "b_f3");
+
+  // ************************** Group 12 Test **************************
+  //        0     1     2     3     4     5     6     7     8
+  //  a     c     e     g     i     k     m     o     q     s
+  //  ^
+  //  field_start ("a" sorts before every field, so the reverse scan is empty)
+  std::vector<storage::FieldValue> gp12_field_value_a{{"c", "v"}, {"e", "v"}, {"g", "v"}, {"i", "v"}, {"k", "v"},
+                                                      {"m", "v"}, {"o", "v"}, {"q", "v"}, {"s", "v"}};
+  std::vector<storage::FieldValue> gp12_field_value_b{{"c", "v"}, {"e", "v"}, {"g", "v"}, {"i", "v"}, {"k", "v"},
+                                                      {"m", "v"}, {"o", "v"}, {"q", "v"}, {"s", "v"}};
+  std::vector<storage::FieldValue> gp12_field_value_c{{"c", "v"}, {"e", "v"}, {"g", "v"}, {"i", "v"}, {"k", "v"},
+                                                      {"m", "v"}, {"o", "v"}, {"q", "v"}, {"s", "v"}};
+  s = db.HMSet("GP12_PKHRSCANRANGE_KEY_A", gp12_field_value_a);
+  ASSERT_TRUE(s.ok());
+  ASSERT_TRUE(size_match(&db, "GP12_PKHRSCANRANGE_KEY_A", 9));
+  s = db.HMSet("GP12_PKHRSCANRANGE_KEY_B", gp12_field_value_b);
+  ASSERT_TRUE(s.ok());
+  ASSERT_TRUE(size_match(&db, "GP12_PKHRSCANRANGE_KEY_B", 9));
+  s = db.HMSet("GP12_PKHRSCANRANGE_KEY_C", gp12_field_value_c);
+  ASSERT_TRUE(s.ok());
+  ASSERT_TRUE(size_match(&db, "GP12_PKHRSCANRANGE_KEY_C", 9));
+
+  field_value_out.clear();
+  expect_field_value.clear();
+  s = db.PKHRScanRange("GP12_PKHRSCANRANGE_KEY_B", "a", "", "*", 5, &field_value_out, &next_field);
+  ASSERT_TRUE(s.ok());
+  ASSERT_TRUE(field_value_match(field_value_out, expect_field_value));
+  ASSERT_EQ(next_field, "");
+
+  // ************************** Group 13 Test **************************
+  //  0     1     2     3     4     5     6     7     8
+  //  c     e     g     i     k     m     o     q     s
+  //  ^     ^                             ^
+  //  field_end / next_field        field_start
+  std::vector<storage::FieldValue> gp13_field_value_a{{"c", "v"}, {"e", "v"}, {"g", "v"}, {"i", "v"}, {"k", "v"},
+                                                      {"m", "v"}, {"o", "v"}, {"q", "v"}, {"s", "v"}};
+  std::vector<storage::FieldValue> gp13_field_value_b{{"c", "v"}, {"e", "v"}, {"g", "v"}, {"i", "v"}, {"k", "v"},
+                                                      {"m", "v"}, {"o", "v"}, {"q", "v"}, {"s", "v"}};
+  std::vector<storage::FieldValue> gp13_field_value_c{{"c", "v"}, {"e", "v"}, {"g", "v"}, {"i", "v"}, {"k", "v"},
+                                                      {"m", "v"}, {"o", "v"}, {"q", "v"}, {"s", "v"}};
+  s = db.HMSet("GP13_PKHRSCANRANGE_KEY_A", gp13_field_value_a);
ASSERT_TRUE(s.ok()); + ASSERT_TRUE(size_match(&db, "GP13_PKHRSCANRANGE_KEY_A", 9)); + s = db.HMSet("GP13_PKHRSCANRANGE_KEY_B", gp13_field_value_b); + ASSERT_TRUE(s.ok()); + ASSERT_TRUE(size_match(&db, "GP13_PKHRSCANRANGE_KEY_B", 9)); + s = db.HMSet("GP13_PKHRSCANRANGE_KEY_C", gp13_field_value_c); + ASSERT_TRUE(s.ok()); + ASSERT_TRUE(size_match(&db, "GP13_PKHRSCANRANGE_KEY_C", 9)); + + field_value_out.clear(); + expect_field_value.clear(); + s = db.PKHRScanRange("GP13_PKHRSCANRANGE_KEY_B", "o", "c", "*", 5, &field_value_out, &next_field); + ASSERT_TRUE(s.ok()); + for (int32_t idx = 6; idx >= 2; --idx) { + expect_field_value.push_back(gp13_field_value_b[idx]); + } + ASSERT_TRUE(field_value_match(field_value_out, expect_field_value)); + ASSERT_EQ(next_field, "e"); + + // ************************** Group 14 Test ************************** + // 0 1 2 3 4 5 6 7 8 + // c e g i k m o q s u + // ^ ^ ^ + // field_end next_field field_start + std::vector gp14_field_value_a{{"c", "v"}, {"e", "v"}, {"g", "v"}, {"i", "v"}, {"k", "v"}, + {"m", "v"}, {"o", "v"}, {"q", "v"}, {"s", "v"}}; + std::vector gp14_field_value_b{{"c", "v"}, {"e", "v"}, {"g", "v"}, {"i", "v"}, {"k", "v"}, + {"m", "v"}, {"o", "v"}, {"q", "v"}, {"s", "v"}}; + std::vector gp14_field_value_c{{"c", "v"}, {"e", "v"}, {"g", "v"}, {"i", "v"}, {"k", "v"}, + {"m", "v"}, {"o", "v"}, {"q", "v"}, {"s", "v"}}; + s = db.HMSet("GP14_PKHRSCANRANGE_KEY_A", gp14_field_value_a); + ASSERT_TRUE(s.ok()); + ASSERT_TRUE(size_match(&db, "GP14_PKHRSCANRANGE_KEY_A", 9)); + s = db.HMSet("GP14_PKHRSCANRANGE_KEY_B", gp14_field_value_b); + ASSERT_TRUE(s.ok()); + ASSERT_TRUE(size_match(&db, "GP14_PKHRSCANRANGE_KEY_B", 9)); + s = db.HMSet("GP14_PKHRSCANRANGE_KEY_C", gp14_field_value_c); + ASSERT_TRUE(s.ok()); + ASSERT_TRUE(size_match(&db, "GP14_PKHRSCANRANGE_KEY_C", 9)); + + field_value_out.clear(); + expect_field_value.clear(); + s = db.PKHRScanRange("GP14_PKHRSCANRANGE_KEY_B", "u", "g", "*", 5, &field_value_out, &next_field); + ASSERT_TRUE(s.ok()); + for (int32_t idx = 8; idx >= 4; --idx) { + expect_field_value.push_back(gp14_field_value_b[idx]); + } + ASSERT_TRUE(field_value_match(field_value_out, expect_field_value)); + ASSERT_EQ(next_field, "i"); +} + +int main(int argc, char** argv) { + if (!pstd::FileExists("./log")) { + pstd::CreatePath("./log"); + } + FLAGS_log_dir = "./log"; + FLAGS_minloglevel = 0; + FLAGS_max_log_size = 1800; + FLAGS_logbufsecs = 0; + ::google::InitGoogleLogging("hashes_test"); + ::testing::InitGoogleTest(&argc, argv); + return RUN_ALL_TESTS(); +} diff --git a/tools/pika_migrate/src/storage/tests/hyperloglog_test.cc b/tools/pika_migrate/src/storage/tests/hyperloglog_test.cc new file mode 100644 index 0000000000..a8f73ebb51 --- /dev/null +++ b/tools/pika_migrate/src/storage/tests/hyperloglog_test.cc @@ -0,0 +1,188 @@ +// Copyright (c) 2017-present, Qihoo, Inc. All rights reserved. +// This source code is licensed under the BSD-style license found in the +// LICENSE file in the root directory of this source tree. An additional grant +// of patent rights can be found in the PATENTS file in the same directory. 
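+
+// The tests below exercise the storage-layer HyperLogLog commands. A rough
+// usage sketch (illustrative only; the variable names here are invented, see
+// the assertions below for the exact contract):
+//
+//   bool update;
+//   db.PfAdd("HLL", {"A", "B"}, &update);   // update: did any register change?
+//   int64_t count;
+//   db.PfCount({"HLL"}, &count);            // approximate distinct-value count
+//   std::string dest;
+//   db.PfMerge({"HLL1", "HLL2"}, dest);     // merge the sources into a union HLL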
+
+#include <gtest/gtest.h>
+#include <cstdlib>
+#include <iostream>
+#include <thread>
+
+#include "storage/storage.h"
+#include "storage/util.h"
+
+using namespace storage;
+
+class HyperLogLogTest : public ::testing::Test {
+ public:
+  HyperLogLogTest() = default;
+  ~HyperLogLogTest() override = default;
+
+  void SetUp() override {
+    std::string path = "./db/hyperloglog";
+    if (access(path.c_str(), F_OK) != 0) {
+      mkdir(path.c_str(), 0755);
+    }
+    storage_options.options.create_if_missing = true;
+    s = db.Open(storage_options, path);
+  }
+
+  void TearDown() override {
+    std::string path = "./db/hyperloglog";
+    DeleteFiles(path.c_str());
+  }
+
+  static void SetUpTestSuite() {}
+  static void TearDownTestSuite() {}
+
+  StorageOptions storage_options;
+  storage::Storage db;
+  storage::Status s;
+};
+
+TEST_F(HyperLogLogTest, PfaddTest) {
+  std::vector<std::string> values;
+  bool update;
+  // PFADD without arguments creates an HLL value
+  s = db.PfAdd("HLL", values, &update);
+  ASSERT_TRUE(s.ok());
+  ASSERT_TRUE(update);
+  std::vector<std::string> keys{"HLL"};
+  int64_t nums = db.Exists(keys);
+  ASSERT_EQ(nums, 1);
+
+  // Approximated cardinality after creation is zero
+  int64_t result;
+  s = db.PfCount(keys, &result);
+  ASSERT_TRUE(s.ok());
+  ASSERT_EQ(result, 0);
+
+  nums = db.Del(keys);
+  ASSERT_EQ(nums, 1);
+
+  // PFADD reports true when at least one register was modified
+  values.clear();
+  values.emplace_back("A");
+  values.emplace_back("B");
+  values.emplace_back("C");
+  s = db.PfAdd("HLL", values, &update);
+  ASSERT_TRUE(s.ok());
+  ASSERT_TRUE(update);
+
+  // PFADD reports false when no register was modified
+  values.clear();
+  values.emplace_back("A");
+  values.emplace_back("B");
+  values.emplace_back("C");
+  update = false;
+  s = db.PfAdd("HLL", values, &update);
+  ASSERT_TRUE(s.ok());
+  ASSERT_FALSE(update);
+  nums = db.Del(keys);
+  ASSERT_EQ(nums, 1);
+
+  // PFADD works with an empty string (regression)
+  values.clear();
+  values.emplace_back("");
+  s = db.PfAdd("HLL", values, &update);
+  ASSERT_TRUE(s.ok());
+  ASSERT_TRUE(update);
+
+  nums = db.Del(keys);
+  ASSERT_EQ(nums, 1);
+}
+
+TEST_F(HyperLogLogTest, PfCountTest) {
+  // PFCOUNT returns the approximated cardinality of the set
+  std::vector<std::string> values;
+  bool update;
+
+  for (int32_t i = 1; i <= 5; i++) {
+    values.push_back(std::to_string(i));
+  }
+  s = db.PfAdd("HLL", values, &update);
+  ASSERT_TRUE(s.ok());
+  ASSERT_TRUE(update);
+
+  std::vector<std::string> keys{"HLL"};
+  int64_t result;
+  s = db.PfCount(keys, &result);
+  ASSERT_TRUE(s.ok());
+  ASSERT_EQ(result, 5);
+
+  values.clear();
+  for (int32_t i = 6; i <= 10; i++) {
+    values.push_back(std::to_string(i));
+  }
+  s = db.PfAdd("HLL", values, &update);
+  ASSERT_TRUE(s.ok());
+  ASSERT_TRUE(update);
+
+  s = db.PfCount(keys, &result);
+  ASSERT_TRUE(s.ok());
+  ASSERT_EQ(result, 10);
+
+  int64_t nums = db.Del(keys);
+  ASSERT_EQ(nums, 1);
+}
+
+TEST_F(HyperLogLogTest, PfMergeTest) {
+  // PFMERGE results in the cardinality of the union of the source sets
+  bool update;
+  std::vector<std::string> values1{"A", "B", "C"};
+  s = db.PfAdd("HLL1", values1, &update);
+  ASSERT_TRUE(s.ok());
+  ASSERT_TRUE(update);
+
+  std::vector<std::string> values2{"B", "C", "D"};
+  s = db.PfAdd("HLL2", values2, &update);
+  ASSERT_TRUE(s.ok());
+  ASSERT_TRUE(update);
+
+  std::vector<std::string> values3{"C", "D", "E"};
+  s = db.PfAdd("HLL3", values3, &update);
+  ASSERT_TRUE(s.ok());
+  ASSERT_TRUE(update);
+
+  std::vector<std::string> keys{"HLL1", "HLL2", "HLL3"};
+  std::string result_value;
+  s = db.PfMerge(keys, result_value);
+  ASSERT_TRUE(s.ok());
+  int64_t result;
+  s = db.PfCount(keys, &result);
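+  // The union {A,B,C} | {B,C,D} | {C,D,E} = {A,B,C,D,E} has exactly five
+  // distinct members, so the test expects an exact count of 5 at this scale.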
+  ASSERT_TRUE(s.ok());
+  ASSERT_EQ(result, 5);
+
+  int64_t nums = db.Del(keys);
+  ASSERT_EQ(nums, 3);
+}
+
+TEST_F(HyperLogLogTest, MultipleKeysTest) {
+  // PFCOUNT over multiple keys returns the cardinality of their union
+  bool update;
+  for (int32_t i = 1; i <= 10000; i++) {
+    std::vector<std::string> hll1_value{"FOO" + std::to_string(i)};
+    std::vector<std::string> hll2_value{"BAR" + std::to_string(i)};
+    std::vector<std::string> hll3_value{"ZAP" + std::to_string(i)};
+    s = db.PfAdd("HLL1", hll1_value, &update);
+    ASSERT_TRUE(s.ok());
+
+    s = db.PfAdd("HLL2", hll2_value, &update);
+    ASSERT_TRUE(s.ok());
+
+    s = db.PfAdd("HLL3", hll3_value, &update);
+    ASSERT_TRUE(s.ok());
+  }
+  std::vector<std::string> keys{"HLL1", "HLL2", "HLL3"};
+  int64_t result;
+  s = db.PfCount(keys, &result);
+  ASSERT_TRUE(s.ok());
+  // The estimate must stay within 5% of the true cardinality (3 * 10000).
+  int64_t error = std::abs(10000 * 3 - result);
+  ASSERT_LT(error, result / 100 * 5);
+}
+
+int main(int argc, char** argv) {
+  ::testing::InitGoogleTest(&argc, argv);
+  return RUN_ALL_TESTS();
+}
diff --git a/tools/pika_migrate/src/storage/tests/keys_test.cc b/tools/pika_migrate/src/storage/tests/keys_test.cc
new file mode 100644
index 0000000000..eeb7f8d9db
--- /dev/null
+++ b/tools/pika_migrate/src/storage/tests/keys_test.cc
@@ -0,0 +1,5264 @@
+// Copyright (c) 2017-present, Qihoo, Inc. All rights reserved.
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree. An additional grant
+// of patent rights can be found in the PATENTS file in the same directory.
+
+#include <gtest/gtest.h>
+#include <iostream>
+#include <thread>
+
+#include "glog/logging.h"
+
+#include "pstd/include/pika_codis_slot.h"
+#include "pstd/include/env.h"
+#include "storage/storage.h"
+#include "storage/util.h"
+
+// using namespace storage;
+using storage::DataType;
+using storage::Slice;
+using storage::Status;
+
+class KeysTest : public ::testing::Test {
+ public:
+  KeysTest() = default;
+  ~KeysTest() override = default;
+
+  void SetUp() override {
+    std::string path = "./db/keys";
+    pstd::DeleteDirIfExist(path);
+    mkdir(path.c_str(), 0755);
+    storage_options.options.create_if_missing = true;
+    s = db.Open(storage_options, path);
+  }
+
+  void TearDown() override {
+    std::string path = "./db/keys";
+    storage::DeleteFiles(path.c_str());
+  }
+
+  static void SetUpTestSuite() {}
+  static void TearDownTestSuite() {}
+
+  storage::StorageOptions storage_options;
+  storage::Storage db;
+  storage::Status s;
+};
+
+static bool make_expired(storage::Storage* const db, const Slice& key) {
+  int32_t ret = db->Expire(key, 1);
+  if (ret == 0) {
+    return false;
+  }
+  std::this_thread::sleep_for(std::chrono::milliseconds(2000));
+  return true;
+}
+
+static bool key_value_match(const std::vector<storage::KeyValue>& key_value_out,
+                            const std::vector<storage::KeyValue>& expect_key_value) {
+  if (key_value_out.size() != expect_key_value.size()) {
+    LOG(WARNING) << "key_value_out.size: " << key_value_out.size()
+                 << " expect_key_value.size: " << expect_key_value.size();
+    return false;
+  }
+  for (size_t idx = 0; idx < key_value_out.size(); ++idx) {
+    LOG(WARNING) << "key_value_out[idx]: " << key_value_out[idx].key << " expect_key_value[idx]: " << expect_key_value[idx].key;
+    LOG(WARNING) << "key_value_out[idx]: " << key_value_out[idx].value << " expect_key_value[idx]: " << expect_key_value[idx].value;
+    if (key_value_out[idx].key != expect_key_value[idx].key ||
+        key_value_out[idx].value != expect_key_value[idx].value) {
+      return false;
+    }
+  }
+  return true;
+}
+
+static bool key_match(const std::vector<std::string>& keys_out, const std::vector<std::string>& expect_keys) {
+  if (keys_out.size() != expect_keys.size()) {
+    return false;
+  }
+  for (size_t idx = 0; idx < keys_out.size(); ++idx) {
+    if (keys_out[idx] != expect_keys[idx]) {
+      return false;
+    }
+  }
+  return true;
+}
+
+// PKScanRange
+// Note: This test needs to run first, because all of its data is
+// predetermined.
+TEST_F(KeysTest, PKScanRangeTest) {  // NOLINT
+  int32_t ret;
+  uint64_t ret_u64;
+  std::string next_key;
+  std::vector<std::string> keys_del;
+  std::vector<std::string> keys_out;
+  std::vector<std::string> expect_keys;
+  std::vector<storage::KeyValue> kvs_out;
+  std::vector<storage::KeyValue> expect_kvs;
+  std::vector<storage::KeyValue> kvs{{"PKSCANRANGE_A", "VALUE"}, {"PKSCANRANGE_C", "VALUE"}, {"PKSCANRANGE_E", "VALUE"},
+                                     {"PKSCANRANGE_G", "VALUE"}, {"PKSCANRANGE_I", "VALUE"}, {"PKSCANRANGE_K", "VALUE"},
+                                     {"PKSCANRANGE_M", "VALUE"}, {"PKSCANRANGE_O", "VALUE"}, {"PKSCANRANGE_Q", "VALUE"},
+                                     {"PKSCANRANGE_S", "VALUE"}};
+  keys_del.reserve(kvs.size());
+  for (const auto& kv : kvs) {
+    keys_del.push_back(kv.key);
+  }
+
+  //=============================== Strings ===============================
+  s = db.MSet(kvs);
+  ASSERT_TRUE(s.ok());
+
+  // ************************** Group 1 Test **************************
+  //  0     1     2     3     4     5     6     7     8     9
+  //  A     C     E     G     I     K     M     O     Q     S
+  //  ^                                                     ^
+  //  key_start                                 key_end/next_key
+  kvs_out.clear();
+  expect_kvs.clear();
+  s = db.PKScanRange(DataType::kStrings, "", "", "*", 10, &keys_out, &kvs_out, &next_key);
+  ASSERT_TRUE(s.ok());
+  for (int32_t idx = 0; idx <= 9; ++idx) {
+    expect_kvs.push_back(kvs[idx]);
+  }
+  ASSERT_TRUE(key_value_match(kvs_out, expect_kvs));
+  ASSERT_EQ(next_key, "");
+
+  // ************************** Group 2 Test **************************
+  //  0     1     2     3     4     5     6     7     8     9
+  //  A     C     E     G     I     K     M     O     Q     S
+  //     ^                                                  ^
+  //  key_start                                 key_end/next_key
+  kvs_out.clear();
+  expect_kvs.clear();
+  s = db.PKScanRange(DataType::kStrings, "PKSCANRANGE_B", "", "*", 10, &keys_out, &kvs_out, &next_key);
+  ASSERT_TRUE(s.ok());
+  for (int32_t idx = 1; idx <= 9; ++idx) {
+    expect_kvs.push_back(kvs[idx]);
+  }
+  ASSERT_TRUE(key_value_match(kvs_out, expect_kvs));
+  ASSERT_EQ(next_key, "");
+
+  // ************************** Group 3 Test **************************
+  //  0     1     2     3     4     5     6     7     8     9
+  //  A     C     E     G     I     K     M     O     Q     S
+  //  ^                                                  ^
+  //  key_start                                       key_end
+  kvs_out.clear();
+  expect_kvs.clear();
+  s = db.PKScanRange(DataType::kStrings, "", "PKSCANRANGE_R", "*", 10, &keys_out, &kvs_out, &next_key);
+  ASSERT_TRUE(s.ok());
+  for (int32_t idx = 0; idx <= 8; ++idx) {
+    expect_kvs.push_back(kvs[idx]);
+  }
+  ASSERT_TRUE(key_value_match(kvs_out, expect_kvs));
+  ASSERT_EQ(next_key, "");
+
+  // ************************** Group 4 Test **************************
+  //  0     1     2     3     4     5     6     7     8     9
+  //  A     C     E     G     I     K     M     O     Q     S
+  //           ^                                 ^
+  //        key_start                         key_end
+  kvs_out.clear();
+  expect_kvs.clear();
+  s = db.PKScanRange(DataType::kStrings, "PKSCANRANGE_D", "PKSCANRANGE_P", "*", 10, &keys_out, &kvs_out, &next_key);
+  ASSERT_TRUE(s.ok());
+  for (int32_t idx = 2; idx <= 7; ++idx) {
+    expect_kvs.push_back(kvs[idx]);
+  }
+  ASSERT_TRUE(key_value_match(kvs_out, expect_kvs));
+  ASSERT_EQ(next_key, "");
+
+  // ************************** Group 5 Test **************************
+  //  0     1     2     3     4     5     6     7     8     9
+  //  A     C     E     G     I     K     M     O     Q     S
+  //        ^                                         ^
+  //     key_start                                 key_end
+  kvs_out.clear();
+  expect_kvs.clear();
+  s = db.PKScanRange(DataType::kStrings, "PKSCANRANGE_C", "PKSCANRANGE_Q", "*", 10, &keys_out, &kvs_out, &next_key);
+  ASSERT_TRUE(s.ok());
+  for (int32_t idx = 1; idx <= 8; ++idx) {
+    expect_kvs.push_back(kvs[idx]);
+  }
+  ASSERT_TRUE(key_value_match(kvs_out, expect_kvs));
+  ASSERT_EQ(next_key, "");
+
+  // ************************** Group 6 Test **************************
+  //  0     1     2     3     4     5     6     7     8     9
+  //  A     C     E     G     I     K     M     O     Q     S
+  //                          ^     ^
+  //                  key_start     key_end
+  kvs_out.clear();
+  expect_kvs.clear();
+  s = db.PKScanRange(DataType::kStrings, "PKSCANRANGE_I", "PKSCANRANGE_K", "*", 10, &keys_out, &kvs_out, &next_key);
+  ASSERT_TRUE(s.ok());
+  for (int32_t idx = 4; idx <= 5; ++idx) {
+    expect_kvs.push_back(kvs[idx]);
+  }
+  ASSERT_TRUE(key_value_match(kvs_out, expect_kvs));
+  ASSERT_EQ(next_key, "");
+
+  // ************************** Group 7 Test **************************
+  //  0     1     2     3     4     5     6     7     8     9
+  //  A     C     E     G     I     K     M     O     Q     S
+  //                          ^
+  //                key_start/key_end
+  kvs_out.clear();
+  expect_kvs.clear();
+  s = db.PKScanRange(DataType::kStrings, "PKSCANRANGE_I", "PKSCANRANGE_I", "*", 10, &keys_out, &kvs_out, &next_key);
+  ASSERT_TRUE(s.ok());
+  for (int32_t idx = 4; idx <= 4; ++idx) {
+    expect_kvs.push_back(kvs[idx]);
+  }
+  ASSERT_TRUE(key_value_match(kvs_out, expect_kvs));
+  ASSERT_EQ(next_key, "");
+
+  // ************************** Group 8 Test **************************
+  //  0     1     2     3     4     5     6     7     8     9
+  //  A     C     E     G     I     K     M     O     Q     S
+  //                          ^     ^
+  //                    key_end     key_start
+  kvs_out.clear();
+  expect_kvs.clear();
+  s = db.PKScanRange(DataType::kStrings, "PKSCANRANGE_K", "PKSCANRANGE_I", "*", 10, &keys_out, &kvs_out, &next_key);
+  ASSERT_TRUE(s.IsInvalidArgument());
+  ASSERT_TRUE(key_value_match(kvs_out, expect_kvs));
+  ASSERT_EQ(next_key, "");
+
+  // ************************** Group 9 Test **************************
+  //  0     1     2     3     4     5     6     7     8     9
+  //  A     C     E     G     I     K     M     O     Q     S
+  //        ^                       ^                 ^
+  //     key_start              next_key           key_end
+  kvs_out.clear();
+  expect_kvs.clear();
+  s = db.PKScanRange(DataType::kStrings, "PKSCANRANGE_C", "PKSCANRANGE_Q", "*", 5, &keys_out, &kvs_out, &next_key);
+  ASSERT_TRUE(s.ok());
+  for (int32_t idx = 1; idx <= 5; ++idx) {
+    expect_kvs.push_back(kvs[idx]);
+  }
+  ASSERT_TRUE(key_value_match(kvs_out, expect_kvs));
+  ASSERT_EQ(next_key, "PKSCANRANGE_M");
+
+  // ************************** Group 10 Test **************************
+  //  0     1     2     3     4     5     6     7     8     9
+  //  A     C     E     G     I     K     M     O     Q     S
+  //        ^           ^                 ^           ^
+  //     key_start   expire           next_key     key_end
+  ASSERT_TRUE(make_expired(&db, "PKSCANRANGE_G"));
+  kvs_out.clear();
+  expect_kvs.clear();
+  s = db.PKScanRange(DataType::kStrings, "PKSCANRANGE_C", "PKSCANRANGE_Q", "*", 5, &keys_out, &kvs_out, &next_key);
+  ASSERT_TRUE(s.ok());
+  for (int32_t idx = 1; idx <= 6; ++idx) {
+    if (idx != 3) {
+      expect_kvs.push_back(kvs[idx]);
+    }
+  }
+  ASSERT_TRUE(key_value_match(kvs_out, expect_kvs));
+  ASSERT_EQ(next_key, "PKSCANRANGE_O");
+
+  // ************************** Group 11 Test **************************
+  //  0     1     2     3     4     5     6     7     8     9
+  //  A     C     E     G     I     K     M     O     Q     S
+  //        ^           ^     ^                       ^
+  //     key_start   expire  next_key              key_end
+  kvs_out.clear();
+  expect_kvs.clear();
+  s = db.PKScanRange(DataType::kStrings, "PKSCANRANGE_C", "PKSCANRANGE_Q", "*", 2, &keys_out, &kvs_out, &next_key);
+  ASSERT_TRUE(s.ok());
+  for (int32_t idx = 1; idx <= 2; ++idx) {
+    expect_kvs.push_back(kvs[idx]);
+  }
+  ASSERT_TRUE(key_value_match(kvs_out, expect_kvs));
+  ASSERT_EQ(next_key, "PKSCANRANGE_I");
+
+  //=============================== Sets ===============================
+  std::vector<storage::KeyValue> kvset{{"PKSCANRANGE_A1", "VALUE"}, {"PKSCANRANGE_C1", "VALUE"}, {"PKSCANRANGE_E1", "VALUE"},
+                                       {"PKSCANRANGE_G1", "VALUE"}, {"PKSCANRANGE_I1", "VALUE"}, {"PKSCANRANGE_K1", "VALUE"},
+                                       {"PKSCANRANGE_M1", "VALUE"}, {"PKSCANRANGE_O1", "VALUE"},
{"PKSCANRANGE_Q1", "VALUE"}, + {"PKSCANRANGE_S1", "VALUE"}}; + for (const auto& kv : kvset) { + s = db.SAdd(kv.key, {"MEMBER"}, &ret); + } + + // ************************** Group 1 Test ************************** + // 0 1 2 3 4 5 6 7 8 9 + // A C E G I K M O Q S + // ^ ^ + // key_start key_end/next_key + keys_out.clear(); + expect_keys.clear(); + s = db.PKScanRange(DataType::kSets, "", "", "*", 10, &keys_out, &kvs_out, &next_key); + ASSERT_TRUE(s.ok()); + for (int32_t idx = 0; idx <= 9; ++idx) { + expect_keys.push_back(kvset[idx].key); + } + ASSERT_TRUE(key_match(keys_out, expect_keys)); + ASSERT_EQ(next_key, ""); + + // ************************** Group 2 Test ************************** + // 0 1 2 3 4 5 6 7 8 9 + // A C E G I K M O Q S + // ^ ^ + // key_start key_end/next_key + keys_out.clear(); + expect_keys.clear(); + s = db.PKScanRange(DataType::kSets, "PKSCANRANGE_B1", "", "*", 10, &keys_out, &kvs_out, &next_key); + ASSERT_TRUE(s.ok()); + for (int32_t idx = 1; idx <= 9; ++idx) { + expect_keys.push_back(kvset[idx].key); + } + ASSERT_TRUE(key_match(keys_out, expect_keys)); + ASSERT_EQ(next_key, ""); + + // ************************** Group 3 Test ************************** + // 0 1 2 3 4 5 6 7 8 9 + // A C E G I K M O Q S + // ^ ^ + // key_start key_end + keys_out.clear(); + expect_keys.clear(); + s = db.PKScanRange(DataType::kSets, "", "PKSCANRANGE_R1", "*", 10, &keys_out, &kvs_out, &next_key); + ASSERT_TRUE(s.ok()); + for (int32_t idx = 0; idx <= 8; ++idx) { + expect_keys.push_back(kvset[idx].key); + } + ASSERT_TRUE(key_match(keys_out, expect_keys)); + ASSERT_EQ(next_key, ""); + + // ************************** Group 4 Test ************************** + // 0 1 2 3 4 5 6 7 8 9 + // A C E G I K M O Q S + // ^ ^ + // key_start key_end + keys_out.clear(); + expect_keys.clear(); + s = db.PKScanRange(DataType::kSets, "PKSCANRANGE_D1", "PKSCANRANGE_P1", "*", 10, &keys_out, &kvs_out, &next_key); + ASSERT_TRUE(s.ok()); + for (int32_t idx = 2; idx <= 7; ++idx) { + expect_keys.push_back(kvset[idx].key); + } + ASSERT_TRUE(key_match(keys_out, expect_keys)); + ASSERT_EQ(next_key, ""); + + // ************************** Group 5 Test ************************** + // 0 1 2 3 4 5 6 7 8 9 + // A C E G I K M O Q S + // ^ ^ + // key_start key_end + keys_out.clear(); + expect_keys.clear(); + s = db.PKScanRange(DataType::kSets, "PKSCANRANGE_C1", "PKSCANRANGE_Q1", "*", 10, &keys_out, &kvs_out, &next_key); + ASSERT_TRUE(s.ok()); + for (int32_t idx = 1; idx <= 8; ++idx) { + expect_keys.push_back(kvset[idx].key); + } + ASSERT_TRUE(key_match(keys_out, expect_keys)); + ASSERT_EQ(next_key, ""); + + // ************************** Group 6 Test ************************** + // 0 1 2 3 4 5 6 7 8 9 + // A C E G I K M O Q S + // ^ ^ + // key_start key_end + keys_out.clear(); + expect_keys.clear(); + s = db.PKScanRange(DataType::kSets, "PKSCANRANGE_I1", "PKSCANRANGE_K1", "*", 10, &keys_out, &kvs_out, &next_key); + ASSERT_TRUE(s.ok()); + for (int32_t idx = 4; idx <= 5; ++idx) { + expect_keys.push_back(kvset[idx].key); + } + ASSERT_TRUE(key_match(keys_out, expect_keys)); + ASSERT_EQ(next_key, ""); + + // ************************** Group 7 Test ************************** + // 0 1 2 3 4 5 6 7 8 9 + // A C E G I K M O Q S + // ^ + // key_start/key_end + keys_out.clear(); + expect_keys.clear(); + s = db.PKScanRange(DataType::kSets, "PKSCANRANGE_I1", "PKSCANRANGE_I1", "*", 10, &keys_out, &kvs_out, &next_key); + ASSERT_TRUE(s.ok()); + for (int32_t idx = 4; idx <= 4; ++idx) { + expect_keys.push_back(kvset[idx].key); + } + 
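+  // With key_start == key_end the range degenerates to the single key
+  // "PKSCANRANGE_I1", so exactly one key comes back and the cursor is empty.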
ASSERT_TRUE(key_match(keys_out, expect_keys)); + ASSERT_EQ(next_key, ""); + + // ************************** Group 8 Test ************************** + // 0 1 2 3 4 5 6 7 8 9 + // A C E G I K M O Q S + // ^ ^ + // key_end key_start + keys_out.clear(); + expect_keys.clear(); + s = db.PKScanRange(DataType::kSets, "PKSCANRANGE_K1", "PKSCANRANGE_I1", "*", 10, &keys_out, &kvs_out, &next_key); + ASSERT_TRUE(s.IsInvalidArgument()); + ASSERT_TRUE(key_match(keys_out, expect_keys)); + ASSERT_EQ(next_key, ""); + + // ************************** Group 9 Test ************************** + // 0 1 2 3 4 5 6 7 8 9 + // A C E G I K M O Q S + // ^ ^ ^ + // key_start next_key key_end + keys_out.clear(); + expect_keys.clear(); + s = db.PKScanRange(DataType::kSets, "PKSCANRANGE_C1", "PKSCANRANGE_Q1", "*", 5, &keys_out, &kvs_out, &next_key); + ASSERT_TRUE(s.ok()); + for (int32_t idx = 1; idx <= 5; ++idx) { + expect_keys.push_back(kvset[idx].key); + } + ASSERT_TRUE(key_match(keys_out, expect_keys)); + ASSERT_EQ(next_key, "PKSCANRANGE_M1"); + + // ************************** Group 10 Test ************************** + // 0 1 2 3 4 5 6 7 8 9 + // A C E G I K M O Q S + // ^ ^ ^ ^ + // key_start expire next_key key_end + ASSERT_TRUE(make_expired(&db, "PKSCANRANGE_G1")); + keys_out.clear(); + expect_keys.clear(); + s = db.PKScanRange(DataType::kSets, "PKSCANRANGE_C1", "PKSCANRANGE_Q1", "*", 5, &keys_out, &kvs_out, &next_key); + ASSERT_TRUE(s.ok()); + for (int32_t idx = 1; idx <= 6; ++idx) { + if (idx != 3) { + expect_keys.push_back(kvset[idx].key); + } + } + ASSERT_TRUE(key_match(keys_out, expect_keys)); + ASSERT_EQ(next_key, "PKSCANRANGE_O1"); + + // ************************** Group 11 Test ************************** + // 0 1 2 3 4 5 6 7 8 9 + // A C E G I K M O Q S + // ^ ^ ^ ^ + // key_start expire next_key key_end + keys_out.clear(); + expect_keys.clear(); + s = db.PKScanRange(DataType::kSets, "PKSCANRANGE_C1", "PKSCANRANGE_Q1", "*", 2, &keys_out, &kvs_out, &next_key); + ASSERT_TRUE(s.ok()); + for (int32_t idx = 1; idx <= 2; ++idx) { + if (idx != 3) { + expect_keys.push_back(kvset[idx].key); + } + } + ASSERT_TRUE(key_match(keys_out, expect_keys)); + ASSERT_EQ(next_key, "PKSCANRANGE_I1"); + + // ************************** Group 12 Test ************************** + // 0 1 2 3 4 5 6 7 8 9 + // A C E G I K M O Q S + // ^ ^ ^ ^ ^ + // key_start expire deleted next_key key_end + keys_out.clear(); + expect_keys.clear(); + db.Del({"PKSCANRANGE_I1"}); + s = db.PKScanRange(DataType::kSets, "PKSCANRANGE_C1", "PKSCANRANGE_Q1", "*", 2, &keys_out, &kvs_out, &next_key); + ASSERT_TRUE(s.ok()); + for (int32_t idx = 1; idx <= 2; ++idx) { + if (idx != 3) { + expect_keys.push_back(kvset[idx].key); + } + } + ASSERT_TRUE(key_match(keys_out, expect_keys)); + ASSERT_EQ(next_key, "PKSCANRANGE_K1"); + + //=============================== Hashes =============================== + std::vector kvhash{{"PKSCANRANGE_A2", "VALUE"}, {"PKSCANRANGE_C2", "VALUE"}, {"PKSCANRANGE_E2", "VALUE"}, + {"PKSCANRANGE_G2", "VALUE"}, {"PKSCANRANGE_I2", "VALUE"}, {"PKSCANRANGE_K2", "VALUE"}, + {"PKSCANRANGE_M2", "VALUE"}, {"PKSCANRANGE_O2", "VALUE"}, {"PKSCANRANGE_Q2", "VALUE"}, + {"PKSCANRANGE_S2", "VALUE"}}; + for (const auto& kv : kvhash) { + s = db.HMSet(kv.key, {{"FIELD", "VALUE"}}); + } + + // ************************** Group 1 Test ************************** + // 0 1 2 3 4 5 6 7 8 9 + // A C E G I K M O Q S + // ^ ^ + // key_start key_end/next_key + keys_out.clear(); + expect_keys.clear(); + s = db.PKScanRange(DataType::kHashes, "", "", "*", 10, &keys_out, 
&kvs_out, &next_key); + ASSERT_TRUE(s.ok()); + for (int32_t idx = 0; idx <= 9; ++idx) { + expect_keys.push_back(kvhash[idx].key); + } + ASSERT_TRUE(key_match(keys_out, expect_keys)); + ASSERT_EQ(next_key, ""); + + // ************************** Group 2 Test ************************** + // 0 1 2 3 4 5 6 7 8 9 + // A C E G I K M O Q S + // ^ ^ + // key_start key_end/next_key + keys_out.clear(); + expect_keys.clear(); + s = db.PKScanRange(DataType::kHashes, "PKSCANRANGE_B2", "", "*", 10, &keys_out, &kvs_out, &next_key); + ASSERT_TRUE(s.ok()); + for (int32_t idx = 1; idx <= 9; ++idx) { + expect_keys.push_back(kvhash[idx].key); + } + ASSERT_TRUE(key_match(keys_out, expect_keys)); + ASSERT_EQ(next_key, ""); + + // ************************** Group 3 Test ************************** + // 0 1 2 3 4 5 6 7 8 9 + // A C E G I K M O Q S + // ^ ^ + // key_start key_end + keys_out.clear(); + expect_keys.clear(); + s = db.PKScanRange(DataType::kHashes, "", "PKSCANRANGE_R2", "*", 10, &keys_out, &kvs_out, &next_key); + ASSERT_TRUE(s.ok()); + for (int32_t idx = 0; idx <= 8; ++idx) { + expect_keys.push_back(kvhash[idx].key); + } + ASSERT_TRUE(key_match(keys_out, expect_keys)); + ASSERT_EQ(next_key, ""); + + // ************************** Group 4 Test ************************** + // 0 1 2 3 4 5 6 7 8 9 + // A C E G I K M O Q S + // ^ ^ + // key_start key_end + keys_out.clear(); + expect_keys.clear(); + s = db.PKScanRange(DataType::kHashes, "PKSCANRANGE_D2", "PKSCANRANGE_P2", "*", 10, &keys_out, &kvs_out, &next_key); + ASSERT_TRUE(s.ok()); + for (int32_t idx = 2; idx <= 7; ++idx) { + expect_keys.push_back(kvhash[idx].key); + } + ASSERT_TRUE(key_match(keys_out, expect_keys)); + ASSERT_EQ(next_key, ""); + + // ************************** Group 5 Test ************************** + // 0 1 2 3 4 5 6 7 8 9 + // A C E G I K M O Q S + // ^ ^ + // key_start key_end + keys_out.clear(); + expect_keys.clear(); + s = db.PKScanRange(DataType::kHashes, "PKSCANRANGE_C2", "PKSCANRANGE_Q2", "*", 10, &keys_out, &kvs_out, &next_key); + ASSERT_TRUE(s.ok()); + for (int32_t idx = 1; idx <= 8; ++idx) { + expect_keys.push_back(kvhash[idx].key); + } + ASSERT_TRUE(key_match(keys_out, expect_keys)); + ASSERT_EQ(next_key, ""); + + // ************************** Group 6 Test ************************** + // 0 1 2 3 4 5 6 7 8 9 + // A C E G I K M O Q S + // ^ ^ + // key_start key_end + keys_out.clear(); + expect_keys.clear(); + s = db.PKScanRange(DataType::kHashes, "PKSCANRANGE_I2", "PKSCANRANGE_K2", "*", 10, &keys_out, &kvs_out, &next_key); + ASSERT_TRUE(s.ok()); + for (int32_t idx = 4; idx <= 5; ++idx) { + expect_keys.push_back(kvhash[idx].key); + } + ASSERT_TRUE(key_match(keys_out, expect_keys)); + ASSERT_EQ(next_key, ""); + + // ************************** Group 7 Test ************************** + // 0 1 2 3 4 5 6 7 8 9 + // A C E G I K M O Q S + // ^ + // key_start/key_end + keys_out.clear(); + expect_keys.clear(); + s = db.PKScanRange(DataType::kHashes, "PKSCANRANGE_I2", "PKSCANRANGE_I2", "*", 10, &keys_out, &kvs_out, &next_key); + ASSERT_TRUE(s.ok()); + for (int32_t idx = 4; idx <= 4; ++idx) { + expect_keys.push_back(kvhash[idx].key); + } + ASSERT_TRUE(key_match(keys_out, expect_keys)); + ASSERT_EQ(next_key, ""); + + // ************************** Group 8 Test ************************** + // 0 1 2 3 4 5 6 7 8 9 + // A C E G I K M O Q S + // ^ ^ + // key_end key_start + keys_out.clear(); + expect_keys.clear(); + s = db.PKScanRange(DataType::kHashes, "PKSCANRANGE_K2", "PKSCANRANGE_I2", "*", 10, &keys_out, &kvs_out, &next_key); + 
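+  // key_start "PKSCANRANGE_K2" sorts after key_end "PKSCANRANGE_I2", so the
+  // call must be rejected up front with InvalidArgument and produce no output.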
ASSERT_TRUE(s.IsInvalidArgument()); + ASSERT_TRUE(key_match(keys_out, expect_keys)); + ASSERT_EQ(next_key, ""); + + // ************************** Group 9 Test ************************** + // 0 1 2 3 4 5 6 7 8 9 + // A C E G I K M O Q S + // ^ ^ ^ + // key_start next_key key_end + keys_out.clear(); + expect_keys.clear(); + s = db.PKScanRange(DataType::kHashes, "PKSCANRANGE_C2", "PKSCANRANGE_Q2", "*", 5, &keys_out, &kvs_out, &next_key); + ASSERT_TRUE(s.ok()); + for (int32_t idx = 1; idx <= 5; ++idx) { + expect_keys.push_back(kvhash[idx].key); + } + ASSERT_TRUE(key_match(keys_out, expect_keys)); + ASSERT_EQ(next_key, "PKSCANRANGE_M2"); + + // ************************** Group 10 Test ************************** + // 0 1 2 3 4 5 6 7 8 9 + // A C E G I K M O Q S + // ^ ^ ^ ^ + // key_start expire next_key key_end + ASSERT_TRUE(make_expired(&db, "PKSCANRANGE_G2")); + keys_out.clear(); + expect_keys.clear(); + s = db.PKScanRange(DataType::kHashes, "PKSCANRANGE_C2", "PKSCANRANGE_Q2", "*", 5, &keys_out, &kvs_out, &next_key); + ASSERT_TRUE(s.ok()); + for (int32_t idx = 1; idx <= 6; ++idx) { + if (idx != 3) { + expect_keys.push_back(kvhash[idx].key); + } + } + ASSERT_TRUE(key_match(keys_out, expect_keys)); + ASSERT_EQ(next_key, "PKSCANRANGE_O2"); + + // ************************** Group 11 Test ************************** + // 0 1 2 3 4 5 6 7 8 9 + // A C E G I K M O Q S + // ^ ^ ^ ^ + // key_start expire next_key key_end + keys_out.clear(); + expect_keys.clear(); + s = db.PKScanRange(DataType::kHashes, "PKSCANRANGE_C2", "PKSCANRANGE_Q2", "*", 2, &keys_out, &kvs_out, &next_key); + ASSERT_TRUE(s.ok()); + for (int32_t idx = 1; idx <= 2; ++idx) { + expect_keys.push_back(kvhash[idx].key); + } + ASSERT_TRUE(key_match(keys_out, expect_keys)); + ASSERT_EQ(next_key, "PKSCANRANGE_I2"); + + // ************************** Group 12 Test ************************** + // 0 1 2 3 4 5 6 7 8 9 + // A C E G I K M O Q S + // ^ ^ ^ ^ ^ + // key_start expire deleted next_key key_end + keys_out.clear(); + expect_keys.clear(); + db.Del({"PKSCANRANGE_I2"}); + s = db.PKScanRange(DataType::kHashes, "PKSCANRANGE_C2", "PKSCANRANGE_Q2", "*", 2, &keys_out, &kvs_out, &next_key); + ASSERT_TRUE(s.ok()); + for (int32_t idx = 1; idx <= 2; ++idx) { + expect_keys.push_back(kvhash[idx].key); + } + ASSERT_TRUE(key_match(keys_out, expect_keys)); + ASSERT_EQ(next_key, "PKSCANRANGE_K2"); + + //=============================== ZSets =============================== + + + std::vector kvzset{{"PKSCANRANGE_A3", "VALUE"}, {"PKSCANRANGE_C3", "VALUE"}, {"PKSCANRANGE_E3", "VALUE"}, + {"PKSCANRANGE_G3", "VALUE"}, {"PKSCANRANGE_I3", "VALUE"}, {"PKSCANRANGE_K3", "VALUE"}, + {"PKSCANRANGE_M3", "VALUE"}, {"PKSCANRANGE_O3", "VALUE"}, {"PKSCANRANGE_Q3", "VALUE"}, + {"PKSCANRANGE_S3", "VALUE"}}; + for (const auto& kv : kvzset) { + s = db.ZAdd(kv.key, {{1, "MEMBER"}}, &ret); + } + + // ************************** Group 1 Test ************************** + // 0 1 2 3 4 5 6 7 8 9 + // A C E G I K M O Q S + // ^ ^ + // key_start key_end/next_key + keys_out.clear(); + expect_keys.clear(); + s = db.PKScanRange(DataType::kZSets, "", "", "*", 10, &keys_out, &kvs_out, &next_key); + ASSERT_TRUE(s.ok()); + for (int32_t idx = 0; idx <= 9; ++idx) { + expect_keys.push_back(kvzset[idx].key); + } + ASSERT_TRUE(key_match(keys_out, expect_keys)); + ASSERT_EQ(next_key, ""); + + // ************************** Group 2 Test ************************** + // 0 1 2 3 4 5 6 7 8 9 + // A C E G I K M O Q S + // ^ ^ + // key_start key_end/next_key + keys_out.clear(); + expect_keys.clear(); + s = 
db.PKScanRange(DataType::kZSets, "PKSCANRANGE_B3", "", "*", 10, &keys_out, &kvs_out, &next_key); + ASSERT_TRUE(s.ok()); + for (int32_t idx = 1; idx <= 9; ++idx) { + expect_keys.push_back(kvzset[idx].key); + } + ASSERT_TRUE(key_match(keys_out, expect_keys)); + ASSERT_EQ(next_key, ""); + + // ************************** Group 3 Test ************************** + // 0 1 2 3 4 5 6 7 8 9 + // A C E G I K M O Q S + // ^ ^ + // key_start key_end + keys_out.clear(); + expect_keys.clear(); + s = db.PKScanRange(DataType::kZSets, "", "PKSCANRANGE_R3", "*", 10, &keys_out, &kvs_out, &next_key); + ASSERT_TRUE(s.ok()); + for (int32_t idx = 0; idx <= 8; ++idx) { + expect_keys.push_back(kvzset[idx].key); + } + ASSERT_TRUE(key_match(keys_out, expect_keys)); + ASSERT_EQ(next_key, ""); + + // ************************** Group 4 Test ************************** + // 0 1 2 3 4 5 6 7 8 9 + // A C E G I K M O Q S + // ^ ^ + // key_start key_end + keys_out.clear(); + expect_keys.clear(); + s = db.PKScanRange(DataType::kZSets, "PKSCANRANGE_D3", "PKSCANRANGE_P3", "*", 10, &keys_out, &kvs_out, &next_key); + ASSERT_TRUE(s.ok()); + for (int32_t idx = 2; idx <= 7; ++idx) { + expect_keys.push_back(kvzset[idx].key); + } + ASSERT_TRUE(key_match(keys_out, expect_keys)); + ASSERT_EQ(next_key, ""); + + // ************************** Group 5 Test ************************** + // 0 1 2 3 4 5 6 7 8 9 + // A C E G I K M O Q S + // ^ ^ + // key_start key_end + keys_out.clear(); + expect_keys.clear(); + s = db.PKScanRange(DataType::kZSets, "PKSCANRANGE_C3", "PKSCANRANGE_Q3", "*", 10, &keys_out, &kvs_out, &next_key); + ASSERT_TRUE(s.ok()); + for (int32_t idx = 1; idx <= 8; ++idx) { + expect_keys.push_back(kvzset[idx].key); + } + ASSERT_TRUE(key_match(keys_out, expect_keys)); + ASSERT_EQ(next_key, ""); + + // ************************** Group 6 Test ************************** + // 0 1 2 3 4 5 6 7 8 9 + // A C E G I K M O Q S + // ^ ^ + // key_start key_end + keys_out.clear(); + expect_keys.clear(); + s = db.PKScanRange(DataType::kZSets, "PKSCANRANGE_I3", "PKSCANRANGE_K3", "*", 10, &keys_out, &kvs_out, &next_key); + ASSERT_TRUE(s.ok()); + for (int32_t idx = 4; idx <= 5; ++idx) { + expect_keys.push_back(kvzset[idx].key); + } + ASSERT_TRUE(key_match(keys_out, expect_keys)); + ASSERT_EQ(next_key, ""); + + // ************************** Group 7 Test ************************** + // 0 1 2 3 4 5 6 7 8 9 + // A C E G I K M O Q S + // ^ + // key_start/key_end + keys_out.clear(); + expect_keys.clear(); + s = db.PKScanRange(DataType::kZSets, "PKSCANRANGE_I3", "PKSCANRANGE_I3", "*", 10, &keys_out, &kvs_out, &next_key); + ASSERT_TRUE(s.ok()); + for (int32_t idx = 4; idx <= 4; ++idx) { + expect_keys.push_back(kvzset[idx].key); + } + ASSERT_TRUE(key_match(keys_out, expect_keys)); + ASSERT_EQ(next_key, ""); + + // ************************** Group 8 Test ************************** + // 0 1 2 3 4 5 6 7 8 9 + // A C E G I K M O Q S + // ^ ^ + // key_end key_start + keys_out.clear(); + expect_keys.clear(); + s = db.PKScanRange(DataType::kZSets, "PKSCANRANGE_K3", "PKSCANRANGE_I3", "*", 10, &keys_out, &kvs_out, &next_key); + ASSERT_TRUE(s.IsInvalidArgument()); + ASSERT_TRUE(key_match(keys_out, expect_keys)); + ASSERT_EQ(next_key, ""); + + // ************************** Group 9 Test ************************** + // 0 1 2 3 4 5 6 7 8 9 + // A C E G I K M O Q S + // ^ ^ ^ + // key_start next_key key_end + keys_out.clear(); + expect_keys.clear(); + s = db.PKScanRange(DataType::kZSets, "PKSCANRANGE_C3", "PKSCANRANGE_Q3", "*", 5, &keys_out, &kvs_out, &next_key); + 
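+  // With limit 5 only the first five keys in [PKSCANRANGE_C3, PKSCANRANGE_Q3]
+  // are returned, and next_key names the first key that was not returned, so
+  // a follow-up call could resume the scan from "PKSCANRANGE_M3".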
ASSERT_TRUE(s.ok()); + for (int32_t idx = 1; idx <= 5; ++idx) { + expect_keys.push_back(kvzset[idx].key); + } + ASSERT_TRUE(key_match(keys_out, expect_keys)); + ASSERT_EQ(next_key, "PKSCANRANGE_M3"); + + // ************************** Group 10 Test ************************** + // 0 1 2 3 4 5 6 7 8 9 + // A C E G I K M O Q S + // ^ ^ ^ ^ + // key_start expire next_key key_end + ASSERT_TRUE(make_expired(&db, "PKSCANRANGE_G3")); + keys_out.clear(); + expect_keys.clear(); + s = db.PKScanRange(DataType::kZSets, "PKSCANRANGE_C3", "PKSCANRANGE_Q3", "*", 5, &keys_out, &kvs_out, &next_key); + ASSERT_TRUE(s.ok()); + for (int32_t idx = 1; idx <= 6; ++idx) { + if (idx != 3) { + expect_keys.push_back(kvzset[idx].key); + } + } + ASSERT_TRUE(key_match(keys_out, expect_keys)); + ASSERT_EQ(next_key, "PKSCANRANGE_O3"); + + // ************************** Group 11 Test ************************** + // 0 1 2 3 4 5 6 7 8 9 + // A C E G I K M O Q S + // ^ ^ ^ ^ + // key_start expire next_key key_end + keys_out.clear(); + expect_keys.clear(); + s = db.PKScanRange(DataType::kZSets, "PKSCANRANGE_C3", "PKSCANRANGE_Q3", "*", 2, &keys_out, &kvs_out, &next_key); + ASSERT_TRUE(s.ok()); + for (int32_t idx = 1; idx <= 2; ++idx) { + expect_keys.push_back(kvzset[idx].key); + } + ASSERT_TRUE(key_match(keys_out, expect_keys)); + ASSERT_EQ(next_key, "PKSCANRANGE_I3"); + + // ************************** Group 12 Test ************************** + // 0 1 2 3 4 5 6 7 8 9 + // A C E G I K M O Q S + // ^ ^ ^ ^ ^ + // key_start expire deleted next_key key_end + keys_out.clear(); + expect_keys.clear(); + db.Del({"PKSCANRANGE_I3"}); + s = db.PKScanRange(DataType::kZSets, "PKSCANRANGE_C3", "PKSCANRANGE_Q3", "*", 2, &keys_out, &kvs_out, &next_key); + ASSERT_TRUE(s.ok()); + for (int32_t idx = 1; idx <= 2; ++idx) { + if (idx != 3) { + expect_keys.push_back(kvzset[idx].key); + } + } + ASSERT_TRUE(key_match(keys_out, expect_keys)); + ASSERT_EQ(next_key, "PKSCANRANGE_K3"); + + //=============================== Lists =============================== + std::vector kvlist{{"PKSCANRANGE_A4", "VALUE"}, {"PKSCANRANGE_C4", "VALUE"}, {"PKSCANRANGE_E4", "VALUE"}, + {"PKSCANRANGE_G4", "VALUE"}, {"PKSCANRANGE_I4", "VALUE"}, {"PKSCANRANGE_K4", "VALUE"}, + {"PKSCANRANGE_M4", "VALUE"}, {"PKSCANRANGE_O4", "VALUE"}, {"PKSCANRANGE_Q4", "VALUE"}, + {"PKSCANRANGE_S4", "VALUE"}}; + for (const auto& kv : kvlist) { + s = db.LPush(kv.key, {"NODE"}, &ret_u64); + } + + // ************************** Group 1 Test ************************** + // 0 1 2 3 4 5 6 7 8 9 + // A C E G I K M O Q S + // ^ ^ + // key_start key_end/next_key + keys_out.clear(); + expect_keys.clear(); + s = db.PKScanRange(DataType::kLists, "", "", "*", 10, &keys_out, &kvs_out, &next_key); + ASSERT_TRUE(s.ok()); + for (int32_t idx = 0; idx <= 9; ++idx) { + expect_keys.push_back(kvlist[idx].key); + } + ASSERT_TRUE(key_match(keys_out, expect_keys)); + ASSERT_EQ(next_key, ""); + + // ************************** Group 2 Test ************************** + // 0 1 2 3 4 5 6 7 8 9 + // A C E G I K M O Q S + // ^ ^ + // key_start key_end/next_key + keys_out.clear(); + expect_keys.clear(); + s = db.PKScanRange(DataType::kLists, "PKSCANRANGE_B4", "", "*", 10, &keys_out, &kvs_out, &next_key); + ASSERT_TRUE(s.ok()); + for (int32_t idx = 1; idx <= 9; ++idx) { + expect_keys.push_back(kvlist[idx].key); + } + ASSERT_TRUE(key_match(keys_out, expect_keys)); + ASSERT_EQ(next_key, ""); + + // ************************** Group 3 Test ************************** + // 0 1 2 3 4 5 6 7 8 9 + // A C E G I K M O Q S + // ^ ^ + // 
key_start key_end + keys_out.clear(); + expect_keys.clear(); + s = db.PKScanRange(DataType::kLists, "", "PKSCANRANGE_R4", "*", 10, &keys_out, &kvs_out, &next_key); + ASSERT_TRUE(s.ok()); + for (int32_t idx = 0; idx <= 8; ++idx) { + expect_keys.push_back(kvlist[idx].key); + } + ASSERT_TRUE(key_match(keys_out, expect_keys)); + ASSERT_EQ(next_key, ""); + + // ************************** Group 4 Test ************************** + // 0 1 2 3 4 5 6 7 8 9 + // A C E G I K M O Q S + // ^ ^ + // key_start key_end + keys_out.clear(); + expect_keys.clear(); + s = db.PKScanRange(DataType::kLists, "PKSCANRANGE_D4", "PKSCANRANGE_P4", "*", 10, &keys_out, &kvs_out, &next_key); + ASSERT_TRUE(s.ok()); + for (int32_t idx = 2; idx <= 7; ++idx) { + expect_keys.push_back(kvlist[idx].key); + } + ASSERT_TRUE(key_match(keys_out, expect_keys)); + ASSERT_EQ(next_key, ""); + + // ************************** Group 5 Test ************************** + // 0 1 2 3 4 5 6 7 8 9 + // A C E G I K M O Q S + // ^ ^ + // key_start key_end + keys_out.clear(); + expect_keys.clear(); + s = db.PKScanRange(DataType::kLists, "PKSCANRANGE_C4", "PKSCANRANGE_Q4", "*", 10, &keys_out, &kvs_out, &next_key); + ASSERT_TRUE(s.ok()); + for (int32_t idx = 1; idx <= 8; ++idx) { + expect_keys.push_back(kvlist[idx].key); + } + ASSERT_TRUE(key_match(keys_out, expect_keys)); + ASSERT_EQ(next_key, ""); + + // ************************** Group 6 Test ************************** + // 0 1 2 3 4 5 6 7 8 9 + // A C E G I K M O Q S + // ^ ^ + // key_start key_end + keys_out.clear(); + expect_keys.clear(); + s = db.PKScanRange(DataType::kLists, "PKSCANRANGE_I4", "PKSCANRANGE_K4", "*", 10, &keys_out, &kvs_out, &next_key); + ASSERT_TRUE(s.ok()); + for (int32_t idx = 4; idx <= 5; ++idx) { + expect_keys.push_back(kvlist[idx].key); + } + ASSERT_TRUE(key_match(keys_out, expect_keys)); + ASSERT_EQ(next_key, ""); + + // ************************** Group 7 Test ************************** + // 0 1 2 3 4 5 6 7 8 9 + // A C E G I K M O Q S + // ^ + // key_start/key_end + keys_out.clear(); + expect_keys.clear(); + s = db.PKScanRange(DataType::kLists, "PKSCANRANGE_I4", "PKSCANRANGE_I4", "*", 10, &keys_out, &kvs_out, &next_key); + ASSERT_TRUE(s.ok()); + for (int32_t idx = 4; idx <= 4; ++idx) { + expect_keys.push_back(kvlist[idx].key); + } + ASSERT_TRUE(key_match(keys_out, expect_keys)); + ASSERT_EQ(next_key, ""); + + // ************************** Group 8 Test ************************** + // 0 1 2 3 4 5 6 7 8 9 + // A C E G I K M O Q S + // ^ ^ + // key_end key_start + keys_out.clear(); + expect_keys.clear(); + s = db.PKScanRange(DataType::kLists, "PKSCANRANGE_K4", "PKSCANRANGE_I4", "*", 10, &keys_out, &kvs_out, &next_key); + ASSERT_TRUE(s.IsInvalidArgument()); + ASSERT_TRUE(key_match(keys_out, expect_keys)); + ASSERT_EQ(next_key, ""); + + // ************************** Group 9 Test ************************** + // 0 1 2 3 4 5 6 7 8 9 + // A C E G I K M O Q S + // ^ ^ ^ + // key_start next_key key_end + keys_out.clear(); + expect_keys.clear(); + s = db.PKScanRange(DataType::kLists, "PKSCANRANGE_C4", "PKSCANRANGE_Q4", "*", 5, &keys_out, &kvs_out, &next_key); + ASSERT_TRUE(s.ok()); + for (int32_t idx = 1; idx <= 5; ++idx) { + expect_keys.push_back(kvlist[idx].key); + } + ASSERT_TRUE(key_match(keys_out, expect_keys)); + ASSERT_EQ(next_key, "PKSCANRANGE_M4"); + + // ************************** Group 10 Test ************************** + // 0 1 2 3 4 5 6 7 8 9 + // A C E G I K M O Q S + // ^ ^ ^ ^ + // key_start expire next_key key_end + ASSERT_TRUE(make_expired(&db, "PKSCANRANGE_G4")); + 
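+  // make_expired() (defined above) gives "PKSCANRANGE_G4" a one-second TTL
+  // and sleeps past it, so the expired key must be skipped by the scan below.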
+  keys_out.clear();
+  expect_keys.clear();
+  s = db.PKScanRange(DataType::kLists, "PKSCANRANGE_C4", "PKSCANRANGE_Q4", "*", 5, &keys_out, &kvs_out, &next_key);
+  ASSERT_TRUE(s.ok());
+  for (int32_t idx = 1; idx <= 6; ++idx) {
+    if (idx != 3) {
+      expect_keys.push_back(kvlist[idx].key);
+    }
+  }
+  ASSERT_TRUE(key_match(keys_out, expect_keys));
+  ASSERT_EQ(next_key, "PKSCANRANGE_O4");
+
+  // ************************** Group 11 Test **************************
+  //  0     1     2     3     4     5     6     7     8     9
+  //  A     C     E     G     I     K     M     O     Q     S
+  //        ^           ^     ^                       ^
+  //     key_start   expire  next_key              key_end
+  keys_out.clear();
+  expect_keys.clear();
+  s = db.PKScanRange(DataType::kLists, "PKSCANRANGE_C4", "PKSCANRANGE_Q4", "*", 2, &keys_out, &kvs_out, &next_key);
+  ASSERT_TRUE(s.ok());
+  for (int32_t idx = 1; idx <= 2; ++idx) {
+    expect_keys.push_back(kvlist[idx].key);
+  }
+  ASSERT_TRUE(key_match(keys_out, expect_keys));
+  ASSERT_EQ(next_key, "PKSCANRANGE_I4");
+
+  // ************************** Group 12 Test **************************
+  //  0     1     2     3     4     5     6     7     8     9
+  //  A     C     E     G     I     K     M     O     Q     S
+  //        ^           ^     ^     ^                 ^
+  //  key_start    expire  deleted  next_key       key_end
+  keys_out.clear();
+  expect_keys.clear();
+  db.Del({"PKSCANRANGE_I4"});
+  s = db.PKScanRange(DataType::kLists, "PKSCANRANGE_C4", "PKSCANRANGE_Q4", "*", 2, &keys_out, &kvs_out, &next_key);
+  ASSERT_TRUE(s.ok());
+  for (int32_t idx = 1; idx <= 2; ++idx) {
+    expect_keys.push_back(kvlist[idx].key);
+  }
+  ASSERT_TRUE(key_match(keys_out, expect_keys));
+  ASSERT_EQ(next_key, "PKSCANRANGE_K4");
+
+  db.Del(keys_del);
+  sleep(2);
+  db.Compact(DataType::kAll, true);
+}
+
+// PKRScanRange
+// Note: This test needs to run first, because all of its data is
+// predetermined.
+TEST_F(KeysTest, PKRScanRangeTest) {  // NOLINT
+  int32_t ret;
+  uint64_t ret_u64;
+  std::string next_key;
+  std::vector<std::string> keys_del;
+  std::vector<std::string> keys_out;
+  std::vector<std::string> expect_keys;
+  std::map<storage::DataType, storage::Status> type_status;
+  std::vector<storage::KeyValue> kvs_out;
+  std::vector<storage::KeyValue> expect_kvs;
+  std::vector<storage::KeyValue> kvs{{"PKRSCANRANGE_A", "VALUE"}, {"PKRSCANRANGE_C", "VALUE"},
+                                     {"PKRSCANRANGE_E", "VALUE"}, {"PKRSCANRANGE_G", "VALUE"},
+                                     {"PKRSCANRANGE_I", "VALUE"}, {"PKRSCANRANGE_K", "VALUE"},
+                                     {"PKRSCANRANGE_M", "VALUE"}, {"PKRSCANRANGE_O", "VALUE"},
+                                     {"PKRSCANRANGE_Q", "VALUE"}, {"PKRSCANRANGE_S", "VALUE"}};
+  keys_del.reserve(kvs.size());
+  for (const auto& kv : kvs) {
+    keys_del.push_back(kv.key);
+  }
+
+  //=============================== Strings ===============================
+  s = db.MSet(kvs);
+  ASSERT_TRUE(s.ok());
+
+  // ************************** Group 1 Test **************************
+  //  0     1     2     3     4     5     6     7     8     9
+  //  A     C     E     G     I     K     M     O     Q     S
+  //  ^                                                     ^
+  //  key_end/next_key                              key_start
+  kvs_out.clear();
+  expect_kvs.clear();
+  s = db.PKRScanRange(DataType::kStrings, "", "", "*", 10, &keys_out, &kvs_out, &next_key);
+  ASSERT_TRUE(s.ok());
+  for (int32_t idx = 9; idx >= 0; --idx) {
+    expect_kvs.push_back(kvs[idx]);
+  }
+  ASSERT_TRUE(key_value_match(kvs_out, expect_kvs));
+  ASSERT_EQ(next_key, "");
+
+  // ************************** Group 2 Test **************************
+  //  0     1     2     3     4     5     6     7     8     9
+  //  A     C     E     G     I     K     M     O     Q     S
+  //     ^                                                  ^
+  //  key_end                                       key_start
+  kvs_out.clear();
+  expect_kvs.clear();
+  s = db.PKRScanRange(DataType::kStrings, "", "PKRSCANRANGE_B", "*", 10, &keys_out, &kvs_out, &next_key);
+  ASSERT_TRUE(s.ok());
+  for (int32_t idx = 9; idx >= 1; --idx) {
+    expect_kvs.push_back(kvs[idx]);
+  }
+  ASSERT_TRUE(key_value_match(kvs_out, expect_kvs));
+  ASSERT_EQ(next_key, "");
+
+  // ************************** Group 3 Test 
************************** + // 0 1 2 3 4 5 6 7 8 9 + // A C E G I K M O Q S + // ^ ^ + // key_end/next_key key_start + kvs_out.clear(); + expect_kvs.clear(); + s = db.PKRScanRange(DataType::kStrings, "PKRSCANRANGE_R", "", "*", 10, &keys_out, &kvs_out, &next_key); + ASSERT_TRUE(s.ok()); + for (int32_t idx = 8; idx >= 0; --idx) { + expect_kvs.push_back(kvs[idx]); + } + ASSERT_TRUE(key_value_match(kvs_out, expect_kvs)); + ASSERT_EQ(next_key, ""); + + // ************************** Group 4 Test ************************** + // 0 1 2 3 4 5 6 7 8 9 + // A C E G I K M O Q S + // ^ ^ + // key_end key_start + kvs_out.clear(); + expect_kvs.clear(); + s = db.PKRScanRange(DataType::kStrings, "PKRSCANRANGE_P", "PKRSCANRANGE_D", "*", 10, &keys_out, &kvs_out, &next_key); + ASSERT_TRUE(s.ok()); + for (int32_t idx = 7; idx >= 2; --idx) { + expect_kvs.push_back(kvs[idx]); + } + ASSERT_TRUE(key_value_match(kvs_out, expect_kvs)); + ASSERT_EQ(next_key, ""); + + // ************************** Group 5 Test ************************** + // 0 1 2 3 4 5 6 7 8 9 + // A C E G I K M O Q S + // ^ ^ + // key_end key_start + kvs_out.clear(); + expect_kvs.clear(); + s = db.PKRScanRange(DataType::kStrings, "PKRSCANRANGE_Q", "PKRSCANRANGE_C", "*", 10, &keys_out, &kvs_out, &next_key); + ASSERT_TRUE(s.ok()); + for (int32_t idx = 8; idx >= 1; --idx) { + expect_kvs.push_back(kvs[idx]); + } + ASSERT_TRUE(key_value_match(kvs_out, expect_kvs)); + ASSERT_EQ(next_key, ""); + + // ************************** Group 6 Test ************************** + // 0 1 2 3 4 5 6 7 8 9 + // A C E G I K M O Q S + // ^ ^ + // key_end key_start + kvs_out.clear(); + expect_kvs.clear(); + s = db.PKRScanRange(DataType::kStrings, "PKRSCANRANGE_K", "PKRSCANRANGE_I", "*", 10, &keys_out, &kvs_out, &next_key); + ASSERT_TRUE(s.ok()); + for (int32_t idx = 5; idx >= 4; --idx) { + expect_kvs.push_back(kvs[idx]); + } + ASSERT_TRUE(key_value_match(kvs_out, expect_kvs)); + ASSERT_EQ(next_key, ""); + + // ************************** Group 7 Test ************************** + // 0 1 2 3 4 5 6 7 8 9 + // A C E G I K M O Q S + // ^ + // key_start/key_end + kvs_out.clear(); + expect_kvs.clear(); + s = db.PKRScanRange(DataType::kStrings, "PKRSCANRANGE_I", "PKRSCANRANGE_I", "*", 10, &keys_out, &kvs_out, &next_key); + ASSERT_TRUE(s.ok()); + for (int32_t idx = 4; idx >= 4; --idx) { + expect_kvs.push_back(kvs[idx]); + } + ASSERT_TRUE(key_value_match(kvs_out, expect_kvs)); + ASSERT_EQ(next_key, ""); + + // ************************** Group 8 Test ************************** + // 0 1 2 3 4 5 6 7 8 9 + // A C E G I K M O Q S + // ^ ^ + // key_start key_end + kvs_out.clear(); + expect_kvs.clear(); + s = db.PKRScanRange(DataType::kStrings, "PKRSCANRANGE_I", "PKRSCANRANGE_K", "*", 10, &keys_out, &kvs_out, &next_key); + ASSERT_TRUE(s.IsInvalidArgument()); + ASSERT_TRUE(key_value_match(kvs_out, expect_kvs)); + ASSERT_EQ(next_key, ""); + + // ************************** Group 9 Test ************************** + // 0 1 2 3 4 5 6 7 8 9 + // A C E G I K M O Q S + // ^ ^ ^ + // key_end next_key key_start + kvs_out.clear(); + expect_kvs.clear(); + s = db.PKRScanRange(DataType::kStrings, "PKRSCANRANGE_Q", "PKRSCANRANGE_C", "*", 5, &keys_out, &kvs_out, &next_key); + ASSERT_TRUE(s.ok()); + for (int32_t idx = 8; idx >= 4; --idx) { + expect_kvs.push_back(kvs[idx]); + } + ASSERT_TRUE(key_value_match(kvs_out, expect_kvs)); + ASSERT_EQ(next_key, "PKRSCANRANGE_G"); + + // ************************** Group 10 Test ************************** + // 0 1 2 3 4 5 6 7 8 9 + // A C E G I K M O Q S + // ^ ^ ^ ^ + // 
key_end next_key expire key_start + kvs_out.clear(); + expect_kvs.clear(); + ASSERT_TRUE(make_expired(&db, "PKRSCANRANGE_M")); + s = db.PKRScanRange(DataType::kStrings, "PKRSCANRANGE_Q", "PKRSCANRANGE_C", "*", 5, &keys_out, &kvs_out, &next_key); + ASSERT_TRUE(s.ok()); + for (int32_t idx = 8; idx >= 3; --idx) { + if (idx != 6) { + expect_kvs.push_back(kvs[idx]); + } + } + ASSERT_TRUE(key_value_match(kvs_out, expect_kvs)); + ASSERT_EQ(next_key, "PKRSCANRANGE_E"); + + // ************************** Group 11 Test ************************** + // 0 1 2 3 4 5 6 7 8 9 + // A C E G I K M O Q S + // ^ ^ ^ ^ + // key_end next_key expire key_start + kvs_out.clear(); + expect_kvs.clear(); + s = db.PKRScanRange(DataType::kStrings, "PKRSCANRANGE_Q", "PKRSCANRANGE_C", "*", 2, &keys_out, &kvs_out, &next_key); + ASSERT_TRUE(s.ok()); + for (int32_t idx = 8; idx >= 7; --idx) { + expect_kvs.push_back(kvs[idx]); + } + ASSERT_TRUE(key_value_match(kvs_out, expect_kvs)); + ASSERT_EQ(next_key, "PKRSCANRANGE_K"); + + //=============================== Sets =============================== + std::vector<KeyValue> kvset{{"PKRSCANRANGE_A1", "VALUE"}, {"PKRSCANRANGE_C1", "VALUE"}, + {"PKRSCANRANGE_E1", "VALUE"}, {"PKRSCANRANGE_G1", "VALUE"}, + {"PKRSCANRANGE_I1", "VALUE"}, {"PKRSCANRANGE_K1", "VALUE"}, + {"PKRSCANRANGE_M1", "VALUE"}, {"PKRSCANRANGE_O1", "VALUE"}, + {"PKRSCANRANGE_Q1", "VALUE"}, {"PKRSCANRANGE_S1", "VALUE"}}; + for (const auto& kv : kvset) { + s = db.SAdd(kv.key, {"MEMBER"}, &ret); + } + + // ************************** Group 1 Test ************************** + // 0 1 2 3 4 5 6 7 8 9 + // A C E G I K M O Q S + // ^ ^ + // key_end/next_key key_start + keys_out.clear(); + expect_keys.clear(); + s = db.PKRScanRange(DataType::kSets, "", "", "*", 10, &keys_out, &kvs_out, &next_key); + ASSERT_TRUE(s.ok()); + for (int32_t idx = 9; idx >= 0; --idx) { + expect_keys.push_back(kvset[idx].key); + } + ASSERT_TRUE(key_match(keys_out, expect_keys)); + ASSERT_EQ(next_key, ""); + + // ************************** Group 2 Test ************************** + // 0 1 2 3 4 5 6 7 8 9 + // A C E G I K M O Q S + // ^ ^ + // key_end key_start + keys_out.clear(); + expect_keys.clear(); + s = db.PKRScanRange(DataType::kSets, "", "PKRSCANRANGE_B1", "*", 10, &keys_out, &kvs_out, &next_key); + ASSERT_TRUE(s.ok()); + for (int32_t idx = 9; idx >= 1; --idx) { + expect_keys.push_back(kvset[idx].key); + } + ASSERT_TRUE(key_match(keys_out, expect_keys)); + ASSERT_EQ(next_key, ""); + + // ************************** Group 3 Test ************************** + // 0 1 2 3 4 5 6 7 8 9 + // A C E G I K M O Q S + // ^ ^ + // key_end/next_key key_start + keys_out.clear(); + expect_keys.clear(); + s = db.PKRScanRange(DataType::kSets, "PKRSCANRANGE_R1", "", "*", 10, &keys_out, &kvs_out, &next_key); + ASSERT_TRUE(s.ok()); + for (int32_t idx = 8; idx >= 0; --idx) { + expect_keys.push_back(kvset[idx].key); + } + ASSERT_TRUE(key_match(keys_out, expect_keys)); + ASSERT_EQ(next_key, ""); + + // ************************** Group 4 Test ************************** + // 0 1 2 3 4 5 6 7 8 9 + // A C E G I K M O Q S + // ^ ^ + // key_end key_start + keys_out.clear(); + expect_keys.clear(); + s = db.PKRScanRange(DataType::kSets, "PKRSCANRANGE_P1", "PKRSCANRANGE_D1", "*", 10, &keys_out, &kvs_out, &next_key); + ASSERT_TRUE(s.ok()); + for (int32_t idx = 7; idx >= 2; --idx) { + expect_keys.push_back(kvset[idx].key); + } + ASSERT_TRUE(key_match(keys_out, expect_keys)); + ASSERT_EQ(next_key, ""); + + // ************************** Group 5 Test ************************** + // 0 1 2 3 4 5 6
7 8 9 + // A C E G I K M O Q S + // ^ ^ + // key_end key_start + keys_out.clear(); + expect_keys.clear(); + s = db.PKRScanRange(DataType::kSets, "PKRSCANRANGE_Q1", "PKRSCANRANGE_C1", "*", 10, &keys_out, &kvs_out, &next_key); + ASSERT_TRUE(s.ok()); + for (int32_t idx = 8; idx >= 1; --idx) { + expect_keys.push_back(kvset[idx].key); + } + ASSERT_TRUE(key_match(keys_out, expect_keys)); + ASSERT_EQ(next_key, ""); + + // ************************** Group 6 Test ************************** + // 0 1 2 3 4 5 6 7 8 9 + // A C E G I K M O Q S + // ^ ^ + // key_end key_start + keys_out.clear(); + expect_keys.clear(); + s = db.PKRScanRange(DataType::kSets, "PKRSCANRANGE_K1", "PKRSCANRANGE_I1", "*", 10, &keys_out, &kvs_out, &next_key); + ASSERT_TRUE(s.ok()); + for (int32_t idx = 5; idx >= 4; --idx) { + expect_keys.push_back(kvset[idx].key); + } + ASSERT_TRUE(key_match(keys_out, expect_keys)); + ASSERT_EQ(next_key, ""); + + // ************************** Group 7 Test ************************** + // 0 1 2 3 4 5 6 7 8 9 + // A C E G I K M O Q S + // ^ + // key_start/key_end + keys_out.clear(); + expect_keys.clear(); + s = db.PKRScanRange(DataType::kSets, "PKRSCANRANGE_I1", "PKRSCANRANGE_I1", "*", 10, &keys_out, &kvs_out, &next_key); + ASSERT_TRUE(s.ok()); + for (int32_t idx = 4; idx >= 4; --idx) { + expect_keys.push_back(kvset[idx].key); + } + ASSERT_TRUE(key_match(keys_out, expect_keys)); + ASSERT_EQ(next_key, ""); + + // ************************** Group 8 Test ************************** + // 0 1 2 3 4 5 6 7 8 9 + // A C E G I K M O Q S + // ^ ^ + // key_start key_end + keys_out.clear(); + expect_keys.clear(); + s = db.PKRScanRange(DataType::kSets, "PKRSCANRANGE_I1", "PKRSCANRANGE_K1", "*", 10, &keys_out, &kvs_out, &next_key); + ASSERT_TRUE(s.IsInvalidArgument()); + ASSERT_TRUE(key_match(keys_out, expect_keys)); + ASSERT_EQ(next_key, ""); + + // ************************** Group 9 Test ************************** + // 0 1 2 3 4 5 6 7 8 9 + // A C E G I K M O Q S + // ^ ^ ^ + // key_end next_key key_start + keys_out.clear(); + expect_keys.clear(); + s = db.PKRScanRange(DataType::kSets, "PKRSCANRANGE_Q1", "PKRSCANRANGE_C1", "*", 5, &keys_out, &kvs_out, &next_key); + ASSERT_TRUE(s.ok()); + for (int32_t idx = 8; idx >= 4; --idx) { + expect_keys.push_back(kvset[idx].key); + } + ASSERT_TRUE(key_match(keys_out, expect_keys)); + ASSERT_EQ(next_key, "PKRSCANRANGE_G1"); + + // ************************** Group 10 Test ************************** + // 0 1 2 3 4 5 6 7 8 9 + // A C E G I K M O Q S + // ^ ^ ^ ^ + // key_end next_key expire key_start + keys_out.clear(); + expect_keys.clear(); + ASSERT_TRUE(make_expired(&db, "PKRSCANRANGE_M1")); + s = db.PKRScanRange(DataType::kSets, "PKRSCANRANGE_Q1", "PKRSCANRANGE_C1", "*", 5, &keys_out, &kvs_out, &next_key); + ASSERT_TRUE(s.ok()); + for (int32_t idx = 8; idx >= 3; --idx) { + if (idx != 6) { + expect_keys.push_back(kvset[idx].key); + } + } + ASSERT_TRUE(key_match(keys_out, expect_keys)); + ASSERT_EQ(next_key, "PKRSCANRANGE_E1"); + + // ************************** Group 11 Test ************************** + // 0 1 2 3 4 5 6 7 8 9 + // A C E G I K M O Q S + // ^ ^ ^ ^ ^ + // key_end next_key empty expire key_start + keys_out.clear(); + expect_keys.clear(); + s = db.SRem("PKRSCANRANGE_I1", {"MEMBER"}, &ret); + ASSERT_TRUE(s.ok()); + s = db.PKRScanRange(DataType::kSets, "PKRSCANRANGE_Q1", "PKRSCANRANGE_C1", "*", 4, &keys_out, &kvs_out, &next_key); + ASSERT_TRUE(s.ok()); + for (int32_t idx = 8; idx >= 3; --idx) { + if (idx != 6 && idx != 4) { + expect_keys.push_back(kvset[idx].key); 
+ } + } + ASSERT_TRUE(key_match(keys_out, expect_keys)); + ASSERT_EQ(next_key, "PKRSCANRANGE_E1"); + + // ************************** Group 12 Test ************************** + // 0 1 2 3 4 5 6 7 8 9 + // A C E G I K M O Q S + // ^ ^ ^ ^ ^ + // key_end empty next_key expire key_start + keys_out.clear(); + expect_keys.clear(); + s = db.PKRScanRange(DataType::kSets, "PKRSCANRANGE_Q1", "PKRSCANRANGE_C1", "*", 2, &keys_out, &kvs_out, &next_key); + ASSERT_TRUE(s.ok()); + for (int32_t idx = 8; idx >= 7; --idx) { + expect_keys.push_back(kvset[idx].key); + } + ASSERT_TRUE(key_match(keys_out, expect_keys)); + ASSERT_EQ(next_key, "PKRSCANRANGE_K1"); + + // ************************** Group 13 Test ************************** + // 0 1 2 3 4 5 6 7 8 9 + // A C E G I K M O Q S + // ^ ^ ^ ^ ^ + // key_end next_key empty expire key_start + keys_out.clear(); + expect_keys.clear(); + s = db.PKRScanRange(DataType::kSets, "PKRSCANRANGE_Q1", "PKRSCANRANGE_C1", "*", 3, &keys_out, &kvs_out, &next_key); + ASSERT_TRUE(s.ok()); + for (int32_t idx = 8; idx >= 5; --idx) { + if (idx != 6) { + expect_keys.push_back(kvset[idx].key); + } + } + ASSERT_TRUE(key_match(keys_out, expect_keys)); + ASSERT_EQ(next_key, "PKRSCANRANGE_G1"); + + //=============================== Hashes =============================== + std::vector<KeyValue> kvhash{{"PKRSCANRANGE_A2", "VALUE"}, {"PKRSCANRANGE_C2", "VALUE"}, + {"PKRSCANRANGE_E2", "VALUE"}, {"PKRSCANRANGE_G2", "VALUE"}, + {"PKRSCANRANGE_I2", "VALUE"}, {"PKRSCANRANGE_K2", "VALUE"}, + {"PKRSCANRANGE_M2", "VALUE"}, {"PKRSCANRANGE_O2", "VALUE"}, + {"PKRSCANRANGE_Q2", "VALUE"}, {"PKRSCANRANGE_S2", "VALUE"}}; + for (const auto& kv : kvhash) { + s = db.HMSet(kv.key, {{"FIELD", "VALUE"}}); + } + + // ************************** Group 1 Test ************************** + // 0 1 2 3 4 5 6 7 8 9 + // A C E G I K M O Q S + // ^ ^ + // key_end/next_key key_start + keys_out.clear(); + expect_keys.clear(); + s = db.PKRScanRange(DataType::kHashes, "", "", "*", 10, &keys_out, &kvs_out, &next_key); + ASSERT_TRUE(s.ok()); + for (int32_t idx = 9; idx >= 0; --idx) { + expect_keys.push_back(kvhash[idx].key); + } + ASSERT_TRUE(key_match(keys_out, expect_keys)); + ASSERT_EQ(next_key, ""); + + // ************************** Group 2 Test ************************** + // 0 1 2 3 4 5 6 7 8 9 + // A C E G I K M O Q S + // ^ ^ + // key_end key_start + keys_out.clear(); + expect_keys.clear(); + s = db.PKRScanRange(DataType::kHashes, "", "PKRSCANRANGE_B2", "*", 10, &keys_out, &kvs_out, &next_key); + ASSERT_TRUE(s.ok()); + for (int32_t idx = 9; idx >= 1; --idx) { + expect_keys.push_back(kvhash[idx].key); + } + ASSERT_TRUE(key_match(keys_out, expect_keys)); + ASSERT_EQ(next_key, ""); + + // ************************** Group 3 Test ************************** + // 0 1 2 3 4 5 6 7 8 9 + // A C E G I K M O Q S + // ^ ^ + // key_end/next_key key_start + keys_out.clear(); + expect_keys.clear(); + s = db.PKRScanRange(DataType::kHashes, "PKRSCANRANGE_R2", "", "*", 10, &keys_out, &kvs_out, &next_key); + ASSERT_TRUE(s.ok()); + for (int32_t idx = 8; idx >= 0; --idx) { + expect_keys.push_back(kvhash[idx].key); + } + ASSERT_TRUE(key_match(keys_out, expect_keys)); + ASSERT_EQ(next_key, ""); + + // ************************** Group 4 Test ************************** + // 0 1 2 3 4 5 6 7 8 9 + // A C E G I K M O Q S + // ^ ^ + // key_end key_start + keys_out.clear(); + expect_keys.clear(); + s = db.PKRScanRange(DataType::kHashes, "PKRSCANRANGE_P2", "PKRSCANRANGE_D2", "*", 10, &keys_out, &kvs_out, &next_key); + ASSERT_TRUE(s.ok()); + for (int32_t idx =
7; idx >= 2; --idx) { + expect_keys.push_back(kvhash[idx].key); + } + ASSERT_TRUE(key_match(keys_out, expect_keys)); + ASSERT_EQ(next_key, ""); + + // ************************** Group 5 Test ************************** + // 0 1 2 3 4 5 6 7 8 9 + // A C E G I K M O Q S + // ^ ^ + // key_end key_start + keys_out.clear(); + expect_keys.clear(); + s = db.PKRScanRange(DataType::kHashes, "PKRSCANRANGE_Q2", "PKRSCANRANGE_C2", "*", 10, &keys_out, &kvs_out, &next_key); + ASSERT_TRUE(s.ok()); + for (int32_t idx = 8; idx >= 1; --idx) { + expect_keys.push_back(kvhash[idx].key); + } + ASSERT_TRUE(key_match(keys_out, expect_keys)); + ASSERT_EQ(next_key, ""); + + // ************************** Group 6 Test ************************** + // 0 1 2 3 4 5 6 7 8 9 + // A C E G I K M O Q S + // ^ ^ + // key_end key_start + keys_out.clear(); + expect_keys.clear(); + s = db.PKRScanRange(DataType::kHashes, "PKRSCANRANGE_K2", "PKRSCANRANGE_I2", "*", 10, &keys_out, &kvs_out, &next_key); + ASSERT_TRUE(s.ok()); + for (int32_t idx = 5; idx >= 4; --idx) { + expect_keys.push_back(kvhash[idx].key); + } + ASSERT_TRUE(key_match(keys_out, expect_keys)); + ASSERT_EQ(next_key, ""); + + // ************************** Group 7 Test ************************** + // 0 1 2 3 4 5 6 7 8 9 + // A C E G I K M O Q S + // ^ + // key_start/key_end + keys_out.clear(); + expect_keys.clear(); + s = db.PKRScanRange(DataType::kHashes, "PKRSCANRANGE_I2", "PKRSCANRANGE_I2", "*", 10, &keys_out, &kvs_out, &next_key); + ASSERT_TRUE(s.ok()); + for (int32_t idx = 4; idx >= 4; --idx) { + expect_keys.push_back(kvhash[idx].key); + } + ASSERT_TRUE(key_match(keys_out, expect_keys)); + ASSERT_EQ(next_key, ""); + + // ************************** Group 8 Test ************************** + // 0 1 2 3 4 5 6 7 8 9 + // A C E G I K M O Q S + // ^ ^ + // key_start key_end + keys_out.clear(); + expect_keys.clear(); + s = db.PKRScanRange(DataType::kHashes, "PKRSCANRANGE_I2", "PKRSCANRANGE_K2", "*", 10, &keys_out, &kvs_out, &next_key); + ASSERT_TRUE(s.IsInvalidArgument()); + ASSERT_TRUE(key_match(keys_out, expect_keys)); + ASSERT_EQ(next_key, ""); + + // ************************** Group 9 Test ************************** + // 0 1 2 3 4 5 6 7 8 9 + // A C E G I K M O Q S + // ^ ^ ^ + // key_end next_key key_start + keys_out.clear(); + expect_keys.clear(); + s = db.PKRScanRange(DataType::kHashes, "PKRSCANRANGE_Q2", "PKRSCANRANGE_C2", "*", 5, &keys_out, &kvs_out, &next_key); + ASSERT_TRUE(s.ok()); + for (int32_t idx = 8; idx >= 4; --idx) { + expect_keys.push_back(kvhash[idx].key); + } + ASSERT_TRUE(key_match(keys_out, expect_keys)); + ASSERT_EQ(next_key, "PKRSCANRANGE_G2"); + + // ************************** Group 10 Test ************************** + // 0 1 2 3 4 5 6 7 8 9 + // A C E G I K M O Q S + // ^ ^ ^ ^ + // key_end next_key expire key_start + keys_out.clear(); + expect_keys.clear(); + ASSERT_TRUE(make_expired(&db, "PKRSCANRANGE_M2")); + s = db.PKRScanRange(DataType::kHashes, "PKRSCANRANGE_Q2", "PKRSCANRANGE_C2", "*", 5, &keys_out, &kvs_out, &next_key); + ASSERT_TRUE(s.ok()); + for (int32_t idx = 8; idx >= 3; --idx) { + if (idx != 6) { + expect_keys.push_back(kvhash[idx].key); + } + } + ASSERT_TRUE(key_match(keys_out, expect_keys)); + ASSERT_EQ(next_key, "PKRSCANRANGE_E2"); + + // ************************** Group 11 Test ************************** + // 0 1 2 3 4 5 6 7 8 9 + // A C E G I K M O Q S + // ^ ^ ^ ^ ^ + // key_end next_key empty expire key_start + keys_out.clear(); + expect_keys.clear(); + s = db.HDel("PKRSCANRANGE_I2", {"FIELD"}, &ret); + ASSERT_TRUE(s.ok()); + 
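// PKRSCANRANGE_I2 (idx 4) is now an empty hash, so the scan below must skip it in addition to the expired idx 6. +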
s = db.PKRScanRange(DataType::kHashes, "PKRSCANRANGE_Q2", "PKRSCANRANGE_C2", "*", 4, &keys_out, &kvs_out, &next_key); + ASSERT_TRUE(s.ok()); + for (int32_t idx = 8; idx >= 3; --idx) { + if (idx != 6 && idx != 4) { + expect_keys.push_back(kvhash[idx].key); + } + } + ASSERT_TRUE(key_match(keys_out, expect_keys)); + ASSERT_EQ(next_key, "PKRSCANRANGE_E2"); + + // ************************** Group 12 Test ************************** + // 0 1 2 3 4 5 6 7 8 9 + // A C E G I K M O Q S + // ^ ^ ^ ^ ^ + // key_end empty next_key expire key_start + keys_out.clear(); + expect_keys.clear(); + s = db.PKRScanRange(DataType::kHashes, "PKRSCANRANGE_Q2", "PKRSCANRANGE_C2", "*", 2, &keys_out, &kvs_out, &next_key); + ASSERT_TRUE(s.ok()); + for (int32_t idx = 8; idx >= 7; --idx) { + expect_keys.push_back(kvhash[idx].key); + } + ASSERT_TRUE(key_match(keys_out, expect_keys)); + ASSERT_EQ(next_key, "PKRSCANRANGE_K2"); + + // ************************** Group 13 Test ************************** + // 0 1 2 3 4 5 6 7 8 9 + // A C E G I K M O Q S + // ^ ^ ^ ^ ^ + // key_end next_key empty expire key_start + keys_out.clear(); + expect_keys.clear(); + s = db.PKRScanRange(DataType::kHashes, "PKRSCANRANGE_Q2", "PKRSCANRANGE_C2", "*", 3, &keys_out, &kvs_out, &next_key); + ASSERT_TRUE(s.ok()); + for (int32_t idx = 8; idx >= 5; --idx) { + if (idx != 6) { + expect_keys.push_back(kvhash[idx].key); + } + } + ASSERT_TRUE(key_match(keys_out, expect_keys)); + ASSERT_EQ(next_key, "PKRSCANRANGE_G2"); + + //=============================== ZSets =============================== + std::vector<KeyValue> kvzset{{"PKRSCANRANGE_A3", "VALUE"}, {"PKRSCANRANGE_C3", "VALUE"}, + {"PKRSCANRANGE_E3", "VALUE"}, {"PKRSCANRANGE_G3", "VALUE"}, + {"PKRSCANRANGE_I3", "VALUE"}, {"PKRSCANRANGE_K3", "VALUE"}, + {"PKRSCANRANGE_M3", "VALUE"}, {"PKRSCANRANGE_O3", "VALUE"}, + {"PKRSCANRANGE_Q3", "VALUE"}, {"PKRSCANRANGE_S3", "VALUE"}}; + for (const auto& kv : kvzset) { + s = db.ZAdd(kv.key, {{1, "MEMBER"}}, &ret); + } + + // ************************** Group 1 Test ************************** + // 0 1 2 3 4 5 6 7 8 9 + // A C E G I K M O Q S + // ^ ^ + // key_end/next_key key_start + keys_out.clear(); + expect_keys.clear(); + s = db.PKRScanRange(DataType::kZSets, "", "", "*", 10, &keys_out, &kvs_out, &next_key); + ASSERT_TRUE(s.ok()); + for (int32_t idx = 9; idx >= 0; --idx) { + expect_keys.push_back(kvzset[idx].key); + } + ASSERT_TRUE(key_match(keys_out, expect_keys)); + ASSERT_EQ(next_key, ""); + + // ************************** Group 2 Test ************************** + // 0 1 2 3 4 5 6 7 8 9 + // A C E G I K M O Q S + // ^ ^ + // key_end key_start + keys_out.clear(); + expect_keys.clear(); + s = db.PKRScanRange(DataType::kZSets, "", "PKRSCANRANGE_B3", "*", 10, &keys_out, &kvs_out, &next_key); + ASSERT_TRUE(s.ok()); + for (int32_t idx = 9; idx >= 1; --idx) { + expect_keys.push_back(kvzset[idx].key); + } + ASSERT_TRUE(key_match(keys_out, expect_keys)); + ASSERT_EQ(next_key, ""); + + // ************************** Group 3 Test ************************** + // 0 1 2 3 4 5 6 7 8 9 + // A C E G I K M O Q S + // ^ ^ + // key_end/next_key key_start + keys_out.clear(); + expect_keys.clear(); + s = db.PKRScanRange(DataType::kZSets, "PKRSCANRANGE_R3", "", "*", 10, &keys_out, &kvs_out, &next_key); + ASSERT_TRUE(s.ok()); + for (int32_t idx = 8; idx >= 0; --idx) { + expect_keys.push_back(kvzset[idx].key); + } + ASSERT_TRUE(key_match(keys_out, expect_keys)); + ASSERT_EQ(next_key, ""); + + // ************************** Group 4 Test ************************** + // 0 1 2 3 4 5 6 7 8 9 + // A
C E G I K M O Q S + // ^ ^ + // key_end key_start + keys_out.clear(); + expect_keys.clear(); + s = db.PKRScanRange(DataType::kZSets, "PKRSCANRANGE_P3", "PKRSCANRANGE_D3", "*", 10, &keys_out, &kvs_out, &next_key); + ASSERT_TRUE(s.ok()); + for (int32_t idx = 7; idx >= 2; --idx) { + expect_keys.push_back(kvzset[idx].key); + } + ASSERT_TRUE(key_match(keys_out, expect_keys)); + ASSERT_EQ(next_key, ""); + + // ************************** Group 5 Test ************************** + // 0 1 2 3 4 5 6 7 8 9 + // A C E G I K M O Q S + // ^ ^ + // key_end key_start + keys_out.clear(); + expect_keys.clear(); + s = db.PKRScanRange(DataType::kZSets, "PKRSCANRANGE_Q3", "PKRSCANRANGE_C3", "*", 10, &keys_out, &kvs_out, &next_key); + ASSERT_TRUE(s.ok()); + for (int32_t idx = 8; idx >= 1; --idx) { + expect_keys.push_back(kvzset[idx].key); + } + ASSERT_TRUE(key_match(keys_out, expect_keys)); + ASSERT_EQ(next_key, ""); + + // ************************** Group 6 Test ************************** + // 0 1 2 3 4 5 6 7 8 9 + // A C E G I K M O Q S + // ^ ^ + // key_end key_start + keys_out.clear(); + expect_keys.clear(); + s = db.PKRScanRange(DataType::kZSets, "PKRSCANRANGE_K3", "PKRSCANRANGE_I3", "*", 10, &keys_out, &kvs_out, &next_key); + ASSERT_TRUE(s.ok()); + for (int32_t idx = 5; idx >= 4; --idx) { + expect_keys.push_back(kvzset[idx].key); + } + ASSERT_TRUE(key_match(keys_out, expect_keys)); + ASSERT_EQ(next_key, ""); + + // ************************** Group 7 Test ************************** + // 0 1 2 3 4 5 6 7 8 9 + // A C E G I K M O Q S + // ^ + // key_start/key_end + keys_out.clear(); + expect_keys.clear(); + s = db.PKRScanRange(DataType::kZSets, "PKRSCANRANGE_I3", "PKRSCANRANGE_I3", "*", 10, &keys_out, &kvs_out, &next_key); + ASSERT_TRUE(s.ok()); + for (int32_t idx = 4; idx >= 4; --idx) { + expect_keys.push_back(kvzset[idx].key); + } + ASSERT_TRUE(key_match(keys_out, expect_keys)); + ASSERT_EQ(next_key, ""); + + // ************************** Group 8 Test ************************** + // 0 1 2 3 4 5 6 7 8 9 + // A C E G I K M O Q S + // ^ ^ + // key_start key_end + keys_out.clear(); + expect_keys.clear(); + s = db.PKRScanRange(DataType::kZSets, "PKRSCANRANGE_I3", "PKRSCANRANGE_K3", "*", 10, &keys_out, &kvs_out, &next_key); + ASSERT_TRUE(s.IsInvalidArgument()); + ASSERT_TRUE(key_match(keys_out, expect_keys)); + ASSERT_EQ(next_key, ""); + + // ************************** Group 9 Test ************************** + // 0 1 2 3 4 5 6 7 8 9 + // A C E G I K M O Q S + // ^ ^ ^ + // key_end next_key key_start + keys_out.clear(); + expect_keys.clear(); + s = db.PKRScanRange(DataType::kZSets, "PKRSCANRANGE_Q3", "PKRSCANRANGE_C3", "*", 5, &keys_out, &kvs_out, &next_key); + ASSERT_TRUE(s.ok()); + for (int32_t idx = 8; idx >= 4; --idx) { + expect_keys.push_back(kvzset[idx].key); + } + ASSERT_TRUE(key_match(keys_out, expect_keys)); + ASSERT_EQ(next_key, "PKRSCANRANGE_G3"); + + // ************************** Group 10 Test ************************** + // 0 1 2 3 4 5 6 7 8 9 + // A C E G I K M O Q S + // ^ ^ ^ ^ + // key_end next_key expire key_start + keys_out.clear(); + expect_keys.clear(); + ASSERT_TRUE(make_expired(&db, "PKRSCANRANGE_M3")); + s = db.PKRScanRange(DataType::kZSets, "PKRSCANRANGE_Q3", "PKRSCANRANGE_C3", "*", 5, &keys_out, &kvs_out, &next_key); + ASSERT_TRUE(s.ok()); + for (int32_t idx = 8; idx >= 3; --idx) { + if (idx != 6) { + expect_keys.push_back(kvzset[idx].key); + } + } + ASSERT_TRUE(key_match(keys_out, expect_keys)); + ASSERT_EQ(next_key, "PKRSCANRANGE_E3"); + + // ************************** Group 11 Test 
************************** + // 0 1 2 3 4 5 6 7 8 9 + // A C E G I K M O Q S + // ^ ^ ^ ^ ^ + // key_end next_key empty expire key_start + keys_out.clear(); + expect_keys.clear(); + s = db.ZRem("PKRSCANRANGE_I3", {"MEMBER"}, &ret); + ASSERT_TRUE(s.ok()); + s = db.PKRScanRange(DataType::kZSets, "PKRSCANRANGE_Q3", "PKRSCANRANGE_C3", "*", 4, &keys_out, &kvs_out, &next_key); + ASSERT_TRUE(s.ok()); + for (int32_t idx = 8; idx >= 3; --idx) { + if (idx != 6 && idx != 4) { + expect_keys.push_back(kvzset[idx].key); + } + } + ASSERT_TRUE(key_match(keys_out, expect_keys)); + ASSERT_EQ(next_key, "PKRSCANRANGE_E3"); + + // ************************** Group 12 Test ************************** + // 0 1 2 3 4 5 6 7 8 9 + // A C E G I K M O Q S + // ^ ^ ^ ^ ^ + // key_end empty next_key expire key_start + keys_out.clear(); + expect_keys.clear(); + s = db.PKRScanRange(DataType::kZSets, "PKRSCANRANGE_Q3", "PKRSCANRANGE_C3", "*", 2, &keys_out, &kvs_out, &next_key); + ASSERT_TRUE(s.ok()); + for (int32_t idx = 8; idx >= 7; --idx) { + expect_keys.push_back(kvzset[idx].key); + } + ASSERT_TRUE(key_match(keys_out, expect_keys)); + ASSERT_EQ(next_key, "PKRSCANRANGE_K3"); + + // ************************** Group 13 Test ************************** + // 0 1 2 3 4 5 6 7 8 9 + // A C E G I K M O Q S + // ^ ^ ^ ^ ^ + // key_end next_key empty expire key_start + keys_out.clear(); + expect_keys.clear(); + s = db.PKRScanRange(DataType::kZSets, "PKRSCANRANGE_Q3", "PKRSCANRANGE_C3", "*", 3, &keys_out, &kvs_out, &next_key); + ASSERT_TRUE(s.ok()); + for (int32_t idx = 8; idx >= 5; --idx) { + if (idx != 6) { + expect_keys.push_back(kvzset[idx].key); + } + } + ASSERT_TRUE(key_match(keys_out, expect_keys)); + ASSERT_EQ(next_key, "PKRSCANRANGE_G3"); + + //=============================== Lists =============================== + std::vector<KeyValue> kvlist{{"PKRSCANRANGE_A4", "VALUE"}, {"PKRSCANRANGE_C4", "VALUE"}, + {"PKRSCANRANGE_E4", "VALUE"}, {"PKRSCANRANGE_G4", "VALUE"}, + {"PKRSCANRANGE_I4", "VALUE"}, {"PKRSCANRANGE_K4", "VALUE"}, + {"PKRSCANRANGE_M4", "VALUE"}, {"PKRSCANRANGE_O4", "VALUE"}, + {"PKRSCANRANGE_Q4", "VALUE"}, {"PKRSCANRANGE_S4", "VALUE"}}; + for (const auto& kv : kvlist) { + s = db.LPush(kv.key, {"NODE"}, &ret_u64); + } + + // ************************** Group 1 Test ************************** + // 0 1 2 3 4 5 6 7 8 9 + // A C E G I K M O Q S + // ^ ^ + // key_end/next_key key_start + keys_out.clear(); + expect_keys.clear(); + s = db.PKRScanRange(DataType::kLists, "", "", "*", 10, &keys_out, &kvs_out, &next_key); + ASSERT_TRUE(s.ok()); + for (int32_t idx = 9; idx >= 0; --idx) { + expect_keys.push_back(kvlist[idx].key); + } + ASSERT_TRUE(key_match(keys_out, expect_keys)); + ASSERT_EQ(next_key, ""); + + // ************************** Group 2 Test ************************** + // 0 1 2 3 4 5 6 7 8 9 + // A C E G I K M O Q S + // ^ ^ + // key_end key_start + keys_out.clear(); + expect_keys.clear(); + s = db.PKRScanRange(DataType::kLists, "", "PKRSCANRANGE_B4", "*", 10, &keys_out, &kvs_out, &next_key); + ASSERT_TRUE(s.ok()); + for (int32_t idx = 9; idx >= 1; --idx) { + expect_keys.push_back(kvlist[idx].key); + } + ASSERT_TRUE(key_match(keys_out, expect_keys)); + ASSERT_EQ(next_key, ""); + + // ************************** Group 3 Test ************************** + // 0 1 2 3 4 5 6 7 8 9 + // A C E G I K M O Q S + // ^ ^ + // key_end/next_key key_start + keys_out.clear(); + expect_keys.clear(); + s = db.PKRScanRange(DataType::kLists, "PKRSCANRANGE_R4", "", "*", 10, &keys_out, &kvs_out, &next_key); + ASSERT_TRUE(s.ok()); + for (int32_t idx
= 8; idx >= 0; --idx) { + expect_keys.push_back(kvlist[idx].key); + } + ASSERT_TRUE(key_match(keys_out, expect_keys)); + ASSERT_EQ(next_key, ""); + + // ************************** Group 4 Test ************************** + // 0 1 2 3 4 5 6 7 8 9 + // A C E G I K M O Q S + // ^ ^ + // key_end key_start + keys_out.clear(); + expect_keys.clear(); + s = db.PKRScanRange(DataType::kLists, "PKRSCANRANGE_P4", "PKRSCANRANGE_D4", "*", 10, &keys_out, &kvs_out, &next_key); + ASSERT_TRUE(s.ok()); + for (int32_t idx = 7; idx >= 2; --idx) { + expect_keys.push_back(kvlist[idx].key); + } + ASSERT_TRUE(key_match(keys_out, expect_keys)); + ASSERT_EQ(next_key, ""); + + // ************************** Group 5 Test ************************** + // 0 1 2 3 4 5 6 7 8 9 + // A C E G I K M O Q S + // ^ ^ + // key_end key_start + keys_out.clear(); + expect_keys.clear(); + s = db.PKRScanRange(DataType::kLists, "PKRSCANRANGE_Q4", "PKRSCANRANGE_C4", "*", 10, &keys_out, &kvs_out, &next_key); + ASSERT_TRUE(s.ok()); + for (int32_t idx = 8; idx >= 1; --idx) { + expect_keys.push_back(kvlist[idx].key); + } + ASSERT_TRUE(key_match(keys_out, expect_keys)); + ASSERT_EQ(next_key, ""); + + // ************************** Group 6 Test ************************** + // 0 1 2 3 4 5 6 7 8 9 + // A C E G I K M O Q S + // ^ ^ + // key_end key_start + keys_out.clear(); + expect_keys.clear(); + s = db.PKRScanRange(DataType::kLists, "PKRSCANRANGE_K4", "PKRSCANRANGE_I4", "*", 10, &keys_out, &kvs_out, &next_key); + ASSERT_TRUE(s.ok()); + for (int32_t idx = 5; idx >= 4; --idx) { + expect_keys.push_back(kvlist[idx].key); + } + ASSERT_TRUE(key_match(keys_out, expect_keys)); + ASSERT_EQ(next_key, ""); + + // ************************** Group 7 Test ************************** + // 0 1 2 3 4 5 6 7 8 9 + // A C E G I K M O Q S + // ^ + // key_start/key_end + keys_out.clear(); + expect_keys.clear(); + s = db.PKRScanRange(DataType::kLists, "PKRSCANRANGE_I4", "PKRSCANRANGE_I4", "*", 10, &keys_out, &kvs_out, &next_key); + ASSERT_TRUE(s.ok()); + for (int32_t idx = 4; idx >= 4; --idx) { + expect_keys.push_back(kvlist[idx].key); + } + ASSERT_TRUE(key_match(keys_out, expect_keys)); + ASSERT_EQ(next_key, ""); + + // ************************** Group 8 Test ************************** + // 0 1 2 3 4 5 6 7 8 9 + // A C E G I K M O Q S + // ^ ^ + // key_start key_end + keys_out.clear(); + expect_keys.clear(); + s = db.PKRScanRange(DataType::kLists, "PKRSCANRANGE_I4", "PKRSCANRANGE_K4", "*", 10, &keys_out, &kvs_out, &next_key); + ASSERT_TRUE(s.IsInvalidArgument()); + ASSERT_TRUE(key_match(keys_out, expect_keys)); + ASSERT_EQ(next_key, ""); + + // ************************** Group 9 Test ************************** + // 0 1 2 3 4 5 6 7 8 9 + // A C E G I K M O Q S + // ^ ^ ^ + // key_end next_key key_start + keys_out.clear(); + expect_keys.clear(); + s = db.PKRScanRange(DataType::kLists, "PKRSCANRANGE_Q4", "PKRSCANRANGE_C4", "*", 5, &keys_out, &kvs_out, &next_key); + ASSERT_TRUE(s.ok()); + for (int32_t idx = 8; idx >= 4; --idx) { + expect_keys.push_back(kvlist[idx].key); + } + ASSERT_TRUE(key_match(keys_out, expect_keys)); + ASSERT_EQ(next_key, "PKRSCANRANGE_G4"); + + // ************************** Group 10 Test ************************** + // 0 1 2 3 4 5 6 7 8 9 + // A C E G I K M O Q S + // ^ ^ ^ ^ + // key_end next_key expire key_start + keys_out.clear(); + expect_keys.clear(); + ASSERT_TRUE(make_expired(&db, "PKRSCANRANGE_M4")); + s = db.PKRScanRange(DataType::kLists, "PKRSCANRANGE_Q4", "PKRSCANRANGE_C4", "*", 5, &keys_out, &kvs_out, &next_key); + ASSERT_TRUE(s.ok()); + 
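// idx 6 (PKRSCANRANGE_M4) was expired above, so it is excluded from the expected keys. +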
for (int32_t idx = 8; idx >= 3; --idx) { + if (idx != 6) { + expect_keys.push_back(kvlist[idx].key); + } + } + ASSERT_TRUE(key_match(keys_out, expect_keys)); + ASSERT_EQ(next_key, "PKRSCANRANGE_E4"); + + // ************************** Group 11 Test ************************** + // 0 1 2 3 4 5 6 7 8 9 + // A C E G I K M O Q S + // ^ ^ ^ ^ ^ + // key_end next_key empty expire key_start + keys_out.clear(); + expect_keys.clear(); + std::vector<std::string> elements; + s = db.LPop("PKRSCANRANGE_I4", 1, &elements); + ASSERT_TRUE(s.ok()); + s = db.PKRScanRange(DataType::kLists, "PKRSCANRANGE_Q4", "PKRSCANRANGE_C4", "*", 4, &keys_out, &kvs_out, &next_key); + ASSERT_TRUE(s.ok()); + for (int32_t idx = 8; idx >= 3; --idx) { + if (idx != 6 && idx != 4) { + expect_keys.push_back(kvlist[idx].key); + } + } + ASSERT_TRUE(key_match(keys_out, expect_keys)); + ASSERT_EQ(next_key, "PKRSCANRANGE_E4"); + + // ************************** Group 12 Test ************************** + // 0 1 2 3 4 5 6 7 8 9 + // A C E G I K M O Q S + // ^ ^ ^ ^ ^ + // key_end empty next_key expire key_start + keys_out.clear(); + expect_keys.clear(); + s = db.PKRScanRange(DataType::kLists, "PKRSCANRANGE_Q4", "PKRSCANRANGE_C4", "*", 2, &keys_out, &kvs_out, &next_key); + ASSERT_TRUE(s.ok()); + for (int32_t idx = 8; idx >= 7; --idx) { + expect_keys.push_back(kvlist[idx].key); + } + ASSERT_TRUE(key_match(keys_out, expect_keys)); + ASSERT_EQ(next_key, "PKRSCANRANGE_K4"); + + // ************************** Group 13 Test ************************** + // 0 1 2 3 4 5 6 7 8 9 + // A C E G I K M O Q S + // ^ ^ ^ ^ ^ + // key_end next_key empty expire key_start + keys_out.clear(); + expect_keys.clear(); + s = db.PKRScanRange(DataType::kLists, "PKRSCANRANGE_Q4", "PKRSCANRANGE_C4", "*", 3, &keys_out, &kvs_out, &next_key); + ASSERT_TRUE(s.ok()); + for (int32_t idx = 8; idx >= 5; --idx) { + if (idx != 6) { + expect_keys.push_back(kvlist[idx].key); + } + } + ASSERT_TRUE(key_match(keys_out, expect_keys)); + ASSERT_EQ(next_key, "PKRSCANRANGE_G4"); + + type_status.clear(); + db.Del(keys_del); + sleep(2); + db.Compact(DataType::kAll, true); +} + +TEST_F(KeysTest, PKPatternMatchDel) { + int32_t ret; + uint64_t ret64; + int64_t delete_count = 0; + std::vector<std::string> keys; + std::vector<std::string> remove_keys; + const int64_t max_count = storage::BATCH_DELETE_LIMIT; + std::map<DataType, Status> type_status; + + //=============================== Strings =============================== + + // ***************** Group 1 Test ***************** + db.Set("GP1_PKPATTERNMATCHDEL_STRING_KEY1", "VALUE"); + db.Set("GP1_PKPATTERNMATCHDEL_STRING_KEY2", "VALUE"); + db.Set("GP1_PKPATTERNMATCHDEL_STRING_KEY3", "VALUE"); + db.Set("GP1_PKPATTERNMATCHDEL_STRING_KEY4", "VALUE"); + db.Set("GP1_PKPATTERNMATCHDEL_STRING_KEY5", "VALUE"); + db.Set("GP1_PKPATTERNMATCHDEL_STRING_KEY6", "VALUE"); + s = db.PKPatternMatchDelWithRemoveKeys("*", &delete_count, &remove_keys, max_count); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(delete_count, 6); + ASSERT_EQ(remove_keys.size(), 6); + keys.clear(); + remove_keys.clear(); + db.Keys(DataType::kStrings, "*", &keys); + ASSERT_EQ(keys.size(), 0); + + // ***************** Group 2 Test ***************** + db.Set("GP2_PKPATTERNMATCHDEL_STRING_KEY1", "VALUE"); + db.Set("GP2_PKPATTERNMATCHDEL_STRING_KEY2", "VALUE"); + db.Set("GP2_PKPATTERNMATCHDEL_STRING_KEY3", "VALUE"); + db.Set("GP2_PKPATTERNMATCHDEL_STRING_KEY4", "VALUE"); + db.Set("GP2_PKPATTERNMATCHDEL_STRING_KEY5", "VALUE"); + db.Set("GP2_PKPATTERNMATCHDEL_STRING_KEY6", "VALUE"); + ASSERT_TRUE(make_expired(&db,
"GP2_PKPATTERNMATCHDEL_STRING_KEY1")); + ASSERT_TRUE(make_expired(&db, "GP2_PKPATTERNMATCHDEL_STRING_KEY3")); + ASSERT_TRUE(make_expired(&db, "GP2_PKPATTERNMATCHDEL_STRING_KEY5")); + s = db.PKPatternMatchDelWithRemoveKeys("*", &delete_count, &remove_keys, max_count); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(delete_count, 3); + ASSERT_EQ(remove_keys.size(), 3); + keys.clear(); + remove_keys.clear(); + db.Keys(DataType::kStrings, "*", &keys); + db.Keys(DataType::kStrings, "*", &keys); + ASSERT_EQ(keys.size(), 0); + + // ***************** Group 3 Test ***************** + db.Set("GP3_PKPATTERNMATCHDEL_STRING_KEY1_0xxx0", "VALUE"); + db.Set("GP3_PKPATTERNMATCHDEL_STRING_KEY2_0ooo0", "VALUE"); + db.Set("GP3_PKPATTERNMATCHDEL_STRING_KEY3_0xxx0", "VALUE"); + db.Set("GP3_PKPATTERNMATCHDEL_STRING_KEY4_0ooo0", "VALUE"); + db.Set("GP3_PKPATTERNMATCHDEL_STRING_KEY5_0xxx0", "VALUE"); + db.Set("GP3_PKPATTERNMATCHDEL_STRING_KEY6_0ooo0", "VALUE"); + s = db.PKPatternMatchDelWithRemoveKeys("*0xxx0", &delete_count, &remove_keys, max_count); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(delete_count, 3); + ASSERT_EQ(remove_keys.size(), 3); + keys.clear(); + remove_keys.clear(); + db.Keys(DataType::kStrings, "*", &keys); + ASSERT_EQ(keys.size(), 3); + ASSERT_EQ(keys[0], "GP3_PKPATTERNMATCHDEL_STRING_KEY2_0ooo0"); + ASSERT_EQ(keys[1], "GP3_PKPATTERNMATCHDEL_STRING_KEY4_0ooo0"); + ASSERT_EQ(keys[2], "GP3_PKPATTERNMATCHDEL_STRING_KEY6_0ooo0"); + type_status.clear(); + db.Del(keys); + + // ***************** Group 4 Test ***************** + db.Set("GP4_PKPATTERNMATCHDEL_STRING_KEY1", "VALUE"); + db.Set("GP4_PKPATTERNMATCHDEL_STRING_KEY2_0ooo0", "VALUE"); + db.Set("GP4_PKPATTERNMATCHDEL_STRING_KEY3", "VALUE"); + db.Set("GP4_PKPATTERNMATCHDEL_STRING_KEY4_0ooo0", "VALUE"); + db.Set("GP4_PKPATTERNMATCHDEL_STRING_KEY5", "VALUE"); + db.Set("GP4_PKPATTERNMATCHDEL_STRING_KEY6_0ooo0", "VALUE"); + ASSERT_TRUE(make_expired(&db, "GP4_PKPATTERNMATCHDEL_STRING_KEY1")); + ASSERT_TRUE(make_expired(&db, "GP4_PKPATTERNMATCHDEL_STRING_KEY3")); + ASSERT_TRUE(make_expired(&db, "GP4_PKPATTERNMATCHDEL_STRING_KEY5")); + s = db.PKPatternMatchDelWithRemoveKeys("*0ooo0", &delete_count, &remove_keys, max_count); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(delete_count, 3); + ASSERT_EQ(remove_keys.size(), 3); + keys.clear(); + remove_keys.clear(); + db.Keys(DataType::kStrings, "*", &keys); + ASSERT_EQ(keys.size(), 0); + + // ***************** Group 5 Test ***************** + size_t gp5_total_kv = 23333; + for (size_t idx = 0; idx < gp5_total_kv; ++idx) { + db.Set("GP5_PKPATTERNMATCHDEL_STRING_KEY" + std::to_string(idx), "VALUE"); + } + s = db.PKPatternMatchDelWithRemoveKeys("*", &delete_count, &remove_keys, max_count); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(delete_count, max_count); + ASSERT_EQ(remove_keys.size(), max_count); + keys.clear(); + remove_keys.clear(); + db.Keys(DataType::kStrings, "*", &keys); + ASSERT_EQ(keys.size(), gp5_total_kv - max_count); + db.Del(keys); + + //=============================== Set =============================== + + // ***************** Group 1 Test ***************** + db.SAdd("GP1_PKPATTERNMATCHDEL_SET_KEY1", {"M1"}, &ret); + db.SAdd("GP1_PKPATTERNMATCHDEL_SET_KEY2", {"M1"}, &ret); + db.SAdd("GP1_PKPATTERNMATCHDEL_SET_KEY3", {"M1"}, &ret); + db.SAdd("GP1_PKPATTERNMATCHDEL_SET_KEY4", {"M1"}, &ret); + db.SAdd("GP1_PKPATTERNMATCHDEL_SET_KEY5", {"M1"}, &ret); + db.SAdd("GP1_PKPATTERNMATCHDEL_SET_KEY6", {"M1"}, &ret); + s = db.PKPatternMatchDelWithRemoveKeys("*", &delete_count, &remove_keys, max_count); + ASSERT_TRUE(s.ok()); + 
ASSERT_EQ(delete_count, 6); + ASSERT_EQ(remove_keys.size(), 6); + keys.clear(); + remove_keys.clear(); + db.Keys(DataType::kSets, "*", &keys); + ASSERT_EQ(keys.size(), 0); + + // ***************** Group 2 Test ***************** + db.SAdd("GP2_PKPATTERNMATCHDEL_SET_KEY1", {"M1"}, &ret); + db.SAdd("GP2_PKPATTERNMATCHDEL_SET_KEY2", {"M1"}, &ret); + db.SAdd("GP2_PKPATTERNMATCHDEL_SET_KEY3", {"M1"}, &ret); + db.SAdd("GP2_PKPATTERNMATCHDEL_SET_KEY4", {"M1"}, &ret); + db.SAdd("GP2_PKPATTERNMATCHDEL_SET_KEY5", {"M1"}, &ret); + db.SAdd("GP2_PKPATTERNMATCHDEL_SET_KEY6", {"M1"}, &ret); + ASSERT_TRUE(make_expired(&db, "GP2_PKPATTERNMATCHDEL_SET_KEY1")); + ASSERT_TRUE(make_expired(&db, "GP2_PKPATTERNMATCHDEL_SET_KEY3")); + ASSERT_TRUE(make_expired(&db, "GP2_PKPATTERNMATCHDEL_SET_KEY5")); + s = db.PKPatternMatchDelWithRemoveKeys("*", &delete_count, &remove_keys, max_count); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(delete_count, 3); + ASSERT_EQ(remove_keys.size(), 3); + keys.clear(); + remove_keys.clear(); + db.Keys(DataType::kSets, "*", &keys); + ASSERT_EQ(keys.size(), 0); + + // ***************** Group 3 Test ***************** + db.SAdd("GP3_PKPATTERNMATCHDEL_SET_KEY1_0xxx0", {"M1"}, &ret); + db.SAdd("GP3_PKPATTERNMATCHDEL_SET_KEY2_0ooo0", {"M1"}, &ret); + db.SAdd("GP3_PKPATTERNMATCHDEL_SET_KEY3_0xxx0", {"M1"}, &ret); + db.SAdd("GP3_PKPATTERNMATCHDEL_SET_KEY4_0ooo0", {"M1"}, &ret); + db.SAdd("GP3_PKPATTERNMATCHDEL_SET_KEY5_0xxx0", {"M1"}, &ret); + db.SAdd("GP3_PKPATTERNMATCHDEL_SET_KEY6_0ooo0", {"M1"}, &ret); + s = db.PKPatternMatchDelWithRemoveKeys("*0ooo0", &delete_count, &remove_keys, max_count); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(delete_count, 3); + ASSERT_EQ(remove_keys.size(), 3); + keys.clear(); + remove_keys.clear(); + db.Keys(DataType::kSets, "*", &keys); + ASSERT_EQ(keys.size(), 3); + ASSERT_EQ("GP3_PKPATTERNMATCHDEL_SET_KEY1_0xxx0", keys[0]); + ASSERT_EQ("GP3_PKPATTERNMATCHDEL_SET_KEY3_0xxx0", keys[1]); + ASSERT_EQ("GP3_PKPATTERNMATCHDEL_SET_KEY5_0xxx0", keys[2]); + type_status.clear(); + db.Del(keys); + + // ***************** Group 4 Test ***************** + db.SAdd("GP4_PKPATTERNMATCHDEL_SET_KEY1", {"M1"}, &ret); + db.SAdd("GP4_PKPATTERNMATCHDEL_SET_KEY2", {"M1"}, &ret); + db.SAdd("GP4_PKPATTERNMATCHDEL_SET_KEY3", {"M1"}, &ret); + db.SAdd("GP4_PKPATTERNMATCHDEL_SET_KEY4", {"M1"}, &ret); + db.SAdd("GP4_PKPATTERNMATCHDEL_SET_KEY5", {"M1"}, &ret); + db.SAdd("GP4_PKPATTERNMATCHDEL_SET_KEY6", {"M1"}, &ret); + db.SRem("GP4_PKPATTERNMATCHDEL_SET_KEY1", {"M1"}, &ret); + db.SRem("GP4_PKPATTERNMATCHDEL_SET_KEY3", {"M1"}, &ret); + db.SRem("GP4_PKPATTERNMATCHDEL_SET_KEY5", {"M1"}, &ret); + s = db.PKPatternMatchDelWithRemoveKeys("*", &delete_count, &remove_keys, max_count); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(delete_count, 3); + ASSERT_EQ(remove_keys.size(), 3); + keys.clear(); + remove_keys.clear(); + db.Keys(DataType::kSets, "*", &keys); + ASSERT_EQ(keys.size(), 0); + + // ***************** Group 5 Test ***************** + db.SAdd("GP5_PKPATTERNMATCHDEL_SET_KEY1_0ooo0", {"M1"}, &ret); + db.SAdd("GP5_PKPATTERNMATCHDEL_SET_KEY2_0xxx0", {"M1"}, &ret); + db.SAdd("GP5_PKPATTERNMATCHDEL_SET_KEY3_0ooo0", {"M1"}, &ret); + db.SAdd("GP5_PKPATTERNMATCHDEL_SET_KEY4_0xxx0", {"M1"}, &ret); + db.SAdd("GP5_PKPATTERNMATCHDEL_SET_KEY5_0ooo0", {"M1"}, &ret); + db.SAdd("GP5_PKPATTERNMATCHDEL_SET_KEY6_0xxx0", {"M1"}, &ret); + db.SAdd("GP5_PKPATTERNMATCHDEL_SET_KEY7_0ooo0", {"M1"}, &ret); + db.SAdd("GP5_PKPATTERNMATCHDEL_SET_KEY8_0xxx0", {"M1"}, &ret); + ASSERT_TRUE(make_expired(&db, "GP5_PKPATTERNMATCHDEL_SET_KEY1_0ooo0")); + 
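// KEY2_0xxx0 expires next; together with the two SRem calls below, only the live "*0ooo0" keys KEY5 and KEY7 remain to be deleted. +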
ASSERT_TRUE(make_expired(&db, "GP5_PKPATTERNMATCHDEL_SET_KEY2_0xxx0")); + db.SRem("GP5_PKPATTERNMATCHDEL_SET_KEY3_0ooo0", {"M1"}, &ret); + db.SRem("GP5_PKPATTERNMATCHDEL_SET_KEY4_0xxx0", {"M1"}, &ret); + s = db.PKPatternMatchDelWithRemoveKeys("*0ooo0", &delete_count, &remove_keys, max_count); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(delete_count, 2); + ASSERT_EQ(remove_keys.size(), 2); + keys.clear(); + remove_keys.clear(); + db.Keys(DataType::kSets, "*", &keys); + ASSERT_EQ(keys.size(), 2); + ASSERT_EQ(keys[0], "GP5_PKPATTERNMATCHDEL_SET_KEY6_0xxx0"); + ASSERT_EQ(keys[1], "GP5_PKPATTERNMATCHDEL_SET_KEY8_0xxx0"); + type_status.clear(); + db.Del(keys); + + // ***************** Group 6 Test ***************** + size_t gp6_total_set = 23333; + for (size_t idx = 0; idx < gp6_total_set; ++idx) { + db.SAdd("GP6_PKPATTERNMATCHDEL_SET_KEY" + std::to_string(idx), {"M1"}, &ret); + } + s = db.PKPatternMatchDelWithRemoveKeys("*", &delete_count, &remove_keys, max_count); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(delete_count, max_count); + ASSERT_EQ(remove_keys.size(), max_count); + keys.clear(); + remove_keys.clear(); + db.Keys(DataType::kSets, "*", &keys); + ASSERT_EQ(keys.size(), gp6_total_set - max_count); + db.Del(keys); + + //=============================== Hashes =============================== + + // ***************** Group 1 Test ***************** + db.HSet("GP1_PKPATTERNMATCHDEL_HASH_KEY1", "FIELD", "VALUE", &ret); + db.HSet("GP1_PKPATTERNMATCHDEL_HASH_KEY2", "FIELD", "VALUE", &ret); + db.HSet("GP1_PKPATTERNMATCHDEL_HASH_KEY3", "FIELD", "VALUE", &ret); + db.HSet("GP1_PKPATTERNMATCHDEL_HASH_KEY4", "FIELD", "VALUE", &ret); + db.HSet("GP1_PKPATTERNMATCHDEL_HASH_KEY5", "FIELD", "VALUE", &ret); + db.HSet("GP1_PKPATTERNMATCHDEL_HASH_KEY6", "FIELD", "VALUE", &ret); + s = db.PKPatternMatchDelWithRemoveKeys("*", &delete_count, &remove_keys, max_count); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(delete_count, 6); + ASSERT_EQ(remove_keys.size(), 6); + keys.clear(); + remove_keys.clear(); + db.Keys(DataType::kHashes, "*", &keys); + ASSERT_EQ(keys.size(), 0); + + // ***************** Group 2 Test ***************** + db.HSet("GP2_PKPATTERNMATCHDEL_HASH_KEY1", "FIELD", "VALUE", &ret); + db.HSet("GP2_PKPATTERNMATCHDEL_HASH_KEY2", "FIELD", "VALUE", &ret); + db.HSet("GP2_PKPATTERNMATCHDEL_HASH_KEY3", "FIELD", "VALUE", &ret); + db.HSet("GP2_PKPATTERNMATCHDEL_HASH_KEY4", "FIELD", "VALUE", &ret); + db.HSet("GP2_PKPATTERNMATCHDEL_HASH_KEY5", "FIELD", "VALUE", &ret); + db.HSet("GP2_PKPATTERNMATCHDEL_HASH_KEY6", "FIELD", "VALUE", &ret); + ASSERT_TRUE(make_expired(&db, "GP2_PKPATTERNMATCHDEL_HASH_KEY1")); + ASSERT_TRUE(make_expired(&db, "GP2_PKPATTERNMATCHDEL_HASH_KEY3")); + ASSERT_TRUE(make_expired(&db, "GP2_PKPATTERNMATCHDEL_HASH_KEY5")); + s = db.PKPatternMatchDelWithRemoveKeys("*", &delete_count, &remove_keys, max_count); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(delete_count, 3); + ASSERT_EQ(remove_keys.size(), 3); + keys.clear(); + remove_keys.clear(); + db.Keys(DataType::kHashes, "*", &keys); + ASSERT_EQ(keys.size(), 0); + + // ***************** Group 3 Test ***************** + db.HSet("GP3_PKPATTERNMATCHDEL_HASH_KEY1_0xxx0", "FIELD", "VALUE", &ret); + db.HSet("GP3_PKPATTERNMATCHDEL_HASH_KEY2_0ooo0", "FIELD", "VALUE", &ret); + db.HSet("GP3_PKPATTERNMATCHDEL_HASH_KEY3_0xxx0", "FIELD", "VALUE", &ret); + db.HSet("GP3_PKPATTERNMATCHDEL_HASH_KEY4_0ooo0", "FIELD", "VALUE", &ret); + db.HSet("GP3_PKPATTERNMATCHDEL_HASH_KEY5_0xxx0", "FIELD", "VALUE", &ret); + db.HSet("GP3_PKPATTERNMATCHDEL_HASH_KEY6_0ooo0", "FIELD", "VALUE", &ret); + s = 
db.PKPatternMatchDelWithRemoveKeys("*0ooo0", &delete_count, &remove_keys, max_count); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(delete_count, 3); + ASSERT_EQ(remove_keys.size(), 3); + keys.clear(); + remove_keys.clear(); + db.Keys(DataType::kHashes, "*", &keys); + ASSERT_EQ(keys.size(), 3); + ASSERT_EQ("GP3_PKPATTERNMATCHDEL_HASH_KEY1_0xxx0", keys[0]); + ASSERT_EQ("GP3_PKPATTERNMATCHDEL_HASH_KEY3_0xxx0", keys[1]); + ASSERT_EQ("GP3_PKPATTERNMATCHDEL_HASH_KEY5_0xxx0", keys[2]); + type_status.clear(); + db.Del(keys); + + // ***************** Group 4 Test ***************** + db.HSet("GP4_PKPATTERNMATCHDEL_HASH_KEY1", "FIELD", "VALUE", &ret); + db.HSet("GP4_PKPATTERNMATCHDEL_HASH_KEY2", "FIELD", "VALUE", &ret); + db.HSet("GP4_PKPATTERNMATCHDEL_HASH_KEY3", "FIELD", "VALUE", &ret); + db.HSet("GP4_PKPATTERNMATCHDEL_HASH_KEY4", "FIELD", "VALUE", &ret); + db.HSet("GP4_PKPATTERNMATCHDEL_HASH_KEY5", "FIELD", "VALUE", &ret); + db.HSet("GP4_PKPATTERNMATCHDEL_HASH_KEY6", "FIELD", "VALUE", &ret); + db.HDel("GP4_PKPATTERNMATCHDEL_HASH_KEY1", {"FIELD"}, &ret); + db.HDel("GP4_PKPATTERNMATCHDEL_HASH_KEY3", {"FIELD"}, &ret); + db.HDel("GP4_PKPATTERNMATCHDEL_HASH_KEY5", {"FIELD"}, &ret); + s = db.PKPatternMatchDelWithRemoveKeys("*", &delete_count, &remove_keys, max_count); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(delete_count, 3); + ASSERT_EQ(remove_keys.size(), 3); + keys.clear(); + remove_keys.clear(); + db.Keys(DataType::kHashes, "*", &keys); + ASSERT_EQ(keys.size(), 0); + + // ***************** Group 5 Test ***************** + db.HSet("GP5_PKPATTERNMATCHDEL_HASH_KEY1_0ooo0", "FIELD", "VALUE", &ret); + db.HSet("GP5_PKPATTERNMATCHDEL_HASH_KEY2_0xxx0", "FIELD", "VALUE", &ret); + db.HSet("GP5_PKPATTERNMATCHDEL_HASH_KEY3_0ooo0", "FIELD", "VALUE", &ret); + db.HSet("GP5_PKPATTERNMATCHDEL_HASH_KEY4_0xxx0", "FIELD", "VALUE", &ret); + db.HSet("GP5_PKPATTERNMATCHDEL_HASH_KEY5_0ooo0", "FIELD", "VALUE", &ret); + db.HSet("GP5_PKPATTERNMATCHDEL_HASH_KEY6_0xxx0", "FIELD", "VALUE", &ret); + db.HSet("GP5_PKPATTERNMATCHDEL_HASH_KEY7_0ooo0", "FIELD", "VALUE", &ret); + db.HSet("GP5_PKPATTERNMATCHDEL_HASH_KEY8_0xxx0", "FIELD", "VALUE", &ret); + ASSERT_TRUE(make_expired(&db, "GP5_PKPATTERNMATCHDEL_HASH_KEY1_0ooo0")); + ASSERT_TRUE(make_expired(&db, "GP5_PKPATTERNMATCHDEL_HASH_KEY2_0xxx0")); + db.HDel("GP5_PKPATTERNMATCHDEL_HASH_KEY3_0ooo0", {"FIELD"}, &ret); + db.HDel("GP5_PKPATTERNMATCHDEL_HASH_KEY4_0xxx0", {"FIELD"}, &ret); + s = db.PKPatternMatchDelWithRemoveKeys("*0ooo0", &delete_count, &remove_keys, max_count); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(delete_count, 2); + ASSERT_EQ(remove_keys.size(), 2); + keys.clear(); + remove_keys.clear(); + db.Keys(DataType::kHashes, "*", &keys); + ASSERT_EQ(keys.size(), 2); + ASSERT_EQ(keys[0], "GP5_PKPATTERNMATCHDEL_HASH_KEY6_0xxx0"); + ASSERT_EQ(keys[1], "GP5_PKPATTERNMATCHDEL_HASH_KEY8_0xxx0"); + type_status.clear(); + db.Del(keys); + + // ***************** Group 6 Test ***************** + size_t gp6_total_hash = 23333; + for (size_t idx = 0; idx < gp6_total_hash; ++idx) { + db.HSet("GP6_PKPATTERNMATCHDEL_HASH_KEY" + std::to_string(idx), "FIELD", "VALUE", &ret); + } + s = db.PKPatternMatchDelWithRemoveKeys("*", &delete_count, &remove_keys, max_count); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(delete_count, max_count); + ASSERT_EQ(remove_keys.size(), max_count); + keys.clear(); + remove_keys.clear(); + db.Keys(DataType::kHashes, "*", &keys); + ASSERT_EQ(keys.size(), gp6_total_hash - max_count); + db.Del(keys); + + //=============================== ZSets =============================== + + // ***************** 
Group 1 Test ***************** + db.ZAdd("GP1_PKPATTERNMATCHDEL_ZSET_KEY1", {{1, "M"}}, &ret); + db.ZAdd("GP1_PKPATTERNMATCHDEL_ZSET_KEY2", {{1, "M"}}, &ret); + db.ZAdd("GP1_PKPATTERNMATCHDEL_ZSET_KEY3", {{1, "M"}}, &ret); + db.ZAdd("GP1_PKPATTERNMATCHDEL_ZSET_KEY4", {{1, "M"}}, &ret); + db.ZAdd("GP1_PKPATTERNMATCHDEL_ZSET_KEY5", {{1, "M"}}, &ret); + db.ZAdd("GP1_PKPATTERNMATCHDEL_ZSET_KEY6", {{1, "M"}}, &ret); + s = db.PKPatternMatchDelWithRemoveKeys("*", &delete_count, &remove_keys, max_count); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(delete_count, 6); + ASSERT_EQ(remove_keys.size(), 6); + keys.clear(); + remove_keys.clear(); + db.Keys(DataType::kZSets, "*", &keys); + ASSERT_EQ(keys.size(), 0); + + // ***************** Group 2 Test ***************** + db.ZAdd("GP2_PKPATTERNMATCHDEL_ZSET_KEY1", {{1, "M"}}, &ret); + db.ZAdd("GP2_PKPATTERNMATCHDEL_ZSET_KEY2", {{1, "M"}}, &ret); + db.ZAdd("GP2_PKPATTERNMATCHDEL_ZSET_KEY3", {{1, "M"}}, &ret); + db.ZAdd("GP2_PKPATTERNMATCHDEL_ZSET_KEY4", {{1, "M"}}, &ret); + db.ZAdd("GP2_PKPATTERNMATCHDEL_ZSET_KEY5", {{1, "M"}}, &ret); + db.ZAdd("GP2_PKPATTERNMATCHDEL_ZSET_KEY6", {{1, "M"}}, &ret); + ASSERT_TRUE(make_expired(&db, "GP2_PKPATTERNMATCHDEL_ZSET_KEY1")); + ASSERT_TRUE(make_expired(&db, "GP2_PKPATTERNMATCHDEL_ZSET_KEY3")); + ASSERT_TRUE(make_expired(&db, "GP2_PKPATTERNMATCHDEL_ZSET_KEY5")); + s = db.PKPatternMatchDelWithRemoveKeys("*", &delete_count, &remove_keys, max_count); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(delete_count, 3); + ASSERT_EQ(remove_keys.size(), 3); + keys.clear(); + remove_keys.clear(); + db.Keys(DataType::kZSets, "*", &keys); + ASSERT_EQ(keys.size(), 0); + + // ***************** Group 3 Test ***************** + db.ZAdd("GP3_PKPATTERNMATCHDEL_ZSET_KEY1_0xxx0", {{1, "M"}}, &ret); + db.ZAdd("GP3_PKPATTERNMATCHDEL_ZSET_KEY2_0ooo0", {{1, "M"}}, &ret); + db.ZAdd("GP3_PKPATTERNMATCHDEL_ZSET_KEY3_0xxx0", {{1, "M"}}, &ret); + db.ZAdd("GP3_PKPATTERNMATCHDEL_ZSET_KEY4_0ooo0", {{1, "M"}}, &ret); + db.ZAdd("GP3_PKPATTERNMATCHDEL_ZSET_KEY5_0xxx0", {{1, "M"}}, &ret); + db.ZAdd("GP3_PKPATTERNMATCHDEL_ZSET_KEY6_0ooo0", {{1, "M"}}, &ret); + s = db.PKPatternMatchDelWithRemoveKeys("*0ooo0", &delete_count, &remove_keys, max_count); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(delete_count, 3); + ASSERT_EQ(remove_keys.size(), 3); + keys.clear(); + remove_keys.clear(); + db.Keys(DataType::kZSets, "*", &keys); + ASSERT_EQ(keys.size(), 3); + ASSERT_EQ("GP3_PKPATTERNMATCHDEL_ZSET_KEY1_0xxx0", keys[0]); + ASSERT_EQ("GP3_PKPATTERNMATCHDEL_ZSET_KEY3_0xxx0", keys[1]); + ASSERT_EQ("GP3_PKPATTERNMATCHDEL_ZSET_KEY5_0xxx0", keys[2]); + type_status.clear(); + db.Del(keys); + + // ***************** Group 4 Test ***************** + db.ZAdd("GP4_PKPATTERNMATCHDEL_ZSET_KEY1", {{1, "M"}}, &ret); + db.ZAdd("GP4_PKPATTERNMATCHDEL_ZSET_KEY2", {{1, "M"}}, &ret); + db.ZAdd("GP4_PKPATTERNMATCHDEL_ZSET_KEY3", {{1, "M"}}, &ret); + db.ZAdd("GP4_PKPATTERNMATCHDEL_ZSET_KEY4", {{1, "M"}}, &ret); + db.ZAdd("GP4_PKPATTERNMATCHDEL_ZSET_KEY5", {{1, "M"}}, &ret); + db.ZAdd("GP4_PKPATTERNMATCHDEL_ZSET_KEY6", {{1, "M"}}, &ret); + db.ZRem("GP4_PKPATTERNMATCHDEL_ZSET_KEY1", {"M"}, &ret); + db.ZRem("GP4_PKPATTERNMATCHDEL_ZSET_KEY3", {"M"}, &ret); + db.ZRem("GP4_PKPATTERNMATCHDEL_ZSET_KEY5", {"M"}, &ret); + s = db.PKPatternMatchDelWithRemoveKeys("*", &delete_count, &remove_keys, max_count); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(delete_count, 3); + ASSERT_EQ(remove_keys.size(), 3); + keys.clear(); + remove_keys.clear(); + db.Keys(DataType::kZSets, "*", &keys); + ASSERT_EQ(keys.size(), 0); + + // ***************** 
Group 5 Test ***************** + db.ZAdd("GP5_PKPATTERNMATCHDEL_ZSET_KEY1_0ooo0", {{1, "M"}}, &ret); + db.ZAdd("GP5_PKPATTERNMATCHDEL_ZSET_KEY2_0xxx0", {{1, "M"}}, &ret); + db.ZAdd("GP5_PKPATTERNMATCHDEL_ZSET_KEY3_0ooo0", {{1, "M"}}, &ret); + db.ZAdd("GP5_PKPATTERNMATCHDEL_ZSET_KEY4_0xxx0", {{1, "M"}}, &ret); + db.ZAdd("GP5_PKPATTERNMATCHDEL_ZSET_KEY5_0ooo0", {{1, "M"}}, &ret); + db.ZAdd("GP5_PKPATTERNMATCHDEL_ZSET_KEY6_0xxx0", {{1, "M"}}, &ret); + db.ZAdd("GP5_PKPATTERNMATCHDEL_ZSET_KEY7_0ooo0", {{1, "M"}}, &ret); + db.ZAdd("GP5_PKPATTERNMATCHDEL_ZSET_KEY8_0xxx0", {{1, "M"}}, &ret); + ASSERT_TRUE(make_expired(&db, "GP5_PKPATTERNMATCHDEL_ZSET_KEY1_0ooo0")); + ASSERT_TRUE(make_expired(&db, "GP5_PKPATTERNMATCHDEL_ZSET_KEY2_0xxx0")); + db.ZRem("GP5_PKPATTERNMATCHDEL_ZSET_KEY3_0ooo0", {"M"}, &ret); + db.ZRem("GP5_PKPATTERNMATCHDEL_ZSET_KEY4_0xxx0", {"M"}, &ret); + s = db.PKPatternMatchDelWithRemoveKeys("*0ooo0", &delete_count, &remove_keys, max_count); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(delete_count, 2); + ASSERT_EQ(remove_keys.size(), 2); + keys.clear(); + remove_keys.clear(); + db.Keys(DataType::kZSets, "*", &keys); + ASSERT_EQ(keys.size(), 2); + ASSERT_EQ(keys[0], "GP5_PKPATTERNMATCHDEL_ZSET_KEY6_0xxx0"); + ASSERT_EQ(keys[1], "GP5_PKPATTERNMATCHDEL_ZSET_KEY8_0xxx0"); + type_status.clear(); + db.Del(keys); + + // ***************** Group 6 Test ***************** + size_t gp6_total_zset = 23333; + for (size_t idx = 0; idx < gp6_total_zset; ++idx) { + db.ZAdd("GP6_PKPATTERNMATCHDEL_ZSET_KEY" + std::to_string(idx), {{1, "M"}}, &ret); + } + s = db.PKPatternMatchDelWithRemoveKeys("*", &delete_count, &remove_keys, max_count); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(delete_count, max_count); + ASSERT_EQ(remove_keys.size(), max_count); + keys.clear(); + remove_keys.clear(); + db.Keys(DataType::kZSets, "*", &keys); + ASSERT_EQ(keys.size(), gp6_total_zset-max_count); + db.Del(keys); + + //=============================== List =============================== + + // ***************** Group 1 Test ***************** + db.LPush("GP1_PKPATTERNMATCHDEL_LIST_KEY1", {"VALUE"}, &ret64); + db.LPush("GP1_PKPATTERNMATCHDEL_LIST_KEY2", {"VALUE"}, &ret64); + db.LPush("GP1_PKPATTERNMATCHDEL_LIST_KEY3", {"VALUE"}, &ret64); + db.LPush("GP1_PKPATTERNMATCHDEL_LIST_KEY4", {"VALUE"}, &ret64); + db.LPush("GP1_PKPATTERNMATCHDEL_LIST_KEY5", {"VALUE"}, &ret64); + db.LPush("GP1_PKPATTERNMATCHDEL_LIST_KEY6", {"VALUE"}, &ret64); + s = db.PKPatternMatchDelWithRemoveKeys("*", &delete_count, &remove_keys, max_count); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(delete_count, 6); + ASSERT_EQ(remove_keys.size(), 6); + keys.clear(); + remove_keys.clear(); + db.Keys(DataType::kLists, "*", &keys); + ASSERT_EQ(keys.size(), 0); + + // ***************** Group 2 Test ***************** + db.LPush("GP2_PKPATTERNMATCHDEL_LIST_KEY1", {"VALUE"}, &ret64); + db.LPush("GP2_PKPATTERNMATCHDEL_LIST_KEY2", {"VALUE"}, &ret64); + db.LPush("GP2_PKPATTERNMATCHDEL_LIST_KEY3", {"VALUE"}, &ret64); + db.LPush("GP2_PKPATTERNMATCHDEL_LIST_KEY4", {"VALUE"}, &ret64); + db.LPush("GP2_PKPATTERNMATCHDEL_LIST_KEY5", {"VALUE"}, &ret64); + db.LPush("GP2_PKPATTERNMATCHDEL_LIST_KEY6", {"VALUE"}, &ret64); + ASSERT_TRUE(make_expired(&db, "GP2_PKPATTERNMATCHDEL_LIST_KEY1")); + ASSERT_TRUE(make_expired(&db, "GP2_PKPATTERNMATCHDEL_LIST_KEY3")); + ASSERT_TRUE(make_expired(&db, "GP2_PKPATTERNMATCHDEL_LIST_KEY5")); + s = db.PKPatternMatchDelWithRemoveKeys("*", &delete_count, &remove_keys, max_count); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(delete_count, 3); + ASSERT_EQ(remove_keys.size(), 3); + 
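// Expired list keys are not counted: only the three live keys are deleted and reported. +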
keys.clear(); + remove_keys.clear(); + db.Keys(DataType::kLists, "*", &keys); + ASSERT_EQ(keys.size(), 0); + + // ***************** Group 3 Test ***************** + db.LPush("GP3_PKPATTERNMATCHDEL_LIST_KEY1_0xxx0", {"VALUE"}, &ret64); + db.LPush("GP3_PKPATTERNMATCHDEL_LIST_KEY2_0ooo0", {"VALUE"}, &ret64); + db.LPush("GP3_PKPATTERNMATCHDEL_LIST_KEY3_0xxx0", {"VALUE"}, &ret64); + db.LPush("GP3_PKPATTERNMATCHDEL_LIST_KEY4_0ooo0", {"VALUE"}, &ret64); + db.LPush("GP3_PKPATTERNMATCHDEL_LIST_KEY5_0xxx0", {"VALUE"}, &ret64); + db.LPush("GP3_PKPATTERNMATCHDEL_LIST_KEY6_0ooo0", {"VALUE"}, &ret64); + s = db.PKPatternMatchDelWithRemoveKeys("*0ooo0", &delete_count, &remove_keys, max_count); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(delete_count, 3); + ASSERT_EQ(remove_keys.size(), 3); + keys.clear(); + remove_keys.clear(); + db.Keys(DataType::kLists, "*", &keys); + ASSERT_EQ(keys.size(), 3); + ASSERT_EQ("GP3_PKPATTERNMATCHDEL_LIST_KEY1_0xxx0", keys[0]); + ASSERT_EQ("GP3_PKPATTERNMATCHDEL_LIST_KEY3_0xxx0", keys[1]); + ASSERT_EQ("GP3_PKPATTERNMATCHDEL_LIST_KEY5_0xxx0", keys[2]); + type_status.clear(); + db.Del(keys); + + // ***************** Group 4 Test ***************** + db.LPush("GP4_PKPATTERNMATCHDEL_LIST_KEY1", {"VALUE"}, &ret64); + db.LPush("GP4_PKPATTERNMATCHDEL_LIST_KEY2", {"VALUE"}, &ret64); + db.LPush("GP4_PKPATTERNMATCHDEL_LIST_KEY3", {"VALUE"}, &ret64); + db.LPush("GP4_PKPATTERNMATCHDEL_LIST_KEY4", {"VALUE"}, &ret64); + db.LPush("GP4_PKPATTERNMATCHDEL_LIST_KEY5", {"VALUE"}, &ret64); + db.LPush("GP4_PKPATTERNMATCHDEL_LIST_KEY6", {"VALUE"}, &ret64); + db.LRem("GP4_PKPATTERNMATCHDEL_LIST_KEY1", 1, "VALUE", &ret64); + db.LRem("GP4_PKPATTERNMATCHDEL_LIST_KEY3", 1, "VALUE", &ret64); + db.LRem("GP4_PKPATTERNMATCHDEL_LIST_KEY5", 1, "VALUE", &ret64); + s = db.PKPatternMatchDelWithRemoveKeys("*", &delete_count, &remove_keys, max_count); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(delete_count, 3); + ASSERT_EQ(remove_keys.size(), 3); + keys.clear(); + remove_keys.clear(); + db.Keys(DataType::kLists, "*", &keys); + ASSERT_EQ(keys.size(), 0); + + // ***************** Group 5 Test ***************** + db.LPush("GP5_PKPATTERNMATCHDEL_LIST_KEY1_0ooo0", {"VALUE"}, &ret64); + db.LPush("GP5_PKPATTERNMATCHDEL_LIST_KEY2_0xxx0", {"VALUE"}, &ret64); + db.LPush("GP5_PKPATTERNMATCHDEL_LIST_KEY3_0ooo0", {"VALUE"}, &ret64); + db.LPush("GP5_PKPATTERNMATCHDEL_LIST_KEY4_0xxx0", {"VALUE"}, &ret64); + db.LPush("GP5_PKPATTERNMATCHDEL_LIST_KEY5_0ooo0", {"VALUE"}, &ret64); + db.LPush("GP5_PKPATTERNMATCHDEL_LIST_KEY6_0xxx0", {"VALUE"}, &ret64); + db.LPush("GP5_PKPATTERNMATCHDEL_LIST_KEY7_0ooo0", {"VALUE"}, &ret64); + db.LPush("GP5_PKPATTERNMATCHDEL_LIST_KEY8_0xxx0", {"VALUE"}, &ret64); + ASSERT_TRUE(make_expired(&db, "GP5_PKPATTERNMATCHDEL_LIST_KEY1_0ooo0")); + ASSERT_TRUE(make_expired(&db, "GP5_PKPATTERNMATCHDEL_LIST_KEY2_0xxx0")); + db.LRem("GP5_PKPATTERNMATCHDEL_LIST_KEY3_0ooo0", 1, "VALUE", &ret64); + db.LRem("GP5_PKPATTERNMATCHDEL_LIST_KEY4_0xxx0", 1, "VALUE", &ret64); + s = db.PKPatternMatchDelWithRemoveKeys("*0ooo0", &delete_count, &remove_keys, max_count); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(delete_count, 2); + ASSERT_EQ(remove_keys.size(), 2); + keys.clear(); + remove_keys.clear(); + db.Keys(DataType::kLists, "*", &keys); + ASSERT_EQ(keys.size(), 2); + ASSERT_EQ(keys[0], "GP5_PKPATTERNMATCHDEL_LIST_KEY6_0xxx0"); + ASSERT_EQ(keys[1], "GP5_PKPATTERNMATCHDEL_LIST_KEY8_0xxx0"); + type_status.clear(); + db.Del(keys); + + // ***************** Group 6 Test ***************** + size_t gp6_total_list = 23333; + for (size_t idx = 0; idx < 
gp6_total_list; ++idx) { + db.LPush("GP6_PKPATTERNMATCHDEL_LIST_KEY" + std::to_string(idx), {"VALUE"}, &ret64); + } + s = db.PKPatternMatchDelWithRemoveKeys("*", &delete_count, &remove_keys, max_count); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(delete_count, max_count); + ASSERT_EQ(remove_keys.size(), max_count); + keys.clear(); + remove_keys.clear(); + db.Keys(DataType::kLists, "*", &keys); + ASSERT_EQ(keys.size(), gp6_total_list - max_count); + db.Del(keys); + + sleep(2); + db.Compact(DataType::kAll, true); +} + +// Scan +// Note: this test needs to run first, because all of the data it scans is +// predetermined. +TEST_F(KeysTest, ScanCaseAllTest) { // NOLINT + int64_t cursor; + int64_t next_cursor; + int64_t del_num; + int32_t int32_ret; + uint64_t uint64_ret; + std::vector<std::string> keys; + std::vector<std::string> total_keys; + std::vector<std::string> delete_keys; + std::map<DataType, Status> type_status; + + // ***************** Group 1 Test ***************** + // String + s = db.Set("GP1_SCAN_CASE_ALL_STRING_KEY1", "GP1_SCAN_CASE_ALL_STRING_VALUE1"); + s = db.Set("GP1_SCAN_CASE_ALL_STRING_KEY2", "GP1_SCAN_CASE_ALL_STRING_VALUE2"); + s = db.Set("GP1_SCAN_CASE_ALL_STRING_KEY3", "GP1_SCAN_CASE_ALL_STRING_VALUE3"); + + // Hash + s = db.HSet("GP1_SCAN_CASE_ALL_HASH_KEY1", "GP1_SCAN_CASE_ALL_HASH_FIELD1", "GP1_SCAN_CASE_ALL_HASH_VALUE1", + &int32_ret); + s = db.HSet("GP1_SCAN_CASE_ALL_HASH_KEY2", "GP1_SCAN_CASE_ALL_HASH_FIELD2", "GP1_SCAN_CASE_ALL_HASH_VALUE2", + &int32_ret); + s = db.HSet("GP1_SCAN_CASE_ALL_HASH_KEY3", "GP1_SCAN_CASE_ALL_HASH_FIELD3", "GP1_SCAN_CASE_ALL_HASH_VALUE3", + &int32_ret); + + // Set + s = db.SAdd("GP1_SCAN_CASE_ALL_SET_KEY1", {"GP1_SCAN_CASE_ALL_SET_MEMBER1"}, &int32_ret); + s = db.SAdd("GP1_SCAN_CASE_ALL_SET_KEY2", {"GP1_SCAN_CASE_ALL_SET_MEMBER2"}, &int32_ret); + s = db.SAdd("GP1_SCAN_CASE_ALL_SET_KEY3", {"GP1_SCAN_CASE_ALL_SET_MEMBER3"}, &int32_ret); + + // List + s = db.LPush("GP1_SCAN_CASE_ALL_LIST_KEY1", {"GP1_SCAN_CASE_ALL_LIST_NODE1"}, &uint64_ret); + s = db.LPush("GP1_SCAN_CASE_ALL_LIST_KEY2", {"GP1_SCAN_CASE_ALL_LIST_NODE2"}, &uint64_ret); + s = db.LPush("GP1_SCAN_CASE_ALL_LIST_KEY3", {"GP1_SCAN_CASE_ALL_LIST_NODE3"}, &uint64_ret); + + // ZSet + s = db.ZAdd("GP1_SCAN_CASE_ALL_ZSET_KEY1", {{1, "GP1_SCAN_CASE_ALL_LIST_MEMBER1"}}, &int32_ret); + s = db.ZAdd("GP1_SCAN_CASE_ALL_ZSET_KEY2", {{1, "GP1_SCAN_CASE_ALL_LIST_MEMBER2"}}, &int32_ret); + s = db.ZAdd("GP1_SCAN_CASE_ALL_ZSET_KEY3", {{1, "GP1_SCAN_CASE_ALL_LIST_MEMBER3"}}, &int32_ret); + + // Scan + delete_keys.clear(); + keys.clear(); + cursor = db.Scan(DataType::kAll, 0, "*", 3, &keys); + ASSERT_EQ(cursor, 3); + ASSERT_EQ(keys.size(), 3); + ASSERT_EQ(keys[0], "GP1_SCAN_CASE_ALL_STRING_KEY1"); + ASSERT_EQ(keys[1], "GP1_SCAN_CASE_ALL_STRING_KEY2"); + ASSERT_EQ(keys[2], "GP1_SCAN_CASE_ALL_STRING_KEY3"); + delete_keys.insert(delete_keys.end(), keys.begin(), keys.end()); + + keys.clear(); + cursor = db.Scan(DataType::kAll, 3, "*", 3, &keys); + ASSERT_EQ(cursor, 6); + ASSERT_EQ(keys.size(), 3); + ASSERT_EQ(keys[0], "GP1_SCAN_CASE_ALL_HASH_KEY1"); + ASSERT_EQ(keys[1], "GP1_SCAN_CASE_ALL_HASH_KEY2"); + ASSERT_EQ(keys[2], "GP1_SCAN_CASE_ALL_HASH_KEY3"); + delete_keys.insert(delete_keys.end(), keys.begin(), keys.end()); + + keys.clear(); + cursor = db.Scan(DataType::kAll, 6, "*", 3, &keys); + ASSERT_EQ(cursor, 9); + ASSERT_EQ(keys.size(), 3); + ASSERT_EQ(keys[0], "GP1_SCAN_CASE_ALL_SET_KEY1"); + ASSERT_EQ(keys[1], "GP1_SCAN_CASE_ALL_SET_KEY2"); + ASSERT_EQ(keys[2], "GP1_SCAN_CASE_ALL_SET_KEY3"); + delete_keys.insert(delete_keys.end(), keys.begin(), keys.end()); + +
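// With DataType::kAll the cursor walks the types in order: Strings, Hashes, Sets, Lists, then ZSets. +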
keys.clear(); + cursor = db.Scan(DataType::kAll, 9, "*", 3, &keys); + ASSERT_EQ(cursor, 12); + ASSERT_EQ(keys.size(), 3); + ASSERT_EQ(keys[0], "GP1_SCAN_CASE_ALL_LIST_KEY1"); + ASSERT_EQ(keys[1], "GP1_SCAN_CASE_ALL_LIST_KEY2"); + ASSERT_EQ(keys[2], "GP1_SCAN_CASE_ALL_LIST_KEY3"); + delete_keys.insert(delete_keys.end(), keys.begin(), keys.end()); + + keys.clear(); + cursor = db.Scan(DataType::kAll, 12, "*", 3, &keys); + ASSERT_EQ(cursor, 0); + ASSERT_EQ(keys.size(), 3); + ASSERT_EQ(keys[0], "GP1_SCAN_CASE_ALL_ZSET_KEY1"); + ASSERT_EQ(keys[1], "GP1_SCAN_CASE_ALL_ZSET_KEY2"); + ASSERT_EQ(keys[2], "GP1_SCAN_CASE_ALL_ZSET_KEY3"); + delete_keys.insert(delete_keys.end(), keys.begin(), keys.end()); + + del_num = db.Del(delete_keys); + ASSERT_EQ(del_num, 15); + sleep(2); + db.Compact(DataType::kAll, true); + + // ***************** Group 2 Test ***************** + // String + s = db.Set("GP2_SCAN_CASE_ALL_STRING_KEY1", "GP2_SCAN_CASE_ALL_STRING_VALUE1"); + s = db.Set("GP2_SCAN_CASE_ALL_STRING_KEY2", "GP2_SCAN_CASE_ALL_STRING_VALUE2"); + s = db.Set("GP2_SCAN_CASE_ALL_STRING_KEY3", "GP2_SCAN_CASE_ALL_STRING_VALUE3"); + + // Hash + s = db.HSet("GP2_SCAN_CASE_ALL_HASH_KEY1", "GP2_SCAN_CASE_ALL_HASH_FIELD1", "GP2_SCAN_CASE_ALL_HASH_VALUE1", + &int32_ret); + s = db.HSet("GP2_SCAN_CASE_ALL_HASH_KEY2", "GP2_SCAN_CASE_ALL_HASH_FIELD2", "GP2_SCAN_CASE_ALL_HASH_VALUE2", + &int32_ret); + s = db.HSet("GP2_SCAN_CASE_ALL_HASH_KEY3", "GP2_SCAN_CASE_ALL_HASH_FIELD3", "GP2_SCAN_CASE_ALL_HASH_VALUE3", + &int32_ret); + + // Set + s = db.SAdd("GP2_SCAN_CASE_ALL_SET_KEY1", {"GP2_SCAN_CASE_ALL_SET_MEMBER1"}, &int32_ret); + s = db.SAdd("GP2_SCAN_CASE_ALL_SET_KEY2", {"GP2_SCAN_CASE_ALL_SET_MEMBER2"}, &int32_ret); + s = db.SAdd("GP2_SCAN_CASE_ALL_SET_KEY3", {"GP2_SCAN_CASE_ALL_SET_MEMBER3"}, &int32_ret); + + // List + s = db.LPush("GP2_SCAN_CASE_ALL_LIST_KEY1", {"GP2_SCAN_CASE_ALL_LIST_NODE1"}, &uint64_ret); + s = db.LPush("GP2_SCAN_CASE_ALL_LIST_KEY2", {"GP2_SCAN_CASE_ALL_LIST_NODE2"}, &uint64_ret); + s = db.LPush("GP2_SCAN_CASE_ALL_LIST_KEY3", {"GP2_SCAN_CASE_ALL_LIST_NODE3"}, &uint64_ret); + + // ZSet + s = db.ZAdd("GP2_SCAN_CASE_ALL_ZSET_KEY1", {{1, "GP2_SCAN_CASE_ALL_LIST_MEMBER1"}}, &int32_ret); + s = db.ZAdd("GP2_SCAN_CASE_ALL_ZSET_KEY2", {{1, "GP2_SCAN_CASE_ALL_LIST_MEMBER2"}}, &int32_ret); + s = db.ZAdd("GP2_SCAN_CASE_ALL_ZSET_KEY3", {{1, "GP2_SCAN_CASE_ALL_LIST_MEMBER3"}}, &int32_ret); + + // Scan + delete_keys.clear(); + keys.clear(); + cursor = db.Scan(DataType::kAll, 0, "*", 2, &keys); + ASSERT_EQ(cursor, 2); + ASSERT_EQ(keys.size(), 2); + ASSERT_EQ(keys[0], "GP2_SCAN_CASE_ALL_STRING_KEY1"); + ASSERT_EQ(keys[1], "GP2_SCAN_CASE_ALL_STRING_KEY2"); + delete_keys.insert(delete_keys.end(), keys.begin(), keys.end()); + + keys.clear(); + cursor = db.Scan(DataType::kAll, 2, "*", 2, &keys); + ASSERT_EQ(cursor, 4); + ASSERT_EQ(keys.size(), 2); + ASSERT_EQ(keys[0], "GP2_SCAN_CASE_ALL_STRING_KEY3"); + ASSERT_EQ(keys[1], "GP2_SCAN_CASE_ALL_HASH_KEY1"); + delete_keys.insert(delete_keys.end(), keys.begin(), keys.end()); + + keys.clear(); + cursor = db.Scan(DataType::kAll, 4, "*", 2, &keys); + ASSERT_EQ(cursor, 6); + ASSERT_EQ(keys.size(), 2); + ASSERT_EQ(keys[0], "GP2_SCAN_CASE_ALL_HASH_KEY2"); + ASSERT_EQ(keys[1], "GP2_SCAN_CASE_ALL_HASH_KEY3"); + delete_keys.insert(delete_keys.end(), keys.begin(), keys.end()); + + keys.clear(); + cursor = db.Scan(DataType::kAll, 6, "*", 2, &keys); + ASSERT_EQ(cursor, 8); + ASSERT_EQ(keys.size(), 2); + ASSERT_EQ(keys[0], "GP2_SCAN_CASE_ALL_SET_KEY1"); + ASSERT_EQ(keys[1], 
"GP2_SCAN_CASE_ALL_SET_KEY2"); + delete_keys.insert(delete_keys.end(), keys.begin(), keys.end()); + + keys.clear(); + cursor = db.Scan(DataType::kAll, 8, "*", 2, &keys); + ASSERT_EQ(cursor, 10); + ASSERT_EQ(keys.size(), 2); + ASSERT_EQ(keys[0], "GP2_SCAN_CASE_ALL_SET_KEY3"); + ASSERT_EQ(keys[1], "GP2_SCAN_CASE_ALL_LIST_KEY1"); + delete_keys.insert(delete_keys.end(), keys.begin(), keys.end()); + + keys.clear(); + cursor = db.Scan(DataType::kAll, 10, "*", 2, &keys); + ASSERT_EQ(cursor, 12); + ASSERT_EQ(keys.size(), 2); + ASSERT_EQ(keys[0], "GP2_SCAN_CASE_ALL_LIST_KEY2"); + ASSERT_EQ(keys[1], "GP2_SCAN_CASE_ALL_LIST_KEY3"); + delete_keys.insert(delete_keys.end(), keys.begin(), keys.end()); + + keys.clear(); + cursor = db.Scan(DataType::kAll, 12, "*", 2, &keys); + ASSERT_EQ(cursor, 14); + ASSERT_EQ(keys.size(), 2); + ASSERT_EQ(keys[0], "GP2_SCAN_CASE_ALL_ZSET_KEY1"); + ASSERT_EQ(keys[1], "GP2_SCAN_CASE_ALL_ZSET_KEY2"); + delete_keys.insert(delete_keys.end(), keys.begin(), keys.end()); + + keys.clear(); + cursor = db.Scan(DataType::kAll, 14, "*", 2, &keys); + ASSERT_EQ(cursor, 0); + ASSERT_EQ(keys.size(), 1); + ASSERT_EQ(keys[0], "GP2_SCAN_CASE_ALL_ZSET_KEY3"); + delete_keys.insert(delete_keys.end(), keys.begin(), keys.end()); + + del_num = db.Del(delete_keys); + ASSERT_EQ(del_num, 15); + sleep(2); + db.Compact(DataType::kAll, true); + + // ***************** Group 3 Test ***************** + // String + s = db.Set("GP3_SCAN_CASE_ALL_STRING_KEY1", "GP3_SCAN_CASE_ALL_STRING_VALUE1"); + s = db.Set("GP3_SCAN_CASE_ALL_STRING_KEY2", "GP3_SCAN_CASE_ALL_STRING_VALUE2"); + s = db.Set("GP3_SCAN_CASE_ALL_STRING_KEY3", "GP3_SCAN_CASE_ALL_STRING_VALUE3"); + + // Hash + s = db.HSet("GP3_SCAN_CASE_ALL_HASH_KEY1", "GP3_SCAN_CASE_ALL_HASH_FIELD1", "GP3_SCAN_CASE_ALL_HASH_VALUE1", + &int32_ret); + s = db.HSet("GP3_SCAN_CASE_ALL_HASH_KEY2", "GP3_SCAN_CASE_ALL_HASH_FIELD2", "GP3_SCAN_CASE_ALL_HASH_VALUE2", + &int32_ret); + s = db.HSet("GP3_SCAN_CASE_ALL_HASH_KEY3", "GP3_SCAN_CASE_ALL_HASH_FIELD3", "GP3_SCAN_CASE_ALL_HASH_VALUE3", + &int32_ret); + + // Set + s = db.SAdd("GP3_SCAN_CASE_ALL_SET_KEY1", {"GP3_SCAN_CASE_ALL_SET_MEMBER1"}, &int32_ret); + s = db.SAdd("GP3_SCAN_CASE_ALL_SET_KEY2", {"GP3_SCAN_CASE_ALL_SET_MEMBER2"}, &int32_ret); + s = db.SAdd("GP3_SCAN_CASE_ALL_SET_KEY3", {"GP3_SCAN_CASE_ALL_SET_MEMBER3"}, &int32_ret); + + // List + s = db.LPush("GP3_SCAN_CASE_ALL_LIST_KEY1", {"GP3_SCAN_CASE_ALL_LIST_NODE1"}, &uint64_ret); + s = db.LPush("GP3_SCAN_CASE_ALL_LIST_KEY2", {"GP3_SCAN_CASE_ALL_LIST_NODE2"}, &uint64_ret); + s = db.LPush("GP3_SCAN_CASE_ALL_LIST_KEY3", {"GP3_SCAN_CASE_ALL_LIST_NODE3"}, &uint64_ret); + + // ZSet + s = db.ZAdd("GP3_SCAN_CASE_ALL_ZSET_KEY1", {{1, "GP3_SCAN_CASE_ALL_LIST_MEMBER1"}}, &int32_ret); + s = db.ZAdd("GP3_SCAN_CASE_ALL_ZSET_KEY2", {{1, "GP3_SCAN_CASE_ALL_LIST_MEMBER2"}}, &int32_ret); + s = db.ZAdd("GP3_SCAN_CASE_ALL_ZSET_KEY3", {{1, "GP3_SCAN_CASE_ALL_LIST_MEMBER3"}}, &int32_ret); + + // Scan + delete_keys.clear(); + keys.clear(); + cursor = db.Scan(DataType::kAll, 0, "*", 5, &keys); + ASSERT_EQ(cursor, 5); + ASSERT_EQ(keys.size(), 5); + ASSERT_EQ(keys[0], "GP3_SCAN_CASE_ALL_STRING_KEY1"); + ASSERT_EQ(keys[1], "GP3_SCAN_CASE_ALL_STRING_KEY2"); + ASSERT_EQ(keys[2], "GP3_SCAN_CASE_ALL_STRING_KEY3"); + ASSERT_EQ(keys[3], "GP3_SCAN_CASE_ALL_HASH_KEY1"); + ASSERT_EQ(keys[4], "GP3_SCAN_CASE_ALL_HASH_KEY2"); + delete_keys.insert(delete_keys.end(), keys.begin(), keys.end()); + + keys.clear(); + cursor = db.Scan(DataType::kAll, 5, "*", 5, &keys); + ASSERT_EQ(cursor, 10); + 
ASSERT_EQ(keys.size(), 5); + ASSERT_EQ(keys[0], "GP3_SCAN_CASE_ALL_HASH_KEY3"); + ASSERT_EQ(keys[1], "GP3_SCAN_CASE_ALL_SET_KEY1"); + ASSERT_EQ(keys[2], "GP3_SCAN_CASE_ALL_SET_KEY2"); + ASSERT_EQ(keys[3], "GP3_SCAN_CASE_ALL_SET_KEY3"); + ASSERT_EQ(keys[4], "GP3_SCAN_CASE_ALL_LIST_KEY1"); + delete_keys.insert(delete_keys.end(), keys.begin(), keys.end()); + + keys.clear(); + cursor = db.Scan(DataType::kAll, 10, "*", 5, &keys); + ASSERT_EQ(cursor, 0); + ASSERT_EQ(keys.size(), 5); + ASSERT_EQ(keys[0], "GP3_SCAN_CASE_ALL_LIST_KEY2"); + ASSERT_EQ(keys[1], "GP3_SCAN_CASE_ALL_LIST_KEY3"); + ASSERT_EQ(keys[2], "GP3_SCAN_CASE_ALL_ZSET_KEY1"); + ASSERT_EQ(keys[3], "GP3_SCAN_CASE_ALL_ZSET_KEY2"); + ASSERT_EQ(keys[4], "GP3_SCAN_CASE_ALL_ZSET_KEY3"); + delete_keys.insert(delete_keys.end(), keys.begin(), keys.end()); + + del_num = db.Del(delete_keys); + ASSERT_EQ(del_num, 15); + sleep(2); + db.Compact(DataType::kAll, true); + + // ***************** Group 4 Test ***************** + // String + s = db.Set("GP4_SCAN_CASE_ALL_STRING_KEY1", "GP4_SCAN_CASE_ALL_STRING_VALUE1"); + s = db.Set("GP4_SCAN_CASE_ALL_STRING_KEY2", "GP4_SCAN_CASE_ALL_STRING_VALUE2"); + s = db.Set("GP4_SCAN_CASE_ALL_STRING_KEY3", "GP4_SCAN_CASE_ALL_STRING_VALUE3"); + + // Hash + s = db.HSet("GP4_SCAN_CASE_ALL_HASH_KEY1", "GP4_SCAN_CASE_ALL_HASH_FIELD1", "GP4_SCAN_CASE_ALL_HASH_VALUE1", + &int32_ret); + s = db.HSet("GP4_SCAN_CASE_ALL_HASH_KEY2", "GP4_SCAN_CASE_ALL_HASH_FIELD2", "GP4_SCAN_CASE_ALL_HASH_VALUE2", + &int32_ret); + s = db.HSet("GP4_SCAN_CASE_ALL_HASH_KEY3", "GP4_SCAN_CASE_ALL_HASH_FIELD3", "GP4_SCAN_CASE_ALL_HASH_VALUE3", + &int32_ret); + + // Set + s = db.SAdd("GP4_SCAN_CASE_ALL_SET_KEY1", {"GP4_SCAN_CASE_ALL_SET_MEMBER1"}, &int32_ret); + s = db.SAdd("GP4_SCAN_CASE_ALL_SET_KEY2", {"GP4_SCAN_CASE_ALL_SET_MEMBER2"}, &int32_ret); + s = db.SAdd("GP4_SCAN_CASE_ALL_SET_KEY3", {"GP4_SCAN_CASE_ALL_SET_MEMBER3"}, &int32_ret); + + // List + s = db.LPush("GP4_SCAN_CASE_ALL_LIST_KEY1", {"GP4_SCAN_CASE_ALL_LIST_NODE1"}, &uint64_ret); + s = db.LPush("GP4_SCAN_CASE_ALL_LIST_KEY2", {"GP4_SCAN_CASE_ALL_LIST_NODE2"}, &uint64_ret); + s = db.LPush("GP4_SCAN_CASE_ALL_LIST_KEY3", {"GP4_SCAN_CASE_ALL_LIST_NODE3"}, &uint64_ret); + + // ZSet + s = db.ZAdd("GP4_SCAN_CASE_ALL_ZSET_KEY1", {{1, "GP4_SCAN_CASE_ALL_LIST_MEMBER1"}}, &int32_ret); + s = db.ZAdd("GP4_SCAN_CASE_ALL_ZSET_KEY2", {{1, "GP4_SCAN_CASE_ALL_LIST_MEMBER2"}}, &int32_ret); + s = db.ZAdd("GP4_SCAN_CASE_ALL_ZSET_KEY3", {{1, "GP4_SCAN_CASE_ALL_LIST_MEMBER3"}}, &int32_ret); + + delete_keys.clear(); + keys.clear(); + cursor = db.Scan(DataType::kAll, 0, "*", 15, &keys); + ASSERT_EQ(cursor, 0); + ASSERT_EQ(keys.size(), 15); + ASSERT_EQ(keys[0], "GP4_SCAN_CASE_ALL_STRING_KEY1"); + ASSERT_EQ(keys[1], "GP4_SCAN_CASE_ALL_STRING_KEY2"); + ASSERT_EQ(keys[2], "GP4_SCAN_CASE_ALL_STRING_KEY3"); + ASSERT_EQ(keys[3], "GP4_SCAN_CASE_ALL_HASH_KEY1"); + ASSERT_EQ(keys[4], "GP4_SCAN_CASE_ALL_HASH_KEY2"); + ASSERT_EQ(keys[5], "GP4_SCAN_CASE_ALL_HASH_KEY3"); + ASSERT_EQ(keys[6], "GP4_SCAN_CASE_ALL_SET_KEY1"); + ASSERT_EQ(keys[7], "GP4_SCAN_CASE_ALL_SET_KEY2"); + ASSERT_EQ(keys[8], "GP4_SCAN_CASE_ALL_SET_KEY3"); + ASSERT_EQ(keys[9], "GP4_SCAN_CASE_ALL_LIST_KEY1"); + ASSERT_EQ(keys[10], "GP4_SCAN_CASE_ALL_LIST_KEY2"); + ASSERT_EQ(keys[11], "GP4_SCAN_CASE_ALL_LIST_KEY3"); + ASSERT_EQ(keys[12], "GP4_SCAN_CASE_ALL_ZSET_KEY1"); + ASSERT_EQ(keys[13], "GP4_SCAN_CASE_ALL_ZSET_KEY2"); + ASSERT_EQ(keys[14], "GP4_SCAN_CASE_ALL_ZSET_KEY3"); + delete_keys.insert(delete_keys.end(), keys.begin(), keys.end()); + + del_num = 
db.Del(delete_keys); + ASSERT_EQ(del_num, 15); + sleep(2); + db.Compact(DataType::kAll, true); + + // ***************** Group 5 Test ***************** + delete_keys.clear(); + // String + s = db.Set("GP5_SCAN_CASE_ALL_STRING_KEY1", "GP5_SCAN_CASE_ALL_STRING_VALUE1"); + s = db.Set("GP5_SCAN_CASE_ALL_STRING_KEY2", "GP5_SCAN_CASE_ALL_STRING_VALUE2"); + s = db.Set("GP5_SCAN_CASE_ALL_STRING_KEY3", "GP5_SCAN_CASE_ALL_STRING_VALUE3"); + delete_keys.emplace_back("GP5_SCAN_CASE_ALL_STRING_KEY1"); + delete_keys.emplace_back("GP5_SCAN_CASE_ALL_STRING_KEY2"); + delete_keys.emplace_back("GP5_SCAN_CASE_ALL_STRING_KEY3"); + + // Hash + s = db.HSet("GP5_SCAN_CASE_ALL_HASH_KEY1", "GP5_SCAN_CASE_ALL_HASH_FIELD1", "GP5_SCAN_CASE_ALL_HASH_VALUE1", + &int32_ret); + s = db.HSet("GP5_SCAN_CASE_ALL_HASH_KEY2", "GP5_SCAN_CASE_ALL_HASH_FIELD2", "GP5_SCAN_CASE_ALL_HASH_VALUE2", + &int32_ret); + s = db.HSet("GP5_SCAN_CASE_ALL_HASH_KEY3", "GP5_SCAN_CASE_ALL_HASH_FIELD3", "GP5_SCAN_CASE_ALL_HASH_VALUE3", + &int32_ret); + delete_keys.emplace_back("GP5_SCAN_CASE_ALL_HASH_KEY1"); + delete_keys.emplace_back("GP5_SCAN_CASE_ALL_HASH_KEY2"); + delete_keys.emplace_back("GP5_SCAN_CASE_ALL_HASH_KEY3"); + + // Set + s = db.SAdd("GP5_SCAN_CASE_ALL_SET_KEY1", {"GP5_SCAN_CASE_ALL_SET_MEMBER1"}, &int32_ret); + s = db.SAdd("GP5_SCAN_CASE_ALL_SET_KEY2", {"GP5_SCAN_CASE_ALL_SET_MEMBER2"}, &int32_ret); + s = db.SAdd("GP5_SCAN_CASE_ALL_SET_KEY3", {"GP5_SCAN_CASE_ALL_SET_MEMBER3"}, &int32_ret); + delete_keys.emplace_back("GP5_SCAN_CASE_ALL_SET_KEY1"); + delete_keys.emplace_back("GP5_SCAN_CASE_ALL_SET_KEY2"); + delete_keys.emplace_back("GP5_SCAN_CASE_ALL_SET_KEY3"); + + // List + s = db.LPush("GP5_SCAN_CASE_ALL_LIST_KEY1", {"GP5_SCAN_CASE_ALL_LIST_NODE1"}, &uint64_ret); + s = db.LPush("GP5_SCAN_CASE_ALL_LIST_KEY2", {"GP5_SCAN_CASE_ALL_LIST_NODE2"}, &uint64_ret); + s = db.LPush("GP5_SCAN_CASE_ALL_LIST_KEY3", {"GP5_SCAN_CASE_ALL_LIST_NODE3"}, &uint64_ret); + delete_keys.emplace_back("GP5_SCAN_CASE_ALL_LIST_KEY1"); + delete_keys.emplace_back("GP5_SCAN_CASE_ALL_LIST_KEY2"); + delete_keys.emplace_back("GP5_SCAN_CASE_ALL_LIST_KEY3"); + + // ZSet + s = db.ZAdd("GP5_SCAN_CASE_ALL_ZSET_KEY1", {{1, "GP5_SCAN_CASE_ALL_LIST_MEMBER1"}}, &int32_ret); + s = db.ZAdd("GP5_SCAN_CASE_ALL_ZSET_KEY2", {{1, "GP5_SCAN_CASE_ALL_LIST_MEMBER2"}}, &int32_ret); + s = db.ZAdd("GP5_SCAN_CASE_ALL_ZSET_KEY3", {{1, "GP5_SCAN_CASE_ALL_LIST_MEMBER3"}}, &int32_ret); + delete_keys.emplace_back("GP5_SCAN_CASE_ALL_ZSET_KEY1"); + delete_keys.emplace_back("GP5_SCAN_CASE_ALL_ZSET_KEY2"); + delete_keys.emplace_back("GP5_SCAN_CASE_ALL_ZSET_KEY3"); + + cursor = 0; + keys.clear(); + total_keys.clear(); + do { + next_cursor = db.Scan(DataType::kAll, cursor, "*_SET_*", 5, &keys); + total_keys.insert(total_keys.end(), keys.begin(), keys.end()); + cursor = next_cursor; + } while (cursor != 0); + ASSERT_EQ(total_keys.size(), 3); + ASSERT_EQ(total_keys[0], "GP5_SCAN_CASE_ALL_SET_KEY1"); + ASSERT_EQ(total_keys[1], "GP5_SCAN_CASE_ALL_SET_KEY2"); + ASSERT_EQ(total_keys[2], "GP5_SCAN_CASE_ALL_SET_KEY3"); + + del_num = db.Del(delete_keys); + ASSERT_EQ(del_num, 15); + sleep(2); + db.Compact(DataType::kAll, true); + + // ***************** Group 6 Test ***************** + delete_keys.clear(); + // String + s = db.Set("GP6_SCAN_CASE_ALL_STRING_KEY1", "GP6_SCAN_CASE_ALL_STRING_VALUE1"); + s = db.Set("GP6_SCAN_CASE_ALL_STRING_KEY2", "GP6_SCAN_CASE_ALL_STRING_VALUE2"); + s = db.Set("GP6_SCAN_CASE_ALL_STRING_KEY3", "GP6_SCAN_CASE_ALL_STRING_VALUE3"); + 
delete_keys.emplace_back("GP6_SCAN_CASE_ALL_STRING_KEY1"); + delete_keys.emplace_back("GP6_SCAN_CASE_ALL_STRING_KEY2"); + delete_keys.emplace_back("GP6_SCAN_CASE_ALL_STRING_KEY3"); + + // Hash + s = db.HSet("GP6_SCAN_CASE_ALL_HASH_KEY1", "GP6_SCAN_CASE_ALL_HASH_FIELD1", "GP6_SCAN_CASE_ALL_HASH_VALUE1", + &int32_ret); + s = db.HSet("GP6_SCAN_CASE_ALL_HASH_KEY2", "GP6_SCAN_CASE_ALL_HASH_FIELD2", "GP6_SCAN_CASE_ALL_HASH_VALUE2", + &int32_ret); + s = db.HSet("GP6_SCAN_CASE_ALL_HASH_KEY3", "GP6_SCAN_CASE_ALL_HASH_FIELD3", "GP6_SCAN_CASE_ALL_HASH_VALUE3", + &int32_ret); + delete_keys.emplace_back("GP6_SCAN_CASE_ALL_HASH_KEY1"); + delete_keys.emplace_back("GP6_SCAN_CASE_ALL_HASH_KEY2"); + delete_keys.emplace_back("GP6_SCAN_CASE_ALL_HASH_KEY3"); + + // Set + s = db.SAdd("GP6_SCAN_CASE_ALL_SET_KEY1", {"GP6_SCAN_CASE_ALL_SET_MEMBER1"}, &int32_ret); + s = db.SAdd("GP6_SCAN_CASE_ALL_SET_KEY2", {"GP6_SCAN_CASE_ALL_SET_MEMBER2"}, &int32_ret); + s = db.SAdd("GP6_SCAN_CASE_ALL_SET_KEY3", {"GP6_SCAN_CASE_ALL_SET_MEMBER3"}, &int32_ret); + delete_keys.emplace_back("GP6_SCAN_CASE_ALL_SET_KEY1"); + delete_keys.emplace_back("GP6_SCAN_CASE_ALL_SET_KEY2"); + delete_keys.emplace_back("GP6_SCAN_CASE_ALL_SET_KEY3"); + + // List + s = db.LPush("GP6_SCAN_CASE_ALL_LIST_KEY1", {"GP6_SCAN_CASE_ALL_LIST_NODE1"}, &uint64_ret); + s = db.LPush("GP6_SCAN_CASE_ALL_LIST_KEY2", {"GP6_SCAN_CASE_ALL_LIST_NODE2"}, &uint64_ret); + s = db.LPush("GP6_SCAN_CASE_ALL_LIST_KEY3", {"GP6_SCAN_CASE_ALL_LIST_NODE3"}, &uint64_ret); + delete_keys.emplace_back("GP6_SCAN_CASE_ALL_LIST_KEY1"); + delete_keys.emplace_back("GP6_SCAN_CASE_ALL_LIST_KEY2"); + delete_keys.emplace_back("GP6_SCAN_CASE_ALL_LIST_KEY3"); + + // ZSet + s = db.ZAdd("GP6_SCAN_CASE_ALL_ZSET_KEY1", {{1, "GP6_SCAN_CASE_ALL_LIST_MEMBER1"}}, &int32_ret); + s = db.ZAdd("GP6_SCAN_CASE_ALL_ZSET_KEY2", {{1, "GP6_SCAN_CASE_ALL_LIST_MEMBER2"}}, &int32_ret); + s = db.ZAdd("GP6_SCAN_CASE_ALL_ZSET_KEY3", {{1, "GP6_SCAN_CASE_ALL_LIST_MEMBER3"}}, &int32_ret); + delete_keys.emplace_back("GP6_SCAN_CASE_ALL_ZSET_KEY1"); + delete_keys.emplace_back("GP6_SCAN_CASE_ALL_ZSET_KEY2"); + delete_keys.emplace_back("GP6_SCAN_CASE_ALL_ZSET_KEY3"); + + cursor = 0; + keys.clear(); + total_keys.clear(); + do { + next_cursor = db.Scan(DataType::kAll, cursor, "*KEY1", 5, &keys); + total_keys.insert(total_keys.end(), keys.begin(), keys.end()); + cursor = next_cursor; + } while (cursor != 0); + ASSERT_EQ(total_keys.size(), 5); + ASSERT_EQ(total_keys[0], "GP6_SCAN_CASE_ALL_STRING_KEY1"); + ASSERT_EQ(total_keys[1], "GP6_SCAN_CASE_ALL_HASH_KEY1"); + ASSERT_EQ(total_keys[2], "GP6_SCAN_CASE_ALL_SET_KEY1"); + ASSERT_EQ(total_keys[3], "GP6_SCAN_CASE_ALL_LIST_KEY1"); + ASSERT_EQ(total_keys[4], "GP6_SCAN_CASE_ALL_ZSET_KEY1"); + + del_num = db.Del(delete_keys); + ASSERT_EQ(del_num, 15); + sleep(2); + db.Compact(DataType::kAll, true); + + // ***************** Group 7 Test ***************** + delete_keys.clear(); + // String + s = db.Set("GP7_SCAN_CASE_ALL_STRING_KEY1", "GP7_SCAN_CASE_ALL_STRING_VALUE1"); + s = db.Set("GP7_SCAN_CASE_ALL_STRING_KEY2", "GP7_SCAN_CASE_ALL_STRING_VALUE2"); + s = db.Set("GP7_SCAN_CASE_ALL_STRING_KEY3", "GP7_SCAN_CASE_ALL_STRING_VALUE3"); + delete_keys.emplace_back("GP7_SCAN_CASE_ALL_STRING_KEY1"); + delete_keys.emplace_back("GP7_SCAN_CASE_ALL_STRING_KEY2"); + delete_keys.emplace_back("GP7_SCAN_CASE_ALL_STRING_KEY3"); + + // Hash + s = db.HSet("GP7_SCAN_CASE_ALL_HASH_KEY1", "GP7_SCAN_CASE_ALL_HASH_FIELD1", "GP7_SCAN_CASE_ALL_HASH_VALUE1", + &int32_ret); + s = db.HSet("GP7_SCAN_CASE_ALL_HASH_KEY2", 
"GP7_SCAN_CASE_ALL_HASH_FIELD2", "GP7_SCAN_CASE_ALL_HASH_VALUE2", + &int32_ret); + s = db.HSet("GP7_SCAN_CASE_ALL_HASH_KEY3", "GP7_SCAN_CASE_ALL_HASH_FIELD3", "GP7_SCAN_CASE_ALL_HASH_VALUE3", + &int32_ret); + delete_keys.emplace_back("GP7_SCAN_CASE_ALL_HASH_KEY1"); + delete_keys.emplace_back("GP7_SCAN_CASE_ALL_HASH_KEY2"); + delete_keys.emplace_back("GP7_SCAN_CASE_ALL_HASH_KEY3"); + + // Set + s = db.SAdd("GP7_SCAN_CASE_ALL_SET_KEY1", {"GP7_SCAN_CASE_ALL_SET_MEMBER1"}, &int32_ret); + s = db.SAdd("GP7_SCAN_CASE_ALL_SET_KEY2", {"GP7_SCAN_CASE_ALL_SET_MEMBER2"}, &int32_ret); + s = db.SAdd("GP7_SCAN_CASE_ALL_SET_KEY3", {"GP7_SCAN_CASE_ALL_SET_MEMBER3"}, &int32_ret); + delete_keys.emplace_back("GP7_SCAN_CASE_ALL_SET_KEY1"); + delete_keys.emplace_back("GP7_SCAN_CASE_ALL_SET_KEY2"); + delete_keys.emplace_back("GP7_SCAN_CASE_ALL_SET_KEY3"); + + // List + s = db.LPush("GP7_SCAN_CASE_ALL_LIST_KEY1", {"GP7_SCAN_CASE_ALL_LIST_NODE1"}, &uint64_ret); + s = db.LPush("GP7_SCAN_CASE_ALL_LIST_KEY2", {"GP7_SCAN_CASE_ALL_LIST_NODE2"}, &uint64_ret); + s = db.LPush("GP7_SCAN_CASE_ALL_LIST_KEY3", {"GP7_SCAN_CASE_ALL_LIST_NODE3"}, &uint64_ret); + delete_keys.emplace_back("GP7_SCAN_CASE_ALL_LIST_KEY1"); + delete_keys.emplace_back("GP7_SCAN_CASE_ALL_LIST_KEY2"); + delete_keys.emplace_back("GP7_SCAN_CASE_ALL_LIST_KEY3"); + + // ZSet + s = db.ZAdd("GP7_SCAN_CASE_ALL_ZSET_KEY1", {{1, "GP7_SCAN_CASE_ALL_LIST_MEMBER1"}}, &int32_ret); + s = db.ZAdd("GP7_SCAN_CASE_ALL_ZSET_KEY2", {{1, "GP7_SCAN_CASE_ALL_LIST_MEMBER2"}}, &int32_ret); + s = db.ZAdd("GP7_SCAN_CASE_ALL_ZSET_KEY3", {{1, "GP7_SCAN_CASE_ALL_LIST_MEMBER3"}}, &int32_ret); + delete_keys.emplace_back("GP7_SCAN_CASE_ALL_ZSET_KEY1"); + delete_keys.emplace_back("GP7_SCAN_CASE_ALL_ZSET_KEY2"); + delete_keys.emplace_back("GP7_SCAN_CASE_ALL_ZSET_KEY3"); + + cursor = 0; + keys.clear(); + total_keys.clear(); + do { + next_cursor = db.Scan(DataType::kAll, cursor, "*KEY2", 5, &keys); + total_keys.insert(total_keys.end(), keys.begin(), keys.end()); + cursor = next_cursor; + } while (cursor != 0); + ASSERT_EQ(total_keys.size(), 5); + ASSERT_EQ(total_keys[0], "GP7_SCAN_CASE_ALL_STRING_KEY2"); + ASSERT_EQ(total_keys[1], "GP7_SCAN_CASE_ALL_HASH_KEY2"); + ASSERT_EQ(total_keys[2], "GP7_SCAN_CASE_ALL_SET_KEY2"); + ASSERT_EQ(total_keys[3], "GP7_SCAN_CASE_ALL_LIST_KEY2"); + ASSERT_EQ(total_keys[4], "GP7_SCAN_CASE_ALL_ZSET_KEY2"); + + del_num = db.Del(delete_keys); + ASSERT_EQ(del_num, 15); + sleep(2); + db.Compact(DataType::kAll, true); + + // ***************** Group 8 Test ***************** + delete_keys.clear(); + // String + s = db.Set("GP8_SCAN_CASE_ALL_STRING_KEY1", "GP8_SCAN_CASE_ALL_STRING_VALUE1"); + s = db.Set("GP8_SCAN_CASE_ALL_STRING_KEY2", "GP8_SCAN_CASE_ALL_STRING_VALUE2"); + s = db.Set("GP8_SCAN_CASE_ALL_STRING_KEY3", "GP8_SCAN_CASE_ALL_STRING_VALUE3"); + delete_keys.emplace_back("GP8_SCAN_CASE_ALL_STRING_KEY1"); + delete_keys.emplace_back("GP8_SCAN_CASE_ALL_STRING_KEY2"); + delete_keys.emplace_back("GP8_SCAN_CASE_ALL_STRING_KEY3"); + + // Hash + s = db.HSet("GP8_SCAN_CASE_ALL_HASH_KEY1", "GP8_SCAN_CASE_ALL_HASH_FIELD1", "GP8_SCAN_CASE_ALL_HASH_VALUE1", + &int32_ret); + s = db.HSet("GP8_SCAN_CASE_ALL_HASH_KEY2", "GP8_SCAN_CASE_ALL_HASH_FIELD2", "GP8_SCAN_CASE_ALL_HASH_VALUE2", + &int32_ret); + s = db.HSet("GP8_SCAN_CASE_ALL_HASH_KEY3", "GP8_SCAN_CASE_ALL_HASH_FIELD3", "GP8_SCAN_CASE_ALL_HASH_VALUE3", + &int32_ret); + delete_keys.emplace_back("GP8_SCAN_CASE_ALL_HASH_KEY1"); + delete_keys.emplace_back("GP8_SCAN_CASE_ALL_HASH_KEY2"); + 
delete_keys.emplace_back("GP8_SCAN_CASE_ALL_HASH_KEY3"); + + // Set + s = db.SAdd("GP8_SCAN_CASE_ALL_SET_KEY1", {"GP8_SCAN_CASE_ALL_SET_MEMBER1"}, &int32_ret); + s = db.SAdd("GP8_SCAN_CASE_ALL_SET_KEY2", {"GP8_SCAN_CASE_ALL_SET_MEMBER2"}, &int32_ret); + s = db.SAdd("GP8_SCAN_CASE_ALL_SET_KEY3", {"GP8_SCAN_CASE_ALL_SET_MEMBER3"}, &int32_ret); + delete_keys.emplace_back("GP8_SCAN_CASE_ALL_SET_KEY1"); + delete_keys.emplace_back("GP8_SCAN_CASE_ALL_SET_KEY2"); + delete_keys.emplace_back("GP8_SCAN_CASE_ALL_SET_KEY3"); + + // List + s = db.LPush("GP8_SCAN_CASE_ALL_LIST_KEY1", {"GP8_SCAN_CASE_ALL_LIST_NODE1"}, &uint64_ret); + s = db.LPush("GP8_SCAN_CASE_ALL_LIST_KEY2", {"GP8_SCAN_CASE_ALL_LIST_NODE2"}, &uint64_ret); + s = db.LPush("GP8_SCAN_CASE_ALL_LIST_KEY3", {"GP8_SCAN_CASE_ALL_LIST_NODE3"}, &uint64_ret); + delete_keys.emplace_back("GP8_SCAN_CASE_ALL_LIST_KEY1"); + delete_keys.emplace_back("GP8_SCAN_CASE_ALL_LIST_KEY2"); + delete_keys.emplace_back("GP8_SCAN_CASE_ALL_LIST_KEY3"); + + // ZSet + s = db.ZAdd("GP8_SCAN_CASE_ALL_ZSET_KEY1", {{1, "GP8_SCAN_CASE_ALL_LIST_MEMBER1"}}, &int32_ret); + s = db.ZAdd("GP8_SCAN_CASE_ALL_ZSET_KEY2", {{1, "GP8_SCAN_CASE_ALL_LIST_MEMBER2"}}, &int32_ret); + s = db.ZAdd("GP8_SCAN_CASE_ALL_ZSET_KEY3", {{1, "GP8_SCAN_CASE_ALL_LIST_MEMBER3"}}, &int32_ret); + delete_keys.emplace_back("GP8_SCAN_CASE_ALL_ZSET_KEY1"); + delete_keys.emplace_back("GP8_SCAN_CASE_ALL_ZSET_KEY2"); + delete_keys.emplace_back("GP8_SCAN_CASE_ALL_ZSET_KEY3"); + + cursor = 0; + keys.clear(); + total_keys.clear(); + do { + next_cursor = db.Scan(DataType::kAll, cursor, "*KEY3", 5, &keys); + total_keys.insert(total_keys.end(), keys.begin(), keys.end()); + cursor = next_cursor; + } while (cursor != 0); + ASSERT_EQ(total_keys.size(), 5); + ASSERT_EQ(total_keys[0], "GP8_SCAN_CASE_ALL_STRING_KEY3"); + ASSERT_EQ(total_keys[1], "GP8_SCAN_CASE_ALL_HASH_KEY3"); + ASSERT_EQ(total_keys[2], "GP8_SCAN_CASE_ALL_SET_KEY3"); + ASSERT_EQ(total_keys[3], "GP8_SCAN_CASE_ALL_LIST_KEY3"); + ASSERT_EQ(total_keys[4], "GP8_SCAN_CASE_ALL_ZSET_KEY3"); + + del_num = db.Del(delete_keys); + ASSERT_EQ(del_num, 15); + sleep(2); + db.Compact(DataType::kAll, true); + + // ***************** Group 9 Test ***************** + delete_keys.clear(); + // String + s = db.Set("GP9_SCAN_CASE_ALL_STRING_KEY1", "GP9_SCAN_CASE_ALL_STRING_VALUE1"); + s = db.Set("GP9_SCAN_CASE_ALL_STRING_KEY2", "GP9_SCAN_CASE_ALL_STRING_VALUE2"); + s = db.Set("GP9_SCAN_CASE_ALL_STRING_KEY3", "GP9_SCAN_CASE_ALL_STRING_VALUE3"); + delete_keys.emplace_back("GP9_SCAN_CASE_ALL_STRING_KEY1"); + delete_keys.emplace_back("GP9_SCAN_CASE_ALL_STRING_KEY2"); + delete_keys.emplace_back("GP9_SCAN_CASE_ALL_STRING_KEY3"); + + // Hash + s = db.HSet("GP9_SCAN_CASE_ALL_HASH_KEY1", "GP9_SCAN_CASE_ALL_HASH_FIELD1", "GP9_SCAN_CASE_ALL_HASH_VALUE1", + &int32_ret); + s = db.HSet("GP9_SCAN_CASE_ALL_HASH_KEY2", "GP9_SCAN_CASE_ALL_HASH_FIELD2", "GP9_SCAN_CASE_ALL_HASH_VALUE2", + &int32_ret); + s = db.HSet("GP9_SCAN_CASE_ALL_HASH_KEY3", "GP9_SCAN_CASE_ALL_HASH_FIELD3", "GP9_SCAN_CASE_ALL_HASH_VALUE3", + &int32_ret); + delete_keys.emplace_back("GP9_SCAN_CASE_ALL_HASH_KEY1"); + delete_keys.emplace_back("GP9_SCAN_CASE_ALL_HASH_KEY2"); + delete_keys.emplace_back("GP9_SCAN_CASE_ALL_HASH_KEY3"); + + // Set + s = db.SAdd("GP9_SCAN_CASE_ALL_SET_KEY1", {"GP9_SCAN_CASE_ALL_SET_MEMBER1"}, &int32_ret); + s = db.SAdd("GP9_SCAN_CASE_ALL_SET_KEY2", {"GP9_SCAN_CASE_ALL_SET_MEMBER2"}, &int32_ret); + s = db.SAdd("GP9_SCAN_CASE_ALL_SET_KEY3", {"GP9_SCAN_CASE_ALL_SET_MEMBER3"}, &int32_ret); + 
delete_keys.emplace_back("GP9_SCAN_CASE_ALL_SET_KEY1"); + delete_keys.emplace_back("GP9_SCAN_CASE_ALL_SET_KEY2"); + delete_keys.emplace_back("GP9_SCAN_CASE_ALL_SET_KEY3"); + + // List + s = db.LPush("GP9_SCAN_CASE_ALL_LIST_KEY1", {"GP9_SCAN_CASE_ALL_LIST_NODE1"}, &uint64_ret); + s = db.LPush("GP9_SCAN_CASE_ALL_LIST_KEY2", {"GP9_SCAN_CASE_ALL_LIST_NODE2"}, &uint64_ret); + s = db.LPush("GP9_SCAN_CASE_ALL_LIST_KEY3", {"GP9_SCAN_CASE_ALL_LIST_NODE3"}, &uint64_ret); + delete_keys.emplace_back("GP9_SCAN_CASE_ALL_LIST_KEY1"); + delete_keys.emplace_back("GP9_SCAN_CASE_ALL_LIST_KEY2"); + delete_keys.emplace_back("GP9_SCAN_CASE_ALL_LIST_KEY3"); + + // ZSet + s = db.ZAdd("GP9_SCAN_CASE_ALL_ZSET_KEY1", {{1, "GP9_SCAN_CASE_ALL_LIST_MEMBER1"}}, &int32_ret); + s = db.ZAdd("GP9_SCAN_CASE_ALL_ZSET_KEY2", {{1, "GP9_SCAN_CASE_ALL_LIST_MEMBER2"}}, &int32_ret); + s = db.ZAdd("GP9_SCAN_CASE_ALL_ZSET_KEY3", {{1, "GP9_SCAN_CASE_ALL_LIST_MEMBER3"}}, &int32_ret); + delete_keys.emplace_back("GP9_SCAN_CASE_ALL_ZSET_KEY1"); + delete_keys.emplace_back("GP9_SCAN_CASE_ALL_ZSET_KEY2"); + delete_keys.emplace_back("GP9_SCAN_CASE_ALL_ZSET_KEY3"); + + cursor = 0; + keys.clear(); + total_keys.clear(); + do { + next_cursor = db.Scan(DataType::kAll, cursor, "GP9*", 5, &keys); + total_keys.insert(total_keys.end(), keys.begin(), keys.end()); + cursor = next_cursor; + } while (cursor != 0); + ASSERT_EQ(total_keys.size(), 15); + ASSERT_EQ(total_keys[0], "GP9_SCAN_CASE_ALL_STRING_KEY1"); + ASSERT_EQ(total_keys[1], "GP9_SCAN_CASE_ALL_STRING_KEY2"); + ASSERT_EQ(total_keys[2], "GP9_SCAN_CASE_ALL_STRING_KEY3"); + ASSERT_EQ(total_keys[3], "GP9_SCAN_CASE_ALL_HASH_KEY1"); + ASSERT_EQ(total_keys[4], "GP9_SCAN_CASE_ALL_HASH_KEY2"); + ASSERT_EQ(total_keys[5], "GP9_SCAN_CASE_ALL_HASH_KEY3"); + ASSERT_EQ(total_keys[6], "GP9_SCAN_CASE_ALL_SET_KEY1"); + ASSERT_EQ(total_keys[7], "GP9_SCAN_CASE_ALL_SET_KEY2"); + ASSERT_EQ(total_keys[8], "GP9_SCAN_CASE_ALL_SET_KEY3"); + ASSERT_EQ(total_keys[9], "GP9_SCAN_CASE_ALL_LIST_KEY1"); + ASSERT_EQ(total_keys[10], "GP9_SCAN_CASE_ALL_LIST_KEY2"); + ASSERT_EQ(total_keys[11], "GP9_SCAN_CASE_ALL_LIST_KEY3"); + ASSERT_EQ(total_keys[12], "GP9_SCAN_CASE_ALL_ZSET_KEY1"); + ASSERT_EQ(total_keys[13], "GP9_SCAN_CASE_ALL_ZSET_KEY2"); + ASSERT_EQ(total_keys[14], "GP9_SCAN_CASE_ALL_ZSET_KEY3"); + + del_num = db.Del(delete_keys); + ASSERT_EQ(del_num, 15); + sleep(2); + db.Compact(DataType::kAll, true); + + // ***************** Group 10 Test ***************** + delete_keys.clear(); + // String + s = db.Set("GP10_SCAN_CASE_ALL_STRING_KEY1", "GP10_SCAN_CASE_ALL_STRING_VALUE1"); + s = db.Set("GP10_SCAN_CASE_ALL_STRING_KEY2", "GP10_SCAN_CASE_ALL_STRING_VALUE2"); + s = db.Set("GP10_SCAN_CASE_ALL_STRING_KEY3", "GP10_SCAN_CASE_ALL_STRING_VALUE3"); + delete_keys.emplace_back("GP10_SCAN_CASE_ALL_STRING_KEY1"); + delete_keys.emplace_back("GP10_SCAN_CASE_ALL_STRING_KEY2"); + delete_keys.emplace_back("GP10_SCAN_CASE_ALL_STRING_KEY3"); + + // Hash + s = db.HSet("GP10_SCAN_CASE_ALL_HASH_KEY1", "GP10_SCAN_CASE_ALL_HASH_FIELD1", "GP10_SCAN_CASE_ALL_HASH_VALUE1", + &int32_ret); + s = db.HSet("GP10_SCAN_CASE_ALL_HASH_KEY2", "GP10_SCAN_CASE_ALL_HASH_FIELD2", "GP10_SCAN_CASE_ALL_HASH_VALUE2", + &int32_ret); + s = db.HSet("GP10_SCAN_CASE_ALL_HASH_KEY3", "GP10_SCAN_CASE_ALL_HASH_FIELD3", "GP10_SCAN_CASE_ALL_HASH_VALUE3", + &int32_ret); + delete_keys.emplace_back("GP10_SCAN_CASE_ALL_HASH_KEY1"); + delete_keys.emplace_back("GP10_SCAN_CASE_ALL_HASH_KEY2"); + delete_keys.emplace_back("GP10_SCAN_CASE_ALL_HASH_KEY3"); + + // Set + s = 
db.SAdd("GP10_SCAN_CASE_ALL_SET_KEY1", {"GP10_SCAN_CASE_ALL_SET_MEMBER1"}, &int32_ret); + s = db.SAdd("GP10_SCAN_CASE_ALL_SET_KEY2", {"GP10_SCAN_CASE_ALL_SET_MEMBER2"}, &int32_ret); + s = db.SAdd("GP10_SCAN_CASE_ALL_SET_KEY3", {"GP10_SCAN_CASE_ALL_SET_MEMBER3"}, &int32_ret); + delete_keys.emplace_back("GP10_SCAN_CASE_ALL_SET_KEY1"); + delete_keys.emplace_back("GP10_SCAN_CASE_ALL_SET_KEY2"); + delete_keys.emplace_back("GP10_SCAN_CASE_ALL_SET_KEY3"); + + // List + s = db.LPush("GP10_SCAN_CASE_ALL_LIST_KEY1", {"GP10_SCAN_CASE_ALL_LIST_NODE1"}, &uint64_ret); + s = db.LPush("GP10_SCAN_CASE_ALL_LIST_KEY2", {"GP10_SCAN_CASE_ALL_LIST_NODE2"}, &uint64_ret); + s = db.LPush("GP10_SCAN_CASE_ALL_LIST_KEY3", {"GP10_SCAN_CASE_ALL_LIST_NODE3"}, &uint64_ret); + delete_keys.emplace_back("GP10_SCAN_CASE_ALL_LIST_KEY1"); + delete_keys.emplace_back("GP10_SCAN_CASE_ALL_LIST_KEY2"); + delete_keys.emplace_back("GP10_SCAN_CASE_ALL_LIST_KEY3"); + + // ZSet + s = db.ZAdd("GP10_SCAN_CASE_ALL_ZSET_KEY1", {{1, "GP10_SCAN_CASE_ALL_LIST_MEMBER1"}}, &int32_ret); + s = db.ZAdd("GP10_SCAN_CASE_ALL_ZSET_KEY2", {{1, "GP10_SCAN_CASE_ALL_LIST_MEMBER2"}}, &int32_ret); + s = db.ZAdd("GP10_SCAN_CASE_ALL_ZSET_KEY3", {{1, "GP10_SCAN_CASE_ALL_LIST_MEMBER3"}}, &int32_ret); + delete_keys.emplace_back("GP10_SCAN_CASE_ALL_ZSET_KEY1"); + delete_keys.emplace_back("GP10_SCAN_CASE_ALL_ZSET_KEY2"); + delete_keys.emplace_back("GP10_SCAN_CASE_ALL_ZSET_KEY3"); + + cursor = 0; + keys.clear(); + total_keys.clear(); + do { + next_cursor = db.Scan(DataType::kAll, cursor, "GP10_SCAN_CASE_ALL_STRING_*", 5, &keys); + total_keys.insert(total_keys.end(), keys.begin(), keys.end()); + cursor = next_cursor; + } while (cursor != 0); + ASSERT_EQ(total_keys.size(), 3); + ASSERT_EQ(total_keys[0], "GP10_SCAN_CASE_ALL_STRING_KEY1"); + ASSERT_EQ(total_keys[1], "GP10_SCAN_CASE_ALL_STRING_KEY2"); + ASSERT_EQ(total_keys[2], "GP10_SCAN_CASE_ALL_STRING_KEY3"); + + del_num = db.Del(delete_keys); + ASSERT_EQ(del_num, 15); + sleep(2); + db.Compact(DataType::kAll, true); + + // ***************** Group 11 Test ***************** + delete_keys.clear(); + // String + s = db.Set("GP11_SCAN_CASE_ALL_STRING_KEY1", "GP11_SCAN_CASE_ALL_STRING_VALUE1"); + s = db.Set("GP11_SCAN_CASE_ALL_STRING_KEY2", "GP11_SCAN_CASE_ALL_STRING_VALUE2"); + s = db.Set("GP11_SCAN_CASE_ALL_STRING_KEY3", "GP11_SCAN_CASE_ALL_STRING_VALUE3"); + delete_keys.emplace_back("GP11_SCAN_CASE_ALL_STRING_KEY1"); + delete_keys.emplace_back("GP11_SCAN_CASE_ALL_STRING_KEY2"); + delete_keys.emplace_back("GP11_SCAN_CASE_ALL_STRING_KEY3"); + + // Hash + s = db.HSet("GP11_SCAN_CASE_ALL_HASH_KEY1", "GP11_SCAN_CASE_ALL_HASH_FIELD1", "GP11_SCAN_CASE_ALL_HASH_VALUE1", + &int32_ret); + s = db.HSet("GP11_SCAN_CASE_ALL_HASH_KEY2", "GP11_SCAN_CASE_ALL_HASH_FIELD2", "GP11_SCAN_CASE_ALL_HASH_VALUE2", + &int32_ret); + s = db.HSet("GP11_SCAN_CASE_ALL_HASH_KEY3", "GP11_SCAN_CASE_ALL_HASH_FIELD3", "GP11_SCAN_CASE_ALL_HASH_VALUE3", + &int32_ret); + delete_keys.emplace_back("GP11_SCAN_CASE_ALL_HASH_KEY1"); + delete_keys.emplace_back("GP11_SCAN_CASE_ALL_HASH_KEY2"); + delete_keys.emplace_back("GP11_SCAN_CASE_ALL_HASH_KEY3"); + + // Set + s = db.SAdd("GP11_SCAN_CASE_ALL_SET_KEY1", {"GP11_SCAN_CASE_ALL_SET_MEMBER1"}, &int32_ret); + s = db.SAdd("GP11_SCAN_CASE_ALL_SET_KEY2", {"GP11_SCAN_CASE_ALL_SET_MEMBER2"}, &int32_ret); + s = db.SAdd("GP11_SCAN_CASE_ALL_SET_KEY3", {"GP11_SCAN_CASE_ALL_SET_MEMBER3"}, &int32_ret); + delete_keys.emplace_back("GP11_SCAN_CASE_ALL_SET_KEY1"); + delete_keys.emplace_back("GP11_SCAN_CASE_ALL_SET_KEY2"); + 
delete_keys.emplace_back("GP11_SCAN_CASE_ALL_SET_KEY3"); + + // List + s = db.LPush("GP11_SCAN_CASE_ALL_LIST_KEY1", {"GP11_SCAN_CASE_ALL_LIST_NODE1"}, &uint64_ret); + s = db.LPush("GP11_SCAN_CASE_ALL_LIST_KEY2", {"GP11_SCAN_CASE_ALL_LIST_NODE2"}, &uint64_ret); + s = db.LPush("GP11_SCAN_CASE_ALL_LIST_KEY3", {"GP11_SCAN_CASE_ALL_LIST_NODE3"}, &uint64_ret); + delete_keys.emplace_back("GP11_SCAN_CASE_ALL_LIST_KEY1"); + delete_keys.emplace_back("GP11_SCAN_CASE_ALL_LIST_KEY2"); + delete_keys.emplace_back("GP11_SCAN_CASE_ALL_LIST_KEY3"); + + // ZSet + s = db.ZAdd("GP11_SCAN_CASE_ALL_ZSET_KEY1", {{1, "GP11_SCAN_CASE_ALL_LIST_MEMBER1"}}, &int32_ret); + s = db.ZAdd("GP11_SCAN_CASE_ALL_ZSET_KEY2", {{1, "GP11_SCAN_CASE_ALL_LIST_MEMBER2"}}, &int32_ret); + s = db.ZAdd("GP11_SCAN_CASE_ALL_ZSET_KEY3", {{1, "GP11_SCAN_CASE_ALL_LIST_MEMBER3"}}, &int32_ret); + delete_keys.emplace_back("GP11_SCAN_CASE_ALL_ZSET_KEY1"); + delete_keys.emplace_back("GP11_SCAN_CASE_ALL_ZSET_KEY2"); + delete_keys.emplace_back("GP11_SCAN_CASE_ALL_ZSET_KEY3"); + + cursor = 0; + keys.clear(); + total_keys.clear(); + do { + next_cursor = db.Scan(DataType::kAll, cursor, "GP11_SCAN_CASE_ALL_SET_*", 5, &keys); + total_keys.insert(total_keys.end(), keys.begin(), keys.end()); + cursor = next_cursor; + } while (cursor != 0); + ASSERT_EQ(total_keys.size(), 3); + ASSERT_EQ(total_keys[0], "GP11_SCAN_CASE_ALL_SET_KEY1"); + ASSERT_EQ(total_keys[1], "GP11_SCAN_CASE_ALL_SET_KEY2"); + ASSERT_EQ(total_keys[2], "GP11_SCAN_CASE_ALL_SET_KEY3"); + + del_num = db.Del(delete_keys); + ASSERT_EQ(del_num, 15); + sleep(2); + db.Compact(DataType::kAll, true); + + // ***************** Group 12 Test ***************** + delete_keys.clear(); + // String + s = db.Set("GP12_SCAN_CASE_ALL_STRING_KEY1", "GP12_SCAN_CASE_ALL_STRING_VALUE1"); + s = db.Set("GP12_SCAN_CASE_ALL_STRING_KEY2", "GP12_SCAN_CASE_ALL_STRING_VALUE2"); + s = db.Set("GP12_SCAN_CASE_ALL_STRING_KEY3", "GP12_SCAN_CASE_ALL_STRING_VALUE3"); + delete_keys.emplace_back("GP12_SCAN_CASE_ALL_STRING_KEY1"); + delete_keys.emplace_back("GP12_SCAN_CASE_ALL_STRING_KEY2"); + delete_keys.emplace_back("GP12_SCAN_CASE_ALL_STRING_KEY3"); + + // Hash + s = db.HSet("GP12_SCAN_CASE_ALL_HASH_KEY1", "GP12_SCAN_CASE_ALL_HASH_FIELD1", "GP12_SCAN_CASE_ALL_HASH_VALUE1", + &int32_ret); + s = db.HSet("GP12_SCAN_CASE_ALL_HASH_KEY2", "GP12_SCAN_CASE_ALL_HASH_FIELD2", "GP12_SCAN_CASE_ALL_HASH_VALUE2", + &int32_ret); + s = db.HSet("GP12_SCAN_CASE_ALL_HASH_KEY3", "GP12_SCAN_CASE_ALL_HASH_FIELD3", "GP12_SCAN_CASE_ALL_HASH_VALUE3", + &int32_ret); + delete_keys.emplace_back("GP12_SCAN_CASE_ALL_HASH_KEY1"); + delete_keys.emplace_back("GP12_SCAN_CASE_ALL_HASH_KEY2"); + delete_keys.emplace_back("GP12_SCAN_CASE_ALL_HASH_KEY3"); + + // Set + s = db.SAdd("GP12_SCAN_CASE_ALL_SET_KEY1", {"GP12_SCAN_CASE_ALL_SET_MEMBER1"}, &int32_ret); + s = db.SAdd("GP12_SCAN_CASE_ALL_SET_KEY2", {"GP12_SCAN_CASE_ALL_SET_MEMBER2"}, &int32_ret); + s = db.SAdd("GP12_SCAN_CASE_ALL_SET_KEY3", {"GP12_SCAN_CASE_ALL_SET_MEMBER3"}, &int32_ret); + delete_keys.emplace_back("GP12_SCAN_CASE_ALL_SET_KEY1"); + delete_keys.emplace_back("GP12_SCAN_CASE_ALL_SET_KEY2"); + delete_keys.emplace_back("GP12_SCAN_CASE_ALL_SET_KEY3"); + + // List + s = db.LPush("GP12_SCAN_CASE_ALL_LIST_KEY1", {"GP12_SCAN_CASE_ALL_LIST_NODE1"}, &uint64_ret); + s = db.LPush("GP12_SCAN_CASE_ALL_LIST_KEY2", {"GP12_SCAN_CASE_ALL_LIST_NODE2"}, &uint64_ret); + s = db.LPush("GP12_SCAN_CASE_ALL_LIST_KEY3", {"GP12_SCAN_CASE_ALL_LIST_NODE3"}, &uint64_ret); + delete_keys.emplace_back("GP12_SCAN_CASE_ALL_LIST_KEY1"); 
+ delete_keys.emplace_back("GP12_SCAN_CASE_ALL_LIST_KEY2"); + delete_keys.emplace_back("GP12_SCAN_CASE_ALL_LIST_KEY3"); + + // ZSet + s = db.ZAdd("GP12_SCAN_CASE_ALL_ZSET_KEY1", {{1, "GP12_SCAN_CASE_ALL_LIST_MEMBER1"}}, &int32_ret); + s = db.ZAdd("GP12_SCAN_CASE_ALL_ZSET_KEY2", {{1, "GP12_SCAN_CASE_ALL_LIST_MEMBER2"}}, &int32_ret); + s = db.ZAdd("GP12_SCAN_CASE_ALL_ZSET_KEY3", {{1, "GP12_SCAN_CASE_ALL_LIST_MEMBER3"}}, &int32_ret); + delete_keys.emplace_back("GP12_SCAN_CASE_ALL_ZSET_KEY1"); + delete_keys.emplace_back("GP12_SCAN_CASE_ALL_ZSET_KEY2"); + delete_keys.emplace_back("GP12_SCAN_CASE_ALL_ZSET_KEY3"); + + cursor = 0; + keys.clear(); + total_keys.clear(); + do { + next_cursor = db.Scan(DataType::kAll, cursor, "GP12_SCAN_CASE_ALL_ZSET_*", 5, &keys); + total_keys.insert(total_keys.end(), keys.begin(), keys.end()); + cursor = next_cursor; + } while (cursor != 0); + ASSERT_EQ(total_keys.size(), 3); + ASSERT_EQ(total_keys[0], "GP12_SCAN_CASE_ALL_ZSET_KEY1"); + ASSERT_EQ(total_keys[1], "GP12_SCAN_CASE_ALL_ZSET_KEY2"); + ASSERT_EQ(total_keys[2], "GP12_SCAN_CASE_ALL_ZSET_KEY3"); + + del_num = db.Del(delete_keys); + ASSERT_EQ(del_num, 15); + sleep(2); + db.Compact(DataType::kAll, true); + + // ***************** Group 13 Test ***************** + delete_keys.clear(); + // String + s = db.Set("GP13_KEY1_SCAN_CASE_ALL_STRING", "GP13_SCAN_CASE_ALL_STRING_VALUE1"); + s = db.Set("GP13_KEY2_SCAN_CASE_ALL_STRING", "GP13_SCAN_CASE_ALL_STRING_VALUE2"); + s = db.Set("GP13_KEY3_SCAN_CASE_ALL_STRING", "GP13_SCAN_CASE_ALL_STRING_VALUE3"); + delete_keys.emplace_back("GP13_KEY1_SCAN_CASE_ALL_STRING"); + delete_keys.emplace_back("GP13_KEY2_SCAN_CASE_ALL_STRING"); + delete_keys.emplace_back("GP13_KEY3_SCAN_CASE_ALL_STRING"); + + // Hash + s = db.HSet("GP13_KEY1_SCAN_CASE_ALL_HASH", "GP13_SCAN_CASE_ALL_HASH_FIELD1", "GP13_SCAN_CASE_ALL_HASH_VALUE1", + &int32_ret); + s = db.HSet("GP13_KEY2_SCAN_CASE_ALL_HASH", "GP13_SCAN_CASE_ALL_HASH_FIELD2", "GP13_SCAN_CASE_ALL_HASH_VALUE2", + &int32_ret); + s = db.HSet("GP13_KEY3_SCAN_CASE_ALL_HASH", "GP13_SCAN_CASE_ALL_HASH_FIELD3", "GP13_SCAN_CASE_ALL_HASH_VALUE3", + &int32_ret); + delete_keys.emplace_back("GP13_KEY1_SCAN_CASE_ALL_HASH"); + delete_keys.emplace_back("GP13_KEY2_SCAN_CASE_ALL_HASH"); + delete_keys.emplace_back("GP13_KEY3_SCAN_CASE_ALL_HASH"); + + // Set + s = db.SAdd("GP13_KEY1_SCAN_CASE_ALL_SET", {"GP13_SCAN_CASE_ALL_SET_MEMBER1"}, &int32_ret); + s = db.SAdd("GP13_KEY2_SCAN_CASE_ALL_SET", {"GP13_SCAN_CASE_ALL_SET_MEMBER2"}, &int32_ret); + s = db.SAdd("GP13_KEY3_SCAN_CASE_ALL_SET", {"GP13_SCAN_CASE_ALL_SET_MEMBER3"}, &int32_ret); + delete_keys.emplace_back("GP13_KEY1_SCAN_CASE_ALL_SET"); + delete_keys.emplace_back("GP13_KEY2_SCAN_CASE_ALL_SET"); + delete_keys.emplace_back("GP13_KEY3_SCAN_CASE_ALL_SET"); + + // List + s = db.LPush("GP13_KEY1_SCAN_CASE_ALL_LIST", {"GP13_SCAN_CASE_ALL_LIST_NODE1"}, &uint64_ret); + s = db.LPush("GP13_KEY2_SCAN_CASE_ALL_LIST", {"GP13_SCAN_CASE_ALL_LIST_NODE2"}, &uint64_ret); + s = db.LPush("GP13_KEY3_SCAN_CASE_ALL_LIST", {"GP13_SCAN_CASE_ALL_LIST_NODE3"}, &uint64_ret); + delete_keys.emplace_back("GP13_KEY1_SCAN_CASE_ALL_LIST"); + delete_keys.emplace_back("GP13_KEY2_SCAN_CASE_ALL_LIST"); + delete_keys.emplace_back("GP13_KEY3_SCAN_CASE_ALL_LIST"); + + // ZSet + s = db.ZAdd("GP13_KEY1_SCAN_CASE_ALL_ZSET", {{1, "GP13_SCAN_CASE_ALL_LIST_MEMBER1"}}, &int32_ret); + s = db.ZAdd("GP13_KEY2_SCAN_CASE_ALL_ZSET", {{1, "GP13_SCAN_CASE_ALL_LIST_MEMBER2"}}, &int32_ret); + s = db.ZAdd("GP13_KEY3_SCAN_CASE_ALL_ZSET", {{1, 
"GP13_SCAN_CASE_ALL_LIST_MEMBER3"}}, &int32_ret); + delete_keys.emplace_back("GP13_KEY1_SCAN_CASE_ALL_ZSET"); + delete_keys.emplace_back("GP13_KEY2_SCAN_CASE_ALL_ZSET"); + delete_keys.emplace_back("GP13_KEY3_SCAN_CASE_ALL_ZSET"); + + cursor = 0; + keys.clear(); + total_keys.clear(); + do { + next_cursor = db.Scan(DataType::kAll, cursor, "GP13_KEY1_SCAN_CASE_ALL_*", 1, &keys); + total_keys.insert(total_keys.end(), keys.begin(), keys.end()); + cursor = next_cursor; + } while (cursor != 0); + ASSERT_EQ(total_keys.size(), 5); + ASSERT_EQ(total_keys[0], "GP13_KEY1_SCAN_CASE_ALL_STRING"); + ASSERT_EQ(total_keys[1], "GP13_KEY1_SCAN_CASE_ALL_HASH"); + ASSERT_EQ(total_keys[2], "GP13_KEY1_SCAN_CASE_ALL_SET"); + ASSERT_EQ(total_keys[3], "GP13_KEY1_SCAN_CASE_ALL_LIST"); + ASSERT_EQ(total_keys[4], "GP13_KEY1_SCAN_CASE_ALL_ZSET"); + + del_num = db.Del(delete_keys); + ASSERT_EQ(del_num, 15); + sleep(2); + db.Compact(DataType::kAll, true); + + // ***************** Group 14 Test ***************** + delete_keys.clear(); + // String + s = db.Set("GP14_KEY1_SCAN_CASE_ALL_STRING", "GP14_SCAN_CASE_ALL_STRING_VALUE1"); + s = db.Set("GP14_KEY2_SCAN_CASE_ALL_STRING", "GP14_SCAN_CASE_ALL_STRING_VALUE2"); + s = db.Set("GP14_KEY3_SCAN_CASE_ALL_STRING", "GP14_SCAN_CASE_ALL_STRING_VALUE3"); + delete_keys.emplace_back("GP14_KEY1_SCAN_CASE_ALL_STRING"); + delete_keys.emplace_back("GP14_KEY2_SCAN_CASE_ALL_STRING"); + delete_keys.emplace_back("GP14_KEY3_SCAN_CASE_ALL_STRING"); + + // Hash + s = db.HSet("GP14_KEY1_SCAN_CASE_ALL_HASH", "GP14_SCAN_CASE_ALL_HASH_FIELD1", "GP14_SCAN_CASE_ALL_HASH_VALUE1", + &int32_ret); + s = db.HSet("GP14_KEY2_SCAN_CASE_ALL_HASH", "GP14_SCAN_CASE_ALL_HASH_FIELD2", "GP14_SCAN_CASE_ALL_HASH_VALUE2", + &int32_ret); + s = db.HSet("GP14_KEY3_SCAN_CASE_ALL_HASH", "GP14_SCAN_CASE_ALL_HASH_FIELD3", "GP14_SCAN_CASE_ALL_HASH_VALUE3", + &int32_ret); + delete_keys.emplace_back("GP14_KEY1_SCAN_CASE_ALL_HASH"); + delete_keys.emplace_back("GP14_KEY2_SCAN_CASE_ALL_HASH"); + delete_keys.emplace_back("GP14_KEY3_SCAN_CASE_ALL_HASH"); + + // Set + s = db.SAdd("GP14_KEY1_SCAN_CASE_ALL_SET", {"GP14_SCAN_CASE_ALL_SET_MEMBER1"}, &int32_ret); + s = db.SAdd("GP14_KEY2_SCAN_CASE_ALL_SET", {"GP14_SCAN_CASE_ALL_SET_MEMBER2"}, &int32_ret); + s = db.SAdd("GP14_KEY3_SCAN_CASE_ALL_SET", {"GP14_SCAN_CASE_ALL_SET_MEMBER3"}, &int32_ret); + delete_keys.emplace_back("GP14_KEY1_SCAN_CASE_ALL_SET"); + delete_keys.emplace_back("GP14_KEY2_SCAN_CASE_ALL_SET"); + delete_keys.emplace_back("GP14_KEY3_SCAN_CASE_ALL_SET"); + + // List + s = db.LPush("GP14_KEY1_SCAN_CASE_ALL_LIST", {"GP14_SCAN_CASE_ALL_LIST_NODE1"}, &uint64_ret); + s = db.LPush("GP14_KEY2_SCAN_CASE_ALL_LIST", {"GP14_SCAN_CASE_ALL_LIST_NODE2"}, &uint64_ret); + s = db.LPush("GP14_KEY3_SCAN_CASE_ALL_LIST", {"GP14_SCAN_CASE_ALL_LIST_NODE3"}, &uint64_ret); + delete_keys.emplace_back("GP14_KEY1_SCAN_CASE_ALL_LIST"); + delete_keys.emplace_back("GP14_KEY2_SCAN_CASE_ALL_LIST"); + delete_keys.emplace_back("GP14_KEY3_SCAN_CASE_ALL_LIST"); + + // ZSet + s = db.ZAdd("GP14_KEY1_SCAN_CASE_ALL_ZSET", {{1, "GP14_SCAN_CASE_ALL_LIST_MEMBER1"}}, &int32_ret); + s = db.ZAdd("GP14_KEY2_SCAN_CASE_ALL_ZSET", {{1, "GP14_SCAN_CASE_ALL_LIST_MEMBER2"}}, &int32_ret); + s = db.ZAdd("GP14_KEY3_SCAN_CASE_ALL_ZSET", {{1, "GP14_SCAN_CASE_ALL_LIST_MEMBER3"}}, &int32_ret); + delete_keys.emplace_back("GP14_KEY1_SCAN_CASE_ALL_ZSET"); + delete_keys.emplace_back("GP14_KEY2_SCAN_CASE_ALL_ZSET"); + delete_keys.emplace_back("GP14_KEY3_SCAN_CASE_ALL_ZSET"); + + cursor = 0; + keys.clear(); + total_keys.clear(); + do { + 
next_cursor = db.Scan(DataType::kAll, cursor, "GP14_KEY1_SCAN_CASE_ALL_*", 5, &keys); + total_keys.insert(total_keys.end(), keys.begin(), keys.end()); + cursor = next_cursor; + } while (cursor != 0); + ASSERT_EQ(total_keys.size(), 5); + ASSERT_EQ(total_keys[0], "GP14_KEY1_SCAN_CASE_ALL_STRING"); + ASSERT_EQ(total_keys[1], "GP14_KEY1_SCAN_CASE_ALL_HASH"); + ASSERT_EQ(total_keys[2], "GP14_KEY1_SCAN_CASE_ALL_SET"); + ASSERT_EQ(total_keys[3], "GP14_KEY1_SCAN_CASE_ALL_LIST"); + ASSERT_EQ(total_keys[4], "GP14_KEY1_SCAN_CASE_ALL_ZSET"); + + del_num = db.Del(delete_keys); + ASSERT_EQ(del_num, 15); + sleep(2); + db.Compact(DataType::kAll, true); + + // ***************** Group 15 Test ***************** + delete_keys.clear(); + // String + s = db.Set("GP15_KEY1_SCAN_CASE_ALL_STRING", "GP15_SCAN_CASE_ALL_STRING_VALUE1"); + s = db.Set("GP15_KEY2_SCAN_CASE_ALL_STRING", "GP15_SCAN_CASE_ALL_STRING_VALUE2"); + s = db.Set("GP15_KEY3_SCAN_CASE_ALL_STRING", "GP15_SCAN_CASE_ALL_STRING_VALUE3"); + delete_keys.emplace_back("GP15_KEY1_SCAN_CASE_ALL_STRING"); + delete_keys.emplace_back("GP15_KEY2_SCAN_CASE_ALL_STRING"); + delete_keys.emplace_back("GP15_KEY3_SCAN_CASE_ALL_STRING"); + + // Hash + s = db.HSet("GP15_KEY1_SCAN_CASE_ALL_HASH", "GP15_SCAN_CASE_ALL_HASH_FIELD1", "GP15_SCAN_CASE_ALL_HASH_VALUE1", + &int32_ret); + s = db.HSet("GP15_KEY2_SCAN_CASE_ALL_HASH", "GP15_SCAN_CASE_ALL_HASH_FIELD2", "GP15_SCAN_CASE_ALL_HASH_VALUE2", + &int32_ret); + s = db.HSet("GP15_KEY3_SCAN_CASE_ALL_HASH", "GP15_SCAN_CASE_ALL_HASH_FIELD3", "GP15_SCAN_CASE_ALL_HASH_VALUE3", + &int32_ret); + delete_keys.emplace_back("GP15_KEY1_SCAN_CASE_ALL_HASH"); + delete_keys.emplace_back("GP15_KEY2_SCAN_CASE_ALL_HASH"); + delete_keys.emplace_back("GP15_KEY3_SCAN_CASE_ALL_HASH"); + + // Set + s = db.SAdd("GP15_KEY1_SCAN_CASE_ALL_SET", {"GP15_SCAN_CASE_ALL_SET_MEMBER1"}, &int32_ret); + s = db.SAdd("GP15_KEY2_SCAN_CASE_ALL_SET", {"GP15_SCAN_CASE_ALL_SET_MEMBER2"}, &int32_ret); + s = db.SAdd("GP15_KEY3_SCAN_CASE_ALL_SET", {"GP15_SCAN_CASE_ALL_SET_MEMBER3"}, &int32_ret); + delete_keys.emplace_back("GP15_KEY1_SCAN_CASE_ALL_SET"); + delete_keys.emplace_back("GP15_KEY2_SCAN_CASE_ALL_SET"); + delete_keys.emplace_back("GP15_KEY3_SCAN_CASE_ALL_SET"); + + // List + s = db.LPush("GP15_KEY1_SCAN_CASE_ALL_LIST", {"GP15_SCAN_CASE_ALL_LIST_NODE1"}, &uint64_ret); + s = db.LPush("GP15_KEY2_SCAN_CASE_ALL_LIST", {"GP15_SCAN_CASE_ALL_LIST_NODE2"}, &uint64_ret); + s = db.LPush("GP15_KEY3_SCAN_CASE_ALL_LIST", {"GP15_SCAN_CASE_ALL_LIST_NODE3"}, &uint64_ret); + delete_keys.emplace_back("GP15_KEY1_SCAN_CASE_ALL_LIST"); + delete_keys.emplace_back("GP15_KEY2_SCAN_CASE_ALL_LIST"); + delete_keys.emplace_back("GP15_KEY3_SCAN_CASE_ALL_LIST"); + + // ZSet + s = db.ZAdd("GP15_KEY1_SCAN_CASE_ALL_ZSET", {{1, "GP15_SCAN_CASE_ALL_LIST_MEMBER1"}}, &int32_ret); + s = db.ZAdd("GP15_KEY2_SCAN_CASE_ALL_ZSET", {{1, "GP15_SCAN_CASE_ALL_LIST_MEMBER2"}}, &int32_ret); + s = db.ZAdd("GP15_KEY3_SCAN_CASE_ALL_ZSET", {{1, "GP15_SCAN_CASE_ALL_LIST_MEMBER3"}}, &int32_ret); + delete_keys.emplace_back("GP15_KEY1_SCAN_CASE_ALL_ZSET"); + delete_keys.emplace_back("GP15_KEY2_SCAN_CASE_ALL_ZSET"); + delete_keys.emplace_back("GP15_KEY3_SCAN_CASE_ALL_ZSET"); + + cursor = 0; + keys.clear(); + total_keys.clear(); + do { + next_cursor = db.Scan(DataType::kAll, cursor, "GP15_KEY2_SCAN_CASE_ALL_*", 5, &keys); + total_keys.insert(total_keys.end(), keys.begin(), keys.end()); + cursor = next_cursor; + } while (cursor != 0); + ASSERT_EQ(total_keys.size(), 5); + ASSERT_EQ(total_keys[0], 
"GP15_KEY2_SCAN_CASE_ALL_STRING"); + ASSERT_EQ(total_keys[1], "GP15_KEY2_SCAN_CASE_ALL_HASH"); + ASSERT_EQ(total_keys[2], "GP15_KEY2_SCAN_CASE_ALL_SET"); + ASSERT_EQ(total_keys[3], "GP15_KEY2_SCAN_CASE_ALL_LIST"); + ASSERT_EQ(total_keys[4], "GP15_KEY2_SCAN_CASE_ALL_ZSET"); + + del_num = db.Del(delete_keys); + ASSERT_EQ(del_num, 15); + sleep(2); + db.Compact(DataType::kAll, true); + + // ***************** Group 16 Test ***************** + delete_keys.clear(); + // String + s = db.Set("GP16_KEY1_SCAN_CASE_ALL_STRING", "GP16_SCAN_CASE_ALL_STRING_VALUE1"); + s = db.Set("GP16_KEY2_SCAN_CASE_ALL_STRING", "GP16_SCAN_CASE_ALL_STRING_VALUE2"); + s = db.Set("GP16_KEY3_SCAN_CASE_ALL_STRING", "GP16_SCAN_CASE_ALL_STRING_VALUE3"); + delete_keys.emplace_back("GP16_KEY1_SCAN_CASE_ALL_STRING"); + delete_keys.emplace_back("GP16_KEY2_SCAN_CASE_ALL_STRING"); + delete_keys.emplace_back("GP16_KEY3_SCAN_CASE_ALL_STRING"); + + // Hash + s = db.HSet("GP16_KEY1_SCAN_CASE_ALL_HASH", "GP16_SCAN_CASE_ALL_HASH_FIELD1", "GP16_SCAN_CASE_ALL_HASH_VALUE1", + &int32_ret); + s = db.HSet("GP16_KEY2_SCAN_CASE_ALL_HASH", "GP16_SCAN_CASE_ALL_HASH_FIELD2", "GP16_SCAN_CASE_ALL_HASH_VALUE2", + &int32_ret); + s = db.HSet("GP16_KEY3_SCAN_CASE_ALL_HASH", "GP16_SCAN_CASE_ALL_HASH_FIELD3", "GP16_SCAN_CASE_ALL_HASH_VALUE3", + &int32_ret); + delete_keys.emplace_back("GP16_KEY1_SCAN_CASE_ALL_HASH"); + delete_keys.emplace_back("GP16_KEY2_SCAN_CASE_ALL_HASH"); + delete_keys.emplace_back("GP16_KEY3_SCAN_CASE_ALL_HASH"); + + // Set + s = db.SAdd("GP16_KEY1_SCAN_CASE_ALL_SET", {"GP16_SCAN_CASE_ALL_SET_MEMBER1"}, &int32_ret); + s = db.SAdd("GP16_KEY2_SCAN_CASE_ALL_SET", {"GP16_SCAN_CASE_ALL_SET_MEMBER2"}, &int32_ret); + s = db.SAdd("GP16_KEY3_SCAN_CASE_ALL_SET", {"GP16_SCAN_CASE_ALL_SET_MEMBER3"}, &int32_ret); + delete_keys.emplace_back("GP16_KEY1_SCAN_CASE_ALL_SET"); + delete_keys.emplace_back("GP16_KEY2_SCAN_CASE_ALL_SET"); + delete_keys.emplace_back("GP16_KEY3_SCAN_CASE_ALL_SET"); + + // List + s = db.LPush("GP16_KEY1_SCAN_CASE_ALL_LIST", {"GP16_SCAN_CASE_ALL_LIST_NODE1"}, &uint64_ret); + s = db.LPush("GP16_KEY2_SCAN_CASE_ALL_LIST", {"GP16_SCAN_CASE_ALL_LIST_NODE2"}, &uint64_ret); + s = db.LPush("GP16_KEY3_SCAN_CASE_ALL_LIST", {"GP16_SCAN_CASE_ALL_LIST_NODE3"}, &uint64_ret); + delete_keys.emplace_back("GP16_KEY1_SCAN_CASE_ALL_LIST"); + delete_keys.emplace_back("GP16_KEY2_SCAN_CASE_ALL_LIST"); + delete_keys.emplace_back("GP16_KEY3_SCAN_CASE_ALL_LIST"); + + // ZSet + s = db.ZAdd("GP16_KEY1_SCAN_CASE_ALL_ZSET", {{1, "GP16_SCAN_CASE_ALL_LIST_MEMBER1"}}, &int32_ret); + s = db.ZAdd("GP16_KEY2_SCAN_CASE_ALL_ZSET", {{1, "GP16_SCAN_CASE_ALL_LIST_MEMBER2"}}, &int32_ret); + s = db.ZAdd("GP16_KEY3_SCAN_CASE_ALL_ZSET", {{1, "GP16_SCAN_CASE_ALL_LIST_MEMBER3"}}, &int32_ret); + delete_keys.emplace_back("GP16_KEY1_SCAN_CASE_ALL_ZSET"); + delete_keys.emplace_back("GP16_KEY2_SCAN_CASE_ALL_ZSET"); + delete_keys.emplace_back("GP16_KEY3_SCAN_CASE_ALL_ZSET"); + + cursor = 0; + keys.clear(); + total_keys.clear(); + do { + next_cursor = db.Scan(DataType::kAll, cursor, "GP16_KEY3_SCAN_CASE_ALL_*", 5, &keys); + total_keys.insert(total_keys.end(), keys.begin(), keys.end()); + cursor = next_cursor; + } while (cursor != 0); + ASSERT_EQ(total_keys.size(), 5); + ASSERT_EQ(total_keys[0], "GP16_KEY3_SCAN_CASE_ALL_STRING"); + ASSERT_EQ(total_keys[1], "GP16_KEY3_SCAN_CASE_ALL_HASH"); + ASSERT_EQ(total_keys[2], "GP16_KEY3_SCAN_CASE_ALL_SET"); + ASSERT_EQ(total_keys[3], "GP16_KEY3_SCAN_CASE_ALL_LIST"); + ASSERT_EQ(total_keys[4], "GP16_KEY3_SCAN_CASE_ALL_ZSET"); + + del_num = 
db.Del(delete_keys); + ASSERT_EQ(del_num, 15); + sleep(2); + db.Compact(DataType::kAll, true); +} + +// Scan +// Note: This test needs to execute first because all of the data is +// predetermined. +TEST_F(KeysTest, ScanCaseSingleTest) { // NOLINT + int64_t cursor; + int64_t next_cursor; + int64_t del_num; + int32_t int32_ret; + uint64_t uint64_ret; + std::vector<std::string> keys; + std::vector<std::string> total_keys; + std::vector<std::string> delete_keys; + std::map<DataType, Status> type_status; + + // ***************** Group 1 Test ***************** + delete_keys.clear(); + // String + s = db.Set("GP1_KEY1_SCAN_CASE_SINGLE_STRING", "GP1_SCAN_CASE_SINGLE_STRING_VALUE1"); + s = db.Set("GP1_KEY2_SCAN_CASE_SINGLE_STRING", "GP1_SCAN_CASE_SINGLE_STRING_VALUE2"); + s = db.Set("GP1_KEY3_SCAN_CASE_SINGLE_STRING", "GP1_SCAN_CASE_SINGLE_STRING_VALUE3"); + s = db.Set("GP1_KEY4_SCAN_CASE_SINGLE_STRING", "GP1_SCAN_CASE_SINGLE_STRING_VALUE4"); + s = db.Set("GP1_KEY5_SCAN_CASE_SINGLE_STRING", "GP1_SCAN_CASE_SINGLE_STRING_VALUE5"); + s = db.Set("GP1_KEY6_SCAN_CASE_SINGLE_STRING", "GP1_SCAN_CASE_SINGLE_STRING_VALUE6"); + delete_keys.emplace_back("GP1_KEY1_SCAN_CASE_SINGLE_STRING"); + delete_keys.emplace_back("GP1_KEY2_SCAN_CASE_SINGLE_STRING"); + delete_keys.emplace_back("GP1_KEY3_SCAN_CASE_SINGLE_STRING"); + delete_keys.emplace_back("GP1_KEY4_SCAN_CASE_SINGLE_STRING"); + delete_keys.emplace_back("GP1_KEY5_SCAN_CASE_SINGLE_STRING"); + delete_keys.emplace_back("GP1_KEY6_SCAN_CASE_SINGLE_STRING"); + + // Hash + s = db.HSet("GP1_KEY1_SCAN_CASE_SINGLE_HASH", "GP1_SCAN_CASE_SINGLE_HASH_FIELD1", "GP1_SCAN_CASE_SINGLE_HASH_VALUE1", + &int32_ret); + s = db.HSet("GP1_KEY2_SCAN_CASE_SINGLE_HASH", "GP1_SCAN_CASE_SINGLE_HASH_FIELD2", "GP1_SCAN_CASE_SINGLE_HASH_VALUE2", + &int32_ret); + s = db.HSet("GP1_KEY3_SCAN_CASE_SINGLE_HASH", "GP1_SCAN_CASE_SINGLE_HASH_FIELD3", "GP1_SCAN_CASE_SINGLE_HASH_VALUE3", + &int32_ret); + s = db.HSet("GP1_KEY4_SCAN_CASE_SINGLE_HASH", "GP1_SCAN_CASE_SINGLE_HASH_FIELD4", "GP1_SCAN_CASE_SINGLE_HASH_VALUE4", + &int32_ret); + s = db.HSet("GP1_KEY5_SCAN_CASE_SINGLE_HASH", "GP1_SCAN_CASE_SINGLE_HASH_FIELD5", "GP1_SCAN_CASE_SINGLE_HASH_VALUE5", + &int32_ret); + s = db.HSet("GP1_KEY6_SCAN_CASE_SINGLE_HASH", "GP1_SCAN_CASE_SINGLE_HASH_FIELD6", "GP1_SCAN_CASE_SINGLE_HASH_VALUE6", + &int32_ret); + delete_keys.emplace_back("GP1_KEY1_SCAN_CASE_SINGLE_HASH"); + delete_keys.emplace_back("GP1_KEY2_SCAN_CASE_SINGLE_HASH"); + delete_keys.emplace_back("GP1_KEY3_SCAN_CASE_SINGLE_HASH"); + delete_keys.emplace_back("GP1_KEY4_SCAN_CASE_SINGLE_HASH"); + delete_keys.emplace_back("GP1_KEY5_SCAN_CASE_SINGLE_HASH"); + delete_keys.emplace_back("GP1_KEY6_SCAN_CASE_SINGLE_HASH"); + + // Set + s = db.SAdd("GP1_KEY1_SCAN_CASE_SINGLE_SET", {"GP1_SCAN_CASE_SINGLE_SET_MEMBER1"}, &int32_ret); + s = db.SAdd("GP1_KEY2_SCAN_CASE_SINGLE_SET", {"GP1_SCAN_CASE_SINGLE_SET_MEMBER2"}, &int32_ret); + s = db.SAdd("GP1_KEY3_SCAN_CASE_SINGLE_SET", {"GP1_SCAN_CASE_SINGLE_SET_MEMBER3"}, &int32_ret); + s = db.SAdd("GP1_KEY4_SCAN_CASE_SINGLE_SET", {"GP1_SCAN_CASE_SINGLE_SET_MEMBER4"}, &int32_ret); + s = db.SAdd("GP1_KEY5_SCAN_CASE_SINGLE_SET", {"GP1_SCAN_CASE_SINGLE_SET_MEMBER5"}, &int32_ret); + s = db.SAdd("GP1_KEY6_SCAN_CASE_SINGLE_SET", {"GP1_SCAN_CASE_SINGLE_SET_MEMBER6"}, &int32_ret); + delete_keys.emplace_back("GP1_KEY1_SCAN_CASE_SINGLE_SET"); + delete_keys.emplace_back("GP1_KEY2_SCAN_CASE_SINGLE_SET"); + delete_keys.emplace_back("GP1_KEY3_SCAN_CASE_SINGLE_SET"); + delete_keys.emplace_back("GP1_KEY4_SCAN_CASE_SINGLE_SET"); + delete_keys.emplace_back("GP1_KEY5_SCAN_CASE_SINGLE_SET"); +
delete_keys.emplace_back("GP1_KEY6_SCAN_CASE_SINGLE_SET"); + + // List + s = db.LPush("GP1_KEY1_SCAN_CASE_SINGLE_LIST", {"GP1_SCAN_CASE_SINGLE_LIST_NODE1"}, &uint64_ret); + s = db.LPush("GP1_KEY2_SCAN_CASE_SINGLE_LIST", {"GP1_SCAN_CASE_SINGLE_LIST_NODE2"}, &uint64_ret); + s = db.LPush("GP1_KEY3_SCAN_CASE_SINGLE_LIST", {"GP1_SCAN_CASE_SINGLE_LIST_NODE3"}, &uint64_ret); + s = db.LPush("GP1_KEY4_SCAN_CASE_SINGLE_LIST", {"GP1_SCAN_CASE_SINGLE_LIST_NODE4"}, &uint64_ret); + s = db.LPush("GP1_KEY5_SCAN_CASE_SINGLE_LIST", {"GP1_SCAN_CASE_SINGLE_LIST_NODE5"}, &uint64_ret); + s = db.LPush("GP1_KEY6_SCAN_CASE_SINGLE_LIST", {"GP1_SCAN_CASE_SINGLE_LIST_NODE6"}, &uint64_ret); + delete_keys.emplace_back("GP1_KEY1_SCAN_CASE_SINGLE_LIST"); + delete_keys.emplace_back("GP1_KEY2_SCAN_CASE_SINGLE_LIST"); + delete_keys.emplace_back("GP1_KEY3_SCAN_CASE_SINGLE_LIST"); + delete_keys.emplace_back("GP1_KEY4_SCAN_CASE_SINGLE_LIST"); + delete_keys.emplace_back("GP1_KEY5_SCAN_CASE_SINGLE_LIST"); + delete_keys.emplace_back("GP1_KEY6_SCAN_CASE_SINGLE_LIST"); + + // ZSet + s = db.ZAdd("GP1_KEY1_SCAN_CASE_SINGLE_ZSET", {{1, "GP1_SCAN_CASE_SINGLE_LIST_MEMBER1"}}, &int32_ret); + s = db.ZAdd("GP1_KEY2_SCAN_CASE_SINGLE_ZSET", {{1, "GP1_SCAN_CASE_SINGLE_LIST_MEMBER2"}}, &int32_ret); + s = db.ZAdd("GP1_KEY3_SCAN_CASE_SINGLE_ZSET", {{1, "GP1_SCAN_CASE_SINGLE_LIST_MEMBER3"}}, &int32_ret); + s = db.ZAdd("GP1_KEY4_SCAN_CASE_SINGLE_ZSET", {{1, "GP1_SCAN_CASE_SINGLE_LIST_MEMBER4"}}, &int32_ret); + s = db.ZAdd("GP1_KEY5_SCAN_CASE_SINGLE_ZSET", {{1, "GP1_SCAN_CASE_SINGLE_LIST_MEMBER5"}}, &int32_ret); + s = db.ZAdd("GP1_KEY6_SCAN_CASE_SINGLE_ZSET", {{1, "GP1_SCAN_CASE_SINGLE_LIST_MEMBER6"}}, &int32_ret); + delete_keys.emplace_back("GP1_KEY1_SCAN_CASE_SINGLE_ZSET"); + delete_keys.emplace_back("GP1_KEY2_SCAN_CASE_SINGLE_ZSET"); + delete_keys.emplace_back("GP1_KEY3_SCAN_CASE_SINGLE_ZSET"); + delete_keys.emplace_back("GP1_KEY4_SCAN_CASE_SINGLE_ZSET"); + delete_keys.emplace_back("GP1_KEY5_SCAN_CASE_SINGLE_ZSET"); + delete_keys.emplace_back("GP1_KEY6_SCAN_CASE_SINGLE_ZSET"); + + keys.clear(); + cursor = 0; + cursor = db.Scan(DataType::kStrings, cursor, "*", 2, &keys); + ASSERT_EQ(cursor, 2); + ASSERT_EQ(keys.size(), 2); + ASSERT_EQ(keys[0], "GP1_KEY1_SCAN_CASE_SINGLE_STRING"); + ASSERT_EQ(keys[1], "GP1_KEY2_SCAN_CASE_SINGLE_STRING"); + + keys.clear(); + cursor = db.Scan(DataType::kStrings, cursor, "*", 2, &keys); + ASSERT_EQ(cursor, 4); + ASSERT_EQ(keys.size(), 2); + ASSERT_EQ(keys[0], "GP1_KEY3_SCAN_CASE_SINGLE_STRING"); + ASSERT_EQ(keys[1], "GP1_KEY4_SCAN_CASE_SINGLE_STRING"); + + keys.clear(); + cursor = db.Scan(DataType::kStrings, cursor, "*", 2, &keys); + ASSERT_EQ(cursor, 0); + ASSERT_EQ(keys.size(), 2); + ASSERT_EQ(keys[0], "GP1_KEY5_SCAN_CASE_SINGLE_STRING"); + ASSERT_EQ(keys[1], "GP1_KEY6_SCAN_CASE_SINGLE_STRING"); + + del_num = db.Del(delete_keys); + ASSERT_EQ(del_num, 30); + sleep(2); + db.Compact(DataType::kAll, true); + + // ***************** Group 2 Test ***************** + delete_keys.clear(); + // String + s = db.Set("GP2_KEY1_SCAN_CASE_SINGLE_STRING", "GP2_SCAN_CASE_SINGLE_STRING_VALUE1"); + s = db.Set("GP2_KEY2_SCAN_CASE_SINGLE_STRING", "GP2_SCAN_CASE_SINGLE_STRING_VALUE2"); + s = db.Set("GP2_KEY3_SCAN_CASE_SINGLE_STRING", "GP2_SCAN_CASE_SINGLE_STRING_VALUE3"); + s = db.Set("GP2_KEY4_SCAN_CASE_SINGLE_STRING", "GP2_SCAN_CASE_SINGLE_STRING_VALUE4"); + s = db.Set("GP2_KEY5_SCAN_CASE_SINGLE_STRING", "GP2_SCAN_CASE_SINGLE_STRING_VALUE5"); + s = db.Set("GP2_KEY6_SCAN_CASE_SINGLE_STRING", "GP2_SCAN_CASE_SINGLE_STRING_VALUE6"); + 
delete_keys.emplace_back("GP2_KEY1_SCAN_CASE_SINGLE_STRING"); + delete_keys.emplace_back("GP2_KEY2_SCAN_CASE_SINGLE_STRING"); + delete_keys.emplace_back("GP2_KEY3_SCAN_CASE_SINGLE_STRING"); + delete_keys.emplace_back("GP2_KEY4_SCAN_CASE_SINGLE_STRING"); + delete_keys.emplace_back("GP2_KEY5_SCAN_CASE_SINGLE_STRING"); + delete_keys.emplace_back("GP2_KEY6_SCAN_CASE_SINGLE_STRING"); + + // Hash + s = db.HSet("GP2_KEY1_SCAN_CASE_SINGLE_HASH", "GP2_SCAN_CASE_SINGLE_HASH_FIELD1", "GP2_SCAN_CASE_SINGLE_HASH_VALUE1", + &int32_ret); + s = db.HSet("GP2_KEY2_SCAN_CASE_SINGLE_HASH", "GP2_SCAN_CASE_SINGLE_HASH_FIELD2", "GP2_SCAN_CASE_SINGLE_HASH_VALUE2", + &int32_ret); + s = db.HSet("GP2_KEY3_SCAN_CASE_SINGLE_HASH", "GP2_SCAN_CASE_SINGLE_HASH_FIELD3", "GP2_SCAN_CASE_SINGLE_HASH_VALUE3", + &int32_ret); + s = db.HSet("GP2_KEY4_SCAN_CASE_SINGLE_HASH", "GP2_SCAN_CASE_SINGLE_HASH_FIELD4", "GP2_SCAN_CASE_SINGLE_HASH_VALUE4", + &int32_ret); + s = db.HSet("GP2_KEY5_SCAN_CASE_SINGLE_HASH", "GP2_SCAN_CASE_SINGLE_HASH_FIELD5", "GP2_SCAN_CASE_SINGLE_HASH_VALUE5", + &int32_ret); + s = db.HSet("GP2_KEY6_SCAN_CASE_SINGLE_HASH", "GP2_SCAN_CASE_SINGLE_HASH_FIELD6", "GP2_SCAN_CASE_SINGLE_HASH_VALUE6", + &int32_ret); + delete_keys.emplace_back("GP2_KEY1_SCAN_CASE_SINGLE_HASH"); + delete_keys.emplace_back("GP2_KEY2_SCAN_CASE_SINGLE_HASH"); + delete_keys.emplace_back("GP2_KEY3_SCAN_CASE_SINGLE_HASH"); + delete_keys.emplace_back("GP2_KEY4_SCAN_CASE_SINGLE_HASH"); + delete_keys.emplace_back("GP2_KEY5_SCAN_CASE_SINGLE_HASH"); + delete_keys.emplace_back("GP2_KEY6_SCAN_CASE_SINGLE_HASH"); + + // Set + s = db.SAdd("GP2_KEY1_SCAN_CASE_SINGLE_SET", {"GP2_SCAN_CASE_SINGLE_SET_MEMBER1"}, &int32_ret); + s = db.SAdd("GP2_KEY2_SCAN_CASE_SINGLE_SET", {"GP2_SCAN_CASE_SINGLE_SET_MEMBER2"}, &int32_ret); + s = db.SAdd("GP2_KEY3_SCAN_CASE_SINGLE_SET", {"GP2_SCAN_CASE_SINGLE_SET_MEMBER3"}, &int32_ret); + s = db.SAdd("GP2_KEY4_SCAN_CASE_SINGLE_SET", {"GP2_SCAN_CASE_SINGLE_SET_MEMBER4"}, &int32_ret); + s = db.SAdd("GP2_KEY5_SCAN_CASE_SINGLE_SET", {"GP2_SCAN_CASE_SINGLE_SET_MEMBER5"}, &int32_ret); + s = db.SAdd("GP2_KEY6_SCAN_CASE_SINGLE_SET", {"GP2_SCAN_CASE_SINGLE_SET_MEMBER6"}, &int32_ret); + delete_keys.emplace_back("GP2_KEY1_SCAN_CASE_SINGLE_SET"); + delete_keys.emplace_back("GP2_KEY2_SCAN_CASE_SINGLE_SET"); + delete_keys.emplace_back("GP2_KEY3_SCAN_CASE_SINGLE_SET"); + delete_keys.emplace_back("GP2_KEY4_SCAN_CASE_SINGLE_SET"); + delete_keys.emplace_back("GP2_KEY5_SCAN_CASE_SINGLE_SET"); + delete_keys.emplace_back("GP2_KEY6_SCAN_CASE_SINGLE_SET"); + + // List + s = db.LPush("GP2_KEY1_SCAN_CASE_SINGLE_LIST", {"GP2_SCAN_CASE_SINGLE_LIST_NODE1"}, &uint64_ret); + s = db.LPush("GP2_KEY2_SCAN_CASE_SINGLE_LIST", {"GP2_SCAN_CASE_SINGLE_LIST_NODE2"}, &uint64_ret); + s = db.LPush("GP2_KEY3_SCAN_CASE_SINGLE_LIST", {"GP2_SCAN_CASE_SINGLE_LIST_NODE3"}, &uint64_ret); + s = db.LPush("GP2_KEY4_SCAN_CASE_SINGLE_LIST", {"GP2_SCAN_CASE_SINGLE_LIST_NODE4"}, &uint64_ret); + s = db.LPush("GP2_KEY5_SCAN_CASE_SINGLE_LIST", {"GP2_SCAN_CASE_SINGLE_LIST_NODE5"}, &uint64_ret); + s = db.LPush("GP2_KEY6_SCAN_CASE_SINGLE_LIST", {"GP2_SCAN_CASE_SINGLE_LIST_NODE6"}, &uint64_ret); + delete_keys.emplace_back("GP2_KEY1_SCAN_CASE_SINGLE_LIST"); + delete_keys.emplace_back("GP2_KEY2_SCAN_CASE_SINGLE_LIST"); + delete_keys.emplace_back("GP2_KEY3_SCAN_CASE_SINGLE_LIST"); + delete_keys.emplace_back("GP2_KEY4_SCAN_CASE_SINGLE_LIST"); + delete_keys.emplace_back("GP2_KEY5_SCAN_CASE_SINGLE_LIST"); + delete_keys.emplace_back("GP2_KEY6_SCAN_CASE_SINGLE_LIST"); + + // ZSet + s = 
db.ZAdd("GP2_KEY1_SCAN_CASE_SINGLE_ZSET", {{1, "GP2_SCAN_CASE_SINGLE_LIST_MEMBER1"}}, &int32_ret); + s = db.ZAdd("GP2_KEY2_SCAN_CASE_SINGLE_ZSET", {{1, "GP2_SCAN_CASE_SINGLE_LIST_MEMBER2"}}, &int32_ret); + s = db.ZAdd("GP2_KEY3_SCAN_CASE_SINGLE_ZSET", {{1, "GP2_SCAN_CASE_SINGLE_LIST_MEMBER3"}}, &int32_ret); + s = db.ZAdd("GP2_KEY4_SCAN_CASE_SINGLE_ZSET", {{1, "GP2_SCAN_CASE_SINGLE_LIST_MEMBER4"}}, &int32_ret); + s = db.ZAdd("GP2_KEY5_SCAN_CASE_SINGLE_ZSET", {{1, "GP2_SCAN_CASE_SINGLE_LIST_MEMBER5"}}, &int32_ret); + s = db.ZAdd("GP2_KEY6_SCAN_CASE_SINGLE_ZSET", {{1, "GP2_SCAN_CASE_SINGLE_LIST_MEMBER6"}}, &int32_ret); + delete_keys.emplace_back("GP2_KEY1_SCAN_CASE_SINGLE_ZSET"); + delete_keys.emplace_back("GP2_KEY2_SCAN_CASE_SINGLE_ZSET"); + delete_keys.emplace_back("GP2_KEY3_SCAN_CASE_SINGLE_ZSET"); + delete_keys.emplace_back("GP2_KEY4_SCAN_CASE_SINGLE_ZSET"); + delete_keys.emplace_back("GP2_KEY5_SCAN_CASE_SINGLE_ZSET"); + delete_keys.emplace_back("GP2_KEY6_SCAN_CASE_SINGLE_ZSET"); + + keys.clear(); + cursor = 0; + cursor = db.Scan(DataType::kStrings, cursor, "*", 4, &keys); + ASSERT_EQ(cursor, 4); + ASSERT_EQ(keys.size(), 4); + ASSERT_EQ(keys[0], "GP2_KEY1_SCAN_CASE_SINGLE_STRING"); + ASSERT_EQ(keys[1], "GP2_KEY2_SCAN_CASE_SINGLE_STRING"); + ASSERT_EQ(keys[2], "GP2_KEY3_SCAN_CASE_SINGLE_STRING"); + ASSERT_EQ(keys[3], "GP2_KEY4_SCAN_CASE_SINGLE_STRING"); + + keys.clear(); + cursor = db.Scan(DataType::kStrings, cursor, "*", 4, &keys); + ASSERT_EQ(cursor, 0); + ASSERT_EQ(keys.size(), 2); + ASSERT_EQ(keys[0], "GP2_KEY5_SCAN_CASE_SINGLE_STRING"); + ASSERT_EQ(keys[1], "GP2_KEY6_SCAN_CASE_SINGLE_STRING"); + + del_num = db.Del(delete_keys); + ASSERT_EQ(del_num, 30); + sleep(2); + db.Compact(DataType::kAll, true); + + // ***************** Group 3 Test ***************** + delete_keys.clear(); + // String + s = db.Set("GP3_KEY1_SCAN_CASE_SINGLE_STRING", "GP3_SCAN_CASE_SINGLE_STRING_VALUE1"); + s = db.Set("GP3_KEY2_SCAN_CASE_SINGLE_STRING", "GP3_SCAN_CASE_SINGLE_STRING_VALUE2"); + s = db.Set("GP3_KEY3_SCAN_CASE_SINGLE_STRING", "GP3_SCAN_CASE_SINGLE_STRING_VALUE3"); + s = db.Set("GP3_KEY4_SCAN_CASE_SINGLE_STRING", "GP3_SCAN_CASE_SINGLE_STRING_VALUE4"); + s = db.Set("GP3_KEY5_SCAN_CASE_SINGLE_STRING", "GP3_SCAN_CASE_SINGLE_STRING_VALUE5"); + s = db.Set("GP3_KEY6_SCAN_CASE_SINGLE_STRING", "GP3_SCAN_CASE_SINGLE_STRING_VALUE6"); + delete_keys.emplace_back("GP3_KEY1_SCAN_CASE_SINGLE_STRING"); + delete_keys.emplace_back("GP3_KEY2_SCAN_CASE_SINGLE_STRING"); + delete_keys.emplace_back("GP3_KEY3_SCAN_CASE_SINGLE_STRING"); + delete_keys.emplace_back("GP3_KEY4_SCAN_CASE_SINGLE_STRING"); + delete_keys.emplace_back("GP3_KEY5_SCAN_CASE_SINGLE_STRING"); + delete_keys.emplace_back("GP3_KEY6_SCAN_CASE_SINGLE_STRING"); + + // Hash + s = db.HSet("GP3_KEY1_SCAN_CASE_SINGLE_HASH", "GP3_SCAN_CASE_SINGLE_HASH_FIELD1", "GP3_SCAN_CASE_SINGLE_HASH_VALUE1", + &int32_ret); + s = db.HSet("GP3_KEY2_SCAN_CASE_SINGLE_HASH", "GP3_SCAN_CASE_SINGLE_HASH_FIELD2", "GP3_SCAN_CASE_SINGLE_HASH_VALUE2", + &int32_ret); + s = db.HSet("GP3_KEY3_SCAN_CASE_SINGLE_HASH", "GP3_SCAN_CASE_SINGLE_HASH_FIELD3", "GP3_SCAN_CASE_SINGLE_HASH_VALUE3", + &int32_ret); + s = db.HSet("GP3_KEY4_SCAN_CASE_SINGLE_HASH", "GP3_SCAN_CASE_SINGLE_HASH_FIELD4", "GP3_SCAN_CASE_SINGLE_HASH_VALUE4", + &int32_ret); + s = db.HSet("GP3_KEY5_SCAN_CASE_SINGLE_HASH", "GP3_SCAN_CASE_SINGLE_HASH_FIELD5", "GP3_SCAN_CASE_SINGLE_HASH_VALUE5", + &int32_ret); + s = db.HSet("GP3_KEY6_SCAN_CASE_SINGLE_HASH", "GP3_SCAN_CASE_SINGLE_HASH_FIELD6", "GP3_SCAN_CASE_SINGLE_HASH_VALUE6", + 
&int32_ret); + delete_keys.emplace_back("GP3_KEY1_SCAN_CASE_SINGLE_HASH"); + delete_keys.emplace_back("GP3_KEY2_SCAN_CASE_SINGLE_HASH"); + delete_keys.emplace_back("GP3_KEY3_SCAN_CASE_SINGLE_HASH"); + delete_keys.emplace_back("GP3_KEY4_SCAN_CASE_SINGLE_HASH"); + delete_keys.emplace_back("GP3_KEY5_SCAN_CASE_SINGLE_HASH"); + delete_keys.emplace_back("GP3_KEY6_SCAN_CASE_SINGLE_HASH"); + + // Set + s = db.SAdd("GP3_KEY1_SCAN_CASE_SINGLE_SET", {"GP3_SCAN_CASE_SINGLE_SET_MEMBER1"}, &int32_ret); + s = db.SAdd("GP3_KEY2_SCAN_CASE_SINGLE_SET", {"GP3_SCAN_CASE_SINGLE_SET_MEMBER2"}, &int32_ret); + s = db.SAdd("GP3_KEY3_SCAN_CASE_SINGLE_SET", {"GP3_SCAN_CASE_SINGLE_SET_MEMBER3"}, &int32_ret); + s = db.SAdd("GP3_KEY4_SCAN_CASE_SINGLE_SET", {"GP3_SCAN_CASE_SINGLE_SET_MEMBER4"}, &int32_ret); + s = db.SAdd("GP3_KEY5_SCAN_CASE_SINGLE_SET", {"GP3_SCAN_CASE_SINGLE_SET_MEMBER5"}, &int32_ret); + s = db.SAdd("GP3_KEY6_SCAN_CASE_SINGLE_SET", {"GP3_SCAN_CASE_SINGLE_SET_MEMBER6"}, &int32_ret); + delete_keys.emplace_back("GP3_KEY1_SCAN_CASE_SINGLE_SET"); + delete_keys.emplace_back("GP3_KEY2_SCAN_CASE_SINGLE_SET"); + delete_keys.emplace_back("GP3_KEY3_SCAN_CASE_SINGLE_SET"); + delete_keys.emplace_back("GP3_KEY4_SCAN_CASE_SINGLE_SET"); + delete_keys.emplace_back("GP3_KEY5_SCAN_CASE_SINGLE_SET"); + delete_keys.emplace_back("GP3_KEY6_SCAN_CASE_SINGLE_SET"); + + // List + s = db.LPush("GP3_KEY1_SCAN_CASE_SINGLE_LIST", {"GP3_SCAN_CASE_SINGLE_LIST_NODE1"}, &uint64_ret); + s = db.LPush("GP3_KEY2_SCAN_CASE_SINGLE_LIST", {"GP3_SCAN_CASE_SINGLE_LIST_NODE2"}, &uint64_ret); + s = db.LPush("GP3_KEY3_SCAN_CASE_SINGLE_LIST", {"GP3_SCAN_CASE_SINGLE_LIST_NODE3"}, &uint64_ret); + s = db.LPush("GP3_KEY4_SCAN_CASE_SINGLE_LIST", {"GP3_SCAN_CASE_SINGLE_LIST_NODE4"}, &uint64_ret); + s = db.LPush("GP3_KEY5_SCAN_CASE_SINGLE_LIST", {"GP3_SCAN_CASE_SINGLE_LIST_NODE5"}, &uint64_ret); + s = db.LPush("GP3_KEY6_SCAN_CASE_SINGLE_LIST", {"GP3_SCAN_CASE_SINGLE_LIST_NODE6"}, &uint64_ret); + delete_keys.emplace_back("GP3_KEY1_SCAN_CASE_SINGLE_LIST"); + delete_keys.emplace_back("GP3_KEY2_SCAN_CASE_SINGLE_LIST"); + delete_keys.emplace_back("GP3_KEY3_SCAN_CASE_SINGLE_LIST"); + delete_keys.emplace_back("GP3_KEY4_SCAN_CASE_SINGLE_LIST"); + delete_keys.emplace_back("GP3_KEY5_SCAN_CASE_SINGLE_LIST"); + delete_keys.emplace_back("GP3_KEY6_SCAN_CASE_SINGLE_LIST"); + + // ZSet + s = db.ZAdd("GP3_KEY1_SCAN_CASE_SINGLE_ZSET", {{1, "GP3_SCAN_CASE_SINGLE_LIST_MEMBER1"}}, &int32_ret); + s = db.ZAdd("GP3_KEY2_SCAN_CASE_SINGLE_ZSET", {{1, "GP3_SCAN_CASE_SINGLE_LIST_MEMBER2"}}, &int32_ret); + s = db.ZAdd("GP3_KEY3_SCAN_CASE_SINGLE_ZSET", {{1, "GP3_SCAN_CASE_SINGLE_LIST_MEMBER3"}}, &int32_ret); + s = db.ZAdd("GP3_KEY4_SCAN_CASE_SINGLE_ZSET", {{1, "GP3_SCAN_CASE_SINGLE_LIST_MEMBER4"}}, &int32_ret); + s = db.ZAdd("GP3_KEY5_SCAN_CASE_SINGLE_ZSET", {{1, "GP3_SCAN_CASE_SINGLE_LIST_MEMBER5"}}, &int32_ret); + s = db.ZAdd("GP3_KEY6_SCAN_CASE_SINGLE_ZSET", {{1, "GP3_SCAN_CASE_SINGLE_LIST_MEMBER6"}}, &int32_ret); + delete_keys.emplace_back("GP3_KEY1_SCAN_CASE_SINGLE_ZSET"); + delete_keys.emplace_back("GP3_KEY2_SCAN_CASE_SINGLE_ZSET"); + delete_keys.emplace_back("GP3_KEY3_SCAN_CASE_SINGLE_ZSET"); + delete_keys.emplace_back("GP3_KEY4_SCAN_CASE_SINGLE_ZSET"); + delete_keys.emplace_back("GP3_KEY5_SCAN_CASE_SINGLE_ZSET"); + delete_keys.emplace_back("GP3_KEY6_SCAN_CASE_SINGLE_ZSET"); + + keys.clear(); + cursor = 0; + cursor = db.Scan(DataType::kStrings, cursor, "*", 6, &keys); + ASSERT_EQ(cursor, 0); + ASSERT_EQ(keys.size(), 6); + ASSERT_EQ(keys[0], "GP3_KEY1_SCAN_CASE_SINGLE_STRING"); 
+  keys.clear();
+  cursor = 0;
+  cursor = db.Scan(DataType::kStrings, cursor, "*", 6, &keys);
+  ASSERT_EQ(cursor, 0);
+  ASSERT_EQ(keys.size(), 6);
+  ASSERT_EQ(keys[0], "GP3_KEY1_SCAN_CASE_SINGLE_STRING");
+  ASSERT_EQ(keys[1], "GP3_KEY2_SCAN_CASE_SINGLE_STRING");
+  ASSERT_EQ(keys[2], "GP3_KEY3_SCAN_CASE_SINGLE_STRING");
+  ASSERT_EQ(keys[3], "GP3_KEY4_SCAN_CASE_SINGLE_STRING");
+  ASSERT_EQ(keys[4], "GP3_KEY5_SCAN_CASE_SINGLE_STRING");
+  ASSERT_EQ(keys[5], "GP3_KEY6_SCAN_CASE_SINGLE_STRING");
+
+  del_num = db.Del(delete_keys);
+  ASSERT_EQ(del_num, 30);
+  sleep(2);
+  db.Compact(DataType::kAll, true);
+
+  // ***************** Group 4 Test *****************
+  delete_keys.clear();
+  // String
+  s = db.Set("GP4_KEY1_SCAN_CASE_SINGLE_STRING", "GP4_SCAN_CASE_SINGLE_STRING_VALUE1");
+  s = db.Set("GP4_KEY2_SCAN_CASE_SINGLE_STRING", "GP4_SCAN_CASE_SINGLE_STRING_VALUE2");
+  s = db.Set("GP4_KEY3_SCAN_CASE_SINGLE_STRING", "GP4_SCAN_CASE_SINGLE_STRING_VALUE3");
+  s = db.Set("GP4_KEY4_SCAN_CASE_SINGLE_STRING", "GP4_SCAN_CASE_SINGLE_STRING_VALUE4");
+  s = db.Set("GP4_KEY5_SCAN_CASE_SINGLE_STRING", "GP4_SCAN_CASE_SINGLE_STRING_VALUE5");
+  s = db.Set("GP4_KEY6_SCAN_CASE_SINGLE_STRING", "GP4_SCAN_CASE_SINGLE_STRING_VALUE6");
+  delete_keys.emplace_back("GP4_KEY1_SCAN_CASE_SINGLE_STRING");
+  delete_keys.emplace_back("GP4_KEY2_SCAN_CASE_SINGLE_STRING");
+  delete_keys.emplace_back("GP4_KEY3_SCAN_CASE_SINGLE_STRING");
+  delete_keys.emplace_back("GP4_KEY4_SCAN_CASE_SINGLE_STRING");
+  delete_keys.emplace_back("GP4_KEY5_SCAN_CASE_SINGLE_STRING");
+  delete_keys.emplace_back("GP4_KEY6_SCAN_CASE_SINGLE_STRING");
+
+  // Hash
+  s = db.HSet("GP4_KEY1_SCAN_CASE_SINGLE_HASH", "GP4_SCAN_CASE_SINGLE_HASH_FIELD1", "GP4_SCAN_CASE_SINGLE_HASH_VALUE1",
+              &int32_ret);
+  s = db.HSet("GP4_KEY2_SCAN_CASE_SINGLE_HASH", "GP4_SCAN_CASE_SINGLE_HASH_FIELD2", "GP4_SCAN_CASE_SINGLE_HASH_VALUE2",
+              &int32_ret);
+  s = db.HSet("GP4_KEY3_SCAN_CASE_SINGLE_HASH", "GP4_SCAN_CASE_SINGLE_HASH_FIELD3", "GP4_SCAN_CASE_SINGLE_HASH_VALUE3",
+              &int32_ret);
+  s = db.HSet("GP4_KEY4_SCAN_CASE_SINGLE_HASH", "GP4_SCAN_CASE_SINGLE_HASH_FIELD4", "GP4_SCAN_CASE_SINGLE_HASH_VALUE4",
+              &int32_ret);
+  s = db.HSet("GP4_KEY5_SCAN_CASE_SINGLE_HASH", "GP4_SCAN_CASE_SINGLE_HASH_FIELD5", "GP4_SCAN_CASE_SINGLE_HASH_VALUE5",
+              &int32_ret);
+  s = db.HSet("GP4_KEY6_SCAN_CASE_SINGLE_HASH", "GP4_SCAN_CASE_SINGLE_HASH_FIELD6", "GP4_SCAN_CASE_SINGLE_HASH_VALUE6",
+              &int32_ret);
+  delete_keys.emplace_back("GP4_KEY1_SCAN_CASE_SINGLE_HASH");
+  delete_keys.emplace_back("GP4_KEY2_SCAN_CASE_SINGLE_HASH");
+  delete_keys.emplace_back("GP4_KEY3_SCAN_CASE_SINGLE_HASH");
+  delete_keys.emplace_back("GP4_KEY4_SCAN_CASE_SINGLE_HASH");
+  delete_keys.emplace_back("GP4_KEY5_SCAN_CASE_SINGLE_HASH");
+  delete_keys.emplace_back("GP4_KEY6_SCAN_CASE_SINGLE_HASH");
+
+  // Set
+  s = db.SAdd("GP4_KEY1_SCAN_CASE_SINGLE_SET", {"GP4_SCAN_CASE_SINGLE_SET_MEMBER1"}, &int32_ret);
+  s = db.SAdd("GP4_KEY2_SCAN_CASE_SINGLE_SET", {"GP4_SCAN_CASE_SINGLE_SET_MEMBER2"}, &int32_ret);
+  s = db.SAdd("GP4_KEY3_SCAN_CASE_SINGLE_SET", {"GP4_SCAN_CASE_SINGLE_SET_MEMBER3"}, &int32_ret);
+  s = db.SAdd("GP4_KEY4_SCAN_CASE_SINGLE_SET", {"GP4_SCAN_CASE_SINGLE_SET_MEMBER4"}, &int32_ret);
+  s = db.SAdd("GP4_KEY5_SCAN_CASE_SINGLE_SET", {"GP4_SCAN_CASE_SINGLE_SET_MEMBER5"}, &int32_ret);
+  s = db.SAdd("GP4_KEY6_SCAN_CASE_SINGLE_SET", {"GP4_SCAN_CASE_SINGLE_SET_MEMBER6"}, &int32_ret);
+  delete_keys.emplace_back("GP4_KEY1_SCAN_CASE_SINGLE_SET");
+  delete_keys.emplace_back("GP4_KEY2_SCAN_CASE_SINGLE_SET");
+  delete_keys.emplace_back("GP4_KEY3_SCAN_CASE_SINGLE_SET");
+  delete_keys.emplace_back("GP4_KEY4_SCAN_CASE_SINGLE_SET");
+  delete_keys.emplace_back("GP4_KEY5_SCAN_CASE_SINGLE_SET");
+  delete_keys.emplace_back("GP4_KEY6_SCAN_CASE_SINGLE_SET");
delete_keys.emplace_back("GP4_KEY6_SCAN_CASE_SINGLE_SET"); + + // List + s = db.LPush("GP4_KEY1_SCAN_CASE_SINGLE_LIST", {"GP4_SCAN_CASE_SINGLE_LIST_NODE1"}, &uint64_ret); + s = db.LPush("GP4_KEY2_SCAN_CASE_SINGLE_LIST", {"GP4_SCAN_CASE_SINGLE_LIST_NODE2"}, &uint64_ret); + s = db.LPush("GP4_KEY3_SCAN_CASE_SINGLE_LIST", {"GP4_SCAN_CASE_SINGLE_LIST_NODE3"}, &uint64_ret); + s = db.LPush("GP4_KEY4_SCAN_CASE_SINGLE_LIST", {"GP4_SCAN_CASE_SINGLE_LIST_NODE4"}, &uint64_ret); + s = db.LPush("GP4_KEY5_SCAN_CASE_SINGLE_LIST", {"GP4_SCAN_CASE_SINGLE_LIST_NODE5"}, &uint64_ret); + s = db.LPush("GP4_KEY6_SCAN_CASE_SINGLE_LIST", {"GP4_SCAN_CASE_SINGLE_LIST_NODE6"}, &uint64_ret); + delete_keys.emplace_back("GP4_KEY1_SCAN_CASE_SINGLE_LIST"); + delete_keys.emplace_back("GP4_KEY2_SCAN_CASE_SINGLE_LIST"); + delete_keys.emplace_back("GP4_KEY3_SCAN_CASE_SINGLE_LIST"); + delete_keys.emplace_back("GP4_KEY4_SCAN_CASE_SINGLE_LIST"); + delete_keys.emplace_back("GP4_KEY5_SCAN_CASE_SINGLE_LIST"); + delete_keys.emplace_back("GP4_KEY6_SCAN_CASE_SINGLE_LIST"); + + // ZSet + s = db.ZAdd("GP4_KEY1_SCAN_CASE_SINGLE_ZSET", {{1, "GP4_SCAN_CASE_SINGLE_LIST_MEMBER1"}}, &int32_ret); + s = db.ZAdd("GP4_KEY2_SCAN_CASE_SINGLE_ZSET", {{1, "GP4_SCAN_CASE_SINGLE_LIST_MEMBER2"}}, &int32_ret); + s = db.ZAdd("GP4_KEY3_SCAN_CASE_SINGLE_ZSET", {{1, "GP4_SCAN_CASE_SINGLE_LIST_MEMBER3"}}, &int32_ret); + s = db.ZAdd("GP4_KEY4_SCAN_CASE_SINGLE_ZSET", {{1, "GP4_SCAN_CASE_SINGLE_LIST_MEMBER4"}}, &int32_ret); + s = db.ZAdd("GP4_KEY5_SCAN_CASE_SINGLE_ZSET", {{1, "GP4_SCAN_CASE_SINGLE_LIST_MEMBER5"}}, &int32_ret); + s = db.ZAdd("GP4_KEY6_SCAN_CASE_SINGLE_ZSET", {{1, "GP4_SCAN_CASE_SINGLE_LIST_MEMBER6"}}, &int32_ret); + delete_keys.emplace_back("GP4_KEY1_SCAN_CASE_SINGLE_ZSET"); + delete_keys.emplace_back("GP4_KEY2_SCAN_CASE_SINGLE_ZSET"); + delete_keys.emplace_back("GP4_KEY3_SCAN_CASE_SINGLE_ZSET"); + delete_keys.emplace_back("GP4_KEY4_SCAN_CASE_SINGLE_ZSET"); + delete_keys.emplace_back("GP4_KEY5_SCAN_CASE_SINGLE_ZSET"); + delete_keys.emplace_back("GP4_KEY6_SCAN_CASE_SINGLE_ZSET"); + + keys.clear(); + cursor = 0; + cursor = db.Scan(DataType::kStrings, cursor, "*", 10, &keys); + ASSERT_EQ(cursor, 0); + ASSERT_EQ(keys.size(), 6); + ASSERT_EQ(keys[0], "GP4_KEY1_SCAN_CASE_SINGLE_STRING"); + ASSERT_EQ(keys[1], "GP4_KEY2_SCAN_CASE_SINGLE_STRING"); + ASSERT_EQ(keys[2], "GP4_KEY3_SCAN_CASE_SINGLE_STRING"); + ASSERT_EQ(keys[3], "GP4_KEY4_SCAN_CASE_SINGLE_STRING"); + ASSERT_EQ(keys[4], "GP4_KEY5_SCAN_CASE_SINGLE_STRING"); + ASSERT_EQ(keys[5], "GP4_KEY6_SCAN_CASE_SINGLE_STRING"); + + del_num = db.Del(delete_keys); + ASSERT_EQ(del_num, 30); + sleep(2); + db.Compact(DataType::kAll, true); + + // ***************** Group 5 Test ***************** + delete_keys.clear(); + // String + s = db.Set("GP5_KEY1_SCAN_CASE_SINGLE_STRING", "GP5_SCAN_CASE_SINGLE_STRING_VALUE1"); + s = db.Set("GP5_KEY2_SCAN_CASE_SINGLE_STRING", "GP5_SCAN_CASE_SINGLE_STRING_VALUE2"); + s = db.Set("GP5_KEY3_SCAN_CASE_SINGLE_STRING", "GP5_SCAN_CASE_SINGLE_STRING_VALUE3"); + s = db.Set("GP5_KEY4_SCAN_CASE_SINGLE_STRING", "GP5_SCAN_CASE_SINGLE_STRING_VALUE4"); + s = db.Set("GP5_KEY5_SCAN_CASE_SINGLE_STRING", "GP5_SCAN_CASE_SINGLE_STRING_VALUE5"); + s = db.Set("GP5_KEY6_SCAN_CASE_SINGLE_STRING", "GP5_SCAN_CASE_SINGLE_STRING_VALUE6"); + delete_keys.emplace_back("GP5_KEY1_SCAN_CASE_SINGLE_STRING"); + delete_keys.emplace_back("GP5_KEY2_SCAN_CASE_SINGLE_STRING"); + delete_keys.emplace_back("GP5_KEY3_SCAN_CASE_SINGLE_STRING"); + delete_keys.emplace_back("GP5_KEY4_SCAN_CASE_SINGLE_STRING"); + 
delete_keys.emplace_back("GP5_KEY5_SCAN_CASE_SINGLE_STRING"); + delete_keys.emplace_back("GP5_KEY6_SCAN_CASE_SINGLE_STRING"); + + // Hash + s = db.HSet("GP5_KEY1_SCAN_CASE_SINGLE_HASH", "GP5_SCAN_CASE_SINGLE_HASH_FIELD1", "GP5_SCAN_CASE_SINGLE_HASH_VALUE1", + &int32_ret); + s = db.HSet("GP5_KEY2_SCAN_CASE_SINGLE_HASH", "GP5_SCAN_CASE_SINGLE_HASH_FIELD2", "GP5_SCAN_CASE_SINGLE_HASH_VALUE2", + &int32_ret); + s = db.HSet("GP5_KEY3_SCAN_CASE_SINGLE_HASH", "GP5_SCAN_CASE_SINGLE_HASH_FIELD3", "GP5_SCAN_CASE_SINGLE_HASH_VALUE3", + &int32_ret); + s = db.HSet("GP5_KEY4_SCAN_CASE_SINGLE_HASH", "GP5_SCAN_CASE_SINGLE_HASH_FIELD4", "GP5_SCAN_CASE_SINGLE_HASH_VALUE4", + &int32_ret); + s = db.HSet("GP5_KEY5_SCAN_CASE_SINGLE_HASH", "GP5_SCAN_CASE_SINGLE_HASH_FIELD5", "GP5_SCAN_CASE_SINGLE_HASH_VALUE5", + &int32_ret); + s = db.HSet("GP5_KEY6_SCAN_CASE_SINGLE_HASH", "GP5_SCAN_CASE_SINGLE_HASH_FIELD6", "GP5_SCAN_CASE_SINGLE_HASH_VALUE6", + &int32_ret); + delete_keys.emplace_back("GP5_KEY1_SCAN_CASE_SINGLE_HASH"); + delete_keys.emplace_back("GP5_KEY2_SCAN_CASE_SINGLE_HASH"); + delete_keys.emplace_back("GP5_KEY3_SCAN_CASE_SINGLE_HASH"); + delete_keys.emplace_back("GP5_KEY4_SCAN_CASE_SINGLE_HASH"); + delete_keys.emplace_back("GP5_KEY5_SCAN_CASE_SINGLE_HASH"); + delete_keys.emplace_back("GP5_KEY6_SCAN_CASE_SINGLE_HASH"); + + // Set + s = db.SAdd("GP5_KEY1_SCAN_CASE_SINGLE_SET", {"GP5_SCAN_CASE_SINGLE_SET_MEMBER1"}, &int32_ret); + s = db.SAdd("GP5_KEY2_SCAN_CASE_SINGLE_SET", {"GP5_SCAN_CASE_SINGLE_SET_MEMBER2"}, &int32_ret); + s = db.SAdd("GP5_KEY3_SCAN_CASE_SINGLE_SET", {"GP5_SCAN_CASE_SINGLE_SET_MEMBER3"}, &int32_ret); + s = db.SAdd("GP5_KEY4_SCAN_CASE_SINGLE_SET", {"GP5_SCAN_CASE_SINGLE_SET_MEMBER4"}, &int32_ret); + s = db.SAdd("GP5_KEY5_SCAN_CASE_SINGLE_SET", {"GP5_SCAN_CASE_SINGLE_SET_MEMBER5"}, &int32_ret); + s = db.SAdd("GP5_KEY6_SCAN_CASE_SINGLE_SET", {"GP5_SCAN_CASE_SINGLE_SET_MEMBER6"}, &int32_ret); + delete_keys.emplace_back("GP5_KEY1_SCAN_CASE_SINGLE_SET"); + delete_keys.emplace_back("GP5_KEY2_SCAN_CASE_SINGLE_SET"); + delete_keys.emplace_back("GP5_KEY3_SCAN_CASE_SINGLE_SET"); + delete_keys.emplace_back("GP5_KEY4_SCAN_CASE_SINGLE_SET"); + delete_keys.emplace_back("GP5_KEY5_SCAN_CASE_SINGLE_SET"); + delete_keys.emplace_back("GP5_KEY6_SCAN_CASE_SINGLE_SET"); + + // List + s = db.LPush("GP5_KEY1_SCAN_CASE_SINGLE_LIST", {"GP5_SCAN_CASE_SINGLE_LIST_NODE1"}, &uint64_ret); + s = db.LPush("GP5_KEY2_SCAN_CASE_SINGLE_LIST", {"GP5_SCAN_CASE_SINGLE_LIST_NODE2"}, &uint64_ret); + s = db.LPush("GP5_KEY3_SCAN_CASE_SINGLE_LIST", {"GP5_SCAN_CASE_SINGLE_LIST_NODE3"}, &uint64_ret); + s = db.LPush("GP5_KEY4_SCAN_CASE_SINGLE_LIST", {"GP5_SCAN_CASE_SINGLE_LIST_NODE4"}, &uint64_ret); + s = db.LPush("GP5_KEY5_SCAN_CASE_SINGLE_LIST", {"GP5_SCAN_CASE_SINGLE_LIST_NODE5"}, &uint64_ret); + s = db.LPush("GP5_KEY6_SCAN_CASE_SINGLE_LIST", {"GP5_SCAN_CASE_SINGLE_LIST_NODE6"}, &uint64_ret); + delete_keys.emplace_back("GP5_KEY1_SCAN_CASE_SINGLE_LIST"); + delete_keys.emplace_back("GP5_KEY2_SCAN_CASE_SINGLE_LIST"); + delete_keys.emplace_back("GP5_KEY3_SCAN_CASE_SINGLE_LIST"); + delete_keys.emplace_back("GP5_KEY4_SCAN_CASE_SINGLE_LIST"); + delete_keys.emplace_back("GP5_KEY5_SCAN_CASE_SINGLE_LIST"); + delete_keys.emplace_back("GP5_KEY6_SCAN_CASE_SINGLE_LIST"); + + // ZSet + s = db.ZAdd("GP5_KEY1_SCAN_CASE_SINGLE_ZSET", {{1, "GP5_SCAN_CASE_SINGLE_LIST_MEMBER1"}}, &int32_ret); + s = db.ZAdd("GP5_KEY2_SCAN_CASE_SINGLE_ZSET", {{1, "GP5_SCAN_CASE_SINGLE_LIST_MEMBER2"}}, &int32_ret); + s = db.ZAdd("GP5_KEY3_SCAN_CASE_SINGLE_ZSET", {{1, 
"GP5_SCAN_CASE_SINGLE_LIST_MEMBER3"}}, &int32_ret); + s = db.ZAdd("GP5_KEY4_SCAN_CASE_SINGLE_ZSET", {{1, "GP5_SCAN_CASE_SINGLE_LIST_MEMBER4"}}, &int32_ret); + s = db.ZAdd("GP5_KEY5_SCAN_CASE_SINGLE_ZSET", {{1, "GP5_SCAN_CASE_SINGLE_LIST_MEMBER5"}}, &int32_ret); + s = db.ZAdd("GP5_KEY6_SCAN_CASE_SINGLE_ZSET", {{1, "GP5_SCAN_CASE_SINGLE_LIST_MEMBER6"}}, &int32_ret); + delete_keys.emplace_back("GP5_KEY1_SCAN_CASE_SINGLE_ZSET"); + delete_keys.emplace_back("GP5_KEY2_SCAN_CASE_SINGLE_ZSET"); + delete_keys.emplace_back("GP5_KEY3_SCAN_CASE_SINGLE_ZSET"); + delete_keys.emplace_back("GP5_KEY4_SCAN_CASE_SINGLE_ZSET"); + delete_keys.emplace_back("GP5_KEY5_SCAN_CASE_SINGLE_ZSET"); + delete_keys.emplace_back("GP5_KEY6_SCAN_CASE_SINGLE_ZSET"); + + keys.clear(); + cursor = 0; + cursor = db.Scan(DataType::kSets, cursor, "*", 2, &keys); + ASSERT_EQ(cursor, 2); + ASSERT_EQ(keys.size(), 2); + ASSERT_EQ(keys[0], "GP5_KEY1_SCAN_CASE_SINGLE_SET"); + ASSERT_EQ(keys[1], "GP5_KEY2_SCAN_CASE_SINGLE_SET"); + + keys.clear(); + cursor = db.Scan(DataType::kSets, cursor, "*", 2, &keys); + ASSERT_EQ(cursor, 4); + ASSERT_EQ(keys.size(), 2); + ASSERT_EQ(keys[0], "GP5_KEY3_SCAN_CASE_SINGLE_SET"); + ASSERT_EQ(keys[1], "GP5_KEY4_SCAN_CASE_SINGLE_SET"); + + keys.clear(); + cursor = db.Scan(DataType::kSets, cursor, "*", 2, &keys); + ASSERT_EQ(cursor, 0); + ASSERT_EQ(keys.size(), 2); + ASSERT_EQ(keys[0], "GP5_KEY5_SCAN_CASE_SINGLE_SET"); + ASSERT_EQ(keys[1], "GP5_KEY6_SCAN_CASE_SINGLE_SET"); + + del_num = db.Del(delete_keys); + ASSERT_EQ(del_num, 30); + sleep(2); + db.Compact(DataType::kAll, true); + + // ***************** Group 6 Test ***************** + delete_keys.clear(); + // String + s = db.Set("GP6_KEY1_SCAN_CASE_SINGLE_STRING", "GP6_SCAN_CASE_SINGLE_STRING_VALUE1"); + s = db.Set("GP6_KEY2_SCAN_CASE_SINGLE_STRING", "GP6_SCAN_CASE_SINGLE_STRING_VALUE2"); + s = db.Set("GP6_KEY3_SCAN_CASE_SINGLE_STRING", "GP6_SCAN_CASE_SINGLE_STRING_VALUE3"); + s = db.Set("GP6_KEY4_SCAN_CASE_SINGLE_STRING", "GP6_SCAN_CASE_SINGLE_STRING_VALUE4"); + s = db.Set("GP6_KEY5_SCAN_CASE_SINGLE_STRING", "GP6_SCAN_CASE_SINGLE_STRING_VALUE5"); + s = db.Set("GP6_KEY6_SCAN_CASE_SINGLE_STRING", "GP6_SCAN_CASE_SINGLE_STRING_VALUE6"); + delete_keys.emplace_back("GP6_KEY1_SCAN_CASE_SINGLE_STRING"); + delete_keys.emplace_back("GP6_KEY2_SCAN_CASE_SINGLE_STRING"); + delete_keys.emplace_back("GP6_KEY3_SCAN_CASE_SINGLE_STRING"); + delete_keys.emplace_back("GP6_KEY4_SCAN_CASE_SINGLE_STRING"); + delete_keys.emplace_back("GP6_KEY5_SCAN_CASE_SINGLE_STRING"); + delete_keys.emplace_back("GP6_KEY6_SCAN_CASE_SINGLE_STRING"); + + // Hash + s = db.HSet("GP6_KEY1_SCAN_CASE_SINGLE_HASH", "GP6_SCAN_CASE_SINGLE_HASH_FIELD1", "GP6_SCAN_CASE_SINGLE_HASH_VALUE1", + &int32_ret); + s = db.HSet("GP6_KEY2_SCAN_CASE_SINGLE_HASH", "GP6_SCAN_CASE_SINGLE_HASH_FIELD2", "GP6_SCAN_CASE_SINGLE_HASH_VALUE2", + &int32_ret); + s = db.HSet("GP6_KEY3_SCAN_CASE_SINGLE_HASH", "GP6_SCAN_CASE_SINGLE_HASH_FIELD3", "GP6_SCAN_CASE_SINGLE_HASH_VALUE3", + &int32_ret); + s = db.HSet("GP6_KEY4_SCAN_CASE_SINGLE_HASH", "GP6_SCAN_CASE_SINGLE_HASH_FIELD4", "GP6_SCAN_CASE_SINGLE_HASH_VALUE4", + &int32_ret); + s = db.HSet("GP6_KEY5_SCAN_CASE_SINGLE_HASH", "GP6_SCAN_CASE_SINGLE_HASH_FIELD5", "GP6_SCAN_CASE_SINGLE_HASH_VALUE5", + &int32_ret); + s = db.HSet("GP6_KEY6_SCAN_CASE_SINGLE_HASH", "GP6_SCAN_CASE_SINGLE_HASH_FIELD6", "GP6_SCAN_CASE_SINGLE_HASH_VALUE6", + &int32_ret); + delete_keys.emplace_back("GP6_KEY1_SCAN_CASE_SINGLE_HASH"); + delete_keys.emplace_back("GP6_KEY2_SCAN_CASE_SINGLE_HASH"); + 
delete_keys.emplace_back("GP6_KEY3_SCAN_CASE_SINGLE_HASH"); + delete_keys.emplace_back("GP6_KEY4_SCAN_CASE_SINGLE_HASH"); + delete_keys.emplace_back("GP6_KEY5_SCAN_CASE_SINGLE_HASH"); + delete_keys.emplace_back("GP6_KEY6_SCAN_CASE_SINGLE_HASH"); + + // Set + s = db.SAdd("GP6_KEY1_SCAN_CASE_SINGLE_SET", {"GP6_SCAN_CASE_SINGLE_SET_MEMBER1"}, &int32_ret); + s = db.SAdd("GP6_KEY2_SCAN_CASE_SINGLE_SET", {"GP6_SCAN_CASE_SINGLE_SET_MEMBER2"}, &int32_ret); + s = db.SAdd("GP6_KEY3_SCAN_CASE_SINGLE_SET", {"GP6_SCAN_CASE_SINGLE_SET_MEMBER3"}, &int32_ret); + s = db.SAdd("GP6_KEY4_SCAN_CASE_SINGLE_SET", {"GP6_SCAN_CASE_SINGLE_SET_MEMBER4"}, &int32_ret); + s = db.SAdd("GP6_KEY5_SCAN_CASE_SINGLE_SET", {"GP6_SCAN_CASE_SINGLE_SET_MEMBER5"}, &int32_ret); + s = db.SAdd("GP6_KEY6_SCAN_CASE_SINGLE_SET", {"GP6_SCAN_CASE_SINGLE_SET_MEMBER6"}, &int32_ret); + delete_keys.emplace_back("GP6_KEY1_SCAN_CASE_SINGLE_SET"); + delete_keys.emplace_back("GP6_KEY2_SCAN_CASE_SINGLE_SET"); + delete_keys.emplace_back("GP6_KEY3_SCAN_CASE_SINGLE_SET"); + delete_keys.emplace_back("GP6_KEY4_SCAN_CASE_SINGLE_SET"); + delete_keys.emplace_back("GP6_KEY5_SCAN_CASE_SINGLE_SET"); + delete_keys.emplace_back("GP6_KEY6_SCAN_CASE_SINGLE_SET"); + + // List + s = db.LPush("GP6_KEY1_SCAN_CASE_SINGLE_LIST", {"GP6_SCAN_CASE_SINGLE_LIST_NODE1"}, &uint64_ret); + s = db.LPush("GP6_KEY2_SCAN_CASE_SINGLE_LIST", {"GP6_SCAN_CASE_SINGLE_LIST_NODE2"}, &uint64_ret); + s = db.LPush("GP6_KEY3_SCAN_CASE_SINGLE_LIST", {"GP6_SCAN_CASE_SINGLE_LIST_NODE3"}, &uint64_ret); + s = db.LPush("GP6_KEY4_SCAN_CASE_SINGLE_LIST", {"GP6_SCAN_CASE_SINGLE_LIST_NODE4"}, &uint64_ret); + s = db.LPush("GP6_KEY5_SCAN_CASE_SINGLE_LIST", {"GP6_SCAN_CASE_SINGLE_LIST_NODE5"}, &uint64_ret); + s = db.LPush("GP6_KEY6_SCAN_CASE_SINGLE_LIST", {"GP6_SCAN_CASE_SINGLE_LIST_NODE6"}, &uint64_ret); + delete_keys.emplace_back("GP6_KEY1_SCAN_CASE_SINGLE_LIST"); + delete_keys.emplace_back("GP6_KEY2_SCAN_CASE_SINGLE_LIST"); + delete_keys.emplace_back("GP6_KEY3_SCAN_CASE_SINGLE_LIST"); + delete_keys.emplace_back("GP6_KEY4_SCAN_CASE_SINGLE_LIST"); + delete_keys.emplace_back("GP6_KEY5_SCAN_CASE_SINGLE_LIST"); + delete_keys.emplace_back("GP6_KEY6_SCAN_CASE_SINGLE_LIST"); + + // ZSet + s = db.ZAdd("GP6_KEY1_SCAN_CASE_SINGLE_ZSET", {{1, "GP6_SCAN_CASE_SINGLE_LIST_MEMBER1"}}, &int32_ret); + s = db.ZAdd("GP6_KEY2_SCAN_CASE_SINGLE_ZSET", {{1, "GP6_SCAN_CASE_SINGLE_LIST_MEMBER2"}}, &int32_ret); + s = db.ZAdd("GP6_KEY3_SCAN_CASE_SINGLE_ZSET", {{1, "GP6_SCAN_CASE_SINGLE_LIST_MEMBER3"}}, &int32_ret); + s = db.ZAdd("GP6_KEY4_SCAN_CASE_SINGLE_ZSET", {{1, "GP6_SCAN_CASE_SINGLE_LIST_MEMBER4"}}, &int32_ret); + s = db.ZAdd("GP6_KEY5_SCAN_CASE_SINGLE_ZSET", {{1, "GP6_SCAN_CASE_SINGLE_LIST_MEMBER5"}}, &int32_ret); + s = db.ZAdd("GP6_KEY6_SCAN_CASE_SINGLE_ZSET", {{1, "GP6_SCAN_CASE_SINGLE_LIST_MEMBER6"}}, &int32_ret); + delete_keys.emplace_back("GP6_KEY1_SCAN_CASE_SINGLE_ZSET"); + delete_keys.emplace_back("GP6_KEY2_SCAN_CASE_SINGLE_ZSET"); + delete_keys.emplace_back("GP6_KEY3_SCAN_CASE_SINGLE_ZSET"); + delete_keys.emplace_back("GP6_KEY4_SCAN_CASE_SINGLE_ZSET"); + delete_keys.emplace_back("GP6_KEY5_SCAN_CASE_SINGLE_ZSET"); + delete_keys.emplace_back("GP6_KEY6_SCAN_CASE_SINGLE_ZSET"); + + keys.clear(); + cursor = 0; + cursor = db.Scan(DataType::kSets, cursor, "*", 4, &keys); + ASSERT_EQ(cursor, 4); + ASSERT_EQ(keys.size(), 4); + ASSERT_EQ(keys[0], "GP6_KEY1_SCAN_CASE_SINGLE_SET"); + ASSERT_EQ(keys[1], "GP6_KEY2_SCAN_CASE_SINGLE_SET"); + ASSERT_EQ(keys[2], "GP6_KEY3_SCAN_CASE_SINGLE_SET"); + ASSERT_EQ(keys[3], 
"GP6_KEY4_SCAN_CASE_SINGLE_SET"); + + keys.clear(); + cursor = db.Scan(DataType::kSets, cursor, "*", 4, &keys); + ASSERT_EQ(cursor, 0); + ASSERT_EQ(keys.size(), 2); + ASSERT_EQ(keys[0], "GP6_KEY5_SCAN_CASE_SINGLE_SET"); + ASSERT_EQ(keys[1], "GP6_KEY6_SCAN_CASE_SINGLE_SET"); + + del_num = db.Del(delete_keys); + ASSERT_EQ(del_num, 30); + sleep(2); + db.Compact(DataType::kAll, true); + + // ***************** Group 7 Test ***************** + delete_keys.clear(); + // String + s = db.Set("GP7_KEY1_SCAN_CASE_SINGLE_STRING", "GP7_SCAN_CASE_SINGLE_STRING_VALUE1"); + s = db.Set("GP7_KEY2_SCAN_CASE_SINGLE_STRING", "GP7_SCAN_CASE_SINGLE_STRING_VALUE2"); + s = db.Set("GP7_KEY3_SCAN_CASE_SINGLE_STRING", "GP7_SCAN_CASE_SINGLE_STRING_VALUE3"); + s = db.Set("GP7_KEY4_SCAN_CASE_SINGLE_STRING", "GP7_SCAN_CASE_SINGLE_STRING_VALUE4"); + s = db.Set("GP7_KEY5_SCAN_CASE_SINGLE_STRING", "GP7_SCAN_CASE_SINGLE_STRING_VALUE5"); + s = db.Set("GP7_KEY6_SCAN_CASE_SINGLE_STRING", "GP7_SCAN_CASE_SINGLE_STRING_VALUE6"); + delete_keys.emplace_back("GP7_KEY1_SCAN_CASE_SINGLE_STRING"); + delete_keys.emplace_back("GP7_KEY2_SCAN_CASE_SINGLE_STRING"); + delete_keys.emplace_back("GP7_KEY3_SCAN_CASE_SINGLE_STRING"); + delete_keys.emplace_back("GP7_KEY4_SCAN_CASE_SINGLE_STRING"); + delete_keys.emplace_back("GP7_KEY5_SCAN_CASE_SINGLE_STRING"); + delete_keys.emplace_back("GP7_KEY6_SCAN_CASE_SINGLE_STRING"); + + // Hash + s = db.HSet("GP7_KEY1_SCAN_CASE_SINGLE_HASH", "GP7_SCAN_CASE_SINGLE_HASH_FIELD1", "GP7_SCAN_CASE_SINGLE_HASH_VALUE1", + &int32_ret); + s = db.HSet("GP7_KEY2_SCAN_CASE_SINGLE_HASH", "GP7_SCAN_CASE_SINGLE_HASH_FIELD2", "GP7_SCAN_CASE_SINGLE_HASH_VALUE2", + &int32_ret); + s = db.HSet("GP7_KEY3_SCAN_CASE_SINGLE_HASH", "GP7_SCAN_CASE_SINGLE_HASH_FIELD3", "GP7_SCAN_CASE_SINGLE_HASH_VALUE3", + &int32_ret); + s = db.HSet("GP7_KEY4_SCAN_CASE_SINGLE_HASH", "GP7_SCAN_CASE_SINGLE_HASH_FIELD4", "GP7_SCAN_CASE_SINGLE_HASH_VALUE4", + &int32_ret); + s = db.HSet("GP7_KEY5_SCAN_CASE_SINGLE_HASH", "GP7_SCAN_CASE_SINGLE_HASH_FIELD5", "GP7_SCAN_CASE_SINGLE_HASH_VALUE5", + &int32_ret); + s = db.HSet("GP7_KEY6_SCAN_CASE_SINGLE_HASH", "GP7_SCAN_CASE_SINGLE_HASH_FIELD6", "GP7_SCAN_CASE_SINGLE_HASH_VALUE6", + &int32_ret); + delete_keys.emplace_back("GP7_KEY1_SCAN_CASE_SINGLE_HASH"); + delete_keys.emplace_back("GP7_KEY2_SCAN_CASE_SINGLE_HASH"); + delete_keys.emplace_back("GP7_KEY3_SCAN_CASE_SINGLE_HASH"); + delete_keys.emplace_back("GP7_KEY4_SCAN_CASE_SINGLE_HASH"); + delete_keys.emplace_back("GP7_KEY5_SCAN_CASE_SINGLE_HASH"); + delete_keys.emplace_back("GP7_KEY6_SCAN_CASE_SINGLE_HASH"); + + // Set + s = db.SAdd("GP7_KEY1_SCAN_CASE_SINGLE_SET", {"GP7_SCAN_CASE_SINGLE_SET_MEMBER1"}, &int32_ret); + s = db.SAdd("GP7_KEY2_SCAN_CASE_SINGLE_SET", {"GP7_SCAN_CASE_SINGLE_SET_MEMBER2"}, &int32_ret); + s = db.SAdd("GP7_KEY3_SCAN_CASE_SINGLE_SET", {"GP7_SCAN_CASE_SINGLE_SET_MEMBER3"}, &int32_ret); + s = db.SAdd("GP7_KEY4_SCAN_CASE_SINGLE_SET", {"GP7_SCAN_CASE_SINGLE_SET_MEMBER4"}, &int32_ret); + s = db.SAdd("GP7_KEY5_SCAN_CASE_SINGLE_SET", {"GP7_SCAN_CASE_SINGLE_SET_MEMBER5"}, &int32_ret); + s = db.SAdd("GP7_KEY6_SCAN_CASE_SINGLE_SET", {"GP7_SCAN_CASE_SINGLE_SET_MEMBER6"}, &int32_ret); + delete_keys.emplace_back("GP7_KEY1_SCAN_CASE_SINGLE_SET"); + delete_keys.emplace_back("GP7_KEY2_SCAN_CASE_SINGLE_SET"); + delete_keys.emplace_back("GP7_KEY3_SCAN_CASE_SINGLE_SET"); + delete_keys.emplace_back("GP7_KEY4_SCAN_CASE_SINGLE_SET"); + delete_keys.emplace_back("GP7_KEY5_SCAN_CASE_SINGLE_SET"); + delete_keys.emplace_back("GP7_KEY6_SCAN_CASE_SINGLE_SET"); + + // 
+
+  // List
+  s = db.LPush("GP7_KEY1_SCAN_CASE_SINGLE_LIST", {"GP7_SCAN_CASE_SINGLE_LIST_NODE1"}, &uint64_ret);
+  s = db.LPush("GP7_KEY2_SCAN_CASE_SINGLE_LIST", {"GP7_SCAN_CASE_SINGLE_LIST_NODE2"}, &uint64_ret);
+  s = db.LPush("GP7_KEY3_SCAN_CASE_SINGLE_LIST", {"GP7_SCAN_CASE_SINGLE_LIST_NODE3"}, &uint64_ret);
+  s = db.LPush("GP7_KEY4_SCAN_CASE_SINGLE_LIST", {"GP7_SCAN_CASE_SINGLE_LIST_NODE4"}, &uint64_ret);
+  s = db.LPush("GP7_KEY5_SCAN_CASE_SINGLE_LIST", {"GP7_SCAN_CASE_SINGLE_LIST_NODE5"}, &uint64_ret);
+  s = db.LPush("GP7_KEY6_SCAN_CASE_SINGLE_LIST", {"GP7_SCAN_CASE_SINGLE_LIST_NODE6"}, &uint64_ret);
+  delete_keys.emplace_back("GP7_KEY1_SCAN_CASE_SINGLE_LIST");
+  delete_keys.emplace_back("GP7_KEY2_SCAN_CASE_SINGLE_LIST");
+  delete_keys.emplace_back("GP7_KEY3_SCAN_CASE_SINGLE_LIST");
+  delete_keys.emplace_back("GP7_KEY4_SCAN_CASE_SINGLE_LIST");
+  delete_keys.emplace_back("GP7_KEY5_SCAN_CASE_SINGLE_LIST");
+  delete_keys.emplace_back("GP7_KEY6_SCAN_CASE_SINGLE_LIST");
+
+  // ZSet
+  s = db.ZAdd("GP7_KEY1_SCAN_CASE_SINGLE_ZSET", {{1, "GP7_SCAN_CASE_SINGLE_LIST_MEMBER1"}}, &int32_ret);
+  s = db.ZAdd("GP7_KEY2_SCAN_CASE_SINGLE_ZSET", {{1, "GP7_SCAN_CASE_SINGLE_LIST_MEMBER2"}}, &int32_ret);
+  s = db.ZAdd("GP7_KEY3_SCAN_CASE_SINGLE_ZSET", {{1, "GP7_SCAN_CASE_SINGLE_LIST_MEMBER3"}}, &int32_ret);
+  s = db.ZAdd("GP7_KEY4_SCAN_CASE_SINGLE_ZSET", {{1, "GP7_SCAN_CASE_SINGLE_LIST_MEMBER4"}}, &int32_ret);
+  s = db.ZAdd("GP7_KEY5_SCAN_CASE_SINGLE_ZSET", {{1, "GP7_SCAN_CASE_SINGLE_LIST_MEMBER5"}}, &int32_ret);
+  s = db.ZAdd("GP7_KEY6_SCAN_CASE_SINGLE_ZSET", {{1, "GP7_SCAN_CASE_SINGLE_LIST_MEMBER6"}}, &int32_ret);
+  delete_keys.emplace_back("GP7_KEY1_SCAN_CASE_SINGLE_ZSET");
+  delete_keys.emplace_back("GP7_KEY2_SCAN_CASE_SINGLE_ZSET");
+  delete_keys.emplace_back("GP7_KEY3_SCAN_CASE_SINGLE_ZSET");
+  delete_keys.emplace_back("GP7_KEY4_SCAN_CASE_SINGLE_ZSET");
+  delete_keys.emplace_back("GP7_KEY5_SCAN_CASE_SINGLE_ZSET");
+  delete_keys.emplace_back("GP7_KEY6_SCAN_CASE_SINGLE_ZSET");
+
+  keys.clear();
+  cursor = 0;
+  cursor = db.Scan(DataType::kSets, cursor, "*", 6, &keys);
+  ASSERT_EQ(cursor, 0);
+  ASSERT_EQ(keys.size(), 6);
+  ASSERT_EQ(keys[0], "GP7_KEY1_SCAN_CASE_SINGLE_SET");
+  ASSERT_EQ(keys[1], "GP7_KEY2_SCAN_CASE_SINGLE_SET");
+  ASSERT_EQ(keys[2], "GP7_KEY3_SCAN_CASE_SINGLE_SET");
+  ASSERT_EQ(keys[3], "GP7_KEY4_SCAN_CASE_SINGLE_SET");
+  ASSERT_EQ(keys[4], "GP7_KEY5_SCAN_CASE_SINGLE_SET");
+  ASSERT_EQ(keys[5], "GP7_KEY6_SCAN_CASE_SINGLE_SET");
+
+  del_num = db.Del(delete_keys);
+  ASSERT_EQ(del_num, 30);
+  sleep(2);
+  db.Compact(DataType::kAll, true);
+
+  // ***************** Group 8 Test *****************
+  delete_keys.clear();
+  // String
+  s = db.Set("GP8_KEY1_SCAN_CASE_SINGLE_STRING", "GP8_SCAN_CASE_SINGLE_STRING_VALUE1");
+  s = db.Set("GP8_KEY2_SCAN_CASE_SINGLE_STRING", "GP8_SCAN_CASE_SINGLE_STRING_VALUE2");
+  s = db.Set("GP8_KEY3_SCAN_CASE_SINGLE_STRING", "GP8_SCAN_CASE_SINGLE_STRING_VALUE3");
+  s = db.Set("GP8_KEY4_SCAN_CASE_SINGLE_STRING", "GP8_SCAN_CASE_SINGLE_STRING_VALUE4");
+  s = db.Set("GP8_KEY5_SCAN_CASE_SINGLE_STRING", "GP8_SCAN_CASE_SINGLE_STRING_VALUE5");
+  s = db.Set("GP8_KEY6_SCAN_CASE_SINGLE_STRING", "GP8_SCAN_CASE_SINGLE_STRING_VALUE6");
+  delete_keys.emplace_back("GP8_KEY1_SCAN_CASE_SINGLE_STRING");
+  delete_keys.emplace_back("GP8_KEY2_SCAN_CASE_SINGLE_STRING");
+  delete_keys.emplace_back("GP8_KEY3_SCAN_CASE_SINGLE_STRING");
+  delete_keys.emplace_back("GP8_KEY4_SCAN_CASE_SINGLE_STRING");
+  delete_keys.emplace_back("GP8_KEY5_SCAN_CASE_SINGLE_STRING");
+  delete_keys.emplace_back("GP8_KEY6_SCAN_CASE_SINGLE_STRING");
delete_keys.emplace_back("GP8_KEY6_SCAN_CASE_SINGLE_STRING"); + + // Hash + s = db.HSet("GP8_KEY1_SCAN_CASE_SINGLE_HASH", "GP8_SCAN_CASE_SINGLE_HASH_FIELD1", "GP8_SCAN_CASE_SINGLE_HASH_VALUE1", + &int32_ret); + s = db.HSet("GP8_KEY2_SCAN_CASE_SINGLE_HASH", "GP8_SCAN_CASE_SINGLE_HASH_FIELD2", "GP8_SCAN_CASE_SINGLE_HASH_VALUE2", + &int32_ret); + s = db.HSet("GP8_KEY3_SCAN_CASE_SINGLE_HASH", "GP8_SCAN_CASE_SINGLE_HASH_FIELD3", "GP8_SCAN_CASE_SINGLE_HASH_VALUE3", + &int32_ret); + s = db.HSet("GP8_KEY4_SCAN_CASE_SINGLE_HASH", "GP8_SCAN_CASE_SINGLE_HASH_FIELD4", "GP8_SCAN_CASE_SINGLE_HASH_VALUE4", + &int32_ret); + s = db.HSet("GP8_KEY5_SCAN_CASE_SINGLE_HASH", "GP8_SCAN_CASE_SINGLE_HASH_FIELD5", "GP8_SCAN_CASE_SINGLE_HASH_VALUE5", + &int32_ret); + s = db.HSet("GP8_KEY6_SCAN_CASE_SINGLE_HASH", "GP8_SCAN_CASE_SINGLE_HASH_FIELD6", "GP8_SCAN_CASE_SINGLE_HASH_VALUE6", + &int32_ret); + delete_keys.emplace_back("GP8_KEY1_SCAN_CASE_SINGLE_HASH"); + delete_keys.emplace_back("GP8_KEY2_SCAN_CASE_SINGLE_HASH"); + delete_keys.emplace_back("GP8_KEY3_SCAN_CASE_SINGLE_HASH"); + delete_keys.emplace_back("GP8_KEY4_SCAN_CASE_SINGLE_HASH"); + delete_keys.emplace_back("GP8_KEY5_SCAN_CASE_SINGLE_HASH"); + delete_keys.emplace_back("GP8_KEY6_SCAN_CASE_SINGLE_HASH"); + + // Set + s = db.SAdd("GP8_KEY1_SCAN_CASE_SINGLE_SET", {"GP8_SCAN_CASE_SINGLE_SET_MEMBER1"}, &int32_ret); + s = db.SAdd("GP8_KEY2_SCAN_CASE_SINGLE_SET", {"GP8_SCAN_CASE_SINGLE_SET_MEMBER2"}, &int32_ret); + s = db.SAdd("GP8_KEY3_SCAN_CASE_SINGLE_SET", {"GP8_SCAN_CASE_SINGLE_SET_MEMBER3"}, &int32_ret); + s = db.SAdd("GP8_KEY4_SCAN_CASE_SINGLE_SET", {"GP8_SCAN_CASE_SINGLE_SET_MEMBER4"}, &int32_ret); + s = db.SAdd("GP8_KEY5_SCAN_CASE_SINGLE_SET", {"GP8_SCAN_CASE_SINGLE_SET_MEMBER5"}, &int32_ret); + s = db.SAdd("GP8_KEY6_SCAN_CASE_SINGLE_SET", {"GP8_SCAN_CASE_SINGLE_SET_MEMBER6"}, &int32_ret); + delete_keys.emplace_back("GP8_KEY1_SCAN_CASE_SINGLE_SET"); + delete_keys.emplace_back("GP8_KEY2_SCAN_CASE_SINGLE_SET"); + delete_keys.emplace_back("GP8_KEY3_SCAN_CASE_SINGLE_SET"); + delete_keys.emplace_back("GP8_KEY4_SCAN_CASE_SINGLE_SET"); + delete_keys.emplace_back("GP8_KEY5_SCAN_CASE_SINGLE_SET"); + delete_keys.emplace_back("GP8_KEY6_SCAN_CASE_SINGLE_SET"); + + // List + s = db.LPush("GP8_KEY1_SCAN_CASE_SINGLE_LIST", {"GP8_SCAN_CASE_SINGLE_LIST_NODE1"}, &uint64_ret); + s = db.LPush("GP8_KEY2_SCAN_CASE_SINGLE_LIST", {"GP8_SCAN_CASE_SINGLE_LIST_NODE2"}, &uint64_ret); + s = db.LPush("GP8_KEY3_SCAN_CASE_SINGLE_LIST", {"GP8_SCAN_CASE_SINGLE_LIST_NODE3"}, &uint64_ret); + s = db.LPush("GP8_KEY4_SCAN_CASE_SINGLE_LIST", {"GP8_SCAN_CASE_SINGLE_LIST_NODE4"}, &uint64_ret); + s = db.LPush("GP8_KEY5_SCAN_CASE_SINGLE_LIST", {"GP8_SCAN_CASE_SINGLE_LIST_NODE5"}, &uint64_ret); + s = db.LPush("GP8_KEY6_SCAN_CASE_SINGLE_LIST", {"GP8_SCAN_CASE_SINGLE_LIST_NODE6"}, &uint64_ret); + delete_keys.emplace_back("GP8_KEY1_SCAN_CASE_SINGLE_LIST"); + delete_keys.emplace_back("GP8_KEY2_SCAN_CASE_SINGLE_LIST"); + delete_keys.emplace_back("GP8_KEY3_SCAN_CASE_SINGLE_LIST"); + delete_keys.emplace_back("GP8_KEY4_SCAN_CASE_SINGLE_LIST"); + delete_keys.emplace_back("GP8_KEY5_SCAN_CASE_SINGLE_LIST"); + delete_keys.emplace_back("GP8_KEY6_SCAN_CASE_SINGLE_LIST"); + + // ZSet + s = db.ZAdd("GP8_KEY1_SCAN_CASE_SINGLE_ZSET", {{1, "GP8_SCAN_CASE_SINGLE_LIST_MEMBER1"}}, &int32_ret); + s = db.ZAdd("GP8_KEY2_SCAN_CASE_SINGLE_ZSET", {{1, "GP8_SCAN_CASE_SINGLE_LIST_MEMBER2"}}, &int32_ret); + s = db.ZAdd("GP8_KEY3_SCAN_CASE_SINGLE_ZSET", {{1, "GP8_SCAN_CASE_SINGLE_LIST_MEMBER3"}}, &int32_ret); + s = 
db.ZAdd("GP8_KEY4_SCAN_CASE_SINGLE_ZSET", {{1, "GP8_SCAN_CASE_SINGLE_LIST_MEMBER4"}}, &int32_ret); + s = db.ZAdd("GP8_KEY5_SCAN_CASE_SINGLE_ZSET", {{1, "GP8_SCAN_CASE_SINGLE_LIST_MEMBER5"}}, &int32_ret); + s = db.ZAdd("GP8_KEY6_SCAN_CASE_SINGLE_ZSET", {{1, "GP8_SCAN_CASE_SINGLE_LIST_MEMBER6"}}, &int32_ret); + delete_keys.emplace_back("GP8_KEY1_SCAN_CASE_SINGLE_ZSET"); + delete_keys.emplace_back("GP8_KEY2_SCAN_CASE_SINGLE_ZSET"); + delete_keys.emplace_back("GP8_KEY3_SCAN_CASE_SINGLE_ZSET"); + delete_keys.emplace_back("GP8_KEY4_SCAN_CASE_SINGLE_ZSET"); + delete_keys.emplace_back("GP8_KEY5_SCAN_CASE_SINGLE_ZSET"); + delete_keys.emplace_back("GP8_KEY6_SCAN_CASE_SINGLE_ZSET"); + + keys.clear(); + cursor = 0; + cursor = db.Scan(DataType::kSets, cursor, "*", 10, &keys); + ASSERT_EQ(cursor, 0); + ASSERT_EQ(keys.size(), 6); + ASSERT_EQ(keys[0], "GP8_KEY1_SCAN_CASE_SINGLE_SET"); + ASSERT_EQ(keys[1], "GP8_KEY2_SCAN_CASE_SINGLE_SET"); + ASSERT_EQ(keys[2], "GP8_KEY3_SCAN_CASE_SINGLE_SET"); + ASSERT_EQ(keys[3], "GP8_KEY4_SCAN_CASE_SINGLE_SET"); + ASSERT_EQ(keys[4], "GP8_KEY5_SCAN_CASE_SINGLE_SET"); + ASSERT_EQ(keys[5], "GP8_KEY6_SCAN_CASE_SINGLE_SET"); + + del_num = db.Del(delete_keys); + ASSERT_EQ(del_num, 30); + sleep(2); + db.Compact(DataType::kAll, true); + + // ***************** Group 9 Test ***************** + delete_keys.clear(); + // String + s = db.Set("GP9_KEY1_SCAN_CASE_SINGLE_STRING", "GP9_SCAN_CASE_SINGLE_STRING_VALUE1"); + s = db.Set("GP9_KEY2_SCAN_CASE_SINGLE_STRING", "GP9_SCAN_CASE_SINGLE_STRING_VALUE2"); + s = db.Set("GP9_KEY3_SCAN_CASE_SINGLE_STRING", "GP9_SCAN_CASE_SINGLE_STRING_VALUE3"); + s = db.Set("GP9_KEY4_SCAN_CASE_SINGLE_STRING", "GP9_SCAN_CASE_SINGLE_STRING_VALUE4"); + s = db.Set("GP9_KEY5_SCAN_CASE_SINGLE_STRING", "GP9_SCAN_CASE_SINGLE_STRING_VALUE5"); + s = db.Set("GP9_KEY6_SCAN_CASE_SINGLE_STRING", "GP9_SCAN_CASE_SINGLE_STRING_VALUE6"); + delete_keys.emplace_back("GP9_KEY1_SCAN_CASE_SINGLE_STRING"); + delete_keys.emplace_back("GP9_KEY2_SCAN_CASE_SINGLE_STRING"); + delete_keys.emplace_back("GP9_KEY3_SCAN_CASE_SINGLE_STRING"); + delete_keys.emplace_back("GP9_KEY4_SCAN_CASE_SINGLE_STRING"); + delete_keys.emplace_back("GP9_KEY5_SCAN_CASE_SINGLE_STRING"); + delete_keys.emplace_back("GP9_KEY6_SCAN_CASE_SINGLE_STRING"); + + // Hash + s = db.HSet("GP9_KEY1_SCAN_CASE_SINGLE_HASH", "GP9_SCAN_CASE_SINGLE_HASH_FIELD1", "GP9_SCAN_CASE_SINGLE_HASH_VALUE1", + &int32_ret); + s = db.HSet("GP9_KEY2_SCAN_CASE_SINGLE_HASH", "GP9_SCAN_CASE_SINGLE_HASH_FIELD2", "GP9_SCAN_CASE_SINGLE_HASH_VALUE2", + &int32_ret); + s = db.HSet("GP9_KEY3_SCAN_CASE_SINGLE_HASH", "GP9_SCAN_CASE_SINGLE_HASH_FIELD3", "GP9_SCAN_CASE_SINGLE_HASH_VALUE3", + &int32_ret); + s = db.HSet("GP9_KEY4_SCAN_CASE_SINGLE_HASH", "GP9_SCAN_CASE_SINGLE_HASH_FIELD4", "GP9_SCAN_CASE_SINGLE_HASH_VALUE4", + &int32_ret); + s = db.HSet("GP9_KEY5_SCAN_CASE_SINGLE_HASH", "GP9_SCAN_CASE_SINGLE_HASH_FIELD5", "GP9_SCAN_CASE_SINGLE_HASH_VALUE5", + &int32_ret); + s = db.HSet("GP9_KEY6_SCAN_CASE_SINGLE_HASH", "GP9_SCAN_CASE_SINGLE_HASH_FIELD6", "GP9_SCAN_CASE_SINGLE_HASH_VALUE6", + &int32_ret); + delete_keys.emplace_back("GP9_KEY1_SCAN_CASE_SINGLE_HASH"); + delete_keys.emplace_back("GP9_KEY2_SCAN_CASE_SINGLE_HASH"); + delete_keys.emplace_back("GP9_KEY3_SCAN_CASE_SINGLE_HASH"); + delete_keys.emplace_back("GP9_KEY4_SCAN_CASE_SINGLE_HASH"); + delete_keys.emplace_back("GP9_KEY5_SCAN_CASE_SINGLE_HASH"); + delete_keys.emplace_back("GP9_KEY6_SCAN_CASE_SINGLE_HASH"); + + // Set + s = db.SAdd("GP9_KEY1_SCAN_CASE_SINGLE_SET", 
{"GP9_SCAN_CASE_SINGLE_SET_MEMBER1"}, &int32_ret); + s = db.SAdd("GP9_KEY2_SCAN_CASE_SINGLE_SET", {"GP9_SCAN_CASE_SINGLE_SET_MEMBER2"}, &int32_ret); + s = db.SAdd("GP9_KEY3_SCAN_CASE_SINGLE_SET", {"GP9_SCAN_CASE_SINGLE_SET_MEMBER3"}, &int32_ret); + s = db.SAdd("GP9_KEY4_SCAN_CASE_SINGLE_SET", {"GP9_SCAN_CASE_SINGLE_SET_MEMBER4"}, &int32_ret); + s = db.SAdd("GP9_KEY5_SCAN_CASE_SINGLE_SET", {"GP9_SCAN_CASE_SINGLE_SET_MEMBER5"}, &int32_ret); + s = db.SAdd("GP9_KEY6_SCAN_CASE_SINGLE_SET", {"GP9_SCAN_CASE_SINGLE_SET_MEMBER6"}, &int32_ret); + delete_keys.emplace_back("GP9_KEY1_SCAN_CASE_SINGLE_SET"); + delete_keys.emplace_back("GP9_KEY2_SCAN_CASE_SINGLE_SET"); + delete_keys.emplace_back("GP9_KEY3_SCAN_CASE_SINGLE_SET"); + delete_keys.emplace_back("GP9_KEY4_SCAN_CASE_SINGLE_SET"); + delete_keys.emplace_back("GP9_KEY5_SCAN_CASE_SINGLE_SET"); + delete_keys.emplace_back("GP9_KEY6_SCAN_CASE_SINGLE_SET"); + + // List + s = db.LPush("GP9_KEY1_SCAN_CASE_SINGLE_LIST", {"GP9_SCAN_CASE_SINGLE_LIST_NODE1"}, &uint64_ret); + s = db.LPush("GP9_KEY2_SCAN_CASE_SINGLE_LIST", {"GP9_SCAN_CASE_SINGLE_LIST_NODE2"}, &uint64_ret); + s = db.LPush("GP9_KEY3_SCAN_CASE_SINGLE_LIST", {"GP9_SCAN_CASE_SINGLE_LIST_NODE3"}, &uint64_ret); + s = db.LPush("GP9_KEY4_SCAN_CASE_SINGLE_LIST", {"GP9_SCAN_CASE_SINGLE_LIST_NODE4"}, &uint64_ret); + s = db.LPush("GP9_KEY5_SCAN_CASE_SINGLE_LIST", {"GP9_SCAN_CASE_SINGLE_LIST_NODE5"}, &uint64_ret); + s = db.LPush("GP9_KEY6_SCAN_CASE_SINGLE_LIST", {"GP9_SCAN_CASE_SINGLE_LIST_NODE6"}, &uint64_ret); + delete_keys.emplace_back("GP9_KEY1_SCAN_CASE_SINGLE_LIST"); + delete_keys.emplace_back("GP9_KEY2_SCAN_CASE_SINGLE_LIST"); + delete_keys.emplace_back("GP9_KEY3_SCAN_CASE_SINGLE_LIST"); + delete_keys.emplace_back("GP9_KEY4_SCAN_CASE_SINGLE_LIST"); + delete_keys.emplace_back("GP9_KEY5_SCAN_CASE_SINGLE_LIST"); + delete_keys.emplace_back("GP9_KEY6_SCAN_CASE_SINGLE_LIST"); + + // ZSet + s = db.ZAdd("GP9_KEY1_SCAN_CASE_SINGLE_ZSET", {{1, "GP9_SCAN_CASE_SINGLE_LIST_MEMBER1"}}, &int32_ret); + s = db.ZAdd("GP9_KEY2_SCAN_CASE_SINGLE_ZSET", {{1, "GP9_SCAN_CASE_SINGLE_LIST_MEMBER2"}}, &int32_ret); + s = db.ZAdd("GP9_KEY3_SCAN_CASE_SINGLE_ZSET", {{1, "GP9_SCAN_CASE_SINGLE_LIST_MEMBER3"}}, &int32_ret); + s = db.ZAdd("GP9_KEY4_SCAN_CASE_SINGLE_ZSET", {{1, "GP9_SCAN_CASE_SINGLE_LIST_MEMBER4"}}, &int32_ret); + s = db.ZAdd("GP9_KEY5_SCAN_CASE_SINGLE_ZSET", {{1, "GP9_SCAN_CASE_SINGLE_LIST_MEMBER5"}}, &int32_ret); + s = db.ZAdd("GP9_KEY6_SCAN_CASE_SINGLE_ZSET", {{1, "GP9_SCAN_CASE_SINGLE_LIST_MEMBER6"}}, &int32_ret); + delete_keys.emplace_back("GP9_KEY1_SCAN_CASE_SINGLE_ZSET"); + delete_keys.emplace_back("GP9_KEY2_SCAN_CASE_SINGLE_ZSET"); + delete_keys.emplace_back("GP9_KEY3_SCAN_CASE_SINGLE_ZSET"); + delete_keys.emplace_back("GP9_KEY4_SCAN_CASE_SINGLE_ZSET"); + delete_keys.emplace_back("GP9_KEY5_SCAN_CASE_SINGLE_ZSET"); + delete_keys.emplace_back("GP9_KEY6_SCAN_CASE_SINGLE_ZSET"); + + keys.clear(); + cursor = 0; + cursor = db.Scan(DataType::kZSets, cursor, "*", 2, &keys); + ASSERT_EQ(cursor, 2); + ASSERT_EQ(keys.size(), 2); + ASSERT_EQ(keys[0], "GP9_KEY1_SCAN_CASE_SINGLE_ZSET"); + ASSERT_EQ(keys[1], "GP9_KEY2_SCAN_CASE_SINGLE_ZSET"); + + keys.clear(); + cursor = db.Scan(DataType::kZSets, cursor, "*", 2, &keys); + ASSERT_EQ(cursor, 4); + ASSERT_EQ(keys.size(), 2); + ASSERT_EQ(keys[0], "GP9_KEY3_SCAN_CASE_SINGLE_ZSET"); + ASSERT_EQ(keys[1], "GP9_KEY4_SCAN_CASE_SINGLE_ZSET"); + + keys.clear(); + cursor = db.Scan(DataType::kZSets, cursor, "*", 2, &keys); + ASSERT_EQ(cursor, 0); + ASSERT_EQ(keys.size(), 2); + 
ASSERT_EQ(keys[0], "GP9_KEY5_SCAN_CASE_SINGLE_ZSET"); + ASSERT_EQ(keys[1], "GP9_KEY6_SCAN_CASE_SINGLE_ZSET"); + + del_num = db.Del(delete_keys); + ASSERT_EQ(del_num, 30); + sleep(2); + db.Compact(DataType::kAll, true); + + // ***************** Group 10 Test ***************** + delete_keys.clear(); + // String + s = db.Set("GP10_KEY1_SCAN_CASE_SINGLE_STRING", "GP10_SCAN_CASE_SINGLE_STRING_VALUE1"); + s = db.Set("GP10_KEY2_SCAN_CASE_SINGLE_STRING", "GP10_SCAN_CASE_SINGLE_STRING_VALUE2"); + s = db.Set("GP10_KEY3_SCAN_CASE_SINGLE_STRING", "GP10_SCAN_CASE_SINGLE_STRING_VALUE3"); + s = db.Set("GP10_KEY4_SCAN_CASE_SINGLE_STRING", "GP10_SCAN_CASE_SINGLE_STRING_VALUE4"); + s = db.Set("GP10_KEY5_SCAN_CASE_SINGLE_STRING", "GP10_SCAN_CASE_SINGLE_STRING_VALUE5"); + s = db.Set("GP10_KEY6_SCAN_CASE_SINGLE_STRING", "GP10_SCAN_CASE_SINGLE_STRING_VALUE6"); + delete_keys.emplace_back("GP10_KEY1_SCAN_CASE_SINGLE_STRING"); + delete_keys.emplace_back("GP10_KEY2_SCAN_CASE_SINGLE_STRING"); + delete_keys.emplace_back("GP10_KEY3_SCAN_CASE_SINGLE_STRING"); + delete_keys.emplace_back("GP10_KEY4_SCAN_CASE_SINGLE_STRING"); + delete_keys.emplace_back("GP10_KEY5_SCAN_CASE_SINGLE_STRING"); + delete_keys.emplace_back("GP10_KEY6_SCAN_CASE_SINGLE_STRING"); + + // Hash + s = db.HSet("GP10_KEY1_SCAN_CASE_SINGLE_HASH", "GP10_SCAN_CASE_SINGLE_HASH_FIELD1", + "GP10_SCAN_CASE_SINGLE_HASH_VALUE1", &int32_ret); + s = db.HSet("GP10_KEY2_SCAN_CASE_SINGLE_HASH", "GP10_SCAN_CASE_SINGLE_HASH_FIELD2", + "GP10_SCAN_CASE_SINGLE_HASH_VALUE2", &int32_ret); + s = db.HSet("GP10_KEY3_SCAN_CASE_SINGLE_HASH", "GP10_SCAN_CASE_SINGLE_HASH_FIELD3", + "GP10_SCAN_CASE_SINGLE_HASH_VALUE3", &int32_ret); + s = db.HSet("GP10_KEY4_SCAN_CASE_SINGLE_HASH", "GP10_SCAN_CASE_SINGLE_HASH_FIELD4", + "GP10_SCAN_CASE_SINGLE_HASH_VALUE4", &int32_ret); + s = db.HSet("GP10_KEY5_SCAN_CASE_SINGLE_HASH", "GP10_SCAN_CASE_SINGLE_HASH_FIELD5", + "GP10_SCAN_CASE_SINGLE_HASH_VALUE5", &int32_ret); + s = db.HSet("GP10_KEY6_SCAN_CASE_SINGLE_HASH", "GP10_SCAN_CASE_SINGLE_HASH_FIELD6", + "GP10_SCAN_CASE_SINGLE_HASH_VALUE6", &int32_ret); + delete_keys.emplace_back("GP10_KEY1_SCAN_CASE_SINGLE_HASH"); + delete_keys.emplace_back("GP10_KEY2_SCAN_CASE_SINGLE_HASH"); + delete_keys.emplace_back("GP10_KEY3_SCAN_CASE_SINGLE_HASH"); + delete_keys.emplace_back("GP10_KEY4_SCAN_CASE_SINGLE_HASH"); + delete_keys.emplace_back("GP10_KEY5_SCAN_CASE_SINGLE_HASH"); + delete_keys.emplace_back("GP10_KEY6_SCAN_CASE_SINGLE_HASH"); + + // Set + s = db.SAdd("GP10_KEY1_SCAN_CASE_SINGLE_SET", {"GP10_SCAN_CASE_SINGLE_SET_MEMBER1"}, &int32_ret); + s = db.SAdd("GP10_KEY2_SCAN_CASE_SINGLE_SET", {"GP10_SCAN_CASE_SINGLE_SET_MEMBER2"}, &int32_ret); + s = db.SAdd("GP10_KEY3_SCAN_CASE_SINGLE_SET", {"GP10_SCAN_CASE_SINGLE_SET_MEMBER3"}, &int32_ret); + s = db.SAdd("GP10_KEY4_SCAN_CASE_SINGLE_SET", {"GP10_SCAN_CASE_SINGLE_SET_MEMBER4"}, &int32_ret); + s = db.SAdd("GP10_KEY5_SCAN_CASE_SINGLE_SET", {"GP10_SCAN_CASE_SINGLE_SET_MEMBER5"}, &int32_ret); + s = db.SAdd("GP10_KEY6_SCAN_CASE_SINGLE_SET", {"GP10_SCAN_CASE_SINGLE_SET_MEMBER6"}, &int32_ret); + delete_keys.emplace_back("GP10_KEY1_SCAN_CASE_SINGLE_SET"); + delete_keys.emplace_back("GP10_KEY2_SCAN_CASE_SINGLE_SET"); + delete_keys.emplace_back("GP10_KEY3_SCAN_CASE_SINGLE_SET"); + delete_keys.emplace_back("GP10_KEY4_SCAN_CASE_SINGLE_SET"); + delete_keys.emplace_back("GP10_KEY5_SCAN_CASE_SINGLE_SET"); + delete_keys.emplace_back("GP10_KEY6_SCAN_CASE_SINGLE_SET"); + + // List + s = db.LPush("GP10_KEY1_SCAN_CASE_SINGLE_LIST", {"GP10_SCAN_CASE_SINGLE_LIST_NODE1"}, 
&uint64_ret); + s = db.LPush("GP10_KEY2_SCAN_CASE_SINGLE_LIST", {"GP10_SCAN_CASE_SINGLE_LIST_NODE2"}, &uint64_ret); + s = db.LPush("GP10_KEY3_SCAN_CASE_SINGLE_LIST", {"GP10_SCAN_CASE_SINGLE_LIST_NODE3"}, &uint64_ret); + s = db.LPush("GP10_KEY4_SCAN_CASE_SINGLE_LIST", {"GP10_SCAN_CASE_SINGLE_LIST_NODE4"}, &uint64_ret); + s = db.LPush("GP10_KEY5_SCAN_CASE_SINGLE_LIST", {"GP10_SCAN_CASE_SINGLE_LIST_NODE5"}, &uint64_ret); + s = db.LPush("GP10_KEY6_SCAN_CASE_SINGLE_LIST", {"GP10_SCAN_CASE_SINGLE_LIST_NODE6"}, &uint64_ret); + delete_keys.emplace_back("GP10_KEY1_SCAN_CASE_SINGLE_LIST"); + delete_keys.emplace_back("GP10_KEY2_SCAN_CASE_SINGLE_LIST"); + delete_keys.emplace_back("GP10_KEY3_SCAN_CASE_SINGLE_LIST"); + delete_keys.emplace_back("GP10_KEY4_SCAN_CASE_SINGLE_LIST"); + delete_keys.emplace_back("GP10_KEY5_SCAN_CASE_SINGLE_LIST"); + delete_keys.emplace_back("GP10_KEY6_SCAN_CASE_SINGLE_LIST"); + + // ZSet + s = db.ZAdd("GP10_KEY1_SCAN_CASE_SINGLE_ZSET", {{1, "GP10_SCAN_CASE_SINGLE_LIST_MEMBER1"}}, &int32_ret); + s = db.ZAdd("GP10_KEY2_SCAN_CASE_SINGLE_ZSET", {{1, "GP10_SCAN_CASE_SINGLE_LIST_MEMBER2"}}, &int32_ret); + s = db.ZAdd("GP10_KEY3_SCAN_CASE_SINGLE_ZSET", {{1, "GP10_SCAN_CASE_SINGLE_LIST_MEMBER3"}}, &int32_ret); + s = db.ZAdd("GP10_KEY4_SCAN_CASE_SINGLE_ZSET", {{1, "GP10_SCAN_CASE_SINGLE_LIST_MEMBER4"}}, &int32_ret); + s = db.ZAdd("GP10_KEY5_SCAN_CASE_SINGLE_ZSET", {{1, "GP10_SCAN_CASE_SINGLE_LIST_MEMBER5"}}, &int32_ret); + s = db.ZAdd("GP10_KEY6_SCAN_CASE_SINGLE_ZSET", {{1, "GP10_SCAN_CASE_SINGLE_LIST_MEMBER6"}}, &int32_ret); + delete_keys.emplace_back("GP10_KEY1_SCAN_CASE_SINGLE_ZSET"); + delete_keys.emplace_back("GP10_KEY2_SCAN_CASE_SINGLE_ZSET"); + delete_keys.emplace_back("GP10_KEY3_SCAN_CASE_SINGLE_ZSET"); + delete_keys.emplace_back("GP10_KEY4_SCAN_CASE_SINGLE_ZSET"); + delete_keys.emplace_back("GP10_KEY5_SCAN_CASE_SINGLE_ZSET"); + delete_keys.emplace_back("GP10_KEY6_SCAN_CASE_SINGLE_ZSET"); + + keys.clear(); + cursor = 0; + cursor = db.Scan(DataType::kZSets, cursor, "*", 4, &keys); + ASSERT_EQ(cursor, 4); + ASSERT_EQ(keys.size(), 4); + ASSERT_EQ(keys[0], "GP10_KEY1_SCAN_CASE_SINGLE_ZSET"); + ASSERT_EQ(keys[1], "GP10_KEY2_SCAN_CASE_SINGLE_ZSET"); + ASSERT_EQ(keys[2], "GP10_KEY3_SCAN_CASE_SINGLE_ZSET"); + ASSERT_EQ(keys[3], "GP10_KEY4_SCAN_CASE_SINGLE_ZSET"); + + keys.clear(); + cursor = db.Scan(DataType::kZSets, cursor, "*", 4, &keys); + ASSERT_EQ(cursor, 0); + ASSERT_EQ(keys.size(), 2); + ASSERT_EQ(keys[0], "GP10_KEY5_SCAN_CASE_SINGLE_ZSET"); + ASSERT_EQ(keys[1], "GP10_KEY6_SCAN_CASE_SINGLE_ZSET"); + + del_num = db.Del(delete_keys); + ASSERT_EQ(del_num, 30); + sleep(2); + db.Compact(DataType::kAll, true); + + // ***************** Group 11 Test ***************** + delete_keys.clear(); + // String + s = db.Set("GP11_KEY1_SCAN_CASE_SINGLE_STRING", "GP11_SCAN_CASE_SINGLE_STRING_VALUE1"); + s = db.Set("GP11_KEY2_SCAN_CASE_SINGLE_STRING", "GP11_SCAN_CASE_SINGLE_STRING_VALUE2"); + s = db.Set("GP11_KEY3_SCAN_CASE_SINGLE_STRING", "GP11_SCAN_CASE_SINGLE_STRING_VALUE3"); + s = db.Set("GP11_KEY4_SCAN_CASE_SINGLE_STRING", "GP11_SCAN_CASE_SINGLE_STRING_VALUE4"); + s = db.Set("GP11_KEY5_SCAN_CASE_SINGLE_STRING", "GP11_SCAN_CASE_SINGLE_STRING_VALUE5"); + s = db.Set("GP11_KEY6_SCAN_CASE_SINGLE_STRING", "GP11_SCAN_CASE_SINGLE_STRING_VALUE6"); + delete_keys.emplace_back("GP11_KEY1_SCAN_CASE_SINGLE_STRING"); + delete_keys.emplace_back("GP11_KEY2_SCAN_CASE_SINGLE_STRING"); + delete_keys.emplace_back("GP11_KEY3_SCAN_CASE_SINGLE_STRING"); + delete_keys.emplace_back("GP11_KEY4_SCAN_CASE_SINGLE_STRING"); 
+ delete_keys.emplace_back("GP11_KEY5_SCAN_CASE_SINGLE_STRING"); + delete_keys.emplace_back("GP11_KEY6_SCAN_CASE_SINGLE_STRING"); + + // Hash + s = db.HSet("GP11_KEY1_SCAN_CASE_SINGLE_HASH", "GP11_SCAN_CASE_SINGLE_HASH_FIELD1", + "GP11_SCAN_CASE_SINGLE_HASH_VALUE1", &int32_ret); + s = db.HSet("GP11_KEY2_SCAN_CASE_SINGLE_HASH", "GP11_SCAN_CASE_SINGLE_HASH_FIELD2", + "GP11_SCAN_CASE_SINGLE_HASH_VALUE2", &int32_ret); + s = db.HSet("GP11_KEY3_SCAN_CASE_SINGLE_HASH", "GP11_SCAN_CASE_SINGLE_HASH_FIELD3", + "GP11_SCAN_CASE_SINGLE_HASH_VALUE3", &int32_ret); + s = db.HSet("GP11_KEY4_SCAN_CASE_SINGLE_HASH", "GP11_SCAN_CASE_SINGLE_HASH_FIELD4", + "GP11_SCAN_CASE_SINGLE_HASH_VALUE4", &int32_ret); + s = db.HSet("GP11_KEY5_SCAN_CASE_SINGLE_HASH", "GP11_SCAN_CASE_SINGLE_HASH_FIELD5", + "GP11_SCAN_CASE_SINGLE_HASH_VALUE5", &int32_ret); + s = db.HSet("GP11_KEY6_SCAN_CASE_SINGLE_HASH", "GP11_SCAN_CASE_SINGLE_HASH_FIELD6", + "GP11_SCAN_CASE_SINGLE_HASH_VALUE6", &int32_ret); + delete_keys.emplace_back("GP11_KEY1_SCAN_CASE_SINGLE_HASH"); + delete_keys.emplace_back("GP11_KEY2_SCAN_CASE_SINGLE_HASH"); + delete_keys.emplace_back("GP11_KEY3_SCAN_CASE_SINGLE_HASH"); + delete_keys.emplace_back("GP11_KEY4_SCAN_CASE_SINGLE_HASH"); + delete_keys.emplace_back("GP11_KEY5_SCAN_CASE_SINGLE_HASH"); + delete_keys.emplace_back("GP11_KEY6_SCAN_CASE_SINGLE_HASH"); + + // Set + s = db.SAdd("GP11_KEY1_SCAN_CASE_SINGLE_SET", {"GP11_SCAN_CASE_SINGLE_SET_MEMBER1"}, &int32_ret); + s = db.SAdd("GP11_KEY2_SCAN_CASE_SINGLE_SET", {"GP11_SCAN_CASE_SINGLE_SET_MEMBER2"}, &int32_ret); + s = db.SAdd("GP11_KEY3_SCAN_CASE_SINGLE_SET", {"GP11_SCAN_CASE_SINGLE_SET_MEMBER3"}, &int32_ret); + s = db.SAdd("GP11_KEY4_SCAN_CASE_SINGLE_SET", {"GP11_SCAN_CASE_SINGLE_SET_MEMBER4"}, &int32_ret); + s = db.SAdd("GP11_KEY5_SCAN_CASE_SINGLE_SET", {"GP11_SCAN_CASE_SINGLE_SET_MEMBER5"}, &int32_ret); + s = db.SAdd("GP11_KEY6_SCAN_CASE_SINGLE_SET", {"GP11_SCAN_CASE_SINGLE_SET_MEMBER6"}, &int32_ret); + delete_keys.emplace_back("GP11_KEY1_SCAN_CASE_SINGLE_SET"); + delete_keys.emplace_back("GP11_KEY2_SCAN_CASE_SINGLE_SET"); + delete_keys.emplace_back("GP11_KEY3_SCAN_CASE_SINGLE_SET"); + delete_keys.emplace_back("GP11_KEY4_SCAN_CASE_SINGLE_SET"); + delete_keys.emplace_back("GP11_KEY5_SCAN_CASE_SINGLE_SET"); + delete_keys.emplace_back("GP11_KEY6_SCAN_CASE_SINGLE_SET"); + + // List + s = db.LPush("GP11_KEY1_SCAN_CASE_SINGLE_LIST", {"GP11_SCAN_CASE_SINGLE_LIST_NODE1"}, &uint64_ret); + s = db.LPush("GP11_KEY2_SCAN_CASE_SINGLE_LIST", {"GP11_SCAN_CASE_SINGLE_LIST_NODE2"}, &uint64_ret); + s = db.LPush("GP11_KEY3_SCAN_CASE_SINGLE_LIST", {"GP11_SCAN_CASE_SINGLE_LIST_NODE3"}, &uint64_ret); + s = db.LPush("GP11_KEY4_SCAN_CASE_SINGLE_LIST", {"GP11_SCAN_CASE_SINGLE_LIST_NODE4"}, &uint64_ret); + s = db.LPush("GP11_KEY5_SCAN_CASE_SINGLE_LIST", {"GP11_SCAN_CASE_SINGLE_LIST_NODE5"}, &uint64_ret); + s = db.LPush("GP11_KEY6_SCAN_CASE_SINGLE_LIST", {"GP11_SCAN_CASE_SINGLE_LIST_NODE6"}, &uint64_ret); + delete_keys.emplace_back("GP11_KEY1_SCAN_CASE_SINGLE_LIST"); + delete_keys.emplace_back("GP11_KEY2_SCAN_CASE_SINGLE_LIST"); + delete_keys.emplace_back("GP11_KEY3_SCAN_CASE_SINGLE_LIST"); + delete_keys.emplace_back("GP11_KEY4_SCAN_CASE_SINGLE_LIST"); + delete_keys.emplace_back("GP11_KEY5_SCAN_CASE_SINGLE_LIST"); + delete_keys.emplace_back("GP11_KEY6_SCAN_CASE_SINGLE_LIST"); + + // ZSet + s = db.ZAdd("GP11_KEY1_SCAN_CASE_SINGLE_ZSET", {{1, "GP11_SCAN_CASE_SINGLE_LIST_MEMBER1"}}, &int32_ret); + s = db.ZAdd("GP11_KEY2_SCAN_CASE_SINGLE_ZSET", {{1, "GP11_SCAN_CASE_SINGLE_LIST_MEMBER2"}}, 
&int32_ret); + s = db.ZAdd("GP11_KEY3_SCAN_CASE_SINGLE_ZSET", {{1, "GP11_SCAN_CASE_SINGLE_LIST_MEMBER3"}}, &int32_ret); + s = db.ZAdd("GP11_KEY4_SCAN_CASE_SINGLE_ZSET", {{1, "GP11_SCAN_CASE_SINGLE_LIST_MEMBER4"}}, &int32_ret); + s = db.ZAdd("GP11_KEY5_SCAN_CASE_SINGLE_ZSET", {{1, "GP11_SCAN_CASE_SINGLE_LIST_MEMBER5"}}, &int32_ret); + s = db.ZAdd("GP11_KEY6_SCAN_CASE_SINGLE_ZSET", {{1, "GP11_SCAN_CASE_SINGLE_LIST_MEMBER6"}}, &int32_ret); + delete_keys.emplace_back("GP11_KEY1_SCAN_CASE_SINGLE_ZSET"); + delete_keys.emplace_back("GP11_KEY2_SCAN_CASE_SINGLE_ZSET"); + delete_keys.emplace_back("GP11_KEY3_SCAN_CASE_SINGLE_ZSET"); + delete_keys.emplace_back("GP11_KEY4_SCAN_CASE_SINGLE_ZSET"); + delete_keys.emplace_back("GP11_KEY5_SCAN_CASE_SINGLE_ZSET"); + delete_keys.emplace_back("GP11_KEY6_SCAN_CASE_SINGLE_ZSET"); + + keys.clear(); + cursor = 0; + cursor = db.Scan(DataType::kZSets, cursor, "*", 6, &keys); + ASSERT_EQ(cursor, 0); + ASSERT_EQ(keys.size(), 6); + ASSERT_EQ(keys[0], "GP11_KEY1_SCAN_CASE_SINGLE_ZSET"); + ASSERT_EQ(keys[1], "GP11_KEY2_SCAN_CASE_SINGLE_ZSET"); + ASSERT_EQ(keys[2], "GP11_KEY3_SCAN_CASE_SINGLE_ZSET"); + ASSERT_EQ(keys[3], "GP11_KEY4_SCAN_CASE_SINGLE_ZSET"); + ASSERT_EQ(keys[4], "GP11_KEY5_SCAN_CASE_SINGLE_ZSET"); + ASSERT_EQ(keys[5], "GP11_KEY6_SCAN_CASE_SINGLE_ZSET"); + + del_num = db.Del(delete_keys); + ASSERT_EQ(del_num, 30); + sleep(2); + db.Compact(DataType::kAll, true); + + // ***************** Group 12 Test ***************** + delete_keys.clear(); + // String + s = db.Set("GP12_KEY1_SCAN_CASE_SINGLE_STRING", "GP12_SCAN_CASE_SINGLE_STRING_VALUE1"); + s = db.Set("GP12_KEY2_SCAN_CASE_SINGLE_STRING", "GP12_SCAN_CASE_SINGLE_STRING_VALUE2"); + s = db.Set("GP12_KEY3_SCAN_CASE_SINGLE_STRING", "GP12_SCAN_CASE_SINGLE_STRING_VALUE3"); + s = db.Set("GP12_KEY4_SCAN_CASE_SINGLE_STRING", "GP12_SCAN_CASE_SINGLE_STRING_VALUE4"); + s = db.Set("GP12_KEY5_SCAN_CASE_SINGLE_STRING", "GP12_SCAN_CASE_SINGLE_STRING_VALUE5"); + s = db.Set("GP12_KEY6_SCAN_CASE_SINGLE_STRING", "GP12_SCAN_CASE_SINGLE_STRING_VALUE6"); + delete_keys.emplace_back("GP12_KEY1_SCAN_CASE_SINGLE_STRING"); + delete_keys.emplace_back("GP12_KEY2_SCAN_CASE_SINGLE_STRING"); + delete_keys.emplace_back("GP12_KEY3_SCAN_CASE_SINGLE_STRING"); + delete_keys.emplace_back("GP12_KEY4_SCAN_CASE_SINGLE_STRING"); + delete_keys.emplace_back("GP12_KEY5_SCAN_CASE_SINGLE_STRING"); + delete_keys.emplace_back("GP12_KEY6_SCAN_CASE_SINGLE_STRING"); + + // Hash + s = db.HSet("GP12_KEY1_SCAN_CASE_SINGLE_HASH", "GP12_SCAN_CASE_SINGLE_HASH_FIELD1", + "GP12_SCAN_CASE_SINGLE_HASH_VALUE1", &int32_ret); + s = db.HSet("GP12_KEY2_SCAN_CASE_SINGLE_HASH", "GP12_SCAN_CASE_SINGLE_HASH_FIELD2", + "GP12_SCAN_CASE_SINGLE_HASH_VALUE2", &int32_ret); + s = db.HSet("GP12_KEY3_SCAN_CASE_SINGLE_HASH", "GP12_SCAN_CASE_SINGLE_HASH_FIELD3", + "GP12_SCAN_CASE_SINGLE_HASH_VALUE3", &int32_ret); + s = db.HSet("GP12_KEY4_SCAN_CASE_SINGLE_HASH", "GP12_SCAN_CASE_SINGLE_HASH_FIELD4", + "GP12_SCAN_CASE_SINGLE_HASH_VALUE4", &int32_ret); + s = db.HSet("GP12_KEY5_SCAN_CASE_SINGLE_HASH", "GP12_SCAN_CASE_SINGLE_HASH_FIELD5", + "GP12_SCAN_CASE_SINGLE_HASH_VALUE5", &int32_ret); + s = db.HSet("GP12_KEY6_SCAN_CASE_SINGLE_HASH", "GP12_SCAN_CASE_SINGLE_HASH_FIELD6", + "GP12_SCAN_CASE_SINGLE_HASH_VALUE6", &int32_ret); + delete_keys.emplace_back("GP12_KEY1_SCAN_CASE_SINGLE_HASH"); + delete_keys.emplace_back("GP12_KEY2_SCAN_CASE_SINGLE_HASH"); + delete_keys.emplace_back("GP12_KEY3_SCAN_CASE_SINGLE_HASH"); + delete_keys.emplace_back("GP12_KEY4_SCAN_CASE_SINGLE_HASH"); + 
delete_keys.emplace_back("GP12_KEY5_SCAN_CASE_SINGLE_HASH"); + delete_keys.emplace_back("GP12_KEY6_SCAN_CASE_SINGLE_HASH"); + + // Set + s = db.SAdd("GP12_KEY1_SCAN_CASE_SINGLE_SET", {"GP12_SCAN_CASE_SINGLE_SET_MEMBER1"}, &int32_ret); + s = db.SAdd("GP12_KEY2_SCAN_CASE_SINGLE_SET", {"GP12_SCAN_CASE_SINGLE_SET_MEMBER2"}, &int32_ret); + s = db.SAdd("GP12_KEY3_SCAN_CASE_SINGLE_SET", {"GP12_SCAN_CASE_SINGLE_SET_MEMBER3"}, &int32_ret); + s = db.SAdd("GP12_KEY4_SCAN_CASE_SINGLE_SET", {"GP12_SCAN_CASE_SINGLE_SET_MEMBER4"}, &int32_ret); + s = db.SAdd("GP12_KEY5_SCAN_CASE_SINGLE_SET", {"GP12_SCAN_CASE_SINGLE_SET_MEMBER5"}, &int32_ret); + s = db.SAdd("GP12_KEY6_SCAN_CASE_SINGLE_SET", {"GP12_SCAN_CASE_SINGLE_SET_MEMBER6"}, &int32_ret); + delete_keys.emplace_back("GP12_KEY1_SCAN_CASE_SINGLE_SET"); + delete_keys.emplace_back("GP12_KEY2_SCAN_CASE_SINGLE_SET"); + delete_keys.emplace_back("GP12_KEY3_SCAN_CASE_SINGLE_SET"); + delete_keys.emplace_back("GP12_KEY4_SCAN_CASE_SINGLE_SET"); + delete_keys.emplace_back("GP12_KEY5_SCAN_CASE_SINGLE_SET"); + delete_keys.emplace_back("GP12_KEY6_SCAN_CASE_SINGLE_SET"); + + // List + s = db.LPush("GP12_KEY1_SCAN_CASE_SINGLE_LIST", {"GP12_SCAN_CASE_SINGLE_LIST_NODE1"}, &uint64_ret); + s = db.LPush("GP12_KEY2_SCAN_CASE_SINGLE_LIST", {"GP12_SCAN_CASE_SINGLE_LIST_NODE2"}, &uint64_ret); + s = db.LPush("GP12_KEY3_SCAN_CASE_SINGLE_LIST", {"GP12_SCAN_CASE_SINGLE_LIST_NODE3"}, &uint64_ret); + s = db.LPush("GP12_KEY4_SCAN_CASE_SINGLE_LIST", {"GP12_SCAN_CASE_SINGLE_LIST_NODE4"}, &uint64_ret); + s = db.LPush("GP12_KEY5_SCAN_CASE_SINGLE_LIST", {"GP12_SCAN_CASE_SINGLE_LIST_NODE5"}, &uint64_ret); + s = db.LPush("GP12_KEY6_SCAN_CASE_SINGLE_LIST", {"GP12_SCAN_CASE_SINGLE_LIST_NODE6"}, &uint64_ret); + delete_keys.emplace_back("GP12_KEY1_SCAN_CASE_SINGLE_LIST"); + delete_keys.emplace_back("GP12_KEY2_SCAN_CASE_SINGLE_LIST"); + delete_keys.emplace_back("GP12_KEY3_SCAN_CASE_SINGLE_LIST"); + delete_keys.emplace_back("GP12_KEY4_SCAN_CASE_SINGLE_LIST"); + delete_keys.emplace_back("GP12_KEY5_SCAN_CASE_SINGLE_LIST"); + delete_keys.emplace_back("GP12_KEY6_SCAN_CASE_SINGLE_LIST"); + + // ZSet + s = db.ZAdd("GP12_KEY1_SCAN_CASE_SINGLE_ZSET", {{1, "GP12_SCAN_CASE_SINGLE_LIST_MEMBER1"}}, &int32_ret); + s = db.ZAdd("GP12_KEY2_SCAN_CASE_SINGLE_ZSET", {{1, "GP12_SCAN_CASE_SINGLE_LIST_MEMBER2"}}, &int32_ret); + s = db.ZAdd("GP12_KEY3_SCAN_CASE_SINGLE_ZSET", {{1, "GP12_SCAN_CASE_SINGLE_LIST_MEMBER3"}}, &int32_ret); + s = db.ZAdd("GP12_KEY4_SCAN_CASE_SINGLE_ZSET", {{1, "GP12_SCAN_CASE_SINGLE_LIST_MEMBER4"}}, &int32_ret); + s = db.ZAdd("GP12_KEY5_SCAN_CASE_SINGLE_ZSET", {{1, "GP12_SCAN_CASE_SINGLE_LIST_MEMBER5"}}, &int32_ret); + s = db.ZAdd("GP12_KEY6_SCAN_CASE_SINGLE_ZSET", {{1, "GP12_SCAN_CASE_SINGLE_LIST_MEMBER6"}}, &int32_ret); + delete_keys.emplace_back("GP12_KEY1_SCAN_CASE_SINGLE_ZSET"); + delete_keys.emplace_back("GP12_KEY2_SCAN_CASE_SINGLE_ZSET"); + delete_keys.emplace_back("GP12_KEY3_SCAN_CASE_SINGLE_ZSET"); + delete_keys.emplace_back("GP12_KEY4_SCAN_CASE_SINGLE_ZSET"); + delete_keys.emplace_back("GP12_KEY5_SCAN_CASE_SINGLE_ZSET"); + delete_keys.emplace_back("GP12_KEY6_SCAN_CASE_SINGLE_ZSET"); + + keys.clear(); + cursor = 0; + cursor = db.Scan(DataType::kZSets, cursor, "*", 10, &keys); + ASSERT_EQ(cursor, 0); + ASSERT_EQ(keys.size(), 6); + ASSERT_EQ(keys[0], "GP12_KEY1_SCAN_CASE_SINGLE_ZSET"); + ASSERT_EQ(keys[1], "GP12_KEY2_SCAN_CASE_SINGLE_ZSET"); + ASSERT_EQ(keys[2], "GP12_KEY3_SCAN_CASE_SINGLE_ZSET"); + ASSERT_EQ(keys[3], "GP12_KEY4_SCAN_CASE_SINGLE_ZSET"); + ASSERT_EQ(keys[4], 
"GP12_KEY5_SCAN_CASE_SINGLE_ZSET"); + ASSERT_EQ(keys[5], "GP12_KEY6_SCAN_CASE_SINGLE_ZSET"); + + del_num = db.Del(delete_keys); + ASSERT_EQ(del_num, 30); + sleep(2); + db.Compact(DataType::kAll, true); +} + +// Expire +TEST_F(KeysTest, ExpireTest) { + std::string value; + std::map type_status; + int32_t ret; + + // ***************** Group 1 Test ***************** + // Strings + s = db.Set("GP1_EXPIRE_KEY", "VALUE"); + ASSERT_TRUE(s.ok()); + // Hashes + s = db.HSet("GP1_EXPIRE_HASH_KEY", "FIELD", "VALUE", &ret); + ASSERT_TRUE(s.ok()); + + // Sets + s = db.SAdd("GP1_EXPIRE_SET_KEY", {"MEMBER"}, &ret); + ASSERT_TRUE(s.ok()); + + // Lists + uint64_t llen; + s = db.RPush("GP1_EXPIRE_LIST_KEY", {"NODE"}, &llen); + ASSERT_TRUE(s.ok()); + + // Zsets + s = db.ZAdd("GP1_EXPIRE_ZSET_KEY", {{1, "MEMBER"}}, &ret); + ASSERT_TRUE(s.ok()); + + ret = db.Expire("GP1_EXPIRE_KEY", 1); + ASSERT_EQ(ret, 1); + ret = db.Expire("GP1_EXPIRE_HASH_KEY", 1); + ASSERT_EQ(ret, 1); + ret = db.Expire("GP1_EXPIRE_SET_KEY", 1); + ASSERT_EQ(ret, 1); + ret = db.Expire("GP1_EXPIRE_LIST_KEY", 1); + ASSERT_EQ(ret, 1); + ret = db.Expire("GP1_EXPIRE_ZSET_KEY", 1); + ASSERT_EQ(ret, 1); + std::this_thread::sleep_for(std::chrono::milliseconds(2000)); + + // Strings + s = db.Get("GP1_EXPIRE_KEY", &value); + ASSERT_TRUE(s.IsNotFound()); + // Hashes + s = db.HGet("GP1_EXPIRE_HASH_KEY", "EXPIRE_FIELD", &value); + ASSERT_TRUE(s.IsNotFound()); + + // Sets + s = db.SCard("GP1_EXPIRE_SET_KEY", &ret); + ASSERT_TRUE(s.IsNotFound()); + + // Lists + s = db.LLen("GP1_EXPIRE_LIST_KEY", &llen); + ASSERT_TRUE(s.IsNotFound()); + + // ZSets + s = db.ZCard("GP1_EXPIRE_ZSET_KEY", &ret); + ASSERT_TRUE(s.IsNotFound()); + + // ***************** Group 2 Test ***************** + // Strings + s = db.Set("GP2_EXPIRE_STRING_KEY", "VALUE"); + ASSERT_TRUE(s.ok()); + ASSERT_TRUE(make_expired(&db, "GP2_EXPIRE_STRING_KEY")); + + type_status.clear(); + ret = db.Expire("GP2_EXPIRE_STRING_KEY", 1); + ASSERT_EQ(ret, 0); + // Hashes + s = db.HSet("GP2_EXPIRE_HASHES_KEY", "FIELD", "VALUE", &ret); + ASSERT_TRUE(s.ok()); + ASSERT_TRUE(make_expired(&db, "GP2_EXPIRE_HASHES_KEY")); + + type_status.clear(); + ret = db.Expire("GP2_EXPIRE_HASHES_KEY", 1); + ASSERT_EQ(ret, 0); + + // Sets + s = db.SAdd("GP2_EXPIRE_SETS_KEY", {"MEMBER"}, &ret); + ASSERT_TRUE(s.ok()); + ASSERT_TRUE(make_expired(&db, "GP2_EXPIRE_SETS_KEY")); + + type_status.clear(); + ret = db.Expire("GP2_EXPIRE_SETS_KEY", 1); + ASSERT_EQ(ret, 0); + + // Lists + s = db.RPush("GP2_EXPIRE_LISTS_KEY", {"NODE"}, &llen); + ASSERT_TRUE(s.ok()); + ASSERT_TRUE(make_expired(&db, "GP2_EXPIRE_LISTS_KEY")); + + type_status.clear(); + ret = db.Expire("GP2_EXPIRE_LISTS_KEY", 1); + ASSERT_EQ(ret, 0); + + // Zsets + s = db.ZAdd("GP2_EXPIRE_ZSETS_KEY", {{1, "MEMBER"}}, &ret); + ASSERT_TRUE(s.ok()); + ASSERT_TRUE(make_expired(&db, "GP2_EXPIRE_ZSETS_KEY")); + + type_status.clear(); + ret = db.Expire("GP2_EXPIRE_ZSETS_KEY", 1); + ASSERT_EQ(ret, 0); + + // ***************** Group 3 Test ***************** + // Strings + s = db.Set("GP3_EXPIRE_STRING_KEY", "VALUE"); + ASSERT_TRUE(s.ok()); + int64_t res = 0; + res = db.Del({"GP3_EXPIRE_STRING_KEY"}); + ASSERT_EQ(res, 1); + + type_status.clear(); + ret = db.Expire("GP3_EXPIRE_STRING_KEY", 1); + ASSERT_EQ(ret, 0); + // Hashes + s = db.HSet("GP3_EXPIRE_HASHES_KEY", "FIELD", "VALUE", &ret); + ASSERT_TRUE(s.ok()); + s = db.HDel("GP3_EXPIRE_HASHES_KEY", {"FIELD"}, &ret); + ASSERT_TRUE(s.ok()); + type_status.clear(); + ret = db.Expire("GP3_EXPIRE_HASHES_KEY", 1); + ASSERT_EQ(ret, 0); + + // Sets + 
s = db.SAdd("GP3_EXPIRE_SETS_KEY", {"MEMBER"}, &ret); + ASSERT_TRUE(s.ok()); + s = db.SRem("GP3_EXPIRE_SETS_KEY", {"MEMBER"}, &ret); + ASSERT_TRUE(s.ok()); + + type_status.clear(); + ret = db.Expire("GP3_EXPIRE_SETS_KEY", 1); + ASSERT_EQ(ret, 0); + + // Lists + s = db.RPush("GP3_EXPIRE_LISTS_KEY", {"NODE"}, &llen); + ASSERT_TRUE(s.ok()); + std::vector elements; + s = db.LPop("GP3_EXPIRE_LISTS_KEY", 1,&elements); + ASSERT_TRUE(s.ok()); + + type_status.clear(); + ret = db.Expire("GP3_EXPIRE_LISTS_KEY", 1); + ret = db.Expire("GP3_EXPIRE_LISTS_KEY", 1); + LOG(WARNING) << "ret: " << ret; + for (const auto& ts : type_status) { + LOG(WARNING) << "type: " << storage::DataTypeStrings[static_cast(ts.first)] << " status: " << ts.second.ToString(); + } + ASSERT_EQ(ret, 0); + + // Zsets + s = db.ZAdd("GP3_EXPIRE_ZSETS_KEY", {{1, "MEMBER"}}, &ret); + ASSERT_TRUE(s.ok()); + s = db.ZRem("GP3_EXPIRE_ZSETS_KEY", {"MEMBER"}, &ret); + ASSERT_TRUE(s.ok()); + + type_status.clear(); + ret = db.Expire("GP3_EXPIRE_ZSETS_KEY", 1); + ASSERT_EQ(ret, 0); +} + +// Del +TEST_F(KeysTest, DelTest) { + int32_t ret; + std::string value; + std::map type_status; + std::vector keys{"DEL_KEY"}; + + // Strings + s = db.Set("DEL_KEY", "VALUE"); + ASSERT_TRUE(s.ok()); + ret = db.Del(keys); + ASSERT_EQ(ret, 1); + + // Strings + s = db.Get("DEL_KEY", &value); + ASSERT_TRUE(s.IsNotFound()); +} + +// Exists +TEST_F(KeysTest, ExistsTest) { + int32_t ret; + uint64_t llen; + std::map type_status; + std::vector keys{"EXISTS_KEY"}; + + // Strings + s = db.Set("EXISTS_KEY", "VALUE"); + ASSERT_TRUE(s.ok()); + ret = db.Exists(keys); + ASSERT_EQ(ret, 1); +} + +// Expireat +TEST_F(KeysTest, ExpireatTest) { + // If the key does not exist + std::map type_status; + int32_t ret = db.Expireat("EXPIREAT_KEY", 0); + ASSERT_EQ(ret, 0); + + // Strings + std::string value; + s = db.Set("EXPIREAT_KEY", "VALUE"); + ASSERT_TRUE(s.ok()); + + pstd::TimeType unix_time = pstd::NowMillis(); + int64_t timestamp = unix_time + 1; + ret = db.Expireat("EXPIREAT_KEY", timestamp); + ASSERT_EQ(ret, 1); + + std::this_thread::sleep_for(std::chrono::milliseconds(2000)); + // Strings + s = db.Get("EXPIREAT_KEY", &value); + ASSERT_TRUE(s.IsNotFound()); + + // Expireat key 0 + s = db.Set("EXPIREAT_KEY", "VALUE"); + ASSERT_TRUE(s.ok()); + + ret = db.Expireat("EXPIREAT_KEY", 0); + ASSERT_EQ(ret, 1); + + // Strings + s = db.Get("EXPIREAT_KEY", &value); + ASSERT_TRUE(s.IsNotFound()); +} + +// Persist +TEST_F(KeysTest, PersistTest) { + // If the key does not exist + std::map type_status; + int32_t ret = db.Persist("EXPIREAT_KEY"); + ASSERT_EQ(ret, 0); + + // If the key does not have an associated timeout + // Strings + std::string value; + s = db.Set("PERSIST_KEY", "VALUE"); + ASSERT_TRUE(s.ok()); + + ret = db.Persist("PERSIST_KEY"); + ASSERT_EQ(ret, 0); + + // If the timeout was set + ret = db.Expire("PERSIST_KEY", 1000); + ASSERT_EQ(ret, 1); + ret = db.Persist("PERSIST_KEY"); + ASSERT_EQ(ret, 1); + + int64_t ttl_ret; + ttl_ret = db.TTL("PERSIST_KEY"); +} + +// TTL +TEST_F(KeysTest, TTLTest) { + // If the key does not exist + std::map type_status; + int64_t ttl_ret; + ttl_ret = db.TTL("TTL_KEY"); + + // If the key does not have an associated timeout + // Strings + std::string value; + int32_t ret = 0; + s = db.Set("TTL_KEY", "VALUE"); + ASSERT_TRUE(s.ok()); + + ttl_ret = db.TTL("TTL_KEY"); + + // If the timeout was set + ret = db.Expire("TTL_KEY", 10); + ASSERT_EQ(ret, 1); + ttl_ret = db.TTL("TTL_KEY"); +} + + +int main(int argc, char** argv) { + if (!pstd::FileExists("./log")) { 
+ pstd::CreatePath("./log"); + } + FLAGS_log_dir = "./log"; + FLAGS_minloglevel = 0; + FLAGS_max_log_size = 1800; + FLAGS_logbufsecs = 0; + ::google::InitGoogleLogging("keys_test"); + ::testing::InitGoogleTest(&argc, argv); + return RUN_ALL_TESTS(); +} diff --git a/tools/pika_migrate/src/storage/tests/kv_format_test.cc b/tools/pika_migrate/src/storage/tests/kv_format_test.cc new file mode 100644 index 0000000000..0bf8b92af7 --- /dev/null +++ b/tools/pika_migrate/src/storage/tests/kv_format_test.cc @@ -0,0 +1,120 @@ +// Copyright (c) 2023-present, Qihoo, Inc. All rights reserved. +// This source code is licensed under the BSD-style license found in the +// LICENSE file in the root directory of this source tree. An additional grant +// of patent rights can be found in the PATENTS file in the same directory. + +#include +#include + +#include +#include "glog/logging.h" + +#include "src/debug.h" +#include "src/coding.h" +#include "src/base_key_format.h" +#include "src/base_data_key_format.h" +#include "src/zsets_data_key_format.h" +#include "src/lists_data_key_format.h" +#include "storage/storage_define.h" + +using namespace storage; + +TEST(KVFormatTest, BaseKeyFormat) { + rocksdb::Slice slice_key("\u0000\u0001abc\u0000", 6); + BaseKey bk(slice_key); + + rocksdb::Slice slice_enc = bk.Encode(); + std::string expect_enc(8, '\0'); + expect_enc.append("\u0000\u0001\u0001abc\u0000\u0001\u0000\u0000", 10); + expect_enc.append(16, '\0'); + ASSERT_EQ(slice_enc, Slice(expect_enc)); + + ParsedBaseKey pbk(slice_enc); + ASSERT_EQ(pbk.Key(), slice_key); +} + +TEST(KVFormatTest, BaseDataKeyFormat) { + rocksdb::Slice slice_key("\u0000\u0001base_data_key\u0000", 16); + rocksdb::Slice slice_data("\u0000\u0001data\u0000", 7); + uint64_t version = 1701848429; + + BaseDataKey bdk(slice_key, version, slice_data); + rocksdb::Slice seek_key_enc = bdk.EncodeSeekKey(); + std::string expect_enc(8, '\0'); + expect_enc.append("\u0000\u0001\u0001base_data_key\u0000\u0001\u0000\u0000", 20); + char dst[9]; + EncodeFixed64(dst, version); + expect_enc.append(dst, 8); + expect_enc.append("\u0000\u0001data\u0000", 7); + ASSERT_EQ(seek_key_enc, Slice(expect_enc)); + + rocksdb::Slice key_enc = bdk.Encode(); + expect_enc.append(16, '\0'); + ASSERT_EQ(key_enc, Slice(expect_enc)); + + ParsedBaseDataKey pbmk(key_enc); + ASSERT_EQ(pbmk.Key(), slice_key); + ASSERT_EQ(pbmk.Data(), slice_data); + ASSERT_EQ(pbmk.Version(), version); +} + +TEST(KVFormatTest, ZsetsScoreKeyFormat) { + rocksdb::Slice slice_key("\u0000\u0001base_data_key\u0000", 16); + rocksdb::Slice slice_data("\u0000\u0001data\u0000", 7); + uint64_t version = 1701848429; + double score = -3.5; + + ZSetsScoreKey zsk(slice_key, version, score, slice_data); + // reserve + std::string expect_enc(8, '\0'); + // user_key + expect_enc.append("\u0000\u0001\u0001base_data_key\u0000\u0001\u0000\u0000", 20); + // version + char dst[9]; + EncodeFixed64(dst, version); + expect_enc.append(dst, 8); + // score + const void* addr_score = reinterpret_cast(&score); + EncodeFixed64(dst, *reinterpret_cast(addr_score)); + expect_enc.append(dst, 8); + // data + expect_enc.append("\u0000\u0001data\u0000", 7); + // reserve + expect_enc.append(16, '\0'); + rocksdb::Slice key_enc = zsk.Encode(); + ASSERT_EQ(key_enc, Slice(expect_enc)); + + ParsedZSetsScoreKey pzsk(key_enc); + ASSERT_EQ(pzsk.key(), slice_key); + ASSERT_EQ(pzsk.member(), slice_data); + ASSERT_EQ(pzsk.Version(), version); + ASSERT_EQ(pzsk.score(), score); +} + +TEST(KVFormatTest, ListDataKeyFormat) { + rocksdb::Slice 
slice_key("\u0000\u0001list_data_key\u0000", 16); + uint64_t version = 1701848429; + uint64_t index = 10; + + ListsDataKey ldk(slice_key, version, index); + rocksdb::Slice key_enc = ldk.Encode(); + std::string expect_enc(8, '\0'); + expect_enc.append("\u0000\u0001\u0001list_data_key\u0000\u0001\u0000\u0000", 20); + char dst[9]; + EncodeFixed64(dst, version); + expect_enc.append(dst, 8); + EncodeFixed64(dst, index); + expect_enc.append(dst, 8); + expect_enc.append(16, '\0'); + ASSERT_EQ(key_enc, Slice(expect_enc)); + + ParsedListsDataKey pldk(key_enc); + ASSERT_EQ(pldk.key(), slice_key); + ASSERT_EQ(pldk.index(), index); + ASSERT_EQ(pldk.Version(), version); +} + +int main(int argc, char** argv) { + ::testing::InitGoogleTest(&argc, argv); + return RUN_ALL_TESTS(); +} diff --git a/tools/pika_migrate/src/storage/tests/lists_filter_test.cc b/tools/pika_migrate/src/storage/tests/lists_filter_test.cc new file mode 100644 index 0000000000..5197260d2c --- /dev/null +++ b/tools/pika_migrate/src/storage/tests/lists_filter_test.cc @@ -0,0 +1,251 @@ +// Copyright (c) 2017-present, Qihoo, Inc. All rights reserved. +// This source code is licensed under the BSD-style license found in the +// LICENSE file in the root directory of this source tree. An additional grant +// of patent rights can be found in the PATENTS file in the same directory. + +#include +#include +#include + +#include "src/base_key_format.h" +#include "src/lists_filter.h" +#include "src/redis.h" +#include "src/zsets_filter.h" +#include "storage/storage.h" + +using namespace storage; +using storage::EncodeFixed64; +using storage::ListsDataFilter; +using storage::ListsDataKey; +using storage::ListsMetaValue; +using storage::Slice; +using storage::Status; + +class ListsFilterTest : public ::testing::Test { + public: + ListsFilterTest() { + std::string db_path = "./db/list_meta"; + if (access(db_path.c_str(), F_OK) != 0) { + mkdir(db_path.c_str(), 0755); + } + options.create_if_missing = true; + s = rocksdb::DB::Open(options, db_path, &meta_db); + if (s.ok()) { + // create column family + rocksdb::ColumnFamilyHandle* cf; + s = meta_db->CreateColumnFamily(rocksdb::ColumnFamilyOptions(), "data_cf", &cf); + delete cf; + delete meta_db; + } + + rocksdb::ColumnFamilyOptions meta_cf_ops(options); + rocksdb::ColumnFamilyOptions data_cf_ops(options); + + // Meta CF + column_families.emplace_back(rocksdb::kDefaultColumnFamilyName, meta_cf_ops); + // Data CF + column_families.emplace_back("data_cf", data_cf_ops); + + s = rocksdb::DB::Open(options, db_path, column_families, &handles, &meta_db); + } + ~ListsFilterTest() override = default; + + void SetUp() override {} + void TearDown() override { + for (auto handle : handles) { + delete handle; + } + delete meta_db; + } + + storage::Options options; + rocksdb::DB* meta_db; + storage::Status s; + + std::vector column_families; + std::vector handles; +}; + +// Data Filter +TEST_F(ListsFilterTest, DataFilterTest) { + char str[8]; + char buf[4]; + bool filter_result; + bool value_changed; + uint64_t version = 0; + std::string new_value; + + // Timeout timestamp is not set, the version is valid. 
+ auto lists_data_filter1 = std::make_unique<ListsDataFilter>(meta_db, &handles, DataType::kLists); + ASSERT_TRUE(lists_data_filter1 != nullptr); + + EncodeFixed64(str, 1); + ListsMetaValue lists_meta_value1(Slice(str, sizeof(uint64_t))); + version = lists_meta_value1.UpdateVersion(); + + std::string user_key = "FILTER_TEST_KEY"; + BaseMetaKey bmk(user_key); + s = meta_db->Put(rocksdb::WriteOptions(), handles[0], bmk.Encode(), lists_meta_value1.Encode()); + ASSERT_TRUE(s.ok()); + + ListsDataKey lists_data_key1(user_key, version, 1); + filter_result = + lists_data_filter1->Filter(0, lists_data_key1.Encode(), "FILTER_TEST_VALUE", &new_value, &value_changed); + ASSERT_EQ(filter_result, false); + s = meta_db->Delete(rocksdb::WriteOptions(), handles[0], "FILTER_TEST_KEY"); + ASSERT_TRUE(s.ok()); + + // Timeout timestamp is set, but not expired. + auto lists_data_filter2 = std::make_unique<ListsDataFilter>(meta_db, &handles, DataType::kLists); + ASSERT_TRUE(lists_data_filter2 != nullptr); + + EncodeFixed64(str, 1); + ListsMetaValue lists_meta_value2(Slice(str, sizeof(uint64_t))); + version = lists_meta_value2.UpdateVersion(); + lists_meta_value2.SetRelativeTimeInMillsec(1); + s = meta_db->Put(rocksdb::WriteOptions(), handles[0], bmk.Encode(), lists_meta_value2.Encode()); + ASSERT_TRUE(s.ok()); + ListsDataKey lists_data_key2("FILTER_TEST_KEY", version, 1); + filter_result = + lists_data_filter2->Filter(0, lists_data_key2.Encode(), "FILTER_TEST_VALUE", &new_value, &value_changed); + ASSERT_EQ(filter_result, false); + s = meta_db->Delete(rocksdb::WriteOptions(), handles[0], bmk.Encode()); + ASSERT_TRUE(s.ok()); + + // Timeout timestamp is set, already expired. + auto lists_data_filter3 = std::make_unique<ListsDataFilter>(meta_db, &handles, DataType::kLists); + ASSERT_TRUE(lists_data_filter3 != nullptr); + + EncodeFixed64(str, 1); + ListsMetaValue lists_meta_value3(Slice(str, sizeof(uint64_t))); + version = lists_meta_value3.UpdateVersion(); + lists_meta_value3.SetRelativeTimeInMillsec(1); + s = meta_db->Put(rocksdb::WriteOptions(), handles[0], bmk.Encode(), lists_meta_value3.Encode()); + ASSERT_TRUE(s.ok()); + std::this_thread::sleep_for(std::chrono::milliseconds(2000)); + ListsDataKey lists_data_key3("FILTER_TEST_KEY", version, 1); + filter_result = + lists_data_filter3->Filter(0, lists_data_key3.Encode(), "FILTER_TEST_VALUE", &new_value, &value_changed); + ASSERT_EQ(filter_result, true); + s = meta_db->Delete(rocksdb::WriteOptions(), handles[0], bmk.Encode()); + ASSERT_TRUE(s.ok()); + + // Timeout timestamp is not set, the version is invalid + auto lists_data_filter4 = std::make_unique<ListsDataFilter>(meta_db, &handles, DataType::kLists); + ASSERT_TRUE(lists_data_filter4 != nullptr); + + EncodeFixed64(str, 1); + ListsMetaValue lists_meta_value4(Slice(str, sizeof(uint64_t))); + version = lists_meta_value4.UpdateVersion(); + s = meta_db->Put(rocksdb::WriteOptions(), handles[0], bmk.Encode(), lists_meta_value4.Encode()); + ASSERT_TRUE(s.ok()); + ListsDataKey lists_data_key4("FILTER_TEST_KEY", version, 1); + version = lists_meta_value4.UpdateVersion(); + s = meta_db->Put(rocksdb::WriteOptions(), handles[0], bmk.Encode(), lists_meta_value4.Encode()); + ASSERT_TRUE(s.ok()); + filter_result = + lists_data_filter4->Filter(0, lists_data_key4.Encode(), "FILTER_TEST_VALUE", &new_value, &value_changed); + ASSERT_EQ(filter_result, true); + s = meta_db->Delete(rocksdb::WriteOptions(), handles[0], bmk.Encode()); + ASSERT_TRUE(s.ok()); + + // Meta data has been cleared + auto lists_data_filter5 = std::make_unique<ListsDataFilter>(meta_db, &handles, DataType::kLists); + ASSERT_TRUE(lists_data_filter5 != nullptr); + + EncodeFixed64(str, 1); + ListsMetaValue lists_meta_value5(Slice(str, sizeof(uint64_t))); + version = lists_meta_value5.UpdateVersion(); + s = meta_db->Put(rocksdb::WriteOptions(), handles[0], bmk.Encode(), lists_meta_value5.Encode()); + ASSERT_TRUE(s.ok()); + ListsDataKey lists_data_key5("FILTER_TEST_KEY", version, 1); + s = meta_db->Delete(rocksdb::WriteOptions(), handles[0], bmk.Encode()); + ASSERT_TRUE(s.ok()); + filter_result = + lists_data_filter5->Filter(0, lists_data_key5.Encode(), "FILTER_TEST_VALUE", &new_value, &value_changed); + ASSERT_EQ(filter_result, true); + + /* + * The types of keys conflict with each other and trigger compaction, zset filter + */ + BaseMetaKey meta_key(user_key); + auto zset_filter = std::make_unique<ZSetsScoreFilter>(meta_db, &handles, DataType::kZSets); + ASSERT_TRUE(zset_filter != nullptr); + + // Insert a zset key + EncodeFixed32(buf, 1); + ZSetsMetaValue zsets_meta_value(DataType::kZSets, Slice(buf, 4)); + version = zsets_meta_value.UpdateVersion(); + s = meta_db->Put(rocksdb::WriteOptions(), handles[0], meta_key.Encode(), zsets_meta_value.Encode()); + ASSERT_TRUE(s.ok()); + + // Insert a key of type string with the same name as the list + StringsValue strings_value("FILTER_TEST_VALUE"); + s = meta_db->Put(rocksdb::WriteOptions(), meta_key.Encode(), strings_value.Encode()); + + // the zset filter is used for stale-entry detection + ZSetsScoreKey base_key(user_key, version, 1, "FILTER_TEST_KEY"); + filter_result = zset_filter->Filter(0, base_key.Encode(), "FILTER_TEST_VALUE", &new_value, &value_changed); + ASSERT_EQ(filter_result, true); + s = meta_db->Delete(rocksdb::WriteOptions(), handles[0], "FILTER_TEST_KEY"); + ASSERT_TRUE(s.ok()); + + /* + * The types of keys conflict with each other and trigger compaction, list filter + */ + auto lists_data_filter = std::make_unique<ListsDataFilter>(meta_db, &handles, DataType::kLists); + ASSERT_TRUE(lists_data_filter != nullptr); + + // Insert a list key + EncodeFixed64(str, 1); + ListsMetaValue lists_meta_value(Slice(str, sizeof(uint64_t))); + lists_meta_value.UpdateVersion(); + s = meta_db->Put(rocksdb::WriteOptions(), handles[0], meta_key.Encode(), lists_meta_value.Encode()); + ASSERT_TRUE(s.ok()); + + // Insert a key of type set with the same name as the list + EncodeFixed32(buf, 1); + SetsMetaValue sets_meta_value(DataType::kSets, Slice(buf, 4)); + sets_meta_value.UpdateVersion(); + s = meta_db->Put(rocksdb::WriteOptions(), handles[0], meta_key.Encode(), sets_meta_value.Encode()); + ASSERT_TRUE(s.ok()); + + // the list filter is used for stale-entry detection + ListsDataKey lists_data_key(user_key, version, 1); + filter_result = lists_data_filter->Filter(0, lists_data_key.Encode(), "FILTER_TEST_VALUE", &new_value, &value_changed); + ASSERT_EQ(filter_result, true); + s = meta_db->Delete(rocksdb::WriteOptions(), handles[0], "FILTER_TEST_KEY"); + ASSERT_TRUE(s.ok()); + + /* + * The types of keys conflict with each other and trigger compaction, base filter + */ + auto base_filter = std::make_unique<BaseDataFilter>(meta_db, &handles, DataType::kHashes); + ASSERT_TRUE(base_filter != nullptr); + + // Insert a hash key + EncodeFixed32(buf, 1); + HashesMetaValue hash_meta_value(DataType::kHashes, Slice(buf, 4)); + hash_meta_value.UpdateVersion(); + s = meta_db->Put(rocksdb::WriteOptions(), handles[0], meta_key.Encode(), hash_meta_value.Encode()); + ASSERT_TRUE(s.ok()); + + // Insert a key of type list with the same name as the hash + EncodeFixed64(str, 1); + ListsMetaValue lists_meta_value6(Slice(str, sizeof(uint64_t))); + lists_meta_value6.UpdateVersion(); + s = meta_db->Put(rocksdb::WriteOptions(), handles[0], meta_key.Encode(), lists_meta_value6.Encode()); + ASSERT_TRUE(s.ok()); + + // the base filter is used for stale-entry detection + ListsDataKey lists_data_key6(user_key, version, 1); + filter_result = base_filter->Filter(0, lists_data_key6.Encode(), "FILTER_TEST_VALUE", &new_value, &value_changed); + ASSERT_EQ(filter_result, true); + s = meta_db->Delete(rocksdb::WriteOptions(), handles[0], "FILTER_TEST_KEY"); + ASSERT_TRUE(s.ok()); +}
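The scenarios above all reduce to one staleness rule: a data entry is dropped when its encoded version is older than the version in the current meta value, when the meta value carries an already-elapsed timeout, or when the meta entry is gone altogether. A simplified sketch of that decision, under our own naming (DecodeVersion/DecodeExpireAt stand in for the meta-value parsing and are not functions from this patch; the real filters also cache the last meta key they resolved):

    // Hypothetical condensed form of the check a data filter performs.
    bool IsStale(rocksdb::DB* meta_db, rocksdb::ColumnFamilyHandle* meta_cf,
                 const rocksdb::Slice& meta_key, uint64_t data_version, uint64_t now_ms) {
      std::string meta_value;
      rocksdb::Status s = meta_db->Get(rocksdb::ReadOptions(), meta_cf, meta_key, &meta_value);
      if (s.IsNotFound()) { return true; }  // meta cleared -> drop orphaned data
      if (!s.ok()) { return false; }        // stay conservative on read errors
      uint64_t meta_version = DecodeVersion(meta_value);    // placeholder decoder
      uint64_t expire_at_ms = DecodeExpireAt(meta_value);   // placeholder decoder
      if (data_version < meta_version) { return true; }     // key was recreated
      return expire_at_ms != 0 && expire_at_ms <= now_ms;   // timeout elapsed
    }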
+ +int main(int argc, char** argv) { + ::testing::InitGoogleTest(&argc, argv); + return RUN_ALL_TESTS(); +} diff --git a/tools/pika_migrate/src/storage/tests/lists_test.cc b/tools/pika_migrate/src/storage/tests/lists_test.cc new file mode 100644 index 0000000000..b7dd1d1282 --- /dev/null +++ b/tools/pika_migrate/src/storage/tests/lists_test.cc @@ -0,0 +1,2719 @@ +// Copyright (c) 2017-present, Qihoo, Inc. All rights reserved. +// This source code is licensed under the BSD-style license found in the +// LICENSE file in the root directory of this source tree. An additional grant +// of patent rights can be found in the PATENTS file in the same directory. + +#include <gtest/gtest.h> +#include <iostream> +#include <thread> + +#include "glog/logging.h" + +#include "pstd/include/pika_codis_slot.h" +#include "pstd/include/env.h" +#include "storage/storage.h" +#include "storage/util.h" + +using namespace storage; + +static bool elements_match(storage::Storage* const db, const Slice& key, + const std::vector<std::string>& expect_elements) { + std::vector<std::string> elements_out; + Status s = db->LRange(key, 0, -1, &elements_out); + LOG(WARNING) << "status: " << s.ToString() << " elements_out size: " << elements_out.size(); + if (!s.ok() && !s.IsNotFound()) { + return false; + } + if (elements_out.size() != expect_elements.size()) { + return false; + } + if (s.IsNotFound() && expect_elements.empty()) { + return true; + } + for (uint64_t idx = 0; idx < elements_out.size(); ++idx) { + LOG(WARNING) << "element: " << elements_out[idx]; + if (strcmp(elements_out[idx].c_str(), expect_elements[idx].c_str()) != 0) { + return false; + } + } + return true; +} + +static bool elements_match(const std::vector<std::string>& elements_out, + const std::vector<std::string>& expect_elements) { + if (elements_out.size() != expect_elements.size()) { + return false; + } + for (uint64_t idx = 0; idx < elements_out.size(); ++idx) { + if (strcmp(elements_out[idx].c_str(), expect_elements[idx].c_str()) != 0) { + return false; + } + } + return true; +} + +static bool len_match(storage::Storage* const db, const Slice& key, uint64_t expect_len) { + uint64_t len = 0; + Status s = db->LLen(key, &len); + if (!s.ok() && !s.IsNotFound()) { + return false; + } + if (s.IsNotFound() && (expect_len == 0U)) { + return true; + } + return len == expect_len; +} + +static bool make_expired(storage::Storage* const db, const Slice& key) { + std::map<storage::DataType, storage::Status> type_status; + int ret = db->Expire(key, 1); + if ((ret == 0) || !type_status[storage::DataType::kLists].ok()) { + return false; + } + std::this_thread::sleep_for(std::chrono::milliseconds(2000)); + return true; +}
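These helpers carry most of the assertions in the file: len_match and elements_match treat NotFound as an empty list, and make_expired attaches a 1-second timeout and then sleeps for 2 seconds, so every "expired key" scenario below waits out a real TTL rather than mocking the clock. A typical call pattern (key name illustrative):

    uint64_t num = 0;
    ASSERT_TRUE(db.RPush("DEMO_KEY", {"a", "b"}, &num).ok());
    ASSERT_TRUE(make_expired(&db, "DEMO_KEY"));        // 1s TTL + 2s sleep
    ASSERT_TRUE(len_match(&db, "DEMO_KEY", 0));        // reads back as missing
    ASSERT_TRUE(elements_match(&db, "DEMO_KEY", {}));  // LRange agrees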
+ +class ListsTest : public ::testing::Test { + public: + ListsTest() = default; + ~ListsTest() override = default; + + void SetUp() override { + std::string path = "./db/lists"; + pstd::DeleteDirIfExist(path); + mkdir(path.c_str(), 0755); + storage_options.options.create_if_missing = true; + s = db.Open(storage_options, path); + if (!s.ok()) { + printf("Open db failed, exit...\n"); + exit(1); + } + } + + void TearDown() override { + std::string path = "./db/lists"; + DeleteFiles(path.c_str()); + } + + static void SetUpTestSuite() {} + static void TearDownTestSuite() {} + + StorageOptions storage_options; + storage::Storage db; + storage::Status s; +}; + +// LIndex +TEST_F(ListsTest, LIndexTest) { // NOLINT + uint64_t num; + std::string element; + std::vector<std::string> elements; + + // ***************** Group 1 Test ***************** + // "z" -> "e" -> "p" -> "p" -> "l" -> "i" -> "n" + // 0 1 2 3 4 5 6 + // -7 -6 -5 -4 -3 -2 -1 + std::vector<std::string> gp1_nodes{"n", "i", "l", "p", "p", "e", "z"}; + s = db.LPush("GP1_LINDEX_KEY", gp1_nodes, &num); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(gp1_nodes.size(), num); + ASSERT_TRUE(len_match(&db, "GP1_LINDEX_KEY", gp1_nodes.size())); + ASSERT_TRUE(elements_match(&db, "GP1_LINDEX_KEY", {"z", "e", "p", "p", "l", "i", "n"})); + + s = db.LIndex("GP1_LINDEX_KEY", 0, &element); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(element, "z"); + + s = db.LIndex("GP1_LINDEX_KEY", 4, &element); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(element, "l"); + + s = db.LIndex("GP1_LINDEX_KEY", 6, &element); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(element, "n"); + + s = db.LIndex("GP1_LINDEX_KEY", 10, &element); + ASSERT_TRUE(s.IsNotFound()); + + s = db.LIndex("GP1_LINDEX_KEY", -1, &element); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(element, "n"); + + s = db.LIndex("GP1_LINDEX_KEY", -4, &element); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(element, "p"); + + s = db.LIndex("GP1_LINDEX_KEY", -7, &element); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(element, "z"); + + s = db.LIndex("GP1_LINDEX_KEY", -10000, &element); + ASSERT_TRUE(s.IsNotFound()); + + // ***************** Group 2 Test ***************** + // "b" -> "a" -> "t" -> "t" -> "l" -> "e" + // 0 1 2 3 4 5 + // -6 -5 -4 -3 -2 -1 + // LIndex time out list + std::vector<std::string> gp2_nodes{"b", "a", "t", "t", "l", "e"}; + s = db.RPush("GP2_LINDEX_KEY", gp2_nodes, &num); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(gp2_nodes.size(), num); + ASSERT_TRUE(len_match(&db, "GP2_LINDEX_KEY", gp2_nodes.size())); + ASSERT_TRUE(elements_match(&db, "GP2_LINDEX_KEY", {"b", "a", "t", "t", "l", "e"})); + + ASSERT_TRUE(make_expired(&db, "GP2_LINDEX_KEY")); + ASSERT_TRUE(len_match(&db, "GP2_LINDEX_KEY", 0)); + ASSERT_TRUE(elements_match(&db, "GP2_LINDEX_KEY", {})); + s = db.LIndex("GP2_LINDEX_KEY", 0, &element); + ASSERT_TRUE(s.IsNotFound()); + + // ***************** Group 3 Test ***************** + // "m" -> "i" -> "s" -> "t" -> "y" + // 0 1 2 3 4 + // -5 -4 -3 -2 -1 + // LIndex the key that has been deleted + std::vector<std::string> gp3_nodes{"m", "i", "s", "t", "y"}; + s = db.RPush("GP3_LINDEX_KEY", gp3_nodes, &num); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(gp3_nodes.size(), num); + ASSERT_TRUE(len_match(&db, "GP3_LINDEX_KEY", gp3_nodes.size())); + ASSERT_TRUE(elements_match(&db, "GP3_LINDEX_KEY", {"m", "i", "s", "t", "y"})); + + std::vector<std::string> del_keys = {"GP3_LINDEX_KEY"}; + std::map<storage::DataType, storage::Status> type_status; + db.Del(del_keys); + ASSERT_TRUE(type_status[storage::DataType::kLists].ok()); + ASSERT_TRUE(len_match(&db, "GP3_LINDEX_KEY", 0)); + ASSERT_TRUE(elements_match(&db, "GP3_LINDEX_KEY", {})); + + s = db.LIndex("GP3_LINDEX_KEY", 0, &element); + ASSERT_TRUE(s.IsNotFound()); + + // ***************** Group 4 Test ***************** + // LIndex not exist key + s = db.LIndex("GP4_LINDEX_KEY", 0, &element); + ASSERT_TRUE(s.IsNotFound()); + + // ***************** Group 5 Test ***************** + // "m" -> "i" -> "s" -> "t" -> "y" + // 0 1 2 3 4 + // -5 -4 -3 -2 -1 + // + // After LPop + // "i" -> "s" -> "t" -> "y" + // 0 1 2 3 + // -4 -3 -2 -1 + std::vector<std::string> gp5_nodes{"m", "i", "s", "t", "y"}; + s = db.RPush("GP5_LINDEX_KEY", gp5_nodes, &num); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(gp5_nodes.size(), num); + ASSERT_TRUE(len_match(&db, "GP5_LINDEX_KEY", gp5_nodes.size())); + ASSERT_TRUE(elements_match(&db, "GP5_LINDEX_KEY", {"m", "i", "s", "t", "y"})); + + s = db.LPop("GP5_LINDEX_KEY", 1, &elements); + ASSERT_TRUE(s.ok()); + ASSERT_TRUE(elements_match(elements, {"m"})); + + s = db.LIndex("GP5_LINDEX_KEY", -5, &element); + ASSERT_TRUE(s.IsNotFound()); + + // ***************** Group 6 Test ***************** + // "m" -> "i" -> "s" -> "t" -> "y" + // 0 1 2 3 4 + // -5 -4 -3 -2 -1 + // + // After RPop + // "m" -> "i" -> "s" -> "t" + // 0 1 2 3 + // -4 -3 -2 -1 + std::vector<std::string> gp6_nodes{"m", "i", "s", "t", "y"}; + s = db.RPush("GP6_LINDEX_KEY", gp6_nodes, &num); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(gp6_nodes.size(), num); + ASSERT_TRUE(len_match(&db, "GP6_LINDEX_KEY", gp6_nodes.size())); + ASSERT_TRUE(elements_match(&db, "GP6_LINDEX_KEY", {"m", "i", "s", "t", "y"})); + + s = db.RPop("GP6_LINDEX_KEY", 1, &elements); + ASSERT_TRUE(s.ok()); + ASSERT_TRUE(elements_match(elements, {"y"})); + + s = db.LIndex("GP6_LINDEX_KEY", 4, &element); + ASSERT_TRUE(s.IsNotFound()); + + // ***************** Group 7 Test ***************** + // "m" -> "i" -> "s" -> "t" -> "y" + // 0 1 2 3 4 + // -5 -4 -3 -2 -1 + // + // After LTrim 1 3 + // "i" -> "s" -> "t" + // 0 1 2 + // -3 -2 -1 + std::vector<std::string> gp7_nodes{"m", "i", "s", "t", "y"}; + s = db.RPush("GP7_LINDEX_KEY", gp7_nodes, &num); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(gp7_nodes.size(), num); + ASSERT_TRUE(len_match(&db, "GP7_LINDEX_KEY", gp7_nodes.size())); + ASSERT_TRUE(elements_match(&db, "GP7_LINDEX_KEY", {"m", "i", "s", "t", "y"})); + + s = db.LTrim("GP7_LINDEX_KEY", 1, 3); + ASSERT_TRUE(s.ok()); + + s = db.LIndex("GP7_LINDEX_KEY", 3, &element); + ASSERT_TRUE(s.IsNotFound()); + + s = db.LIndex("GP7_LINDEX_KEY", -4, &element); + ASSERT_TRUE(s.IsNotFound()); +} + +// LInsert +TEST_F(ListsTest, LInsertTest) { // NOLINT + int64_t ret; + uint64_t num; + + // ***************** Group 1 Test ***************** + // LInsert not exist key + s = db.LInsert("GP1_LINSERT_KEY", storage::Before, "pivot", "value", &ret); + ASSERT_TRUE(s.IsNotFound()); + ASSERT_EQ(ret, 0); + + // ***************** Group 2 Test ***************** + // "w" -> "e" -> "r" -> "u" -> "n" + // LInsert not exist pivot value + std::vector<std::string> gp2_nodes{"w", "e", "r", "u", "n"}; + s = db.RPush("GP2_LINSERT_KEY", gp2_nodes, &num); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(gp2_nodes.size(), num); + ASSERT_TRUE(len_match(&db, "GP2_LINSERT_KEY", gp2_nodes.size())); + ASSERT_TRUE(elements_match(&db, "GP2_LINSERT_KEY", {"w", "e", "r", "u", "n"})); + + s = db.LInsert("GP2_LINSERT_KEY", storage::Before, "pivot", "value", &ret); + ASSERT_TRUE(s.IsNotFound()); + ASSERT_EQ(ret, -1); + + // ***************** Group 3 Test ***************** + // "a" -> "p" -> "p" -> "l" -> "e" + // LInsert expire list + std::vector<std::string> gp3_nodes{"a", "p", "p", "l", "e"}; + s = db.RPush("GP3_LINSERT_KEY", gp3_nodes, &num); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(gp3_nodes.size(), num); + ASSERT_TRUE(len_match(&db, "GP3_LINSERT_KEY", gp3_nodes.size())); + ASSERT_TRUE(elements_match(&db, "GP3_LINSERT_KEY", {"a", "p", "p", "l", "e"})); + ASSERT_TRUE(make_expired(&db, "GP3_LINSERT_KEY")); + + s = db.LInsert("GP3_LINSERT_KEY", storage::Before, "pivot", "value", &ret); + ASSERT_TRUE(s.IsNotFound()); + ASSERT_EQ(ret, 0); + + s = db.LInsert("GP3_LINSERT_KEY", storage::Before, "a", "value", &ret); + ASSERT_TRUE(s.IsNotFound()); + ASSERT_EQ(ret, 0); + + // ***************** Group 4 Test ***************** + // "a" + std::vector<std::string> gp4_nodes{"a"}; + s = db.RPush("GP4_LINSERT_KEY", gp4_nodes, &num); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(gp4_nodes.size(), num); + ASSERT_TRUE(len_match(&db, "GP4_LINSERT_KEY", gp4_nodes.size())); + ASSERT_TRUE(elements_match(&db, "GP4_LINSERT_KEY", {"a"})); + + // "x" -> "a" + s = db.LInsert("GP4_LINSERT_KEY", storage::Before, "a", "x", &ret); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(ret, 2); + ASSERT_TRUE(len_match(&db, "GP4_LINSERT_KEY", 2)); + ASSERT_TRUE(elements_match(&db, "GP4_LINSERT_KEY", {"x", "a"})); + + // ***************** Group 5 Test ***************** + // "a" + std::vector<std::string> gp5_nodes{"a"}; + s = db.RPush("GP5_LINSERT_KEY", gp5_nodes, &num); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(gp5_nodes.size(), num); + ASSERT_TRUE(len_match(&db, "GP5_LINSERT_KEY", gp5_nodes.size())); + ASSERT_TRUE(elements_match(&db, "GP5_LINSERT_KEY", {"a"})); + + // "a" -> "x" + s = db.LInsert("GP5_LINSERT_KEY", storage::After, "a", "x", &ret); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(ret, 2); + ASSERT_TRUE(len_match(&db, "GP5_LINSERT_KEY", 2)); + ASSERT_TRUE(elements_match(&db, "GP5_LINSERT_KEY", {"a", "x"})); + + // ***************** Group 6 Test ***************** + // "a" -> "b" + std::vector<std::string> gp6_nodes{"a", "b"}; + s = db.RPush("GP6_LINSERT_KEY", gp6_nodes, &num); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(gp6_nodes.size(), num); + ASSERT_TRUE(len_match(&db, "GP6_LINSERT_KEY", gp6_nodes.size())); + ASSERT_TRUE(elements_match(&db, "GP6_LINSERT_KEY", {"a", "b"})); + + // "x" -> "a" -> "b" + s = db.LInsert("GP6_LINSERT_KEY", storage::Before, "a", "x", &ret); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(ret, 3); + ASSERT_TRUE(len_match(&db, "GP6_LINSERT_KEY", 3)); + ASSERT_TRUE(elements_match(&db, "GP6_LINSERT_KEY", {"x", "a", "b"})); + + // ***************** Group 7 Test ***************** + // "a" -> "b" + std::vector<std::string> gp7_nodes{"a", "b"}; + s = db.RPush("GP7_LINSERT_KEY", gp7_nodes, &num); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(gp7_nodes.size(), num); + ASSERT_TRUE(len_match(&db, "GP7_LINSERT_KEY", gp7_nodes.size())); + ASSERT_TRUE(elements_match(&db, "GP7_LINSERT_KEY", {"a", "b"})); + + // "a" -> "x" -> "b" + s = db.LInsert("GP7_LINSERT_KEY", storage::After, "a", "x", &ret); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(ret, 3); + ASSERT_TRUE(len_match(&db, "GP7_LINSERT_KEY", 3)); + ASSERT_TRUE(elements_match(&db, "GP7_LINSERT_KEY", {"a", "x", "b"})); + + // ***************** Group 8 Test ***************** + // "a" -> "b" + std::vector<std::string> gp8_nodes{"a", "b"}; + s = db.RPush("GP8_LINSERT_KEY", gp8_nodes, &num); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(gp8_nodes.size(), num); + ASSERT_TRUE(len_match(&db, "GP8_LINSERT_KEY", gp8_nodes.size())); + ASSERT_TRUE(elements_match(&db, "GP8_LINSERT_KEY", {"a", "b"})); + + // "a" -> "x" -> "b" + s = db.LInsert("GP8_LINSERT_KEY", storage::Before, "b", "x", &ret); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(ret, 3); + ASSERT_TRUE(len_match(&db, "GP8_LINSERT_KEY", 3)); + ASSERT_TRUE(elements_match(&db, "GP8_LINSERT_KEY", {"a", "x", "b"})); + + // ***************** Group 9 Test ***************** + // "a" -> "b" + std::vector<std::string> gp9_nodes{"a", "b"}; + s = db.RPush("GP9_LINSERT_KEY", gp9_nodes, &num); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(gp9_nodes.size(), num); + ASSERT_TRUE(len_match(&db, "GP9_LINSERT_KEY", gp9_nodes.size())); + ASSERT_TRUE(elements_match(&db, "GP9_LINSERT_KEY", {"a", "b"})); + + // "a" -> "b" -> "x" + s = db.LInsert("GP9_LINSERT_KEY", storage::After, "b", "x", &ret); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(ret, 3); + ASSERT_TRUE(len_match(&db, "GP9_LINSERT_KEY", 3)); + ASSERT_TRUE(elements_match(&db, "GP9_LINSERT_KEY", {"a", "b", "x"})); + + // ***************** Group 10 Test ***************** + // "1" -> "2" -> "3" + std::vector<std::string> gp10_nodes{"1", "2", "3"}; + s = db.RPush("GP10_LINSERT_KEY", gp10_nodes, &num); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(gp10_nodes.size(), num); + ASSERT_TRUE(len_match(&db, "GP10_LINSERT_KEY", gp10_nodes.size())); + ASSERT_TRUE(elements_match(&db, "GP10_LINSERT_KEY", {"1", "2", "3"})); + + // "1" -> "2" -> "4" -> "3" + s = db.LInsert("GP10_LINSERT_KEY", storage::After, "2", "4", &ret); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(ret, 4); + ASSERT_TRUE(len_match(&db, "GP10_LINSERT_KEY", 4)); + ASSERT_TRUE(elements_match(&db, "GP10_LINSERT_KEY", {"1", "2", "4", "3"})); + + // "1" -> "2" -> "4" -> "3" -> "5" + s = db.LInsert("GP10_LINSERT_KEY", storage::After, "3", "5", &ret); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(ret, 5); + ASSERT_TRUE(len_match(&db, "GP10_LINSERT_KEY", 5)); + ASSERT_TRUE(elements_match(&db, "GP10_LINSERT_KEY", {"1", "2", "4", "3", "5"})); + + // "1" -> "2" -> "4" -> "3" -> "6" -> "5" + s = db.LInsert("GP10_LINSERT_KEY", storage::Before, "5", "6", &ret); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(ret, 6); + ASSERT_TRUE(len_match(&db, "GP10_LINSERT_KEY", 6)); + ASSERT_TRUE(elements_match(&db, "GP10_LINSERT_KEY", {"1", "2", "4", "3", "6", "5"})); + + // "7" -> "1" -> "2" -> "4" -> "3" -> "6" -> "5" + s = db.LInsert("GP10_LINSERT_KEY", storage::Before, "1", "7", &ret); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(ret, 7); + ASSERT_TRUE(len_match(&db, "GP10_LINSERT_KEY", 7)); + ASSERT_TRUE(elements_match(&db, "GP10_LINSERT_KEY", {"7", "1", "2", "4", "3", "6", "5"})); + + // "7" -> "1" -> "8" -> "2" -> "4" -> "3" -> "6" -> "5" + s = db.LInsert("GP10_LINSERT_KEY", storage::After, "1", "8", &ret); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(ret, 8); + ASSERT_TRUE(len_match(&db, "GP10_LINSERT_KEY", 8)); + ASSERT_TRUE(elements_match(&db, "GP10_LINSERT_KEY", {"7", "1", "8", "2", "4", "3", "6", "5"})); + + // "7" -> "1" -> "8" -> "9" -> "2" -> "4" -> "3" -> "6" -> "5" + s = db.LInsert("GP10_LINSERT_KEY", storage::Before, "2", "9", &ret); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(ret, 9); + ASSERT_TRUE(len_match(&db, "GP10_LINSERT_KEY", 9)); + ASSERT_TRUE(elements_match(&db, "GP10_LINSERT_KEY", {"7", "1", "8", "9", "2", "4", "3", "6", "5"})); +}
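Taken together, the LInsert groups pin down the return-value contract: ret is 0 when the key is missing or has expired, -1 when the key exists but the pivot does not, and the new list length on success, with only the first matching pivot considered. In compressed form (key names illustrative):

    int64_t ret = 0;
    uint64_t num = 0;
    db.RPush("DEMO_KEY", {"a", "b"}, &num);
    db.LInsert("NO_SUCH_KEY", storage::Before, "a", "v", &ret);  // NotFound, ret == 0
    db.LInsert("DEMO_KEY", storage::Before, "zz", "v", &ret);    // no pivot, ret == -1
    db.LInsert("DEMO_KEY", storage::After, "a", "v", &ret);      // ok, ret == 3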
ASSERT_TRUE(len_match(&db, "GP9_LINSERT_KEY", 3)); + ASSERT_TRUE(elements_match(&db, "GP9_LINSERT_KEY", {"a", "b", "x"})); + + // ***************** Group 10 Test ***************** + // "1" -> "2" -> "3" + std::vector gp10_nodes{"1", "2", "3"}; + s = db.RPush("GP10_LINSERT_KEY", gp10_nodes, &num); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(gp10_nodes.size(), num); + ASSERT_TRUE(len_match(&db, "GP10_LINSERT_KEY", gp10_nodes.size())); + ASSERT_TRUE(elements_match(&db, "GP10_LINSERT_KEY", {"1", "2", "3"})); + + // "1" -> "2" -> "4" -> "3" + s = db.LInsert("GP10_LINSERT_KEY", storage::After, "2", "4", &ret); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(ret, 4); + ASSERT_TRUE(len_match(&db, "GP10_LINSERT_KEY", 4)); + ASSERT_TRUE(elements_match(&db, "GP10_LINSERT_KEY", {"1", "2", "4", "3"})); + + // "1" -> "2" -> "4" -> "3" -> "5" + s = db.LInsert("GP10_LINSERT_KEY", storage::After, "3", "5", &ret); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(ret, 5); + ASSERT_TRUE(len_match(&db, "GP10_LINSERT_KEY", 5)); + ASSERT_TRUE(elements_match(&db, "GP10_LINSERT_KEY", {"1", "2", "4", "3", "5"})); + + // "1" -> "2" -> "4" -> "3" -> "6" -> "5" + s = db.LInsert("GP10_LINSERT_KEY", storage::Before, "5", "6", &ret); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(ret, 6); + ASSERT_TRUE(len_match(&db, "GP10_LINSERT_KEY", 6)); + ASSERT_TRUE(elements_match(&db, "GP10_LINSERT_KEY", {"1", "2", "4", "3", "6", "5"})); + + // "7" -> "1" -> "2" -> "4" -> "3" -> "6" -> "5" + s = db.LInsert("GP10_LINSERT_KEY", storage::Before, "1", "7", &ret); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(ret, 7); + ASSERT_TRUE(len_match(&db, "GP10_LINSERT_KEY", 7)); + ASSERT_TRUE(elements_match(&db, "GP10_LINSERT_KEY", {"7", "1", "2", "4", "3", "6", "5"})); + + // "7" -> "1" -> "8" -> "2" -> "4" -> "3" -> "6" -> "5" + s = db.LInsert("GP10_LINSERT_KEY", storage::After, "1", "8", &ret); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(ret, 8); + ASSERT_TRUE(len_match(&db, "GP10_LINSERT_KEY", 8)); + ASSERT_TRUE(elements_match(&db, "GP10_LINSERT_KEY", {"7", "1", "8", "2", "4", "3", "6", "5"})); + + // "7" -> "1" -> "8" -> "9" -> "2" -> "4" -> "3" -> "6" -> "5" + s = db.LInsert("GP10_LINSERT_KEY", storage::Before, "2", "9", &ret); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(ret, 9); + ASSERT_TRUE(len_match(&db, "GP10_LINSERT_KEY", 9)); + ASSERT_TRUE(elements_match(&db, "GP10_LINSERT_KEY", {"7", "1", "8", "9", "2", "4", "3", "6", "5"})); +} + +// LLen +TEST_F(ListsTest, LLenTest) { // NOLINT + uint64_t num; + + // ***************** Group 1 Test ***************** + // "l" -> "x" -> "a" + std::vector gp1_nodes{"a", "x", "l"}; + s = db.LPush("GP1_LLEN_KEY", gp1_nodes, &num); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(gp1_nodes.size(), num); + ASSERT_TRUE(len_match(&db, "GP1_LLEN_KEY", gp1_nodes.size())); + + // The key has timeout + ASSERT_TRUE(make_expired(&db, "GP1_LLEN_KEY")); + ASSERT_TRUE(len_match(&db, "GP1_LLEN_KEY", 0)); + + // ***************** Group 1 Test ***************** + // "p" -> "e" -> "r" -> "g" + std::vector gp2_nodes{"g", "r", "e", "p"}; + s = db.LPush("GP2_LLEN_KEY", gp2_nodes, &num); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(gp2_nodes.size(), num); + ASSERT_TRUE(len_match(&db, "GP2_LLEN_KEY", gp2_nodes.size())); + ASSERT_TRUE(elements_match(&db, "GP2_LLEN_KEY", {"p", "e", "r", "g"})); + + // Delete the key + std::vector del_keys = {"GP2_LLEN_KEY"}; + std::map type_status; + db.Del(del_keys); + ASSERT_TRUE(type_status[storage::DataType::kLists].ok()); + ASSERT_TRUE(len_match(&db, "GP2_LLEN_KEY", 0)); + ASSERT_TRUE(elements_match(&db, "GP2_LLEN_KEY", {})); +} + +// LPop +TEST_F(ListsTest, LPopTest) { // NOLINT + 
+ +// LPop +TEST_F(ListsTest, LPopTest) { // NOLINT + uint64_t num; + std::string element; + std::vector<std::string> elements; + // ***************** Group 1 Test ***************** + // "l" -> "x" -> "a" + std::vector<std::string> gp1_nodes{"a", "x", "l"}; + s = db.LPush("GP1_LPOP_KEY", gp1_nodes, &num); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(gp1_nodes.size(), num); + ASSERT_TRUE(len_match(&db, "GP1_LPOP_KEY", gp1_nodes.size())); + ASSERT_TRUE(elements_match(&db, "GP1_LPOP_KEY", {"l", "x", "a"})); + + // "x" -> "a" + + s = db.LPop("GP1_LPOP_KEY", 1, &elements); + ASSERT_TRUE(s.ok()); + ASSERT_TRUE(elements_match(elements, {"l"})); + ASSERT_TRUE(len_match(&db, "GP1_LPOP_KEY", 2)); + ASSERT_TRUE(elements_match(&db, "GP1_LPOP_KEY", {"x", "a"})); + + // after popping the remaining two elements, the list will be empty + s = db.LPop("GP1_LPOP_KEY", 1, &elements); + ASSERT_TRUE(s.ok()); + ASSERT_TRUE(elements_match(elements, {"x"})); + s = db.LPop("GP1_LPOP_KEY", 1, &elements); + ASSERT_TRUE(s.ok()); + ASSERT_TRUE(elements_match(elements, {"a"})); + ASSERT_TRUE(len_match(&db, "GP1_LPOP_KEY", 0)); + ASSERT_TRUE(elements_match(&db, "GP1_LPOP_KEY", {})); + + // lpop empty list + s = db.LPop("GP1_LPOP_KEY", 1, &elements); + ASSERT_TRUE(s.IsNotFound()); + + // ***************** Group 2 Test ***************** + // "p" -> "e" -> "r" -> "g" + std::vector<std::string> gp2_nodes{"g", "r", "e", "p"}; + s = db.LPush("GP2_LPOP_KEY", gp2_nodes, &num); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(gp2_nodes.size(), num); + ASSERT_TRUE(len_match(&db, "GP2_LPOP_KEY", gp2_nodes.size())); + ASSERT_TRUE(elements_match(&db, "GP2_LPOP_KEY", {"p", "e", "r", "g"})); + + ASSERT_TRUE(make_expired(&db, "GP2_LPOP_KEY")); + s = db.LPop("GP2_LPOP_KEY", 1, &elements); + ASSERT_TRUE(s.IsNotFound()); + ASSERT_TRUE(len_match(&db, "GP2_LPOP_KEY", 0)); + ASSERT_TRUE(elements_match(&db, "GP2_LPOP_KEY", {})); + + // ***************** Group 3 Test ***************** + // "p" -> "o" -> "m" -> "e" -> "i" -> "i" + std::vector<std::string> gp3_nodes{"i", "i", "e", "m", "o", "p"}; + s = db.LPush("GP3_LPOP_KEY", gp3_nodes, &num); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(gp3_nodes.size(), num); + ASSERT_TRUE(len_match(&db, "GP3_LPOP_KEY", gp3_nodes.size())); + ASSERT_TRUE(elements_match(&db, "GP3_LPOP_KEY", {"p", "o", "m", "e", "i", "i"})); + + // Delete the key, then try lpop + std::vector<std::string> del_keys = {"GP3_LPOP_KEY"}; + std::map<storage::DataType, storage::Status> type_status; + db.Del(del_keys); + ASSERT_TRUE(type_status[storage::DataType::kLists].ok()); + ASSERT_TRUE(len_match(&db, "GP3_LPOP_KEY", 0)); + ASSERT_TRUE(elements_match(&db, "GP3_LPOP_KEY", {})); + + s = db.LPop("GP3_LPOP_KEY", 1, &elements); + ASSERT_TRUE(s.IsNotFound()); +} + +// LPush +TEST_F(ListsTest, LPushTest) { // NOLINT + int32_t ret; + uint64_t num; + std::string element; + int64_t type_ttl; + std::map<storage::DataType, storage::Status> type_status; + + // ***************** Group 1 Test ***************** + // "s" -> "l" -> "a" -> "s" -> "h" + std::vector<std::string> gp1_nodes{"h", "s", "a", "l", "s"}; + s = db.LPush("GP1_LPUSH_KEY", gp1_nodes, &num); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(gp1_nodes.size(), num); + ASSERT_TRUE(len_match(&db, "GP1_LPUSH_KEY", gp1_nodes.size())); + ASSERT_TRUE(elements_match(&db, "GP1_LPUSH_KEY", {"s", "l", "a", "s", "h"})); + + // ***************** Group 2 Test ***************** + // "a" -> "x" -> "l" + std::vector<std::string> gp2_nodes1{"l", "x", "a"}; + s = db.LPush("GP2_LPUSH_KEY", gp2_nodes1, &num); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(gp2_nodes1.size(), num); + ASSERT_TRUE(len_match(&db, "GP2_LPUSH_KEY", gp2_nodes1.size())); + ASSERT_TRUE(elements_match(&db, "GP2_LPUSH_KEY", {"a", "x", "l"})); + + // "r" -> "o" -> "s" -> "e" + std::vector<std::string> gp2_nodes2{"e", "s", "o", "r"}; + ASSERT_TRUE(make_expired(&db, "GP2_LPUSH_KEY")); + s = db.LPush("GP2_LPUSH_KEY", gp2_nodes2, &num); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(gp2_nodes2.size(), num); + ASSERT_TRUE(len_match(&db, "GP2_LPUSH_KEY", gp2_nodes2.size())); + ASSERT_TRUE(elements_match(&db, "GP2_LPUSH_KEY", {"r", "o", "s", "e"})); + + // ***************** Group 3 Test ***************** + // "d" -> "a" -> "v" -> "i" -> "d" + std::vector<std::string> gp3_nodes1{"d", "i", "v", "a", "d"}; + s = db.LPush("GP3_LPUSH_KEY", gp3_nodes1, &num); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(gp3_nodes1.size(), num); + ASSERT_TRUE(len_match(&db, "GP3_LPUSH_KEY", gp3_nodes1.size())); + ASSERT_TRUE(elements_match(&db, "GP3_LPUSH_KEY", {"d", "a", "v", "i", "d"})); + + // Delete the key + std::vector<std::string> del_keys = {"GP3_LPUSH_KEY"}; + type_status.clear(); + db.Del(del_keys); + ASSERT_TRUE(type_status[storage::DataType::kLists].ok()); + + // "g" -> "i" -> "l" -> "m" -> "o" -> "u" -> "r" + std::vector<std::string> gp3_nodes2{"r", "u", "o", "m", "l", "i", "g"}; + s = db.LPush("GP3_LPUSH_KEY", gp3_nodes2, &num); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(gp3_nodes2.size(), num); + ASSERT_TRUE(len_match(&db, "GP3_LPUSH_KEY", gp3_nodes2.size())); + ASSERT_TRUE(elements_match(&db, "GP3_LPUSH_KEY", {"g", "i", "l", "m", "o", "u", "r"})); + + // ***************** Group 4 Test ***************** + // "b" -> "l" -> "u" -> "e" + std::vector<std::string> gp4_nodes1{"e", "u", "l", "b"}; + s = db.LPush("GP4_LPUSH_KEY", gp4_nodes1, &num); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(gp4_nodes1.size(), num); + ASSERT_TRUE(len_match(&db, "GP4_LPUSH_KEY", gp4_nodes1.size())); + ASSERT_TRUE(elements_match(&db, "GP4_LPUSH_KEY", {"b", "l", "u", "e"})); + + // "t" -> "h" -> "e" -> " " -> "b" -> "l" -> "u" -> "e" + std::vector<std::string> gp4_nodes2{" ", "e", "h", "t"}; + s = db.LPush("GP4_LPUSH_KEY", gp4_nodes2, &num); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(8, num); + ASSERT_TRUE(len_match(&db, "GP4_LPUSH_KEY", 8)); + ASSERT_TRUE(elements_match(&db, "GP4_LPUSH_KEY", {"t", "h", "e", " ", "b", "l", "u", "e"})); + + // ***************** Group 5 Test ***************** + // "b" -> "l" -> "u" -> "e" + std::vector<std::string> gp5_nodes1{"e", "u", "l", "b"}; + s = db.LPush("GP5_LPUSH_KEY", gp5_nodes1, &num); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(gp5_nodes1.size(), num); + ASSERT_TRUE(len_match(&db, "GP5_LPUSH_KEY", gp5_nodes1.size())); + ASSERT_TRUE(elements_match(&db, "GP5_LPUSH_KEY", {"b", "l", "u", "e"})); + + ASSERT_TRUE(make_expired(&db, "GP5_LPUSH_KEY")); + + // "t" -> "h" -> "e" -> " "; + std::vector<std::string> gp5_nodes2{" ", "e", "h", "t"}; + s = db.LPush("GP5_LPUSH_KEY", gp5_nodes2, &num); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(4, num); + ASSERT_TRUE(len_match(&db, "GP5_LPUSH_KEY", 4)); + ASSERT_TRUE(elements_match(&db, "GP5_LPUSH_KEY", {"t", "h", "e", " "})); + + // ***************** Group 6 Test ***************** + // "b" -> "l" -> "u" -> "e" + std::vector<std::string> gp6_nodes1{"e", "u", "l", "b"}; + s = db.LPush("GP6_LPUSH_KEY", gp6_nodes1, &num); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(gp6_nodes1.size(), num); + ASSERT_TRUE(len_match(&db, "GP6_LPUSH_KEY", gp6_nodes1.size())); + ASSERT_TRUE(elements_match(&db, "GP6_LPUSH_KEY", {"b", "l", "u", "e"})); + + type_status.clear(); + ret = db.Expire("GP6_LPUSH_KEY", 100); + ASSERT_EQ(ret, 1); + ASSERT_TRUE(type_status[storage::DataType::kLists].ok()); + + type_status.clear(); + type_ttl = db.TTL("GP6_LPUSH_KEY"); + ASSERT_LE(type_ttl, 100); + ASSERT_GE(type_ttl, 0); + + std::vector<std::string> gp6_elements; + s = db.LPop("GP6_LPUSH_KEY", 1, &gp6_elements); + ASSERT_TRUE(elements_match(gp6_elements, {"b"})); + s = db.LPop("GP6_LPUSH_KEY", 1, &gp6_elements); + ASSERT_TRUE(elements_match(gp6_elements, {"l"})); + s = db.LPop("GP6_LPUSH_KEY", 1, &gp6_elements); + ASSERT_TRUE(elements_match(gp6_elements, {"u"})); + s = db.LPop("GP6_LPUSH_KEY", 1, &gp6_elements); + ASSERT_TRUE(elements_match(gp6_elements, {"e"})); + ASSERT_TRUE(len_match(&db, "GP6_LPUSH_KEY", 0)); + ASSERT_TRUE(elements_match(&db, "GP6_LPUSH_KEY", {})); + + // "t" -> "h" -> "e" -> " "; + std::vector<std::string> gp6_nodes2{" ", "e", "h", "t"}; + s = db.LPush("GP6_LPUSH_KEY", gp6_nodes2, &num); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(4, num); + ASSERT_TRUE(len_match(&db, "GP6_LPUSH_KEY", 4)); + ASSERT_TRUE(elements_match(&db, "GP6_LPUSH_KEY", {"t", "h", "e", " "})); + + type_status.clear(); + type_ttl = db.TTL("GP6_LPUSH_KEY"); + ASSERT_EQ(type_ttl, -1); +}
&gp6_elements); + ASSERT_TRUE(elements_match(gp6_elements, {"l"})); + s = db.LPop("GP6_LPUSH_KEY", 1, &gp6_elements); + ASSERT_TRUE(elements_match(gp6_elements, {"u"})); + s = db.LPop("GP6_LPUSH_KEY", 1, &gp6_elements); + ASSERT_TRUE(elements_match(gp6_elements, {"e"})); + ASSERT_TRUE(len_match(&db, "GP6_LPUSH_KEY", 0)); + ASSERT_TRUE(elements_match(&db, "GP6_LPUSH_KEY", {})); + + // "t" -> "h" -> "e" -> " "; + std::vector gp6_nodes2{" ", "e", "h", "t"}; + s = db.LPush("GP6_LPUSH_KEY", gp6_nodes2, &num); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(4, num); + ASSERT_TRUE(len_match(&db, "GP6_LPUSH_KEY", 4)); + ASSERT_TRUE(elements_match(&db, "GP6_LPUSH_KEY", {"t", "h", "e", " "})); + + type_status.clear(); + type_ttl = db.TTL("GP6_LPUSH_KEY"); + ASSERT_EQ(type_ttl, -1); +} + +// LPushx +TEST_F(ListsTest, LPushxTest) { // NOLINT + int64_t ret; + uint64_t num; + + // ***************** Group 1 Test ***************** + // "o" -> "o" -> "o" + std::vector gp1_nodes1{"o", "o", "o"}; + s = db.RPush("GP1_LPUSHX_KEY", gp1_nodes1, &num); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(gp1_nodes1.size(), num); + ASSERT_TRUE(len_match(&db, "GP1_LPUSHX_KEY", gp1_nodes1.size())); + ASSERT_TRUE(elements_match(&db, "GP1_LPUSHX_KEY", {"o", "o", "o"})); + + // "z" -> "y" -> "x" -> "o" -> "o" -> "o" + s = db.LPushx("GP1_LPUSHX_KEY", {"x", "y", "z"}, &num); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(num, 6); + ASSERT_TRUE(len_match(&db, "GP1_LPUSHX_KEY", 6)); + ASSERT_TRUE(elements_match(&db, "GP1_LPUSHX_KEY", {"z", "y", "x", "o", "o", "o"})); + + // "o" -> "o" -> "z" -> "y" -> "x" -> "o" -> "o" -> "o" + std::vector gp1_nodes2{"o", "o"}; + s = db.LPush("GP1_LPUSHX_KEY", gp1_nodes2, &num); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(num, 8); + ASSERT_TRUE(len_match(&db, "GP1_LPUSHX_KEY", 8)); + ASSERT_TRUE(elements_match(&db, "GP1_LPUSHX_KEY", {"o", "o", "z", "y", "x", "o", "o", "o"})); + + // "z" -> "y" -> "x" -> "o" -> "o" -> "z" -> "y" -> "x" -> "o" -> "o" -> "o" + s = db.LPushx("GP1_LPUSHX_KEY", {"x", "y", "z"}, &num); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(num, 11); + ASSERT_TRUE(len_match(&db, "GP1_LPUSHX_KEY", 11)); + ASSERT_TRUE(elements_match(&db, "GP1_LPUSHX_KEY", {"z", "y", "x", "o", "o", "z", "y", "x", "o", "o", "o"})); + + // ***************** Group 2 Test ***************** + // LPushx not exist key + s = db.LPushx("GP2_LPUSHX_KEY", {"x", "y", "z"}, &num); + ASSERT_TRUE(s.IsNotFound()); + ASSERT_TRUE(len_match(&db, "GP2_LPUSHX_KEY", 0)); + ASSERT_TRUE(elements_match(&db, "GP2_LPUSHX_KEY", {})); + + // ***************** Group 3 Test ***************** + // "o" -> "o" -> "o" + // LPushx timeout key + std::vector gp3_nodes{"o", "o", "o"}; + s = db.RPush("GP3_LPUSHX_KEY", gp3_nodes, &num); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(gp3_nodes.size(), num); + ASSERT_TRUE(len_match(&db, "GP3_LPUSHX_KEY", gp3_nodes.size())); + ASSERT_TRUE(elements_match(&db, "GP3_LPUSHX_KEY", {"o", "o", "o"})); + ASSERT_TRUE(make_expired(&db, "GP3_LPUSHX_KEY")); + + s = db.LPushx("GP3_LPUSHX_KEY", {"x", "y", "z"}, &num); + ASSERT_TRUE(s.IsNotFound()); + ASSERT_TRUE(len_match(&db, "GP3_LPUSHX_KEY", 0)); + ASSERT_TRUE(elements_match(&db, "GP3_LPUSHX_KEY", {})); + + // ***************** Group 4 Test ***************** + // LPushx has been deleted key + std::vector gp4_nodes{"o", "o", "o"}; + s = db.RPush("GP4_LPUSHX_KEY", gp4_nodes, &num); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(gp4_nodes.size(), num); + ASSERT_TRUE(len_match(&db, "GP4_LPUSHX_KEY", gp4_nodes.size())); + ASSERT_TRUE(elements_match(&db, "GP4_LPUSHX_KEY", {"o", "o", "o"})); + + // Delete the key + std::vector del_keys = 
{"GP4_LPUSHX_KEY"}; + std::map type_status; + db.Del(del_keys); + ASSERT_TRUE(type_status[storage::DataType::kLists].ok()); + + s = db.LPushx("GP4_LPUSHX_KEY", {"x", "y", "z"}, &num); + ASSERT_TRUE(s.IsNotFound()); + ASSERT_TRUE(len_match(&db, "GP4_LPUSHX_KEY", 0)); + ASSERT_TRUE(elements_match(&db, "GP4_LPUSHX_KEY", {})); + + // ***************** Group 5 Test ***************** + std::vector gp5_nodes{"o", "o", "o"}; + s = db.LPush("GP5_LPUSHX_KEY", gp5_nodes, &num); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(gp5_nodes.size(), num); + ASSERT_TRUE(len_match(&db, "GP5_LPUSHX_KEY", gp5_nodes.size())); + ASSERT_TRUE(elements_match(&db, "GP5_LPUSHX_KEY", {"o", "o", "o"})); + + // LPushx multi key + // "y" -> "x" -> "o" -> "o" -> "o" + s = db.LPushx("GP5_LPUSHX_KEY", {"x", "y"}, &num); + gp5_nodes.insert(gp5_nodes.begin(), "x"); + gp5_nodes.insert(gp5_nodes.begin(), "y"); + + ASSERT_TRUE(s.ok()); + ASSERT_TRUE(len_match(&db, "GP5_LPUSHX_KEY", gp5_nodes.size())); + ASSERT_TRUE(elements_match(&db, "GP5_LPUSHX_KEY", gp5_nodes)); + + // ***************** Group 6 Test ***************** + std::vector gp6_nodes{"o", "o", "o"}; + s = db.LPush("GP6_LPUSHX_KEY", gp6_nodes, &num); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(gp6_nodes.size(), num); + ASSERT_TRUE(len_match(&db, "GP6_LPUSHX_KEY", gp6_nodes.size())); + ASSERT_TRUE(elements_match(&db, "GP6_LPUSHX_KEY", {"o", "o", "o"})); + + // LPushx empty key + s = db.LPushx("GP6_LPUSHX_KEY", {}, &num); + + ASSERT_TRUE(s.ok()); +} + +// LRange +TEST_F(ListsTest, LRangeTest) { // NOLINT + uint64_t num; + + // ***************** Group 1 Test ***************** + // " " -> "a" -> "t" -> " " + std::vector gp1_nodes1{" ", "a", "t", " "}; + s = db.RPush("GP1_LRANGE_KEY", gp1_nodes1, &num); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(gp1_nodes1.size(), num); + ASSERT_TRUE(len_match(&db, "GP1_LRANGE_KEY", gp1_nodes1.size())); + ASSERT_TRUE(elements_match(&db, "GP1_LRANGE_KEY", {" ", "a", "t", " "})); + + // "l" -> "i" -> "v" -> "e" -> " " -> "a" -> "t" -> " " + std::vector gp1_nodes2{"e", "v", "i", "l"}; + s = db.LPush("GP1_LRANGE_KEY", gp1_nodes2, &num); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(gp1_nodes1.size() + gp1_nodes2.size(), num); + ASSERT_TRUE(len_match(&db, "GP1_LRANGE_KEY", gp1_nodes1.size() + gp1_nodes2.size())); + ASSERT_TRUE(elements_match(&db, "GP1_LRANGE_KEY", {"l", "i", "v", "e", " ", "a", "t", " "})); + + // "l" -> "i" -> "v" -> "e" -> " " -> "a" -> "t" -> " " -> "p" -> "o" -> "m" -> "p" -> "e" -> "i" -> "i" + // 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 + // -15 -14 -13 -12 -11 -10 -9 -8 -7 -6 -5 -4 -3 -2 -1 + std::vector gp1_nodes3{"p", "o", "m", "p", "e", "i", "i"}; + s = db.RPush("GP1_LRANGE_KEY", gp1_nodes3, &num); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(gp1_nodes1.size() + gp1_nodes2.size() + gp1_nodes3.size(), num); + ASSERT_TRUE(len_match(&db, "GP1_LRANGE_KEY", gp1_nodes1.size() + gp1_nodes2.size() + gp1_nodes3.size())); + ASSERT_TRUE(elements_match(&db, "GP1_LRANGE_KEY", + {"l", "i", "v", "e", " ", "a", "t", " ", "p", "o", "m", "p", "e", "i", "i"})); + + std::vector gp1_range_nodes; + s = db.LRange("GP1_LRANGE_KEY", 0, -1, &gp1_range_nodes); + ASSERT_TRUE(s.ok()); + ASSERT_TRUE( + elements_match(gp1_range_nodes, {"l", "i", "v", "e", " ", "a", "t", " ", "p", "o", "m", "p", "e", "i", "i"})); + + gp1_range_nodes.clear(); + s = db.LRange("GP1_LRANGE_KEY", 0, 14, &gp1_range_nodes); + ASSERT_TRUE(s.ok()); + ASSERT_TRUE( + elements_match(gp1_range_nodes, {"l", "i", "v", "e", " ", "a", "t", " ", "p", "o", "m", "p", "e", "i", "i"})); + + gp1_range_nodes.clear(); + s = 
db.LRange("GP1_LRANGE_KEY", -15, -1, &gp1_range_nodes); + ASSERT_TRUE(s.ok()); + ASSERT_TRUE( + elements_match(gp1_range_nodes, {"l", "i", "v", "e", " ", "a", "t", " ", "p", "o", "m", "p", "e", "i", "i"})); + + gp1_range_nodes.clear(); + s = db.LRange("GP1_LRANGE_KEY", 0, 100, &gp1_range_nodes); + ASSERT_TRUE(s.ok()); + ASSERT_TRUE( + elements_match(gp1_range_nodes, {"l", "i", "v", "e", " ", "a", "t", " ", "p", "o", "m", "p", "e", "i", "i"})); + + gp1_range_nodes.clear(); + s = db.LRange("GP1_LRANGE_KEY", -100, -1, &gp1_range_nodes); + ASSERT_TRUE(s.ok()); + ASSERT_TRUE( + elements_match(gp1_range_nodes, {"l", "i", "v", "e", " ", "a", "t", " ", "p", "o", "m", "p", "e", "i", "i"})); + + gp1_range_nodes.clear(); + s = db.LRange("GP1_LRANGE_KEY", 5, 6, &gp1_range_nodes); + ASSERT_TRUE(s.ok()); + ASSERT_TRUE(elements_match(gp1_range_nodes, {"a", "t"})); + + gp1_range_nodes.clear(); + s = db.LRange("GP1_LRANGE_KEY", -10, -9, &gp1_range_nodes); + ASSERT_TRUE(s.ok()); + ASSERT_TRUE(elements_match(gp1_range_nodes, {"a", "t"})); + + gp1_range_nodes.clear(); + s = db.LRange("GP1_LRANGE_KEY", -10, 6, &gp1_range_nodes); + ASSERT_TRUE(s.ok()); + ASSERT_TRUE(elements_match(gp1_range_nodes, {"a", "t"})); + + gp1_range_nodes.clear(); + s = db.LRange("GP1_LRANGE_KEY", -15, 6, &gp1_range_nodes); + ASSERT_TRUE(s.ok()); + ASSERT_TRUE(elements_match(gp1_range_nodes, {"l", "i", "v", "e", " ", "a", "t"})); + + gp1_range_nodes.clear(); + s = db.LRange("GP1_LRANGE_KEY", -100, 6, &gp1_range_nodes); + ASSERT_TRUE(s.ok()); + ASSERT_TRUE(elements_match(gp1_range_nodes, {"l", "i", "v", "e", " ", "a", "t"})); + + gp1_range_nodes.clear(); + s = db.LRange("GP1_LRANGE_KEY", -15, -9, &gp1_range_nodes); + ASSERT_TRUE(s.ok()); + ASSERT_TRUE(elements_match(gp1_range_nodes, {"l", "i", "v", "e", " ", "a", "t"})); + + gp1_range_nodes.clear(); + s = db.LRange("GP1_LRANGE_KEY", 8, 14, &gp1_range_nodes); + ASSERT_TRUE(s.ok()); + ASSERT_TRUE(elements_match(gp1_range_nodes, {"p", "o", "m", "p", "e", "i", "i"})); + + gp1_range_nodes.clear(); + s = db.LRange("GP1_LRANGE_KEY", -7, 14, &gp1_range_nodes); + ASSERT_TRUE(s.ok()); + ASSERT_TRUE(elements_match(gp1_range_nodes, {"p", "o", "m", "p", "e", "i", "i"})); + + gp1_range_nodes.clear(); + s = db.LRange("GP1_LRANGE_KEY", -7, -1, &gp1_range_nodes); + ASSERT_TRUE(s.ok()); + ASSERT_TRUE(elements_match(gp1_range_nodes, {"p", "o", "m", "p", "e", "i", "i"})); + + gp1_range_nodes.clear(); + s = db.LRange("GP1_LRANGE_KEY", 8, 100, &gp1_range_nodes); + ASSERT_TRUE(s.ok()); + ASSERT_TRUE(elements_match(gp1_range_nodes, {"p", "o", "m", "p", "e", "i", "i"})); + + gp1_range_nodes.clear(); + s = db.LRange("GP1_LRANGE_KEY", -100, -50, &gp1_range_nodes); + ASSERT_TRUE(s.ok()); + ASSERT_TRUE(elements_match(gp1_range_nodes, {})); + + gp1_range_nodes.clear(); + s = db.LRange("GP1_LRANGE_KEY", -100, 0, &gp1_range_nodes); + ASSERT_TRUE(s.ok()); + ASSERT_TRUE(elements_match(gp1_range_nodes, {"l"})); + + gp1_range_nodes.clear(); + s = db.LRange("GP1_LRANGE_KEY", -100, -15, &gp1_range_nodes); + ASSERT_TRUE(s.ok()); + ASSERT_TRUE(elements_match(gp1_range_nodes, {"l"})); + + gp1_range_nodes.clear(); + s = db.LRange("GP1_LRANGE_KEY", 15, 100, &gp1_range_nodes); + ASSERT_TRUE(s.ok()); + ASSERT_TRUE(elements_match(gp1_range_nodes, {})); + + gp1_range_nodes.clear(); + s = db.LRange("GP1_LRANGE_KEY", 14, 100, &gp1_range_nodes); + ASSERT_TRUE(s.ok()); + ASSERT_TRUE(elements_match(gp1_range_nodes, {"i"})); + + gp1_range_nodes.clear(); + s = db.LRange("GP1_LRANGE_KEY", -1, 100, &gp1_range_nodes); + ASSERT_TRUE(s.ok()); + 
ASSERT_TRUE(elements_match(gp1_range_nodes, {"i"})); + + // ***************** Group 2 Test ***************** + // "a" + // 0 + // -1 + std::vector<std::string> gp2_nodes{"a"}; + s = db.RPush("GP2_LRANGE_KEY", gp2_nodes, &num); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(gp2_nodes.size(), num); + ASSERT_TRUE(len_match(&db, "GP2_LRANGE_KEY", gp2_nodes.size())); + ASSERT_TRUE(elements_match(&db, "GP2_LRANGE_KEY", {"a"})); + + std::vector<std::string> gp2_range_nodes; + s = db.LRange("GP2_LRANGE_KEY", 0, 0, &gp2_range_nodes); + ASSERT_TRUE(s.ok()); + ASSERT_TRUE(elements_match(gp2_range_nodes, {"a"})); + + gp2_range_nodes.clear(); + s = db.LRange("GP2_LRANGE_KEY", 0, -1, &gp2_range_nodes); + ASSERT_TRUE(s.ok()); + ASSERT_TRUE(elements_match(gp2_range_nodes, {"a"})); + + gp2_range_nodes.clear(); + s = db.LRange("GP2_LRANGE_KEY", -1, -1, &gp2_range_nodes); + ASSERT_TRUE(s.ok()); + ASSERT_TRUE(elements_match(gp2_range_nodes, {"a"})); + + gp2_range_nodes.clear(); + s = db.LRange("GP2_LRANGE_KEY", -100, 0, &gp2_range_nodes); + ASSERT_TRUE(s.ok()); + ASSERT_TRUE(elements_match(gp2_range_nodes, {"a"})); + + gp2_range_nodes.clear(); + s = db.LRange("GP2_LRANGE_KEY", -100, -1, &gp2_range_nodes); + ASSERT_TRUE(s.ok()); + ASSERT_TRUE(elements_match(gp2_range_nodes, {"a"})); + + gp2_range_nodes.clear(); + s = db.LRange("GP2_LRANGE_KEY", 0, 100, &gp2_range_nodes); + ASSERT_TRUE(s.ok()); + ASSERT_TRUE(elements_match(gp2_range_nodes, {"a"})); + + gp2_range_nodes.clear(); + s = db.LRange("GP2_LRANGE_KEY", -1, 100, &gp2_range_nodes); + ASSERT_TRUE(s.ok()); + ASSERT_TRUE(elements_match(gp2_range_nodes, {"a"})); + + gp2_range_nodes.clear(); + s = db.LRange("GP2_LRANGE_KEY", -100, 100, &gp2_range_nodes); + ASSERT_TRUE(s.ok()); + ASSERT_TRUE(elements_match(gp2_range_nodes, {"a"})); + + gp2_range_nodes.clear(); + s = db.LRange("GP2_LRANGE_KEY", -10, -2, &gp2_range_nodes); + ASSERT_TRUE(s.ok()); + ASSERT_TRUE(elements_match(gp2_range_nodes, {})); + + gp2_range_nodes.clear(); + s = db.LRange("GP2_LRANGE_KEY", 1, 2, &gp2_range_nodes); + ASSERT_TRUE(s.ok()); + ASSERT_TRUE(elements_match(gp2_range_nodes, {})); + + // ***************** Group 3 Test ***************** + // LRange not exist key + std::vector<std::string> gp3_range_nodes; + s = db.LRange("GP3_LRANGE_KEY", 1, 5, &gp3_range_nodes); + ASSERT_TRUE(s.IsNotFound()); + ASSERT_TRUE(elements_match(gp3_range_nodes, {})); + + // ***************** Group 4 Test ***************** + // "n" -> "o" -> "w" + // 0 1 2 + // -3 -2 -1 + // LRange timeout key + std::vector<std::string> gp4_nodes{"n", "o", "w"}; + s = db.RPush("GP4_LRANGE_KEY", gp4_nodes, &num); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(gp4_nodes.size(), num); + ASSERT_TRUE(len_match(&db, "GP4_LRANGE_KEY", gp4_nodes.size())); + ASSERT_TRUE(elements_match(&db, "GP4_LRANGE_KEY", {"n", "o", "w"})); + ASSERT_TRUE(make_expired(&db, "GP4_LRANGE_KEY")); + + std::vector<std::string> gp4_range_nodes; + s = db.LRange("GP4_LRANGE_KEY", 0, 2, &gp4_range_nodes); + ASSERT_TRUE(s.IsNotFound()); + ASSERT_TRUE(elements_match(gp4_range_nodes, {})); + + // ***************** Group 5 Test ***************** + // "t" -> "o" -> "u" -> "r" + // 0 1 2 3 + // -4 -3 -2 -1 + // LRange has been deleted key + std::vector<std::string> gp5_nodes{"t", "o", "u", "r"}; + s = db.RPush("GP5_LRANGE_KEY", gp5_nodes, &num); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(gp5_nodes.size(), num); + ASSERT_TRUE(len_match(&db, "GP5_LRANGE_KEY", gp5_nodes.size())); + ASSERT_TRUE(elements_match(&db, "GP5_LRANGE_KEY", {"t", "o", "u", "r"})); + ASSERT_TRUE(make_expired(&db, "GP5_LRANGE_KEY")); + + // Delete the key + std::vector<std::string> del_keys = {"GP5_LRANGE_KEY"}; + std::map<storage::DataType, storage::Status> type_status; + db.Del(del_keys); + ASSERT_TRUE(type_status[storage::DataType::kLists].ok()); + + std::vector<std::string> gp5_range_nodes; + s = db.LRange("GP5_LRANGE_KEY", 0, 2, &gp5_range_nodes); + ASSERT_TRUE(s.IsNotFound()); + ASSERT_TRUE(elements_match(gp5_range_nodes, {})); +}
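The LRange groups above fix the index rules: negative offsets count from the tail, out-of-range bounds are clamped into [0, len-1], a start beyond the end (or beyond stop after clamping) yields an empty vector with Status::OK, and only a missing or expired key produces NotFound. Compactly (key names illustrative):

    std::vector<std::string> out;
    db.LRange("DEMO_KEY", -100, 100, &out);  // clamped to the whole list
    out.clear();
    db.LRange("DEMO_KEY", 5, 3, &out);       // crossed range: ok, out == {}
    out.clear();
    db.LRange("NO_SUCH_KEY", 0, -1, &out);   // NotFound, out == {}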
+
+  // ***************** Group 5 Test *****************
+  // "t" -> "o" -> "u" -> "r"
+  //  0     1      2      3
+  // -4    -3     -2     -1
+  // LRange has been deleted key
+  std::vector<std::string> gp5_nodes{"t", "o", "u", "r"};
+  s = db.RPush("GP5_LRANGE_KEY", gp5_nodes, &num);
+  ASSERT_TRUE(s.ok());
+  ASSERT_EQ(gp5_nodes.size(), num);
+  ASSERT_TRUE(len_match(&db, "GP5_LRANGE_KEY", gp5_nodes.size()));
+  ASSERT_TRUE(elements_match(&db, "GP5_LRANGE_KEY", {"t", "o", "u", "r"}));
+  ASSERT_TRUE(make_expired(&db, "GP5_LRANGE_KEY"));
+
+  // Delete the key
+  std::vector<std::string> del_keys = {"GP5_LRANGE_KEY"};
+  std::map<storage::DataType, rocksdb::Status> type_status;
+  db.Del(del_keys);
+  ASSERT_TRUE(type_status[storage::DataType::kLists].ok());
+
+  std::vector<std::string> gp5_range_nodes;
+  s = db.LRange("GP5_LRANGE_KEY", 0, 2, &gp5_range_nodes);
+  ASSERT_TRUE(s.IsNotFound());
+  ASSERT_TRUE(elements_match(gp5_range_nodes, {}));
+}
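+
+// LRem(key, count, value) follows Redis LREM semantics: count > 0 removes up
+// to count matches scanning head to tail, count < 0 scans from the tail, and
+// count == 0 removes every occurrence; the groups below cover each case.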
db.LRem("GP5_LREM_KEY", 1, "x", &num); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(num, 1); + ASSERT_TRUE(len_match(&db, "GP5_LREM_KEY", 1)); + ASSERT_TRUE(elements_match(&db, "GP5_LREM_KEY", {"o"})); + + // ***************** Group 6 Test ***************** + // "o" -> "x" + // 0 1 + // -2 -1 + std::vector gp6_nodes{"o", "x"}; + s = db.RPush("GP6_LREM_KEY", gp6_nodes, &num); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(gp6_nodes.size(), num); + ASSERT_TRUE(len_match(&db, "GP6_LREM_KEY", gp6_nodes.size())); + ASSERT_TRUE(elements_match(&db, "GP6_LREM_KEY", {"o", "x"})); + + s = db.LRem("GP6_LREM_KEY", 0, "x", &num); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(num, 1); + ASSERT_TRUE(len_match(&db, "GP6_LREM_KEY", 1)); + ASSERT_TRUE(elements_match(&db, "GP6_LREM_KEY", {"o"})); + + // ***************** Group 7 Test ***************** + // "o" -> "x" -> "o" -> "o" -> "x" -> "o" -> "x" -> "o" -> "o" -> "x" + // 0 1 2 3 4 5 6 7 8 9 + // -1 -2 -3 -4 -5 -6 -7 -8 -9 -10 + std::vector gp7_nodes{"o", "x", "o", "o", "x", "o", "x", "o", "o", "x"}; + s = db.RPush("GP7_LREM_KEY", gp7_nodes, &num); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(gp7_nodes.size(), num); + ASSERT_TRUE(len_match(&db, "GP7_LREM_KEY", gp7_nodes.size())); + ASSERT_TRUE(elements_match(&db, "GP7_LREM_KEY", {"o", "x", "o", "o", "x", "o", "x", "o", "o", "x"})); + + s = db.LRem("GP7_LREM_KEY", 0, "x", &num); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(num, 4); + ASSERT_TRUE(len_match(&db, "GP7_LREM_KEY", 6)); + ASSERT_TRUE(elements_match(&db, "GP7_LREM_KEY", {"o", "o", "o", "o", "o", "o"})); + + // ***************** Group 8 Test ***************** + // "o" -> "x" -> "o" -> "o" -> "x" -> "o" -> "x" -> "o" -> "o" -> "x" + // 0 1 2 3 4 5 6 7 8 9 + // -1 -2 -3 -4 -5 -6 -7 -8 -9 -10 + std::vector gp8_nodes{"o", "x", "o", "o", "x", "o", "x", "o", "o", "x"}; + s = db.RPush("GP8_LREM_KEY", gp8_nodes, &num); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(gp8_nodes.size(), num); + ASSERT_TRUE(len_match(&db, "GP8_LREM_KEY", gp8_nodes.size())); + ASSERT_TRUE(elements_match(&db, "GP8_LREM_KEY", {"o", "x", "o", "o", "x", "o", "x", "o", "o", "x"})); + + s = db.LRem("GP8_LREM_KEY", -10, "x", &num); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(num, 4); + ASSERT_TRUE(len_match(&db, "GP8_LREM_KEY", 6)); + ASSERT_TRUE(elements_match(&db, "GP8_LREM_KEY", {"o", "o", "o", "o", "o", "o"})); + + // ***************** Group 9 Test ***************** + // "o" -> "x" -> "o" -> "o" -> "x" -> "o" -> "x" -> "o" -> "o" -> "x" + // 0 1 2 3 4 5 6 7 8 9 + // -1 -2 -3 -4 -5 -6 -7 -8 -9 -10 + std::vector gp9_nodes{"o", "x", "o", "o", "x", "o", "x", "o", "o", "x"}; + s = db.RPush("GP9_LREM_KEY", gp9_nodes, &num); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(gp9_nodes.size(), num); + ASSERT_TRUE(len_match(&db, "GP9_LREM_KEY", gp9_nodes.size())); + ASSERT_TRUE(elements_match(&db, "GP9_LREM_KEY", {"o", "x", "o", "o", "x", "o", "x", "o", "o", "x"})); + + s = db.LRem("GP9_LREM_KEY", 10, "x", &num); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(num, 4); + ASSERT_TRUE(len_match(&db, "GP9_LREM_KEY", 6)); + ASSERT_TRUE(elements_match(&db, "GP9_LREM_KEY", {"o", "o", "o", "o", "o", "o"})); + + // ***************** Group 10 Test ***************** + // "o" -> "x" -> "o" -> "o" -> "x" -> "o" -> "x" -> "o" -> "o" -> "x" + // 0 1 2 3 4 5 6 7 8 9 + // -1 -2 -3 -4 -5 -6 -7 -8 -9 -10 + std::vector gp10_nodes{"o", "x", "o", "o", "x", "o", "x", "o", "o", "x"}; + s = db.RPush("GP10_LREM_KEY", gp10_nodes, &num); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(gp10_nodes.size(), num); + ASSERT_TRUE(len_match(&db, "GP10_LREM_KEY", gp10_nodes.size())); + ASSERT_TRUE(elements_match(&db, "GP10_LREM_KEY", 
{"o", "x", "o", "o", "x", "o", "x", "o", "o", "x"})); + + s = db.LRem("GP10_LREM_KEY", 1, "x", &num); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(num, 1); + ASSERT_TRUE(len_match(&db, "GP10_LREM_KEY", 9)); + ASSERT_TRUE(elements_match(&db, "GP10_LREM_KEY", {"o", "o", "o", "x", "o", "x", "o", "o", "x"})); + + // ***************** Group 11 Test ***************** + // "o" -> "x" -> "o" -> "o" -> "x" -> "o" -> "x" -> "o" -> "o" -> "x" + // 0 1 2 3 4 5 6 7 8 9 + // -1 -2 -3 -4 -5 -6 -7 -8 -9 -10 + std::vector gp11_nodes{"o", "x", "o", "o", "x", "o", "x", "o", "o", "x"}; + s = db.RPush("GP11_LREM_KEY", gp11_nodes, &num); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(gp11_nodes.size(), num); + ASSERT_TRUE(len_match(&db, "GP11_LREM_KEY", gp11_nodes.size())); + ASSERT_TRUE(elements_match(&db, "GP11_LREM_KEY", {"o", "x", "o", "o", "x", "o", "x", "o", "o", "x"})); + + s = db.LRem("GP11_LREM_KEY", 3, "x", &num); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(num, 3); + ASSERT_TRUE(len_match(&db, "GP11_LREM_KEY", 7)); + ASSERT_TRUE(elements_match(&db, "GP11_LREM_KEY", {"o", "o", "o", "o", "o", "o", "x"})); + + // ***************** Group 12 Test ***************** + // "o" -> "x" -> "o" -> "o" -> "x" -> "o" -> "x" -> "o" -> "o" -> "x" + // 0 1 2 3 4 5 6 7 8 9 + // -1 -2 -3 -4 -5 -6 -7 -8 -9 -10 + std::vector gp12_nodes{"o", "x", "o", "o", "x", "o", "x", "o", "o", "x"}; + s = db.RPush("GP12_LREM_KEY", gp12_nodes, &num); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(gp12_nodes.size(), num); + ASSERT_TRUE(len_match(&db, "GP12_LREM_KEY", gp12_nodes.size())); + ASSERT_TRUE(elements_match(&db, "GP12_LREM_KEY", {"o", "x", "o", "o", "x", "o", "x", "o", "o", "x"})); + + s = db.LRem("GP12_LREM_KEY", 4, "x", &num); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(num, 4); + ASSERT_TRUE(len_match(&db, "GP12_LREM_KEY", 6)); + ASSERT_TRUE(elements_match(&db, "GP12_LREM_KEY", {"o", "o", "o", "o", "o", "o"})); + + // ***************** Group 13 Test ***************** + // "o" -> "x" -> "o" -> "o" -> "x" -> "o" -> "x" -> "o" -> "o" -> "x" + // 0 1 2 3 4 5 6 7 8 9 + // -1 -2 -3 -4 -5 -6 -7 -8 -9 -10 + std::vector gp13_nodes{"o", "x", "o", "o", "x", "o", "x", "o", "o", "x"}; + s = db.RPush("GP13_LREM_KEY", gp13_nodes, &num); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(gp13_nodes.size(), num); + ASSERT_TRUE(len_match(&db, "GP13_LREM_KEY", gp13_nodes.size())); + ASSERT_TRUE(elements_match(&db, "GP13_LREM_KEY", {"o", "x", "o", "o", "x", "o", "x", "o", "o", "x"})); + + s = db.LRem("GP13_LREM_KEY", -1, "x", &num); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(num, 1); + ASSERT_TRUE(len_match(&db, "GP13_LREM_KEY", 9)); + ASSERT_TRUE(elements_match(&db, "GP13_LREM_KEY", {"o", "x", "o", "o", "x", "o", "x", "o", "o"})); + + // ***************** Group 14 Test ***************** + // "o" -> "x" -> "o" -> "o" -> "x" -> "o" -> "x" -> "o" -> "o" -> "x" + // 0 1 2 3 4 5 6 7 8 9 + // -1 -2 -3 -4 -5 -6 -7 -8 -9 -10 + std::vector gp14_nodes{"o", "x", "o", "o", "x", "o", "x", "o", "o", "x"}; + s = db.RPush("GP14_LREM_KEY", gp14_nodes, &num); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(gp14_nodes.size(), num); + ASSERT_TRUE(len_match(&db, "GP14_LREM_KEY", gp14_nodes.size())); + ASSERT_TRUE(elements_match(&db, "GP14_LREM_KEY", {"o", "x", "o", "o", "x", "o", "x", "o", "o", "x"})); + + s = db.LRem("GP14_LREM_KEY", -2, "x", &num); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(num, 2); + ASSERT_TRUE(len_match(&db, "GP14_LREM_KEY", 8)); + ASSERT_TRUE(elements_match(&db, "GP14_LREM_KEY", {"o", "x", "o", "o", "x", "o", "o", "o"})); + + // ***************** Group 15 Test ***************** + // "o" -> "x" -> "o" -> "o" -> "x" -> "o" -> "x" -> "o" -> 
"o" -> "x" + // 0 1 2 3 4 5 6 7 8 9 + // -1 -2 -3 -4 -5 -6 -7 -8 -9 -10 + std::vector gp15_nodes{"o", "x", "o", "o", "x", "o", "x", "o", "o", "x"}; + s = db.RPush("GP15_LREM_KEY", gp15_nodes, &num); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(gp15_nodes.size(), num); + ASSERT_TRUE(len_match(&db, "GP15_LREM_KEY", gp14_nodes.size())); + ASSERT_TRUE(elements_match(&db, "GP15_LREM_KEY", {"o", "x", "o", "o", "x", "o", "x", "o", "o", "x"})); + + s = db.LRem("GP15_LREM_KEY", -3, "x", &num); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(num, 3); + ASSERT_TRUE(len_match(&db, "GP15_LREM_KEY", 7)); + ASSERT_TRUE(elements_match(&db, "GP15_LREM_KEY", {"o", "x", "o", "o", "o", "o", "o"})); + + // ***************** Group 16 Test ***************** + // "o" -> "x" -> "x" -> "x" -> "x" -> "o" + // 0 1 2 3 4 5 + // -6 -5 -4 -3 -2 -1 + std::vector gp16_nodes{"o", "x", "x", "x", "x", "o"}; + s = db.RPush("GP16_LREM_KEY", gp16_nodes, &num); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(gp16_nodes.size(), num); + ASSERT_TRUE(len_match(&db, "GP16_LREM_KEY", gp16_nodes.size())); + ASSERT_TRUE(elements_match(&db, "GP16_LREM_KEY", {"o", "x", "x", "x", "x", "o"})); + + s = db.LRem("GP16_LREM_KEY", -2, "x", &num); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(num, 2); + ASSERT_TRUE(len_match(&db, "GP16_LREM_KEY", 4)); + ASSERT_TRUE(elements_match(&db, "GP16_LREM_KEY", {"o", "x", "x", "o"})); + + // ***************** Group 17 Test ***************** + // "o" -> "x" -> "x" -> "x" -> "x" -> "o" + // 0 1 2 3 4 5 + // -6 -5 -4 -3 -2 -1 + std::vector gp17_nodes{"o", "x", "x", "x", "x", "o"}; + s = db.RPush("GP17_LREM_KEY", gp17_nodes, &num); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(gp17_nodes.size(), num); + ASSERT_TRUE(len_match(&db, "GP17_LREM_KEY", gp17_nodes.size())); + ASSERT_TRUE(elements_match(&db, "GP17_LREM_KEY", {"o", "x", "x", "x", "x", "o"})); + + s = db.LRem("GP17_LREM_KEY", 2, "x", &num); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(num, 2); + ASSERT_TRUE(len_match(&db, "GP17_LREM_KEY", 4)); + ASSERT_TRUE(elements_match(&db, "GP17_LREM_KEY", {"o", "x", "x", "o"})); + + // ***************** Group 18 Test ***************** + // "o" -> "x" -> "x" -> "x" -> "x" -> "o" + // 0 1 2 3 4 5 + // -6 -5 -4 -3 -2 -1 + std::vector gp18_nodes{"o", "x", "x", "x", "x", "o"}; + s = db.RPush("GP18_LREM_KEY", gp18_nodes, &num); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(gp18_nodes.size(), num); + ASSERT_TRUE(len_match(&db, "GP18_LREM_KEY", gp18_nodes.size())); + ASSERT_TRUE(elements_match(&db, "GP18_LREM_KEY", {"o", "x", "x", "x", "x", "o"})); + + s = db.LRem("GP18_LREM_KEY", 3, "x", &num); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(num, 3); + ASSERT_TRUE(len_match(&db, "GP18_LREM_KEY", 3)); + ASSERT_TRUE(elements_match(&db, "GP18_LREM_KEY", {"o", "x", "o"})); + + // ***************** Group 19 Test ***************** + // "o" -> "x" -> "x" -> "x" -> "x" -> "o" + // 0 1 2 3 4 5 + // -6 -5 -4 -3 -2 -1 + std::vector gp19_nodes{"o", "x", "x", "x", "x", "o"}; + s = db.RPush("GP19_LREM_KEY", gp19_nodes, &num); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(gp19_nodes.size(), num); + ASSERT_TRUE(len_match(&db, "GP19_LREM_KEY", gp19_nodes.size())); + ASSERT_TRUE(elements_match(&db, "GP19_LREM_KEY", {"o", "x", "x", "x", "x", "o"})); + + s = db.LRem("GP19_LREM_KEY", 0, "x", &num); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(num, 4); + ASSERT_TRUE(len_match(&db, "GP19_LREM_KEY", 2)); + ASSERT_TRUE(elements_match(&db, "GP19_LREM_KEY", {"o", "o"})); + + // ***************** Group 20 Test ***************** + // "o" -> "x" -> "o" + // LRem timeout key + std::vector gp20_nodes{"o", "o", "o"}; + s = db.RPush("GP20_LREM_KEY", gp20_nodes, 
&num); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(gp20_nodes.size(), num); + ASSERT_TRUE(len_match(&db, "GP20_LREM_KEY", gp20_nodes.size())); + ASSERT_TRUE(elements_match(&db, "GP20_LREM_KEY", {"o", "o", "o"})); + ASSERT_TRUE(make_expired(&db, "GP20_LREM_KEY")); + + s = db.LRem("GP20_LREM_KEY", 0, "x", &num); + ASSERT_TRUE(s.IsNotFound()); + ASSERT_EQ(0, num); + ASSERT_TRUE(len_match(&db, "GP20_LREM_KEY", 0)); + ASSERT_TRUE(elements_match(&db, "GP20_LREM_KEY", {})); + + // ***************** Group 21 Test ***************** + // LRem not exist key + s = db.LRem("GP21_LREM_KEY", 0, "x", &num); + ASSERT_TRUE(s.IsNotFound()); + ASSERT_EQ(0, num); + ASSERT_TRUE(len_match(&db, "GP21_LREM_KEY", 0)); + ASSERT_TRUE(elements_match(&db, "GP21_LREM_KEY", {})); +} + +// LSet +TEST_F(ListsTest, LSetTest) { // NOLINT + int64_t ret; + uint64_t num; + + // ***************** Group 1 Test ***************** + // "o" -> "o" -> "o" -> "o" -> "o" + // 0 1 2 3 4 + // -5 -4 -3 -2 -1 + std::vector gp1_nodes1{"o", "o", "o", "o", "o"}; + s = db.LPush("GP1_LSET_KEY", gp1_nodes1, &num); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(gp1_nodes1.size(), num); + ASSERT_TRUE(len_match(&db, "GP1_LSET_KEY", gp1_nodes1.size())); + ASSERT_TRUE(elements_match(&db, "GP1_LSET_KEY", {"o", "o", "o", "o", "o"})); + + s = db.LSet("GP1_LSET_KEY", 0, "x"); + ASSERT_TRUE(s.ok()); + ASSERT_TRUE(elements_match(&db, "GP1_LSET_KEY", {"x", "o", "o", "o", "o"})); + + s = db.LSet("GP1_LSET_KEY", -3, "x"); + ASSERT_TRUE(s.ok()); + ASSERT_TRUE(elements_match(&db, "GP1_LSET_KEY", {"x", "o", "x", "o", "o"})); + + s = db.LSet("GP1_LSET_KEY", 5, "x"); + ASSERT_TRUE(s.IsCorruption()); + ASSERT_TRUE(elements_match(&db, "GP1_LSET_KEY", {"x", "o", "x", "o", "o"})); + + s = db.LSet("GP1_LSET_KEY", -100, "x"); + ASSERT_TRUE(s.IsCorruption()); + ASSERT_TRUE(elements_match(&db, "GP1_LSET_KEY", {"x", "o", "x", "o", "o"})); + + s = db.LSet("GP1_LSET_KEY", 0, "o"); + ASSERT_TRUE(s.ok()); + ASSERT_TRUE(elements_match(&db, "GP1_LSET_KEY", {"o", "o", "x", "o", "o"})); + + s = db.LSet("GP1_LSET_KEY", -1, "x"); + ASSERT_TRUE(s.ok()); + ASSERT_TRUE(elements_match(&db, "GP1_LSET_KEY", {"o", "o", "x", "o", "x"})); + + // "o" -> "o" -> "x" -> "o" -> "x" -> "o" -> "o" + // 0 1 2 3 4 5 6 + // -7 -6 -5 -4 -3 -2 -1 + std::vector gp1_nodes2{"o", "o"}; + s = db.RPush("GP1_LSET_KEY", gp1_nodes2, &num); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(gp1_nodes1.size() + gp1_nodes2.size(), num); + ASSERT_TRUE(len_match(&db, "GP1_LSET_KEY", gp1_nodes1.size() + gp1_nodes2.size())); + ASSERT_TRUE(elements_match(&db, "GP1_LSET_KEY", {"o", "o", "x", "o", "x", "o", "o"})); + + s = db.LSet("GP1_LSET_KEY", -2, "x"); + ASSERT_TRUE(s.ok()); + ASSERT_TRUE(elements_match(&db, "GP1_LSET_KEY", {"o", "o", "x", "o", "x", "x", "o"})); + + s = db.LSet("GP1_LSET_KEY", -7, "x"); + ASSERT_TRUE(s.ok()); + ASSERT_TRUE(elements_match(&db, "GP1_LSET_KEY", {"x", "o", "x", "o", "x", "x", "o"})); + + // ***************** Group 2 Test ***************** + // LSet expire key + std::vector gp2_nodes{"o", "o", "o"}; + s = db.LPush("GP2_LSET_KEY", gp2_nodes, &num); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(gp2_nodes.size(), num); + ASSERT_TRUE(len_match(&db, "GP2_LSET_KEY", gp2_nodes.size())); + ASSERT_TRUE(elements_match(&db, "GP2_LSET_KEY", {"o", "o", "o"})); + ASSERT_TRUE(make_expired(&db, "GP2_LSET_KEY")); + + s = db.LSet("GP2_LSET_KEY", 0, "x"); + ASSERT_TRUE(s.IsNotFound()); + + // ***************** Group 3 Test ***************** + // LSet not exist key + s = db.LSet("GP3_LSET_KEY", 0, "x"); + ASSERT_TRUE(s.IsNotFound()); + + // 
+  // ***************** Group 4 Test *****************
+  std::vector<std::string> gp4_nodes{"o"};
+  s = db.LPush("GP4_LSET_KEY", gp4_nodes, &num);
+  ASSERT_TRUE(s.ok());
+  ASSERT_EQ(gp4_nodes.size(), num);
+  ASSERT_TRUE(len_match(&db, "GP4_LSET_KEY", gp4_nodes.size()));
+  ASSERT_TRUE(elements_match(&db, "GP4_LSET_KEY", {"o"}));
+
+  s = db.LSet("GP4_LSET_KEY", 0, "x");
+  ASSERT_TRUE(s.ok());
+  ASSERT_TRUE(elements_match(&db, "GP4_LSET_KEY", {"x"}));
+
+  s = db.LSet("GP4_LSET_KEY", -1, "o");
+  ASSERT_TRUE(s.ok());
+  ASSERT_TRUE(elements_match(&db, "GP4_LSET_KEY", {"o"}));
+
+  s = db.LSet("GP4_LSET_KEY", -2, "x");
+  ASSERT_TRUE(s.IsCorruption());
+  ASSERT_TRUE(elements_match(&db, "GP4_LSET_KEY", {"o"}));
+}
+
+// LTrim
+TEST_F(ListsTest, LTrimTest) { // NOLINT
+  uint64_t num;
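+
+  // LTrim keeps the inclusive range [start, stop]; indices outside the list
+  // are clamped to its bounds, and start > stop leaves the list empty.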
+
+  // ***************** Group 1 Test *****************
+  // "a" -> "b" -> "c" -> "d" -> "e"
+  //  0     1      2      3      4
+  // -5    -4     -3     -2     -1
+  std::vector<std::string> gp1_nodes{"a", "b", "c", "d", "e"};
+  s = db.RPush("GP1_LTRIM_KEY", gp1_nodes, &num);
+  ASSERT_TRUE(s.ok());
+  ASSERT_EQ(gp1_nodes.size(), num);
+  ASSERT_TRUE(len_match(&db, "GP1_LTRIM_KEY", gp1_nodes.size()));
+  ASSERT_TRUE(elements_match(&db, "GP1_LTRIM_KEY", {"a", "b", "c", "d", "e"}));
+
+  s = db.LTrim("GP1_LTRIM_KEY", 0, 4);
+  ASSERT_TRUE(s.ok());
+  ASSERT_TRUE(len_match(&db, "GP1_LTRIM_KEY", gp1_nodes.size()));
+  ASSERT_TRUE(elements_match(&db, "GP1_LTRIM_KEY", {"a", "b", "c", "d", "e"}));
+
+  s = db.LTrim("GP1_LTRIM_KEY", 0, -1);
+  ASSERT_TRUE(s.ok());
+  ASSERT_TRUE(len_match(&db, "GP1_LTRIM_KEY", gp1_nodes.size()));
+  ASSERT_TRUE(elements_match(&db, "GP1_LTRIM_KEY", {"a", "b", "c", "d", "e"}));
+
+  s = db.LTrim("GP1_LTRIM_KEY", -5, 4);
+  ASSERT_TRUE(s.ok());
+  ASSERT_TRUE(len_match(&db, "GP1_LTRIM_KEY", gp1_nodes.size()));
+  ASSERT_TRUE(elements_match(&db, "GP1_LTRIM_KEY", {"a", "b", "c", "d", "e"}));
+
+  s = db.LTrim("GP1_LTRIM_KEY", -5, -1);
+  ASSERT_TRUE(s.ok());
+  ASSERT_TRUE(len_match(&db, "GP1_LTRIM_KEY", gp1_nodes.size()));
+  ASSERT_TRUE(elements_match(&db, "GP1_LTRIM_KEY", {"a", "b", "c", "d", "e"}));
+
+  // ***************** Group 2 Test *****************
+  // "a" -> "b" -> "c" -> "d" -> "e"
+  //  0     1      2      3      4
+  // -5    -4     -3     -2     -1
+  std::vector<std::string> gp2_nodes{"a", "b", "c", "d", "e"};
+  s = db.RPush("GP2_LTRIM_KEY", gp2_nodes, &num);
+  ASSERT_TRUE(s.ok());
+  ASSERT_EQ(gp2_nodes.size(), num);
+  ASSERT_TRUE(len_match(&db, "GP2_LTRIM_KEY", gp2_nodes.size()));
+  ASSERT_TRUE(elements_match(&db, "GP2_LTRIM_KEY", {"a", "b", "c", "d", "e"}));
+
+  s = db.LTrim("GP2_LTRIM_KEY", 0, 2);
+  ASSERT_TRUE(s.ok());
+  ASSERT_TRUE(len_match(&db, "GP2_LTRIM_KEY", 3));
+  ASSERT_TRUE(elements_match(&db, "GP2_LTRIM_KEY", {"a", "b", "c"}));
+
+  // ***************** Group 3 Test *****************
+  // "a" -> "b" -> "c" -> "d" -> "e"
+  //  0     1      2      3      4
+  // -5    -4     -3     -2     -1
+  std::vector<std::string> gp3_nodes{"a", "b", "c", "d", "e"};
+  s = db.RPush("GP3_LTRIM_KEY", gp3_nodes, &num);
+  ASSERT_TRUE(s.ok());
+  ASSERT_EQ(gp3_nodes.size(), num);
+  ASSERT_TRUE(len_match(&db, "GP3_LTRIM_KEY", gp3_nodes.size()));
+  ASSERT_TRUE(elements_match(&db, "GP3_LTRIM_KEY", {"a", "b", "c", "d", "e"}));
+
+  s = db.LTrim("GP3_LTRIM_KEY", 0, -3);
+  ASSERT_TRUE(s.ok());
+  ASSERT_TRUE(len_match(&db, "GP3_LTRIM_KEY", 3));
+  ASSERT_TRUE(elements_match(&db, "GP3_LTRIM_KEY", {"a", "b", "c"}));
+
+  // ***************** Group 4 Test *****************
+  // "a" -> "b" -> "c" -> "d" -> "e"
+  //  0     1      2      3      4
+  // -5    -4     -3     -2     -1
+  std::vector<std::string> gp4_nodes{"a", "b", "c", "d", "e"};
+  s = db.RPush("GP4_LTRIM_KEY", gp4_nodes, &num);
+  ASSERT_TRUE(s.ok());
+  ASSERT_EQ(gp4_nodes.size(), num);
+  ASSERT_TRUE(len_match(&db, "GP4_LTRIM_KEY", gp4_nodes.size()));
+  ASSERT_TRUE(elements_match(&db, "GP4_LTRIM_KEY", {"a", "b", "c", "d", "e"}));
+
+  s = db.LTrim("GP4_LTRIM_KEY", -5, 2);
+  ASSERT_TRUE(s.ok());
+  ASSERT_TRUE(len_match(&db, "GP4_LTRIM_KEY", 3));
+  ASSERT_TRUE(elements_match(&db, "GP4_LTRIM_KEY", {"a", "b", "c"}));
+
+  // ***************** Group 5 Test *****************
+  // "a" -> "b" -> "c" -> "d" -> "e"
+  //  0     1      2      3      4
+  // -5    -4     -3     -2     -1
+  std::vector<std::string> gp5_nodes{"a", "b", "c", "d", "e"};
+  s = db.RPush("GP5_LTRIM_KEY", gp5_nodes, &num);
+  ASSERT_TRUE(s.ok());
+  ASSERT_EQ(gp5_nodes.size(), num);
+  ASSERT_TRUE(len_match(&db, "GP5_LTRIM_KEY", gp5_nodes.size()));
+  ASSERT_TRUE(elements_match(&db, "GP5_LTRIM_KEY", {"a", "b", "c", "d", "e"}));
+
+  s = db.LTrim("GP5_LTRIM_KEY", -5, -3);
+  ASSERT_TRUE(s.ok());
+  ASSERT_TRUE(len_match(&db, "GP5_LTRIM_KEY", 3));
+  ASSERT_TRUE(elements_match(&db, "GP5_LTRIM_KEY", {"a", "b", "c"}));
+
+  // ***************** Group 6 Test *****************
+  // "a" -> "b" -> "c" -> "d" -> "e"
+  //  0     1      2      3      4
+  // -5    -4     -3     -2     -1
+  std::vector<std::string> gp6_nodes{"a", "b", "c", "d", "e"};
+  s = db.RPush("GP6_LTRIM_KEY", gp6_nodes, &num);
+  ASSERT_TRUE(s.ok());
+  ASSERT_EQ(gp6_nodes.size(), num);
+  ASSERT_TRUE(len_match(&db, "GP6_LTRIM_KEY", gp6_nodes.size()));
+  ASSERT_TRUE(elements_match(&db, "GP6_LTRIM_KEY", {"a", "b", "c", "d", "e"}));
+
+  s = db.LTrim("GP6_LTRIM_KEY", -100, 2);
+  ASSERT_TRUE(s.ok());
+  ASSERT_TRUE(len_match(&db, "GP6_LTRIM_KEY", 3));
+  ASSERT_TRUE(elements_match(&db, "GP6_LTRIM_KEY", {"a", "b", "c"}));
+
+  // ***************** Group 7 Test *****************
+  // "a" -> "b" -> "c" -> "d" -> "e"
+  //  0     1      2      3      4
+  // -5    -4     -3     -2     -1
+  std::vector<std::string> gp7_nodes{"a", "b", "c", "d", "e"};
+  s = db.RPush("GP7_LTRIM_KEY", gp7_nodes, &num);
+  ASSERT_TRUE(s.ok());
+  ASSERT_EQ(gp7_nodes.size(), num);
+  ASSERT_TRUE(len_match(&db, "GP7_LTRIM_KEY", gp7_nodes.size()));
+  ASSERT_TRUE(elements_match(&db, "GP7_LTRIM_KEY", {"a", "b", "c", "d", "e"}));
+
+  s = db.LTrim("GP7_LTRIM_KEY", -100, -3);
+  ASSERT_TRUE(s.ok());
+  ASSERT_TRUE(len_match(&db, "GP7_LTRIM_KEY", 3));
+  ASSERT_TRUE(elements_match(&db, "GP7_LTRIM_KEY", {"a", "b", "c"}));
+
+  // ***************** Group 8 Test *****************
+  // "a" -> "b" -> "c" -> "d" -> "e"
+  //  0     1      2      3      4
+  // -5    -4     -3     -2     -1
+  std::vector<std::string> gp8_nodes{"a", "b", "c", "d", "e"};
+  s = db.RPush("GP8_LTRIM_KEY", gp8_nodes, &num);
+  ASSERT_TRUE(s.ok());
+  ASSERT_EQ(gp8_nodes.size(), num);
+  ASSERT_TRUE(len_match(&db, "GP8_LTRIM_KEY", gp8_nodes.size()));
+  ASSERT_TRUE(elements_match(&db, "GP8_LTRIM_KEY", {"a", "b", "c", "d", "e"}));
+
+  s = db.LTrim("GP8_LTRIM_KEY", 1, 3);
+  ASSERT_TRUE(s.ok());
+  ASSERT_TRUE(len_match(&db, "GP8_LTRIM_KEY", 3));
+  ASSERT_TRUE(elements_match(&db, "GP8_LTRIM_KEY", {"b", "c", "d"}));
+
+  // ***************** Group 9 Test *****************
+  // "a" -> "b" -> "c" -> "d" -> "e"
+  //  0     1      2      3      4
+  // -5    -4     -3     -2     -1
+  std::vector<std::string> gp9_nodes{"a", "b", "c", "d", "e"};
+  s = db.RPush("GP9_LTRIM_KEY", gp9_nodes, &num);
+  ASSERT_TRUE(s.ok());
+  ASSERT_EQ(gp9_nodes.size(), num);
+  ASSERT_TRUE(len_match(&db, "GP9_LTRIM_KEY", gp9_nodes.size()));
+  ASSERT_TRUE(elements_match(&db, "GP9_LTRIM_KEY", {"a", "b", "c", "d", "e"}));
+
+  s = db.LTrim("GP9_LTRIM_KEY", 1, -2);
+  ASSERT_TRUE(s.ok());
+  ASSERT_TRUE(len_match(&db, "GP9_LTRIM_KEY", 3));
+  ASSERT_TRUE(elements_match(&db, "GP9_LTRIM_KEY", {"b", "c", "d"}));
+
+  // ***************** Group 10 Test *****************
+  // "a" -> "b" -> "c" -> "d" -> "e"
+  //  0     1      2      3      4
+  // -5    -4     -3     -2     -1
+  std::vector<std::string> gp10_nodes{"a", "b", "c", "d", "e"};
+  s = db.RPush("GP10_LTRIM_KEY", gp10_nodes, &num);
+  ASSERT_TRUE(s.ok());
+  ASSERT_EQ(gp10_nodes.size(), num);
+  ASSERT_TRUE(len_match(&db, "GP10_LTRIM_KEY", gp10_nodes.size()));
+  ASSERT_TRUE(elements_match(&db, "GP10_LTRIM_KEY", {"a", "b", "c", "d", "e"}));
+
+  s = db.LTrim("GP10_LTRIM_KEY", -4, 3);
+  ASSERT_TRUE(s.ok());
+  ASSERT_TRUE(len_match(&db, "GP10_LTRIM_KEY", 3));
+  ASSERT_TRUE(elements_match(&db, "GP10_LTRIM_KEY", {"b", "c", "d"}));
+
+  // ***************** Group 11 Test *****************
+  // "a" -> "b" -> "c" -> "d" -> "e"
+  //  0     1      2      3      4
+  // -5    -4     -3     -2     -1
+  std::vector<std::string> gp11_nodes{"a", "b", "c", "d", "e"};
+  s = db.RPush("GP11_LTRIM_KEY", gp11_nodes, &num);
+  ASSERT_TRUE(s.ok());
+  ASSERT_EQ(gp11_nodes.size(), num);
+  ASSERT_TRUE(len_match(&db, "GP11_LTRIM_KEY", gp11_nodes.size()));
+  ASSERT_TRUE(elements_match(&db, "GP11_LTRIM_KEY", {"a", "b", "c", "d", "e"}));
+
+  s = db.LTrim("GP11_LTRIM_KEY", -4, -2);
+  ASSERT_TRUE(s.ok());
+  ASSERT_TRUE(len_match(&db, "GP11_LTRIM_KEY", 3));
+  ASSERT_TRUE(elements_match(&db, "GP11_LTRIM_KEY", {"b", "c", "d"}));
+
+  // ***************** Group 12 Test *****************
+  // "a" -> "b" -> "c" -> "d" -> "e"
+  //  0     1      2      3      4
+  // -5    -4     -3     -2     -1
+  std::vector<std::string> gp12_nodes{"a", "b", "c", "d", "e"};
+  s = db.RPush("GP12_LTRIM_KEY", gp12_nodes, &num);
+  ASSERT_TRUE(s.ok());
+  ASSERT_EQ(gp12_nodes.size(), num);
+  ASSERT_TRUE(len_match(&db, "GP12_LTRIM_KEY", gp12_nodes.size()));
+  ASSERT_TRUE(elements_match(&db, "GP12_LTRIM_KEY", {"a", "b", "c", "d", "e"}));
+
+  s = db.LTrim("GP12_LTRIM_KEY", 2, 2);
+  ASSERT_TRUE(s.ok());
+  ASSERT_TRUE(len_match(&db, "GP12_LTRIM_KEY", 1));
+  ASSERT_TRUE(elements_match(&db, "GP12_LTRIM_KEY", {"c"}));
+
+  // ***************** Group 13 Test *****************
+  // "a" -> "b" -> "c" -> "d" -> "e"
+  //  0     1      2      3      4
+  // -5    -4     -3     -2     -1
+  std::vector<std::string> gp13_nodes{"a", "b", "c", "d", "e"};
+  s = db.RPush("GP13_LTRIM_KEY", gp13_nodes, &num);
+  ASSERT_TRUE(s.ok());
+  ASSERT_EQ(gp13_nodes.size(), num);
+  ASSERT_TRUE(len_match(&db, "GP13_LTRIM_KEY", gp13_nodes.size()));
+  ASSERT_TRUE(elements_match(&db, "GP13_LTRIM_KEY", {"a", "b", "c", "d", "e"}));
+
+  s = db.LTrim("GP13_LTRIM_KEY", 2, -3);
+  ASSERT_TRUE(s.ok());
+  ASSERT_TRUE(len_match(&db, "GP13_LTRIM_KEY", 1));
+  ASSERT_TRUE(elements_match(&db, "GP13_LTRIM_KEY", {"c"}));
+
+  // ***************** Group 14 Test *****************
+  // "a" -> "b" -> "c" -> "d" -> "e"
+  //  0     1      2      3      4
+  // -5    -4     -3     -2     -1
+  std::vector<std::string> gp14_nodes{"a", "b", "c", "d", "e"};
+  s = db.RPush("GP14_LTRIM_KEY", gp14_nodes, &num);
+  ASSERT_TRUE(s.ok());
+  ASSERT_EQ(gp14_nodes.size(), num);
+  ASSERT_TRUE(len_match(&db, "GP14_LTRIM_KEY", gp14_nodes.size()));
+  ASSERT_TRUE(elements_match(&db, "GP14_LTRIM_KEY", {"a", "b", "c", "d", "e"}));
+
+  s = db.LTrim("GP14_LTRIM_KEY", -3, -3);
+  ASSERT_TRUE(s.ok());
+  ASSERT_TRUE(len_match(&db, "GP14_LTRIM_KEY", 1));
+  ASSERT_TRUE(elements_match(&db, "GP14_LTRIM_KEY", {"c"}));
+
+  // ***************** Group 15 Test *****************
+  // "a" -> "b" -> "c" -> "d" -> "e"
+  //  0     1      2      3      4
+  // -5    -4     -3     -2     -1
+  std::vector<std::string> gp15_nodes{"a", "b", "c", "d", "e"};
+  s = db.RPush("GP15_LTRIM_KEY", gp15_nodes, &num);
+  ASSERT_TRUE(s.ok());
+  ASSERT_EQ(gp15_nodes.size(), num);
+  ASSERT_TRUE(len_match(&db, "GP15_LTRIM_KEY", gp15_nodes.size()));
+  ASSERT_TRUE(elements_match(&db, "GP15_LTRIM_KEY", {"a", "b", "c", "d", "e"}));
+
+  s = db.LTrim("GP15_LTRIM_KEY", 2, 4);
+  ASSERT_TRUE(s.ok());
+  ASSERT_TRUE(len_match(&db, "GP15_LTRIM_KEY", 3));
+  ASSERT_TRUE(elements_match(&db, "GP15_LTRIM_KEY", {"c", "d", "e"}));
+
+  // ***************** Group 16 Test *****************
+  // "a" -> "b" -> "c" -> "d" -> "e"
+  //  0     1      2      3      4
+  // -5    -4     -3     -2     -1
+  std::vector<std::string> gp16_nodes{"a", "b", "c", "d", "e"};
+  s = db.RPush("GP16_LTRIM_KEY", gp16_nodes, &num);
+  ASSERT_TRUE(s.ok());
+  ASSERT_EQ(gp16_nodes.size(), num);
+  ASSERT_TRUE(len_match(&db, "GP16_LTRIM_KEY", gp16_nodes.size()));
+  ASSERT_TRUE(elements_match(&db, "GP16_LTRIM_KEY", {"a", "b", "c", "d", "e"}));
+
+  s = db.LTrim("GP16_LTRIM_KEY", 2, -1);
+  ASSERT_TRUE(s.ok());
+  ASSERT_TRUE(len_match(&db, "GP16_LTRIM_KEY", 3));
+  ASSERT_TRUE(elements_match(&db, "GP16_LTRIM_KEY", {"c", "d", "e"}));
+
+  // ***************** Group 17 Test *****************
+  // "a" -> "b" -> "c" -> "d" -> "e"
+  //  0     1      2      3      4
+  // -5    -4     -3     -2     -1
+  std::vector<std::string> gp17_nodes{"a", "b", "c", "d", "e"};
+  s = db.RPush("GP17_LTRIM_KEY", gp17_nodes, &num);
+  ASSERT_TRUE(s.ok());
+  ASSERT_EQ(gp17_nodes.size(), num);
+  ASSERT_TRUE(len_match(&db, "GP17_LTRIM_KEY", gp17_nodes.size()));
+  ASSERT_TRUE(elements_match(&db, "GP17_LTRIM_KEY", {"a", "b", "c", "d", "e"}));
+
+  s = db.LTrim("GP17_LTRIM_KEY", -3, 4);
+  ASSERT_TRUE(s.ok());
+  ASSERT_TRUE(len_match(&db, "GP17_LTRIM_KEY", 3));
+  ASSERT_TRUE(elements_match(&db, "GP17_LTRIM_KEY", {"c", "d", "e"}));
+
+  // ***************** Group 18 Test *****************
+  // "a" -> "b" -> "c" -> "d" -> "e"
+  //  0     1      2      3      4
+  // -5    -4     -3     -2     -1
+  std::vector<std::string> gp18_nodes{"a", "b", "c", "d", "e"};
+  s = db.RPush("GP18_LTRIM_KEY", gp18_nodes, &num);
+  ASSERT_TRUE(s.ok());
+  ASSERT_EQ(gp18_nodes.size(), num);
+  ASSERT_TRUE(len_match(&db, "GP18_LTRIM_KEY", gp18_nodes.size()));
+  ASSERT_TRUE(elements_match(&db, "GP18_LTRIM_KEY", {"a", "b", "c", "d", "e"}));
+
+  s = db.LTrim("GP18_LTRIM_KEY", -3, -1);
+  ASSERT_TRUE(s.ok());
+  ASSERT_TRUE(len_match(&db, "GP18_LTRIM_KEY", 3));
+  ASSERT_TRUE(elements_match(&db, "GP18_LTRIM_KEY", {"c", "d", "e"}));
+
+  // ***************** Group 19 Test *****************
+  // "a" -> "b" -> "c" -> "d" -> "e"
+  //  0     1      2      3      4
+  // -5    -4     -3     -2     -1
+  std::vector<std::string> gp19_nodes{"a", "b", "c", "d", "e"};
+  s = db.RPush("GP19_LTRIM_KEY", gp19_nodes, &num);
+  ASSERT_TRUE(s.ok());
+  ASSERT_EQ(gp19_nodes.size(), num);
+  ASSERT_TRUE(len_match(&db, "GP19_LTRIM_KEY", gp19_nodes.size()));
+  ASSERT_TRUE(elements_match(&db, "GP19_LTRIM_KEY", {"a", "b", "c", "d", "e"}));
+
+  s = db.LTrim("GP19_LTRIM_KEY", -100, 100);
+  ASSERT_TRUE(s.ok());
+  ASSERT_TRUE(len_match(&db, "GP19_LTRIM_KEY", 5));
+  ASSERT_TRUE(elements_match(&db, "GP19_LTRIM_KEY", {"a", "b", "c", "d", "e"}));
+
+  // ***************** Group 20 Test *****************
+  // "a" -> "b" -> "c" -> "d" -> "e"
+  //  0     1      2      3      4
+  // -5    -4     -3     -2     -1
+  std::vector<std::string> gp20_nodes{"a", "b", "c", "d", "e"};
+  s = db.RPush("GP20_LTRIM_KEY", gp20_nodes, &num);
+  ASSERT_TRUE(s.ok());
+  ASSERT_EQ(gp20_nodes.size(), num);
+  ASSERT_TRUE(len_match(&db, "GP20_LTRIM_KEY", gp20_nodes.size()));
+  ASSERT_TRUE(elements_match(&db, "GP20_LTRIM_KEY", {"a", "b", "c", "d", "e"}));
+
+  s = db.LTrim("GP20_LTRIM_KEY", 0, 0);
+  ASSERT_TRUE(s.ok());
+  ASSERT_TRUE(len_match(&db, "GP20_LTRIM_KEY", 1));
+  ASSERT_TRUE(elements_match(&db, "GP20_LTRIM_KEY", {"a"}));
+
+  // ***************** Group 21 Test *****************
+  // "a" -> "b" -> "c" -> "d" -> "e"
+  //  0     1      2      3      4
+  // -5    -4     -3     -2     -1
+  std::vector<std::string> gp21_nodes{"a", "b", "c", "d", "e"};
+  s = db.RPush("GP21_LTRIM_KEY", gp21_nodes, &num);
+  ASSERT_TRUE(s.ok());
+  ASSERT_EQ(gp21_nodes.size(), num);
+  ASSERT_TRUE(len_match(&db, "GP21_LTRIM_KEY", gp21_nodes.size()));
+  ASSERT_TRUE(elements_match(&db, "GP21_LTRIM_KEY", {"a", "b", "c", "d", "e"}));
+
+  s = db.LTrim("GP21_LTRIM_KEY", -5, -5);
+  ASSERT_TRUE(s.ok());
+  ASSERT_TRUE(len_match(&db, "GP21_LTRIM_KEY", 1));
+  ASSERT_TRUE(elements_match(&db, "GP21_LTRIM_KEY", {"a"}));
+
+  // ***************** Group 22 Test *****************
+  // "a" -> "b" -> "c" -> "d" -> "e"
+  //  0     1      2      3      4
+  // -5    -4     -3     -2     -1
+  std::vector<std::string> gp22_nodes{"a", "b", "c", "d", "e"};
+  s = db.RPush("GP22_LTRIM_KEY", gp22_nodes, &num);
+  ASSERT_TRUE(s.ok());
+  ASSERT_EQ(gp22_nodes.size(), num);
+  ASSERT_TRUE(len_match(&db, "GP22_LTRIM_KEY", gp22_nodes.size()));
+  ASSERT_TRUE(elements_match(&db, "GP22_LTRIM_KEY", {"a", "b", "c", "d", "e"}));
+
+  s = db.LTrim("GP22_LTRIM_KEY", -100, 0);
+  ASSERT_TRUE(s.ok());
+  ASSERT_TRUE(len_match(&db, "GP22_LTRIM_KEY", 1));
+  ASSERT_TRUE(elements_match(&db, "GP22_LTRIM_KEY", {"a"}));
+
+  // ***************** Group 23 Test *****************
+  // "a" -> "b" -> "c" -> "d" -> "e"
+  //  0     1      2      3      4
+  // -5    -4     -3     -2     -1
+  std::vector<std::string> gp23_nodes{"a", "b", "c", "d", "e"};
+  s = db.RPush("GP23_LTRIM_KEY", gp23_nodes, &num);
+  ASSERT_TRUE(s.ok());
+  ASSERT_EQ(gp23_nodes.size(), num);
+  ASSERT_TRUE(len_match(&db, "GP23_LTRIM_KEY", gp23_nodes.size()));
+  ASSERT_TRUE(elements_match(&db, "GP23_LTRIM_KEY", {"a", "b", "c", "d", "e"}));
+
+  s = db.LTrim("GP23_LTRIM_KEY", -100, -5);
+  ASSERT_TRUE(s.ok());
+  ASSERT_TRUE(len_match(&db, "GP23_LTRIM_KEY", 1));
+  ASSERT_TRUE(elements_match(&db, "GP23_LTRIM_KEY", {"a"}));
+
+  // ***************** Group 24 Test *****************
+  // "a" -> "b" -> "c" -> "d" -> "e"
+  //  0     1      2      3      4
+  // -5    -4     -3     -2     -1
+  std::vector<std::string> gp24_nodes{"a", "b", "c", "d", "e"};
+  s = db.RPush("GP24_LTRIM_KEY", gp24_nodes, &num);
+  ASSERT_TRUE(s.ok());
+  ASSERT_EQ(gp24_nodes.size(), num);
+  ASSERT_TRUE(len_match(&db, "GP24_LTRIM_KEY", gp24_nodes.size()));
+  ASSERT_TRUE(elements_match(&db, "GP24_LTRIM_KEY", {"a", "b", "c", "d", "e"}));
+
+  s = db.LTrim("GP24_LTRIM_KEY", 3, 1);
+  ASSERT_TRUE(s.ok());
+  ASSERT_TRUE(len_match(&db, "GP24_LTRIM_KEY", 0));
+  ASSERT_TRUE(elements_match(&db, "GP24_LTRIM_KEY", {}));
+
+  // ***************** Group 25 Test *****************
+  // "a" -> "b" -> "c" -> "d" -> "e"
+  //  0     1      2      3      4
+  // -5    -4     -3     -2     -1
+  std::vector<std::string> gp25_nodes{"a", "b", "c", "d", "e"};
+  s = db.RPush("GP25_LTRIM_KEY", gp25_nodes, &num);
+  ASSERT_TRUE(s.ok());
+  ASSERT_EQ(gp25_nodes.size(), num);
+  ASSERT_TRUE(len_match(&db, "GP25_LTRIM_KEY", gp25_nodes.size()));
+  ASSERT_TRUE(elements_match(&db, "GP25_LTRIM_KEY", {"a", "b", "c", "d", "e"}));
+
+  s = db.LTrim("GP25_LTRIM_KEY", -100, -110);
+  ASSERT_TRUE(s.ok());
+  ASSERT_TRUE(len_match(&db, "GP25_LTRIM_KEY", 0));
+  ASSERT_TRUE(elements_match(&db, "GP25_LTRIM_KEY", {}));
+
+  // ***************** Group 26 Test *****************
+  // "a" -> "b" -> "c" -> "d" -> "e"
+  //  0     1      2      3      4
+  // -5    -4     -3     -2     -1
+  std::vector<std::string> gp26_nodes{"a", "b", "c", "d", "e"};
+  s = db.RPush("GP26_LTRIM_KEY", gp26_nodes, &num);
+  ASSERT_TRUE(s.ok());
+  ASSERT_EQ(gp26_nodes.size(), num);
+  ASSERT_TRUE(len_match(&db, "GP26_LTRIM_KEY", gp26_nodes.size()));
+  ASSERT_TRUE(elements_match(&db, "GP26_LTRIM_KEY", {"a", "b", "c", "d", "e"}));
+
+  s = db.LTrim("GP26_LTRIM_KEY", 110, 100);
+  ASSERT_TRUE(s.ok());
+  ASSERT_TRUE(len_match(&db, "GP26_LTRIM_KEY", 0));
+  ASSERT_TRUE(elements_match(&db, "GP26_LTRIM_KEY", {}));
+
+  // ***************** Group 27 Test *****************
+  // "a" -> "b"
+  //  0     1
+  // -2    -1
+  std::vector<std::string> gp27_nodes{"a", "b"};
+  s = db.RPush("GP27_LTRIM_KEY", gp27_nodes, &num);
+  ASSERT_TRUE(s.ok());
+  ASSERT_EQ(gp27_nodes.size(), num);
+  ASSERT_TRUE(len_match(&db, "GP27_LTRIM_KEY", gp27_nodes.size()));
+  ASSERT_TRUE(elements_match(&db, "GP27_LTRIM_KEY", {"a", "b"}));
+
+  s = db.LTrim("GP27_LTRIM_KEY", 0, 0);
+  ASSERT_TRUE(s.ok());
+  ASSERT_TRUE(len_match(&db, "GP27_LTRIM_KEY", 1));
+  ASSERT_TRUE(elements_match(&db, "GP27_LTRIM_KEY", {"a"}));
+
+  // ***************** Group 28 Test *****************
+  // "a" -> "b"
+  //  0     1
+  // -2    -1
+  std::vector<std::string> gp28_nodes{"a", "b"};
+  s = db.RPush("GP28_LTRIM_KEY", gp28_nodes, &num);
+  ASSERT_TRUE(s.ok());
+  ASSERT_EQ(gp28_nodes.size(), num);
+  ASSERT_TRUE(len_match(&db, "GP28_LTRIM_KEY", gp28_nodes.size()));
+  ASSERT_TRUE(elements_match(&db, "GP28_LTRIM_KEY", {"a", "b"}));
+
+  s = db.LTrim("GP28_LTRIM_KEY", 1, 1);
+  ASSERT_TRUE(s.ok());
+  ASSERT_TRUE(len_match(&db, "GP28_LTRIM_KEY", 1));
+  ASSERT_TRUE(elements_match(&db, "GP28_LTRIM_KEY", {"b"}));
+
+  // ***************** Group 29 Test *****************
+  // "a" -> "b"
+  //  0     1
+  // -2    -1
+  std::vector<std::string> gp29_nodes{"a", "b"};
+  s = db.RPush("GP29_LTRIM_KEY", gp29_nodes, &num);
+  ASSERT_TRUE(s.ok());
+  ASSERT_EQ(gp29_nodes.size(), num);
+  ASSERT_TRUE(len_match(&db, "GP29_LTRIM_KEY", gp29_nodes.size()));
+  ASSERT_TRUE(elements_match(&db, "GP29_LTRIM_KEY", {"a", "b"}));
+
+  s = db.LTrim("GP29_LTRIM_KEY", -100, 100);
+  ASSERT_TRUE(s.ok());
+  ASSERT_TRUE(len_match(&db, "GP29_LTRIM_KEY", 2));
+  ASSERT_TRUE(elements_match(&db, "GP29_LTRIM_KEY", {"a", "b"}));
+
+  // ***************** Group 30 Test *****************
+  // "a" -> "b"
+  //  0     1
+  // -2    -1
+  std::vector<std::string> gp30_nodes{"a", "b"};
+  s = db.RPush("GP30_LTRIM_KEY", gp30_nodes, &num);
+  ASSERT_TRUE(s.ok());
+  ASSERT_EQ(gp30_nodes.size(), num);
+  ASSERT_TRUE(len_match(&db, "GP30_LTRIM_KEY", gp30_nodes.size()));
+  ASSERT_TRUE(elements_match(&db, "GP30_LTRIM_KEY", {"a", "b"}));
+  ASSERT_TRUE(make_expired(&db, "GP30_LTRIM_KEY"));
+
+  s = db.LTrim("GP30_LTRIM_KEY", -100, 100);
+  ASSERT_TRUE(s.IsNotFound());
+}
+
+// RPop
+TEST_F(ListsTest, RPopTest) { // NOLINT
+  uint64_t num;
+  std::string element;
+  std::vector<std::string> elements;
+
+  // ***************** Group 1 Test *****************
+  // "a" -> "x" -> "l"
+  std::vector<std::string> gp1_nodes{"l", "x", "a"};
+  s = db.LPush("GP1_RPOP_KEY", gp1_nodes, &num);
+  ASSERT_TRUE(s.ok());
+  ASSERT_EQ(gp1_nodes.size(), num);
+  ASSERT_TRUE(len_match(&db, "GP1_RPOP_KEY", gp1_nodes.size()));
+  ASSERT_TRUE(elements_match(&db, "GP1_RPOP_KEY", {"a", "x", "l"}));
+
+  // "a" -> "x"
+  s = db.RPop("GP1_RPOP_KEY", 1, &elements);
+  ASSERT_TRUE(s.ok());
+  ASSERT_TRUE(elements_match(elements, {"l"}));
+  ASSERT_TRUE(len_match(&db, "GP1_RPOP_KEY", 2));
+  ASSERT_TRUE(elements_match(&db, "GP1_RPOP_KEY", {"a", "x"}));
+
+  // After popping the remaining two elements, the list is empty
+  s = db.RPop("GP1_RPOP_KEY", 1, &elements);
+  ASSERT_TRUE(s.ok());
+  ASSERT_TRUE(elements_match(elements, {"x"}));
+  s = db.RPop("GP1_RPOP_KEY", 1, &elements);
+  ASSERT_TRUE(s.ok());
+  ASSERT_TRUE(elements_match(elements, {"a"}));
+  ASSERT_TRUE(len_match(&db, "GP1_RPOP_KEY", 0));
+  ASSERT_TRUE(elements_match(&db, "GP1_RPOP_KEY", {}));
+
+  // LPop on the now-empty list also returns NotFound
+  s = db.LPop("GP1_RPOP_KEY", 1, &elements);
+  ASSERT_TRUE(s.IsNotFound());
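+
+  // RPop's second argument is the pop count; these cases pop one element
+  // from the tail at a time.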
ASSERT_TRUE(len_match(&db, "GP2_RPOP_KEY", gp2_nodes.size())); + ASSERT_TRUE(elements_match(&db, "GP2_RPOP_KEY", {"g", "r", "e", "p"})); + + ASSERT_TRUE(make_expired(&db, "GP2_RPOP_KEY")); + s = db.LPop("GP2_RPOP_KEY", 1, &elements); + ASSERT_TRUE(s.IsNotFound()); + ASSERT_TRUE(len_match(&db, "GP2_RPOP_KEY", 0)); + ASSERT_TRUE(elements_match(&db, "GP2_RPOP_KEY", {})); + + // ***************** Group 3 Test ***************** + // "p" -> "o" -> "m" -> "e" -> "i" -> "i" + std::vector gp3_nodes{"i", "i", "e", "m", "o", "p"}; + s = db.LPush("GP3_RPOP_KEY", gp3_nodes, &num); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(gp3_nodes.size(), num); + ASSERT_TRUE(len_match(&db, "GP3_RPOP_KEY", gp3_nodes.size())); + ASSERT_TRUE(elements_match(&db, "GP3_RPOP_KEY", {"p", "o", "m", "e", "i", "i"})); + + // Delete the key, then try lpop + std::vector del_keys = {"GP3_RPOP_KEY"}; + std::map type_status; + db.Del(del_keys); + ASSERT_TRUE(type_status[storage::DataType::kLists].ok()); + ASSERT_TRUE(len_match(&db, "GP3_RPOP_KEY", 0)); + ASSERT_TRUE(elements_match(&db, "GP3_RPOP_KEY", {})); + s = db.RPop("GP3_LPOP_KEY", 1, &elements); + ASSERT_TRUE(s.IsNotFound()); +} + +// RPoplpush +TEST_F(ListsTest, RPoplpushTest) { // NOLINT + int64_t ret; + uint64_t num; + //std::string element; + std::string target; + int64_t type_ttl; + std::map type_status; + std::vector elements; + // ***************** Group 1 Test ***************** + // source "o" + // destination + // ----------------after rpoplpush----------------- + // source + // destination "o" + // + std::vector gp1_nodes{"o"}; + s = db.RPush("GP1_RPOPLPUSH_SOURCE_KEY", gp1_nodes, &num); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(gp1_nodes.size(), num); + ASSERT_TRUE(len_match(&db, "GP1_RPOPLPUSH_SOURCE_KEY", gp1_nodes.size())); + ASSERT_TRUE(elements_match(&db, "GP1_RPOPLPUSH_SOURCE_KEY", {"o"})); + + s = db.RPoplpush("GP1_RPOPLPUSH_SOURCE_KEY", "GP1_RPOPLPUSH_DESTINATION_KEY", &target); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(target, "o"); + ASSERT_TRUE(len_match(&db, "GP1_RPOPLPUSH_SOURCE_KEY", 0)); + ASSERT_TRUE(elements_match(&db, "GP1_RPOPLPUSH_SOURCE_KEY", {})); + ASSERT_TRUE(len_match(&db, "GP1_RPOPLPUSH_DESTINATION_KEY", 1)); + ASSERT_TRUE(elements_match(&db, "GP1_RPOPLPUSH_DESTINATION_KEY", {"o"})); + + // ***************** Group 2 Test ***************** + // source + // destination "o" + // ----------------after rpoplpush----------------- + // source + // destination "o" + // + std::vector gp2_nodes{"o"}; + s = db.RPush("GP2_RPOPLPUSH_DESTINATION_KEY", gp2_nodes, &num); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(gp2_nodes.size(), num); + ASSERT_TRUE(len_match(&db, "GP2_RPOPLPUSH_DESTINATION_KEY", gp2_nodes.size())); + ASSERT_TRUE(elements_match(&db, "GP2_RPOPLPUSH_DESTINATION_KEY", {"o"})); + + s = db.RPoplpush("GP2_RPOPLPUSH_SOURCE_KEY", "GP2_RPOPLPUSH_DESTINATION_KEY", &target); + ASSERT_TRUE(s.IsNotFound()); + ASSERT_EQ(target, ""); + ASSERT_TRUE(len_match(&db, "GP2_RPOPLPUSH_SOURCE_KEY", 0)); + ASSERT_TRUE(elements_match(&db, "GP2_RPOPLPUSH_SOURCE_KEY", {})); + ASSERT_TRUE(len_match(&db, "GP2_RPOPLPUSH_DESTINATION_KEY", 1)); + ASSERT_TRUE(elements_match(&db, "GP2_RPOPLPUSH_DESTINATION_KEY", {"o"})); + + // ***************** Group 3 Test ***************** + // source "a" -> "b" -> "c" -> "o" + // destination "a" -> "b" -> "c" + // ----------------after rpoplpush----------------- + // source "a" -> "b" -> "c" + // destination "o" -> "a" -> "b" -> "c" + // ----------------after rpoplpush----------------- + // source "a" -> "b" + // destination "c" -> "o" -> "a" -> "b" -> "c" + // 
----------------after rpoplpush----------------- + // source "a" + // destination "b" -> "c" -> "o" -> "a" -> "b" -> "c" + // ----------------after rpoplpush----------------- + // source + // destination "a" -> "b" -> "c" -> "o" -> "a" -> "b" -> "c" + // + std::vector gp3_nodes1{"a", "b", "c", "o"}; + std::vector gp3_nodes2{"a", "b", "c"}; + s = db.RPush("GP3_RPOPLPUSH_SOURCE_KEY", gp3_nodes1, &num); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(gp3_nodes1.size(), num); + ASSERT_TRUE(len_match(&db, "GP3_RPOPLPUSH_SOURCE_KEY", gp3_nodes1.size())); + ASSERT_TRUE(elements_match(&db, "GP3_RPOPLPUSH_SOURCE_KEY", {"a", "b", "c", "o"})); + + s = db.RPush("GP3_RPOPLPUSH_DESTINATION_KEY", gp3_nodes2, &num); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(gp3_nodes2.size(), num); + ASSERT_TRUE(len_match(&db, "GP3_RPOPLPUSH_DESTINATION_KEY", gp3_nodes2.size())); + ASSERT_TRUE(elements_match(&db, "GP3_RPOPLPUSH_DESTINATION_KEY", {"a", "b", "c"})); + + s = db.RPoplpush("GP3_RPOPLPUSH_SOURCE_KEY", "GP3_RPOPLPUSH_DESTINATION_KEY", &target); + ASSERT_EQ(target, "o"); + ASSERT_TRUE(len_match(&db, "GP3_RPOPLPUSH_SOURCE_KEY", 3)); + ASSERT_TRUE(elements_match(&db, "GP3_RPOPLPUSH_SOURCE_KEY", {"a", "b", "c"})); + ASSERT_TRUE(len_match(&db, "GP3_RPOPLPUSH_DESTINATION_KEY", 4)); + ASSERT_TRUE(elements_match(&db, "GP3_RPOPLPUSH_DESTINATION_KEY", {"o", "a", "b", "c"})); + + s = db.RPoplpush("GP3_RPOPLPUSH_SOURCE_KEY", "GP3_RPOPLPUSH_DESTINATION_KEY", &target); + ASSERT_EQ(target, "c"); + ASSERT_TRUE(len_match(&db, "GP3_RPOPLPUSH_SOURCE_KEY", 2)); + ASSERT_TRUE(elements_match(&db, "GP3_RPOPLPUSH_SOURCE_KEY", {"a", "b"})); + ASSERT_TRUE(len_match(&db, "GP3_RPOPLPUSH_DESTINATION_KEY", 5)); + ASSERT_TRUE(elements_match(&db, "GP3_RPOPLPUSH_DESTINATION_KEY", {"c", "o", "a", "b", "c"})); + + s = db.RPoplpush("GP3_RPOPLPUSH_SOURCE_KEY", "GP3_RPOPLPUSH_DESTINATION_KEY", &target); + ASSERT_EQ(target, "b"); + ASSERT_TRUE(len_match(&db, "GP3_RPOPLPUSH_SOURCE_KEY", 1)); + ASSERT_TRUE(elements_match(&db, "GP3_RPOPLPUSH_SOURCE_KEY", {"a"})); + ASSERT_TRUE(len_match(&db, "GP3_RPOPLPUSH_DESTINATION_KEY", 6)); + ASSERT_TRUE(elements_match(&db, "GP3_RPOPLPUSH_DESTINATION_KEY", {"b", "c", "o", "a", "b", "c"})); + + s = db.RPoplpush("GP3_RPOPLPUSH_SOURCE_KEY", "GP3_RPOPLPUSH_DESTINATION_KEY", &target); + ASSERT_EQ(target, "a"); + ASSERT_TRUE(len_match(&db, "GP3_RPOPLPUSH_SOURCE_KEY", 0)); + ASSERT_TRUE(elements_match(&db, "GP3_RPOPLPUSH_SOURCE_KEY", {})); + ASSERT_TRUE(len_match(&db, "GP3_RPOPLPUSH_DESTINATION_KEY", 7)); + ASSERT_TRUE(elements_match(&db, "GP3_RPOPLPUSH_DESTINATION_KEY", {"a", "b", "c", "o", "a", "b", "c"})); + + s = db.RPoplpush("GP3_RPOPLPUSH_SOURCE_KEY", "GP3_RPOPLPUSH_DESTINATION_KEY", &target); + ASSERT_EQ(target, ""); + ASSERT_TRUE(len_match(&db, "GP3_RPOPLPUSH_SOURCE_KEY", 0)); + ASSERT_TRUE(elements_match(&db, "GP3_RPOPLPUSH_SOURCE_KEY", {})); + ASSERT_TRUE(len_match(&db, "GP3_RPOPLPUSH_DESTINATION_KEY", 7)); + ASSERT_TRUE(elements_match(&db, "GP3_RPOPLPUSH_DESTINATION_KEY", {"a", "b", "c", "o", "a", "b", "c"})); + + // ***************** Group 4 Test ***************** + // source (empty list); + // destination "a" -> "b" -> "c" + // ----------------after rpoplpush----------------- + // source (empty list); + // destination "a" -> "b" -> "c" + // + std::vector gp4_nodes1{"o"}; + std::vector gp4_nodes2{"a", "b", "c"}; + s = db.RPush("GP4_RPOPLPUSH_SOURCE_KEY", gp4_nodes1, &num); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(gp4_nodes1.size(), num); + ASSERT_TRUE(len_match(&db, "GP4_RPOPLPUSH_SOURCE_KEY", gp4_nodes1.size())); + 
ASSERT_TRUE(elements_match(&db, "GP4_RPOPLPUSH_SOURCE_KEY", {"o"})); + s = db.RPop("GP4_RPOPLPUSH_SOURCE_KEY",1, &elements); + ASSERT_TRUE(s.ok()); + ASSERT_TRUE(elements_match(elements, {"o"})); + ASSERT_TRUE(elements_match(&db, "GP4_RPOPLPUSH_SOURCE_KEY", {})); + + s = db.RPush("GP4_RPOPLPUSH_DESTINATION_KEY", gp4_nodes2, &num); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(gp4_nodes2.size(), num); + ASSERT_TRUE(len_match(&db, "GP4_RPOPLPUSH_DESTINATION_KEY", gp4_nodes2.size())); + ASSERT_TRUE(elements_match(&db, "GP4_RPOPLPUSH_DESTINATION_KEY", {"a", "b", "c"})); + + s = db.RPoplpush("GP4_RPOPLPUSH_SOURCE_KEY", "GP4_RPOPLPUSH_DESTINATION_KEY", &target); + ASSERT_TRUE(s.IsNotFound()); + ASSERT_EQ(target, ""); + ASSERT_TRUE(len_match(&db, "GP4_RPOPLPUSH_SOURCE_KEY", 0)); + ASSERT_TRUE(elements_match(&db, "GP4_RPOPLPUSH_SOURCE_KEY", {})); + ASSERT_TRUE(len_match(&db, "GP4_RPOPLPUSH_DESTINATION_KEY", 3)); + ASSERT_TRUE(elements_match(&db, "GP4_RPOPLPUSH_DESTINATION_KEY", {"a", "b", "c"})); + + // ***************** Group 5 Test ***************** + // source "a" -> "b" -> "c" + // destination (empty list); + // ----------------after rpoplpush----------------- + // source "a" -> "b" + // destination "c" + // + std::vector gp5_nodes1{"a", "b", "c"}; + std::vector gp5_nodes2{"o"}; + s = db.RPush("GP5_RPOPLPUSH_SOURCE_KEY", gp5_nodes1, &num); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(gp5_nodes1.size(), num); + ASSERT_TRUE(len_match(&db, "GP5_RPOPLPUSH_SOURCE_KEY", gp5_nodes1.size())); + ASSERT_TRUE(elements_match(&db, "GP5_RPOPLPUSH_SOURCE_KEY", {"a", "b", "c"})); + + s = db.RPush("GP5_RPOPLPUSH_DESTINATION_KEY", gp5_nodes2, &num); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(gp5_nodes2.size(), num); + ASSERT_TRUE(len_match(&db, "GP5_RPOPLPUSH_DESTINATION_KEY", gp5_nodes2.size())); + ASSERT_TRUE(elements_match(&db, "GP5_RPOPLPUSH_DESTINATION_KEY", {"o"})); + s = db.RPop("GP5_RPOPLPUSH_DESTINATION_KEY",1, &elements); + ASSERT_TRUE(s.ok()); + ASSERT_TRUE(elements_match(elements, {"o"})); + ASSERT_TRUE(len_match(&db, "GP5_RPOPLPUSH_DESTINATION_KEY", 0)); + ASSERT_TRUE(elements_match(&db, "GP5_RPOPLPUSH_DESTINATION_KEY", {})); + + s = db.RPoplpush("GP5_RPOPLPUSH_SOURCE_KEY", "GP5_RPOPLPUSH_DESTINATION_KEY", &target); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(target, "c"); + ASSERT_TRUE(len_match(&db, "GP5_RPOPLPUSH_SOURCE_KEY", 2)); + ASSERT_TRUE(elements_match(&db, "GP5_RPOPLPUSH_SOURCE_KEY", {"a", "b"})); + ASSERT_TRUE(len_match(&db, "GP5_RPOPLPUSH_DESTINATION_KEY", 1)); + ASSERT_TRUE(elements_match(&db, "GP5_RPOPLPUSH_DESTINATION_KEY", {"c"})); + + // ***************** Group 6 Test ***************** + // source "a" -> "b" -> "c" (timeout list); + // destination "x" -> "y" -> "z" + // ----------------after rpoplpush----------------- + // source "a" -> "b" -> "c" (timeout list); + // destination "x" -> "y" -> "z" + // + std::vector gp6_nodes1{"a", "b", "c"}; + std::vector gp6_nodes2{"x", "y", "z"}; + s = db.RPush("GP6_RPOPLPUSH_SOURCE_KEY", gp6_nodes1, &num); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(gp6_nodes1.size(), num); + ASSERT_TRUE(len_match(&db, "GP6_RPOPLPUSH_SOURCE_KEY", gp6_nodes1.size())); + ASSERT_TRUE(elements_match(&db, "GP6_RPOPLPUSH_SOURCE_KEY", {"a", "b", "c"})); + ASSERT_TRUE(make_expired(&db, "GP6_RPOPLPUSH_SOURCE_KEY")); + + s = db.RPush("GP6_RPOPLPUSH_DESTINATION_KEY", gp6_nodes2, &num); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(gp6_nodes2.size(), num); + ASSERT_TRUE(len_match(&db, "GP6_RPOPLPUSH_DESTINATION_KEY", gp6_nodes2.size())); + ASSERT_TRUE(elements_match(&db, "GP6_RPOPLPUSH_DESTINATION_KEY", {"x", "y", "z"})); + + s 
= db.RPoplpush("GP6_RPOPLPUSH_SOURCE_KEY", "GP6_RPOPLPUSH_DESTINATION_KEY", &target); + ASSERT_TRUE(s.IsNotFound()); + ASSERT_EQ(target, ""); + ASSERT_TRUE(len_match(&db, "GP6_RPOPLPUSH_SOURCE_KEY", 0)); + ASSERT_TRUE(elements_match(&db, "GP6_RPOPLPUSH_SOURCE_KEY", {})); + ASSERT_TRUE(len_match(&db, "GP6_RPOPLPUSH_DESTINATION_KEY", 3)); + ASSERT_TRUE(elements_match(&db, "GP6_RPOPLPUSH_DESTINATION_KEY", {"x", "y", "z"})); + + // ***************** Group 7 Test ***************** + // source "a" -> "b" -> "c" + // destination "x" -> "y" -> "z" (timeout list); + // ----------------after rpoplpush----------------- + // source "a" -> "b" + // destination "c" + // + std::vector gp7_nodes1{"a", "b", "c"}; + std::vector gp7_nodes2{"x", "y", "z"}; + s = db.RPush("GP7_RPOPLPUSH_SOURCE_KEY", gp7_nodes1, &num); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(gp7_nodes1.size(), num); + ASSERT_TRUE(len_match(&db, "GP7_RPOPLPUSH_SOURCE_KEY", gp7_nodes1.size())); + ASSERT_TRUE(elements_match(&db, "GP7_RPOPLPUSH_SOURCE_KEY", {"a", "b", "c"})); + + s = db.RPush("GP7_RPOPLPUSH_DESTINATION_KEY", gp7_nodes2, &num); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(gp7_nodes2.size(), num); + ASSERT_TRUE(len_match(&db, "GP7_RPOPLPUSH_DESTINATION_KEY", gp7_nodes2.size())); + ASSERT_TRUE(elements_match(&db, "GP7_RPOPLPUSH_DESTINATION_KEY", {"x", "y", "z"})); + ASSERT_TRUE(make_expired(&db, "GP7_RPOPLPUSH_DESTINATION_KEY")); + + s = db.RPoplpush("GP7_RPOPLPUSH_SOURCE_KEY", "GP7_RPOPLPUSH_DESTINATION_KEY", &target); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(target, "c"); + ASSERT_TRUE(len_match(&db, "GP7_RPOPLPUSH_SOURCE_KEY", 2)); + ASSERT_TRUE(elements_match(&db, "GP7_RPOPLPUSH_SOURCE_KEY", {"a", "b"})); + ASSERT_TRUE(len_match(&db, "GP7_RPOPLPUSH_DESTINATION_KEY", 1)); + ASSERT_TRUE(elements_match(&db, "GP7_RPOPLPUSH_DESTINATION_KEY", {"c"})); + + // ***************** Group 8 Test ***************** + // source "a" -> "b" -> "c" + // ----------------after rpoplpush----------------- + // source "c" -> "a" -> "b" + // + std::vector gp8_nodes{"a", "b", "c"}; + s = db.RPush("GP8_RPOPLPUSH_SOURCE_KEY", gp8_nodes, &num); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(gp8_nodes.size(), num); + ASSERT_TRUE(len_match(&db, "GP8_RPOPLPUSH_SOURCE_KEY", gp8_nodes.size())); + ASSERT_TRUE(elements_match(&db, "GP8_RPOPLPUSH_SOURCE_KEY", {"a", "b", "c"})); + + s = db.RPoplpush("GP8_RPOPLPUSH_SOURCE_KEY", "GP8_RPOPLPUSH_SOURCE_KEY", &target); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(target, "c"); + ASSERT_TRUE(len_match(&db, "GP8_RPOPLPUSH_SOURCE_KEY", 3)); + ASSERT_TRUE(elements_match(&db, "GP8_RPOPLPUSH_SOURCE_KEY", {"c", "a", "b"})); + + // ***************** Group 9 Test ***************** + // source "a" -> "b" -> "c" (timeout list) + // ----------------after rpoplpush----------------- + // source "a" -> "b" -> "c" (timeout list) + // + std::vector gp9_nodes{"a", "b", "c"}; + s = db.RPush("GP9_RPOPLPUSH_SOURCE_KEY", gp9_nodes, &num); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(gp9_nodes.size(), num); + ASSERT_TRUE(len_match(&db, "GP9_RPOPLPUSH_SOURCE_KEY", gp9_nodes.size())); + ASSERT_TRUE(elements_match(&db, "GP9_RPOPLPUSH_SOURCE_KEY", {"a", "b", "c"})); + ASSERT_TRUE(make_expired(&db, "GP9_RPOPLPUSH_SOURCE_KEY")); + ASSERT_TRUE(len_match(&db, "GP9_RPOPLPUSH_SOURCE_KEY", 0)); + ASSERT_TRUE(elements_match(&db, "GP9_RPOPLPUSH_SOURCE_KEY", {})); + + s = db.RPoplpush("GP9_RPOPLPUSH_SOURCE_KEY", "GP9_RPOPLPUSH_SOURCE_KEY", &target); + ASSERT_TRUE(s.IsNotFound()); + ASSERT_EQ(target, ""); + ASSERT_TRUE(len_match(&db, "GP9_RPOPLPUSH_SOURCE_KEY", 0)); + ASSERT_TRUE(elements_match(&db, 
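+
+  // When source and destination name the same list (Groups 8 and 10),
+  // RPoplpush rotates it in place: the tail element moves to the head.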
"GP9_RPOPLPUSH_SOURCE_KEY", {})); + + // ***************** Group 10 Test ***************** + // source "o" + // ----------------after rpoplpush----------------- + // source "o" + // + std::vector gp10_nodes{"o"}; + s = db.RPush("GP10_RPOPLPUSH_SOURCE_KEY", gp10_nodes, &num); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(gp10_nodes.size(), num); + ASSERT_TRUE(len_match(&db, "GP10_RPOPLPUSH_SOURCE_KEY", gp10_nodes.size())); + ASSERT_TRUE(elements_match(&db, "GP10_RPOPLPUSH_SOURCE_KEY", {"o"})); + + s = db.RPoplpush("GP10_RPOPLPUSH_SOURCE_KEY", "GP10_RPOPLPUSH_SOURCE_KEY", &target); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(target, "o"); + ASSERT_TRUE(len_match(&db, "GP10_RPOPLPUSH_SOURCE_KEY", 1)); + ASSERT_TRUE(elements_match(&db, "GP10_RPOPLPUSH_SOURCE_KEY", {"o"})); + + // ***************** Group 11 Test ***************** + // source "a" -> "b" -> "c" + // destination "x" -> "y" -> "z" (TTL 100); + // ------------- destination after lpop ------------- + // destination empty (TTL -2) + // --------------- after rpoplpush ----------------- + // source "a" -> "b" + // destination "c" (TTL -1) + // + std::vector gp11_nodes1{"a", "b", "c"}; + std::vector gp11_nodes2{"x", "y", "z"}; + s = db.RPush("GP11_RPOPLPUSH_SOURCE_KEY", gp11_nodes1, &num); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(gp11_nodes1.size(), num); + ASSERT_TRUE(len_match(&db, "GP11_RPOPLPUSH_SOURCE_KEY", gp11_nodes1.size())); + ASSERT_TRUE(elements_match(&db, "GP11_RPOPLPUSH_SOURCE_KEY", {"a", "b", "c"})); + + s = db.RPush("GP11_RPOPLPUSH_DESTINATION_KEY", gp11_nodes2, &num); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(gp11_nodes2.size(), num); + ASSERT_TRUE(len_match(&db, "GP11_RPOPLPUSH_DESTINATION_KEY", gp11_nodes2.size())); + ASSERT_TRUE(elements_match(&db, "GP11_RPOPLPUSH_DESTINATION_KEY", {"x", "y", "z"})); + + type_status.clear(); + ret = db.Expire("GP11_RPOPLPUSH_DESTINATION_KEY", 100); + ASSERT_EQ(ret, 1); + ASSERT_TRUE(type_status[storage::DataType::kLists].ok()); + + s = db.LPop("GP11_RPOPLPUSH_DESTINATION_KEY",1, &elements); + ASSERT_TRUE(elements_match(elements, {"x"})); + s = db.LPop("GP11_RPOPLPUSH_DESTINATION_KEY", 1,&elements); + ASSERT_TRUE(elements_match(elements, {"y"})); + s = db.LPop("GP11_RPOPLPUSH_DESTINATION_KEY", 1,&elements); + ASSERT_TRUE(elements_match(elements, {"z"})); + ASSERT_TRUE(len_match(&db, "GP11_RPOPLPUSH_DESTINATION_KEY", 0)); + ASSERT_TRUE(elements_match(&db, "GP11_RPOPLPUSH_DESTINATION_KEY", {})); + + s = db.RPoplpush("GP11_RPOPLPUSH_SOURCE_KEY", "GP11_RPOPLPUSH_DESTINATION_KEY", &target); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(target, "c"); + ASSERT_TRUE(len_match(&db, "GP11_RPOPLPUSH_SOURCE_KEY", 2)); + ASSERT_TRUE(elements_match(&db, "GP11_RPOPLPUSH_SOURCE_KEY", {"a", "b"})); + ASSERT_TRUE(len_match(&db, "GP11_RPOPLPUSH_DESTINATION_KEY", 1)); + ASSERT_TRUE(elements_match(&db, "GP11_RPOPLPUSH_DESTINATION_KEY", {"c"})); + + type_status.clear(); + type_ttl = db.TTL("GP11_RPOPLPUSH_DESTINATION_KEY"); + ASSERT_EQ(type_ttl, -1); +} + +// RPush +TEST_F(ListsTest, RPushTest) { // NOLINT + int32_t ret; + uint64_t num; + std::vector elements; + int64_t type_ttl; + std::map type_status; + + // ***************** Group 1 Test ***************** + // "s" -> "l" -> "a" -> "s" -> "h" + std::vector gp1_nodes{"s", "l", "a", "s", "h"}; + s = db.RPush("GP1_RPUSH_KEY", gp1_nodes, &num); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(gp1_nodes.size(), num); + ASSERT_TRUE(len_match(&db, "GP1_RPUSH_KEY", gp1_nodes.size())); + ASSERT_TRUE(elements_match(&db, "GP1_RPUSH_KEY", {"s", "l", "a", "s", "h"})); + + // ***************** Group 2 Test ***************** + 
// "a" -> "x" -> "l" + std::vector gp2_nodes1{"a", "x", "l"}; + s = db.RPush("GP2_RPUSH_KEY", gp2_nodes1, &num); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(gp2_nodes1.size(), num); + ASSERT_TRUE(len_match(&db, "GP2_RPUSH_KEY", gp2_nodes1.size())); + ASSERT_TRUE(elements_match(&db, "GP2_RPUSH_KEY", {"a", "x", "l"})); + + // "r" -> "o" -> "s" -> "e" + std::vector gp2_nodes2{"r", "o", "s", "e"}; + ASSERT_TRUE(make_expired(&db, "GP2_RPUSH_KEY")); + s = db.RPush("GP2_RPUSH_KEY", gp2_nodes2, &num); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(gp2_nodes2.size(), num); + ASSERT_TRUE(len_match(&db, "GP2_RPUSH_KEY", gp2_nodes2.size())); + ASSERT_TRUE(elements_match(&db, "GP2_RPUSH_KEY", {"r", "o", "s", "e"})); + + // ***************** Group 3 Test ***************** + // "d" -> "a" -> "v" -> "i" -> "d" + std::vector gp3_nodes1{"d", "a", "v", "i", "d"}; + s = db.RPush("GP3_RPUSH_KEY", gp3_nodes1, &num); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(gp3_nodes1.size(), num); + ASSERT_TRUE(len_match(&db, "GP3_RPUSH_KEY", gp3_nodes1.size())); + ASSERT_TRUE(elements_match(&db, "GP3_RPUSH_KEY", {"d", "a", "v", "i", "d"})); + + // Delete the key + std::vector del_keys = {"GP3_RPUSH_KEY"}; + type_status.clear(); + db.Del(del_keys); + ASSERT_TRUE(type_status[storage::DataType::kLists].ok()); + + // "g" -> "i" -> "l" -> "m" -> "o" -> "u" -> "r" + std::vector gp3_nodes2{"g", "i", "l", "m", "o", "u", "r"}; + s = db.RPush("GP3_RPUSH_KEY", gp3_nodes2, &num); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(gp3_nodes2.size(), num); + ASSERT_TRUE(len_match(&db, "GP3_RPUSH_KEY", gp3_nodes2.size())); + ASSERT_TRUE(elements_match(&db, "GP3_RPUSH_KEY", {"g", "i", "l", "m", "o", "u", "r"})); + + // ***************** Group 4 Test ***************** + // "t" -> "h" -> "e" -> " " + std::vector gp4_nodes1{"t", "h", "e", " "}; + s = db.RPush("GP4_RPUSH_KEY", gp4_nodes1, &num); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(gp4_nodes1.size(), num); + ASSERT_TRUE(len_match(&db, "GP4_RPUSH_KEY", gp4_nodes1.size())); + ASSERT_TRUE(elements_match(&db, "GP4_RPUSH_KEY", {"t", "h", "e", " "})); + + // "t" -> "h" -> "e" -> " " -> "b" -> "l" -> "u" -> "e" + std::vector gp4_nodes2{"b", "l", "u", "e"}; + s = db.RPush("GP4_RPUSH_KEY", gp4_nodes2, &num); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(8, num); + ASSERT_TRUE(len_match(&db, "GP4_RPUSH_KEY", 8)); + ASSERT_TRUE(elements_match(&db, "GP4_RPUSH_KEY", {"t", "h", "e", " ", "b", "l", "u", "e"})); + + // ***************** Group 5 Test ***************** + // "t" -> "h" -> "e" + std::vector gp5_nodes1{"t", "h", "e"}; + s = db.RPush("GP5_RPUSH_KEY", gp5_nodes1, &num); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(gp5_nodes1.size(), num); + ASSERT_TRUE(len_match(&db, "GP5_RPUSH_KEY", gp5_nodes1.size())); + ASSERT_TRUE(elements_match(&db, "GP5_RPUSH_KEY", {"t", "h", "e"})); + + ASSERT_TRUE(make_expired(&db, "GP5_RPUSH_KEY")); + + // "b" -> "l" -> "u" -> "e" + std::vector gp5_nodes2{"b", "l", "u", "e"}; + s = db.RPush("GP5_RPUSH_KEY", gp5_nodes2, &num); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(4, num); + ASSERT_TRUE(len_match(&db, "GP5_RPUSH_KEY", 4)); + ASSERT_TRUE(elements_match(&db, "GP5_RPUSH_KEY", {"b", "l", "u", "e"})); + + // ***************** Group 6 Test ***************** + // "b" -> "l" -> "u" -> "e" + std::vector gp6_nodes1{"b", "l", "u", "e"}; + s = db.RPush("GP6_RPUSH_KEY", gp6_nodes1, &num); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(gp6_nodes1.size(), num); + ASSERT_TRUE(len_match(&db, "GP6_RPUSH_KEY", gp6_nodes1.size())); + ASSERT_TRUE(elements_match(&db, "GP6_RPUSH_KEY", {"b", "l", "u", "e"})); + + type_status.clear(); + ret = db.Expire("GP6_RPUSH_KEY", 100); + 
+  ASSERT_EQ(ret, 1);
+  ASSERT_TRUE(type_status[storage::DataType::kLists].ok());
+
+  type_status.clear();
+  type_ttl = db.TTL("GP6_RPUSH_KEY");
+  ASSERT_LE(type_ttl, 100);
+  ASSERT_GE(type_ttl, 0);
+
+  s = db.LPop("GP6_RPUSH_KEY", 1, &elements);
+  ASSERT_TRUE(elements_match(elements, {"b"}));
+  s = db.LPop("GP6_RPUSH_KEY", 1, &elements);
+  ASSERT_TRUE(elements_match(elements, {"l"}));
+  s = db.LPop("GP6_RPUSH_KEY", 1, &elements);
+  ASSERT_TRUE(elements_match(elements, {"u"}));
+  s = db.LPop("GP6_RPUSH_KEY", 1, &elements);
+  ASSERT_TRUE(elements_match(elements, {"e"}));
+  ASSERT_TRUE(len_match(&db, "GP6_RPUSH_KEY", 0));
+  ASSERT_TRUE(elements_match(&db, "GP6_RPUSH_KEY", {}));
+
+  // "t" -> "h" -> "e"
+  std::vector<std::string> gp6_nodes2{"t", "h", "e"};
+  s = db.RPush("GP6_RPUSH_KEY", gp6_nodes2, &num);
+  ASSERT_TRUE(s.ok());
+  ASSERT_EQ(3, num);
+  ASSERT_TRUE(len_match(&db, "GP6_RPUSH_KEY", 3));
+  ASSERT_TRUE(elements_match(&db, "GP6_RPUSH_KEY", {"t", "h", "e"}));
+
+  type_status.clear();
+  type_ttl = db.TTL("GP6_RPUSH_KEY");
+  ASSERT_EQ(type_ttl, -1);
+}
+
+// RPushx
+TEST_F(ListsTest, RPushxTest) {  // NOLINT
+  int64_t ret;
+  uint64_t num;
+
+  // ***************** Group 1 Test *****************
+  //  "o" -> "o" -> "o"
+  std::vector<std::string> gp1_nodes1{"o", "o", "o"};
+  s = db.LPush("GP1_RPUSHX_KEY", gp1_nodes1, &num);
+  ASSERT_TRUE(s.ok());
+  ASSERT_EQ(gp1_nodes1.size(), num);
+  ASSERT_TRUE(len_match(&db, "GP1_RPUSHX_KEY", gp1_nodes1.size()));
+  ASSERT_TRUE(elements_match(&db, "GP1_RPUSHX_KEY", {"o", "o", "o"}));
+
+  // "o" -> "o" -> "o" -> "x"
+  s = db.RPushx("GP1_RPUSHX_KEY", {"x"}, &num);
+  ASSERT_TRUE(s.ok());
+  ASSERT_EQ(num, 4);
+  ASSERT_TRUE(len_match(&db, "GP1_RPUSHX_KEY", 4));
+  ASSERT_TRUE(elements_match(&db, "GP1_RPUSHX_KEY", {"o", "o", "o", "x"}));
+
+  // "o" -> "o" -> "o" -> "x" -> "o" -> "o"
+  std::vector<std::string> gp1_nodes2{"o", "o"};
+  s = db.RPush("GP1_RPUSHX_KEY", gp1_nodes2, &num);
+  ASSERT_TRUE(s.ok());
+  ASSERT_EQ(num, 6);
+  ASSERT_TRUE(len_match(&db, "GP1_RPUSHX_KEY", 6));
+  ASSERT_TRUE(elements_match(&db, "GP1_RPUSHX_KEY", {"o", "o", "o", "x", "o", "o"}));
+
+  // "o" -> "o" -> "o" -> "x" -> "o" -> "o" -> "x"
+  s = db.RPushx("GP1_RPUSHX_KEY", {"x"}, &num);
+  ASSERT_TRUE(s.ok());
+  ASSERT_EQ(num, 7);
+  ASSERT_TRUE(len_match(&db, "GP1_RPUSHX_KEY", 7));
+  ASSERT_TRUE(elements_match(&db, "GP1_RPUSHX_KEY", {"o", "o", "o", "x", "o", "o", "x"}));
+
+  // ***************** Group 2 Test *****************
+  // RPushx not exist key
+  s = db.RPushx("GP2_RPUSHX_KEY", {"x"}, &num);
+  ASSERT_TRUE(s.IsNotFound());
+  ASSERT_TRUE(len_match(&db, "GP2_RPUSHX_KEY", 0));
+  ASSERT_TRUE(elements_match(&db, "GP2_RPUSHX_KEY", {}));
+
+  // ***************** Group 3 Test *****************
+  // "o" -> "o" -> "o"
+  // RPushx timeout key
+  std::vector<std::string> gp3_nodes{"o", "o", "o"};
+  s = db.RPush("GP3_RPUSHX_KEY", gp3_nodes, &num);
+  ASSERT_TRUE(s.ok());
+  ASSERT_EQ(gp3_nodes.size(), num);
+  ASSERT_TRUE(len_match(&db, "GP3_RPUSHX_KEY", gp3_nodes.size()));
+  ASSERT_TRUE(elements_match(&db, "GP3_RPUSHX_KEY", {"o", "o", "o"}));
+  ASSERT_TRUE(make_expired(&db, "GP3_RPUSHX_KEY"));
+
+  s = db.RPushx("GP3_RPUSHX_KEY", {"x"}, &num);
+  ASSERT_TRUE(s.IsNotFound());
+  ASSERT_TRUE(len_match(&db, "GP3_RPUSHX_KEY", 0));
+  ASSERT_TRUE(elements_match(&db, "GP3_RPUSHX_KEY", {}));
+
+  // ***************** Group 4 Test *****************
+  // RPushx has been deleted key
+  std::vector<std::string> gp4_nodes{"o", "o", "o"};
+  s = db.RPush("GP4_RPUSHX_KEY", gp4_nodes, &num);
+  ASSERT_TRUE(s.ok());
+  ASSERT_EQ(gp4_nodes.size(), num);
+  ASSERT_TRUE(len_match(&db, "GP4_RPUSHX_KEY", gp4_nodes.size()));
"GP4_RPUSHX_KEY", gp4_nodes.size())); + ASSERT_TRUE(elements_match(&db, "GP4_RPUSHX_KEY", {"o", "o", "o"})); + + // Delete the key + std::vector del_keys = {"GP4_RPUSHX_KEY"}; + std::map type_status; + db.Del(del_keys); + ASSERT_TRUE(type_status[storage::DataType::kLists].ok()); + + s = db.RPushx("GP4_RPUSHX_KEY", {"x"}, &num); + ASSERT_TRUE(s.IsNotFound()); + ASSERT_TRUE(len_match(&db, "GP4_RPUSHX_KEY", 0)); + ASSERT_TRUE(elements_match(&db, "GP4_RPUSHX_KEY", {})); + + // ***************** Group 5 Test ***************** + std::vector gp5_nodes{"o", "o", "o"}; + s = db.RPush("GP5_RPUSHX_KEY", gp5_nodes, &num); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(gp5_nodes.size(), num); + ASSERT_TRUE(len_match(&db, "GP5_RPUSHX_KEY", gp5_nodes.size())); + ASSERT_TRUE(elements_match(&db, "GP5_RPUSHX_KEY", {"o", "o", "o"})); + + // RPushx multi key + // "o" -> "o" -> "o" -> "x" -> "y" + s = db.RPushx("GP5_RPUSHX_KEY", {"x", "y"}, &num); + gp5_nodes.emplace_back("x"); + gp5_nodes.emplace_back("y"); + + ASSERT_TRUE(s.ok()); + ASSERT_TRUE(len_match(&db, "GP5_RPUSHX_KEY", gp5_nodes.size())); + ASSERT_TRUE(elements_match(&db, "GP5_RPUSHX_KEY", gp5_nodes)); + + // ***************** Group 6 Test ***************** + std::vector gp6_nodes{"o", "o", "o"}; + s = db.RPush("GP6_RPUSHX_KEY", gp6_nodes, &num); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(gp6_nodes.size(), num); + ASSERT_TRUE(len_match(&db, "GP6_RPUSHX_KEY", gp6_nodes.size())); + ASSERT_TRUE(elements_match(&db, "GP6_RPUSHX_KEY", {"o", "o", "o"})); + + // LPushx empty key + s = db.RPushx("GP6_RPUSHX_KEY", {}, &num); + + ASSERT_TRUE(s.ok()); +} + +int main(int argc, char** argv) { + if (!pstd::FileExists("./log")) { + pstd::CreatePath("./log"); + } + FLAGS_log_dir = "./log"; + FLAGS_minloglevel = 0; + FLAGS_max_log_size = 1800; + FLAGS_logbufsecs = 0; + ::google::InitGoogleLogging("lists_test"); + ::testing::InitGoogleTest(&argc, argv); + return RUN_ALL_TESTS(); +} diff --git a/tools/pika_migrate/src/storage/tests/lock_mgr_test.cc b/tools/pika_migrate/src/storage/tests/lock_mgr_test.cc new file mode 100644 index 0000000000..965ecdb980 --- /dev/null +++ b/tools/pika_migrate/src/storage/tests/lock_mgr_test.cc @@ -0,0 +1,44 @@ +// Copyright (c) 2017-present, Qihoo, Inc. All rights reserved. +// This source code is licensed under the BSD-style license found in the +// LICENSE file in the root directory of this source tree. An additional grant +// of patent rights can be found in the PATENTS file in the same directory. 
+
+#include <thread>
+
+#include "src/lock_mgr.h"
+#include "src/mutex_impl.h"
+
+using namespace storage;
+
+void Func(LockMgr* mgr, int id, const std::string& key) {
+  mgr->TryLock(key);
+  printf("thread %d TryLock %s success\n", id, key.c_str());
+  std::this_thread::sleep_for(std::chrono::seconds(3));
+  mgr->UnLock(key);
+  printf("thread %d UnLock %s\n", id, key.c_str());
+}
+
+int main() {
+  std::shared_ptr<MutexFactory> factory = std::make_shared<MutexFactoryImpl>();
+  LockMgr mgr(1, 3, factory);
+
+  std::thread t1(Func, &mgr, 1, "key_1");
+  std::this_thread::sleep_for(std::chrono::milliseconds(100));
+  std::thread t2(Func, &mgr, 2, "key_2");
+  std::this_thread::sleep_for(std::chrono::milliseconds(100));
+  std::thread t3(Func, &mgr, 3, "key_3");
+  std::thread t4(Func, &mgr, 4, "key_4");
+
+  std::this_thread::sleep_for(std::chrono::milliseconds(100));
+
+  auto s = mgr.TryLock("key_1");
+  printf("thread main TryLock key_1 ret %s\n", s.ToString().c_str());
+  mgr.UnLock("key_1");
+  printf("thread main UnLock key_1\n");
+
+  t1.join();
+  t2.join();
+  t3.join();
+  t4.join();
+  return 0;
+}
diff --git a/tools/pika_migrate/src/storage/tests/lru_cache_test.cc b/tools/pika_migrate/src/storage/tests/lru_cache_test.cc
new file mode 100644
index 0000000000..82d3e0e1ae
--- /dev/null
+++ b/tools/pika_migrate/src/storage/tests/lru_cache_test.cc
@@ -0,0 +1,493 @@
+// Copyright (c) 2017-present, Qihoo, Inc. All rights reserved.
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree. An additional grant
+// of patent rights can be found in the PATENTS file in the same directory.
+
+#include <gtest/gtest.h>
+
+#include "src/lru_cache.h"
+#include "storage/storage.h"
+
+using namespace storage;
+
+TEST(LRUCacheTest, TestSetCapacityCase1) {
+  Status s;
+  std::string value;
+  storage::LRUCache<std::string, std::string> lru_cache;
+  lru_cache.SetCapacity(15);
+
+  // ***************** Step 1 *****************
+  // (k5, v5) -> (k4, v4) -> (k3, v3) -> (k2, v2) -> (k1, v1);
+  lru_cache.Insert("k1", "v1", 1);
+  lru_cache.Insert("k2", "v2", 2);
+  lru_cache.Insert("k3", "v3", 3);
+  lru_cache.Insert("k4", "v4", 4);
+  lru_cache.Insert("k5", "v5", 5);
+  ASSERT_EQ(lru_cache.Size(), 5);
+  ASSERT_EQ(lru_cache.TotalCharge(), 15);
+  ASSERT_TRUE(lru_cache.LRUAndHandleTableConsistent());
+  ASSERT_TRUE(lru_cache.LRUAsExpected({{"k5", "v5"}, {"k4", "v4"}, {"k3", "v3"}, {"k2", "v2"}, {"k1", "v1"}}));
+
+  // ***************** Step 2 *****************
+  // (k5, v5) -> (k4, v4) -> (k3, v3)
+  lru_cache.SetCapacity(12);
+  ASSERT_EQ(lru_cache.Size(), 3);
+  ASSERT_EQ(lru_cache.TotalCharge(), 12);
+  ASSERT_TRUE(lru_cache.LRUAndHandleTableConsistent());
+  ASSERT_TRUE(lru_cache.LRUAsExpected({{"k5", "v5"}, {"k4", "v4"}, {"k3", "v3"}}));
+
+  // ***************** Step 3 *****************
+  // (k5, v5)
+  lru_cache.SetCapacity(5);
+  ASSERT_EQ(lru_cache.Size(), 1);
+  ASSERT_EQ(lru_cache.TotalCharge(), 5);
+  ASSERT_TRUE(lru_cache.LRUAndHandleTableConsistent());
+  ASSERT_TRUE(lru_cache.LRUAsExpected({{"k5", "v5"}}));
+
+  // ***************** Step 4 *****************
+  // (k5, v5)
+  lru_cache.SetCapacity(15);
+  ASSERT_EQ(lru_cache.Size(), 1);
+  ASSERT_EQ(lru_cache.TotalCharge(), 5);
+  ASSERT_TRUE(lru_cache.LRUAndHandleTableConsistent());
+  ASSERT_TRUE(lru_cache.LRUAsExpected({{"k5", "v5"}}));
+
+  // ***************** Step 5 *****************
+  // empty
+  lru_cache.SetCapacity(1);
+  ASSERT_EQ(lru_cache.Size(), 0);
+  ASSERT_EQ(lru_cache.TotalCharge(), 0);
+  ASSERT_TRUE(lru_cache.LRUAndHandleTableConsistent());
+  ASSERT_TRUE(lru_cache.LRUAsExpected({}));
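+
+  // Note: eviction under SetCapacity proceeds from the least-recently-used
+  // end until TotalCharge() fits the new capacity; since the sole surviving
+  // entry (k5, v5) carries charge 5, a capacity of 1 leaves the cache empty
+  // rather than retaining a partially charged entry.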
+}
+
+TEST(LRUCacheTest, TestLookupCase1) {
+  Status s;
+  std::string value;
+  storage::LRUCache<std::string, std::string> lru_cache;
+  lru_cache.SetCapacity(5);
+
+  // ***************** Step 1 *****************
+  // (k5, v5) -> (k4, v4) -> (k3, v3) -> (k2, v2) -> (k1, v1);
+  lru_cache.Insert("k1", "v1");
+  lru_cache.Insert("k2", "v2");
+  lru_cache.Insert("k3", "v3");
+  lru_cache.Insert("k4", "v4");
+  lru_cache.Insert("k5", "v5");
+  ASSERT_EQ(lru_cache.Size(), 5);
+  ASSERT_EQ(lru_cache.TotalCharge(), 5);
+  ASSERT_TRUE(lru_cache.LRUAndHandleTableConsistent());
+  ASSERT_TRUE(lru_cache.LRUAsExpected({{"k5", "v5"}, {"k4", "v4"}, {"k3", "v3"}, {"k2", "v2"}, {"k1", "v1"}}));
+
+  // ***************** Step 2 *****************
+  // (k3, v3) -> (k5, v5) -> (k4, v4) -> (k2, v2) -> (k1, v1);
+  s = lru_cache.Lookup("k3", &value);
+  ASSERT_EQ(lru_cache.Size(), 5);
+  ASSERT_EQ(lru_cache.TotalCharge(), 5);
+  ASSERT_TRUE(lru_cache.LRUAndHandleTableConsistent());
+  ASSERT_TRUE(lru_cache.LRUAsExpected({{"k3", "v3"}, {"k5", "v5"}, {"k4", "v4"}, {"k2", "v2"}, {"k1", "v1"}}));
+
+  // ***************** Step 3 *****************
+  // (k1, v1) -> (k3, v3) -> (k5, v5) -> (k4, v4) -> (k2, v2);
+  s = lru_cache.Lookup("k1", &value);
+  ASSERT_EQ(lru_cache.Size(), 5);
+  ASSERT_EQ(lru_cache.TotalCharge(), 5);
+  ASSERT_TRUE(lru_cache.LRUAndHandleTableConsistent());
+  ASSERT_TRUE(lru_cache.LRUAsExpected({{"k1", "v1"}, {"k3", "v3"}, {"k5", "v5"}, {"k4", "v4"}, {"k2", "v2"}}));
+
+  // ***************** Step 4 *****************
+  // (k4, v4) -> (k1, v1) -> (k3, v3) -> (k5, v5) -> (k2, v2);
+  s = lru_cache.Lookup("k4", &value);
+  ASSERT_EQ(lru_cache.Size(), 5);
+  ASSERT_EQ(lru_cache.TotalCharge(), 5);
+  ASSERT_TRUE(lru_cache.LRUAndHandleTableConsistent());
+  ASSERT_TRUE(lru_cache.LRUAsExpected({{"k4", "v4"}, {"k1", "v1"}, {"k3", "v3"}, {"k5", "v5"}, {"k2", "v2"}}));
+
+  // ***************** Step 5 *****************
+  // (k5, v5) -> (k4, v4) -> (k1, v1) -> (k3, v3) -> (k2, v2);
+  s = lru_cache.Lookup("k5", &value);
+  ASSERT_EQ(lru_cache.Size(), 5);
+  ASSERT_EQ(lru_cache.TotalCharge(), 5);
+  ASSERT_TRUE(lru_cache.LRUAndHandleTableConsistent());
+  ASSERT_TRUE(lru_cache.LRUAsExpected({{"k5", "v5"}, {"k4", "v4"}, {"k1", "v1"}, {"k3", "v3"}, {"k2", "v2"}}));
+
+  // ***************** Step 6 *****************
+  // (k5, v5) -> (k4, v4) -> (k1, v1) -> (k3, v3) -> (k2, v2);
+  s = lru_cache.Lookup("k5", &value);
+  ASSERT_EQ(lru_cache.Size(), 5);
+  ASSERT_EQ(lru_cache.TotalCharge(), 5);
+  ASSERT_TRUE(lru_cache.LRUAndHandleTableConsistent());
+  ASSERT_TRUE(lru_cache.LRUAsExpected({{"k5", "v5"}, {"k4", "v4"}, {"k1", "v1"}, {"k3", "v3"}, {"k2", "v2"}}));
+}
+
+TEST(LRUCacheTest, TestInsertCase1) {
+  Status s;
+  storage::LRUCache<std::string, std::string> lru_cache;
+  lru_cache.SetCapacity(3);
+
+  // ***************** Step 1 *****************
+  // (k1, v1)
+  s = lru_cache.Insert("k1", "v1");
+  ASSERT_TRUE(s.ok());
+  ASSERT_EQ(lru_cache.Size(), 1);
+  ASSERT_EQ(lru_cache.TotalCharge(), 1);
+  ASSERT_TRUE(lru_cache.LRUAndHandleTableConsistent());
+  ASSERT_TRUE(lru_cache.LRUAsExpected({{"k1", "v1"}}));
+
+  // ***************** Step 2 *****************
+  // (k2, v2) -> (k1, v1)
+  s = lru_cache.Insert("k2", "v2");
+  ASSERT_TRUE(s.ok());
+  ASSERT_EQ(lru_cache.Size(), 2);
+  ASSERT_EQ(lru_cache.TotalCharge(), 2);
+  ASSERT_TRUE(lru_cache.LRUAndHandleTableConsistent());
+  ASSERT_TRUE(lru_cache.LRUAsExpected({{"k2", "v2"}, {"k1", "v1"}}));
+
+  // ***************** Step 3 *****************
+  // (k3, v3) -> (k2, v2) -> (k1, v1)
+  s = lru_cache.Insert("k3", "v3");
+  ASSERT_TRUE(s.ok());
+  ASSERT_EQ(lru_cache.Size(), 3);
+  ASSERT_EQ(lru_cache.TotalCharge(), 3);
+  ASSERT_TRUE(lru_cache.LRUAndHandleTableConsistent());
+  ASSERT_TRUE(lru_cache.LRUAsExpected({{"k3", "v3"}, {"k2", "v2"}, {"k1", "v1"}}));
+
+  // ***************** Step 4 *****************
+  // (k4, v4) -> (k3, v3) -> (k2, v2)
+  s = lru_cache.Insert("k4", "v4");
+  ASSERT_TRUE(s.ok());
+  ASSERT_EQ(lru_cache.Size(), 3);
+  ASSERT_EQ(lru_cache.TotalCharge(), 3);
+  ASSERT_TRUE(lru_cache.LRUAndHandleTableConsistent());
+  ASSERT_TRUE(lru_cache.LRUAsExpected({{"k4", "v4"}, {"k3", "v3"}, {"k2", "v2"}}));
+
+  // ***************** Step 5 *****************
+  // (k5, v5) -> (k4, v4) -> (k3, v3)
+  s = lru_cache.Insert("k5", "v5");
+  ASSERT_TRUE(s.ok());
+  ASSERT_EQ(lru_cache.Size(), 3);
+  ASSERT_EQ(lru_cache.TotalCharge(), 3);
+  ASSERT_TRUE(lru_cache.LRUAndHandleTableConsistent());
+  ASSERT_TRUE(lru_cache.LRUAsExpected({{"k5", "v5"}, {"k4", "v4"}, {"k3", "v3"}}));
+}
+
+TEST(LRUCacheTest, TestInsertCase2) {
+  Status s;
+  storage::LRUCache<std::string, std::string> lru_cache;
+  lru_cache.SetCapacity(5);
+
+  // ***************** Step 1 *****************
+  // (k5, v5) -> (k4, v4) -> (k3, v3) -> (k2, v2) -> (k1, v1)
+  lru_cache.Insert("k1", "v1");
+  lru_cache.Insert("k2", "v2");
+  lru_cache.Insert("k3", "v3");
+  lru_cache.Insert("k4", "v4");
+  lru_cache.Insert("k5", "v5");
+  ASSERT_TRUE(s.ok());
+  ASSERT_EQ(lru_cache.Size(), 5);
+  ASSERT_EQ(lru_cache.TotalCharge(), 5);
+  ASSERT_TRUE(lru_cache.LRUAndHandleTableConsistent());
+  ASSERT_TRUE(lru_cache.LRUAsExpected({{"k5", "v5"}, {"k4", "v4"}, {"k3", "v3"}, {"k2", "v2"}, {"k1", "v1"}}));
+
+  // ***************** Step 2 *****************
+  // (k3, v3) -> (k5, v5) -> (k4, v4) -> (k2, v2) -> (k1, v1)
+  s = lru_cache.Insert("k3", "v3");
+  ASSERT_TRUE(s.ok());
+  ASSERT_EQ(lru_cache.Size(), 5);
+  ASSERT_EQ(lru_cache.TotalCharge(), 5);
+  ASSERT_TRUE(lru_cache.LRUAndHandleTableConsistent());
+  ASSERT_TRUE(lru_cache.LRUAsExpected({{"k3", "v3"}, {"k5", "v5"}, {"k4", "v4"}, {"k2", "v2"}, {"k1", "v1"}}));
+
+  // ***************** Step 3 *****************
+  // (k2, v2) -> (k3, v3) -> (k5, v5) -> (k4, v4) -> (k1, v1)
+  s = lru_cache.Insert("k2", "v2");
+  ASSERT_TRUE(s.ok());
+  ASSERT_EQ(lru_cache.Size(), 5);
+  ASSERT_EQ(lru_cache.TotalCharge(), 5);
+  ASSERT_TRUE(lru_cache.LRUAndHandleTableConsistent());
+  ASSERT_TRUE(lru_cache.LRUAsExpected({{"k2", "v2"}, {"k3", "v3"}, {"k5", "v5"}, {"k4", "v4"}, {"k1", "v1"}}));
+
+  // ***************** Step 4 *****************
+  // (k1, v1) -> (k2, v2) -> (k3, v3) -> (k5, v5) -> (k4, v4)
+  s = lru_cache.Insert("k1", "v1");
+  ASSERT_TRUE(s.ok());
+  ASSERT_EQ(lru_cache.Size(), 5);
+  ASSERT_EQ(lru_cache.TotalCharge(), 5);
+  ASSERT_TRUE(lru_cache.LRUAndHandleTableConsistent());
+  ASSERT_TRUE(lru_cache.LRUAsExpected({{"k1", "v1"}, {"k2", "v2"}, {"k3", "v3"}, {"k5", "v5"}, {"k4", "v4"}}));
+
+  // ***************** Step 5 *****************
+  // (k4, v4) -> (k1, v1) -> (k2, v2) -> (k3, v3) -> (k5, v5)
+  s = lru_cache.Insert("k4", "v4");
+  ASSERT_TRUE(s.ok());
+  ASSERT_EQ(lru_cache.Size(), 5);
+  ASSERT_EQ(lru_cache.TotalCharge(), 5);
+  ASSERT_TRUE(lru_cache.LRUAndHandleTableConsistent());
+  ASSERT_TRUE(lru_cache.LRUAsExpected({{"k4", "v4"}, {"k1", "v1"}, {"k2", "v2"}, {"k3", "v3"}, {"k5", "v5"}}));
+
+  // ***************** Step 6 *****************
+  // (k4, v4) -> (k1, v1) -> (k2, v2) -> (k3, v3) -> (k5, v5)
+  s = lru_cache.Insert("k4", "v4");
+  ASSERT_TRUE(s.ok());
+  ASSERT_EQ(lru_cache.Size(), 5);
+  ASSERT_EQ(lru_cache.TotalCharge(), 5);
+  ASSERT_TRUE(lru_cache.LRUAndHandleTableConsistent());
ASSERT_TRUE(lru_cache.LRUAsExpected({{"k4", "v4"}, {"k1", "v1"}, {"k2", "v2"}, {"k3", "v3"}, {"k5", "v5"}})); + + // ***************** Step 6 ***************** + // (k4, v4) -> (k1, v1) -> (k2, v2) -> (k3, v3) -> (k5, v5) + s = lru_cache.Insert("k0", "v0"); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(lru_cache.Size(), 5); + ASSERT_EQ(lru_cache.TotalCharge(), 5); + ASSERT_TRUE(lru_cache.LRUAndHandleTableConsistent()); + ASSERT_TRUE(lru_cache.LRUAsExpected({{"k0", "v0"}, {"k4", "v4"}, {"k1", "v1"}, {"k2", "v2"}, {"k3", "v3"}})); +} + +TEST(LRUCacheTest, TestInsertCase3) { + Status s; + storage::LRUCache lru_cache; + lru_cache.SetCapacity(10); + + // ***************** Step 1 ***************** + // (k1, v1) + s = lru_cache.Insert("k1", "v1"); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(lru_cache.Size(), 1); + ASSERT_EQ(lru_cache.TotalCharge(), 1); + ASSERT_TRUE(lru_cache.LRUAndHandleTableConsistent()); + ASSERT_TRUE(lru_cache.LRUAsExpected({{"k1", "v1"}})); + + // ***************** Step 2 ***************** + // (k2, v2) -> (k1, v1) + s = lru_cache.Insert("k2", "v2", 2); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(lru_cache.Size(), 2); + ASSERT_EQ(lru_cache.TotalCharge(), 3); + ASSERT_TRUE(lru_cache.LRUAndHandleTableConsistent()); + ASSERT_TRUE(lru_cache.LRUAsExpected({{"k2", "v2"}, {"k1", "v1"}})); + + // ***************** Step 3 ***************** + // (k3, v3) -> (k2, v1) -> (k1, v1) + s = lru_cache.Insert("k3", "v3", 3); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(lru_cache.Size(), 3); + ASSERT_EQ(lru_cache.TotalCharge(), 6); + ASSERT_TRUE(lru_cache.LRUAndHandleTableConsistent()); + ASSERT_TRUE(lru_cache.LRUAsExpected({{"k3", "v3"}, {"k2", "v2"}, {"k1", "v1"}})); + + // ***************** Step 4 ***************** + // (k4, v4) -> (k3, v3) -> (k2, v2) -> (k1, v1) + s = lru_cache.Insert("k4", "v4", 4); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(lru_cache.Size(), 4); + ASSERT_EQ(lru_cache.TotalCharge(), 10); + ASSERT_TRUE(lru_cache.LRUAndHandleTableConsistent()); + ASSERT_TRUE(lru_cache.LRUAsExpected({{"k4", "v4"}, {"k3", "v3"}, {"k2", "v2"}, {"k1", "v1"}})); + + // ***************** Step 5 ***************** + // (k5, v5) -> (k4, v4) + s = lru_cache.Insert("k5", "v5", 5); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(lru_cache.Size(), 2); + ASSERT_EQ(lru_cache.TotalCharge(), 9); + ASSERT_TRUE(lru_cache.LRUAndHandleTableConsistent()); + ASSERT_TRUE(lru_cache.LRUAsExpected({{"k5", "v5"}, {"k4", "v4"}})); + + // ***************** Step 6 ***************** + // (k6, v6) + s = lru_cache.Insert("k6", "v6", 6); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(lru_cache.Size(), 1); + ASSERT_EQ(lru_cache.TotalCharge(), 6); + ASSERT_TRUE(lru_cache.LRUAndHandleTableConsistent()); + ASSERT_TRUE(lru_cache.LRUAsExpected({{"k6", "v6"}})); +} + +TEST(LRUCacheTest, TestInsertCase4) { + Status s; + storage::LRUCache lru_cache; + lru_cache.SetCapacity(10); + + // ***************** Step 1 ***************** + // (k4, v4) -> (k3, v3) -> (k2, v2) -> (k1, v1) + lru_cache.Insert("k1", "v1", 1); + lru_cache.Insert("k2", "v2", 2); + lru_cache.Insert("k3", "v3", 3); + lru_cache.Insert("k4", "v4", 4); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(lru_cache.Size(), 4); + ASSERT_EQ(lru_cache.TotalCharge(), 10); + ASSERT_TRUE(lru_cache.LRUAndHandleTableConsistent()); + ASSERT_TRUE(lru_cache.LRUAsExpected({{"k4", "v4"}, {"k3", "v3"}, {"k2", "v2"}, {"k1", "v1"}})); + + // ***************** Step 2 ***************** + // empty + lru_cache.Insert("k11", "v11", 11); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(lru_cache.Size(), 0); + ASSERT_EQ(lru_cache.TotalCharge(), 0); + 
+  ASSERT_TRUE(lru_cache.LRUAndHandleTableConsistent());
+  ASSERT_TRUE(lru_cache.LRUAsExpected({}));
+
+  // ***************** Step 3 *****************
+  // empty
+  lru_cache.Insert("k11", "v11", 11);
+  ASSERT_TRUE(s.ok());
+  ASSERT_EQ(lru_cache.Size(), 0);
+  ASSERT_EQ(lru_cache.TotalCharge(), 0);
+  ASSERT_TRUE(lru_cache.LRUAndHandleTableConsistent());
+  ASSERT_TRUE(lru_cache.LRUAsExpected({}));
+
+  // ***************** Step 4 *****************
+  // (k4, v4) -> (k3, v3) -> (k2, v2) -> (k1, v1)
+  lru_cache.Insert("k1", "v1", 1);
+  lru_cache.Insert("k2", "v2", 2);
+  lru_cache.Insert("k3", "v3", 3);
+  lru_cache.Insert("k4", "v4", 4);
+  ASSERT_TRUE(s.ok());
+  ASSERT_EQ(lru_cache.Size(), 4);
+  ASSERT_EQ(lru_cache.TotalCharge(), 10);
+  ASSERT_TRUE(lru_cache.LRUAndHandleTableConsistent());
+  ASSERT_TRUE(lru_cache.LRUAsExpected({{"k4", "v4"}, {"k3", "v3"}, {"k2", "v2"}, {"k1", "v1"}}));
+
+  // ***************** Step 5 *****************
+  // (k5, v5) -> (k4, v4)
+  lru_cache.Insert("k5", "v5", 5);
+  ASSERT_TRUE(s.ok());
+  ASSERT_EQ(lru_cache.Size(), 2);
+  ASSERT_EQ(lru_cache.TotalCharge(), 9);
+  ASSERT_TRUE(lru_cache.LRUAndHandleTableConsistent());
+  ASSERT_TRUE(lru_cache.LRUAsExpected({{"k5", "v5"}, {"k4", "v4"}}));
+
+  // ***************** Step 6 *****************
+  // (k1, v1) -> (k5, v5) -> (k4, v4)
+  lru_cache.Insert("k1", "v1", 1);
+  ASSERT_TRUE(s.ok());
+  ASSERT_EQ(lru_cache.Size(), 3);
+  ASSERT_EQ(lru_cache.TotalCharge(), 10);
+  ASSERT_TRUE(lru_cache.LRUAndHandleTableConsistent());
+  ASSERT_TRUE(lru_cache.LRUAsExpected({{"k1", "v1"}, {"k5", "v5"}, {"k4", "v4"}}));
+
+  // ***************** Step 7 *****************
+  // (k5, v5) -> (k1, v1) -> (k4, v4)
+  lru_cache.Insert("k5", "v5", 5);
+  ASSERT_TRUE(s.ok());
+  ASSERT_EQ(lru_cache.Size(), 3);
+  ASSERT_EQ(lru_cache.TotalCharge(), 10);
+  ASSERT_TRUE(lru_cache.LRUAndHandleTableConsistent());
+  ASSERT_TRUE(lru_cache.LRUAsExpected({{"k5", "v5"}, {"k1", "v1"}, {"k4", "v4"}}));
+
+  // ***************** Step 8 *****************
+  // (k6, v6)
+  lru_cache.Insert("k6", "v6", 6);
+  ASSERT_TRUE(s.ok());
+  ASSERT_EQ(lru_cache.Size(), 1);
+  ASSERT_EQ(lru_cache.TotalCharge(), 6);
+  ASSERT_TRUE(lru_cache.LRUAndHandleTableConsistent());
+  ASSERT_TRUE(lru_cache.LRUAsExpected({{"k6", "v6"}}));
+
+  // ***************** Step 9 *****************
+  // (k2, v2) -> (k6, v6)
+  lru_cache.Insert("k2", "v2", 2);
+  ASSERT_TRUE(s.ok());
+  ASSERT_EQ(lru_cache.Size(), 2);
+  ASSERT_EQ(lru_cache.TotalCharge(), 8);
+  ASSERT_TRUE(lru_cache.LRUAndHandleTableConsistent());
+  ASSERT_TRUE(lru_cache.LRUAsExpected({{"k2", "v2"}, {"k6", "v6"}}));
+
+  // ***************** Step 10 *****************
+  // (k1, v1) -> (k2, v2) -> (k6, v6)
+  lru_cache.Insert("k1", "v1", 1);
+  ASSERT_TRUE(s.ok());
+  ASSERT_EQ(lru_cache.Size(), 3);
+  ASSERT_EQ(lru_cache.TotalCharge(), 9);
+  ASSERT_TRUE(lru_cache.LRUAndHandleTableConsistent());
+  ASSERT_TRUE(lru_cache.LRUAsExpected({{"k1", "v1"}, {"k2", "v2"}, {"k6", "v6"}}));
+
+  // ***************** Step 11 *****************
+  // (k3, v3) -> (k1, v1) -> (k2, v2)
+  lru_cache.Insert("k3", "v3", 3);
+  ASSERT_TRUE(s.ok());
+  ASSERT_EQ(lru_cache.Size(), 3);
+  ASSERT_EQ(lru_cache.TotalCharge(), 6);
+  ASSERT_TRUE(lru_cache.LRUAndHandleTableConsistent());
+  ASSERT_TRUE(lru_cache.LRUAsExpected({{"k3", "v3"}, {"k1", "v1"}, {"k2", "v2"}}));
+}
+
+TEST(LRUCacheTest, TestRemoveCase1) {
+  Status s;
+  storage::LRUCache<std::string, std::string> lru_cache;
+  lru_cache.SetCapacity(5);
+
+  // ***************** Step 1 *****************
+  // (k5, v5) -> (k4, v4) -> (k3, v3) -> (k2, v2) -> (k1, v1);
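+  // Remove must unlink an entry from both the handle table and the LRU
+  // list; LRUAndHandleTableConsistent() is asserted after every step to
+  // verify the two structures stay in sync.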
lru_cache.Insert("k1", "v1"); + lru_cache.Insert("k2", "v2"); + lru_cache.Insert("k3", "v3"); + lru_cache.Insert("k4", "v4"); + lru_cache.Insert("k5", "v5"); + ASSERT_EQ(lru_cache.Size(), 5); + ASSERT_EQ(lru_cache.TotalCharge(), 5); + ASSERT_TRUE(lru_cache.LRUAndHandleTableConsistent()); + ASSERT_TRUE(lru_cache.LRUAsExpected({{"k5", "v5"}, {"k4", "v4"}, {"k3", "v3"}, {"k2", "v2"}, {"k1", "v1"}})); + + // ***************** Step 2 ***************** + // (k4, v4) -> (k3, v3) -> (k2, v2) -> (k1, v1); + s = lru_cache.Remove("k5"); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(lru_cache.Size(), 4); + ASSERT_EQ(lru_cache.TotalCharge(), 4); + ASSERT_TRUE(lru_cache.LRUAndHandleTableConsistent()); + ASSERT_TRUE(lru_cache.LRUAsExpected({{"k4", "v4"}, {"k3", "v3"}, {"k2", "v2"}, {"k1", "v1"}})); + + // ***************** Step 3 ***************** + // (k4, v4) -> (k3, v3) -> (k2, v2) + s = lru_cache.Remove("k1"); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(lru_cache.Size(), 3); + ASSERT_EQ(lru_cache.TotalCharge(), 3); + ASSERT_TRUE(lru_cache.LRUAndHandleTableConsistent()); + ASSERT_TRUE(lru_cache.LRUAsExpected({{"k4", "v4"}, {"k3", "v3"}, {"k2", "v2"}})); + + // ***************** Step 4 ***************** + // (k4, v4) -> (k2, v2) + s = lru_cache.Remove("k3"); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(lru_cache.Size(), 2); + ASSERT_EQ(lru_cache.TotalCharge(), 2); + ASSERT_TRUE(lru_cache.LRUAndHandleTableConsistent()); + ASSERT_TRUE(lru_cache.LRUAsExpected({{"k4", "v4"}, {"k2", "v2"}})); + + // ***************** Step 5 ***************** + // (k4, v4) + s = lru_cache.Remove("k2"); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(lru_cache.Size(), 1); + ASSERT_EQ(lru_cache.TotalCharge(), 1); + ASSERT_TRUE(lru_cache.LRUAndHandleTableConsistent()); + ASSERT_TRUE(lru_cache.LRUAsExpected({{"k4", "v4"}})); + + // ***************** Step 6 ***************** + // empty + s = lru_cache.Remove("k4"); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(lru_cache.Size(), 0); + ASSERT_EQ(lru_cache.TotalCharge(), 0); + ASSERT_TRUE(lru_cache.LRUAndHandleTableConsistent()); + ASSERT_TRUE(lru_cache.LRUAsExpected({})); + + // ***************** Step 7 ***************** + // empty + s = lru_cache.Remove("k4"); + ASSERT_TRUE(s.IsNotFound()); + ASSERT_EQ(lru_cache.Size(), 0); + ASSERT_EQ(lru_cache.TotalCharge(), 0); + ASSERT_TRUE(lru_cache.LRUAndHandleTableConsistent()); + ASSERT_TRUE(lru_cache.LRUAsExpected({})); +} + +int main(int argc, char** argv) { + ::testing::InitGoogleTest(&argc, argv); + return RUN_ALL_TESTS(); +} diff --git a/tools/pika_migrate/src/storage/tests/options_test.cc b/tools/pika_migrate/src/storage/tests/options_test.cc new file mode 100644 index 0000000000..b33177ce2d --- /dev/null +++ b/tools/pika_migrate/src/storage/tests/options_test.cc @@ -0,0 +1,60 @@ +// Copyright (c) 2017-present, Qihoo, Inc. All rights reserved. +// This source code is licensed under the BSD-style license found in the +// LICENSE file in the root directory of this source tree. An additional grant +// of patent rights can be found in the PATENTS file in the same directory. 
+
+#include <gtest/gtest.h>
+#include <string>
+#include <unordered_map>
+
+#include "storage/storage.h"
+
+using namespace storage;
+
+class StorageOptionsTest : public ::testing::Test {
+ public:
+  StorageOptionsTest() = default;
+  ~StorageOptionsTest() override = default;
+
+  static void SetUpTestSuite() {}
+  static void TearDownTestSuite() {}
+
+  StorageOptions storage_options;
+  storage::Status s;
+};
+
+// ResetOptions
+TEST_F(StorageOptionsTest, ResetOptionsTest) {
+  std::unordered_map<std::string, std::string> cf_options_map{{"write_buffer_size", "4096"},
+                                                              {"max_write_buffer_number", "10"}};
+  s = storage_options.ResetOptions(OptionType::kColumnFamily, cf_options_map);
+  ASSERT_TRUE(s.ok());
+  ASSERT_EQ(storage_options.options.write_buffer_size, 4096);
+  ASSERT_EQ(storage_options.options.max_write_buffer_number, 10);
+
+  std::unordered_map<std::string, std::string> invalid_cf_options_map{{"write_buffer_size", "abc"},
+                                                                      {"max_write_buffer_number", "0x33"}};
+  s = storage_options.ResetOptions(OptionType::kColumnFamily, invalid_cf_options_map);
+  ASSERT_FALSE(s.ok());
+  ASSERT_EQ(storage_options.options.write_buffer_size, 4096);
+  ASSERT_EQ(storage_options.options.max_write_buffer_number, 10);
+
+  std::unordered_map<std::string, std::string> db_options_map{{"max_open_files", "16"},
+                                                              {"max_background_compactions", "32"}};
+  s = storage_options.ResetOptions(OptionType::kDB, db_options_map);
+  ASSERT_TRUE(s.ok());
+  ASSERT_EQ(storage_options.options.max_open_files, 16);
+  ASSERT_EQ(storage_options.options.max_background_compactions, 32);
+
+  std::unordered_map<std::string, std::string> invalid_db_options_map{{"max_open_files", "a"},
+                                                                      {"max_background_compactions", "bac"}};
+  s = storage_options.ResetOptions(OptionType::kDB, invalid_db_options_map);
+  ASSERT_FALSE(s.ok());
+  ASSERT_EQ(storage_options.options.max_open_files, 16);
+  ASSERT_EQ(storage_options.options.max_background_compactions, 32);
+}
+
+int main(int argc, char** argv) {
+  ::testing::InitGoogleTest(&argc, argv);
+  return RUN_ALL_TESTS();
+}
diff --git a/tools/pika_migrate/src/storage/tests/sets_test.cc b/tools/pika_migrate/src/storage/tests/sets_test.cc
new file mode 100644
index 0000000000..5b331b4781
--- /dev/null
+++ b/tools/pika_migrate/src/storage/tests/sets_test.cc
@@ -0,0 +1,2254 @@
+// Copyright (c) 2017-present, Qihoo, Inc. All rights reserved.
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree. An additional grant
+// of patent rights can be found in the PATENTS file in the same directory.
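+
+// The helpers defined below (members_match, size_match, make_expired)
+// compare a set's stored members against an expected vector ignoring
+// order, and treat a NotFound status as a match whenever the expectation
+// is empty; make_expired sets a 1-second TTL and sleeps past it.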
+
+#include <gtest/gtest.h>
+#include <algorithm>
+#include <thread>
+
+#include "glog/logging.h"
+
+#include "pstd/include/pika_codis_slot.h"
+#include "pstd/include/env.h"
+#include "storage/storage.h"
+#include "storage/util.h"
+
+using namespace storage;
+
+class SetsTest : public ::testing::Test {
+ public:
+  SetsTest() = default;
+  ~SetsTest() override = default;
+
+  void SetUp() override {
+    std::string path = "./db/sets";
+    pstd::DeleteDirIfExist(path);
+    mkdir(path.c_str(), 0755);
+    storage_options.options.create_if_missing = true;
+    s = db.Open(storage_options, path);
+  }
+
+  void TearDown() override {
+    std::string path = "./db/sets";
+    DeleteFiles(path.c_str());
+  }
+
+  static void SetUpTestSuite() {}
+  static void TearDownTestSuite() {}
+
+  StorageOptions storage_options;
+  storage::Storage db;
+  storage::Status s;
+};
+
+static bool members_match(storage::Storage* const db, const Slice& key,
+                          const std::vector<std::string>& expect_members) {
+  std::vector<std::string> mm_out;
+  Status s = db->SMembers(key, &mm_out);
+  if (!s.ok() && !s.IsNotFound()) {
+    return false;
+  }
+  if (mm_out.size() != expect_members.size()) {
+    return false;
+  }
+  if (s.IsNotFound() && expect_members.empty()) {
+    return true;
+  }
+  for (const auto& member : expect_members) {
+    if (find(mm_out.begin(), mm_out.end(), member) == mm_out.end()) {
+      return false;
+    }
+  }
+  return true;
+}
+
+static bool members_match(const std::vector<std::string>& mm_out, const std::vector<std::string>& expect_members) {
+  if (mm_out.size() != expect_members.size()) {
+    return false;
+  }
+  for (const auto& member : expect_members) {
+    if (find(mm_out.begin(), mm_out.end(), member) == mm_out.end()) {
+      return false;
+    }
+  }
+  return true;
+}
+
+static bool members_contains(const std::vector<std::string>& mm_out, const std::vector<std::string>& total_members) {
+  for (const auto& member : mm_out) {
+    if (find(total_members.begin(), total_members.end(), member) == total_members.end()) {
+      return false;
+    }
+  }
+  return true;
+}
+
+static bool members_uniquen(const std::vector<std::string>& members) {
+  for (int32_t idx = 0; idx < members.size(); ++idx) {
+    for (int32_t sidx = idx + 1; sidx < members.size(); ++sidx) {
+      if (members[idx] == members[sidx]) {
+        return false;
+      }
+    }
+  }
+  return true;
+}
+
+static bool size_match(storage::Storage* const db, const Slice& key, int32_t expect_size) {
+  int32_t size = 0;
+  Status s = db->SCard(key, &size);
+  if (!s.ok() && !s.IsNotFound()) {
+    return false;
+  }
+  if (s.IsNotFound() && (expect_size == 0)) {
+    return true;
+  }
+  return size == expect_size;
+}
+
+static bool make_expired(storage::Storage* const db, const Slice& key) {
+  std::map<storage::DataType, storage::Status> type_status;
+  int ret = db->Expire(key, 1);
+  if ((ret == 0) || !type_status[storage::DataType::kSets].ok()) {
+    return false;
+  }
+  std::this_thread::sleep_for(std::chrono::milliseconds(2000));
+  return true;
+}
+
+// SAdd
+TEST_F(SetsTest, SAddTest) {  // NOLINT
+  int32_t ret = 0;
+  std::vector<std::string> members1{"a", "b", "c", "b"};
+  s = db.SAdd("SADD_KEY", members1, &ret);
+  ASSERT_TRUE(s.ok());
+  ASSERT_EQ(ret, 3);
+  ASSERT_TRUE(size_match(&db, "SADD_KEY", 3));
+  ASSERT_TRUE(members_match(&db, "SADD_KEY", {"a", "b", "c"}));
+
+  std::vector<std::string> members2{"d", "e"};
+  s = db.SAdd("SADD_KEY", members2, &ret);
+  ASSERT_TRUE(s.ok());
+  ASSERT_EQ(ret, 2);
+  ASSERT_TRUE(size_match(&db, "SADD_KEY", 5));
+  ASSERT_TRUE(members_match(&db, "SADD_KEY", {"a", "b", "c", "d", "e"}));
+
+  // The key has timeout
+  ASSERT_TRUE(make_expired(&db, "SADD_KEY"));
+  ASSERT_TRUE(size_match(&db, "SADD_KEY", 0));
+
+  std::vector<std::string> members3{"a", "b"};
+  s = db.SAdd("SADD_KEY", members3, &ret);
+  ASSERT_TRUE(s.ok());
+  ASSERT_EQ(ret, 2);
+  ASSERT_TRUE(size_match(&db, "SADD_KEY", 2));
+  ASSERT_TRUE(members_match(&db, "SADD_KEY", {"a", "b"}));
+
+  // Delete the key
+  std::vector<std::string> del_keys = {"SADD_KEY"};
+  std::map<storage::DataType, storage::Status> type_status;
+  db.Del(del_keys);
+  ASSERT_TRUE(type_status[storage::DataType::kSets].ok());
+  ASSERT_TRUE(size_match(&db, "SADD_KEY", 0));
+  ASSERT_TRUE(members_match(&db, "SADD_KEY", {}));
+
+  std::vector<std::string> members4{"a", "x", "l"};
+  s = db.SAdd("SADD_KEY", members4, &ret);
+  ASSERT_TRUE(s.ok());
+  ASSERT_EQ(ret, 3);
+  ASSERT_TRUE(size_match(&db, "SADD_KEY", 3));
+  ASSERT_TRUE(members_match(&db, "SADD_KEY", {"a", "x", "l"}));
+
+  std::vector<std::string> members5{"a", "x", "l", "z"};
+  s = db.SAdd("SADD_KEY", members5, &ret);
+  ASSERT_TRUE(s.ok());
+  ASSERT_EQ(ret, 1);
+  ASSERT_TRUE(size_match(&db, "SADD_KEY", 4));
+  ASSERT_TRUE(members_match(&db, "SADD_KEY", {"a", "x", "l", "z"}));
+}
+
+// SCard
+TEST_F(SetsTest, SCardTest) {  // NOLINT
+  int32_t ret = 0;
+  std::vector<std::string> members{"MM1", "MM2", "MM3"};
+  s = db.SAdd("SCARD_KEY", members, &ret);
+  ASSERT_TRUE(s.ok());
+  ASSERT_EQ(ret, 3);
+  s = db.SCard("SCARD_KEY", &ret);
+  ASSERT_TRUE(s.ok());
+  ASSERT_EQ(ret, 3);
+}
+
+// SDiff
+TEST_F(SetsTest, SDiffTest) {  // NOLINT
+  int32_t ret = 0;
+
+  // ***************** Group 1 Test *****************
+  // key1 = {a, b, c, d}
+  // key2 = {c}
+  // key3 = {a, c, e}
+  // SDIFF key1 key2 key3  = {b, d}
+  std::vector<std::string> gp1_members1{"a", "b", "c", "d"};
+  std::vector<std::string> gp1_members2{"c"};
+  std::vector<std::string> gp1_members3{"a", "c", "e"};
+  s = db.SAdd("GP1_SDIFF_KEY1", gp1_members1, &ret);
+  ASSERT_TRUE(s.ok());
+  ASSERT_EQ(ret, 4);
+  s = db.SAdd("GP1_SDIFF_KEY2", gp1_members2, &ret);
+  ASSERT_TRUE(s.ok());
+  ASSERT_EQ(ret, 1);
+  s = db.SAdd("GP1_SDIFF_KEY3", gp1_members3, &ret);
+  ASSERT_TRUE(s.ok());
+  ASSERT_EQ(ret, 3);
+
+  std::vector<std::string> gp1_keys{"GP1_SDIFF_KEY1", "GP1_SDIFF_KEY2", "GP1_SDIFF_KEY3"};
+  std::vector<std::string> gp1_members_out;
+  s = db.SDiff(gp1_keys, &gp1_members_out);
+  ASSERT_TRUE(s.ok());
+  ASSERT_TRUE(members_match(gp1_members_out, {"b", "d"}));
+
+  // key1 = {a, b, c, d}
+  // key2 = {c}
+  // key3 = {a, c, e}       (expire)
+  // SDIFF key1 key2 key3  = {a, b, d}
+  std::map<storage::DataType, storage::Status> gp1_type_status;
+  db.Expire("GP1_SDIFF_KEY3", 1);
+  ASSERT_TRUE(gp1_type_status[storage::DataType::kSets].ok());
+  std::this_thread::sleep_for(std::chrono::milliseconds(2000));
+
+  gp1_members_out.clear();
+  s = db.SDiff(gp1_keys, &gp1_members_out);
+  ASSERT_TRUE(s.ok());
+  ASSERT_TRUE(members_match(gp1_members_out, {"a", "b", "d"}));
+
+  // key1 = {a, b, c, d}
+  // key2 = {c}
+  // key3 = {a, c, e}       (expire key)
+  // key4 = {}              (not exist key)
+  // SDIFF key1 key2 key3 key4 = {a, b, d}
+  gp1_keys.emplace_back("GP1_SDIFF_KEY4");
+  gp1_members_out.clear();
+  s = db.SDiff(gp1_keys, &gp1_members_out);
+  ASSERT_TRUE(s.ok());
+  ASSERT_TRUE(members_match(gp1_members_out, {"a", "b", "d"}));
+
+  // ***************** Group 2 Test *****************
+  // key1 = {}
+  // key2 = {c}
+  // key3 = {a, c, e}
+  // SDIFF key1 key2 key3  = {}
+  std::vector<std::string> gp2_members1{};
+  std::vector<std::string> gp2_members2{"c"};
+  std::vector<std::string> gp2_members3{"a", "c", "e"};
+  s = db.SAdd("GP2_SDIFF_KEY1", gp2_members1, &ret);
+  ASSERT_TRUE(s.ok());
+  ASSERT_EQ(ret, 0);
+  s = db.SAdd("GP2_SDIFF_KEY2", gp2_members2, &ret);
+  ASSERT_TRUE(s.ok());
+  ASSERT_EQ(ret, 1);
+  s = db.SAdd("GP2_SDIFF_KEY3", gp2_members3, &ret);
+  ASSERT_TRUE(s.ok());
+  ASSERT_EQ(ret, 3);
+
+  std::vector<std::string> gp2_keys{"GP2_SDIFF_KEY1", "GP2_SDIFF_KEY2", "GP2_SDIFF_KEY3"};
+  std::vector<std::string> gp2_members_out;
+  s = db.SDiff(gp2_keys, &gp2_members_out);
+  ASSERT_TRUE(s.ok());
+  ASSERT_TRUE(members_match(gp2_members_out, {}));
+
+  // ***************** Group 3 Test *****************
+  // key1 = {a, b, c, d}
+  // SDIFF key1  = {a, b, c, d}
+  std::vector<std::string> gp3_members1{"a", "b", "c", "d"};
+  s = db.SAdd("GP3_SDIFF_KEY1", gp3_members1, &ret);
+  ASSERT_TRUE(s.ok());
+  ASSERT_EQ(ret, 4);
+
+  std::vector<std::string> gp3_keys{"GP3_SDIFF_KEY1"};
+  std::vector<std::string> gp3_members_out;
+  s = db.SDiff(gp3_keys, &gp3_members_out);
+  ASSERT_TRUE(s.ok());
+  ASSERT_TRUE(members_match(gp3_members_out, {"a", "b", "c", "d"}));
+
+  // ***************** Group 4 Test *****************
+  // key1 = {a, b, c, d}    (expire key);
+  // key2 = {c}
+  // key3 = {a, c, e}
+  // SDIFF key1 key2 key3  = {}
+  std::vector<std::string> gp4_members1{"a", "b", "c", "d"};
+  std::vector<std::string> gp4_members2{"c"};
+  std::vector<std::string> gp4_members3{"a", "c", "e"};
+  s = db.SAdd("GP4_SDIFF_KEY1", gp4_members1, &ret);
+  ASSERT_TRUE(s.ok());
+  ASSERT_EQ(ret, 4);
+  s = db.SAdd("GP4_SDIFF_KEY2", gp4_members2, &ret);
+  ASSERT_TRUE(s.ok());
+  ASSERT_EQ(ret, 1);
+  s = db.SAdd("GP4_SDIFF_KEY3", gp4_members3, &ret);
+  ASSERT_TRUE(s.ok());
+  ASSERT_EQ(ret, 3);
+
+  ASSERT_TRUE(make_expired(&db, "GP4_SDIFF_KEY1"));
+
+  std::vector<std::string> gp4_keys{"GP4_SDIFF_KEY1", "GP4_SDIFF_KEY2", "GP4_SDIFF_KEY3"};
+  std::vector<std::string> gp4_members_out;
+  s = db.SDiff(gp4_keys, &gp4_members_out);
+  ASSERT_TRUE(s.ok());
+  ASSERT_TRUE(members_match(gp4_members_out, {}));
+
+  // ***************** Group 5 Test *****************
+  // key1 = {a, b, c, d}    (key1 is the empty string "")
+  // key2 = {c}
+  // key3 = {a, c, e}
+  // SDIFF key1 key2 key3  = {b, d}
+  std::vector<std::string> gp5_members1{"a", "b", "c", "d"};
+  std::vector<std::string> gp5_members2{"c"};
+  std::vector<std::string> gp5_members3{"a", "c", "e"};
+  s = db.SAdd("", gp5_members1, &ret);
+  ASSERT_TRUE(s.ok());
+  ASSERT_EQ(ret, 4);
+  s = db.SAdd("GP5_SDIFF_KEY2", gp5_members2, &ret);
+  ASSERT_TRUE(s.ok());
+  ASSERT_EQ(ret, 1);
+  s = db.SAdd("GP5_SDIFF_KEY3", gp5_members3, &ret);
+  ASSERT_TRUE(s.ok());
+  ASSERT_EQ(ret, 3);
+
+  std::vector<std::string> gp5_keys{"", "GP5_SDIFF_KEY2", "GP5_SDIFF_KEY3"};
+  std::vector<std::string> gp5_members_out;
+  s = db.SDiff(gp5_keys, &gp5_members_out);
+  ASSERT_TRUE(s.ok());
+  ASSERT_TRUE(members_match(gp5_members_out, {"b", "d"}));
+
+  // double "GP5_SDIFF_KEY3"
+  gp5_keys.emplace_back("GP5_SDIFF_KEY3");
+  gp5_members_out.clear();
+  s = db.SDiff(gp5_keys, &gp5_members_out);
+  ASSERT_TRUE(s.ok());
+  ASSERT_TRUE(members_match(gp5_members_out, {"b", "d"}));
+
+  // ***************** Group 6 Test *****************
+  // empty keys
+  std::vector<std::string> gp6_keys;
+  std::vector<std::string> gp6_members_out;
+  s = db.SDiff(gp6_keys, &gp6_members_out);
+  ASSERT_TRUE(s.IsCorruption());
+  ASSERT_TRUE(members_match(gp6_members_out, {}));
+}
+
+// SDiffstore
+TEST_F(SetsTest, SDiffstoreTest) {  // NOLINT
+  int32_t ret = 0;
+
+  // ***************** Group 1 Test *****************
+  // destination = {};
+  // key1 = {a, b, c, d}
+  // key2 = {c}
+  // key3 = {a, c, e}
+  // SDIFFSTORE destination key1 key2 key3
+  // destination = {b, d}
+  std::vector<std::string> gp1_members1{"a", "b", "c", "d"};
+  std::vector<std::string> gp1_members2{"c"};
+  std::vector<std::string> gp1_members3{"a", "c", "e"};
+  s = db.SAdd("GP1_SDIFFSTORE_KEY1", gp1_members1, &ret);
+  ASSERT_TRUE(s.ok());
+  ASSERT_EQ(ret, 4);
+  s = db.SAdd("GP1_SDIFFSTORE_KEY2", gp1_members2, &ret);
+  ASSERT_TRUE(s.ok());
+  ASSERT_EQ(ret, 1);
+  s = db.SAdd("GP1_SDIFFSTORE_KEY3", gp1_members3, &ret);
+  ASSERT_TRUE(s.ok());
+  ASSERT_EQ(ret, 3);
+
+  std::vector<std::string> gp1_members_out;
+  std::vector<std::string> value_to_dest;
+  std::vector<std::string> gp1_keys{"GP1_SDIFFSTORE_KEY1", "GP1_SDIFFSTORE_KEY2", "GP1_SDIFFSTORE_KEY3"};
"GP1_SDIFFSTORE_KEY2", "GP1_SDIFFSTORE_KEY3"}; + + s = db.SDiffstore("GP1_SDIFFSTORE_DESTINATION1", gp1_keys, value_to_dest, &ret); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(ret, 2); + ASSERT_TRUE(size_match(&db, "GP1_SDIFFSTORE_DESTINATION1", 2)); + ASSERT_TRUE(members_match(&db, "GP1_SDIFFSTORE_DESTINATION1", {"b", "d"})); + + // destination = {}; + // key1 = {a, b, c, d} + // key2 = {c} + // key3 = {a, c, e} (expire) + // SDIFFSTORE destination key1 key2 key3 + // destination = {a, b, d} + std::map gp1_type_status; + db.Expire("GP1_SDIFFSTORE_KEY3", 1); + ASSERT_TRUE(gp1_type_status[storage::DataType::kSets].ok()); + std::this_thread::sleep_for(std::chrono::milliseconds(2000)); + + gp1_members_out.clear(); + s = db.SDiffstore("GP1_SDIFFSTORE_DESTINATION2", gp1_keys, value_to_dest, &ret); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(ret, 3); + ASSERT_TRUE(size_match(&db, "GP1_SDIFFSTORE_DESTINATION2", 3)); + ASSERT_TRUE(members_match(&db, "GP1_SDIFFSTORE_DESTINATION2", {"a", "b", "d"})); + + // destination = {}; + // key1 = {a, b, c, d} + // key2 = {c} + // key3 = {a, c, e} (expire key) + // key4 = {} (not exist key) + // SDIFFSTORE destination key1 key2 key3 + // destination = {a, b, d} + gp1_keys.emplace_back("GP1_SDIFFSTORE_KEY4"); + gp1_members_out.clear(); + s = db.SDiffstore("GP1_SDIFFSTORE_DESTINATION3", gp1_keys, value_to_dest, &ret); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(ret, 3); + ASSERT_TRUE(size_match(&db, "GP1_SDIFFSTORE_DESTINATION3", 3)); + ASSERT_TRUE(members_match(&db, "GP1_SDIFFSTORE_DESTINATION3", {"a", "b", "d"})); + + // ***************** Group 2 Test ***************** + // destination = {}; + // key1 = {} + // key2 = {c} + // key3 = {a, c, e} + // SDIFFSTORE destination key1 key2 key3 + // destination = {} + std::vector gp2_members1{}; + std::vector gp2_members2{"c"}; + std::vector gp2_members3{"a", "c", "e"}; + s = db.SAdd("GP2_SDIFFSTORE_KEY1", gp2_members1, &ret); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(ret, 0); + s = db.SAdd("GP2_SDIFFSTORE_KEY2", gp2_members2, &ret); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(ret, 1); + s = db.SAdd("GP2_SDIFFSTORE_KEY3", gp2_members3, &ret); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(ret, 3); + + std::vector gp2_keys{"GP2_SDIFFSTORE_KEY1", "GP2_SDIFFSTORE_KEY2", "GP2_SDIFFSTORE_KEY3"}; + std::vector gp2_members_out; + s = db.SDiffstore("GP2_SDIFFSTORE_DESTINATION1", gp2_keys, value_to_dest, &ret); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(ret, 0); + ASSERT_TRUE(size_match(&db, "GP2_SDIFFSTORE_DESTINATION1", 0)); + ASSERT_TRUE(members_match(&db, "GP2_SDIFFSTORE_DESTINATION1", {})); + + // ***************** Group 3 Test ***************** + // destination = {}; + // key1 = {a, b, c, d} + // SDIFFSTORE destination key1 + // destination = {a, b, c, d} + std::vector gp3_members1{"a", "b", "c", "d"}; + s = db.SAdd("GP3_SDIFFSTORE_KEY1", gp3_members1, &ret); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(ret, 4); + + std::vector gp3_keys{"GP3_SDIFFSTORE_KEY1"}; + std::vector gp3_members_out; + s = db.SDiffstore("GP3_SDIFFSTORE_DESTINATION1", gp3_keys, value_to_dest, &ret); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(ret, 4); + ASSERT_TRUE(size_match(&db, "GP3_SDIFFSTORE_DESTINATION1", 4)); + ASSERT_TRUE(members_match(&db, "GP3_SDIFFSTORE_DESTINATION1", {"a", "b", "c", "d"})); + + // ***************** Group 4 Test ***************** + // destination = {}; + // key1 = {a, b, c, d} (expire key); + // key2 = {c} + // key3 = {a, c, e} + // SDIFFSTORE destination key1 key2 key3 + // destination = {} + std::vector gp4_members1{"a", "b", "c", "d"}; + std::vector gp4_members2{"c"}; + std::vector gp4_members3{"a", 
"c", "e"}; + s = db.SAdd("GP4_SDIFFSTORE_KEY1", gp4_members1, &ret); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(ret, 4); + s = db.SAdd("GP4_SDIFFSTORE_KEY2", gp4_members2, &ret); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(ret, 1); + s = db.SAdd("GP4_SDIFFSTORE_KEY3", gp4_members3, &ret); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(ret, 3); + + ASSERT_TRUE(make_expired(&db, "GP4_SDIFFSTORE_KEY1")); + + std::vector gp4_keys{"GP4_SDIFFSTORE_KEY1", "GP4_SDIFFSTORE_KEY2", "GP4_SDIFFSTORE_KEY3"}; + std::vector gp4_members_out; + s = db.SDiffstore("GP4_SDIFFSTORE_DESTINATION1", gp4_keys, value_to_dest, &ret); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(ret, 0); + ASSERT_TRUE(size_match(&db, "GP4_SDIFFSTORE_DESTINATION1", 0)); + ASSERT_TRUE(members_match(&db, "GP4_SDIFFSTORE_DESTINATION1", {})); + + // ***************** Group 5 Test ***************** + // the destination already exists, it is overwritten + // destination = {a, x, l} + // key1 = {a, b, c, d} + // key2 = {c} + // key3 = {a, c, e} + // SDIFFSTORE destination key1 key2 key3 + // destination = {b, d} + std::vector gp5_destination_members{"a", "x", "l"}; + std::vector gp5_members1{"a", "b", "c", "d"}; + std::vector gp5_members2{"c"}; + std::vector gp5_members3{"a", "c", "e"}; + s = db.SAdd("GP5_SDIFFSTORE_DESTINATION1", gp5_destination_members, &ret); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(ret, 3); + s = db.SAdd("GP5_SDIFFSTORE_KEY1", gp5_members1, &ret); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(ret, 4); + s = db.SAdd("GP5_SDIFFSTORE_KEY2", gp5_members2, &ret); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(ret, 1); + s = db.SAdd("GP5_SDIFFSTORE_KEY3", gp5_members3, &ret); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(ret, 3); + + std::vector gp5_keys{"GP5_SDIFFSTORE_KEY1", "GP5_SDIFFSTORE_KEY2", "GP5_SDIFFSTORE_KEY3"}; + s = db.SDiffstore("GP5_SDIFFSTORE_DESTINATION1", gp5_keys, value_to_dest, &ret); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(ret, 2); + ASSERT_TRUE(size_match(&db, "GP5_SDIFFSTORE_DESTINATION1", 2)); + ASSERT_TRUE(members_match(&db, "GP5_SDIFFSTORE_DESTINATION1", {"b", "d"})); + + // ***************** Group 6 Test ***************** + // test destination equal key1 (the destination already exists, it is + // overwritten) + // destination = {a, b, c, d}; + // key2 = {c} + // key3 = {a, c, e} + // SDIFFSTORE destination destination key2 key3 + // destination = {b, d} + std::vector gp6_destination_members{"a", "b", "c", "d"}; + std::vector gp6_members2{"c"}; + std::vector gp6_members3{"a", "c", "e"}; + s = db.SAdd("GP6_SDIFFSTORE_DESTINATION1", gp6_destination_members, &ret); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(ret, 4); + s = db.SAdd("GP6_SDIFFSTORE_KEY2", gp6_members2, &ret); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(ret, 1); + s = db.SAdd("GP6_SDIFFSTORE_KEY3", gp6_members3, &ret); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(ret, 3); + + std::vector gp6_keys{"GP6_SDIFFSTORE_DESTINATION1", "GP6_SDIFFSTORE_KEY2", "GP6_SDIFFSTORE_KEY3"}; + s = db.SDiffstore("GP6_SDIFFSTORE_DESTINATION1", gp6_keys, value_to_dest, &ret); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(ret, 2); + ASSERT_TRUE(size_match(&db, "GP6_SDIFFSTORE_DESTINATION1", 2)); + ASSERT_TRUE(members_match(&db, "GP6_SDIFFSTORE_DESTINATION1", {"b", "d"})); + + // ***************** Group 7 Test ***************** + // test destination exist but timeout (the destination already exists, it is + // overwritten) + // destination = {a, x, l}; + // key1 = {a, b, c, d} + // key2 = {c} + // key3 = {a, c, e} + // SDIFFSTORE destination key1 key2 key3 + // destination = {b, d} + std::vector gp7_destination_members{"a", "x", "l"}; + std::vector gp7_members1{"a", "b", "c", "d"}; + 
std::vector gp7_members2{"c"}; + std::vector gp7_members3{"a", "c", "e"}; + s = db.SAdd("GP7_SDIFFSTORE_DESTINATION1", gp7_destination_members, &ret); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(ret, 3); + s = db.SAdd("GP7_SDIFFSTORE_KEY1", gp7_members1, &ret); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(ret, 4); + s = db.SAdd("GP7_SDIFFSTORE_KEY2", gp7_members2, &ret); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(ret, 1); + s = db.SAdd("GP7_SDIFFSTORE_KEY3", gp7_members3, &ret); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(ret, 3); + + ASSERT_TRUE(make_expired(&db, "GP7_SDIFFSTORE_DESTINATION1")); + + std::vector gp7_keys{"GP7_SDIFFSTORE_KEY1", "GP7_SDIFFSTORE_KEY2", "GP7_SDIFFSTORE_KEY3"}; + s = db.SDiffstore("GP7_SDIFFSTORE_DESTINATION1", gp7_keys, value_to_dest, &ret); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(ret, 2); + ASSERT_TRUE(size_match(&db, "GP7_SDIFFSTORE_DESTINATION1", 2)); + ASSERT_TRUE(members_match(&db, "GP7_SDIFFSTORE_DESTINATION1", {"b", "d"})); +} + +// SInter +TEST_F(SetsTest, SInterTest) { // NOLINT + int32_t ret = 0; + + // ***************** Group 1 Test ***************** + // key1 = {a, b, c, d} + // key2 = {a, c} + // key3 = {a, c, e} + // SINTER key1 key2 key3 = {a, c} + std::vector gp1_members1{"a", "b", "c", "d"}; + std::vector gp1_members2{"a", "c"}; + std::vector gp1_members3{"a", "c", "e"}; + s = db.SAdd("GP1_SINTER_KEY1", gp1_members1, &ret); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(ret, 4); + s = db.SAdd("GP1_SINTER_KEY2", gp1_members2, &ret); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(ret, 2); + s = db.SAdd("GP1_SINTER_KEY3", gp1_members3, &ret); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(ret, 3); + + std::vector gp1_keys{"GP1_SINTER_KEY1", "GP1_SINTER_KEY2", "GP1_SINTER_KEY3"}; + std::vector gp1_members_out; + s = db.SInter(gp1_keys, &gp1_members_out); + ASSERT_TRUE(s.ok()); + ASSERT_TRUE(members_match(gp1_members_out, {"a", "c"})); + + // key1 = {a, b, c, d} + // key2 = {a, c} + // key3 = {a, c, e} (expire) + // SINTER key1 key2 key3 = {} + ASSERT_TRUE(make_expired(&db, "GP1_SINTER_KEY3")); + + gp1_members_out.clear(); + s = db.SInter(gp1_keys, &gp1_members_out); + ASSERT_TRUE(s.ok()); + ASSERT_TRUE(members_match(gp1_members_out, {})); + + // ***************** Group 2 Test ***************** + // key1 = {a, b, c, d} + // key2 = {c} + // key3 = {a, c, e} + // SINTER key1 key2 key3 not_exist_key = {} + std::vector gp2_members1{"a", "b", "c", "d"}; + std::vector gp2_members2{"c"}; + std::vector gp2_members3{"a", "c", "e"}; + s = db.SAdd("GP2_SINTER_KEY1", gp2_members1, &ret); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(ret, 4); + s = db.SAdd("GP2_SINTER_KEY2", gp2_members2, &ret); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(ret, 1); + s = db.SAdd("GP2_SINTER_KEY3", gp2_members3, &ret); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(ret, 3); + + std::vector gp2_keys{"GP2_SINTER_KEY1", "GP2_SINTER_KEY2", "GP2_SINTER_KEY3", "NOT_EXIST_KEY"}; + std::vector gp2_members_out; + s = db.SInter(gp2_keys, &gp2_members_out); + ASSERT_TRUE(s.ok()); + ASSERT_TRUE(members_match(gp2_members_out, {})); + + // ***************** Group 3 Test ***************** + // key1 = {a, b, c, d} + // key2 = {a, c} + // key3 = {} + // SINTER key1 key2 key3 = {} + std::vector gp3_members1{"a", "b", "c", "d"}; + std::vector gp3_members2{"a", "c"}; + std::vector gp3_members3{"a", "b", "c"}; + s = db.SAdd("GP3_SINTER_KEY1", gp3_members1, &ret); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(ret, 4); + s = db.SAdd("GP3_SINTER_KEY2", gp3_members2, &ret); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(ret, 2); + s = db.SAdd("GP3_SINTER_KEY3", gp3_members3, &ret); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(ret, 3); + + s = 
db.SRem("GP3_SINTER_KEY3", gp3_members3, &ret); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(ret, 3); + s = db.SCard("GP3_SINTER_KEY3", &ret); + ASSERT_TRUE(s.IsNotFound()); + ASSERT_EQ(ret, 0); + std::vector gp3_members_out; + s = db.SMembers("GP3_SINTER_KEY3", &gp3_members_out); + ASSERT_TRUE(s.IsNotFound()); + ASSERT_TRUE(members_match(gp3_members_out, {})); + + gp3_members_out.clear(); + std::vector gp3_keys{"GP3_SINTER_KEY1", "GP3_SINTER_KEY2", "GP3_SINTER_KEY3"}; + s = db.SInter(gp3_keys, &gp3_members_out); + ASSERT_TRUE(s.ok()); + ASSERT_TRUE(members_match(gp3_members_out, {})); + + // ***************** Group 4 Test ***************** + // key1 = {} + // key2 = {a, c} + // key3 = {a, b, c, d} + // SINTER key1 key2 key3 = {} + std::vector gp4_members1{"a", "b", "c", "d"}; + std::vector gp4_members2{"a", "c"}; + std::vector gp4_members3{"a", "b", "c", "d"}; + s = db.SAdd("GP4_SINTER_KEY1", gp4_members1, &ret); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(ret, 4); + s = db.SAdd("GP4_SINTER_KEY2", gp4_members2, &ret); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(ret, 2); + s = db.SAdd("GP4_SINTER_KEY3", gp4_members3, &ret); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(ret, 4); + + s = db.SRem("GP4_SINTER_KEY1", gp4_members1, &ret); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(ret, 4); + s = db.SCard("GP4_SINTER_KEY1", &ret); + ASSERT_TRUE(s.IsNotFound()); + ASSERT_EQ(ret, 0); + std::vector gp4_members_out; + s = db.SMembers("GP4_SINTER_KEY1", &gp4_members_out); + ASSERT_TRUE(s.IsNotFound()); + ASSERT_EQ(gp4_members_out.size(), 0); + + gp4_members_out.clear(); + std::vector gp4_keys{"GP4_SINTER_KEY1", "GP4_SINTER_KEY2", "GP4_SINTER_KEY3"}; + s = db.SInter(gp4_keys, &gp4_members_out); + ASSERT_TRUE(s.ok()); + ASSERT_TRUE(members_match(gp4_members_out, {})); + + // ***************** Group 5 Test ***************** + // key1 = {a, b, c, d} + // key2 = {a, c} + // key3 = {a, b, c} + // SINTER key1 key2 key2 key3 = {a, c} + std::vector gp5_members1{"a", "b", "c", "d"}; + std::vector gp5_members2{"a", "c"}; + std::vector gp5_members3{"a", "b", "c"}; + s = db.SAdd("GP5_SINTER_KEY1", gp5_members1, &ret); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(ret, 4); + s = db.SAdd("GP5_SINTER_KEY2", gp5_members2, &ret); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(ret, 2); + s = db.SAdd("GP5_SINTER_KEY3", gp5_members3, &ret); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(ret, 3); + + std::vector gp5_members_out; + std::vector gp5_keys{"GP5_SINTER_KEY1", "GP5_SINTER_KEY2", "GP5_SINTER_KEY2", "GP5_SINTER_KEY3"}; + s = db.SInter(gp5_keys, &gp5_members_out); + ASSERT_TRUE(s.ok()); + ASSERT_TRUE(members_match(gp5_members_out, {"a", "c"})); +} + +// SInterstore +TEST_F(SetsTest, SInterstoreTest) { // NOLINT + int32_t ret = 0; + + // ***************** Group 1 Test ***************** + // destination = {} + // key1 = {a, b, c, d} + // key2 = {a, c} + // key3 = {a, c, e} + // SINTERSTORE destination key1 key2 key3 + // destination = {a, c} + std::vector gp1_members1{"a", "b", "c", "d"}; + std::vector gp1_members2{"a", "c"}; + std::vector gp1_members3{"a", "c", "e"}; + s = db.SAdd("GP1_SINTERSTORE_KEY1", gp1_members1, &ret); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(ret, 4); + s = db.SAdd("GP1_SINTERSTORE_KEY2", gp1_members2, &ret); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(ret, 2); + s = db.SAdd("GP1_SINTERSTORE_KEY3", gp1_members3, &ret); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(ret, 3); + + std::vector gp1_keys{"GP1_SINTERSTORE_KEY1", "GP1_SINTERSTORE_KEY2", "GP1_SINTERSTORE_KEY3"}; + std::vector value_to_dest; + s = db.SInterstore("GP1_SINTERSTORE_DESTINATION1", gp1_keys, value_to_dest, &ret); + 
+  ASSERT_TRUE(s.ok());
+  ASSERT_EQ(ret, 2);
+  ASSERT_TRUE(size_match(&db, "GP1_SINTERSTORE_DESTINATION1", 2));
+  ASSERT_TRUE(members_match(&db, "GP1_SINTERSTORE_DESTINATION1", {"a", "c"}));
+
+  // ***************** Group 2 Test *****************
+  // the destination already exists, it is overwritten.
+  // destination = {a, x, l}
+  // key1 = {a, b, c, d}
+  // key2 = {a, c}
+  // key3 = {a, c, e}
+  // SINTERSTORE destination key1 key2 key3
+  // destination = {a, c}
+  std::vector<std::string> gp2_destination_members{"a", "x", "l"};
+  std::vector<std::string> gp2_members1{"a", "b", "c", "d"};
+  std::vector<std::string> gp2_members2{"a", "c"};
+  std::vector<std::string> gp2_members3{"a", "c", "e"};
+  s = db.SAdd("GP2_SINTERSTORE_DESTINATION1", gp2_destination_members, &ret);
+  ASSERT_TRUE(s.ok());
+  ASSERT_EQ(ret, 3);
+  s = db.SAdd("GP2_SINTERSTORE_KEY1", gp2_members1, &ret);
+  ASSERT_TRUE(s.ok());
+  ASSERT_EQ(ret, 4);
+  s = db.SAdd("GP2_SINTERSTORE_KEY2", gp2_members2, &ret);
+  ASSERT_TRUE(s.ok());
+  ASSERT_EQ(ret, 2);
+  s = db.SAdd("GP2_SINTERSTORE_KEY3", gp2_members3, &ret);
+  ASSERT_TRUE(s.ok());
+  ASSERT_EQ(ret, 3);
+
+  std::vector<std::string> gp2_keys{"GP2_SINTERSTORE_KEY1", "GP2_SINTERSTORE_KEY2", "GP2_SINTERSTORE_KEY3"};
+  s = db.SInterstore("GP2_SINTERSTORE_DESTINATION1", gp2_keys, value_to_dest, &ret);
+  ASSERT_TRUE(s.ok());
+  ASSERT_EQ(ret, 2);
+  ASSERT_TRUE(size_match(&db, "GP2_SINTERSTORE_DESTINATION1", 2));
+  ASSERT_TRUE(members_match(&db, "GP2_SINTERSTORE_DESTINATION1", {"a", "c"}));
+
+  // ***************** Group 3 Test *****************
+  // destination = {}
+  // key1 = {a, b, c, d}
+  // key2 = {a, c}
+  // key3 = {a, c, e}
+  // SINTERSTORE destination key1 key2 key3 not_exist_key
+  // destination = {}
+  std::vector<std::string> gp3_members1{"a", "b", "c", "d"};
+  std::vector<std::string> gp3_members2{"a", "c"};
+  std::vector<std::string> gp3_members3{"a", "c", "e"};
+  s = db.SAdd("GP3_SINTERSTORE_KEY1", gp3_members1, &ret);
+  ASSERT_TRUE(s.ok());
+  ASSERT_EQ(ret, 4);
+  s = db.SAdd("GP3_SINTERSTORE_KEY2", gp3_members2, &ret);
+  ASSERT_TRUE(s.ok());
+  ASSERT_EQ(ret, 2);
+  s = db.SAdd("GP3_SINTERSTORE_KEY3", gp3_members3, &ret);
+  ASSERT_TRUE(s.ok());
+  ASSERT_EQ(ret, 3);
+
+  std::vector<std::string> gp3_keys{"GP3_SINTERSTORE_KEY1", "GP3_SINTERSTORE_KEY2", "GP3_SINTERSTORE_KEY3",
+                                    "GP3_SINTERSTORE_NOT_EXIST_KEY"};
+  s = db.SInterstore("GP3_SINTERSTORE_DESTINATION1", gp3_keys, value_to_dest, &ret);
+  ASSERT_TRUE(s.ok());
+  ASSERT_EQ(ret, 0);
+  ASSERT_TRUE(size_match(&db, "GP3_SINTERSTORE_DESTINATION1", 0));
+  ASSERT_TRUE(members_match(&db, "GP3_SINTERSTORE_DESTINATION1", {}));
+
+  // ***************** Group 4 Test *****************
+  // destination = {}
+  // key1 = {a, b, c, d}
+  // key2 = {a, c}          (expire key);
+  // key3 = {a, c, e}
+  // SINTERSTORE destination key1 key2 key3
+  // destination = {}
+  std::vector<std::string> gp4_members1{"a", "b", "c", "d"};
+  std::vector<std::string> gp4_members2{"a", "c"};
+  std::vector<std::string> gp4_members3{"a", "c", "e"};
+  s = db.SAdd("GP4_SINTERSTORE_KEY1", gp4_members1, &ret);
+  ASSERT_TRUE(s.ok());
+  ASSERT_EQ(ret, 4);
+  s = db.SAdd("GP4_SINTERSTORE_KEY2", gp4_members2, &ret);
+  ASSERT_TRUE(s.ok());
+  ASSERT_EQ(ret, 2);
+  s = db.SAdd("GP4_SINTERSTORE_KEY3", gp4_members3, &ret);
+  ASSERT_TRUE(s.ok());
+  ASSERT_EQ(ret, 3);
+
+  ASSERT_TRUE(make_expired(&db, "GP4_SINTERSTORE_KEY2"));
+
+  std::vector<std::string> gp4_keys{"GP4_SINTERSTORE_KEY1", "GP4_SINTERSTORE_KEY2", "GP4_SINTERSTORE_KEY3"};
+  s = db.SInterstore("GP4_SINTERSTORE_DESTINATION1", gp4_keys, value_to_dest, &ret);
+  ASSERT_TRUE(s.ok());
+  ASSERT_EQ(ret, 0);
+  ASSERT_TRUE(size_match(&db, "GP4_SINTERSTORE_DESTINATION1", 0));
ASSERT_TRUE(members_match(&db, "GP4_SINTERSTORE_DESTINATION1", {})); + + // ***************** Group 5 Test ***************** + // destination = {} + // key1 = {a, b, c, d} (expire key); + // key2 = {a, c} + // key3 = {a, c, e} + // SINTERSTORE destination key1 key2 key3 + // destination = {} + std::vector gp5_members1{"a", "b", "c", "d"}; + std::vector gp5_members2{"a", "c"}; + std::vector gp5_members3{"a", "c", "e"}; + s = db.SAdd("GP5_SINTERSTORE_KEY1", gp5_members1, &ret); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(ret, 4); + s = db.SAdd("GP5_SINTERSTORE_KEY2", gp5_members2, &ret); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(ret, 2); + s = db.SAdd("GP5_SINTERSTORE_KEY3", gp5_members3, &ret); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(ret, 3); + + ASSERT_TRUE(make_expired(&db, "GP5_SINTERSTORE_KEY1")); + + std::vector gp5_keys{"GP5_SINTERSTORE_KEY1", "GP5_SINTERSTORE_KEY2", "GP5_SINTERSTORE_KEY3"}; + s = db.SInterstore("GP5_SINTERSTORE_DESTINATION1", gp5_keys, value_to_dest, &ret); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(ret, 0); + ASSERT_TRUE(size_match(&db, "GP5_SINTERSTORE_DESTINATION1", 0)); + ASSERT_TRUE(members_match(&db, "GP5_SINTERSTORE_DESTINATION1", {})); + + // ***************** Group 6 Test ***************** + // destination = {} + // key1 = {} + // key2 = {a, c} + // key3 = {a, c, e} + // SINTERSTORE destination key1 key2 key3 + // destination = {} + std::vector gp6_members1{"a", "b", "c", "d"}; + std::vector gp6_members2{"a", "c"}; + std::vector gp6_members3{"a", "c", "e"}; + s = db.SAdd("GP6_SINTERSTORE_KEY1", gp6_members1, &ret); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(ret, 4); + s = db.SAdd("GP6_SINTERSTORE_KEY2", gp6_members2, &ret); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(ret, 2); + s = db.SAdd("GP6_SINTERSTORE_KEY3", gp6_members3, &ret); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(ret, 3); + + s = db.SRem("GP6_SINTERSTORE_KEY1", gp6_members1, &ret); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(ret, 4); + s = db.SCard("GP6_SINTERSTORE_KEY1", &ret); + ASSERT_TRUE(s.IsNotFound()); + ASSERT_EQ(ret, 0); + + std::vector gp6_keys{"GP6_SINTERSTORE_KEY1", "GP6_SINTERSTORE_KEY2", "GP6_SINTERSTORE_KEY3"}; + s = db.SInterstore("GP6_SINTERSTORE_DESTINATION1", gp6_keys, value_to_dest, &ret); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(ret, 0); + ASSERT_TRUE(size_match(&db, "GP6_SINTERSTORE_DESTINATION1", 0)); + ASSERT_TRUE(members_match(&db, "GP6_SINTERSTORE_DESTINATION1", {})); + + // ***************** Group 7 Test ***************** + // destination = {} + // key1 = {a, b, c, d} + // key2 = {a, c} + // key3 = {a, c, e} + // SINTERSTORE destination not_exist_key key1 key2 key3 + // destination = {} + std::vector gp7_members1{"a", "b", "c", "d"}; + std::vector gp7_members2{"a", "c"}; + std::vector gp7_members3{"a", "c", "e"}; + s = db.SAdd("GP7_SINTERSTORE_KEY1", gp7_members1, &ret); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(ret, 4); + s = db.SAdd("GP7_SINTERSTORE_KEY2", gp7_members2, &ret); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(ret, 2); + s = db.SAdd("GP7_SINTERSTORE_KEY3", gp7_members3, &ret); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(ret, 3); + + std::vector gp7_keys{"GP7_SINTERSTORE_NOT_EXIST_KEY", "GP7_SINTERSTORE_KEY1", "GP7_SINTERSTORE_KEY2", + "GP7_SINTERSTORE_KEY3"}; + s = db.SInterstore("GP7_SINTERSTORE_DESTINATION1", gp7_keys, value_to_dest, &ret); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(ret, 0); + ASSERT_TRUE(size_match(&db, "GP7_SINTERSTORE_DESTINATION1", 0)); + ASSERT_TRUE(members_match(&db, "GP7_SINTERSTORE_DESTINATION1", {})); + + // ***************** Group 8 Test ***************** + // destination = {} + // key1 = {a, b, c, d} + // key2 = {a, b, c, d} + 
// key3 = {a, b, c, d}
+  // SINTERSTORE destination key1 key2 key3
+  // destination = {a, b, c, d}
+  std::vector<std::string> gp8_members1{"a", "b", "c", "d"};
+  std::vector<std::string> gp8_members2{"a", "b", "c", "d"};
+  std::vector<std::string> gp8_members3{"a", "b", "c", "d"};
+  s = db.SAdd("GP8_SINTERSTORE_KEY1", gp8_members1, &ret);
+  ASSERT_TRUE(s.ok());
+  ASSERT_EQ(ret, 4);
+  s = db.SAdd("GP8_SINTERSTORE_KEY2", gp8_members2, &ret);
+  ASSERT_TRUE(s.ok());
+  ASSERT_EQ(ret, 4);
+  s = db.SAdd("GP8_SINTERSTORE_KEY3", gp8_members3, &ret);
+  ASSERT_TRUE(s.ok());
+  ASSERT_EQ(ret, 4);
+
+  std::vector<std::string> gp8_keys{
+      "GP8_SINTERSTORE_KEY1",
+      "GP8_SINTERSTORE_KEY2",
+      "GP8_SINTERSTORE_KEY3",
+  };
+  std::vector<std::string> gp8_members_out;
+  s = db.SInterstore("GP8_SINTERSTORE_DESTINATION1", gp8_keys, value_to_dest, &ret);
+  ASSERT_TRUE(s.ok());
+  ASSERT_EQ(ret, 4);
+  ASSERT_TRUE(size_match(&db, "GP8_SINTERSTORE_DESTINATION1", 4));
+  ASSERT_TRUE(members_match(&db, "GP8_SINTERSTORE_DESTINATION1", {"a", "b", "c", "d"}));
+}
+
+// SIsmember
+TEST_F(SetsTest, SIsmemberTest) { // NOLINT
+  int32_t ret = 0;
+  std::vector<std::string> members{"MEMBER"};
+  s = db.SAdd("SISMEMBER_KEY", members, &ret);
+  ASSERT_TRUE(s.ok());
+  ASSERT_EQ(ret, 1);
+
+  // Not exist set key
+  s = db.SIsmember("SISMEMBER_NOT_EXIST_KEY", "MEMBER", &ret);
+  ASSERT_TRUE(s.IsNotFound());
+  ASSERT_EQ(ret, 0);
+
+  // Not exist set member
+  s = db.SIsmember("SISMEMBER_KEY", "NOT_EXIST_MEMBER", &ret);
+  ASSERT_TRUE(s.IsNotFound());
+  ASSERT_EQ(ret, 0);
+
+  s = db.SIsmember("SISMEMBER_KEY", "MEMBER", &ret);
+  ASSERT_TRUE(s.ok());
+  ASSERT_EQ(ret, 1);
+
+  // Expire set key
+  std::map<storage::DataType, storage::Status> type_status;
+  db.Expire("SISMEMBER_KEY", 1);
+  ASSERT_TRUE(type_status[storage::DataType::kSets].ok());
+  std::this_thread::sleep_for(std::chrono::milliseconds(2000));
+  s = db.SIsmember("SISMEMBER_KEY", "MEMBER", &ret);
+  ASSERT_TRUE(s.IsNotFound());
+  ASSERT_EQ(ret, 0);
+}
+
+// SMembers
+TEST_F(SetsTest, SMembersTest) { // NOLINT
+  int32_t ret = 0;
+  std::vector<std::string> mid_members_in;
+  mid_members_in.emplace_back("MID_MEMBER1");
+  mid_members_in.emplace_back("MID_MEMBER2");
+  mid_members_in.emplace_back("MID_MEMBER3");
+  s = db.SAdd("B_SMEMBERS_KEY", mid_members_in, &ret);
+  ASSERT_TRUE(s.ok());
+  ASSERT_EQ(ret, 3);
+
+  std::vector<std::string> members_out;
+  s = db.SMembers("B_SMEMBERS_KEY", &members_out);
+  ASSERT_TRUE(s.ok());
+  ASSERT_TRUE(members_match(members_out, mid_members_in));
+
+  // Insert some kv whose position is above the "mid kv"
+  std::vector<std::string> pre_members_in;
+  pre_members_in.emplace_back("PRE_MEMBER1");
+  pre_members_in.emplace_back("PRE_MEMBER2");
+  pre_members_in.emplace_back("PRE_MEMBER3");
+  s = db.SAdd("A_SMEMBERS_KEY", pre_members_in, &ret);
+  ASSERT_TRUE(s.ok());
+  ASSERT_EQ(ret, 3);
+  members_out.clear();
+  s = db.SMembers("B_SMEMBERS_KEY", &members_out);
+  ASSERT_TRUE(s.ok());
+  ASSERT_TRUE(members_match(members_out, mid_members_in));
+
+  // Insert some kv whose position is below the "mid kv"
+  std::vector<std::string> suf_members_in;
+  suf_members_in.emplace_back("SUF_MEMBER1");
+  suf_members_in.emplace_back("SUF_MEMBER2");
+  suf_members_in.emplace_back("SUF_MEMBER3");
+  s = db.SAdd("C_SMEMBERS_KEY", suf_members_in, &ret);
+  ASSERT_TRUE(s.ok());
+  ASSERT_EQ(ret, 3);
+  members_out.clear();
+  s = db.SMembers("B_SMEMBERS_KEY", &members_out);
+  ASSERT_TRUE(s.ok());
+  ASSERT_TRUE(members_match(members_out, mid_members_in));
+
+  // SMembers on a timed-out set
+  ASSERT_TRUE(make_expired(&db, "B_SMEMBERS_KEY"));
+  ASSERT_TRUE(members_match(&db, "B_SMEMBERS_KEY", {}));
+
+  // SMembers on a set that does not exist
+  ASSERT_TRUE(members_match(&db,
"SMEMBERS_NOT_EXIST_KEY", {})); +} + +// SMove +TEST_F(SetsTest, SMoveTest) { // NOLINT + int32_t ret = 0; + // ***************** Group 1 Test ***************** + // source = {a, b, c, d} + // destination = {a, c} + // SMove source destination d + // source = {a, b, c} + // destination = {a, c, d} + std::vector gp1_source{"a", "b", "c", "d"}; + std::vector gp1_destination{"a", "c"}; + s = db.SAdd("GP1_SMOVE_SOURCE", gp1_source, &ret); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(ret, 4); + s = db.SAdd("GP1_SMOVE_DESTINATION", gp1_destination, &ret); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(ret, 2); + + s = db.SMove("GP1_SMOVE_SOURCE", "GP1_SMOVE_DESTINATION", "d", &ret); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(ret, 1); + + ASSERT_TRUE(size_match(&db, "GP1_SMOVE_SOURCE", 3)); + ASSERT_TRUE(members_match(&db, "GP1_SMOVE_SOURCE", {"a", "b", "c"})); + ASSERT_TRUE(size_match(&db, "GP1_SMOVE_DESTINATION", 3)); + ASSERT_TRUE(members_match(&db, "GP1_SMOVE_DESTINATION", {"a", "c", "d"})); + + // ***************** Group 2 Test ***************** + // source = {a, b, c, d} + // destination = {a, c} (expire key); + // SMove source destination d + // source = {a, b, c} + // destination = {d} + std::vector gp2_source{"a", "b", "c", "d"}; + std::vector gp2_destination{"a", "c"}; + s = db.SAdd("GP2_SMOVE_SOURCE", gp2_source, &ret); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(ret, 4); + s = db.SAdd("GP2_SMOVE_DESTINATION", gp2_destination, &ret); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(ret, 2); + + ASSERT_TRUE(make_expired(&db, "GP2_SMOVE_DESTINATION")); + + s = db.SMove("GP2_SMOVE_SOURCE", "GP2_SMOVE_DESTINATION", "d", &ret); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(ret, 1); + + ASSERT_TRUE(size_match(&db, "GP2_SMOVE_SOURCE", 3)); + ASSERT_TRUE(members_match(&db, "GP2_SMOVE_SOURCE", {"a", "b", "c"})); + ASSERT_TRUE(size_match(&db, "GP2_SMOVE_DESTINATION", 1)); + ASSERT_TRUE(members_match(&db, "GP2_SMOVE_DESTINATION", {"d"})); + + // ***************** Group 3 Test ***************** + // source = {a, x, l} + // destination = {} + // SMove source destination x + // source = {a, l} + // destination = {x} + std::vector gp3_source{"a", "x", "l"}; + std::vector gp3_destination{"a", "b"}; + s = db.SAdd("GP3_SMOVE_SOURCE", gp3_source, &ret); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(ret, 3); + s = db.SAdd("GP3_SMOVE_DESTINATION", gp3_destination, &ret); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(ret, 2); + + s = db.SRem("GP3_SMOVE_DESTINATION", gp3_destination, &ret); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(ret, 2); + s = db.SCard("GP3_SMOVE_DESTINATION", &ret); + ASSERT_TRUE(s.IsNotFound()); + ASSERT_EQ(ret, 0); + + s = db.SMove("GP3_SMOVE_SOURCE", "GP3_SMOVE_DESTINATION", "x", &ret); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(ret, 1); + + ASSERT_TRUE(size_match(&db, "GP3_SMOVE_SOURCE", 2)); + ASSERT_TRUE(members_match(&db, "GP3_SMOVE_SOURCE", {"a", "l"})); + ASSERT_TRUE(size_match(&db, "GP3_SMOVE_DESTINATION", 1)); + ASSERT_TRUE(members_match(&db, "GP3_SMOVE_DESTINATION", {"x"})); + + // ***************** Group 4 Test ***************** + // source = {a, x, l} + // SMove source not_exist_key x + // source = {a, l} + // not_exist_key = {x} + std::vector gp4_source{"a", "x", "l"}; + s = db.SAdd("GP4_SMOVE_SOURCE", gp4_source, &ret); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(ret, 3); + + s = db.SMove("GP4_SMOVE_SOURCE", "GP4_SMOVE_NOT_EXIST_KEY", "x", &ret); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(ret, 1); + + ASSERT_TRUE(size_match(&db, "GP4_SMOVE_SOURCE", 2)); + ASSERT_TRUE(members_match(&db, "GP4_SMOVE_SOURCE", {"a", "l"})); + ASSERT_TRUE(size_match(&db, "GP4_SMOVE_NOT_EXIST_KEY", 1)); + 
ASSERT_TRUE(members_match(&db, "GP4_SMOVE_NOT_EXIST_KEY", {"x"})); + + // ***************** Group 5 Test ***************** + // source = {} + // destination = {a, x, l} + // SMove source destination x + // source = {} + // destination = {a, x, l} + std::vector gp5_source{"a", "b"}; + std::vector gp5_destination{"a", "x", "l"}; + s = db.SAdd("GP5_SMOVE_SOURCE", gp5_source, &ret); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(ret, 2); + s = db.SAdd("GP5_SMOVE_DESTINATION", gp5_destination, &ret); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(ret, 3); + + s = db.SRem("GP5_SMOVE_SOURCE", gp5_source, &ret); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(ret, 2); + s = db.SCard("GP5_SMOVE_SOURCE", &ret); + ASSERT_TRUE(s.IsNotFound()); + ASSERT_EQ(ret, 0); + + s = db.SMove("GP5_SMOVE_SOURCE", "GP5_SMOVE_DESTINATION", "x", &ret); + ASSERT_TRUE(s.IsNotFound()); + ASSERT_EQ(ret, 0); + + ASSERT_TRUE(size_match(&db, "GP5_SMOVE_SOURCE", 0)); + ASSERT_TRUE(members_match(&db, "GP5_SMOVE_SOURCE", {})); + ASSERT_TRUE(size_match(&db, "GP5_SMOVE_DESTINATION", 3)); + ASSERT_TRUE(members_match(&db, "GP5_SMOVE_DESTINATION", {"a", "x", "l"})); + + // ***************** Group 6 Test ***************** + // source = {a, b, c, d} (expire key); + // destination = {a, c} + // SMove source destination d + // source = {} + // destination = {d} + std::vector gp6_source{"a", "b", "c", "d"}; + std::vector gp6_destination{"a", "c"}; + s = db.SAdd("GP6_SMOVE_SOURCE", gp6_source, &ret); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(ret, 4); + s = db.SAdd("GP6_SMOVE_DESTINATION", gp6_destination, &ret); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(ret, 2); + + ASSERT_TRUE(make_expired(&db, "GP6_SMOVE_SOURCE")); + + s = db.SMove("GP6_SMOVE_SOURCE", "GP6_SMOVE_DESTINATION", "d", &ret); + ASSERT_TRUE(s.IsNotFound()); + ASSERT_EQ(ret, 0); + + ASSERT_TRUE(size_match(&db, "GP6_SMOVE_SOURCE", 0)); + ASSERT_TRUE(members_match(&db, "GP6_SMOVE_SOURCE", {})); + ASSERT_TRUE(size_match(&db, "GP6_SMOVE_DESTINATION", 2)); + ASSERT_TRUE(members_match(&db, "GP6_SMOVE_DESTINATION", {"a", "c"})); + + // ***************** Group 7 Test ***************** + // source = {a, b, c, d} + // destination = {a, c} + // SMove source destination x + // source = {a, b, c, d} + // destination = {a, c} + std::vector gp7_source{"a", "b", "c", "d"}; + std::vector gp7_destination{"a", "c"}; + s = db.SAdd("GP7_SMOVE_SOURCE", gp7_source, &ret); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(ret, 4); + s = db.SAdd("GP7_SMOVE_DESTINATION", gp7_destination, &ret); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(ret, 2); + + s = db.SMove("GP7_SMOVE_SOURCE", "GP7_SMOVE_DESTINATION", "x", &ret); + ASSERT_TRUE(s.IsNotFound()); + ASSERT_EQ(ret, 0); + + ASSERT_TRUE(size_match(&db, "GP7_SMOVE_SOURCE", 4)); + ASSERT_TRUE(members_match(&db, "GP7_SMOVE_SOURCE", {"a", "b", "c", "d"})); + ASSERT_TRUE(size_match(&db, "GP7_SMOVE_DESTINATION", 2)); + ASSERT_TRUE(members_match(&db, "GP7_SMOVE_DESTINATION", {"a", "c"})); + + // ***************** Group 8 Test ***************** + // source = {a, b, c, d} + // destination = {a, c, d} + // SMove source destination d + // source = {a, b, c} + // destination = {a, c, d} + std::vector gp8_source{"a", "b", "c", "d"}; + std::vector gp8_destination{"a", "c", "d"}; + s = db.SAdd("GP8_SMOVE_SOURCE", gp8_source, &ret); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(ret, 4); + s = db.SAdd("GP8_SMOVE_DESTINATION", gp8_destination, &ret); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(ret, 3); + + s = db.SMove("GP8_SMOVE_SOURCE", "GP8_SMOVE_DESTINATION", "d", &ret); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(ret, 1); + + ASSERT_TRUE(size_match(&db, 
"GP8_SMOVE_SOURCE", 3)); + ASSERT_TRUE(members_match(&db, "GP8_SMOVE_SOURCE", {"a", "b", "c"})); + ASSERT_TRUE(size_match(&db, "GP8_SMOVE_DESTINATION", 3)); + ASSERT_TRUE(members_match(&db, "GP8_SMOVE_DESTINATION", {"a", "c", "d"})); + + // ***************** Group 9 Test ***************** + // source = {a, b, c, d} + // SMove source source d + // source = {a, b, c, d} + std::vector gp9_source{"a", "b", "c", "d"}; + s = db.SAdd("GP9_SMOVE_SOURCE", gp8_source, &ret); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(ret, 4); + + s = db.SMove("GP9_SMOVE_SOURCE", "GP9_SMOVE_SOURCE", "d", &ret); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(ret, 1); + + ASSERT_TRUE(size_match(&db, "GP9_SMOVE_SOURCE", 4)); + ASSERT_TRUE(members_match(&db, "GP9_SMOVE_SOURCE", {"a", "b", "c", "d"})); +} + +// SPop +TEST_F(SetsTest, SPopTest) { // NOLINT + int32_t ret = 0; + std::vector members; + + // ***************** Group 1 Test ***************** + std::vector gp1_members{"gp1_aa", "gp1_bb", "gp1_cc"}; + s = db.SAdd("GP1_SPOP_KEY", gp1_members, &ret); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(ret, 3); + + std::vector gp1_out_all; + s = db.SPop("GP1_SPOP_KEY", &members, 1); + ASSERT_TRUE(s.ok()); + ASSERT_TRUE(size_match(&db, "GP1_SPOP_KEY", 2)); + + s = db.SPop("GP1_SPOP_KEY", &members, 1); + ASSERT_TRUE(s.ok()); + ASSERT_TRUE(size_match(&db, "GP1_SPOP_KEY", 1)); + + + s = db.SPop("GP1_SPOP_KEY", &members, 1); + ASSERT_TRUE(s.ok()); + ASSERT_TRUE(size_match(&db, "GP1_SPOP_KEY", 0)); + + gp1_out_all.swap(members); + members.clear(); + + ASSERT_TRUE(size_match(&db, "GP1_SPOP_KEY", 0)); + ASSERT_TRUE(members_match(&db, "GP1_SPOP_KEY", {})); + ASSERT_TRUE(members_match(gp1_out_all, gp1_members)); + + // ***************** Group 2 Test ***************** + std::vector gp2_members; + for (int32_t idx = 1; idx <= 1; ++idx) { + gp2_members.push_back("gb2_" + std::to_string(idx)); + } + s = db.SAdd("GP2_SPOP_KEY", gp2_members, &ret); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(ret, 1); + + std::vector gp2_out_all; + for (int32_t idx = 1; idx <= 1; ++idx) { + s = db.SPop("GP2_SPOP_KEY", &members, 1); + ASSERT_TRUE(s.ok()); + ASSERT_TRUE(size_match(&db, "GP2_SPOP_KEY", 1 - idx)); + + } + + gp2_out_all.swap(members); + members.clear(); + + ASSERT_TRUE(size_match(&db, "GP2_SPOP_KEY", 0)); + ASSERT_TRUE(members_match(&db, "GP2_SPOP_KEY", {})); + ASSERT_TRUE(members_match(gp2_out_all, gp2_members)); + + // ***************** Group 3 Test ***************** + std::vector gp3_members; + for (int32_t idx = 1; idx <= 100; ++idx) { + gp3_members.push_back("gb3_" + std::to_string(idx)); + } + s = db.SAdd("GP3_SPOP_KEY", gp3_members, &ret); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(ret, 100); + + std::vector gp3_out_all; + for (int32_t idx = 1; idx <= 100; ++idx) { + s = db.SPop("GP3_SPOP_KEY", &members, 1); + ASSERT_TRUE(s.ok()); + ASSERT_TRUE(size_match(&db, "GP3_SPOP_KEY", 100 - idx)); + + } + + gp3_out_all.swap(members); + members.clear(); + + ASSERT_TRUE(size_match(&db, "GP3_SPOP_KEY", 0)); + ASSERT_TRUE(members_match(&db, "GP3_SPOP_KEY", {})); + ASSERT_TRUE(members_match(gp3_out_all, gp3_members)); + + // ***************** Group 4 Test ***************** + std::vector gp4_members; + for (int32_t idx = 1; idx <= 10000; ++idx) { + gp4_members.push_back("gb4_" + std::to_string(idx)); + } + s = db.SAdd("GP4_SPOP_KEY", gp4_members, &ret); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(ret, 10000); + + std::vector gp4_out_all; + for (int32_t idx = 1; idx <= 10000; ++idx) { + s = db.SPop("GP4_SPOP_KEY", &members, 1); + ASSERT_TRUE(s.ok()); + ASSERT_TRUE(size_match(&db, "GP4_SPOP_KEY", 10000 - 
+
+  gp4_out_all.swap(members);
+  members.clear();
+
+  ASSERT_TRUE(size_match(&db, "GP4_SPOP_KEY", 0));
+  ASSERT_TRUE(members_match(&db, "GP4_SPOP_KEY", {}));
+  ASSERT_TRUE(members_match(gp4_out_all, gp4_members));
+
+  // ***************** Group 5 Test *****************
+  std::vector<std::string> gp5_members{"gp5_aa", "gp5_bb", "gp5_cc"};
+  s = db.SAdd("GP5_SPOP_KEY", gp5_members, &ret);
+  ASSERT_TRUE(s.ok());
+  ASSERT_EQ(ret, 3);
+
+  ASSERT_TRUE(make_expired(&db, "GP5_SPOP_KEY"));
+
+  s = db.SPop("GP5_SPOP_KEY", &members, 1);
+  ASSERT_TRUE(s.IsNotFound());
+  ASSERT_TRUE(size_match(&db, "GP5_SPOP_KEY", 0));
+  ASSERT_TRUE(members_match(&db, "GP5_SPOP_KEY", {}));
+
+  // ***************** Group 6 Test *****************
+  std::vector<std::string> gp6_members{"gp6_aa", "gp6_bb", "gp6_cc"};
+  s = db.SAdd("GP6_SPOP_KEY", gp6_members, &ret);
+  ASSERT_TRUE(s.ok());
+  ASSERT_EQ(ret, 3);
+
+  // Delete the key
+  std::vector<std::string> del_keys = {"GP6_SPOP_KEY"};
+  std::map<storage::DataType, storage::Status> type_status;
+  db.Del(del_keys);
+  ASSERT_TRUE(type_status[storage::DataType::kSets].ok());
+
+  s = db.SPop("GP6_SPOP_KEY", &members, 1);
+  ASSERT_TRUE(s.IsNotFound());
+  ASSERT_TRUE(size_match(&db, "GP6_SPOP_KEY", 0));
+  ASSERT_TRUE(members_match(&db, "GP6_SPOP_KEY", {}));
+
+  // ***************** Group 7 Test *****************
+  std::vector<std::string> gp7_members{"gp7_aa", "gp7_bb", "gp7_cc"};
+  s = db.SAdd("GP7_SPOP_KEY", gp7_members, &ret);
+  ASSERT_TRUE(s.ok());
+  ASSERT_EQ(ret, 3);
+
+  std::vector<std::string> gp7_out_all;
+  s = db.SPop("GP7_SPOP_KEY", &members, 4);
+  ASSERT_TRUE(s.ok());
+
+  gp7_out_all.swap(members);
+  members.clear();
+
+  ASSERT_TRUE(size_match(&db, "GP7_SPOP_KEY", 0));
+  ASSERT_TRUE(members_match(&db, "GP7_SPOP_KEY", {}));
+  ASSERT_TRUE(members_match(gp7_out_all, gp7_members));
+}
+
+// SRandmember
+TEST_F(SetsTest, SRandmemberTest) { // NOLINT
+  int32_t ret = 0;
+
+  // ***************** Group 1 Test *****************
+  std::vector<std::string> gp1_members{"gp1_aa", "gp1_bb", "gp1_cc", "gp1_dd", "gp1_ee", "gp1_ff", "gp1_gg", "gp1_hh"};
+  s = db.SAdd("GP1_SRANDMEMBER_KEY", gp1_members, &ret);
+  ASSERT_TRUE(s.ok());
+  ASSERT_EQ(ret, 8);
+
+  std::vector<std::string> gp1_out;
+
+  s = db.SRandmember("GP1_SRANDMEMBER_KEY", 1, &gp1_out);
+  ASSERT_TRUE(s.ok());
+  ASSERT_EQ(gp1_out.size(), 1);
+  ASSERT_TRUE(members_uniquen(gp1_out));
+  ASSERT_TRUE(members_contains(gp1_out, gp1_members));
+
+  s = db.SRandmember("GP1_SRANDMEMBER_KEY", 3, &gp1_out);
+  ASSERT_TRUE(s.ok());
+  ASSERT_EQ(gp1_out.size(), 3);
+  ASSERT_TRUE(members_uniquen(gp1_out));
+  ASSERT_TRUE(members_contains(gp1_out, gp1_members));
+
+  s = db.SRandmember("GP1_SRANDMEMBER_KEY", 4, &gp1_out);
+  ASSERT_TRUE(s.ok());
+  ASSERT_EQ(gp1_out.size(), 4);
+  ASSERT_TRUE(members_uniquen(gp1_out));
+  ASSERT_TRUE(members_contains(gp1_out, gp1_members));
+
+  s = db.SRandmember("GP1_SRANDMEMBER_KEY", 8, &gp1_out);
+  ASSERT_TRUE(s.ok());
+  ASSERT_EQ(gp1_out.size(), 8);
+  ASSERT_TRUE(members_uniquen(gp1_out));
+  ASSERT_TRUE(members_contains(gp1_out, gp1_members));
+
+  s = db.SRandmember("GP1_SRANDMEMBER_KEY", 10, &gp1_out);
+  ASSERT_TRUE(s.ok());
+  ASSERT_EQ(gp1_out.size(), 8);
+  ASSERT_TRUE(members_uniquen(gp1_out));
+  ASSERT_TRUE(members_contains(gp1_out, gp1_members));
+
+  s = db.SRandmember("GP1_SRANDMEMBER_KEY", -1, &gp1_out);
+  ASSERT_TRUE(s.ok());
+  ASSERT_EQ(gp1_out.size(), 1);
+  ASSERT_TRUE(members_contains(gp1_out, gp1_members));
+
+  s = db.SRandmember("GP1_SRANDMEMBER_KEY", -3, &gp1_out);
+  ASSERT_TRUE(s.ok());
+  ASSERT_EQ(gp1_out.size(), 3);
+  ASSERT_TRUE(members_contains(gp1_out, gp1_members));
+
+  s =
db.SRandmember("GP1_SRANDMEMBER_KEY", -4, &gp1_out); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(gp1_out.size(), 4); + ASSERT_TRUE(members_contains(gp1_out, gp1_members)); + + s = db.SRandmember("GP1_SRANDMEMBER_KEY", -8, &gp1_out); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(gp1_out.size(), 8); + ASSERT_TRUE(members_contains(gp1_out, gp1_members)); + + s = db.SRandmember("GP1_SRANDMEMBER_KEY", -10, &gp1_out); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(gp1_out.size(), 10); + ASSERT_TRUE(members_contains(gp1_out, gp1_members)); + + // ***************** Group 2 Test ***************** + s = db.SAdd("GP2_SRANDMEMBER_KEY", {"MM"}, &ret); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(ret, 1); + + std::vector gp2_out; + s = db.SRandmember("GP2_SRANDMEMBER_KEY", 1, &gp2_out); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(gp2_out.size(), 1); + ASSERT_TRUE(members_match(gp2_out, {"MM"})); + + s = db.SRandmember("GP2_SRANDMEMBER_KEY", 3, &gp2_out); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(gp2_out.size(), 1); + ASSERT_TRUE(members_match(gp2_out, {"MM"})); + + s = db.SRandmember("GP2_SRANDMEMBER_KEY", -1, &gp2_out); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(gp2_out.size(), 1); + ASSERT_TRUE(members_match(gp2_out, {"MM"})); + + s = db.SRandmember("GP2_SRANDMEMBER_KEY", -3, &gp2_out); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(gp2_out.size(), 3); + ASSERT_TRUE(members_match(gp2_out, {"MM", "MM", "MM"})); + + // ***************** Group 3 Test ***************** + std::vector gp3_members{"gp1_aa", "gp1_bb", "gp1_cc", "gp1_dd", "gp1_ee", "gp1_ff", "gp1_gg", "gp1_hh"}; + s = db.SAdd("GP3_SRANDMEMBER_KEY", gp3_members, &ret); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(ret, 8); + ASSERT_TRUE(make_expired(&db, "GP3_SRANDMEMBER_KEY")); + + std::vector gp3_out; + s = db.SRandmember("GP3_SRANDMEMBER_KEY", 1, &gp3_out); + ASSERT_TRUE(s.IsNotFound()); + ASSERT_EQ(gp3_out.size(), 0); + ASSERT_TRUE(members_match(gp3_out, {})); +} + +// SRem +TEST_F(SetsTest, SRemTest) { // NOLINT + int32_t ret = 0; + + // ***************** Group 1 Test ***************** + std::vector gp1_members{"a", "b", "c", "d"}; + s = db.SAdd("GP1_SREM_KEY", gp1_members, &ret); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(ret, 4); + + std::vector gp1_del_members{"a", "b"}; + s = db.SRem("GP1_SREM_KEY", gp1_del_members, &ret); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(ret, 2); + + ASSERT_TRUE(size_match(&db, "GP1_SREM_KEY", 2)); + ASSERT_TRUE(members_match(&db, "GP1_SREM_KEY", {"c", "d"})); + + // ***************** Group 2 Test ***************** + // srem not exist members + std::vector gp2_members{"a", "b", "c", "d"}; + s = db.SAdd("GP2_SREM_KEY", gp2_members, &ret); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(ret, 4); + + std::vector gp2_del_members{"e", "f"}; + s = db.SRem("GP2_SREM_KEY", gp2_del_members, &ret); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(ret, 0); + + ASSERT_TRUE(size_match(&db, "GP2_SREM_KEY", 4)); + ASSERT_TRUE(members_match(&db, "GP2_SREM_KEY", {"a", "b", "c", "d"})); + + // ***************** Group 3 Test ***************** + // srem not exist key + std::vector gp3_del_members{"a", "b", "c"}; + s = db.SRem("GP3_NOT_EXIST_KEY", gp3_del_members, &ret); + ASSERT_TRUE(s.IsNotFound()); + ASSERT_EQ(ret, 0); + + // ***************** Group 4 Test ***************** + // srem timeout key + std::vector gp4_members{"a", "b", "c", "d"}; + s = db.SAdd("GP4_SREM_KEY", gp4_members, &ret); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(ret, 4); + + ASSERT_TRUE(make_expired(&db, "GP4_SREM_KEY")); + + std::vector gp4_del_members{"a", "b"}; + s = db.SRem("GP4_SREM_KEY", gp4_del_members, &ret); + ASSERT_TRUE(s.IsNotFound()); + ASSERT_EQ(ret, 0); + + 
ASSERT_TRUE(size_match(&db, "GP4_SREM_KEY", 0)); + ASSERT_TRUE(members_match(&db, "GP4_SREM_KEY", {})); +} + +// SUnion +TEST_F(SetsTest, SUnionTest) { // NOLINT + int32_t ret = 0; + + // ***************** Group 1 Test ***************** + // key1 = {a, b, c, d} + // key2 = {a, c} + // key3 = {a, c, e} + // SUNION key1 key2 key3 = {a, b, c, d, e} + std::vector gp1_members1{"a", "b", "c", "d"}; + std::vector gp1_members2{"a", "c"}; + std::vector gp1_members3{"a", "c", "e"}; + s = db.SAdd("GP1_SUNION_KEY1", gp1_members1, &ret); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(ret, 4); + s = db.SAdd("GP1_SUNION_KEY2", gp1_members2, &ret); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(ret, 2); + s = db.SAdd("GP1_SUNION_KEY3", gp1_members3, &ret); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(ret, 3); + + std::vector gp1_keys{"GP1_SUNION_KEY1", "GP1_SUNION_KEY2", "GP1_SUNION_KEY3"}; + std::vector gp1_members_out; + s = db.SUnion(gp1_keys, &gp1_members_out); + ASSERT_TRUE(s.ok()); + ASSERT_TRUE(members_match(gp1_members_out, {"a", "b", "c", "d", "e"})); + + // key1 = {a, b, c, d} + // key2 = {a, c} + // key3 = {a, c, e} (expire key); + // SUNION key1 key2 key3 = {a, b, c, d} + std::map gp1_type_status; + db.Expire("GP1_SUNION_KEY3", 1); + ASSERT_TRUE(gp1_type_status[storage::DataType::kSets].ok()); + std::this_thread::sleep_for(std::chrono::milliseconds(2000)); + gp1_members_out.clear(); + + s = db.SUnion(gp1_keys, &gp1_members_out); + ASSERT_TRUE(s.ok()); + ASSERT_TRUE(members_match(gp1_members_out, {"a", "b", "c", "d"})); + + // ***************** Group 2 Test ***************** + // key1 = {a, b, c, d} + // key2 = {a, c} + // key3 = {a, c, e} + // SUNION key1 key2 key3 not_exist_key = {a, b, c, d, e} + std::vector gp2_members1{"a", "b", "c", "d"}; + std::vector gp2_members2{"a", "c"}; + std::vector gp2_members3{"a", "c", "e"}; + s = db.SAdd("GP2_SUNION_KEY1", gp2_members1, &ret); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(ret, 4); + s = db.SAdd("GP2_SUNION_KEY2", gp2_members2, &ret); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(ret, 2); + s = db.SAdd("GP2_SUNION_KEY3", gp2_members3, &ret); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(ret, 3); + + std::vector gp2_keys{"GP2_SUNION_KEY1", "GP2_SUNION_KEY2", "GP2_SUNION_KEY3", "GP2_NOT_EXIST_KEY"}; + std::vector gp2_members_out; + s = db.SUnion(gp2_keys, &gp2_members_out); + ASSERT_TRUE(s.ok()); + ASSERT_TRUE(members_match(gp2_members_out, {"a", "b", "c", "d", "e"})); + + // ***************** Group 3 Test ***************** + // key1 = {a, b, c, d} + // key2 = {a, c} + // key3 = {} + // SUNION key1 key2 key3 = {a, b, c, d} + std::vector gp3_members1{"a", "b", "c", "d"}; + std::vector gp3_members2{"a", "c"}; + std::vector gp3_members3{"a", "c", "e", "f", "g"}; + s = db.SAdd("GP3_SUNION_KEY1", gp3_members1, &ret); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(ret, 4); + s = db.SAdd("GP3_SUNION_KEY2", gp3_members2, &ret); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(ret, 2); + s = db.SAdd("GP3_SUNION_KEY3", gp3_members3, &ret); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(ret, 5); + + s = db.SRem("GP3_SUNION_KEY3", gp3_members3, &ret); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(ret, 5); + s = db.SCard("GP3_SUNION_KEY3", &ret); + ASSERT_TRUE(s.IsNotFound()); + ASSERT_EQ(ret, 0); + std::vector gp3_members_out; + s = db.SMembers("GP3_SUNION_KEY3", &gp3_members_out); + ASSERT_TRUE(s.IsNotFound()); + ASSERT_EQ(gp3_members_out.size(), 0); + + std::vector gp3_keys{"GP3_SUNION_KEY1", "GP3_SUNION_KEY2", "GP3_SUNION_KEY3"}; + gp3_members_out.clear(); + s = db.SUnion(gp3_keys, &gp3_members_out); + ASSERT_TRUE(s.ok()); + ASSERT_TRUE(members_match(gp3_members_out, 
{"a", "b", "c", "d"})); + + // ***************** Group 4 Test ***************** + // key1 = {a, b, c, d} + // SUNION key1 = {a, b, c, d} + std::vector gp4_members1{"a", "b", "c", "d"}; + s = db.SAdd("GP4_SUNION_KEY1", gp4_members1, &ret); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(ret, 4); + + std::vector gp4_keys{"GP4_SUNION_KEY1"}; + std::vector gp4_members_out; + s = db.SUnion(gp4_keys, &gp4_members_out); + ASSERT_TRUE(s.ok()); + ASSERT_TRUE(members_match(gp4_members_out, {"a", "b", "c", "d"})); +} + +// SUnionstore +TEST_F(SetsTest, SUnionstoreTest) { // NOLINT + int32_t ret = 0; + + // ***************** Group 1 Test ***************** + // destination = {} + // key1 = {a, b, c, d} + // key2 = {a, c} + // key3 = {a, c, e} + // SUNIONSTORE destination key1 key2 key3 + // destination = {a, b, c, d, e} + std::vector gp1_members1{"a", "b", "c", "d"}; + std::vector gp1_members2{"a", "c"}; + std::vector gp1_members3{"a", "c", "e"}; + s = db.SAdd("GP1_SUNIONSTORE_KEY1", gp1_members1, &ret); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(ret, 4); + s = db.SAdd("GP1_SUNIONSTORE_KEY2", gp1_members2, &ret); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(ret, 2); + s = db.SAdd("GP1_SUNIONSTORE_KEY3", gp1_members3, &ret); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(ret, 3); + + std::vector gp1_keys{"GP1_SUNIONSTORE_KEY1", "GP1_SUNIONSTORE_KEY2", "GP1_SUNIONSTORE_KEY3"}; + std::vector value_to_dest; + s = db.SUnionstore("GP1_SUNIONSTORE_DESTINATION1", gp1_keys, value_to_dest, &ret); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(ret, 5); + ASSERT_TRUE(size_match(&db, "GP1_SUNIONSTORE_DESTINATION1", 5)); + ASSERT_TRUE(members_match(&db, "GP1_SUNIONSTORE_DESTINATION1", {"a", "b", "c", "d", "e"})); + + // destination = {} + // key1 = {a, b, c, d} + // key2 = {a, c} + // key3 = {a, c, e} (expire key); + // SUNIONSTORE destination key1 key2 key3 + // destination = {a, b, c, d} + ASSERT_TRUE(make_expired(&db, "GP1_SUNIONSTORE_KEY3")); + + s = db.SUnionstore("GP1_SUNIONSTORE_DESTINATION1", gp1_keys, value_to_dest, &ret); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(ret, 4); + ASSERT_TRUE(size_match(&db, "GP1_SUNIONSTORE_DESTINATION1", 4)); + ASSERT_TRUE(members_match(&db, "GP1_SUNIONSTORE_DESTINATION1", {"a", "b", "c", "d"})); + + // ***************** Group 2 Test ***************** + // destination already exists, it is overwritten. 
+ // destination = {a, x, l} + // key1 = {a, b, c, d} + // key2 = {a, c} + // key3 = {a, c, e} + // SUNIONSTORE destination key1 key2 key3 + // destination = {a, b, c, d, e} + std::vector gp2_members1{"a", "b", "c", "d"}; + std::vector gp2_members2{"a", "c"}; + std::vector gp2_members3{"a", "c", "e"}; + s = db.SAdd("GP2_SUNIONSTORE_KEY1", gp2_members1, &ret); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(ret, 4); + s = db.SAdd("GP2_SUNIONSTORE_KEY2", gp2_members2, &ret); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(ret, 2); + s = db.SAdd("GP2_SUNIONSTORE_KEY3", gp2_members3, &ret); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(ret, 3); + + std::vector gp2_keys{"GP2_SUNIONSTORE_KEY1", "GP2_SUNIONSTORE_KEY2", "GP2_SUNIONSTORE_KEY3"}; + s = db.SUnionstore("GP2_SUNIONSTORE_DESTINATION1", gp2_keys, value_to_dest, &ret); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(ret, 5); + ASSERT_TRUE(size_match(&db, "GP2_SUNIONSTORE_DESTINATION1", 5)); + ASSERT_TRUE(members_match(&db, "GP2_SUNIONSTORE_DESTINATION1", {"a", "b", "c", "d", "e"})); + + // ***************** Group 3 Test ***************** + // destination = {} + // key1 = {a, b, c, d} + // key2 = {a, c} + // key3 = {} + // SUNIONSTORE destination key1 key2 key3 + // destination = {a, b, c, d} + std::vector gp3_members1{"a", "b", "c", "d"}; + std::vector gp3_members2{"a", "c"}; + std::vector gp3_members3{"a", "x", "l"}; + s = db.SAdd("GP3_SUNIONSTORE_KEY1", gp3_members1, &ret); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(ret, 4); + s = db.SAdd("GP3_SUNIONSTORE_KEY2", gp3_members2, &ret); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(ret, 2); + s = db.SAdd("GP3_SUNIONSTORE_KEY3", gp3_members3, &ret); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(ret, 3); + + s = db.SRem("GP3_SUNIONSTORE_KEY3", gp3_members3, &ret); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(ret, 3); + ASSERT_TRUE(size_match(&db, "GP3_SUNIONSTORE_KEY3", 0)); + ASSERT_TRUE(members_match(&db, "GP3_SUNIONSTORE_KEY3", {})); + + std::vector gp3_keys{"GP3_SUNIONSTORE_KEY1", "GP3_SUNIONSTORE_KEY2", "GP3_SUNIONSTORE_KEY3"}; + s = db.SUnionstore("GP3_SUNIONSTORE_DESTINATION1", gp3_keys, value_to_dest, &ret); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(ret, 4); + ASSERT_TRUE(size_match(&db, "GP3_SUNIONSTORE_DESTINATION1", 4)); + ASSERT_TRUE(members_match(&db, "GP3_SUNIONSTORE_DESTINATION1", {"a", "b", "c", "d"})); + + // ***************** Group 4 Test ***************** + // destination = {} + // key1 = {a, x, l} + // SUNIONSTORE destination key1 not_exist_key + // destination = {a, x, l} + std::vector gp4_members1{"a", "x", "l"}; + s = db.SAdd("GP4_SUNIONSTORE_KEY1", gp4_members1, &ret); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(ret, 3); + + std::vector gp4_keys{"GP4_SUNIONSTORE_KEY1", "GP4_SUNIONSTORE_NOT_EXIST_KEY"}; + std::vector gp4_members_out; + s = db.SUnionstore("GP4_SUNIONSTORE_DESTINATION1", gp4_keys, value_to_dest, &ret); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(ret, 3); + ASSERT_TRUE(size_match(&db, "GP4_SUNIONSTORE_DESTINATION1", 3)); + ASSERT_TRUE(members_match(&db, "GP4_SUNIONSTORE_DESTINATION1", {"a", "x", "l"})); +} + +// SScan +TEST_F(SetsTest, SScanTest) { // NOLINT + int32_t ret = 0; + int64_t cursor = 0; + int64_t next_cursor = 0; + std::vector member_out; + // ***************** Group 1 Test ***************** + // a b c d e f g h + // 0 1 2 3 4 5 6 7 + std::vector gp1_members{"a", "b", "c", "d", "e", "f", "g", "h"}; + s = db.SAdd("GP1_SSCAN_KEY", gp1_members, &ret); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(ret, 8); + + s = db.SScan("GP1_SSCAN_KEY", cursor, "*", 3, &member_out, &next_cursor); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(member_out.size(), 3); + ASSERT_EQ(next_cursor, 3); + 
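+  // a non-zero next_cursor is where the next SScan call resumes; 0 means the scan has finished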
ASSERT_TRUE(members_match(member_out, {"a", "b", "c"})); + + member_out.clear(); + cursor = next_cursor, next_cursor = 0; + s = db.SScan("GP1_SSCAN_KEY", cursor, "*", 3, &member_out, &next_cursor); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(member_out.size(), 3); + ASSERT_EQ(next_cursor, 6); + ASSERT_TRUE(members_match(member_out, {"d", "e", "f"})); + + member_out.clear(); + cursor = next_cursor, next_cursor = 0; + s = db.SScan("GP1_SSCAN_KEY", cursor, "*", 3, &member_out, &next_cursor); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(member_out.size(), 2); + ASSERT_EQ(next_cursor, 0); + ASSERT_TRUE(members_match(member_out, {"g", "h"})); + + // ***************** Group 2 Test ***************** + // a b c d e f g h + // 0 1 2 3 4 5 6 7 + std::vector gp2_members{"a", "b", "c", "d", "e", "f", "g", "h"}; + s = db.SAdd("GP2_SSCAN_KEY", gp2_members, &ret); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(ret, 8); + + member_out.clear(); + cursor = 0, next_cursor = 0; + s = db.SScan("GP2_SSCAN_KEY", cursor, "*", 1, &member_out, &next_cursor); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(member_out.size(), 1); + ASSERT_EQ(next_cursor, 1); + ASSERT_TRUE(members_match(member_out, {"a"})); + + member_out.clear(); + cursor = next_cursor, next_cursor = 0; + s = db.SScan("GP2_SSCAN_KEY", cursor, "*", 1, &member_out, &next_cursor); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(member_out.size(), 1); + ASSERT_EQ(next_cursor, 2); + ASSERT_TRUE(members_match(member_out, {"b"})); + + member_out.clear(); + cursor = next_cursor, next_cursor = 0; + s = db.SScan("GP2_SSCAN_KEY", cursor, "*", 1, &member_out, &next_cursor); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(member_out.size(), 1); + ASSERT_EQ(next_cursor, 3); + ASSERT_TRUE(members_match(member_out, {"c"})); + + member_out.clear(); + cursor = next_cursor, next_cursor = 0; + s = db.SScan("GP2_SSCAN_KEY", cursor, "*", 1, &member_out, &next_cursor); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(member_out.size(), 1); + ASSERT_EQ(next_cursor, 4); + ASSERT_TRUE(members_match(member_out, {"d"})); + + member_out.clear(); + cursor = next_cursor, next_cursor = 0; + s = db.SScan("GP2_SSCAN_KEY", cursor, "*", 1, &member_out, &next_cursor); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(member_out.size(), 1); + ASSERT_EQ(next_cursor, 5); + ASSERT_TRUE(members_match(member_out, {"e"})); + + member_out.clear(); + cursor = next_cursor, next_cursor = 0; + s = db.SScan("GP2_SSCAN_KEY", cursor, "*", 1, &member_out, &next_cursor); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(member_out.size(), 1); + ASSERT_EQ(next_cursor, 6); + ASSERT_TRUE(members_match(member_out, {"f"})); + + member_out.clear(); + cursor = next_cursor, next_cursor = 0; + s = db.SScan("GP2_SSCAN_KEY", cursor, "*", 1, &member_out, &next_cursor); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(member_out.size(), 1); + ASSERT_EQ(next_cursor, 7); + ASSERT_TRUE(members_match(member_out, {"g"})); + + member_out.clear(); + cursor = next_cursor, next_cursor = 0; + s = db.SScan("GP2_SSCAN_KEY", cursor, "*", 1, &member_out, &next_cursor); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(member_out.size(), 1); + ASSERT_EQ(next_cursor, 0); + ASSERT_TRUE(members_match(member_out, {"h"})); + + // ***************** Group 3 Test ***************** + // a b c d e f g h + // 0 1 2 3 4 5 6 7 + std::vector gp3_members{"a", "b", "c", "d", "e", "f", "g", "h"}; + s = db.SAdd("GP3_SSCAN_KEY", gp3_members, &ret); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(ret, 8); + + member_out.clear(); + cursor = 0, next_cursor = 0; + s = db.SScan("GP3_SSCAN_KEY", cursor, "*", 5, &member_out, &next_cursor); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(member_out.size(), 5); + 
ASSERT_EQ(next_cursor, 5); + ASSERT_TRUE(members_match(member_out, {"a", "b", "c", "d", "e"})); + + member_out.clear(); + cursor = next_cursor, next_cursor = 0; + s = db.SScan("GP3_SSCAN_KEY", cursor, "*", 5, &member_out, &next_cursor); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(member_out.size(), 3); + ASSERT_EQ(next_cursor, 0); + ASSERT_TRUE(members_match(member_out, {"f", "g", "h"})); + + // ***************** Group 4 Test ***************** + // a b c d e f g h + // 0 1 2 3 4 5 6 7 + std::vector gp4_members{"a", "b", "c", "d", "e", "f", "g", "h"}; + s = db.SAdd("GP4_SSCAN_KEY", gp4_members, &ret); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(ret, 8); + + member_out.clear(); + cursor = 0, next_cursor = 0; + s = db.SScan("GP4_SSCAN_KEY", cursor, "*", 10, &member_out, &next_cursor); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(member_out.size(), 8); + ASSERT_EQ(next_cursor, 0); + ASSERT_TRUE(members_match(member_out, {"a", "b", "c", "d", "e", "f", "g", "h"})); + + // ***************** Group 5 Test ***************** + // a_1_ a_2_ a_3_ b_1_ b_2_ b_3_ c_1_ c_2_ c_3 + // 0 1 2 3 4 5 6 7 8 + std::vector gp5_members{"a_1_", "a_2_", "a_3_", "b_1_", "b_2_", "b_3_", "c_1_", "c_2_", "c_3_"}; + s = db.SAdd("GP5_SSCAN_KEY", gp5_members, &ret); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(ret, 9); + + member_out.clear(); + cursor = 0, next_cursor = 0; + s = db.SScan("GP5_SSCAN_KEY", cursor, "*1*", 3, &member_out, &next_cursor); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(member_out.size(), 1); + ASSERT_EQ(next_cursor, 3); + ASSERT_TRUE(members_match(member_out, {"a_1_"})); + + member_out.clear(); + cursor = next_cursor, next_cursor = 0; + s = db.SScan("GP5_SSCAN_KEY", cursor, "*1*", 3, &member_out, &next_cursor); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(member_out.size(), 1); + ASSERT_EQ(next_cursor, 6); + ASSERT_TRUE(members_match(member_out, {"b_1_"})); + + member_out.clear(); + cursor = next_cursor, next_cursor = 0; + s = db.SScan("GP5_SSCAN_KEY", cursor, "*1*", 3, &member_out, &next_cursor); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(member_out.size(), 1); + ASSERT_EQ(next_cursor, 0); + ASSERT_TRUE(members_match(member_out, {"c_1_"})); + + // ***************** Group 6 Test ***************** + // a_1_ a_2_ a_3_ b_1_ b_2_ b_3_ c_1_ c_2_ c_3_ + // 0 1 2 3 4 5 6 7 8 + std::vector gp6_members{"a_1_", "a_2_", "a_3_", "b_1_", "b_2_", "b_3_", "c_1_", "c_2_", "c_3_"}; + s = db.SAdd("GP6_SSCAN_KEY", gp6_members, &ret); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(ret, 9); + + member_out.clear(); + cursor = 0, next_cursor = 0; + s = db.SScan("GP6_SSCAN_KEY", cursor, "a*", 3, &member_out, &next_cursor); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(member_out.size(), 3); + ASSERT_EQ(next_cursor, 0); + ASSERT_TRUE(members_match(member_out, {"a_1_", "a_2_", "a_3_"})); + + member_out.clear(); + cursor = 0, next_cursor = 0; + s = db.SScan("GP6_SSCAN_KEY", cursor, "a*", 2, &member_out, &next_cursor); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(member_out.size(), 2); + ASSERT_EQ(next_cursor, 2); + ASSERT_TRUE(members_match(member_out, {"a_1_", "a_2_"})); + + member_out.clear(); + cursor = next_cursor, next_cursor = 0; + s = db.SScan("GP6_SSCAN_KEY", cursor, "a*", 2, &member_out, &next_cursor); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(member_out.size(), 1); + ASSERT_EQ(next_cursor, 0); + ASSERT_TRUE(members_match(member_out, {"a_3_"})); + + member_out.clear(); + cursor = 0, next_cursor = 0; + s = db.SScan("GP6_SSCAN_KEY", cursor, "a*", 1, &member_out, &next_cursor); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(member_out.size(), 1); + ASSERT_EQ(next_cursor, 1); + ASSERT_TRUE(members_match(member_out, {"a_1_"})); + + 
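+  // resume the "a*" scan from the cursor returned above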
member_out.clear(); + cursor = next_cursor, next_cursor = 0; + s = db.SScan("GP6_SSCAN_KEY", cursor, "a*", 1, &member_out, &next_cursor); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(member_out.size(), 1); + ASSERT_EQ(next_cursor, 2); + ASSERT_TRUE(members_match(member_out, {"a_2_"})); + + member_out.clear(); + cursor = next_cursor, next_cursor = 0; + s = db.SScan("GP6_SSCAN_KEY", cursor, "a*", 1, &member_out, &next_cursor); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(member_out.size(), 1); + ASSERT_EQ(next_cursor, 0); + ASSERT_TRUE(members_match(member_out, {"a_3_"})); + + // ***************** Group 7 Test ***************** + // a_1_ a_2_ a_3_ b_1_ b_2_ b_3_ c_1_ c_2_ c_3 + // 0 1 2 3 4 5 6 7 8 + std::vector gp7_members{"a_1_", "a_2_", "a_3_", "b_1_", "b_2_", "b_3_", "c_1_", "c_2_", "c_3_"}; + s = db.SAdd("GP7_SSCAN_KEY", gp7_members, &ret); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(ret, 9); + + member_out.clear(); + cursor = 0, next_cursor = 0; + s = db.SScan("GP7_SSCAN_KEY", cursor, "b*", 3, &member_out, &next_cursor); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(member_out.size(), 3); + ASSERT_EQ(next_cursor, 0); + ASSERT_TRUE(members_match(member_out, {"b_1_", "b_2_", "b_3_"})); + + member_out.clear(); + cursor = 0, next_cursor = 0; + s = db.SScan("GP7_SSCAN_KEY", cursor, "b*", 2, &member_out, &next_cursor); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(member_out.size(), 2); + ASSERT_EQ(next_cursor, 2); + ASSERT_TRUE(members_match(member_out, {"b_1_", "b_2_"})); + + member_out.clear(); + cursor = next_cursor, next_cursor = 0; + s = db.SScan("GP7_SSCAN_KEY", cursor, "b*", 2, &member_out, &next_cursor); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(member_out.size(), 1); + ASSERT_EQ(next_cursor, 0); + ASSERT_TRUE(members_match(member_out, {"b_3_"})); + + member_out.clear(); + cursor = 0, next_cursor = 0; + s = db.SScan("GP7_SSCAN_KEY", cursor, "b*", 1, &member_out, &next_cursor); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(member_out.size(), 1); + ASSERT_EQ(next_cursor, 1); + ASSERT_TRUE(members_match(member_out, {"b_1_"})); + + member_out.clear(); + cursor = next_cursor, next_cursor = 0; + s = db.SScan("GP7_SSCAN_KEY", cursor, "b*", 1, &member_out, &next_cursor); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(member_out.size(), 1); + ASSERT_EQ(next_cursor, 2); + ASSERT_TRUE(members_match(member_out, {"b_2_"})); + + member_out.clear(); + cursor = next_cursor, next_cursor = 0; + s = db.SScan("GP7_SSCAN_KEY", cursor, "b*", 1, &member_out, &next_cursor); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(member_out.size(), 1); + ASSERT_EQ(next_cursor, 0); + ASSERT_TRUE(members_match(member_out, {"b_3_"})); + + // ***************** Group 8 Test ***************** + // a_1_ a_2_ a_3_ b_1_ b_2_ b_3_ c_1_ c_2_ c_3 + // 0 1 2 3 4 5 6 7 8 + std::vector gp8_members{"a_1_", "a_2_", "a_3_", "b_1_", "b_2_", "b_3_", "c_1_", "c_2_", "c_3_"}; + s = db.SAdd("GP8_SSCAN_KEY", gp8_members, &ret); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(ret, 9); + + member_out.clear(); + cursor = 0, next_cursor = 0; + s = db.SScan("GP8_SSCAN_KEY", cursor, "c*", 3, &member_out, &next_cursor); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(member_out.size(), 3); + ASSERT_EQ(next_cursor, 0); + ASSERT_TRUE(members_match(member_out, {"c_1_", "c_2_", "c_3_"})); + + member_out.clear(); + cursor = 0, next_cursor = 0; + s = db.SScan("GP8_SSCAN_KEY", cursor, "c*", 2, &member_out, &next_cursor); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(member_out.size(), 2); + ASSERT_EQ(next_cursor, 2); + ASSERT_TRUE(members_match(member_out, {"c_1_", "c_2_"})); + + member_out.clear(); + cursor = next_cursor, next_cursor = 0; + s = db.SScan("GP8_SSCAN_KEY", cursor, 
"c*", 2, &member_out, &next_cursor); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(member_out.size(), 1); + ASSERT_EQ(next_cursor, 0); + ASSERT_TRUE(members_match(member_out, {"c_3_"})); + + member_out.clear(); + cursor = 0, next_cursor = 0; + s = db.SScan("GP8_SSCAN_KEY", cursor, "c*", 1, &member_out, &next_cursor); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(member_out.size(), 1); + ASSERT_EQ(next_cursor, 1); + ASSERT_TRUE(members_match(member_out, {"c_1_"})); + + member_out.clear(); + cursor = next_cursor, next_cursor = 0; + s = db.SScan("GP8_SSCAN_KEY", cursor, "c*", 1, &member_out, &next_cursor); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(member_out.size(), 1); + ASSERT_EQ(next_cursor, 2); + ASSERT_TRUE(members_match(member_out, {"c_2_"})); + + member_out.clear(); + cursor = next_cursor, next_cursor = 0; + s = db.SScan("GP8_SSCAN_KEY", cursor, "c*", 1, &member_out, &next_cursor); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(member_out.size(), 1); + ASSERT_EQ(next_cursor, 0); + ASSERT_TRUE(members_match(member_out, {"c_3_"})); + + // ***************** Group 9 Test ***************** + // a_1_ a_2_ a_3_ b_1_ b_2_ b_3_ c_1_ c_2_ c_3 + // 0 1 2 3 4 5 6 7 8 + std::vector gp9_members{"a_1_", "a_2_", "a_3_", "b_1_", "b_2_", "b_3_", "c_1_", "c_2_", "c_3_"}; + s = db.SAdd("GP9_SSCAN_KEY", gp9_members, &ret); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(ret, 9); + + member_out.clear(); + cursor = 0, next_cursor = 0; + s = db.SScan("GP9_SSCAN_KEY", cursor, "d*", 3, &member_out, &next_cursor); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(member_out.size(), 0); + ASSERT_EQ(next_cursor, 0); + ASSERT_TRUE(members_match(member_out, {})); + + // ***************** Group 10 Test ***************** + // a_1_ a_2_ a_3_ b_1_ b_2_ b_3_ c_1_ c_2_ c_3 + // 0 1 2 3 4 5 6 7 8 + // SScan Expired Key + std::vector gp10_members{"a_1_", "a_2_", "a_3_", "b_1_", "b_2_", "b_3_", "c_1_", "c_2_", "c_3_"}; + s = db.SAdd("GP10_SSCAN_KEY", gp10_members, &ret); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(ret, 9); + + ASSERT_TRUE(make_expired(&db, "GP10_SSCAN_KEY")); + member_out.clear(); + cursor = 0, next_cursor = 0; + s = db.SScan("GP10_SSCAN_KEY", cursor, "*", 10, &member_out, &next_cursor); + ASSERT_TRUE(s.IsNotFound()); + ASSERT_EQ(member_out.size(), 0); + ASSERT_EQ(next_cursor, 0); + ASSERT_TRUE(members_match(member_out, {})); + + // ***************** Group 11 Test ***************** + // SScan Not Exist Key + member_out.clear(); + cursor = 0, next_cursor = 0; + s = db.SScan("GP11_SSCAN_KEY", cursor, "*", 10, &member_out, &next_cursor); + ASSERT_TRUE(s.IsNotFound()); + ASSERT_EQ(member_out.size(), 0); + ASSERT_EQ(next_cursor, 0); + ASSERT_TRUE(members_match(member_out, {})); +} + +int main(int argc, char** argv) { + if (!pstd::FileExists("./log")) { + pstd::CreatePath("./log"); + } + FLAGS_log_dir = "./log"; + FLAGS_minloglevel = 0; + FLAGS_max_log_size = 1800; + FLAGS_logbufsecs = 0; + ::google::InitGoogleLogging("strings_test"); + ::testing::InitGoogleTest(&argc, argv); + return RUN_ALL_TESTS(); +} diff --git a/tools/pika_migrate/src/storage/tests/strings_filter_test.cc b/tools/pika_migrate/src/storage/tests/strings_filter_test.cc new file mode 100644 index 0000000000..df5ac7b898 --- /dev/null +++ b/tools/pika_migrate/src/storage/tests/strings_filter_test.cc @@ -0,0 +1,35 @@ +// Copyright (c) 2017-present, Qihoo, Inc. All rights reserved. +// This source code is licensed under the BSD-style license found in the +// LICENSE file in the root directory of this source tree. An additional grant +// of patent rights can be found in the PATENTS file in the same directory. 
+
+#include <gtest/gtest.h>
+#include <chrono>
+#include <thread>
+
+#include "src/strings_filter.h"
+#include "storage/storage.h"
+
+using namespace storage;
+
+// Filter
+TEST(StringsFilterTest, FilterTest) {
+  std::string new_value;
+  bool is_stale;
+  bool value_changed;
+  auto filter = std::make_unique<StringsFilter>();
+
+  int64_t ttl = 1;
+  StringsValue strings_value("FILTER_VALUE");
+  strings_value.SetRelativeTimeInMillsec(ttl);
+  is_stale = filter->Filter(0, "FILTER_KEY", strings_value.Encode(), &new_value, &value_changed);
+  ASSERT_FALSE(is_stale);
+  std::this_thread::sleep_for(std::chrono::milliseconds(2000));
+  is_stale = filter->Filter(0, "FILTER_KEY", strings_value.Encode(), &new_value, &value_changed);
+  ASSERT_TRUE(is_stale);
+}
+
+int main(int argc, char** argv) {
+  ::testing::InitGoogleTest(&argc, argv);
+  return RUN_ALL_TESTS();
+}
diff --git a/tools/pika_migrate/src/storage/tests/strings_test.cc b/tools/pika_migrate/src/storage/tests/strings_test.cc
new file mode 100644
index 0000000000..ebab6a2ac3
--- /dev/null
+++ b/tools/pika_migrate/src/storage/tests/strings_test.cc
@@ -0,0 +1,1061 @@
+// Copyright (c) 2017-present, Qihoo, Inc. All rights reserved.
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree. An additional grant
+// of patent rights can be found in the PATENTS file in the same directory.
+
+#include <gtest/gtest.h>
+#include <sys/stat.h>
+#include <chrono>
+#include <thread>
+
+#include "glog/logging.h"
+
+#include "pstd/include/pika_codis_slot.h"
+#include "pstd/include/env.h"
+#include "storage/storage.h"
+#include "storage/util.h"
+
+using namespace storage;
+
+class StringsTest : public ::testing::Test {
+ public:
+  StringsTest() = default;
+  ~StringsTest() override = default;
+
+  void SetUp() override {
+    std::string path = "./db/strings";
+    pstd::DeleteDirIfExist(path);
+    mkdir(path.c_str(), 0755);
+    storage_options.options.create_if_missing = true;
+    s = db.Open(storage_options, path);
+  }
+
+  void TearDown() override {
+    std::string path = "./db/strings";
+    DeleteFiles(path.c_str());
+  }
+
+  static void SetUpTestSuite() {}
+  static void TearDownTestSuite() {}
+
+  StorageOptions storage_options;
+  storage::Storage db;
+  storage::Status s;
+};
+
+static bool make_expired(storage::Storage* const db, const Slice& key) {
+  std::map<storage::DataType, storage::Status> type_status;
+  int ret = db->Expire(key, 1 * 100);
+  if ((ret == 0) || !type_status[storage::DataType::kStrings].ok()) {
+    return false;
+  }
+  std::this_thread::sleep_for(std::chrono::milliseconds(2000));
+  return true;
+}
+
+static bool string_ttl(storage::Storage* const db, const Slice& key, int32_t* ttl) {
+  int64_t type_ttl;
+  std::map<storage::DataType, storage::Status> type_status;
+  type_ttl = db->TTL(key);
+  for (const auto& item : type_status) {
+    if (item.second != Status::OK() && item.second != Status::NotFound()) {
+      return false;
+    }
+  }
+  *ttl = static_cast<int32_t>(type_ttl);
+  return true;
+}
+
+// Append
TEST_F(StringsTest, AppendTest) {
+  int32_t ret;
+  std::string value;
+  std::string new_value;
+  std::map<storage::DataType, storage::Status> type_status;
+  int64_t expired_timestamp_millsec = 0;
+  int64_t expired_ttl_sec = 0;
+
+  // ***************** Group 1 Test *****************
+  s = db.Append("GP1_APPEND_KEY", "HELLO", &ret, &expired_timestamp_millsec, new_value);
+  ASSERT_TRUE(s.ok());
+  ASSERT_EQ(ret, 5);
+  ASSERT_EQ(expired_timestamp_millsec, 0);
+
+  s = db.Append("GP1_APPEND_KEY", " WORLD", &ret, &expired_timestamp_millsec, new_value);
+  ASSERT_TRUE(s.ok());
+  ASSERT_EQ(ret, 11);
+  ASSERT_EQ(expired_timestamp_millsec, 0);
+
+  s = db.Get("GP1_APPEND_KEY", &value);
+  ASSERT_STREQ(value.c_str(), "HELLO WORLD");
+
+  // ***************** Group 2 Test *****************
+  s = db.Set("GP2_APPEND_KEY", "VALUE");
+  ASSERT_TRUE(s.ok());
+
+  int64_t expect_expired_timestamp_millsec = pstd::NowMillis() + 1000 * 100;
+  ret = db.Expire("GP2_APPEND_KEY", 100 * 1000);
+  ASSERT_EQ(ret, 1);
+  type_status.clear();
+  expired_ttl_sec = db.TTL("GP2_APPEND_KEY");
+  ASSERT_LE(expired_ttl_sec, 100);
+  ASSERT_GE(expired_ttl_sec, 0);
+
+  std::this_thread::sleep_for(std::chrono::milliseconds(5 * 1000));
+  s = db.Append("GP2_APPEND_KEY", "VALUE", &ret, &expired_timestamp_millsec, new_value);
+  ASSERT_TRUE(s.ok());
+  ASSERT_EQ(ret, 10);
+  s = db.Get("GP2_APPEND_KEY", &value);
+  ASSERT_STREQ(value.c_str(), "VALUEVALUE");
+  ASSERT_GE(expired_timestamp_millsec, expect_expired_timestamp_millsec);
+  ASSERT_LT(expired_timestamp_millsec, expect_expired_timestamp_millsec + 1000);
+
+  type_status.clear();
+  expired_ttl_sec = db.TTL("GP2_APPEND_KEY");
+  ASSERT_LE(expired_ttl_sec, 95);
+  ASSERT_GT(expired_ttl_sec, 85);
+
+  // ***************** Group 3 Test *****************
+  s = db.Set("GP3_APPEND_KEY", "VALUE");
+  ASSERT_TRUE(s.ok());
+  make_expired(&db, "GP3_APPEND_KEY");
+
+  s = db.Append("GP3_APPEND_KEY", "VALUE", &ret, &expired_timestamp_millsec, new_value);
+  ASSERT_TRUE(s.ok());
+  ASSERT_EQ(ret, 5);
+  ASSERT_EQ(expired_timestamp_millsec, 0);
+  s = db.Get("GP3_APPEND_KEY", &value);
+  ASSERT_STREQ(value.c_str(), "VALUE");
+
+  type_status.clear();
+  expired_ttl_sec = db.TTL("GP3_APPEND_KEY");
+  ASSERT_EQ(expired_ttl_sec, -1);
+}
+
+// BitCount
+TEST_F(StringsTest, BitCountTest) {
+  int32_t ret;
+
+  // ***************** Group 1 Test *****************
+  s = db.Set("GP1_BITCOUNT_KEY", "foobar");
+  ASSERT_TRUE(s.ok());
+
+  // Without offsets
+  s = db.BitCount("GP1_BITCOUNT_KEY", 0, -1, &ret, false);
+  ASSERT_TRUE(s.ok());
+  ASSERT_EQ(ret, 26);
+
+  // With offsets
+  s = db.BitCount("GP1_BITCOUNT_KEY", 0, 0, &ret, true);
+  ASSERT_TRUE(s.ok());
+  ASSERT_EQ(ret, 4);
+  s = db.BitCount("GP1_BITCOUNT_KEY", 1, 1, &ret, true);
+  ASSERT_TRUE(s.ok());
+  ASSERT_EQ(ret, 6);
+}
+
+// BitOp
+TEST_F(StringsTest, BitOpTest) {
+  int64_t ret;
+  std::string value;
+  s = db.Set("BITOP_KEY1", "FOOBAR");
+  ASSERT_TRUE(s.ok());
+  s = db.Set("BITOP_KEY2", "ABCDEF");
+  ASSERT_TRUE(s.ok());
+  s = db.Set("BITOP_KEY3", "STORAGE");
+  ASSERT_TRUE(s.ok());
+  std::vector<std::string> src_keys{"BITOP_KEY1", "BITOP_KEY2", "BITOP_KEY3"};
+
+  std::string value_to_dest{};
+
+  // AND
+  s = db.BitOp(storage::BitOpType::kBitOpAnd,
+               "BITOP_DESTKEY", src_keys, std::ref(value_to_dest), &ret);
+  ASSERT_TRUE(s.ok());
+  ASSERT_EQ(ret, 7);
+  s = db.Get("BITOP_DESTKEY", &value);
+  ASSERT_STREQ(value.c_str(), "@@C@AB\x00");
+
+  // OR
+  s = db.BitOp(storage::BitOpType::kBitOpOr,
+               "BITOP_DESTKEY", src_keys, std::ref(value_to_dest), &ret);
+  ASSERT_TRUE(s.ok());
+  ASSERT_EQ(ret, 7);
+  s = db.Get("BITOP_DESTKEY", &value);
+  ASSERT_STREQ(value.c_str(), "W_OVEWE");
+
+  // XOR
+  s = db.BitOp(storage::BitOpType::kBitOpXor,
+               "BITOP_DESTKEY", src_keys, std::ref(value_to_dest), &ret);
+  ASSERT_TRUE(s.ok());
+  ASSERT_EQ(ret, 7);
+  s = db.Get("BITOP_DESTKEY", &value);
+  ASSERT_STREQ(value.c_str(), "TYCTESE");
+
+  // NOT
+  std::vector<std::string> not_keys{"BITOP_KEY1"};
+  s = db.BitOp(storage::BitOpType::kBitOpNot,
+               "BITOP_DESTKEY", not_keys, std::ref(value_to_dest), &ret);
+  ASSERT_TRUE(s.ok());
+  ASSERT_EQ(ret, 6);
+  s = db.Get("BITOP_DESTKEY", &value);
+  ASSERT_STREQ(value.c_str(), "\xb9\xb0\xb0\xbd\xbe\xad");
+  // NOT accepts exactly one source key; more than one is an invalid argument
+  s = db.BitOp(storage::BitOpType::kBitOpNot,
+               "BITOP_DESTKEY", src_keys, std::ref(value_to_dest), &ret);
+  ASSERT_TRUE(s.IsInvalidArgument());
+}
+
+// Decrby
+TEST_F(StringsTest, DecrbyTest) {
+  int64_t ret;
+  std::string value;
+  std::map<storage::DataType, storage::Status> type_status;
+  int64_t type_ttl;
+
+  // ***************** Group 1 Test *****************
+  // If the key does not exist
+  s = db.Decrby("GP1_DECRBY_KEY", 5, &ret);
+  ASSERT_TRUE(s.ok());
+  ASSERT_EQ(ret, -5);
+
+  // If the key holds a string that cannot be represented as an integer
+  s = db.Set("GP1_DECRBY_KEY", "DECRBY_VALUE");
+  ASSERT_TRUE(s.ok());
+  s = db.Decrby("GP1_DECRBY_KEY", 5, &ret);
+  ASSERT_TRUE(s.IsCorruption());
+
+  // The result would be less than the minimum number -9223372036854775808
+  s = db.Set("GP1_DECRBY_KEY", "-2");
+  ASSERT_TRUE(s.ok());
+  s = db.Decrby("GP1_DECRBY_KEY", 9223372036854775807, &ret);
+  ASSERT_TRUE(s.IsInvalidArgument());
+
+  // ***************** Group 2 Test *****************
+  s = db.Set("GP2_DECRBY_KEY", "10");
+  ASSERT_TRUE(s.ok());
+  ret = db.Expire("GP2_DECRBY_KEY", 100 * 1000);
+  ASSERT_EQ(ret, 1);
+  type_status.clear();
+  type_ttl = db.TTL("GP2_DECRBY_KEY");
+  ASSERT_LE(type_ttl, 100);
+  ASSERT_GE(type_ttl, 0);
+
+  s = db.Decrby("GP2_DECRBY_KEY", 5, &ret);
+  ASSERT_TRUE(s.ok());
+  ASSERT_EQ(ret, 5);
+  s = db.Get("GP2_DECRBY_KEY", &value);
+  ASSERT_EQ(value, "5");
+
+  type_ttl = db.TTL("GP2_DECRBY_KEY");
+  ASSERT_LE(type_ttl, 100);
+  ASSERT_GE(type_ttl, 0);
+
+  // ***************** Group 3 Test *****************
+  s = db.Set("GP3_DECRBY_KEY", "10");
+  ASSERT_TRUE(s.ok());
+  make_expired(&db, "GP3_DECRBY_KEY");
+
+  s = db.Decrby("GP3_DECRBY_KEY", 5, &ret);
+  ASSERT_TRUE(s.ok());
+  ASSERT_EQ(ret, -5);
+  s = db.Get("GP3_DECRBY_KEY", &value);
+  ASSERT_EQ(value, "-5");
+
+  type_status.clear();
+  type_ttl = db.TTL("GP3_DECRBY_KEY");
+  ASSERT_EQ(type_ttl, -1);
+
+  // ***************** Group 4 Test *****************
+  s = db.Set("GP4_DECRBY_KEY", "100000");
+  ASSERT_TRUE(s.ok());
+
+  s = db.Decrby("GP4_DECRBY_KEY", 50000, &ret);
+  ASSERT_TRUE(s.ok());
+  ASSERT_EQ(ret, 50000);
+  s = db.Get("GP4_DECRBY_KEY", &value);
+  ASSERT_EQ(value, "50000");
+}
+
+// Get
+TEST_F(StringsTest, GetTest) {
+  std::string value;
+  s = db.Set("GET_KEY", "GET_VALUE_1");
+  ASSERT_TRUE(s.ok());
+
+  s = db.Get("GET_KEY", &value);
+  ASSERT_TRUE(s.ok());
+  ASSERT_STREQ(value.c_str(), "GET_VALUE_1");
+
+  s = db.Set("GET_KEY", "GET_VALUE_2");
+  ASSERT_TRUE(s.ok());
+
+  s = db.Get("GET_KEY", &value);
+  ASSERT_TRUE(s.ok());
+  ASSERT_STREQ(value.c_str(), "GET_VALUE_2");
+}
+
+// GetBit
+TEST_F(StringsTest, GetBitTest) {
+  int32_t ret;
+  s = db.SetBit("GETBIT_KEY", 7, 1, &ret);
+  ASSERT_TRUE(s.ok());
+  ASSERT_EQ(ret, 0);
+
+  s = db.GetBit("GETBIT_KEY", 0, &ret);
+  ASSERT_TRUE(s.ok());
+  ASSERT_EQ(ret, 0);
+
+  s = db.GetBit("GETBIT_KEY", 7, &ret);
+  ASSERT_TRUE(s.ok());
+  ASSERT_EQ(ret, 1);
+
+  // The offset is beyond the string length
+  s = db.GetBit("GETBIT_KEY", 100, &ret);
+  ASSERT_TRUE(s.ok());
+  ASSERT_EQ(ret, 0);
+}
+
+// Getrange
+TEST_F(StringsTest, GetrangeTest) {
+  std::string value;
+  s = db.Set("GETRANGE_KEY", "This is a string");
+  ASSERT_TRUE(s.ok());
+  s = db.Getrange("GETRANGE_KEY", 0, 3, &value);
+  ASSERT_TRUE(s.ok());
+  ASSERT_STREQ(value.c_str(), "This");
+
+  s = db.Getrange("GETRANGE_KEY", -3, -1, &value);
+  ASSERT_TRUE(s.ok());
+  ASSERT_STREQ(value.c_str(), "ing");
+
+  s = db.Getrange("GETRANGE_KEY", 0, -1, &value);
+  ASSERT_TRUE(s.ok());
+  ASSERT_STREQ(value.c_str(), "This is a string");
+
+  s = db.Getrange("GETRANGE_KEY", 10, 100, &value);
+  ASSERT_TRUE(s.ok());
+  ASSERT_STREQ(value.c_str(), "string");
+
+  // If the key does not exist
+  s = db.Getrange("GETRANGE_NOT_EXIST_KEY", 0, -1, &value);
+  ASSERT_TRUE(s.IsNotFound());
+  ASSERT_STREQ(value.c_str(), "");
+}
+
+// GetSet
+TEST_F(StringsTest, GetSetTest) {
+  std::string value;
+  // If the key does not exist
+  s = db.GetSet("GETSET_KEY", "GETSET_VALUE", &value);
+  ASSERT_TRUE(s.ok());
+  ASSERT_STREQ(value.c_str(), "");
+
+  s = db.GetSet("GETSET_KEY", "GETSET_VALUE", &value);
+  ASSERT_TRUE(s.ok());
+  ASSERT_STREQ(value.c_str(), "GETSET_VALUE");
+}
+
+// Incrby
+TEST_F(StringsTest, IncrbyTest) {
+  int64_t ret;
+  std::string value;
+  std::map<storage::DataType, storage::Status> type_status;
+  int64_t expired_timestamp_millsec = 0;
+  int64_t expired_ttl_sec = 0;
+
+  // ***************** Group 1 Test *****************
+  // If the key does not exist
+  s = db.Incrby("GP1_INCRBY_KEY", 5, &ret, &expired_timestamp_millsec);
+  ASSERT_TRUE(s.ok());
+  ASSERT_EQ(ret, 5);
+  ASSERT_EQ(expired_timestamp_millsec, 0);
+
+  // If the key holds a string that cannot be represented as an integer
+  s = db.Set("GP1_INCRBY_KEY", "INCRBY_VALUE");
+  ASSERT_TRUE(s.ok());
+  s = db.Incrby("GP1_INCRBY_KEY", 5, &ret, &expired_timestamp_millsec);
+  ASSERT_TRUE(s.IsCorruption());
+  ASSERT_EQ(expired_timestamp_millsec, 0);
+
+  s = db.Set("GP1_INCRBY_KEY", "1");
+  ASSERT_TRUE(s.ok());
+  // The result would exceed the maximum number 9223372036854775807
+  s = db.Incrby("GP1_INCRBY_KEY", 9223372036854775807, &ret, &expired_timestamp_millsec);
+  ASSERT_TRUE(s.IsInvalidArgument());
+  ASSERT_EQ(expired_timestamp_millsec, 0);
+
+  // ***************** Group 2 Test *****************
+  s = db.Set("GP2_INCRBY_KEY", "10");
+  ASSERT_TRUE(s.ok());
+  int64_t expect_expired_timestamp_millsec = pstd::NowMillis() + 1000 * 100;
+  ret = db.Expire("GP2_INCRBY_KEY", 1000 * 100);
+  ASSERT_EQ(ret, 1);
+  type_status.clear();
+
+  std::this_thread::sleep_for(std::chrono::seconds(5));
+  expired_ttl_sec = db.TTL("GP2_INCRBY_KEY");
+  ASSERT_LE(expired_ttl_sec, 95);
+  ASSERT_GT(expired_ttl_sec, 0);
+
+  s = db.Incrby("GP2_INCRBY_KEY", 5, &ret, &expired_timestamp_millsec);
+  ASSERT_TRUE(s.ok());
+  ASSERT_EQ(ret, 15);
+  s = db.Get("GP2_INCRBY_KEY", &value);
+  ASSERT_EQ(value, "15");
+  ASSERT_GE(expired_timestamp_millsec, expect_expired_timestamp_millsec);
+  ASSERT_LT(expired_timestamp_millsec, expect_expired_timestamp_millsec + 1000);
+
+  std::this_thread::sleep_for(std::chrono::seconds(1));
+  expired_ttl_sec = db.TTL("GP2_INCRBY_KEY");
+  ASSERT_LE(expired_ttl_sec, 94);
+  ASSERT_GT(expired_ttl_sec, 0);
+
+  // ***************** Group 3 Test *****************
+  s = db.Set("GP3_INCRBY_KEY", "10");
+  ASSERT_TRUE(s.ok());
+  make_expired(&db, "GP3_INCRBY_KEY");
+
+  s = db.Get("GP3_INCRBY_KEY", &value);
+  ASSERT_EQ(value, "");
+
+  expired_timestamp_millsec = 0;
+  s = db.Incrby("GP3_INCRBY_KEY", 5, &ret, &expired_timestamp_millsec);
+  ASSERT_TRUE(s.ok());
+  ASSERT_EQ(ret, 5);
+  s = db.Get("GP3_INCRBY_KEY", &value);
+  ASSERT_EQ(value, "5");
+  ASSERT_EQ(expired_timestamp_millsec, 0);
+
+  type_status.clear();
+  expired_ttl_sec = db.TTL("GP3_INCRBY_KEY");
+  ASSERT_EQ(expired_ttl_sec, -1);
+
+  // ***************** Group 4 Test *****************
+  s = db.Set("GP4_INCRBY_KEY", "50000");
+  ASSERT_TRUE(s.ok());
+
+  s = db.Incrby("GP4_INCRBY_KEY", 50000, &ret, &expired_timestamp_millsec);
+  ASSERT_TRUE(s.ok());
+  ASSERT_EQ(ret, 100000);
+  s = db.Get("GP4_INCRBY_KEY", &value);
+  ASSERT_EQ(value, "100000");
+  ASSERT_EQ(expired_timestamp_millsec, 0);
+}
+
+// Incrbyfloat
+TEST_F(StringsTest, IncrbyfloatTest) {
+  int32_t ret;
+  std::string value;
+  std::map<storage::DataType, storage::Status> type_status;
+
+  double eps = 0.1;
+
+  int64_t expired_timestamp_millsec = 0;
+  int64_t expired_ttl_sec = 0;
+
+  // ***************** Group 1 Test *****************
+  s = db.Set("GP1_INCRBYFLOAT_KEY", "10.50");
+  ASSERT_TRUE(s.ok());
+  s = db.Incrbyfloat("GP1_INCRBYFLOAT_KEY", "0.1", &value, &expired_timestamp_millsec);
+  ASSERT_TRUE(s.ok());
+  ASSERT_NEAR(std::stod(value), 10.6, eps);
+  ASSERT_EQ(expired_timestamp_millsec, 0);
+  s = db.Incrbyfloat("GP1_INCRBYFLOAT_KEY", "-5", &value, &expired_timestamp_millsec);
+  ASSERT_TRUE(s.ok());
+  ASSERT_NEAR(std::stod(value), 5.6, eps);
+  ASSERT_EQ(expired_timestamp_millsec, 0);
+
+  // If the key contains a string that cannot be represented as a number
+  s = db.Set("GP1_INCRBYFLOAT_KEY", "INCRBY_VALUE");
+  ASSERT_TRUE(s.ok());
+  s = db.Incrbyfloat("GP1_INCRBYFLOAT_KEY", "5", &value, &expired_timestamp_millsec);
+  ASSERT_TRUE(s.IsCorruption());
+  ASSERT_EQ(expired_timestamp_millsec, 0);
+
+  // ***************** Group 2 Test *****************
+  s = db.Set("GP2_INCRBYFLOAT_KEY", "10.11111");
+  ASSERT_TRUE(s.ok());
+  int64_t expect_expired_timestamp_millsec = pstd::NowMillis() + 1000 * 100;
+  ret = db.Expire("GP2_INCRBYFLOAT_KEY", 100 * 1000);
+  ASSERT_EQ(ret, 1);
+  type_status.clear();
+  std::this_thread::sleep_for(std::chrono::milliseconds(5 * 1000));
+  expired_ttl_sec = db.TTL("GP2_INCRBYFLOAT_KEY");
+  ASSERT_LE(expired_ttl_sec, 95);
+  ASSERT_GT(expired_ttl_sec, 90);
+
+  s = db.Incrbyfloat("GP2_INCRBYFLOAT_KEY", "10.22222", &value, &expired_timestamp_millsec);
+  ASSERT_TRUE(s.ok());
+  ASSERT_NEAR(std::stod(value), 20.33333, eps);
+  ASSERT_GE(expired_timestamp_millsec, expect_expired_timestamp_millsec);
+  ASSERT_LT(expired_timestamp_millsec, expect_expired_timestamp_millsec + 1000);
+  s = db.Get("GP2_INCRBYFLOAT_KEY", &value);
+  ASSERT_NEAR(std::stod(value), 20.33333, eps);
+
+  std::this_thread::sleep_for(std::chrono::milliseconds(2 * 1000));
+  expired_ttl_sec = db.TTL("GP2_INCRBYFLOAT_KEY");
+  ASSERT_LE(expired_ttl_sec, 93);
+  ASSERT_GE(expired_ttl_sec, 90);
+
+  // ***************** Group 3 Test *****************
+  s = db.Set("GP3_INCRBYFLOAT_KEY", "10");
+  ASSERT_TRUE(s.ok());
+  make_expired(&db, "GP3_INCRBYFLOAT_KEY");
+
+  s = db.Incrbyfloat("GP3_INCRBYFLOAT_KEY", "0.123456", &value, &expired_timestamp_millsec);
+  ASSERT_TRUE(s.ok());
+  ASSERT_NEAR(std::stod(value), 0.123456, eps);
+  s = db.Get("GP3_INCRBYFLOAT_KEY", &value);
+  ASSERT_NEAR(std::stod(value), 0.123456, eps);
+  ASSERT_EQ(expired_timestamp_millsec, 0);
+
+  type_status.clear();
+  expired_ttl_sec = db.TTL("GP3_INCRBYFLOAT_KEY");
+  ASSERT_EQ(expired_ttl_sec, -1);
+
+  // ***************** Group 4 Test *****************
+  s = db.Set("GP4_INCRBYFLOAT_KEY", "100.001");
+  ASSERT_TRUE(s.ok());
+
+  s = db.Incrbyfloat("GP4_INCRBYFLOAT_KEY", "11.11", &value, &expired_timestamp_millsec);
+  ASSERT_TRUE(s.ok());
+  ASSERT_NEAR(std::stod(value), 111.111, eps);
+  s = db.Get("GP4_INCRBYFLOAT_KEY", &value);
+  ASSERT_EQ(expired_timestamp_millsec, 0);
+  ASSERT_NEAR(std::stod(value), 111.111, eps);
+}
+
+// MGet
+TEST_F(StringsTest, MGetTest) {
+  std::vector<storage::ValueStatus> vss;
+
+  // ***************** Group 1 Test *****************
+  std::vector<storage::KeyValue> kvs1{
+      {"GP1_MGET_KEY1", "VALUE1"}, {"GP1_MGET_KEY2", "VALUE2"}, {"GP1_MGET_KEY3", "VALUE3"}};
+  s = db.MSet(kvs1);
+  ASSERT_TRUE(s.ok());
+  std::vector<std::string> keys1{"", "GP1_MGET_KEY1", "GP1_MGET_KEY2", "GP1_MGET_KEY3", "GP1_MGET_NOT_EXIST_KEY"};
+  vss.clear();
+  s = db.MGet(keys1, &vss);
+  ASSERT_TRUE(s.ok());
+  ASSERT_EQ(vss.size(), 5);
+  ASSERT_TRUE(vss[0].status.IsNotFound());
+  ASSERT_EQ(vss[0].value, "");
+  ASSERT_TRUE(vss[1].status.ok());
+  ASSERT_EQ(vss[1].value, "VALUE1");
+  ASSERT_TRUE(vss[2].status.ok());
+  ASSERT_EQ(vss[2].value, "VALUE2");
+  ASSERT_TRUE(vss[3].status.ok());
+  ASSERT_EQ(vss[3].value, "VALUE3");
+  ASSERT_TRUE(vss[4].status.IsNotFound());
+  ASSERT_EQ(vss[4].value, "");
+
+  // ***************** Group 2 Test *****************
+  std::vector<storage::KeyValue> kvs2{{"GP2_MGET_KEY1", "VALUE1"}, {"GP2_MGET_KEY2", "VALUE2"}, {"GP2_MGET_KEY3", ""}};
+  s = db.MSet(kvs2);
+  ASSERT_TRUE(s.ok());
+  std::vector<std::string> keys2{"GP2_MGET_KEY1", "GP2_MGET_KEY2", "GP2_MGET_KEY3", "GP2_MGET_NOT_EXIST_KEY"};
+  ASSERT_TRUE(make_expired(&db, "GP2_MGET_KEY2"));
+
+  vss.clear();
+  s = db.MGet(keys2, &vss);
+  ASSERT_TRUE(s.ok());
+  ASSERT_EQ(vss.size(), 4);
+  ASSERT_TRUE(vss[0].status.ok());
+  ASSERT_EQ(vss[0].value, "VALUE1");
+  ASSERT_TRUE(vss[1].status.IsNotFound());
+  ASSERT_EQ(vss[1].value, "");
+  ASSERT_TRUE(vss[2].status.ok());
+  ASSERT_EQ(vss[2].value, "");
+  ASSERT_TRUE(vss[3].status.IsNotFound());
+  ASSERT_EQ(vss[3].value, "");
+}
+
+// MSet
+TEST_F(StringsTest, MSetTest) {
+  std::vector<storage::KeyValue> kvs;
+  kvs.push_back({"", "MSET_EMPTY_VALUE"});
+  kvs.push_back({"MSET_TEST_KEY1", "MSET_TEST_VALUE1"});
+  kvs.push_back({"MSET_TEST_KEY2", "MSET_TEST_VALUE2"});
+  kvs.push_back({"MSET_TEST_KEY3", "MSET_TEST_VALUE3"});
+  kvs.push_back({"MSET_TEST_KEY3", "MSET_TEST_VALUE3"});
+  s = db.MSet(kvs);
+  ASSERT_TRUE(s.ok());
+}
+
+// TODO(@tangruilin): fix the test code
+// MSetnx
+// TEST_F(StringsTest, MSetnxTest) {
+//   int32_t ret;
+//   std::vector<storage::KeyValue> kvs;
+//   kvs.push_back({"", "MSET_EMPTY_VALUE"});
+//   kvs.push_back({"MSET_TEST_KEY1", "MSET_TEST_VALUE1"});
+//   kvs.push_back({"MSET_TEST_KEY2", "MSET_TEST_VALUE2"});
+//   kvs.push_back({"MSET_TEST_KEY3", "MSET_TEST_VALUE3"});
+//   kvs.push_back({"MSET_TEST_KEY3", "MSET_TEST_VALUE3"});
+//   s = db.MSetnx(kvs, &ret);
+//   ASSERT_TRUE(s.ok());
+//   ASSERT_EQ(ret, 0);
+
+//   kvs.clear();
+//   kvs.push_back({"MSETNX_TEST_KEY1", "MSET_TEST_VALUE1"});
+//   kvs.push_back({"MSETNX_TEST_KEY2", "MSET_TEST_VALUE2"});
+//   kvs.push_back({"MSETNX_TEST_KEY3", "MSET_TEST_VALUE3"});
+//   kvs.push_back({"MSETNX_TEST_KEY3", "MSET_TEST_VALUE3"});
+//   s = db.MSetnx(kvs, &ret);
+//   ASSERT_TRUE(s.ok());
+//   ASSERT_EQ(ret, 1);
+// }
+
+// // Set
+// TEST_F(StringsTest, SetTest) {
+//   s = db.Set("SET_KEY", "SET_VALUE_1");
+//   ASSERT_TRUE(s.ok());
+
+//   std::string value;
+//   s = db.Get("SET_KEY", &value);
+//   ASSERT_STREQ(value.c_str(), "SET_VALUE_1");
+
+//   s = db.Set("SET_KEY", "SET_VALUE_2");
+//   ASSERT_TRUE(s.ok());
+
+//   s = db.Get("SET_KEY", &value);
+//   ASSERT_STREQ(value.c_str(), "SET_VALUE_2");
+// }
+
+// SetBit
+TEST_F(StringsTest, SetBitTest) {
+  int32_t ret;
+  // ***************** Group 1 Test *****************
+  s = db.SetBit("GP1_SETBIT_KEY", 7, 1, &ret);
+  ASSERT_TRUE(s.ok());
+  ASSERT_EQ(ret, 0);
+
+  s = db.SetBit("GP1_SETBIT_KEY", 7, 0, &ret);
+  ASSERT_TRUE(s.ok());
+  ASSERT_EQ(ret, 1);
+
+  std::string value;
+  s = db.Get("GP1_SETBIT_KEY", &value);
+  ASSERT_TRUE(s.ok());
+  ASSERT_STREQ(value.c_str(), "\x00");
+
+  // ***************** Group 2 Test *****************
+  s = db.SetBit("GP2_SETBIT_KEY", 10081, 1, &ret);
+  ASSERT_TRUE(s.ok());
+  ASSERT_EQ(ret, 0);
+
+  s = db.GetBit("GP2_SETBIT_KEY", 10081, &ret);
+  ASSERT_TRUE(s.ok());
+  ASSERT_EQ(ret, 1);
+
+  s = db.SetBit("GP2_SETBIT_KEY", 10081, 1, &ret);
+  ASSERT_TRUE(s.ok());
+  ASSERT_EQ(ret, 1);
+
+  s = db.GetBit("GP2_SETBIT_KEY", 10081, &ret);
+  ASSERT_TRUE(s.ok());
+  ASSERT_EQ(ret, 1);
+
+  // ***************** Group 3 Test *****************
db.SetBit("GP3_SETBIT_KEY", 1, 1, &ret); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(ret, 0); + + s = db.GetBit("GP3_SETBIT_KEY", 1, &ret); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(ret, 1); + + s = db.SetBit("GP3_SETBIT_KEY", 1, 0, &ret); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(ret, 1); + + s = db.GetBit("GP3_SETBIT_KEY", 1, &ret); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(ret, 0); + + // ***************** Group 4 Test ***************** + s = db.SetBit("GP4_SETBIT_KEY", 1, 1, &ret); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(ret, 0); + ASSERT_TRUE(make_expired(&db, "GP4_SETBIT_KEY")); + + s = db.SetBit("GP4_SETBIT_KEY", 1, 1, &ret); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(ret, 0); + + // ***************** Group 5 Test ***************** + // The offset argument is less than 0 + s = db.SetBit("GP5_SETBIT_KEY", -1, 0, &ret); + ASSERT_TRUE(s.IsInvalidArgument()); +} + +// Setex +TEST_F(StringsTest, SetexTest) { + std::string value; + s = db.Setex("SETEX_KEY", "SETEX_VALUE", 1); + ASSERT_TRUE(s.ok()); + + // The key is not timeout + s = db.Get("SETEX_KEY", &value); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(value, "SETEX_VALUE"); + + // The key is timeout + std::this_thread::sleep_for(std::chrono::milliseconds(2000)); + s = db.Get("SETEX_KEY", &value); + ASSERT_TRUE(s.IsNotFound()); + + // If the ttl equal 0 + s = db.Setex("SETEX_KEY", "SETEX_VALUE", 0); + ASSERT_TRUE(s.IsInvalidArgument()); + + // The ttl is negative + s = db.Setex("SETEX_KEY", "SETEX_VALUE", -1); + ASSERT_TRUE(s.IsInvalidArgument()); +} + +// Setnx +TEST_F(StringsTest, SetnxTest) { + // If the key was set, return 1 + int32_t ret; + s = db.Setnx("SETNX_KEY", "TEST_VALUE", &ret); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(ret, 1); + + // If the key was not set, return 0 + s = db.Setnx("SETNX_KEY", "TEST_VALUE", &ret); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(ret, 0); +} + +// Setvx +TEST_F(StringsTest, SetvxTest) { + int32_t ret; + int32_t ttl; + std::string value; + // ***************** Group 1 Test ***************** + s = db.Set("GP1_SETVX_KEY", "GP1_SETVX_VALUE"); + ASSERT_TRUE(s.ok()); + + s = db.Setvx("GP1_SETVX_KEY", "GP1_SETVX_VALUE", "GP1_SETVX_NEW_VALUE", &ret); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(ret, 1); + + s = db.Get("GP1_SETVX_KEY", &value); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(value, "GP1_SETVX_NEW_VALUE"); + + // ***************** Group 2 Test ***************** + s = db.Setvx("GP2_SETVX_KEY", "GP2_SETVX_VALUE", "GP2_SETVX_NEW_VALUE", &ret); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(ret, 0); + + s = db.Get("GP2_SETVX_KEY", &value); + ASSERT_TRUE(s.IsNotFound()); + ASSERT_EQ(value, ""); + + // ***************** Group 3 Test ***************** + s = db.Set("GP3_SETVX_KEY", "GP3_SETVX_VALUE"); + ASSERT_TRUE(s.ok()); + + s = db.Setvx("GP3_SETVX_KEY", "GP3_SETVX_OTHER_VALUE", "GP3_SETVX_NEW_VALUE", &ret); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(ret, -1); + + s = db.Get("GP3_SETVX_KEY", &value); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(value, "GP3_SETVX_VALUE"); + + // ***************** Group 4 Test ***************** + s = db.Set("GP4_SETVX_KEY", "GP4_SETVX_VALUE"); + ASSERT_TRUE(s.ok()); + + ASSERT_TRUE(make_expired(&db, "GP4_SETVX_KEY")); + s = db.Setvx("GP4_SETVX_KEY", "GP4_SETVX_VALUE", "GP4_SETVX_NEW_VALUE", &ret); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(ret, 0); + + s = db.Get("GP4_SETVX_KEY", &value); + ASSERT_TRUE(s.IsNotFound()); + ASSERT_EQ(value, ""); + + // ***************** Group 5 Test ***************** + s = db.Set("GP5_SETVX_KEY", "GP5_SETVX_VALUE"); + ASSERT_TRUE(s.ok()); + + s = db.Setvx("GP5_SETVX_KEY", "GP5_SETVX_VALUE", "GP5_SETVX_NEW_VALUE", &ret, 10); + ASSERT_TRUE(s.ok()); + 
ASSERT_EQ(ret, 1); + + s = db.Get("GP5_SETVX_KEY", &value); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(value, "GP5_SETVX_NEW_VALUE"); + + ASSERT_TRUE(string_ttl(&db, "GP5_SETVX_KEY", &ttl)); + ASSERT_LE(0, ttl); + ASSERT_GE(10, ttl); + + // ***************** Group 6 Test ***************** + s = db.Set("GP6_SETVX_KEY", "GP6_SETVX_VALUE"); + ASSERT_TRUE(s.ok()); + + std::map type_status; + ret = db.Expire("GP6_SETVX_KEY", 10 * 1000); + ASSERT_EQ(ret, 1); + + sleep(1); + ASSERT_TRUE(string_ttl(&db, "GP6_SETVX_KEY", &ttl)); + ASSERT_LT(0, ttl); + ASSERT_GT(10, ttl); + + s = db.Setvx("GP6_SETVX_KEY", "GP6_SETVX_VALUE", "GP6_SETVX_NEW_VALUE", &ret, 20 * 1000); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(ret, 1); + + s = db.Get("GP6_SETVX_KEY", &value); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(value, "GP6_SETVX_NEW_VALUE"); + + sleep(1); + ASSERT_TRUE(string_ttl(&db, "GP6_SETVX_KEY", &ttl)); + ASSERT_LE(10, ttl); + ASSERT_GE(20, ttl); +} + +// Delvx +TEST_F(StringsTest, DelvxTest) { + int32_t ret; + int32_t ttl; + std::string value; + // ***************** Group 1 Test ***************** + s = db.Set("GP1_DELVX_KEY", "GP1_DELVX_VALUE"); + ASSERT_TRUE(s.ok()); + + s = db.Delvx("GP1_DELVX_KEY", "GP1_DELVX_VALUE", &ret); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(ret, 1); + + s = db.Get("GP1_DELVX_KEY", &value); + ASSERT_TRUE(s.IsNotFound()); + ASSERT_EQ(value, ""); + + // ***************** Group 2 Test ***************** + s = db.Delvx("GP2_DELVX_KEY", "GP2_DELVX_VALUE", &ret); + ASSERT_TRUE(s.IsNotFound()); + ASSERT_EQ(ret, 0); + + s = db.Get("GP2_DELVX_KEY", &value); + ASSERT_TRUE(s.IsNotFound()); + ASSERT_EQ(value, ""); + + // ***************** Group 3 Test ***************** + s = db.Set("GP3_DELVX_KEY", "GP3_DELVX_VALUE"); + ASSERT_TRUE(s.ok()); + + s = db.Delvx("GP3_DELVX_KEY", "GP3_DELVX_OTHER_VALUE", &ret); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(ret, -1); + + s = db.Get("GP3_DELVX_KEY", &value); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(value, "GP3_DELVX_VALUE"); + + // ***************** Group 4 Test ***************** + s = db.Set("GP4_DELVX_KEY", "GP4_DELVX_VALUE"); + ASSERT_TRUE(s.ok()); + + ASSERT_TRUE(make_expired(&db, "GP4_DELVX_KEY")); + s = db.Delvx("GP4_DELVX_KEY", "GP4_DELVX_VALUE", &ret); + ASSERT_TRUE(s.IsNotFound()); + ASSERT_EQ(ret, 0); + + s = db.Get("GP4_DELVX_KEY", &value); + ASSERT_TRUE(s.IsNotFound()); + ASSERT_EQ(value, ""); +} + +// Setrange +TEST_F(StringsTest, SetrangeTest) { + std::string value; + int32_t ret; + s = db.Set("SETRANGE_KEY", "HELLO WORLD"); + ASSERT_TRUE(s.ok()); + s = db.Setrange("SETRANGE_KEY", 6, "REDIS", &ret); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(ret, 11); + s = db.Get("SETRANGE_KEY", &value); + ASSERT_STREQ(value.c_str(), "HELLO REDIS"); + + std::vector keys{"SETRANGE_KEY"}; + std::map type_status; + ret = db.Del(keys); + ASSERT_EQ(ret, 1); + // If not exist, padded with zero-bytes to make offset fit + s = db.Setrange("SETRANGE_KEY", 6, "REDIS", &ret); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(ret, 11); + s = db.Get("SETRANGE_KEY", &value); + ASSERT_STREQ(value.c_str(), "\x00\x00\x00\x00\x00\x00REDIS"); + + // If the offset less than 0 + s = db.Setrange("SETRANGE_KEY", -1, "REDIS", &ret); + ASSERT_TRUE(s.IsInvalidArgument()); +} + +// Strlen +TEST_F(StringsTest, StrlenTest) { + int32_t strlen; + // The value is empty + s = db.Set("STRLEN_EMPTY_KEY", ""); + ASSERT_TRUE(s.ok()); + s = db.Strlen("STRLEN_EMPTY_KEY", &strlen); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(strlen, 0); + + // The key is not exist + s = db.Strlen("STRLEN_NOT_EXIST_KEY", &strlen); + ASSERT_EQ(strlen, 0); + + s = db.Set("STRLEN_KEY", 
"STRLEN_VALUE"); + ASSERT_TRUE(s.ok()); + s = db.Strlen("STRLEN_KEY", &strlen); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(strlen, 12); +} + +// BitPos +TEST_F(StringsTest, BitPosTest) { + // bitpos key bit + int64_t ret; + s = db.Set("BITPOS_KEY", "\xff\xf0\x00"); + ASSERT_TRUE(s.ok()); + s = db.BitPos("BITPOS_KEY", 0, &ret); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(ret, 12); + + // bitpos key bit [start] + s = db.Set("BITPOS_KEY", "\xff\x00\x00"); + ASSERT_TRUE(s.ok()); + s = db.BitPos("BITPOS_KEY", 1, 0, &ret); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(ret, 0); + s = db.BitPos("BITPOS_KEY", 1, 2, &ret); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(ret, -1); + + // bitpos key bit [start] [end] + s = db.BitPos("BITPOS_KEY", 1, 0, 4, &ret); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(ret, 0); + + // bit value is not exists + s = db.Set("BITPOS_KEY", "\x00\x00\x00"); + ASSERT_TRUE(s.ok()); + s = db.BitPos("BITPOS_KEY", 1, &ret); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(ret, -1); + + s = db.Set("BITPOS_KEY", "\xff\xff\xff"); + ASSERT_TRUE(s.ok()); + s = db.BitPos("BITPOS_KEY", 0, &ret); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(ret, -1); + + s = db.BitPos("BITPOS_KEY", 0, 0, &ret); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(ret, -1); + + s = db.BitPos("BITPOS_KEY", 0, 0, -1, &ret); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(ret, -1); + + // the offset is beyond the range + s = db.BitPos("BITPOS_KEY", 0, 4, &ret); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(ret, -1); +} + +// PKSetexAt +TEST_F(StringsTest, PKSetexAtTest) { + pstd::TimeType unix_time; + int64_t ttl_ret; + std::map type_status; + + // ***************** Group 1 Test ***************** + unix_time = pstd::NowMillis(); + s = db.PKSetexAt("GP1_PKSETEX_KEY", "VALUE", unix_time + 100*1000); + ASSERT_TRUE(s.ok()); + + type_status.clear(); + std::this_thread::sleep_for(std::chrono::milliseconds(5000)); + ttl_ret = db.TTL("GP1_PKSETEX_KEY"); + ASSERT_LE(ttl_ret, 100); + ASSERT_GE(ttl_ret, 90); + + // ***************** Group 2 Test ***************** + unix_time = pstd::NowMillis(); + s = db.Set("GP2_PKSETEX_KEY", "VALUE"); + ASSERT_TRUE(s.ok()); + s = db.PKSetexAt("GP2_PKSETEX_KEY", "VALUE", unix_time + 100*1000); + ASSERT_TRUE(s.ok()); + + type_status.clear(); + std::this_thread::sleep_for(std::chrono::milliseconds(5000)); + ttl_ret = db.TTL("GP2_PKSETEX_KEY"); + ASSERT_LE(ttl_ret, 100); + ASSERT_GE(ttl_ret, 90); + + // ***************** Group 3 Test ***************** + unix_time = pstd::NowMillis(); + s = db.PKSetexAt("GP3_PKSETEX_KEY", "VALUE", unix_time - 100*1000); + ASSERT_TRUE(s.ok()); + + type_status.clear(); + ttl_ret = db.TTL("GP3_PKSETEX_KEY"); + ASSERT_EQ(ttl_ret, -2); + + // ***************** Group 4 Test ***************** + unix_time = pstd::NowMillis(); + s = db.Set("GP4_PKSETEX_KEY", "VALUE"); + ASSERT_TRUE(s.ok()); + s = db.PKSetexAt("GP4_PKSETEX_KEY", "VALUE", unix_time - 100*1000); + ASSERT_TRUE(s.ok()); + + type_status.clear(); + ttl_ret = db.TTL("GP4_PKSETEX_KEY"); + ASSERT_EQ(ttl_ret, -2); + + // ***************** Group 5 Test ***************** + unix_time = pstd::NowMillis(); + s = db.PKSetexAt("GP5_PKSETEX_KEY", "VALUE", -unix_time); + ASSERT_TRUE(s.ok()); + + type_status.clear(); + ttl_ret = db.TTL("GP5_PKSETEX_KEY"); + ASSERT_EQ(ttl_ret, -2); + + // ***************** Group 6 Test ***************** + unix_time = pstd::NowMillis(); + s = db.Set("GP6_PKSETEX_KEY", "VALUE"); + ASSERT_TRUE(s.ok()); + s = db.PKSetexAt("GP6_PKSETEX_KEY", "VALUE", -unix_time); + ASSERT_TRUE(s.ok()); + + type_status.clear(); + ttl_ret = db.TTL("GP6_PKSETEX_KEY"); + ASSERT_EQ(ttl_ret, -2); +} + +int main(int argc, 
char** argv) { + if (!pstd::FileExists("./log")) { + pstd::CreatePath("./log"); + } + FLAGS_log_dir = "./log"; + FLAGS_minloglevel = 0; + FLAGS_max_log_size = 1800; + FLAGS_logbufsecs = 0; + ::google::InitGoogleLogging("strings_test"); + ::testing::InitGoogleTest(&argc, argv); + return RUN_ALL_TESTS(); +} diff --git a/tools/pika_migrate/src/storage/tests/zsets_test.cc b/tools/pika_migrate/src/storage/tests/zsets_test.cc new file mode 100644 index 0000000000..61df352bda --- /dev/null +++ b/tools/pika_migrate/src/storage/tests/zsets_test.cc @@ -0,0 +1,5249 @@ +// Copyright (c) 2017-present, Qihoo, Inc. All rights reserved. +// This source code is licensed under the BSD-style license found in the +// LICENSE file in the root directory of this source tree. An additional grant +// of patent rights can be found in the PATENTS file in the same directory. + +#include +#include +#include + +#include "glog/logging.h" + +#include "pstd/include/pika_codis_slot.h" +#include "pstd/include/env.h" +#include "storage/storage.h" +#include "storage/util.h" + +// using namespace storage; +using storage::Status; +using storage::Slice; +using storage::ScoreMember; +using storage::DataType; + +class ZSetsTest : public ::testing::Test { + public: + ZSetsTest() = default; + ~ZSetsTest() override = default; + + void SetUp() override { + std::string path = "./db/zsets"; + pstd::DeleteDirIfExist(path); + mkdir(path.c_str(), 0755); + storage_options.options.create_if_missing = true; + s = db.Open(storage_options, path); + if (!s.ok()) { + printf("Open db failed, exit...\n"); + exit(1); + } + } + + void TearDown() override { + std::string path = "./db/zsets"; + storage::DeleteFiles(path.c_str()); + } + + static void SetUpTestSuite() {} + static void TearDownTestSuite() {} + + storage::StorageOptions storage_options; + storage::Storage db; + storage::Status s; +}; + +static bool members_match(const std::vector& mm_out, const std::vector& expect_members) { + if (mm_out.size() != expect_members.size()) { + return false; + } + for (const auto& member : expect_members) { + if (find(mm_out.begin(), mm_out.end(), member) == mm_out.end()) { + return false; + } + } + return true; +} + +static bool score_members_match(storage::Storage* const db, const Slice& key, + const std::vector& expect_sm) { + std::vector sm_out; + storage::Status s = db->ZRange(key, 0, -1, &sm_out); + if (!s.ok() && !s.IsNotFound()) { + return false; + } + if (sm_out.size() != expect_sm.size()) { + return false; + } + if (s.IsNotFound() && expect_sm.empty()) { + return true; + } + for (int idx = 0; idx < sm_out.size(); ++idx) { + if (expect_sm[idx].score != sm_out[idx].score || expect_sm[idx].member != sm_out[idx].member) { + return false; + } + } + return true; +} + +static bool score_members_match(const std::vector& sm_out, + const std::vector& expect_sm) { + if (sm_out.size() != expect_sm.size()) { + return false; + } + for (int idx = 0; idx < sm_out.size(); ++idx) { + if (expect_sm[idx].score != sm_out[idx].score || expect_sm[idx].member != sm_out[idx].member) { + return false; + } + } + return true; +} + +static bool size_match(storage::Storage* const db, const Slice& key, int32_t expect_size) { + int32_t size = 0; + storage::Status s = db->ZCard(key, &size); + if (!s.ok() && !s.IsNotFound()) { + return false; + } + if (s.IsNotFound() && (expect_size == 0)) { + return true; + } + LOG(WARNING) << "size_match ? 
size: " << size << " expect_size: " << expect_size; + return size == expect_size; +} + +static bool make_expired(storage::Storage* const db, const storage::Slice& key) { + std::map type_status; + int ret = db->Expire(key, 1); + if ((ret == 0) || !type_status[storage::DataType::kZSets].ok()) { + return false; + } + std::this_thread::sleep_for(std::chrono::milliseconds(2000)); + return true; +} + +static bool delete_key(storage::Storage* const db, const storage::Slice& key) { + std::vector del_keys = {key.ToString()}; + std::map type_status; + db->Del(del_keys); + return type_status[storage::DataType::kZSets].ok(); +} + +// ZPopMax +TEST_F(ZSetsTest, ZPopMaxTest) { // NOLINT + int32_t ret; + int64_t type_ttl; + std::map type_status; + + // ***************** Group 1 Test ***************** + // [-0.54, MM4] + // [0, MM2] + // [3.23, MM1] + // [8.0004, MM3] + std::vector gp1_sm{{3.23, "MM1"}, {0, "MM2"}, {8.0004, "MM3"}, {-0.54, "MM4"}}; + storage::Status s = db.ZAdd("GP1_ZPOPMAX_KEY", gp1_sm, &ret); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(4, ret); + ASSERT_TRUE(size_match(&db, "GP1_ZPOPMAX_KEY", 4)); + ASSERT_TRUE( + score_members_match(&db, "GP1_ZPOPMAX_KEY", {{-0.54, "MM4"}, {0, "MM2"}, {3.23, "MM1"}, {8.0004, "MM3"}})); + std::vector score_members; + s = db.ZPopMax("GP1_ZPOPMAX_KEY", 1, &score_members); + + // [-0.54, MM4] ret: [8.0004, MM3] + // [0, MM2] + // [3.23, MM1] + ASSERT_TRUE(s.ok()); + ASSERT_EQ(1, score_members.size()); + ASSERT_TRUE(score_members_match(score_members, {{8.0004, "MM3"}})); + ASSERT_TRUE(score_members_match(&db, "GP1_ZPOPMAX_KEY", {{-0.54, "MM4"}, {0, "MM2"}, {3.23, "MM1"}})); + ASSERT_TRUE(size_match(&db, "GP1_ZPOPMAX_KEY", 3)); + s = db.ZPopMax("GP1_ZPOPMAX_KEY", 3, &score_members); + + // ret: [3.23, MM1] + // [0, MM2] + // [-0.54, MM4] + ASSERT_TRUE(s.ok()); + ASSERT_EQ(3, score_members.size()); + ASSERT_TRUE(score_members_match(score_members, {{3.23, "MM1"}, {0, "MM2"}, {-0.54, "MM4"}})); + ASSERT_TRUE(size_match(&db, "GP1_ZPOPMAX_KEY", 0)); + ASSERT_TRUE(score_members_match(&db, "GP1_ZPOPMAX_KEY", {})); + s = db.ZPopMax("GP1_ZPOPMAX_KEY", 1, &score_members); + + // ret: + ASSERT_TRUE(s.IsNotFound()); + ASSERT_EQ(0, score_members.size()); + ASSERT_TRUE(size_match(&db, "GP1_ZPOPMAX_KEY", 0)); + ASSERT_TRUE(score_members_match(&db, "GP1_ZPOPMAX_KEY", {})); + + // ***************** Group 2 Test ***************** + // [0, MM1] + // [0, MM2] + // [0, MM3] + std::vector gp2_sm{{0, "MM1"}, {0, "MM2"}, {0, "MM3"}}; + s = db.ZAdd("GP2_ZPOPMAX_KEY", gp2_sm, &ret); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(3, ret); + ASSERT_TRUE(size_match(&db, "GP2_ZPOPMAX_KEY", 3)); + ASSERT_TRUE(score_members_match(&db, "GP2_ZPOPMAX_KEY", {{0, "MM1"}, {0, "MM2"}, {0, "MM3"}})); + s = db.ZPopMax("GP2_ZPOPMAX_KEY", 1, &score_members); + + // [0, MM1] ret: [0, MM3] + // [0, MM2] + ASSERT_TRUE(s.ok()); + ASSERT_EQ(1, score_members.size()); + ASSERT_TRUE(size_match(&db, "GP2_ZPOPMAX_KEY", 2)); + ASSERT_TRUE(score_members_match(score_members, {{0, "MM3"}})); + ASSERT_TRUE(score_members_match(&db, "GP2_ZPOPMAX_KEY", {{0, "MM1"}, {0, "MM2"}})); + s = db.ZPopMax("GP2_ZPOPMAX_KEY", 3, &score_members); + + // ret: [0, MM2] + // [0, MM1] + ASSERT_TRUE(s.ok()); + ASSERT_EQ(2, score_members.size()); + ASSERT_TRUE(size_match(&db, "GP2_ZPOPMAX_KEY", 0)); + ASSERT_TRUE(score_members_match(score_members, {{0, "MM2"}, {0, "MM1"}})); + ASSERT_TRUE(score_members_match(&db, "GP2_ZPOPMAX_KEY", {})); + + // ***************** Group 3 Test ***************** + // [-1, MM3] + // [-1, MM4] + // [1 / 6.0, MM5] + // [1 / 6.0, 
+  // [1 / 6.0, MM6]
+  // [0.532445, MM7]
+  // [0.532445, MM8]
+  // [1, MM1]
+  // [1, MM2]
+  // [2e5 + 3.98, MM10]
+  // [2e5 + 3.98, MM9]
+  std::vector<ScoreMember> gp3_sm{
+      {1, "MM1"}, {1, "MM2"}, {-1, "MM3"}, {-1, "MM4"}, {1 / 6.0, "MM5"},
+      {1 / 6.0, "MM6"}, {0.532445, "MM7"}, {0.532445, "MM8"}, {2e5 + 3.98, "MM9"}, {2e5 + 3.98, "MM10"}};
+  s = db.ZAdd("GP3_ZPOPMAX_KEY", gp3_sm, &ret);
+  ASSERT_TRUE(s.ok());
+  ASSERT_EQ(10, ret);
+  ASSERT_TRUE(size_match(&db, "GP3_ZPOPMAX_KEY", 10));
+  ASSERT_TRUE(score_members_match(&db, "GP3_ZPOPMAX_KEY",
+                                  {{-1, "MM3"}, {-1, "MM4"}, {1 / 6.0, "MM5"}, {1 / 6.0, "MM6"},
+                                   {0.532445, "MM7"}, {0.532445, "MM8"}, {1, "MM1"}, {1, "MM2"},
+                                   {2e5 + 3.98, "MM10"}, {2e5 + 3.98, "MM9"}}));
+  s = db.ZPopMax("GP3_ZPOPMAX_KEY", 5, &score_members);
+
+  // [-1, MM3]          ret: [2e5 + 3.98, MM9]
+  // [-1, MM4]               [2e5 + 3.98, MM10]
+  // [1 / 6.0, MM5]          [1, MM2]
+  // [1 / 6.0, MM6]          [1, MM1]
+  // [0.532445, MM7]         [0.532445, MM8]
+  ASSERT_TRUE(s.ok());
+  ASSERT_EQ(5, score_members.size());
+  ASSERT_TRUE(size_match(&db, "GP3_ZPOPMAX_KEY", 5));
+  ASSERT_TRUE(score_members_match(
+      score_members, {{2e5 + 3.98, "MM9"}, {2e5 + 3.98, "MM10"}, {1, "MM2"}, {1, "MM1"}, {0.532445, "MM8"}}));
+  ASSERT_TRUE(score_members_match(&db, "GP3_ZPOPMAX_KEY",
+                                  {{-1, "MM3"}, {-1, "MM4"}, {1 / 6.0, "MM5"}, {1 / 6.0, "MM6"}, {0.532445, "MM7"}}));
+
+  // ***************** Group 4 Test *****************
+  //
+  s = db.ZPopMax("GP4_ZPOPMAX_KEY", 1, &score_members);
+
+  //                 ret:
+  ASSERT_TRUE(s.IsNotFound());
+  ASSERT_EQ(0, score_members.size());
+
+  // ***************** Group 5 Test *****************
+  // [-1, MM1]
+  // [0, MM2]
+  // [1, MM3]
+  std::vector<ScoreMember> gp5_sm1{{-1, "MM1"}, {0, "MM2"}, {1, "MM3"}};
+  s = db.ZAdd("GP5_ZPOPMAX_KEY", gp5_sm1, &ret);
+  ASSERT_TRUE(s.ok());
+  ASSERT_EQ(3, ret);
+  ASSERT_TRUE(size_match(&db, "GP5_ZPOPMAX_KEY", 3));
+  ASSERT_TRUE(score_members_match(&db, "GP5_ZPOPMAX_KEY", {{-1, "MM1"}, {0, "MM2"}, {1, "MM3"}}));
+  ASSERT_TRUE(make_expired(&db, "GP5_ZPOPMAX_KEY"));
+  ASSERT_TRUE(size_match(&db, "GP5_ZPOPMAX_KEY", 0));
+  ASSERT_TRUE(score_members_match(&db, "GP5_ZPOPMAX_KEY", {}));
+  s = db.ZPopMax("GP5_ZPOPMAX_KEY", 1, &score_members);
+
+  //                 ret:
+  ASSERT_TRUE(s.IsNotFound());
+  ASSERT_EQ(0, score_members.size());
+}
+
+// ZPopMin
+TEST_F(ZSetsTest, ZPopMinTest) {  // NOLINT
+  int32_t ret;
+  std::map<storage::DataType, int64_t> type_ttl;
+  std::map<storage::DataType, Status> type_status;
+
+  // ***************** Group 1 Test *****************
+  // [-0.54, MM4]
+  // [0, MM2]
+  // [3.23, MM1]
+  // [8.0004, MM3]
+  std::vector<ScoreMember> gp1_sm{{3.23, "MM1"}, {0, "MM2"}, {8.0004, "MM3"}, {-0.54, "MM4"}};
+  Status s = db.ZAdd("GP1_ZPOPMIN_KEY", gp1_sm, &ret);
+  ASSERT_TRUE(s.ok());
+  ASSERT_EQ(4, ret);
+  ASSERT_TRUE(size_match(&db, "GP1_ZPOPMIN_KEY", 4));
+  ASSERT_TRUE(
+      score_members_match(&db, "GP1_ZPOPMIN_KEY", {{-0.54, "MM4"}, {0, "MM2"}, {3.23, "MM1"}, {8.0004, "MM3"}}));
+  std::vector<ScoreMember> score_members;
+  s = db.ZPopMin("GP1_ZPOPMIN_KEY", 1, &score_members);
+
+  // [0, MM2]        ret: [-0.54, MM4]
+  // [3.23, MM1]
+  // [8.0004, MM3]
+  ASSERT_TRUE(s.ok());
+  ASSERT_EQ(1, score_members.size());
+  ASSERT_TRUE(score_members_match(score_members, {{-0.54, "MM4"}}));
+  ASSERT_TRUE(size_match(&db, "GP1_ZPOPMIN_KEY", 3));
+  ASSERT_TRUE(score_members_match(&db, "GP1_ZPOPMIN_KEY", {{0, "MM2"}, {3.23, "MM1"}, {8.0004, "MM3"}}));
+  s = db.ZPopMin("GP1_ZPOPMIN_KEY", 3, &score_members);
+
+  //                 ret: [0, MM2]
+  //                      [3.23, MM1]
+  //                      [8.0004, MM3]
+  ASSERT_TRUE(s.ok());
+  ASSERT_EQ(3, score_members.size());
+  ASSERT_TRUE(score_members_match(score_members, {{0, "MM2"}, {3.23, "MM1"}, {8.0004, "MM3"}}));
+  ASSERT_TRUE(size_match(&db, "GP1_ZPOPMIN_KEY", 0));
+  ASSERT_TRUE(score_members_match(&db, "GP1_ZPOPMIN_KEY", {}));
+  s = db.ZPopMin("GP1_ZPOPMIN_KEY", 1, &score_members);
+
+  //                 ret:
+  ASSERT_TRUE(s.IsNotFound());
+  ASSERT_EQ(0, score_members.size());
+  ASSERT_TRUE(size_match(&db, "GP1_ZPOPMIN_KEY", 0));
+  ASSERT_TRUE(score_members_match(&db, "GP1_ZPOPMIN_KEY", {}));
+
+  // ***************** Group 2 Test *****************
+  // [0, MM1]
+  // [0, MM2]
+  // [0, MM3]
+  std::vector<ScoreMember> gp2_sm{{0, "MM1"}, {0, "MM2"}, {0, "MM3"}};
+  s = db.ZAdd("GP2_ZPOPMIN_KEY", gp2_sm, &ret);
+  ASSERT_TRUE(s.ok());
+  ASSERT_EQ(3, ret);
+  ASSERT_TRUE(size_match(&db, "GP2_ZPOPMIN_KEY", 3));
+  ASSERT_TRUE(score_members_match(&db, "GP2_ZPOPMIN_KEY", {{0, "MM1"}, {0, "MM2"}, {0, "MM3"}}));
+  s = db.ZPopMin("GP2_ZPOPMIN_KEY", 1, &score_members);
+
+  // [0, MM2]        ret: [0, MM1]
+  // [0, MM3]
+  ASSERT_TRUE(s.ok());
+  ASSERT_EQ(1, score_members.size());
+  ASSERT_TRUE(size_match(&db, "GP2_ZPOPMIN_KEY", 2));
+  ASSERT_TRUE(score_members_match(score_members, {{0, "MM1"}}));
+  ASSERT_TRUE(score_members_match(&db, "GP2_ZPOPMIN_KEY", {{0, "MM2"}, {0, "MM3"}}));
+  s = db.ZPopMin("GP2_ZPOPMIN_KEY", 3, &score_members);
+
+  //                 ret: [0, MM2]
+  //                      [0, MM3]
+  ASSERT_TRUE(s.ok());
+  ASSERT_EQ(score_members.size(), 2);
+  ASSERT_TRUE(size_match(&db, "GP2_ZPOPMIN_KEY", 0));
+  ASSERT_TRUE(score_members_match(score_members, {{0, "MM2"}, {0, "MM3"}}));
+  ASSERT_TRUE(score_members_match(&db, "GP2_ZPOPMIN_KEY", {}));
+
+  // ***************** Group 3 Test *****************
+  // [-1, MM3]
+  // [-1, MM4]
+  // [1 / 6.0, MM5]
+  // [1 / 6.0, MM6]
+  // [0.532445, MM7]
+  // [0.532445, MM8]
+  // [1, MM1]
+  // [1, MM2]
+  // [2e5 + 3.98, MM10]
+  // [2e5 + 3.98, MM9]
+  std::vector<ScoreMember> gp3_sm{
+      {1, "MM1"}, {1, "MM2"}, {-1, "MM3"}, {-1, "MM4"}, {1 / 6.0, "MM5"},
+      {1 / 6.0, "MM6"}, {0.532445, "MM7"}, {0.532445, "MM8"}, {2e5 + 3.98, "MM9"}, {2e5 + 3.98, "MM10"}};
+  s = db.ZAdd("GP3_ZPOPMIN_KEY", gp3_sm, &ret);
+  ASSERT_TRUE(s.ok());
+  ASSERT_EQ(10, ret);
+  ASSERT_TRUE(size_match(&db, "GP3_ZPOPMIN_KEY", 10));
+  ASSERT_TRUE(score_members_match(&db, "GP3_ZPOPMIN_KEY",
+                                  {{-1, "MM3"}, {-1, "MM4"}, {1 / 6.0, "MM5"}, {1 / 6.0, "MM6"},
+                                   {0.532445, "MM7"}, {0.532445, "MM8"}, {1, "MM1"}, {1, "MM2"},
+                                   {2e5 + 3.98, "MM10"}, {2e5 + 3.98, "MM9"}}));
+  s = db.ZPopMin("GP3_ZPOPMIN_KEY", 5, &score_members);
+
+  // [0.532445, MM8]      ret: [-1, MM3]
+  // [1, MM1]                  [-1, MM4]
+  // [1, MM2]                  [1 / 6.0, MM5]
+  // [2e5 + 3.98, MM10]        [1 / 6.0, MM6]
+  // [2e5 + 3.98, MM9]         [0.532445, MM7]
+  ASSERT_TRUE(s.ok());
+  ASSERT_EQ(5, score_members.size());
+  ASSERT_TRUE(size_match(&db, "GP3_ZPOPMIN_KEY", 5));
+  ASSERT_TRUE(score_members_match(
+      &db, "GP3_ZPOPMIN_KEY", {{0.532445, "MM8"}, {1, "MM1"}, {1, "MM2"}, {2e5 + 3.98, "MM10"}, {2e5 + 3.98, "MM9"}}));
+  ASSERT_TRUE(score_members_match(score_members,
+                                  {{-1, "MM3"}, {-1, "MM4"}, {1 / 6.0, "MM5"}, {1 / 6.0, "MM6"}, {0.532445, "MM7"}}));
+
+  // ***************** Group 4 Test *****************
+  //
+  s = db.ZPopMin("GP4_ZPOPMIN_KEY", 1, &score_members);
+
+  //                 ret:
+  ASSERT_TRUE(s.IsNotFound());
+  ASSERT_EQ(0, score_members.size());
+
+  // ***************** Group 5 Test *****************
+  // [-1, MM1]
+  // [0, MM2]
+  // [1, MM3]
+  std::vector<ScoreMember> gp5_sm1{{-1, "MM1"}, {0, "MM2"}, {1, "MM3"}};
+  s = db.ZAdd("GP5_ZPOPMIN_KEY", gp5_sm1, &ret);
+  ASSERT_TRUE(s.ok());
+  ASSERT_EQ(3, ret);
+  ASSERT_TRUE(size_match(&db, "GP5_ZPOPMIN_KEY", 3));
+  ASSERT_TRUE(score_members_match(&db, "GP5_ZPOPMIN_KEY", {{-1, "MM1"}, {0, "MM2"}, {1, "MM3"}}));
{1, "MM3"}})); + ASSERT_TRUE(make_expired(&db, "GP5_ZPOPMIN_KEY")); + ASSERT_TRUE(size_match(&db, "GP5_ZPOPMIN_KEY", 0)); + ASSERT_TRUE(score_members_match(&db, "GP5_ZPOPMIN_KEY", {})); + s = db.ZPopMin("GP5_ZPOPMIN_KEY", 1, &score_members); + + // ret: + ASSERT_TRUE(s.IsNotFound()); + ASSERT_EQ(0, score_members.size()); +} + +// ZAdd +TEST_F(ZSetsTest, ZAddTest) { // NOLINT + int32_t ret; + int64_t type_ttl; + std::map type_status; + + // ***************** Group 1 Test ***************** + std::vector gp1_sm{{3.23, "MM1"}, {0, "MM2"}, {8.0004, "MM3"}, {-0.54, "MM4"}}; + s = db.ZAdd("GP1_ZADD_KEY", gp1_sm, &ret); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(4, ret); + ASSERT_TRUE(size_match(&db, "GP1_ZADD_KEY", 4)); + ASSERT_TRUE(score_members_match(&db, "GP1_ZADD_KEY", {{-0.54, "MM4"}, {0, "MM2"}, {3.23, "MM1"}, {8.0004, "MM3"}})); + + // ***************** Group 2 Test ***************** + std::vector gp2_sm{{0, "MM1"}, {0, "MM1"}, {0, "MM2"}, {0, "MM3"}}; + s = db.ZAdd("GP2_ZADD_KEY", gp2_sm, &ret); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(3, ret); + ASSERT_TRUE(size_match(&db, "GP2_ZADD_KEY", 3)); + ASSERT_TRUE(score_members_match(&db, "GP2_ZADD_KEY", {{0, "MM1"}, {0, "MM2"}, {0, "MM3"}})); + + // ***************** Group 3 Test ***************** + std::vector gp3_sm{{1 / 1.0, "MM1"}, {1 / 3.0, "MM2"}, {1 / 6.0, "MM3"}, {1 / 7.0, "MM4"}}; + s = db.ZAdd("GP3_ZADD_KEY", gp3_sm, &ret); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(4, ret); + ASSERT_TRUE(size_match(&db, "GP3_ZADD_KEY", 4)); + ASSERT_TRUE(score_members_match(&db, "GP3_ZADD_KEY", + {{1 / 7.0, "MM4"}, {1 / 6.0, "MM3"}, {1 / 3.0, "MM2"}, {1 / 1.0, "MM1"}})); + + // ***************** Group 4 Test ***************** + std::vector gp4_sm{{-1 / 1.0, "MM1"}, {-1 / 3.0, "MM2"}, {-1 / 6.0, "MM3"}, {-1 / 7.0, "MM4"}}; + s = db.ZAdd("GP4_ZADD_KEY", gp4_sm, &ret); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(4, ret); + ASSERT_TRUE(size_match(&db, "GP4_ZADD_KEY", 4)); + ASSERT_TRUE(score_members_match(&db, "GP4_ZADD_KEY", + {{-1 / 1.0, "MM1"}, {-1 / 3.0, "MM2"}, {-1 / 6.0, "MM3"}, {-1 / 7.0, "MM4"}})); + + // ***************** Group 5 Test ***************** + // [0, MM1] + s = db.ZAdd("GP5_ZADD_KEY", {{0, "MM1"}}, &ret); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(1, ret); + ASSERT_TRUE(size_match(&db, "GP5_ZADD_KEY", 1)); + ASSERT_TRUE(score_members_match(&db, "GP5_ZADD_KEY", {{0, "MM1"}})); + + // [-0.5333, MM2] + // [0, MM1] + s = db.ZAdd("GP5_ZADD_KEY", {{-0.5333, "MM2"}}, &ret); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(1, ret); + ASSERT_TRUE(size_match(&db, "GP5_ZADD_KEY", 2)); + ASSERT_TRUE(score_members_match(&db, "GP5_ZADD_KEY", {{-0.5333, "MM2"}, {0, "MM1"}})); + + // [-0.5333, MM2] + // [0, MM1] + // [1.79769e+308, MM3] + s = db.ZAdd("GP5_ZADD_KEY", {{1.79769e+308, "MM3"}}, &ret); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(1, ret); + ASSERT_TRUE(size_match(&db, "GP5_ZADD_KEY", 3)); + ASSERT_TRUE(score_members_match(&db, "GP5_ZADD_KEY", {{-0.5333, "MM2"}, {0, "MM1"}, {1.79769e+308, "MM3"}})); + + // [-0.5333, MM2] + // [0, MM1] + // [50000, MM4] + // [1.79769e+308, MM3] + s = db.ZAdd("GP5_ZADD_KEY", {{50000, "MM4"}}, &ret); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(1, ret); + ASSERT_TRUE(size_match(&db, "GP5_ZADD_KEY", 4)); + ASSERT_TRUE( + score_members_match(&db, "GP5_ZADD_KEY", {{-0.5333, "MM2"}, {0, "MM1"}, {50000, "MM4"}, {1.79769e+308, "MM3"}})); + + // [-1.79769e+308, MM5] + // [-0.5333, MM2] + // [0, MM1] + // [50000, MM4] + // [1.79769e+308, MM3] + s = db.ZAdd("GP5_ZADD_KEY", {{-1.79769e+308, "MM5"}}, &ret); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(1, ret); + ASSERT_TRUE(size_match(&db, 
"GP5_ZADD_KEY", 5)); + ASSERT_TRUE(score_members_match( + &db, "GP5_ZADD_KEY", + {{-1.79769e+308, "MM5"}, {-0.5333, "MM2"}, {0, "MM1"}, {50000, "MM4"}, {1.79769e+308, "MM3"}})); + + // [-1.79769e+308, MM5] + // [-0.5333, MM2] + // [0, MM1] + // [0, MM6] + // [50000, MM4] + // [1.79769e+308, MM3] + s = db.ZAdd("GP5_ZADD_KEY", {{0, "MM6"}}, &ret); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(1, ret); + ASSERT_TRUE(size_match(&db, "GP5_ZADD_KEY", 6)); + ASSERT_TRUE(score_members_match( + &db, "GP5_ZADD_KEY", + {{-1.79769e+308, "MM5"}, {-0.5333, "MM2"}, {0, "MM1"}, {0, "MM6"}, {50000, "MM4"}, {1.79769e+308, "MM3"}})); + + // [-1.79769e+308, MM5] + // [-0.5333, MM2] + // [0, MM1] + // [50000, MM4] + // [100000, MM6] + // [1.79769e+308, MM3] + s = db.ZAdd("GP5_ZADD_KEY", {{100000, "MM6"}}, &ret); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(0, ret); + ASSERT_TRUE(size_match(&db, "GP5_ZADD_KEY", 6)); + ASSERT_TRUE(score_members_match( + &db, "GP5_ZADD_KEY", + {{-1.79769e+308, "MM5"}, {-0.5333, "MM2"}, {0, "MM1"}, {50000, "MM4"}, {100000, "MM6"}, {1.79769e+308, "MM3"}})); + + // [-1.79769e+308, MM5] + // [-0.5333, MM2] + // [-0.5333, MM7] + // [0, MM1] + // [50000, MM4] + // [100000, MM6] + // [1.79769e+308, MM3] + s = db.ZAdd("GP5_ZADD_KEY", {{-0.5333, "MM7"}}, &ret); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(1, ret); + ASSERT_TRUE(size_match(&db, "GP5_ZADD_KEY", 7)); + ASSERT_TRUE(score_members_match(&db, "GP5_ZADD_KEY", + {{-1.79769e+308, "MM5"}, + {-0.5333, "MM2"}, + {-0.5333, "MM7"}, + {0, "MM1"}, + {50000, "MM4"}, + {100000, "MM6"}, + {1.79769e+308, "MM3"}})); + + // [-1.79769e+308, MM5] + // [-0.5333, MM2] + // [-0.5333, MM7] + // [-1/3.0f, MM8] + // [0, MM1] + // [50000, MM4] + // [100000, MM6] + // [1.79769e+308, MM3] + s = db.ZAdd("GP5_ZADD_KEY", {{-1 / 3.0, "MM8"}}, &ret); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(1, ret); + ASSERT_TRUE(size_match(&db, "GP5_ZADD_KEY", 8)); + ASSERT_TRUE(score_members_match(&db, "GP5_ZADD_KEY", + {{-1.79769e+308, "MM5"}, + {-0.5333, "MM2"}, + {-0.5333, "MM7"}, + {-1 / 3.0, "MM8"}, + {0, "MM1"}, + {50000, "MM4"}, + {100000, "MM6"}, + {1.79769e+308, "MM3"}})); + + // [-1.79769e+308, MM5] + // [-0.5333, MM2] + // [-0.5333, MM7] + // [-1/3.0f, MM8] + // [0, MM1] + // [1/3.0f, MM9] + // [50000, MM4] + // [100000, MM6] + // [1.79769e+308, MM3] + s = db.ZAdd("GP5_ZADD_KEY", {{1 / 3.0, "MM9"}}, &ret); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(1, ret); + ASSERT_TRUE(size_match(&db, "GP5_ZADD_KEY", 9)); + ASSERT_TRUE(score_members_match(&db, "GP5_ZADD_KEY", + {{-1.79769e+308, "MM5"}, + {-0.5333, "MM2"}, + {-0.5333, "MM7"}, + {-1 / 3.0, "MM8"}, + {0, "MM1"}, + {1 / 3.0, "MM9"}, + {50000, "MM4"}, + {100000, "MM6"}, + {1.79769e+308, "MM3"}})); + + // [0, MM1] + // [0, MM2] + // [0, MM3] + // [0, MM4] + // [0, MM5] + // [0, MM6] + // [0, MM7] + // [0, MM8] + // [0, MM9] + s = db.ZAdd( + "GP5_ZADD_KEY", + {{0, "MM1"}, {0, "MM2"}, {0, "MM3"}, {0, "MM4"}, {0, "MM5"}, {0, "MM6"}, {0, "MM7"}, {0, "MM8"}, {0, "MM9"}}, + &ret); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(0, ret); + ASSERT_TRUE(size_match(&db, "GP5_ZADD_KEY", 9)); + ASSERT_TRUE(score_members_match( + &db, "GP5_ZADD_KEY", + {{0, "MM1"}, {0, "MM2"}, {0, "MM3"}, {0, "MM4"}, {0, "MM5"}, {0, "MM6"}, {0, "MM7"}, {0, "MM8"}, {0, "MM9"}})); + + // ***************** Group 6 Test ***************** + std::vector gp6_sm1{{-1, "MM1"}, {0, "MM2"}, {1, "MM3"}}; + s = db.ZAdd("GP6_ZADD_KEY", gp6_sm1, &ret); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(3, ret); + ASSERT_TRUE(size_match(&db, "GP6_ZADD_KEY", 3)); + ASSERT_TRUE(score_members_match(&db, "GP6_ZADD_KEY", {{-1, "MM1"}, {0, 
"MM2"}, {1, "MM3"}})); + ASSERT_TRUE(make_expired(&db, "GP6_ZADD_KEY")); + ASSERT_TRUE(size_match(&db, "GP6_ZADD_KEY", 0)); + ASSERT_TRUE(score_members_match(&db, "GP6_ZADD_KEY", {})); + + std::vector gp6_sm2{{-100, "MM1"}, {0, "MM2"}, {100, "MM3"}}; + s = db.ZAdd("GP6_ZADD_KEY", gp6_sm2, &ret); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(3, ret); + ASSERT_TRUE(size_match(&db, "GP6_ZADD_KEY", 3)); + ASSERT_TRUE(score_members_match(&db, "GP6_ZADD_KEY", {{-100, "MM1"}, {0, "MM2"}, {100, "MM3"}})); + + // ***************** Group 7 Test ***************** + std::vector gp7_sm1{{-0.123456789, "MM1"}, {0, "MM2"}, {0.123456789, "MM3"}}; + s = db.ZAdd("GP7_ZADD_KEY", gp7_sm1, &ret); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(3, ret); + ASSERT_TRUE(size_match(&db, "GP7_ZADD_KEY", 3)); + ASSERT_TRUE(score_members_match(&db, "GP7_ZADD_KEY", {{-0.123456789, "MM1"}, {0, "MM2"}, {0.123456789, "MM3"}})); + ASSERT_TRUE(delete_key(&db, "GP7_ZADD_KEY")); + ASSERT_TRUE(size_match(&db, "GP7_ZADD_KEY", 0)); + ASSERT_TRUE(score_members_match(&db, "GP7_ZADD_KEY", {})); + + std::vector gp7_sm2{{-1234.56789, "MM1"}, {0, "MM2"}, {1234.56789, "MM3"}}; + s = db.ZAdd("GP7_ZADD_KEY", gp7_sm2, &ret); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(3, ret); + ASSERT_TRUE(size_match(&db, "GP7_ZADD_KEY", 3)); + ASSERT_TRUE(score_members_match(&db, "GP7_ZADD_KEY", {{-1234.56789, "MM1"}, {0, "MM2"}, {1234.56789, "MM3"}})); + + s = db.ZAdd("GP7_ZADD_KEY", {{1234.56789, "MM1"}}, &ret); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(0, ret); + ASSERT_TRUE(size_match(&db, "GP7_ZADD_KEY", 3)); + ASSERT_TRUE(score_members_match(&db, "GP7_ZADD_KEY", {{0, "MM2"}, {1234.56789, "MM1"}, {1234.56789, "MM3"}})); + + // ***************** Group 8 Test ***************** + std::vector gp8_sm1{{1, "MM1"}}; + std::vector gp8_sm2{{2, "MM2"}}; + s = db.ZAdd("GP8_ZADD_KEY", gp8_sm1, &ret); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(1, ret); + ASSERT_TRUE(size_match(&db, "GP8_ZADD_KEY", 1)); + ASSERT_TRUE(score_members_match(&db, "GP8_ZADD_KEY", {{1, "MM1"}})); + + type_status.clear(); + ret = db.Expire("GP8_ZADD_KEY", 100); + ASSERT_EQ(ret, 1); + ASSERT_TRUE(type_status[storage::DataType::kZSets].ok()); + + type_status.clear(); + type_ttl = db.TTL("GP8_ZADD_KEY"); + ASSERT_LE(type_ttl, 100); + ASSERT_GE(type_ttl, 0); + + s = db.ZRem("GP8_ZADD_KEY", {"MM1"}, &ret); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(ret, 1); + + s = db.ZAdd("GP8_ZADD_KEY", gp8_sm2, &ret); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(1, ret); + ASSERT_TRUE(size_match(&db, "GP8_ZADD_KEY", 1)); + ASSERT_TRUE(score_members_match(&db, "GP8_ZADD_KEY", {{2, "MM2"}})); + + type_status.clear(); + type_ttl = db.TTL("GP8_ZADD_KEY"); + ASSERT_EQ(type_ttl, -1); +} + +// ZCard +TEST_F(ZSetsTest, ZCardTest) { // NOLINT + int32_t ret; + double score; + + // ***************** Group 1 Test ***************** + std::vector gp1_sm{{-1, "MM1"}, {-2, "MM2"}, {-3, "MM3"}, {-4, "MM4"}}; + s = db.ZAdd("GP1_ZCARD_KEY", gp1_sm, &ret); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(4, ret); + ASSERT_TRUE(size_match(&db, "GP1_ZCARD_KEY", 4)); + ASSERT_TRUE(score_members_match(&db, "GP1_ZCARD_KEY", {{-4, "MM4"}, {-3, "MM3"}, {-2, "MM2"}, {-1, "MM1"}})); + s = db.ZCard("GP1_ZCARD_KEY", &ret); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(4, ret); + + // ***************** Group 2 Test ***************** + std::vector gp2_sm{{1, "MM1"}, {2, "MM2"}, {3, "MM3"}, {4, "MM4"}, {5, "MM5"}}; + s = db.ZAdd("GP2_ZCARD_KEY", gp2_sm, &ret); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(5, ret); + ASSERT_TRUE(size_match(&db, "GP2_ZCARD_KEY", 5)); + ASSERT_TRUE(score_members_match(&db, "GP2_ZCARD_KEY", {{1, "MM1"}, {2, 
"MM2"}, {3, "MM3"}, {4, "MM4"}, {5, "MM5"}})); + s = db.ZCard("GP2_ZCARD_KEY", &ret); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(5, ret); + + // ***************** Group 3 Test ***************** + std::vector gp3_sm{{1, "MM1"}, {2, "MM2"}, {3, "MM3"}, {4, "MM4"}, {5, "MM5"}}; + s = db.ZAdd("GP3_ZCARD_KEY", gp3_sm, &ret); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(5, ret); + ASSERT_TRUE(size_match(&db, "GP3_ZCARD_KEY", 5)); + ASSERT_TRUE(score_members_match(&db, "GP3_ZCARD_KEY", {{1, "MM1"}, {2, "MM2"}, {3, "MM3"}, {4, "MM4"}, {5, "MM5"}})); + ASSERT_TRUE(make_expired(&db, "GP3_ZCARD_KEY")); + s = db.ZCard("GP3_ZCARD_KEY", &ret); + ASSERT_TRUE(s.IsNotFound()); + ASSERT_EQ(0, ret); + + // ***************** Group 4 Test ***************** + s = db.ZCard("GP4_ZCARD_KEY", &ret); + ASSERT_TRUE(s.IsNotFound()); + ASSERT_EQ(0, ret); +} + +// ZCount +TEST_F(ZSetsTest, ZCountTest) { // NOLINT + int32_t ret; + + // ***************** Group 1 Test ***************** + std::vector gp1_sm{{101010.1010101, "MM1"}, {101010.0101010, "MM2"}, {-100.000000001, "MM3"}, + {-100.000000002, "MM4"}, {-100.000000001, "MM5"}, {-100.000000002, "MM6"}}; + s = db.ZAdd("GP1_ZCOUNT_KEY", gp1_sm, &ret); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(6, ret); + ASSERT_TRUE(size_match(&db, "GP1_ZCOUNT_KEY", 6)); + ASSERT_TRUE(score_members_match(&db, "GP1_ZCOUNT_KEY", + {{-100.000000002, "MM4"}, + {-100.000000002, "MM6"}, + {-100.000000001, "MM3"}, + {-100.000000001, "MM5"}, + {101010.0101010, "MM2"}, + {101010.1010101, "MM1"}})); + + s = db.ZCount("GP1_ZCOUNT_KEY", -100.000000002, 101010.1010101, true, true, &ret); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(ret, 6); + + s = db.ZCount("GP1_ZCOUNT_KEY", -100.000000002, 101010.1010101, false, true, &ret); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(ret, 4); + + s = db.ZCount("GP1_ZCOUNT_KEY", -100.000000002, 101010.1010101, true, false, &ret); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(ret, 5); + + s = db.ZCount("GP1_ZCOUNT_KEY", -100.000000002, 101010.1010101, false, false, &ret); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(ret, 3); + + s = db.ZCount("GP1_ZCOUNT_KEY", -100000000, 100000000, true, true, &ret); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(ret, 6); + + s = db.ZCount("GP1_ZCOUNT_KEY", -100000000, 100000000, false, false, &ret); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(ret, 6); + + s = db.ZCount("GP1_ZCOUNT_KEY", -100.000000002, -100.000000002, true, true, &ret); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(ret, 2); + + s = db.ZCount("GP1_ZCOUNT_KEY", -100.000000002, -100.000000002, false, true, &ret); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(ret, 0); + + s = db.ZCount("GP1_ZCOUNT_KEY", -100.000000002, -100.000000002, true, false, &ret); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(ret, 0); + + s = db.ZCount("GP1_ZCOUNT_KEY", -100.000000001, -100.000000001, true, true, &ret); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(ret, 2); + + s = db.ZCount("GP1_ZCOUNT_KEY", -100000000, 100, true, true, &ret); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(ret, 4); + + s = db.ZCount("GP1_ZCOUNT_KEY", -100.000000001, 100000000, true, true, &ret); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(ret, 4); + + s = db.ZCount("GP1_ZCOUNT_KEY", -100.000000001, 100000000, false, true, &ret); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(ret, 2); + + // ***************** Group 2 Test ***************** + std::vector gp2_sm{{0, "MM0"}, {1, "MM1"}, {2, "MM2"}, {3, "MM3"}, {4, "MM4"}, + {5, "MM5"}, {6, "MM6"}, {7, "MM7"}, {8, "MM8"}}; + s = db.ZAdd("GP2_ZCOUNT_KEY", gp2_sm, &ret); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(9, ret); + ASSERT_TRUE(size_match(&db, "GP2_ZCOUNT_KEY", 9)); + ASSERT_TRUE(score_members_match( + &db, "GP2_ZCOUNT_KEY", + 
{{0, "MM0"}, {1, "MM1"}, {2, "MM2"}, {3, "MM3"}, {4, "MM4"}, {5, "MM5"}, {6, "MM6"}, {7, "MM7"}, {8, "MM8"}})); + ASSERT_TRUE(make_expired(&db, "GP2_ZCOUNT_KEY")); + s = db.ZCount("GP2_ZCOUNT_KEY", -100000000, 100000000, true, true, &ret); + ASSERT_TRUE(s.IsNotFound()); + ASSERT_EQ(ret, 0); + + // ***************** Group 3 Test ***************** + s = db.ZCount("GP3_ZCOUNT_KEY", -100000000, 100000000, true, true, &ret); + ASSERT_TRUE(s.IsNotFound()); + ASSERT_EQ(ret, 0); + + // ***************** Group 4 Test ***************** + std::vector gp4_sm{{0, "MM0"}, {1, "MM1"}, {2, "MM2"}, {3, "MM3"}, {4, "MM4"}, + {5, "MM5"}, {6, "MM6"}, {7, "MM7"}, {8, "MM8"}}; + s = db.ZAdd("GP4_ZCOUNT_KEY", gp4_sm, &ret); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(9, ret); + ASSERT_TRUE(size_match(&db, "GP4_ZCOUNT_KEY", 9)); + ASSERT_TRUE(score_members_match( + &db, "GP4_ZCOUNT_KEY", + {{0, "MM0"}, {1, "MM1"}, {2, "MM2"}, {3, "MM3"}, {4, "MM4"}, {5, "MM5"}, {6, "MM6"}, {7, "MM7"}, {8, "MM8"}})); + + s = db.ZCount("GP4_ZCOUNT_KEY", -100, -50, true, true, &ret); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(ret, 0); + + s = db.ZCount("GP4_ZCOUNT_KEY", -100, 0, true, true, &ret); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(ret, 1); + + s = db.ZCount("GP4_ZCOUNT_KEY", -100, 0, true, false, &ret); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(ret, 0); + + s = db.ZCount("GP4_ZCOUNT_KEY", -100, 4, true, true, &ret); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(ret, 5); + + s = db.ZCount("GP4_ZCOUNT_KEY", -100, 4, true, false, &ret); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(ret, 4); + + s = db.ZCount("GP4_ZCOUNT_KEY", 0, 8, true, true, &ret); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(ret, 9); + + s = db.ZCount("GP4_ZCOUNT_KEY", 0, 8, false, true, &ret); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(ret, 8); + + s = db.ZCount("GP4_ZCOUNT_KEY", 0, 8, true, false, &ret); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(ret, 8); + + s = db.ZCount("GP4_ZCOUNT_KEY", 0, 8, false, false, &ret); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(ret, 7); + + s = db.ZCount("GP4_ZCOUNT_KEY", 3, 5, true, true, &ret); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(ret, 3); + + s = db.ZCount("GP4_ZCOUNT_KEY", 3, 5, false, true, &ret); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(ret, 2); + + s = db.ZCount("GP4_ZCOUNT_KEY", 3, 5, true, false, &ret); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(ret, 2); + + s = db.ZCount("GP4_ZCOUNT_KEY", 3, 5, false, false, &ret); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(ret, 1); + + s = db.ZCount("GP4_ZCOUNT_KEY", 100, 100, true, true, &ret); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(ret, 0); + + s = db.ZCount("GP4_ZCOUNT_KEY", 0, 0, true, true, &ret); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(ret, 1); + + s = db.ZCount("GP4_ZCOUNT_KEY", 0, 0, false, true, &ret); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(ret, 0); + + s = db.ZCount("GP4_ZCOUNT_KEY", 8, 8, true, true, &ret); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(ret, 1); + + s = db.ZCount("GP4_ZCOUNT_KEY", 7, 8, true, true, &ret); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(ret, 2); + + s = db.ZCount("GP4_ZCOUNT_KEY", 7, 8, false, true, &ret); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(ret, 1); + + s = db.ZCount("GP4_ZCOUNT_KEY", 7, 8, true, false, &ret); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(ret, 1); + + s = db.ZCount("GP4_ZCOUNT_KEY", 7, 8, false, false, &ret); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(ret, 0); +} + +// ZIncrby +TEST_F(ZSetsTest, ZIncrbyTest) { // NOLINT + int32_t ret; + double score; + int64_t type_ttl; + std::map type_status; + + // ***************** Group 1 Test ***************** + std::vector gp1_sm{{101010.1010101, "MM1"}, {101010.0101010, "MM2"}}; + s = db.ZAdd("GP1_ZINCRBY_KEY", gp1_sm, &ret); + 
ASSERT_TRUE(s.ok()); + ASSERT_EQ(2, ret); + ASSERT_TRUE(size_match(&db, "GP1_ZINCRBY_KEY", 2)); + ASSERT_TRUE(score_members_match(&db, "GP1_ZINCRBY_KEY", {{101010.0101010, "MM2"}, {101010.1010101, "MM1"}})); + + s = db.ZIncrby("GP1_ZINCRBY_KEY", "MM1", -0.1010101, &score); + ASSERT_TRUE(s.ok()); + ASSERT_DOUBLE_EQ(score, 101010); + + s = db.ZIncrby("GP1_ZINCRBY_KEY", "MM2", -0.0101010, &score); + ASSERT_TRUE(s.ok()); + ASSERT_DOUBLE_EQ(score, 101010); + + s = db.ZIncrby("GP1_ZINCRBY_KEY", "MM3", 101010, &score); + ASSERT_TRUE(s.ok()); + ASSERT_DOUBLE_EQ(score, 101010); + + ASSERT_TRUE(size_match(&db, "GP1_ZINCRBY_KEY", 3)); + ASSERT_TRUE(score_members_match(&db, "GP1_ZINCRBY_KEY", {{101010, "MM1"}, {101010, "MM2"}, {101010, "MM3"}})); + + // ***************** Group 2 Test ***************** + std::vector gp2_sm{{101010.1010101010, "MM1"}}; + s = db.ZAdd("GP2_ZINCRBY_KEY", gp2_sm, &ret); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(1, ret); + ASSERT_TRUE(size_match(&db, "GP2_ZINCRBY_KEY", 1)); + ASSERT_TRUE(score_members_match(&db, "GP2_ZINCRBY_KEY", {{101010.1010101010, "MM1"}})); + + s = db.ZIncrby("GP2_ZINCRBY_KEY", "MM1", 0.0101010101, &score); + ASSERT_TRUE(s.ok()); + ASSERT_DOUBLE_EQ(score, 101010.1111111111); + + s = db.ZIncrby("GP2_ZINCRBY_KEY", "MM1", -0.11111, &score); + ASSERT_TRUE(s.ok()); + ASSERT_DOUBLE_EQ(score, 101010.0000011111); + + s = db.ZIncrby("GP2_ZINCRBY_KEY", "MM1", -0.0000011111, &score); + ASSERT_TRUE(s.ok()); + ASSERT_DOUBLE_EQ(score, 101010); + + s = db.ZIncrby("GP2_ZINCRBY_KEY", "MM1", 101010, &score); + ASSERT_TRUE(s.ok()); + ASSERT_DOUBLE_EQ(score, 202020); + + ASSERT_TRUE(size_match(&db, "GP2_ZINCRBY_KEY", 1)); + ASSERT_TRUE(score_members_match(&db, "GP2_ZINCRBY_KEY", {{202020, "MM1"}})); + + // ***************** Group 3 Test ***************** + std::vector gp3_sm{{1, "MM1"}, {2, "MM2"}, {3, "MM3"}}; + s = db.ZAdd("GP3_ZINCRBY_KEY", gp3_sm, &ret); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(3, ret); + ASSERT_TRUE(size_match(&db, "GP3_ZINCRBY_KEY", 3)); + ASSERT_TRUE(score_members_match(&db, "GP3_ZINCRBY_KEY", {{1, "MM1"}, {2, "MM2"}, {3, "MM3"}})); + + ASSERT_TRUE(make_expired(&db, "GP3_ZINCRBY_KEY")); + ASSERT_TRUE(size_match(&db, "GP3_ZINCRBY_KEY", 0)); + ASSERT_TRUE(score_members_match(&db, "GP3_ZINCRBY_KEY", {})); + + s = db.ZIncrby("GP3_ZINCRBY_KEY", "MM1", 101010.010101, &score); + ASSERT_TRUE(s.ok()); + ASSERT_DOUBLE_EQ(score, 101010.010101); + ASSERT_TRUE(size_match(&db, "GP3_ZINCRBY_KEY", 1)); + ASSERT_TRUE(score_members_match(&db, "GP3_ZINCRBY_KEY", {{101010.010101, "MM1"}})); + + s = db.ZIncrby("GP3_ZINCRBY_KEY", "MM2", 202020.020202, &score); + ASSERT_TRUE(s.ok()); + ASSERT_DOUBLE_EQ(score, 202020.020202); + ASSERT_TRUE(size_match(&db, "GP3_ZINCRBY_KEY", 2)); + ASSERT_TRUE(score_members_match(&db, "GP3_ZINCRBY_KEY", {{101010.010101, "MM1"}, {202020.020202, "MM2"}})); + + s = db.ZIncrby("GP3_ZINCRBY_KEY", "MM3", 303030.030303, &score); + ASSERT_TRUE(s.ok()); + ASSERT_DOUBLE_EQ(score, 303030.030303); + ASSERT_TRUE(size_match(&db, "GP3_ZINCRBY_KEY", 3)); + ASSERT_TRUE(score_members_match(&db, "GP3_ZINCRBY_KEY", + {{101010.010101, "MM1"}, {202020.020202, "MM2"}, {303030.030303, "MM3"}})); + + s = db.ZIncrby("GP3_ZINCRBY_KEY", "MM1", 303030.030303, &score); + ASSERT_TRUE(s.ok()); + ASSERT_DOUBLE_EQ(score, 404040.040404); + ASSERT_TRUE(size_match(&db, "GP3_ZINCRBY_KEY", 3)); + ASSERT_TRUE(score_members_match(&db, "GP3_ZINCRBY_KEY", + {{202020.020202, "MM2"}, {303030.030303, "MM3"}, {404040.040404, "MM1"}})); + + // ***************** Group 4 Test ***************** + s = 
db.ZIncrby("GP4_ZINCRBY_KEY", "MM1", -101010.010101, &score); + ASSERT_TRUE(s.ok()); + ASSERT_DOUBLE_EQ(score, -101010.010101); + ASSERT_TRUE(size_match(&db, "GP4_ZINCRBY_KEY", 1)); + ASSERT_TRUE(score_members_match(&db, "GP4_ZINCRBY_KEY", {{-101010.010101, "MM1"}})); + + s = db.ZIncrby("GP4_ZINCRBY_KEY", "MM2", 101010.010101, &score); + ASSERT_TRUE(s.ok()); + ASSERT_DOUBLE_EQ(score, 101010.010101); + ASSERT_TRUE(size_match(&db, "GP4_ZINCRBY_KEY", 2)); + ASSERT_TRUE(score_members_match(&db, "GP4_ZINCRBY_KEY", {{-101010.010101, "MM1"}, {101010.010101, "MM2"}})); + + // ***************** Group 5 Test ***************** + s = db.ZAdd("GP5_ZINCRBY_KEY", {{1, "MM1"}}, &ret); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(ret, 1); + ASSERT_TRUE(make_expired(&db, "GP5_ZINCRBY_KEY")); + + s = db.ZIncrby("GP5_ZINCRBY_KEY", "MM2", 2, &score); + ASSERT_TRUE(s.ok()); + ASSERT_DOUBLE_EQ(score, 2); + ASSERT_TRUE(size_match(&db, "GP5_ZINCRBY_KEY", 1)); + ASSERT_TRUE(score_members_match(&db, "GP5_ZINCRBY_KEY", {{2, "MM2"}})); + + // ***************** Group 6 Test ***************** + s = db.ZAdd("GP6_ZINCRBY_KEY", {{1, "MM1"}}, &ret); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(ret, 1); + + type_status.clear(); + ret = db.Expire("GP6_ZINCRBY_KEY", 100); + ASSERT_EQ(ret, 1); + ASSERT_TRUE(type_status[storage::DataType::kZSets].ok()); + + type_status.clear(); + type_ttl = db.TTL("GP6_ZINCRBY_KEY"); + ASSERT_LE(type_ttl, 100); + ASSERT_GE(type_ttl, 0); + + s = db.ZRem("GP6_ZINCRBY_KEY", {"MM1"}, &ret); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(ret, 1); + + s = db.ZIncrby("GP6_ZINCRBY_KEY", "MM1", 1, &score); + ASSERT_TRUE(s.ok()); + ASSERT_DOUBLE_EQ(score, 1); + ASSERT_TRUE(size_match(&db, "GP6_ZINCRBY_KEY", 1)); + ASSERT_TRUE(score_members_match(&db, "GP6_ZINCRBY_KEY", {{1, "MM1"}})); + + type_status.clear(); + type_ttl = db.TTL("GP6_ZINCRBY_KEY"); + ASSERT_EQ(type_ttl, -1); +} + +// ZRange +TEST_F(ZSetsTest, ZRangeTest) { // NOLINT + int32_t ret; + std::vector score_members; + + // ***************** Group 1 Test ***************** + std::vector gp1_sm{{0, "MM1"}}; + s = db.ZAdd("GP1_ZRANGE_KEY", gp1_sm, &ret); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(1, ret); + ASSERT_TRUE(size_match(&db, "GP1_ZRANGE_KEY", 1)); + ASSERT_TRUE(score_members_match(&db, "GP1_ZRANGE_KEY", {{0, "MM1"}})); + + s = db.ZRange("GP1_ZRANGE_KEY", 0, -1, &score_members); + ASSERT_TRUE(s.ok()); + ASSERT_TRUE(score_members_match(score_members, {{0, "MM1"}})); + + // ***************** Group 2 Test ***************** + // + // {0, MM0} {1, MM1} {2, MM2} {3, MM3} {4, MM4} {5, MM5} {6, MM6} {7, MM7} {8, MM8} + // 0 1 2 3 4 5 6 7 8 + // -9 -8 -7 -6 -5 -4 -3 -2 -1 + std::vector gp2_sm{{0, "MM0"}, {1, "MM1"}, {2, "MM2"}, {3, "MM3"}, {4, "MM4"}, + {5, "MM5"}, {6, "MM6"}, {7, "MM7"}, {8, "MM8"}}; + s = db.ZAdd("GP2_ZRANGE_KEY", gp2_sm, &ret); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(9, ret); + ASSERT_TRUE(size_match(&db, "GP2_ZRANGE_KEY", 9)); + ASSERT_TRUE(score_members_match( + &db, "GP2_ZRANGE_KEY", + {{0, "MM0"}, {1, "MM1"}, {2, "MM2"}, {3, "MM3"}, {4, "MM4"}, {5, "MM5"}, {6, "MM6"}, {7, "MM7"}, {8, "MM8"}})); + + s = db.ZRange("GP2_ZRANGE_KEY", 0, 8, &score_members); + ASSERT_TRUE(s.ok()); + ASSERT_TRUE(score_members_match( + score_members, + {{0, "MM0"}, {1, "MM1"}, {2, "MM2"}, {3, "MM3"}, {4, "MM4"}, {5, "MM5"}, {6, "MM6"}, {7, "MM7"}, {8, "MM8"}})); + + s = db.ZRange("GP2_ZRANGE_KEY", -9, -1, &score_members); + ASSERT_TRUE(s.ok()); + ASSERT_TRUE(score_members_match( + score_members, + {{0, "MM0"}, {1, "MM1"}, {2, "MM2"}, {3, "MM3"}, {4, "MM4"}, {5, "MM5"}, {6, "MM6"}, {7, "MM7"}, 
{8, "MM8"}})); + + s = db.ZRange("GP2_ZRANGE_KEY", 0, -1, &score_members); + ASSERT_TRUE(s.ok()); + ASSERT_TRUE(score_members_match( + score_members, + {{0, "MM0"}, {1, "MM1"}, {2, "MM2"}, {3, "MM3"}, {4, "MM4"}, {5, "MM5"}, {6, "MM6"}, {7, "MM7"}, {8, "MM8"}})); + + s = db.ZRange("GP2_ZRANGE_KEY", -9, 8, &score_members); + ASSERT_TRUE(s.ok()); + ASSERT_TRUE(score_members_match( + score_members, + {{0, "MM0"}, {1, "MM1"}, {2, "MM2"}, {3, "MM3"}, {4, "MM4"}, {5, "MM5"}, {6, "MM6"}, {7, "MM7"}, {8, "MM8"}})); + + s = db.ZRange("GP2_ZRANGE_KEY", -100, 8, &score_members); + ASSERT_TRUE(s.ok()); + ASSERT_TRUE(score_members_match( + score_members, + {{0, "MM0"}, {1, "MM1"}, {2, "MM2"}, {3, "MM3"}, {4, "MM4"}, {5, "MM5"}, {6, "MM6"}, {7, "MM7"}, {8, "MM8"}})); + + s = db.ZRange("GP2_ZRANGE_KEY", 0, 100, &score_members); + ASSERT_TRUE(s.ok()); + ASSERT_TRUE(score_members_match( + score_members, + {{0, "MM0"}, {1, "MM1"}, {2, "MM2"}, {3, "MM3"}, {4, "MM4"}, {5, "MM5"}, {6, "MM6"}, {7, "MM7"}, {8, "MM8"}})); + + s = db.ZRange("GP2_ZRANGE_KEY", -100, 100, &score_members); + ASSERT_TRUE(s.ok()); + ASSERT_TRUE(score_members_match( + score_members, + {{0, "MM0"}, {1, "MM1"}, {2, "MM2"}, {3, "MM3"}, {4, "MM4"}, {5, "MM5"}, {6, "MM6"}, {7, "MM7"}, {8, "MM8"}})); + + s = db.ZRange("GP2_ZRANGE_KEY", 0, 0, &score_members); + ASSERT_TRUE(s.ok()); + ASSERT_TRUE(score_members_match(score_members, {{0, "MM0"}})); + + s = db.ZRange("GP2_ZRANGE_KEY", -9, -9, &score_members); + ASSERT_TRUE(s.ok()); + ASSERT_TRUE(score_members_match(score_members, {{0, "MM0"}})); + + s = db.ZRange("GP2_ZRANGE_KEY", 8, 8, &score_members); + ASSERT_TRUE(s.ok()); + ASSERT_TRUE(score_members_match(score_members, {{8, "MM8"}})); + + s = db.ZRange("GP2_ZRANGE_KEY", -1, -1, &score_members); + ASSERT_TRUE(s.ok()); + ASSERT_TRUE(score_members_match(score_members, {{8, "MM8"}})); + + s = db.ZRange("GP2_ZRANGE_KEY", 0, 5, &score_members); + ASSERT_TRUE(s.ok()); + ASSERT_TRUE( + score_members_match(score_members, {{0, "MM0"}, {1, "MM1"}, {2, "MM2"}, {3, "MM3"}, {4, "MM4"}, {5, "MM5"}})); + + s = db.ZRange("GP2_ZRANGE_KEY", 0, -4, &score_members); + ASSERT_TRUE(s.ok()); + ASSERT_TRUE( + score_members_match(score_members, {{0, "MM0"}, {1, "MM1"}, {2, "MM2"}, {3, "MM3"}, {4, "MM4"}, {5, "MM5"}})); + + s = db.ZRange("GP2_ZRANGE_KEY", -9, -4, &score_members); + ASSERT_TRUE(s.ok()); + ASSERT_TRUE( + score_members_match(score_members, {{0, "MM0"}, {1, "MM1"}, {2, "MM2"}, {3, "MM3"}, {4, "MM4"}, {5, "MM5"}})); + + s = db.ZRange("GP2_ZRANGE_KEY", -9, 5, &score_members); + ASSERT_TRUE(s.ok()); + ASSERT_TRUE( + score_members_match(score_members, {{0, "MM0"}, {1, "MM1"}, {2, "MM2"}, {3, "MM3"}, {4, "MM4"}, {5, "MM5"}})); + + s = db.ZRange("GP2_ZRANGE_KEY", -100, 5, &score_members); + ASSERT_TRUE(s.ok()); + ASSERT_TRUE( + score_members_match(score_members, {{0, "MM0"}, {1, "MM1"}, {2, "MM2"}, {3, "MM3"}, {4, "MM4"}, {5, "MM5"}})); + + s = db.ZRange("GP2_ZRANGE_KEY", -100, -4, &score_members); + ASSERT_TRUE(s.ok()); + ASSERT_TRUE( + score_members_match(score_members, {{0, "MM0"}, {1, "MM1"}, {2, "MM2"}, {3, "MM3"}, {4, "MM4"}, {5, "MM5"}})); + + s = db.ZRange("GP2_ZRANGE_KEY", 3, 5, &score_members); + ASSERT_TRUE(s.ok()); + ASSERT_TRUE(score_members_match(score_members, {{3, "MM3"}, {4, "MM4"}, {5, "MM5"}})); + + s = db.ZRange("GP2_ZRANGE_KEY", -6, -4, &score_members); + ASSERT_TRUE(s.ok()); + ASSERT_TRUE(score_members_match(score_members, {{3, "MM3"}, {4, "MM4"}, {5, "MM5"}})); + + s = db.ZRange("GP2_ZRANGE_KEY", 3, -4, &score_members); + 
+  ASSERT_TRUE(s.ok());
+  ASSERT_TRUE(score_members_match(score_members, {{3, "MM3"}, {4, "MM4"}, {5, "MM5"}}));
+
+  s = db.ZRange("GP2_ZRANGE_KEY", -6, 5, &score_members);
+  ASSERT_TRUE(s.ok());
+  ASSERT_TRUE(score_members_match(score_members, {{3, "MM3"}, {4, "MM4"}, {5, "MM5"}}));
+
+  s = db.ZRange("GP2_ZRANGE_KEY", 3, 8, &score_members);
+  ASSERT_TRUE(s.ok());
+  ASSERT_TRUE(
+      score_members_match(score_members, {{3, "MM3"}, {4, "MM4"}, {5, "MM5"}, {6, "MM6"}, {7, "MM7"}, {8, "MM8"}}));
+
+  s = db.ZRange("GP2_ZRANGE_KEY", -6, -1, &score_members);
+  ASSERT_TRUE(s.ok());
+  ASSERT_TRUE(
+      score_members_match(score_members, {{3, "MM3"}, {4, "MM4"}, {5, "MM5"}, {6, "MM6"}, {7, "MM7"}, {8, "MM8"}}));
+
+  s = db.ZRange("GP2_ZRANGE_KEY", 3, -1, &score_members);
+  ASSERT_TRUE(s.ok());
+  ASSERT_TRUE(
+      score_members_match(score_members, {{3, "MM3"}, {4, "MM4"}, {5, "MM5"}, {6, "MM6"}, {7, "MM7"}, {8, "MM8"}}));
+
+  s = db.ZRange("GP2_ZRANGE_KEY", -6, 8, &score_members);
+  ASSERT_TRUE(s.ok());
+  ASSERT_TRUE(
+      score_members_match(score_members, {{3, "MM3"}, {4, "MM4"}, {5, "MM5"}, {6, "MM6"}, {7, "MM7"}, {8, "MM8"}}));
+
+  s = db.ZRange("GP2_ZRANGE_KEY", -6, 100, &score_members);
+  ASSERT_TRUE(s.ok());
+  ASSERT_TRUE(
+      score_members_match(score_members, {{3, "MM3"}, {4, "MM4"}, {5, "MM5"}, {6, "MM6"}, {7, "MM7"}, {8, "MM8"}}));
+
+  s = db.ZRange("GP2_ZRANGE_KEY", 3, 100, &score_members);
+  ASSERT_TRUE(s.ok());
+  ASSERT_TRUE(
+      score_members_match(score_members, {{3, "MM3"}, {4, "MM4"}, {5, "MM5"}, {6, "MM6"}, {7, "MM7"}, {8, "MM8"}}));
+
+  // ***************** Group 3 Test *****************
+  std::vector<storage::ScoreMember> gp3_sm{{0, "MM1"}};
+  s = db.ZAdd("GP3_ZRANGE_KEY", gp3_sm, &ret);
+  ASSERT_TRUE(s.ok());
+  ASSERT_EQ(1, ret);
+  ASSERT_TRUE(size_match(&db, "GP3_ZRANGE_KEY", 1));
+  ASSERT_TRUE(score_members_match(&db, "GP3_ZRANGE_KEY", {{0, "MM1"}}));
+  ASSERT_TRUE(make_expired(&db, "GP3_ZRANGE_KEY"));
+
+  s = db.ZRange("GP3_ZRANGE_KEY", 0, -1, &score_members);
+  ASSERT_TRUE(s.IsNotFound());
+  ASSERT_TRUE(score_members_match(score_members, {}));
+
+  // ***************** Group 4 Test *****************
+  s = db.ZRange("GP4_ZRANGE_KEY", 0, -1, &score_members);
+  ASSERT_TRUE(s.IsNotFound());
+  ASSERT_TRUE(score_members_match(score_members, {}));
+}
+
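+// In ZRangebyscore the two booleans are the left/right inclusiveness flags of
+// the score interval (true = closed bound); passing false excludes members
+// whose score equals that bound, as the paired true/false variants below
+// verify. A minimal sketch, assuming a fresh (hypothetical) key:
+//
+//   db.ZAdd("FRESH_ZRBS_KEY", {{1, "A"}, {2, "B"}}, &ret);
+//   db.ZRangebyscore("FRESH_ZRBS_KEY", 1, 2, false, true, &score_members);
+//   // expect: only {2, "B"}, because the left bound 1 is open
+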
{-1000.000000000001, "MM9"}, + {-100.0000000000001, "MM10"}, + {0, "MM11"}, + {100.0000000000001, "MM12"}, + {4321.000000000001, "MM13"}, + {54321.00000000001, "MM14"}, + {654321.0000000001, "MM15"}, + {7654321.000000001, "MM16"}, + {87654321.00000001, "MM17"}, + {987654321.0000001, "MM18"}})); + + // count = max offset = 0 + s = db.ZRangebyscore("GP1_ZRANGEBYSCORE_KEY", std::numeric_limits::lowest(), + std::numeric_limits::max(), true, true, std::numeric_limits::max(), 0, + &score_members); + ASSERT_TRUE(s.ok()); + ASSERT_TRUE(score_members_match(score_members, {{-987654321.0000001, "MM1"}, + {-87654321.00000001, "MM2"}, + {-7654321.000000001, "MM3"}, + {-654321.0000000001, "MM4"}, + {-54321.00000000001, "MM5"}, + {-4321.000000000001, "MM6"}, + {-1000.000000000001, "MM7"}, + {-1000.000000000001, "MM8"}, + {-1000.000000000001, "MM9"}, + {-100.0000000000001, "MM10"}, + {0, "MM11"}, + {100.0000000000001, "MM12"}, + {4321.000000000001, "MM13"}, + {54321.00000000001, "MM14"}, + {654321.0000000001, "MM15"}, + {7654321.000000001, "MM16"}, + {87654321.00000001, "MM17"}, + {987654321.0000001, "MM18"}})); + + // count = 18 offset = 0 + s = db.ZRangebyscore("GP1_ZRANGEBYSCORE_KEY", std::numeric_limits::lowest(), + std::numeric_limits::max(), true, true, 18, 0, &score_members); + ASSERT_TRUE(s.ok()); + ASSERT_TRUE(score_members_match(score_members, {{-987654321.0000001, "MM1"}, + {-87654321.00000001, "MM2"}, + {-7654321.000000001, "MM3"}, + {-654321.0000000001, "MM4"}, + {-54321.00000000001, "MM5"}, + {-4321.000000000001, "MM6"}, + {-1000.000000000001, "MM7"}, + {-1000.000000000001, "MM8"}, + {-1000.000000000001, "MM9"}, + {-100.0000000000001, "MM10"}, + {0, "MM11"}, + {100.0000000000001, "MM12"}, + {4321.000000000001, "MM13"}, + {54321.00000000001, "MM14"}, + {654321.0000000001, "MM15"}, + {7654321.000000001, "MM16"}, + {87654321.00000001, "MM17"}, + {987654321.0000001, "MM18"}})); + + // count = 10 offset = 0 + s = db.ZRangebyscore("GP1_ZRANGEBYSCORE_KEY", std::numeric_limits::lowest(), + std::numeric_limits::max(), true, true, 10, 0, &score_members); + ASSERT_TRUE(s.ok()); + ASSERT_TRUE(score_members_match(score_members, {{-987654321.0000001, "MM1"}, + {-87654321.00000001, "MM2"}, + {-7654321.000000001, "MM3"}, + {-654321.0000000001, "MM4"}, + {-54321.00000000001, "MM5"}, + {-4321.000000000001, "MM6"}, + {-1000.000000000001, "MM7"}, + {-1000.000000000001, "MM8"}, + {-1000.000000000001, "MM9"}, + {-100.0000000000001, "MM10"}})); + + // count = 10 offset = 1 + s = db.ZRangebyscore("GP1_ZRANGEBYSCORE_KEY", std::numeric_limits::lowest(), + std::numeric_limits::max(), true, true, 10, 1, &score_members); + ASSERT_TRUE(s.ok()); + ASSERT_TRUE(score_members_match(score_members, {{-87654321.00000001, "MM2"}, + {-7654321.000000001, "MM3"}, + {-654321.0000000001, "MM4"}, + {-54321.00000000001, "MM5"}, + {-4321.000000000001, "MM6"}, + {-1000.000000000001, "MM7"}, + {-1000.000000000001, "MM8"}, + {-1000.000000000001, "MM9"}, + {-100.0000000000001, "MM10"}, + {0, "MM11"}})); + + // count = 10 offset = 17 + s = db.ZRangebyscore("GP1_ZRANGEBYSCORE_KEY", std::numeric_limits::lowest(), + std::numeric_limits::max(), true, true, 10, 17, &score_members); + ASSERT_TRUE(s.ok()); + ASSERT_TRUE(score_members_match(score_members, {{987654321.0000001, "MM18"}})); + + // count = 10 offset = 18 + s = db.ZRangebyscore("GP1_ZRANGEBYSCORE_KEY", std::numeric_limits::lowest(), + std::numeric_limits::max(), true, true, 10, 18, &score_members); + ASSERT_TRUE(s.ok()); + ASSERT_TRUE(score_members_match(score_members, {})); + + // count 
+  // count = 10 offset = 19
+  s = db.ZRangebyscore("GP1_ZRANGEBYSCORE_KEY", std::numeric_limits<double>::lowest(),
+                       std::numeric_limits<double>::max(), true, true, 10, 19, &score_members);
+  ASSERT_TRUE(s.ok());
+  ASSERT_TRUE(score_members_match(score_members, {}));
+
+  // count = 10000 offset = 1
+  s = db.ZRangebyscore("GP1_ZRANGEBYSCORE_KEY", std::numeric_limits<double>::lowest(),
+                       std::numeric_limits<double>::max(), true, true, 10000, 1, &score_members);
+  ASSERT_TRUE(s.ok());
+  ASSERT_TRUE(score_members_match(
+      score_members, {{-87654321.00000001, "MM2"}, {-7654321.000000001, "MM3"}, {-654321.0000000001, "MM4"},
+                      {-54321.00000000001, "MM5"}, {-4321.000000000001, "MM6"}, {-1000.000000000001, "MM7"},
+                      {-1000.000000000001, "MM8"}, {-1000.000000000001, "MM9"}, {-100.0000000000001, "MM10"},
+                      {0, "MM11"}, {100.0000000000001, "MM12"}, {4321.000000000001, "MM13"},
+                      {54321.00000000001, "MM14"}, {654321.0000000001, "MM15"}, {7654321.000000001, "MM16"},
+                      {87654321.00000001, "MM17"}, {987654321.0000001, "MM18"}}));
+
+  // count = 10000 offset = 10000
+  s = db.ZRangebyscore("GP1_ZRANGEBYSCORE_KEY", std::numeric_limits<double>::lowest(),
+                       std::numeric_limits<double>::max(), true, true, 10000, 10000, &score_members);
+  ASSERT_TRUE(s.ok());
+  ASSERT_TRUE(score_members_match(score_members, {}));
+
+  s = db.ZRangebyscore("GP1_ZRANGEBYSCORE_KEY", std::numeric_limits<double>::lowest(), -1000.000000000001, true, true,
+                       &score_members);
+  ASSERT_TRUE(s.ok());
+  ASSERT_TRUE(score_members_match(
+      score_members, {{-987654321.0000001, "MM1"}, {-87654321.00000001, "MM2"}, {-7654321.000000001, "MM3"},
+                      {-654321.0000000001, "MM4"}, {-54321.00000000001, "MM5"}, {-4321.000000000001, "MM6"},
+                      {-1000.000000000001, "MM7"}, {-1000.000000000001, "MM8"}, {-1000.000000000001, "MM9"}}));
+
+  s = db.ZRangebyscore("GP1_ZRANGEBYSCORE_KEY", std::numeric_limits<double>::lowest(), -1000.000000000001, true, false,
+                       &score_members);
+  ASSERT_TRUE(s.ok());
+  ASSERT_TRUE(score_members_match(
+      score_members, {{-987654321.0000001, "MM1"}, {-87654321.00000001, "MM2"}, {-7654321.000000001, "MM3"},
+                      {-654321.0000000001, "MM4"}, {-54321.00000000001, "MM5"}, {-4321.000000000001, "MM6"}}));
+
+  s = db.ZRangebyscore("GP1_ZRANGEBYSCORE_KEY", -1000.000000000001, std::numeric_limits<double>::max(), true, true,
+                       &score_members);
+  ASSERT_TRUE(s.ok());
+  ASSERT_TRUE(score_members_match(
+      score_members, {{-1000.000000000001, "MM7"}, {-1000.000000000001, "MM8"}, {-1000.000000000001, "MM9"},
+                      {-100.0000000000001, "MM10"}, {0, "MM11"}, {100.0000000000001, "MM12"},
+                      {4321.000000000001, "MM13"}, {54321.00000000001, "MM14"}, {654321.0000000001, "MM15"},
+                      {7654321.000000001, "MM16"}, {87654321.00000001, "MM17"}, {987654321.0000001, "MM18"}}));
+
+  s = db.ZRangebyscore("GP1_ZRANGEBYSCORE_KEY", -1000.000000000001, std::numeric_limits<double>::max(), false, true,
+                       &score_members);
+  ASSERT_TRUE(s.ok());
+  ASSERT_TRUE(score_members_match(
+      score_members, {{-100.0000000000001, "MM10"}, {0, "MM11"}, {100.0000000000001, "MM12"},
+                      {4321.000000000001, "MM13"}, {54321.00000000001, "MM14"}, {654321.0000000001, "MM15"},
+                      {7654321.000000001, "MM16"}, {87654321.00000001, "MM17"}, {987654321.0000001, "MM18"}}));
+
+  s = db.ZRangebyscore("GP1_ZRANGEBYSCORE_KEY", -987654321.0000001, 987654321.0000001, true, true, &score_members);
+  ASSERT_TRUE(s.ok());
+  ASSERT_TRUE(score_members_match(
+      score_members, {{-987654321.0000001, "MM1"}, {-87654321.00000001, "MM2"}, {-7654321.000000001, "MM3"},
+                      {-654321.0000000001, "MM4"}, {-54321.00000000001, "MM5"}, {-4321.000000000001, "MM6"},
+                      {-1000.000000000001, "MM7"}, {-1000.000000000001, "MM8"}, {-1000.000000000001, "MM9"},
+                      {-100.0000000000001, "MM10"}, {0, "MM11"}, {100.0000000000001, "MM12"},
+                      {4321.000000000001, "MM13"}, {54321.00000000001, "MM14"}, {654321.0000000001, "MM15"},
+                      {7654321.000000001, "MM16"}, {87654321.00000001, "MM17"}, {987654321.0000001, "MM18"}}));
+
+  s = db.ZRangebyscore("GP1_ZRANGEBYSCORE_KEY", -987654321.0000001, 987654321.0000001, false, false, &score_members);
+  ASSERT_TRUE(s.ok());
+  ASSERT_TRUE(score_members_match(
+      score_members, {{-87654321.00000001, "MM2"}, {-7654321.000000001, "MM3"}, {-654321.0000000001, "MM4"},
+                      {-54321.00000000001, "MM5"}, {-4321.000000000001, "MM6"}, {-1000.000000000001, "MM7"},
+                      {-1000.000000000001, "MM8"}, {-1000.000000000001, "MM9"}, {-100.0000000000001, "MM10"},
+                      {0, "MM11"}, {100.0000000000001, "MM12"}, {4321.000000000001, "MM13"},
+                      {54321.00000000001, "MM14"}, {654321.0000000001, "MM15"}, {7654321.000000001, "MM16"},
+                      {87654321.00000001, "MM17"}}));
+
+  s = db.ZRangebyscore("GP1_ZRANGEBYSCORE_KEY", -999999999, -1000.000000000001, true, true, &score_members);
+  ASSERT_TRUE(s.ok());
+  ASSERT_TRUE(score_members_match(
+      score_members, {{-987654321.0000001, "MM1"}, {-87654321.00000001, "MM2"}, {-7654321.000000001, "MM3"},
+                      {-654321.0000000001, "MM4"}, {-54321.00000000001, "MM5"}, {-4321.000000000001, "MM6"},
+                      {-1000.000000000001, "MM7"}, {-1000.000000000001, "MM8"}, {-1000.000000000001, "MM9"}}));
+
+  s = db.ZRangebyscore("GP1_ZRANGEBYSCORE_KEY", -999999999, -1000.000000000001, true, false, &score_members);
+  ASSERT_TRUE(s.ok());
+  ASSERT_TRUE(score_members_match(
+      score_members, {{-987654321.0000001, "MM1"}, {-87654321.00000001, "MM2"}, {-7654321.000000001, "MM3"},
+                      {-654321.0000000001, "MM4"}, {-54321.00000000001, "MM5"}, {-4321.000000000001, "MM6"}}));
+
+  s = db.ZRangebyscore("GP1_ZRANGEBYSCORE_KEY", -654321.0000000001, -4321.000000000001, true, true, &score_members);
+  ASSERT_TRUE(s.ok());
+  ASSERT_TRUE(score_members_match(
+      score_members, {{-654321.0000000001, "MM4"}, {-54321.00000000001, "MM5"}, {-4321.000000000001, "MM6"}}));
+
+  s = db.ZRangebyscore("GP1_ZRANGEBYSCORE_KEY", -654321.0000000001, -4321.000000000001, false, false, &score_members);
+  ASSERT_TRUE(s.ok());
+  ASSERT_TRUE(score_members_match(score_members, {{-54321.00000000001, "MM5"}}));
+
+  s = db.ZRangebyscore("GP1_ZRANGEBYSCORE_KEY", 0, 0, true, true, &score_members);
+  ASSERT_TRUE(s.ok());
+  ASSERT_TRUE(score_members_match(score_members, {{0, "MM11"}}));
+
+  s = db.ZRangebyscore("GP1_ZRANGEBYSCORE_KEY", 0, 0, false, true, &score_members);
+  ASSERT_TRUE(s.ok());
+  ASSERT_TRUE(score_members_match(score_members, {}));
+
+  s = db.ZRangebyscore("GP1_ZRANGEBYSCORE_KEY", 4321.000000000001, 987654321.0000001, true, true, &score_members);
+  ASSERT_TRUE(s.ok());
+  ASSERT_TRUE(score_members_match(
+      score_members, {{4321.000000000001, "MM13"}, {54321.00000000001, "MM14"}, {654321.0000000001, "MM15"},
+                      {7654321.000000001, "MM16"}, {87654321.00000001, "MM17"}, {987654321.0000001, "MM18"}}));
+
+  s = db.ZRangebyscore("GP1_ZRANGEBYSCORE_KEY", 4321.000000000001, 987654321.0000001, false, true, &score_members);
+  ASSERT_TRUE(s.ok());
+  ASSERT_TRUE(score_members_match(
+      score_members, {{54321.00000000001, "MM14"}, {654321.0000000001, "MM15"}, {7654321.000000001, "MM16"},
+                      {87654321.00000001, "MM17"}, {987654321.0000001, "MM18"}}));
+
&score_members); + ASSERT_TRUE(s.ok()); + ASSERT_TRUE(score_members_match(score_members, { + {54321.00000000001, "MM14"}, + {654321.0000000001, "MM15"}, + {7654321.000000001, "MM16"}, + {87654321.00000001, "MM17"}, + })); + + s = db.ZRangebyscore("GP1_ZRANGEBYSCORE_KEY", -1000.000000000001, 987654321.0000001, true, true, &score_members); + ASSERT_TRUE(s.ok()); + ASSERT_TRUE(score_members_match(score_members, {{-1000.000000000001, "MM7"}, + {-1000.000000000001, "MM8"}, + {-1000.000000000001, "MM9"}, + {-100.0000000000001, "MM10"}, + {0, "MM11"}, + {100.0000000000001, "MM12"}, + {4321.000000000001, "MM13"}, + {54321.00000000001, "MM14"}, + {654321.0000000001, "MM15"}, + {7654321.000000001, "MM16"}, + {87654321.00000001, "MM17"}, + {987654321.0000001, "MM18"}})); + + s = db.ZRangebyscore("GP1_ZRANGEBYSCORE_KEY", -1000.000000000001, 987654321.0000001, false, true, &score_members); + ASSERT_TRUE(s.ok()); + ASSERT_TRUE(score_members_match(score_members, {{-100.0000000000001, "MM10"}, + {0, "MM11"}, + {100.0000000000001, "MM12"}, + {4321.000000000001, "MM13"}, + {54321.00000000001, "MM14"}, + {654321.0000000001, "MM15"}, + {7654321.000000001, "MM16"}, + {87654321.00000001, "MM17"}, + {987654321.0000001, "MM18"}})); + + s = db.ZRangebyscore("GP1_ZRANGEBYSCORE_KEY", 999999999, std::numeric_limits::max(), true, true, + &score_members); + ASSERT_TRUE(s.ok()); + ASSERT_TRUE(score_members_match(score_members, {})); + + // ***************** Group 2 Test ***************** + std::vector gp2_sm{{-5, "MM0"}, {-3, "MM1"}, {-1, "MM2"}, {0, "MM3"}, + {1, "MM4"}, {3, "MM5"}, {5, "MM6"}}; + s = db.ZAdd("GP2_ZRANGEBYSCORE_KEY", gp2_sm, &ret); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(7, ret); + ASSERT_TRUE(make_expired(&db, "GP2_ZRANGEBYSCORE_KEY")); + s = db.ZRangebyscore("GP2_ZRANGEBYSCORE_KEY", std::numeric_limits::lowest(), + std::numeric_limits::max(), true, true, &score_members); + ASSERT_TRUE(s.IsNotFound()); + ASSERT_TRUE(score_members_match(score_members, {})); + + // ***************** Group 3 Test ***************** + s = db.ZRangebyscore("GP3_ZRANGEBYSCORE_KEY", std::numeric_limits::lowest(), + std::numeric_limits::max(), true, true, &score_members); + ASSERT_TRUE(s.IsNotFound()); + ASSERT_TRUE(score_members_match(score_members, {})); + + // ***************** Group 4 Test ***************** + std::vector gp4_sm{ + {std::numeric_limits::lowest(), "MM0"}, {0, "MM1"}, {std::numeric_limits::max(), "MM2"}}; + s = db.ZAdd("GP4_ZRANGEBYSCORE_KEY", gp4_sm, &ret); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(3, ret); + + s = db.ZRangebyscore("GP4_ZRANGEBYSCORE_KEY", std::numeric_limits::lowest(), + std::numeric_limits::max(), true, true, &score_members); + ASSERT_TRUE(s.ok()); + ASSERT_TRUE(score_members_match( + score_members, + {{std::numeric_limits::lowest(), "MM0"}, {0, "MM1"}, {std::numeric_limits::max(), "MM2"}})); + + s = db.ZRangebyscore("GP4_ZRANGEBYSCORE_KEY", std::numeric_limits::lowest(), + std::numeric_limits::max(), false, false, &score_members); + ASSERT_TRUE(s.ok()); + ASSERT_TRUE(score_members_match(score_members, {{0, "MM1"}})); + + s = db.ZRangebyscore("GP4_ZRANGEBYSCORE_KEY", std::numeric_limits::lowest(), + std::numeric_limits::max(), true, false, &score_members); + ASSERT_TRUE(s.ok()); + ASSERT_TRUE(score_members_match(score_members, {{std::numeric_limits::lowest(), "MM0"}, {0, "MM1"}})); + + s = db.ZRangebyscore("GP4_ZRANGEBYSCORE_KEY", std::numeric_limits::lowest(), + std::numeric_limits::max(), false, true, &score_members); + ASSERT_TRUE(s.ok()); + ASSERT_TRUE(score_members_match(score_members, {{0, 
"MM1"}, {std::numeric_limits::max(), "MM2"}})); +} + +// TODO(@tangruilin): 修复测试代码 +// ZRank +// TEST_F(ZSetsTest, ZRankTest) { // NOLINT +// int32_t ret, rank; + +// // ***************** Group 1 Test ***************** +// // {-5, MM0} {-3, MM1} {-1, MM2} {0, MM3} {1, MM4} {3, MM5} {5, MM6} +// // 0 1 2 3 4 5 6 +// std::vector gp1_sm {{-5, "MM0"}, {-3, "MM1"}, {-1, "MM2"}, {0, "MM3"}, {1, "MM4"}, {3, +// "MM5"}, {5, "MM6"}}; s = db.ZAdd("GP1_ZRANK_KEY", gp1_sm, &ret); ASSERT_TRUE(s.ok()); ASSERT_EQ(7, ret); + +// s = db.ZRank("GP1_ZRANK_KEY", "MM0", &rank); +// ASSERT_TRUE(s.ok()); +// ASSERT_EQ(rank, 0); + +// s = db.ZRank("GP1_ZRANK_KEY", "MM2", &rank); +// ASSERT_TRUE(s.ok()); +// ASSERT_EQ(rank, 2); + +// s = db.ZRank("GP1_ZRANK_KEY", "MM4", &rank); +// ASSERT_TRUE(s.ok()); +// ASSERT_EQ(rank, 4); + +// s = db.ZRank("GP1_ZRANK_KEY", "MM6", &rank); +// ASSERT_TRUE(s.ok()); +// ASSERT_EQ(rank, 6); + +// s = db.ZRank("GP1_ZRANK_KEY", "MM", &rank); +// ASSERT_TRUE(s.IsNotFound()); +// ASSERT_EQ(rank, -1); + +// // ***************** Group 2 Test ***************** +// std::vector gp2_sm {{-5, "MM0"}, {-3, "MM1"}, {-1, "MM2"}, {0, "MM3"}, {1, "MM4"}, {3, +// "MM5"}, {5, "MM6"}}; s = db.ZAdd("GP2_ZRANK_KEY", gp2_sm, &ret); ASSERT_TRUE(s.ok()); ASSERT_EQ(7, ret); +// ASSERT_TRUE(make_expired(&db, "GP2_ZRANGE_KEY")); + +// s = db.ZRank("GP2_ZRANGE_KEY", "MM0", &rank); +// ASSERT_TRUE(s.IsNotFound()); +// ASSERT_EQ(-1, rank); + +// // ***************** Group 3 Test ***************** +// s = db.ZRank("GP3_ZRANGE_KEY", "MM0", &rank); +// ASSERT_TRUE(s.IsNotFound()); +// ASSERT_EQ(-1, rank); +// } + +// ZRem +TEST_F(ZSetsTest, ZRemTest) { // NOLINT + int32_t ret; + + // ***************** Group 1 Test ***************** + // {-5, MM0} {-3, MM1} {-1, MM2} {0, MM3} {1, MM4} {3, MM5} {5, MM6} + // 0 1 2 3 4 5 6 + std::vector gp1_sm{{-5, "MM0"}, {-3, "MM1"}, {-1, "MM2"}, {0, "MM3"}, + {1, "MM4"}, {3, "MM5"}, {5, "MM6"}}; + s = db.ZAdd("GP1_ZREM_KEY", gp1_sm, &ret); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(7, ret); + ASSERT_TRUE(size_match(&db, "GP1_ZREM_KEY", 7)); + ASSERT_TRUE(score_members_match( + &db, "GP1_ZREM_KEY", {{-5, "MM0"}, {-3, "MM1"}, {-1, "MM2"}, {0, "MM3"}, {1, "MM4"}, {3, "MM5"}, {5, "MM6"}})); + + s = db.ZRem("GP1_ZREM_KEY", {"MM1", "MM3", "MM5"}, &ret); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(3, ret); + ASSERT_TRUE(size_match(&db, "GP1_ZREM_KEY", 4)); + ASSERT_TRUE(score_members_match(&db, "GP1_ZREM_KEY", {{-5, "MM0"}, {-1, "MM2"}, {1, "MM4"}, {5, "MM6"}})); + + // ***************** Group 2 Test ***************** + // {-5, MM0} {-3, MM1} {-1, MM2} {0, MM3} {1, MM4} {3, MM5} {5, MM6} + // 0 1 2 3 4 5 6 + std::vector gp2_sm{{-5, "MM0"}, {-3, "MM1"}, {-1, "MM2"}, {0, "MM3"}, + {1, "MM4"}, {3, "MM5"}, {5, "MM6"}}; + s = db.ZAdd("GP2_ZREM_KEY", gp2_sm, &ret); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(7, ret); + ASSERT_TRUE(size_match(&db, "GP2_ZREM_KEY", 7)); + ASSERT_TRUE(score_members_match( + &db, "GP2_ZREM_KEY", {{-5, "MM0"}, {-3, "MM1"}, {-1, "MM2"}, {0, "MM3"}, {1, "MM4"}, {3, "MM5"}, {5, "MM6"}})); + + s = db.ZRem("GP2_ZREM_KEY", {"MM0", "MM1", "MM2", "MM3", "MM4", "MM5", "MM6"}, &ret); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(7, ret); + ASSERT_TRUE(size_match(&db, "GP2_ZREM_KEY", 0)); + ASSERT_TRUE(score_members_match(&db, "GP2_ZREM_KEY", {})); + + s = db.ZRem("GP2_ZREM_KEY", {"MM0", "MM1", "MM2"}, &ret); + ASSERT_TRUE(s.IsNotFound()); + ASSERT_EQ(0, ret); + ASSERT_TRUE(size_match(&db, "GP2_ZREM_KEY", 0)); + ASSERT_TRUE(score_members_match(&db, "GP2_ZREM_KEY", {})); + + // ***************** Group 3 Test 
+  // ***************** Group 3 Test *****************
+  // {-5, MM0} {-3, MM1} {-1, MM2} {0, MM3} {1, MM4} {3, MM5} {5, MM6}
+  //      0         1         2        3        4        5        6
+  std::vector<storage::ScoreMember> gp3_sm{{-5, "MM0"}, {-3, "MM1"}, {-1, "MM2"}, {0, "MM3"},
+                                           {1, "MM4"}, {3, "MM5"}, {5, "MM6"}};
+  s = db.ZAdd("GP3_ZREM_KEY", gp3_sm, &ret);
+  ASSERT_TRUE(s.ok());
+  ASSERT_EQ(7, ret);
+  ASSERT_TRUE(size_match(&db, "GP3_ZREM_KEY", 7));
+  ASSERT_TRUE(score_members_match(
+      &db, "GP3_ZREM_KEY", {{-5, "MM0"}, {-3, "MM1"}, {-1, "MM2"}, {0, "MM3"}, {1, "MM4"}, {3, "MM5"}, {5, "MM6"}}));
+
+  s = db.ZRem("GP3_ZREM_KEY", {"MM0", "MM0", "MM1", "MM1", "MM2", "MM2"}, &ret);
+  ASSERT_TRUE(s.ok());
+  ASSERT_EQ(3, ret);
+  ASSERT_TRUE(size_match(&db, "GP3_ZREM_KEY", 4));
+  ASSERT_TRUE(score_members_match(&db, "GP3_ZREM_KEY", {{0, "MM3"}, {1, "MM4"}, {3, "MM5"}, {5, "MM6"}}));
+
+  // ***************** Group 4 Test *****************
+  // {-5, MM0} {-3, MM1} {-1, MM2} {0, MM3} {1, MM4} {3, MM5} {5, MM6}
+  //      0         1         2        3        4        5        6
+  std::vector<storage::ScoreMember> gp4_sm{{-5, "MM0"}, {-3, "MM1"}, {-1, "MM2"}, {0, "MM3"},
+                                           {1, "MM4"}, {3, "MM5"}, {5, "MM6"}};
+  s = db.ZAdd("GP4_ZREM_KEY", gp4_sm, &ret);
+  ASSERT_TRUE(s.ok());
+  ASSERT_EQ(7, ret);
+  ASSERT_TRUE(size_match(&db, "GP4_ZREM_KEY", 7));
+  ASSERT_TRUE(score_members_match(
+      &db, "GP4_ZREM_KEY", {{-5, "MM0"}, {-3, "MM1"}, {-1, "MM2"}, {0, "MM3"}, {1, "MM4"}, {3, "MM5"}, {5, "MM6"}}));
+
+  s = db.ZRem("GP4_ZREM_KEY", {"MM", "YY", "CC"}, &ret);
+  ASSERT_TRUE(s.ok());
+  ASSERT_EQ(0, ret);
+  ASSERT_TRUE(size_match(&db, "GP4_ZREM_KEY", 7));
+  ASSERT_TRUE(score_members_match(
+      &db, "GP4_ZREM_KEY", {{-5, "MM0"}, {-3, "MM1"}, {-1, "MM2"}, {0, "MM3"}, {1, "MM4"}, {3, "MM5"}, {5, "MM6"}}));
+
+  // ***************** Group 5 Test *****************
+  // {-5, MM0} {-3, MM1} {-1, MM2} {0, MM3} {1, MM4} {3, MM5} {5, MM6}
+  //      0         1         2        3        4        5        6
+  std::vector<storage::ScoreMember> gp5_sm{{-5, "MM0"}, {-3, "MM1"}, {-1, "MM2"}, {0, "MM3"},
+                                           {1, "MM4"}, {3, "MM5"}, {5, "MM6"}};
+  s = db.ZAdd("GP5_ZREM_KEY", gp5_sm, &ret);
+  ASSERT_TRUE(s.ok());
+  ASSERT_EQ(7, ret);
+  ASSERT_TRUE(size_match(&db, "GP5_ZREM_KEY", 7));
+  ASSERT_TRUE(score_members_match(
+      &db, "GP5_ZREM_KEY", {{-5, "MM0"}, {-3, "MM1"}, {-1, "MM2"}, {0, "MM3"}, {1, "MM4"}, {3, "MM5"}, {5, "MM6"}}));
+  ASSERT_TRUE(make_expired(&db, "GP5_ZREM_KEY"));
+
+  s = db.ZRem("GP5_ZREM_KEY", {"MM0", "MM1", "MM2"}, &ret);
+  ASSERT_TRUE(s.IsNotFound());
+  ASSERT_EQ(0, ret);
+  ASSERT_TRUE(size_match(&db, "GP5_ZREM_KEY", 0));
+  ASSERT_TRUE(score_members_match(&db, "GP5_ZREM_KEY", {}));
+
+  // ***************** Group 6 Test *****************
+  // Not exist ZSet
+  s = db.ZRem("GP6_ZREM_KEY", {"MM0", "MM1", "MM2"}, &ret);
+  ASSERT_TRUE(s.IsNotFound());
+  ASSERT_EQ(0, ret);
+  ASSERT_TRUE(size_match(&db, "GP6_ZREM_KEY", 0));
+  ASSERT_TRUE(score_members_match(&db, "GP6_ZREM_KEY", {}));
+}
+
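+// ZRemrangebyrank takes Redis-style rank bounds: negative ranks count from
+// the tail and out-of-range bounds are clamped, so (0, -1), (-100, 100) and
+// (0, 100) all cover the whole set; it returns the number of members removed.
+// A minimal sketch, assuming a fresh (hypothetical) key:
+//
+//   db.ZAdd("FRESH_ZRBR_KEY", {{1, "A"}, {2, "B"}, {3, "C"}}, &ret);
+//   db.ZRemrangebyrank("FRESH_ZRBR_KEY", -2, -1, &ret);
+//   // expect: ret == 2, leaving only {1, "A"}
+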
+// ZRemrangebyrank
+TEST_F(ZSetsTest, ZRemrangebyrankTest) {  // NOLINT
+  int32_t ret;
+  std::vector<storage::ScoreMember> score_members;
+
+  // ***************** Group 1 Test *****************
+  std::vector<storage::ScoreMember> gp1_sm{{0, "MM1"}};
+  s = db.ZAdd("GP1_ZREMRANGEBYRANK_KEY", gp1_sm, &ret);
+  ASSERT_TRUE(s.ok());
+  ASSERT_EQ(1, ret);
+  ASSERT_TRUE(size_match(&db, "GP1_ZREMRANGEBYRANK_KEY", 1));
+  ASSERT_TRUE(score_members_match(&db, "GP1_ZREMRANGEBYRANK_KEY", {{0, "MM1"}}));
+
+  s = db.ZRemrangebyrank("GP1_ZREMRANGEBYRANK_KEY", 0, -1, &ret);
+  ASSERT_TRUE(s.ok());
+  ASSERT_EQ(1, ret);
+  ASSERT_TRUE(score_members_match(score_members, {}));
+
+  // ***************** Group 2 Test *****************
+  //
+  // {0, MM0} {1, MM1} {2, MM2} {3, MM3} {4, MM4} {5, MM5} {6, MM6} {7, MM7} {8, MM8}
+  //     0        1        2        3        4        5        6        7        8
+  //    -9       -8       -7       -6       -5       -4       -3       -2       -1
+  std::vector<storage::ScoreMember> gp2_sm{{0, "MM0"}, {1, "MM1"}, {2, "MM2"}, {3, "MM3"}, {4, "MM4"},
+                                           {5, "MM5"}, {6, "MM6"}, {7, "MM7"}, {8, "MM8"}};
+  s = db.ZAdd("GP2_ZREMRANGEBYRANK_KEY", gp2_sm, &ret);
+  ASSERT_TRUE(s.ok());
+  ASSERT_EQ(9, ret);
+  ASSERT_TRUE(size_match(&db, "GP2_ZREMRANGEBYRANK_KEY", 9));
+  ASSERT_TRUE(score_members_match(
+      &db, "GP2_ZREMRANGEBYRANK_KEY",
+      {{0, "MM0"}, {1, "MM1"}, {2, "MM2"}, {3, "MM3"}, {4, "MM4"}, {5, "MM5"}, {6, "MM6"}, {7, "MM7"}, {8, "MM8"}}));
+
+  s = db.ZRemrangebyrank("GP2_ZREMRANGEBYRANK_KEY", 0, 8, &ret);
+  ASSERT_TRUE(s.ok());
+  ASSERT_EQ(9, ret);
+  ASSERT_TRUE(size_match(&db, "GP2_ZREMRANGEBYRANK_KEY", 0));
+  ASSERT_TRUE(score_members_match(score_members, {}));
+
+  // ***************** Group 3 Test *****************
+  //
+  // {0, MM0} {1, MM1} {2, MM2} {3, MM3} {4, MM4} {5, MM5} {6, MM6} {7, MM7} {8, MM8}
+  //     0        1        2        3        4        5        6        7        8
+  //    -9       -8       -7       -6       -5       -4       -3       -2       -1
+  std::vector<storage::ScoreMember> gp3_sm{{0, "MM0"}, {1, "MM1"}, {2, "MM2"}, {3, "MM3"}, {4, "MM4"},
+                                           {5, "MM5"}, {6, "MM6"}, {7, "MM7"}, {8, "MM8"}};
+  s = db.ZAdd("GP3_ZREMRANGEBYRANK_KEY", gp3_sm, &ret);
+  ASSERT_TRUE(s.ok());
+  ASSERT_EQ(9, ret);
+  ASSERT_TRUE(size_match(&db, "GP3_ZREMRANGEBYRANK_KEY", 9));
+  ASSERT_TRUE(score_members_match(
+      &db, "GP3_ZREMRANGEBYRANK_KEY",
+      {{0, "MM0"}, {1, "MM1"}, {2, "MM2"}, {3, "MM3"}, {4, "MM4"}, {5, "MM5"}, {6, "MM6"}, {7, "MM7"}, {8, "MM8"}}));
+
+  s = db.ZRemrangebyrank("GP3_ZREMRANGEBYRANK_KEY", -9, -1, &ret);
+  ASSERT_TRUE(s.ok());
+  ASSERT_EQ(9, ret);
+  ASSERT_TRUE(size_match(&db, "GP3_ZREMRANGEBYRANK_KEY", 0));
+  ASSERT_TRUE(score_members_match(score_members, {}));
+
+  // ***************** Group 4 Test *****************
+  //
+  // {0, MM0} {1, MM1} {2, MM2} {3, MM3} {4, MM4} {5, MM5} {6, MM6} {7, MM7} {8, MM8}
+  //     0        1        2        3        4        5        6        7        8
+  //    -9       -8       -7       -6       -5       -4       -3       -2       -1
+  std::vector<storage::ScoreMember> gp4_sm{{0, "MM0"}, {1, "MM1"}, {2, "MM2"}, {3, "MM3"}, {4, "MM4"},
+                                           {5, "MM5"}, {6, "MM6"}, {7, "MM7"}, {8, "MM8"}};
+  s = db.ZAdd("GP4_ZREMRANGEBYRANK_KEY", gp4_sm, &ret);
+  ASSERT_TRUE(s.ok());
+  ASSERT_EQ(9, ret);
+  ASSERT_TRUE(size_match(&db, "GP4_ZREMRANGEBYRANK_KEY", 9));
+  ASSERT_TRUE(score_members_match(
+      &db, "GP4_ZREMRANGEBYRANK_KEY",
+      {{0, "MM0"}, {1, "MM1"}, {2, "MM2"}, {3, "MM3"}, {4, "MM4"}, {5, "MM5"}, {6, "MM6"}, {7, "MM7"}, {8, "MM8"}}));
+  s = db.ZRemrangebyrank("GP4_ZREMRANGEBYRANK_KEY", 0, -1, &ret);
+  ASSERT_TRUE(s.ok());
+  ASSERT_EQ(9, ret);
+  ASSERT_TRUE(size_match(&db, "GP4_ZREMRANGEBYRANK_KEY", 0));
+  ASSERT_TRUE(score_members_match(score_members, {}));
+
+  // ***************** Group 5 Test *****************
+  //
+  // {0, MM0} {1, MM1} {2, MM2} {3, MM3} {4, MM4} {5, MM5} {6, MM6} {7, MM7} {8, MM8}
+  //     0        1        2        3        4        5        6        7        8
+  //    -9       -8       -7       -6       -5       -4       -3       -2       -1
+  std::vector<storage::ScoreMember> gp5_sm{{0, "MM0"}, {1, "MM1"}, {2, "MM2"}, {3, "MM3"}, {4, "MM4"},
+                                           {5, "MM5"}, {6, "MM6"}, {7, "MM7"}, {8, "MM8"}};
+  s = db.ZAdd("GP5_ZREMRANGEBYRANK_KEY", gp5_sm, &ret);
+  ASSERT_TRUE(s.ok());
+  ASSERT_EQ(9, ret);
+  ASSERT_TRUE(size_match(&db, "GP5_ZREMRANGEBYRANK_KEY", 9));
+  ASSERT_TRUE(score_members_match(
+      &db, "GP5_ZREMRANGEBYRANK_KEY",
+      {{0, "MM0"}, {1, "MM1"}, {2, "MM2"}, {3, "MM3"}, {4, "MM4"}, {5, "MM5"}, {6, "MM6"}, {7, "MM7"}, {8, "MM8"}}));
+  s = db.ZRemrangebyrank("GP5_ZREMRANGEBYRANK_KEY", -9, 8, &ret);
+  ASSERT_TRUE(s.ok());
+  ASSERT_EQ(9, ret);
+  ASSERT_TRUE(size_match(&db, "GP5_ZREMRANGEBYRANK_KEY", 0));
+  ASSERT_TRUE(score_members_match(score_members, {}));
+
+  // ***************** Group 6 Test *****************
+  //
+  // {0, MM0} {1, MM1} {2, MM2} {3, MM3} {4, MM4} {5, MM5} {6, MM6} {7, MM7} {8, MM8}
+  //     0        1        2        3        4        5        6        7        8
+  //    -9       -8       -7       -6       -5       -4       -3       -2       -1
+  std::vector<storage::ScoreMember> gp6_sm{{0, "MM0"}, {1, "MM1"}, {2, "MM2"}, {3, "MM3"}, {4, "MM4"},
+                                           {5, "MM5"}, {6, "MM6"}, {7, "MM7"}, {8, "MM8"}};
+  s = db.ZAdd("GP6_ZREMRANGEBYRANK_KEY", gp6_sm, &ret);
+  ASSERT_TRUE(s.ok());
+  ASSERT_EQ(9, ret);
+  ASSERT_TRUE(size_match(&db, "GP6_ZREMRANGEBYRANK_KEY", 9));
+  ASSERT_TRUE(score_members_match(
+      &db, "GP6_ZREMRANGEBYRANK_KEY",
+      {{0, "MM0"}, {1, "MM1"}, {2, "MM2"}, {3, "MM3"}, {4, "MM4"}, {5, "MM5"}, {6, "MM6"}, {7, "MM7"}, {8, "MM8"}}));
+  s = db.ZRemrangebyrank("GP6_ZREMRANGEBYRANK_KEY", -100, 8, &ret);
+  ASSERT_TRUE(s.ok());
+  ASSERT_EQ(9, ret);
+  ASSERT_TRUE(size_match(&db, "GP6_ZREMRANGEBYRANK_KEY", 0));
+  ASSERT_TRUE(score_members_match(score_members, {}));
+
+  // ***************** Group 7 Test *****************
+  //
+  // {0, MM0} {1, MM1} {2, MM2} {3, MM3} {4, MM4} {5, MM5} {6, MM6} {7, MM7} {8, MM8}
+  //     0        1        2        3        4        5        6        7        8
+  //    -9       -8       -7       -6       -5       -4       -3       -2       -1
+  std::vector<storage::ScoreMember> gp7_sm{{0, "MM0"}, {1, "MM1"}, {2, "MM2"}, {3, "MM3"}, {4, "MM4"},
+                                           {5, "MM5"}, {6, "MM6"}, {7, "MM7"}, {8, "MM8"}};
+  s = db.ZAdd("GP7_ZREMRANGEBYRANK_KEY", gp7_sm, &ret);
+  ASSERT_TRUE(s.ok());
+  ASSERT_EQ(9, ret);
+  ASSERT_TRUE(size_match(&db, "GP7_ZREMRANGEBYRANK_KEY", 9));
+  ASSERT_TRUE(score_members_match(
+      &db, "GP7_ZREMRANGEBYRANK_KEY",
+      {{0, "MM0"}, {1, "MM1"}, {2, "MM2"}, {3, "MM3"}, {4, "MM4"}, {5, "MM5"}, {6, "MM6"}, {7, "MM7"}, {8, "MM8"}}));
+  s = db.ZRemrangebyrank("GP7_ZREMRANGEBYRANK_KEY", 0, 100, &ret);
+  ASSERT_TRUE(s.ok());
+  ASSERT_EQ(9, ret);
+  ASSERT_TRUE(size_match(&db, "GP7_ZREMRANGEBYRANK_KEY", 0));
+  ASSERT_TRUE(score_members_match(score_members, {}));
+
+  // ***************** Group 8 Test *****************
+  //
+  // {0, MM0} {1, MM1} {2, MM2} {3, MM3} {4, MM4} {5, MM5} {6, MM6} {7, MM7} {8, MM8}
+  //     0        1        2        3        4        5        6        7        8
+  //    -9       -8       -7       -6       -5       -4       -3       -2       -1
+  std::vector<storage::ScoreMember> gp8_sm{{0, "MM0"}, {1, "MM1"}, {2, "MM2"}, {3, "MM3"}, {4, "MM4"},
+                                           {5, "MM5"}, {6, "MM6"}, {7, "MM7"}, {8, "MM8"}};
+  s = db.ZAdd("GP8_ZREMRANGEBYRANK_KEY", gp8_sm, &ret);
+  ASSERT_TRUE(s.ok());
+  ASSERT_EQ(9, ret);
+  ASSERT_TRUE(size_match(&db, "GP8_ZREMRANGEBYRANK_KEY", 9));
+  ASSERT_TRUE(score_members_match(
+      &db, "GP8_ZREMRANGEBYRANK_KEY",
+      {{0, "MM0"}, {1, "MM1"}, {2, "MM2"}, {3, "MM3"}, {4, "MM4"}, {5, "MM5"}, {6, "MM6"}, {7, "MM7"}, {8, "MM8"}}));
+  s = db.ZRemrangebyrank("GP8_ZREMRANGEBYRANK_KEY", -100, 100, &ret);
+  ASSERT_TRUE(s.ok());
+  ASSERT_EQ(9, ret);
+  ASSERT_TRUE(size_match(&db, "GP8_ZREMRANGEBYRANK_KEY", 0));
+  ASSERT_TRUE(score_members_match(score_members, {}));
+
+  // ***************** Group 9 Test *****************
+  //
+  // {0, MM0} {1, MM1} {2, MM2} {3, MM3} {4, MM4} {5, MM5} {6, MM6} {7, MM7} {8, MM8}
+  //     0        1        2        3        4        5        6        7        8
+  //    -9       -8       -7       -6       -5       -4       -3       -2       -1
+  std::vector<storage::ScoreMember> gp9_sm{{0, "MM0"}, {1, "MM1"}, {2, "MM2"}, {3, "MM3"}, {4, "MM4"},
+                                           {5, "MM5"}, {6, "MM6"}, {7, "MM7"}, {8, "MM8"}};
+  s = db.ZAdd("GP9_ZREMRANGEBYRANK_KEY", gp9_sm, &ret);
+  ASSERT_TRUE(s.ok());
+  ASSERT_EQ(9, ret);
+  ASSERT_TRUE(size_match(&db, "GP9_ZREMRANGEBYRANK_KEY", 9));
+  ASSERT_TRUE(score_members_match(
+      &db, "GP9_ZREMRANGEBYRANK_KEY",
+      {{0, "MM0"}, {1, "MM1"}, {2, "MM2"}, {3, "MM3"}, {4, "MM4"}, {5, "MM5"}, {6, "MM6"}, {7, "MM7"}, {8, "MM8"}}));
+  s = db.ZRemrangebyrank("GP9_ZREMRANGEBYRANK_KEY", 0, 0, &ret);
+  ASSERT_TRUE(s.ok());
+  ASSERT_EQ(1, ret);
+  ASSERT_TRUE(size_match(&db, "GP9_ZREMRANGEBYRANK_KEY", 8));
"GP9_ZREMRANGEBYRANK_KEY", + {{1, "MM1"}, {2, "MM2"}, {3, "MM3"}, {4, "MM4"}, {5, "MM5"}, {6, "MM6"}, {7, "MM7"}, {8, "MM8"}})); + + // ***************** Group 10 Test ***************** + // + // {0, MM0} {1, MM1} {2, MM2} {3, MM3} {4, MM4} {5, MM5} {6, MM6} {7, MM7} {8, MM8} + // 0 1 2 3 4 5 6 7 8 + // -9 -8 -7 -6 -5 -4 -3 -2 -1 + std::vector gp10_sm{{0, "MM0"}, {1, "MM1"}, {2, "MM2"}, {3, "MM3"}, {4, "MM4"}, + {5, "MM5"}, {6, "MM6"}, {7, "MM7"}, {8, "MM8"}}; + s = db.ZAdd("GP10_ZREMRANGEBYRANK_KEY", gp10_sm, &ret); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(9, ret); + ASSERT_TRUE(size_match(&db, "GP10_ZREMRANGEBYRANK_KEY", 9)); + ASSERT_TRUE(score_members_match( + &db, "GP10_ZREMRANGEBYRANK_KEY", + {{0, "MM0"}, {1, "MM1"}, {2, "MM2"}, {3, "MM3"}, {4, "MM4"}, {5, "MM5"}, {6, "MM6"}, {7, "MM7"}, {8, "MM8"}})); + s = db.ZRemrangebyrank("GP10_ZREMRANGEBYRANK_KEY", -9, -9, &ret); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(1, ret); + ASSERT_TRUE(size_match(&db, "GP10_ZREMRANGEBYRANK_KEY", 8)); + ASSERT_TRUE(score_members_match( + &db, "GP10_ZREMRANGEBYRANK_KEY", + {{1, "MM1"}, {2, "MM2"}, {3, "MM3"}, {4, "MM4"}, {5, "MM5"}, {6, "MM6"}, {7, "MM7"}, {8, "MM8"}})); + + // ***************** Group 11 Test ***************** + // + // {0, MM0} {1, MM1} {2, MM2} {3, MM3} {4, MM4} {5, MM5} {6, MM6} {7, MM7} {8, MM8} + // 0 1 2 3 4 5 6 7 8 + // -9 -8 -7 -6 -5 -4 -3 -2 -1 + std::vector gp11_sm{{0, "MM0"}, {1, "MM1"}, {2, "MM2"}, {3, "MM3"}, {4, "MM4"}, + {5, "MM5"}, {6, "MM6"}, {7, "MM7"}, {8, "MM8"}}; + s = db.ZAdd("GP11_ZREMRANGEBYRANK_KEY", gp11_sm, &ret); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(9, ret); + ASSERT_TRUE(size_match(&db, "GP11_ZREMRANGEBYRANK_KEY", 9)); + ASSERT_TRUE(score_members_match( + &db, "GP11_ZREMRANGEBYRANK_KEY", + {{0, "MM0"}, {1, "MM1"}, {2, "MM2"}, {3, "MM3"}, {4, "MM4"}, {5, "MM5"}, {6, "MM6"}, {7, "MM7"}, {8, "MM8"}})); + s = db.ZRemrangebyrank("GP11_ZREMRANGEBYRANK_KEY", 8, 8, &ret); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(1, ret); + ASSERT_TRUE(size_match(&db, "GP11_ZREMRANGEBYRANK_KEY", 8)); + ASSERT_TRUE(score_members_match( + &db, "GP11_ZREMRANGEBYRANK_KEY", + {{0, "MM0"}, {1, "MM1"}, {2, "MM2"}, {3, "MM3"}, {4, "MM4"}, {5, "MM5"}, {6, "MM6"}, {7, "MM7"}})); + + // ***************** Group 12 Test ***************** + // + // {0, MM0} {1, MM1} {2, MM2} {3, MM3} {4, MM4} {5, MM5} {6, MM6} {7, MM7} {8, MM8} + // 0 1 2 3 4 5 6 7 8 + // -9 -8 -7 -6 -5 -4 -3 -2 -1 + std::vector gp12_sm{{0, "MM0"}, {1, "MM1"}, {2, "MM2"}, {3, "MM3"}, {4, "MM4"}, + {5, "MM5"}, {6, "MM6"}, {7, "MM7"}, {8, "MM8"}}; + s = db.ZAdd("GP12_ZREMRANGEBYRANK_KEY", gp12_sm, &ret); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(9, ret); + ASSERT_TRUE(size_match(&db, "GP12_ZREMRANGEBYRANK_KEY", 9)); + ASSERT_TRUE(score_members_match( + &db, "GP12_ZREMRANGEBYRANK_KEY", + {{0, "MM0"}, {1, "MM1"}, {2, "MM2"}, {3, "MM3"}, {4, "MM4"}, {5, "MM5"}, {6, "MM6"}, {7, "MM7"}, {8, "MM8"}})); + s = db.ZRemrangebyrank("GP12_ZREMRANGEBYRANK_KEY", -1, -1, &ret); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(1, ret); + ASSERT_TRUE(size_match(&db, "GP12_ZREMRANGEBYRANK_KEY", 8)); + ASSERT_TRUE(score_members_match( + &db, "GP12_ZREMRANGEBYRANK_KEY", + {{0, "MM0"}, {1, "MM1"}, {2, "MM2"}, {3, "MM3"}, {4, "MM4"}, {5, "MM5"}, {6, "MM6"}, {7, "MM7"}})); + + // ***************** Group 13 Test ***************** + // + // {0, MM0} {1, MM1} {2, MM2} {3, MM3} {4, MM4} {5, MM5} {6, MM6} {7, MM7} {8, MM8} + // 0 1 2 3 4 5 6 7 8 + // -9 -8 -7 -6 -5 -4 -3 -2 -1 + std::vector gp13_sm{{0, "MM0"}, {1, "MM1"}, {2, "MM2"}, {3, "MM3"}, {4, "MM4"}, + {5, "MM5"}, {6, "MM6"}, {7, "MM7"}, {8, 
"MM8"}}; + s = db.ZAdd("GP13_ZREMRANGEBYRANK_KEY", gp13_sm, &ret); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(9, ret); + ASSERT_TRUE(size_match(&db, "GP13_ZREMRANGEBYRANK_KEY", 9)); + ASSERT_TRUE(score_members_match( + &db, "GP13_ZREMRANGEBYRANK_KEY", + {{0, "MM0"}, {1, "MM1"}, {2, "MM2"}, {3, "MM3"}, {4, "MM4"}, {5, "MM5"}, {6, "MM6"}, {7, "MM7"}, {8, "MM8"}})); + s = db.ZRemrangebyrank("GP13_ZREMRANGEBYRANK_KEY", 0, 5, &ret); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(6, ret); + ASSERT_TRUE(size_match(&db, "GP13_ZREMRANGEBYRANK_KEY", 3)); + ASSERT_TRUE(score_members_match(&db, "GP13_ZREMRANGEBYRANK_KEY", {{6, "MM6"}, {7, "MM7"}, {8, "MM8"}})); + + // ***************** Group 14 Test ***************** + // + // {0, MM0} {1, MM1} {2, MM2} {3, MM3} {4, MM4} {5, MM5} {6, MM6} {7, MM7} {8, MM8} + // 0 1 2 3 4 5 6 7 8 + // -9 -8 -7 -6 -5 -4 -3 -2 -1 + std::vector gp14_sm{{0, "MM0"}, {1, "MM1"}, {2, "MM2"}, {3, "MM3"}, {4, "MM4"}, + {5, "MM5"}, {6, "MM6"}, {7, "MM7"}, {8, "MM8"}}; + s = db.ZAdd("GP14_ZREMRANGEBYRANK_KEY", gp14_sm, &ret); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(9, ret); + ASSERT_TRUE(size_match(&db, "GP14_ZREMRANGEBYRANK_KEY", 9)); + ASSERT_TRUE(score_members_match( + &db, "GP14_ZREMRANGEBYRANK_KEY", + {{0, "MM0"}, {1, "MM1"}, {2, "MM2"}, {3, "MM3"}, {4, "MM4"}, {5, "MM5"}, {6, "MM6"}, {7, "MM7"}, {8, "MM8"}})); + s = db.ZRemrangebyrank("GP14_ZREMRANGEBYRANK_KEY", 0, -4, &ret); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(6, ret); + ASSERT_TRUE(size_match(&db, "GP14_ZREMRANGEBYRANK_KEY", 3)); + ASSERT_TRUE(score_members_match(&db, "GP14_ZREMRANGEBYRANK_KEY", {{6, "MM6"}, {7, "MM7"}, {8, "MM8"}})); + + // ***************** Group 15 Test ***************** + // + // {0, MM0} {1, MM1} {2, MM2} {3, MM3} {4, MM4} {5, MM5} {6, MM6} {7, MM7} {8, MM8} + // 0 1 2 3 4 5 6 7 8 + // -9 -8 -7 -6 -5 -4 -3 -2 -1 + std::vector gp15_sm{{0, "MM0"}, {1, "MM1"}, {2, "MM2"}, {3, "MM3"}, {4, "MM4"}, + {5, "MM5"}, {6, "MM6"}, {7, "MM7"}, {8, "MM8"}}; + s = db.ZAdd("GP15_ZREMRANGEBYRANK_KEY", gp15_sm, &ret); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(9, ret); + ASSERT_TRUE(size_match(&db, "GP15_ZREMRANGEBYRANK_KEY", 9)); + ASSERT_TRUE(score_members_match( + &db, "GP15_ZREMRANGEBYRANK_KEY", + {{0, "MM0"}, {1, "MM1"}, {2, "MM2"}, {3, "MM3"}, {4, "MM4"}, {5, "MM5"}, {6, "MM6"}, {7, "MM7"}, {8, "MM8"}})); + s = db.ZRemrangebyrank("GP15_ZREMRANGEBYRANK_KEY", -9, -4, &ret); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(6, ret); + ASSERT_TRUE(size_match(&db, "GP15_ZREMRANGEBYRANK_KEY", 3)); + ASSERT_TRUE(score_members_match(&db, "GP15_ZREMRANGEBYRANK_KEY", {{6, "MM6"}, {7, "MM7"}, {8, "MM8"}})); + + // ***************** Group 16 Test ***************** + // + // {0, MM0} {1, MM1} {2, MM2} {3, MM3} {4, MM4} {5, MM5} {6, MM6} {7, MM7} {8, MM8} + // 0 1 2 3 4 5 6 7 8 + // -9 -8 -7 -6 -5 -4 -3 -2 -1 + std::vector gp16_sm{{0, "MM0"}, {1, "MM1"}, {2, "MM2"}, {3, "MM3"}, {4, "MM4"}, + {5, "MM5"}, {6, "MM6"}, {7, "MM7"}, {8, "MM8"}}; + s = db.ZAdd("GP16_ZREMRANGEBYRANK_KEY", gp16_sm, &ret); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(9, ret); + ASSERT_TRUE(size_match(&db, "GP16_ZREMRANGEBYRANK_KEY", 9)); + ASSERT_TRUE(score_members_match( + &db, "GP16_ZREMRANGEBYRANK_KEY", + {{0, "MM0"}, {1, "MM1"}, {2, "MM2"}, {3, "MM3"}, {4, "MM4"}, {5, "MM5"}, {6, "MM6"}, {7, "MM7"}, {8, "MM8"}})); + s = db.ZRemrangebyrank("GP16_ZREMRANGEBYRANK_KEY", -9, 5, &ret); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(6, ret); + ASSERT_TRUE(size_match(&db, "GP16_ZREMRANGEBYRANK_KEY", 3)); + ASSERT_TRUE(score_members_match(&db, "GP16_ZREMRANGEBYRANK_KEY", {{6, "MM6"}, {7, "MM7"}, {8, "MM8"}})); + + // 
+  // ***************** Group 17 Test *****************
+  //
+  // {0, MM0} {1, MM1} {2, MM2} {3, MM3} {4, MM4} {5, MM5} {6, MM6} {7, MM7} {8, MM8}
+  //     0        1        2        3        4        5        6        7        8
+  //    -9       -8       -7       -6       -5       -4       -3       -2       -1
+  std::vector<storage::ScoreMember> gp17_sm{{0, "MM0"}, {1, "MM1"}, {2, "MM2"}, {3, "MM3"}, {4, "MM4"},
+                                            {5, "MM5"}, {6, "MM6"}, {7, "MM7"}, {8, "MM8"}};
+  s = db.ZAdd("GP17_ZREMRANGEBYRANK_KEY", gp17_sm, &ret);
+  ASSERT_TRUE(s.ok());
+  ASSERT_EQ(9, ret);
+  ASSERT_TRUE(size_match(&db, "GP17_ZREMRANGEBYRANK_KEY", 9));
+  ASSERT_TRUE(score_members_match(
+      &db, "GP17_ZREMRANGEBYRANK_KEY",
+      {{0, "MM0"}, {1, "MM1"}, {2, "MM2"}, {3, "MM3"}, {4, "MM4"}, {5, "MM5"}, {6, "MM6"}, {7, "MM7"}, {8, "MM8"}}));
+  s = db.ZRemrangebyrank("GP17_ZREMRANGEBYRANK_KEY", -100, 5, &ret);
+  ASSERT_TRUE(s.ok());
+  ASSERT_EQ(6, ret);
+  ASSERT_TRUE(size_match(&db, "GP17_ZREMRANGEBYRANK_KEY", 3));
+  ASSERT_TRUE(score_members_match(&db, "GP17_ZREMRANGEBYRANK_KEY", {{6, "MM6"}, {7, "MM7"}, {8, "MM8"}}));
+
+  // ***************** Group 18 Test *****************
+  //
+  // {0, MM0} {1, MM1} {2, MM2} {3, MM3} {4, MM4} {5, MM5} {6, MM6} {7, MM7} {8, MM8}
+  //     0        1        2        3        4        5        6        7        8
+  //    -9       -8       -7       -6       -5       -4       -3       -2       -1
+  std::vector<storage::ScoreMember> gp18_sm{{0, "MM0"}, {1, "MM1"}, {2, "MM2"}, {3, "MM3"}, {4, "MM4"},
+                                            {5, "MM5"}, {6, "MM6"}, {7, "MM7"}, {8, "MM8"}};
+  s = db.ZAdd("GP18_ZREMRANGEBYRANK_KEY", gp18_sm, &ret);
+  ASSERT_TRUE(s.ok());
+  ASSERT_EQ(9, ret);
+  ASSERT_TRUE(size_match(&db, "GP18_ZREMRANGEBYRANK_KEY", 9));
+  ASSERT_TRUE(score_members_match(
+      &db, "GP18_ZREMRANGEBYRANK_KEY",
+      {{0, "MM0"}, {1, "MM1"}, {2, "MM2"}, {3, "MM3"}, {4, "MM4"}, {5, "MM5"}, {6, "MM6"}, {7, "MM7"}, {8, "MM8"}}));
+  s = db.ZRemrangebyrank("GP18_ZREMRANGEBYRANK_KEY", -100, -4, &ret);
+  ASSERT_TRUE(s.ok());
+  ASSERT_EQ(6, ret);
+  ASSERT_TRUE(size_match(&db, "GP18_ZREMRANGEBYRANK_KEY", 3));
+  ASSERT_TRUE(score_members_match(&db, "GP18_ZREMRANGEBYRANK_KEY", {{6, "MM6"}, {7, "MM7"}, {8, "MM8"}}));
+
+  // ***************** Group 19 Test *****************
+  //
+  // {0, MM0} {1, MM1} {2, MM2} {3, MM3} {4, MM4} {5, MM5} {6, MM6} {7, MM7} {8, MM8}
+  //     0        1        2        3        4        5        6        7        8
+  //    -9       -8       -7       -6       -5       -4       -3       -2       -1
+  std::vector<storage::ScoreMember> gp19_sm{{0, "MM0"}, {1, "MM1"}, {2, "MM2"}, {3, "MM3"}, {4, "MM4"},
+                                            {5, "MM5"}, {6, "MM6"}, {7, "MM7"}, {8, "MM8"}};
+  s = db.ZAdd("GP19_ZREMRANGEBYRANK_KEY", gp19_sm, &ret);
+  ASSERT_TRUE(s.ok());
+  ASSERT_EQ(9, ret);
+  ASSERT_TRUE(size_match(&db, "GP19_ZREMRANGEBYRANK_KEY", 9));
+  ASSERT_TRUE(score_members_match(
+      &db, "GP19_ZREMRANGEBYRANK_KEY",
+      {{0, "MM0"}, {1, "MM1"}, {2, "MM2"}, {3, "MM3"}, {4, "MM4"}, {5, "MM5"}, {6, "MM6"}, {7, "MM7"}, {8, "MM8"}}));
+  s = db.ZRemrangebyrank("GP19_ZREMRANGEBYRANK_KEY", 3, 5, &ret);
+  ASSERT_TRUE(s.ok());
+  ASSERT_EQ(3, ret);
+  ASSERT_TRUE(size_match(&db, "GP19_ZREMRANGEBYRANK_KEY", 6));
+  ASSERT_TRUE(score_members_match(&db, "GP19_ZREMRANGEBYRANK_KEY",
+                                  {{0, "MM0"}, {1, "MM1"}, {2, "MM2"}, {6, "MM6"}, {7, "MM7"}, {8, "MM8"}}));
+
+  // ***************** Group 20 Test *****************
+  //
+  // {0, MM0} {1, MM1} {2, MM2} {3, MM3} {4, MM4} {5, MM5} {6, MM6} {7, MM7} {8, MM8}
+  //     0        1        2        3        4        5        6        7        8
+  //    -9       -8       -7       -6       -5       -4       -3       -2       -1
+  std::vector<storage::ScoreMember> gp20_sm{{0, "MM0"}, {1, "MM1"}, {2, "MM2"}, {3, "MM3"}, {4, "MM4"},
+                                            {5, "MM5"}, {6, "MM6"}, {7, "MM7"}, {8, "MM8"}};
+  s = db.ZAdd("GP20_ZREMRANGEBYRANK_KEY", gp20_sm, &ret);
+  ASSERT_TRUE(s.ok());
+  ASSERT_EQ(9, ret);
+  ASSERT_TRUE(size_match(&db, "GP20_ZREMRANGEBYRANK_KEY", 9));
{4, "MM4"}, {5, "MM5"}, {6, "MM6"}, {7, "MM7"}, {8, "MM8"}})); + s = db.ZRemrangebyrank("GP20_ZREMRANGEBYRANK_KEY", -6, -4, &ret); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(3, ret); + ASSERT_TRUE(size_match(&db, "GP20_ZREMRANGEBYRANK_KEY", 6)); + ASSERT_TRUE(score_members_match(&db, "GP20_ZREMRANGEBYRANK_KEY", + {{0, "MM0"}, {1, "MM1"}, {2, "MM2"}, {6, "MM6"}, {7, "MM7"}, {8, "MM8"}})); + + // ***************** Group 21 Test ***************** + // + // {0, MM0} {1, MM1} {2, MM2} {3, MM3} {4, MM4} {5, MM5} {6, MM6} {7, MM7} {8, MM8} + // 0 1 2 3 4 5 6 7 8 + // -9 -8 -7 -6 -5 -4 -3 -2 -1 + std::vector gp21_sm{{0, "MM0"}, {1, "MM1"}, {2, "MM2"}, {3, "MM3"}, {4, "MM4"}, + {5, "MM5"}, {6, "MM6"}, {7, "MM7"}, {8, "MM8"}}; + s = db.ZAdd("GP21_ZREMRANGEBYRANK_KEY", gp21_sm, &ret); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(9, ret); + ASSERT_TRUE(size_match(&db, "GP21_ZREMRANGEBYRANK_KEY", 9)); + ASSERT_TRUE(score_members_match( + &db, "GP21_ZREMRANGEBYRANK_KEY", + {{0, "MM0"}, {1, "MM1"}, {2, "MM2"}, {3, "MM3"}, {4, "MM4"}, {5, "MM5"}, {6, "MM6"}, {7, "MM7"}, {8, "MM8"}})); + s = db.ZRemrangebyrank("GP21_ZREMRANGEBYRANK_KEY", 3, -4, &ret); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(3, ret); + ASSERT_TRUE(size_match(&db, "GP21_ZREMRANGEBYRANK_KEY", 6)); + ASSERT_TRUE(score_members_match(&db, "GP21_ZREMRANGEBYRANK_KEY", + {{0, "MM0"}, {1, "MM1"}, {2, "MM2"}, {6, "MM6"}, {7, "MM7"}, {8, "MM8"}})); + + // ***************** Group 22 Test ***************** + // + // {0, MM0} {1, MM1} {2, MM2} {3, MM3} {4, MM4} {5, MM5} {6, MM6} {7, MM7} {8, MM8} + // 0 1 2 3 4 5 6 7 8 + // -9 -8 -7 -6 -5 -4 -3 -2 -1 + std::vector gp22_sm{{0, "MM0"}, {1, "MM1"}, {2, "MM2"}, {3, "MM3"}, {4, "MM4"}, + {5, "MM5"}, {6, "MM6"}, {7, "MM7"}, {8, "MM8"}}; + s = db.ZAdd("GP22_ZREMRANGEBYRANK_KEY", gp22_sm, &ret); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(9, ret); + ASSERT_TRUE(size_match(&db, "GP22_ZREMRANGEBYRANK_KEY", 9)); + ASSERT_TRUE(score_members_match( + &db, "GP22_ZREMRANGEBYRANK_KEY", + {{0, "MM0"}, {1, "MM1"}, {2, "MM2"}, {3, "MM3"}, {4, "MM4"}, {5, "MM5"}, {6, "MM6"}, {7, "MM7"}, {8, "MM8"}})); + s = db.ZRemrangebyrank("GP22_ZREMRANGEBYRANK_KEY", -6, 5, &ret); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(3, ret); + ASSERT_TRUE(size_match(&db, "GP22_ZREMRANGEBYRANK_KEY", 6)); + ASSERT_TRUE(score_members_match(&db, "GP22_ZREMRANGEBYRANK_KEY", + {{0, "MM0"}, {1, "MM1"}, {2, "MM2"}, {6, "MM6"}, {7, "MM7"}, {8, "MM8"}})); + + // ***************** Group 23 Test ***************** + // + // {0, MM0} {1, MM1} {2, MM2} {3, MM3} {4, MM4} {5, MM5} {6, MM6} {7, MM7} {8, MM8} + // 0 1 2 3 4 5 6 7 8 + // -9 -8 -7 -6 -5 -4 -3 -2 -1 + std::vector gp23_sm{{0, "MM0"}, {1, "MM1"}, {2, "MM2"}, {3, "MM3"}, {4, "MM4"}, + {5, "MM5"}, {6, "MM6"}, {7, "MM7"}, {8, "MM8"}}; + s = db.ZAdd("GP23_ZREMRANGEBYRANK_KEY", gp23_sm, &ret); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(9, ret); + ASSERT_TRUE(size_match(&db, "GP23_ZREMRANGEBYRANK_KEY", 9)); + ASSERT_TRUE(score_members_match( + &db, "GP23_ZREMRANGEBYRANK_KEY", + {{0, "MM0"}, {1, "MM1"}, {2, "MM2"}, {3, "MM3"}, {4, "MM4"}, {5, "MM5"}, {6, "MM6"}, {7, "MM7"}, {8, "MM8"}})); + s = db.ZRemrangebyrank("GP23_ZREMRANGEBYRANK_KEY", 3, 8, &ret); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(6, ret); + ASSERT_TRUE(size_match(&db, "GP23_ZREMRANGEBYRANK_KEY", 3)); + ASSERT_TRUE(score_members_match(&db, "GP23_ZREMRANGEBYRANK_KEY", {{0, "MM0"}, {1, "MM1"}, {2, "MM2"}})); + + // ***************** Group 24 Test ***************** + // + // {0, MM0} {1, MM1} {2, MM2} {3, MM3} {4, MM4} {5, MM5} {6, MM6} {7, MM7} {8, MM8} + // 0 1 2 3 4 5 6 7 8 + // -9 -8 -7 -6 -5 -4 -3 
+  // ***************** Group 24 Test *****************
+  //
+  // {0, MM0} {1, MM1} {2, MM2} {3, MM3} {4, MM4} {5, MM5} {6, MM6} {7, MM7} {8, MM8}
+  //     0        1        2        3        4        5        6        7        8
+  //    -9       -8       -7       -6       -5       -4       -3       -2       -1
+  std::vector<storage::ScoreMember> gp24_sm{{0, "MM0"}, {1, "MM1"}, {2, "MM2"}, {3, "MM3"}, {4, "MM4"},
+                                            {5, "MM5"}, {6, "MM6"}, {7, "MM7"}, {8, "MM8"}};
+  s = db.ZAdd("GP24_ZREMRANGEBYRANK_KEY", gp24_sm, &ret);
+  ASSERT_TRUE(s.ok());
+  ASSERT_EQ(9, ret);
+  ASSERT_TRUE(size_match(&db, "GP24_ZREMRANGEBYRANK_KEY", 9));
+  ASSERT_TRUE(score_members_match(
+      &db, "GP24_ZREMRANGEBYRANK_KEY",
+      {{0, "MM0"}, {1, "MM1"}, {2, "MM2"}, {3, "MM3"}, {4, "MM4"}, {5, "MM5"}, {6, "MM6"}, {7, "MM7"}, {8, "MM8"}}));
+  s = db.ZRemrangebyrank("GP24_ZREMRANGEBYRANK_KEY", -6, -1, &ret);
+  ASSERT_TRUE(s.ok());
+  ASSERT_EQ(6, ret);
+  ASSERT_TRUE(size_match(&db, "GP24_ZREMRANGEBYRANK_KEY", 3));
+  ASSERT_TRUE(score_members_match(&db, "GP24_ZREMRANGEBYRANK_KEY", {{0, "MM0"}, {1, "MM1"}, {2, "MM2"}}));
+
+  // ***************** Group 25 Test *****************
+  //
+  // {0, MM0} {1, MM1} {2, MM2} {3, MM3} {4, MM4} {5, MM5} {6, MM6} {7, MM7} {8, MM8}
+  //     0        1        2        3        4        5        6        7        8
+  //    -9       -8       -7       -6       -5       -4       -3       -2       -1
+  std::vector<storage::ScoreMember> gp25_sm{{0, "MM0"}, {1, "MM1"}, {2, "MM2"}, {3, "MM3"}, {4, "MM4"},
+                                            {5, "MM5"}, {6, "MM6"}, {7, "MM7"}, {8, "MM8"}};
+  s = db.ZAdd("GP25_ZREMRANGEBYRANK_KEY", gp25_sm, &ret);
+  ASSERT_TRUE(s.ok());
+  ASSERT_EQ(9, ret);
+  ASSERT_TRUE(size_match(&db, "GP25_ZREMRANGEBYRANK_KEY", 9));
+  ASSERT_TRUE(score_members_match(
+      &db, "GP25_ZREMRANGEBYRANK_KEY",
+      {{0, "MM0"}, {1, "MM1"}, {2, "MM2"}, {3, "MM3"}, {4, "MM4"}, {5, "MM5"}, {6, "MM6"}, {7, "MM7"}, {8, "MM8"}}));
+  s = db.ZRemrangebyrank("GP25_ZREMRANGEBYRANK_KEY", 3, -1, &ret);
+  ASSERT_TRUE(s.ok());
+  ASSERT_EQ(6, ret);
+  ASSERT_TRUE(size_match(&db, "GP25_ZREMRANGEBYRANK_KEY", 3));
+  ASSERT_TRUE(score_members_match(&db, "GP25_ZREMRANGEBYRANK_KEY", {{0, "MM0"}, {1, "MM1"}, {2, "MM2"}}));
+
+  // ***************** Group 26 Test *****************
+  //
+  // {0, MM0} {1, MM1} {2, MM2} {3, MM3} {4, MM4} {5, MM5} {6, MM6} {7, MM7} {8, MM8}
+  //     0        1        2        3        4        5        6        7        8
+  //    -9       -8       -7       -6       -5       -4       -3       -2       -1
+  std::vector<storage::ScoreMember> gp26_sm{{0, "MM0"}, {1, "MM1"}, {2, "MM2"}, {3, "MM3"}, {4, "MM4"},
+                                            {5, "MM5"}, {6, "MM6"}, {7, "MM7"}, {8, "MM8"}};
+  s = db.ZAdd("GP26_ZREMRANGEBYRANK_KEY", gp26_sm, &ret);
+  ASSERT_TRUE(s.ok());
+  ASSERT_EQ(9, ret);
+  ASSERT_TRUE(size_match(&db, "GP26_ZREMRANGEBYRANK_KEY", 9));
+  ASSERT_TRUE(score_members_match(
+      &db, "GP26_ZREMRANGEBYRANK_KEY",
+      {{0, "MM0"}, {1, "MM1"}, {2, "MM2"}, {3, "MM3"}, {4, "MM4"}, {5, "MM5"}, {6, "MM6"}, {7, "MM7"}, {8, "MM8"}}));
+  s = db.ZRemrangebyrank("GP26_ZREMRANGEBYRANK_KEY", -6, 8, &ret);
+  ASSERT_TRUE(s.ok());
+  ASSERT_EQ(6, ret);
+  ASSERT_TRUE(size_match(&db, "GP26_ZREMRANGEBYRANK_KEY", 3));
+  ASSERT_TRUE(score_members_match(&db, "GP26_ZREMRANGEBYRANK_KEY", {{0, "MM0"}, {1, "MM1"}, {2, "MM2"}}));
+
+  // ***************** Group 27 Test *****************
+  //
+  // {0, MM0} {1, MM1} {2, MM2} {3, MM3} {4, MM4} {5, MM5} {6, MM6} {7, MM7} {8, MM8}
+  //     0        1        2        3        4        5        6        7        8
+  //    -9       -8       -7       -6       -5       -4       -3       -2       -1
+  std::vector<storage::ScoreMember> gp27_sm{{0, "MM0"}, {1, "MM1"}, {2, "MM2"}, {3, "MM3"}, {4, "MM4"},
+                                            {5, "MM5"}, {6, "MM6"}, {7, "MM7"}, {8, "MM8"}};
+  s = db.ZAdd("GP27_ZREMRANGEBYRANK_KEY", gp27_sm, &ret);
+  ASSERT_TRUE(s.ok());
+  ASSERT_EQ(9, ret);
+  ASSERT_TRUE(size_match(&db, "GP27_ZREMRANGEBYRANK_KEY", 9));
+  ASSERT_TRUE(score_members_match(
+      &db, "GP27_ZREMRANGEBYRANK_KEY",
+      {{0, "MM0"}, {1, "MM1"}, {2, "MM2"}, {3, "MM3"}, {4, "MM4"}, {5, "MM5"}, {6, "MM6"}, {7, "MM7"}, {8, "MM8"}}));
+  s = db.ZRemrangebyrank("GP27_ZREMRANGEBYRANK_KEY", -6, 100, &ret);
+  ASSERT_TRUE(s.ok());
+  ASSERT_EQ(6, ret);
"GP27_ZREMRANGEBYRANK_KEY", 3)); + ASSERT_TRUE(score_members_match(&db, "GP27_ZREMRANGEBYRANK_KEY", {{0, "MM0"}, {1, "MM1"}, {2, "MM2"}})); + + // ***************** Group 28 Test ***************** + // + // {0, MM0} {1, MM1} {2, MM2} {3, MM3} {4, MM4} {5, MM5} {6, MM6} {7, MM7} {8, MM8} + // 0 1 2 3 4 5 6 7 8 + // -9 -8 -7 -6 -5 -4 -3 -2 -1 + std::vector gp28_sm{{0, "MM0"}, {1, "MM1"}, {2, "MM2"}, {3, "MM3"}, {4, "MM4"}, + {5, "MM5"}, {6, "MM6"}, {7, "MM7"}, {8, "MM8"}}; + s = db.ZAdd("GP28_ZREMRANGEBYRANK_KEY", gp28_sm, &ret); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(9, ret); + ASSERT_TRUE(size_match(&db, "GP28_ZREMRANGEBYRANK_KEY", 9)); + ASSERT_TRUE(score_members_match( + &db, "GP28_ZREMRANGEBYRANK_KEY", + {{0, "MM0"}, {1, "MM1"}, {2, "MM2"}, {3, "MM3"}, {4, "MM4"}, {5, "MM5"}, {6, "MM6"}, {7, "MM7"}, {8, "MM8"}})); + s = db.ZRemrangebyrank("GP28_ZREMRANGEBYRANK_KEY", 3, 100, &ret); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(6, ret); + ASSERT_TRUE(size_match(&db, "GP28_ZREMRANGEBYRANK_KEY", 3)); + ASSERT_TRUE(score_members_match(&db, "GP28_ZREMRANGEBYRANK_KEY", {{0, "MM0"}, {1, "MM1"}, {2, "MM2"}})); + + // ***************** Group 29 Test ***************** + // + // {0, MM0} {1, MM1} {2, MM2} {3, MM3} {4, MM4} {5, MM5} {6, MM6} {7, MM7} {8, MM8} + // 0 1 2 3 4 5 6 7 8 + // -9 -8 -7 -6 -5 -4 -3 -2 -1 + std::vector gp29_sm{{0, "MM0"}, {1, "MM1"}, {2, "MM2"}, {3, "MM3"}, {4, "MM4"}, + {5, "MM5"}, {6, "MM6"}, {7, "MM7"}, {8, "MM8"}}; + s = db.ZAdd("GP29_ZREMRANGEBYRANK_KEY", gp29_sm, &ret); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(9, ret); + ASSERT_TRUE(size_match(&db, "GP29_ZREMRANGEBYRANK_KEY", 9)); + ASSERT_TRUE(score_members_match( + &db, "GP29_ZREMRANGEBYRANK_KEY", + {{0, "MM0"}, {1, "MM1"}, {2, "MM2"}, {3, "MM3"}, {4, "MM4"}, {5, "MM5"}, {6, "MM6"}, {7, "MM7"}, {8, "MM8"}})); + ASSERT_TRUE(make_expired(&db, "GP29_ZREMRANGEBYRANK_KEY")); + s = db.ZRemrangebyrank("GP29_ZREMRANGEBYRANK_KEY", 0, 0, &ret); + ASSERT_TRUE(s.IsNotFound()); + ASSERT_EQ(0, ret); + ASSERT_TRUE(size_match(&db, "GP29_ZREMRANGEBYRANK_KEY", 0)); + ASSERT_TRUE(score_members_match(&db, "GP29_ZREMRANGEBYRANK_KEY", {})); + + // ***************** Group 30 Test ***************** + s = db.ZRemrangebyrank("GP30_ZREMRANGEBYRANK_KEY", 0, 0, &ret); + ASSERT_TRUE(s.IsNotFound()); + ASSERT_EQ(0, ret); + ASSERT_TRUE(size_match(&db, "GP30_ZREMRANGEBYRANK_KEY", 0)); + ASSERT_TRUE(score_members_match(&db, "GP30_ZREMRANGEBYRANK_KEY", {})); +} + +// ZRemrangebyscore +TEST_F(ZSetsTest, ZRemrangebyscoreTest) { // NOLINT + int32_t ret; + + // ***************** Group 1 Test ***************** + std::vector gp1_sm{{-987654321.0000001, "MM1"}, + {-87654321.00000001, "MM2"}, + {-7654321.000000001, "MM3"}, + {-654321.0000000001, "MM4"}, + {-54321.00000000001, "MM5"}, + {-4321.000000000001, "MM6"}, + {-1000.000000000001, "MM7"}, + {-1000.000000000001, "MM8"}, + {-1000.000000000001, "MM9"}, + {-100.0000000000001, "MM10"}, + {0, "MM11"}, + {100.0000000000001, "MM12"}, + {4321.000000000001, "MM13"}, + {54321.00000000001, "MM14"}, + {654321.0000000001, "MM15"}, + {7654321.000000001, "MM16"}, + {87654321.00000001, "MM17"}, + {987654321.0000001, "MM18"}}; + + s = db.ZAdd("GP1_ZREMRANGEBYSCORE_KEY", gp1_sm, &ret); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(18, ret); + + s = db.ZRemrangebyscore("GP1_ZREMRANGEBYSCORE_KEY", std::numeric_limits::lowest(), + std::numeric_limits::max(), true, true, &ret); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(18, ret); + ASSERT_TRUE(size_match(&db, "GP1_ZREMRANGEBYSCORE_KEY", 0)); + ASSERT_TRUE(score_members_match(&db, "GP1_ZREMRANGEBYSCORE_KEY", {})); + 
+  // ***************** Group 2 Test *****************
+  std::vector<storage::ScoreMember> gp2_sm{
+      {-987654321.0000001, "MM1"}, {-87654321.00000001, "MM2"}, {-7654321.000000001, "MM3"},
+      {-654321.0000000001, "MM4"}, {-54321.00000000001, "MM5"}, {-4321.000000000001, "MM6"},
+      {-1000.000000000001, "MM7"}, {-1000.000000000001, "MM8"}, {-1000.000000000001, "MM9"},
+      {-100.0000000000001, "MM10"}, {0, "MM11"}, {100.0000000000001, "MM12"},
+      {4321.000000000001, "MM13"}, {54321.00000000001, "MM14"}, {654321.0000000001, "MM15"},
+      {7654321.000000001, "MM16"}, {87654321.00000001, "MM17"}, {987654321.0000001, "MM18"}};
+
+  s = db.ZAdd("GP2_ZREMRANGEBYSCORE_KEY", gp2_sm, &ret);
+  ASSERT_TRUE(s.ok());
+  ASSERT_EQ(18, ret);
+  s = db.ZRemrangebyscore("GP2_ZREMRANGEBYSCORE_KEY", -10000000000, -999999999, true, true, &ret);
+  ASSERT_TRUE(s.ok());
+  ASSERT_EQ(0, ret);
+  ASSERT_TRUE(size_match(&db, "GP2_ZREMRANGEBYSCORE_KEY", 18));
+  ASSERT_TRUE(score_members_match(&db, "GP2_ZREMRANGEBYSCORE_KEY",
+                                  {{-987654321.0000001, "MM1"}, {-87654321.00000001, "MM2"},
+                                   {-7654321.000000001, "MM3"}, {-654321.0000000001, "MM4"},
+                                   {-54321.00000000001, "MM5"}, {-4321.000000000001, "MM6"},
+                                   {-1000.000000000001, "MM7"}, {-1000.000000000001, "MM8"},
+                                   {-1000.000000000001, "MM9"}, {-100.0000000000001, "MM10"},
+                                   {0, "MM11"}, {100.0000000000001, "MM12"},
+                                   {4321.000000000001, "MM13"}, {54321.00000000001, "MM14"},
+                                   {654321.0000000001, "MM15"}, {7654321.000000001, "MM16"},
+                                   {87654321.00000001, "MM17"}, {987654321.0000001, "MM18"}}));
+
+  // ***************** Group 3 Test *****************
+  std::vector<storage::ScoreMember> gp3_sm{
+      {-987654321.0000001, "MM1"}, {-87654321.00000001, "MM2"}, {-7654321.000000001, "MM3"},
+      {-654321.0000000001, "MM4"}, {-54321.00000000001, "MM5"}, {-4321.000000000001, "MM6"},
+      {-1000.000000000001, "MM7"}, {-1000.000000000001, "MM8"}, {-1000.000000000001, "MM9"},
+      {-100.0000000000001, "MM10"}, {0, "MM11"}, {100.0000000000001, "MM12"},
+      {4321.000000000001, "MM13"}, {54321.00000000001, "MM14"}, {654321.0000000001, "MM15"},
+      {7654321.000000001, "MM16"}, {87654321.00000001, "MM17"}, {987654321.0000001, "MM18"}};
+
+  s = db.ZAdd("GP3_ZREMRANGEBYSCORE_KEY", gp3_sm, &ret);
+  ASSERT_TRUE(s.ok());
+  ASSERT_EQ(18, ret);
+  s = db.ZRemrangebyscore("GP3_ZREMRANGEBYSCORE_KEY", -987654321.0000001, -7654321.000000001, true, true, &ret);
+  ASSERT_TRUE(s.ok());
+  ASSERT_EQ(3, ret);
+  ASSERT_TRUE(size_match(&db, "GP3_ZREMRANGEBYSCORE_KEY", 15));
+  ASSERT_TRUE(score_members_match(&db, "GP3_ZREMRANGEBYSCORE_KEY",
+                                  {{-654321.0000000001, "MM4"}, {-54321.00000000001, "MM5"},
+                                   {-4321.000000000001, "MM6"}, {-1000.000000000001, "MM7"},
+                                   {-1000.000000000001, "MM8"}, {-1000.000000000001, "MM9"},
+                                   {-100.0000000000001, "MM10"}, {0, "MM11"}, {100.0000000000001, "MM12"},
+                                   {4321.000000000001, "MM13"}, {54321.00000000001, "MM14"},
+                                   {654321.0000000001, "MM15"}, {7654321.000000001, "MM16"},
+                                   {87654321.00000001, "MM17"}, {987654321.0000001, "MM18"}}));
+
+  // ***************** Group 4 Test *****************
"MM16"}, + {87654321.00000001, "MM17"}, + {987654321.0000001, "MM18"}}; + + s = db.ZAdd("GP4_ZREMRANGEBYSCORE_KEY", gp4_sm, &ret); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(18, ret); + s = db.ZRemrangebyscore("GP4_ZREMRANGEBYSCORE_KEY", -999999999, -4321.000000000001, true, true, &ret); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(6, ret); + ASSERT_TRUE(size_match(&db, "GP4_ZREMRANGEBYSCORE_KEY", 12)); + ASSERT_TRUE(score_members_match(&db, "GP4_ZREMRANGEBYSCORE_KEY", + {{-1000.000000000001, "MM7"}, + {-1000.000000000001, "MM8"}, + {-1000.000000000001, "MM9"}, + {-100.0000000000001, "MM10"}, + {0, "MM11"}, + {100.0000000000001, "MM12"}, + {4321.000000000001, "MM13"}, + {54321.00000000001, "MM14"}, + {654321.0000000001, "MM15"}, + {7654321.000000001, "MM16"}, + {87654321.00000001, "MM17"}, + {987654321.0000001, "MM18"}})); + + // ***************** Group 5 Test ***************** + std::vector gp5_sm{{-987654321.0000001, "MM1"}, + {-87654321.00000001, "MM2"}, + {-7654321.000000001, "MM3"}, + {-654321.0000000001, "MM4"}, + {-54321.00000000001, "MM5"}, + {-4321.000000000001, "MM6"}, + {-1000.000000000001, "MM7"}, + {-1000.000000000001, "MM8"}, + {-1000.000000000001, "MM9"}, + {-100.0000000000001, "MM10"}, + {0, "MM11"}, + {100.0000000000001, "MM12"}, + {4321.000000000001, "MM13"}, + {54321.00000000001, "MM14"}, + {654321.0000000001, "MM15"}, + {7654321.000000001, "MM16"}, + {87654321.00000001, "MM17"}, + {987654321.0000001, "MM18"}}; + + s = db.ZAdd("GP5_ZREMRANGEBYSCORE_KEY", gp5_sm, &ret); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(18, ret); + s = db.ZRemrangebyscore("GP5_ZREMRANGEBYSCORE_KEY", -1000.000000000001, -1000.000000000001, true, true, &ret); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(3, ret); + ASSERT_TRUE(size_match(&db, "GP5_ZREMRANGEBYSCORE_KEY", 15)); + ASSERT_TRUE(score_members_match(&db, "GP5_ZREMRANGEBYSCORE_KEY", + {{-987654321.0000001, "MM1"}, + {-87654321.00000001, "MM2"}, + {-7654321.000000001, "MM3"}, + {-654321.0000000001, "MM4"}, + {-54321.00000000001, "MM5"}, + {-4321.000000000001, "MM6"}, + {-100.0000000000001, "MM10"}, + {0, "MM11"}, + {100.0000000000001, "MM12"}, + {4321.000000000001, "MM13"}, + {54321.00000000001, "MM14"}, + {654321.0000000001, "MM15"}, + {7654321.000000001, "MM16"}, + {87654321.00000001, "MM17"}, + {987654321.0000001, "MM18"}})); + + // ***************** Group 6 Test ***************** + std::vector gp6_sm{{-987654321.0000001, "MM1"}, + {-87654321.00000001, "MM2"}, + {-7654321.000000001, "MM3"}, + {-654321.0000000001, "MM4"}, + {-54321.00000000001, "MM5"}, + {-4321.000000000001, "MM6"}, + {-1000.000000000001, "MM7"}, + {-1000.000000000001, "MM8"}, + {-1000.000000000001, "MM9"}, + {-100.0000000000001, "MM10"}, + {0, "MM11"}, + {100.0000000000001, "MM12"}, + {4321.000000000001, "MM13"}, + {54321.00000000001, "MM14"}, + {654321.0000000001, "MM15"}, + {7654321.000000001, "MM16"}, + {87654321.00000001, "MM17"}, + {987654321.0000001, "MM18"}}; + + s = db.ZAdd("GP6_ZREMRANGEBYSCORE_KEY", gp6_sm, &ret); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(18, ret); + s = db.ZRemrangebyscore("GP6_ZREMRANGEBYSCORE_KEY", -100.0000000000001, 100.0000000000001, true, true, &ret); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(3, ret); + ASSERT_TRUE(size_match(&db, "GP6_ZREMRANGEBYSCORE_KEY", 15)); + ASSERT_TRUE(score_members_match(&db, "GP6_ZREMRANGEBYSCORE_KEY", + {{-987654321.0000001, "MM1"}, + {-87654321.00000001, "MM2"}, + {-7654321.000000001, "MM3"}, + {-654321.0000000001, "MM4"}, + {-54321.00000000001, "MM5"}, + {-4321.000000000001, "MM6"}, + {-1000.000000000001, "MM7"}, + {-1000.000000000001, "MM8"}, + 
{-1000.000000000001, "MM9"}, + {4321.000000000001, "MM13"}, + {54321.00000000001, "MM14"}, + {654321.0000000001, "MM15"}, + {7654321.000000001, "MM16"}, + {87654321.00000001, "MM17"}, + {987654321.0000001, "MM18"}})); + + // ***************** Group 7 Test ***************** + std::vector gp7_sm{{-987654321.0000001, "MM1"}, + {-87654321.00000001, "MM2"}, + {-7654321.000000001, "MM3"}, + {-654321.0000000001, "MM4"}, + {-54321.00000000001, "MM5"}, + {-4321.000000000001, "MM6"}, + {-1000.000000000001, "MM7"}, + {-1000.000000000001, "MM8"}, + {-1000.000000000001, "MM9"}, + {-100.0000000000001, "MM10"}, + {0, "MM11"}, + {100.0000000000001, "MM12"}, + {4321.000000000001, "MM13"}, + {54321.00000000001, "MM14"}, + {654321.0000000001, "MM15"}, + {7654321.000000001, "MM16"}, + {87654321.00000001, "MM17"}, + {987654321.0000001, "MM18"}}; + + s = db.ZAdd("GP7_ZREMRANGEBYSCORE_KEY", gp7_sm, &ret); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(18, ret); + s = db.ZRemrangebyscore("GP7_ZREMRANGEBYSCORE_KEY", 0, 0, true, true, &ret); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(1, ret); + ASSERT_TRUE(size_match(&db, "GP7_ZREMRANGEBYSCORE_KEY", 17)); + ASSERT_TRUE(score_members_match(&db, "GP7_ZREMRANGEBYSCORE_KEY", + {{-987654321.0000001, "MM1"}, + {-87654321.00000001, "MM2"}, + {-7654321.000000001, "MM3"}, + {-654321.0000000001, "MM4"}, + {-54321.00000000001, "MM5"}, + {-4321.000000000001, "MM6"}, + {-1000.000000000001, "MM7"}, + {-1000.000000000001, "MM8"}, + {-1000.000000000001, "MM9"}, + {-100.0000000000001, "MM10"}, + {100.0000000000001, "MM12"}, + {4321.000000000001, "MM13"}, + {54321.00000000001, "MM14"}, + {654321.0000000001, "MM15"}, + {7654321.000000001, "MM16"}, + {87654321.00000001, "MM17"}, + {987654321.0000001, "MM18"}})); + + // ***************** Group 8 Test ***************** + std::vector gp8_sm{{-987654321.0000001, "MM1"}, + {-87654321.00000001, "MM2"}, + {-7654321.000000001, "MM3"}, + {-654321.0000000001, "MM4"}, + {-54321.00000000001, "MM5"}, + {-4321.000000000001, "MM6"}, + {-1000.000000000001, "MM7"}, + {-1000.000000000001, "MM8"}, + {-1000.000000000001, "MM9"}, + {-100.0000000000001, "MM10"}, + {0, "MM11"}, + {100.0000000000001, "MM12"}, + {4321.000000000001, "MM13"}, + {54321.00000000001, "MM14"}, + {654321.0000000001, "MM15"}, + {7654321.000000001, "MM16"}, + {87654321.00000001, "MM17"}, + {987654321.0000001, "MM18"}}; + + s = db.ZAdd("GP8_ZREMRANGEBYSCORE_KEY", gp8_sm, &ret); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(18, ret); + s = db.ZRemrangebyscore("GP8_ZREMRANGEBYSCORE_KEY", 4321.000000000001, 654321.0000000001, true, true, &ret); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(3, ret); + ASSERT_TRUE(size_match(&db, "GP8_ZREMRANGEBYSCORE_KEY", 15)); + ASSERT_TRUE(score_members_match(&db, "GP8_ZREMRANGEBYSCORE_KEY", + {{-987654321.0000001, "MM1"}, + {-87654321.00000001, "MM2"}, + {-7654321.000000001, "MM3"}, + {-654321.0000000001, "MM4"}, + {-54321.00000000001, "MM5"}, + {-4321.000000000001, "MM6"}, + {-1000.000000000001, "MM7"}, + {-1000.000000000001, "MM8"}, + {-1000.000000000001, "MM9"}, + {-100.0000000000001, "MM10"}, + {0, "MM11"}, + {100.0000000000001, "MM12"}, + {7654321.000000001, "MM16"}, + {87654321.00000001, "MM17"}, + {987654321.0000001, "MM18"}})); + + // ***************** Group 9 Test ***************** + std::vector gp9_sm{{-987654321.0000001, "MM1"}, + {-87654321.00000001, "MM2"}, + {-7654321.000000001, "MM3"}, + {-654321.0000000001, "MM4"}, + {-54321.00000000001, "MM5"}, + {-4321.000000000001, "MM6"}, + {-1000.000000000001, "MM7"}, + {-1000.000000000001, "MM8"}, + {-1000.000000000001, "MM9"}, + 
{-100.0000000000001, "MM10"}, + {0, "MM11"}, + {100.0000000000001, "MM12"}, + {4321.000000000001, "MM13"}, + {54321.00000000001, "MM14"}, + {654321.0000000001, "MM15"}, + {7654321.000000001, "MM16"}, + {87654321.00000001, "MM17"}, + {987654321.0000001, "MM18"}}; + + s = db.ZAdd("GP9_ZREMRANGEBYSCORE_KEY", gp9_sm, &ret); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(18, ret); + s = db.ZRemrangebyscore("GP9_ZREMRANGEBYSCORE_KEY", 4321.000000000001, 987654321.0000001, true, true, &ret); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(6, ret); + ASSERT_TRUE(size_match(&db, "GP9_ZREMRANGEBYSCORE_KEY", 12)); + ASSERT_TRUE(score_members_match(&db, "GP9_ZREMRANGEBYSCORE_KEY", + {{-987654321.0000001, "MM1"}, + {-87654321.00000001, "MM2"}, + {-7654321.000000001, "MM3"}, + {-654321.0000000001, "MM4"}, + {-54321.00000000001, "MM5"}, + {-4321.000000000001, "MM6"}, + {-1000.000000000001, "MM7"}, + {-1000.000000000001, "MM8"}, + {-1000.000000000001, "MM9"}, + {-100.0000000000001, "MM10"}, + {0, "MM11"}, + {100.0000000000001, "MM12"}})); + + // ***************** Group 10 Test ***************** + std::vector gp10_sm{{-987654321.0000001, "MM1"}, + {-87654321.00000001, "MM2"}, + {-7654321.000000001, "MM3"}, + {-654321.0000000001, "MM4"}, + {-54321.00000000001, "MM5"}, + {-4321.000000000001, "MM6"}, + {-1000.000000000001, "MM7"}, + {-1000.000000000001, "MM8"}, + {-1000.000000000001, "MM9"}, + {-100.0000000000001, "MM10"}, + {0, "MM11"}, + {100.0000000000001, "MM12"}, + {4321.000000000001, "MM13"}, + {54321.00000000001, "MM14"}, + {654321.0000000001, "MM15"}, + {7654321.000000001, "MM16"}, + {87654321.00000001, "MM17"}, + {987654321.0000001, "MM18"}}; + + s = db.ZAdd("GP10_ZREMRANGEBYSCORE_KEY", gp10_sm, &ret); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(18, ret); + s = db.ZRemrangebyscore("GP10_ZREMRANGEBYSCORE_KEY", 987654321.0000001, 987654321.0000001, true, true, &ret); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(1, ret); + ASSERT_TRUE(size_match(&db, "GP10_ZREMRANGEBYSCORE_KEY", 17)); + ASSERT_TRUE(score_members_match(&db, "GP10_ZREMRANGEBYSCORE_KEY", + {{-987654321.0000001, "MM1"}, + {-87654321.00000001, "MM2"}, + {-7654321.000000001, "MM3"}, + {-654321.0000000001, "MM4"}, + {-54321.00000000001, "MM5"}, + {-4321.000000000001, "MM6"}, + {-1000.000000000001, "MM7"}, + {-1000.000000000001, "MM8"}, + {-1000.000000000001, "MM9"}, + {-100.0000000000001, "MM10"}, + {0, "MM11"}, + {100.0000000000001, "MM12"}, + {4321.000000000001, "MM13"}, + {54321.00000000001, "MM14"}, + {654321.0000000001, "MM15"}, + {7654321.000000001, "MM16"}, + {87654321.00000001, "MM17"}})); + + // ***************** Group 11 Test ***************** + std::vector gp11_sm{{-987654321.0000001, "MM1"}, + {-87654321.00000001, "MM2"}, + {-7654321.000000001, "MM3"}, + {-654321.0000000001, "MM4"}, + {-54321.00000000001, "MM5"}, + {-4321.000000000001, "MM6"}, + {-1000.000000000001, "MM7"}, + {-1000.000000000001, "MM8"}, + {-1000.000000000001, "MM9"}, + {-100.0000000000001, "MM10"}, + {0, "MM11"}, + {100.0000000000001, "MM12"}, + {4321.000000000001, "MM13"}, + {54321.00000000001, "MM14"}, + {654321.0000000001, "MM15"}, + {7654321.000000001, "MM16"}, + {87654321.00000001, "MM17"}, + {987654321.0000001, "MM18"}}; + + s = db.ZAdd("GP11_ZREMRANGEBYSCORE_KEY", gp11_sm, &ret); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(18, ret); + ASSERT_TRUE(make_expired(&db, "GP11_ZREMRANGEBYSCORE_KEY")); + + s = db.ZRemrangebyscore("GP11_ZREMRANGEBYSCORE_KEY", std::numeric_limits::lowest(), + std::numeric_limits::max(), true, true, &ret); + ASSERT_TRUE(s.IsNotFound()); + ASSERT_EQ(0, ret); + ASSERT_TRUE(size_match(&db, 
"GP11_ZREMRANGEBYSCORE_KEY", 0)); + ASSERT_TRUE(score_members_match(&db, "GP11_ZREMRANGEBYSCORE_KEY", {})); + + // ***************** Group 12 Test ***************** + s = db.ZRemrangebyscore("GP12_ZREMRANGEBYSCORE_KEY", std::numeric_limits::lowest(), + std::numeric_limits::max(), true, true, &ret); + ASSERT_TRUE(s.IsNotFound()); + ASSERT_EQ(0, ret); + ASSERT_TRUE(size_match(&db, "GP12_ZREMRANGEBYSCORE_KEY", 0)); + + // ***************** Group 13 Test ***************** + std::vector gp13_sm{{0, "MM0"}}; + + s = db.ZAdd("GP13_ZREMRANGEBYSCORE_KEY", gp13_sm, &ret); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(1, ret); + + s = db.ZRemrangebyscore("GP13_ZREMRANGEBYSCORE_KEY", -1, 1, true, true, &ret); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(1, ret); + ASSERT_TRUE(size_match(&db, "GP13_ZREMRANGEBYSCORE_KEY", 0)); + ASSERT_TRUE(score_members_match(&db, "GP13_ZREMRANGEBYSCORE_KEY", {})); + + // ***************** Group 14 Test ***************** + std::vector gp14_sm{{-987654321.0000001, "MM1"}, + {-87654321.00000001, "MM2"}, + {-7654321.000000001, "MM3"}, + {-654321.0000000001, "MM4"}, + {-54321.00000000001, "MM5"}, + {-4321.000000000001, "MM6"}, + {-1000.000000000001, "MM7"}, + {-1000.000000000001, "MM8"}, + {-1000.000000000001, "MM9"}, + {-100.0000000000001, "MM10"}, + {0, "MM11"}, + {100.0000000000001, "MM12"}, + {4321.000000000001, "MM13"}, + {54321.00000000001, "MM14"}, + {654321.0000000001, "MM15"}, + {7654321.000000001, "MM16"}, + {87654321.00000001, "MM17"}, + {987654321.0000001, "MM18"}}; + + s = db.ZAdd("GP14_ZREMRANGEBYSCORE_KEY", gp14_sm, &ret); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(18, ret); + s = db.ZRemrangebyscore("GP14_ZREMRANGEBYSCORE_KEY", -987654321.0000001, -7654321.000000001, false, false, &ret); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(1, ret); + ASSERT_TRUE(size_match(&db, "GP14_ZREMRANGEBYSCORE_KEY", 17)); + ASSERT_TRUE(score_members_match(&db, "GP14_ZREMRANGEBYSCORE_KEY", + {{-987654321.0000001, "MM1"}, + {-7654321.000000001, "MM3"}, + {-654321.0000000001, "MM4"}, + {-54321.00000000001, "MM5"}, + {-4321.000000000001, "MM6"}, + {-1000.000000000001, "MM7"}, + {-1000.000000000001, "MM8"}, + {-1000.000000000001, "MM9"}, + {-100.0000000000001, "MM10"}, + {0, "MM11"}, + {100.0000000000001, "MM12"}, + {4321.000000000001, "MM13"}, + {54321.00000000001, "MM14"}, + {654321.0000000001, "MM15"}, + {7654321.000000001, "MM16"}, + {87654321.00000001, "MM17"}, + {987654321.0000001, "MM18"}})); + + // ***************** Group 15 Test ***************** + std::vector gp15_sm{{-987654321.0000001, "MM1"}, + {-87654321.00000001, "MM2"}, + {-7654321.000000001, "MM3"}, + {-654321.0000000001, "MM4"}, + {-54321.00000000001, "MM5"}, + {-4321.000000000001, "MM6"}, + {-1000.000000000001, "MM7"}, + {-1000.000000000001, "MM8"}, + {-1000.000000000001, "MM9"}, + {-100.0000000000001, "MM10"}, + {0, "MM11"}, + {100.0000000000001, "MM12"}, + {4321.000000000001, "MM13"}, + {54321.00000000001, "MM14"}, + {654321.0000000001, "MM15"}, + {7654321.000000001, "MM16"}, + {87654321.00000001, "MM17"}, + {987654321.0000001, "MM18"}}; + + s = db.ZAdd("GP15_ZREMRANGEBYSCORE_KEY", gp15_sm, &ret); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(18, ret); + s = db.ZRemrangebyscore("GP15_ZREMRANGEBYSCORE_KEY", -987654321.0000001, -7654321.000000001, true, false, &ret); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(2, ret); + ASSERT_TRUE(size_match(&db, "GP15_ZREMRANGEBYSCORE_KEY", 16)); + ASSERT_TRUE(score_members_match(&db, "GP15_ZREMRANGEBYSCORE_KEY", + {{-7654321.000000001, "MM3"}, + {-654321.0000000001, "MM4"}, + {-54321.00000000001, "MM5"}, + {-4321.000000000001, 
"MM6"}, + {-1000.000000000001, "MM7"}, + {-1000.000000000001, "MM8"}, + {-1000.000000000001, "MM9"}, + {-100.0000000000001, "MM10"}, + {0, "MM11"}, + {100.0000000000001, "MM12"}, + {4321.000000000001, "MM13"}, + {54321.00000000001, "MM14"}, + {654321.0000000001, "MM15"}, + {7654321.000000001, "MM16"}, + {87654321.00000001, "MM17"}, + {987654321.0000001, "MM18"}})); + + // ***************** Group 16 Test ***************** + std::vector gp16_sm{{-987654321.0000001, "MM1"}, + {-87654321.00000001, "MM2"}, + {-7654321.000000001, "MM3"}, + {-654321.0000000001, "MM4"}, + {-54321.00000000001, "MM5"}, + {-4321.000000000001, "MM6"}, + {-1000.000000000001, "MM7"}, + {-1000.000000000001, "MM8"}, + {-1000.000000000001, "MM9"}, + {-100.0000000000001, "MM10"}, + {0, "MM11"}, + {100.0000000000001, "MM12"}, + {4321.000000000001, "MM13"}, + {54321.00000000001, "MM14"}, + {654321.0000000001, "MM15"}, + {7654321.000000001, "MM16"}, + {87654321.00000001, "MM17"}, + {987654321.0000001, "MM18"}}; + + s = db.ZAdd("GP16_ZREMRANGEBYSCORE_KEY", gp16_sm, &ret); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(18, ret); + s = db.ZRemrangebyscore("GP16_ZREMRANGEBYSCORE_KEY", -987654321.0000001, -7654321.000000001, false, true, &ret); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(2, ret); + ASSERT_TRUE(size_match(&db, "GP16_ZREMRANGEBYSCORE_KEY", 16)); + ASSERT_TRUE(score_members_match(&db, "GP16_ZREMRANGEBYSCORE_KEY", + {{-987654321.0000001, "MM1"}, + {-654321.0000000001, "MM4"}, + {-54321.00000000001, "MM5"}, + {-4321.000000000001, "MM6"}, + {-1000.000000000001, "MM7"}, + {-1000.000000000001, "MM8"}, + {-1000.000000000001, "MM9"}, + {-100.0000000000001, "MM10"}, + {0, "MM11"}, + {100.0000000000001, "MM12"}, + {4321.000000000001, "MM13"}, + {54321.00000000001, "MM14"}, + {654321.0000000001, "MM15"}, + {7654321.000000001, "MM16"}, + {87654321.00000001, "MM17"}, + {987654321.0000001, "MM18"}})); +} + +// ZRevrange +TEST_F(ZSetsTest, ZRevrangeTest) { // NOLINT + int32_t ret; + std::vector score_members; + + // ***************** Group 1 Test ***************** + std::vector gp1_sm{{0, "MM1"}}; + s = db.ZAdd("GP1_ZREVRANGE_KEY", gp1_sm, &ret); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(1, ret); + ASSERT_TRUE(size_match(&db, "GP1_ZREVRANGE_KEY", 1)); + ASSERT_TRUE(score_members_match(&db, "GP1_ZREVRANGE_KEY", {{0, "MM1"}})); + + s = db.ZRevrange("GP1_ZREVRANGE_KEY", 0, -1, &score_members); + ASSERT_TRUE(s.ok()); + ASSERT_TRUE(score_members_match(score_members, {{0, "MM1"}})); + + s = db.ZRevrange("GP1_ZREVRANGE_KEY", 0, 0, &score_members); + ASSERT_TRUE(s.ok()); + ASSERT_TRUE(score_members_match(score_members, {{0, "MM1"}})); + + s = db.ZRevrange("GP1_ZREVRANGE_KEY", -1, -1, &score_members); + ASSERT_TRUE(s.ok()); + ASSERT_TRUE(score_members_match(score_members, {{0, "MM1"}})); + + // ***************** Group 2 Test ***************** + // + // {0, MM0} {1, MM1} {2, MM2} {3, MM3} {4, MM4} {5, MM5} {6, MM6} {7, MM7} {8, MM8} + // 8 7 6 5 4 3 2 1 0 + // -1 -2 -3 -4 -5 -6 -7 -8 -9 + std::vector gp2_sm{{0, "MM0"}, {1, "MM1"}, {2, "MM2"}, {3, "MM3"}, {4, "MM4"}, + {5, "MM5"}, {6, "MM6"}, {7, "MM7"}, {8, "MM8"}}; + s = db.ZAdd("GP2_ZREVRANGE_KEY", gp2_sm, &ret); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(9, ret); + ASSERT_TRUE(size_match(&db, "GP2_ZREVRANGE_KEY", 9)); + ASSERT_TRUE(score_members_match( + &db, "GP2_ZREVRANGE_KEY", + {{0, "MM0"}, {1, "MM1"}, {2, "MM2"}, {3, "MM3"}, {4, "MM4"}, {5, "MM5"}, {6, "MM6"}, {7, "MM7"}, {8, "MM8"}})); + + s = db.ZRevrange("GP2_ZREVRANGE_KEY", 0, 8, &score_members); + ASSERT_TRUE(s.ok()); + ASSERT_TRUE(score_members_match( + 
score_members, + {{8, "MM8"}, {7, "MM7"}, {6, "MM6"}, {5, "MM5"}, {4, "MM4"}, {3, "MM3"}, {2, "MM2"}, {1, "MM1"}, {0, "MM0"}})); + + s = db.ZRevrange("GP2_ZREVRANGE_KEY", -9, -1, &score_members); + ASSERT_TRUE(s.ok()); + ASSERT_TRUE(score_members_match( + score_members, + {{8, "MM8"}, {7, "MM7"}, {6, "MM6"}, {5, "MM5"}, {4, "MM4"}, {3, "MM3"}, {2, "MM2"}, {1, "MM1"}, {0, "MM0"}})); + + s = db.ZRevrange("GP2_ZREVRANGE_KEY", 0, -1, &score_members); + ASSERT_TRUE(s.ok()); + ASSERT_TRUE(score_members_match( + score_members, + {{8, "MM8"}, {7, "MM7"}, {6, "MM6"}, {5, "MM5"}, {4, "MM4"}, {3, "MM3"}, {2, "MM2"}, {1, "MM1"}, {0, "MM0"}})); + + s = db.ZRevrange("GP2_ZREVRANGE_KEY", -9, 8, &score_members); + ASSERT_TRUE(s.ok()); + ASSERT_TRUE(score_members_match( + score_members, + {{8, "MM8"}, {7, "MM7"}, {6, "MM6"}, {5, "MM5"}, {4, "MM4"}, {3, "MM3"}, {2, "MM2"}, {1, "MM1"}, {0, "MM0"}})); + + s = db.ZRevrange("GP2_ZREVRANGE_KEY", -100, 8, &score_members); + ASSERT_TRUE(s.ok()); + ASSERT_TRUE(score_members_match( + score_members, + {{8, "MM8"}, {7, "MM7"}, {6, "MM6"}, {5, "MM5"}, {4, "MM4"}, {3, "MM3"}, {2, "MM2"}, {1, "MM1"}, {0, "MM0"}})); + + s = db.ZRevrange("GP2_ZREVRANGE_KEY", 0, 100, &score_members); + ASSERT_TRUE(s.ok()); + ASSERT_TRUE(score_members_match( + score_members, + {{8, "MM8"}, {7, "MM7"}, {6, "MM6"}, {5, "MM5"}, {4, "MM4"}, {3, "MM3"}, {2, "MM2"}, {1, "MM1"}, {0, "MM0"}})); + + s = db.ZRevrange("GP2_ZREVRANGE_KEY", -100, 100, &score_members); + ASSERT_TRUE(s.ok()); + ASSERT_TRUE(score_members_match( + score_members, + {{8, "MM8"}, {7, "MM7"}, {6, "MM6"}, {5, "MM5"}, {4, "MM4"}, {3, "MM3"}, {2, "MM2"}, {1, "MM1"}, {0, "MM0"}})); + + s = db.ZRevrange("GP2_ZREVRANGE_KEY", 0, 0, &score_members); + ASSERT_TRUE(s.ok()); + ASSERT_TRUE(score_members_match(score_members, {{8, "MM8"}})); + + s = db.ZRevrange("GP2_ZREVRANGE_KEY", -9, -9, &score_members); + ASSERT_TRUE(s.ok()); + ASSERT_TRUE(score_members_match(score_members, {{8, "MM8"}})); + + s = db.ZRevrange("GP2_ZREVRANGE_KEY", 8, 8, &score_members); + ASSERT_TRUE(s.ok()); + ASSERT_TRUE(score_members_match(score_members, {{0, "MM0"}})); + + s = db.ZRevrange("GP2_ZREVRANGE_KEY", -1, -1, &score_members); + ASSERT_TRUE(s.ok()); + ASSERT_TRUE(score_members_match(score_members, {{0, "MM0"}})); + + s = db.ZRevrange("GP2_ZREVRANGE_KEY", 0, 5, &score_members); + ASSERT_TRUE(s.ok()); + ASSERT_TRUE( + score_members_match(score_members, {{8, "MM8"}, {7, "MM7"}, {6, "MM6"}, {5, "MM5"}, {4, "MM4"}, {3, "MM3"}})); + + s = db.ZRevrange("GP2_ZREVRANGE_KEY", 0, -4, &score_members); + ASSERT_TRUE(s.ok()); + ASSERT_TRUE( + score_members_match(score_members, {{8, "MM8"}, {7, "MM7"}, {6, "MM6"}, {5, "MM5"}, {4, "MM4"}, {3, "MM3"}})); + + s = db.ZRevrange("GP2_ZREVRANGE_KEY", -9, -4, &score_members); + ASSERT_TRUE(s.ok()); + ASSERT_TRUE( + score_members_match(score_members, {{8, "MM8"}, {7, "MM7"}, {6, "MM6"}, {5, "MM5"}, {4, "MM4"}, {3, "MM3"}})); + + s = db.ZRevrange("GP2_ZREVRANGE_KEY", -9, 5, &score_members); + ASSERT_TRUE(s.ok()); + ASSERT_TRUE( + score_members_match(score_members, {{8, "MM8"}, {7, "MM7"}, {6, "MM6"}, {5, "MM5"}, {4, "MM4"}, {3, "MM3"}})); + + s = db.ZRevrange("GP2_ZREVRANGE_KEY", -100, 5, &score_members); + ASSERT_TRUE(s.ok()); + ASSERT_TRUE( + score_members_match(score_members, {{8, "MM8"}, {7, "MM7"}, {6, "MM6"}, {5, "MM5"}, {4, "MM4"}, {3, "MM3"}})); + + s = db.ZRevrange("GP2_ZREVRANGE_KEY", -100, -4, &score_members); + ASSERT_TRUE(s.ok()); + ASSERT_TRUE( + score_members_match(score_members, {{8, "MM8"}, {7, "MM7"}, {6, "MM6"}, {5, 
"MM5"}, {4, "MM4"}, {3, "MM3"}})); + + s = db.ZRevrange("GP2_ZREVRANGE_KEY", 3, 5, &score_members); + ASSERT_TRUE(s.ok()); + ASSERT_TRUE(score_members_match(score_members, {{5, "MM5"}, {4, "MM4"}, {3, "MM3"}})); + + s = db.ZRevrange("GP2_ZREVRANGE_KEY", -6, -4, &score_members); + ASSERT_TRUE(s.ok()); + ASSERT_TRUE(score_members_match(score_members, {{5, "MM5"}, {4, "MM4"}, {3, "MM3"}})); + + s = db.ZRevrange("GP2_ZREVRANGE_KEY", 3, -4, &score_members); + ASSERT_TRUE(s.ok()); + ASSERT_TRUE(score_members_match(score_members, {{5, "MM5"}, {4, "MM4"}, {3, "MM3"}})); + + s = db.ZRevrange("GP2_ZREVRANGE_KEY", -6, 5, &score_members); + ASSERT_TRUE(s.ok()); + ASSERT_TRUE(score_members_match(score_members, {{5, "MM5"}, {4, "MM4"}, {3, "MM3"}})); + + s = db.ZRevrange("GP2_ZREVRANGE_KEY", 3, 8, &score_members); + ASSERT_TRUE(s.ok()); + ASSERT_TRUE( + score_members_match(score_members, {{5, "MM5"}, {4, "MM4"}, {3, "MM3"}, {2, "MM2"}, {1, "MM1"}, {0, "MM0"}})); + + s = db.ZRevrange("GP2_ZREVRANGE_KEY", -6, -1, &score_members); + ASSERT_TRUE(s.ok()); + ASSERT_TRUE( + score_members_match(score_members, {{5, "MM5"}, {4, "MM4"}, {3, "MM3"}, {2, "MM2"}, {1, "MM1"}, {0, "MM0"}})); + + s = db.ZRevrange("GP2_ZREVRANGE_KEY", 3, -1, &score_members); + ASSERT_TRUE(s.ok()); + ASSERT_TRUE( + score_members_match(score_members, {{5, "MM5"}, {4, "MM4"}, {3, "MM3"}, {2, "MM2"}, {1, "MM1"}, {0, "MM0"}})); + + s = db.ZRevrange("GP2_ZREVRANGE_KEY", -6, 8, &score_members); + ASSERT_TRUE(s.ok()); + ASSERT_TRUE( + score_members_match(score_members, {{5, "MM5"}, {4, "MM4"}, {3, "MM3"}, {2, "MM2"}, {1, "MM1"}, {0, "MM0"}})); + + s = db.ZRevrange("GP2_ZREVRANGE_KEY", -6, 100, &score_members); + ASSERT_TRUE(s.ok()); + ASSERT_TRUE( + score_members_match(score_members, {{5, "MM5"}, {4, "MM4"}, {3, "MM3"}, {2, "MM2"}, {1, "MM1"}, {0, "MM0"}})); + + s = db.ZRevrange("GP2_ZREVRANGE_KEY", 3, 100, &score_members); + ASSERT_TRUE(s.ok()); + ASSERT_TRUE( + score_members_match(score_members, {{5, "MM5"}, {4, "MM4"}, {3, "MM3"}, {2, "MM2"}, {1, "MM1"}, {0, "MM0"}})); + + // ***************** Group 3 Test ***************** + std::vector gp3_sm1{{0, "MM0"}, {1, "MM1"}, {2, "MM2"}}; + std::vector gp3_sm2{{0, "MM0"}, {1, "MM1"}, {2, "MM2"}}; + std::vector gp3_sm3{{0, "MM0"}, {1, "MM1"}, {2, "MM2"}}; + s = db.ZAdd("GP3_ZREVRANGE_KEY1", gp3_sm1, &ret); + ASSERT_TRUE(s.ok()); + s = db.ZAdd("GP3_ZREVRANGE_KEY2", gp3_sm2, &ret); + ASSERT_TRUE(s.ok()); + s = db.ZAdd("GP3_ZREVRANGE_KEY3", gp3_sm3, &ret); + ASSERT_TRUE(s.ok()); + + s = db.ZRevrange("GP3_ZREVRANGE_KEY2", 0, -1, &score_members); + ASSERT_TRUE(s.ok()); + ASSERT_TRUE(score_members_match(score_members, {{2, "MM2"}, {1, "MM1"}, {0, "MM0"}})); + + s = db.ZRevrange("GP3_ZREVRANGE_KEY2", 0, 0, &score_members); + ASSERT_TRUE(s.ok()); + ASSERT_TRUE(score_members_match(score_members, {{2, "MM2"}})); + + s = db.ZRevrange("GP3_ZREVRANGE_KEY2", -1, -1, &score_members); + ASSERT_TRUE(s.ok()); + ASSERT_TRUE(score_members_match(score_members, {{0, "MM0"}})); + + s = db.ZRevrange("GP3_ZREVRANGE_KEY2", 1, 1, &score_members); + ASSERT_TRUE(s.ok()); + ASSERT_TRUE(score_members_match(score_members, {{1, "MM1"}})); + + // ***************** Group 4 Test ***************** + std::vector gp4_sm{{0, "MM1"}}; + s = db.ZAdd("GP4_ZREVRANGE_KEY", gp4_sm, &ret); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(1, ret); + ASSERT_TRUE(size_match(&db, "GP4_ZREVRANGE_KEY", 1)); + ASSERT_TRUE(score_members_match(&db, "GP4_ZREVRANGE_KEY", {{0, "MM1"}})); + ASSERT_TRUE(make_expired(&db, "GP4_ZREVRANGE_KEY")); + + s = 
db.ZRevrange("GP4_ZREVRANGE_KEY", 0, -1, &score_members); + ASSERT_TRUE(s.IsNotFound()); + ASSERT_TRUE(score_members_match(score_members, {})); + + // ***************** Group 5 Test ***************** + s = db.ZRevrange("GP5_ZREVRANGE_KEY", 0, -1, &score_members); + ASSERT_TRUE(s.IsNotFound()); + ASSERT_TRUE(score_members_match(score_members, {})); +} + +// TODO(@tangruilin): 修复测试代码 +// ZRevrangebyscore +// TEST_F(ZSetsTest, ZRevrangebyscoreTest) { // NOLINT +// int32_t ret; +// std::vector score_members; + +// // ***************** Group 1 Test ***************** +// std::vector gp1_sm {{-987654321.0000001, "MM1" }, {-87654321.00000001, "MM2" }, +// {-7654321.000000001, "MM3" }, +// {-654321.0000000001, "MM4" }, {-54321.00000000001, "MM5" }, +// {-4321.000000000001, "MM6" }, +// {-1000.000000000001, "MM7" }, {-1000.000000000001, "MM8" }, +// {-1000.000000000001, "MM9" }, +// {-100.0000000000001, "MM10"}, { 0, "MM11"}, { +// 100.0000000000001, "MM12"}, { 4321.000000000001, "MM13"}, { +// 54321.00000000001, "MM14"}, { 654321.0000000001, "MM15"}, { +// 7654321.000000001, "MM16"}, { 87654321.00000001, "MM17"}, { +// 987654321.0000001, "MM18"}}; + +// s = db.ZAdd("GP1_ZREVRANGEBYSCORE_KEY", gp1_sm, &ret); +// ASSERT_TRUE(s.ok()); +// ASSERT_EQ(18, ret); + +// s = db.ZRevrangebyscore("GP1_ZREVRANGEBYSCORE_KEY", std::numeric_limits::lowest(), +// std::numeric_limits::max(), true, true, &score_members); ASSERT_TRUE(s.ok()); +// ASSERT_TRUE(score_members_match(score_members, {{ 987654321.0000001, "MM18"}, { 87654321.00000001, "MM17"}, { +// 7654321.000000001, "MM16"}, +// { 654321.0000000001, "MM15"}, { 54321.00000000001, "MM14"}, { +// 4321.000000000001, "MM13"}, { 100.0000000000001, "MM12"}, { 0, +// "MM11"}, {-100.0000000000001, "MM10"}, +// {-1000.000000000001, "MM9" }, {-1000.000000000001, "MM8" }, +// {-1000.000000000001, "MM7" }, +// {-4321.000000000001, "MM6" }, {-54321.00000000001, "MM5" }, +// {-654321.0000000001, "MM4" }, +// {-7654321.000000001, "MM3" }, {-87654321.00000001, "MM2" }, +// {-987654321.0000001, "MM1" }})); + +// // count = max offset = 0 +// s = db.ZRevrangebyscore("GP1_ZRANGEBYSCORE_KEY", std::numeric_limits::lowest(), +// std::numeric_limits::max(), true, true, std::numeric_limits::max(), 0, &score_members); +// ASSERT_TRUE(s.ok()); +// ASSERT_TRUE(score_members_match(score_members, {{ 987654321.0000001, "MM18"}, { 87654321.00000001, "MM17"}, { +// 7654321.000000001, "MM16"}, +// { 654321.0000000001, "MM15"}, { 54321.00000000001, "MM14"}, { +// 4321.000000000001, "MM13"}, { 100.0000000000001, "MM12"}, { 0, +// "MM11"}, {-100.0000000000001, "MM10"}, +// {-1000.000000000001, "MM9" }, {-1000.000000000001, "MM8" }, +// {-1000.000000000001, "MM7" }, +// {-4321.000000000001, "MM6" }, {-54321.00000000001, "MM5" }, +// {-654321.0000000001, "MM4" }, +// {-7654321.000000001, "MM3" }, {-87654321.00000001, "MM2" }, +// {-987654321.0000001, "MM1" }})); + +// // count = 18 offset = 0 +// s = db.ZRevrangebyscore("GP1_ZRANGEBYSCORE_KEY", std::numeric_limits::lowest(), +// std::numeric_limits::max(), true, true, 18, 0, &score_members); ASSERT_TRUE(s.ok()); +// ASSERT_TRUE(score_members_match(score_members, {{ 987654321.0000001, "MM18"}, { 87654321.00000001, "MM17"}, { +// 7654321.000000001, "MM16"}, +// { 654321.0000000001, "MM15"}, { 54321.00000000001, "MM14"}, { +// 4321.000000000001, "MM13"}, { 100.0000000000001, "MM12"}, { 0, +// "MM11"}, {-100.0000000000001, "MM10"}, +// {-1000.000000000001, "MM9" }, {-1000.000000000001, "MM8" }, +// {-1000.000000000001, "MM7" }, +// 
{-4321.000000000001, "MM6" }, {-54321.00000000001, "MM5" }, +// {-654321.0000000001, "MM4" }, +// {-7654321.000000001, "MM3" }, {-87654321.00000001, "MM2" }, +// {-987654321.0000001, "MM1" }})); + +// // count = 10 offset = 0 +// s = db.ZRevrangebyscore("GP1_ZRANGEBYSCORE_KEY", std::numeric_limits::lowest(), +// std::numeric_limits::max(), true, true, 10, 0, &score_members); ASSERT_TRUE(s.ok()); +// ASSERT_TRUE(score_members_match(score_members, {{ 987654321.0000001, "MM18"}, { 87654321.00000001, "MM17"}, { +// 7654321.000000001, "MM16"}, +// { 654321.0000000001, "MM15"}, { 54321.00000000001, "MM14"}, { +// 4321.000000000001, "MM13"}, { 100.0000000000001, "MM12"}, { 0, +// "MM11"}, {-100.0000000000001, "MM10"}, +// {-1000.000000000001, "MM9" }})); +// // count = 10 offset = 1 +// s = db.ZRevrangebyscore("GP1_ZRANGEBYSCORE_KEY", std::numeric_limits::lowest(), +// std::numeric_limits::max(), true, true, 10, 1, &score_members); ASSERT_TRUE(s.ok()); +// ASSERT_TRUE(score_members_match(score_members, { { 87654321.00000001, "MM17"}, { +// 7654321.000000001, "MM16"}, +// { 654321.0000000001, "MM15"}, { 54321.00000000001, "MM14"}, { +// 4321.000000000001, "MM13"}, { 100.0000000000001, "MM12"}, { 0, +// "MM11"}, {-100.0000000000001, "MM10"}, +// {-1000.000000000001, "MM9" }, {-1000.000000000001, "MM8" }})); + +// // count = 10 offset = 2 +// s = db.ZRevrangebyscore("GP1_ZRANGEBYSCORE_KEY", std::numeric_limits::lowest(), +// std::numeric_limits::max(), true, true, 10, 2, &score_members); ASSERT_TRUE(s.ok()); +// ASSERT_TRUE(score_members_match(score_members, { { +// 7654321.000000001, "MM16"}, +// { 654321.0000000001, "MM15"}, { 54321.00000000001, "MM14"}, { +// 4321.000000000001, "MM13"}, { 100.0000000000001, "MM12"}, { 0, +// "MM11"}, {-100.0000000000001, "MM10"}, +// {-1000.000000000001, "MM9" }, {-1000.000000000001, "MM8" }, +// {-1000.000000000001, "MM7" }})); + +// // count = 10 offset = 17 +// s = db.ZRevrangebyscore("GP1_ZRANGEBYSCORE_KEY", std::numeric_limits::lowest(), +// std::numeric_limits::max(), true, true, 10, 17, &score_members); ASSERT_TRUE(s.ok()); +// ASSERT_TRUE(score_members_match(score_members, {{-987654321.0000001, "MM1" }})); + +// // count = 10 offset = 18 +// s = db.ZRevrangebyscore("GP1_ZRANGEBYSCORE_KEY", std::numeric_limits::lowest(), +// std::numeric_limits::max(), true, true, 10, 18, &score_members); ASSERT_TRUE(s.ok()); +// ASSERT_TRUE(score_members_match(score_members, {})); + +// // count = 10 offset = 19 +// s = db.ZRevrangebyscore("GP1_ZRANGEBYSCORE_KEY", std::numeric_limits::lowest(), +// std::numeric_limits::max(), true, true, 10, 19, &score_members); ASSERT_TRUE(s.ok()); +// ASSERT_TRUE(score_members_match(score_members, {})); + +// // count = 10000 offset = 1 +// s = db.ZRevrangebyscore("GP1_ZRANGEBYSCORE_KEY", std::numeric_limits::lowest(), +// std::numeric_limits::max(), true, true, 10000, 1, &score_members); ASSERT_TRUE(s.ok()); +// ASSERT_TRUE(score_members_match(score_members, { { 87654321.00000001, "MM17"}, { +// 7654321.000000001, "MM16"}, +// { 654321.0000000001, "MM15"}, { 54321.00000000001, "MM14"}, { +// 4321.000000000001, "MM13"}, { 100.0000000000001, "MM12"}, { 0, +// "MM11"}, {-100.0000000000001, "MM10"}, +// {-1000.000000000001, "MM9" }, {-1000.000000000001, "MM8" }, +// {-1000.000000000001, "MM7" }, +// {-4321.000000000001, "MM6" }, {-54321.00000000001, "MM5" }, +// {-654321.0000000001, "MM4" }, +// {-7654321.000000001, "MM3" }, {-87654321.00000001, "MM2" }, +// {-987654321.0000001, "MM1" }})); + +// // count = 10000 offset = 10000 +// s = 
db.ZRevrangebyscore("GP1_ZRANGEBYSCORE_KEY", std::numeric_limits::lowest(), +// std::numeric_limits::max(), true, true, 10000, 10000, &score_members); ASSERT_TRUE(s.ok()); +// ASSERT_TRUE(score_members_match(score_members, {})); + +// s = db.ZRevrangebyscore("GP1_ZREVRANGEBYSCORE_KEY", std::numeric_limits::lowest(), -1000.000000000001, +// true, true, &score_members); ASSERT_TRUE(s.ok()); ASSERT_TRUE(score_members_match(score_members, +// {{-1000.000000000001, "MM9" }, {-1000.000000000001, "MM8" }, {-1000.000000000001, "MM7" }, +// {-4321.000000000001, "MM6" }, {-54321.00000000001, "MM5" }, +// {-654321.0000000001, "MM4" }, +// {-7654321.000000001, "MM3" }, {-87654321.00000001, "MM2" }, +// {-987654321.0000001, "MM1" }})); + +// s = db.ZRevrangebyscore("GP1_ZREVRANGEBYSCORE_KEY", std::numeric_limits::lowest(), -1000.000000000001, +// true, false, &score_members); ASSERT_TRUE(s.ok()); ASSERT_TRUE(score_members_match(score_members, +// {{-4321.000000000001, "MM6" }, {-54321.00000000001, "MM5" }, {-654321.0000000001, "MM4" }, +// {-7654321.000000001, "MM3" }, {-87654321.00000001, "MM2" }, +// {-987654321.0000001, "MM1" }})); + +// s = db.ZRevrangebyscore("GP1_ZREVRANGEBYSCORE_KEY", -1000.000000000001, std::numeric_limits::max(), true, +// true, &score_members); ASSERT_TRUE(s.ok()); ASSERT_TRUE(score_members_match(score_members, {{ 987654321.0000001, +// "MM18"}, { 87654321.00000001, "MM17"}, { 7654321.000000001, "MM16"}, +// { 654321.0000000001, "MM15"}, { 54321.00000000001, "MM14"}, { +// 4321.000000000001, "MM13"}, { 100.0000000000001, "MM12"}, { 0, +// "MM11"}, {-100.0000000000001, "MM10"}, +// {-1000.000000000001, "MM9" }, {-1000.000000000001, "MM8" }, +// {-1000.000000000001, "MM7" }})); + +// s = db.ZRevrangebyscore("GP1_ZREVRANGEBYSCORE_KEY", -1000.000000000001, std::numeric_limits::max(), false, +// true, &score_members); ASSERT_TRUE(s.ok()); ASSERT_TRUE(score_members_match(score_members, {{ 987654321.0000001, +// "MM18"}, { 87654321.00000001, "MM17"}, { 7654321.000000001, "MM16"}, +// { 654321.0000000001, "MM15"}, { 54321.00000000001, "MM14"}, { +// 4321.000000000001, "MM13"}, { 100.0000000000001, "MM12"}, { 0, +// "MM11"}, {-100.0000000000001, "MM10"}})); + +// s = db.ZRevrangebyscore("GP1_ZREVRANGEBYSCORE_KEY", -987654321.0000001, 987654321.0000001, true, true, +// &score_members); ASSERT_TRUE(s.ok()); ASSERT_TRUE(score_members_match(score_members, {{ 987654321.0000001, "MM18"}, +// { 87654321.00000001, "MM17"}, { 7654321.000000001, "MM16"}, +// { 654321.0000000001, "MM15"}, { 54321.00000000001, "MM14"}, { +// 4321.000000000001, "MM13"}, { 100.0000000000001, "MM12"}, { 0, +// "MM11"}, {-100.0000000000001, "MM10"}, +// {-1000.000000000001, "MM9" }, {-1000.000000000001, "MM8" }, +// {-1000.000000000001, "MM7" }, +// {-4321.000000000001, "MM6" }, {-54321.00000000001, "MM5" }, +// {-654321.0000000001, "MM4" }, +// {-7654321.000000001, "MM3" }, {-87654321.00000001, "MM2" }, +// {-987654321.0000001, "MM1" }})); + +// s = db.ZRevrangebyscore("GP1_ZREVRANGEBYSCORE_KEY", -987654321.0000001, 987654321.0000001, false, false, +// &score_members); ASSERT_TRUE(s.ok()); ASSERT_TRUE(score_members_match(score_members, { { 87654321.00000001, +// "MM17"}, { 7654321.000000001, "MM16"}, +// { 654321.0000000001, "MM15"}, { 54321.00000000001, "MM14"}, { +// 4321.000000000001, "MM13"}, { 100.0000000000001, "MM12"}, { 0, +// "MM11"}, {-100.0000000000001, "MM10"}, +// {-1000.000000000001, "MM9" }, {-1000.000000000001, "MM8" }, +// {-1000.000000000001, "MM7" }, +// {-4321.000000000001, "MM6" }, 
{-54321.00000000001, "MM5" }, +// {-654321.0000000001, "MM4" }, +// {-7654321.000000001, "MM3" }, {-87654321.00000001, "MM2" }, })); + +// s = db.ZRevrangebyscore("GP1_ZREVRANGEBYSCORE_KEY", -999999999, -1000.000000000001 , true, true, &score_members); +// ASSERT_TRUE(s.ok()); +// ASSERT_TRUE(score_members_match(score_members, {{-1000.000000000001, "MM9" }, {-1000.000000000001, "MM8" }, +// {-1000.000000000001, "MM7" }, +// {-4321.000000000001, "MM6" }, {-54321.00000000001, "MM5" }, +// {-654321.0000000001, "MM4" }, +// {-7654321.000000001, "MM3" }, {-87654321.00000001, "MM2" }, +// {-987654321.0000001, "MM1" }})); + +// s = db.ZRevrangebyscore("GP1_ZREVRANGEBYSCORE_KEY", -999999999, -1000.000000000001 , true, false, &score_members); +// ASSERT_TRUE(s.ok()); +// ASSERT_TRUE(score_members_match(score_members, {{-4321.000000000001, "MM6" }, {-54321.00000000001, "MM5" }, +// {-654321.0000000001, "MM4" }, +// {-7654321.000000001, "MM3" }, {-87654321.00000001, "MM2" }, +// {-987654321.0000001, "MM1" }})); + +// s = db.ZRevrangebyscore("GP1_ZREVRANGEBYSCORE_KEY", -654321.0000000001, -4321.000000000001, true, true, +// &score_members); ASSERT_TRUE(s.ok()); ASSERT_TRUE(score_members_match(score_members, {{-4321.000000000001, "MM6" }, +// {-54321.00000000001, "MM5" }, {-654321.0000000001, "MM4" }})); + +// s = db.ZRevrangebyscore("GP1_ZREVRANGEBYSCORE_KEY", -654321.0000000001, -4321.000000000001, false, false, +// &score_members); ASSERT_TRUE(s.ok()); ASSERT_TRUE(score_members_match(score_members, {{-54321.00000000001, "MM5" +// }})); + +// s = db.ZRevrangebyscore("GP1_ZREVRANGEBYSCORE_KEY", 0, 0, true, true, &score_members); +// ASSERT_TRUE(s.ok()); +// ASSERT_TRUE(score_members_match(score_members, {{0, "MM11"}})); + +// s = db.ZRevrangebyscore("GP1_ZREVRANGEBYSCORE_KEY", 0, 0, false, true, &score_members); +// ASSERT_TRUE(s.ok()); +// ASSERT_TRUE(score_members_match(score_members, {})); + +// s = db.ZRevrangebyscore("GP1_ZREVRANGEBYSCORE_KEY", 4321.000000000001, 987654321.0000001, true, true, +// &score_members); ASSERT_TRUE(s.ok()); ASSERT_TRUE(score_members_match(score_members, {{ 987654321.0000001, "MM18"}, +// { 87654321.00000001, "MM17"}, { 7654321.000000001, "MM16"}, +// { 654321.0000000001, "MM15"}, { 54321.00000000001, "MM14"}, { +// 4321.000000000001, "MM13"}})); + +// s = db.ZRevrangebyscore("GP1_ZREVRANGEBYSCORE_KEY", 4321.000000000001, 987654321.0000001, false, true, +// &score_members); ASSERT_TRUE(s.ok()); ASSERT_TRUE(score_members_match(score_members, {{ 987654321.0000001, "MM18"}, +// { 87654321.00000001, "MM17"}, { 7654321.000000001, "MM16"}, +// { 654321.0000000001, "MM15"}, { 54321.00000000001, "MM14"}, })); + +// s = db.ZRevrangebyscore("GP1_ZREVRANGEBYSCORE_KEY", 4321.000000000001, 987654321.0000001, false, false, +// &score_members); ASSERT_TRUE(s.ok()); ASSERT_TRUE(score_members_match(score_members, { { 87654321.00000001, +// "MM17"}, { 7654321.000000001, "MM16"}, +// { 654321.0000000001, "MM15"}, { 54321.00000000001, "MM14"}, })); + +// s = db.ZRevrangebyscore("GP1_ZREVRANGEBYSCORE_KEY", -1000.000000000001, 987654321.0000001, true, true, +// &score_members); ASSERT_TRUE(s.ok()); ASSERT_TRUE(score_members_match(score_members, {{ 987654321.0000001, "MM18"}, +// { 87654321.00000001, "MM17"}, { 7654321.000000001, "MM16"}, +// { 654321.0000000001, "MM15"}, { 54321.00000000001, "MM14"}, { +// 4321.000000000001, "MM13"}, { 100.0000000000001, "MM12"}, { 0, +// "MM11"}, {-100.0000000000001, "MM10"}, +// {-1000.000000000001, "MM9" }, {-1000.000000000001, "MM8" }, +// 
{-1000.000000000001, "MM7" }})); + +// s = db.ZRevrangebyscore("GP1_ZREVRANGEBYSCORE_KEY", -1000.000000000001, 987654321.0000001, false, true, +// &score_members); ASSERT_TRUE(s.ok()); ASSERT_TRUE(score_members_match(score_members, {{ 987654321.0000001, "MM18"}, +// { 87654321.00000001, "MM17"}, { 7654321.000000001, "MM16"}, +// { 654321.0000000001, "MM15"}, { 54321.00000000001, "MM14"}, { +// 4321.000000000001, "MM13"}, { 100.0000000000001, "MM12"}, { 0, +// "MM11"}, {-100.0000000000001, "MM10"}})); + +// s = db.ZRevrangebyscore("GP1_ZREVRANGEBYSCORE_KEY", 999999999, std::numeric_limits::max(), true, true, +// &score_members); ASSERT_TRUE(s.ok()); ASSERT_TRUE(score_members_match(score_members, {})); + +// // ***************** Group 2 Test ***************** +// std::vector gp2_sm {{-5, "MM0"}, {-3, "MM1"}, {-1, "MM2"}, +// {0, "MM3"}, {1, "MM4"}, {3, "MM5"}, +// {5, "MM6"}}; +// s = db.ZAdd("GP2_ZREVRANGEBYSCORE_KEY", gp2_sm, &ret); +// ASSERT_TRUE(s.ok()); +// ASSERT_EQ(7, ret); +// ASSERT_TRUE(make_expired(&db, "GP2_ZREVRANGEBYSCORE_KEY")); +// s = db.ZRevrangebyscore("GP2_ZREVRANGEBYSCORE_KEY", std::numeric_limits::lowest(), +// std::numeric_limits::max(), true, true, &score_members); ASSERT_TRUE(s.IsNotFound()); +// ASSERT_TRUE(score_members_match(score_members, {})); + +// // ***************** Group 3 Test ***************** +// s = db.ZRevrangebyscore("GP3_ZREVRANGEBYSCORE_KEY", std::numeric_limits::lowest(), +// std::numeric_limits::max(), true, true, &score_members); ASSERT_TRUE(s.IsNotFound()); +// ASSERT_TRUE(score_members_match(score_members, {})); + +// // ***************** Group 4 Test ***************** +// std::vector gp4_sm {{-1000000000.0000000001, "MM0"}, +// {0, "MM1"}, +// { 1000000000.0000000001, "MM2"}}; +// s = db.ZAdd("GP4_ZREVRANGEBYSCORE_KEY", gp4_sm, &ret); +// ASSERT_TRUE(s.ok()); +// ASSERT_EQ(3, ret); + +// s = db.ZRevrangebyscore("GP4_ZREVRANGEBYSCORE_KEY", -1000000000.0000000001, 1000000000.0000000001, true, true, +// &score_members); ASSERT_TRUE(s.ok()); ASSERT_TRUE(score_members_match(score_members, {{1000000000.0000000001, +// "MM2"}, {0, "MM1"}, {-1000000000.0000000001, "MM0"}})); + +// s = db.ZRevrangebyscore("GP4_ZREVRANGEBYSCORE_KEY", -1000000000.0000000001, 1000000000.0000000001, false, false, +// &score_members); ASSERT_TRUE(s.ok()); ASSERT_TRUE(score_members_match(score_members, {{0, "MM1"}})); + +// s = db.ZRevrangebyscore("GP4_ZREVRANGEBYSCORE_KEY", -1000000000.0000000001, 1000000000.0000000001, true, false, +// &score_members); ASSERT_TRUE(s.ok()); ASSERT_TRUE(score_members_match(score_members, {{0, "MM1"}, +// {-1000000000.0000000001, "MM0"}})); + +// s = db.ZRevrangebyscore("GP4_ZREVRANGEBYSCORE_KEY", -1000000000.0000000001, 1000000000.0000000001, false, true, +// &score_members); ASSERT_TRUE(s.ok()); ASSERT_TRUE(score_members_match(score_members, {{1000000000.0000000001, +// "MM2"}, {0, "MM1"}})); +// } + +// ZRevrank +TEST_F(ZSetsTest, ZRevrankTest) { // NOLINT + int32_t ret; + int32_t rank; + + // ***************** Group 1 Test ***************** + // {-5, MM0} {-3, MM1} {-1, MM2} {0, MM3} {1, MM4} {3, MM5} {5, MM6} + // 6 5 4 3 2 1 0 + std::vector gp1_sm{{-5, "MM0"}, {-3, "MM1"}, {-1, "MM2"}, {0, "MM3"}, + {1, "MM4"}, {3, "MM5"}, {5, "MM6"}}; + s = db.ZAdd("GP1_ZREVRANK_KEY", gp1_sm, &ret); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(7, ret); + + s = db.ZRevrank("GP1_ZREVRANK_KEY", "MM0", &rank); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(rank, 6); + + s = db.ZRevrank("GP1_ZREVRANK_KEY", "MM2", &rank); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(rank, 4); + + s = 
db.ZRevrank("GP1_ZREVRANK_KEY", "MM4", &rank); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(rank, 2); + + s = db.ZRevrank("GP1_ZREVRANK_KEY", "MM6", &rank); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(rank, 0); + + s = db.ZRevrank("GP1_ZREVRANK_KEY", "MM", &rank); + ASSERT_TRUE(s.IsNotFound()); + ASSERT_EQ(rank, -1); + + // ***************** Group 2 Test ***************** + std::vector gp2_sm{{-5, "MM0"}, {-3, "MM1"}, {-1, "MM2"}, {0, "MM3"}, + {1, "MM4"}, {3, "MM5"}, {5, "MM6"}}; + s = db.ZAdd("GP2_ZREVRANK_KEY", gp2_sm, &ret); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(7, ret); + ASSERT_TRUE(make_expired(&db, "GP2_ZREVRANK_KEY")); + + s = db.ZRevrank("GP2_ZREVRANK_KEY", "MM0", &rank); + ASSERT_TRUE(s.IsNotFound()); + ASSERT_EQ(-1, rank); + + // ***************** Group 3 Test ***************** + s = db.ZRevrank("GP3_ZREVRANK_KEY", "MM0", &rank); + ASSERT_TRUE(s.IsNotFound()); + ASSERT_EQ(-1, rank); +} + +// ZSCORE +TEST_F(ZSetsTest, ZScoreTest) { // NOLINT + int32_t ret; + double score; + + // ***************** Group 1 Test ***************** + std::vector gp1_sm{{54354.497895352, "MM1"}, {100.987654321, "MM2"}, {-100.000000001, "MM3"}, + {-100.000000002, "MM4"}, {-100.000000001, "MM5"}, {-100.000000002, "MM6"}}; + s = db.ZAdd("GP1_ZSCORE_KEY", gp1_sm, &ret); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(6, ret); + ASSERT_TRUE(size_match(&db, "GP1_ZSCORE_KEY", 6)); + ASSERT_TRUE(score_members_match(&db, "GP1_ZSCORE_KEY", + {{-100.000000002, "MM4"}, + {-100.000000002, "MM6"}, + {-100.000000001, "MM3"}, + {-100.000000001, "MM5"}, + {100.987654321, "MM2"}, + {54354.497895352, "MM1"}})); + s = db.ZScore("GP1_ZSCORE_KEY", "MM1", &score); + ASSERT_TRUE(s.ok()); + ASSERT_DOUBLE_EQ(54354.497895352, score); + + s = db.ZScore("GP1_ZSCORE_KEY", "MM2", &score); + ASSERT_TRUE(s.ok()); + ASSERT_DOUBLE_EQ(100.987654321, score); + + s = db.ZScore("GP1_ZSCORE_KEY", "MM3", &score); + ASSERT_TRUE(s.ok()); + ASSERT_DOUBLE_EQ(-100.000000001, score); + + s = db.ZScore("GP1_ZSCORE_KEY", "MM4", &score); + ASSERT_TRUE(s.ok()); + ASSERT_DOUBLE_EQ(-100.000000002, score); + + s = db.ZScore("GP1_ZSCORE_KEY", "MM5", &score); + ASSERT_TRUE(s.ok()); + ASSERT_DOUBLE_EQ(-100.000000001, score); + + s = db.ZScore("GP1_ZSCORE_KEY", "MM6", &score); + ASSERT_TRUE(s.ok()); + ASSERT_DOUBLE_EQ(-100.000000002, score); + + s = db.ZScore("GP1_ZSCORE_KEY", "MM7", &score); + ASSERT_TRUE(s.IsNotFound()); + ASSERT_DOUBLE_EQ(0, score); + + // ***************** Group 2 Test ***************** + std::vector gp2_sm{{4, "MM1"}, {3, "MM2"}, {2, "MM3"}, {1, "MM4"}}; + s = db.ZAdd("GP2_ZSCORE_KEY", gp2_sm, &ret); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(4, ret); + ASSERT_TRUE(size_match(&db, "GP2_ZSCORE_KEY", 4)); + ASSERT_TRUE(score_members_match(&db, "GP2_ZSCORE_KEY", {{1, "MM4"}, {2, "MM3"}, {3, "MM2"}, {4, "MM1"}})); + ASSERT_TRUE(make_expired(&db, "GP2_ZSCORE_KEY")); + s = db.ZScore("GP2_ZSCORE_KEY", "MM1", &score); + ASSERT_TRUE(s.IsNotFound()); + ASSERT_DOUBLE_EQ(0, score); + + // ***************** Group 3 Test ***************** + s = db.ZScore("GP3_ZSCORE_KEY", "MM1", &score); + ASSERT_TRUE(s.IsNotFound()); + ASSERT_DOUBLE_EQ(0, score); +} + +// ZUNIONSTORE +TEST_F(ZSetsTest, ZUnionstoreTest) { // NOLINT + int32_t ret; + + // ***************** Group 1 Test ***************** + // { 1, MM1} { 10, MM2} { 100, MM3} weight 1 + // { 1000, MM1} { 10000, MM2} { 100000, MM3} weight 1 + // {1000000, MM1} {10000000, MM2} {100000000, MM3} weight 1 + // + // {1001001, MM1} {10010010, MM2} {100100100, MM3} + // + std::vector gp1_sm1{{1, "MM1"}, {10, "MM2"}, {100, "MM3"}}; + std::vector 
gp1_sm2{{1000, "MM1"}, {10000, "MM2"}, {100000, "MM3"}}; + std::vector gp1_sm3{{1000000, "MM1"}, {10000000, "MM2"}, {100000000, "MM3"}}; + s = db.ZAdd("GP1_ZUNIONSTORE_SM1", gp1_sm1, &ret); + s = db.ZAdd("GP1_ZUNIONSTORE_SM2", gp1_sm2, &ret); + s = db.ZAdd("GP1_ZUNIONSTORE_SM3", gp1_sm3, &ret); + std::map value_to_dest; + s = db.ZUnionstore("GP1_ZUNIONSTORE_DESTINATION", + {"GP1_ZUNIONSTORE_SM1", "GP1_ZUNIONSTORE_SM2", "GP1_ZUNIONSTORE_SM3"}, {1, 1, 1}, storage::SUM, + value_to_dest, &ret); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(ret, 3); + ASSERT_TRUE(size_match(&db, "GP1_ZUNIONSTORE_DESTINATION", 3)); + ASSERT_TRUE(score_members_match(&db, "GP1_ZUNIONSTORE_DESTINATION", + {{1001001, "MM1"}, {10010010, "MM2"}, {100100100, "MM3"}})); + + // ***************** Group 2 Test ***************** + // { 1, MM1} { 10, MM2} { 100, MM3} weight 1 + // { 1000, MM1} { 10000, MM2} { 100000, MM3} weight 1 + // {1000000, MM1} {10000000, MM2} {100000000, MM3} weight 1 + // + // { 1, MM1} { 10, MM2} { 100, MM3} + // + std::vector gp2_sm1{{1, "MM1"}, {10, "MM2"}, {100, "MM3"}}; + std::vector gp2_sm2{{1000, "MM1"}, {10000, "MM2"}, {100000, "MM3"}}; + std::vector gp2_sm3{{1000000, "MM1"}, {10000000, "MM2"}, {100000000, "MM3"}}; + s = db.ZAdd("GP2_ZUNIONSTORE_SM1", gp2_sm1, &ret); + s = db.ZAdd("GP2_ZUNIONSTORE_SM2", gp2_sm2, &ret); + s = db.ZAdd("GP2_ZUNIONSTORE_SM3", gp2_sm3, &ret); + s = db.ZUnionstore("GP2_ZUNIONSTORE_DESTINATION", + {"GP2_ZUNIONSTORE_SM1", "GP2_ZUNIONSTORE_SM2", "GP2_ZUNIONSTORE_SM3"}, {1, 1, 1}, storage::MIN, + value_to_dest, &ret); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(ret, 3); + ASSERT_TRUE(size_match(&db, "GP2_ZUNIONSTORE_DESTINATION", 3)); + ASSERT_TRUE(score_members_match(&db, "GP2_ZUNIONSTORE_DESTINATION", {{1, "MM1"}, {10, "MM2"}, {100, "MM3"}})); + + // ***************** Group 3 Test ***************** + // { 1, MM1} { 10, MM2} { 100, MM3} weight 1 + // { 1000, MM1} { 10000, MM2} { 100000, MM3} weight 1 + // {1000000, MM1} {10000000, MM2} {100000000, MM3} weight 1 + // + // {1000000, MM1} {10000000, MM2} {100000000, MM3} + // + std::vector gp3_sm1{{1, "MM1"}, {10, "MM2"}, {100, "MM3"}}; + std::vector gp3_sm2{{1000, "MM1"}, {10000, "MM2"}, {100000, "MM3"}}; + std::vector gp3_sm3{{1000000, "MM1"}, {10000000, "MM2"}, {100000000, "MM3"}}; + s = db.ZAdd("GP3_ZUNIONSTORE_SM1", gp3_sm1, &ret); + s = db.ZAdd("GP3_ZUNIONSTORE_SM2", gp3_sm2, &ret); + s = db.ZAdd("GP3_ZUNIONSTORE_SM3", gp3_sm3, &ret); + s = db.ZUnionstore("GP3_ZUNIONSTORE_DESTINATION", + {"GP3_ZUNIONSTORE_SM1", "GP3_ZUNIONSTORE_SM2", "GP3_ZUNIONSTORE_SM3"}, {1, 1, 1}, storage::MAX, + value_to_dest, &ret); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(ret, 3); + ASSERT_TRUE(size_match(&db, "GP3_ZUNIONSTORE_DESTINATION", 3)); + ASSERT_TRUE(score_members_match(&db, "GP3_ZUNIONSTORE_DESTINATION", + {{1000000, "MM1"}, {10000000, "MM2"}, {100000000, "MM3"}})); + + // ***************** Group 4 Test ***************** + // { 1, MM1} { 10, MM2} { 100, MM3} weight 1 + // { 1000, MM1} { 10000, MM2} { 100000, MM3} weight 2 + // {1000000, MM1} {10000000, MM2} {100000000, MM3} weight 3 + // + // {3002001, MM1} {30020010, MM2} {300200100, MM3} + // + std::vector gp4_sm1{{1, "MM1"}, {10, "MM2"}, {100, "MM3"}}; + std::vector gp4_sm2{{1000, "MM1"}, {10000, "MM2"}, {100000, "MM3"}}; + std::vector gp4_sm3{{1000000, "MM1"}, {10000000, "MM2"}, {100000000, "MM3"}}; + s = db.ZAdd("GP4_ZUNIONSTORE_SM1", gp4_sm1, &ret); + s = db.ZAdd("GP4_ZUNIONSTORE_SM2", gp4_sm2, &ret); + s = db.ZAdd("GP4_ZUNIONSTORE_SM3", gp4_sm3, &ret); + s = 
db.ZUnionstore("GP4_ZUNIONSTORE_DESTINATION", + {"GP4_ZUNIONSTORE_SM1", "GP4_ZUNIONSTORE_SM2", "GP4_ZUNIONSTORE_SM3"}, {1, 2, 3}, storage::SUM, + value_to_dest, &ret); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(ret, 3); + ASSERT_TRUE(size_match(&db, "GP4_ZUNIONSTORE_DESTINATION", 3)); + ASSERT_TRUE(score_members_match(&db, "GP4_ZUNIONSTORE_DESTINATION", + {{3002001, "MM1"}, {30020010, "MM2"}, {300200100, "MM3"}})); + + // ***************** Group 5 Test ***************** + // { 1, MM1} { 10, MM2} { 100, MM3} weight 1 + // { 1000, MM1} { 10000, MM2} { 100000, MM3} weight 2 + // {1000000, MM1} {100000000, MM3} weight 3 + // + // {3002001, MM1} { 20010, MM2} {300200100, MM3} + // + std::vector gp5_sm1{{1, "MM1"}, {10, "MM2"}, {100, "MM3"}}; + std::vector gp5_sm2{{1000, "MM1"}, {10000, "MM2"}, {100000, "MM3"}}; + std::vector gp5_sm3{{1000000, "MM1"}, {100000000, "MM3"}}; + s = db.ZAdd("GP5_ZUNIONSTORE_SM1", gp5_sm1, &ret); + s = db.ZAdd("GP5_ZUNIONSTORE_SM2", gp5_sm2, &ret); + s = db.ZAdd("GP5_ZUNIONSTORE_SM3", gp5_sm3, &ret); + s = db.ZUnionstore("GP5_ZUNIONSTORE_DESTINATION", + {"GP5_ZUNIONSTORE_SM1", "GP5_ZUNIONSTORE_SM2", "GP5_ZUNIONSTORE_SM3"}, {1, 2, 3}, storage::SUM, + value_to_dest, &ret); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(ret, 3); + ASSERT_TRUE(size_match(&db, "GP5_ZUNIONSTORE_DESTINATION", 3)); + ASSERT_TRUE( + score_members_match(&db, "GP5_ZUNIONSTORE_DESTINATION", {{20010, "MM2"}, {3002001, "MM1"}, {300200100, "MM3"}})); + + // ***************** Group 6 Test ***************** + // { 1, MM1} { 10, MM2} { 100, MM3} weight 1 + // { 1000, MM1} { 10000, MM2} { 100000, MM3} weight 2 (expire) + // {1000000, MM1} {100000000, MM3} weight 3 + // + // {3000001, MM1} { 10, MM2} {300000100, MM3} + // + std::vector gp6_sm1{{1, "MM1"}, {10, "MM2"}, {100, "MM3"}}; + std::vector gp6_sm2{{1000, "MM1"}, {10000, "MM2"}, {100000, "MM3"}}; + std::vector gp6_sm3{{1000000, "MM1"}, {100000000, "MM3"}}; + s = db.ZAdd("GP6_ZUNIONSTORE_SM1", gp6_sm1, &ret); + s = db.ZAdd("GP6_ZUNIONSTORE_SM2", gp6_sm2, &ret); + s = db.ZAdd("GP6_ZUNIONSTORE_SM3", gp6_sm3, &ret); + ASSERT_TRUE(make_expired(&db, "GP6_ZUNIONSTORE_SM2")); + s = db.ZUnionstore("GP6_ZUNIONSTORE_DESTINATION", + {"GP6_ZUNIONSTORE_SM1", "GP6_ZUNIONSTORE_SM2", "GP6_ZUNIONSTORE_SM3"}, {1, 2, 3}, storage::SUM, + value_to_dest, &ret); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(ret, 3); + ASSERT_TRUE(size_match(&db, "GP6_ZUNIONSTORE_DESTINATION", 3)); + ASSERT_TRUE( + score_members_match(&db, "GP6_ZUNIONSTORE_DESTINATION", {{10, "MM2"}, {3000001, "MM1"}, {300000100, "MM3"}})); + + // ***************** Group 7 Test ***************** + // { 1, MM1} { 10, MM2} { 100, MM3} weight 1 + // {1000, MM1} {10000, MM2} {100000, MM3} weight 2 (expire) + // {1000, MM4} weight 3 + // + // { 1, MM1} { 10, MM2} { 100, MM3} {3000, MM4} + // + std::vector gp7_sm1{{1, "MM1"}, {10, "MM2"}, {100, "MM3"}}; + std::vector gp7_sm2{{1000, "MM1"}, {10000, "MM2"}, {100000, "MM3"}}; + std::vector gp7_sm3{{1000, "MM4"}}; + s = db.ZAdd("GP7_ZUNIONSTORE_SM1", gp7_sm1, &ret); + s = db.ZAdd("GP7_ZUNIONSTORE_SM2", gp7_sm2, &ret); + s = db.ZAdd("GP7_ZUNIONSTORE_SM3", gp7_sm3, &ret); + ASSERT_TRUE(make_expired(&db, "GP7_ZUNIONSTORE_SM2")); + s = db.ZUnionstore("GP7_ZUNIONSTORE_DESTINATION", + {"GP7_ZUNIONSTORE_SM1", "GP7_ZUNIONSTORE_SM2", "GP7_ZUNIONSTORE_SM3"}, {1, 2, 3}, storage::SUM, + value_to_dest, &ret); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(ret, 4); + ASSERT_TRUE(size_match(&db, "GP7_ZUNIONSTORE_DESTINATION", 4)); + ASSERT_TRUE( + score_members_match(&db, "GP7_ZUNIONSTORE_DESTINATION", {{1, "MM1"}, {10, 
"MM2"}, {100, "MM3"}, {3000, "MM4"}})); + + // ***************** Group 8 Test ***************** + // {1, MM1} weight 1 + // {1, MM2} weight 1 + // {1, MM3} weight 1 + // + // {1, MM1} {1, MM2} {1, MM3} + // + std::vector gp8_sm1{{1, "MM1"}}; + std::vector gp8_sm2{{1, "MM2"}}; + std::vector gp8_sm3{{1, "MM3"}}; + s = db.ZAdd("GP8_ZUNIONSTORE_SM1", gp8_sm1, &ret); + s = db.ZAdd("GP8_ZUNIONSTORE_SM2", gp8_sm2, &ret); + s = db.ZAdd("GP8_ZUNIONSTORE_SM3", gp8_sm3, &ret); + s = db.ZUnionstore("GP8_ZUNIONSTORE_DESTINATION", + {"GP8_ZUNIONSTORE_SM1", "GP8_ZUNIONSTORE_SM2", "GP8_ZUNIONSTORE_SM3"}, {1, 1, 1}, storage::MIN, + value_to_dest, &ret); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(ret, 3); + ASSERT_TRUE(size_match(&db, "GP8_ZUNIONSTORE_DESTINATION", 3)); + ASSERT_TRUE(score_members_match(&db, "GP8_ZUNIONSTORE_DESTINATION", {{1, "MM1"}, {1, "MM2"}, {1, "MM3"}})); + + // ***************** Group 9 Test ***************** + // { 1, MM1} { 10, MM2} { 100, MM3} weight 1 + // { 1000, MM1} { 10000, MM2} { 100000, MM3} weight 1 + // {1000000, MM1} {10000000, MM2} {100000000, MM3} weight 1 + // + // {1001001, MM1} {10010010, MM2} {100100100, MM3} + // + std::vector gp9_sm1{{1, "MM1"}, {10, "MM2"}, {100, "MM3"}}; + std::vector gp9_sm2{{1000, "MM1"}, {10000, "MM2"}, {100000, "MM3"}}; + std::vector gp9_sm3{{1000000, "MM1"}, {10000000, "MM2"}, {100000000, "MM3"}}; + std::vector gp9_destination{{1, "MM1"}}; + s = db.ZAdd("GP9_ZUNIONSTORE_SM1", gp9_sm1, &ret); + s = db.ZAdd("GP9_ZUNIONSTORE_SM2", gp9_sm2, &ret); + s = db.ZAdd("GP9_ZUNIONSTORE_SM3", gp9_sm3, &ret); + s = db.ZAdd("GP9_ZUNIONSTORE_DESTINATION", gp9_destination, &ret); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(ret, 1); + ASSERT_TRUE(size_match(&db, "GP9_ZUNIONSTORE_DESTINATION", 1)); + ASSERT_TRUE(score_members_match(&db, "GP9_ZUNIONSTORE_DESTINATION", {{1, "MM1"}})); + + s = db.ZUnionstore("GP9_ZUNIONSTORE_DESTINATION", + {"GP9_ZUNIONSTORE_SM1", "GP9_ZUNIONSTORE_SM2", "GP9_ZUNIONSTORE_SM3"}, {1, 1, 1}, storage::SUM, + value_to_dest, &ret); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(ret, 3); + ASSERT_TRUE(size_match(&db, "GP9_ZUNIONSTORE_DESTINATION", 3)); + ASSERT_TRUE(score_members_match(&db, "GP9_ZUNIONSTORE_DESTINATION", + {{1001001, "MM1"}, {10010010, "MM2"}, {100100100, "MM3"}})); + + // ***************** Group 10 Test ***************** + // { 1, MM1} { 10, MM2} { 100, MM3} weight 1 + // { 1000, MM1} { 10000, MM2} { 100000, MM3} weight 1 + // {1000000, MM1} {10000000, MM2} {100000000, MM3} weight 1 + // + // {1001001, MM1} {10010010, MM2} {100100100, MM3} + // + std::vector gp10_sm1{{1, "MM1"}, {10, "MM2"}, {100, "MM3"}}; + std::vector gp10_sm2{{1000, "MM1"}, {10000, "MM2"}, {100000, "MM3"}}; + std::vector gp10_sm3{{1000000, "MM1"}, {10000000, "MM2"}, {100000000, "MM3"}}; + s = db.ZAdd("GP10_ZUNIONSTORE_SM1", gp10_sm1, &ret); + s = db.ZAdd("GP10_ZUNIONSTORE_SM2", gp10_sm2, &ret); + s = db.ZAdd("GP10_ZUNIONSTORE_SM3", gp10_sm3, &ret); + s = db.ZUnionstore("GP10_ZUNIONSTORE_DESTINATION", + {"GP10_ZUNIONSTORE_SM1", "GP10_ZUNIONSTORE_SM2", "GP10_ZUNIONSTORE_SM3", "GP10_ZUNIONSTORE_SM4"}, + {1, 1, 1, 1}, storage::SUM, value_to_dest, &ret); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(ret, 3); + ASSERT_TRUE(size_match(&db, "GP10_ZUNIONSTORE_DESTINATION", 3)); + ASSERT_TRUE(score_members_match(&db, "GP10_ZUNIONSTORE_DESTINATION", + {{1001001, "MM1"}, {10010010, "MM2"}, {100100100, "MM3"}})); + + // ***************** Group 11 Test ***************** + // {-999999999, MM1} weight 0 + // + // { 0, MM1} + // + std::vector gp11_sm1{{-999999999, "MM1"}}; + s = 
db.ZAdd("GP11_ZUNIONSTORE_SM1", gp11_sm1, &ret); + s = db.ZUnionstore("GP11_ZUNIONSTORE_DESTINATION", {"GP11_ZUNIONSTORE_SM1"}, {0}, storage::SUM, value_to_dest, &ret); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(ret, 1); + ASSERT_TRUE(size_match(&db, "GP11_ZUNIONSTORE_DESTINATION", 1)); + ASSERT_TRUE(score_members_match(&db, "GP11_ZUNIONSTORE_DESTINATION", {{0, "MM1"}})); +} + +// ZINTERSTORE +TEST_F(ZSetsTest, ZInterstoreTest) { // NOLINT + int32_t ret; + + // ***************** Group 1 Test ***************** + // { 1, MM1} { 10, MM2} { 100, MM3} weight 1 + // { 1000, MM1} { 10000, MM2} { 100000, MM3} weight 1 + // {1000000, MM1} {10000000, MM2} {100000000, MM3} weight 1 + // + // {1001001, MM1} {10010010, MM2} {100100100, MM3} + // + std::vector gp1_sm1{{1, "MM1"}, {10, "MM2"}, {100, "MM3"}}; + std::vector gp1_sm2{{1000, "MM1"}, {10000, "MM2"}, {100000, "MM3"}}; + std::vector gp1_sm3{{1000000, "MM1"}, {10000000, "MM2"}, {100000000, "MM3"}}; + s = db.ZAdd("GP1_ZINTERSTORE_SM1", gp1_sm1, &ret); + s = db.ZAdd("GP1_ZINTERSTORE_SM2", gp1_sm2, &ret); + s = db.ZAdd("GP1_ZINTERSTORE_SM3", gp1_sm3, &ret); + std::vector value_to_dest; + s = db.ZInterstore("GP1_ZINTERSTORE_DESTINATION", + {"GP1_ZINTERSTORE_SM1", "GP1_ZINTERSTORE_SM2", "GP1_ZINTERSTORE_SM3"}, {1, 1, 1}, storage::SUM, + value_to_dest, &ret); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(ret, 3); + ASSERT_TRUE(size_match(&db, "GP1_ZINTERSTORE_DESTINATION", 3)); + ASSERT_TRUE(score_members_match(&db, "GP1_ZINTERSTORE_DESTINATION", + {{1001001, "MM1"}, {10010010, "MM2"}, {100100100, "MM3"}})); + + // ***************** Group 2 Test ***************** + // { 1, MM1} { 10, MM2} { 100, MM3} weight 1 + // { 1000, MM1} { 10000, MM2} { 100000, MM3} weight 1 + // {1000000, MM1} {10000000, MM2} {100000000, MM3} weight 1 + // + // { 1, MM1} { 10, MM2} { 100, MM3} + // + std::vector gp2_sm1{{1, "MM1"}, {10, "MM2"}, {100, "MM3"}}; + std::vector gp2_sm2{{1000, "MM1"}, {10000, "MM2"}, {100000, "MM3"}}; + std::vector gp2_sm3{{1000000, "MM1"}, {10000000, "MM2"}, {100000000, "MM3"}}; + s = db.ZAdd("GP2_ZINTERSTORE_SM1", gp2_sm1, &ret); + s = db.ZAdd("GP2_ZINTERSTORE_SM2", gp2_sm2, &ret); + s = db.ZAdd("GP2_ZINTERSTORE_SM3", gp2_sm3, &ret); + s = db.ZInterstore("GP2_ZINTERSTORE_DESTINATION", + {"GP2_ZINTERSTORE_SM1", "GP2_ZINTERSTORE_SM2", "GP2_ZINTERSTORE_SM3"}, {1, 1, 1}, storage::MIN, + value_to_dest, &ret); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(ret, 3); + ASSERT_TRUE(size_match(&db, "GP2_ZINTERSTORE_DESTINATION", 3)); + ASSERT_TRUE(score_members_match(&db, "GP2_ZINTERSTORE_DESTINATION", {{1, "MM1"}, {10, "MM2"}, {100, "MM3"}})); + + // ***************** Group 3 Test ***************** + // { 1, MM1} { 10, MM2} { 100, MM3} weight 1 + // { 1000, MM1} { 10000, MM2} { 100000, MM3} weight 1 + // {10000 + // 00, MM1} {10000000, MM2} {100000000, MM3} weight 1 + // + // {1000000, MM1} {10000000, MM2} {100000000, MM3} + // + std::vector gp3_sm1{{1, "MM1"}, {10, "MM2"}, {100, "MM3"}}; + std::vector gp3_sm2{{1000, "MM1"}, {10000, "MM2"}, {100000, "MM3"}}; + std::vector gp3_sm3{{1000000, "MM1"}, {10000000, "MM2"}, {100000000, "MM3"}}; + s = db.ZAdd("GP3_ZINTERSTORE_SM1", gp3_sm1, &ret); + s = db.ZAdd("GP3_ZINTERSTORE_SM2", gp3_sm2, &ret); + s = db.ZAdd("GP3_ZINTERSTORE_SM3", gp3_sm3, &ret); + s = db.ZInterstore("GP3_ZINTERSTORE_DESTINATION", + {"GP3_ZINTERSTORE_SM1", "GP3_ZINTERSTORE_SM2", "GP3_ZINTERSTORE_SM3"}, {1, 1, 1}, storage::MAX, + value_to_dest, &ret); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(ret, 3); + ASSERT_TRUE(size_match(&db, "GP3_ZINTERSTORE_DESTINATION", 3)); + 
ASSERT_TRUE(score_members_match(&db, "GP3_ZINTERSTORE_DESTINATION", + {{1000000, "MM1"}, {10000000, "MM2"}, {100000000, "MM3"}})); + + // ***************** Group 4 Test ***************** + // { 1, MM1} { 10, MM2} { 100, MM3} weight 1 + // { 1000, MM1} { 10000, MM2} { 100000, MM3} weight 2 + // {1000000, MM1} {10000000, MM2} {100000000, MM3} weight 3 + // + // {3002001, MM1} {30020010, MM2} {300200100, MM3} + // + std::vector gp4_sm1{{1, "MM1"}, {10, "MM2"}, {100, "MM3"}}; + std::vector gp4_sm2{{1000, "MM1"}, {10000, "MM2"}, {100000, "MM3"}}; + std::vector gp4_sm3{{1000000, "MM1"}, {10000000, "MM2"}, {100000000, "MM3"}}; + s = db.ZAdd("GP4_ZINTERSTORE_SM1", gp4_sm1, &ret); + s = db.ZAdd("GP4_ZINTERSTORE_SM2", gp4_sm2, &ret); + s = db.ZAdd("GP4_ZINTERSTORE_SM3", gp4_sm3, &ret); + s = db.ZInterstore("GP4_ZINTERSTORE_DESTINATION", + {"GP4_ZINTERSTORE_SM1", "GP4_ZINTERSTORE_SM2", "GP4_ZINTERSTORE_SM3"}, {1, 2, 3}, storage::SUM, + value_to_dest, &ret); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(ret, 3); + ASSERT_TRUE(size_match(&db, "GP4_ZINTERSTORE_DESTINATION", 3)); + ASSERT_TRUE(score_members_match(&db, "GP4_ZINTERSTORE_DESTINATION", + {{3002001, "MM1"}, {30020010, "MM2"}, {300200100, "MM3"}})); + + // ***************** Group 5 Test ***************** + // { 1, MM1} { 10, MM2} { 100, MM3} weight 1 + // { 1000, MM1} { 10000, MM2} { 100000, MM3} weight 2 + // {1000000, MM1} {100000000, MM3} weight 3 + // + // {3002001, MM1} {300200100, MM3} + // + std::vector gp5_sm1{{1, "MM1"}, {10, "MM2"}, {100, "MM3"}}; + std::vector gp5_sm2{{1000, "MM1"}, {10000, "MM2"}, {100000, "MM3"}}; + std::vector gp5_sm3{{1000000, "MM1"}, {100000000, "MM3"}}; + s = db.ZAdd("GP5_ZINTERSTORE_SM1", gp5_sm1, &ret); + s = db.ZAdd("GP5_ZINTERSTORE_SM2", gp5_sm2, &ret); + s = db.ZAdd("GP5_ZINTERSTORE_SM3", gp5_sm3, &ret); + s = db.ZInterstore("GP5_ZINTERSTORE_DESTINATION", + {"GP5_ZINTERSTORE_SM1", "GP5_ZINTERSTORE_SM2", "GP5_ZINTERSTORE_SM3"}, {1, 2, 3}, storage::SUM, + value_to_dest, &ret); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(ret, 2); + ASSERT_TRUE(size_match(&db, "GP5_ZINTERSTORE_DESTINATION", 2)); + ASSERT_TRUE(score_members_match(&db, "GP5_ZINTERSTORE_DESTINATION", {{3002001, "MM1"}, {300200100, "MM3"}})); + + // ***************** Group 6 Test ***************** + // { 1, MM1} { 10, MM2} { 100, MM3} weight 1 + // { 1000, MM1} { 10000, MM2} { 100000, MM3} weight 2 (expire) + // {1000000, MM1} {100000000, MM3} weight 3 + // + // {3000001, MM1} { 10, MM2} {300000100, MM3} + // + std::vector gp6_sm1{{1, "MM1"}, {10, "MM2"}, {100, "MM3"}}; + std::vector gp6_sm2{{1000, "MM1"}, {10000, "MM2"}, {100000, "MM3"}}; + std::vector gp6_sm3{{1000000, "MM1"}, {100000000, "MM3"}}; + s = db.ZAdd("GP6_ZINTERSTORE_SM1", gp6_sm1, &ret); + s = db.ZAdd("GP6_ZINTERSTORE_SM2", gp6_sm2, &ret); + s = db.ZAdd("GP6_ZINTERSTORE_SM3", gp6_sm3, &ret); + ASSERT_TRUE(make_expired(&db, "GP6_ZINTERSTORE_SM2")); + s = db.ZInterstore("GP6_ZINTERSTORE_DESTINATION", + {"GP6_ZINTERSTORE_SM1", "GP6_ZINTERSTORE_SM2", "GP6_ZINTERSTORE_SM3"}, {1, 2, 3}, storage::SUM, + value_to_dest, &ret); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(ret, 0); + ASSERT_TRUE(size_match(&db, "GP6_ZINTERSTORE_DESTINATION", 0)); + ASSERT_TRUE(score_members_match(&db, "GP6_ZINTERSTORE_DESTINATION", {})); + + // ***************** Group 7 Test ***************** + // { 1, MM1} { 10, MM2} { 100, MM3} weight 1 + // {1000, MM1} {10000, MM2} {100000, MM3} weight 2 (expire) + // {1000, MM4} weight 3 + // + // { 1, MM1} { 10, MM2} { 100, MM3} {3000, MM4} + // + std::vector gp7_sm1{{1, "MM1"}, {10, "MM2"}, {100, 
"MM3"}}; + std::vector gp7_sm2{{1000, "MM1"}, {10000, "MM2"}, {100000, "MM3"}}; + std::vector gp7_sm3{{1000, "MM4"}}; + s = db.ZAdd("GP7_ZINTERSTORE_SM1", gp7_sm1, &ret); + s = db.ZAdd("GP7_ZINTERSTORE_SM2", gp7_sm2, &ret); + s = db.ZAdd("GP7_ZINTERSTORE_SM3", gp7_sm3, &ret); + ASSERT_TRUE(make_expired(&db, "GP7_ZINTERSTORE_SM2")); + s = db.ZInterstore("GP7_ZINTERSTORE_DESTINATION", + {"GP7_ZINTERSTORE_SM1", "GP7_ZINTERSTORE_SM2", "GP7_ZINTERSTORE_SM3"}, {1, 2, 3}, storage::SUM, + value_to_dest, &ret); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(ret, 0); + ASSERT_TRUE(size_match(&db, "GP7_ZINTERSTORE_DESTINATION", 0)); + ASSERT_TRUE(score_members_match(&db, "GP7_ZINTERSTORE_DESTINATION", {})); + + // ***************** Group 8 Test ***************** + // {1, MM1} weight 1 + // {1, MM2} weight 1 + // {1, MM3} weight 1 + // + // {1, MM1} {1, MM2} {1, MM3} + // + std::vector gp8_sm1{{1, "MM1"}}; + std::vector gp8_sm2{{1, "MM2"}}; + std::vector gp8_sm3{{1, "MM3"}}; + s = db.ZAdd("GP8_ZINTERSTORE_SM1", gp8_sm1, &ret); + s = db.ZAdd("GP8_ZINTERSTORE_SM2", gp8_sm2, &ret); + s = db.ZAdd("GP8_ZINTERSTORE_SM3", gp8_sm3, &ret); + s = db.ZInterstore("GP8_ZINTERSTORE_DESTINATION", + {"GP8_ZINTERSTORE_SM1", "GP8_ZINTERSTORE_SM2", "GP8_ZINTERSTORE_SM3"}, {1, 1, 1}, storage::MIN, + value_to_dest, &ret); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(ret, 0); + ASSERT_TRUE(size_match(&db, "GP8_ZINTERSTORE_DESTINATION", 0)); + ASSERT_TRUE(score_members_match(&db, "GP8_ZINTERSTORE_DESTINATION", {})); + + // ***************** Group 9 Test ***************** + // { 1, MM1} { 10, MM2} { 100, MM3} weight 1 + // { 1000, MM1} { 10000, MM2} { 100000, MM3} weight 1 + // {1000000, MM1} {10000000, MM2} {100000000, MM3} weight 1 + // + // {1001001, MM1} {10010010, MM2} {100100100, MM3} + // + std::vector gp9_sm1{{1, "MM1"}, {10, "MM2"}, {100, "MM3"}}; + std::vector gp9_sm2{{1000, "MM1"}, {10000, "MM2"}, {100000, "MM3"}}; + std::vector gp9_sm3{{1000000, "MM1"}, {10000000, "MM2"}, {100000000, "MM3"}}; + std::vector gp9_destination{{1, "MM1"}}; + s = db.ZAdd("GP9_ZINTERSTORE_SM1", gp9_sm1, &ret); + s = db.ZAdd("GP9_ZINTERSTORE_SM2", gp9_sm2, &ret); + s = db.ZAdd("GP9_ZINTERSTORE_SM3", gp9_sm3, &ret); + s = db.ZAdd("GP9_ZINTERSTORE_DESTINATION", gp9_destination, &ret); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(ret, 1); + ASSERT_TRUE(size_match(&db, "GP9_ZINTERSTORE_DESTINATION", 1)); + ASSERT_TRUE(score_members_match(&db, "GP9_ZINTERSTORE_DESTINATION", {{1, "MM1"}})); + + s = db.ZInterstore("GP9_ZINTERSTORE_DESTINATION", + {"GP9_ZINTERSTORE_SM1", "GP9_ZINTERSTORE_SM2", "GP9_ZINTERSTORE_SM3"}, {1, 1, 1}, storage::SUM, + value_to_dest, &ret); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(ret, 3); + ASSERT_TRUE(size_match(&db, "GP9_ZINTERSTORE_DESTINATION", 3)); + ASSERT_TRUE(score_members_match(&db, "GP9_ZINTERSTORE_DESTINATION", + {{1001001, "MM1"}, {10010010, "MM2"}, {100100100, "MM3"}})); + + // ***************** Group 10 Test ***************** + // { 1, MM1} { 10, MM2} { 100, MM3} weight 1 + // { 1000, MM1} { 10000, MM2} { 100000, MM3} weight 1 + // {1000000, MM1} {10000000, MM2} {100000000, MM3} weight 1 + // + // {1001001, MM1} {10010010, MM2} {100100100, MM3} + // + std::vector gp10_sm1{{1, "MM1"}, {10, "MM2"}, {100, "MM3"}}; + std::vector gp10_sm2{{1000, "MM1"}, {10000, "MM2"}, {100000, "MM3"}}; + std::vector gp10_sm3{{1000000, "MM1"}, {10000000, "MM2"}, {100000000, "MM3"}}; + s = db.ZAdd("GP10_ZINTERSTORE_SM1", gp10_sm1, &ret); + s = db.ZAdd("GP10_ZINTERSTORE_SM2", gp10_sm2, &ret); + s = db.ZAdd("GP10_ZINTERSTORE_SM3", gp10_sm3, &ret); + s = 
db.ZInterstore("GP10_ZINTERSTORE_DESTINATION", + {"GP10_ZINTERSTORE_SM1", "GP10_ZINTERSTORE_SM2", "GP10_ZINTERSTORE_SM3", "GP10_ZINTERSTORE_SM4"}, + {1, 1, 1, 1}, storage::SUM, value_to_dest, &ret); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(ret, 0); + ASSERT_TRUE(size_match(&db, "GP10_ZINTERSTORE_DESTINATION", 0)); + ASSERT_TRUE(score_members_match(&db, "GP10_ZINTERSTORE_DESTINATION", {})); +} + +// ZRANGEBYLEX +TEST_F(ZSetsTest, ZRangebylexTest) { // NOLINT + int32_t ret; + + std::vector members; + // ***************** Group 1 Test ***************** + // {1, e} {1, f} {1, g} {1, h} {1, i} {1, j} {1, k} {1, l} {1, m} + // + std::vector gp1_sm1{{1, "e"}, {1, "f"}, {1, "g"}, {1, "h"}, {1, "i"}, + {1, "j"}, {1, "k"}, {1, "l"}, {1, "m"}}; + s = db.ZAdd("GP1_ZRANGEBYLEX", gp1_sm1, &ret); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(ret, 9); + + s = db.ZRangebylex("GP1_ZRANGEBYLEX", "a", "n", true, true, &members); + ASSERT_TRUE(s.ok()); + ASSERT_TRUE(members_match(members, {"e", "f", "g", "h", "i", "j", "k", "l", "m"})); + + s = db.ZRangebylex("GP1_ZRANGEBYLEX", "e", "m", true, true, &members); + ASSERT_TRUE(s.ok()); + ASSERT_TRUE(members_match(members, {"e", "f", "g", "h", "i", "j", "k", "l", "m"})); + + s = db.ZRangebylex("GP1_ZRANGEBYLEX", "e", "m", true, false, &members); + ASSERT_TRUE(s.ok()); + ASSERT_TRUE(members_match(members, {"e", "f", "g", "h", "i", "j", "k", "l"})); + + s = db.ZRangebylex("GP1_ZRANGEBYLEX", "e", "m", false, true, &members); + ASSERT_TRUE(s.ok()); + ASSERT_TRUE(members_match(members, {"f", "g", "h", "i", "j", "k", "l", "m"})); + + s = db.ZRangebylex("GP1_ZRANGEBYLEX", "e", "m", false, false, &members); + ASSERT_TRUE(s.ok()); + ASSERT_TRUE(members_match(members, {"f", "g", "h", "i", "j", "k", "l"})); + + s = db.ZRangebylex("GP1_ZRANGEBYLEX", "h", "j", true, true, &members); + ASSERT_TRUE(s.ok()); + ASSERT_TRUE(members_match(members, {"h", "i", "j"})); + + s = db.ZRangebylex("GP1_ZRANGEBYLEX", "h", "j", true, false, &members); + ASSERT_TRUE(s.ok()); + ASSERT_TRUE(members_match(members, {"h", "i"})); + + s = db.ZRangebylex("GP1_ZRANGEBYLEX", "h", "j", false, false, &members); + ASSERT_TRUE(s.ok()); + ASSERT_TRUE(members_match(members, {"i"})); + + s = db.ZRangebylex("GP1_ZRANGEBYLEX", "i", "i", true, true, &members); + ASSERT_TRUE(s.ok()); + ASSERT_TRUE(members_match(members, {"i"})); + + s = db.ZRangebylex("GP1_ZRANGEBYLEX", "i", "i", true, false, &members); + ASSERT_TRUE(s.ok()); + ASSERT_TRUE(members_match(members, {})); + + s = db.ZRangebylex("GP1_ZRANGEBYLEX", "i", "i", false, true, &members); + ASSERT_TRUE(s.ok()); + ASSERT_TRUE(members_match(members, {})); + + s = db.ZRangebylex("GP1_ZRANGEBYLEX", "i", "i", false, false, &members); + ASSERT_TRUE(s.ok()); + ASSERT_TRUE(members_match(members, {})); + + s = db.ZRangebylex("GP1_ZRANGEBYLEX", "-", "+", true, true, &members); + ASSERT_TRUE(s.ok()); + ASSERT_TRUE(members_match(members, {"e", "f", "g", "h", "i", "j", "k", "l", "m"})); + + s = db.ZRangebylex("GP1_ZRANGEBYLEX", "-", "+", true, false, &members); + ASSERT_TRUE(s.ok()); + ASSERT_TRUE(members_match(members, {"e", "f", "g", "h", "i", "j", "k", "l", "m"})); + + s = db.ZRangebylex("GP1_ZRANGEBYLEX", "-", "+", false, true, &members); + ASSERT_TRUE(s.ok()); + ASSERT_TRUE(members_match(members, {"e", "f", "g", "h", "i", "j", "k", "l", "m"})); + + s = db.ZRangebylex("GP1_ZRANGEBYLEX", "-", "+", false, false, &members); + ASSERT_TRUE(s.ok()); + ASSERT_TRUE(members_match(members, {"e", "f", "g", "h", "i", "j", "k", "l", "m"})); + + s = db.ZRangebylex("GP1_ZRANGEBYLEX", "i", "+", true, 
true, &members); + ASSERT_TRUE(s.ok()); + ASSERT_TRUE(members_match(members, {"i", "j", "k", "l", "m"})); + + s = db.ZRangebylex("GP1_ZRANGEBYLEX", "i", "+", false, true, &members); + ASSERT_TRUE(s.ok()); + ASSERT_TRUE(members_match(members, {"j", "k", "l", "m"})); + + s = db.ZRangebylex("GP1_ZRANGEBYLEX", "-", "i", true, true, &members); + ASSERT_TRUE(s.ok()); + ASSERT_TRUE(members_match(members, {"e", "f", "g", "h", "i"})); + + s = db.ZRangebylex("GP1_ZRANGEBYLEX", "-", "i", true, false, &members); + ASSERT_TRUE(s.ok()); + ASSERT_TRUE(members_match(members, {"e", "f", "g", "h"})); + + s = db.ZRangebylex("GP1_ZRANGEBYLEX", "-", "e", true, true, &members); + ASSERT_TRUE(s.ok()); + ASSERT_TRUE(members_match(members, {"e"})); + + s = db.ZRangebylex("GP1_ZRANGEBYLEX", "-", "e", true, false, &members); + ASSERT_TRUE(s.ok()); + ASSERT_TRUE(members_match(members, {})); + + s = db.ZRangebylex("GP1_ZRANGEBYLEX", "m", "+", true, true, &members); + ASSERT_TRUE(s.ok()); + ASSERT_TRUE(members_match(members, {"m"})); + + s = db.ZRangebylex("GP1_ZRANGEBYLEX", "m", "+", false, true, &members); + ASSERT_TRUE(s.ok()); + ASSERT_TRUE(members_match(members, {})); + + // ***************** Group 2 Test ***************** + // {1, e} {1, f} {1, g} {1, h} {1, i} {1, j} {1, k} {1, l} {1, m} (expire) + // + std::vector gp2_sm1{{1, "e"}, {1, "f"}, {1, "g"}, {1, "h"}, {1, "i"}, + {1, "j"}, {1, "k"}, {1, "l"}, {1, "m"}}; + s = db.ZAdd("GP2_ZRANGEBYLEX", gp1_sm1, &ret); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(ret, 9); + ASSERT_TRUE(make_expired(&db, "GP2_ZRANGEBYLEX")); + + s = db.ZRangebylex("GP2_ZRANGEBYLEX", "-", "+", true, true, &members); + ASSERT_TRUE(s.IsNotFound()); + ASSERT_TRUE(members_match(members, {})); + + // ***************** Group 3 Test ***************** + s = db.ZRangebylex("GP3_ZRANGEBYLEX", "-", "+", true, true, &members); + ASSERT_TRUE(s.IsNotFound()); + ASSERT_TRUE(members_match(members, {})); +} + +// ZLEXCOUNT +TEST_F(ZSetsTest, ZLexcountTest) { // NOLINT + int32_t ret; + + std::vector members; + // ***************** Group 1 Test ***************** + // {1, e} {1, f} {1, g} {1, h} {1, i} {1, j} {1, k} {1, l} {1, m} + // + std::vector gp1_sm1{{1, "e"}, {1, "f"}, {1, "g"}, {1, "h"}, {1, "i"}, + {1, "j"}, {1, "k"}, {1, "l"}, {1, "m"}}; + s = db.ZAdd("GP1_ZLEXCOUNT", gp1_sm1, &ret); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(ret, 9); + + s = db.ZLexcount("GP1_ZLEXCOUNT", "a", "n", true, true, &ret); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(ret, 9); + + s = db.ZLexcount("GP1_ZLEXCOUNT", "e", "m", true, true, &ret); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(ret, 9); + + s = db.ZLexcount("GP1_ZLEXCOUNT", "e", "m", true, false, &ret); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(ret, 8); + + s = db.ZLexcount("GP1_ZLEXCOUNT", "e", "m", false, true, &ret); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(ret, 8); + + s = db.ZLexcount("GP1_ZLEXCOUNT", "e", "m", false, false, &ret); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(ret, 7); + + s = db.ZLexcount("GP1_ZLEXCOUNT", "h", "j", true, true, &ret); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(ret, 3); + + s = db.ZLexcount("GP1_ZLEXCOUNT", "h", "j", true, false, &ret); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(ret, 2); + + s = db.ZLexcount("GP1_ZLEXCOUNT", "h", "j", false, false, &ret); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(ret, 1); + + s = db.ZLexcount("GP1_ZLEXCOUNT", "i", "i", true, true, &ret); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(ret, 1); + + s = db.ZLexcount("GP1_ZLEXCOUNT", "i", "i", true, false, &ret); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(ret, 0); + + s = db.ZLexcount("GP1_ZLEXCOUNT", "i", "i", false, true, &ret); + 
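+ // The two booleans give the left/right closedness of the lex range: here + // the range ("i", "i"] is left-open, so although member "i" exists the + // range is empty and ret is expected to be 0, matching the "(i" versus "[i" + // bound syntax of Redis ZLEXCOUNT. + 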
ASSERT_TRUE(s.ok()); + ASSERT_EQ(ret, 0); + + s = db.ZLexcount("GP1_ZLEXCOUNT", "i", "i", false, false, &ret); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(ret, 0); + + s = db.ZLexcount("GP1_ZLEXCOUNT", "-", "+", true, true, &ret); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(ret, 9); + + s = db.ZLexcount("GP1_ZLEXCOUNT", "-", "+", true, false, &ret); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(ret, 9); + + s = db.ZLexcount("GP1_ZLEXCOUNT", "-", "+", false, true, &ret); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(ret, 9); + + s = db.ZLexcount("GP1_ZLEXCOUNT", "-", "+", false, false, &ret); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(ret, 9); + + s = db.ZLexcount("GP1_ZLEXCOUNT", "i", "+", true, true, &ret); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(ret, 5); + + s = db.ZLexcount("GP1_ZLEXCOUNT", "i", "+", false, true, &ret); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(ret, 4); + + s = db.ZLexcount("GP1_ZLEXCOUNT", "-", "i", true, true, &ret); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(ret, 5); + + s = db.ZLexcount("GP1_ZLEXCOUNT", "-", "i", true, false, &ret); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(ret, 4); + + s = db.ZLexcount("GP1_ZLEXCOUNT", "-", "e", true, true, &ret); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(ret, 1); + + s = db.ZLexcount("GP1_ZLEXCOUNT", "-", "e", true, false, &ret); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(ret, 0); + + s = db.ZLexcount("GP1_ZLEXCOUNT", "m", "+", true, true, &ret); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(ret, 1); + + s = db.ZLexcount("GP1_ZLEXCOUNT", "m", "+", false, true, &ret); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(ret, 0); + + // ***************** Group 2 Test ***************** + // {1, e} {1, f} {1, g} {1, h} {1, i} {1, j} {1, k} {1, l} {1, m} (expire) + // + std::vector gp2_sm1{{1, "e"}, {1, "f"}, {1, "g"}, {1, "h"}, {1, "i"}, + {1, "j"}, {1, "k"}, {1, "l"}, {1, "m"}}; + s = db.ZAdd("GP2_ZLEXCOUNT", gp1_sm1, &ret); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(ret, 9); + ASSERT_TRUE(make_expired(&db, "GP2_ZLEXCOUNT")); + + s = db.ZLexcount("GP2_ZLEXCOUNT", "-", "+", true, true, &ret); + ASSERT_TRUE(s.IsNotFound()); + ASSERT_EQ(ret, 0); + + // ***************** Group 3 Test ***************** + s = db.ZLexcount("GP3_ZLEXCOUNT", "-", "+", true, true, &ret); + ASSERT_TRUE(s.IsNotFound()); + ASSERT_EQ(ret, 0); +} + +// ZREMRANGEBYLEX +TEST_F(ZSetsTest, ZRemrangebylexTest) { // NOLINT + int32_t ret; + std::vector members; + + // ***************** Group 1 Test ***************** + // {1, e} {1, f} {1, g} {1, h} {1, i} {1, j} {1, k} {1, l} {1, m} + // + std::vector gp1_sm{{1, "e"}, {1, "f"}, {1, "g"}, {1, "h"}, {1, "i"}, + {1, "j"}, {1, "k"}, {1, "l"}, {1, "m"}}; + s = db.ZAdd("GP1_ZREMRANGEBYLEX", gp1_sm, &ret); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(ret, 9); + + s = db.ZRemrangebylex("GP1_ZREMRANGEBYLEX", "a", "n", true, true, &ret); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(ret, 9); + ASSERT_TRUE(size_match(&db, "GP1_ZREMRANGEBYLEX", 0)); + ASSERT_TRUE(score_members_match(&db, "GP1_ZREMRANGEBYLEX", {})); + + // ***************** Group 2 Test ***************** + // {1, e} {1, f} {1, g} {1, h} {1, i} {1, j} {1, k} {1, l} {1, m} + // + std::vector gp2_sm{{1, "e"}, {1, "f"}, {1, "g"}, {1, "h"}, {1, "i"}, + {1, "j"}, {1, "k"}, {1, "l"}, {1, "m"}}; + s = db.ZAdd("GP2_ZREMRANGEBYLEX", gp2_sm, &ret); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(ret, 9); + + s = db.ZRemrangebylex("GP2_ZREMRANGEBYLEX", "e", "m", true, true, &ret); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(ret, 9); + ASSERT_TRUE(size_match(&db, "GP2_ZREMRANGEBYLEX", 0)); + ASSERT_TRUE(score_members_match(&db, "GP2_ZREMRANGEBYLEX", {})); + + // ***************** Group 3 Test ***************** + // {1, e} {1, f} {1, g} {1, h} {1, 
i} {1, j} {1, k} {1, l} {1, m} + // + std::vector gp3_sm{{1, "e"}, {1, "f"}, {1, "g"}, {1, "h"}, {1, "i"}, + {1, "j"}, {1, "k"}, {1, "l"}, {1, "m"}}; + s = db.ZAdd("GP3_ZREMRANGEBYLEX", gp3_sm, &ret); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(ret, 9); + + s = db.ZRemrangebylex("GP3_ZREMRANGEBYLEX", "e", "m", true, false, &ret); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(ret, 8); + ASSERT_TRUE(size_match(&db, "GP3_ZREMRANGEBYLEX", 1)); + ASSERT_TRUE(score_members_match(&db, "GP3_ZREMRANGEBYLEX", {{1, "m"}})); + + // ***************** Group 4 Test ***************** + // {1, e} {1, f} {1, g} {1, h} {1, i} {1, j} {1, k} {1, l} {1, m} + // + std::vector gp4_sm{{1, "e"}, {1, "f"}, {1, "g"}, {1, "h"}, {1, "i"}, + {1, "j"}, {1, "k"}, {1, "l"}, {1, "m"}}; + s = db.ZAdd("GP4_ZREMRANGEBYLEX", gp4_sm, &ret); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(ret, 9); + + s = db.ZRemrangebylex("GP4_ZREMRANGEBYLEX", "e", "m", false, true, &ret); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(ret, 8); + ASSERT_TRUE(size_match(&db, "GP4_ZREMRANGEBYLEX", 1)); + ASSERT_TRUE(score_members_match(&db, "GP4_ZREMRANGEBYLEX", {{1, "e"}})); + + // ***************** Group 5 Test ***************** + // {1, e} {1, f} {1, g} {1, h} {1, i} {1, j} {1, k} {1, l} {1, m} + // + std::vector gp5_sm{{1, "e"}, {1, "f"}, {1, "g"}, {1, "h"}, {1, "i"}, + {1, "j"}, {1, "k"}, {1, "l"}, {1, "m"}}; + s = db.ZAdd("GP5_ZREMRANGEBYLEX", gp5_sm, &ret); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(ret, 9); + + s = db.ZRemrangebylex("GP5_ZREMRANGEBYLEX", "e", "m", false, false, &ret); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(ret, 7); + ASSERT_TRUE(size_match(&db, "GP5_ZREMRANGEBYLEX", 2)); + ASSERT_TRUE(score_members_match(&db, "GP5_ZREMRANGEBYLEX", {{1, "e"}, {1, "m"}})); + + // ***************** Group 6 Test ***************** + // {1, e} {1, f} {1, g} {1, h} {1, i} {1, j} {1, k} {1, l} {1, m} + // + std::vector gp6_sm{{1, "e"}, {1, "f"}, {1, "g"}, {1, "h"}, {1, "i"}, + {1, "j"}, {1, "k"}, {1, "l"}, {1, "m"}}; + s = db.ZAdd("GP6_ZREMRANGEBYLEX", gp6_sm, &ret); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(ret, 9); + + s = db.ZRemrangebylex("GP6_ZREMRANGEBYLEX", "h", "j", true, true, &ret); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(ret, 3); + ASSERT_TRUE(size_match(&db, "GP6_ZREMRANGEBYLEX", 6)); + ASSERT_TRUE( + score_members_match(&db, "GP6_ZREMRANGEBYLEX", {{1, "e"}, {1, "f"}, {1, "g"}, {1, "k"}, {1, "l"}, {1, "m"}})); + + // ***************** Group 7 Test ***************** + // {1, e} {1, f} {1, g} {1, h} {1, i} {1, j} {1, k} {1, l} {1, m} + // + std::vector gp7_sm{{1, "e"}, {1, "f"}, {1, "g"}, {1, "h"}, {1, "i"}, + {1, "j"}, {1, "k"}, {1, "l"}, {1, "m"}}; + s = db.ZAdd("GP7_ZREMRANGEBYLEX", gp7_sm, &ret); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(ret, 9); + + s = db.ZRemrangebylex("GP7_ZREMRANGEBYLEX", "h", "j", true, false, &ret); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(ret, 2); + ASSERT_TRUE(size_match(&db, "GP7_ZREMRANGEBYLEX", 7)); + ASSERT_TRUE(score_members_match(&db, "GP7_ZREMRANGEBYLEX", + {{1, "e"}, {1, "f"}, {1, "g"}, {1, "j"}, {1, "k"}, {1, "l"}, {1, "m"}})); + + // ***************** Group 8 Test ***************** + // {1, e} {1, f} {1, g} {1, h} {1, i} {1, j} {1, k} {1, l} {1, m} + // + std::vector gp8_sm{{1, "e"}, {1, "f"}, {1, "g"}, {1, "h"}, {1, "i"}, + {1, "j"}, {1, "k"}, {1, "l"}, {1, "m"}}; + s = db.ZAdd("GP8_ZREMRANGEBYLEX", gp8_sm, &ret); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(ret, 9); + + s = db.ZRemrangebylex("GP8_ZREMRANGEBYLEX", "h", "j", false, false, &ret); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(ret, 1); + ASSERT_TRUE(size_match(&db, "GP8_ZREMRANGEBYLEX", 8)); + ASSERT_TRUE(score_members_match(&db, 
"GP8_ZREMRANGEBYLEX", + {{1, "e"}, {1, "f"}, {1, "g"}, {1, "h"}, {1, "j"}, {1, "k"}, {1, "l"}, {1, "m"}})); + + // ***************** Group 9 Test ***************** + // {1, e} {1, f} {1, g} {1, h} {1, i} {1, j} {1, k} {1, l} {1, m} + // + std::vector gp9_sm{{1, "e"}, {1, "f"}, {1, "g"}, {1, "h"}, {1, "i"}, + {1, "j"}, {1, "k"}, {1, "l"}, {1, "m"}}; + s = db.ZAdd("GP9_ZREMRANGEBYLEX", gp9_sm, &ret); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(ret, 9); + + s = db.ZRemrangebylex("GP9_ZREMRANGEBYLEX", "i", "i", true, true, &ret); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(ret, 1); + ASSERT_TRUE(size_match(&db, "GP9_ZREMRANGEBYLEX", 8)); + ASSERT_TRUE(score_members_match(&db, "GP9_ZREMRANGEBYLEX", + {{1, "e"}, {1, "f"}, {1, "g"}, {1, "h"}, {1, "j"}, {1, "k"}, {1, "l"}, {1, "m"}})); + + // ***************** Group 10 Test ***************** + // {1, e} {1, f} {1, g} {1, h} {1, i} {1, j} {1, k} {1, l} {1, m} + // + std::vector gp10_sm{{1, "e"}, {1, "f"}, {1, "g"}, {1, "h"}, {1, "i"}, + {1, "j"}, {1, "k"}, {1, "l"}, {1, "m"}}; + s = db.ZAdd("GP10_ZREMRANGEBYLEX", gp10_sm, &ret); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(ret, 9); + + s = db.ZRemrangebylex("GP10_ZREMRANGEBYLEX", "i", "i", true, false, &ret); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(ret, 0); + ASSERT_TRUE(size_match(&db, "GP10_ZREMRANGEBYLEX", 9)); + ASSERT_TRUE( + score_members_match(&db, "GP10_ZREMRANGEBYLEX", + {{1, "e"}, {1, "f"}, {1, "g"}, {1, "h"}, {1, "i"}, {1, "j"}, {1, "k"}, {1, "l"}, {1, "m"}})); + + // ***************** Group 11 Test ***************** + // {1, e} {1, f} {1, g} {1, h} {1, i} {1, j} {1, k} {1, l} {1, m} + // + std::vector gp11_sm{{1, "e"}, {1, "f"}, {1, "g"}, {1, "h"}, {1, "i"}, + {1, "j"}, {1, "k"}, {1, "l"}, {1, "m"}}; + s = db.ZAdd("GP11_ZREMRANGEBYLEX", gp11_sm, &ret); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(ret, 9); + + s = db.ZRemrangebylex("GP11_ZREMRANGEBYLEX", "i", "i", false, true, &ret); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(ret, 0); + ASSERT_TRUE(size_match(&db, "GP11_ZREMRANGEBYLEX", 9)); + ASSERT_TRUE( + score_members_match(&db, "GP11_ZREMRANGEBYLEX", + {{1, "e"}, {1, "f"}, {1, "g"}, {1, "h"}, {1, "i"}, {1, "j"}, {1, "k"}, {1, "l"}, {1, "m"}})); + + // ***************** Group 12 Test ***************** + // {1, e} {1, f} {1, g} {1, h} {1, i} {1, j} {1, k} {1, l} {1, m} + // + std::vector gp12_sm{{1, "e"}, {1, "f"}, {1, "g"}, {1, "h"}, {1, "i"}, + {1, "j"}, {1, "k"}, {1, "l"}, {1, "m"}}; + s = db.ZAdd("GP12_ZREMRANGEBYLEX", gp12_sm, &ret); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(ret, 9); + + s = db.ZRemrangebylex("GP12_ZREMRANGEBYLEX", "i", "i", false, false, &ret); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(ret, 0); + ASSERT_TRUE(size_match(&db, "GP12_ZREMRANGEBYLEX", 9)); + ASSERT_TRUE( + score_members_match(&db, "GP12_ZREMRANGEBYLEX", + {{1, "e"}, {1, "f"}, {1, "g"}, {1, "h"}, {1, "i"}, {1, "j"}, {1, "k"}, {1, "l"}, {1, "m"}})); + + // ***************** Group 13 Test ***************** + // {1, e} {1, f} {1, g} {1, h} {1, i} {1, j} {1, k} {1, l} {1, m} + // + std::vector gp13_sm{{1, "e"}, {1, "f"}, {1, "g"}, {1, "h"}, {1, "i"}, + {1, "j"}, {1, "k"}, {1, "l"}, {1, "m"}}; + s = db.ZAdd("GP13_ZREMRANGEBYLEX", gp13_sm, &ret); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(ret, 9); + + s = db.ZRemrangebylex("GP13_ZREMRANGEBYLEX", "-", "+", true, true, &ret); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(ret, 9); + ASSERT_TRUE(size_match(&db, "GP13_ZREMRANGEBYLEX", 0)); + ASSERT_TRUE(score_members_match(&db, "GP13_ZREMRANGEBYLEX", {})); + + // ***************** Group 14 Test ***************** + // {1, e} {1, f} {1, g} {1, h} {1, i} {1, j} {1, k} {1, l} {1, m} + // + 
std::vector gp14_sm{{1, "e"}, {1, "f"}, {1, "g"}, {1, "h"}, {1, "i"}, + {1, "j"}, {1, "k"}, {1, "l"}, {1, "m"}}; + s = db.ZAdd("GP14_ZREMRANGEBYLEX", gp14_sm, &ret); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(ret, 9); + + s = db.ZRemrangebylex("GP14_ZREMRANGEBYLEX", "-", "+", true, false, &ret); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(ret, 9); + ASSERT_TRUE(size_match(&db, "GP14_ZREMRANGEBYLEX", 0)); + ASSERT_TRUE(score_members_match(&db, "GP14_ZREMRANGEBYLEX", {})); + + // ***************** Group 15 Test ***************** + // {1, e} {1, f} {1, g} {1, h} {1, i} {1, j} {1, k} {1, l} {1, m} + // + std::vector gp15_sm{{1, "e"}, {1, "f"}, {1, "g"}, {1, "h"}, {1, "i"}, + {1, "j"}, {1, "k"}, {1, "l"}, {1, "m"}}; + s = db.ZAdd("GP15_ZREMRANGEBYLEX", gp15_sm, &ret); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(ret, 9); + + s = db.ZRemrangebylex("GP15_ZREMRANGEBYLEX", "-", "+", false, true, &ret); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(ret, 9); + ASSERT_TRUE(size_match(&db, "GP15_ZREMRANGEBYLEX", 0)); + ASSERT_TRUE(score_members_match(&db, "GP15_ZREMRANGEBYLEX", {})); + + // ***************** Group 16 Test ***************** + // {1, e} {1, f} {1, g} {1, h} {1, i} {1, j} {1, k} {1, l} {1, m} + // + std::vector gp16_sm{{1, "e"}, {1, "f"}, {1, "g"}, {1, "h"}, {1, "i"}, + {1, "j"}, {1, "k"}, {1, "l"}, {1, "m"}}; + s = db.ZAdd("GP16_ZREMRANGEBYLEX", gp16_sm, &ret); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(ret, 9); + + s = db.ZRemrangebylex("GP16_ZREMRANGEBYLEX", "-", "+", false, false, &ret); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(ret, 9); + ASSERT_TRUE(size_match(&db, "GP16_ZREMRANGEBYLEX", 0)); + ASSERT_TRUE(score_members_match(&db, "GP16_ZREMRANGEBYLEX", {})); + + // ***************** Group 17 Test ***************** + // {1, e} {1, f} {1, g} {1, h} {1, i} {1, j} {1, k} {1, l} {1, m} + // + std::vector gp17_sm{{1, "e"}, {1, "f"}, {1, "g"}, {1, "h"}, {1, "i"}, + {1, "j"}, {1, "k"}, {1, "l"}, {1, "m"}}; + s = db.ZAdd("GP17_ZREMRANGEBYLEX", gp17_sm, &ret); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(ret, 9); + + s = db.ZRemrangebylex("GP17_ZREMRANGEBYLEX", "i", "+", true, true, &ret); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(ret, 5); + ASSERT_TRUE(size_match(&db, "GP17_ZREMRANGEBYLEX", 4)); + ASSERT_TRUE(score_members_match(&db, "GP17_ZREMRANGEBYLEX", {{1, "e"}, {1, "f"}, {1, "g"}, {1, "h"}})); + + // ***************** Group 18 Test ***************** + // {1, e} {1, f} {1, g} {1, h} {1, i} {1, j} {1, k} {1, l} {1, m} + // + std::vector gp18_sm{{1, "e"}, {1, "f"}, {1, "g"}, {1, "h"}, {1, "i"}, + {1, "j"}, {1, "k"}, {1, "l"}, {1, "m"}}; + s = db.ZAdd("GP18_ZREMRANGEBYLEX", gp18_sm, &ret); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(ret, 9); + + s = db.ZRemrangebylex("GP18_ZREMRANGEBYLEX", "i", "+", false, true, &ret); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(ret, 4); + ASSERT_TRUE(size_match(&db, "GP18_ZREMRANGEBYLEX", 5)); + ASSERT_TRUE(score_members_match(&db, "GP18_ZREMRANGEBYLEX", {{1, "e"}, {1, "f"}, {1, "g"}, {1, "h"}, {1, "i"}})); + + // ***************** Group 19 Test ***************** + // {1, e} {1, f} {1, g} {1, h} {1, i} {1, j} {1, k} {1, l} {1, m} + // + std::vector gp19_sm{{1, "e"}, {1, "f"}, {1, "g"}, {1, "h"}, {1, "i"}, + {1, "j"}, {1, "k"}, {1, "l"}, {1, "m"}}; + s = db.ZAdd("GP19_ZREMRANGEBYLEX", gp19_sm, &ret); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(ret, 9); + + s = db.ZRemrangebylex("GP19_ZREMRANGEBYLEX", "-", "i", true, true, &ret); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(ret, 5); + ASSERT_TRUE(size_match(&db, "GP19_ZREMRANGEBYLEX", 4)); + ASSERT_TRUE(score_members_match(&db, "GP19_ZREMRANGEBYLEX", {{1, "j"}, {1, "k"}, {1, "l"}, {1, "m"}})); + + // 
***************** Group 20 Test ***************** + // {1, e} {1, f} {1, g} {1, h} {1, i} {1, j} {1, k} {1, l} {1, m} + // + std::vector gp20_sm{{1, "e"}, {1, "f"}, {1, "g"}, {1, "h"}, {1, "i"}, + {1, "j"}, {1, "k"}, {1, "l"}, {1, "m"}}; + s = db.ZAdd("GP20_ZREMRANGEBYLEX", gp20_sm, &ret); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(ret, 9); + + s = db.ZRemrangebylex("GP20_ZREMRANGEBYLEX", "-", "i", true, false, &ret); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(ret, 4); + ASSERT_TRUE(size_match(&db, "GP20_ZREMRANGEBYLEX", 5)); + ASSERT_TRUE(score_members_match(&db, "GP20_ZREMRANGEBYLEX", {{1, "i"}, {1, "j"}, {1, "k"}, {1, "l"}, {1, "m"}})); + + // ***************** Group 21 Test ***************** + // {1, e} {1, f} {1, g} {1, h} {1, i} {1, j} {1, k} {1, l} {1, m} + // + std::vector gp21_sm{{1, "e"}, {1, "f"}, {1, "g"}, {1, "h"}, {1, "i"}, + {1, "j"}, {1, "k"}, {1, "l"}, {1, "m"}}; + s = db.ZAdd("GP21_ZREMRANGEBYLEX", gp21_sm, &ret); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(ret, 9); + + s = db.ZRemrangebylex("GP21_ZREMRANGEBYLEX", "-", "e", true, true, &ret); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(ret, 1); + ASSERT_TRUE(size_match(&db, "GP21_ZREMRANGEBYLEX", 8)); + ASSERT_TRUE(score_members_match(&db, "GP21_ZREMRANGEBYLEX", + {{1, "f"}, {1, "g"}, {1, "h"}, {1, "i"}, {1, "j"}, {1, "k"}, {1, "l"}, {1, "m"}})); + + // ***************** Group 22 Test ***************** + // {1, e} {1, f} {1, g} {1, h} {1, i} {1, j} {1, k} {1, l} {1, m} + // + std::vector gp22_sm{{1, "e"}, {1, "f"}, {1, "g"}, {1, "h"}, {1, "i"}, + {1, "j"}, {1, "k"}, {1, "l"}, {1, "m"}}; + s = db.ZAdd("GP22_ZREMRANGEBYLEX", gp22_sm, &ret); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(ret, 9); + + s = db.ZRemrangebylex("GP22_ZREMRANGEBYLEX", "-", "e", true, false, &ret); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(ret, 0); + ASSERT_TRUE(size_match(&db, "GP22_ZREMRANGEBYLEX", 9)); + ASSERT_TRUE( + score_members_match(&db, "GP22_ZREMRANGEBYLEX", + {{1, "e"}, {1, "f"}, {1, "g"}, {1, "h"}, {1, "i"}, {1, "j"}, {1, "k"}, {1, "l"}, {1, "m"}})); + + // ***************** Group 23 Test ***************** + // {1, e} {1, f} {1, g} {1, h} {1, i} {1, j} {1, k} {1, l} {1, m} + // + std::vector gp23_sm{{1, "e"}, {1, "f"}, {1, "g"}, {1, "h"}, {1, "i"}, + {1, "j"}, {1, "k"}, {1, "l"}, {1, "m"}}; + s = db.ZAdd("GP23_ZREMRANGEBYLEX", gp23_sm, &ret); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(ret, 9); + + s = db.ZRemrangebylex("GP23_ZREMRANGEBYLEX", "m", "+", true, true, &ret); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(ret, 1); + ASSERT_TRUE(size_match(&db, "GP23_ZREMRANGEBYLEX", 8)); + ASSERT_TRUE(score_members_match(&db, "GP23_ZREMRANGEBYLEX", + {{1, "e"}, {1, "f"}, {1, "g"}, {1, "h"}, {1, "i"}, {1, "j"}, {1, "k"}, {1, "l"}})); + + // ***************** Group 24 Test ***************** + // {1, e} {1, f} {1, g} {1, h} {1, i} {1, j} {1, k} {1, l} {1, m} + // + std::vector gp24_sm{{1, "e"}, {1, "f"}, {1, "g"}, {1, "h"}, {1, "i"}, + {1, "j"}, {1, "k"}, {1, "l"}, {1, "m"}}; + s = db.ZAdd("GP24_ZREMRANGEBYLEX", gp24_sm, &ret); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(ret, 9); + + s = db.ZRemrangebylex("GP24_ZREMRANGEBYLEX", "m", "+", false, true, &ret); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(ret, 0); + ASSERT_TRUE(size_match(&db, "GP24_ZREMRANGEBYLEX", 9)); + ASSERT_TRUE( + score_members_match(&db, "GP24_ZREMRANGEBYLEX", + {{1, "e"}, {1, "f"}, {1, "g"}, {1, "h"}, {1, "i"}, {1, "j"}, {1, "k"}, {1, "l"}, {1, "m"}})); + + // ***************** Group 25 Test ***************** + // {1, e} {1, f} {1, g} {1, h} {1, i} {1, j} {1, k} {1, l} {1, m} (expire) + // + std::vector gp25_sm{{1, "e"}, {1, "f"}, {1, "g"}, {1, "h"}, {1, "i"}, + 
{1, "j"}, {1, "k"}, {1, "l"}, {1, "m"}}; + s = db.ZAdd("GP25_ZREMRANGEBYLEX", gp25_sm, &ret); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(ret, 9); + ASSERT_TRUE(make_expired(&db, "GP25_ZREMRANGEBYLEX")); + + s = db.ZRemrangebylex("GP25_ZREMRANGEBYLEX", "-", "+", true, true, &ret); + ASSERT_TRUE(s.IsNotFound()); + ASSERT_EQ(ret, 0); + + // ***************** Group 26 Test ***************** + s = db.ZRemrangebylex("GP26_ZREMRANGEBYLEX", "-", "+", true, true, &ret); + ASSERT_TRUE(s.IsNotFound()); + ASSERT_EQ(ret, 0); +} + +// ZScan +TEST_F(ZSetsTest, ZScanTest) { // NOLINT + int32_t ret = 0; + int64_t cursor = 0; + int64_t next_cursor = 0; + std::vector score_member_out; + + // ***************** Group 1 Test ***************** + // {0,a} {0,b} {0,c} {0,d} {0,e} {0,f} {0,g} {0,h} + // 0 1 2 3 4 5 6 7 + std::vector gp1_score_member{{0, "a"}, {0, "b"}, {0, "c"}, {0, "d"}, + {0, "e"}, {0, "f"}, {0, "g"}, {0, "h"}}; + s = db.ZAdd("GP1_ZSCAN_KEY", gp1_score_member, &ret); + ASSERT_TRUE(s.ok()); + ASSERT_TRUE(size_match(&db, "GP1_ZSCAN_KEY", 8)); + + s = db.ZScan("GP1_ZSCAN_KEY", 0, "*", 3, &score_member_out, &next_cursor); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(score_member_out.size(), 3); + ASSERT_EQ(next_cursor, 3); + ASSERT_TRUE(score_members_match(score_member_out, {{0, "a"}, {0, "b"}, {0, "c"}})); + + score_member_out.clear(); + cursor = next_cursor, next_cursor = 0; + s = db.ZScan("GP1_ZSCAN_KEY", cursor, "*", 3, &score_member_out, &next_cursor); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(score_member_out.size(), 3); + ASSERT_EQ(next_cursor, 6); + ASSERT_TRUE(score_members_match(score_member_out, {{0, "d"}, {0, "e"}, {0, "f"}})); + + score_member_out.clear(); + cursor = next_cursor, next_cursor = 0; + s = db.ZScan("GP1_ZSCAN_KEY", cursor, "*", 3, &score_member_out, &next_cursor); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(score_member_out.size(), 2); + ASSERT_EQ(next_cursor, 0); + ASSERT_TRUE(score_members_match(score_member_out, {{0, "g"}, {0, "h"}})); + + // ***************** Group 2 Test ***************** + // {0,a} {0,b} {0,c} {0,d} {0,e} {0,f} {0,g} {0,h} + // 0 1 2 3 4 5 6 7 + std::vector gp2_score_member{{0, "a"}, {0, "b"}, {0, "c"}, {0, "d"}, + {0, "e"}, {0, "f"}, {0, "g"}, {0, "h"}}; + s = db.ZAdd("GP2_ZSCAN_KEY", gp2_score_member, &ret); + ASSERT_TRUE(s.ok()); + ASSERT_TRUE(size_match(&db, "GP2_ZSCAN_KEY", 8)); + + s = db.ZScan("GP2_ZSCAN_KEY", 0, "*", 1, &score_member_out, &next_cursor); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(score_member_out.size(), 1); + ASSERT_EQ(next_cursor, 1); + ASSERT_TRUE(score_members_match(score_member_out, {{0, "a"}})); + + score_member_out.clear(); + cursor = next_cursor, next_cursor = 0; + s = db.ZScan("GP2_ZSCAN_KEY", cursor, "*", 1, &score_member_out, &next_cursor); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(score_member_out.size(), 1); + ASSERT_EQ(next_cursor, 2); + ASSERT_TRUE(score_members_match(score_member_out, {{0, "b"}})); + + score_member_out.clear(); + cursor = next_cursor, next_cursor = 0; + s = db.ZScan("GP2_ZSCAN_KEY", cursor, "*", 1, &score_member_out, &next_cursor); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(score_member_out.size(), 1); + ASSERT_EQ(next_cursor, 3); + ASSERT_TRUE(score_members_match(score_member_out, {{0, "c"}})); + + score_member_out.clear(); + cursor = next_cursor, next_cursor = 0; + s = db.ZScan("GP2_ZSCAN_KEY", cursor, "*", 1, &score_member_out, &next_cursor); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(score_member_out.size(), 1); + ASSERT_EQ(next_cursor, 4); + ASSERT_TRUE(score_members_match(score_member_out, {{0, "d"}})); + + score_member_out.clear(); + cursor = 
next_cursor, next_cursor = 0; + s = db.ZScan("GP2_ZSCAN_KEY", cursor, "*", 1, &score_member_out, &next_cursor); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(score_member_out.size(), 1); + ASSERT_EQ(next_cursor, 5); + ASSERT_TRUE(score_members_match(score_member_out, {{0, "e"}})); + + score_member_out.clear(); + cursor = next_cursor, next_cursor = 0; + s = db.ZScan("GP2_ZSCAN_KEY", cursor, "*", 1, &score_member_out, &next_cursor); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(score_member_out.size(), 1); + ASSERT_EQ(next_cursor, 6); + ASSERT_TRUE(score_members_match(score_member_out, {{0, "f"}})); + + score_member_out.clear(); + cursor = next_cursor, next_cursor = 0; + s = db.ZScan("GP2_ZSCAN_KEY", cursor, "*", 1, &score_member_out, &next_cursor); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(score_member_out.size(), 1); + ASSERT_EQ(next_cursor, 7); + ASSERT_TRUE(score_members_match(score_member_out, {{0, "g"}})); + + score_member_out.clear(); + cursor = next_cursor, next_cursor = 0; + s = db.ZScan("GP2_ZSCAN_KEY", cursor, "*", 1, &score_member_out, &next_cursor); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(score_member_out.size(), 1); + ASSERT_EQ(next_cursor, 0); + ASSERT_TRUE(score_members_match(score_member_out, {{0, "h"}})); + + // ***************** Group 3 Test ***************** + // {0,a} {0,b} {0,c} {0,d} {0,e} {0,f} {0,g} {0,h} + // 0 1 2 3 4 5 6 7 + std::vector gp3_score_member{{0, "a"}, {0, "b"}, {0, "c"}, {0, "d"}, + {0, "e"}, {0, "f"}, {0, "g"}, {0, "h"}}; + s = db.ZAdd("GP3_ZSCAN_KEY", gp3_score_member, &ret); + ASSERT_TRUE(s.ok()); + ASSERT_TRUE(size_match(&db, "GP3_ZSCAN_KEY", 8)); + + score_member_out.clear(); + cursor = 0, next_cursor = 0; + s = db.ZScan("GP3_ZSCAN_KEY", cursor, "*", 5, &score_member_out, &next_cursor); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(score_member_out.size(), 5); + ASSERT_EQ(next_cursor, 5); + ASSERT_TRUE(score_members_match(score_member_out, {{0, "a"}, {0, "b"}, {0, "c"}, {0, "d"}, {0, "e"}})); + + score_member_out.clear(); + cursor = next_cursor, next_cursor = 0; + s = db.ZScan("GP3_ZSCAN_KEY", cursor, "*", 5, &score_member_out, &next_cursor); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(score_member_out.size(), 3); + ASSERT_EQ(next_cursor, 0); + ASSERT_TRUE(score_members_match(score_member_out, {{0, "f"}, {0, "g"}, {0, "h"}})); + + // ***************** Group 4 Test ***************** + // {0,a} {0,b} {0,c} {0,d} {0,e} {0,f} {0,g} {0,h} + // 0 1 2 3 4 5 6 7 + std::vector gp4_score_member{{0, "a"}, {0, "b"}, {0, "c"}, {0, "d"}, + {0, "e"}, {0, "f"}, {0, "g"}, {0, "h"}}; + s = db.ZAdd("GP4_ZSCAN_KEY", gp4_score_member, &ret); + ASSERT_TRUE(s.ok()); + ASSERT_TRUE(size_match(&db, "GP4_ZSCAN_KEY", 8)); + + score_member_out.clear(); + cursor = 0, next_cursor = 0; + s = db.ZScan("GP4_ZSCAN_KEY", cursor, "*", 10, &score_member_out, &next_cursor); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(score_member_out.size(), 8); + ASSERT_EQ(next_cursor, 0); + ASSERT_TRUE(score_members_match(score_member_out, + {{0, "a"}, {0, "b"}, {0, "c"}, {0, "d"}, {0, "e"}, {0, "f"}, {0, "g"}, {0, "h"}})); + + // ***************** Group 5 Test ***************** + // {0,a_1_} {0,a_2_} {0,a_3_} {0,b_1_} {0,b_2_} {0,b_3_} {0,c_1_} {0,c_2_} {0,c_3_} + // 0 1 2 3 4 5 6 7 8 + std::vector gp5_score_member{{0, "a_1_"}, {0, "a_2_"}, {0, "a_3_"}, {0, "b_1_"}, {0, "b_2_"}, + {0, "b_3_"}, {0, "c_1_"}, {0, "c_2_"}, {0, "c_3_"}}; + s = db.ZAdd("GP5_ZSCAN_KEY", gp5_score_member, &ret); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(ret, 9); + ASSERT_TRUE(size_match(&db, "GP5_ZSCAN_KEY", 9)); + + score_member_out.clear(); + cursor = 0, next_cursor = 0; + s = 
db.ZScan("GP5_ZSCAN_KEY", cursor, "*1*", 3, &score_member_out, &next_cursor); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(score_member_out.size(), 1); + ASSERT_EQ(next_cursor, 3); + ASSERT_TRUE(score_members_match(score_member_out, {{0, "a_1_"}})); + + score_member_out.clear(); + cursor = next_cursor, next_cursor = 0; + s = db.ZScan("GP5_ZSCAN_KEY", cursor, "*1*", 3, &score_member_out, &next_cursor); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(score_member_out.size(), 1); + ASSERT_EQ(next_cursor, 6); + ASSERT_TRUE(score_members_match(score_member_out, {{0, "b_1_"}})); + + score_member_out.clear(); + cursor = next_cursor, next_cursor = 0; + s = db.ZScan("GP5_ZSCAN_KEY", cursor, "*1*", 3, &score_member_out, &next_cursor); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(score_member_out.size(), 1); + ASSERT_EQ(next_cursor, 0); + ASSERT_TRUE(score_members_match(score_member_out, {{0, "c_1_"}})); + + // ***************** Group 6 Test ***************** + // {0,a_1_} {0,a_2_} {0,a_3_} {0,b_1_} {0,b_2_} {0,b_3_} {0,c_1_} {0,c_2_} {0,c_3_} + // 0 1 2 3 4 5 6 7 8 + std::vector gp6_score_member{{0, "a_1_"}, {0, "a_2_"}, {0, "a_3_"}, {0, "b_1_"}, {0, "b_2_"}, + {0, "b_3_"}, {0, "c_1_"}, {0, "c_2_"}, {0, "c_3_"}}; + s = db.ZAdd("GP6_ZSCAN_KEY", gp6_score_member, &ret); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(ret, 9); + ASSERT_TRUE(size_match(&db, "GP6_ZSCAN_KEY", 9)); + + score_member_out.clear(); + cursor = 0, next_cursor = 0; + s = db.ZScan("GP6_ZSCAN_KEY", cursor, "a*", 3, &score_member_out, &next_cursor); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(score_member_out.size(), 3); + ASSERT_EQ(next_cursor, 0); + ASSERT_TRUE(score_members_match(score_member_out, {{0, "a_1_"}, {0, "a_2_"}, {0, "a_3_"}})); + + score_member_out.clear(); + cursor = 0, next_cursor = 0; + s = db.ZScan("GP6_ZSCAN_KEY", cursor, "a*", 2, &score_member_out, &next_cursor); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(score_member_out.size(), 2); + ASSERT_EQ(next_cursor, 2); + ASSERT_TRUE(score_members_match(score_member_out, {{0, "a_1_"}, {0, "a_2_"}})); + + score_member_out.clear(); + cursor = next_cursor, next_cursor = 0; + s = db.ZScan("GP6_ZSCAN_KEY", cursor, "a*", 2, &score_member_out, &next_cursor); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(score_member_out.size(), 1); + ASSERT_EQ(next_cursor, 0); + ASSERT_TRUE(score_members_match(score_member_out, {{0, "a_3_"}})); + + score_member_out.clear(); + cursor = 0, next_cursor = 0; + s = db.ZScan("GP6_ZSCAN_KEY", cursor, "a*", 1, &score_member_out, &next_cursor); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(score_member_out.size(), 1); + ASSERT_EQ(next_cursor, 1); + ASSERT_TRUE(score_members_match(score_member_out, {{0, "a_1_"}})); + + score_member_out.clear(); + cursor = next_cursor, next_cursor = 0; + s = db.ZScan("GP6_ZSCAN_KEY", cursor, "a*", 1, &score_member_out, &next_cursor); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(score_member_out.size(), 1); + ASSERT_EQ(next_cursor, 2); + ASSERT_TRUE(score_members_match(score_member_out, {{0, "a_2_"}})); + + score_member_out.clear(); + cursor = next_cursor, next_cursor = 0; + s = db.ZScan("GP6_ZSCAN_KEY", cursor, "a*", 1, &score_member_out, &next_cursor); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(score_member_out.size(), 1); + ASSERT_EQ(next_cursor, 0); + ASSERT_TRUE(score_members_match(score_member_out, {{0, "a_3_"}})); + + // ***************** Group 7 Test ***************** + // {0,a_1_} {0,a_2_} {0,a_3_} {0,b_1_} {0,b_2_} {0,b_3_} {0,c_1_} {0,c_2_} {0,c_3_} + // 0 1 2 3 4 5 6 7 8 + std::vector gp7_score_member{{0, "a_1_"}, {0, "a_2_"}, {0, "a_3_"}, {0, "b_1_"}, {0, "b_2_"}, + {0, "b_3_"}, {0, "c_1_"}, {0, "c_2_"}, {0, 
"c_3_"}}; + s = db.ZAdd("GP7_ZSCAN_KEY", gp7_score_member, &ret); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(ret, 9); + ASSERT_TRUE(size_match(&db, "GP7_ZSCAN_KEY", 9)); + + score_member_out.clear(); + cursor = 0, next_cursor = 0; + s = db.ZScan("GP7_ZSCAN_KEY", cursor, "b*", 3, &score_member_out, &next_cursor); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(score_member_out.size(), 3); + ASSERT_EQ(next_cursor, 0); + ASSERT_TRUE(score_members_match(score_member_out, {{0, "b_1_"}, {0, "b_2_"}, {0, "b_3_"}})); + + score_member_out.clear(); + cursor = 0, next_cursor = 0; + s = db.ZScan("GP7_ZSCAN_KEY", cursor, "b*", 2, &score_member_out, &next_cursor); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(score_member_out.size(), 2); + ASSERT_EQ(next_cursor, 2); + ASSERT_TRUE(score_members_match(score_member_out, {{0, "b_1_"}, {0, "b_2_"}})); + + score_member_out.clear(); + cursor = next_cursor, next_cursor = 0; + s = db.ZScan("GP7_ZSCAN_KEY", cursor, "b*", 2, &score_member_out, &next_cursor); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(score_member_out.size(), 1); + ASSERT_EQ(next_cursor, 0); + ASSERT_TRUE(score_members_match(score_member_out, {{0, "b_3_"}})); + + score_member_out.clear(); + cursor = 0, next_cursor = 0; + s = db.ZScan("GP7_ZSCAN_KEY", cursor, "b*", 1, &score_member_out, &next_cursor); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(score_member_out.size(), 1); + ASSERT_EQ(next_cursor, 1); + ASSERT_TRUE(score_members_match(score_member_out, {{0, "b_1_"}})); + + score_member_out.clear(); + cursor = next_cursor, next_cursor = 0; + s = db.ZScan("GP7_ZSCAN_KEY", cursor, "b*", 1, &score_member_out, &next_cursor); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(score_member_out.size(), 1); + ASSERT_EQ(next_cursor, 2); + ASSERT_TRUE(score_members_match(score_member_out, {{0, "b_2_"}})); + + score_member_out.clear(); + cursor = next_cursor, next_cursor = 0; + s = db.ZScan("GP7_ZSCAN_KEY", cursor, "b*", 1, &score_member_out, &next_cursor); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(score_member_out.size(), 1); + ASSERT_EQ(next_cursor, 0); + ASSERT_TRUE(score_members_match(score_member_out, {{0, "b_3_"}})); + + // ***************** Group 8 Test ***************** + // {0,a_1_} {0,a_2_} {0,a_3_} {0,b_1_} {0,b_2_} {0,b_3_} {0,c_1_} {0,c_2_} {0,c_3_} + // 0 1 2 3 4 5 6 7 8 + std::vector gp8_score_member{{0, "a_1_"}, {0, "a_2_"}, {0, "a_3_"}, {0, "b_1_"}, {0, "b_2_"}, + {0, "b_3_"}, {0, "c_1_"}, {0, "c_2_"}, {0, "c_3_"}}; + s = db.ZAdd("GP8_ZSCAN_KEY", gp8_score_member, &ret); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(ret, 9); + ASSERT_TRUE(size_match(&db, "GP8_ZSCAN_KEY", 9)); + + score_member_out.clear(); + cursor = 0, next_cursor = 0; + s = db.ZScan("GP8_ZSCAN_KEY", cursor, "c*", 3, &score_member_out, &next_cursor); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(score_member_out.size(), 3); + ASSERT_EQ(next_cursor, 0); + ASSERT_TRUE(score_members_match(score_member_out, {{0, "c_1_"}, {0, "c_2_"}, {0, "c_3_"}})); + + score_member_out.clear(); + cursor = 0, next_cursor = 0; + s = db.ZScan("GP8_ZSCAN_KEY", cursor, "c*", 2, &score_member_out, &next_cursor); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(score_member_out.size(), 2); + ASSERT_EQ(next_cursor, 2); + ASSERT_TRUE(score_members_match(score_member_out, {{0, "c_1_"}, {0, "c_2_"}})); + + score_member_out.clear(); + cursor = next_cursor, next_cursor = 0; + s = db.ZScan("GP8_ZSCAN_KEY", cursor, "c*", 2, &score_member_out, &next_cursor); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(score_member_out.size(), 1); + ASSERT_EQ(next_cursor, 0); + ASSERT_TRUE(score_members_match(score_member_out, {{0, "c_3_"}})); + + score_member_out.clear(); + cursor = 0, 
next_cursor = 0; + s = db.ZScan("GP8_ZSCAN_KEY", cursor, "c*", 1, &score_member_out, &next_cursor); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(score_member_out.size(), 1); + ASSERT_EQ(next_cursor, 1); + ASSERT_TRUE(score_members_match(score_member_out, {{0, "c_1_"}})); + + score_member_out.clear(); + cursor = next_cursor, next_cursor = 0; + s = db.ZScan("GP8_ZSCAN_KEY", cursor, "c*", 1, &score_member_out, &next_cursor); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(score_member_out.size(), 1); + ASSERT_EQ(next_cursor, 2); + ASSERT_TRUE(score_members_match(score_member_out, {{0, "c_2_"}})); + + score_member_out.clear(); + cursor = next_cursor, next_cursor = 0; + s = db.ZScan("GP8_ZSCAN_KEY", cursor, "c*", 1, &score_member_out, &next_cursor); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(score_member_out.size(), 1); + ASSERT_EQ(next_cursor, 0); + ASSERT_TRUE(score_members_match(score_member_out, {{0, "c_3_"}})); + + // ***************** Group 9 Test ***************** + // {0,a_1_} {0,a_2_} {0,a_3_} {0,b_1_} {0,b_2_} {0,b_3_} {0,c_1_} {0,c_2_} {0,c_3_} + // 0 1 2 3 4 5 6 7 8 + std::vector gp9_score_member{{0, "a_1_"}, {0, "a_2_"}, {0, "a_3_"}, {0, "b_1_"}, {0, "b_2_"}, + {0, "b_3_"}, {0, "c_1_"}, {0, "c_2_"}, {0, "c_3_"}}; + s = db.ZAdd("GP9_ZSCAN_KEY", gp9_score_member, &ret); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(ret, 9); + ASSERT_TRUE(size_match(&db, "GP9_ZSCAN_KEY", 9)); + + score_member_out.clear(); + cursor = 0, next_cursor = 0; + s = db.ZScan("GP9_ZSCAN_KEY", cursor, "d*", 3, &score_member_out, &next_cursor); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(score_member_out.size(), 0); + ASSERT_EQ(next_cursor, 0); + ASSERT_TRUE(score_members_match(score_member_out, {})); + + // ***************** Group 10 Test ***************** + // {0,a_1_} {0,a_2_} {0,a_3_} {0,b_1_} {0,b_2_} {0,b_3_} {0,c_1_} {0,c_2_} {0,c_3_} + // 0 1 2 3 4 5 6 7 8 + std::vector gp10_score_member{{0, "a_1_"}, {0, "a_2_"}, {0, "a_3_"}, {0, "b_1_"}, {0, "b_2_"}, + {0, "b_3_"}, {0, "c_1_"}, {0, "c_2_"}, {0, "c_3_"}}; + s = db.ZAdd("GP10_ZSCAN_KEY", gp10_score_member, &ret); + ASSERT_TRUE(s.ok()); + ASSERT_EQ(ret, 9); + ASSERT_TRUE(size_match(&db, "GP10_ZSCAN_KEY", 9)); + + ASSERT_TRUE(make_expired(&db, "GP10_ZSCAN_KEY")); + score_member_out.clear(); + cursor = 0, next_cursor = 0; + s = db.ZScan("GP10_ZSCAN_KEY", cursor, "*", 10, &score_member_out, &next_cursor); + ASSERT_TRUE(s.IsNotFound()); + ASSERT_EQ(score_member_out.size(), 0); + ASSERT_EQ(next_cursor, 0); + ASSERT_TRUE(score_members_match(score_member_out, {})); + + // ***************** Group 11 Test ***************** + // ZScan Not Exist Key + score_member_out.clear(); + cursor = 0, next_cursor = 0; + s = db.ZScan("GP11_ZSCAN_KEY", cursor, "*", 10, &score_member_out, &next_cursor); + ASSERT_TRUE(s.IsNotFound()); + ASSERT_EQ(score_member_out.size(), 0); + ASSERT_EQ(next_cursor, 0); + ASSERT_TRUE(score_members_match(score_member_out, {})); +} + +int main(int argc, char** argv) { + if (!pstd::FileExists("./log")) { + pstd::CreatePath("./log"); + } + FLAGS_log_dir = "./log"; + FLAGS_minloglevel = 0; + FLAGS_max_log_size = 1800; + FLAGS_logbufsecs = 0; + ::google::InitGoogleLogging("zsets_test"); + ::testing::InitGoogleTest(&argc, argv); + return RUN_ALL_TESTS(); +} diff --git a/tools/pika_migrate/src/throttle.cc b/tools/pika_migrate/src/throttle.cc new file mode 100644 index 0000000000..4919fb453a --- /dev/null +++ b/tools/pika_migrate/src/throttle.cc @@ -0,0 +1,56 @@ +// Copyright (c) 2023-present, Qihoo, Inc. All rights reserved. 
+// This source code is licensed under the BSD-style license found in the +// LICENSE file in the root directory of this source tree. An additional grant +// of patent rights can be found in the PATENTS file in the same directory. + +#include "include/throttle.h" +#include +#include +#include "pstd/include/env.h" + +namespace rsync { + +Throttle::Throttle(size_t throttle_throughput_bytes, size_t check_cycle) + : throttle_throughput_bytes_(throttle_throughput_bytes), + last_throughput_check_time_us_(caculate_check_time_us_(pstd::NowMicros(), check_cycle)), + cur_throughput_bytes_(0) {} + +Throttle::~Throttle() {} + +size_t Throttle::ThrottledByThroughput(size_t bytes) { + size_t available_size = bytes; + size_t now = pstd::NowMicros(); + size_t limit_per_cycle = throttle_throughput_bytes_.load() / check_cycle_; + std::unique_lock lock(keys_mutex_); + if (cur_throughput_bytes_ + bytes > limit_per_cycle) { + // reading another |bytes| exceeds the limit + if (now - last_throughput_check_time_us_ <= 1 * 1000 * 1000 / check_cycle_) { + // if the time since the last check is within one cycle, read more data + // to make full use of the current cycle's throughput budget. + available_size = limit_per_cycle > cur_throughput_bytes_ ? limit_per_cycle - cur_throughput_bytes_ : 0; + cur_throughput_bytes_ = limit_per_cycle; + } else { + // otherwise, read the data in the next cycle. + available_size = bytes > limit_per_cycle ? limit_per_cycle : bytes; + cur_throughput_bytes_ = available_size; + last_throughput_check_time_us_ = caculate_check_time_us_(now, check_cycle_); + } + } else { + // reading another |bytes| doesn't exceed the limit (less than or equal to it), + // so account for it in the current cycle + available_size = bytes; + cur_throughput_bytes_ += available_size; + } + return available_size; +} + +void Throttle::ReturnUnusedThroughput(size_t acquired, size_t consumed, size_t elaspe_time_us) { + size_t now = pstd::NowMicros(); + std::unique_lock lock(keys_mutex_); + if (now - elaspe_time_us < last_throughput_check_time_us_) { + // Tokens were acquired in the last cycle; ignore + return; + } + cur_throughput_bytes_ = std::max(cur_throughput_bytes_ - (acquired - consumed), size_t(0)); +} +} // namespace rsync From 13ecac683d02cd32ab12cd668e9701c9c089da72 Mon Sep 17 00:00:00 2001 From: wuyun Date: Fri, 3 Jan 2025 11:40:08 +0000 Subject: [PATCH 3/4] feat: pika-migrate tools support pika-v4.0.0 --- tools/CMakeLists.txt | 1 + tools/pika_migrate/conf/pika.conf | 10 + tools/pika_migrate/include/migrator_thread.h | 66 +++ tools/pika_migrate/include/pika_conf.h | 14 + .../pika_migrate/include/pika_repl_bgworker.h | 1 + tools/pika_migrate/include/pika_sender.h | 43 ++ tools/pika_migrate/include/pika_server.h | 12 + tools/pika_migrate/include/redis_sender.h | 52 ++ tools/pika_migrate/pika-migrate.md | 4 +- tools/pika_migrate/src/migrator_thread.cc | 452 ++++++++++++++++++ tools/pika_migrate/src/pika_conf.cc | 16 + tools/pika_migrate/src/pika_db.cc | 3 + tools/pika_migrate/src/pika_repl_bgworker.cc | 32 ++ tools/pika_migrate/src/pika_sender.cc | 168 +++++++ tools/pika_migrate/src/pika_server.cc | 102 ++++ tools/pika_migrate/src/redis_sender.cc | 190 ++++++++ 16 files changed, 1164 insertions(+), 2 deletions(-) create mode 100644 tools/pika_migrate/include/migrator_thread.h create mode 100644 tools/pika_migrate/include/pika_sender.h create mode 100644 tools/pika_migrate/include/redis_sender.h create mode 100644 tools/pika_migrate/src/migrator_thread.cc create mode 100644 tools/pika_migrate/src/pika_sender.cc create mode 100644 
tools/pika_migrate/src/redis_sender.cc diff --git a/tools/CMakeLists.txt b/tools/CMakeLists.txt index c1f39f6fd8..0cf1e472bc 100644 --- a/tools/CMakeLists.txt +++ b/tools/CMakeLists.txt @@ -3,6 +3,7 @@ add_subdirectory(./benchmark_client) add_subdirectory(./binlog_sender) add_subdirectory(./manifest_generator) add_subdirectory(./rdb_to_pika) +add_subdirectory(./pika_migrate) #add_subdirectory(./pika_to_txt) #add_subdirectory(./txt_to_pika) #add_subdirectory(./pika-port/pika_port_3) diff --git a/tools/pika_migrate/conf/pika.conf b/tools/pika_migrate/conf/pika.conf index d4f0efb011..ffcd0c1403 100644 --- a/tools/pika_migrate/conf/pika.conf +++ b/tools/pika_migrate/conf/pika.conf @@ -293,6 +293,16 @@ sync-window-size : 9000 # Supported Units [K|M|G]. Its default unit is in [bytes] and its default value is 268435456(256MB). The value range is [64MB, 1GB]. max-conn-rbuf-size : 268435456 +################### +## Migrate Settings +################### + +target-redis-host : 127.0.0.1 +target-redis-port : 6379 +target-redis-pwd : + +sync-batch-num : 100 +redis-sender-num : 10 ############################################################################## #! Critical Settings !# diff --git a/tools/pika_migrate/include/migrator_thread.h b/tools/pika_migrate/include/migrator_thread.h new file mode 100644 index 0000000000..42676d3442 --- /dev/null +++ b/tools/pika_migrate/include/migrator_thread.h @@ -0,0 +1,66 @@ +#ifndef MIGRATOR_THREAD_H_ +#define MIGRATOR_THREAD_H_ + +#include +#include + +#include "storage/storage.h" +#include "net/include/redis_cli.h" + +#include "include/pika_sender.h" + +class MigratorThread : public net::Thread { + public: + MigratorThread(std::shared_ptr<storage::Storage> storage_, std::vector<std::shared_ptr<PikaSender>> *senders, int type, int thread_num) : + storage_(storage_), + should_exit_(false), + senders_(senders), + type_(type), + thread_num_(thread_num), + thread_index_(0), + num_(0) { + } + + virtual ~MigratorThread(); + + int64_t num() { + std::lock_guard l(num_mutex_); + return num_; + } + + void Stop() { + should_exit_ = true; + } + + private: + void PlusNum() { + std::lock_guard l(num_mutex_); + ++num_; + } + + void DispatchKey(const std::string &command, const std::string& key = ""); + + void MigrateDB(); + void MigrateStringsDB(); + void MigrateListsDB(); + void MigrateHashesDB(); + void MigrateSetsDB(); + void MigrateZsetsDB(); + + virtual void *ThreadMain(); + + private: + std::shared_ptr<storage::Storage> storage_; + bool should_exit_; + + std::vector<std::shared_ptr<PikaSender>> *senders_; + int type_; + int thread_num_; + int thread_index_; + + int64_t num_; + std::mutex num_mutex_; +}; + +#endif + diff --git a/tools/pika_migrate/include/pika_conf.h b/tools/pika_migrate/include/pika_conf.h index 5aa0c790c2..19ef33afde 100644 --- a/tools/pika_migrate/include/pika_conf.h +++ b/tools/pika_migrate/include/pika_conf.h @@ -424,6 +424,13 @@ class PikaConf : public pstd::BaseConf { int max_conn_rbuf_size() { return max_conn_rbuf_size_.load(); } int consensus_level() { return consensus_level_.load(); } int replication_num() { return replication_num_.load(); } + + std::string target_redis_host() { return target_redis_host_; } + int target_redis_port() { return target_redis_port_; } + std::string target_redis_pwd() { return target_redis_pwd_; } + int sync_batch_num() { return sync_batch_num_; } + int redis_sender_num() { return redis_sender_num_; } + int rate_limiter_mode() { std::shared_lock l(rwlock_); return rate_limiter_mode_; @@ -1067,6 +1074,13 @@ class PikaConf : public pstd::BaseConf { std::map diff_commands_; void TryPushDiffCommands(const 
std::string& command, const std::string& value); + // migrate configure items + std::string target_redis_host_; + int target_redis_port_; + std::string target_redis_pwd_; + int sync_batch_num_; + int redis_sender_num_; + // // Critical configure items // diff --git a/tools/pika_migrate/include/pika_repl_bgworker.h b/tools/pika_migrate/include/pika_repl_bgworker.h index dd62622fb9..e548ab551d 100644 --- a/tools/pika_migrate/include/pika_repl_bgworker.h +++ b/tools/pika_migrate/include/pika_repl_bgworker.h @@ -47,6 +47,7 @@ class PikaReplBgWorker { net::BGThread bg_thread_; static int HandleWriteBinlog(net::RedisParser* parser, const net::RedisCmdArgsType& argv); static void ParseBinlogOffset(const InnerMessage::BinlogOffset& pb_offset, LogOffset* offset); + static void ParseAndSendPikaCommand(const std::shared_ptr& c_ptr); }; #endif // PIKA_REPL_BGWROKER_H_ diff --git a/tools/pika_migrate/include/pika_sender.h b/tools/pika_migrate/include/pika_sender.h new file mode 100644 index 0000000000..172c65b24c --- /dev/null +++ b/tools/pika_migrate/include/pika_sender.h @@ -0,0 +1,43 @@ +#ifndef PIKA_SENDER_H_ +#define PIKA_SENDER_H_ + +#include +#include +#include +#include +#include + +#include "net/include/bg_thread.h" +#include "net/include/net_cli.h" +#include "net/include/redis_cli.h" + +class PikaSender : public net::Thread { +public: + PikaSender(std::string ip, int64_t port, std::string password); + virtual ~PikaSender(); + void LoadKey(const std::string &cmd); + void Stop(); + + int64_t elements() { return elements_; } + + void SendCommand(std::string &command, const std::string &key); + int QueueSize(); + void ConnectRedis(); + +private: + net::NetCli *cli_; + pstd::CondVar wsignal_; + pstd::CondVar rsignal_; + std::mutex signal_mutex; + std::mutex keys_queue_mutex_; + std::queue keys_queue_; + std::string ip_; + int port_; + std::string password_; + std::atomic should_exit_; + int64_t elements_; + + virtual void *ThreadMain(); +}; + +#endif diff --git a/tools/pika_migrate/include/pika_server.h b/tools/pika_migrate/include/pika_server.h index 8c24a07a36..8418a15a85 100644 --- a/tools/pika_migrate/include/pika_server.h +++ b/tools/pika_migrate/include/pika_server.h @@ -47,6 +47,7 @@ #include "include/pika_slot_command.h" #include "include/pika_statistic.h" #include "include/pika_transaction.h" +#include "include/redis_sender.h" #include "include/rsync_server.h" extern std::unique_ptr g_pika_conf; @@ -307,6 +308,12 @@ class PikaServer : public pstd::noncopyable { pstd::Status GetCmdRouting(std::vector& redis_cmds, std::vector* dst, bool* all_local); + /* + * migrate used + */ + int SendRedisCommand(const std::string& command, const std::string& key); + void RetransmitData(const std::string& path); + // info debug use void ServerStatus(std::string* info); @@ -615,6 +622,11 @@ class PikaServer : public pstd::noncopyable { */ std::unique_ptr pika_auxiliary_thread_; + /* + * migrate to redis used + */ + std::vector> redis_senders_; + /* * Async slotsMgrt use */ diff --git a/tools/pika_migrate/include/redis_sender.h b/tools/pika_migrate/include/redis_sender.h new file mode 100644 index 0000000000..0b84c5f0f1 --- /dev/null +++ b/tools/pika_migrate/include/redis_sender.h @@ -0,0 +1,52 @@ +#ifndef REDIS_SENDER_H_ +#define REDIS_SENDER_H_ + +#include +#include +#include +#include +#include + +#include "pika_repl_bgworker.h" +#include "net/include/net_cli.h" +#include "net/include/redis_cli.h" + +class RedisSender : public net::Thread { + public: + RedisSender(int id, std::string ip, int64_t port, 
diff --git a/tools/pika_migrate/include/redis_sender.h b/tools/pika_migrate/include/redis_sender.h
new file mode 100644
index 0000000000..0b84c5f0f1
--- /dev/null
+++ b/tools/pika_migrate/include/redis_sender.h
@@ -0,0 +1,52 @@
+#ifndef REDIS_SENDER_H_
+#define REDIS_SENDER_H_
+
+#include <atomic>
+#include <ctime>
+#include <memory>
+#include <queue>
+#include <string>
+
+#include "pika_repl_bgworker.h"
+#include "net/include/net_cli.h"
+#include "net/include/redis_cli.h"
+
+class RedisSender : public net::Thread {
+ public:
+  RedisSender(int id, std::string ip, int64_t port, std::string password);
+  virtual ~RedisSender();
+  void Stop(void);
+  int64_t elements() {
+    return elements_;
+  }
+
+  void SendRedisCommand(const std::string &command);
+
+ private:
+  int SendCommand(std::string &command);
+  void ConnectRedis();
+  size_t commandQueueSize() {
+    std::lock_guard l(keys_mutex_);
+    return commands_queue_.size();
+  }
+
+ private:
+  int id_;
+  std::shared_ptr<net::NetCli> cli_;
+  pstd::CondVar rsignal_;
+  pstd::CondVar wsignal_;
+  pstd::Mutex signal_mutex_;
+  pstd::Mutex keys_mutex_;
+  std::queue<std::string> commands_queue_;
+  std::string ip_;
+  int port_;
+  std::string password_;
+  bool should_exit_;
+  int32_t cnt_;
+  int64_t elements_;
+  std::atomic<time_t> last_write_time_;
+
+  virtual void *ThreadMain();
+};
+
+#endif
diff --git a/tools/pika_migrate/pika-migrate.md b/tools/pika_migrate/pika-migrate.md
index 9236cce658..9ec245c44f 100644
--- a/tools/pika_migrate/pika-migrate.md
+++ b/tools/pika_migrate/pika-migrate.md
@@ -1,7 +1,7 @@
-## Pika 3.5 to Redis migration tool
+## Pika 4.0 to Redis migration tool
 
 ### Supported versions:
-Pika 3.5 - 4.0, standalone mode, single DB only
+Pika 4.0, standalone mode, single DB only
 
 ### Features
 Migrate data from Pika to Pika or Redis online (full and incremental sync supported)
diff --git a/tools/pika_migrate/src/migrator_thread.cc b/tools/pika_migrate/src/migrator_thread.cc
new file mode 100644
index 0000000000..093db6ef3a
--- /dev/null
+++ b/tools/pika_migrate/src/migrator_thread.cc
@@ -0,0 +1,452 @@
+// Copyright (c) 2015-present, Qihoo, Inc. All rights reserved.
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree. An additional grant
+// of patent rights can be found in the PATENTS file in the same directory.
+
+#include "include/migrator_thread.h"
+
+#include <unistd.h>
+
+#include <functional>
+#include <vector>
+#define GLOG_USE_GLOG_EXPORT
+#include <glog/logging.h>
+
+#include "storage/storage.h"
+#include "storage/src/redis.h"
+#include "src/scope_snapshot.h"
+#include "src/strings_value_format.h"
+
+#include "include/pika_conf.h"
+
+const int64_t MAX_BATCH_NUM = 30000;
+
+extern std::unique_ptr<PikaConf> g_pika_conf;
+
+MigratorThread::~MigratorThread() {
+}
+
+void MigratorThread::MigrateStringsDB() {
+  int64_t scan_batch_num = g_pika_conf->sync_batch_num() * 10;
+  if (MAX_BATCH_NUM < scan_batch_num) {
+    if (g_pika_conf->sync_batch_num() < MAX_BATCH_NUM) {
+      scan_batch_num = MAX_BATCH_NUM;
+    } else {
+      scan_batch_num = g_pika_conf->sync_batch_num() * 2;
+    }
+  }
+
+  int64_t ttl = -1;
+  int64_t cursor = 0;
+  storage::Status s;
+  std::string value;
+  std::vector<std::string> keys;
+  int64_t timestamp;
+  while (true) {
+    cursor = storage_->Scan(storage::DataType::kStrings, cursor, "*", scan_batch_num, &keys);
+
+    for (const auto& key : keys) {
+      s = storage_->Get(key, &value);
+      if (!s.ok()) {
+        LOG(WARNING) << "get " << key << " error: " << s.ToString();
+        continue;
+      }
+
+      net::RedisCmdArgsType argv;
+      std::string cmd;
+
+      argv.push_back("SET");
+      argv.push_back(key);
+      argv.push_back(value);
+
+      ttl = -1;
+      timestamp = storage_->TTL(key);
+      if (timestamp != -2) {
+        ttl = timestamp;
+      }
+
+      if (ttl > 0) {
+        argv.push_back("EX");
+        argv.push_back(std::to_string(ttl));
+      }
+
+      net::SerializeRedisCommand(argv, &cmd);
+      PlusNum();
+      DispatchKey(cmd, key);
+    }
+
+    if (!cursor) {
+      break;
+    }
+  }
+}
+
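The string migration above folds the key's remaining TTL into the SET command itself. The same translation, extracted as a pure function for clarity (BuildSetArgv is an illustrative name; the TTL convention matches the code above: -2 means missing or expired, -1 means no expiration, a positive value is seconds remaining):

```cpp
#include <string>
#include <vector>

// Translate one scanned string key into the argv of a Redis SET command.
std::vector<std::string> BuildSetArgv(const std::string& key,
                                      const std::string& value, int64_t ttl) {
  std::vector<std::string> argv = {"SET", key, value};
  if (ttl > 0) {  // only propagate a real, positive TTL
    argv.push_back("EX");
    argv.push_back(std::to_string(ttl));
  }
  return argv;
}
```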
+void MigratorThread::MigrateListsDB() {
+  int64_t scan_batch_num = g_pika_conf->sync_batch_num() * 10;
+  if (MAX_BATCH_NUM < scan_batch_num) {
+    if (g_pika_conf->sync_batch_num() < MAX_BATCH_NUM) {
+      scan_batch_num = MAX_BATCH_NUM;
+    } else {
+      scan_batch_num = g_pika_conf->sync_batch_num() * 2;
+    }
+  }
+
+  int64_t ttl = -1;
+  int64_t cursor = 0;
+  storage::Status s;
+  std::vector<std::string> keys;
+  int64_t timestamp;
+
+  while (true) {
+    cursor = storage_->Scan(storage::DataType::kLists, cursor, "*", scan_batch_num, &keys);
+
+    for (const auto& key : keys) {
+      int64_t pos = 0;
+      std::vector<std::string> nodes;
+      storage::Status s = storage_->LRange(key, pos, pos + g_pika_conf->sync_batch_num() - 1, &nodes);
+      if (!s.ok()) {
+        LOG(WARNING) << "db->LRange(key:" << key << ", pos:" << pos
+                     << ", batch size: " << g_pika_conf->sync_batch_num() << ") = " << s.ToString();
+        continue;
+      }
+
+      while (s.ok() && !should_exit_ && !nodes.empty()) {
+        net::RedisCmdArgsType argv;
+        std::string cmd;
+
+        argv.push_back("RPUSH");
+        argv.push_back(key);
+        for (const auto& node : nodes) {
+          argv.push_back(node);
+        }
+
+        net::SerializeRedisCommand(argv, &cmd);
+        PlusNum();
+        DispatchKey(cmd, key);
+
+        pos += g_pika_conf->sync_batch_num();
+        nodes.clear();
+        s = storage_->LRange(key, pos, pos + g_pika_conf->sync_batch_num() - 1, &nodes);
+        if (!s.ok()) {
+          LOG(WARNING) << "db->LRange(key:" << key << ", pos:" << pos
+                       << ", batch size:" << g_pika_conf->sync_batch_num() << ") = " << s.ToString();
+        }
+      }
+
+      ttl = -1;
+      timestamp = storage_->TTL(key);
+      if (timestamp != -2) {
+        ttl = timestamp;
+      }
+
+      if (s.ok() && ttl > 0) {
+        net::RedisCmdArgsType argv;
+        std::string cmd;
+
+        argv.push_back("EXPIRE");
+        argv.push_back(key);
+        argv.push_back(std::to_string(ttl));
+
+        net::SerializeRedisCommand(argv, &cmd);
+        PlusNum();
+        DispatchKey(cmd, key);
+      }
+    }
+
+    if (!cursor) {
+      break;
+    }
+  }
+}
+
+void MigratorThread::MigrateHashesDB() {
+  int64_t scan_batch_num = g_pika_conf->sync_batch_num() * 10;
+  if (MAX_BATCH_NUM < scan_batch_num) {
+    if (g_pika_conf->sync_batch_num() < MAX_BATCH_NUM) {
+      scan_batch_num = MAX_BATCH_NUM;
+    } else {
+      scan_batch_num = g_pika_conf->sync_batch_num() * 2;
+    }
+  }
+
+  int64_t ttl = -1;
+  int64_t cursor = 0;
+  storage::Status s;
+  std::vector<std::string> keys;
+  int64_t timestamp;
+
+  while (true) {
+    cursor = storage_->Scan(storage::DataType::kHashes, cursor, "*", scan_batch_num, &keys);
+
+    for (const auto& key : keys) {
+      std::vector<storage::FieldValue> fvs;
+      storage::Status s = storage_->HGetall(key, &fvs);
+      if (!s.ok()) {
+        LOG(WARNING) << "db->HGetall(key:" << key << ") = " << s.ToString();
+        continue;
+      }
+
+      auto it = fvs.begin();
+      while (!should_exit_ && it != fvs.end()) {
+        net::RedisCmdArgsType argv;
+        std::string cmd;
+
+        argv.push_back("HMSET");
+        argv.push_back(key);
+        for (int idx = 0;
+             idx < g_pika_conf->sync_batch_num() && !should_exit_ && it != fvs.end();
+             idx++, it++) {
+          argv.push_back(it->field);
+          argv.push_back(it->value);
+        }
+
+        net::SerializeRedisCommand(argv, &cmd);
+        PlusNum();
+        DispatchKey(cmd, key);
+      }
+
+      ttl = -1;
+      timestamp = storage_->TTL(key);
+      if (timestamp != -2) {
+        ttl = timestamp;
+      }
+
+      if (s.ok() && ttl > 0) {
+        net::RedisCmdArgsType argv;
+        std::string cmd;
+
+        argv.push_back("EXPIRE");
+        argv.push_back(key);
+        argv.push_back(std::to_string(ttl));
+
+        net::SerializeRedisCommand(argv, &cmd);
+        PlusNum();
+        DispatchKey(cmd, key);
+      }
+    }
+
+    if (!cursor) {
+      break;
+    }
+  }
+}
+
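MigrateHashesDB rebuilds each hash in bounded chunks: every HMSET carries at most sync-batch-num field/value pairs, so one huge key never turns into one oversized command. The same chunking, as a self-contained sketch (names and the pair representation are illustrative, not the patch's storage::FieldValue):

```cpp
#include <string>
#include <utility>
#include <vector>

// Split a hash's field/value pairs into HMSET argvs of at most `batch` pairs.
std::vector<std::vector<std::string>> BuildHmsetBatches(
    const std::string& key,
    const std::vector<std::pair<std::string, std::string>>& fvs,
    size_t batch) {
  std::vector<std::vector<std::string>> cmds;
  for (size_t i = 0; i < fvs.size(); i += batch) {
    std::vector<std::string> argv = {"HMSET", key};
    for (size_t j = i; j < fvs.size() && j < i + batch; ++j) {
      argv.push_back(fvs[j].first);   // field
      argv.push_back(fvs[j].second);  // value
    }
    cmds.push_back(std::move(argv));
  }
  return cmds;
}
```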
+void MigratorThread::MigrateSetsDB() {
+  int64_t scan_batch_num = g_pika_conf->sync_batch_num() * 10;
+  if (MAX_BATCH_NUM < scan_batch_num) {
+    if (g_pika_conf->sync_batch_num() < MAX_BATCH_NUM) {
+      scan_batch_num = MAX_BATCH_NUM;
+    } else {
+      scan_batch_num = g_pika_conf->sync_batch_num() * 2;
+    }
+  }
+
+  int64_t ttl = -1;
+  int64_t cursor = 0;
+  storage::Status s;
+  std::vector<std::string> keys;
+  int64_t timestamp;
+
+  while (true) {
+    cursor = storage_->Scan(storage::DataType::kSets, cursor, "*", scan_batch_num, &keys);
+
+    for (const auto& key : keys) {
+      std::vector<std::string> members;
+      storage::Status s = storage_->SMembers(key, &members);
+      if (!s.ok()) {
+        LOG(WARNING) << "db->SMembers(key:" << key << ") = " << s.ToString();
+        continue;
+      }
+      auto it = members.begin();
+      while (!should_exit_ && it != members.end()) {
+        std::string cmd;
+        net::RedisCmdArgsType argv;
+
+        argv.push_back("SADD");
+        argv.push_back(key);
+        for (int idx = 0;
+             idx < g_pika_conf->sync_batch_num() && !should_exit_ && it != members.end();
+             idx++, it++) {
+          argv.push_back(*it);
+        }
+
+        net::SerializeRedisCommand(argv, &cmd);
+        PlusNum();
+        DispatchKey(cmd, key);
+      }
+
+      ttl = -1;
+      timestamp = storage_->TTL(key);
+      if (timestamp != -2) {
+        ttl = timestamp;
+      }
+
+      if (s.ok() && ttl > 0) {
+        net::RedisCmdArgsType argv;
+        std::string cmd;
+
+        argv.push_back("EXPIRE");
+        argv.push_back(key);
+        argv.push_back(std::to_string(ttl));
+
+        net::SerializeRedisCommand(argv, &cmd);
+        PlusNum();
+        DispatchKey(cmd, key);
+      }
+    }
+
+    if (!cursor) {
+      break;
+    }
+  }
+}
+
+void MigratorThread::MigrateZsetsDB() {
+  int64_t scan_batch_num = g_pika_conf->sync_batch_num() * 10;
+  if (MAX_BATCH_NUM < scan_batch_num) {
+    if (g_pika_conf->sync_batch_num() < MAX_BATCH_NUM) {
+      scan_batch_num = MAX_BATCH_NUM;
+    } else {
+      scan_batch_num = g_pika_conf->sync_batch_num() * 2;
+    }
+  }
+
+  int64_t ttl = -1;
+  int64_t cursor = 0;
+  storage::Status s;
+  std::vector<std::string> keys;
+  int64_t timestamp;
+
+  while (true) {
+    cursor = storage_->Scan(storage::DataType::kZSets, cursor, "*", scan_batch_num, &keys);
+
+    for (const auto& key : keys) {
+      std::vector<storage::ScoreMember> score_members;
+      storage::Status s = storage_->ZRange(key, 0, -1, &score_members);
+      if (!s.ok()) {
+        LOG(WARNING) << "db->ZRange(key:" << key << ") = " << s.ToString();
+        continue;
+      }
+      auto it = score_members.begin();
+      while (!should_exit_ && it != score_members.end()) {
+        net::RedisCmdArgsType argv;
+        std::string cmd;
+
+        argv.push_back("ZADD");
+        argv.push_back(key);
+        for (int idx = 0;
+             idx < g_pika_conf->sync_batch_num() && !should_exit_ && it != score_members.end();
+             idx++, it++) {
+          argv.push_back(std::to_string(it->score));
+          argv.push_back(it->member);
+        }
+
+        net::SerializeRedisCommand(argv, &cmd);
+        PlusNum();
+        DispatchKey(cmd, key);
+      }
+
+      ttl = -1;
+      timestamp = storage_->TTL(key);
+      if (timestamp != -2) {
+        ttl = timestamp;
+      }
+
+      if (s.ok() && ttl > 0) {
+        net::RedisCmdArgsType argv;
+        std::string cmd;
+
+        argv.push_back("EXPIRE");
+        argv.push_back(key);
+        argv.push_back(std::to_string(ttl));
+
+        net::SerializeRedisCommand(argv, &cmd);
+        PlusNum();
+        DispatchKey(cmd, key);
+      }
+    }
+
+    if (!cursor) {
+      break;
+    }
+  }
+}
+
+void MigratorThread::MigrateDB() {
+  switch (int(type_)) {
+    case int(storage::DataType::kStrings) : {
+      MigrateStringsDB();
+      break;
+    }
+
+    case int(storage::DataType::kLists) : {
+      MigrateListsDB();
+      break;
+    }
+
+    case int(storage::DataType::kHashes) : {
+      MigrateHashesDB();
+      break;
+    }
+
+    case int(storage::DataType::kSets) : {
+      MigrateSetsDB();
+      break;
+    }
+
+    case int(storage::DataType::kZSets) : {
+      MigrateZsetsDB();
+      break;
+    }
+
+    default: {
+      LOG(WARNING) << "illegal db type " << type_;
+      break;
+    }
+  }
+}
+
+void MigratorThread::DispatchKey(const std::string &command, const std::string& key) {
+  thread_index_ = (thread_index_ + 1) % thread_num_;
+  size_t idx = thread_index_;
+  if (key.size()) {  // not empty: route keyed commands by hash for stable per-key ordering
+    idx = std::hash<std::string>()(key) % thread_num_;
+  }
+  (*senders_)[idx]->LoadKey(command);
+}
+
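Every Migrate*DB routine derives its scan batch size the same way: start at ten times sync-batch-num, then clamp. Extracted as a pure function with worked values (the function name is illustrative): sync_batch_num = 100 gives 1000; 5000 would give 50000, which clamps to 30000; 40000 would give 400000, which falls back to 80000.

```cpp
#include <cstdint>

// The scan-batch clamp used by the Migrate*DB routines above.
int64_t ComputeScanBatch(int64_t sync_batch_num, int64_t max_batch_num = 30000) {
  int64_t scan_batch = sync_batch_num * 10;          // default: 10x the send batch
  if (scan_batch > max_batch_num) {
    scan_batch = (sync_batch_num < max_batch_num)
                     ? max_batch_num                 // usual cap
                     : sync_batch_num * 2;           // huge configs: 2x fallback
  }
  return scan_batch;
}
```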
+const char* GetDBTypeString(int type) {
+  switch (type) {
+    case int(storage::DataType::kStrings) : {
+      return "storage::DataType::kStrings";
+    }
+
+    case int(storage::DataType::kLists) : {
+      return "storage::DataType::kLists";
+    }
+
+    case int(storage::DataType::kHashes) : {
+      return "storage::DataType::kHashes";
+    }
+
+    case int(storage::DataType::kSets) : {
+      return "storage::DataType::kSets";
+    }
+
+    case int(storage::DataType::kZSets) : {
+      return "storage::DataType::kZSets";
+    }
+
+    default: {
+      return "storage::Unknown";
+    }
+  }
+}
+
+void *MigratorThread::ThreadMain() {
+  MigrateDB();
+  should_exit_ = true;
+  LOG(INFO) << GetDBTypeString(type_) << " keys have been dispatched completely";
+  return NULL;
+}
+
diff --git a/tools/pika_migrate/src/pika_conf.cc b/tools/pika_migrate/src/pika_conf.cc
index 60c68b3617..00a6793699 100644
--- a/tools/pika_migrate/src/pika_conf.cc
+++ b/tools/pika_migrate/src/pika_conf.cc
@@ -708,6 +708,22 @@ int PikaConf::Load() {
     sync_window_size_.store(tmp_sync_window_size);
   }
 
+  // redis-migrate config args
+  target_redis_host_ = "127.0.0.1";
+  GetConfStr("target-redis-host", &target_redis_host_);
+
+  target_redis_port_ = 6379;
+  GetConfInt("target-redis-port", &target_redis_port_);
+
+  target_redis_pwd_ = "";
+  GetConfStr("target-redis-pwd", &target_redis_pwd_);
+
+  sync_batch_num_ = 100;
+  GetConfInt("sync-batch-num", &sync_batch_num_);
+
+  redis_sender_num_ = 8;
+  GetConfInt("redis-sender-num", &redis_sender_num_);
+
   // max conn rbuf size
   int tmp_max_conn_rbuf_size = PIKA_MAX_CONN_RBUF;
   GetConfIntHuman("max-conn-rbuf-size", &tmp_max_conn_rbuf_size);
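The load pattern above seeds each field with a default and lets GetConfStr/GetConfInt overwrite it only when the key is present in pika.conf. One consequence worth noting: the shipped sample conf sets redis-sender-num to 10, while the in-code fallback is 8. A self-contained sketch of the pattern (GetConfIntStub and the map stand in for pstd::BaseConf; both are hypothetical):

```cpp
#include <map>
#include <string>

static std::map<std::string, int> g_conf = {{"redis-sender-num", 10}};

// Stand-in for pstd::BaseConf::GetConfInt: leaves *value untouched if absent.
bool GetConfIntStub(const std::string& key, int* value) {
  auto it = g_conf.find(key);
  if (it == g_conf.end()) return false;  // key absent: caller keeps its default
  *value = it->second;
  return true;
}

int LoadRedisSenderNum() {
  int redis_sender_num = 8;                              // default when unset
  GetConfIntStub("redis-sender-num", &redis_sender_num); // override if present
  return redis_sender_num;                               // 10 with the sample conf
}
```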
diff --git a/tools/pika_migrate/src/pika_db.cc b/tools/pika_migrate/src/pika_db.cc
index f3d52fdec3..6830b63571 100644
--- a/tools/pika_migrate/src/pika_db.cc
+++ b/tools/pika_migrate/src/pika_db.cc
@@ -509,6 +509,9 @@ bool DB::TryUpdateMasterOffset() {
             << ", master_ip: " << master_ip << ", master_port: " << master_port
             << ", filenum: " << filenum << ", offset: " << offset << ", term: " << term << ", index: " << index;
 
+  // Retransmit the received full-sync data to the target redis
+  g_pika_server->RetransmitData(dbsync_path_);
+
   pstd::DeleteFile(info_path);
   if (!ChangeDb(dbsync_path_)) {
     LOG(WARNING) << "DB: " << db_name_ << ", Failed to change db";
diff --git a/tools/pika_migrate/src/pika_repl_bgworker.cc b/tools/pika_migrate/src/pika_repl_bgworker.cc
index 1e12ffdf0a..dc6724dcc8 100644
--- a/tools/pika_migrate/src/pika_repl_bgworker.cc
+++ b/tools/pika_migrate/src/pika_repl_bgworker.cc
@@ -229,6 +229,7 @@ void PikaReplBgWorker::WriteDBInSyncWay(const std::shared_ptr<Cmd>& c_ptr) {
       && PIKA_CACHE_NONE != g_pika_conf->cache_mode()
       && c_ptr->GetDB()->cache()->CacheStatus() == PIKA_CACHE_STATUS_OK) {
     if (c_ptr->is_write()) {
+      ParseAndSendPikaCommand(c_ptr);
       c_ptr->DoThroughDB();
       if (c_ptr->IsNeedUpdateCache()) {
         c_ptr->DoUpdateCache();
@@ -237,6 +238,7 @@ void PikaReplBgWorker::WriteDBInSyncWay(const std::shared_ptr<Cmd>& c_ptr) {
       LOG(WARNING) << "It is impossbile to reach here";
     }
   } else {
+    ParseAndSendPikaCommand(c_ptr);
     c_ptr->Do();
   }
   if (!c_ptr->IsSuspend()) {
@@ -272,3 +274,33 @@ void PikaReplBgWorker::WriteDBInSyncWay(const std::shared_ptr<Cmd>& c_ptr) {
     }
   }
 }
+
+void PikaReplBgWorker::ParseAndSendPikaCommand(const std::shared_ptr<Cmd>& c_ptr) {
+  const PikaCmdArgsType& argv = c_ptr->argv();
+  if (!strcasecmp(argv[0].data(), "pksetexat")) {
+    if (argv.size() != 4) {
+      LOG(WARNING) << "find invalid command, command size: " << argv.size();
+      return;
+    } else {
+      std::string key = argv[1];
+      int timestamp = std::atoi(argv[2].data());
+      std::string value = argv[3];
+
+      int seconds = timestamp - time(NULL);
+      PikaCmdArgsType tmp_argv;
+      tmp_argv.push_back("setex");
+      tmp_argv.push_back(key);
+      tmp_argv.push_back(std::to_string(seconds));
+      tmp_argv.push_back(value);
+
+      std::string command;
+      net::SerializeRedisCommand(tmp_argv, &command);
+      g_pika_server->SendRedisCommand(command, key);
+    }
+  } else {
+    std::string key = argv.size() >= 2 ? argv[1] : argv[0];
+    std::string command;
+    net::SerializeRedisCommand(argv, &command);
+    g_pika_server->SendRedisCommand(command, key);
+  }
+}
\ No newline at end of file
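ParseAndSendPikaCommand forwards most write commands verbatim, but Pika's PKSETEXAT carries an absolute Unix-timestamp expiry that Redis does not understand, so it is rewritten as SETEX with a relative TTL. A worked example as a pure function (RewritePkSetexAt is an illustrative name): at now = 1700000000, "PKSETEXAT k 1700000060 v" becomes "SETEX k 60 v". Note that a timestamp already in the past yields a non-positive TTL, which SETEX rejects; the patch does not special-case this.

```cpp
#include <cstdlib>
#include <ctime>
#include <string>
#include <vector>

// argv = {"pksetexat", key, unix_timestamp, value}
std::vector<std::string> RewritePkSetexAt(const std::vector<std::string>& argv,
                                          time_t now) {
  long seconds = std::atol(argv[2].c_str()) - static_cast<long>(now);
  return {"setex", argv[1], std::to_string(seconds), argv[3]};
}
```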
diff --git a/tools/pika_migrate/src/pika_sender.cc b/tools/pika_migrate/src/pika_sender.cc
new file mode 100644
index 0000000000..4936eaf692
--- /dev/null
+++ b/tools/pika_migrate/src/pika_sender.cc
@@ -0,0 +1,168 @@
+// Copyright (c) 2015-present, Qihoo, Inc. All rights reserved.
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree. An additional grant
+// of patent rights can be found in the PATENTS file in the same directory.
+
+#include "include/pika_sender.h"
+
+#include <glog/logging.h>
+
+PikaSender::PikaSender(std::string ip, int64_t port, std::string password):
+  cli_(NULL),
+  ip_(ip),
+  port_(port),
+  password_(password),
+  should_exit_(false),
+  elements_(0) {
+}
+
+PikaSender::~PikaSender() {
+}
+
+int PikaSender::QueueSize() {
+  std::lock_guard lock(keys_queue_mutex_);
+  return keys_queue_.size();
+}
+
+void PikaSender::Stop() {
+  should_exit_.store(true);
+  wsignal_.notify_all();
+  rsignal_.notify_all();
+}
+
+void PikaSender::ConnectRedis() {
+  while (cli_ == NULL) {
+    // Connect to redis
+    cli_ = net::NewRedisCli();
+    cli_->set_connect_timeout(1000);
+    pstd::Status s = cli_->Connect(ip_, port_);
+    if (!s.ok()) {
+      delete cli_;
+      cli_ = NULL;
+      LOG(WARNING) << "Can not connect to " << ip_ << ":" << port_ << ", status: " << s.ToString();
+      continue;
+    } else {
+      // Connect success
+
+      // Authentication
+      if (!password_.empty()) {
+        net::RedisCmdArgsType argv, resp;
+        std::string cmd;
+
+        argv.push_back("AUTH");
+        argv.push_back(password_);
+        net::SerializeRedisCommand(argv, &cmd);
+        pstd::Status s = cli_->Send(&cmd);
+
+        if (s.ok()) {
+          s = cli_->Recv(&resp);
+          if (resp[0] == "OK") {
+            // Authentication succeeded
+          } else {
+            LOG(FATAL) << "Connect to redis(" << ip_ << ":" << port_ << ") Invalid password";
+            cli_->Close();
+            delete cli_;
+            cli_ = NULL;
+            should_exit_ = true;
+            return;
+          }
+        } else {
+          LOG(WARNING) << "send auth failed: " << s.ToString();
+          cli_->Close();
+          delete cli_;
+          cli_ = NULL;
+          continue;
+        }
+      } else {
+        // No password configured; PING to detect a server that still requires AUTH
+        net::RedisCmdArgsType argv, resp;
+        std::string cmd;
+
+        argv.push_back("PING");
+        net::SerializeRedisCommand(argv, &cmd);
+        pstd::Status s = cli_->Send(&cmd);
+
+        if (s.ok()) {
+          s = cli_->Recv(&resp);
+          if (s.ok()) {
+            if (resp[0] == "NOAUTH Authentication required.") {
+              LOG(FATAL) << "Ping redis(" << ip_ << ":" << port_ << ") NOAUTH Authentication required";
+              cli_->Close();
+              delete cli_;
+              cli_ = NULL;
+              should_exit_ = true;
+              return;
+            }
+          } else {
+            LOG(WARNING) << "Recv failed: " << s.ToString();
+            cli_->Close();
+            delete cli_;
+            cli_ = NULL;
+          }
+        }
+      }
+    }
+  }
+}
+
+void PikaSender::LoadKey(const std::string &key) {
+  std::unique_lock lock(signal_mutex_);
+  wsignal_.wait(lock, [this]() { return keys_queue_.size() < 100000 || should_exit_; });
+  if (!should_exit_) {
+    std::lock_guard queue_lock(keys_queue_mutex_);
+    keys_queue_.push(key);
+    rsignal_.notify_one();
+  }
+}
+
+void PikaSender::SendCommand(std::string &command, const std::string &key) {
+  // Send command
+  pstd::Status s = cli_->Send(&command);
+  if (!s.ok()) {
+    elements_--;
+    LoadKey(key);  // put the command back so it is retried after reconnect
+    cli_->Close();
+    LOG(INFO) << s.ToString();
+    delete cli_;
+    cli_ = NULL;
+    ConnectRedis();
+  } else {
+    cli_->Recv(nullptr);
+  }
+}
+
+void *PikaSender::ThreadMain() {
+  if (cli_ == NULL) {
+    ConnectRedis();
+  }
+
+  while (!should_exit_ || QueueSize() != 0) {
+    std::unique_lock lock(signal_mutex_);
+    rsignal_.wait(lock, [this]() { return QueueSize() != 0 || should_exit_; });
+    if (QueueSize() == 0 && should_exit_) {
+      return NULL;
+    }
+    lock.unlock();
+
+    // The queue holds fully serialized commands, not bare keys
+    std::string command;
+    {
+      std::lock_guard lock(keys_queue_mutex_);
+      command = keys_queue_.front();
+      elements_++;
+      keys_queue_.pop();
+    }
+    wsignal_.notify_one();
+    SendCommand(command, command);
+  }
+
+  if (cli_) {
+    cli_->Close();
+    delete cli_;
+    cli_ = NULL;
+  }
+  return NULL;
+}
+
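PikaSender is driven entirely through the net::Thread lifecycle, which RetransmitData (below) exercises across a whole pool. Reduced to one instance, the usage looks like this; the host, port, and the pre-serialized PING payload are illustrative values, not from the patch:

```cpp
#include <memory>

#include "include/pika_sender.h"  // from this patch

void ExamplePikaSenderLifecycle() {
  auto sender = std::make_shared<PikaSender>("127.0.0.1", 6379, "");
  sender->StartThread();                     // net::Thread entry -> ThreadMain()
  sender->LoadKey("*1\r\n$4\r\nPING\r\n");   // enqueue one serialized command
  sender->Stop();                            // drain-then-exit: queue empties first
  sender->JoinThread();
}
```

Stop() wakes both condition variables, but ThreadMain keeps running until the queue is empty, so nothing loaded before Stop() is dropped.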
diff --git a/tools/pika_migrate/src/pika_server.cc b/tools/pika_migrate/src/pika_server.cc
index 72b16d82f7..a1a79cb751 100644
--- a/tools/pika_migrate/src/pika_server.cc
+++ b/tools/pika_migrate/src/pika_server.cc
@@ -25,6 +25,8 @@
 #include "include/pika_monotonic_time.h"
 #include "include/pika_rm.h"
 #include "include/pika_server.h"
+#include "include/pika_sender.h"
+#include "include/migrator_thread.h"
 
 using pstd::Status;
 extern PikaServer* g_pika_server;
@@ -102,6 +104,14 @@ PikaServer::PikaServer()
     }
   }
 
+  // Create redis senders
+  for (int i = 0; i < g_pika_conf->redis_sender_num(); ++i) {
+    redis_senders_.emplace_back(std::make_unique<RedisSender>(int(i),
+                                                              g_pika_conf->target_redis_host(),
+                                                              g_pika_conf->target_redis_port(),
+                                                              g_pika_conf->target_redis_pwd()));
+  }
+
   acl_ = std::make_unique<::Acl>();
   SetSlowCmdThreadPoolFlag(g_pika_conf->slow_cmd_pool());
   bgsave_thread_.set_thread_name("PikaServer::bgsave_thread_");
@@ -131,6 +141,10 @@ PikaServer::~PikaServer() {
   key_scan_thread_.StopThread();
 
   pika_migrate_thread_->StopThread();
+  for (size_t i = 0; i < redis_senders_.size(); ++i) {
+    redis_senders_[i]->Stop();
+  }
+  redis_senders_.clear();
 
   dbs_.clear();
 
   LOG(INFO) << "PikaServer " << pthread_self() << " exit!!!";
@@ -210,6 +224,15 @@ void PikaServer::Start() {
               << (ret == net::kCreateThreadError ? ": create thread error " : ": other error");
   }
 
+  for (size_t i = 0; i < redis_senders_.size(); ++i) {
+    ret = redis_senders_[i]->StartThread();
+    if (ret != net::kSuccess) {
+      dbs_.clear();
+      LOG(FATAL) << "Start RedisSender Error: " << ret
+                 << (ret == net::kCreateThreadError ? ": create thread error " : ": other error");
+    }
+  }
+
   time(&start_time_s_);
   LOG(INFO) << "Pika Server going to start";
   rsync_server_->Start();
@@ -1562,6 +1585,85 @@ Status PikaServer::GetCmdRouting(std::vector<net::RedisCmdArgsType>& redis_cmds,
   return Status::OK();
 }
 
+int PikaServer::SendRedisCommand(const std::string& command, const std::string& key) {
+  // Route by key hash so commands for one key always go through the same sender
+  size_t idx = std::hash<std::string>()(key) % redis_senders_.size();
+  redis_senders_[idx]->SendRedisCommand(command);
+  return 0;
+}
+
+static bool isFirstRetransmit = true;
+void PikaServer::RetransmitData(const std::string& path) {
+  if (isFirstRetransmit) {
+    isFirstRetransmit = false;
+    LOG(INFO) << "Retransmit data from " << path;
+  } else {
+    LOG(FATAL) << "full DB sync should only be called once";
+  }
+
+  std::shared_ptr<storage::Storage> storage_ = std::make_shared<storage::Storage>();
+  rocksdb::Status s = storage_->Open(g_pika_server->storage_options(), path);
+  if (!s.ok()) {
+    LOG(FATAL) << "open received database error: " << s.ToString();
+    return;
+  }
+
+  // Init SenderThread
+  int thread_num = g_pika_conf->redis_sender_num();
+  std::string target_host = g_pika_conf->target_redis_host();
+  int target_port = g_pika_conf->target_redis_port();
+  std::string target_pwd = g_pika_conf->target_redis_pwd();
+
+  LOG(INFO) << "open received database success, start retransmit data to redis("
+            << target_host << ":" << target_port << ")";
+
+  std::vector<std::shared_ptr<PikaSender>> pika_senders;
+  std::vector<std::shared_ptr<MigratorThread>> migrators;
+
+  for (int i = 0; i < thread_num; i++) {
+    pika_senders.emplace_back(std::make_shared<PikaSender>(target_host, target_port, target_pwd));
+  }
+  migrators.emplace_back(std::make_shared<MigratorThread>(storage_, &pika_senders, int(storage::DataType::kStrings), thread_num));
+  migrators.emplace_back(std::make_shared<MigratorThread>(storage_, &pika_senders, int(storage::DataType::kLists), thread_num));
+  migrators.emplace_back(std::make_shared<MigratorThread>(storage_, &pika_senders, int(storage::DataType::kHashes), thread_num));
+  migrators.emplace_back(std::make_shared<MigratorThread>(storage_, &pika_senders, int(storage::DataType::kSets), thread_num));
+  migrators.emplace_back(std::make_shared<MigratorThread>(storage_, &pika_senders, int(storage::DataType::kZSets), thread_num));
+
+  for (size_t i = 0; i < pika_senders.size(); i++) {
+    pika_senders[i]->StartThread();
+  }
+  for (size_t i = 0; i < migrators.size(); i++) {
+    migrators[i]->StartThread();
+  }
+
+  for (size_t i = 0; i < migrators.size(); i++) {
+    migrators[i]->JoinThread();
+  }
+  for (size_t i = 0; i < pika_senders.size(); i++) {
+    pika_senders[i]->Stop();
+  }
+  for (size_t i = 0; i < pika_senders.size(); i++) {
+    pika_senders[i]->JoinThread();
+  }
+
+  int64_t replies = 0, records = 0;
+  for (size_t i = 0; i < migrators.size(); i++) {
+    records += migrators[i]->num();
+  }
+  migrators.clear();
+  for (size_t i = 0; i < pika_senders.size(); i++) {
+    replies += pika_senders[i]->elements();
+  }
+  pika_senders.clear();
+
+  LOG(INFO) << "=============== Retransmit Finish =====================";
+  LOG(INFO) << "Total records : " << records << " have been scanned";
+  LOG(INFO) << "Total replies : " << replies << " received from redis server";
+  LOG(INFO) << "=======================================================";
+}
+
 void PikaServer::ServerStatus(std::string* info) {
   std::stringstream tmp_stream;
   size_t q_size = ClientProcessorThreadPoolCurQueueSize();
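RetransmitData guards against running twice with a plain static bool, which is fine while only one DB can complete a full sync. If that ever changed, the same one-shot guard can be made race-free with an atomic exchange. This is an alternative sketch, not what the patch does:

```cpp
#include <atomic>

// Returns true exactly once, for the first caller, even under concurrency.
bool ShouldRunRetransmit() {
  static std::atomic<bool> already_ran{false};
  return !already_ran.exchange(true);
}
```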
diff --git a/tools/pika_migrate/src/redis_sender.cc b/tools/pika_migrate/src/redis_sender.cc
new file mode 100644
index 0000000000..dfc90464cb
--- /dev/null
+++ b/tools/pika_migrate/src/redis_sender.cc
@@ -0,0 +1,190 @@
+// Copyright (c) 2015-present, Qihoo, Inc. All rights reserved.
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree. An additional grant
+// of patent rights can be found in the PATENTS file in the same directory.
+
+#include "include/redis_sender.h"
+
+#include <unistd.h>
+#include <ctime>
+
+#include <glog/logging.h>
+
+static time_t kCheckDiff = 1;
+
+RedisSender::RedisSender(int id, std::string ip, int64_t port, std::string password):
+  id_(id),
+  cli_(NULL),
+  ip_(ip),
+  port_(port),
+  password_(password),
+  should_exit_(false),
+  cnt_(0),
+  elements_(0) {
+  last_write_time_ = ::time(NULL);
+}
+
+RedisSender::~RedisSender() {
+  LOG(INFO) << "RedisSender thread " << id_ << " exit!!!";
+}
+
+void RedisSender::ConnectRedis() {
+  while (cli_ == NULL) {
+    // Connect to redis
+    cli_ = std::shared_ptr<net::NetCli>(net::NewRedisCli());
+    cli_->set_connect_timeout(1000);
+    cli_->set_recv_timeout(10000);
+    cli_->set_send_timeout(10000);
+    pstd::Status s = cli_->Connect(ip_, port_);
+    if (!s.ok()) {
+      LOG(WARNING) << "Can not connect to " << ip_ << ":" << port_ << ", status: " << s.ToString();
+      cli_ = NULL;
+      sleep(3);
+      continue;
+    } else {
+      // Connect success
+      // LOG(INFO) << "RedisSender thread " << id_ << " Connect to redis(" << ip_ << ":" << port_ << ") success";
+
+      // Authentication
+      if (!password_.empty()) {
+        net::RedisCmdArgsType argv, resp;
+        std::string cmd;
+
+        argv.push_back("AUTH");
+        argv.push_back(password_);
+        net::SerializeRedisCommand(argv, &cmd);
+        pstd::Status s = cli_->Send(&cmd);
+
+        if (s.ok()) {
+          s = cli_->Recv(&resp);
+          if (resp[0] == "OK") {
+            // Authentication succeeded
+          } else {
+            LOG(FATAL) << "Connect to redis(" << ip_ << ":" << port_ << ") Invalid password";
+            cli_->Close();
+            cli_ = NULL;
+            should_exit_ = true;
+            return;
+          }
+        } else {
+          LOG(WARNING) << "send auth failed: " << s.ToString();
+          cli_->Close();
+          cli_ = NULL;
+          continue;
+        }
+      } else {
+        // No password configured; PING to detect a server that still requires AUTH
+        net::RedisCmdArgsType argv, resp;
+        std::string cmd;
+
+        argv.push_back("PING");
+        net::SerializeRedisCommand(argv, &cmd);
+        pstd::Status s = cli_->Send(&cmd);
+
+        if (s.ok()) {
+          s = cli_->Recv(&resp);
+          if (s.ok()) {
+            if (resp[0] == "NOAUTH Authentication required.") {
+              LOG(FATAL) << "Ping redis(" << ip_ << ":" << port_ << ") NOAUTH Authentication required";
+              cli_->Close();
+              cli_ = NULL;
+              should_exit_ = true;
+              return;
+            }
+          } else {
+            LOG(WARNING) << s.ToString();
+            cli_->Close();
+            cli_ = NULL;
+          }
+        }
+      }
+    }
+  }
+}
+
+void RedisSender::Stop() {
+  set_should_stop();
+  should_exit_ = true;
+  rsignal_.notify_all();
+  wsignal_.notify_all();
+}
+
+void RedisSender::SendRedisCommand(const std::string &command) {
+  std::unique_lock lock(signal_mutex_);
+  wsignal_.wait(lock, [this]() { return commandQueueSize() < 100000 || should_exit_; });
+  if (!should_exit_) {
+    std::lock_guard l(keys_mutex_);
+    commands_queue_.push(command);
+    rsignal_.notify_one();
+  }
+}
+
+int RedisSender::SendCommand(std::string &command) {
+  time_t now = ::time(NULL);
+  if (kCheckDiff < now - last_write_time_) {
+    int ret = cli_->CheckAliveness();
+    if (ret < 0) {
+      cli_ = nullptr;
+      ConnectRedis();
+    }
+    last_write_time_ = now;
+  }
+
+  // Retry the send a bounded number of times, reconnecting in between
+  int idx = 0;
+  do {
+    pstd::Status s = cli_->Send(&command);
+    if (s.ok()) {
+      cli_->Recv(nullptr);
+      return 0;
+    }
+
+    cli_->Close();
+    cli_ = NULL;
+    ConnectRedis();
+  } while (++idx < 3);
+  LOG(WARNING) << "RedisSender " << id_ << " failed to send redis command " << command << ", retries: " << idx;
+  return -1;
+}
+
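SendCommand reuses one long-lived connection but probes it before writing if it has been idle for more than kCheckDiff seconds. The timing rule on its own, as a pure function (the name is illustrative; in the patch the probe itself is cli_->CheckAliveness() followed by ConnectRedis() on failure):

```cpp
#include <ctime>

// True when the socket has been idle long enough to warrant a probe,
// e.g. idle 2s with check_diff 1s -> probe before the next write.
bool NeedsAlivenessCheck(time_t now, time_t last_write_time, time_t check_diff = 1) {
  return (now - last_write_time) > check_diff;
}
```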
<< "Start redis sender " << id_ << " thread..."; + // sleep(15); + int ret = 0; + + ConnectRedis(); + + while (!should_exit_) { + std::unique_lock lock(signal_mutex_); + while (commandQueueSize() == 0 && !should_exit_) { + rsignal_.wait_for(lock, std::chrono::milliseconds(100)); + } + + if (should_exit_) { + break; + } + + if (commandQueueSize() == 0) { + continue; + } + + // get redis command + std::string command; + { + std::lock_guard l(keys_mutex_); + command = commands_queue_.front(); + elements_++; + commands_queue_.pop(); + } + + wsignal_.notify_one(); + ret = SendCommand(command); + + } + + LOG(INFO) << "RedisSender thread " << id_ << " complete"; + cli_ = NULL; + return NULL; +} + From 63a374613a0be6d7c40ec5b5e9445db629ace306 Mon Sep 17 00:00:00 2001 From: wuyun Date: Thu, 9 Jan 2025 09:02:49 +0000 Subject: [PATCH 4/4] fix: pika-migrate don't support Stream type --- tools/pika_migrate/include/migrator_thread.h | 1 + tools/pika_migrate/src/migrator_thread.cc | 99 +++++++++++++++++++- tools/pika_migrate/src/pika_server.cc | 1 + 3 files changed, 96 insertions(+), 5 deletions(-) diff --git a/tools/pika_migrate/include/migrator_thread.h b/tools/pika_migrate/include/migrator_thread.h index 42676d3442..6be816a4a4 100644 --- a/tools/pika_migrate/include/migrator_thread.h +++ b/tools/pika_migrate/include/migrator_thread.h @@ -46,6 +46,7 @@ class MigratorThread : public net::Thread { void MigrateHashesDB(); void MigrateSetsDB(); void MigrateZsetsDB(); + void MigrateStreamsDB(); virtual void *ThreadMain(); diff --git a/tools/pika_migrate/src/migrator_thread.cc b/tools/pika_migrate/src/migrator_thread.cc index 093db6ef3a..ba41515f5f 100644 --- a/tools/pika_migrate/src/migrator_thread.cc +++ b/tools/pika_migrate/src/migrator_thread.cc @@ -372,6 +372,87 @@ void MigratorThread::MigrateZsetsDB() { } } +void MigratorThread::MigrateStreamsDB() { + int64_t scan_batch_num = g_pika_conf->sync_batch_num() * 10; + if (MAX_BATCH_NUM < scan_batch_num) { + if (g_pika_conf->sync_batch_num() < MAX_BATCH_NUM) { + scan_batch_num = MAX_BATCH_NUM; + } else { + scan_batch_num = g_pika_conf->sync_batch_num() * 2; + } + } + + int64_t ttl = -1; + int64_t cursor = 0; + storage::Status s; + std::vector keys; + int64_t timestamp; + + while (true) { + cursor = storage_->Scan(storage::DataType::kStreams, cursor, "*", scan_batch_num, &keys); + + for (const auto& key : keys) { + std::vector id_message; + storage::StreamScanArgs arg; + storage::StreamUtils::StreamParseIntervalId("-", arg.start_sid, &arg.start_ex, 0); + storage::StreamUtils::StreamParseIntervalId("+", arg.end_sid, &arg.end_ex, UINT64_MAX); + + storage::Status s = storage_->XRange(key, arg, id_message); + if (!s.ok()) { + LOG(WARNING) << "db->XRange(key:" << key << ") = " << s.ToString(); + continue; + } + auto it = id_message.begin(); + while (!should_exit_ && it != id_message.end()) { + net::RedisCmdArgsType argv; + std::string cmd; + + argv.push_back("XADD"); + argv.push_back(key); + for (int idx = 0; + idx < g_pika_conf->sync_batch_num() && !should_exit_ && it != id_message.end(); + idx++, it++) { + std::vector message; + storage::StreamUtils::DeserializeMessage(it->value, message); + storage::streamID sid; + sid.DeserializeFrom(it->field); + argv.push_back(sid.ToString()); + for (const auto& m : message) { + argv.push_back(m); + } + } + + net::SerializeRedisCommand(argv, &cmd); + PlusNum(); + DispatchKey(cmd, key); + } + + ttl = -1; + timestamp = storage_->TTL(key); + if (timestamp != -2) { + ttl = timestamp; + } + + if (s.ok() && ttl > 0) { + 
diff --git a/tools/pika_migrate/src/migrator_thread.cc b/tools/pika_migrate/src/migrator_thread.cc
index 093db6ef3a..ba41515f5f 100644
--- a/tools/pika_migrate/src/migrator_thread.cc
+++ b/tools/pika_migrate/src/migrator_thread.cc
@@ -372,6 +372,87 @@ void MigratorThread::MigrateZsetsDB() {
   }
 }
 
+void MigratorThread::MigrateStreamsDB() {
+  int64_t scan_batch_num = g_pika_conf->sync_batch_num() * 10;
+  if (MAX_BATCH_NUM < scan_batch_num) {
+    if (g_pika_conf->sync_batch_num() < MAX_BATCH_NUM) {
+      scan_batch_num = MAX_BATCH_NUM;
+    } else {
+      scan_batch_num = g_pika_conf->sync_batch_num() * 2;
+    }
+  }
+
+  int64_t ttl = -1;
+  int64_t cursor = 0;
+  storage::Status s;
+  std::vector<std::string> keys;
+  int64_t timestamp;
+
+  while (true) {
+    cursor = storage_->Scan(storage::DataType::kStreams, cursor, "*", scan_batch_num, &keys);
+
+    for (const auto& key : keys) {
+      std::vector<storage::IdMessage> id_message;
+      storage::StreamScanArgs arg;
+      storage::StreamUtils::StreamParseIntervalId("-", arg.start_sid, &arg.start_ex, 0);
+      storage::StreamUtils::StreamParseIntervalId("+", arg.end_sid, &arg.end_ex, UINT64_MAX);
+
+      storage::Status s = storage_->XRange(key, arg, id_message);
+      if (!s.ok()) {
+        LOG(WARNING) << "db->XRange(key:" << key << ") = " << s.ToString();
+        continue;
+      }
+      auto it = id_message.begin();
+      while (!should_exit_ && it != id_message.end()) {
+        net::RedisCmdArgsType argv;
+        std::string cmd;
+
+        // XADD takes a single entry ID per command, so emit one XADD per entry
+        std::vector<std::string> message;
+        storage::StreamUtils::DeserializeMessage(it->value, message);
+        storage::streamID sid;
+        sid.DeserializeFrom(it->field);
+
+        argv.push_back("XADD");
+        argv.push_back(key);
+        argv.push_back(sid.ToString());
+        for (const auto& m : message) {
+          argv.push_back(m);
+        }
+        ++it;
+
+        net::SerializeRedisCommand(argv, &cmd);
+        PlusNum();
+        DispatchKey(cmd, key);
+      }
+
+      ttl = -1;
+      timestamp = storage_->TTL(key);
+      if (timestamp != -2) {
+        ttl = timestamp;
+      }
+
+      if (s.ok() && ttl > 0) {
+        net::RedisCmdArgsType argv;
+        std::string cmd;
+
+        argv.push_back("EXPIRE");
+        argv.push_back(key);
+        argv.push_back(std::to_string(ttl));
+
+        net::SerializeRedisCommand(argv, &cmd);
+        PlusNum();
+        DispatchKey(cmd, key);
+      }
+    }
+
+    if (!cursor) {
+      break;
+    }
+  }
+}
+
 void MigratorThread::MigrateDB() {
   switch (int(type_)) {
     case int(storage::DataType::kStrings) : {
@@ -399,6 +480,10 @@ void MigratorThread::MigrateDB() {
       break;
     }
 
+    case int(storage::DataType::kStreams) : {
+      MigrateStreamsDB();
+      break;
+    }
     default: {
       LOG(WARNING) << "illegal db type " << type_;
       break;
@@ -418,23 +503,27 @@ void MigratorThread::DispatchKey(const std::string &command, const std::string&
 const char* GetDBTypeString(int type) {
   switch (type) {
     case int(storage::DataType::kStrings) : {
-      return "storage::DataType::kStrings";
+      return "storage::kStrings";
     }
 
     case int(storage::DataType::kLists) : {
-      return "storage::DataType::kLists";
+      return "storage::kLists";
     }
 
     case int(storage::DataType::kHashes) : {
-      return "storage::DataType::kHashes";
+      return "storage::kHashes";
     }
 
     case int(storage::DataType::kSets) : {
-      return "storage::DataType::kSets";
+      return "storage::kSets";
    }
 
     case int(storage::DataType::kZSets) : {
-      return "storage::DataType::kZSets";
+      return "storage::kZSets";
+    }
+
+    case int(storage::DataType::kStreams) : {
+      return "storage::kStreams";
     }
 
     default: {
diff --git a/tools/pika_migrate/src/pika_server.cc b/tools/pika_migrate/src/pika_server.cc
index a1a79cb751..6659eda421 100644
--- a/tools/pika_migrate/src/pika_server.cc
+++ b/tools/pika_migrate/src/pika_server.cc
@@ -1630,6 +1630,7 @@ void PikaServer::RetransmitData(const std::string& path) {
   migrators.emplace_back(std::make_shared<MigratorThread>(storage_, &pika_senders, int(storage::DataType::kHashes), thread_num));
   migrators.emplace_back(std::make_shared<MigratorThread>(storage_, &pika_senders, int(storage::DataType::kSets), thread_num));
   migrators.emplace_back(std::make_shared<MigratorThread>(storage_, &pika_senders, int(storage::DataType::kZSets), thread_num));
+  migrators.emplace_back(std::make_shared<MigratorThread>(storage_, &pika_senders, int(storage::DataType::kStreams), thread_num));
 
   for (size_t i = 0; i < pika_senders.size(); i++) {
     pika_senders[i]->StartThread();